diff --git "a/4601.jsonl" "b/4601.jsonl" new file mode 100644--- /dev/null +++ "b/4601.jsonl" @@ -0,0 +1,188 @@ +{"seq_id":"479654863","text":"# -*- coding: utf-8 -*-\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport re\n\n\n# 截取带u'/'的导航点\ndef cut_slash(x):\n if u'/' in x:\n return x[0:x.index(u'/')]\n else:\n return x\n\n\ndef find_pair(x, traj):\n pair = []\n for idx in range(len(x)):\n if x[idx] == traj:\n pair.append(x[idx - 1])\n pair.append(x[idx + 1])\n return pair\n\n\ndef str_match(str):\n pattern = re.compile(r'[^P]\\d{1,3}$')\n result = re.match(pattern, str)\n if result:\n return True\n else:\n return False\n\n\nraw_data = [u'SHZ', u'B221', u'DST', u'R596', u'SULEM/N0447F280', u'R596', u'BERBA', u'B576', u'APU', u'B1', u'ANLOT',\n u'W4', u'TNN', u'SN2F']\nfilter_data = []\n# 截取带u'/'字符的导航点\nfor ix in range(len(raw_data)):\n filter_data.append(cut_slash(raw_data[ix]))\n\n# 将航路的名称存入列表\nfpl = []\nfor selected in filter_data:\n if str_match(selected):\n fpl.append(selected)\n# print (fpl)\n\n# 搜寻航路两端的导航点组合(此处疑似存在bug)\noder = {}\nfor waypoint_name in fpl:\n ls = find_pair(filter_data, waypoint_name)\n oder.update({waypoint_name: ls})\n# print(oder)\ndf = pd.read_csv('/Users/lwb/Downloads/fms_t_waypoint_split.csv', encoding='GBK')\npoints_all = pd.read_csv('/Users/lwb/Downloads/fpl_waypoints.csv', encoding='GBK')\nordered_line = []\nfor key in oder:\n name = df[df.airWay == key]\n name = name.set_index('seq')\n # 导航点组合,两端导航点在官方航路数据中的索引位置。\n point_begin = name[name.point == oder[key][0]].index.tolist()[0]\n point_end = name[name.point == oder[key][-1]].index.tolist()[0]\n # (如ix=3,ix=6,[3:6])将中间索引为ix=4,5的导航点数据补充\n airway = []\n if point_begin > point_end:\n for item in range(point_end, point_begin + 1):\n airway.append(name.ix[item].point)\n airway.reverse()\n else:\n for item in range(point_begin, point_end + 1):\n airway.append(name.ix[item].point)\n print(airway)\n # 导航点去重\n for character in airway:\n if character in ordered_line:\n continue\n else:\n ordered_line.append(character)\nprint(ordered_line)\n\n# 绘制折线图\nlon_list = []\nlat_list = []\nfor points in ordered_line:\n lon_list.append(points_all[points_all.point == points].lon.tolist()[0])\n lat_list.append(points_all[points_all.point == points].lat.tolist()[0])\nplt.plot(lon_list, lat_list, color='b', linewidth=2, alpha=0.6)\n# 绘制散点图\nplt.scatter(lon_list, lat_list, c='red', alpha=1, marker='o', label='A593')\nfor i in range(len(ordered_line)):\n plt.text(lon_list[i], lat_list[i], str(ordered_line[i]), ha='left', style='italic', va='bottom', family='serif',\n fontsize=5)\n plt.text(lon_list[i], lat_list[i], str(i + 1), ha='right', style='italic', va='top', family='serif',\n fontsize=5)\nplt.xlabel(\"longitude (degree)\") # X轴标签\nplt.ylabel(\"latitude (degree)\") # Y轴标签\nplt.title(\"Navigation points for flights\") # 标题\npath = '/Users/lwb/Downloads/test.png'\nplt.savefig(path, dpi=600)\n","sub_path":"src/fpl/fms_fpl.py","file_name":"fms_fpl.py","file_ext":"py","file_size_in_byte":3170,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"547486956","text":"import os\nimport torch\nfrom torch.optim import *\nimport torchvision\nfrom torchvision.transforms import *\nimport torch.nn as nn\nfrom torch.autograd import Variable\nfrom torch.utils.data import Dataset, DataLoader\n\nimport numpy as np\nimport json\nimport argparse\nimport csv\nfrom model import AVENet\nfrom datasets import GetAudioVideoDataset\nfrom utils import AverageMeter, accuracy 
\nimport cv2\nfrom sklearn.metrics import auc\nimport xml.etree.ElementTree as ET\nimport pdb\nfrom PIL import Image\ndef get_arguments():\n parser = argparse.ArgumentParser()\n parser.add_argument('--data_path', default='',type=str,help='Root directory path of data')\n parser.add_argument('--image_size',default=224,type=int,help='Height and width of inputs')\n parser.add_argument('--gt_path',default='',type=str)\n parser.add_argument('--summaries_dir',default='',type=str,help='Model path')\n parser.add_argument('--test',default='',type=str,help='test csv files')\n parser.add_argument('--batch_size', default=1, type=int, help='Batch Size')\n parser.add_argument('--epsilon', default=0.65, type=float, help='pos')\n parser.add_argument('--epsilon2', default=0.4, type=float, help='neg')\n parser.add_argument('--tri_map',action='store_true')\n parser.set_defaults(tri_map=True)\n parser.add_argument('--Neg',action='store_true')\n parser.set_defaults(Neg=True)\n\n return parser.parse_args() \n\nclass Evaluator():\n\n def __init__(self):\n super(Evaluator, self).__init__()\n self.ciou = []\n\n def cal_CIOU(self, infer, gtmap, thres=0.01):\n # pdb.set_trace()\n # gtmap = np.reshape(gtmap,[256, 256])\n # infer = np.reshape(infer,[256, 256])\n infer_map = np.zeros((224, 224))\n # infer_map = np.zeros((20,20))\n infer_map[infer>=thres] = 1\n ciou = np.sum(infer_map*gtmap) / (np.sum(gtmap)+np.sum(infer_map*(gtmap==0)))\n self.ciou.append(ciou)\n return ciou, np.sum(infer_map*gtmap),(np.sum(gtmap)+np.sum(infer_map*(gtmap==0)))\n\n\n def cal_AUC(self):\n results = []\n for i in range(21):\n result = np.sum(np.array(self.ciou)>=0.05*i)\n result = result / len(self.ciou)\n results.append(result)\n x = [0.05*i for i in range(21)]\n auc = sklearn.metrics.auc(x, results)\n print(results)\n return auc\n\n def final(self):\n ciou = np.mean(np.array(self.ciou)>=0.5)\n return ciou\n\n def clear(self):\n self.ciou = []\n\n\ndef normalize_img(value, vmax=None, vmin=None):\n vmin = value.min() if vmin is None else vmin\n vmax = value.max() if vmax is None else vmax\n if not (vmax - vmin) == 0:\n value = (value - vmin) / (vmax - vmin) # vmin..vmax\n\n return value\n\ndef main():\n args = get_arguments()\n\n os.environ[\"CUDA_VISIBLE_DEVICES\"]=\"0\"\n model= AVENet(args) \n device = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n model = nn.DataParallel(model)\n model = model.cuda()\n \n print('load pretrained model.')\n checkpoint = torch.load(args.summaries_dir)\n\n model_dict = model.state_dict()\n pretrained_dict = checkpoint['model_state_dict']\n \n model_dict.update(pretrained_dict)\n model.load_state_dict(model_dict)\n\n model.to(device)\n testdataset = GetAudioVideoDataset(args, mode='test')\n testdataloader = DataLoader(testdataset, batch_size=args.batch_size, shuffle=False,num_workers = 16)\n\n softmax = nn.Softmax(dim=1)\n print(\"Loaded dataloader.\")\n\n model.eval()\n accuracies = AverageMeter()\n accuracies5 = AverageMeter()\n iou = []\n for step, (image, spec, audio,name,im) in enumerate(testdataloader):\n print('%d / %d' % (step,len(testdataloader) - 1))\n spec = Variable(spec).cuda()\n image = Variable(image).cuda()\n heatmap,_,Pos,Neg = model(image.float(),spec.float(),args)\n im_arr = im.data.cpu().numpy()\n heatmap_arr = heatmap.data.cpu().numpy()\n Pos = Pos.data.cpu().numpy()\n Neg = Neg.data.cpu().numpy()\n audio = audio.numpy()\n\n for i in range(spec.shape[0]):\n heatmap_now = cv2.resize(heatmap_arr[i,0], dsize=(224, 224), interpolation=cv2.INTER_LINEAR)\n heatmap_now = 
normalize_img(-heatmap_now)\n gt = ET.parse(args.gt_path + '%s.xml' % name[i][:-4]).getroot()\n gt_map = np.zeros([224,224])\n bboxs = []\n for child in gt: \n for childs in child:\n bbox = []\n if childs.tag == 'bbox':\n for index,ch in enumerate(childs):\n if index == 0:\n continue\n bbox.append(int(224 * int(ch.text)/256))\n bboxs.append(bbox)\n\n for item_ in bboxs:\n temp = np.zeros([224,224])\n (xmin,ymin,xmax,ymax) = item_[0],item_[1],item_[2],item_[3]\n temp[item_[1]:item_[3],item_[0]:item_[2]] = 1\n gt_map += temp\n gt_map /= 2\n gt_map[gt_map>1] = 1\n pred = heatmap_now\n pred = 1 - pred\n threshold = np.sort(pred.flatten())[int(pred.shape[0] * pred.shape[1] / 2)]\n pred[pred>threshold] = 1\n pred[pred<1] = 0\n evaluator = Evaluator()\n ciou,inter,union = evaluator.cal_CIOU(pred,gt_map,0.5)\n iou.append(ciou)\n results = []\n for i in range(21):\n result = np.sum(np.array(iou) >= 0.05 * i)\n result = result / len(iou)\n results.append(result)\n x = [0.05 * i for i in range(21)]\n auc_ = auc(x, results)\n print('cIoU' , np.sum(np.array(iou) >= 0.5)/len(iou))\n print('auc',auc_)\n\n\nif __name__ == \"__main__\":\n main()\n\n","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":5788,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"272279972","text":"from prac_07.guitar import Guitar\r\n\r\ndef main():\r\n one_guitar = Guitar(\"Gibson L-5 CES\", 1922, 16035.40)\r\n\r\n print(one_guitar)\r\n\r\n name = \"Gibson L-5 CES\"\r\n year = 1922\r\n cost = 16035.40\r\n\r\n second_name = \"Another Guitar\"\r\n second_year = 2012\r\n\r\n first_guitar = Guitar(name, year, cost)\r\n second_guitar = Guitar(second_name, second_year)\r\n\r\n print(\"{} get_age() - Expected {}. Got {}\".format(first_guitar.name, 96,\r\n first_guitar.get_age()))\r\n print(\"{} get_age() - Expected {}. Got {}\".format(second_guitar.name, 6,\r\n second_guitar.get_age()))\r\n\r\n print(\"{} is_vintage() - Expected {}. Got {}\".format(first_guitar.name, True, first_guitar.is_vintage()))\r\n print(\"{} is_vintage() - Expected {}. Got {}\".format(second_guitar.name, False, second_guitar.is_vintage()))\r\n\r\nmain()","sub_path":"prac_07/guitar_test.py","file_name":"guitar_test.py","file_ext":"py","file_size_in_byte":920,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"25459002","text":"\"\"\"\nCopyright (c) 2015 SONATA-NFV\nALL RIGHTS RESERVED.\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n http://www.apache.org/licenses/LICENSE-2.0\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\nNeither the name of the SONATA-NFV [, ANY ADDITIONAL AFFILIATION]\nnor the names of its contributors may be used to endorse or promote\nproducts derived from this software without specific prior written\npermission.\nThis work has been performed in the framework of the SONATA project,\nfunded by the European Commission under Grant number 671517 through\nthe Horizon 2020 and 5G-PPP programmes. 
The authors would like to\nacknowledge the contributions of their colleagues of the SONATA\npartner consortium (www.sonata-nfv.eu).\n\"\"\"\n\nimport unittest\nimport yaml\nimport threading\nimport logging\n\nfrom multiprocessing import Process\nfrom test.fakessm import fakeSM\nfrom sonmanobase.messaging import ManoBrokerRequestResponseConnection\n\nlogging.basicConfig(level=logging.INFO)\nlogging.getLogger('amqp-storm').setLevel(logging.INFO)\nLOG = logging.getLogger(\"son-mano-plugins:sm_template_test\")\nlogging.getLogger(\"son-mano-base:messaging\").setLevel(logging.INFO)\nLOG.setLevel(logging.INFO)\n\n\n\n\nclass testSMTemplate(unittest.TestCase):\n \"\"\"\n Tests the registration process of the Placement Executive to the broker\n and the plugin manager, and the heartbeat process.\n \"\"\"\n\n\n def setUp(self):\n #a new Placement Executive in another process for each test\n self.ssm_proc = Process(target=fakeSM)\n self.ssm_proc.daemon = True\n\n #make a new connection with the broker before each test\n self.manoconn = ManoBrokerRequestResponseConnection('son-plugin.SpecificManagerRegistry')\n\n #Some threading events that can be used during the tests\n self.wait_for_event = threading.Event()\n self.wait_for_event.clear()\n\n def tearDown(self):\n #Killing the Placement Executive\n if self.ssm_proc is not None:\n self.ssm_proc.terminate()\n del self.ssm_proc\n\n #Killing the connection with the broker\n try:\n self.manoconn.stop_connection()\n except Exception as e:\n LOG.exception(\"Stop connection exception.\")\n\n #Clearing the threading helpers\n del self.wait_for_event\n\n #Method that terminates the timer that waits for an event\n def eventFinished(self):\n self.wait_for_event.set()\n\n #Method that starts a timer, waiting for an event\n def waitForEvent(self, timeout=5, msg=\"Event timed out.\"):\n if not self.wait_for_event.wait(timeout):\n self.assertEqual(True, False, msg=msg)\n\n\n def testSMTemplate(self):\n \"\"\"\n TEST: This test verifies whether the SSM/FSM template is sending out a message,\n and whether it contains all the needed info on the\n specific.manager.registry.ssm.registration topic to register to the SSM/FSM.\n \"\"\"\n\n # STEP3a: When receiving the message, we need to check whether all fields present.\n def on_register_receive(ch, method, properties, message):\n\n msg = yaml.load(message)\n\n # CHECK: The message should be a dictionary.\n self.assertTrue(isinstance(msg, dict), msg='message is not a dictionary')\n # CHECK: The dictionary should have a key 'specific_manager_name'.\n self.assertIn('specific_manager_name', msg.keys(), msg='no specific_manager_name provided in message.')\n if isinstance(msg['specific_manager_name'], str):\n # CHECK: The value of 'specific_manager_name' should not be an empty string.\n self.assertTrue(len(msg['specific_manager_name']) > 0, msg='empty specific_manager_name provided.')\n else:\n # CHECK: The value of 'specific_manager_name' should be a string\n self.assertEqual(True, False, msg='specific_manager_name is not a string')\n # CHECK: The dictionary should have a key 'version'.\n self.assertIn('version', msg.keys(), msg='No version provided in message.')\n if isinstance(msg['version'], str):\n # CHECK: The value of 'version' should not be an empty string.\n self.assertTrue(len(msg['version']) > 0, msg='empty version provided.')\n else:\n # CHECK: The value of 'version' should be a string\n self.assertEqual(True, False, msg='version is not a string')\n # CHECK: The dictionary should have a key 'description'\n 
self.assertIn('description', msg.keys(), msg='No description provided in message.')\n if isinstance(msg['description'], str):\n # CHECK: The value of 'description' should not be an empty string.\n self.assertTrue(len(msg['description']) > 0, msg='empty description provided.')\n else:\n # CHECK: The value of 'description' should be a string\n self.assertEqual(True, False, msg='description is not a string')\n\n # CHECK: The dictionary should have a key 'specific_manager_type'\n if isinstance(msg['specific_manager_type'], str):\n # CHECK: The value of 'specific_manager_type' should not be an empty string.\n self.assertTrue(len(msg['specific_manager_type']) > 0, msg='empty specific_manager_type provided.')\n else:\n # CHECK: The value of 'specific_manager_type' should be a string\n self.assertEqual(True, False, msg='specific_manager_type is not a string')\n\n # CHECK: The dictionary should have a key 'service_name'\n if isinstance(msg['service_name'], str):\n # CHECK: The value of 'service_name' should not be an empty string.\n self.assertTrue(len(msg['service_name']) > 0, msg='empty service_name id provided.')\n else:\n # CHECK: The value of 'service_name' should be a string\n self.assertEqual(True, False, msg='service_name is not a string')\n\n # stop waiting\n self.eventFinished()\n\n\n # STEP1: Listen to the specific.manager.registry.ssm.registration topic\n self.manoconn.subscribe(on_register_receive, 'specific.manager.registry.ssm.registration')\n\n # STEP2: Start the SSM\n self.ssm_proc.start()\n\n # STEP3b: When not receiving the message, the test failed\n self.waitForEvent(timeout=5, msg=\"message not received.\")\n\nif __name__ == '__main__':\n unittest.main()","sub_path":"son-sm-template/test/test_template.py","file_name":"test_template.py","file_ext":"py","file_size_in_byte":6894,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"649908403","text":"# -*- coding: utf-8 -*-\nimport xlrd\nimport time\n\nimport pandas\nimport gomill.sgf\nimport numpy as np\n\nfrom visualization.go_driver import GoDriver\nfrom board_evaluation.pachi_player import Pachi\n\n\n# set default board size.\nBOARD_SIZE = 9\n\npachi = Pachi(pachi_path=\"thirdparty/pachi-pachi-12.10-jowa/pachi\")\ngodriver = GoDriver(\"data/working/cnn_5layer_64filter\", board_size=BOARD_SIZE)\n\n\ndef parse_sgf_file(sgf_content):\n while 'AB' in sgf_content:\n pos_b = sgf_content.find('AB')\n sgf_content = to_sequence(sgf_content, pos_b, ';B')\n while 'AW' in sgf_content:\n pos_w = sgf_content.find('AW')\n sgf_content = to_sequence(sgf_content, pos_w, ';W')\n return sgf_content\n\n\ndef to_sequence(sgf_content, pos, prefix):\n pos += 1\n start_pos = pos + 1\n while pos + 4 < len(sgf_content) and sgf_content[pos + 4] == ']':\n pos += 4\n end_pos = pos + 1\n sgf_string = sgf_content[start_pos: end_pos]\n res = \"\"\n for i in range(0, len(sgf_string), 4):\n res += prefix + sgf_string[i: i + 4]\n res = sgf_content[: start_pos - 2] + res + sgf_content[end_pos:]\n return res\n\n\ndef board_eval(sgf_content):\n # reset go board\n # skip komi, we will handle this later\n godriver.reset_board()\n sgf_content = parse_sgf_file(sgf_content)\n try:\n sgf = gomill.sgf.Sgf_game.from_string(sgf_content)\n except ValueError:\n print('WARNING: no SGF data found')\n # if this is not a sgf file, we return blank command\n return np.zeros((BOARD_SIZE, BOARD_SIZE))\n sgf_iterator = sgf.main_sequence_iter()\n while True:\n try:\n it = sgf_iterator.next()\n color, move = it.get_move()\n if color is 
None:\n it = sgf_iterator.next()\n color, move = it.get_move()\n except StopIteration:\n break\n if move is not None:\n godriver.play(color, move)\n # scale the value range to [-1, 1]\n nn_matrix = godriver.evaluate_current_board() * 2 - 1\n\n # pachi player is used to reinforcement the neural network\n pachi_matrix, score = pachi.get_final_score_matrix(sgf_content)\n if pachi_matrix is None:\n pachi_matrix = nn_matrix\n else:\n assert len(pachi_matrix) == BOARD_SIZE ** 2\n pachi_matrix = np.array(pachi_matrix).reshape(BOARD_SIZE, BOARD_SIZE)\n\n final_matrix = 0.1 * nn_matrix + 0.9 * pachi_matrix\n\n # (0, 0) -> A1, (0, 8) ->I1, (8, 8)->I9\n return final_matrix\n\n\nif __name__ == '__main__':\n # readbook = xlrd.open_workbook(\"./data/kifu_test/test9x9.xlsx\")\n # sheet = readbook.sheet_by_index(0)\n # start_time = time.time()\n # for i in range(sheet.nrows):\n # sgf_content = sheet.cell(i, 3).value.encode('utf-8')\n # try:\n # score = board_eval(sgf_content)\n # print(\"sgf id {} score {}\".format(i, np.sum(score)))\n # except Exception as e:\n # print(\"sgf id {} exception {}\".format(i, e))\n # if i % 100 == 0:\n # print(\"time used: {}s\".format(time.time() - start_time))\n\n sql = pandas.read_csv(\"./data/kifu_test/test_sql.csv\")\n sgfs = sql[\"Sgf\"]\n print(\"sgf nums {}\".format(sgfs.size))\n start_time = time.time()\n for i, f in enumerate(sgfs):\n sgf_content = f.encode('utf-8')\n try:\n score = board_eval(sgf_content)\n print(\"sgf id {} score {}\".format(i, np.sum(score)))\n except Exception as e:\n print(\"sgf id {} exception {}\".format(i, e))\n if i % 100 == 0:\n print(\"time used: {}s\".format(time.time() - start_time))\n","sub_path":"server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":3597,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"373462395","text":"# Copyright 2021 VMware, Inc.\n# SPDX-License-Identifier: Apache-2.0\nfrom typing import Optional\n\nimport click\nfrom vdk.internal.control.configuration.defaults_config import (\n reset_default_authentication_disable,\n)\nfrom vdk.internal.control.configuration.defaults_config import (\n reset_default_rest_api_url,\n)\nfrom vdk.internal.control.configuration.defaults_config import reset_default_team_name\nfrom vdk.internal.control.configuration.defaults_config import (\n write_default_authentication_disable,\n)\nfrom vdk.internal.control.configuration.defaults_config import (\n write_default_rest_api_url,\n)\nfrom vdk.internal.control.configuration.defaults_config import write_default_team_name\n\n# Default command implies parity for set-default and reset-default sections bellow.\n# Each option that supports set-default is expected to implement reset-default.\n\n\n@click.command(\n name=\"set-default\",\n help=\"Set defaults that will be used in the commands of the tool.\",\n)\n@click.option(\n \"-t\",\n \"--team\",\n type=click.STRING,\n help=\"Set the default team name that will be used in all the commands that require a team.\",\n)\n@click.option(\n \"-u\",\n \"--rest-api-url\",\n type=click.STRING,\n help=\"Set the default REST API url that will be used in all the commands that require it.\",\n)\n@click.option(\n \"--authentication-disable\",\n type=click.STRING,\n is_flag=True,\n default=None,\n help=\"Disables authentication for all the commands that operate against the Control Service.\",\n)\n@click.pass_context\ndef set_default_command(\n ctx, team: str, rest_api_url: str, authentication_disable: Optional[bool]\n):\n if team is not 
None:\n write_default_team_name(team)\n if rest_api_url is not None:\n write_default_rest_api_url(rest_api_url)\n if authentication_disable is not None:\n write_default_authentication_disable(str(authentication_disable))\n\n\n@click.command(\n name=\"reset-default\",\n short_help=\"Reset the defaults that will be used in the commands of the tool.\",\n help=\"Reset the defaults that will be used in the commands of the tool.\",\n)\n@click.option(\n \"-t\",\n \"--team\",\n is_flag=True,\n flag_value=True,\n default=False,\n help=\"Reset the default team name.\",\n)\n@click.option(\n \"-u\",\n \"--rest-api-url\",\n is_flag=True,\n flag_value=True,\n default=False,\n help=\"Reset the default REST API url.\",\n)\n@click.option(\n \"--authentication-disable\",\n is_flag=True,\n flag_value=True,\n default=False,\n help=\"Reset the disable authentication flag.\",\n)\n@click.pass_context\ndef reset_default_command(\n ctx, team: bool, rest_api_url: bool, authentication_disable: bool\n):\n if team:\n reset_default_team_name()\n if rest_api_url:\n reset_default_rest_api_url()\n if authentication_disable:\n reset_default_authentication_disable()\n","sub_path":"projects/vdk-control-cli/src/vdk/internal/control/command_groups/common_group/default.py","file_name":"default.py","file_ext":"py","file_size_in_byte":2865,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"493468290","text":"import logging\n\nfrom variables import Value, Number\nfrom identifiers import Identifier, Pidentifier\nfrom jump_fillers import JumpFiller, ForDownToLoopJumpFiller, ForLoopJumpFiller, LoopJumpFiller\nfrom operations import AssignmentOperations, ConditionOperations\n\n\n\n\nclass WhileLoop:\n def __init__(self, condition, do_commands):\n self.condition = condition\n self.do_commands = do_commands\n\n def generate_code(self, program):\n loop_end = LoopJumpFiller(program.line_no)\n line_to_fill = self.condition.generate_code(program)\n\n # while is endless or while has condition we cannot predict\n if line_to_fill is not False:\n if type(line_to_fill) is list:\n condition_end = JumpFiller(line_to_fill)\n program.stack.append(condition_end)\n program.stack.append(loop_end)\n [program.stack.append(do_command) for do_command in self.do_commands]\n\n def get_variable_names(self):\n return self.condition.get_variable_names()\n\n\nclass DoWhileLoop(WhileLoop):\n\n def generate_code(self, program):\n while_loop = WhileLoop(self.condition, self.do_commands)\n program.stack.append(while_loop)\n [program.stack.append(do_command) for do_command in self.do_commands]\n\n\nclass ForLoop:\n def __init__(self, i, startValue, endValue, commands):\n self.i = i\n self.startValue = startValue\n self.endValue = endValue\n self.commands = commands\n\n def generate_code(self, program):\n\n if type(self.startValue) is not Value:\n self.startValue = Value(self.startValue)\n if type(self.endValue) is not Value:\n self.endValue = Value(self.endValue)\n\n Pidentifier(self.i).generate_code(program)\n iterator_assignment = AssignmentOperations(Identifier(self.i), self.startValue)\n iterator_variable = program.get_variable(self.i)\n iterator_assignment.generate_code(program)\n iterator_variable.is_iterator = True\n\n Pidentifier(self.i + '_end0').generate_code(program)\n end_iterator_variable = program.get_variable(self.i + '_end0')\n end_iterator_assignment = AssignmentOperations(Identifier(self.i + '_end0'), self.endValue)\n end_iterator_assignment.generate_code(program)\n 
end_iterator_variable.is_iterator = True\n\n # create loop end to know where we should return\n loop_end = ForLoopJumpFiller(program.line_no, iterator=self.i)\n\n # generate condition\n condition = ConditionOperations(\"LEQ\", Identifier(self.i), Identifier(self.i + '_end0'))\n line_to_fill = condition.generate_code(program)\n\n # add command end to fill jumps in condition\n if line_to_fill is not False:\n if type(line_to_fill) is list:\n condition_end = JumpFiller(line_to_fill)\n program.stack.append(condition_end)\n\n # add commands\n program.stack.append(loop_end)\n [program.stack.append(command) for command in self.commands]\n\n def get_variable_names(self):\n result = []\n if type(self.i) is not Number:\n result.extend(self.i)\n if type(self.startValue) is not Number:\n result.extend(self.startValue.get_variable_names())\n if type(self.endValue) is not Number:\n result.extend(self.endValue.get_variable_names())\n return tuple(result)\n\n\nclass ForDownToLoop(ForLoop):\n\n def generate_code(self, program):\n # assignment of iterator\n Pidentifier(self.i).generate_code(program)\n iterator_variable = program.get_variable(self.i)\n iterator_assignment = AssignmentOperations(Identifier(self.i),\n Value(self.startValue))\n iterator_assignment.generate_code(program)\n iterator_variable.is_iterator = True\n\n Pidentifier(self.i + '_end0').generate_code(program)\n end_iterator_variable = program.get_variable(self.i + '_end0')\n end_iterator_assignment = AssignmentOperations(Identifier(self.i + '_end0'),\n Value(self.endValue))\n end_iterator_assignment.generate_code(program)\n end_iterator_variable.is_iterator = True\n\n # create loop end to know where we should return\n loop_end = ForDownToLoopJumpFiller(program.line_no, iterator=self.i)\n\n # generate condition\n condition = ConditionOperations(\"GEQ\",\n Identifier(self.i),\n Identifier(self.i + '_end0'))\n line_to_fill = condition.generate_code(program)\n\n # add command end to fill jumps in condition\n if line_to_fill is not False:\n if type(line_to_fill) is list:\n condition_end = JumpFiller(line_to_fill)\n program.stack.append(condition_end)\n\n # add commands\n program.stack.append(loop_end)\n [program.stack.append(command) for command in self.commands]\n","sub_path":"compiler/loops.py","file_name":"loops.py","file_ext":"py","file_size_in_byte":5034,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"148794136","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Jun 28 11:24:33 2022\n\n@author: weichan\n\"\"\"\n\nimport pandas as pd\nfrom scipy.stats import rankdata\nimport sys\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport multiprocessing \nimport os \n\n\n#read in files\nroot = sys.argv[-1]\ncollection = {}\nprint(root)\nfor subdir,dirs,files in os.walk(root):\n for file in files:\n\n with open(root + \"/\" + file) as input_file:\n for line in input_file:\n line = line.strip()\n line=line.split(\" \")\n key= line[3].replace('\"','')\n #key= line[1].replace('ID','')\n if key in collection:\n collection[key].append(int(line[0]))\n else:\n collection[key] = [int(line[0])]\n \n input_file.close()\n\n\ndata = pd.DataFrame(collection.items(), columns=['ID', 'Reads'])\n#average expression\nmean_coll = []\nfor i in data[\"Reads\"]:\n av = sum(i) / len(i)\n mean_coll.append(av)\ndata['MeanRandom'] = pd.Series(mean_coll)\n\n#count data\ncounts = pd.read_csv(sys.argv[1], sep = \"\\t\")\ncounts = counts.loc[:,[\"Name\",\"NumReads\",\"Length\"]] 
\ncounts = counts.loc[counts['NumReads'] >= 1] ##### This makes the plot nicer without changing the raw data\nprint(\"Data loaded. Calculating p values...\")\n\n#now the plot #subplot for sequence-based and random\nplt.rcParams.update({'font.size': 20})\nalldata = data.merge(counts, left_on=\"ID\", right_on='Name', how = 'outer')\nfig = plt.figure(figsize=(12, 10), dpi=300)\n\nax1 = fig.add_subplot(1,2,1)\nax1.scatter(list(np.log10(alldata[\"Length\"])), list(np.log10(alldata[\"NumReads\"])), \n color=\"#3b528b\", label= 'Sequence-based mapping', alpha=0.2)\n\nplt.xlabel(\"Log$_{10}$ Length of read\")\nplt.ylabel(\"Average Log$_{10}$ Count of mapping\")\n\nax2 = fig.add_subplot(1,2,2, sharex=ax1, sharey=ax1)\nax2.scatter(list(np.log10(alldata[\"Length\"])), list(np.log10(alldata[\"MeanRandom\"])), \n color='#fde725', label= 'Random mapping', alpha=0.2)\n\nplt.xlabel(\"Log$_{10}$ Length of read\")\nplt.ylabel(\"Average Log$_{10}$ Count of mapping\")\nfig.savefig(sys.argv[2])\n\n\n#now the permutation test\nalldata = alldata.drop(['ID'], axis=1)\n#permutation test\n#pvalue = number of times permutation has more reads than mapping/number of permutations \n\n#multipletest/ false discovery rate correction by benjamini-hochberg\ndef fdr(p_vals):\n\n ranked_p_values = rankdata(p_vals)\n fdr = p_vals * len(p_vals) / ranked_p_values \n fdr[fdr > 1] = 1 #if greater 1, just make it 1 \n\n return fdr \n\n#this snippet is the pertubtaion test\n##parallel\n#input is a list of elements\ndef pertubation(reads):\n if not type(reads) == list: #needed because of merge nans\n pval = 0\n return pval\n else: \n for count, element in enumerate(alldata[\"Reads\"].values):\n if element == reads:\n pval = sum(i >= alldata[\"NumReads\"][count] for i in element) / len(element) \n return pval\n\n#p-values\n#cores for parallel\na_pool = multiprocessing.Pool(int(sys.argv[3]))\npvals = a_pool.map(pertubation, alldata[\"Reads\"])\nalldata[\"p-value\"]=pvals\nalldata = alldata.drop(['Reads'], axis=1)\n\nprint(\"Calculate adjusted p values...\")\n\nalldata2 = alldata.loc[alldata['p-value'] < 0.05]\n\n#p value correction \nalldata2[\"adjusted p-value\"] = fdr(alldata2[\"p-value\"])\n\nalldata2 = alldata2.set_index(\"Name\")\nalldata2 = alldata2.loc[alldata2.index.dropna()]\n\nsignif = alldata2.loc[alldata2['adjusted p-value'] < 0.05] #adjusted \n\nout = signif.to_csv(sys.argv[4], sep='\\t', index=True)\n","sub_path":"Scripts/PipeStats.py","file_name":"PipeStats.py","file_ext":"py","file_size_in_byte":3605,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"84577897","text":"a = 1 \nb = 1\n\nfibonacci = [a,b]\n\nnum = int(input(\"Input fibonacci range:\"))\n\nfor i in range(num):\n a,b = b,a+b\n print(\"a:\",a,\"b:\",b)\n fibonacci.append(b)\n \nprint(fibonacci)","sub_path":"f-han-keceli/Python/Loop Structures/fibonacci_series.py","file_name":"fibonacci_series.py","file_ext":"py","file_size_in_byte":184,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"52455398","text":"import os\nimport json\nimport re\nimport copy\nimport numpy as np\nfrom json import JSONEncoder\n\n\ntemplate = {\n \"N_JOBS\": 10,\n \"N_TOOLS\": 10,\n \"MAGAZINE_SIZE\": 0,\n \"matrix_m\": \"N_JOBS\",\n \"matrix_n\": \"N_TOOLS\",\n \"matrix\": [\n [1, 0, 0, 1, 0, 0, 1, 0, 1, 1, 0, 1, 0, 1, 1, 1, 0, 1, 0, 0],\n [0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 1, 1, 0],\n [0, 1, 0, 0, 0, 
0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]\n ],\n\n}\n\n\nclass MarkedList:\n _list = None\n\n def __init__(self, l):\n self._list = l\n\n\nclass CustomJSONEncoder(JSONEncoder):\n def default(self, o):\n if isinstance(o, MarkedList):\n return \"##<{}>##\".format(o._list)\n\n\nclass Converter:\n out_folder = \"/Users/simonvermeir/Documents/industrial-engineering/SchoolCurrent/MasterProef/Master-Thesis-SSP\" \\\n \"/data/instances/yanasse\"\n root_folder = \"/Users/simonvermeir/Documents/industrial-engineering/SchoolCurrent/MasterProef/Master-Thesis-SSP\" \\\n \"/data/raw_instances/Yanasse\"\n files = \"/Users/simonvermeir/Documents/industrial-engineering/SchoolCurrent/MasterProef/Master-Thesis-SSP/data/instances/yanasse_files.json\"\n\n rn = [\"L\"]\n extension = \".txt\"\n variation_map = dict()\n\n file_descriptor = {\n \"original\": \"\",\n \"root_folder\": \"yanasse\",\n \"instance\": \"\",\n \"author\": \"yan\",\n \"n_jobs\": 10,\n \"n_tools\": 10,\n \"magazine_size\": 10,\n \"variation\": 100\n }\n\n def __init__(self) -> None:\n super().__init__()\n self.create_out_folder()\n\n def create_out_folder(self):\n try:\n os.mkdir(self.out_folder)\n except OSError:\n print(\"Creation of the directory %s failed\" % self.out_folder)\n\n\n\n def convert(self):\n self.root_folder = self.root_folder + \"/\" + \"Tabela\"\n root_folder_i = self.root_folder\n tabelas = [1,2,3,4,5]\n self.rn = [\"L\"]\n self.extension = \".txt\"\n r1l = [34,33,34,26,8]\n r2 = 10\n i = 0\n for r1 in r1l:\n root_folder_i = self.root_folder + str(tabelas[i])\n for ra in range(1, r1 + 1):\n for rb in range(1, r2 + 1):\n original = self.rn[0] + str(ra) + \"-\" + str(rb) + self.extension\n input_file_path = root_folder_i + \"/\" + self.rn[0] + str(ra) + \"-\" + str(rb) + self.extension\n self.file_descriptor[\"original\"] = original\n #print(self.file_descriptor)\n self.extract(input_file_path, tabelas[i])\n i+=1\n\n\n def extract(self, input_file,tabela):\n matrix = []\n\n with open(input_file) as fr:\n pline = fr.readline().strip().split(\" \")\n\n n_jobs = int(pline[0])\n n_tools = int(pline[1])\n magazine_size = int(pline[2])\n while True:\n line = fr.readline()\n\n if line is None or line is '':\n break\n\n matrix_line = line.strip().split(\" \")\n if(tabela is 4):\n matrix_line = line.strip().split(\" \")\n matrix_line = [int(element) for element in matrix_line]\n matrix.append(matrix_line)\n\n matrix_transpose = self.transpose_matrix(matrix)\n matrix_tranpose_marked = self.mark_matrix(matrix_transpose)\n\n self.file_descriptor[\"n_jobs\"] = n_jobs\n self.file_descriptor[\"n_tools\"] = n_tools\n self.file_descriptor[\"magazine_size\"] = magazine_size\n self.file_descriptor[\"variation\"] = self.get_variation(n_tools, n_jobs, magazine_size)\n self.file_descriptor[\"instance\"] = self.file_descriptor[\"author\"] + \"_\" + str(n_jobs) + \"_\" + str(\n n_tools) + \"_\" + str(magazine_size) + \"_\" + str(self.file_descriptor[\"variation\"])\n\n self.addFile()\n self.add_problem(matrix_tranpose_marked, n_tools, n_jobs, magazine_size)\n\n def get_variation(self, n_tools, n_jobs, magazine_size):\n key = str(n_tools) + \"_\" + str(n_jobs) + \"_\" + str(magazine_size)\n self.variation_map[key] = self.variation_map.get(key, 0) + 1\n return self.variation_map[key]\n\n\n def addFile(self):\n with open(self.files, 'r') as f:\n # parsing JSON string:\n j = json.load(f)\n s = j[\"files\"]\n 
s.append(self.file_descriptor)\n\n with open(self.files,'w') as write_file:\n json.dump(j, write_file , indent=4)\n\n\n def add_problem(self, matrix, n_tools, n_jobs, magazine_size):\n\n output_directory = self.out_folder.strip(\" \") + \"/\" + self.file_descriptor[\"instance\"]\n output_file_path = output_directory + \"/\" + self.file_descriptor[\"instance\"] + \".json\"\n\n result = copy.deepcopy(template)\n\n result[\"MAGAZINE_SIZE\"] = magazine_size\n result[\"N_JOBS\"] = n_jobs\n result[\"N_TOOLS\"] = n_tools\n result[\"matrix\"] = matrix\n\n d = json.dumps(result, indent=4, cls=CustomJSONEncoder)\n d = d.replace('\"##<', \"\").replace('>##\"', \"\")\n\n try:\n os.mkdir(output_directory)\n except OSError:\n print(\"Creation of the directory %s failed\" % output_directory)\n\n with open(output_file_path, \"w\") as write_file:\n write_file.write(d)\n\n\n\n def mark_matrix(self, matrix):\n out = []\n for i in range(len(matrix)):\n out.append(MarkedList(matrix[i]))\n return out\n\n def transpose_matrix(self, matrix):\n a = np.array(matrix)\n a = a.transpose()\n\n return a.tolist()\n\n def get_trailing_number(self, s):\n m = re.search(r'\\d+$', s)\n return int(m.group()) if m else None\n\n\nconverter = Converter()\nconverter.convert()\n","sub_path":"python/data_processing/Yanasse.py","file_name":"Yanasse.py","file_ext":"py","file_size_in_byte":6001,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"37906203","text":"import setuptools\n\nVERSION = '0.0.1'\n\nsetuptools.setup(\n name='stashboard',\n version=VERSION,\n description='Stash Horizon Dashboard',\n author='Rackspace',\n author_email='stash-devs@rackspace.com',\n packages=setuptools.find_packages(exclude=['test']),\n include_package_data=True,\n install_requires=[\n 'python-troveclient',\n ],\n classifiers=[],\n)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":382,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"446106246","text":"from lxml import html\nimport requests\nimport json\n\ndef main(picktick, shipper, tracking_number):\n \n request_url = 'https://my.yrc.com/dynamic/national/servlet'\n\n def parameter_builder(tracking_id):\n form_data = {\n 'CONTROLLER' : 'com.rdwy.ec.rextracking.http.controller.ProcessPublicTrackingController',\n 'type' : '0',\n 'pro0' : tracking_id,\n }\n return form_data\n \n form_data = parameter_builder(tracking_number)\n r = requests.post(request_url, params=form_data)\n order_data = html.fromstring(r.content)\n date_a = order_data.xpath(\"//tr[@class='rowodd']/td[3]/text()\")\n date_b = order_data.xpath(\"//tr[@class='rowodd']/td[4]/text()\")\n \n inner_counter = 0\n for dat_point in date_a:\n x_dat_point = dat_point.replace(u'\\xa0',u'')\n t_dat_point = x_dat_point.replace(u'\\t',u'')\n n_dat_point = t_dat_point.replace(u'\\n',u'')\n date_a[inner_counter] = n_dat_point\n inner_counter += 1\n \n inner_counter = 0\n for dat_point in date_b:\n x_dat_point = dat_point.replace(u'\\xa0',u'')\n t_dat_point = x_dat_point.replace(u'\\t',u'')\n n_dat_point = t_dat_point.replace(u'\\n',u'')\n date_b[inner_counter] = n_dat_point\n inner_counter += 1\n \n try: \n new_file[picktick] = [tracking_number, date_a[0], date_b[0], shipper]\n except:\n new_file[picktick] = [tracking_number, 'No Data', 'No Data', shipper]\n \n return 
new_file\n","sub_path":"sales-cutoff/YRC_Roadway/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1514,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"250441279","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Wed Apr 17 23:09:37 2019\r\n\r\n@author: Sajal\r\n\"\"\"\r\n\r\nimport numpy as nm\r\nimport matplotlib.pyplot as plt\r\nimport pandas as pd\r\n\r\ndataset = pd.read_csv('Social_Network_Ads.csv')\r\n#Dividing in X and Y\r\nX = dataset.iloc[:, [2,3]].values\r\nY = dataset.iloc[:, 4].values\r\n\r\n#Splitting the dataset into the Training set and Test set\r\nfrom sklearn.cross_validation import train_test_split\r\nxTrain, xTest, yTrain, yTest = train_test_split(X, Y, test_size = 0.25, random_state = 0)\r\n\r\n#Feature Scaling\r\nfrom sklearn.preprocessing import StandardScaler #class import\r\nsc = StandardScaler() # create object \r\nxTrain = sc.fit_transform(xTrain)\r\nxTest = sc.fit_transform(xTest)\r\n\r\n#fitting KNN to the training set\r\nfrom sklearn.tree import DecisionTreeClassifier #importing class\r\nclassifier = DecisionTreeClassifier( criterion = 'entropy', random_state = 0 ) #create object \r\nclassifier.fit(xTrain,yTrain)\r\n \r\n#predicting the Test set result\r\nyPredict = classifier.predict(xTest) \r\n\r\n#Making the confusion matrix \r\nfrom sklearn.metrics import confusion_matrix #import function\r\ncm = confusion_matrix(yTest, yPredict)\r\n\r\n\r\n#visualizing the training set results\r\nfrom matplotlib.colors import ListedColormap\r\nX_set, y_set = xTrain, yTrain\r\nX1, X2 = nm.meshgrid(nm.arange(start = X_set[:, 0].min() - 1, stop = X_set[:, 0].max() + 1, step = 0.01),\r\n nm.arange(start = X_set[:, 1].min() - 1, stop = X_set[:, 1].max() + 1, step = 0.01))\r\nplt.contourf(X1, X2, classifier.predict(nm.array([X1.ravel(), X2.ravel()]).T).reshape(X1.shape),\r\n alpha = 0.75, cmap = ListedColormap(('red', 'green')))\r\nplt.xlim(X1.min(), X1.max())\r\nplt.ylim(X2.min(), X2.max())\r\nfor i, j in enumerate(nm.unique(y_set)):\r\n plt.scatter(X_set[y_set == j, 0], X_set[y_set == j, 1],\r\n c = ListedColormap(('red', 'green'))(i), label = j)\r\nplt.title('K Nearest Neighbour (Training set)')\r\nplt.xlabel('Age')\r\nplt.ylabel('Estimated Salary')\r\nplt.legend()\r\nplt.show()\r\n\r\n\r\n#Visualizing the test set result\r\nfrom matplotlib.colors import ListedColormap\r\nX_set, y_set = xTest, yTest\r\nX1, X2 = nm.meshgrid(nm.arange(start = X_set[:, 0].min() - 1, stop = X_set[:, 0].max() + 1, step = 0.01),\r\n nm.arange(start = X_set[:, 1].min() - 1, stop = X_set[:, 1].max() + 1, step = 0.01))\r\nplt.contourf(X1, X2, classifier.predict(nm.array([X1.ravel(), X2.ravel()]).T).reshape(X1.shape),\r\n alpha = 0.75, cmap = ListedColormap(('red', 'green')))\r\nplt.xlim(X1.min(), X1.max())\r\nplt.ylim(X2.min(), X2.max())\r\nfor i, j in enumerate(nm.unique(y_set)):\r\n plt.scatter(X_set[y_set == j, 0], X_set[y_set == j, 1],\r\n c = ListedColormap(('red', 'green'))(i), label = j)\r\nplt.title('K Nearest Neighbour (Test set)')\r\nplt.xlabel('Age')\r\nplt.ylabel('Estimated Salary')\r\nplt.legend()\r\nplt.show()\r\n\r\n","sub_path":"Decission Tree/Decission Tree/Decision Tree classification.py","file_name":"Decision Tree classification.py","file_ext":"py","file_size_in_byte":2857,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"476011348","text":"#!/usr/bin/python3\n\nimport unittest\n\nimport reachability\n\nclass 
SolverSolveTestCase(unittest.TestCase):\n\n def setUp(self):\n self.solver = reachability.Solver()\n self.solver._input = self.generate_input\n self.solver._output = self.accumulate_output\n self.output_list = []\n\n def tearDown(self):\n pass\n\n def generate_input(self):\n return self.input_list.pop(0)\n\n def accumulate_output(self, text):\n return self.output_list.append(text)\n\n def test_one_component(self):\n self.input_list = [\n '4 4',\n '1 2',\n '3 2',\n '4 3',\n '1 4',\n '1 4',\n ]\n expected_result = [\n 1\n ]\n\n self.solver.solve()\n\n self.assertEqual(expected_result, self.output_list)\n\n def test_two_components(self):\n self.input_list = [\n '4 2',\n '1 2',\n '3 2',\n '1 4',\n ]\n expected_result = [\n 0\n ]\n\n self.solver.solve()\n\n self.assertEqual(expected_result, self.output_list)\n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"course3-graphs/assignments/assignment_001_reachability/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":1163,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"2666517","text":"import gym\nimport numpy as np\nimport torch\nfrom torch.autograd import Variable\nimport torch.multiprocessing as mp\nimport torch.nn.functional as F\nimport torch.nn as nn\nimport torch.optim as optim\nimport os\nimport random\nimport matplotlib.pyplot as plt\n\n# It seems that this line can speed up the process.\nos.environ[\"OMP_NUM_THREADS\"] = \"1\"\n\n# some parameters\nMAX_EPISODES = 200\nMAX_STEPS = 200\nenv = gym.make('Pendulum-v0')\nS_DIM = env.observation_space.shape[0]\nA_DIM = env.action_space.shape[0]\nA_MAX = env.action_space.high[0]\n\n\n# return a noise\n# Based on http://math.stackexchange.com/questions/1287634/implementing-ornstein-uhlenbeck-in-matlab\nclass OrnsteinUhlenbeckActionNoise:\n\n\tdef __init__(self, mu = 0, theta = 0.15, sigma = 0.2):\n\t\tself.action_dim = A_DIM\n\t\tself.mu = mu\n\t\tself.theta = theta\n\t\tself.sigma = sigma\n\t\tself.X = np.ones(self.action_dim) * self.mu\n\n\tdef reset(self):\n\t\tself.X = np.ones(self.action_dim) * self.mu\n\n\tdef sample(self):\n\t\tdx = self.theta * (self.mu - self.X)\n\t\tdx = dx + self.sigma * np.random.randn(len(self.X))\n\t\tself.X = self.X + dx\n\t\treturn self.X\n\n\n# memory to store past transitions\nclass Memory:\n\n\tdef __init__(self):\n\t\tself.memory = []\n\t\tself.len = 0\n\t\tself.capacity = 10000\n\t\tself.batch_size = 32\n\n\t# get a batch of transitions from memory\n\tdef sample(self):\n\t\tbatch = random.sample(self.memory, self.batch_size)\n\t\t[s, a, r, s_] = zip(*batch)\n\t\ts = torch.Tensor(s)\n\t\ta = torch.Tensor(a)\n\t\tr = torch.Tensor(r)\n\t\ts_ = torch.Tensor(s_)\n\t\treturn s, a, r, s_\n\n\t# push a transition into memory\n\tdef push(self, s, a, r, s1):\n\t\ttransition = (s,a,r,s1)\t\n\t\tif self.len >= self.capacity:\n\t\t\tself.len = 0\n\t\t\tself.memory[self.len] = transition\n\t\telse:\n\t\t\tself.len += 1\n\t\t\tself.memory.append(transition)\n\n\nclass Critic(nn.Module):\n\n\tdef __init__(self):\n\t\tsuper(Critic, self).__init__()\n\t\tself.state_dim = S_DIM\n\t\tself.action_dim = A_DIM\n\n\t\tself.fcs1 = nn.Linear(self.state_dim,64)\n\t\tself.fcs2 = nn.Linear(64,32)\n\t\tself.fca1 = nn.Linear(self.action_dim,32)\n\t\tself.fc2 = nn.Linear(64,32)\n\t\tself.fc3 = nn.Linear(32,1)\n\n\tdef forward(self, state, action):\n\t\ts1 = F.relu(self.fcs1(state))\n\t\ts2 = F.relu(self.fcs2(s1))\n\t\ta1 = F.relu(self.fca1(action))\n\t\tx = torch.cat((s2,a1),dim=1)\n\t\tx = 
F.relu(self.fc2(x))\n\t\tx = self.fc3(x)\n\t\treturn x\n\n\nclass Actor(nn.Module):\n\n\tdef __init__(self):\n\t\tsuper(Actor, self).__init__()\n\t\tself.state_dim = S_DIM\n\t\tself.action_dim = A_DIM\n\t\tself.action_lim = A_MAX\n\n\t\tself.fc1 = nn.Linear(self.state_dim,64)\n\t\tself.fc2 = nn.Linear(64,32)\n\t\tself.fc3 = nn.Linear(32,self.action_dim)\n\n\tdef forward(self, state):\n\t\tx = F.relu(self.fc1(state))\n\t\tx = F.relu(self.fc2(x))\n\t\taction = torch.tanh(self.fc3(x))\n\t\taction = action * self.action_lim\n\t\treturn action\n\n\nclass Agent:\n\n\tdef __init__(self, memory):\n\t\t# some parameters\n\t\tself.state_dim = S_DIM\n\t\tself.action_dim = A_DIM\n\t\tself.action_lim = A_MAX\n\t\tself.tau = 0.001\n\t\tself.lr = 0.001\n\t\tself.gamma = 0.99\n\t\tself.memory = memory\n\n\t\tself.noise = OrnsteinUhlenbeckActionNoise()\n\n\t\tself.actor = Actor()\n\t\tself.target_actor = Actor()\n\t\tself.actor_optimizer = optim.Adam(self.actor.parameters(),lr=self.lr)\n\n\t\tself.critic = Critic()\n\t\tself.target_critic = Critic()\n\t\tself.critic_optimizer = optim.Adam(self.critic.parameters(),lr=self.lr)\n\n\t\t# copy parameters from actor to target_actor\n\t\tself.hard_update(self.target_actor, self.actor)\n\t\t# copy parameters from critic to target_critic\n\t\tself.hard_update(self.target_critic, self.critic)\n\n\t# get an action with noise\n\tdef get_exploration_action(self, state):\n\t\tstate = torch.Tensor(state)\n\t\taction = self.actor.forward(state).detach()\n\t\tnew_action = action + torch.Tensor(self.noise.sample() * self.action_lim)\n\t\treturn new_action.numpy()\n\n\t# copy parameters from source network to target network\n\tdef hard_update(self, target, source):\n\t\tfor target_param, param in zip(target.parameters(), source.parameters()):\n\t\t\ttarget_param.data.copy_(param.data)\n\t\n\t# target <= tau * source + (1 - tau) * target\n\tdef soft_update(self, target, source):\n\t\tfor target_param, param in zip(target.parameters(), source.parameters()):\n\t\t\ttarget_param.data.copy_(target_param.data * (1.0 - self.tau) + param.data * self.tau)\n\n\t# update neural network\n\tdef learn(self):\n\t\t# get a batch of transition from memory\n\t\ts1,a1,r1,s2 = self.memory.sample()\n\t\ts1 = Variable(s1)\n\t\ta1 = Variable(a1)\n\t\tr1 = Variable(r1)\n\t\ts2 = Variable(s2)\n\n\t\t# optimize critic net\n\t\t# a2 = mu'(s2)\n\t\ta2 = self.target_actor.forward(s2).detach()\n\t\t# next_val = Q'(s2, a2)\n\t\tnext_val = self.target_critic.forward(s2, a2).detach().view(-1)\n\t\t# y = r + gamma * Q'(s2, a2)\n\t\ty_expected = r1 + self.gamma * next_val\n\t\t# y_predicted = Q(s1, a1)\n\t\ty_predicted = self.critic.forward(s1, a1).view(-1)\n\t\t\n\t\tloss_critic = F.smooth_l1_loss(y_predicted, y_expected)\n\t\t\n\t\tself.critic_optimizer.zero_grad()\n\t\tloss_critic.backward()\n\t\tself.critic_optimizer.step()\n\n\t\t# optimize actor net\n\t\tpred_a1 = self.actor.forward(s1)\n\t\tloss_actor = -1 * torch.sum(self.critic.forward(s1, pred_a1))\n\t\tself.actor_optimizer.zero_grad()\n\t\tloss_actor.backward()\n\t\tself.actor_optimizer.step()\n\n\t\t# target <= tau * source + (1 - tau) * target\n\t\tself.soft_update(self.target_actor, self.actor)\n\t\tself.soft_update(self.target_critic, self.critic)\n\n\n\ndef main():\n\t# initialize\n\tmemory = Memory()\n\tagent = Agent(memory)\n\tres = []\n\tavg_res = []\n\tfor episodes in range(MAX_EPISODES):\n\t\tstate = env.reset()\n\t\tep_r = 0\n\t\tfor steps in range(MAX_STEPS):\n\t\t\t# show 1 episode every 10 episodes\n\t\t\tif episodes % 10 == 
0:\n\t\t\t\tenv.render()\n\t\t\t# get action\n\t\t\taction = agent.get_exploration_action(state)\n\t\t\t# take action\n\t\t\tnext_state, reward, _, _ = env.step(action)\n\t\t\t# push transition into memory\n\t\t\tmemory.push(state, action, reward, next_state)\n\t\t\t# start learn after 2 episodes\n\t\t\t# to fill memory at the beginning\n\t\t\tif episodes > 2:\n\t\t\t\tagent.learn()\n\n\t\t\tep_r += reward\n\t\t\tstate = next_state\n\t\t\n\t\tres.append(ep_r)\n\t\t# avg_res is average reward of the last 10 episodes approximately\n\t\tif avg_res:\n\t\t\tavg_res.append(avg_res[-1] * 0.9 + ep_r * 0.1)\n\t\telse:\n\t\t\tavg_res.append(ep_r)\n\t\tprint(\"Ep:\", episodes, \"| Ep_r: %d\" % ep_r)\n\t# draw chart\n\tplt.ion()\n\tplt.figure()\n\tplt.plot(res)\n\tplt.plot(avg_res)\n\tplt.ylabel(\"ep reward\")\n\tplt.xlabel(\"ep\")\n\tplt.ioff()\n\tplt.show()\n\n\nif __name__ == \"__main__\":\n\tp = mp.Process(target=main)\n\tp.start()\n\tp.join()","sub_path":"Assignment-5-A3C-DDPG/DDPG.py","file_name":"DDPG.py","file_ext":"py","file_size_in_byte":6299,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"501658156","text":"import re\nn = int(input())\n\nfor i in range(n):\n text = input()\n pattern = r\"\\|([A-Z]{4,})\\|:#([A-Za-z]+\\s[A-Za-z]+)#\"\n match = re.match(pattern, text)\n\n if match is None:\n print(\"Access denied!\")\n continue\n\n name = match[1]\n title = match[2]\n\n print(f'{name}, The {title}')\n print(f'>> Strength: {len(name)}')\n print(f'>> Armour: {len(title)}')","sub_path":"08. Exams/Final Exam Retake - 13 December 2019/02. Boss Rush.py","file_name":"02. Boss Rush.py","file_ext":"py","file_size_in_byte":385,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"105402453","text":"class LinkedList:\n \"\"\"Defines a Singly Linked List.\n\n attributes: head\n \"\"\"\n \n def __init__(self):\n \"\"\"Create a new list with a Sentinel Node\"\"\"\n self.head = ListNode()\n\n def insert(self,x,p):\n \"\"\"Insert element x in the position after p\"\"\"\n temp = ListNode()\n temp.value = x\n temp.next = p.next\n p.next = temp\n\n def delete(self,p):\n \"\"\"Delete the node following node p in the linked list.\"\"\"\n if p.next != None:\n p.next = p.next.next\n else:\n print(\"No element after present after this node.\")\n\n def print(self):\n \"\"\" Print all the elements of a list in a row.\"\"\"\n temp = self.head.next\n toret = []\n while True:\n toret.append(temp.value)\n if temp.next == None:\n break\n temp = temp.next\n return toret\n\n def insertAtIndex(self,x,i):\n \"\"\"Insert value x at list position i. (The position of the first element is taken to be 0.)\"\"\"\n temp = self.head\n track = 0\n while True:\n if track == i:\n newTemp = ListNode()\n newTemp.value = x\n newTemp.next = temp.next\n temp.next = newTemp\n break\n track = track + 1\n if temp.next == None:\n print(\"Not enough elements in the list.\")\n break\n temp = temp.next\n\n def search(self, x):\n \"\"\"Search for value x in the list. Return a reference to the first node with value x; return None if no such node is found.\"\"\"\n temp = self.head\n while True:\n if temp.value == x:\n return temp\n if temp.next == None:\n return None\n temp = temp.next\n\n def isExisting(self, x):\n \"\"\"Search for value x in the list. 
Return a reference to the first node with value x; return None if no such node is found.\"\"\"\n temp = self.head\n while True:\n if temp.value == x:\n return True\n if temp.next == None:\n return False\n temp = temp.next\n\n def len(self):\n \"\"\"Return the length (the number of elements) in the Linked List.\"\"\"\n temp = self.head\n track = 0\n while temp.next != None:\n track = track + 1\n temp = temp.next\n return track\n\n def isEmpty(self):\n \"\"\"Return True if the Linked List has no elements, False otherwise.\"\"\"\n if self.head.next == None:\n return True\n return False\n\n def reverse(self):\n current = self.head.next\n prev = None\n while current != None:\n next_one = current.next\n current.next = prev\n prev = current\n current = next_one\n self.head.next = prev\n\nclass ListNode:\n \"\"\"Represents a node of a Singly Linked List.\n\n attributes: value, next. \n \"\"\"\n def __init__(self,k=\"\",val=None,nxt=None):\n self.key=k\n self.value=val\n self.next=nxt\n\nclass HashTable:\n def __init__(self, leng=30):\n self.length = leng\n self.T = [None for i in range(self.length)]\n\n def insert(self, strr):\n insertKey = hashFunc(strr, self.length)\n if self.T[insertKey] == None:\n self.T[insertKey] = LinkedList()\n index = self.T[insertKey].len()\n self.T[insertKey].insertAtIndex(strr, index)\n\n def keys(self):\n positions = []\n for i in range(len(self.T)):\n if self.T[i] != None:\n positions.append(i)\n return positions\n\n def search(self, strr):\n searchKey = hashFunc(strr, self.length)\n if self.T[searchKey] != None:\n return self.T[searchKey].isExisting(strr)\n return False\n\ndef hashFunc(string, leng):\n summ = 0\n for i in string:\n summ += ord(i)\n return (summ % leng)\n\ndef main():\n f = open('small.dict')\n words = f.readlines()\n words = [line[:-1] for line in words]\n dictionary = HashTable()\n for a in words:\n dictionary.insert(a)\n w = input(\"Enter a word to check its validity: \")\n if dictionary.search(w):\n print(\"Valid word.\")\n else:\n print(\"Not valid.\")\n\nif __name__ == '__main__':\n main()","sub_path":"IT204 - Data Structures And Algorithms Lab/lab4/2a.py","file_name":"2a.py","file_ext":"py","file_size_in_byte":4339,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"478687763","text":"from utilities.logging import Logging\nfrom music.song import Song\n\n\nclass Playlist:\n \"Music Playlist class\"\n\n def __init__(self, utilities, ytmusic, dbh, info=None):\n self.u = utilities # Utilities Object\n self.ytm = ytmusic # youtube Music API object\n self.dbh = dbh # database handle\n self.id = None # pk from database\n\n self.name = None\n self.rating = 0\n self.description = None\n self.track_count = 0\n self.songs = []\n self.yt_id = None\n if info:\n if 'name' in info:\n self.name = info['name']\n if 'description' in info:\n self.description = info['description']\n if 'trackCount' in info:\n self.track_count = int(info['trackCount'])\n if 'songs' in info:\n self.songs = info['songs']\n if 'rating' in info:\n self.rating = int(info['rating'])\n\n def print_attributes(self):\n self.u.log('Playlist:')\n self.u.log(' Name : {}'.format(self.name))\n self.u.log(' Rating : {}'.format(self.rating))\n self.u.log(' Track Count: {}'.format(self.track_count))\n\n def get_songs_from_youtube_playlist_id(self):\n if not self.yt_id:\n self.u.log('No yt id set')\n return\n \n pl = self.ytm.get_playlist(self.yt_id, limit=(self.track_count + 1))\n for track in pl['tracks']:\n s = Song(self.u,self.ytm,self.dbh)\n 
s.load_song_from_youtube(track)\n self.songs.append(s)\n\n\n def load_playlist_from_youtube(self, youtube_playlist):\n # self.u.pprintd(youtube_playlist)\n\n if 'id' in youtube_playlist:\n self.yt_id = youtube_playlist['id']\n elif 'playlistId' in youtube_playlist:\n self.yt_id = youtube_playlist['playlistId']\n if 'title' in youtube_playlist:\n self.name = youtube_playlist['title']\n if 'description' in youtube_playlist:\n self.description = youtube_playlist['description']\n if 'trackCount' in youtube_playlist:\n self.track_count = int(youtube_playlist['trackCount'])\n elif 'count' in youtube_playlist:\n self.track_count = int(youtube_playlist['count'])\n if 'tracks' in youtube_playlist:\n self.songs = youtube_playlist['tracks']\n else:\n # get songs from pl\n self.get_songs_from_youtube_playlist_id()\n self.print_attributes()\n\n def save(self):\n # save items in the playlist to db\n self.u.debug('Saving playlist: {}'.format(self.name))\n # iterate songs and save\n for song in self.songs:\n self.u.debug('Saving song: {}'.format(song.title))\n song.save()\n","sub_path":"music/playlist.py","file_name":"playlist.py","file_ext":"py","file_size_in_byte":2804,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"533499626","text":"import pexpect\nimport time \nfrom colors import *\n\nbulb = '44:A6:E5:1E:0D:F7'\n\n#red, green, blue, pink, purple, orange, yellow, lime, light blue, off, on\nnums = [\n RED,\n GREEN,\n BLUE,\n PINK,\n ORANGE,\n YELLOW,\n LIME,\n LIGHT_BLUE\n]\n\ngatt = pexpect.spawn('gatttool -I')\n\ngatt.sendline('connect ' + bulb)\ngatt.expect('Connection successful')\n\nwhile True :\n for i in nums :\n gatt.sendline('char-write-cmd 0x0025 ' + i)\n time.sleep(.01)\n\n\n\n\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":475,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"218471422","text":"import pandas as pd\nimport sys,os\nimport matplotlib.pyplot as plt\nimport argparse\n# insert at 1, 0 is the script path (or '' in REPL)\nsys.path.insert(1, '../')\nfrom pycocotools.coco import COCO\nfrom pycocotools.cocoeval import COCOeval\nimport os\nimport numpy as np\nimport pandas as pd\nimport json\nfrom matplotlib.pyplot import figure\n\nparser = argparse.ArgumentParser(description='mAP comparisons')\n\nparser.add_argument('--compare_list', nargs='+', help='Experiments to compare')\nparser.add_argument('--dset_name', type=str, help='Dataset name, either coco or lvis',default='coco')\nparser.add_argument('--categories', nargs='+', help='Columns to compare',default=['all'])\nparser.add_argument('--rids', nargs='+', help='Columns to compare',default=[0])\nparser.add_argument('--metric_columns', nargs='+', help='Columns to compare',default=[0])\nargs = parser.parse_args()\nmetric_names=np.array(['AP','AP50','AP75','APs','APm','APl','AR1','AR10','AR100','ARs','ARm','ARl'])\ncwd=\"./\"\n\nif args.dset_name=='coco':\n validation_path = \"../../../../datasets/coco/annotations/instances_val2017.json\"\n num_categories=80\nvalidation_path=os.path.join(cwd,validation_path)\ncocoGt=COCO(validation_path)\nrids=[int(rid) for rid in args.rids]\nresults={}\nresults['columns'] = metric_names[args.metric_columns].tolist()\nfor counter,exp_name in enumerate(args.compare_list):\n resFile=f'../jsons/{args.dset_name}/{exp_name}/model_{rids[counter]}.json'\n cocoDt=cocoGt.loadRes(resFile)\n cocoDt.loadAnns()\n # running evaluation\n cocoEval = 
COCOeval(cocoGt,cocoDt,'bbox')\n    if not args.categories:\n        cocoEval.evaluate()\n        cocoEval.accumulate()\n        cocoEval.summarize()\n        temp={}\n        temp[0]=np.array(cocoEval.stats)[args.metric_columns].tolist()\n        results[exp_name] = temp\n    elif args.categories[0] == 'all':\n        categories= [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 27, 28, 31, 32, 33, 34,\n                     35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63,\n                     64, 65, 67, 70, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 84, 85, 86, 87, 88, 89, 90]\n        temp={}\n        for i in results['columns']:\n            temp[i]=[]\n        for cat in categories:\n            cocoEval.params.catIds = [cat]\n            cocoEval.evaluate()\n            cocoEval.accumulate()\n            cocoEval.summarize()\n            for k,i in enumerate(results['columns']):\n                temp[i].append(cocoEval.stats[k])\n        results[exp_name]=temp\n    else:\n        categories= args.categories\n        temp={}\n        for i in results['columns']:\n            temp[i]=[]\n        for cat in categories:\n            cocoEval.params.catIds = [cat]\n            cocoEval.evaluate()\n            cocoEval.accumulate()\n            cocoEval.summarize()\n            for k,i in enumerate(results['columns']):\n                temp[i].append(cocoEval.stats[k])\n        results[exp_name]=temp\n\npath2save='./mAP_viz/'+\"_\".join(args.compare_list)\nif not os.path.exists(path2save):\n    os.mkdir(path2save)\n\nprint(results)\njson.dump(results, open(os.path.join(path2save,\"results.json\"), 'w' ) )\ndf = pd.read_csv('../coco_files/idf.csv')\ncoco_names=[n.rstrip() for n in open(\"../coco_files/coco.names\").readlines()]\nindices=df['idf_weights'].sort_values().index.values\ncoco_names=np.array(coco_names)[indices]\n\ndifference = 0\nfor mc in results['columns']:\n    for exp_name in args.compare_list:\n        difference = np.array(results[exp_name][mc]) - difference\n    figure(num=None, figsize=(20, 10), dpi=80, facecolor='w', edgecolor='k')\n    plt.xticks(rotation=90)\n    plt.bar(coco_names,difference[indices])\n    plt.savefig(os.path.join(path2save,mc+'.png'))\n    plt.close()\n","sub_path":"torchvision_models/notebooks/get_map.py","file_name":"get_map.py","file_ext":"py","file_size_in_byte":3849,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"}
+{"seq_id":"240021335","text":"def numberOfConnections(gridOfNodes):\n    # time complexity O(rows * cols)\n    connections = 0\n    if len(gridOfNodes) <= 1:\n        return connections\n\n    prev_nodes_sum = sum([x for x in gridOfNodes[0] if x == 1])\n    for row in gridOfNodes[1:]:\n        curr_nodes_sum = sum([x for x in row if x == 1])\n        if curr_nodes_sum > 0:\n            connections += prev_nodes_sum * curr_nodes_sum\n            prev_nodes_sum = curr_nodes_sum\n    return connections\n\n\nif __name__ == \"__main__\":\n    grid1 = [[1, 0, 1, 1], [0, 1, 1, 0], [0, 0, 0, 0], [0, 1, 0, 0]]\n    print(numberOfConnections(grid1))  # 8","sub_path":"hackerrank/messagebird_test/problem1.py","file_name":"problem1.py","file_ext":"py","file_size_in_byte":595,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"}
+{"seq_id":"14507205","text":"# -*- coding:utf-8 -*-\nimport re\n\ndef main(f_name, target):\n    pattern = re.compile(\"^\"+target)\n    with open(f_name,\"r\",encoding=\"utf-8\") as f:\n        for line in f:\n            if pattern.match(line):\n                print(line)\n\nif __name__ == \"__main__\":\n    main(\"README.md\", \"##\")","sub_path":"03_find_pattern/my_python_ex3.py","file_name":"my_python_ex3.py","file_ext":"py","file_size_in_byte":300,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"}
+{"seq_id":"214087108","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\nu'''\nпроверить, содержит ли число равные количества нулей и единиц в двоичном представлении\n1234567\n'''\n\nfrom __future__ import print_function\nfrom sys import stdin, stdout\nfrom string import *\nfrom numpy import argmax, argwhere, amax\n\ndef f(s):\n zcount = 0\n ucount = 0\n for c in s:\n if c == '1': ucount += 1\n if c == '0': zcount += 1\n if ucount == zcount:\n return True\n else:\n return False\n\nwords = stdin.read().strip().split()\n\nfor word in words:\n if word[0] in '+-':\n word = word[3:]\n else:\n word = word[2:]\n print('True' if f(word) else 'False')\n\n\n","sub_path":"contlab12/33/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":735,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"269170120","text":"# -*- coding: utf-8 -*-\n\n# Creer une fonction pour afficher les valeurs de la suite de fibonacci\nimport time\n\nlist_fibo = [0,1]\n\ndef fibonacci(terme):\n\n a = 0\n b = 1\n c = 0\n\n if terme == 0:\n return 0\n elif terme == 1:\n return 1\n else:\n # ceci est la façon itératif d'afficher la liste\n \n for i in range(1,terme):\n c = a + b\n a=b\n b=c\n list_fibo.append(c)\n\n return list_fibo\n \n # ceci est la façon recursif pour afficher juste une valeur de la suite de fibonacci.\n # On appelle une fonction recursive quand elle s'appelle elle même\n # la fonction s'auto-appelle jusqu'a atteindre les plus petite valeur ( càd 0 ou 1 ) comme un arbre si on ce le dessine puis les additionnes toutes.\n # return fibonacci(terme-1)+fibonacci(terme-2)\n\n\n\n# test de performence avec time dans certain cas l'itératif ou le recursif peut plus performant\n\nt0 = time.time()\nprint(fibonacci(7))\nt1 = time.time()\n\nprint(\"Elapsed time: {0}\".format(t1-t0))","sub_path":"Algo/17.fibonacci.py","file_name":"17.fibonacci.py","file_ext":"py","file_size_in_byte":1069,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"339801242","text":"# Reference: https://pytorch.org/tutorials/intermediate/seq2seq_translation_tutorial.html\n\nfrom __future__ import unicode_literals, print_function, division\nfrom io import open\nimport unicodedata\nimport string\nimport re\nimport random\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.optim as optim\ndevice = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\n################################################################################\n\nSOS_token = 0\nEOS_token = 1\n\nclass Lang:\n def __init__ (self, name):\n self.name = name\n self.word2count = {}\n self.word2index = {}\n self.index2word = {SOS_token: \"SOS\", EOS_token: \"EOS\"}\n self.n_words = 2 # include SOS and EOS\n\n def add_sentence (self, sentence):\n for word in sentence.split(' '):\n self.add_word(word)\n\n def add_word (self, word):\n if word not in self.word2index:\n self.word2count[word] = 1\n self.word2index[word] = self.n_words\n self.index2word[self.n_words] = word\n self.n_words += 1\n else:\n self.word2count[word] += 1\n\ndef normalize_string (s):\n def unicode_to_ascii(s):\n return ''.join(\n c for c in unicodedata.normalize('NFD', s)\n if unicodedata.category(c) != 'Mn'\n )\n s = unicode_to_ascii(s.lower().strip())\n s = re.sub(r\"([.!?])\", r\" \\1\", s)\n s = re.sub(r\"[^a-zA-Z.!?]+\", r\" \", s)\n return s\n\ndef read_langs(lang1, lang2, reverse=False):\n lines = open('%s-%s.txt' % (lang1, lang2), 
encoding='utf-8').read().strip().split('\\n')\n pairs = [[normalize_string(s) for s in line.split('\\t')] for line in lines]\n\n if reverse:\n pairs = [list(reversed(p)) for p in pairs]\n input_lang = Lang(lang2)\n output_lang = Lang(lang1)\n else:\n input_lang = Lang(lang1)\n output_lang = Lang(lang2)\n\n return input_lang, output_lang, pairs\n\ndef filter (pair):\n MAX_LENGTH = 10\n en_prefixes = (\n \"i am \", \"i m \",\n \"he is\", \"he s \",\n \"she is\", \"she s\",\n \"you are\", \"you re \",\n \"we are\", \"we re \",\n \"they are\", \"they re \"\n )\n return pair[1].startswith(en_prefixes) and \\\n len(pair[0].split(' ')) < MAX_LENGTH and \\\n len(pair[1].split(' ')) < MAX_LENGTH\n\ndef load_data (lang1, lang2):\n print(\"Reading data...\")\n input_lang, output_lang, pairs = read_langs(lang1, lang2, reverse=True)\n print(\"Read %s sentence pairs\" % len(pairs))\n pairs = [pair for pair in pairs if filter(pair)]\n pairs = pairs[:10000]\n print(\"Trimmed to %s sentence pairs\" % len(pairs))\n random.shuffle(pairs)\n for pair in pairs:\n input_lang.add_sentence(pair[0])\n output_lang.add_sentence(pair[1])\n print(\"Total words: (%s %s) (%s %s)\" % (input_lang.name, input_lang.n_words, output_lang.name, output_lang.n_words))\n return input_lang, output_lang, pairs\n\ndef sentence_to_indices (sentence, lang):\n \"\"\"\n @param sentence (string)\n @param lang (Lang)\n @return indices (array(seq_len, 1))\n \"\"\"\n indices = [lang.word2index[word] for word in sentence.split(' ')]\n indices.append(EOS_token)\n return torch.tensor(indices, dtype=torch.long, device=device).unsqueeze(dim=1)\n\ndef indices_to_sentence (indices, lang):\n \"\"\"\n @param indices (array(seq_len, 1))\n @param lang (Lang)\n @return sentence (string)\n \"\"\"\n sentence = ''.join([(lang.index2word[indices[i,0].item()] + ' ') for i in range(indices.size(0))])\n return sentence\n\ninput_lang, output_lang, pairs = load_data('en', 'fr')\ninputs = [sentence_to_indices(pairs[i][0], input_lang) for i in range(len(pairs))]\ntargets = [sentence_to_indices(pairs[i][1], output_lang) for i in range(len(pairs))]\n\n################################################################################\n\nepochs = 5\nembedding_size = 300\nhidden_size = 256\nlearning_rate = 0.0001\nteacher_forcing_ratio = 1.0\n\nprint_every = 100\n\nclass EncoderGru (nn.Module):\n def __init__ (self, dict_size, embedding_size, hidden_size):\n super(EncoderGru, self).__init__()\n\n self.hidden_size = hidden_size\n\n self.embedding = nn.Embedding(dict_size, embedding_size)\n self.gru = nn.GRU(embedding_size, hidden_size)\n\n self.optimizer = optim.Adam(self.parameters(), lr=learning_rate)\n\n def forward (self, input):\n \"\"\"\n @param input (seq_len, batch_size) dtype=long\n @return output (seq_len, batch_size, hidden_size)\n \"\"\"\n self.hidden = torch.zeros(1, 1, self.hidden_size, device=device)\n embedded = self.embedding(input) # embedded (tensor(seq_len, batch_size, embedding_size))\n output, self.hidden = self.gru(embedded, self.hidden) # output (tensor(seq_len, batch_size, hidden_size))\n return output\n\nclass DecoderGru (nn.Module):\n def __init__ (self, dict_size, embedding_size, hidden_size):\n super(DecoderGru, self).__init__()\n\n self.hidden_size = hidden_size\n\n self.embedding = nn.Embedding(dict_size, embedding_size)\n self.gru = nn.GRU(embedding_size, hidden_size)\n self.out = nn.Linear(hidden_size, dict_size)\n\n self.optimizer = optim.Adam(self.parameters(), lr=learning_rate)\n self.loss_func = nn.NLLLoss()\n\n def forward (self, 
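The Lang class and sentence_to_indices together map sentences to index sequences terminated by EOS. A self-contained sketch of that round trip with a toy vocabulary; the names are illustrative and there is no torch dependency here:

```python
# Self-contained sketch of the Lang bookkeeping above: build a vocabulary,
# then round-trip a sentence through word indices.
SOS, EOS = 0, 1

word2index = {}
index2word = {SOS: "SOS", EOS: "EOS"}

def add_sentence(sentence):
    for word in sentence.split(' '):
        if word not in word2index:
            idx = len(index2word)
            word2index[word] = idx
            index2word[idx] = word

add_sentence("i am cold .")
indices = [word2index[w] for w in "i am cold .".split(' ')] + [EOS]
print(indices)                                   # [2, 3, 4, 5, 1]
print(' '.join(index2word[i] for i in indices))  # i am cold . EOS
```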
input, prev_hidden, encoder_output=None):\n \"\"\"\n @param input (1, batch_size) dtype=long\n @param prev_hidden (1, batch_size, hidden_size)\n @param encoder_output (seq_len, batch_size, hidden_size)\n @return output (1, batch_size, dict_size)\n @return self.hidden (1, batch_size, hidden_size)\n \"\"\"\n embedded = self.embedding(input) # embedded (tensor(1, batch_size, embedding_size))\n _, self.hidden = self.gru(embedded, prev_hidden)\n output = F.log_softmax(self.out(self.hidden), dim=2)\n return output, self.hidden\n\nclass DecoderGruAttn (nn.Module):\n def __init__ (self, dict_size, embedding_size, hidden_size):\n super(DecoderGruAttn, self).__init__()\n\n self.hidden_size = hidden_size\n\n self.embedding = nn.Embedding(dict_size, embedding_size)\n self.gru = nn.GRU(embedding_size, hidden_size)\n self.align = nn.Linear(hidden_size, hidden_size)\n self.attn = nn.Linear(hidden_size * 2, hidden_size)\n self.out = nn.Linear(hidden_size, dict_size)\n\n self.optimizer = optim.Adam(self.parameters(), lr=learning_rate)\n self.loss_func = nn.NLLLoss()\n\n def forward (self, input, prev_hidden, encoder_output):\n \"\"\"\n @param input (1, batch_size) dtype=long\n @param prev_hidden (1, batch_size, hidden_size)\n @param encoder_output (seq_len, batch_size, hidden_size)\n @return output (1, batch_size, dict_size)\n @return self.hidden (1, batch_size, hidden_size)\n \"\"\"\n embedded = self.embedding(input) # embedded (tensor(1, batch_size, embedding_size))\n _, self.hidden = self.gru(embedded, prev_hidden)\n attn_scores = torch.bmm(self.hidden.t(), self.align(encoder_output).t().transpose(1,2)) # attn_weights (batch_size, 1, seq_len)\n attn_weights = F.softmax(attn_scores, dim=2) # attn_weights (batch_size, 1, seq_len)\n context = torch.bmm(attn_weights, encoder_output.t()).t() # context (1, batch_size, hidden_size)\n attnal = F.tanh(self.attn(torch.cat((context, self.hidden), dim=2))) # attentional (1, batch_size, hidden_size)\n output = F.log_softmax(self.out(attnal), dim=2) # output (1, batch_size, dict_size)\n return output, self.hidden\n\n# class DecoderGruLocalAttn (nn.Module):\n\ndef train (input, target, encoder, decoder):\n input_seq_len = input.size(0)\n target_seq_len = target.size(0)\n\n encoder.optimizer.zero_grad()\n decoder.optimizer.zero_grad()\n loss = 0.0\n\n encoder_output = encoder(input)\n decoder_input = torch.tensor([[SOS_token]], device=device)\n prev_hidden = encoder.hidden\n\n teacher_forcing = True if random.random() < teacher_forcing_ratio else False\n if teacher_forcing:\n for i in range(target_seq_len):\n decoder_output, prev_hidden = decoder(decoder_input, prev_hidden, encoder_output)\n loss += decoder.loss_func(decoder_output.squeeze(0), target[i])\n decoder_input = torch.tensor([[target[i]]], device=device)\n else:\n for i in range(target_seq_len):\n decoder_output, prev_hidden = decoder(decoder_input, prev_hidden, encoder_output)\n loss += decoder.loss_func(decoder_output.squeeze(0), target[i])\n topv, topi = decoder_output.topk(1)\n decoder_input = torch.tensor([[topi.detach().item()]], device=device)\n if decoder_input.item() == EOS_token:\n break\n\n loss.backward()\n decoder.optimizer.step()\n encoder.optimizer.step()\n loss = loss.item() / target_seq_len\n return loss\n\ndef train_all (inputs, targets, encoder, decoder, epochs):\n \"\"\"\n @param inputs ([array(seq_len, 1) dtype=long])\n @param targets ([array(seq_len, 1) dtype=long])\n \"\"\"\n examples = len(inputs)\n for epoch in range(epochs):\n print('Epoch %s' % epoch)\n loss_sum = 0.0\n for example 
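The bmm-based attention in DecoderGruAttn is easiest to verify with a shape-only example. Below is a sketch of the same dot-product attention with made-up dimensions; `transpose(0, 1)` is used in place of `.t()`, which only accepts tensors of at most two dimensions in current PyTorch:

```python
import torch
import torch.nn.functional as F

# Shape sketch of the attention step in DecoderGruAttn; dims are made up.
seq_len, batch, hidden = 7, 4, 16
encoder_output = torch.randn(seq_len, batch, hidden)
dec_hidden = torch.randn(1, batch, hidden)

enc = encoder_output.transpose(0, 1)             # (batch, seq_len, hidden)
query = dec_hidden.transpose(0, 1)               # (batch, 1, hidden)
scores = torch.bmm(query, enc.transpose(1, 2))   # (batch, 1, seq_len)
weights = F.softmax(scores, dim=2)               # attention over source positions
context = torch.bmm(weights, enc)                # (batch, 1, hidden)
print(weights.shape, context.shape)  # torch.Size([4, 1, 7]) torch.Size([4, 1, 16])
```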
in range(examples):\n input = inputs[example]\n target = targets[example]\n loss = train(input, target, encoder, decoder)\n loss_sum += loss\n\n if example % print_every == print_every - 1:\n loss_avg = loss_sum / print_every\n print('Loss %s' % loss_avg)\n loss_sum = 0.0\n\ndef eval (input, target, encoder, decoder):\n output = []\n with torch.no_grad():\n input_seq_len = input.size(0)\n target_seq_len = target.size(0)\n\n encoder_output = encoder(input)\n decoder_input = torch.tensor([[SOS_token]], device=device)\n prev_hidden = encoder.hidden\n\n for i in range(20):\n decoder_output, prev_hidden = decoder(decoder_input, prev_hidden, encoder_output)\n topv, topi = decoder_output.topk(1)\n decoder_input = torch.tensor([[topi.detach().item()]], device=device)\n output.append([topi.detach().item()])\n if decoder_input.item() == EOS_token:\n break\n output = torch.tensor(output, dtype=torch.long)\n return output\n\ndef eval_some (inputs, targets, encoder, decoder, n):\n for i in range(n):\n index = random.randint(0, len(pairs)-1)\n input = inputs[index]\n target = targets[index]\n output = eval(input, target, encoder, decoder)\n print('< ' + indices_to_sentence(input, input_lang))\n print('= ' + indices_to_sentence(target, output_lang))\n print('> ' + indices_to_sentence(output, output_lang))\n print('')\n\nencoder = EncoderGru(input_lang.n_words, embedding_size=embedding_size, hidden_size=hidden_size).to(device)\ndecoder = DecoderGruAttn(output_lang.n_words, embedding_size=embedding_size, hidden_size=hidden_size).to(device)\ntrain_all(inputs, targets, encoder, decoder, epochs=epochs)\neval_some(inputs, targets, encoder, decoder, 10)\n","sub_path":"ml-nmt/fr-en-2.py","file_name":"fr-en-2.py","file_ext":"py","file_size_in_byte":11194,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"545351161","text":"from tkinter import *\nfrom tkinter import ttk \nimport tkinter as tk\nimport simplex\nfrom tkinter import messagebox\nfrom locale import _strip_padding\nfrom distutils.dist import command_re\nroot=Tk()\n\nr=0\nc=0\n\ndef wypiszTablice(tab):\n print(\"-----------------\")\n for row in tab:\n print(row)\n print(\"-----------------\")\n\ndef fFile():\n num=[]\n num1=[]\n\n file = open(\"testowedane.txt\", \"r\")\n for line in file:\n fs = [float(f) for f in line.split(\",\")]\n num.append(fs)\n z=num[0][0]\n c=num[1][0] \n for x in range(2, len(num)):\n num1.append(num[x]) \n simplex.simplex(num1) \n messagebox.showinfo(\"Simplex\", \"Wczytano z testowedane.txt, zapisano result.txt\") \n \ndef hello(dawword,daw3,daw,daw2,b_in,a_in,self):\n daw4=[]\n for i in range(len(daw)):\n aaaa=daw[i].get()\n daw4.append(aaaa) \n daw.clear()\n for j in range(len(daw4)):\n bbbb=daw4[j]\n daw.append(bbbb)\n daw4.clear()\n for ii in range(len(daw2)):\n aa=daw2[ii].get()\n daw4.append(aa)\n daw2.clear()\n for jj in range(len(daw4)):\n bb=daw4[jj]\n daw2.append(bb)\n daw4.clear() \n daw3.append(daw)\n daw3.extend(split_list(daw2, b_in))\n \n for row in dawword:\n if row.get() == \"max\":\n for g in range(len(daw3[0])-a_in):\n daw3[0][g]=daw3[0][g]*-1\n \n if row.get() == \">=\":\n print(row)\n print(row.get())\n print(dawword.index(row))\n for g in range(len(daw3[1])-a_in):\n daw3[dawword.index(row)][g]=daw3[dawword.index(row)][g]*-1 \n \n simplex.simplex(daw3) \n daw.clear()\n daw2.clear()\n daw3.clear()\n dawword.clear() \n self.destroy()\n messagebox.showinfo(\"Simplex\", \"Wyniki zapisano w pliku result.txt\") \n \ndef split_list(alist, wanted_parts):\n 
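train() above decides once per target sequence whether to teacher-force. Stripped of the model, the control flow reduces to the skeleton below; `step` is a placeholder for one decoder call and all names are illustrative:

```python
import random

# Skeleton of the per-sequence teacher-forcing decision in train().
teacher_forcing_ratio = 1.0

def decode(targets, step, sos=0, eos=1):
    token = sos
    use_teacher = random.random() < teacher_forcing_ratio
    for gold in targets:
        predicted = step(token)
        # Feed back the gold token when forcing, else the model's own guess.
        token = gold if use_teacher else predicted
        if not use_teacher and token == eos:
            break

decode([5, 9, 1], step=lambda tok: tok + 1)
```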
length = len(alist)\n return [ alist[i*length // wanted_parts: (i+1)*length // wanted_parts] \n for i in range(wanted_parts) ]\n\ndef disallow():\n window.bell()\n\ndef validate2(value_if_allowed,text):\n if text in \"0123456789.'0.0'\":\n return True\n else:\n disallow()\n return False \ndef validate(value_if_allowed,text):\n if text in '0123456789.':\n return True\n else:\n disallow()\n return False\ndef close_window(self): \n self.destroy() \n \ndef adder(*args): # adder()\n def wypiszTablice(tab):\n print(\"-----------------\")\n for row in tab:\n print(row)\n print(\"-----------------\")\n \n try: \n a_in=a.get()\n b_in=b.get()\n window1 = tk.Toplevel(root)\n vcmd2 = (window1.register(validate2),'%P', '%S')\n entry = {}\n entry2 = {}\n daw=[]\n daw2=[]\n daw3=[]\n dawword=[]\n label = {}\n i = 1\n j = 0\n items2 = [\"min\",\"max\"]\n\n \n lb1=Label(window1, text='Funkcja celu:')\n lb1.grid(row=0, column=1,sticky = W)\n cb1_val = StringVar()\n cb1 = ttk.Combobox(window1, textvariable=cb1_val, height=4)\n cb1['values'] = items2 \n cb1.grid(row=1,column=1) \n dawword.append(cb1_val)\n \n f=DoubleVar();\n f.set(0.0);\n daw.append(f);\n for index in range(a_in):\n e_val = DoubleVar()\n e = Entry(window1,textvariable=e_val,validate=\"key\", validatecommand=vcmd2,width=10)\n \n e.grid(sticky=(W,E))\n e.grid(row=1, column=index+2)\n entry[index] = e\n\n daw.append(e_val)\n \n lb = Label(window1, text='x'+str(i))\n lb.grid(row=2, column=index+2)\n label[index] = lb\n i += 1 \n for iii in range(a_in):\n daw.append(f)\n items = [\"<=\",\">=\",\"=\"]\n \n lb1=Label(window1, text='Ograniczenia:')\n lb1.grid(row=4, column=1,sticky = W) \n jedyn=DoubleVar()\n jedyn.set(1.0)\n l_j=0\n for index2 in range(b_in):\n cb_val = StringVar()\n cb = ttk.Combobox(window1, textvariable=cb_val, height=4)\n \n cb['values'] = items \n cb.grid(row=5+index2,column=1) \n dawword.append(cb_val)\n for index1 in range(a_in+1):\n e_val2 = DoubleVar()\n e1 = Entry(window1,textvariable=e_val2,validate=\"key\", validatecommand=vcmd2,width=10)\n e1.grid(sticky=(W,E))\n e1.grid(row=5+index2, column=index1+2)\n entry2[index1] = e1\n daw2.append(e_val2)\n lb = Label(window1, text='x'+str(index1))\n lb.grid(row=6+index2, column=index1+2)\n label[index1] = lb\n for jjj in range(a_in):\n if(jjj==l_j): \n daw2.append(jedyn)\n else:\n daw2.append(f)\n l_j+=1 \n ttk.Button(window1,text=\"START\",command=lambda: hello(dawword,daw3,daw,daw2,b_in,a_in,window1)).grid(row=6+b_in+1,\ncolumn=a_in+1,sticky=E) \n ttk.Button(window1,text=\"EXIT\",command=lambda: close_window(window1)).grid(row=6+b_in+1,column=a_in+2,sticky=E) \n except ValueError:\n pass\n\nroot.title(\"Simpex Python\") \nwindow=ttk.Frame(root,padding=\"12 12 12 12\") \nwindow.grid(column=0,row=0,sticky=(N,S))\nwindow.columnconfigure(0,weight=1) \nwindow.rowconfigure(0,weight=1) \n\n\n\ntxtin=StringVar() \ntxtout=StringVar() \na=IntVar() \nb=IntVar()\n\n\nvcmd = (window.register(validate),'%P', '%S')\ntxt_entry=ttk.Entry(window,width=7,textvariable=txtin)\n\nttk.Label(window,text=\"Podaj liczbe zmiennych w modelu:\").grid(row=1,\ncolumn=2,sticky=(W,E))\n\nttk.Label(window,text=\"zm = \").grid(row=2,column=1,sticky=E)\na_entry=ttk.Entry(window,width=3,validate=\"key\", validatecommand=vcmd,textvariable=a)\n\na_entry.grid(row=2,column=2,sticky=(W,E))\n\nttk.Label(window,text=\"Podaj liczbe ograniczen:\").grid(row=3,\ncolumn=2,sticky=(W,E))\n\nttk.Label(window, text=\"ogr = \").grid(row=4,column=1,sticky=E)\nb_entry=ttk.Entry(window,width=3,validate=\"key\", 
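split_list partitions purely with integer arithmetic, so uneven lengths are distributed deterministically rather than raising. A quick standalone check, with the function copied verbatim for illustration:

```python
# Standalone copy of split_list to show how uneven lengths are handled.
def split_list(alist, wanted_parts):
    length = len(alist)
    return [alist[i * length // wanted_parts:(i + 1) * length // wanted_parts]
            for i in range(wanted_parts)]

print(split_list(list(range(10)), 3))
# [[0, 1, 2], [3, 4, 5], [6, 7, 8, 9]] -- the last part absorbs the remainder
```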
validatecommand=vcmd,textvariable=b)\nb_entry.grid(row=4,column=2,sticky=(W,E))\n\nttk.Button(window,text=\"OK\",command=adder).grid(row=6,\ncolumn=2,sticky=E) \nttk.Button(window,text=\"Z Pliku...\",command=fFile).grid(row=7,\ncolumn=2,sticky=E)\n\nroot.mainloop() \nroot.destroy() \n","sub_path":"WojciechMandziak/wykonywalny.py","file_name":"wykonywalny.py","file_ext":"py","file_size_in_byte":6499,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"569618289","text":"from collections import OrderedDict\nfrom os import path\n\nimport cv2\n\nfrom cvbase.io import check_file_exist, mkdir_or_exist, scandir\nfrom cvbase.opencv import USE_OPENCV3\n\nif USE_OPENCV3:\n from cv2 import (CAP_PROP_FRAME_WIDTH, CAP_PROP_FRAME_HEIGHT, CAP_PROP_FPS,\n CAP_PROP_FRAME_COUNT, CAP_PROP_FOURCC,\n CAP_PROP_POS_FRAMES, VideoWriter_fourcc)\nelse:\n from cv2.cv import CV_CAP_PROP_FRAME_WIDTH as CAP_PROP_FRAME_WIDTH\n from cv2.cv import CV_CAP_PROP_FRAME_HEIGHT as CAP_PROP_FRAME_HEIGHT\n from cv2.cv import CV_CAP_PROP_FPS as CAP_PROP_FPS\n from cv2.cv import CV_CAP_PROP_FRAME_COUNT as CAP_PROP_FRAME_COUNT\n from cv2.cv import CV_CAP_PROP_FOURCC as CAP_PROP_FOURCC\n from cv2.cv import CV_CAP_PROP_POS_FRAMES as CAP_PROP_POS_FRAMES\n from cv2.cv import CV_FOURCC as VideoWriter_fourcc\n\n\nclass Cache(object):\n\n def __init__(self, capacity):\n self._cache = OrderedDict()\n self._capacity = int(capacity)\n if capacity <= 0:\n raise ValueError('capacity must be a positive integer')\n\n @property\n def capacity(self):\n return self._capacity\n\n @property\n def size(self):\n return len(self._cache)\n\n def put(self, key, val):\n if key in self._cache:\n return\n if len(self._cache) >= self.capacity:\n self._cache.popitem(last=False)\n self._cache[key] = val\n\n def get(self, key, default=None):\n val = self._cache[key] if key in self._cache else default\n return val\n\n\nclass VideoReader(object):\n\n def __init__(self, filename, cache_capacity=0):\n check_file_exist(filename, 'Video file not found: ' + filename)\n self._vcap = cv2.VideoCapture(filename)\n self._cache = Cache(cache_capacity) if cache_capacity > 0 else None\n self._position = 0\n # get basic info\n self._width = int(self._vcap.get(CAP_PROP_FRAME_WIDTH))\n self._height = int(self._vcap.get(CAP_PROP_FRAME_HEIGHT))\n self._fps = int(round(self._vcap.get(CAP_PROP_FPS)))\n self._frame_cnt = int(self._vcap.get(CAP_PROP_FRAME_COUNT))\n self._fourcc = self._vcap.get(CAP_PROP_FOURCC)\n\n @property\n def vcap(self):\n return self._vcap\n\n @property\n def width(self):\n return self._width\n\n @property\n def height(self):\n return self._height\n\n @property\n def fps(self):\n return self._fps\n\n @property\n def frame_cnt(self):\n return self._frame_cnt\n\n @property\n def fourcc(self):\n return self._fourcc\n\n @property\n def position(self):\n return self._position\n\n def _get_real_position(self):\n return int(round(self._vcap.get(CAP_PROP_POS_FRAMES)))\n\n def _set_real_position(self, frame_id):\n self._vcap.set(CAP_PROP_POS_FRAMES, frame_id)\n pos = self._get_real_position()\n for _ in range(frame_id - pos):\n self._vcap.read()\n self._position = frame_id\n\n def read(self):\n pos = self._position + 1\n if self._cache:\n img = self._cache.get(pos)\n if img:\n ret = True\n else:\n if self._position != self._get_real_position():\n self._set_real_position(self._position)\n ret, img = self._vcap.read()\n if ret:\n self._cache.put(pos, img)\n else:\n ret, img = self._vcap.read()\n if ret:\n self._position = 
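The Cache in video.py evicts with `popitem(last=False)`, i.e. oldest-inserted first, and `get()` never reorders entries, so it is FIFO rather than a true LRU. A sketch of the LRU variant using `OrderedDict.move_to_end`; the names are illustrative:

```python
from collections import OrderedDict

# Sketch: LRU variant of the FIFO Cache above; move_to_end() supplies
# the "recently used" bookkeeping that the original's get() lacks.
class LRUCache:
    def __init__(self, capacity):
        self._cache = OrderedDict()
        self._capacity = capacity

    def put(self, key, val):
        if key not in self._cache and len(self._cache) >= self._capacity:
            self._cache.popitem(last=False)  # evict least recently used
        self._cache[key] = val
        self._cache.move_to_end(key)

    def get(self, key, default=None):
        if key in self._cache:
            self._cache.move_to_end(key)  # mark as recently used
            return self._cache[key]
        return default

c = LRUCache(2)
c.put(1, 'a'); c.put(2, 'b'); c.get(1); c.put(3, 'c')
print(list(c._cache))  # [1, 3] -- key 2 was least recently used
```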
pos\n return (ret, img)\n\n def get_frame(self, frame_id):\n if frame_id <= 0 or frame_id > self._frame_cnt:\n raise ValueError('frame_id must be between 1 and frame_cnt')\n if frame_id == self._position + 1:\n return self.read()\n if self._cache:\n img = self._cache.get(frame_id)\n if img:\n self._position = frame_id\n return (True, img)\n self._set_real_position(frame_id - 1)\n ret, img = self._vcap.read()\n if ret:\n self._position += 1\n if self._cache:\n self._cache.put(self._position, img)\n return (ret, img)\n\n def current_frame(self):\n if self._position == 0:\n return None\n return self._cache.get(self._position)\n\n def cvt2frames(self,\n frame_dir,\n file_start=0,\n filename_digit=6,\n ext='jpg',\n start=0,\n max_num=0,\n print_interval=0):\n mkdir_or_exist(frame_dir)\n if max_num == 0:\n task_num = self.frame_cnt - start\n else:\n task_num = min(self.frame_cnt - start, max_num)\n if task_num <= 0:\n raise ValueError('start must be less than total frame number')\n if start > 0:\n self._set_real_position(start)\n converted = 0\n while converted < task_num:\n ret, img = self.read()\n if not ret:\n break\n file_idx = converted + file_start\n filename = path.join(\n frame_dir,\n '{0:0{1}d}.{2}'.format(file_idx, filename_digit, ext))\n cv2.imwrite(filename, img)\n converted += 1\n if print_interval > 0 and converted % print_interval == 0:\n print(\n 'video2frame progress: {}/{}'.format(converted, task_num))\n\n def __iter__(self):\n self._set_real_position(0)\n return self\n\n def next(self):\n ret, img = self.read()\n if ret:\n return img\n else:\n raise StopIteration\n\n def __enter__(self):\n return self\n\n def __exit__(self, exc_type, exc_value, traceback):\n self._vcap.release()\n\n\ndef frames2video(frame_dir,\n video_file,\n fps=30,\n fourcc='XVID',\n filename_digit=6,\n ext='jpg',\n start=0,\n end=0):\n \"\"\"read the frame images from a directory and join them as a video\n \"\"\"\n if end == 0:\n max_idx = len([name for name in scandir(frame_dir, ext)]) - 1\n else:\n max_idx = end\n first_file = path.join(frame_dir,\n '{0:0{1}d}.{2}'.format(start, filename_digit, ext))\n check_file_exist(first_file, 'The start frame not found: ' + first_file)\n img = cv2.imread(first_file)\n height, width = img.shape[:2]\n vwriter = cv2.VideoWriter(video_file,\n VideoWriter_fourcc(*fourcc), fps,\n (width, height))\n idx = start\n while idx <= max_idx:\n filename = path.join(frame_dir,\n '{0:0{1}d}.{2}'.format(idx, filename_digit, ext))\n img = cv2.imread(filename)\n vwriter.write(img)\n idx += 1\n vwriter.release()\n","sub_path":"cvbase/video.py","file_name":"video.py","file_ext":"py","file_size_in_byte":6909,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"95174012","text":"# -*- coding: utf-8 -*-\nimport sys\nsys.path.append(\"../..\")\nfrom Func import case_main\nfrom Conf import conf_dir\n\n'''\n 约车 car_book\n create: 2017.09.29 15:58 by liangm\n modify: by\n'''\n\n# ============== 接口自定义参数 ===============================\ninterface_name = \"car_book\" # 接口名称(取自cfg.ini)\ncase_name = interface_name # case 名称(取自data_m.ini)\nconf_dir = conf_dir.conf_dir\n#conf_dir = \"C:\\\\Users\\\\liangm\\\\PycharmProjects\\\\xingjia_interface\\\\script\\\\main_test\\\\Config\\\\\"\nconf_ini = conf_dir + \"cfg.ini\"\ndata_ini = conf_dir + \"data.ini\"\n#setdown_py = \"./init_reset/car_cancel.py\" # 清场,调用其它接口\n\n# ============== 接口测试执行 ===============================\ncase_runner = case_main.test_case(interface_name, case_name, data_ini, 
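VideoReader implements the Python 2 iterator protocol (a `next` method), so a Python 3 for-loop over it raises TypeError. The usual bridge is to define `__next__` and alias `next` to it, sketched here on a stand-in class:

```python
# Sketch: VideoReader defines next() (the Python 2 protocol); aliasing
# __next__ to it makes the class iterable under both Python versions.
class Frames:
    def __init__(self, frames):
        self._frames = iter(frames)

    def __iter__(self):
        return self

    def __next__(self):   # Python 3 protocol
        return next(self._frames)

    next = __next__       # Python 2 alias, same function object

print(list(Frames(["f0", "f1"])))  # ['f0', 'f1']
```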
conf_ini)\n\ncase_runner.conf_global() # 获取全局配置\ncase_runner.run_main(2,\"post\") # 执行\n#case_runner.teardown_final(setdown_py) # 清场\n","sub_path":"xingjia_interface/script/main_test/Test_case/C-APP/car_book.py","file_name":"car_book.py","file_ext":"py","file_size_in_byte":1006,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"219488571","text":"#! /usr/bin/env python3\n\n#-\n# SPDX-License-Identifier: BSD-2-Clause\n#\n# Copyright (c) 2018 Alexandre Joannou\n# Copyright (c) 2019 Peter Rugg\n# All rights reserved.\n#\n# This software was developed by SRI International and the University of\n# Cambridge Computer Laboratory (Department of Computer Science and\n# Technology) under DARPA contract HR0011-18-C-0016 (\"ECATS\"), as part of the\n# DARPA SSITH research programme.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions\n# are met:\n# 1. Redistributions of source code must retain the above copyright\n# notice, this list of conditions and the following disclaimer.\n# 2. Redistributions in binary form must reproduce the above copyright\n# notice, this list of conditions and the following disclaimer in the\n# documentation and/or other materials provided with the distribution.\n#\n# THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND\n# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE\n# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE\n# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\n# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS\n# OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)\n# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT\n# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY\n# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF\n# SUCH DAMAGE.\n#\n\nimport argparse\nimport os\nimport signal\nimport os.path as op\nimport subprocess as sub\nimport time\n\n################################\n# Parse command line arguments #\n################################################################################\n\ndef auto_int (x):\n return int(x,0)\n\ndef auto_pos_int (x):\n val = int(x,0)\n if val <= 0:\n raise argparse.ArgumentTypeError(\"argument must be a positive int. Got {:d}.\".format(val))\n return val\n\ndef auto_write_fd (fname):\n return open(fname, 'w')\n\nknown_rvfi_dii = set({'spike','rvbs','sail','piccolo','ibex','ariane','manual'})\nknown_vengine = set({'QCVEngine'})\nknown_architectures = set({'rv32i','rv32ic','rv64i','rv64ic','rv64g','rv64gc','rv32ixcheri','rv64ixcheri', 'rvxcheri'})\nknown_generators = set({'internal','sail','manual'})\n\nparser = argparse.ArgumentParser(description='Runs a TestRIG configuration')\n\n# model args\nparser.add_argument('-a', '--implementation-A', metavar='IMP', choices=known_rvfi_dii,\n default='rvbs',\n help=\"The implementation A to use. 
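runTestRIG.py validates numeric options through argparse `type=` callables such as auto_int and auto_pos_int. The pattern in isolation, as a runnable sketch:

```python
import argparse

# Minimal sketch of the auto_int / auto_pos_int pattern: a type= callable
# that parses any base and rejects non-positive values.
def auto_pos_int(x):
    val = int(x, 0)  # base 0: accepts 5000, 0x1388, 0o11610, ...
    if val <= 0:
        raise argparse.ArgumentTypeError(
            "argument must be a positive int. Got {:d}.".format(val))
    return val

parser = argparse.ArgumentParser()
parser.add_argument('--port', type=auto_pos_int, default=5000)
print(parser.parse_args(['--port', '0x1388']).port)  # 5000
```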
(one of {:s})\".format(str(known_rvfi_dii)))\nparser.add_argument('--implementation-A-port', metavar='PORT', type=auto_int, default=5000,\n help=\"The port to use for implementation A's rvfi-dii server\")\nparser.add_argument('--implementation-A-log', metavar='PATH',\n default=None, type=auto_write_fd,\n #nargs='?', const=sub.PIPE,\n help=\"Turn on logging for implementation A's rvfi-dii server (optionally specifying a file path)\")\n# implementation args\nparser.add_argument('-b', '--implementation-B', metavar='IMP', choices=known_rvfi_dii,\n default='spike',\n help=\"The implementation B to use. (one of {:s})\".format(str(known_rvfi_dii)))\nparser.add_argument('--implementation-B-port', metavar='PORT', type=auto_int, default=5001,\n help=\"The port to use for implementation B's rvfi-dii server\")\nparser.add_argument('--implementation-B-log', metavar='PATH',\n default=None, type=auto_write_fd,\n #nargs='?', const=sub.PIPE,\n help=\"Turn on logging for implementation B's rvfi-dii server (optionally specifying a file path)\")\n# verification engine args\nparser.add_argument('-e', '--verification-engine', metavar='VENG', choices=known_vengine,\n default='QCVEngine',\n help=\"The verification engine to use. (one of {:s})\".format(str(known_vengine)))\n# general configuration args\nparser.add_argument('-s', '--spawn-delay', metavar='DELAYSEC', default=1, type=auto_int,\n help=\"Specify a number of seconds to wait between server creation and verification engine startup.\")\nparser.add_argument('-v', '--verbose', action='count', default=0,\n help=\"Increase verbosity level by adding more \\\"v\\\".\")\nparser.add_argument('-S', '--save-dir', metavar= 'SAVEDIR', type=str,\n help=\"Keep running, saving each failure to directory provided.\")\nparser.add_argument('-n', '--number-of-tests', metavar= 'NTESTS', type=auto_int,\n default=100, help=\"Runs the verification engine for NTESTS tests.\")\nparser.add_argument('-t', '--trace-file', metavar= 'FILENAME', type=str,\n help=\"Runs the test specified in FILENAME\")\nparser.add_argument('-d', '--trace-dir', metavar= 'DIRNAME', type=str,\n help=\"Runs the tests contained in DIRNAME\")\nparser.add_argument('--path-to-rvbs-dir', metavar='PATH', type=str,\n #default='rvbs-rv32i-rvfi-dii',\n default=op.join(op.dirname(op.realpath(__file__)), \"../../riscv-implementations/RVBS/output/\"),\n help=\"The PATH to the rvbs executable directory\")\nparser.add_argument('--path-to-ibex-dir', metavar='PATH', type=str,\n default=op.join(op.dirname(op.realpath(__file__)), \"../../riscv-implementations/ibex/verilator/obj_dir\"),\n help=\"The PATH to the ibex executable directory\")\nparser.add_argument('--path-to-ariane-dir', metavar='PATH', type=str,\n default=op.join(op.dirname(op.realpath(__file__)), \"../../riscv-implementations/ariane/work-rvfi\"),\n help=\"The PATH to the ariane executable directory\")\nparser.add_argument('--path-to-spike', metavar='PATH', type=str,\n default=op.join(op.dirname(op.realpath(__file__)), \"../../riscv-implementations/riscv-isa-sim/build/spike\"),\n help=\"The PATH to the spike executable\")\nparser.add_argument('--path-to-piccolo', metavar='PATH', type=str,\n default=op.join(op.dirname(op.realpath(__file__)), \"../../riscv-implementations/Piccolo/builds/RV64IUxCHERI_Piccolo_bluesim/exe_HW_sim\"),\n help=\"The PATH to the Piccolo executable\")\nparser.add_argument('--path-to-QCVEngine', metavar='PATH', type=str,\n #default='QCVEngine',\n default=op.join(op.dirname(op.realpath(__file__)), 
\"../../vengines/QuickCheckVEngine/dist/build/QCVEngine/QCVEngine\"),\n help=\"The PATH to the QCVEngine executable\")\nparser.add_argument('-T', '--timeout', metavar='TIMEOUT',\n default='10000000',\n help=\"The architecture to verify. (one of {:s})\".format(str(known_architectures)))\nparser.add_argument('--path-to-sail-riscv-dir', metavar='PATH', type=str,\n default=op.join(op.dirname(op.realpath(__file__)), \"../../riscv-implementations/sail-riscv/c_emulator/\"),\n help=\"The PATH to the sail-riscv executable directory\")\nparser.add_argument('-r', '--architecture', metavar='ARCH', choices=known_architectures,\n default='rv64ic',\n help=\"The architecture to verify. (one of {:s})\".format(str(known_architectures)))\nparser.add_argument('--generator', metavar='GENERATOR', choices=known_generators,\n default='internal',\n help=\"The instruction generator to use. (one of {:s})\".format(str(known_generators)))\nparser.add_argument('--path-to-generator', metavar='PATH', type=str,\n default=op.join(op.dirname(op.realpath(__file__)), \"../../vengines/sail-riscv-test-generation/main.native\"),\n help=\"The PATH to the instruction generation (not needed for internal or manual generators)\")\nparser.add_argument('--generator-port', metavar='PORT', default=5002, type=auto_int,\n help=\"Use instruction generator on given port.\")\nparser.add_argument('--generator-log', metavar='PATH', default=None, type=auto_write_fd,\n help=\"Log instruction generator output\")\n\nargs = parser.parse_args()\n\n###########\n# helpers #\n###########\n\ndef verboseprint(lvl,msg):\n if args.verbose >= lvl:\n print(msg)\n\ndef input_y_n(prompt):\n s = input(prompt)\n return s.lower() in [\"\", \"y\", \"ye\", \"yes\"]\n\n# figure out which rvbs simulator to use\nrvbs_sim = {\n 'rv32i': \"rvbs-rv32IZicsrZifencei\",\n 'rv64i': \"rvbs-rv64IZicsrZifencei\",\n 'rv64ic': \"rvbs-rv64ICZicsrZifencei\",\n 'rv64g': \"rvbs-rv64IZicsrZifencei\",\n 'rv64gc': \"rvbs-rv64ICZicsrZifencei\",\n 'rv32ixcheri': \"rvbs-rv32IZicsrZifenceiXcheri\",\n 'rv64ixcheri': \"rvbs-rv64IZicsrZifenceiXcheri\"\n}.get(args.architecture, \"rvbs-rv64IZicsrZifenceiXcheri\")+\"-rvfi-dii\"\n\n# figure out which ibex simulator to use\nibex_sim = {\n 'rv32ic': \"Vibex_core_avalon\"\n}.get(args.architecture, \"Vibex_core_avalon\")\n\n# figure out which ariane simulator to use\nariane_sim = {\n 'rv64ic': \"Variane_core_avalon\"\n}.get(args.architecture, \"Variane_core_avalon\")\n\n# figure out which sail simulator to use\nsail_sim = {\n 'rv32i': \"riscv_sim_RV32\",\n 'rv64i': \"riscv_sim_RV64\",\n 'rv64ic': \"riscv_sim_RV64\",\n 'rv64g': \"riscv_sim_RV64\",\n 'rv64gc': \"riscv_sim_RV64\",\n 'rv32ixcheri': \"riscv_sim_RV32\",\n 'rv64ixcheri': \"riscv_sim_RV64\"\n}.get(args.architecture, \"riscv_sim_RV64\")\n\n#########################\n# spawn rvfi_dii server #\n#########################\n\ndef spawn_rvfi_dii_server(name, port, log, arch=\"rv32i\"):\n ## few common variables\n use_log = open(os.devnull,\"w\")\n if log:\n use_log = log\n if 'x' in arch:\n # x Splits the standard RISC-V extensions (e.g. 
rv32i) from non-standard ones like CHERI\n [isa, extension] = arch.split('x')\n else:\n # No extension specified in the architecture string\n [isa, extension] = [arch, \"\"]\n\n env2 = os.environ.copy()\n cmd = []\n ##############################################################################\n if (name == 'spike'):\n cmd = [args.path_to_spike, \"--rvfi-dii-port\", str(port),\"--isa={:s}\".format(isa), \"-m0x80000000:0x10000\"]\n if \"LD_LIBRARY_PATH\" in env2:\n env2[\"LD_LIBRARY_PATH\"] = \"%s:%s\" % (env2[\"LD_LIBRARY_PATH\"], op.dirname(args.path_to_spike))\n else:\n env2[\"LD_LIBRARY_PATH\"] = op.dirname(args.path_to_spike)\n\n if log:\n cmd += [\"-l\"]\n\n if extension != \"\" and extension != \"cheri\":\n cmd += [\"--extension={:s}\".format(extension)]\n ##############################################################################\n elif (name == 'rvbs'):\n env2[\"RVFI_DII_PORT\"] = str(port)\n cmd = [op.join(args.path_to_rvbs_dir, rvbs_sim)]\n if log:\n cmd += [\"+itrace\"]\n ##############################################################################\n elif (name == 'piccolo'):\n env2[\"RVFI_DII_PORT\"] = str(port)\n cmd = [args.path_to_piccolo]\n ##############################################################################\n elif (name == 'sail'):\n full_sail_sim = op.join(args.path_to_sail_riscv_dir, sail_sim)\n if 'c' in isa:\n cmd = [full_sail_sim, \"-m\", \"-r\", str(port)]\n else:\n cmd = [full_sail_sim, \"-C\", \"-m\", \"-r\", str(port)]\n ##############################################################################\n elif (name == 'ibex'):\n print(\"selected IBEX\");\n env2[\"RVFI_DII_PORT\"] = str(port)\n cmd = [op.join(args.path_to_ibex_dir, ibex_sim), \"--rvfi-dii-port\", str(port)]\n ##############################################################################\n elif (name == 'ariane'):\n print(\"selected ARIANE\");\n env2[\"RVFI_DII_PORT\"] = str(port)\n cmd = [op.join(args.path_to_ariane_dir, ariane_sim), \"--rvfi-dii-port\", str(port)]\n ##############################################################################\n elif (name == 'manual'):\n return None\n ##############################################################################\n else:\n print(\"Unknown rvfi-dii server {:s}\".format(name))\n return None\n ##############################################################################\n print(\"running rvfi-dii server as: \", \" \".join(cmd))\n p = sub.Popen(cmd, env=env2, stdin=None, stdout=use_log, stderr=use_log)\n print('spawned {:s} rvfi-dii server on port: {:d}'.format(name, port))\n return p\n\n#############################\n# spawn verification engine #\n#############################\n\ndef spawn_vengine(name, mport, iport, arch, timeout):\n if (name == 'QCVEngine'):\n cmd = [args.path_to_QCVEngine, '-a', str(mport), '-b', str(iport), '-r', str(arch), '-T', str(timeout)]\n cmd += ['-n', str(args.number_of_tests)]\n if args.verbose > 0:\n cmd += ['-v']\n if (args.generator != 'internal'):\n cmd += ['-i', str(args.generator_port)]\n if (args.trace_file):\n print(\"using trace_file {:s}\".format(args.trace_file))\n cmd += ['-t', args.trace_file]\n if (args.trace_dir):\n print(\"using trace_dir {:s}\".format(args.trace_dir))\n cmd += ['-d', args.trace_dir]\n if (args.save_dir):\n cmd += ['-s', args.save_dir]\n print(\"running qcvengine as: \", \" \".join(cmd))\n p = sub.Popen(cmd)\n return p\n else:\n if generator:\n generator.kill()\n print(\"Unknown verification engine {:s}\".format(name))\n\n#######################################\n# spawn 
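Each branch of spawn_rvfi_dii_server configures its child through a copied environment rather than mutating os.environ. The core pattern reduced to a runnable sketch; RVFI_DII_PORT is the variable the script actually sets, while the child command here is a placeholder:

```python
import os
import subprocess as sub

# Sketch of the env-copy pattern used by spawn_rvfi_dii_server: copy the
# environment and extend the copy per child, never mutate os.environ.
env2 = os.environ.copy()
env2["RVFI_DII_PORT"] = str(5000)

# Placeholder command; the real script launches a simulator binary here.
p = sub.Popen(["python3", "-c",
               "import os; print(os.environ['RVFI_DII_PORT'])"],
              env=env2, stdout=sub.PIPE)
print(p.communicate()[0].decode().strip())  # 5000
```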
instruction generation engine #\n#######################################\n\ndef spawn_generator(name, arch, log):\n if name == \"sail\":\n if log:\n use_log = log\n elif args.verbose > 0:\n use_log = os.sys.stdout\n else:\n use_log = open(os.devnull,\"w\")\n\n if 'x' in arch:\n # x Splits the standard RISC-V exenstions (e.g. rv32i) from non-standard ones like CHERI\n [isa, extension] = arch.split('x')\n else:\n # No extension specified in the architecture string\n [isa, extension] = [arch, \"\"]\n cmd = [args.path_to_generator, '-p', str(args.generator_port)]\n if not ('c' in isa):\n cmd += ['-no_compressed']\n\n print(\"running sail generator as: \", \" \".join(cmd))\n generator = sub.Popen(cmd, stdout=use_log, stderr=use_log)\n print('spawned sail instruction generator on port: {:d}'.format(args.generator_port))\n return generator\n else:\n return None\n\n#################\n# main function #\n#################\n\ndef main():\n def kill_procs(servA, servB, gen, vengine):\n if servA:\n print(\"killing implementation A's rvfi-dii server\")\n servA.kill()\n if servB:\n print(\"killing implementation B's rvfi-dii server\")\n servB.kill()\n if generator:\n print(\"killing generator\")\n gen.kill()\n if vengine:\n print(\"killing vengine\")\n vengine.kill()\n\n def handle_SIGINT(sig, frame):\n kill_procs(a,b,generator,e)\n exit(0)\n\n signal.signal(signal.SIGINT, handle_SIGINT)\n\n a = None\n b = None\n e = None\n generator = None\n try:\n a = spawn_rvfi_dii_server(args.implementation_A, args.implementation_A_port, args.implementation_A_log, args.architecture)\n b = spawn_rvfi_dii_server(args.implementation_B, args.implementation_B_port, args.implementation_B_log, args.architecture)\n\n time.sleep(args.spawn_delay) # small delay to give time to the spawned servers to be ready to listen\n\n e = spawn_vengine(args.verification_engine, args.implementation_A_port, args.implementation_B_port, args.architecture, args.timeout)\n generator = spawn_generator(args.generator, args.architecture, args.generator_log)\n\n e.wait()\n finally:\n print('run terminated')\n kill_procs(a,b,generator,e)\n sub.Popen([\"/bin/stty\", \"sane\"])\n exit(0)\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"utils/scripts/runTestRIG.py","file_name":"runTestRIG.py","file_ext":"py","file_size_in_byte":15006,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"337946165","text":"from __future__ import division\nimport pylab as plt\nimport FlowCytometryTools\nfrom FlowCytometryTools import test_data_dir, test_data_file, FCMeasurement, FCPlate, ThresholdGate, PolyGate\nfrom sklearn.cluster import KMeans, MeanShift, estimate_bandwidth, DBSCAN,MiniBatchKMeans,spectral_clustering\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.neighbors import KernelDensity\nfrom sklearn.cross_validation import train_test_split, cross_val_score\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.grid_search import GridSearchCV\nfrom sklearn import metrics\n#import FlowFunctions as ff\nimport scipy.signal as signal\nimport pandas as pd\nfrom scipy.sparse import hstack\nimport numpy as np\nfrom scipy.stats import gaussian_kde\n\n\nplate = FCPlate.from_dir(ID='Demo Plate', path= '/Users/WRShoemaker/github/Task2/FlowCyto/20151203/CTC_FVD_Hoechst_test120315/', parser='name')\nplate = plate.transform('hlog', channels=['FSC-A', 'SSC-A', 'PI (B)-A', 'Alexa Fluor 488-A', 'Pacific Blue-A'])\nplate = plate.dropna()\ngate = ThresholdGate(2000.0, 'Pacific Blue-A', 
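main() registers a SIGINT handler so Ctrl-C still tears down the spawned servers, generator, and verification engine. The minimal shape of that pattern; `children` is a placeholder list of Popen handles:

```python
import signal
import sys

# Sketch of the Ctrl-C cleanup in main(); children stands in for the
# subprocess.Popen handles of the spawned servers.
children = []

def handle_sigint(sig, frame):
    for child in children:
        child.kill()  # mirrors kill_procs() above
    sys.exit(0)

signal.signal(signal.SIGINT, handle_sigint)
```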
region='below')\ngated_sample_beads_A3= plate['A3'].gate(gate)\n\nplateData = plate['A3'].data[['Pacific Blue-A', 'PI (B)-A']]\n\nCTCnumpy = gated_sample_beads_A3[['PI (B)-A']].values\nDNAnumpy = gated_sample_beads_A3[['Pacific Blue-A']].values\n\ndef get_kdens_choose_kernel(xlist,expand, kernel = 0.5):\n \"\"\" Finds the kernel density function across a vector of values \"\"\"\n xlist = xlist[np.logical_not(np.isnan(xlist))]\n density = gaussian_kde(xlist)\n n = len(xlist)\n if expand == False:\n xs = np.linspace(min(xlist),max(xlist),n)\n else:\n xs = np.linspace(min(xlist - expand),max(xlist + expand),n)\n #xs = np.linspace(0.0,1.0,n)\n density.covariance_factor = lambda : kernel\n density._compute_covariance()\n D = [xs,density(xs)]\n return D\n\ndef CV_KDE(oneD_array, expand = 1000):\n # remove +/- inf\n oneD_array = oneD_array[np.logical_not(np.isnan(oneD_array))]\n grid = GridSearchCV(KernelDensity(),\n {'bandwidth': np.logspace(0.1, 5.0, 30)},\n cv=50) # 20-fold cross-validation\n grid.fit(oneD_array[:, None])\n x_grid = np.linspace(np.amin(oneD_array), np.amax(oneD_array), 10000)\n # add nothing to the end of grid and pdf so you can get a nice looking kde\n kde = grid.best_estimator_\n pdf = np.exp(kde.score_samples(x_grid[:, None]))\n # returns grod for x-axis, pdf, and bandwidth\n return_tuple = (x_grid, pdf, kde.bandwidth)\n return return_tuple\n\n#returnKDE = get_kdens_choose_kernel(CTCnumpy, 1000, kernel = 0.05 )\n\n\n#km = KMeans(n_clusters=2, init='k-means++', n_init=10,\n# max_iter=300, tol=1e-04, random_state=0)\n\n#X = StandardScaler().fit_transform(plateData)\n\n##############################################################################\n# Compute DBSCAN\n\n#km = MiniBatchKMeans(n_clusters=2, init='k-means++', n_init=10,\n# max_iter=300, tol=1e-04, random_state=0)\n#km = spectral_clustering(X, n_clusters=2, eigen_solver='arpack')\n\n\n#y_km = km.fit_predict(X)\n#plt.scatter(X[y_km==0,0], X[y_km==0,1], s=25,\n# c='green', marker='s', label='Active')\n#plt.scatter(X[y_km==1,0], X[y_km==1,1], s=25,\n# c='red', marker='o', label='Dormant')\n\n#plt.scatter(km.cluster_centers_[:,0], km.cluster_centers_[:,1],\n# s=200, marker='*', c='red', label='centroids')\n\n#plt.legend()\n#plt.grid()\n#plt.tight_layout()\n#plt.savefig('./figures/centroids.png', dpi=300)\n#plt.show()\n\n#plt.savefig('./figures/centroids.png', dpi=300)\n#plt.show()\n\n\n#toPlot = ['A3', 'B1', 'B3', 'C2']\ntoPlot = ['C2']\n\nfor x in toPlot:\n gated_sample_beads_A3_1 = plate[x].gate(gate)\n CTCnumpy_1 = gated_sample_beads_A3_1[['PI (B)-A']].values\n returnKDE_1 = CV_KDE(CTCnumpy_1)\n fig = plt.figure()\n ax = fig.add_subplot(1, 1, 1)\n #ax.scatter(DNAnumpy, CTCnumpy)\n #ax.set_xlim([-1000,4000])\n\n plt.plot(returnKDE_1[0], returnKDE_1[1],color = 'b', linestyle = '-', label=\"N = 1000, B = 1\")\n plt.xlabel('Metabolic activity (CTC)', fontsize = 18)\n plt.ylabel('Probability', fontsize = 18)\n output = x + '.png'\n #plt.savefig(output)\n plt.savefig(output, bbox_inches = \"tight\", pad_inches = 0.4, dpi = 600)\n #plt.xscale()\n plt.close()\n","sub_path":"Prelim_Fig/Rewrite/flowFig.py","file_name":"flowFig.py","file_ext":"py","file_size_in_byte":4277,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"143370480","text":"import pymongo\nfrom pymongo import MongoClient\n\ncluster = MongoClient(\"mongodb+srv://admin:admin@cluster0-hz7um.mongodb.net/test?retryWrites=true&w=majority\")\ndb = cluster[\"test\"]\ncollection = db[\"test\"]\npost = 
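CV_KDE in flowFig.py picks the kernel bandwidth by cross-validated grid search. The same idea as a self-contained sketch on toy data; note that GridSearchCV now lives in sklearn.model_selection rather than the sklearn.grid_search module imported above:

```python
import numpy as np
from sklearn.neighbors import KernelDensity
from sklearn.model_selection import GridSearchCV  # sklearn.grid_search in old releases

# Sketch of the cross-validated bandwidth selection in CV_KDE, on toy data.
rng = np.random.RandomState(0)
data = np.concatenate([rng.normal(0, 1, 200), rng.normal(5, 0.5, 200)])

grid = GridSearchCV(KernelDensity(),
                    {'bandwidth': np.logspace(-1, 1, 20)},
                    cv=5)
grid.fit(data[:, None])

x_grid = np.linspace(data.min(), data.max(), 500)
pdf = np.exp(grid.best_estimator_.score_samples(x_grid[:, None]))
print('best bandwidth:', grid.best_estimator_.bandwidth)
```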
[\n{\"name\":\"balu\",\"age\":22},\n{\"name\":\"babu\",\"age\":52},\n{\"name\":\"naidu\",\"age\":87}\n]\n\nresults = collection.find({})\n\nfor result in results:\n print(result[\"name\"])\n","sub_path":"mongo.py","file_name":"mongo.py","file_ext":"py","file_size_in_byte":374,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"88736600","text":"# This file is part of the Trezor project.\n#\n# Copyright (C) 2012-2019 SatoshiLabs and contributors\n#\n# This library is free software: you can redistribute it and/or modify\n# it under the terms of the GNU Lesser General Public License version 3\n# as published by the Free Software Foundation.\n#\n# This library is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU Lesser General Public License for more details.\n#\n# You should have received a copy of the License along with this library.\n# If not, see .\n\nimport pytest\n\nfrom trezorlib import cardano, messages\nfrom trezorlib.exceptions import TrezorFailure\n\nfrom ...common import parametrize_using_common_fixtures\n\npytestmark = [\n pytest.mark.altcoin,\n pytest.mark.cardano,\n pytest.mark.skip_t1,\n]\n\n\n@parametrize_using_common_fixtures(\n \"cardano/sign_tx.json\", \"cardano/sign_tx.slip39.json\"\n)\ndef test_cardano_sign_tx(client, parameters, result):\n inputs = [cardano.create_input(i) for i in parameters[\"inputs\"]]\n outputs = [cardano.create_output(o) for o in parameters[\"outputs\"]]\n certificates = [cardano.create_certificate(c) for c in parameters[\"certificates\"]]\n withdrawals = [cardano.create_withdrawal(w) for w in parameters[\"withdrawals\"]]\n\n expected_responses = [messages.PassphraseRequest()]\n expected_responses += [\n messages.ButtonRequest(code=messages.ButtonRequestType.Other)\n for i in range(len(parameters[\"input_flow\"]))\n ]\n expected_responses.append(messages.CardanoSignedTx())\n\n def input_flow():\n for sequence in parameters[\"input_flow\"]:\n yield\n for action in sequence:\n if action == \"SWIPE\":\n client.debug.swipe_up()\n elif action == \"YES\":\n client.debug.press_yes()\n else:\n raise ValueError(\"Invalid input action\")\n\n with client:\n client.set_expected_responses(expected_responses)\n client.set_input_flow(input_flow)\n response = cardano.sign_tx(\n client=client,\n inputs=inputs,\n outputs=outputs,\n fee=parameters[\"fee\"],\n ttl=parameters[\"ttl\"],\n certificates=certificates,\n withdrawals=withdrawals,\n metadata=bytes.fromhex(parameters[\"metadata\"]),\n protocol_magic=parameters[\"protocol_magic\"],\n network_id=parameters[\"network_id\"],\n )\n assert response.tx_hash.hex() == result[\"tx_hash\"]\n assert response.serialized_tx.hex() == result[\"serialized_tx\"]\n\n\n@parametrize_using_common_fixtures(\"cardano/sign_tx.failed.json\")\ndef test_cardano_sign_tx_failed(client, parameters, result):\n inputs = [cardano.create_input(i) for i in parameters[\"inputs\"]]\n outputs = [cardano.create_output(o) for o in parameters[\"outputs\"]]\n certificates = [cardano.create_certificate(c) for c in parameters[\"certificates\"]]\n withdrawals = [cardano.create_withdrawal(w) for w in parameters[\"withdrawals\"]]\n\n expected_responses = [messages.PassphraseRequest(), messages.Failure()]\n\n with client:\n client.set_expected_responses(expected_responses)\n\n with pytest.raises(TrezorFailure, match=result[\"error_message\"]):\n cardano.sign_tx(\n 
client=client,\n inputs=inputs,\n outputs=outputs,\n fee=parameters[\"fee\"],\n ttl=parameters[\"ttl\"],\n certificates=certificates,\n withdrawals=withdrawals,\n metadata=bytes.fromhex(parameters[\"metadata\"]),\n protocol_magic=parameters[\"protocol_magic\"],\n network_id=parameters[\"network_id\"],\n )\n","sub_path":"tests/device_tests/cardano/test_sign_tx.py","file_name":"test_sign_tx.py","file_ext":"py","file_size_in_byte":3881,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"613655933","text":"# -*- coding: utf-8 -*-\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport random\n\nclass KMeans(object):\n\n def __init__(self, dataset, k, max_iter=100):\n self.dataset = dataset\n self.k = k\n self.center_set = [] # [i]にカテゴリiに対応する重心入ってる\n self.max_iter = max_iter # 最大反復数\n\n def distance(self, x, y):\n return np.sum((x - y) ** 2)\n \n def clustering(self, dataset):\n # 初期カテゴリはランダムに与える\n category = [random.randint(0, self.k - 1) for i in range(len(self.dataset))]\n \n for iter_ in range(self.max_iter):\n #print(iter_) # 反復数\n tmp_center_set = []\n for i in range(self.k):\n center = np.zeros((4,))\n num = 0\n for j in range(len(self.dataset)):\n if (category[j] == i):\n center += dataset[j]\n num += 1\n if num == 0: # カテゴリiに属するデータ消えたとき前回の重心から更新しない\n print(\"center消失\")\n tmp_center_set.append(self.center_set[i])\n else:\n tmp_center_set.append(center/num)\n self.center_set = tmp_center_set \n \n new_category = [] \n for j in range(len(self.dataset)):\n tmp = []\n for l in range(self.k):\n tmp.append(self.distance(self.center_set[l], self.dataset[j]))\n new_category.append(tmp.index(min(tmp)))\n \n if category == new_category: # カテゴリ更新されなくなったら終了\n break\n\n category = new_category\n\n return category\n\nclass IrisDataset(object):\n\n def __init__(self, data, label, species, attributes, length):\n self.data = data\n self.label = label\n self.species = species\n self.attributes = attributes\n self.length = length\n\n\ndef make_iris_dataset(normalize=False):\n data = []\n label = []\n species = [\"Iris-setosa\", \"Iris-versicolor\", \"Iris-virginica\"]\n attributes = {\"Sepal length\":0, \"Sepal weigh\":1, \"Petal length\":2, \"Petal weigh\":3}\n filepath = \"./iris.data\"\n \n for line in open(filepath, \"r\"):\n element = line.split(',')\n data.append(list(map(float, element[:4])))\n label.append(element[4].strip())\n\n data = np.array(data)\n data = np.array(data)\n if normalize:\n mean = data.mean(axis=0, keepdims=True)\n std = np.std(data, axis=0, keepdims=True)\n tmp = (data - mean)/std\n data = tmp\n label = np.array(label)\n species = np.array(species)\n length = label.shape[0]\n \n iris_dataset = IrisDataset(data, label, species, attributes, length) \n\n return iris_dataset\n\ndef graph(dataset, k, label_x, label_y):\n x = dataset.attributes[label_x]\n y = dataset.attributes[label_y]\n\n model = KMeans(dataset.data, k)\n label = model.clustering(dataset.data)\n center = np.array(model.center_set)\n color_list = ['red', 'blue', 'green', 'orange', 'pink', 'yellow', 'black', 'purple','gray']\n X = dataset.data[:,x]\n Y = dataset.data[:,y]\n center_X = center[:,x]\n center_Y = center[:,y]\n\n for i in range(k):\n tmp_x = []\n tmp_y = []\n for j in range(len(label)):\n if label[j] == i:\n tmp_x.append(X[j])\n tmp_y.append(Y[j])\n plt.scatter(tmp_x, tmp_y, c=color_list[i])\n plt.plot(center_X[i], center_Y[i], marker='*', ms=30, c=color_list[i])\n\n plt.title('k = {}'.format(k))\n plt.xlabel(label_x)\n plt.ylabel(label_y)\n 
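test_cardano_sign_tx_failed asserts the failure path with pytest.raises(..., match=...), which checks both the exception type and its message. The pattern in isolation, on a toy function:

```python
import pytest

# The failure-path pattern from test_cardano_sign_tx_failed: match=
# treats the expected message as a regex over the exception text.
def divide(a, b):
    if b == 0:
        raise ValueError("division by zero is not allowed")
    return a / b

def test_divide_by_zero():
    with pytest.raises(ValueError, match="division by zero"):
        divide(1, 0)
```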
plt.show()\n\ndef main():\n iris_dataset = make_iris_dataset()\n for i in [2,3,4,5]:\n graph(iris_dataset, i, \"Sepal length\", \"Sepal weigh\")\n\nif __name__ == '__main__':\n main()","sub_path":"assignment_1/kmeans.py","file_name":"kmeans.py","file_ext":"py","file_size_in_byte":3938,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"552365728","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('forum', '0007_thread_locked'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='thread',\n name='summary',\n field=models.TextField(default='', max_length=128),\n preserve_default=False,\n ),\n ]\n","sub_path":"DundonaldGolfClub/forum/migrations/0008_thread_summary.py","file_name":"0008_thread_summary.py","file_ext":"py","file_size_in_byte":443,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"533355071","text":"#!/Users/spoll/Virtualenvs/automate/bin/python3\n # spamseq.py - Finds and fixes sequences in spam files\n\nimport os\nfrom os.path import join, isfile\nimport shutil\n\ndef main():\n prefix = 'spam'\n target_index = 1\n mypath = 'delicious'\n\n contents = os.listdir(mypath)\n files = [f for f in contents if isfile(join(mypath, f)) and\n f.startswith('spam')]\n for f in files:\n target_str = 'spam' + str(target_index).zfill(3) + '.txt'\n if f != target_str:\n # print('About to change %s to %s' % (join(mypath, f), join(mypath, target_str)))\n shutil.move(join(mypath, f), join(mypath, target_str))\n target_index += 1\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"Python/Automate the Boring Stuff/ch09/spamseq.py","file_name":"spamseq.py","file_ext":"py","file_size_in_byte":717,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"561898121","text":"#!/bin/python3\n\nimport math\nimport os\nimport random\nimport re\nimport sys\n\n# Complete the alternatingCharacters function below.\ndef alternatingCharacters(s):\n last=s[0]\n array=[]\n count=1\n sum=0\n for i in range(1,len(s)):\n if(s[i]==last):\n count+=1\n else:\n array.append(count)\n count=1\n last=s[i]\n array.append(count)\n for i in array:\n sum+=(i-1)\n return(sum) \n\nif __name__ == '__main__':\n fptr = open(os.environ['OUTPUT_PATH'], 'w')\n\n q = int(input())\n\n for q_itr in range(q):\n s = input()\n\n result = alternatingCharacters(s)\n\n fptr.write(str(result) + '\\n')\n\n fptr.close()\n","sub_path":"Alternating Characters.py","file_name":"Alternating Characters.py","file_ext":"py","file_size_in_byte":699,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"431584380","text":"\ndef word_ngram(s: str, n: int) -> list:\n word_list = s.split()\n return [word_list[i:i + n] for i in range(len(word_list) - n + 1)]\n\n\ndef char_ngram(s: str, n: int) -> list:\n return [s[i:i + n] for i in range(len(s) - n + 1)]\n\n\nif __name__ == \"__main__\":\n s = \"I am an NLPer\"\n n = 3\n\n print(f\"単語{n}gram : {word_ngram(s, n)}\")\n print(f\"文字{n}gram : {char_ngram(s, n)}\")\n\n# 関数, typehint, __name__ ","sub_path":"yoshimura/chapter01/knock05.py","file_name":"knock05.py","file_ext":"py","file_size_in_byte":427,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"187279547","text":"import matplotlib.pyplot as plt\nimport numpy as 
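The two helpers in knock05.py differ only in what the window slides over: a token list versus the raw string. A quick check on the sentence from the file, with expected outputs in comments:

```python
# Quick check of the two sliding-window n-gram helpers above.
def word_ngram(s, n):
    words = s.split()
    return [words[i:i + n] for i in range(len(words) - n + 1)]

def char_ngram(s, n):
    return [s[i:i + n] for i in range(len(s) - n + 1)]

s = "I am an NLPer"
print(word_ngram(s, 2))  # [['I', 'am'], ['am', 'an'], ['an', 'NLPer']]
print(char_ngram(s, 2))  # ['I ', ' a', 'am', 'm ', ...]
```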
np\n\n\nMOTA = [0, 58, 57, 55, 54, 51, 47, 43, 39, 33, 29]\n\nplt.title(\"Result\")\nplt.xlabel(\"Number Of Objects\")\nplt.ylabel(\"MOTA(%)\")\nplt.plot(MOTA, 'ro')\nplt.show()\n","sub_path":"TLD2/graph.py","file_name":"graph.py","file_ext":"py","file_size_in_byte":211,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} {"seq_id":"207775796","text":"#https://www.acmicpc.net/problem/6603\ndef prev_permutation(n):\n\ti, j = len(n)-1, 0\n\tif i <= 0:\n\t\treturn False\n\twhile True:\n\t\tif n[i-1] > n[i]:\n\t\t\tbreak\n\t\ti = i - 1\n\tif i <= 0:\n\t\treturn False\n\tj = len(n)-1\n\twhile True:\n\t\tif j >= i and n[j] < n[i-1]:\n\t\t\tbreak\n\t\tj = j - 1\n\tn[i-1], n[j] = n[j], n[i-1]\n\tj = len(n)-1\n\twhile i < j:\n\t\tn[i], n[j] = n[j], n[i]\n\t\ti = i + 1\n\t\tj = j - 1\n\treturn True\n\nwhile True:\n\tn = list(map(int,input().split()))\n\tif n[0] == 0:\n\t\tbreak\n\tl = n[0]\n\tdel n[0]\n\n\t# mark the first six (sorted) numbers as chosen\n\tbit = [0]*l\n\tbit[:6] = [1]*6\n\n\tn.sort()\n\twhile True:\n\t\t#print(bit)\n\t\tfor i in range(l):\n\t\t\tif bit[i] == 1:\n\t\t\t\tprint(str(n[i])+\" \",end=\"\")\n\t\tprint(\"\")\n\n\t\tif not prev_permutation(bit):\n\t\t\tbreak\n\tprint(\"\")\n","sub_path":"brute-force/permutation/p6603.py","file_name":"p6603.py","file_ext":"py","file_size_in_byte":722,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} {"seq_id":"614659049","text":"\"\"\"\n    Author: Xixi\n    Version: 4.0\n    Date: 2019/06/29\n    Purpose: fetch AQI data for all cities with a BeautifulSoup crawler\n\n\"\"\"\nimport requests\nfrom bs4 import BeautifulSoup\n\n\ndef get_city_aqi(city_pinyin):\n    \"\"\"\n    Fetch the AQI readings of one city.\n    :param city_pinyin: city name in pinyin, as used in the URL\n    :return: list of (caption, value) tuples\n    \"\"\"\n    url = 'http://pm25.in/' + city_pinyin\n    r=requests.get(url,timeout=30)\n    soup=BeautifulSoup(r.text,'lxml')\n    div_list=soup.find_all('div',{'class':'span1'})\n    city_aqi=[]\n    for i in range(8):\n        div_content=div_list[i]\n        caption=div_content.find('div',{'class':'caption'}).text.strip()\n        value=div_content.find('div',{'class':'value'}).text.strip()\n        city_aqi.append((caption,value))\n    # print(r.status_code)\n    return city_aqi\n\n\ndef get_all_cities():\n    \"\"\"\n    Fetch the list of all cities.\n    :return: list of (city_name, city_pinyin) tuples\n    \"\"\"\n    url='http://pm25.in/'\n    city_list=[]\n    r = requests.get(url, timeout=30)\n    soup = BeautifulSoup(r.text, 'lxml')\n    city_div=soup.find_all('div',{'class':'bottom'})[1]\n    city_link_list=city_div.find_all('a')\n    for city_link in city_link_list:\n        city_name=city_link.text\n        city_pinyin=city_link['href'][1:]\n        city_list.append((city_name,city_pinyin))\n    return city_list\n\n\ndef main():\n    city_list=get_all_cities()\n\n    for city in city_list:\n        city_name=city[0]\n        city_pinyin=city[1]\n        city_aqi=get_city_aqi(city_pinyin)\n\n        print(city_name,city_aqi)\n\n\nif __name__ == '__main__':\n    main()","sub_path":"day6/aqi_4.0.py","file_name":"aqi_4.0.py","file_ext":"py","file_size_in_byte":1471,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} {"seq_id":"542297039","text":"# -*- coding: utf-8 -*-\n\nimport re\nimport time\nimport requests\n\nif __name__ == '__main__':\n    url_first = 'https://sc.chinaz.com/tupian/index.html'\n    url_base = 'https://sc.chinaz.com/tupian/index_%d.html'\n    base = 'http:'\n    try:\n        for i in range(2):\n            url = url_first if not i else url_base % (i + 1)\n            response = requests.get(url=url)\n            html = response.text\n            # ? makes the match non-greedy (stop at the first \"); greedy mode would keep matching.\n            # NOTE: the original img-tag regex was stripped as HTML during extraction; the pattern\n            # below is a reconstruction from context (. is any character, * means zero or more,\n            # and with a capture group only the content inside () is returned).\n            pattern = r'<img src2=\"(.*?)\"'\n            img_urls = re.findall(pattern, html)\n            img_urls = [base + i for i in img_urls]\n            img_names = 
[i.rsplit('/', maxsplit=1)[-1] for i in img_urls]\n            for img_url, img_name in zip(img_urls, img_names):\n                response = requests.get(url=img_url, timeout=10)\n                content = response.content\n                with open('./chinaz/%s' % img_name, mode='wb') as fp:\n                    fp.write(content)\n                print('page {page} image {name} saved successfully'.format(page=i + 1, name=img_name))\n            time.sleep(1)\n    except Exception as e:\n        with open('.exception.txt', 'a', encoding='utf-8') as fp:  # 'a' opens in append mode\n            fp.write(str(e) + '\\n')\n","sub_path":"04_exception_pictures.py","file_name":"04_exception_pictures.py","file_ext":"py","file_size_in_byte":1377,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} {"seq_id":"601638618","text":"# -*- coding: utf-8 -*-\r\n\r\n# Handling multiple conditions\r\n# if ~ elif ~ else\r\n# if first condition:\r\n#     statements to run when the first condition is true\r\n# elif second condition:\r\n#     statements to run when the second condition is true\r\n# ...\r\n# elif Nth condition:\r\n#     statements to run when the Nth condition is true\r\n# else:\r\n#     statements to run when every condition is false\r\n\r\nprint(\"1. Korean food\")\r\nprint(\"2. Japanese food\")\r\nprint(\"3. Chinese food\")\r\nmenu = int(input('Select a menu item: '))\r\n\r\noutput = None\r\nif menu == 1 :\r\n    output = \"Today's Korean menu is kimchi stew.\"\r\nelif menu == 2 :\r\n    output = \"Today's Japanese menu is pork cutlet.\"\r\nelif menu == 3 :\r\n    output = \"Today's Chinese menu is jjajangmyeon.\"\r\nelse :\r\n    output = \"Only menu items 1 to 3 exist.\"\r\n\r\nprint(output)\r\n","sub_path":"day_03/if_05.py","file_name":"if_05.py","file_ext":"py","file_size_in_byte":902,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} {"seq_id":"393487365","text":"import ujson\nfrom functools import wraps\nfrom voluptuous import Schema, MultipleInvalid, PREVENT_EXTRA\n\n\ndef construct_response(exception):\n    \"\"\"\n    :param exception: MultipleInvalid exception\n    :return: json\n\n    Builds an error json from a MultipleInvalid exception.\n    \"\"\"\n    result = []\n    if isinstance(exception, MultipleInvalid):\n        for e in exception.errors:\n            result.append({\n                'msg': str(e),\n                'error_type': e.error_type\n            })\n\n    return ujson.dumps(result)\n\n\ndef dvalidate(schema, required=False, extra=PREVENT_EXTRA):\n    \"\"\"\n    :param schema: dict in voluptuous format\n    :param required: if True, all schema parameters are required\n    :param extra: strategy for handling missing or extra parameters\n    :return: the function result if the parameters are valid, otherwise an error message\n\n    decorator for validating the parameters of tornado handlers\n\n    usage:\n    class SomeTornadoHandler:\n        @dvalidate({'a': int, 'b': str})\n        def get(self):\n            self.write('success')\n\n    \"\"\"\n    assert isinstance(schema, dict)\n    schema = Schema(schema, required, extra)\n\n    def true_decorator(fun):\n        @wraps(fun)\n        def wrapper(_self, *args, **kwargs):\n            try:\n                schema(_self.args)\n                result = fun(_self, *args, **kwargs)\n                return result\n\n            except MultipleInvalid as e:\n                _self.write(construct_response(e))\n\n        return wrapper\n\n    return true_decorator\n\n\ndef mvalidate(required=False, extra=PREVENT_EXTRA, **schema):\n    \"\"\"\n    same parameters as dvalidate\n\n    manual variant of dvalidate\n    \"\"\"\n    schema = Schema(schema, required, extra)\n    return schema\n","sub_path":"utils/schema_validators.py","file_name":"schema_validators.py","file_ext":"py","file_size_in_byte":2003,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} {"seq_id":"522665978","text":"# Reads a file (e.g. romeo.txt) and collects the unique words from it\n\nfname = input(\"Enter file name: \")\nfh = open(fname)\nlst = list()\nfor line in fh:\n    line = line.rstrip()\n    line = 
line.split()\n    for word in line:\n        if word not in lst:\n            lst.append(word)\n\nlst.sort()\nprint(lst)\n","sub_path":"course_2/ex_08_01.py","file_name":"ex_08_01.py","file_ext":"py","file_size_in_byte":319,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} {"seq_id":"639939278","text":"import picamera,RPi.GPIO as GPIO\nfrom time import sleep  # needed for the sleep() call below\n\nbutton = 14\nballoon = 2\n\nGPIO.setmode(GPIO.BCM)\nGPIO.setwarnings(False)\nGPIO.setup(button, GPIO.IN, GPIO.PUD_UP)\nGPIO.setup(balloon, GPIO.OUT)\n\nwith picamera.PiCamera() as camera:\n    camera.resolution=(640,480)\n    camera.framerate=(90)\n    camera.start_recording('my_video2.h264')\n    camera.wait_recording(10)\n\n    print(\"Ready...\")\n    GPIO.wait_for_edge(button, GPIO.FALLING)\n    GPIO.output(balloon,True)\n    sleep(5)\n    GPIO.output(balloon,False)\n    print(\"Pop!\")\n    camera.wait_recording(20)\n    camera.stop_recording()\n","sub_path":"PiCamera/video code.py","file_name":"video code.py","file_ext":"py","file_size_in_byte":571,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} {"seq_id":"214945003","text":"#! /usr/bin/env python3\n\nimport logging\nfrom grab import Grab\nfrom hashlib import sha1\n\nimport global_vars\nimport database_functions\nimport image_downloader\n\n\nURL = 'http://maytoni.su/'\nexclude_collection_urls = ['http://maytoni.su/arhiv/', 'http://maytoni.su/abazhury/']\n\nERROR_LOG = '/home/smart_eternity/test/maytoni.log'\nlogging.basicConfig(format = '%(levelname)-8s [%(asctime)s] %(message)s', level = logging.DEBUG, filename = '{0}'.format(ERROR_LOG))\n\ng = Grab()\ng.setup(timeout = global_vars.grab_timeout, connect_timeout = global_vars.grab_timeout)\n\n\ndef get_collection_urls():\n    logging.info('Parsing collection links...')\n\n    lst = []\n\n    try:\n        g.go(URL)\n\n        collection_xpath = g.doc.select('//li[@class=\"dropdown\"]/a')\n        for element in collection_xpath:\n            href = element.attr('href')\n            if href not in exclude_collection_urls:\n                lst.append(href)\n    except Exception as e:\n        global_vars.ERRORS += 1\n        logging.error(e)\n\n    return lst\n\ndef get_allpages_urls(collection_urls):\n    logging.info('Parsing links to all collection pages...')\n\n    lst = []\n\n    try:\n        for collection_url in collection_urls:\n            g.go(collection_url)\n\n            try:\n                collection_pages_xpath = g.doc.select('//div[@class=\"pagination\"]/div[@class=\"links\"]/a')\n                for collection_item_page in collection_pages_xpath:\n                    lst.append(collection_item_page.attr('href'))\n            except Exception as e:\n                logging.warn('The collection has only one page; worth double-checking.')\n    except Exception as e:\n        global_vars.ERRORS += 1\n        logging.error(e)\n\n    return sorted(set(collection_urls + lst))\n    #return(['http://maytoni.su/elegant/'])\n\ndef get_item_urls(allpages_urls):\n    logging.info('Parsing links to each item...')\n\n    lst = []\n\n    try:\n        for page_url in allpages_urls:\n            g.go(page_url)\n\n            item_url_xpath = g.doc.select('//div[@class=\"image\"]/a')\n            for item_url in item_url_xpath:\n                lst.append(item_url.attr('href'))\n    except Exception as e:\n        global_vars.ERRORS += 1\n        logging.error(e)\n\n    return(lst)\n\n\n\ndef parse_items():\n    collection_urls = get_collection_urls()\n    allpages_urls = get_allpages_urls(collection_urls)\n    item_urls = get_item_urls(allpages_urls)\n    #item_urls = ['http://maytoni.su/elegant/arm172-11-g.html']\n\n    try:\n        for item_url in item_urls:\n            g.go(item_url)\n\n            # TODO xpath\n            manufacturer = 'MAYTONI'\n\n            model = g.doc.select('.//*[@id=\"content\"]/h1').text()\n\n            sku = 
model\n\n            image = g.doc.select('//a[@id=\"zoom1\"]').attr('href')\n\n            for num in range(1, 20):\n                try:\n                    description_table_xpath = g.doc.select('//div[@id=\"tab-description\"]//table/tbody/tr[{0}]'.format(num)).text()\n                    if 'Тип' in description_table_xpath:\n                        lamp_type = g.doc.select('//div[@id=\"tab-description\"]//table/tbody/tr[{0}]/td[2]'.format(num)).text()\n                    if 'Диаметр' in description_table_xpath:\n                        diameter = g.doc.select('//div[@id=\"tab-description\"]//table/tbody/tr[{0}]/td[2]'.format(num)).text()\n                    if 'Высота' in description_table_xpath:\n                        height = g.doc.select('//div[@id=\"tab-description\"]//table/tbody/tr[{0}]/td[2]'.format(num)).text()\n                    if 'Коллекция' in description_table_xpath:\n                        collection = g.doc.select('//div[@id=\"tab-description\"]//table/tbody/tr[{0}]/td[2]'.format(num)).text()\n                    if 'Ламп' in description_table_xpath:\n                        lamp_count = g.doc.select('//div[@id=\"tab-description\"]//table/tbody/tr[{0}]/td[2]'.format(num)).text()\n\n                    description = g.doc.select('.//div[@id=\"tab-description\"]//p').text()\n                    if 'люстра' in description:\n                        lamp_type = 'Люстры'\n                except Exception:\n                    # the description may simply be missing on this site\n                    pass\n\n            try:\n                price_xpath = g.doc.select('//div[@class=\"price\"]//span[@class=\"price-old\"]').text()\n                special_price_xpath = g.doc.select('//div[@class=\"price\"]//span[@class=\"price-new\"]').text()\n                price = float(''.join(x for x in price_xpath if x.isdigit()))\n                special_price = float(''.join(x for x in special_price_xpath if x.isdigit()))\n            except Exception:\n                price_xpath = g.doc.select('//div[@class=\"price\"]').text()\n                price = float(''.join(x for x in price_xpath if x.isdigit()))\n                special_price = 0\n\n            if manufacturer in global_vars.exclude_manufacturers:\n                logging.info('This item is not needed, moving on...')\n            else:\n                manufacturer_id = global_vars.manufacturer_ids[manufacturer]\n                category_id = global_vars.category_ids[lamp_type]\n                hash_object = sha1(image.encode())\n                image_name = hash_object.hexdigest()\n                catalog_name = manufacturer.replace(\" \", \"_\")\n                image_path_in_db = 'catalog' + '/' + catalog_name + '/' + image_name + '.jpg'\n\n                logging.info('{0}, {1}, {2}, {3}, {4}, {5}, {6}, {7}, {8}, {9}, {10}, {11}, {12}, {13}, {14}'.format(item_url, model, sku, manufacturer, collection, lamp_type, height, diameter, lamp_count, price, special_price, image, image_path_in_db, category_id, description))\n                db_success = database_functions.store_items_to_database(model, sku, manufacturer, manufacturer_id, collection, price, special_price, height, diameter, lamp_count, image_path_in_db, description, category_id)\n                # no need to download the image if the item was not stored in the database\n                if db_success:\n                    image_downloader.download_images(image, catalog_name, image_name)\n\n            logging.info('\\n')\n\n    except Exception as e:\n        global_vars.ERRORS += 1\n        logging.error(e)\n\n    logging.error('Total number of errors: {0}'.format(global_vars.ERRORS))\n    logging.warn('Total number of warnings: {0}'.format(global_vars.WARNINGS))\n\n\n\nif __name__ == \"__main__\":\n    parse_items()","sub_path":"maytoni.py","file_name":"maytoni.py","file_ext":"py","file_size_in_byte":6894,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} {"seq_id":"117563268","text":"import json\nimport logging\nfrom collections import OrderedDict\nfrom pathlib import Path\nfrom typing import Dict, Iterable, List, Optional, Union\n\nimport numpy as np\nimport torch\nfrom sentence_transformers import __version__\nfrom sentence_transformers.util import import_from_string\nfrom summarizer 
import Summarizer\nfrom torch import Tensor, nn\nfrom tqdm.auto import tqdm\nfrom transformers import BertConfig, BertModel, BertTokenizer\n\nfrom .modules import BertModule, PoolingModule\n\n_ST_INCOMPATIBLE_VERSION_TO_LATEST_HF = '0.2.6'\n\n\nclass BertSummarizer:\n @staticmethod\n def load(model: str, tokenizer: BertTokenizer, device=None) -> Summarizer:\n config = BertConfig.from_pretrained(model)\n config.output_hidden_states = True\n bert_model = BertModel.from_pretrained(model, config=config)\n if device is not None:\n bert_model = bert_model.to(device)\n return Summarizer(custom_model=bert_model, custom_tokenizer=tokenizer)\n\n\nclass SentenceTransformer(nn.Sequential):\n input_attrs = ['input_ids', 'token_type_ids', 'attention_mask']\n\n def __init__(self, model_path: str = None,\n modules: Iterable[nn.Module] = None, device: str = None):\n \"\"\"Sentence Transformer Class.\n\n This class is slightly modified from the original version.\n Original source: https://github.com/UKPLab/sentence-transformers\n\n :param model_path: Path to the config and modules json file.\n :param modules: Iterable object of `nn.Module` instances.\n :param device: Computation device to choose.\n \"\"\"\n if model_path is not None:\n model_path = Path(model_path)\n modules_file = model_path.joinpath('modules.json')\n assert model_path.is_dir()\n assert modules_file.is_file()\n logging.info(f'Loading model from path: {model_path}')\n\n config_file = model_path.joinpath('config.json')\n if config_file.is_file():\n with config_file.open('r') as file:\n cfg = json.load(file)\n if cfg['__version__'] > __version__:\n logging.warning(\n \"You try to use a model that was created with \"\n \"version {}, however, your version is {}. This \"\n \"might cause unexpected behavior or errors. 
In \"\n                        \"that case, try to update to the latest version\"\n                        \".\\n\\n\\n\".format(cfg['__version__'], __version__),\n                    )\n            if modules is not None:\n                if not isinstance(modules, OrderedDict):\n                    modules = OrderedDict([(str(i), m) for i, m in modules])\n            else:\n                modules = OrderedDict()\n                with modules_file.open('r') as file:\n                    contained_modules = json.load(file)\n\n                if __version__ <= _ST_INCOMPATIBLE_VERSION_TO_LATEST_HF:\n                    models = [BertModule, PoolingModule]\n                    for model, config in zip(models, contained_modules):\n                        path = model_path.joinpath(config['path'])\n                        modules[config['name']] = model(path.as_posix())\n                else:\n                    for config in contained_modules:\n                        model = import_from_string(config['type'])\n                        path = model_path.joinpath(config['path'])\n                        modules[config['name']] = model.load(path.as_posix())\n\n        super().__init__(modules)\n        if device is None:\n            device = 'cuda' if torch.cuda.is_available() else 'cpu'\n        logging.info(f'using pytorch device {device}')\n        self.device = torch.device(device)\n        self.to(device)\n        # methods from the first module, e.g., ``0_BERT``\n        self.get_sentence_features = self._first_module().get_sentence_features\n        self.max_seq_length = self._first_module().max_seq_length\n        self.tokenize = self._first_module().tokenize\n        self.tokenizer = self._first_module().tokenizer\n\n    def _first_module(self):\n        return self._modules[next(iter(self._modules))]\n\n    def _last_module(self):\n        return self._modules[next(reversed(self._modules))]\n\n    def encode(self, sentences: List[str],\n               batch_size: int = 8, show_progress: bool = True) -> np.array:\n        \"\"\"Encode an iterable of string sequences to an embedding matrix.\"\"\"\n        self.eval()\n        lengths = np.argsort([len(sent) for sent in sentences])\n        maxsize = lengths.size\n        batches = range(0, maxsize, batch_size)\n        if show_progress:\n            batches = tqdm(batches, desc='batches')\n\n        embeddings = []\n        for i in batches:\n            splits = []\n            for j in lengths[i: min(i + batch_size, maxsize)]:\n                tokens = self.tokenizer.tokenize(sentences[j])\n                splits.append(tokens)\n\n            batch = self.tokenizer(text=splits,\n                                   is_pretokenized=True,\n                                   padding='longest',\n                                   return_tensors='pt').to(self.device)\n            with torch.no_grad():\n                output = self.forward(batch)\n                embedding = output['sentence_embedding']\n                embeddings.extend(embedding.to('cpu').numpy())\n\n        embeddings = [embeddings[i] for i in np.argsort(lengths)]\n        return np.array(embeddings)\n\n    def embed(self, inputs: Dict[str, Tensor], coding: str = 'sentence',\n              astype: str = 'torch') -> Union[Tensor, np.array]:\n        \"\"\"Transform inputs to embeddings.\n\n        :param inputs: Dict[str, Tensor], inputs with tensors set to device.\n        :param coding: ``sentence`` for sentence embedding | ``token`` for\n            token embeddings outputs.\n        :param astype: return the embedding as ``torch`` or ``numpy`` type.\n        \"\"\"\n        self.eval()\n        with torch.no_grad():\n            output = self.forward(inputs)\n        coding = 'token_embeddings' if coding == 'token' \\\n            else 'sentence_embedding'\n\n        embedding = output[coding]\n        if coding == 'token_embeddings':\n            attn_mask = output['attention_mask']\n            attn_mask = attn_mask.unsqueeze(-1).expand(\n                embedding.size()).float()\n            embedding = embedding * attn_mask\n\n        if astype == 'numpy':\n            embedding = embedding.to('cpu').numpy()\n\n        return embedding\n\n    def encode_sentence(self, text: Union[str, List[str], List[int]],\n                        max_seq_length: int = None) -> Dict[str, Tensor]:\n        \"\"\"Encode a sequence to inputs of token-ids, segment-ids and mask.\n\n        Note: The `text` parameter expects values (a string or tokens) without\n        added special tokens, e.g., 
`[CLS]` and `[SEP]` or in integer form.\n\n        :param text: Sequence to be encoded. This can be a string, a list of\n            strings (tokenized string using the `tokenizer.tokenize` method)\n            or a list of integers (tokenized string ids using the `tokenize`\n            or `tokenizer.convert_tokens_to_ids` method).\n        \"\"\"\n        if max_seq_length is None:\n            if isinstance(text, str):\n                text = self.tokenize(text, add_special_tokens=False)\n                max_seq_length = len(text)\n            elif isinstance(text[0], int) or isinstance(text[0], str) \\\n                    and isinstance(text[:1], list):  # is valid string token\n                max_seq_length = len(text)\n\n        # Reserve two positions for the [CLS] and [SEP] special tokens.\n        max_length = min(max_seq_length, self.max_seq_length) + 2\n        inputs = self.tokenizer.encode_plus(text=text,\n                                            max_length=max_length,\n                                            padding=True,\n                                            return_tensors='pt')\n        return inputs.to(self.device)\n\n    def encode_sentences(\n            self,\n            text: Union[str, List[str], List[List[str]]],\n            padding: Union[str, bool] = 'max_length',\n            truncation: Union[str, bool] = True,\n            max_seq_length: Optional[int] = None,\n            is_pretokenized: Optional[bool] = False) -> Dict[str, Tensor]:\n        \"\"\"Encode sequence(s) to inputs of token-ids, segments, and mask.\n\n        NOTE: The `text` param expects a sequence or list of sequences\n        of strings without added special tokens, e.g., `[CLS]` and\n        `[SEP]`. This method should be used only with sequences of\n        `type=str` and not of `type=int`.\n\n        * Padding and truncation strategy: `padding to specific length`\n\n        - Encoding a list of sequences of strings (sentences) List[str]\n\n        ```python\n        encode_sentences(batch_sentences,  # or tokenized batch\n                         padding='max_length',\n                         truncation=True,\n                         max_seq_length=None,  # computed automatically\n                         is_pretokenized=False)\n        ```\n\n        - The following arg values cause a fallback to model's default\n            max_length (meaning, a custom max_length value is ignored).\n\n        ```python\n        ...\n        max_length = len(max(batch_pretokenized, key=len))\n        encode_batch(batch_pretokenized,  # or batch_sentences\n                     padding='longest',  < causes fallback >\n                     truncation=False,  < causes fallback >\n                     max_seq_length=max_length,  < will be ignored >\n                     is_pretokenized=False)  # applies to true|false\n        ```\n        :param text: A sequence or batch of sequences to be encoded.\n            Each sequence can be a string or a list of strings (pre-\n            tokenized string). 
If the sequences are provided as list\n            of strings (pretokenized), and `max_seq_length` is given,\n            then you must set `is_pretokenized=True` (to lift the\n            ambiguity with a batch of sequences).\n        \"\"\"\n        if max_seq_length is None:\n            # If a string sequence.\n            if isinstance(text, str):\n                text = self.tokenizer.tokenize(text)\n                max_seq_length = len(text)\n\n            elif isinstance(text, list):\n                # If list of string sequences.\n                if isinstance(text[0][:1], str):\n                    sequences: List[List[str]] = []\n                    max_seqlen = 0\n                    for string in text:\n                        tokens = self.tokenizer.tokenize(string)\n                        max_seqlen = max(max_seqlen, len(tokens))\n                        sequences.append(tokens)\n                    text, max_seq_length = sequences, max_seqlen\n                # If list of list of token sequences.\n                elif isinstance(text[0][:1], list):\n                    max_seq_length = len(max(text, key=len))\n\n                # Any text variation is pre-tokenized within this < if > block.\n                is_pretokenized = True\n\n        # Reserve two positions for the [CLS] and [SEP] special tokens.\n        max_length = min(max_seq_length, self.max_seq_length) + 2\n        batch = self.tokenizer(text=text,\n                               is_pretokenized=is_pretokenized,\n                               padding=padding,\n                               truncation=truncation,\n                               max_length=max_length,\n                               return_tensors='pt').to(self.device)\n        return batch\n","sub_path":"corona_nlp/transformer.py","file_name":"transformer.py","file_ext":"py","file_size_in_byte":11564,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} {"seq_id":"504876515","text":"from logger import Logger\nimport wx\nimport time\nimport numpy as np\nimport os\n# from apscheduler.schedulers.background import BackgroundScheduler\n\n\nclass Job(object):\n    def __init__(self, spec, inst_drivers, frame):\n        self.inst_drivers = inst_drivers\n        self.spec = spec\n        frame_log = Text_Log(frame.job_disp_log)\n        self.logger = Logger(self, inst_drivers, frame_log)\n        self.frame = frame\n        self.frame.SetTitle(f\"{spec.get('job_name', '')}\")  # Was u\"\"\n        self.graphs = []\n        self.frame.add_table(4, len(spec.get(\"logged_operations\", {}))+len(spec.get(\"references\", {}))-1)\n        self.auto_profile = AutoProfile(self)\n        self.frame.add_profile_table(self.auto_profile)\n        self.n = 0\n        self.pauseStart = time.time() / 60\n\n        # self.sched = BackgroundScheduler()\n        # self.sched.add_job(func=self.update_graphs, trigger='interval', seconds=5)\n        # self.sched.add_job(func=self.update_table, trigger='interval', seconds=5)\n        # self.sched.add_job(func=self.update_autoprofile, trigger='interval', seconds=5)\n        # self.sched.start()\n\n    def update_cycle(self):\n        self.update_table()\n        self.update_graphs()\n        self.update_autoprofile()\n\n    def load_profile(self):\n        pass\n\n    def auto_profile_actions(self, actions):\n        self.frame.reading_text.SetLabel(\"Writing:\")  # Was u\"\"\n        for inst_op, val in actions:\n            # inst_op = inst_op.split(\".\")\n            self.frame.current_reading.SetLabel(f\"{inst_op} = {val}\")  # Was u\"\"\n            if inst_op != \"\":\n                i_id, op_id = inst_op.split(\".\")\n                op_check = self.logger.instruments.get(i_id).spec.get(\"operations\", {}).get(op_id, {}).get(\"check_set\", \"\")\n                inst_driver = self.inst_drivers.get(i_id)\n                try:\n                    if op_check == \"\":\n                        inst_driver.write_instrument(op_id, [val])\n                    else:\n                        curset = self.auto_profile.check_instrument(i_id, op_check)\n                        i = 0\n                        while curset != float(val) and i < 5:\n                            i = i + 1\n                            print(f\"Write attempt {i}\")\n                            inst_driver.write_instrument(op_id, [val])\n                            curset = self.auto_profile.check_instrument(i_id, op_check)\n                            print(f\"{float(val)} = {curset}?\")\n                except Exception:\n                    print(\"auto profile action error\")\n        
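# Added sketch: a short settling pause between the retry attempts above is a\n        # common pattern for slow instruments; the 0.5 s value is hypothetical and\n        # `time` is already imported in this module.\n        # time.sleep(0.5)\n        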
self.frame.reading_text.SetLabel(\"Waiting...\") # Was u\"\"\n self.frame.current_reading.SetLabel(\"\") # Was u\"\"\n\n def update_autoprofile(self):\n self.auto_profile.update()\n\n def new_autoprofile_col(self):\n name, inst_ops = self.frame.get_autoprofile_new_action_dlg()\n if name != \"cancelled\":\n self.auto_profile.new_set_op(name, inst_ops)\n\n def next_point(self):\n self.auto_profile.next_point()\n\n def save_points(self):\n self.auto_profile.save_points()\n\n def new_point(self):\n self.auto_profile.new_point()\n\n def reset(self):\n pass\n\n def new_run(self):\n pass\n\n def next_n(self, n):\n pass\n\n def last_n(self, n):\n pass\n\n def assured_soak(self):\n pass\n\n def pause(self):\n self.logger.pause()\n self.pauseStart = time.time() / 60\n\n def resume(self):\n self.logger.delay = self.logger.delay + (time.time() / 60) - self.pauseStart\n self.logger.resume()\n\n def start(self):\n self.logger.start()\n self.auto_profile.point_start_time = time.time() / 60\n self.auto_profile.move_to_point(self.auto_profile.current_point)\n\n def stop(self):\n self.logger.stop()\n self.frame.Destroy()\n # self.sched.shutdown()\n\n def add_graph(self, plt): # Adds the graph to the list of graphs\n graph_names = []\n for i in range(len(self.graphs)):\n graph_names.append(self.graphs[i][0][1])\n axis_choices = self.logger.opref.copy()\n name, x, y = self.frame.get_add_graph_dialog(axis_choices)\n while graph_names.count(name) > 0:\n name = name + \"\\'\"\n if name == \"cancelled\":\n return \"cancelled\"\n else: # Procedure wasn't cancelled\n self.graphs.append([(plt, name), (x, y)])\n self.update_graphs()\n return name\n\n def append_graph(self, plt): # Adds a new line to the selected graph\n graph_choices = []\n for i in range(len(self.graphs)):\n graph_choices.append(self.graphs[i][0][1])\n axis_choices = self.logger.opref.copy()\n index, y = self.frame.get_append_graph_dialog(graph_choices, axis_choices)\n axis_check = []\n for i in range(len(self.graphs[index]) - 1):\n axis_check.append(self.graphs[index][i + 1][1])\n if index != -1: # Procedure wasn't cancelled\n if axis_check.count(y) == 0:\n x = self.graphs[index][1][0]\n self.graphs[index].append((x, y))\n self.update_graphs()\n else:\n print(f\"{y} already in {self.graphs[index][0][1]}.\")\n\n def detract_graph(self, plt): # Removes a line from the selected graph\n graph_choices = []\n for i in range(len(self.graphs)):\n graph_choices.append(self.graphs[i][0][1])\n graph_index = self.frame.get_detract_graph_graph_dialog(graph_choices)\n if graph_index > -1: # Procedure wasn't cancelled\n axis_choices = []\n for i in range(len(self.graphs[graph_index])-1):\n axis_choices.append(self.graphs[graph_index][i+1][1])\n if len(axis_choices) > 1:\n axis_index = self.frame.get_detract_graph_axis_dialog(axis_choices)\n if axis_index > -1: # Procedure wasn't cancelled\n self.graphs[graph_index].pop(axis_index+1)\n self.update_graphs()\n else:\n print(\"Can't detract last line.\")\n\n def remove_graph(self, plt): # Removes the selected graph\n graph_choices = []\n for i in range(len(self.graphs)):\n graph_choices.append(self.graphs[i][0][1])\n index = self.frame.get_remove_graph_dialog(graph_choices)\n if index > -1: # Procedure wasn't cancelled\n self.graphs.pop(index)\n return index + 3\n\n def update_graphs(self): # Updates the data depicted in the graphs\n for g in self.graphs: # g in form of [[graph object, name], [x1, y1], [x2, y2], etc...]\n leg = []\n plt = g[0][0].figure.gca()\n plt.clear()\n for i in range(len(g)):\n if i != 0:\n x = 
g[i][0]\n y = g[i][1]\n inst_x, op = x.split('.')\n if inst_x == \"reference\":\n x_val = [d.get(x) for d in self.logger.storeref]\n else:\n x_val = [d[1].get(x) for d in self.logger.store]\n inst_y, op = y.split('.')\n if inst_y == \"reference\":\n y_val = [d.get(y) for d in self.logger.storeref]\n y_name = op\n else:\n y_val = [d[1].get(y) for d in self.logger.store]\n if inst_y != \"time\":\n try:\n y_name = self.logger.job_spec[\"details\"][inst_y][op]\n except KeyError:\n y_name = self.logger.instruments.get(inst_y).spec[\"operations\"][op][\"name\"]\n else:\n y_name = op\n leg.append(y_name)\n plt.plot(x_val, y_val)\n plt.legend(leg)\n g[0][0].canvas.draw()\n\n def generate_graph(self, graph):\n self.graphs.append(graph)\n self.update_graphs()\n\n def update_table(self):\n self.frame.update_table(0)\n\n\nclass AutoProfile(object):\n def __init__(self, job):\n self.job = job\n self.profile_header = [\"Points\", \"Soak\", \"Assured\"]\n self.points = 1\n self.points_list = [1]\n self.soak = [50]\n self.assured = [0]\n self.operations = {} # format \"Name\":(inst_op,[points])\n self.title = \"\" # The first line of the autoprofile is the name of the file.\n self.h_name = [] # The second line of the autoprofile contains the names.\n self.h_set = [] # The third line of the autoprofile contains the commands to set the setpoints.\n # self.h_check = []\n # self.h_actual = []\n self.stdev_list = [] # This list contains the data required for the assured switch.\n self.current_stdev = \"\" # This defines what the standard deviation is of.\n self.a_dif = 0.1 # This is the difference between the actual and measured values that assured allows.\n self.a_std = 0.1 # This is the standard deviation of measured values that assured allows.\n\n self.current_point = 0\n self.point_start_time = time.time() / 60\n self.transtime = \"\" # Was u\"\"\n\n def reset(self):\n self.profile_header = [\"Points\", \"Soak\", \"Assured\"]\n self.points = 1\n self.points_list = [1]\n self.soak = [1]\n self.assured = [0]\n self.operations = {}\n self.job.frame.job_book.SetPageText(2, \"Profile\")\n self.grid_refresh()\n\n def load_file(self, direc, fn):\n grid = self.job.frame.grid_auto_profile\n rows1 = len(self.profile_header)\n cols1 = self.points\n try:\n with open(os.path.join(direc, fn), 'r') as file:\n titles = file.readline().strip().split(',')\n self.title = titles[0]\n while self.title[0] != 'a': # Remove \"\" from first item\n self.title = self.title[1:]\n self.job.frame.job_book.SetPageText(2, self.title)\n self.h_name = file.readline().strip().split(',')\n self.profile_header = self.h_name.copy()\n self.h_set = file.readline().strip().split(',')\n # self.h_check = file.readline().strip().split(',')\n # self.h_actual = file.readline().strip().split(',')\n d1 = {}\n for name, inst_op in zip(self.h_name, self.h_set):\n d1[name] = (inst_op, [])\n for line in file:\n line = line.strip().split(',')\n for i in range(len(self.h_name)):\n name = self.h_name[i]\n d1[name][1].append(line[i])\n # Extra bit to remove the mandatory fields from the operations list\n # Reset the lists\n self.points_list = []\n self.soak = []\n self.assured = []\n # Fill the lists again\n for i in range(len(d1[self.h_name[0]][1])):\n self.points_list.append(d1[\"Points\"][1][i])\n self.soak.append(d1[\"Soak\"][1][i])\n self.assured.append(d1[\"Assured\"][1][i])\n # Remove the data from d1\n d1.pop(\"Points\")\n d1.pop(\"Soak\")\n d1.pop(\"Assured\")\n # Back to normal code\n self.points = len(d1[self.h_name[3]][1])\n self.operations = d1\n\n 
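# Added sketch: a quick sanity check could go here, e.g. that every loaded\n            # operation column carries exactly one value per point:\n            # assert all(len(pts) == self.points for _, pts in self.operations.values())\n        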
except ValueError as e:\n print(e)\n print('not a valid autoprofile')\n except OSError as e:\n print(e)\n print('not a valid autoprofile')\n\n # bSizer = self.job.frame.bSizer181\n # self.job.frame.grid_auto_profile.Destroy()\n # bSizer.Remove(0)\n # grid = wx.grid.Grid(self.job.frame.auto_profile, wx.ID_ANY, wx.DefaultPosition, wx.DefaultSize, 0)\n # self.job.frame.grid_auto_profile = grid\n # bSizer.Prepend(grid, 1, wx.ALL | wx.EXPAND, 5)\n # self.job.frame.auto_profile.Layout()\n # self.job.frame.add_profile_table(self)\n self.move_to_point(self.current_point)\n\n def new_set_op(self, name, inst_ops, default=0):\n points = [0 for _ in range(self.points)]\n self.operations[name] = (inst_ops, points)\n self.h_name.append(name)\n self.profile_header = self.h_name.copy()\n self.h_set.append(inst_ops)\n grid = self.job.frame.grid_auto_profile\n # msg = wx.grid.GridTableMessage(grid.table,\n # wx.grid.GRIDTABLE_NOTIFY_COLS_APPENDED, 1)\n # grid.ProcessTableMessage(msg)\n self.grid_refresh()\n\n def new_point(self):\n self.points += 1\n self.points_list.append(self.points)\n self.soak.append(self.soak[-1])\n self.assured.append(self.assured[-1])\n op2 = {}\n for name, op_pts in self.operations.items():\n op = op_pts[0]\n pts = op_pts[1]\n pts.append(pts[-1])\n op2[name] = (op, pts)\n self.operations = op2\n # grid = self.job.frame.grid_auto_profile\n # msg = wx.grid.GridTableMessage(grid.table,\n # wx.grid.GRIDTABLE_NOTIFY_ROWS_APPENDED, 1)\n # grid.ProcessTableMessage(msg)\n self.grid_refresh()\n\n def set_value(self, name, point, value): # to check for out of range and name not found\n if name == \"Soak\":\n self.soak[point] = value\n elif name == \"Assured\":\n self.assured[point] = value\n elif name == \"Points\":\n self.points_list[point] = value\n else:\n op_pts = self.operations.get(name) # check\n op_pts[1][point] = value\n\n def get_value(self, name, point): # to check for out of range and name not found\n if name == \"Soak\":\n value = self.soak[point]\n elif name == \"Assured\":\n value = self.assured[point]\n elif name == \"Points\":\n value = self.points_list[point]\n else:\n op_pts = self.operations.get(name) # check\n value = op_pts[1][point]\n return value\n\n def get_header(self):\n return self.profile_header\n\n def get_current_point(self):\n return self.current_point\n\n def next_point(self):\n self.job.logger.point_to_file()\n if self.points == self.current_point+1:\n self.move_to_point(0)\n else:\n self.move_to_point(self.current_point+1)\n\n def save_points(self):\n self.job.logger.point_to_file()\n\n def move_to_point(self, point):\n self.current_point = point\n self.grid_refresh()\n self.point_start_time = time.time() / 60\n actions = []\n if not self.job.logger.paused:\n for inst_op, vals in self.operations.values():\n if vals[point] != \"\": # \"\" will not change the set point\n actions.append((inst_op, vals[point]))\n self.job.auto_profile_actions(actions)\n\n\n def update(self):\n t1 = self.point_start_time + float(self.soak[self.current_point]) # - self.job.logger.delay # if paused\n index = 2 + int(self.assured[self.current_point])\n timeleft = (t1 - (time.time()/60))\n if index < 3:\n if timeleft < 0:\n self.transtime = \"Now\" # Was u\"\"\n self.next_point()\n else:\n timeleft = round(timeleft, 1)\n self.transtime = f\"{timeleft}\" # Was u\"\"\n else:\n inst, ops = self.h_set[index].split('.')\n opc = self.job.logger.instruments.get(inst).spec.get(\"operations\", {}).get(ops, {}).get(\"check_set\", \"\")\n opa = 
self.job.logger.instruments.get(inst).spec.get(\"operations\", {}).get(ops, {}).get(\"check_actual\", \"\")\n            # inst, opc = self.h_check[index].split('.')\n            # inst, opa = self.h_actual[index].split('.')\n            if opc == \"\" or opa == \"\":\n                if timeleft < 0:\n                    self.transtime = \"Now\"  # Was u\"\"\n                    self.next_point()\n                else:\n                    timeleft = round(timeleft, 1)\n                    self.transtime = f\"{timeleft}\"  # Was u\"\"\n            else:\n                value1 = self.check_instrument(inst, opc)\n                value2 = self.check_instrument(inst, opa)\n                if opa == self.current_stdev:\n                    self.stdev_list.append(value2)  # If this is the same operation as last time, append the data.\n                else:\n                    self.current_stdev = opa  # Otherwise, remember the new operation and start a new array.\n                    self.stdev_list = []\n                    self.stdev_list.append(value2)\n\n                # if timeleft < 0:  # Modify this order so that time is after checks, for constant delay after settling.\n                #     dif = value2 - value1\n                #     std = self.stdev()\n                #     if abs(dif) < self.a_dif:\n                #         if std < self.a_std:\n                #             self.transtime = \"Now\"  # Was u\"\"\n                #             self.next_point()\n                #         else:\n                #             self.transtime = f\"Waiting till stdev ({std}) is less than {self.a_std}.\"  # Was u\"\"\n                #     else:\n                #         self.transtime = f\"Waiting till ({dif}) is less than {self.a_dif}.\"  # Was u\"\"\n                # else:\n                #     self.transtime = f\"{timeleft}\"  # Was u\"\"\n\n                dif = value2 - value1\n                if abs(dif) < self.a_dif:  # Check if the value is relatively accurate\n                    std = self.stdev()\n                    if std < self.a_std:  # Check if the value is relatively precise\n                        if timeleft < 0:  # Check if the value has been accurate and precise for long enough\n                            self.transtime = \"Now\"  # Was u\"\"\n                            self.next_point()\n                        else:\n                            self.transtime = f\"{timeleft}\"  # Was u\"\"\n                    else:\n                        self.point_start_time = time.time() / 60\n                        self.transtime = f\"Waiting till stdev ({std}) is less than {self.a_std}.\"  # Was u\"\"\n                else:\n                    self.point_start_time = time.time() / 60\n                    self.transtime = f\"Waiting till ({dif}) is less than {self.a_dif}.\"  # Was u\"\"\n\n\n    def check_instrument(self, inst_id, operation_id):\n        inst = self.job.logger.instruments.get(inst_id)\n        result = inst.read_instrument(operation_id)\n        return result[1]\n\n    def stdev(self):\n        if self.job.logger.window < len(self.stdev_list):\n            std = np.std(self.stdev_list[-self.job.logger.window:])\n        else:\n            std = np.std(self.stdev_list)\n        return std\n\n    def highlight_row(self):\n        grid = self.job.frame.grid_auto_profile\n        for col in range(len(self.profile_header)):\n            for row in range(self.points):\n                grid.SetCellBackgroundColour(row, col, wx.Colour(255, 255, 255))\n        for col in range(len(self.profile_header)):\n            grid.SetCellBackgroundColour(self.current_point, col, wx.Colour(230, 235, 245))\n\n    def grid_refresh(self):\n        self.highlight_row()\n        grid = self.job.frame.grid_auto_profile\n        # Fix the number of rows and columns\n        # currows = grid.GetNumberRows()\n        # desrows = self.points\n        # print(currows, desrows)\n        # if currows < desrows:\n        #     grid.AppendRows(desrows - currows)\n        # elif currows > desrows:\n        #     grid.DeleteRows(desrows - 1, currows - desrows)\n        # curcols = grid.GetNumberCols()\n        # descols = len(self.get_header())\n        # print(curcols, descols)\n        # if curcols < descols:\n        #     grid.AppendCols(descols - curcols)\n        # elif curcols > descols:\n        #     grid.DeleteCols(descols - 1, curcols - descols)\n        # Refresh the table\n        grid.AutoSizeRows()\n        # grid.AutoSizeColumns() TODO\n        grid.ForceRefresh()\n\n\nclass Text_Log(object):\n    def __init__(self, textctrl):\n        self.out = textctrl\n\n    def write(self, text):\n        text = str(text)+'\\n'\n        
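# Added note (assumption: writes may come from a non-GUI thread): wx widgets are\n        # not thread-safe, so wx.CallAfter(self.out.WriteText, text) would be the\n        # safer variant in that case.\n        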
self.out.WriteText(text)\n","sub_path":"hs-logger/job.py","file_name":"job.py","file_ext":"py","file_size_in_byte":20390,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} {"seq_id":"553220888","text":"import SimpleITK as sitk\nimport scipy.ndimage.morphology\nfrom pathlib import Path\nimport argparse\nimport numpy as np\n\n\ndef fillholes_nii(nii_file,\n                  out_file):\n    \"\"\"Binary fill holes using scipy.ndimage\n\n    Args:\n        nii_file (str/Path): input nii mask file\n        out_file (str/Path): output path to save modified file\n    \"\"\"\n    # Read nii image.\n    mask = sitk.ReadImage(str(nii_file))\n\n    # Binary fill holes.\n    input_mask = sitk.GetArrayFromImage(mask)\n    output_mask = fillholes(input_mask)\n    filled_mask = sitk.GetImageFromArray(output_mask)\n    filled_mask.SetDirection(mask.GetDirection())\n    filled_mask.SetOrigin(mask.GetOrigin())\n    filled_mask.SetSpacing(mask.GetSpacing())\n\n    # Write result to outpath.\n    writer = sitk.ImageFileWriter()\n    print(\"Writing output to :\", out_file)\n    writer.SetFileName(str(out_file))\n    writer.Execute(filled_mask)\n\n\ndef fillholes(input_mask):\n    \"\"\"Binary fill holes using scipy.ndimage\n\n    Args:\n        input_mask (np.array): input mask\n\n    Returns:\n        np.array: filled mask\n    \"\"\"\n    output_mask = scipy.ndimage.morphology.binary_fill_holes(input_mask)\n    output_mask = output_mask.astype(np.uint8)\n    return output_mask\n\n\ndef main():\n    # Commandline argument parsing.\n    parser = argparse.ArgumentParser(description='Fill holes in a binary nii mask.')\n    parser.add_argument('nii_input', help='Input .nii file')\n    parser.add_argument('-p', '--Praefix', help='Praefix for output filename')\n    parser.add_argument('-o', '--Output', help='Output .nii file')\n    args = parser.parse_args()\n\n    nii_file = Path(args.nii_input)\n\n    print('Filling mask holes, nii image : ', str(nii_file))\n\n    # If outpath is not set, use praefix and input filepath.\n    outpath = args.Output\n    if not outpath:\n        praefix = 'filled_'\n        if args.Praefix:\n            praefix = args.Praefix\n        outpath = nii_file.parent.joinpath(praefix + nii_file.name)\n\n    fillholes_nii(nii_file, outpath)\n\n\nif __name__ == '__main__':\n    main()\n","sub_path":"midastools/misc/fillholes.py","file_name":"fillholes.py","file_ext":"py","file_size_in_byte":2035,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} {"seq_id":"2216844","text":"import time\nimport xbmc\nimport os\nimport xbmcgui\nimport urllib2\n\ndef menuoptions():\n    dialog = xbmcgui.Dialog()\n    funcs = (\n        function1,\n        function2,\n        function3\n    )\n\n    call = dialog.select('[B][COLOR=yellow]CerebroTV[/COLOR][COLOR=red] CCTV & Webcams[/COLOR][/B]', ['[B][COLOR=green]CCTV & Web Cams 1[/COLOR][/B]', '[B][COLOR=green]CCTV & Web Cams 2[/COLOR][/B]', '[B][COLOR=green]NASA Live Streams[/COLOR][/B]'])\n    # dialog.select returns\n    # -1 -> dialog cancelled (escape pressed)\n    #  0 -> first item\n    #  1 -> second item\n    #  2 -> third item\n    if call < 0:\n        return\n    func = funcs[call]\n    #dp = xbmcgui.DialogProgress()\n    #dp.create(\"[COLOR tomato]CerebroTV[/COLOR]\",\"\"+str(func),\"PLEASE EXIT KODI OR PULL THE POWER LEAD\")\n    #xbmc.sleep(1000)\n    return func()\n\n\ndef function1():\n    
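# Added note: each handler below just activates a Kodi window with a plugin:// URL;\n    # the generic, hypothetical form is:\n    # xbmc.executebuiltin('ActivateWindow(10025,\"plugin://<addon-id>/?<args>\",return)')\n    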
xbmc.executebuiltin('ActivateWindow(10025,\"plugin://plugin.video.goodfellas/?fanart=http%3a%2f%2fwww.pixhoster.info%2ff%2f2016-09%2f1fa780e2d827b35ee5038bf8b2c8dd63.png&mode=1&name=Web%20Cams&url=http%3a%2f%2fgoo.gl%2fZqroXR\",return)')\n\ndef function2():\n xbmc.executebuiltin('ActivateWindow(10025,\"plugin://plugin.video.ProjectCypher/?action=directory&content=addons&url=http%3a%2f%2fcypher-media.com%2fcypher%2fcctv.xml\",return)')\n\ndef function3():\n xbmc.executebuiltin('ActivateWindow(10025,\"plugin://plugin.video.ProjectCypher/?action=directory&content=addons&url=http%3a%2f%2fignorame\",return)')\n\nmenuoptions()","sub_path":"zips/script.mtvbcctv/addon.py","file_name":"addon.py","file_ext":"py","file_size_in_byte":1764,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"592826281","text":"\"\"\"DBus implementation with glib.\"\"\"\nfrom __future__ import annotations\n\nimport asyncio\nimport logging\nfrom typing import Any\n\nfrom dbus_next import BusType, InvalidIntrospectionError, Message, MessageType\nfrom dbus_next.aio import MessageBus\nfrom dbus_next.introspection import Node\nfrom dbus_next.signature import Variant\n\nfrom ..exceptions import (\n DBusFatalError,\n DBusInterfaceError,\n DBusInterfaceMethodError,\n DBusNotConnectedError,\n DBusParseError,\n)\n\n\ndef _remove_dbus_signature(data: Any) -> Any:\n if isinstance(data, Variant):\n return _remove_dbus_signature(data.value)\n elif isinstance(data, dict):\n for k in data:\n data[k] = _remove_dbus_signature(data[k])\n return data\n elif isinstance(data, list):\n new_list = []\n for item in data:\n new_list.append(_remove_dbus_signature(item))\n return new_list\n else:\n return data\n\n\n_LOGGER: logging.Logger = logging.getLogger(__name__)\n\nDBUS_METHOD_GETALL: str = \"org.freedesktop.DBus.Properties.GetAll\"\nDBUS_METHOD_SET: str = \"org.freedesktop.DBus.Properties.Set\"\n\n\nclass DBus:\n \"\"\"DBus handler.\"\"\"\n\n def __init__(self, bus_name: str, object_path: str) -> None:\n \"\"\"Initialize dbus object.\"\"\"\n self.bus_name: str = bus_name\n self.object_path: str = object_path\n self.methods: set[str] = set()\n self.signals: set[str] = set()\n self._bus: MessageBus | None = None\n\n def __del__(self):\n \"\"\"Delete dbus object.\"\"\"\n if self._bus:\n self._bus.disconnect()\n\n @staticmethod\n async def connect(bus_name: str, object_path: str) -> DBus:\n \"\"\"Read object data.\"\"\"\n self = DBus(bus_name, object_path)\n\n # pylint: disable=protected-access\n await self._init_proxy()\n\n _LOGGER.debug(\"Connect to D-Bus: %s - %s\", bus_name, object_path)\n return self\n\n def _add_interfaces(self, introspection: Any):\n # Read available methods\n for interface in introspection.interfaces:\n interface_name = interface.name\n\n # Methods\n for method in interface.methods:\n method_name = method.name\n self.methods.add(f\"{interface_name}.{method_name}\")\n\n # Signals\n for signal in interface.signals:\n signal_name = signal.name\n self.signals.add(f\"{interface_name}.{signal_name}\")\n\n async def _init_proxy(self) -> None:\n \"\"\"Read interface data.\"\"\"\n # Wait for dbus object to be available after restart\n introspection: Node | None = None\n try:\n self._bus = await MessageBus(bus_type=BusType.SYSTEM).connect()\n except Exception as err:\n raise DBusFatalError() from err\n\n for _ in range(3):\n try:\n introspection = await self._bus.introspect(\n self.bus_name, self.object_path, timeout=10\n )\n except InvalidIntrospectionError as err:\n raise 
DBusParseError(\n                    f\"Can't parse introspect data: {err}\", _LOGGER.error\n                ) from err\n            except (EOFError, asyncio.TimeoutError):\n                _LOGGER.warning(\n                    \"Busy system at %s - %s\", self.bus_name, self.object_path\n                )\n            else:\n                break\n\n            await asyncio.sleep(3)\n\n        if introspection is None:\n            raise DBusFatalError(\n                \"Could not get introspection data after 3 attempts\", _LOGGER.error\n            )\n\n        self._add_interfaces(introspection)\n\n    def _prepare_args(self, *args: list[Any]) -> tuple[str, list[Any]]:\n        signature = \"\"\n        arg_list = []\n\n        for arg in args:\n            _LOGGER.debug(\"...arg %s (type %s)\", str(arg), type(arg))\n            if isinstance(arg, bool):\n                signature += \"b\"\n                arg_list.append(arg)\n            elif isinstance(arg, int):\n                signature += \"i\"\n                arg_list.append(arg)\n            elif isinstance(arg, float):\n                signature += \"d\"\n                arg_list.append(arg)\n            elif isinstance(arg, str):\n                signature += \"s\"\n                arg_list.append(arg)\n            elif isinstance(arg, tuple):\n                signature += arg[0]\n                arg_list.append(arg[1])\n            else:\n                raise DBusFatalError(f\"Type {type(arg)} not supported\")\n\n        return signature, arg_list\n\n    async def call_dbus(self, method: str, *args: list[Any]) -> str:\n        \"\"\"Call a dbus method.\"\"\"\n        method_parts = method.split(\".\")\n\n        signature, arg_list = self._prepare_args(*args)\n\n        _LOGGER.debug(\"Call %s on %s\", method, self.object_path)\n        reply = await self._bus.call(\n            Message(\n                destination=self.bus_name,\n                path=self.object_path,\n                interface=\".\".join(method_parts[:-1]),\n                member=method_parts[-1],\n                signature=signature,\n                body=arg_list,\n            )\n        )\n\n        if reply.message_type == MessageType.ERROR:\n            if reply.error_name == \"org.freedesktop.DBus.Error.ServiceUnknown\":\n                raise DBusInterfaceError(reply.body[0])\n            if reply.error_name == \"org.freedesktop.DBus.Error.UnknownMethod\":\n                raise DBusInterfaceMethodError(reply.body[0])\n            if reply.error_name == \"org.freedesktop.DBus.Error.Disconnected\":\n                raise DBusNotConnectedError()\n            if reply.body and len(reply.body) > 0:\n                raise DBusFatalError(reply.body[0])\n            raise DBusFatalError()\n\n        return _remove_dbus_signature(reply.body)\n\n    async def get_properties(self, interface: str) -> dict[str, Any]:\n        \"\"\"Read all properties from interface.\"\"\"\n        try:\n            return (await self.call_dbus(DBUS_METHOD_GETALL, interface))[0]\n        except IndexError as err:\n            _LOGGER.error(\"No attributes returned for %s\", interface)\n            raise DBusFatalError() from err\n\n    async def set_property(\n        self,\n        interface: str,\n        name: str,\n        value: Any,\n    ) -> list[Any] | dict[str, Any] | None:\n        \"\"\"Set a property from interface.\"\"\"\n        return await self.call_dbus(DBUS_METHOD_SET, interface, name, value)\n\n    def signal(self, signal_member) -> DBusSignalWrapper:\n        \"\"\"Get signal context manager for this object.\"\"\"\n        return DBusSignalWrapper(self, signal_member)\n\n    async def wait_signal(self, signal_member) -> Any:\n        \"\"\"Wait for signal on this object.\"\"\"\n        async with self.signal(signal_member) as signal:\n            return await signal.wait_for_signal()\n\n    def __getattr__(self, name: str) -> DBusCallWrapper:\n        \"\"\"Map to dbus method.\"\"\"\n        return getattr(DBusCallWrapper(self, self.bus_name), name)\n\n\nclass DBusCallWrapper:\n    \"\"\"Wrap a DBus interface for a call.\"\"\"\n\n    def __init__(self, dbus: DBus, interface: str) -> None:\n        \"\"\"Initialize wrapper.\"\"\"\n        self.dbus: DBus = dbus\n        self.interface: str = interface\n\n    def __call__(self) -> None:\n        \"\"\"Catch this method from being called.\"\"\"\n        _LOGGER.error(\"D-Bus method %s does not exist!\", self.interface)\n        raise 
DBusInterfaceMethodError()\n\n    def __getattr__(self, name: str):\n        \"\"\"Map to dbus method.\"\"\"\n        interface = f\"{self.interface}.{name}\"\n\n        if interface not in self.dbus.methods:\n            return DBusCallWrapper(self.dbus, interface)\n\n        def _method_wrapper(*args):\n            \"\"\"Wrap method.\n\n            Return a coroutine\n            \"\"\"\n            return self.dbus.call_dbus(interface, *args)\n\n        return _method_wrapper\n\n\nclass DBusSignalWrapper:\n    \"\"\"Wrapper for D-Bus Signal.\"\"\"\n\n    def __init__(self, dbus: DBus, signal_member: str) -> None:\n        \"\"\"Initialize wrapper.\"\"\"\n        self._dbus: DBus = dbus\n        signal_parts = signal_member.split(\".\")\n        self._interface = \".\".join(signal_parts[:-1])\n        self._member = signal_parts[-1]\n        self._match: str = f\"type='signal',interface={self._interface},member={self._member},path={self._dbus.object_path}\"\n        self._messages: asyncio.Queue[Message] = asyncio.Queue()\n\n    def _message_handler(self, msg: Message):\n        if msg.message_type != MessageType.SIGNAL:\n            return\n\n        _LOGGER.debug(\n            \"Signal message received %s, %s.%s object %s\",\n            msg.body,\n            msg.interface,\n            msg.member,\n            msg.path,\n        )\n        if (\n            msg.interface != self._interface\n            or msg.member != self._member\n            or msg.path != self._dbus.object_path\n        ):\n            return\n\n        self._messages.put_nowait(msg)\n\n    async def __aenter__(self):\n        \"\"\"Install match for signals and start collecting signal messages.\"\"\"\n\n        _LOGGER.debug(\"Install match for signal %s.%s\", self._interface, self._member)\n        await self._dbus._bus.call(\n            Message(\n                destination=\"org.freedesktop.DBus\",\n                interface=\"org.freedesktop.DBus\",\n                path=\"/org/freedesktop/DBus\",\n                member=\"AddMatch\",\n                signature=\"s\",\n                body=[self._match],\n            )\n        )\n\n        self._dbus._bus.add_message_handler(self._message_handler)\n        return self\n\n    async def wait_for_signal(self) -> Message:\n        \"\"\"Wait for a signal and return the signal payload.\"\"\"\n        msg = await self._messages.get()\n        return msg.body\n\n    async def __aexit__(self, exc_t, exc_v, exc_tb):\n        \"\"\"Stop collecting signal messages and remove match for signals.\"\"\"\n\n        self._dbus._bus.remove_message_handler(self._message_handler)\n\n        await self._dbus._bus.call(\n            Message(\n                destination=\"org.freedesktop.DBus\",\n                interface=\"org.freedesktop.DBus\",\n                path=\"/org/freedesktop/DBus\",\n                member=\"RemoveMatch\",\n                signature=\"s\",\n                body=[self._match],\n            )\n        )\n","sub_path":"supervisor/utils/dbus.py","file_name":"dbus.py","file_ext":"py","file_size_in_byte":10173,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} {"seq_id":"455483698","text":"# Write a Python program that reads a weight in kg and a height in m and calculates the BMI of a person\ntry:\n\n\theight_in_meters = float(input(\"Enter height of person: \"))\n\tweight_in_kg = float(input(\"Enter weight of person: \"))\n\tBMI = weight_in_kg/height_in_meters**2\n\tprint(\"Body Mass Index:\", BMI)\n\n\nexcept Exception as e:\n\tprint(str(e))","sub_path":"bmi.py","file_name":"bmi.py","file_ext":"py","file_size_in_byte":322,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} {"seq_id":"93697777","text":"import numpy as np\nfrom pandas import read_csv\nimport matplotlib.pyplot as plt\nfrom lib.plots import label_and_save_plot, draw_pie_with_others, create_hists\nfrom sklearn.decomposition import PCA\nfrom sklearn.preprocessing import scale\nimport const\nimport scipy.stats\nimport seaborn as sns\n\ndf = read_csv('data/ensembl-source.csv')\n\ndata = df['source'].value_counts()\nfig, ax = 
plt.subplots()\nchart, t, r = ax.pie(data, autopct='%1.1f%%',\n                     startangle=90, textprops={'fontsize': 0})\nlabels = [i + ' ' + percent.get_text() for i, percent in zip(data.index, r)]\nax.legend(labels=labels)\nlabel_and_save_plot(plt, 'img/coding_non_coding_status.jpg',\n                    'Ensembl gene source classification')\n\n\ndf = read_csv('data/meta-simple-repeat.csv')\n\ndraw_pie_with_others(plt, df['chrom'], const.CHROM_NAMES)\nlabel_and_save_plot(plt, 'img/repeats/chrom.jpg', 'Reference sequence chromosome or scaffold')\n\ncolumns = ['A', 'C', 'G', 'T']\nplt.violinplot(df[columns].values, showmeans=True)\nplt.xticks(range(1, len(columns) + 1), columns)\nlabel_and_save_plot(plt, 'img/repeats/ACGT_violin.jpg', 'ACGT violin')\n\n\ncreate_hists(plt, df, nrows=2, ncols=2, columns=['A', 'C', 'G', 'T'],\n             titles=['A Percent', 'C Percent', 'G Percent', 'T Percent'], n_bins=100)\nlabel_and_save_plot(plt, 'img/repeats/ACGT_percent.jpg')\n\ncreate_hists(plt, df, nrows=2, columns=['perMatch', 'perIndel'],\n             titles=['Percentage Match', 'Percentage Indel'])\nlabel_and_save_plot(plt, 'img/repeats/Percentage_Match_Indel.jpg')\n\ncreate_hists(plt, df, nrows=3, columns=['period', 'copyNum', 'consensusSize'],\n             titles=['Length of repeat unit', 'Mean number of copies of repeat',\n                     'Length of consensus sequence'], log=False)\nlabel_and_save_plot(plt, 'img/repeats/period_copyNum_consensusSize.jpg')\n\ncreate_hists(plt, df, nrows=3, columns=['period', 'copyNum', 'consensusSize'],\n             titles=['Log Length of repeat unit', 'Log Mean number of copies of repeat',\n                     'Log Length of consensus sequence'], log=True)\nlabel_and_save_plot(plt, 'img/repeats/log2_period_copyNum_consensusSize.jpg')\n\ndata = np.sort(df['entropy'])\nplt.plot(data)\nlabel_and_save_plot(plt, 'img/repeats/entropy.jpg', subtitle='Entropy (sorted)',\n                    xlabel='Repeat #', ylabel='Entropy')\n\nfig, ax = plt.subplots()\nax.hist(df['entropy'], 100)\nlabel_and_save_plot(plt, 'img/repeats/entropy_histogram.jpg', subtitle='Entropy Histogram')\n\n\nfig, ax = plt.subplots(nrows=2)\nax0, ax1 = ax.flatten()\ndata = np.sort(df['score'])\nax0.plot(data)\nax0.set_title('Alignment Score\\n= 2*match-7*mismatch-7*indel; minscore=50')\nax1.hist(df['score'], 100, log=True)\nfig.tight_layout()\nlabel_and_save_plot(plt, 'img/repeats/score.jpg')\n\ndata = df[['period', 'copyNum', 'consensusSize', 'perMatch',\n           'perIndel', 'score', 'entropy', 'A', 'C', 'G', 'T']]\n\nplot = sns.clustermap(data.corr())\nplot.fig.suptitle('Simple repeats correlation')\nplot.savefig('img/repeats/heatmap_corr.jpg')\n\ndata = scale(data)\npca = PCA(n_components=2)\nreduced = pca.fit_transform(data)\nX = reduced[:, 0]\nY = reduced[:, 1]\nfig, ax = plt.subplots()\nax.scatter(X, Y, s=0.5)\nlabel_and_save_plot(plt, 'img/repeats/pca.jpg', title='PCA saved '\n                    + str(sum(pca.explained_variance_ratio_ * 100)) + ' % of information')\n#print(pca.components_)\n#print(np.argmax(pca.components_[0]))\n\nfig, ax = plt.subplots()\nax.scatter(df['period'], df['consensusSize'], s=0.5)\nlabel_and_save_plot(plt, 'img/repeats/scatter_period_consensusSize.jpg',\n                    title='Length of repeat unit - Length of consensus sequence',\n                    xlabel='Length of repeat unit', ylabel='Length of consensus sequence')\nfig, ax = plt.subplots()\nax.scatter(range(len(df)), df['period'] - df['consensusSize'], s=1)\nlabel_and_save_plot(plt, 'img/repeats/scatter_period_minus_consensusSize.jpg',\n                    title='Length of repeat unit minus Length of consensus sequence')\n\nfig, ax = 
plt.subplots()\nax.scatter(df['C'], df['G'], s=0.5)\nlabel_and_save_plot(plt, 'img/repeats/scatter_CG.jpg',\n title=\"Percent of C's in repeat unit - Percent of G's in repeat unit\",\n xlabel='C', ylabel='G')\nfig, ax = plt.subplots()\nax.scatter(df['A'], df['T'], s=1)\nlabel_and_save_plot(plt, 'img/repeats/scatter_AT.jpg',\n title=\"Percent of A's in repeat unit - Percent of T's in repeat unit\",\n xlabel='A', ylabel='T')\n","sub_path":"Drosophila/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4445,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"464349749","text":"import turtle\n\npencere = turtle.Screen()\nkaplumbik = turtle.Turtle()\n\nkaplumbik.pensize(5)\n\nfor j in range(5):\n\n for i in range(4):\n kaplumbik.forward(20)\n kaplumbik.left(90)\n\n kaplumbik.penup()\n kaplumbik.forward(40)\n kaplumbik.pendown()\n\npencere.mainloop()","sub_path":"Python3/How to Think Like a Computer Scientist/4 - Functions/4.9.1 Exercises - X.py","file_name":"4.9.1 Exercises - X.py","file_ext":"py","file_size_in_byte":284,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"13899836","text":"# reverse the order of PRICES\n# input is a list of PRICES, newest day's\n# prices are lower indices\n# The best day to buy must be before the\n# best day to sell\n# Starting at the end of the array, store that BUY_INDICE\n# store the SELL_INDICE and DIFF between the first newer element that is less than the first elem\n# loop through the array comparing PRICES[INDICE] to each\n# newer day. if that comparison is > DIFF, store that SELL_INDICE and DIFF\n# repeat until you reach index 1\n\n\ndef picker(prices):\n buy_indice = len(prices) - 1\n sell_indice = None\n diff = -1\n i = buy_indice - 1\n j = buy_indice\n \n while j > 0:\n while i >= 0:\n temp_diff = prices[j] - prices[i]\n #print(temp_diff)\n if (temp_diff > 0 and temp_diff > diff):\n diff = temp_diff\n sell_indice = i\n buy_indice = j\n i -= 1\n j -= 1\n i = j - 1\n\n return [sell_indice, buy_indice]\n\n","sub_path":"python/stock_picker.py","file_name":"stock_picker.py","file_ext":"py","file_size_in_byte":972,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"329571680","text":"import sys\nimport copy\nimport pysam\nimport bisect\n\n# input: sorted BAM + phased tab-delimited genotypes (chr, pos, pat, mat) + precontact.py output\n# output: read name + a list of segments (is read 2, query start, query end, ref name, ref start, ref end, strand, haplotype: 0=left, 1=right, -1=unknown, -2=disagree)\n\n\n# read IO locations from arguments\ninputBamFile=sys.argv[1]\ninputGenoFile=open(sys.argv[2],\"r\")\nchrName=sys.argv[3]\n\n# parameters\nminQual = 20\n\n# open BAM files\nsamfile = pysam.AlignmentFile(inputBamFile, \"rb\")\n\n# initialize data for sorted fragments (each fragment: [start, end], and corresponding phase: phase)\nfragmentData = []\nphaseData = []\n\n# find phased reads from the genotype file\ncounter = 0\nfor inputGenoFileLine in inputGenoFile:\n inputGenoFileLineData = inputGenoFileLine.strip().split(\"\\t\")\n inputChr = inputGenoFileLineData[0]\n if inputChr != chrName:\n continue\n inputPos = int(inputGenoFileLineData[1])\n leftNucleotide = inputGenoFileLineData[2]\n rightNucleotide = inputGenoFileLineData[3]\n leftCount = 0\n rightCount = 0\n for pileupcolumn in samfile.pileup(inputChr, inputPos-1, inputPos):\n if pileupcolumn.pos == inputPos - 1: # find the position 
of the SNP\n for pileupread in pileupcolumn.pileups:\n if not pileupread.is_del and not pileupread.is_refskip:\n read = pileupread.alignment\n # skip bad reads\n if read.is_duplicate or read.is_qcfail or read.is_secondary:\n continue\n if read.mapping_quality < minQual:\n continue\n \n # find phase\n currentNucleotide = pileupread.alignment.query_sequence[pileupread.query_position]\n if currentNucleotide == leftNucleotide:\n leftCount += 1\n elif currentNucleotide == rightNucleotide:\n rightCount += 1\n sys.stdout.write(inputChr+'\\t'+str(inputPos)+'\\t'+str(leftCount)+'\\t'+str(rightCount)+'\\n')\n ","sub_path":"legacy/count_snp_chr_txt.py","file_name":"count_snp_chr_txt.py","file_ext":"py","file_size_in_byte":2108,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"589131289","text":"from pydantic import BaseModel\n\nfrom .utils import parse_code\n\n\nclass Response:\n \"\"\"\n response object\n\n :param codes: list of HTTP status code, format('HTTP_[0-9]{3}'), 'HTTP200'\n :param code_models: dict of : <`pydantic.BaseModel`>\n \"\"\"\n\n def __init__(self, *codes, **code_models):\n for code in codes:\n assert code in DEFAULT_CODE_DESC, 'invalid HTTP status code'\n\n for code, model in code_models.items():\n assert code in DEFAULT_CODE_DESC, 'invalid HTTP status code'\n assert issubclass(model, BaseModel), 'invalid `pydantic.BaseModel`'\n\n self.codes = codes\n self.code_models = code_models\n\n def has_model(self):\n \"\"\"\n :returns: boolean -- does this response has models or not\n \"\"\"\n return True if self.code_models else False\n\n def find_model(self, code):\n \"\"\"\n :param code: ``r'\\\\d{3}'``\n \"\"\"\n return self.code_models.get(f'HTTP_{code}')\n\n @property\n def models(self):\n \"\"\"\n :returns: dict_values -- all the models in this response\n \"\"\"\n return self.code_models.values()\n\n def generate_spec(self):\n \"\"\"\n generate the spec for responses\n\n :returns: JSON\n \"\"\"\n responses = {}\n for code in self.codes:\n responses[parse_code(code)] = {'description': DEFAULT_CODE_DESC[code]}\n\n for code, model in self.code_models.items():\n responses[parse_code(code)] = {\n 'description': DEFAULT_CODE_DESC[code],\n 'content': {\n 'application/json': {\n 'schema': {\n '$ref': f'#/components/schemas/{model.__name__}'\n }\n }\n }\n }\n\n return responses\n\n\n# according to https://tools.ietf.org/html/rfc2616#section-10\n# https://tools.ietf.org/html/rfc7231#section-6.1\n# https://developer.mozilla.org/sv-SE/docs/Web/HTTP/Status\nDEFAULT_CODE_DESC = {\n # Information 1xx\n 'HTTP_100': 'Continue',\n 'HTTP_101': 'Switching Protocols',\n # Successful 2xx\n 'HTTP_200': 'OK',\n 'HTTP_201': 'Created',\n 'HTTP_202': 'Accepted',\n 'HTTP_203': 'Non-Authoritative Information',\n 'HTTP_204': 'No Content',\n 'HTTP_205': 'Reset Content',\n 'HTTP_206': 'Partial Content',\n # Redirection 3xx\n 'HTTP_300': 'Multiple Choices',\n 'HTTP_301': 'Moved Permanently',\n 'HTTP_302': 'Found',\n 'HTTP_303': 'See Other',\n 'HTTP_304': 'Not Modified',\n 'HTTP_305': 'Use Proxy',\n 'HTTP_306': '(Unused)',\n 'HTTP_307': 'Temporary Redirect',\n 'HTTP_308': 'Permanent Redirect',\n # Client Error 4xx\n 'HTTP_400': 'Bad Request',\n 'HTTP_401': 'Unauthorized',\n 'HTTP_402': 'Payment Required',\n 'HTTP_403': 'Forbidden',\n 'HTTP_404': 'Not Found',\n 'HTTP_405': 'Method Not Allowed',\n 'HTTP_406': 'Not Acceptable',\n 'HTTP_407': 'Proxy Authentication Required',\n 'HTTP_408': 'Request Timeout',\n 'HTTP_409': 'Conflict',\n 'HTTP_410': 'Gone',\n 
'HTTP_411': 'Length Required',\n 'HTTP_412': 'Precondition Failed',\n 'HTTP_413': 'Request Entity Too Large',\n 'HTTP_414': 'Request-URI Too Long',\n 'HTTP_415': 'Unsupported Media Type',\n 'HTTP_416': 'Requested Range Not Satisfiable',\n 'HTTP_417': 'Expectation Failed',\n 'HTTP_418': \"I'm a teapot\",\n 'HTTP_421': 'Misdirected Request',\n 'HTTP_422': 'Unprocessable Entity',\n 'HTTP_423': 'Locked',\n 'HTTP_424': 'Failed Dependency',\n 'HTTP_425': 'Too Early',\n 'HTTP_426': 'Upgrade Required',\n 'HTTP_428': 'Precondition Required',\n 'HTTP_429': 'Too Many Requests',\n 'HTTP_431': 'Request Header Fields Too Large',\n 'HTTP_451': 'Unavailable For Legal Reasons',\n # Server Error 5xx\n 'HTTP_500': 'Internal Server Error',\n 'HTTP_501': 'Not Implemented',\n 'HTTP_502': 'Bad Gateway',\n 'HTTP_503': 'Service Unavailable',\n 'HTTP_504': 'Gateway Timeout',\n 'HTTP_505': 'HTTP Version Not Supported',\n 'HTTP_506': 'Variant Also negotiates',\n 'HTTP_507': 'Insufficient Sotrage',\n 'HTTP_508': 'Loop Detected',\n 'HTTP_511': 'Network Authentication Required',\n}\n","sub_path":"spectree/response.py","file_name":"response.py","file_ext":"py","file_size_in_byte":4223,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"625369526","text":"from puzzle1 import read_file\nfrom puzzle1 import get_seat\nfrom puzzle1 import to_id\n\nfrom puzzle1 import n_rows, n_cols\n\nimport numpy as np\nimport sys\n\ndef main(filename):\n\n tickets = read_file(filename)\n seats = [get_seat(x) for x in tickets]\n plain = np.zeros((n_rows, n_cols))\n\n for s in seats:\n r,c = s\n plain[r][c] = 1\n\n in_bounds = lambda x,y: 0 <= x < n_rows and 0 <= y < n_cols\n\n for i in range(n_rows):\n for j in range(n_cols):\n\n if plain[i][j] != 0:\n continue\n\n res = True\n for k in range(-1,2,1):\n for w in range(-1,2,1):\n if (k,w) != (0,0) and in_bounds(i+k,j+w) and plain[i+k][j+w] == 0:\n res = False\n \n if res is True:\n return to_id(i,j)\n\n\nif __name__ == \"__main__\":\n np.set_printoptions(threshold=sys.maxsize)\n print(main(\"input-5.txt\"))","sub_path":"day-5/puzzle2.py","file_name":"puzzle2.py","file_ext":"py","file_size_in_byte":940,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"651714552","text":"import clutter\nimport sys\nimport re\nimport random\nimport gobject\nimport datetime\nimport os\nimport urllib\nimport logging\nfrom crew.dds import baseslide\nfrom crew.dds.contrib import feedparser\n\nclass SlashdotDisplay(baseslide.BaseSlide):\n def __init__(self, feedURL):\n \"\"\" Initializes the stage and score for this slide. 
\"\"\"\n baseslide.BaseSlide.__init__(self)\n self.feedURL = feedURL\n self.rssfeed = None\n self.rssitems = []\n self.titleitems = []\n self.setupBackground()\n self.setupSlider()\n self.addrss(feedURL)\n\n def setupBackground(self):\n stageBackground = clutter.Texture('background.png')\n stageBackground.set_position(0, 0)\n self.group.add(stageBackground)\n\n def setupSlider(self):\n self.slider = clutter.Texture('slider.png')\n self.slider.set_position(0,0)\n self.slider.set_opacity(255)\n self.group.add(self.slider)\n\n def event_beforeshow(self):\n self.refresh()\n\n def event_loop(self):\n top_story_id = random.randint(0, len(self.item_positions)-1)\n top_entry = self.rssfeed.entries[top_story_id]\n top_story_y = self.item_positions[top_story_id]\n timeline = clutter.Timeline(500)\n alpha = clutter.Alpha(timeline, clutter.LINEAR)\n path = clutter.Path()\n path.add_move_to(945, int(self.slider.get_y()))\n path.add_line_to(945, int(top_story_y-12))\n self.move_slider_behavior = clutter.BehaviourPath(alpha, path)\n self.move_slider_behavior.apply(self.slider)\n timeline.connect('completed', lambda x:\n self.update_top_story(top_entry))\n timeline.start()\n\n def update_top_story(self, top_entry):\n for x in self.titleitems:\n self.group.remove(x)\n self.addTopStory(self.RemoveHTMLTags(top_entry.title),\n self.RemoveHTMLTags(top_entry.summary))\n\n def refresh(self):\n for x in self.rssitems:\n self.group.remove(x)\n for x in self.titleitems:\n self.group.remove(x)\n self.addrss(self.feedURL)\n\n def addTopStoryTitle(self, topstorytitle):\n title = clutter.Text()\n title.set_font_name(\"sans serif 24\")\n title.set_text(topstorytitle)\n title.set_markup('%s' % topstorytitle)\n title.set_line_wrap(True)\n title.set_line_wrap_mode(2)\n title.set_width(850)\n title.set_color(clutter.color_from_string(\"black\"))\n title.set_position(50, 200)\n self.titleitems.append(title)\n self.group.add(title)\n return title\n\n def addTopStoryText(self, topstorytext, toptitle):\n content = clutter.Text()\n content.set_text(topstorytext)\n content.set_font_name(\"serif 21\")\n content.set_line_wrap(True)\n content.set_justify(True)\n content.set_line_wrap_mode(2)\n content.set_color(clutter.color_from_string(\"black\"))\n content.set_position(50, 210 + toptitle.get_height())\n content.set_height(1080-260-toptitle.get_height())\n content.set_width(850)\n content.set_ellipsize(3) #Omit characters at the end of the text\n self.group.add(content)\n self.titleitems.append(content)\n\n def addTopStory(self, title, body):\n self.titleitems = []\n self.addTopStoryText(body.replace(\"\\n\", \" \").replace(\n \"Read more of this story at Slashdot.\", \"\"),\n self.addTopStoryTitle(title))\n\n def feedpath(self):\n return os.path.join(os.path.dirname(__file__), 'slashdot.rss')\n\n def oldfeed(self):\n if not os.path.exists(self.feedpath()):\n return True\n now = datetime.datetime.now()\n stats = os.stat(self.feedpath())\n lmdate = datetime.datetime.fromtimestamp(stats[8])\n delta = datetime.timedelta(hours=1)\n if not lmdate > (now-delta):\n return True\n\n def download_fetch_feed(self, feedURL):\n oldfeed = self.oldfeed()\n if oldfeed:\n logging.debug('Fetching feed URL: %s' % feedURL)\n urllib.urlretrieve(feedURL, self.feedpath())\n if self.rssfeed is None or oldfeed:\n self.rssfeed = feedparser.parse(open(self.feedpath()))\n\n def addrss(self, feedURL):\n \"\"\" Adds the RSS feed information to this slide. 
\"\"\"\n #TODO: ERROR CHECKING: MAKE SURE WE DON'T EXPLODE WITH A BAD FEED\n self.download_fetch_feed(feedURL)\n self.rssitems = []\n\n y = 200\n self.item_positions = []\n for entry in self.rssfeed.entries:\n dy, added = self.add_entry_group(entry, y)\n if added:\n self.item_positions.append(y)\n y += dy + 20\n else:\n break\n\n top_story_id = random.randint(0, len(self.item_positions)-1)\n top_entry = self.rssfeed.entries[top_story_id]\n top_story_y = self.item_positions[top_story_id]\n self.slider.set_position(945, top_story_y-12)\n self.addTopStory(self.RemoveHTMLTags(top_entry.title),\n self.RemoveHTMLTags(top_entry.summary))\n\n for x in self.rssitems:\n self.group.add(x)\n\n\n def add_entry_group(self, entry, starty):\n topstorytitle = self.RemoveHTMLTags(entry.title)\n title = clutter.Text()\n title.set_font_name(\"sans serif 18\")\n title.set_text(topstorytitle)\n title.set_width(870)\n title.set_ellipsize(3)\n title.set_color(clutter.color_from_string(\"black\"))\n title.set_position(1000, starty)\n #title.set_line_wrap()\n #title.set_line_wrap_mode(2)\n if (title.get_height() + starty + 50) < 1080:\n self.rssitems.append(title)\n return (title.get_height(), True)\n return (title.get_height(), False)\n\n# Put the ClutterGroup containing all the slide information\n# in the top level, so that DDS can get at it.\napp = SlashdotDisplay(\"http://rss.slashdot.org/Slashdot/slashdot\")\nslide = app.group\n\nif __name__ == '__main__':\n app.do_standalone_display()\n\n","sub_path":"slashdot/layout.py","file_name":"layout.py","file_ext":"py","file_size_in_byte":5595,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"274582676","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed May 31 21:30:16 2017\n\n@author: owen\n\"\"\"\n\n#Given a non-empty array containing only positive integers, find if the array can be partitioned into two subsets such that the sum of elements in both subsets is equal.\n#\n#Note:\n#\n# Each of the array element will not exceed 100.\n# The array size will not exceed 200.\n\n# 是subset,不需要连续(即一刀切成两个和相等的数组)\n\n# DP, 0-1 backpack\n# DFS, extend to Partition to K Equal Sum Subsets\n# bitwise\n \nclass Solution(object):\n def canPartition(self, nums):\n \"\"\"\n :type nums: List[int]\n :rtype: bool\n \"\"\"\n # 2D-DP, like 0-1 backpack, TLE\n sums = sum(nums)\n if sums & 1:\n return False\n \n n = len(nums)\n target = sums // 2\n dp = [[False] * (target + 1) for __ in range(n + 1)] # dp[i][j]: if it can find a subset with a sum of j from the first i numbers \n for i in range(n + 1):\n dp[i][0] = True\n \n for i in range(1, n + 1):\n for j in range(1, target + 1):\n dp[i][j] = dp[i - 1][j]\n if j >= nums[i - 1]:\n dp[i][j] |= dp[i - 1][j - nums[i - 1]]\n \n return dp[n][target]\n \n \nclass Solution(object):\n def canPartition(self, nums):\n \"\"\"\n :type nums: List[int]\n :rtype: bool\n \"\"\"\n # 1D-DP, like 0-1 backpack\n sums = sum(nums)\n if sums & 1:\n return False\n \n n = len(nums)\n target = sums // 2\n if max(nums) > target:\n return False\n \n dp = [False] * (target + 1) # dp[i][j]: if it can find a subset with a sum of j from the first i numbers \n dp[0] = True\n for i in range(n):\n for j in range(target, nums[i] - 1, -1):\n dp[j] |= dp[j - nums[i]]\n \n return dp[target]\n \n \n#class Solution(object):\n# def canPartition(self, nums):\n# \"\"\"\n# :type nums: List[int]\n# :rtype: bool\n# \"\"\"\n# # DFS, TLE, must pruning\n# sums = sum(nums)\n# if sums & 1:\n# return False\n# \n# 
# n = len(nums)\n# target = sums // 2\n# return self.dfs(nums, 0, n, target)\n# \n# def dfs(self, nums, start, length, target):\n# if target <= 0:\n# return target == 0\n# \n# for i in range(start, length):\n# if self.dfs(nums, i + 1, length, target - nums[i]):\n# return True\n# \n# return False\n \n\nclass Solution(object):\n def canPartition(self, nums):\n \"\"\"\n :type nums: List[int]\n :rtype: bool\n \"\"\"\n # DFS + pruning\n sums = sum(nums)\n if sums & 1:\n return False\n \n n = len(nums)\n target = sums // 2\n nums.sort(reverse = True) # for pruning: when the answer is true and involves subsets with a low size, this method of placing elements will consider these lower size subsets sooner.\n return self.dfs(nums, 0, n, target)\n \n def dfs(self, nums, start, length, target):\n if start == length:\n return target == 0\n \n if target == 0:\n return True\n \n for i in range(start, length):\n if target - nums[i] < 0: # for pruning\n break\n \n if self.dfs(nums, i + 1, length, target - nums[i]):\n return True\n \n return False\n \n \n#class Solution(object):\n# def canPartition(self, nums):\n# \"\"\"\n# :type nums: List[int]\n# :rtype: bool\n# \"\"\"\n# total=sum(nums)\n# if total&1==1:\n# return False\n# \n# max_val=max(nums)\n# target=total//2\n# if max_val>target:\n# return False\n# \n# dp=[False]*(total+1) # dp[i]: if the subarray whose sum is i exist\n# dp[0]=True\n# for num in nums:\n# for i in range(total-num,-1,-1): # Notice the seq\n# if dp[i]:\n# dp[i+num]=True\n# \n# return dp[target]\n \n# http://bookshadow.com/weblog/2016/10/09/leetcode-partition-equal-subset-sum/\n#class Solution(object):\n# def canPartition(self, nums):\n# \"\"\"\n# :type nums: List[int]\n# :rtype: bool\n# \"\"\"\n# total = sum(nums)\n# if total & 1:\n# return False\n# nset = set([0])\n# for n in nums:\n# for m in nset.copy():\n# nset.add(m + n)\n# return total // 2 in nset\n \n \nif __name__==\"__main__\":\n print(Solution().canPartition([1, 5, 11, 5]))\n print(Solution().canPartition([1, 2, 5]))\n print(Solution().canPartition([2, 2, 3, 5]))","sub_path":"416. Partition Equal Subset Sum.py","file_name":"416. Partition Equal Subset Sum.py","file_ext":"py","file_size_in_byte":4909,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"}
+{"seq_id":"378642766","text":"import math\nimport os\nimport random\nimport string\n\nimport numpy as np\nimport torch\nfrom GPy.util.linalg import dpotri, mdot\nfrom numpy.core.umath_tests import inner1d\nfrom numpy.ma import trace\nfrom scipy import linalg\nfrom scipy.linalg import det, inv, lapack\n\nPRECISION = np.float64\nTORCH_PRECISION = torch.double\ndefault_tensor_type = torch.cuda.DoubleTensor\n\n\nclass PosDefMatrix(object):\n def __init__(self, num_latent, num_inducing):\n self.is_outdated = True\n self.matrix = np.empty([num_latent, num_inducing, num_inducing], dtype=PRECISION)\n self.inverse = np.empty([num_latent, num_inducing, num_inducing], dtype=PRECISION)\n self.cholesky = np.empty([num_latent, num_inducing, num_inducing], dtype=PRECISION)\n self.log_determinant = np.empty([num_latent], dtype=PRECISION)\n\n def update(self, kernels, inducing_locations):\n if not self.is_outdated:\n return\n\n for i in range(len(kernels)):\n self.matrix[i] = kernels[i].kernel(inducing_locations[i])\n self.cholesky[i] = jitchol(self.matrix[i])\n self.inverse[i] = inv_chol(self.cholesky[i])\n self.log_determinant[i] = pddet(self.cholesky[i])\n\n self.is_outdated = False\n\n def set_outdated(self):\n self.is_outdated = True\n\ndef torchify(func):\n def wrapper(*args):\n device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n if device.type == 'cuda':\n torch.set_default_tensor_type(default_tensor_type)\n new_args = [torch.from_numpy(arg).to(device) if isinstance(arg, np.ndarray) else arg for arg in args]\n result = func(*new_args)\n if isinstance(result, tuple):\n return tuple(res.cpu().numpy() for res in result)\n return result.cpu().numpy()\n return wrapper\n\ndef weighted_average(weights, points, num_samples):\n \"\"\"\n calculates (condll * X).mean(axis=1) using variance reduction method.\n\n number of control variables = number of samples / 10\n\n Parameters\n ----------\n condll : ndarray\n dimensions: s * N\n X : ndarray\n dimensions: s * N\n\n Returns\n -------\n :returns: a matrix of dimension N\n \"\"\"\n points = points.T\n weights = weights.T\n cvsamples = num_samples // 10\n pz = points[:, 0:cvsamples]\n py = np.multiply(weights[:, 0:cvsamples], pz)\n above = np.multiply((py.T - py.mean(1)), pz.T).sum(axis=0) / (cvsamples - 1)\n below = np.square(pz).sum(axis=1) / (cvsamples - 1)\n cvopt = np.divide(above, below)\n cvopt = np.nan_to_num(cvopt)\n grads = np.multiply(weights, points) - np.multiply(cvopt, points.T).T\n\n return grads.mean(axis=1)\n\ndef mdiag_dot(A, B):\n \"\"\"\n Given input matrices ``A`` and ``B``, this function returns the diagonal terms of the matrix product of A and B\n\n Returns\n -------\n output : ndarray\n diag(AB)\n \"\"\"\n return np.einsum('ij,ji -> i', A, B)\n\n\ndef KL_normal(m1, sigma1, m2, sigma2):\n \"\"\"\n Calculates the KL divergence between two normal distributions specified by\n N(``mu1``, ``sigma1``), N(``mu2``, ``sigma2``)\n \"\"\"\n\n return 1 / 2 * (math.log(det(sigma2) / det(sigma1)) - len(m1) + trace(mdot(inv(sigma2), sigma1)) + \\\n mdot((m2 - m1).T, inv(sigma2) , m2- m1))\n\n\ndef cross_ent_normal(m1, sigma1, m2, sigma2):\n \"\"\"\n Calculates the cross entropy between two normal distributions specified by\n N(``mu1``, ``sigma1``), N(``mu2``, ``sigma2``)\n \"\"\"\n\n return -KL_normal(m1, sigma1, m2, sigma2) - 1 / 2 * math.log(det(2 * math.pi * math.e * 
sigma1))\n\n\ndef jitchol(A, maxtries=5):\n \"\"\"\n Calculates the Cholesky decomposition of ``A``. In the case that it is not possible to calculate the Cholesky,\n a jitter will be added to ``A``.\n\n Note\n ----\n This method is adopted from the GPy package\n \"\"\"\n\n A = np.ascontiguousarray(A)\n L, info = lapack.dpotrf(A, lower=1)\n if info == 0:\n return L\n else:\n diagA = np.diag(A)\n if np.any(diagA <= 0.):\n raise JitChol(\"not pd: non-positive diagonal elements\")\n jitter = diagA.mean() * 1e-6\n while maxtries > 0 and np.isfinite(jitter):\n try:\n L = linalg.cholesky(A + np.eye(A.shape[0]) * jitter, lower=True)\n return L\n except:\n jitter *= 10\n finally:\n maxtries -= 1\n raise JitChol(\"not positive definite, even with jitter.\")\n\n\ndef pddet(L):\n \"\"\"\n Determinant of a positive definite matrix, only symmetric matricies though\n\n Note\n ----\n This method is adopted from the GPy package\n \"\"\"\n\n logdetA = 2*sum(np.log(np.diag(L)))\n return logdetA\n\n\ndef inv_chol(L):\n \"\"\"\n Given that ``L`` is the Cholesky decomposition of A, this method returns A^-1\n\n Note\n ----\n This method is adopted from the GPy package\n \"\"\"\n\n Ai, _ = dpotri(np.asfortranarray(L), lower=1)\n return Ai\n\n\ndef log_diag_gaussian(m1, m2, s_log):\n \"\"\"\n Returns PDF of a normal distribution as follows:\n\n N(m1| m2, exp(s_log)), where the covariance matrix is diagonal\n \"\"\"\n const = -1 / 2 * s_log.sum() - float(len(s_log)) / 2 * np.log(2 * math.pi)\n return const + -1 / 2 * np.dot((m1 - m2) / np.exp(s_log), (m1-m2).T)\n\n\nclass bcolors:\n HEADER = '\\033[95m'\n OKBLUE = '\\033[94m'\n OKGREEN = '\\033[92m'\n WARNING = '\\033[93m'\n FAIL = '\\033[91m'\n ENDC = '\\033[0m'\n BOLD = '\\033[1m'\n UNDERLINE = '\\033[4m'\n\n\ndef check_dir_exists(dir_name):\n \"\"\"\n Checks if folder ``dir_name`` exists, and if it does not exist, it will be created.\n \"\"\"\n if not os.path.exists(dir_name):\n os.makedirs(dir_name)\n\n\ndef id_generator(size=4, chars=string.ascii_uppercase + string.digits):\n \"\"\"generates a random sequence of character of length ``size``\"\"\"\n\n return ''.join(random.choice(chars) for _ in range(size))\n\n\ndef tr_AB(A, B):\n \"\"\" Given two matrices ``A`` and ``B``, this function return trace (AB) \"\"\"\n return np.sum(inner1d(A, B.T))\n\n\ndef get_git():\n \"\"\"\n If the current directory is a git repository, this function extracts the hash code, and current branch\n\n Returns\n -------\n hash : string\n hash code of current commit\n\n branch : string\n current branch\n \"\"\"\n try:\n from subprocess import Popen, PIPE\n\n gitproc = Popen(['git', 'show-ref'], stdout = PIPE)\n (stdout, stderr) = gitproc.communicate()\n\n gitproc = Popen(['git', 'rev-parse', '--abbrev-ref', 'HEAD'], stdout = PIPE)\n (branch, stderr) = gitproc.communicate()\n branch = branch.split('\\n')[0]\n for row in stdout.split('\\n'):\n if row.find(branch) != -1:\n hash = row.split()[0]\n break\n except:\n hash = None\n branch = None\n return hash, branch\n\n\ndef drange(start, stop, step):\n \"\"\"\n Generates an array of floats starting from ``start`` ending with ``stop`` with step ``step``\n \"\"\"\n r = start\n while r < stop:\n yield r\n r += step\n\nclass JitChol(Exception):\n def __init__(self, message):\n super().__init__()\n self.message = message\n\n\nif __name__ == \"__main__\":\n import torch\n\n @torchify\n def tt(a, b):\n return a.mm(b)\n\n print(tt(np.array([[1., 2.]]), np.array([[3., 
4.]]).T))\n","sub_path":"savigp/util.py","file_name":"util.py","file_ext":"py","file_size_in_byte":7333,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"280534979","text":"import torch\nfrom torch import nn\nfrom model.resnet50 import ResNet50, Bottleneck, ResNet101, ResNet18\nfrom model.resnet_ibn_a import resnet50_ibn_a\n\n\ndef weights_init_kaiming(m):\n classname = m.__class__.__name__\n if classname.find('Linear') != -1:\n nn.init.kaiming_normal_(m.weight, a=0, mode='fan_out')\n nn.init.constant_(m.bias, 0.0)\n elif classname.find('Conv') != -1:\n nn.init.kaiming_normal_(m.weight, a=0, mode='fan_in')\n if m.bias is not None:\n nn.init.constant_(m.bias, 0.0)\n elif classname.find('BatchNorm') != -1:\n if m.affine:\n nn.init.constant_(m.weight, 1.0)\n nn.init.constant_(m.bias, 0.0)\n\n\ndef weights_init_classifier(m):\n classname = m.__class__.__name__\n if classname.find('Linear') != -1:\n nn.init.normal_(m.weight, std=0.001)\n if m.bias:\n nn.init.constant_(m.bias, 0.0)\n\n\nclass ClassificationNet(nn.Module):\n in_planes = 2048\n\n def __init__(self, num_classes, last_stride, model_path,\n model_name, pretrained_choice):\n super(ClassificationNet, self).__init__()\n if model_name == 'resnet50':\n self.backbone = ResNet50(last_stride=last_stride, block=Bottleneck)\n if model_name == 'resnet101':\n self.backbone = ResNet101(last_stride=last_stride, block=Bottleneck)\n if model_name == 'resnet18':\n self.backbone = ResNet18(last_stride=last_stride, block=Bottleneck) \n\n elif model_name == 'resnet50_ibn_a':\n self.backbone = resnet50_ibn_a(last_stride)\n\n if pretrained_choice == 'imagenet':\n self.backbone.load_param(model_path)\n print('Loading pretrained ImageNet model......')\n\n self.gap = nn.AdaptiveAvgPool2d(1)\n self.num_classes = num_classes\n\n self.classifier = nn.Linear(self.in_planes, self.num_classes, bias=False)\n self.classifier.apply(weights_init_classifier)\n\n # classifier tree method\n\n # self.classifier_list = nn.Sequential()\n # for i in range(0,self.num_classes-1):\n # classifier = nn.Linear(self.in_planes, 1, bias=False)\n # classifier.apply(weights_init_classifier)\n # self.classifier_list.add_module('classifier'+str(i),classifier)\n # self.sigmoid = nn.Sigmoid()\n\n def forward(self, x):\n feat = self.gap(self.backbone(x)) # (b, 2048, 1, 1)\n feat = feat.view(feat.shape[0], -1) # flatten to (bs, 2048)\n\n if self.training:\n cls_score = self.classifier(feat)\n\n # classifier tree method\n\n # score = []\n # out_score = []\n # sum_value = 0\n # for i in range(0,self.num_classes-1):\n # cls_score = self.classifier_list[i](feat)\n # # print(cls_score.device)\n # score.append(self.sigmoid(cls_score))\n # for i in range(0,self.num_classes):\n # neg_rate = 1\n # for j in range(0,i):\n # neg_rate *= (1-score[j])\n # if ib\n for i in range(0, len(firstlist)-1, 2):\n try:\n if firstlist[i] < firstlist[i+1]:\n firstlist[i+1] = 0\n solution1 += 1\n if firstlist[i+1] > firstlist[i+2]:\n firstlist[i+1] = 0\n solution1 += 1\n except IndexError:\n solution1 -=1\n print(\"sol1\", solution1)\n\n solution2 = 0\n # A< b\n for i in range(0, len(A)-1, 2):\n try:\n if A[i] > A[i+1]:\n print(\"sol2 if1\", i, A[i], A[i+1])\n A[i+1] = 0\n solution2 += 1\n if A[i+1] < A[i+2]:\n print(\"sol2 if2\", i+1, A[1+1], A[i+2])\n print(A)\n A[i+2] = 0\n solution2 += 1\n except IndexError:\n continue\n print(\"sol2\", solution2)\n return min(solution1, 
solution2)\n\nprint(solution(test))\n\n","sub_path":"obss.py","file_name":"obss.py","file_ext":"py","file_size_in_byte":1022,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"185540189","text":"# -*- coding: utf-8 -*-\nimport sys\nimport re\n\ndef file_content(filename):\n d_file = open(filename, \"r\",encoding=\"utf-8\")\n content=d_file.read()\n return content\n\nfrom bs4 import BeautifulSoup\n\n\ndef rewrite_table(cont):\n cont = cont.replace(\"\\n\", \"\\t\")\n soup = BeautifulSoup(cont, 'lxml')\n #取到所有的正文\n datas = soup.find_all(\"table\")\n\n for data in datas:\n #print(repr(data))\n d=rewrite(data)\n if d:\n data.string = d\n try:\n data.unwrap()\n except:\n print(\"there have a error rewrite_table--->data.unwrap()\")\n return repr(soup)\n\ndef rewrite(tag):\n result=\"\"\n trs = tag.find_all(\"tr\")\n val_dict = {}\n val_list = []\n #val_list存的是[[内容1],[内容2],......]\n for tr in trs:\n ths = tr.find_all(\"th\")\n if ths:\n lih = []\n for i, val in enumerate(ths):\n lih.append(val.get_text().strip())\n if i in val_dict:\n val_dict[i] = val_dict.get(i) + \"\\t\" + val.get_text().strip()\n else:\n val_dict[i] = val.get_text().strip()\n val_list.append(lih)\n\n tds = tr.find_all(\"td\")\n if tds:\n li = []\n for i, val in enumerate(tds):\n li.append(val.get_text().strip())\n if i in val_dict:\n val_dict[i] = val_dict.get(i) + \"\\t\" + val.get_text().strip()\n else:\n val_dict[i] = val.get_text().strip()\n val_list.append(li)\n\n\n\n\n\n table_cont = propess_table(val_list)\n #print(table_cont)\n if table_cont:\n result = result + \"\\n\" + table_cont\n return result\n\ndef IsGoodTable(val_list):\n if val_list and len(val_list[0][0])>=2:\n if \":\" in val_list[0][0]:\n return False\n num = len(val_list[0])\n for index in range(1, len(val_list)):\n if len(val_list[index]) != num:\n return False\n return True\n else:\n return False\n\n\n\ndef propess_table(val_list):\n if IsGoodTable(val_list):\n list_cout = []\n if len(val_list) == 2 and len(val_list[0]) == len(val_list[1]):\n #print(\"---------------------------------------------------------------table1\")\n for x in range(len(val_list[0])):\n list_cout.append(val_list[0][x] + \":\" + val_list[1][x])\n # print(val_list[0][x] + \":\" + val_list[1][x])\n return \"\\n\".join(list_cout)\n elif len(val_list) > 1 and len(val_list[0])>1 and val_list[0][0] == \"中标候选人\" and val_list[0][1] == \"第一候选人\":\n #print(\"---------------------------------------------------------------table2\")\n for y in range(len(val_list[0]) - 1):\n for x in range(len(val_list) - 1):\n # print( val_list[0][y+1])\n list_cout.append(val_list[0][y + 1] + val_list[x + 1][0] + \":\" + val_list[x + 1][y + 1])\n # print(val_list[0][y + 1] + val_list[x + 1][0] + \":\" + val_list[x + 1][y + 1])\n return \"\\n\".join(list_cout)\n elif len(val_list[0]) == 2:\n #print(\"---------------------------------------------------------------table3\")\n for x in range(len(val_list)):\n list_cout.append(val_list[x][0] + \":\" + val_list[x][1])\n\n return \"\\n\".join(list_cout)\n elif len(val_list) > 1 and val_list[0]:\n #print(\"---------------------------------------------------------------table4\")\n for x in range(len(val_list) - 1):\n for y in range(len(val_list[0])):\n if len(val_list[0]) == len(val_list[x + 1]):\n list_cout.append(val_list[0][y] + \":\" + val_list[x + 1][y])\n # print(val_list[0][y] + \":\" + val_list[x + 1][y])\n return \"\\n\".join(list_cout)\n else:\n\n pass\n else:\n 
#print(\"---------------------------------------------------------------not good table\")\n pass\n\n\nif __name__ == '__main__':\n pass","sub_path":"html_parse.py","file_name":"html_parse.py","file_ext":"py","file_size_in_byte":4156,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"159054507","text":"from sightengine.client import SightengineClient\n\nclient = SightengineClient('API user', 'API secret')\n\n##### feedback\n\nfeedback1 = client.feedback('nudity', 'raw', 'https://d3m9459r9kwism.cloudfront.net/img/examples/example5.jpg')\nfeedback2 = client.feedback('nudity','safe', '/path/to/local/file.jpg')\n\nprint(feedback1)\nprint(feedback2)\n\n####### check image\n\ncheckNudity = client.check('nudity')\n\noutput = checkNudity.set_file('/path/to/local/file.jpg')\noutput2 = checkNudity.set_url('https://d3m9459r9kwism.cloudfront.net/img/examples/example5.jpg')\n\n# assign binary_image\noutput3 = checkNudity.set_bytes(binary_image)\n\nprint(output)\nprint(output2)\nprint(output3)\n\n####### check video\n\ncheck = client.check('nudity', 'wad')\noutput = check.video('http://www.quirksmode.org/html5/videos/big_buck_bunny.webm', 'http://requestb.in/1nm1vw11')\n\nprint(output)","sub_path":"examples/nudity.py","file_name":"nudity.py","file_ext":"py","file_size_in_byte":855,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"489236208","text":"import unittest\n\nimport tensorflow as tf\nimport tensorflow_addons as tfa\nfrom keras_crf.crf import CRF\n\n\nclass ModelBuildTest(unittest.TestCase):\n\n def _build_model(self):\n sequence_input = tf.keras.layers.Input(shape=(None,), dtype=tf.int32, name='sequence_input')\n sequence_mask = tf.keras.layers.Lambda(lambda x: tf.greater(x, 0))(sequence_input)\n outputs = tf.keras.layers.Embedding(21128, 128)(sequence_input)\n outputs = tf.keras.layers.Dense(256)(outputs)\n crf = CRF(7)\n # mask is important to compute sequence length in CRF\n outputs = crf(outputs, mask=sequence_mask)\n model = tf.keras.Model(inputs=sequence_input, outputs=outputs)\n model.compile(\n loss=crf.neg_log_likelihood,\n metrics=[crf.accuracy],\n optimizer=tf.keras.optimizers.Adam(4e-5)\n )\n return model\n\n def test_build_model(self):\n model = self._build_model()\n model.summary()\n\n\nif __name__ == \"__main__\":\n unittest.main()\n","sub_path":"tests/model_build_test.py","file_name":"model_build_test.py","file_ext":"py","file_size_in_byte":1024,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"646919937","text":"import h5py\nimport sys\nimport argparse\nimport numpy as np\nimport math\nimport tensorflow as tf\n#import tensorflow_probability as tfp\n#import matplotlib.pyplot as plt\nfrom sklearn.utils import shuffle\n\n\ndef load_data(data):\n print('load data:',data)\n d = h5py.File(data, 'r')\n hit_time = d['firstHitTimeByPMT'][:]\n n_pe = d['nPEByPMT'][:]\n theta_PMT = d['infoPMT'][:] # 0 to 180 degree\n d.close()\n ### normalize theta ##############\n theta_PMT[:] = theta_PMT[:]/180\n print(\"hit_time:\",hit_time.shape,\", n_pe:\", n_pe.shape, \",theta:\", theta_PMT.shape, \",event sizes:\", n_pe.shape[0])\n hit_time, n_pe = shuffle(hit_time, n_pe)\n return hit_time, n_pe, theta_PMT\n\ndef Normal_cost(mu, sigma, y):\n dist = tf.distributions.Normal(loc=mu, scale=sigma)\n return tf.reduce_mean(-dist.log_prob(y))\n\ndef Possion_cost(rate, y):\n #dist = tfp.distributions.Poisson(rate=rate, allow_nan_stats=False)\n 
#return tf.reduce_mean(-dist.log_prob(y))\n result = y*tf.math.log(rate) - tf.math.lgamma(1. + y) - rate\n return tf.reduce_mean(-result)\n\ndef mae_cost(pred_y, label_y):\n pred_y = tf.sort(pred_y , axis=0,direction='ASCENDING',name=None)\n label_y = tf.sort(label_y, axis=0,direction='ASCENDING',name=None)\n abs_diff = tf.math.abs(pred_y - label_y)\n return tf.reduce_mean(abs_diff)\n\ndef mse_cost(pred_y, label_y):\n pred_y = tf.sort(pred_y , axis=0,direction='ASCENDING',name=None)\n label_y = tf.sort(label_y, axis=0,direction='ASCENDING',name=None)\n diff = tf.math.pow((pred_y - label_y), 2)\n return tf.reduce_mean(diff)\n\ndef get_parser():\n parser = argparse.ArgumentParser(\n description='Run MDN training. '\n 'Sensible defaults come from https://github.com/taboola/mdn-tensorflow-notebook-example/blob/master/mdn.ipynb',\n formatter_class=argparse.ArgumentDefaultsHelpFormatter\n )\n\n parser.add_argument('--datafile', action='store', type=str,\n help='HDF5 file paths')\n parser.add_argument('--nb-epochs', action='store', type=int, default=50,\n help='Number of epochs to train for.')\n parser.add_argument('--batch-size', action='store', type=int, default=2,\n help='batch size per update')\n parser.add_argument('--ckpt_path', action='store', type=str,\n help='ckpt file paths')\n parser.add_argument('--produceEvent', action='store', type=int,\n help='produceEvent')\n parser.add_argument('--outFileName', action='store', type=str,\n help='outFileName file paths')\n parser.add_argument('--validation_file', action='store', type=str,\n help='validation_file file paths')\n\n\n\n\n\n\n parser.add_argument('--disc-lr', action='store', type=float, default=2e-5,\n help='Adam learning rate for discriminator')\n\n parser.add_argument('--gen-lr', action='store', type=float, default=2e-4,\n help='Adam learning rate for generator')\n\n parser.add_argument('--adam-beta', action='store', type=float, default=0.5,\n help='Adam beta_1 parameter')\n\n return parser\n\nif __name__ == '__main__':\n\n print('start...')\n #physical_devices = tf.config.experimental.list_physical_devices('GPU')\n #if physical_devices:\n # tf.config.experimental.set_memory_growth(physical_devices[0], True)\n #####################################\n parser = get_parser()\n parse_args = parser.parse_args()\n epochs = parse_args.nb_epochs\n batch_size = parse_args.batch_size\n datafile = parse_args.datafile\n ckpt_path = parse_args.ckpt_path\n produceEvent = parse_args.produceEvent\n outFileName = parse_args.outFileName\n validation_file = parse_args.validation_file\n #####################################\n print('constructing graph')\n tf.reset_default_graph()\n x = tf.placeholder(name='x',shape=(None,2),dtype=tf.float32)\n y = tf.placeholder(name='y',shape=(None,1),dtype=tf.float32)\n layer = x\n for _ in range(3):\n layer = tf.layers.dense(inputs=layer, units=12, activation=tf.nn.tanh)\n #Pred_y = tf.layers.dense(inputs=layer, units=1, activation=lambda x: tf.nn.elu(x) + 1)\n Pred_y = tf.layers.dense(inputs=layer, units=1, activation=lambda x: tf.nn.relu(x))\n cost = mse_cost(Pred_y, y)\n learning_rate = 0.0003\n optimizer = tf.train.AdamOptimizer(learning_rate).minimize(cost)\n # Add ops to save and restore all the variables.\n saver = tf.train.Saver()\n ########################################\n print('preparing data')\n f_DataSet = open(datafile, 'r')\n Data = []\n Event = []\n Batch = []\n for line in f_DataSet: \n idata = line.strip('\\n')\n idata = idata.strip(' ')\n if \"#\" in idata: continue ##skip the commented one\n 
Data.append(idata)\n print(idata)\n d = h5py.File(str(idata), 'r')\n ievent = d['infoMC'].shape[0]\n d.close()\n Event.append(float(ievent))\n Batch.append(int(float(ievent)/batch_size))\n total_event = sum(Event)\n f_DataSet.close() \n print('total sample:', total_event)\n print('All Batch:', Batch)\n ########################################\n print('commencing training')\n with tf.Session() as sess:\n sess.run(tf.global_variables_initializer())\n for epoch in range(epochs):\n total_cost = 0\n for ib in range(len(Batch)):\n hit_time, n_pe, theta = load_data(Data[ib])\n theta = theta.repeat(batch_size,axis=0)\n ibatch = Batch[ib]\n print('ib {0}, ibatch {1}'.format(ib, ibatch))\n for index in range(ibatch):\n hit_time_batch = hit_time [index * batch_size:(index + 1) * batch_size]\n n_pe_batch = n_pe [index * batch_size:(index + 1) * batch_size]\n for itheta in range(theta.shape[1]):\n train_x = theta [:,itheta:itheta+1]\n noise = np.random.uniform(-1, 1, (train_x.shape[0], 1))\n input_x = np.concatenate ((train_x, noise), axis=-1)\n train_y = n_pe_batch[:,itheta:itheta+1]\n if np.any(np.isnan(train_x)): print('find Nan in train_x')\n if np.any(np.isnan(train_y)): print('find Nan in train_y')\n _, c = sess.run([optimizer,cost], feed_dict={x:input_x, y:train_y})\n total_cost += c\n avg_cost = total_cost/(sum(Batch)*theta.shape[1])\n if epoch % 1 == 0:\n print('Epoch {0} | cost = {1:.4f}'.format(epoch,avg_cost))\n ### validation ############################\n print('Do validation')\n hit_time, n_pe, theta = load_data(validation_file)\n theta = theta.repeat(n_pe.shape[0],axis=0)\n cost_valid = 0 \n for itheta in range(theta.shape[1]):\n valid_x = theta [:,itheta:itheta+1]\n noise = np.random.uniform(-1, 1, (valid_x.shape[0], 1))\n input_x = np.concatenate ((valid_x, noise), axis=-1)\n rate_pred, c_pred = sess.run([Pred_y,cost],feed_dict={x:input_x, y:n_pe[:,itheta:itheta+1]})\n #print('valid cost = {0:.4f}'.format(c_pred))\n cost_valid = cost_valid + c_pred/theta.shape[1]\n print('ave valid cost = {0:.4f}'.format(cost_valid))\n \n #### produce predicted data #################\n print('Saving produced data')\n theta_list = list(set(theta[0,:]))\n theta_list.sort() \n print('theta_list=', len(theta_list))\n pred_n_pe = np.full((produceEvent, len(theta_list) ), 0 ,dtype=np.float32)#init\n for i in theta_list:\n ithe = np.full((produceEvent, 1 ), i ,dtype=np.float32)\n noise = np.random.uniform(-1, 1, (ithe.shape[0], 1))\n input_x = np.concatenate ((ithe, noise), axis=-1)\n y_pred = sess.run(Pred_y,feed_dict={x:input_x})\n y_pred = y_pred.reshape((-1,1))\n pred_n_pe[:,theta_list.index(i):theta_list.index(i)+1] = y_pred\n hf = h5py.File(outFileName, 'w')\n hf.create_dataset('pred_n_pe', data=pred_n_pe)\n hf.create_dataset('theta_set', data=np.array(theta_list))\n hf.close()\n print('Saved produced data %s'%outFileName)\n ############## Save the variables to disk.\n if False:\n save_path = saver.save(sess, \"%s/model.ckpt\"%(ckpt_path))\n print(\"Model saved in path: %s\" % save_path)\n print('done')\n","sub_path":"FastSim/JUNO/LearnPDF/learn_pdf.py","file_name":"learn_pdf.py","file_ext":"py","file_size_in_byte":8595,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"293966613","text":"import vlc\nimport time\nimport pathlib\nimport platform\nimport os\n\n# Tkinter\nimport tkinter as Tk\nfrom tkinter import ttk\n\n# Threading\nfrom threading import Thread, Event\n\nclass ttkTimer(Thread):\n\t\"\"\"a class serving same function as wxTimer... 
but there may be better ways to do this\n\t\"\"\"\n\tdef __init__(self, callback, tick):\n\t\tThread.__init__(self)\n\t\tself.callback = callback\n\t\tself.stopFlag = Event()\n\t\tself.tick = tick\n\t\tself.iters = 0\n\n\tdef run(self):\n\t\twhile not self.stopFlag.wait(self.tick):\n\t\t\tself.iters += 1\n\t\t\tself.callback()\n\n\tdef stop(self):\n\t\tself.stopFlag.set()\n\n\tdef get(self):\n\t\treturn self.iters\n\ndef _quit():\n\tprint(\"_quit: bye\")\n\troot = Tk_get_root()\n\troot.quit() # stops mainloop\n\troot.destroy() # this is necessary on Windows to prevent\n\t\t\t\t\t# Fatal Python Error: PyEval_RestoreThread: NULL tstate\n\tos._exit(1)\n\nclass Player(Tk.Frame):\n\t\"\"\"The main window has to deal with events.\n\t\"\"\"\n\tdef __init__(self, parent, config, title=\"Player\"):\n\t\tTk.Frame.__init__(self, parent)\n\n\t\tself.parent = parent\n\n\t\tif title == None:\n\t\t\ttitle = \"tk_vlc\"\n\t\tself.parent.title(title)\n\n\t\t# The second panel holds controls\n\t\tself.player = None\n\t\t# self.videopanel = ttk.Frame(self.parent)\n\t\t# self.canvas = Tk.Canvas(self.videopanel).pack(fill=Tk.BOTH,expand=1)\n\t\t# self.videopanel.pack(fill=Tk.BOTH,expand=1)\n\n\t\tctrlpanel = ttk.Frame(self.parent)\n\t\tpause = ttk.Button(ctrlpanel, text=\"Pause\", command=self.OnPause)\n\t\tplay = ttk.Button(ctrlpanel, text=\"Play\", command=self.OnPlay)\n\t\tstop = ttk.Button(ctrlpanel, text=\"Stop\", command=self.OnStop)\n\t\tvolume = ttk.Button(ctrlpanel, text=\"Volume\", command=self.OnSetVolume)\n\t\tpause.pack(side=Tk.LEFT)\n\t\tplay.pack(side=Tk.LEFT)\n\t\tstop.pack(side=Tk.LEFT)\n\t\tvolume.pack(side=Tk.LEFT)\n\t\tself.volume_var = Tk.IntVar()\n\t\tself.volslider = Tk.Scale(ctrlpanel, variable=self.volume_var, command=self.volume_sel,\n\t\t\t\tfrom_=0, to=100, orient=Tk.HORIZONTAL, length=100)\n\t\tself.volslider.pack(side=Tk.LEFT)\n\t\tctrlpanel.pack(side=Tk.BOTTOM)\n\n\t\tctrlpanel2 = ttk.Frame(self.parent)\n\t\tself.scale_var = Tk.DoubleVar()\n\t\tself.timeslider_last_val = \"\"\n\t\tself.timeslider = Tk.Scale(ctrlpanel2, variable=self.scale_var, command=self.scale_sel,\n\t\t\t\tfrom_=0, to=1000, orient=Tk.HORIZONTAL, length=500)\n\t\tself.timeslider.pack(side=Tk.BOTTOM, fill=Tk.X,expand=1)\n\t\tself.timeslider_last_update = time.time()\n\t\tctrlpanel2.pack(side=Tk.BOTTOM,fill=Tk.X)\n\t\tself.playedstr = Tk.StringVar()\n\t\tself.playedstr.set(\"\")\n\t\tself.played = Tk.Label(ctrlpanel2, textvariable=self.playedstr)\n\t\tself.played.pack()\n\n\n\t\t# VLC player controls\n\t\tself.Instance = vlc.Instance()\n\t\tself.player = self.Instance.media_player_new()\n\n\t\tself.player.audio_set_volume(config['default_volume'])\n\t\tself.volslider.set(config['default_volume'])\n\n\n\t\t# below is a test, now use the File->Open file menu\n\t\t#media = self.Instance.media_new('output.mp4')\n\t\t#self.player.set_media(media)\n\t\t#self.player.play() # hit the player button\n\t\t#self.player.video_set_deinterlace(str_to_bytes('yadif'))\n\n\t\tself.timer = ttkTimer(self.OnTimer, 1.0)\n\t\tself.timer.start()\n\t\tself.parent.update()\n\n\t\t#self.player.set_hwnd(self.GetHandle()) # for windows, OnOpen does does this\n\n\tdef addMedia(self, path):\n\t\tself.medianame = os.path.basename(path)\n\t\tself.media = self.Instance.media_new(path)\n\t\tself.player.set_media(self.media)\n\t\tself.OnPlay()\n\t\tself.volslider.set(self.player.audio_get_volume())\n\n\tdef OnPlay(self):\n\t\t\"\"\"Toggle the status to Play/Pause.\n\t\tIf no file is loaded, open the dialog window.\n\t\t\"\"\"\n\t\t# check if there 
is a file to play, otherwise open a\n\t\t# Tk.FileDialog to select a file\n\t\tif not self.player.get_media():\n\t\t\tpass\n\t\telse:\n\t\t\t# Try to launch the media, if this fails display an error message\n\t\t\tif self.player.play() == -1:\n\t\t\t\tself.errorDialog(\"Unable to play.\")\n\t\t\telse:\n\t\t\t\tself.playedstr.set(self.medianame)\n\n\t# def GetHandle(self):\n\t# return self.videopanel.winfo_id()\n\n\t#def OnPause(self, evt):\n\tdef OnPause(self):\n\t\t\"\"\"Pause the player.\n\t\t\"\"\"\n\t\tself.player.pause()\n\n\tdef OnStop(self):\n\t\t\"\"\"Stop the player.\n\t\t\"\"\"\n\t\tself.player.stop()\n\t\t# reset the time slider\n\t\tself.timeslider.set(0)\n\n\tdef OnTimer(self):\n\t\t\"\"\"Update the time slider according to the current movie time.\n\t\t\"\"\"\n\t\tif self.player == None:\n\t\t\treturn\n\t\t# since the self.player.get_length can change while playing,\n\t\t# re-set the timeslider to the correct range.\n\t\tlength = self.player.get_length()\n\t\tdbl = length * 0.001\n\t\tself.timeslider.config(to=dbl)\n\n\t\t# update the time on the slider\n\t\ttyme = self.player.get_time()\n\t\tif tyme == -1:\n\t\t\ttyme = 0\n\t\tdbl = tyme * 0.001\n\t\tself.timeslider_last_val = (\"%.0f\" % dbl) + \".0\"\n\t\t# don't want to programatically change slider while user is messing with it.\n\t\t# wait 2 seconds after user lets go of slider\n\t\tif time.time() > (self.timeslider_last_update + 2.0):\n\t\t\tself.timeslider.set(dbl)\n\n\tdef scale_sel(self, evt):\n\t\tif self.player == None:\n\t\t\treturn\n\t\tnval = self.scale_var.get()\n\t\tsval = str(nval)\n\t\tif self.timeslider_last_val != sval:\n\t\t\t# this is a hack. The timer updates the time slider.\n\t\t\t# This change causes this rtn (the 'slider has changed' rtn) to be invoked.\n\t\t\t# I can't tell the difference between when the user has manually moved the slider and when\n\t\t\t# the timer changed the slider. 
But when the user moves the slider tkinter only notifies\n\t\t\t# this rtn about once per second and when the slider has quit moving.\n\t\t\t# Also, the tkinter notification value has no fractional seconds.\n\t\t\t# The timer update rtn saves off the last update value (rounded to integer seconds) in timeslider_last_val\n\t\t\t# if the notification time (sval) is the same as the last saved time timeslider_last_val then\n\t\t\t# we know that this notification is due to the timer changing the slider.\n\t\t\t# otherwise the notification is due to the user changing the slider.\n\t\t\t# if the user is changing the slider then I have the timer routine wait for at least\n\t\t\t# 2 seconds before it starts updating the slider again (so the timer doesn't start fighting with the\n\t\t\t# user)\n\t\t\tself.timeslider_last_update = time.time()\n\t\t\tmval = \"%.0f\" % (nval * 1000)\n\t\t\tself.player.set_time(int(mval)) # expects milliseconds\n\n\n\tdef volume_sel(self, evt):\n\t\tif self.player == None:\n\t\t\treturn\n\t\tvolume = self.volume_var.get()\n\t\tif volume > 100:\n\t\t\tvolume = 100\n\t\tif self.player.audio_set_volume(volume) == -1:\n\t\t\tself.errorDialog(\"Failed to set volume\")\n\n\n\n\tdef OnToggleVolume(self, evt):\n\t\t\"\"\"Mute/Unmute according to the audio button.\n\t\t\"\"\"\n\t\tis_mute = self.player.audio_get_mute()\n\n\t\tself.player.audio_set_mute(not is_mute)\n\t\t# update the volume slider;\n\t\t# since vlc volume range is in [0, 200],\n\t\t# and our volume slider has range [0, 100], just divide by 2.\n\t\tself.volume_var.set(self.player.audio_get_volume())\n\n\tdef OnSetVolume(self):\n\t\t\"\"\"Set the volume according to the volume sider.\n\t\t\"\"\"\n\t\tvolume = self.volume_var.get()\n\t\t# vlc.MediaPlayer.audio_set_volume returns 0 if success, -1 otherwise\n\t\tif volume > 100:\n\t\t\tvolume = 100\n\t\tif self.player.audio_set_volume(volume) == -1:\n\t\t\tself.errorDialog(\"Failed to set volume\")\n\n\tdef errorDialog(self, errormessage):\n\t\t\"\"\"Display a simple error dialog.\n\t\t\"\"\"\n\t\tTk.tkMessageBox.showerror(self, 'Error', errormessage)\n","sub_path":"MultiPlay/player.py","file_name":"player.py","file_ext":"py","file_size_in_byte":7159,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"421121182","text":"import random\nfrom generator.renderscreen import RenderScreen as RenderScreen\nfrom generator.jsonhelper import JsonHelper as JsonHelper\nfrom generator.screendefinition import ScreenDefinition\n\nclass RenderScreens(): \n def __init__(self, directory, numberOfScreens, numberOfItemsOnScreenMax,\n numberOfItemsOnScreenMin, dpi, widthInches, heightInches, testScreen, randomScreen,\n jitterlow, jitterhigh, incrementer, cleanupcycles, render):\n #print(\"__init__\")\n self.directory = directory\n self.numberOfScreens = numberOfScreens\n self.numberOfItemsOnScreenMax = numberOfItemsOnScreenMax\n self.numberOfItemsOnScreenMin = numberOfItemsOnScreenMin\n self.dpi = dpi\n self.widthInches = widthInches\n self.heightInches = heightInches\n self.testScreen = testScreen\n self.randomScreen = randomScreen\n self.jitterlow = jitterlow\n self.jitterhigh = jitterhigh\n self.incrementer = incrementer\n self.cleanupcycles = cleanupcycles\n self.render = render\n \n def renderScreens(self):\n print(\"renderScreens\")\n for scrnum in range(self.numberOfScreens): \n numberOfItems = random.randint(self.numberOfItemsOnScreenMin,\n self.numberOfItemsOnScreenMax)\n filename = str(scrnum)\n widthInPixels = self.widthInches 
* self.dpi\n heightInPixels = self.heightInches * self.dpi\n figurewidth = self.widthInches\n figureheight = self.heightInches\n screenDefinition = ScreenDefinition(widthInPixels,\n heightInPixels,\n self.jitterlow,\n self.jitterhigh,\n self.incrementer,\n self.cleanupcycles)\n rs = RenderScreen(self.directory, filename, numberOfItems, self.dpi, self.widthInches,\n self.heightInches, self.jitterlow, self.jitterhigh,\n self.incrementer, self.cleanupcycles, self.testScreen, screenDefinition) \n\n if self.testScreen:\n screenDefinition.testItems()\n if self.randomScreen:\n screenDefinition.randomGenerate(numberOfItems)\n screenDefinition.cleanUp() \n if self.render:\n rs.renderScreen()\n else:\n jh = JsonHelper()\n jh.encode(self.directory\n +filename,\n screenDefinition,\n figurewidth,\n figureheight,\n self.dpi)","sub_path":"generator/renderscreens.py","file_name":"renderscreens.py","file_ext":"py","file_size_in_byte":2790,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"193137137","text":"from sys import stdin\ninput = stdin.readline\n\ndef _9461(n : int):\n P = [1, 1, 1, 2, 2] + [0] * (n - 5)\n\n for i in range(5, n):\n P[i] = P[i - 1] + P[i - 5]\n\n return P[n - 1]\n\n\nif __name__ == \"__main__\":\n testcase = int(input())\n for _ in range(testcase):\n print(_9461(int(input())))\n","sub_path":"Algorithm/SANGJUN/9461.py","file_name":"9461.py","file_ext":"py","file_size_in_byte":311,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"537535387","text":"\"\"\"empty message\n\nRevision ID: d1fc63ce0983\nRevises: f65d7b059f87\nCreate Date: 2018-10-10 11:10:04.291044\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = 'd1fc63ce0983'\ndown_revision = 'f65d7b059f87'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.add_column('handshake', sa.Column('from_request', sa.String(length=255), server_default='', nullable=True))\n op.add_column('outcome', sa.Column('from_request', sa.String(length=255), server_default='', nullable=True))\n op.add_column('shaker', sa.Column('from_request', sa.String(length=255), server_default='', nullable=True))\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! 
###\n op.drop_column('shaker', 'from_request')\n op.drop_column('outcome', 'from_request')\n op.drop_column('handshake', 'from_request')\n # ### end Alembic commands ###\n","sub_path":"restapi/migrations/versions/d1fc63ce0983_.py","file_name":"d1fc63ce0983_.py","file_ext":"py","file_size_in_byte":1014,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"}
+{"seq_id":"253857296","text":"#!/usr/bin/env python2\n\nimport random\n\nfilename = \"20k.txt\"\n\ndef main(filename):\n\twith open(filename) as words_file:\n\t\twords = list(set(word.strip() for word in words_file))\n\twhile True:\n\t\tlength = raw_input(\"Words:\")\n\t\ttry:\n\t\t\tlength = int(length)\n\t\t\tprint(\" \".join(random.choice(words) for _ in range(length)))\n\t\texcept:\n\t\t\tprint(\"Please enter a valid number.\")\n\n\nif __name__ == \"__main__\":\n\tmain(filename)\n","sub_path":"correcthorse.py","file_name":"correcthorse.py","file_ext":"py","file_size_in_byte":409,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"}
+{"seq_id":"422082592","text":"import smtplib\nfrom email.mime.text import MIMEText\nfrom email.mime.multipart import MIMEMultipart\n\nclass mail:\n\tdef __init__(self,address, wild,location):\n\t\tself.email = address\n\t\tself.wildtype = wild\n\t\tself.loc = location\n\tdef sent(self, emsg,wildtype):\n\t\tme = \"no-reply@corrna.cs.mcgill.ca\"\n\t\tmsg = MIMEMultipart()\n\t\tmsg['Subject'] = \"Results \" + str(self.wildtype)\n\t\tmsg[\"From\"] = me\n\t\tmsg[\"To\"] = self.email\n\t\thtml = \"link: \"+self.loc+\"\"+emsg+\"\"\n\t\tmsg.attach(MIMEText(html, 'html'))\n\t\ts = smtplib.SMTP('localhost')\n\t\ts.sendmail(me, self.email, msg.as_string())\n\t\ts.quit()\n\t\t\t\n","sub_path":"public_html/mail.py","file_name":"mail.py","file_ext":"py","file_size_in_byte":595,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"}
+{"seq_id":"19332335","text":"import nmap\n\nclass NMAPUtil:\n def get_active_ips(self):\n nm = nmap.PortScanner()\n\n nm.scan(hosts='10.0.0.0/24', arguments='-n -sP')\n\n hosts_list = [(x, nm[x]['status']['state']) for x in nm.all_hosts()]\n\n active_ips = set()\n for host, status in hosts_list:\n if str(status).lower() == \"up\":\n active_ips.add(host)\n\n return active_ips\n","sub_path":"extras/data_transfer/nmap_util.py","file_name":"nmap_util.py","file_ext":"py","file_size_in_byte":403,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"}
+{"seq_id":"183842326","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('blog', '0008_auto_20170927_2340'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='ExampleModel',\n fields=[\n ('id', models.AutoField(verbose_name='ID', primary_key=True, serialize=False, auto_created=True)),\n ('model_pic', models.ImageField(upload_to='media/', default='media/xx.jpg')),\n ],\n ),\n ]\n","sub_path":"blog/migrations/0009_examplemodel.py","file_name":"0009_examplemodel.py","file_ext":"py","file_size_in_byte":567,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"}
+{"seq_id":"145541062","text":"DOMAIN_GENERAL_DATABUNCH_NAME = 'domain_general_databunch'\nDOMAIN_SPECIFIC_DATABUNCH_NAME = 'domain_specific_databunch'\nTARGET_CLASSIFIER_DATABUNCH_NAME = 'target_classifier_databunch'\n\nGENERAL_LM_FILENAME = \"generallanguagemodel\"\nGENERAL_LM_VOCAB_FILENAME = \"generallanguagemodelvocab\"\nDOMAIN_SPECIFIC_LM_FILENAME = \"domainspecificlanguagemodel\"\nDOMAIN_SPECIFIC_ENCODER_FILENAME = \"domainspecificencoder\"\n\nTARGET_CLASSIFIER_EXPORT_FILENAME = \"targetclassifier.pkl\"","sub_path":"fincher/consts.py","file_name":"consts.py","file_ext":"py","file_size_in_byte":465,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"}
+{"seq_id":"307714497","text":"from django.conf.urls.defaults import *\n\n# Uncomment the next two lines to enable the admin:\nfrom django.contrib import admin\nadmin.autodiscover()\n\nhandler500 = 'boonegrown.apps.core.views.server_error'\nhandler404 = 'boonegrown.apps.core.views.not_found_error'\n\nurlpatterns = patterns('',\n url(r'^$',\n 'django.views.generic.simple.direct_to_template',\n {'template': 'index.html'},\n name='index'),\n (r'^admin/', include(admin.site.urls)),\n)\n","sub_path":"urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":467,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"}
+{"seq_id":"239893270","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Tue Aug 13 17:03:07 2019\r\n\r\n@author: kasy\r\n\"\"\"\r\n\r\nimport os\r\nimport cv2\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nimport skimage\r\n\r\ntest_path = '../dataset_transfer/STARE/train/images/'\r\ntest_list = []\r\n\r\nfor i in os.listdir(test_path):\r\n test_list.append(os.path.join(test_path, i))\r\n \r\ndef clahe(gray_img):\r\n clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8, 8))\r\n imgs_equalized = clahe.apply(np.array(gray_img, dtype=np.uint8))\r\n return imgs_equalized \r\n \r\ndef LUV_contrast_norm(img):\r\n #BGR, cv2\r\n #assert img.shape[2] != 3\r\n L_channel = img[..., 0]\r\n L_clahe = clahe(L_channel)\r\n \r\n new_LUV = np.stack([L_clahe, img[..., 1], img[..., 2]], 2)\r\n new_RGB = cv2.cvtColor(new_LUV, cv2.COLOR_LUV2RGB)\r\n \r\n return new_RGB\r\n\r\n\r\nfig, axes = plt.subplots(2, 6, figsize=(25,8))\r\n\r\nfig.suptitle('Origin rgb and pre-processed rgb in LUV', fontsize=20)\r\nfor i in range(6):\r\n img = cv2.imread(test_list[i])\r\n img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\r\n img_LUV = cv2.cvtColor(img, cv2.COLOR_RGB2LUV)\r\n \r\n img_norm = LUV_contrast_norm(img_LUV)\r\n \r\n axes[0, i].imshow(img)\r\n axes[1, i].imshow(img_norm)\r\n \r\naxes[0,0].set_ylabel('Origin rgb', size='large')\r\naxes[1,0].set_ylabel('Norm rgb', size='large')\r\n\r\nplt.savefig('LUV.png', format='png', dpi=300)\r\nplt.show()\r\n \r\n","sub_path":"visualization/pre_LUV.py","file_name":"pre_LUV.py","file_ext":"py","file_size_in_byte":1403,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"}
+{"seq_id":"112663078","text":"\n\nfrom xai.brain.wordbase.nouns._corollary import _COROLLARY\n\n#class header\nclass _COROLLARIES(_COROLLARY, ):\n\tdef __init__(self,): \n\t\t_COROLLARY.__init__(self)\n\t\tself.name = \"COROLLARIES\"\n\t\tself.specie = 'nouns'\n\t\tself.basic = \"corollary\"\n\t\tself.jsondata = {}\n","sub_path":"xai/brain/wordbase/nouns/_corollaries.py","file_name":"_corollaries.py","file_ext":"py","file_size_in_byte":261,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"}
+{"seq_id":"105865181","text":"\"\"\"\nArray X is greater than array Y if the first \nnon-matching element in both arrays has a greater \nvalue in X than in Y.\nFor example, for arrays X and Y such that:\n\nX = [1, 2, 4, 3, 5]\nY = [1, 2, 3, 4, 5]\n\nX is greater than Y because the first element that \ndoes not match is larger in X (i.e. for X[2] and Y[2], X[2] > Y[2]).\nA contiguous subarray is defined by an interval of \nthe indices. In other words, a contiguous subarray \nis a subarray which has consecutive indexes.\nWrite a function that, given a zero-indexed array \nA consisting of N integers and an integer K, returns \nthe largest contiguous subarray of length K from all \nthe contiguous subarrays of length K.\nFor example, given array A and K = 4 such that:\n\nA = [1, 4, 3, 2, 5]\n\nthe function should return [4, 3, 2, 5], because \nthere are two subarrays of size 4:\n[1, 4, 3, 2]\n[4, 3, 2, 5]\nand the largest subarray is [4, 3, 2, 5].\n\nAssume that:\n 1 ≤ K ≤ N ≤ 100;\n 1 ≤ A[J] ≤ 1000;\n\nthe given array A contains N distinct integers.\nIn your solution, focus on correctness. 
The \nperformance of your solution will not be the \nprimary focus of the assessment.\n\"\"\"\n\ndef solution(N, K):\n    \"\"\"\n    returns the largest contiguous subarray \n    of length K from all the contiguous \n    subarrays of length K\n\n    note: I assume that a subarray A is larger \n    than subarray B if the first non-matching\n    element in A is greater than B.\n    \"\"\"\n    a=0; b=K-1\n    ans=[ 0 for i in range(K) ]\n    while b!=len(N):\n        tmp=N[a:b+1] #new subarray\n        i=0; ok=True\n        while ok and i<K:\n            if tmp[i]>ans[i]: # is A greater than B?\n                ans=tmp\n                ok=False\n            elif tmp[i]<ans[i]:\n                ok=False\n            i+=1\n        a+=1; b+=1\n    return ans\n    \n\n\ndef main():\n    N=[10,20,30,100]\n    K=4\n    print(solution(N, K))\nmain()","sub_path":"EXTRA/G2-Largest Subarray/subarray.py","file_name":"subarray.py","file_ext":"py","file_size_in_byte":1880,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"}
+{"seq_id":"553841950","text":"import time\nimport datetime\n\nimport config\nfrom mail import send_email\nfrom currency import DowntimeException\n\ntime.sleep(300)\nerrors = 0\n\n\nwhile True:\n    title = []\n    body = []\n\n    try:\n        for currency in config.currencies:\n            temp_title, temp_body = currency.generate_mail_lists()\n            title.extend(temp_title)\n            body.extend(temp_body)\n            body.append('\\n') # make line between currencies\n\n        if len(title) > 0:\n            send_email(config.SMTP_SERVER, config.PORT, config.LOGIN, config.PASSWORD, config.RECIPENTS,\n                       ' '.join(title),\n                       '
\\n'.join(body)\n )\n errors = 0\n\n except DowntimeException:\n time.sleep(300)\n continue\n\n except Exception as e:\n errors += 1\n\n with open('logs', 'a+') as f:\n f.write(\n '{0}: {1}!\\n'.format(\n str(datetime.datetime.now().replace(microsecond=0)),\n str(e)\n )\n )\n\n if errors >= 5:\n break\n\n time.sleep(600)\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1124,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"540213059","text":"from bs4 import BeautifulSoup\nfrom flask import Flask, flash, make_response, render_template, request, redirect, abort\nfrom os import environ\nfrom redis_namespace import StrictRedis\nfrom uuid import uuid4\nimport re\n\napp = Flask(__name__)\nlevel = int(environ['XSS_LEVEL'])\nredis = StrictRedis(\n\thost=environ.get('REDIS_HOST', 'localhost'),\n\tport=environ.get('REDIS_PORT', '6379'),\n\tnamespace=f'{level}:')\n\ntriggers = [\"script\", \"onabort\", \"onblur\", \"onchange\", \"onclick\", \"ondblclick\",\n \"ondragdrop\", \"onerror\", \"onfocus\", \"onkeydown\", \"onkeypress\",\n \"onkeyup\", \"onload\", \"onmousedown\", \"onmousemove\", \"onmouseout\",\n \"onmouseover\", \"onmouseup\", \"onmove\", \"onreset\", \"onresize\", \"onselect\",\n \"onsubmit\", \"onunload\", \"javascript\"]\ncamelTriggers = [\"onAbort\", \"onBlur\", \"onChange\", \"onClick\", \"onDblClick\",\n \"onDragDrop\", \"onError\", \"onFocus\", \"onKeyDown\", \"onKeyPress\",\n \"onKeyUp\", \"onLoad\", \"onMouseDown\", \"onMouseMove\", \"onMouseOut\",\n \"onMouseOver\", \"onMouseUp\", \"onMove\", \"onReset\", \"onResize\", \"onSelect\",\n \"onSubmit\", \"onUnload\", \"javaScript\"]\n\n\ndef filter(data):\n if (level == 1):\n for trig in triggers:\n Trig = trig[0].upper()+trig[1:]\n data = data.replace(trig,\"\")\n data = data.replace(Trig,\"\")\n for trig in camelTriggers:\n Trig = trig[0].upper()+trig[1:]\n data=data.replace(trig,\"\")\n data=data.replace(Trig,\"\")\n elif (level == 2):\n for trig in triggers:\n data = re.sub(trig,\"\",data, flags=re.IGNORECASE)\n elif (level == 3):\n for trig in triggers:\n while(re.search(trig,data,flags=re.IGNORECASE)):\n data=re.sub(trig,\"\",data, flags=re.IGNORECASE)\n else:\n data=\"\"\n return data\n\n@app.route('/')\ndef index():\n\treturn render_template('index.html')\n\n@app.route('/posts', methods=['POST'])\ndef submit():\n\tdata = request.form['input']\n\tuuid = str(uuid4())\n\tredis.set(uuid, filter(data).encode())\n\treturn redirect(f'/post/{uuid}')\n\n@app.route('/view', methods=['POST'])\ndef view():\n return render_template('source.html', level = level)\n\n@app.route('/post/')\ndef level1(uuid):\n\tif redis.exists(uuid):\n\t\tresp = make_response(render_template('post.html', post=redis.get(uuid).decode()))\n\t\treturn resp\n\tabort(404)\n\nif __name__ == '__main__':\n\tapp.run(host='0.0.0.0', port=8000)\n","sub_path":"challenges/filter-1/deploy/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2464,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"299719022","text":"#!/usr/bin/env python2\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Jul 11 10:14:41 2018\n\n@author: ernestmordret\n\"\"\"\noutput_dir = (\"output\",) # output folder\npath_to_fasta = (\"\",) #\ntol = (0.005,)\nn_term_prob_cutoff = (0.05,)\nc_term_prob_cutoff = (0.05,)\npositional_probability_cutoff = (0.95,)\nfdr = (0.01,)\nregex = (\"gene_symbol\\:(.*)(\\s|$)\",) # regex for extraction gene symbol from 
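
In the filter-1 Flask record above, the final route is registered as @app.route('/post/') while its view function takes a uuid argument, so the URL parameter was evidently lost to the same angle-bracket stripping. The decorator presumably read:

    @app.route('/post/<uuid>')   # the <uuid> converter was stripped during extraction
    def level1(uuid):
        if redis.exists(uuid):
            return make_response(render_template('post.html', post=redis.get(uuid).decode()))
        abort(404)
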
fasta\nexcluded_samples = ([],)\npath_to_evidence = (\"/path/to/evidence.txt\",)\npath_to_matched_features = (\"path/to/matchedFeatures.txt\",)\npath_to_peptides = (\n \"/path/to/peptides.txt\",\n) # path to MaxQuant's table, allPeptides.txt\nmz_tol = 10 * 10 ** -6 # m/z tolerance for the fetching unidentified features\nrt_tol = 0.3 # # retention time tolerance for the fetching unidentified features\n","sub_path":"substitutions/params_example.py","file_name":"params_example.py","file_ext":"py","file_size_in_byte":782,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"393367119","text":"from django.contrib import admin\nfrom django.urls import re_path, include\nfrom django.conf import settings\nfrom django.views import static\nfrom . import views\n\n\nurlpatterns = [\n re_path(r'^$', views.main, name='main'),\n re_path(r'^admin/', admin.site.urls),\n re_path(r'^theme/', include(('theme.urls', 'theme'), namespace='theme')),\n re_path(r'^information/', include(('information.urls', 'information'), namespace='information')),\n re_path(r'^FAQ/', include(('FAQ.urls', 'FAQs'), namespace='FAQ'))\n]\n\n\nstatic_list = [\n (settings.STATIC_URL, settings.STATIC_ROOT),\n (settings.MEDIA_URL, settings.MEDIA_ROOT),\n]\n\n\nfor (prefix_url, root) in static_list:\n if '://' not in prefix_url: # 외부 서버에서 서빙하는 것이 아니라면\n prefix_url = prefix_url.lstrip('/')\n url_pattern = r'^' + prefix_url + r'(?P.+)'\n pattern = re_path(url_pattern, static.serve, kwargs={'document_root': root})\n urlpatterns.append(pattern)","sub_path":"Desktop/rooms/bucheon/ch1/mysite/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":982,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"192224057","text":"import torch\nfrom torch import nn\nimport torch.nn.functional as F\n\nfrom einops import rearrange\nfrom einops.layers.torch import Rearrange\n\n# helpers\n\ndef pair(t):\n return t if isinstance(t, tuple) else (t, t)\n\ndef posemb_sincos_2d(h, w, dim, temperature: int = 10000, dtype = torch.float32):\n y, x = torch.meshgrid(torch.arange(h), torch.arange(w), indexing=\"ij\")\n assert (dim % 4) == 0, \"feature dimension must be multiple of 4 for sincos emb\"\n omega = torch.arange(dim // 4) / (dim // 4 - 1)\n omega = 1.0 / (temperature ** omega)\n\n y = y.flatten()[:, None] * omega[None, :]\n x = x.flatten()[:, None] * omega[None, :]\n pe = torch.cat((x.sin(), x.cos(), y.sin(), y.cos()), dim=1)\n return pe.type(dtype)\n\n# they use a query-key normalization that is equivalent to rms norm (no mean-centering, learned gamma), from vit 22B paper\n\n# in latest tweet, seem to claim more stable training at higher learning rates\n# unsure if this has taken off within Brain, or it has some hidden drawback\n\nclass RMSNorm(nn.Module):\n def __init__(self, heads, dim):\n super().__init__()\n self.scale = dim ** 0.5\n self.gamma = nn.Parameter(torch.ones(heads, 1, dim) / self.scale)\n\n def forward(self, x):\n normed = F.normalize(x, dim = -1)\n return normed * self.scale * self.gamma\n\n# classes\n\nclass FeedForward(nn.Module):\n def __init__(self, dim, hidden_dim):\n super().__init__()\n self.net = nn.Sequential(\n nn.LayerNorm(dim),\n nn.Linear(dim, hidden_dim),\n nn.GELU(),\n nn.Linear(hidden_dim, dim),\n )\n def forward(self, x):\n return self.net(x)\n\nclass Attention(nn.Module):\n def __init__(self, dim, heads = 8, dim_head = 64):\n super().__init__()\n inner_dim = dim_head * heads\n self.heads = heads\n self.norm = 
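
The bucheon urls.py record above builds url_pattern = r'^' + prefix_url + r'(?P.+)', which is not even a valid regex: (?P must introduce a named group. Because django.views.static.serve receives the captured value as its path argument, the group was almost certainly (?P<path>.+) before the angle brackets were stripped:

    url_pattern = r'^' + prefix_url + r'(?P<path>.+)'  # restored named group; static.serve binds it as `path`
    pattern = re_path(url_pattern, static.serve, kwargs={'document_root': root})
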
nn.LayerNorm(dim)\n\n self.attend = nn.Softmax(dim = -1)\n\n self.q_norm = RMSNorm(heads, dim_head)\n self.k_norm = RMSNorm(heads, dim_head)\n\n self.to_qkv = nn.Linear(dim, inner_dim * 3, bias = False)\n self.to_out = nn.Linear(inner_dim, dim, bias = False)\n\n def forward(self, x):\n x = self.norm(x)\n\n qkv = self.to_qkv(x).chunk(3, dim = -1)\n q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> b h n d', h = self.heads), qkv)\n\n q = self.q_norm(q)\n k = self.k_norm(k)\n\n dots = torch.matmul(q, k.transpose(-1, -2))\n\n attn = self.attend(dots)\n\n out = torch.matmul(attn, v)\n out = rearrange(out, 'b h n d -> b n (h d)')\n return self.to_out(out)\n\nclass Transformer(nn.Module):\n def __init__(self, dim, depth, heads, dim_head, mlp_dim):\n super().__init__()\n self.norm = nn.LayerNorm(dim)\n self.layers = nn.ModuleList([])\n for _ in range(depth):\n self.layers.append(nn.ModuleList([\n Attention(dim, heads = heads, dim_head = dim_head),\n FeedForward(dim, mlp_dim)\n ]))\n def forward(self, x):\n for attn, ff in self.layers:\n x = attn(x) + x\n x = ff(x) + x\n return self.norm(x)\n\nclass SimpleViT(nn.Module):\n def __init__(self, *, image_size, patch_size, num_classes, dim, depth, heads, mlp_dim, channels = 3, dim_head = 64):\n super().__init__()\n image_height, image_width = pair(image_size)\n patch_height, patch_width = pair(patch_size)\n\n assert image_height % patch_height == 0 and image_width % patch_width == 0, 'Image dimensions must be divisible by the patch size.'\n\n patch_dim = channels * patch_height * patch_width\n\n self.to_patch_embedding = nn.Sequential(\n Rearrange(\"b c (h p1) (w p2) -> b (h w) (p1 p2 c)\", p1 = patch_height, p2 = patch_width),\n nn.LayerNorm(patch_dim),\n nn.Linear(patch_dim, dim),\n nn.LayerNorm(dim),\n )\n\n self.pos_embedding = posemb_sincos_2d(\n h = image_height // patch_height,\n w = image_width // patch_width,\n dim = dim,\n ) \n\n self.transformer = Transformer(dim, depth, heads, dim_head, mlp_dim)\n\n self.pool = \"mean\"\n self.to_latent = nn.Identity()\n\n self.linear_head = nn.LayerNorm(dim)\n\n def forward(self, img):\n device = img.device\n\n x = self.to_patch_embedding(img)\n x += self.pos_embedding.to(device, dtype=x.dtype)\n\n x = self.transformer(x)\n x = x.mean(dim = 1)\n\n x = self.to_latent(x)\n return self.linear_head(x)\n","sub_path":"vit_pytorch/simple_vit_with_qk_norm.py","file_name":"simple_vit_with_qk_norm.py","file_ext":"py","file_size_in_byte":4548,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"235040370","text":"from . 
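
In the simple_vit_with_qk_norm.py record above, num_classes is accepted by SimpleViT.__init__ but never used: linear_head is only a LayerNorm, so forward() returns dim-wide features rather than class logits. Other SimpleViT variants in the same repository end with a norm followed by a linear projection; a sketch of that head, assuming classification was intended:

    # replaces `self.linear_head = nn.LayerNorm(dim)` in __init__
    self.linear_head = nn.Sequential(
        nn.LayerNorm(dim),
        nn.Linear(dim, num_classes),  # without this projection, num_classes is dead code
    )
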
import db\n\n\nclass Role(db.Model):\n __tablename__ = 'roles'\n id = db.Column(db.Integer, primary_key = True)\n name = db.Column(db.String, unique = True)\n default = db.Column(db.Boolean, default = False, index = True)\n permissions = db.Column(db.Integer)\n users = db.relationship('User', backref='role', lazy='dynamic')\n\n @staticmethod\n def insert_roles():\n roles = {\n 'User': (Permissions.FOLLOW |\n Permissions.COMMENT |\n Permissions.WRITE_ARTICLES, True),\n 'Moderator': (Permissions.FOLLOW |\n Permissions.COMMENT |\n Permissions.WRITE_ARTICLES |\n Permissions.MODERATE_COMMENTS, False),\n 'Administrator': (0xff, False)\n }\n\n for r in roles:\n role = Role.query.filter_by(name=r).first()\n if role is None:\n role = Role(name=r)\n role.permissions = roles[r][0]\n role.default = roles[r][1]\n db.session.add(role)\n db.session.commit()\n\n def __repr__(self):\n return ''.format(self.name, self.id)\n\nclass Permissions:\n FOLLOW = 0x01\n COMMENT = 0x02\n WRITE_ARTICLES = 0x04\n MODERATE_COMMENTS = 0x08\n ADMINISTRATOR = 0X80\n","sub_path":"app/models/roles.py","file_name":"roles.py","file_ext":"py","file_size_in_byte":1380,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"99875772","text":"#!/usr/bin/python\nfrom Tkinter import *\nimport time\ngui = Tk()\nscreen_height = gui.winfo_screenheight()\nscreen_width = gui.winfo_screenwidth()\n\n# gui.geometry(str(screen_width) + \"x\" + str(screen_height))\n# c = Canvas(gui ,width=screen_width ,height=screen_height)\n# c.pack()\n# oval = c.create_oval(5,5,60,60,fill='pink')\n# xd = 5\n# yd = 10\n\nframes = [PhotoImage(file='rick.gif',format = 'gif -index %i' %(i)) for i in range(61)]\n\ndef update(ind):\n if ind == 61:\n ind = 0\n frame = frames[ind]\n ind += 1\n label.configure(image=frame)\n gui.after(100, update, ind)\nlabel = Label(gui)\nlabel.pack()\ngui.after(0, update, 0)\n\n# while True:\n# c.move(oval,xd,yd)\n# p=c.coords(oval)\n# if p[3] >= screen_height or p[1] <=0:\n# yd = -yd\n# if p[2] >= screen_width or p[0] <=0:\n# xd = -xd\n# gui.update()\n# time.sleep(0.025) \ngui.title(\"First title\")\ngui.mainloop()","sub_path":"CherryAgent/graphic_interface/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":886,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"196749993","text":"from torch.autograd import Variable\nfrom layers.modules import MultiBoxLoss\nfrom tqdm import tqdm\nfrom utils.utils import adjust_learning_rate\n\n\nclass Client:\n def __init__(self, client_id, dataloader, cfg, args, device='cpu'):\n self.client_id = client_id\n self.dataloader = dataloader\n self.device = device\n self.__model = None\n self.cfg = cfg\n self.args = args\n self.index_step = 1\n\n @property\n def model(self):\n return self.__model\n\n @model.setter\n def model(self, model):\n self.__model = model\n\n def client_update(self, optimizer, optimizer_args, local_epoch, n_round):\n raise NotImplementedError\n\n def __len__(self):\n return len(self.dataloader.dataset)\n\n\nclass FedAvgClient(Client):\n def client_update(self, optimizer, optimizer_args, local_epoch, n_round):\n self.model.train()\n self.model.to(self.device)\n\n optimizer = optimizer(self.model.parameters(), **optimizer_args)\n if n_round in self.args.lr_stage == 0:\n adjust_learning_rate(optimizer, self.args.lr, 0.1, self.index_step)\n self.index_step += 1\n\n # to do\n criterion = MultiBoxLoss(self.cfg['num_classes'], 0.5, True, 0, True, 3, 0.5,\n False, 
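
The roles.py record above ends __repr__ with return ''.format(self.name, self.id) — a two-argument format call against an empty template, another casualty of angle-bracket stripping (a conventional repr such as <Role ...> both starts and ends with exactly the characters the stripper eats). A plausible reconstruction; the wording of the template is an assumption:

    def __repr__(self):
        return '<Role {}: {}>'.format(self.name, self.id)
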
self.args.cuda)\n\n print(\"Client training ...\")\n for epoch in range(local_epoch):\n for images, targets in tqdm(self.dataloader):\n # load train data\n if self.args.cuda:\n images = Variable(images.cuda())\n targets = [Variable(ann.cuda(), volatile=True) for ann in targets]\n else:\n images = Variable(images)\n targets = [Variable(ann, volatile=True) for ann in targets]\n\n # forward\n out = self.model(images)\n # backprop\n optimizer.zero_grad()\n loss_l, loss_c = criterion(out, targets)\n loss = loss_l + loss_c\n loss.backward()\n optimizer.step()\n\n self.model.to(\"cpu\")\n","sub_path":"src/fed_zoo/client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":2138,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"586795231","text":"\n#-*- coding: utf-8 -*-\nimport sys\nfrom PyQt5 import QtCore, QtGui, QtWidgets\nfrom PyQt5.QtCore import QThread\nfrom ioInterface import Ui_ioInterface\nfrom transitions import Machine\nfrom time import sleep\n\nfrom Adafruit_MotorHAT import Adafruit_StepperMotor, Adafruit_MotorHAT\nfrom hardware import conveyor, dispenser,stepper,expander,ejector,tof, i2cutil,piface\n\nimport wiringpi\nimport threading,subprocess,smbus\nimport time\nimport atexit\n\nwatchdog = False\n\n#Constant declarations\n\n#PiFace outputs\nPIFACE_RELAY0=200 #LED0\nPIFACE_RELAY1=201 #LED1\nPIFACE_LED2=202\nPIFACE_LED3=203\nPIFACE_LED4=204\nPIFACE_LED5=205\nPIFACE_LED6=206\nPIFACE_LED7=207 #used as board level heartbeat\n\n#PiFace outputs\nPIFACE_S0=208\nPIFACE_S1=209\nPIFACE_S2=210\nPIFACE_S3=211\n\nON=1\nOFF=0\n\n\n#Help functions\ndef swEmergencyStop():\n # motorHAT i2c board1\n stepper1.e_stop()\n # conveyor release\n conveyor1.reset()\n\n#stepper indexing passing stepper object, number of steps, direction and coil sytle\ndef stepper_worker(stepper, numsteps, direction, style):\n stepper.step(numsteps, direction, style)\n\n#ejector passing GUI start stop angle\ndef ejector_worker(ejector,servoStart,servoStop):\n ejector.eject(servoStart,servoStop)\n\nclass EdgeTrigger(object):\n def __init__(self, callback):\n self.value = None\n self.callback = callback\n\n def __call__(self, value):\n #if value != self.value:\n #this one will trigger a 0 to 1 transition only\n if value < self.value:\n self.callback(self.value, value)\n self.value = value\n\n#FSM definitions\n\nclass Conveyor_FSM(Machine):\n\n # states and transitions\n def __init__(self):\n self.running=True\n self.isEnabled = False\n self.isEjectEnabled = False\n self.isDispenseEnabled = False\n\n states = ['init', 'start', 'stop', 'dispense', 'eject']\n Machine.__init__(self, states=states, initial='init')\n\n self.add_transition('trigger', 'init', 'start', conditions='is_enabled')\n self.add_transition('trigger', 'start', 'stop', conditions='is_beam_broken')\n self.add_transition('trigger', 'stop', 'dispense', conditions='is_ready')\n self.add_transition('trigger', 'dispense', 'eject')\n self.add_transition('trigger', 'eject', 'start')\n\n # external transition control from GUI\n\n def shutdown(self):\n self.running=False\n\n def enable(self):\n self.isEnabled = True\n\n def disable(self):\n self.isEnabled = False\n\n def enable_eject(self):\n self.isEjectEnabled = True\n\n def disable_eject(self):\n self.isEjectEnabled = False\n\n def enable_dispense(self):\n self.isDispenseEnabled = True\n\n def disable_dispense(self):\n self.isDispenseEnabled = False\n\n\n #what to do when you enter a state\n def on_enter_init(self):\n conveyor1.stop()\n\n def 
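
client.py above gates learning-rate decay with if n_round in self.args.lr_stage == 0:, which Python evaluates as the chained comparison (n_round in self.args.lr_stage) and (self.args.lr_stage == 0) — false for any non-empty milestone list, so the decay never runs. (The Variable(..., volatile=True) wrappers are likewise long deprecated; torch.no_grad() is the modern equivalent.) The intended milestone check is presumably:

    # decay once whenever the current round hits a milestone in lr_stage
    if n_round in self.args.lr_stage:
        adjust_learning_rate(optimizer, self.args.lr, 0.1, self.index_step)
        self.index_step += 1
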
on_enter_start(self):\n conveyor1.fwd()\n sleep(.2)\n\n def on_enter_stop(self):\n conveyor1.stop()\n\n def on_enter_dispense(self):\n if self.isDispenseEnabled:\n dispenserUSB0.dispense(dispenser.connect_to_serial_port(dispenser.list_of_serial_ports()))\n else:\n sleep(1) #slow down for testing\n\n def on_enter_eject(self):\n if self.isEjectEnabled:\n ejector1.eject(150, 600)\n else:\n sleep(1) #slow down for testing\n\n #transition conditions\n\n def is_enabled(self):\n return self.isEnabled\n\n def is_disabled(self):\n return not self.isEnabled\n\n\n def is_beam_broken(self):\n return not wiringpi.digitalRead(212) #break beam sensor wiringpi.digitalRead(15)\n\n def is_ready(self):\n return True\n\n #bypass right now when not connected\n #return dispenserUSB0.is_ready(dispenser.connect_to_serial_port(dispenser.list_of_serial_ports()))\n\nclass Bin_FSM(Machine):\n\n # states and transitions\n def __init__(self, name, high_limit_pin, low_limit_pin, level_limit_pin):\n self.name = name\n self.running=True\n\n #gui controlled\n self.isEnabled=False\n\n #tof distance sensor data\n self.distance=0\n self.high_limit=0\n self.ready_limit=100\n self.low_limit=600\n\n #hardware pin assignments\n self.high_level_pin = high_limit_pin #65\n self.low_level_pin = low_limit_pin #66\n self.level_limit_pin = level_limit_pin #81\n\n self.stepper = stepper.Stepper(\"Bin-1\", 0x60, 1, 65, 66, 81) # generic stepper object\n\n states = ['init', 'start', 'up', 'down']\n Machine.__init__(self, states=states, initial='init')\n\n self.add_transition('trigger', 'init', 'start',conditions='is_enabled')\n self.add_transition('trigger', 'start', 'down')\n self.add_transition('trigger', 'down', 'down', unless='is_low_limit') #loop\n self.add_transition('trigger', 'down', 'up', conditions='is_low_limit')\n self.add_transition('trigger', 'up', 'up', unless='is_high_level') #loop is high_level\n self.add_transition('trigger', 'up', 'down', conditions='is_high_level') #same as loop condittion\n self.add_transition('trigger', 'up', 'down', conditions='is_high_limit')\n\n\n # external transition control from GUI\n\n def shutdown(self):\n self.running = False\n\n def enable(self):\n self.isEnabled = True\n\n def disable(self):\n self.isEnabled = False\n\n # set GUI parameter for distance sensor limits\n\n def set_high_limit(self,value):\n self.high_limit=value\n\n def set_ready_limit(self,value):\n self.ready_limit=value\n\n def set_low_limit(self,value):\n self.low_limit=value\n\n #state machine task and transitions\n\n def on_enter_init(self):\n self.stepper.stop()\n\n def on_enter_start(self):\n self.stepper.stop()\n\n def on_enter_up(self):\n #while not wiringpi.digitalRead(self.high_level_pin) and wiringpi.digitalRead(self.level_limit_pin) :\n self.stepper.oneStep_up()\n\n def on_enter_down(self):\n # while not wiringpi.digitalRead(self.low_level_pin) :\n self.stepper.oneStep_down()\n\n #transitions\n\n def is_enabled(self):\n return self.isEnabled\n\n def is_disabled(self):\n return not self.isEnabled\n\n def is_high_limit(self):\n return wiringpi.digitalRead(self.high_level_pin)\n\n def is_low_limit(self):\n return wiringpi.digitalRead(self.low_level_pin)\n\n def is_high_level(self):\n return wiringpi.digitalRead(self.high_level_pin)\\\n or not wiringpi.digitalRead(self.level_limit_pin) # or tof_sensor1.get_distance() <= self.ready_limit\n\n def is_distance_ready(self):\n return tof_sensor1.get_distance() < self.ready_limit\n\n#main GUI program\nclass MyGuiProgram(Ui_ioInterface):\n\n def __init__(self, dialog):\n 
Ui_ioInterface.__init__(self)\n self.setupUi(dialog)\n\n # create state machine\n self.belt = Conveyor_FSM()\n self.bin1 = Bin_FSM( 1, 65, 66, 81) #bin number,high_limit_pin,low_limit_pinl,lvl_limit_pin for control logic\n self.running = True\n\n #just for run method in seperate Thread\n self.stepper1 = stepper.Stepper(\"Bin-1\", 0x60, 1, 65, 66, 81)\n\n #register callbacks for button events\n #dispenser buttons\n self.btnDispense.clicked.connect(self.cb_dispense)\n self.btnGetStatus.clicked.connect(self.cb_get_status)\n self.btnReset.clicked.connect(self.cb_reset_card_dispenser)\n self.btnInit.clicked.connect(self.cb_write_hold_card)\n\n #stepper buttons\n self.btnUP.clicked.connect(self.cb_up)\n self.btnDN.clicked.connect(self.cb_down)\n\n #ejector buttons\n self.btnEject.clicked.connect(self.cb_eject)\n\n #Relay board conveyor control buttons\n self.btnFwd.clicked.connect(self.cb_fwd)\n self.btnStop.clicked.connect(self.cb_stop)\n self.btnRev.clicked.connect(self.cb_rev)\n self.btnReset.clicked.connect(self.cb_reset)\n\n #start thread from UI\n\n self.btnAUTO_THREAD.clicked.connect(self.cb_bin1_thread)\n self.btnMANUAL_THREAD.clicked.connect(self.cb_manual_thread)\n self.btnCONVEYOR_THREAD.clicked.connect(self.cb_conveyor_thread)\n\n # register 0 to 1 'rising edge' edge detector callback\n self.detector_bin1_stuck = EdgeTrigger(self.cb_bin1_card_stuck)\n self.detector_bin1_high_limit = EdgeTrigger(self.cb_bin1_high_limit)\n self.detector_bin1_low_limit = EdgeTrigger(self.cb_bin1_low_limit)\n self.detector_conveyor_index= EdgeTrigger(self.cb_conveyor_index)\n self.detector_bin1_level_limit = EdgeTrigger(self.cb_bin1_level_limit)\n\n # max counts interleaved mode for progress bar display\n self.cnt_max=6000\n self.cnt=0\n self.idxcount=0\n\n #self.bin1.run() #run bin1 thread method\n self.stepper1.start()\n\n #threads get started here\n def cb_auto_thread(self):\n #tauto = threading.Thread()\n tAuto = threading.Thread(name=\"EjectorBin1 AUTO started\", target=self.run_bin1_FSM)\n tAuto.daemon=True\n tAuto.start()\n\n def cb_manual_thread(self):\n #tmanual = threading.Thread()\n tManual = threading.Thread(name=\"EjectorBin1 MANUAL started\", target=self.stepper)\n tManual.daemon=True\n tManual.start()\n\n def cb_conveyor_thread(self):\n # tConveyor = threading.Thread()\n tConveyor = threading.Thread(name=\"Conveyor started\", target=self.run_conveyor_FSM)\n tConveyor.daemon = True\n tConveyor.start()\n\n def cb_bin1_thread(self):\n # tConveyor = threading.Thread()\n tBin1 = threading.Thread(name=\"Bin started\", target=self.run_bin1_FSM)\n tBin1.daemon = True\n tBin1.start()\n\n\n #run Finite State maxchines\n\n def shutdown(self):\n self.running = False\n\n def run_conveyor_FSM(self):\n txt = threading.currentThread().getName()\n self.Input_Status.addItem(txt)\n\n while self.running:\n\n if self.btnEnableDispense.isChecked():\n self.belt.enable_dispense()\n else:\n self.belt.disable_dispense()\n\n if self.btnEnableEject.isChecked():\n self.belt.enable_eject()\n else:\n self.belt.disable_eject()\n\n if self.btnEnable.isChecked():\n self.belt.enable()\n self.belt.trigger()\n else:\n self.belt.disable()\n self.belt.to_init()\n sleep(.5)\n\n def run_bin1_FSM(self):\n self.cnt = 0\n txt = threading.currentThread().getName()\n self.Input_Status.addItem(txt)\n\n while self.running:\n\n #reset counter at high or low limit\n if wiringpi.digitalRead(65) or wiringpi.digitalRead(66):\n self.cnt=0\n\n # indexing counter display based on state\n if self.bin1.is_up() and 
self.btnEnableCycle.isChecked():\n self.cnt +=1\n self.CycleCountBin1.display(self.cnt)\n self.bin1Level.setValue(self.cnt)\n # self.progressBar.setValue(self.cnt)\n\n if self.bin1.is_down() and self.btnEnableCycle.isChecked():\n self.cnt -= 1\n self.CycleCountBin1.display(self.cnt)\n self.bin1Level.setValue(self.cnt + self.cnt_max)\n # self.progressBar.setValue(self.cnt + self.cnt_max)\n\n if self.btnEnableCycle.isChecked():\n self.bin1.enable() # enable initial transitions\n self.bin1.trigger()\n else:\n self.bin1.disable()\n sleep(.5)\n\n #callbacks from button presses\n #manual indexing of stepper and ejector via a thread to not interrupt GUI\n\n def cb_up(self):\n txt = \"stepper up event triggered!\"\n self.Input_Status.addItem(txt)\n st1 = threading.Thread(name=\"my up thread\",target=stepper_worker,\n args=(stepper1, 4, Adafruit_MotorHAT.FORWARD, Adafruit_MotorHAT.INTERLEAVE))\n st1.start()\n\n def cb_down(self):\n txt = \"stepper dn event triggered!\"\n self.Input_Status.addItem(txt)\n st1 = threading.Thread(name=\"my dn thread\",target=stepper_worker,\n args=(stepper1, 4, Adafruit_MotorHAT.BACKWARD, Adafruit_MotorHAT.INTERLEAVE))\n st1.start()\n\n def cb_eject(self):\n # manual ejector control (passing GUI elements)\n txt = \"ejector event triggered!\"\n self.Input_Status.addItem(txt)\n st1 = threading.Thread(name=\"my ejector thread\", target=ejector_worker,\n args=(ejector1,self.servoStart.value(), self.servoStop.value()))\n st1.start()\n\n #manual dispenser controls\n\n def cb_dispense(self):\n dispenserUSB0.dispense(dispenser.connect_to_serial_port(dispenser.list_of_serial_ports()))\n\n def cb_reset_card_dispenser(self):\n dispenserUSB0.reset_card_dispenser(dispenser.connect_to_serial_port(dispenser.list_of_serial_ports()))\n\n def cb_write_hold_card(self):\n dispenserUSB0.write_hold_card(dispenser.connect_to_serial_port(dispenser.list_of_serial_ports()))\n\n def cb_get_status(self):\n self.lblDispenserStatus.setText(\n dispenserUSB0.request_status(dispenser.connect_to_serial_port(dispenser.list_of_serial_ports())))\n\n #manual conveyor controls\n\n def cb_fwd(self):\n conveyor1.fwd()\n\n def cb_rev(self):\n conveyor1.rev()\n\n def cb_stop(self):\n conveyor1.stop()\n\n def cb_reset(self):\n conveyor1.reset()\n self.idxcount = 0\n self.lcd_idxcounter.display(self.idxcount)\n\n #manual stepper control via a dial\n #rewrite somehow into s FSM ???\n def stepper(self):\n\n self.cnt = 0\n txt = threading.currentThread().getName()\n self.Input_Status.addItem(txt)\n\n while True:\n if self.bin1Level_target.value()> self.bin1Level.value() and not self.bin1_high_limit :\n stepper1.up()\n self.cnt = self.cnt + 1\n self.CycleCountBin1.display(self.cnt)\n self.bin1Level.setValue(self.cnt)\n\n elif self.bin1Level_target.value()< self.bin1Level.value() and not self.bin1_low_limit :\n stepper1.down()\n self.cnt = self.cnt-1\n self.CycleCountBin1.display(self.cnt)\n self.bin1Level.setValue(self.cnt)\n\n else:\n stepper1.stop()\n #update progressbar\n self.progressBar.setValue(self.cnt)\n\n # edge detector callbacks functions for the sensors\n\n def cb_bin1_level_limit(self,oldVal, newVal):\n txt = \"bin1 level reached\"\n self.Input_Status.addItem(txt)\n\n def cb_conveyor_index(self, oldVal, newVal):\n self.idxcount = self.idxcount + 1\n self.lcd_idxcounter.display(self.idxcount)\n\n def cb_bin1_card_stuck(self,oldVal, newVal):\n txt = \"bin1 card stuck\"\n self.Input_Status.addItem(txt)\n\n def cb_bin1_high_limit(self,oldVal, newVal):\n txt = \"bin1 high limit reached!\"\n 
self.Input_Status.addItem(txt)\n txt = str(abs(self.cnt))\n self.Input_Status.addItem(txt)\n #reset counter\n self.cnt = 0\n\n def cb_bin1_low_limit(self, oldVal, newVal):\n txt = \"bin1 low limit reached!\"\n self.Input_Status.addItem(txt)\n txt = str(abs(self.cnt))\n self.Input_Status.addItem(txt)\n #reset counter\n self.cnt = 0\n\n #Qtimer based functions\n\n def io_polling(self):\n\n\n #sometimes missing the event in other loops that are faster\n # assign inputs to monitor with edge detector\n self.detector_bin1_stuck(wiringpi.digitalRead(81))\n self.detector_bin1_level_limit(not wiringpi.digitalRead(82))\n\n self.detector_bin1_high_limit(wiringpi.digitalRead(65))\n self.detector_bin1_low_limit(wiringpi.digitalRead(66))\n\n #raspberry PiFace I/O\n self.detector_conveyor_index(not wiringpi.digitalRead(212)) #break beam sesnor wiringpi.digitalRead(15))\n\n #create new signals for UI\n self.ok_to_run=not wiringpi.digitalRead(65) and not wiringpi.digitalRead(66)\n self.bin1_high_limit = wiringpi.digitalRead(65)\n self.bin1_low_limit = wiringpi.digitalRead(66)\n self.beam_broken=not wiringpi.digitalRead(212) #break beam sesnor wiringpi.digitalRead(15)\n self.bin1_stuck = wiringpi.digitalRead(81)\n self.bin1_level_limit=not wiringpi.digitalRead(82)\n\n #create FSM updates on UI\n self.lblBin1_FSM.setText(self.bin1.state)\n self.lblConveyor_FSM.setText(self.belt.state)\n\n #create lcd displays\n #self.tofDistance.display(tof_sensor1.get_distance())\n #self.progressBar.setValue(tof_sensor1.get_distance())\n #self.bin1.set_distance(tof_sensor1.get_distance())\n #self.bin1.set_ready_limit(self.tofDistanceSP.intValue())\n\n if wiringpi.digitalRead(65):\n self.lcd_65.display(1)\n else:\n self.lcd_65.display(0)\n\n if wiringpi.digitalRead(66):\n self.lcd_66.display(1)\n else:\n self.lcd_66.display(0)\n\n if wiringpi.digitalRead(81):\n self.lcd_81.display(1)\n else:\n self.lcd_81.display(0)\n\n if wiringpi.digitalRead(82):\n self.lcd_82.display(1)\n else:\n self.lcd_82.display(0)\n\n if wiringpi.digitalRead(212): #break beam sesnor wiringpi.digitalRead(15):\n self.lcd_15.display(1)\n else:\n self.lcd_15.display(0)\n\n def heartbeat(self):\n global watchdog\n watchdog^=True\n if watchdog:\n self.lcd_0.display(1)\n wiringpi.digitalWrite(PIFACE_LED7,ON)\n else:\n self.lcd_0.display(0)\n wiringpi.digitalWrite(PIFACE_LED7,OFF)\n\nif __name__ == '__main__':\n\n #check what hardware is connected\n i2cutil.check_i2c_devices()\n\n #initalize the io hardware interfaces here so they can be used by the UI and the FSM !!!!\n expander.init()\n piface.init()\n\n #piFace SPI interface\n conveyor1 = conveyor.Conveyor()\n\n '''\n\n #Create instance objects from class definitions in hardware that exists\n if i2cutil.is_connected_to_device(0x29):\n tof_sensor1 = tof.VL6180X(address=0x29, debug=False)\n tof_sensor1.default_settings()\n\n if i2cutil.is_connected_to_device(0x41):\n ejector1 = ejector.Ejector(\"Bin-1\", 0x41, 1)\n\n if i2cutil.is_connected_to_device(0x60):\n stepper1 = stepper.Stepper(\"Bin-1\", 0x60, 1, 65, 66, 81)\n'''\n #create and instance of the dispenser class\n dispenserUSB0 = dispenser.Dispenser(dispenser.connect_to_serial_port('/dev/ttyUSB0'))\n\n\n #create user interface150\n app = QtWidgets.QApplication(sys.argv)\n dialog = QtWidgets.QTabWidget()\n prog = MyGuiProgram(dialog)\n\n dialog.show()\n\n #setup timers to run input updates\n timer=QtCore.QTimer()\n timer.timeout.connect(prog.heartbeat)\n timer.start(500)\n\n #interface updates\n iotimer = QtCore.QTimer()\n 
iotimer.timeout.connect(prog.io_polling)\n iotimer.start(50)\n\n #regsiter software e-stop\n atexit.register(swEmergencyStop)\n\n #start the whole thing\n sys.exit(app.exec_())","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":19343,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"244051829","text":"import re\n\nREGEX = {\n \"class_name\": re.compile(r'^[A-Z]\\w*$'),\n \"method_signature\": re.compile(r'^([^(){}]+)\\((.*)\\)$'),\n \"aggregation\": re.compile('^<>([\\d*]?)-+(\\w*)-*([\\d*]?)>$'), # A <>-wings-> B\n \"inheritance\": re.compile('^<<$') # <<\n}\n\n#re.compile(r'^ $')\n\n\"\"\"\nA REGEX or regular expression is a special text string for describing a search pattern.\nFor example, you could use the regular expression \\b[A-Z0-9._%+-]+@[A-Z0-9.-]+\\.[A-Z]{2,6}\\b to search for an email address.\n\"\"\"\n","sub_path":"prexel/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":500,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"210651627","text":"from django.shortcuts import render, redirect\nfrom nbanews import models\nfrom django.http import HttpResponse\nfrom django.http import JsonResponse\nfrom django.core import serializers\nfrom django.forms.models import model_to_dict\n\n# Create your views here.\n\ndef index(request):\n # nbanews = models.NBANewsModel.objects.all()\n nbanews = models.NBANewsModel.objects.order_by('-id').values('id', 'title','feature_pic','created_at')\n return render(request, \"index.html\", locals())\n\ndef detail(request, newid=None):\n resp = {}\n try:\n post = models.NBANewsModel.objects.get(id=newid) # 取得新聞內容\n new = {}\n new['id'] = post.id\n new['feature_pic'] = post.feature_pic\n new['title'] = post.title\n new['content'] = post.content\n new['created_at'] = post.created_at\n\n resp['status'] = 'success'\n resp['data'] = new\n resp['message'] = '成功撈取訊息'\n except models.NBANewsModel.DoesNotExist:\n resp['status'] = 'error'\n resp['message'] = '找不到資料'\n return JsonResponse(resp)\n\ndef checkLatest(request,newscount=None):\n nbanews = models.NBANewsModel.objects.order_by('-id').all()\n\n resp = {}\n resp['status'] = 'success'\n resp['data'] = []\n resp['message'] = '有新的焦點新聞'\n\n if nbanews.count() > int(newscount):\n latestnews = nbanews[:nbanews.count()-int(newscount)]\n # return HttpResponse(latestnews[0].content)\n for latestnew in latestnews:\n tmp = {\n 'id': latestnew.id,\n 'title': latestnew.title,\n 'feature_pic': latestnew.feature_pic,\n 'content': latestnew.content,\n 'created_at': latestnew.created_at,\n }\n resp['data'].append(tmp)\n else:\n resp['status'] = 'error'\n resp['data'] = []\n resp['message'] = '沒有新的焦點新聞'\n\n return JsonResponse(resp)","sub_path":"nbanews/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1957,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"3238230","text":"#!/usr/bin/env python\n# coding: utf-8\nfrom chart.models import FiveIndexByMonth\nimport pandas as pd\nimport json\nimport datetime\ndef count_month(y,m):\n return ((y-2001)*12+m-1)\n\n# 我們的建議\ndef draw_predict(num):\n data = pd.DataFrame(list(FiveIndexByMonth.objects.all().values()))\n total = 10\n monthly_Settlement = []\n for i in range(num,data.shape[0]-1):\n if data[\"cycle\"][i+1] == 1:\n MXWD, MXEF, CSIYHYI, JPGCCOMP, SBWBL = 0.25,0.25,0.1,0.35,0.05\n if data[\"cycle\"][i+1] == 2:\n MXWD, MXEF, CSIYHYI, JPGCCOMP, 
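
In the conveyor main.py record above, EdgeTrigger's own comment says it should "trigger a 0 to 1 transition only", yet the condition if value < self.value: fires on a 1→0 transition, and the very first sample raises TypeError on Python 3 because self.value starts as None. A sketch that matches the comment, assuming rising-edge semantics were wanted:

    class EdgeTrigger(object):
        def __init__(self, callback):
            self.value = None
            self.callback = callback

        def __call__(self, value):
            # first sample only seeds the state; afterwards fire on 0 -> 1
            if self.value is not None and value > self.value:
                self.callback(self.value, value)
            self.value = value
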
SBWBL = 0.3,0.5,0.1,0.1,0\n if data[\"cycle\"][i+1] == 3:\n MXWD, MXEF, CSIYHYI, JPGCCOMP, SBWBL = 0.2,0.3,0.05,0.4,0.05\n if data[\"cycle\"][i+1] == 4:\n MXWD, MXEF, CSIYHYI, JPGCCOMP, SBWBL = 0,0,0,0.2,0.8\n total = total * ( MXWD*(data[\"index_1\"][i+1]/data[\"index_1\"][i])+\n MXEF*(data[\"index_2\"][i+1]/data[\"index_2\"][i])+\n CSIYHYI*(data[\"index_3\"][i+1]/data[\"index_3\"][i])+\n JPGCCOMP*(data[\"index_4\"][i+1]/data[\"index_4\"][i])+\n SBWBL*(data[\"index_5\"][i+1]/data[\"index_5\"][i]))\n monthly_Settlement.append(total)\n return monthly_Settlement\n\n# 保守型\ndef draw_conservative(num):\n data = pd.DataFrame(list(FiveIndexByMonth.objects.all().values()))\n total = 10\n monthly_Settlement = []\n MXWD, MXEF, CSIYHYI, JPGCCOMP, SBWBL = 0.05,0.05,0.05,0.2,0.65\n for i in range(num,data.shape[0]-1):\n total = total * ( MXWD*(data[\"index_1\"][i+1]/data[\"index_1\"][i])+\n MXEF*(data[\"index_2\"][i+1]/data[\"index_2\"][i])+\n CSIYHYI*(data[\"index_3\"][i+1]/data[\"index_3\"][i])+\n JPGCCOMP*(data[\"index_4\"][i+1]/data[\"index_4\"][i])+\n SBWBL*(data[\"index_5\"][i+1]/data[\"index_5\"][i]))\n monthly_Settlement.append(total)\n return monthly_Settlement\ndef draw_fifty(num):\n data = pd.DataFrame(list(FiveIndexByMonth.objects.all().values()))\n total = 10\n monthly_Settlement = []\n MXWD, SBWBL = 0.5, 0.5\n for i in range(num,data.shape[0]-1):\n total = total * ( MXWD*(data[\"index_1\"][i+1]/data[\"index_1\"][i])+\n SBWBL*(data[\"index_5\"][i+1]/data[\"index_5\"][i]))\n monthly_Settlement.append(total)\n return monthly_Settlement\n\n# def get_json(num,monthly_Settlement,data_name):\n# data_list=[]\n# for i in range(0,len(monthly_Settlement)):\n# record=[ datetime.datetime(2001,i+num,1).strftime(\"%f\") , monthly_Settlement[i] ]\n# data_json.append(record)\n# res ={name:data_name,data:data_list}\n\n# return res","sub_path":"chart/cal_chart.py","file_name":"cal_chart.py","file_ext":"py","file_size_in_byte":2692,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"214251823","text":"# coding:iso-8859-9 Türkçe\r\n# p_14002.py: Miraslayan metodlarının miras metodlarını override/esgeçme örneği.\r\n\r\nclass Kişi:\r\n def __init__ (self, ad, soyad, yıl):\r\n self.ad = ad\r\n self.soyad = soyad\r\n self.yıl = yıl\r\n def __str__ (self): return self.ad + \" \" + self.soyad + \", \" + str (2019 - self.yıl)\r\n\r\n\r\nclass Personel (Kişi): # Miras...\r\n def __init__ (self, ad, soyad, yıl, pno):\r\n super().__init__ (ad, soyad, yıl) # Override/esgeçme Kişi__init__\r\n self.pno = pno\r\n def __str__ (self): return super().__str__() + \"; \" + self.pno # Override/esgeçme Kişi__str__\r\n\r\n\r\nx1 = Kişi (\"M.Nihat\", \"Yavaş\", 1957)\r\nx2 = Kişi (\"Z.Nihal\", \"Candan\", 1955)\r\n\r\ny1 = Personel (\"M.Ali\", \"Göktürk\", 2010, \"20190429-001\")\r\ny2 = Personel (\"Atilla\", \"Göktürk\", 1982, \"20190429-051\")\r\n\r\nprint (\"Ad soyad ve yaş:\", x1)\r\nprint (\"Ad soyad ve yaş:\", x2)\r\nprint()\r\nprint (\"Ad soyad, yaş ve personel no:\", y1)\r\nprint (\"Ad soyad, yaş ve personel no:\", y2)\r\n\r\n\"\"\"Çıktı:\r\n>python p_14002.py\r\nAd soyad ve yaş: M.Nihat Yavaş, 62\r\nAd soyad ve yaş: Z.Nihal Candan, 64\r\n\r\nAd soyad, yaş ve personel no: M.Ali Göktürk, 9; 20190429-001\r\nAd soyad, yaş ve personel no: Atilla Göktürk, 37; 20190429-051\r\n\"\"\"","sub_path":"Bernd Klein (520) ile 
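
The commented-out get_json helper at the end of cal_chart.py above would fail if re-enabled: it appends to data_json but defines data_list, builds the result dict from bare names, and datetime.datetime(2001, i+num, 1) raises ValueError once i+num passes 12. A corrected sketch, assuming the goal is (timestamp, value) pairs starting num months after January 2001 (the inverse of count_month above):

    import datetime

    def get_json(num, monthly_settlement, data_name):
        data_list = []
        for i, value in enumerate(monthly_settlement):
            years, month = divmod(num + i, 12)  # roll months over into years
            record = [datetime.datetime(2001 + years, month + 1, 1).timestamp(), value]
            data_list.append(record)
        return {'name': data_name, 'data': data_list}
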
Python/p_14002.py","file_name":"p_14002.py","file_ext":"py","file_size_in_byte":1262,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"424291928","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[1]:\n\n\nimport matplotlib.pyplot as plt\n\n\n# In[2]:\n\n\n\n\n\n# In[3]:\n\n\ndef ptetaplot(ptbins,etabins,data,ax,title):\n etabinstext=[]\n ptbinstext=[]\n for i in range(len(ptbins)):\n if i==len(ptbins)-1:\n ptbinstext.append('overflow')\n continue\n ptbinstext.append(str(ptbins[i])+'-'+str(ptbins[i+1]))\n for i in range(len(etabins)):\n if i==len(etabins)-1:\n etabinstext.append('overflow')\n continue\n etabinstext.append(str(etabins[i])+'-'+str(etabins[i+1]))\n import seaborn as sns\n ptbinstext.reverse()\n import pandas as pd\n df = pd.DataFrame(data=data, columns=etabinstext, index=ptbinstext)\n df=df[::-1].reset_index(drop=True)\n sns.heatmap(df, square=False,ax=ax,cmap=\"Blues\",annot=True,cbar=False)\n ax.set_yticklabels(labels=ptbinstext,va='center')\n ax.set_ylabel(\"$p_T$ bins(GeV)\")\n ax.set_xlabel(\"$\\eta$ bins\")\n ax.set_title(title)\n \ndef ptetaRwtTested(Sigdf,Bkgdf,ptbins,etabins,Wt,NWt,ele_pt='ele_pt',scl_eta='scl_eta',od='.'):\n print(\"Reweighting Now...\")\n Sdata=[]\n Bdata=[]\n Wtdata=[]\n Sigdf[NWt]=1\n Bkgdf[NWt]=1\n for i in range(len(ptbins)):\n Bdatai=[]\n Sdatai=[]\n Wtdatai=[]\n for j in range(len(etabins)):\n if i==(len(ptbins)-1) and j<(len(etabins)-1):\n sel=ele_pt+'>@ptbins[@i] & '+scl_eta+'>@etabins[@j] & '+scl_eta+'<@etabins[@j+1]'\n Bsum=Bkgdf.query(sel)[Wt].sum()\n Bdatai.append(Bsum)\n Ssum=Sigdf.query(sel)[Wt].sum()\n Sdatai.append(Ssum)\n #print(\"BSum \"+str(Bsum))\n #print(\"SSum \"+str(Ssum))\n if Bsum>0 and Ssum>0:\n #print(\"Entering1\")\n Wtdatai.append(Ssum/Bsum)\n Bkgdf.loc[sel,NWt]=Ssum/Bsum\n else:\n #print(\"Entering2\")\n Wtdatai.append(1)\n Bkgdf.loc[sel,NWt]=1\n continue \n if i<(len(ptbins)-1) and j==(len(etabins)-1):\n sel=ele_pt+'>@ptbins[@i] & '+ele_pt+'<=@ptbins[@i+1] & '+scl_eta+'>@etabins[@j]'\n Bsum=Bkgdf.query(sel)[Wt].sum()\n Bdatai.append(Bsum)\n Ssum=Sigdf.query(sel)[Wt].sum()\n Sdatai.append(Ssum)\n #print(\"BSum \"+str(Bsum))\n #print(\"SSum \"+str(Ssum))\n if Bsum>0 and Ssum>0:\n #print(\"Entering1\")\n Wtdatai.append(Ssum/Bsum)\n Bkgdf.loc[sel,NWt]=Ssum/Bsum\n else:\n #print(\"Entering2\")\n Wtdatai.append(1)\n Bkgdf.loc[sel,NWt]=1\n continue \n if i==(len(ptbins)-1) and j==(len(etabins)-1):\n sel=ele_pt+'>@ptbins[@i] & '+scl_eta+'>@etabins[@j]'\n Bsum=Bkgdf.query(sel)[Wt].sum()\n Bdatai.append(Bsum)\n Ssum=Sigdf.query(sel)[Wt].sum()\n Sdatai.append(Ssum)\n #print(\"BSum \"+str(Bsum))\n #print(\"SSum \"+str(Ssum))\n if Bsum>0 and Ssum>0:\n #print(\"Entering1\")\n Wtdatai.append(Ssum/Bsum)\n Bkgdf.loc[sel,NWt]=Ssum/Bsum\n else:\n #print(\"Entering2\")\n Wtdatai.append(1)\n Bkgdf.loc[sel,NWt]=1\n continue \n sel=ele_pt+'>@ptbins[@i] & '+ele_pt+'<=@ptbins[@i+1] & scl_eta>@etabins[@j] & '+scl_eta+'<@etabins[@j+1]'\n Bsum=Bkgdf.query(sel)[Wt].sum()\n Bdatai.append(Bsum)\n Ssum=Sigdf.query(sel)[Wt].sum()\n Sdatai.append(Ssum)\n #print(\"BSum \"+str(Bsum))\n #print(\"SSum \"+str(Ssum))\n if Bsum>0 and Ssum>0:\n #print(\"Entering1\")\n Wtdatai.append(Ssum/Bsum)\n Bkgdf.loc[sel,NWt]=Ssum/Bsum\n else:\n #print(\"Entering2\")\n Wtdatai.append(1)\n Bkgdf.loc[sel,NWt]=1\n Bdata.append(Bdatai)\n Sdata.append(Sdatai)\n Wtdata.append(Wtdatai)\n Sigdf[NWt]=Sigdf[Wt]\n Bkgdf[NWt]*=Bkgdf[Wt]\n BdataWtd=[]\n for Wtdatal, Bdatal in zip(Wtdata,Bdata):\n BdataWtd.append([a * b for a, 
b in zip(Wtdatal, Bdatal)])\n fig, axes = plt.subplots(1,4,figsize=(20,5))\n ptetaplot(ptbins,etabins,Sdata,axes[0],\"Signal Bins\")\n ptetaplot(ptbins,etabins,Bdata,axes[1],\"Background Bins\")\n ptetaplot(ptbins,etabins,BdataWtd,axes[2],\"Background Bins Reweighted\")\n ptetaplot(ptbins,etabins,Wtdata,axes[3],\"Background Bins per event weight\")\n plt.savefig(od+\"/ReweightingPlot.pdf\")\n plt.savefig(od+\"/ReweightingPlot.pdf\")\n return Sigdf[NWt],Bkgdf[NWt]\n\n\n# In[4]:\n\n\n\n\n\n# In[5]:\n\n\ndef dataptetastrip(data1):\n data=data1\n for ptlist in data:\n ptlist[-2]=ptlist[-2]+ptlist[-1]\n ptlist.pop(-1)\n data[-2] = [sum(i) for i in zip(data[-2], data[-1])]\n data.pop(-1)\n return data\n\n\ndef df_pteta_rwt(Mdf,\n label,\n returnOnlyPosWeights=0, \n ptw = [10,30,40,50,200,10000], \n etaw = [-1.5,-1.0,1.0,1.5], \n eta='', \n pt='',\n SumWeightCol=\"wt\",\n NewWeightCol=\"NewWt\",target=1,cand=0):\n #Mdf=Ndf.copy()\n ptwt = [1.0]*len(ptw)\n etawt = [1.0]*len(etaw)\n \n for k in range(len(etaw)):\n if k == len(etaw)-1:\n continue\n for i in range(len(ptw)):\n if i == len(ptw)-1:\n continue\n\n targetSum = Mdf.loc[(Mdf[pt] ptw[i]) & (Mdf[eta] etaw[k]) &(Mdf[label]==target),SumWeightCol].sum()\n candSum = Mdf.loc[(Mdf[pt] ptw[i]) & (Mdf[eta] etaw[k]) &(Mdf[label]==cand),SumWeightCol].sum()\n\n #print('Number of xsec events in signal for pt '+str(ptw[i])+' to '+str(ptw[i+1])+ 'before weighing = '+str(targetSum))\n #print('Number of xsec events in background for pt '+str(ptw[i])+' to '+str(ptw[i+1])+ 'before weighing = '+str(candSum))\n\n if candSum>0 and targetSum>0:\n ptwt[i]=candSum/(targetSum)\n else:\n ptwt[i]=0\n Mdf.loc[(Mdf[pt] ptw[i]) \n & (Mdf[eta] etaw[k]) \n &(Mdf[label]==cand),\"rwt\"] = 1.0\n Mdf.loc[(Mdf[pt] ptw[i]) \n & (Mdf[eta] etaw[k]) \n &(Mdf[label]==target),\"rwt\"] = ptwt[i]\n\n Mdf.loc[:,NewWeightCol] = Mdf.loc[:,\"rwt\"]*Mdf.loc[:,SumWeightCol]\n\n MtargetSum = Mdf.loc[Mdf[label]==target,NewWeightCol].sum()\n McandSum = Mdf.loc[Mdf[label]==cand,NewWeightCol].sum()\n print('Number of events in signal after weighing = '+str(MtargetSum))\n print('Number of events in background after weighing = '+str(McandSum))\n\n if returnOnlyPosWeights==0: return 0\n else:\n return ptwt\n \n \n \ndef df_pteta_rwt(Mdf,\n label,\n returnOnlyPosWeights=0,\n ptw = [10,30,40,50,200,10000],\n etaw = [-1.5,-1.0,1.0,1.5],\n eta='',\n pt='',\n SumWeightCol=\"wt\",\n NewWeightCol=\"NewWt\",cand=\"\",\n Classes=[\"\"]):\n Mdf[\"rwt\"]=1\n Mdf[NewWeightCol]=1\n ptwt = [1.0]*len(ptw)\n etawt = [1.0]*len(etaw)\n\n for k in range(len(etaw)):\n if k == len(etaw)-1:\n continue\n for i in range(len(ptw)):\n if i == len(ptw)-1:\n continue\n for target in Classes:\n if target != cand:\n targetSum = Mdf.loc[(Mdf[pt] ptw[i])\n & (Mdf[eta] etaw[k])\n &(Mdf[label]==target),SumWeightCol].sum()\n candSum = Mdf.loc[(Mdf[pt] ptw[i])\n & (Mdf[eta] etaw[k])\n &(Mdf[label]==cand),SumWeightCol].sum()\n\n #print('Number of xsec events in signal for pt '+str(ptw[i])+' to '+str(ptw[i+1])+ 'before weighing = '+str(targetSum))\n #print('Number of xsec events in background for pt '+str(ptw[i])+' to '+str(ptw[i+1])+ 'before weighing = '+str(candSum))\n\n if candSum>0 and targetSum>0:\n ptwt[i]=candSum/(targetSum)\n else:\n ptwt[i]=0\n\n Mdf.loc[(Mdf[pt] ptw[i])\n & (Mdf[eta] etaw[k])\n &(Mdf[label]==cand),\"rwt\"] = 1.0\n Mdf.loc[(Mdf[pt] ptw[i])\n & (Mdf[eta] etaw[k])\n &(Mdf[label]==target),\"rwt\"] = ptwt[i]\n\n Mdf.loc[:,NewWeightCol] = Mdf.loc[:,\"rwt\"]*Mdf.loc[:,SumWeightCol]\n\n for justclass in Classes:\n Sum 
= Mdf.loc[Mdf[label]==justclass,NewWeightCol].sum()\n print(f'Number of events in {justclass} after weighing = '+str(Sum))\n\n return Mdf[NewWeightCol]\n","sub_path":"Tools/ptetaRwt.py","file_name":"ptetaRwt.py","file_ext":"py","file_size_in_byte":9530,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"593755230","text":"import time, pandas\nfrom typing import Callable, Dict, Tuple\nfrom matplotlib import pyplot as plot\n\ndef benchmark(trials:int):\n \"\"\" Testing decorator for the fibonacci class' methods \"\"\"\n def benchmark_method(function:Callable[[int],int]) -> Callable[[int],Tuple[float,str]]:\n def time_wrapper(*args) -> Tuple[float,str]:\n \"\"\" Return the time taken to run a fibonacci method in microseconds \"\"\"\n t1 = time.time()\n for _ in range(trials):\n function(*args)\n return ((time.time()-t1)/trials) * 1e6, function.__name__\n return time_wrapper\n return benchmark_method\n\nclass fibonacci():\n \"\"\" A class to explore one basic idea of Dynamic Programming, memorization of otherwise repeated work. \"\"\"\n\n def __init__(self):\n self.known : Dict[int,int]= dict()\n\n def clear(self):\n self.known = dict()\n\n \n @staticmethod\n def fib(n:int) -> int:\n \"\"\" Calculate the fibonacci number at the nth index using the basic exponential time algorithm. \\n\n Time complexity, T(n), is (I'm told) equal to Phi^n, or much simpler to verify, T(n) >= 2^(n/2) \n \"\"\"\n if n<= 2:\n return 1\n else:\n return fibonacci.fib(n-1) + fibonacci.fib(n-2)\n\n @benchmark(10000)\n def top_down_mem_fib(self, n:int) -> int:\n \"\"\" Calculate the nth fibonacci number using a top down memorizing algorithm. \\n\n Reduces time complexity to O(n) \n \"\"\"\n if n in self.known.keys():\n return self.known[n]\n \n if n <= 2:\n return 1\n\n ret = self.top_down_mem_fib(n-1) + self.top_down_mem_fib(n-2)\n self.known[n] = ret\n return ret\n\n ## The bottom up apporach avoids recursion, avoiding the standard python protection that\n ## stops any program reaching a high level of recursions \n ## (1000 calls standard, but there are ways to subvert it)\n @benchmark(10000)\n def bottom_up_mem_fib(self, n:int) -> int:\n \"\"\" Calculate the nth fib number using a memorizing algorithm with a single function call \"\"\"\n if n in self.known.keys():\n return self.known[n]\n\n for i in range(n + 1):\n if i <= 2:\n r = 1\n else:\n r = self.known[i-1] + self.known[i-2]\n self.known[i] = r\n\n return self.known[n]\n\n @benchmark(10000)\n def pruned_bottom_up_mem_fib(self, n:int) -> int:\n \"\"\" Calculate the nth fib number using linear time with less function calls, \n while restricting the memory to the last two values found. 
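
Every pandas selector in the ptetaRwt.py record above has the shape Mdf.loc[(Mdf[pt] ptw[i]) & (Mdf[eta] etaw[k]) & ...] — a column sitting next to a bin edge with no operator between them, because the comparisons and upper-bound terms were consumed as pseudo-HTML tags. Given that both loops stop one short of the last edge, the selection was almost certainly a window per (pt, eta) bin; a reconstructed sketch (the strict/inclusive choice of each bound is an assumption):

    # reconstructed bin mask; the '<'/'>' comparisons were stripped from the source
    in_bin = ((Mdf[pt] > ptw[i]) & (Mdf[pt] < ptw[i + 1]) &
              (Mdf[eta] > etaw[k]) & (Mdf[eta] < etaw[k + 1]))
    targetSum = Mdf.loc[in_bin & (Mdf[label] == target), SumWeightCol].sum()
    candSum = Mdf.loc[in_bin & (Mdf[label] == cand), SumWeightCol].sum()

(The record also defines df_pteta_rwt twice — the second definition silently shadows the first — and calls plt.savefig on the same path twice in a row.)
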
\n \"\"\"\n if n in self.known.keys():\n return self.known[n]\n\n for i in range(n + 1):\n if i <= 2:\n r = 1\n else:\n r = self.known[i-1] + self.known[i-2]\n del self.known[i-2]\n\n self.known[i] = r\n\n return self.known[n]\n\ndef test():\n \"\"\" Render plots showing the performance of the different approaches to calculating the fibonacci sequence \"\"\"\n df = pandas.DataFrame()\n fib = fibonacci()\n for N in [25,50,100,200, 400]:\n time, name = fib.top_down_mem_fib(N)\n df.loc[N,name] = time\n fib.clear()\n time, name = fib.bottom_up_mem_fib(N)\n df.loc[N,name] = time\n fib.clear()\n time, name = fib.pruned_bottom_up_mem_fib(N)\n df.loc[N,name] = time\n fib.clear()\n\n df.plot.line(ylim=0)\n plot.rcParams[\"font.size\"] = 15\n plot.xlabel(\"N\")\n plot.ylabel(\"Microseconds\")\n plot.savefig(\"top_down_dominates.png\")\n df.drop('top_down_mem_fib',axis=1).plot.line(ylim=0)\n plot.rcParams[\"font.size\"] = 15\n plot.xlabel(\"N\")\n plot.ylabel(\"Microseconds\")\n plot.savefig(\"a_fair_fight.png\")\n print('This one example demonstrates the gains that can be had in reducing function calls.')\n print('A hidden benefit is the amount of memory being saved in the stack as well, regardless of pruning.')\n\n \n\n## TODO: add graph showing memory taken\nif __name__ == \"__main__\":\n test()\n\n\n\n\n","sub_path":"DynamicFibonacci.py","file_name":"DynamicFibonacci.py","file_ext":"py","file_size_in_byte":4105,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"381125878","text":"from pathlib import Path\nimport os\n\nclass Entity(object):\n def __init__(self, data, work_root):\n self.__dict__ = data\n self.work_root = None\n self.id = None\n self.name = None\n self.work_dir = None\n\n def createDir(self):\n existing_dir = self._getDir()\n current_dir = Path(f'{self.work_root}/{self.name}')\n id_file = Path(f'{self.work_root}/{self.name}/.ID-{self.id}')\n self.work_dir = current_dir\n if not existing_dir :\n current_dir.mkdir(exist_ok=True)\n current_dir.chmod(0o750)\n os.chown(current_dir.resolve(),10619,12964) #sboymans:cuppen\n id_file.touch()\n id_file.chmod(0o700)\n os.chown(id_file.resolve(),10619,12964) #sboymans:cuppen\n elif existing_dir != current_dir:\n existing_dir.rename(current_dir)\n\n return current_dir\n\n def _getDir(self):\n p = Path(self.work_root)\n for d in p.iterdir():\n id_file = Path(f'{d}/.ID-{self.id}')\n if id_file.exists(): return d\n # (id,name) = d.name.split(':',1)\n # if int(id) == int(self.id): return d\n return None\n","sub_path":"elan_objects/entity.py","file_name":"entity.py","file_ext":"py","file_size_in_byte":1190,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"626100149","text":"'''\n# 훈련속도 높이는 4가지 방법\n1. 연결 가중치에 좋은 초기화 전략 사용하기\n2. 좋은 활성화 함수 사용하기\n3. 배치 정규화 사용\n4. 사전 훈련된 네트워크의 일부 재사용 ( 보조 작업 or 비지도 학습을 사용하여 만들 수 있는)\n'''\nfrom tensorflow import keras\n###고속 옵티마이저\n\n#1. 모멘텀 최적화\n# : 경사 하강법 보다 10배 빠르게 진행됨\noptimizer = keras.optimizer.SGD(lr = 0.001, momentum = 0.9) # 일반적인 값 0.9\n\n\n#2. 네스테로프 가속 경사\n# : 일반적으로 기본 모멘텀 최적화보다 훈련 속도가 빠름\noptimizer = keras.optimizers.SGD(lr = 0.001, momentum = 0.9, nesterov = True)\n\n\n#3. AdaGrad\n# : 가장 가파른 차원을 따라 gradient 벡터의 스케일을 감소시킴\n# : 학습률을 감소시키지만 경사가 완만한 차원보다 가파른 차원에 대해 더 빠르게 감소한다. = 적응적 학습률\n# 단점 : 간단한 2차방적식 문제에 대해서는 잘 작동하지만 훈련할 때 너무 일찍 멈추는 경우가 종종 있음\n# => 심층 신경망에는 사용 X\n\n\n#4. 
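
In DynamicFibonacci.py above, @benchmark(10000) is applied directly to top_down_mem_fib, but that method recurses through self.top_down_mem_fib(...) — every recursive call therefore re-enters the timing wrapper, gets a (seconds, name) tuple back instead of an int, and repeats its whole subtree 10000 times. The bottom-up variants only escape because they never recurse. A sketch that keeps the wrapper's (time, name) contract while timing an undecorated worker:

    @benchmark(10000)
    def top_down_mem_fib(self, n: int) -> int:
        return self._top_down(n)

    def _top_down(self, n: int) -> int:
        # plain memoized recursion; no timing wrapper on the recursive path
        if n in self.known:
            return self.known[n]
        if n <= 2:
            return 1
        self.known[n] = self._top_down(n - 1) + self._top_down(n - 2)
        return self.known[n]
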
RMSProp\n# : (훈련 시작부터의 모든 gradient가 아닌) 가장 최근 반복에서 비롯된 gradient만 누적함\n# : Adam이 나오기 전까지 연구자들이 가장 선호하는 최적화 알고리즘이였음\noptimizer = keras.optimizers.RMSProp(lr = 0.001, rho = 0.9)\n\n\n#5. Adam \n# : Adam = adaptive moment estimation(적응적 모멘트 추정)\n# : 모멘텀 최적화 + RMSProp\n# : 모멘텀 최적화 처럼 gradient의 지수 감소 평균 따르고 / RMSProp처럼 gradient 제곱의 지수 감소된 평균 따름\noptimizer = keras.optimizers.Adam(lr = 0.001, beta_1 = 0.9, beta_2 = 0.9)\n\n#5-1. AdaMax\n\n#5-2. Nadam\n# : Adam + 네스테로프 기법\n# : 종종 Adam보다 조금 더 빠르게 수렴됨","sub_path":"homework/Hands_on/chapter11/p434_optimizer.py","file_name":"p434_optimizer.py","file_ext":"py","file_size_in_byte":1862,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"309871249","text":"# -*- coding: utf-8 -*-\n\nimport os\nimport time, datetime\nimport functools\n\nfrom google.protobuf.message import Message\nfrom google.protobuf.pyext._message import RepeatedCompositeContainer\nimport yaml\n\n\nclass Mail(object):\n\n def __init__(self, api_id, api_type, **kw):\n\n if 'handler_id' not in kw:\n kw['handler_id'] = '{}_{}'.format(api_id, api_type)\n\n if 'sync' not in kw:\n kw['sync'] = False\n\n if 'ret_code' not in kw:\n kw['ret_code'] = 0\n\n kw.update({\n 'api_id': api_id,\n 'api_type': api_type\n })\n\n self._kw = kw\n\n def __getitem__(self, key):\n return self._kw[key]\n\n def __repr__(self):\n return repr(self._kw)\n\n def get(self, key, default=None):\n return self._kw.get(key, default)\n\n\ndef timeit(func):\n @functools.wraps(func)\n def wrapper(*args, **kwargs):\n t0_ = time.time()\n ret = func(*args, **kwargs)\n print('%s in %.6f secs' % (\n func.__name__, time.time() - t0_))\n return ret\n return wrapper\n\n\ndef load_config(path=None):\n if path is None:\n dirname = os.path.dirname(__file__)\n path = os.path.join(dirname, 'config.yaml')\n with open(path, 'r') as f:\n conf = yaml.load(f)\n return conf\n\n\ndef message2dict_(msg, including_default_value_fields=True):\n \"\"\"\n Convert protobuf message to dict\n \"\"\"\n\n dct = {}\n\n if isinstance(msg, Message):\n\n if including_default_value_fields:\n for field in msg.DESCRIPTOR.fields:\n dct[field.name] = field.default_value\n\n fields = msg.ListFields()\n for field, value in fields:\n dct[field.name] = message2dict(value)\n\n return dct\n\n elif isinstance(msg, RepeatedCompositeContainer):\n return list(map(message2dict, msg))\n\n else:\n return msg\n\n\n@timeit\ndef message2dict(msg, including_default_value_fields=True):\n \"\"\"\n Convert protobuf message to dict\n \"\"\"\n # return msg\n\n dct = {}\n\n if isinstance(msg, Message):\n\n for field in msg.DESCRIPTOR.fields:\n name = field.name\n dct[name] = message2dict(getattr(msg, name))\n \n return dct\n\n elif isinstance(msg, RepeatedCompositeContainer):\n return list(map(message2dict, msg))\n\n else:\n return msg \n\ndef message2tuple(msg, kind):\n \"\"\"\n Convert protobuf message to namedtuple\n Doesn't support nested messages\n \"\"\"\n\n dct = {}\n\n for field in msg.DESCRIPTOR.fields:\n name = field.name\n dct[name] = getattr(msg, name)\n \n ret = kind(**dct)\n \n return ret\n\n\ndef int2datetime(n_date=None, n_time=None, utc=False):\n if n_date is None and n_time is None:\n raise ValueError\n elif n_date and n_time is None:\n dt = datetime.datetime.strptime('{}'.format(n_date), '%Y%m%d')\n elif n_date is None and n_time:\n dt = datetime.datetime.strptime('{}'.format(n_time), '%H%M%S%f').time()\n else:\n dt = datetime.datetime.strptime(\n '{}{}'.format(n_date, n_time),\n '%Y%m%d%H%M%S%f')\n if utc:\n return 
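
A few identifiers in the optimizer notes above do not exist as written: keras.optimizer.SGD is missing the s in optimizers, the class is spelled RMSprop rather than RMSProp, and Adam's second-moment decay is conventionally beta_2=0.999, not 0.9. Corrected one-liners (tf.keras 2.x, where lr was renamed learning_rate):

    from tensorflow import keras

    sgd_momentum = keras.optimizers.SGD(learning_rate=0.001, momentum=0.9)
    sgd_nesterov = keras.optimizers.SGD(learning_rate=0.001, momentum=0.9, nesterov=True)
    rmsprop = keras.optimizers.RMSprop(learning_rate=0.001, rho=0.9)
    adam = keras.optimizers.Adam(learning_rate=0.001, beta_1=0.9, beta_2=0.999)
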
dt.astimezone(datetime.timezone.utc)\n return dt\n\n\ndef _convert(ss):\n import re\n return '_'.join(re.findall('[A-Z][^A-Z]*', ss)).upper()","sub_path":"fast_trader/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":3294,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"308295948","text":"from io import BytesIO\nimport Tkinter as tk\nimport urllib # not urllib.request\nfrom PIL import Image, ImageTk\n\nroot = tk.Tk()\nurl = \"http://imgs.xkcd.com/comics/python.png\"\n\nu = urllib.urlopen(url)\nraw_data = u.read()\nu.close()\n\nim = Image.open(BytesIO(raw_data))\nimage = ImageTk.PhotoImage(im)\nlabel = tk.Label(image=image)\nlabel.pack()\nroot.mainloop()","sub_path":"testimage.py","file_name":"testimage.py","file_ext":"py","file_size_in_byte":354,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"490268759","text":"from Tools.scraper_tool import *\nfrom selenium.webdriver.support import expected_conditions as EC\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.support.ui import WebDriverWait\n# from Webdrivers.Personal.WT_Database.emailer import send_email\ncss_selectors = ['#cb-select-all-1','#bulk-action-selector-top','#bulk-action-selector-top > option.hide-if-no-js','#doaction', '#bulk_edit']\nurl = 'https://pruve.org/wp-admin/edit.php?post_type=question&paged=2082'\ndriver.get(url)\ndriver.find_element_by_css_selector('#user_login').send_keys('jacrew')\ndriver.find_element_by_css_selector('#user_pass').send_keys('Paulrevere31!')\ndriver.find_element_by_css_selector('#wp-submit').click()\nwait(2)\nfor i in range(2602,6865):\n url = 'https://pruve.org/wp-admin/edit.php?post_type=question&paged='+str(i)\n driver.get(url)\n # if i % 500 == 0:\n # try:\n # send_email('appiispanen@gmail.com','Server has reached '+str(i), 'hey Drew,\\n Your scraping iteration has on page #'+str(i)+'.\\n Currently on '+url+'.\\n Good luck!')\n # except:\n # print(\"############################# E M A I L F A I L E D. #############################\")\n for css_selector in css_selectors:\n try:\n driver.find_element_by_css_selector(css_selector).click()\n except:\n driver.refresh()\n try:\n driver.find_element_by_css_selector(css_selector).click()\n except:\n print(\"Not working - - \", css_selector, driver.current_url)\n if css_selector == '#bulk_edit':\n try:\n WebDriverWait(driver, 30).until(\n EC.presence_of_element_located((By.CSS_SELECTOR, '#message > p:nth-child(1)'))\n )\n except:\n print(\"Impatient - - \", css_selector, driver.current_url)\n continue\n wait(1)","sub_path":"update.py","file_name":"update.py","file_ext":"py","file_size_in_byte":1884,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"566465560","text":"__author__ = \"sam.diefenbacher\"\n# Collaborated with danny rash\n\nimport random\t\n\ndef main():\n\n\tDeck = []\n\tPlayerAHand = []\n\tPlayerBHand = []\n\tgameCounter = 0\n\n\n\t# Create deck. 
Cards are represented by an integer value\n\tfor x in range(52):\n\t\tDeck.append(x)\n\t\n\t# Shuffle the deck\n\trandom.shuffle(Deck)\n\t\n\t# Deal 1/2 the cards to each player\n\tfor x in range(26):\n\t\tPlayerAHand.append(Deck.pop())\n\t\tPlayerBHand.append(Deck.pop())\n\t\n\t# Main Gameplay\n\t\t\n\twhile len(PlayerAHand) > 0 and len(PlayerBHand) > 0:\n\t\tgameCounter = gameCounter + 1\n\t\tPlayerAHand, PlayerBHand = playRound(PlayerAHand, PlayerBHand)\n\t\tif gameCounter == 1000:\n\t\t\tbreak\n\n\t# End of game\n\n\tprint(\"There were \", gameCounter, \" rounds played\")\n\tprint( )\n\tprint(\"Player A had \",len(PlayerAHand), \"cards, and player B had \",len(PlayerBHand), \"cards played\")\n\ndef playRound(PlayerAHand, PlayerBHand):\n\t\n\tPlayerACard = PlayerAHand.pop()\n\tPlayerBCard = PlayerBHand.pop()\n\t\n\tif getRank(PlayerACard) == getRank(PlayerBCard):\n\t\n\t\tPlayerAHand.append(PlayerACard)\n\t\tPlayerBHand.append(PlayerBCard)\n\t\t# carry the hands returned by WAR back into the round\n\t\tPlayerAHand, PlayerBHand = WAR(PlayerAHand, PlayerBHand)\n\t\n\telse:\n\t\t\n\t\tif getRank(PlayerACard) > getRank(PlayerBCard):\n\t\t\tPlayerAHand.insert(0,PlayerACard)\n\n\t\telse:\n\t\t\tPlayerBHand.insert(0,PlayerBCard)\n\t\n\treturn PlayerAHand, PlayerBHand\n\n\ndef WAR(PlayerA, PlayerB):\n\t\tif len(PlayerA) > 5 and len(PlayerB) > 5:\n\t\t\tPlayerAHand = []\n\t\t\tPlayerBHand = []\n\t\t\tfor x in range(5):\n\t\t\t\tPlayerAHand.append(PlayerA.pop())\n\t\t\t\tPlayerBHand.append(PlayerB.pop())\n\t\t\tif getRank(PlayerAHand[4]) > getRank(PlayerBHand[4]):\n\t\t\t\tPlayerA = PlayerAHand + PlayerBHand + PlayerA\n\t\t\telif getRank(PlayerAHand[4]) < getRank(PlayerBHand[4]):\n\t\t\t\t# player B wins the war and takes all the cards\n\t\t\t\tPlayerB = PlayerAHand + PlayerBHand + PlayerB\n\t\telse:\n\t\t\t\tPlayerA, PlayerB = Loser(PlayerA, PlayerB)\n\n\t\treturn PlayerA, PlayerB\n\ndef Loser(PlayerA, PlayerB):\n\t# Lose Cards\n\t\t\n\t\treturn PlayerA, PlayerB\n\n\t# getRank Function\n\t\ndef getRank(anyCard):\n\treturn anyCard % 13\n\n\nif __name__ == '__main__':\n\tmain()","sub_path":"WarV3.py","file_name":"WarV3.py","file_ext":"py","file_size_in_byte":2014,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"}
+{"seq_id":"81171569","text":"import numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\n# %matplotlib inline\n\nfrom collections import Counter\nimport math\nfrom math import log\n\nimport pprint\n\ndef create_data():\n    datasets = [\n        ['晴','29','85','否','0'],\n        ['晴','26','88','是','0'],\n        ['多云','28','78','否','1'],\n        ['雨','21','96','否','1'],\n        ['雨','20','80','否','1'],\n        ['雨','18','70','是','0'],\n        ['多云','18','65','是','1'],\n        ['晴','22','90','否','0'],\n        ['晴','21','68','否','1'],\n        ['雨','24','80','否','1'],\n        ['晴','24','63','是','1'],\n        ['多云','22','90','是','1'],\n        ['多云','27','75','否','1'],\n        ['雨','21','80','是','0']\n    ]\n    labels = [u'天气',u'温度',u'湿度',u'是否有风',u'是否前往游乐场']\n\n    return datasets,labels\n\n# entropy\ndef calc_ent(datasets):\n    data_length = len(datasets)\n    label_count = {}\n    for i in range(data_length):\n        label = datasets[i][-1]\n        if label not in label_count:\n            label_count[label] = 0\n        label_count[label] += 1\n    ent = -sum([(p/data_length)*log(p/data_length, 2) for p in label_count.values()])\n    return ent\n\n# empirical conditional entropy\ndef cond_ent(datasets, axis=0):\n    data_length = len(datasets)\n    feature_sets = {}\n    for i in range(data_length):\n        feature = datasets[i][axis]\n        if feature not in feature_sets:\n            feature_sets[feature] = []\n        feature_sets[feature].append(datasets[i])\n    cond_ent = sum([(len(p)/data_length)*calc_ent(p) for p in feature_sets.values()])\n    return cond_ent\n\n# information gain\ndef 
info_gain(ent, cond_ent):\n return ent - cond_ent\n\n# def info_gain_train(datasets):\n# # # count = len(datasets[0]) - 1\n# # # ent = calc_ent(datasets)\n# # # best_feature = []\n# # # for c in range(count):\n# # # c_info_gain = info_gain(ent, cond_ent(datasets, axis=c))\n# # # best_feature.append((c, c_info_gain))\n# # # print('特征({}) - info_gain - {:.3f}'.format(labels[c], c_info_gain))\n# # # # 比较大小\n# # # best_ = max(best_feature, key=lambda x: x[-1])\n# # # return '特征({})的信息增益最大,选择为根节点特征'.format(labels[best_[0]])\n\nres1 = -(4/9*math.log(4/9,2)+5/9*math.log(5/9,2))\nres2 = -(4/5*math.log(4/5,2)+1/5*math.log(1/5,2))\nres = 0.940-(9/14*res1+5/14*res2)\n\nprint(res)","sub_path":"01_Python常见基础知识/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":2403,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"32562869","text":"from flask import Flask\nfrom flask import jsonify\nimport feature_extraction\nimport requests\nimport pickle\nimport numpy as np\nimport sklearn\nfrom werkzeug.utils import secure_filename\nfrom flask import (\n Blueprint, flash, g, redirect, render_template, request, session, url_for\n)\nfrom sklearn.preprocessing import StandardScaler\n\napp = Flask(__name__)\nmodel = pickle.load(open('RandomForestClassifier.pkl', 'rb'))\n\n@app.route('/',methods=['GET'])\ndef Home():\n return render_template('index.html')\n\n\nstandard_to = StandardScaler()\n@app.route(\"/predict\", methods=['POST'])\ndef predict():\n if request.method == 'POST':\n url = request.form['url']\n print(url)\n X_new = []\n\n X_input = url\n X_new=feature_extraction.generate_data_set(X_input)\n print(X_new)\n X_new = np.array(X_new).reshape(1,-1)\n print(X_new)\n\n\n prediction = model.predict(X_new)\n print(prediction)\n output = prediction[0] \n print(output)\n if output == 1:\n return render_template('index.html',prediction_text = \"Good Url\")\n else:\n return render_template('index.html',prediction_text1 = \"Malicious Url\")\n# =============================================================================\n# except:\n# return render_template('index.html',prediction_text = \"Close to Phishing Url\")\n# =============================================================================\n\n\n \n\nif __name__==\"__main__\":\n app.run(debug=True)\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1528,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"6719458","text":"# -*- coding: utf-8 -*-\n# Copyright 2017 GIG Technology NV\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n# @@license_version:1.3@@\n\nimport httplib\nimport logging\nimport os\nimport urllib\nimport uuid\n\nimport webapp2\nfrom google.appengine.ext.deferred import deferred\n\nfrom framework.bizz.authentication import login_user, logout_user, get_current_user_id, get_current_session\nfrom framework.bizz.session import is_valid_session\nfrom framework.handlers import render_error_page, 
render_page\nfrom framework.plugin_loader import get_config, get_auth_plugin\nfrom framework.utils import now\nfrom mcfw.consts import MISSING\nfrom mcfw.exceptions import HttpException, HttpBadRequestException\nfrom plugins.its_you_online_auth.bizz.authentication import get_user_scopes_from_access_token, get_jwt, get_redirect_uri\nfrom plugins.its_you_online_auth.bizz.profile import set_user_information\nfrom plugins.its_you_online_auth.bizz.settings import get_organization\nfrom plugins.its_you_online_auth.exceptions.organizations import OrganizationNotFoundException\nfrom plugins.its_you_online_auth.models import OauthState, Profile\nfrom plugins.its_you_online_auth.plugin_consts import NAMESPACE, SOURCE_WEB, SOURCE_APP\nfrom plugins.its_you_online_auth.plugin_utils import get_users_organization\nfrom plugins.its_you_online_auth.to.config import ItsYouOnlineConfiguration\n\n\nclass SigninHandler(webapp2.RequestHandler):\n def get(self):\n session = get_current_session()\n if is_valid_session(session):\n self.redirect('/')\n return\n\n config = get_config(NAMESPACE)\n if not config.login_with_organization:\n self.redirect('/login/continue?%s' % self.request.query)\n return\n render_page(self.response, os.path.join('unauthenticated', 'signin.html'), plugin_name=NAMESPACE)\n\n\nclass LogoutHandler(webapp2.RequestHandler):\n def get(self):\n user_id = get_current_user_id()\n if user_id:\n logout_user(self.response)\n self.redirect('/')\n\n\nclass AppLoginHandler(webapp2.RequestHandler):\n def get(self):\n params = {\n 'source': SOURCE_APP\n }\n config = get_config(NAMESPACE)\n if config.login_with_organization:\n self.redirect('/login/organization?%s' % urllib.urlencode(params))\n else:\n params['organization_id'] = config.root_organization.name\n if config.required_scopes and config.required_scopes is not MISSING:\n # provide extra scopes\n params['scope'] = config.required_scopes\n self.redirect('/login/redirect?%s' % urllib.urlencode(params))\n\n\nclass PickOrganizationHandler(webapp2.RequestHandler):\n def get(self):\n organization_id = self.request.GET.get('organization_id', None)\n source = self.request.GET.get('source', SOURCE_WEB)\n\n error = None\n if organization_id:\n config = get_config(NAMESPACE)\n if organization_id != config.root_organization.name:\n try:\n get_organization(organization_id)\n except OrganizationNotFoundException as e:\n error = e.message\n\n if not error:\n self.redirect('/login/redirect?%s' % self.request.query)\n return\n\n template_parameters = {\n 'source': source,\n 'error': error\n }\n render_page(self.response, os.path.join('unauthenticated', 'organization.html'), plugin_name=NAMESPACE,\n template_parameters=template_parameters)\n\n\nclass OauthAuthorizeHandler(webapp2.RequestHandler):\n def get(self):\n organization_id = self.request.GET.get('organization_id', None)\n source = self.request.GET.get('source', SOURCE_WEB)\n extra_scopes = self.request.GET.get('scope', '').lstrip(',')\n register = self.request.GET.get('register', False)\n\n config = get_config(NAMESPACE)\n assert isinstance(config, ItsYouOnlineConfiguration)\n\n if not organization_id and config.login_with_organization:\n self.redirect('/login/organization')\n return\n\n if config.login_with_organization:\n if organization_id != config.root_organization.name:\n try:\n get_organization(organization_id)\n except OrganizationNotFoundException as e:\n render_error_page(self.response, httplib.BAD_REQUEST, e.message)\n return\n\n if source not in [SOURCE_WEB, SOURCE_APP]:\n 
render_error_page(self.response, httplib.BAD_REQUEST, 'Bad Request')\n return\n\n if config.login_with_organization:\n if organization_id == config.root_organization.name:\n if source == SOURCE_APP:\n render_error_page(self.response, httplib.BAD_REQUEST, 'Bad Request')\n return\n else:\n sub_org = organization_id\n else:\n sub_org = get_users_organization(config, organization_id)\n scope = 'user:memberof:%s' % sub_org\n elif config.require_memberof:\n scope = 'user:memberof:%s' % config.root_organization.name\n else:\n scope = ''\n\n if scope:\n scope += ','\n scope += extra_scopes\n\n params = {\n 'response_type': 'code',\n 'client_id': config.root_organization.name,\n 'redirect_uri': get_redirect_uri(config, source),\n 'scope': scope,\n 'state': str(uuid.uuid4())\n }\n\n login_state = OauthState(key=OauthState.create_key(params['state']))\n login_state.timestamp = now()\n login_state.organization_id = organization_id\n login_state.source = source\n login_state.completed = False\n login_state.put()\n\n if register:\n params['register'] = 1\n oauth_url = '%s/authorize?%s' % (get_auth_plugin().oauth_base_url, urllib.urlencode(params))\n logging.info('Redirecting to %s', oauth_url)\n self.redirect(str(oauth_url))\n\n\nclass DoLoginHandler(OauthAuthorizeHandler):\n def get(self, **kwargs):\n super(DoLoginHandler, self).get()\n\n\nclass Oauth2CallbackHandler(webapp2.RequestHandler):\n def get(self):\n code = self.request.GET.get('code', None)\n state = self.request.GET.get('state', None)\n try:\n if not (code or state):\n logging.debug('Code or state are missing.\\nCode: %s\\nState:%s', code, state)\n raise HttpBadRequestException()\n\n login_state = OauthState.create_key(state).get()\n if not login_state:\n logging.debug('Login state not found')\n raise HttpBadRequestException()\n\n config = get_config(NAMESPACE)\n assert isinstance(config, ItsYouOnlineConfiguration)\n if config.login_with_organization:\n username, scopes = get_user_scopes_from_access_token(code, login_state)\n jwt = None\n else:\n jwt, username, scopes = get_jwt(code, login_state)\n except HttpException as e:\n render_error_page(self.response, e.http_code, e.error)\n return\n\n _, session = login_user(self.response, username, scopes, jwt)\n self.redirect('/')\n\n if config.fetch_information:\n deferred.defer(set_user_information, Profile.create_key(username), session.key, _queue='iyo-requests')\n\n\nclass ContinueLoginHandler(webapp2.RequestHandler):\n def get(self, register=False, **kwargs):\n # Redirect to /login/organization if an organization is required to login\n # else immediately redirect to to itsyou.online\n config = get_config(NAMESPACE) # type: ItsYouOnlineConfiguration\n if config.login_with_organization:\n self.redirect('/login/organization')\n else:\n params = {\n 'source': self.request.GET.get('source', SOURCE_WEB),\n 'organization_id': config.root_organization.name,\n 'scope': self.request.GET.get('scope') or ''\n }\n if register:\n params['register'] = 1\n if config.required_scopes and config.required_scopes is not MISSING:\n # provide extra scopes\n if params['scope']:\n params['scope'] += ','\n params['scope'] += config.required_scopes\n self.redirect(str('/login/redirect?%s' % urllib.urlencode(params)))\n\n\nclass RegisterHandler(ContinueLoginHandler):\n def get(self, **kwargs):\n super(RegisterHandler, self).get(register=True, 
**kwargs)\n","sub_path":"plugins/its_you_online_auth/handlers/unauthenticated.py","file_name":"unauthenticated.py","file_ext":"py","file_size_in_byte":9228,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"469240215","text":"# coding: utf-8\n\n\nimport os\nimport unittest\n\nfrom atomate.qchem.firetasks.geo_transformations import RotateTorsion\nfrom atomate.utils.testing import AtomateTest\nfrom pymatgen.core import Molecule\nimport numpy as np\n\n__author__ = \"Brandon Wood\"\n__email__ = \"b.wood@berkeley.edu\"\n\nmodule_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)))\n\n\nclass TestGeoTransformations(AtomateTest):\n @classmethod\n def setUpClass(cls):\n\n cls.pt_mol = Molecule.from_file(\n os.path.join(module_dir, \"..\", \"..\", \"test_files\",\n \"pt_gs_wb97mv_tz_initial.xyz\"))\n cls.pt_rot_90_mol = Molecule.from_file(\n os.path.join(module_dir, \"..\", \"..\", \"test_files\",\n \"pt_rotated_90.0.xyz\"))\n\n def setUp(self, lpad=False):\n super(TestGeoTransformations, self).setUp(lpad=False)\n\n def tearDown(self):\n pass\n\n def test_rotate_torsion(self):\n atom_indexes = [6, 8, 9, 10]\n angle = 90.0\n ft = RotateTorsion({\n \"molecule\": self.pt_mol,\n \"atom_indexes\": atom_indexes,\n \"angle\": angle\n })\n rot_mol = ft.run_task({})\n test_mol = Molecule.from_dict(\n rot_mol.as_dict()[\"update_spec\"][\"prev_calc_molecule\"])\n np.testing.assert_equal(self.pt_rot_90_mol.species, test_mol.species)\n np.testing.assert_allclose(\n self.pt_rot_90_mol.cart_coords, test_mol.cart_coords, atol=0.0001)\n\n\nif __name__ == \"__main__\":\n unittest.main()\n","sub_path":"atomate/qchem/firetasks/tests/test_geo_transformations.py","file_name":"test_geo_transformations.py","file_ext":"py","file_size_in_byte":1515,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"252202160","text":"# Make a class LatLon that can be passed parameters `lat` and `lon` to the\n# constructor\n\nclass LatLon:\n def __init__(self, lat, lon):\n self.lat = lat\n self.lon = lon\n\n# Make a class Waypoint that can be passed parameters `name`, `lat`, and `lon` to the\n# constructor. It should inherit from LatLon. Look up the `super` method\n\nclass Waypoint(LatLon):\n def __init__(self, name, lat, lon):\n super().__init__(lat, lon)\n self.name = name\n\n def __str__(self):\n return f\"{self.name}, {self.lat}, {self.lon}\"\n\n# Make a class Geocache that can be passed parameters `name`, `difficulty`,\n# `size`, `lat`, and `lon` to the constructor. What should it inherit from?\n\nclass Geocache(Waypoint):\n def __init__(self, name, difficulty, size, lat, lon):\n super().__init__(name, lat, lon)\n self.difficulty = difficulty\n self.size = size\n \n def __str__(self):\n return f\"{super().__str__()}, difficulty: {self.difficulty}, size: {self.size}\"\n \n\n# Make a new waypoint and print it out: \"Catacombs\", 41.70505, -121.51521\n\nwaypoint = Waypoint(\"Catacombs\", 41.70505, -121.51521)\nprint(waypoint.name)\nprint(waypoint.lat)\nprint(waypoint.lon)\n\n# Without changing the following line, how can you make it print into something\n# more human-readable? 
Hint: Look up the `object.__str__` method\nprint(waypoint)\n\n# Make a new geocache \"Newberry Views\", diff 1.5, size 2, 44.052137, -121.41556\n\ngeocache = Geocache(\"Newberry Views\", 1.5, 2, 44.052137, -121.41556)\n\n# Print it--also make this print more nicely\nprint(geocache)\n\n\n\n# STRETCH - this algorithm checks if a number is a prime number\n\n\ndef is_prime(num=None):\n    # read the number at call time rather than once at definition time\n    if num is None:\n        num = int(input('Enter a number -----> '))\n    if num:\n    \n        # every candidate divisor from 1 up to num itself\n        divisors = [i for i in range(1, num + 1)]\n    \n        count = 0\n        for i in divisors:\n            if num % i == 0:\n                count += 1\n        total = count\n    \n    \n        # a prime has exactly two divisors: 1 and itself\n        if total == 2:\n\n            print(num, 'is a prime number')\n        else:\n            print(num, 'is not a prime number')\n    \n    \nis_prime()\n","sub_path":"src/15_classes.py","file_name":"15_classes.py","file_ext":"py","file_size_in_byte":2133,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"}
+{"seq_id":"474900686","text":"\"\"\"\nScheme 1\n\"\"\"\n\nfrom socket import *\nimport os,sys\n\n# server address\nADDR=('127.0.0.1',8636)\n\ndef send_msg(s,name):\n    while 1:\n        try:\n            content=input(\"Speak: \")\n        except:\n            content='quit'\n        if content =='quit':\n            msg='Q '+name\n            s.sendto(msg.encode(),ADDR)\n            sys.exit('Thanks for using the chat')\n        msg='C %s %s'%(name,content)\n        s.sendto(msg.encode(),ADDR)\n\ndef recv_msg(s):\n    while 1:\n        data,addr =s.recvfrom(1024)\n        if data.decode()=='EXIT':\n            break\n        print(data.decode()+'\\nSpeak: ',end='')\n\n# startup function --> sends the initial request to the server\ndef main():\n    s=socket(AF_INET,SOCK_DGRAM)\n    while 1:\n        name=input('Please enter your name: ')\n        msg='L '+name\n        s.sendto(msg.encode(),ADDR)\n        data,addr=s.recvfrom(128)\n        if data.decode()=='OK':\n            print('Entered the chat room')\n            break\n        else:\n            print('That user already exists')\n\n    pid =os.fork()\n    if pid<0:\n        print('error!')\n        return\n    elif pid ==0:\n        send_msg(s,name)\n    else:\n        recv_msg(s)\n\n\nif __name__ == '__main__':\n    main()","sub_path":"project_2 client.py","file_name":"project_2 client.py","file_ext":"py","file_size_in_byte":1182,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"}
+{"seq_id":"142708985","text":"from django.test import TestCase\nfrom django.urls import reverse\nfrom django.contrib.auth import get_user_model\nfrom django.utils import timezone\n\nfrom todo.models import Todo\n\n\nclass TestModels(TestCase):\n\n    def setUp(self):\n\n        self.timezone = timezone.now()\n\n        self.user = get_user_model().objects.create_user(\n            username = 'mkaychuks',\n            email = 'admin@admin.com',\n            password = 'testing321',\n        )\n\n        self.todo = Todo.objects.create(\n            title = 'Sweep the house',\n            description = 'Sweeping keeps the house neat',\n            author = self.user,\n            date_created = self.timezone # this keeps using the current time each time\n            # the setUp function runs\n        )\n\n    def test_absolute_url(self):\n        self.assertEqual(self.todo.get_absolute_url(), '/todo/')\n\n    def test_string_representation(self):\n        self.assertTrue(str(self.todo.title))\n        self.assertEqual(str(self.todo.description), 'Sweeping keeps the house neat')\n\n    def test_user_is_created(self):\n        self.assertTrue(self.user)\n\n    def test_model_picks_up_data(self):\n        self.assertEqual(self.todo.title, str(self.todo.title))\n        self.assertNotEqual(self.todo.date_created, self.timezone) # because 'timezone.now()' keeps changing\n        self.assertEqual(f'{self.todo.description}', 'Sweeping keeps the house neat')\n    ","sub_path":"todo/test/test_models.py","file_name":"test_models.py","file_ext":"py","file_size_in_byte":1444,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"}
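The STRETCH prime check in 15_classes.py above counts every divisor from 1 up to num, which is correct but does O(num) trial divisions. A minimal sketch of the usual square-root cutoff, assuming Python 3.8+ for math.isqrt; the name is_prime_sqrt and the demo values are illustrative additions, not part of the original exercise:

import math

def is_prime_sqrt(num):
    # primes are greater than 1 and have no divisor in 2..sqrt(num)
    if num < 2:
        return False
    for i in range(2, math.isqrt(num) + 1):
        if num % i == 0:
            return False
    return True

print(is_prime_sqrt(101))  # True
print(is_prime_sqrt(100))  # False: 100 = 2 * 50

Checking only up to the integer square root is enough because any factor pair (a, b) of num satisfies min(a, b) <= sqrt(num).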
+{"seq_id":"582850644","text":"#!/usr/bin/python\n\nransac_iters = [1000, 5000]\nncc_threshold_s = [0.6, 0.7, 0.8]\nncc_threshold_m = [0.6, 0.7, 0.8]\n\n#search_wsz = [180, 200, 230]\n\nharris_block_sz = [3, 5]\nharris_k = [0.04, 0.06]\nharris_thr = [0.00001, 0.000001, 0.0000001]\n\nfp_runscript = open(\"/mnt/ssd/kivan/cv-stereo/scripts/eval_batch/run_batch_validation.sh\", 'w')\nfp_runscript.write(\"#!/bin/bash\\n\\n\")\n\ncnt = 0\nfor i in range(len(ransac_iters)):\n for j in range(len(ncc_threshold_s)):\n for k in range(len(ncc_threshold_m)):\n for l in range(len(harris_block_sz)):\n for m in range(len(harris_k)):\n for n in range(len(harris_thr)):\n cnt += 1\n #filepath = \"/home/kivan/Projects/cv-stereo/config_files/experiments/kitti/validation/tracker_validation_ncc_\" + str(cnt) + \".txt\"\n filepath = \"/home/kivan/Projects/cv-stereo/config_files/experiments/kitti/validation2/tracker_validation_ncc_\" + str(cnt) + \".txt\"\n print(filepath)\n fp = open(filepath, 'w')\n fp.write(\"odometry_method = VisualOdometryRansac\\n\")\n fp.write(\"ransac_iters = \" + str(ransac_iters[i]) + \"\\n\\n\")\n fp.write(\"tracker = StereoTracker\\n\")\n fp.write(\"max_disparity = 160\\n\")\n fp.write(\"stereo_wsz = 15\\n\")\n fp.write(\"ncc_threshold_s = \" + str(ncc_threshold_s[j]) + \"\\n\\n\")\n fp.write(\"tracker_mono = TrackerBFM\\n\")\n fp.write(\"max_features = 5000\\n\")\n fp.write(\"ncc_threshold_m = \" + str(ncc_threshold_m[k]) + \"\\n\")\n fp.write(\"ncc_patch_size = 15\\n\")\n fp.write(\"search_wsz = 230\\n\\n\")\n fp.write(\"detector = FeatureDetectorHarrisCV\\n\")\n fp.write(\"harris_block_sz = \" + str(harris_block_sz[l]) + \"\\n\")\n fp.write(\"harris_filter_sz = 1\\n\")\n fp.write(\"harris_k = \" + str(harris_k[m]) + \"\\n\")\n fp.write(\"harris_thr = \" + str(harris_thr[n]) + \"\\n\")\n fp.write(\"harris_margin = 15\\n\\n\")\n fp.write(\"use_bundle_adjustment = false\")\n fp.close()\n\n fp_runscript.write('./run_kitti_evaluation_dinodas.sh \"' + filepath + '\"\\n')\nfp_runscript.close()\n","sub_path":"scripts/egomotion_kitti_eval/old/generate_grid_search_validation_stage1.py","file_name":"generate_grid_search_validation_stage1.py","file_ext":"py","file_size_in_byte":2550,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"266244789","text":"# (C) Copyright 2007-2021 Enthought, Inc., Austin, TX\r\n# All rights reserved.\r\n#\r\n# This software is provided without warranty under the terms of the BSD\r\n# license included in LICENSE.txt and may be redistributed only under\r\n# the conditions described in the aforementioned license. The license\r\n# is also available online at http://www.enthought.com/licenses/BSD.txt\r\n#\r\n# Thanks for using Enthought open source!\r\n\r\n\"\"\" An IPython kernel plugin. 
\"\"\"\r\n\r\nimport logging\r\nimport warnings\r\n\r\n# Enthought library imports.\r\nfrom envisage.api import (\r\n bind_extension_point,\r\n ExtensionPoint,\r\n Plugin,\r\n ServiceOffer,\r\n)\r\nfrom traits.api import Bool, Instance, List\r\n\r\n# Constants kept around for backwards compatibility.\r\n# These will be removed in a future release.\r\n# Extension point IDs.\r\nSERVICE_OFFERS = \"envisage.service_offers\"\r\nIPYTHON_NAMESPACE = \"ipython_plugin.namespace\"\r\n\r\n# Protocol for the contributed service offer.\r\nIPYTHON_KERNEL_PROTOCOL = (\r\n \"envisage.plugins.ipython_kernel.internal_ipkernel.InternalIPKernel\")\r\n\r\nlogger = logging.getLogger(__name__)\r\n\r\n\r\nclass IPythonKernelPlugin(Plugin):\r\n \"\"\" An IPython kernel plugin. \"\"\"\r\n\r\n #: The plugin unique identifier.\r\n id = \"envisage.plugins.ipython_kernel\"\r\n\r\n #: The plugin name (suitable for displaying to the user).\r\n name = \"IPython embedded kernel plugin\"\r\n\r\n #: Extension point for objects contributed to the IPython kernel namespace.\r\n IPYTHON_NAMESPACE = 'ipython_plugin.namespace'\r\n\r\n #: Ipython kernel namespace ExtensionPoint\r\n kernel_namespace = ExtensionPoint(\r\n List,\r\n id=IPYTHON_NAMESPACE,\r\n desc=\"\"\"\r\n\r\n Variables to add to the IPython kernel namespace.\r\n This is a list of tuples (name, value).\r\n\r\n \"\"\",\r\n )\r\n\r\n #: Service offers contributed by this plugin.\r\n service_offers = List(contributes_to=SERVICE_OFFERS)\r\n\r\n # Protocol for the contributed service offer.\r\n IPYTHON_KERNEL_PROTOCOL = 'envisage.plugins.ipython_kernel.internal_ipkernel.InternalIPKernel' # noqa: E501\r\n\r\n #: Whether to initialize the kernel when the service is created.\r\n #: The default is ``False```, for backwards compatibility. It will change\r\n #: to ``True`` in a future version of Envisage. External users wanting\r\n #: to use the future behaviour now should pass ``init_ipkernel=True``\r\n #: when creating the plugin.\r\n init_ipkernel = Bool(False)\r\n\r\n def stop(self):\r\n \"\"\" Stop the plugin. \"\"\"\r\n self._destroy_kernel()\r\n\r\n # Private traits and methods\r\n\r\n #: The InternalIPKernel instance provided by the service.\r\n _kernel = Instance(IPYTHON_KERNEL_PROTOCOL)\r\n\r\n def _create_kernel(self):\r\n from .internal_ipkernel import InternalIPKernel\r\n\r\n # This shouldn't happen with a normal lifecycle, but add a warning\r\n # just in case.\r\n if self._kernel is not None:\r\n warnings.warn(\r\n \"A kernel already exists. \" \"No new kernel will be created.\",\r\n RuntimeWarning,\r\n )\r\n return\r\n\r\n logger.debug(\"Creating the embedded IPython kernel\")\r\n kernel = self._kernel = InternalIPKernel()\r\n bind_extension_point(\r\n kernel, \"initial_namespace\", IPYTHON_NAMESPACE, self.application\r\n )\r\n if self.init_ipkernel:\r\n kernel.init_ipkernel()\r\n else:\r\n warnings.warn(\r\n (\r\n \"In the future, the IPython kernel will be initialized \"\r\n \"automatically at creation time. 
To enable this \"\r\n \"future behaviour now, create the plugin using \"\r\n \"IPythonKernelPlugin(init_ipkernel=True)\"\r\n ),\r\n DeprecationWarning,\r\n )\r\n\r\n return kernel\r\n\r\n def _destroy_kernel(self):\r\n \"\"\"\r\n Destroy any existing kernel.\r\n \"\"\"\r\n if self._kernel is None:\r\n return\r\n\r\n logger.debug(\"Shutting down the embedded IPython kernel\")\r\n self._kernel.shutdown()\r\n self._kernel = None\r\n\r\n def _service_offers_default(self):\r\n ipython_kernel_service_offer = ServiceOffer(\r\n protocol=IPYTHON_KERNEL_PROTOCOL, factory=self._create_kernel,\r\n )\r\n return [ipython_kernel_service_offer]\r\n","sub_path":"venv/lib/python3.8/site-packages/envisage/plugins/ipython_kernel/ipython_kernel_plugin.py","file_name":"ipython_kernel_plugin.py","file_ext":"py","file_size_in_byte":4332,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"510404745","text":"#!/usr/bin/python\n# encoding=utf-8\n\nimport sys\nif sys.version_info[0] == 2:\n # Python2\n import core.AepSdkRequestSend as AepSdkRequestSend\nelse:\n # Python3\n from apis.core import AepSdkRequestSend\n\n\n\n#参数standardVersion: 类型String, 参数可以为空\n# 描述:标准物模型版本号\n#参数thirdType: 类型long, 参数不可以为空\n# 描述:三级分类Id\ndef QueryStandardModel(appKey, appSecret, standardVersion, thirdType):\n path = '/aep_standard_management/standardModel'\n head = {}\n param = {'standardVersion':standardVersion, 'thirdType':thirdType}\n version = '20190713033424'\n application = appKey\n key = appSecret\n response = AepSdkRequestSend.sendSDKRequest(path, head, param, None, version, application, None, key, 'GET')\n if response is not None:\n return response.read()\n return None\n\n","sub_path":"apis/aep_standard_management.py","file_name":"aep_standard_management.py","file_ext":"py","file_size_in_byte":855,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"377918973","text":"import logging\nimport stripe\n\nfrom django.apps import apps\nfrom django.db import models, transaction\nfrom django.utils.translation import gettext_lazy as _\nfrom stripe.api_resources.abstract.listable_api_resource import ListableAPIResource\n\nfrom . import settings\nfrom .errors import *\nfrom .managers import *\n\nstripe.api_key = settings.SECRET_KEY\n\n\n# TODO: fix the test console (optional-ize Stripe)\n# TODO: create templatetag for StripeJS thing\n\n\ndef list_all(t: ListableAPIResource):\n \"\"\"Retrieve all objects of type t from Stripe. Should work for\n Product, Plan. 
SKU.\n \"\"\"\n objects = []\n last_id = None\n has_more = True\n\n while has_more:\n response = t.list(limit=100, starting_after=last_id)\n has_more = response.has_more\n objects.append(response.data)\n last_id = objects[-1].id\n\n return objects\n\n\nclass Customer(models.Model):\n stripe_id = models.CharField(\n _(\"id\"), max_length=128, help_text=\"Id of Stripe Customer\", db_index=True\n )\n name = models.CharField(_(\"name\"), max_length=128, null=True, blank=True)\n email = models.EmailField(_(\"email\"), max_length=254, null=True, blank=True)\n description = models.TextField(\n _(\"description\"), help_text=_(\"Description for admins\"), null=True, blank=True\n )\n\n SOURCE_NONE = \"NA\"\n SOURCE_FAILED = \"FA\"\n SOURCE_AVAILABLE = \"AV\"\n source_status_choices = [\n (SOURCE_NONE, \"None\"),\n (SOURCE_FAILED, \"Source payment failed\"),\n (SOURCE_AVAILABLE, \"Source available\"),\n ]\n source_status = models.CharField(\n _(\"source status\"),\n max_length=2,\n choices=source_status_choices,\n default=SOURCE_NONE,\n )\n\n default_source_id = models.CharField(\n _(\"default source id\"), max_length=128, null=True\n )\n default_source_brand = models.TextField(_(\"default source brand\"), null=True)\n default_source_last4 = models.CharField(\n _(\"default source last 4 digits\"), max_length=4, null=True\n )\n\n def save(self, *args, **kwargs):\n if self._state.adding: # initial save\n self._create_stripe_object()\n super().save(*args, **kwargs)\n\n def _create_stripe_object(self):\n stripe_object = stripe.Customer.create(\n email=self.email, name=self.name, description=self.description\n )\n self.stripe_id = stripe_object.id\n\n def set_source(self, token, **kwargs):\n try:\n customer = stripe.Customer.modify(self.stripe_id, source=token)\n source = customer.sources.data[0]\n except stripe.error.StripeError as e:\n raise DtStripeError.from_stripe_error(e)\n self.source_status = Customer.SOURCE_AVAILABLE\n self.default_source_id = source.id\n self.default_source_brand = source.brand\n self.default_source_last4 = source.last4\n self.save()\n\n def order(self, sku, token=None, **kwargs):\n try:\n stripe_order = stripe.Order.create(\n customer=self.stripe_id,\n currency=sku.currency,\n items=[{\"type\": \"sku\", \"parent\": sku.stripe_id}],\n )\n if token:\n stripe_order = stripe.Order.pay(stripe_order.id, source=token)\n else:\n stripe_order = stripe.Order.pay(\n stripe_order.id, customer=self.stripe_id\n )\n except stripe.error.StripeError as e:\n self.source_status = Customer.SOURCE_FAILED\n self.save()\n raise DtStripeError.from_stripe_error(e)\n order = Order(stripe_id=stripe_order.id, customer=self, sku=sku)\n order.save()\n return order\n\n def subscribe(self, plan):\n # TODO: prevent duplicate subscriptions\n sub = self.get_subscription(plan)\n status = sub.status\n if status == Subscription.SUBSCRIPTION_ACTIVE:\n raise DtStripeError(\n \"DUPLICATE_SUBSCRIPTION\", \"Attempting a duplicate subscription.\"\n )\n elif status == Subscription.SUBSCRIPTION_CANCEL_AT_PERIOD_END:\n try:\n stripe_sub = stripe.Subscription.modify(\n sub.stripe_id, cancel_at_period_end=True\n )\n except stripe.error.StripeError as e:\n raise DtStripeError.from_stripe_error(e)\n else: # CANCELED or NONE\n try:\n stripe_sub = stripe.Subscription.create(\n customer=self.stripe_id, plan=plan.stripe_id\n )\n except stripe.error.StripeError as e:\n self.source_status = Customer.SOURCE_FAILED\n self.save()\n raise DtStripeError.from_stripe_error(e)\n sub.stripe_id = stripe_sub.id\n sub.status = 
Subscription.SUBSCRIPTION_ACTIVE\n        sub.save()\n        return sub\n\n    def cancel_subscription(self, plan):\n        sub = self.get_subscription(plan)\n        if not sub or sub.status != Subscription.SUBSCRIPTION_ACTIVE:\n            raise DtStripeError(\n                \"NO_ACTIVE_SUBSCRIPTION\", \"There is no active subscription to cancel.\"\n            )\n        try:\n            stripe.Subscription.modify(sub.stripe_id, cancel_at_period_end=True)\n        except stripe.error.StripeError as e:\n            raise DtStripeError.from_stripe_error(e)\n        sub.status = Subscription.SUBSCRIPTION_CANCEL_AT_PERIOD_END\n        sub.save()\n        return sub\n\n    def get_subscription(self, plan):\n        obj, created = self.subscriptions.get_or_create(plan=plan)\n        return obj\n\n    def get_subscription_status(self, plan):\n        return self.get_subscription(plan).status\n\n    def __str__(self):\n        try:\n            return self.name\n        except AttributeError:\n            return \"Incomplete customer\"\n\n\nclass Product(models.Model):\n    stripe_id = models.CharField(\n        _(\"id\"), max_length=128, help_text=\"Id of Stripe Product\", db_index=True\n    )\n    name = models.CharField(_(\"name\"), max_length=128)\n    description = models.TextField(\n        _(\"description\"),\n        help_text=_(\n            \"Description to be shown to the user. Only for products of type 'good'.\"\n        ),\n        null=True,\n        blank=True,\n    )\n\n    GOOD = \"good\"\n    SERVICE = \"service\"\n    TYPE_CHOICES = [(GOOD, \"Good\"), (SERVICE, \"Service\")]\n    product_type = models.CharField(_(\"type\"), max_length=16, choices=TYPE_CHOICES)\n\n    SYNC_FIELDS = [\"name\", \"description\"]\n\n    objects = models.Manager()\n    services = ServiceManager()\n    goods = GoodManager()\n\n    def save(self, *args, **kwargs):\n        if self._state.adding: # initial save\n            self._create_stripe_object()\n        super().save(*args, **kwargs)\n\n    def _create_stripe_object(self):\n        if self.product_type == \"good\":\n            stripe_object = stripe.Product.create(\n                name=self.name, type=self.product_type, shippable=False\n            )\n        else:\n            stripe_object = stripe.Product.create(\n                name=self.name, type=self.product_type\n            )\n        self.stripe_id = stripe_object.id\n\n    @classmethod\n    def sync(cls, product):\n        \"\"\"Update or create internal Product object according to product object\n        from stripe API.\n        \"\"\"\n        try:\n            local_product = Product.objects.get(stripe_id=product.id)\n        except Product.DoesNotExist:\n            local_product = Product()\n\n        local_product.product_type = product.type\n        for field in cls.SYNC_FIELDS:\n            setattr(local_product, field, getattr(product, field))\n        local_product.save()\n\n        return local_product\n\n    @staticmethod\n    def sync_all():\n        products = list_all(stripe.Product)\n        for product in products:\n            Product.sync(product)\n\n    def __str__(self):\n        try:\n            return self.name\n        except AttributeError:\n            return \"Incomplete product\"\n\n\nclass Plan(models.Model):\n    stripe_id = models.CharField(\n        _(\"id\"), max_length=128, help_text=_(\"Id of Stripe Plan\"), db_index=True\n    )\n\n    DAILY = \"day\"\n    WEEKLY = \"week\"\n    MONTHLY = \"month\"\n    YEARLY = \"year\"\n    INTERVAL_CHOICES = [\n        (DAILY, \"Daily\"),\n        (WEEKLY, \"Weekly\"),\n        (MONTHLY, \"Monthly\"),\n        (YEARLY, \"Yearly\"),\n    ]\n\n    product = models.ForeignKey(Product, on_delete=models.CASCADE, related_name=\"plans\")\n    amount = models.IntegerField(_(\"amount\"))\n    currency = models.CharField(_(\"currency\"), max_length=3, default=\"usd\")\n    interval = models.CharField(_(\"interval\"), max_length=16, choices=INTERVAL_CHOICES)\n    interval_count = models.IntegerField(\n        _(\"count\"),\n        default=1,\n        help_text=_(\n            \"Number of intervals per billing cycle. 
I.e., interval=month and interval_count=3 for 3 months.\"\n ),\n )\n\n name = models.CharField(\n _(\"name\"),\n help_text=_(\"Local name for use by admins\"),\n max_length=128,\n null=True,\n )\n description = models.TextField(\n _(\"description\"),\n help_text=_(\"Local description for use by admins\"),\n null=True,\n blank=True,\n )\n\n SYNC_FIELD = [\"amount\", \"currency\", \"interval\", \"interval_count\"]\n\n objects = PlanManager()\n\n @staticmethod\n def create(product: Product, name: str = None, **kwargs):\n raise NotImplementedError()\n\n def save(self, *args, **kwargs):\n if self._state.adding: # initial save\n self._create_stripe_object()\n super().save(*args, **kwargs)\n\n def _create_stripe_object(self):\n stripe_object = stripe.Plan.create(\n product=self.product.stripe_id,\n amount=self.amount,\n currency=self.currency,\n interval=self.interval,\n interval_count=self.interval_count,\n )\n self.stripe_id = stripe_object.id\n\n @classmethod\n def sync(cls, plan):\n \"\"\"Update or create internal Plan object according to plan object\n from stripe API.\n \"\"\"\n try:\n product = Product.get(stripe_id=plan.product)\n except Product.DoesNotExist:\n logging.warning(\n \"Product associated to Stripe Plan object does not exist in local DB. Skipping retrieval of Plan.\"\n )\n return\n\n try:\n local_plan = Plan.get(stripe_id=plan.id)\n except Plan.DoesNotExist:\n local_plan = Plan()\n\n local_plan.product = product\n for field in cls.SYNC_FIELDS:\n setattr(local_plan, field, getattr(plan, field))\n local_plan.save()\n\n return local_plan\n\n @staticmethod\n def sync_all():\n plans = list_all(stripe.Plan)\n for plan in plans:\n Plan.sync(plan)\n\n def __str__(self):\n try:\n base = getattr(self, \"name\", \"Plan\")\n if self.currency == \"usd\":\n amount = \"${:.2f}\".format(self.amount / 100)\n else:\n amount = \"{}{}\".format(self.amount, self.currency.upper())\n return \"{base} for {product} ({amount}/{interval_count}{interval})\".format(\n base=base,\n product=self.product.name,\n amount=amount,\n interval_count=\"\" if self.interval_count == 1 else self.interval_count,\n interval=self.interval[:1],\n )\n except AttributeError:\n return \"Incomplete plan\"\n\n\nclass SKU(models.Model):\n stripe_id = models.CharField(\n _(\"id\"), max_length=128, help_text=_(\"Id of Stripe SKU\"), db_index=True\n )\n product = models.ForeignKey(Product, on_delete=\"CASCADE\", related_name=\"skus\")\n price = models.IntegerField(_(\"price\"))\n currency = models.CharField(_(\"currency\"), max_length=3, default=\"usd\")\n\n name = models.CharField(\n _(\"name\"),\n help_text=_(\"Local name for use by admins\"),\n max_length=128,\n null=True,\n )\n description = models.TextField(\n _(\"description\"),\n help_text=_(\"Local description for use by admins\"),\n null=True,\n blank=True,\n )\n\n INFINITE_INVENTORY = {\"type\": \"infinite\"}\n SYNC_FIELDS = [\"price\", \"currency\"]\n\n def save(self, *args, **kwargs):\n if self._state.adding: # initial save\n self._create_stripe_object()\n super().save(*args, **kwargs)\n\n def _create_stripe_object(self):\n stripe_object = stripe.SKU.create(\n product=self.product.stripe_id,\n price=self.price,\n currency=self.currency,\n inventory=SKU.INFINITE_INVENTORY,\n )\n self.stripe_id = stripe_object.id\n\n @classmethod\n def sync(cls, sku):\n \"\"\"Update or create internal SKU object according to SKU object\n from stripe API.\n \"\"\"\n try:\n product = Product.get(stripe_id=sku.product)\n except Product.DoesNotExist:\n logging.warning(\n \"Product associated 
to Stripe SKU object does not exist in local DB. Skipping retrieval of SKU.\"\n )\n return\n\n try:\n local_sku = SKU.get(stripe_id=sku.id)\n except SKU.DoesNotExist:\n local_sku = SKU()\n\n local_sku.product = product\n for field in cls.SYNC_FIELDS:\n setattr(local_sku, field, getattr(sku, field))\n local_sku.save()\n\n return local_sku\n\n @staticmethod\n def sync_all():\n skus = list_all(stripe.SKU)\n for sku in skus:\n SKU.sync(sku)\n\n def __str__(self):\n try:\n base = getattr(self, \"name\", \"SKU\")\n if self.currency == \"usd\":\n price = \"${:.2f}\".format(self.price / 100)\n else:\n price = \"{}{}\".format(self.price, self.currency.upper())\n return \"{base} for {product} ({price})\".format(\n base=base, product=self.product.name, price=price\n )\n except AttributeError:\n return \"Incomplete SKU\"\n\n\nclass Subscription(models.Model):\n stripe_id = models.CharField(\n _(\"id\"), max_length=128, help_text=_(\"Id of Stripe Subscription\"), db_index=True\n )\n customer = models.ForeignKey(\n Customer, on_delete=models.CASCADE, related_name=\"subscriptions\"\n )\n plan = models.ForeignKey(\n Plan, on_delete=models.CASCADE, related_name=\"subscriptions\"\n )\n\n SUBSCRIPTION_NONE = \"NA\"\n SUBSCRIPTION_ACTIVE = \"AV\"\n SUBSCRIPTION_CANCEL_AT_PERIOD_END = \"CE\"\n SUBSCRIPTION_CANCELED_BY_EXPIRY = \"CX\"\n SUBSCRIPTION_CANCELED_BY_PAYMENT_ISSUE = \"CP\"\n status_choices = [\n (SUBSCRIPTION_NONE, \"No subscription\"),\n (SUBSCRIPTION_ACTIVE, \"Active\"),\n (SUBSCRIPTION_CANCEL_AT_PERIOD_END, \"Active until end of billing cycle)\"),\n (SUBSCRIPTION_CANCELED_BY_EXPIRY, \"Canceled due to expiry\"),\n (SUBSCRIPTION_CANCELED_BY_PAYMENT_ISSUE, \"Canceled due to payment issue\"),\n ]\n status = models.CharField(\n _(\"status\"), max_length=2, choices=status_choices, default=SUBSCRIPTION_NONE\n )\n\n def __str__(self):\n try:\n plan = getattr(self.plan, \"name\", self.plan.product.name)\n customer = getattr(self.customer, \"name\", self.customer.stripe_id)\n status = self.get_status_display()\n return \"Subscription to {plan} by {customer} ({status})\".format(\n plan=plan, customer=customer, status=status\n )\n except AttributeError:\n return \"Incomplete subscription\"\n\n\nclass Order(models.Model):\n stripe_id = models.CharField(\n _(\"id\"), max_length=128, help_text=_(\"Id of Stripe Order\"), db_index=True\n )\n date_created = models.DateTimeField(_(\"date created\"), auto_now_add=True)\n customer = models.ForeignKey(\n Customer, on_delete=models.CASCADE, related_name=\"orders\"\n )\n sku = models.ForeignKey(SKU, on_delete=models.CASCADE, related_name=\"orders\")\n\n def __str__(self):\n try:\n sku = getattr(self.sku, \"name\", self.sku.product.name)\n customer = getattr(self.customer, \"name\", self.customer.stripe_id)\n return \"Order for {plan} by {customer}\".format(plan=plan, customer=customer)\n except AttributeError:\n return \"Incomplete subscription\"\n","sub_path":"dt_stripe/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":16261,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"323260924","text":"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport numpy as np\n\ndef hidden_init(layer):\n fan_in = layer.weight.data.size()[0]\n lim = 1. 
/ np.sqrt(fan_in)\n return (-lim, lim)\n\nclass Network(nn.Module):\n def __init__(self, state_size, action_size, hidden_in_dim, hidden_out_dim, activation=F.relu, is_actor=False):\n super(Network, self).__init__()\n\n \"\"\"self.input_norm = nn.BatchNorm1d(input_dim)\n self.input_norm.weight.data.fill_(1)\n self.input_norm.bias.data.fill_(0)\"\"\"\n\n self.bn0 = nn.BatchNorm1d(state_size)\n self.fc1 = nn.Linear(state_size,hidden_in_dim)\n self.bn1 = nn.BatchNorm1d(hidden_in_dim)\n self.fc2_actor = nn.Linear(hidden_in_dim,hidden_out_dim)\n self.fc2_critic = nn.Linear(hidden_in_dim+action_size,hidden_out_dim)\n self.bn2 = nn.BatchNorm1d(hidden_out_dim)\n self.fc3_actor = nn.Linear(hidden_out_dim,action_size)\n self.fc3_critic = nn.Linear(hidden_out_dim,1)\n self.activation = activation \n self.is_actor = is_actor\n #self.reset_parameters()\n\n def reset_parameters(self):\n self.fc1.weight.data.uniform_(*hidden_init(self.fc1))\n self.fc2.weight.data.uniform_(*hidden_init(self.fc2))\n self.fc3.weight.data.uniform_(-1e-3, 1e-3)\n\n def forward(self, x, action=None):\n if self.is_actor:\n # return a vector of the force\n x = self.bn0(x)\n x = self.activation(self.bn1(self.fc1(x)))\n x = self.activation(self.bn2(self.fc2_actor(x)))\n return torch.tanh(self.fc3_actor(x))\n \n else:\n # critic network simply outputs a number\n x = self.bn0(x)\n x = self.activation(self.bn1(self.fc1(x)))\n x = torch.cat((x, action), dim=-1)\n x = self.activation(self.fc2_critic(x))\n return self.fc3_critic(x)","sub_path":"multi-agent-tennis/maddpg/network.py","file_name":"network.py","file_ext":"py","file_size_in_byte":1905,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"584020919","text":"import importlib\nimport os\nimport traceback\n\nimport zero_app\nfrom helpers import setup_logger\nfrom ui import Printer, Menu, HelpOverlay, GridMenu, Entry, \\\n GridMenuLabelOverlay, GridMenuSidebarOverlay, GridMenuNavOverlay\n\nfrom PIL import Image, ImageOps\n\nlogger = setup_logger(__name__, \"info\")\n\n\nclass AppManager(object):\n subdir_menus = {}\n \"\"\" Example of subdir_menus:\n {'apps/network_apps': ,\n ...\n 'apps/system_apps': }\n \"\"\"\n app_list = {}\n \"\"\"Example of app_list:\n {'apps/network_apps/wpa_cli': , \n 'apps/system_apps/system': , \n ...\n 'apps/network_apps/network': }\n \"\"\"\n failed_apps = {}\n \"\"\"Example of failed_apps:\n {'apps/network_apps/wpa_cli': \"Traceback: \\n ....\"\n }\n \"\"\"\n ordering_cache = {}\n\n def __init__(self, app_directory, context_manager, config=None, default_plugins=True):\n self.subdir_menus = {}\n self.subdir_menu_contents = {}\n self.subdir_menu_creators = {}\n self.subdir_menu_overlays = {}\n self.subdir_paths = []\n self.app_list = {}\n self.failed_apps = {}\n self.app_directory = app_directory\n self.cm = context_manager\n self.i, self.o = self.cm.get_io_for_context(\"main\")\n self.config = config if config else {}\n #logger.warning(self.config)\n if default_plugins and self.config.get(\"default_overlays\", True):\n self.register_default_plugins()\n\n def create_main_menu(self, menu_name, contents):\n dir = \"resources/\"\n icons = [f for f in os.listdir(dir) if f.endswith(\".png\")]\n icon_paths = [[f.rsplit('.', 1)[0], os.path.join(dir, f)] for f in icons]\n used_icons = []\n for entry in contents:\n for icon_name, icon_path in icon_paths:\n if entry.basename.startswith(icon_name):\n entry.icon = Image.open(icon_path)\n used_icons.append(icon_name)\n continue\n else:\n pass\n print([x for x, y, in 
icon_paths if x not in used_icons])\n font = (\"Fixedsys62.ttf\", 16)\n menu = GridMenu(contents, self.i, self.o, font=font, entry_width=32, name=\"Main menu\", draw_lines=False, exitable=False)\n menu.exit_entry = [\"Exit\", \"exit\"]\n menu.process_contents()\n return menu\n\n def sidebar_cb(self, c, ui_el, coords):\n sidebar_image = ImageOps.invert(Image.open(\"sidebar.png\").convert('L'))\n sidebar_image.convert(c.o.device_mode)\n c.image.paste(sidebar_image, (coords.left+3, coords.top-5))\n\n def overlay_main_menu(self, menu):\n main_menu_help = \"ZPUI main menu. Navigate the folders to get to different apps, or press KEY_PROG2 (anywhere in ZPUI) to get to the context menu.\"\n GridMenuNavOverlay().apply_to(menu)\n GridMenuSidebarOverlay(self.sidebar_cb).apply_to(menu)\n GridMenuLabelOverlay().apply_to(menu)\n HelpOverlay(main_menu_help).apply_to(menu)\n\n def register_default_plugins(self):\n self.subdir_menu_creators[\"apps\"] = self.create_main_menu\n self.subdir_menu_overlays[\"apps\"] = [self.overlay_main_menu]\n\n def create_subdir_menu(self, menu_name, contents):\n menu = Menu(contents, self.i, self.o, \"Subdir menu ({})\".format(menu_name))\n return menu\n\n def create_menu_structure(self):\n base_subdir = self.app_directory.rstrip('/')\n for subdir_path in self.subdir_paths:\n self.subdir_menu_contents[subdir_path] = []\n for subdir_path in self.subdir_paths:\n if subdir_path == base_subdir:\n continue\n parent_path = os.path.split(subdir_path)[0]\n menu_name = self.get_subdir_menu_name(subdir_path)\n subdir_entry = Entry(menu_name, type=\"dir\", path=subdir_path)\n self.subdir_menu_contents[parent_path].append(subdir_entry)\n subdir_menu_paths = self.subdir_menu_contents.keys()\n for app_path, app_obj in self.app_list.items():\n if self.app_has_callback(app_obj):\n subdir_menu_name = max([n for n in subdir_menu_paths if app_path.startswith(n)])\n menu_name = self.get_app_name(app_obj, app_path)\n app_entry = Entry(menu_name, type=\"app\", obj=app_obj, path=app_path)\n self.subdir_menu_contents[subdir_menu_name].append(app_entry)\n for path, subdir_contents in self.subdir_menu_contents.items():\n ordering = self.get_ordering(path)\n unordered_contents = self.prepare_menu_contents_for_ordering(subdir_contents)\n menu_contents = self.order_contents_by_ordering(unordered_contents, ordering)\n creator = self.subdir_menu_creators.get(path, self.create_subdir_menu)\n menu = creator(path, menu_contents)\n for overlay_cb in self.subdir_menu_overlays.get(path, []):\n overlay_cb(menu)\n self.subdir_menus[path] = menu\n return self.subdir_menus[base_subdir]\n\n def prepare_menu_contents_for_ordering(self, subdir_contents):\n for entry in subdir_contents:\n entry.basename = os.path.split(entry.path)[-1]\n if entry.type == \"dir\":\n entry.cb = lambda x=entry.path: self.switch_to_subdir(x)\n elif entry.type == \"app\":\n entry.cb = lambda x=entry.path: self.switch_to_app(x)\n return subdir_contents\n\n def switch_to_subdir(self, path):\n self.subdir_menus[path].activate()\n\n def switch_to_app(self, path):\n self.cm.switch_to_context(path.replace(\"/\", '.'))\n\n def order_contents_by_ordering(self, contents, ordering, strip_first_element=True):\n if ordering:\n contents = sorted(contents, key=lambda x: ordering.index(x.basename) if x.basename in ordering else 9999)\n return contents\n\n def load_all_apps(self, interactive=True):\n apps_blocked_in_config = self.config.get(\"do_not_load\", [])\n self.subdir_paths.append(self.app_directory.rstrip(\"/\"))\n for path, subdirs, modules in 
app_walk(self.app_directory):\n for subdir in subdirs:\n subdir_path = os.path.join(path, subdir)\n self.subdir_paths.append(subdir_path)\n # apps having an \"execute_after_contexts\" hook\n after_contexts_apps = {}\n for _module in modules:\n module_path = os.path.join(path, _module)\n try:\n if module_path in apps_blocked_in_config:\n logger.warning(\"App {} blocked from config; not loading\".format(module_path))\n continue\n app = self.load_app(module_path)\n logger.info(\"Loaded app {}\".format(module_path))\n self.app_list[module_path] = app\n menu_name = self.get_app_name(app, module_path)\n self.bind_context(app, module_path, menu_name)\n if self.app_has_after_contexts_hook(app):\n after_contexts_apps[module_path] = app\n except:\n logger.exception(\"Failed to load app {}\".format(module_path))\n self.failed_apps[module_path] = traceback.format_exc()\n if interactive:\n if self.failed_apps:\n failed_app_names = [os.path.split(p)[1] for p in self.failed_apps.keys()]\n Printer([\"Failed to load:\"]+failed_app_names, self.i, self.o, 0.5)\n # execute after_context functions\n for app_path, app in after_contexts_apps.items():\n try:\n self.execute_after_contexts(app)\n except:\n logger.exception(\"Failed to execute 'after all contexts' hook for {}\".format(app_path))\n else:\n logger.info(\"Executed 'after all contexts' hook for {}\".format(app_path))\n base_menu = self.create_menu_structure()\n return base_menu\n\n def app_has_callback(self, app):\n return (hasattr(app, \"callback\") and callable(app.callback)) or \\\n (hasattr(app, \"on_start\") and callable(app.on_start))\n\n def bind_context(self, app, path, menu_name):\n if hasattr(app, \"callback\") and callable(app.callback): # for function based apps\n app_callback = app.callback\n elif hasattr(app, \"on_start\") and callable(app.on_start): # for class based apps\n app_callback = app.on_start\n else:\n logger.debug(\"App \\\"{}\\\" has no callback; loading silently\".format(menu_name))\n return\n app_path = path.replace('/', '.')\n self.cm.register_context_target(app_path, app_callback)\n self.cm.set_menu_name(app_path, menu_name)\n\n def execute_after_contexts(self, app):\n app.execute_after_contexts()\n\n def app_has_after_contexts_hook(self, app):\n \"\"\"\n Checks whether the app has \"execute after all contexts\n are loaded\" hook. 
A separate method for ease of future\n changes.\n \"\"\"\n return hasattr(app, \"execute_after_contexts\")\n\n def get_app_path_for_cmdline(self, cmdline_app_path):\n main_py_string = \"/main.py\"\n if cmdline_app_path.endswith(main_py_string):\n app_path = cmdline_app_path[:-len(main_py_string)]\n elif cmdline_app_path.endswith(\"/\"):\n app_path = cmdline_app_path[:-1]\n else:\n app_path = cmdline_app_path\n return app_path\n\n def load_single_app_by_path(self, app_path, threaded = True):\n # If user runs in single-app mode and by accident\n # autocompletes the app name too far, it shouldn't fail\n app_path = self.get_app_path_for_cmdline(app_path)\n if \"__init__.py\" not in os.listdir(app_path):\n raise ImportError(\"Trying to import an app ({}) with no __init__.py in its folder!\".format(app_path))\n app_import_path = app_path.replace('/', '.')\n app = self.load_app(app_import_path, threaded=threaded)\n return app_import_path, app\n\n def load_app(self, path, threaded=True):\n app_path = path.replace('/', '.')\n app = importlib.import_module(app_path + '.main', package='apps')\n context = self.cm.create_context(app_path)\n context.threaded = threaded\n i, o = self.cm.get_io_for_context(app_path)\n if is_class_based_module(app):\n app_class = get_zeroapp_class_in_module(app)\n app = app_class(i, o)\n else:\n if hasattr(app, 'init_app'):\n app.init_app(i, o)\n else: #init_app-less function-based app\n app.i = i\n app.o = o\n self.pass_context_to_app(app, app_path, context)\n return app\n\n def pass_context_to_app(self, app, app_path, context):\n \"\"\"\n This is a function to pass context objects to apps. For now, it works\n with both class-based and module-based apps. It only passes the context\n if it detects that the app has the appropriate function to do that.\n \"\"\"\n if hasattr(app, \"set_context\") and callable(app.set_context):\n try:\n app.set_context(context)\n except Exception as e:\n logger.exception(\"App {}: app class has 'set_context' but raised exception when passed a context\".format(app_path))\n else:\n logger.info(\"Passed context to app {}\".format(app_path))\n\n def get_app_name(self, app, app_path):\n if hasattr(app, \"menu_name\"):\n return app.menu_name\n else:\n menu_name = os.path.split(app_path)[-1].capitalize().replace(\"_\", \" \")\n app.menu_name = menu_name\n return menu_name\n\n def get_subdir_menu_name(self, subdir_path):\n \"\"\"\n This function gets a subdirectory path and imports __init__.py from it.\n It then gets _menu_name attribute from __init__.py and returns it.\n If failed to either import __init__.py or get the _menu_name attribute,\n it returns the subdirectory name.\n \"\"\"\n subdir_import_path = subdir_path.replace('/', '.')\n try:\n subdir_object = importlib.import_module(subdir_import_path + '.__init__')\n return subdir_object._menu_name\n except:\n logger.exception(\"Exception while loading __init__.py for subdir {}\".format(subdir_path))\n return os.path.split(subdir_path)[1].capitalize().replace(\"_\", \" \")\n\n def get_ordering(self, path):\n \"\"\"This function gets a subdirectory path and imports __init__.py from it. It then gets _ordering attribute from __init__.py and returns it. 
It also caches the attribute for faster initialization.\n If failed to either import __init__.py or get the _ordering attribute, it returns an empty list.\"\"\"\n if path in self.ordering_cache:\n return self.ordering_cache[path]\n import_path = path.replace('/', '.')\n ordering = []\n try:\n imported_module = importlib.import_module(import_path + '.__init__')\n ordering = imported_module._ordering\n logger.debug(\"Found ordering for {} directory!\".format(import_path))\n except ImportError as e:\n logger.error(\"Exception while loading __init__.py for directory {}\".format(path))\n logger.debug(e)\n except AttributeError as e:\n pass\n finally:\n self.ordering_cache[path] = ordering\n return ordering\n\n\ndef app_walk(base_dir):\n \"\"\"Example of app_walk(directory):\n [('./apps', ['ee_apps', 'media_apps', 'test', 'system_apps', 'skeleton', 'network_apps'], ['__init__.pyc', '__init__.py']),\n ('./apps/ee_apps', ['i2ctools'], ['__init__.pyc', '__init__.py']),\n ('./apps/ee_apps/i2ctools', [], ['__init__.pyc', '__init__.py', 'main.pyc', 'main.py']),\n ('./apps/media_apps', ['mocp', 'volume'], ['__init__.pyc', '__init__.py']),\n ('./apps/media_apps/mocp', [], ['__init__.pyc', '__init__.py', 'main.pyc', 'main.py']),\n ('./apps/media_apps/volume', [], ['__init__.pyc', '__init__.py', 'main.pyc', 'main.py'])]\n \"\"\"\n walk_results = []\n modules = []\n subdirs = []\n for element in os.listdir(base_dir):\n full_path = os.path.join(base_dir, element)\n if os.path.isdir(full_path):\n if is_subdir(full_path):\n subdirs.append(element)\n results = app_walk(full_path)\n for result in results:\n walk_results.append(result)\n elif is_module_dir(full_path):\n modules.append(element)\n walk_results.append((base_dir, subdirs, modules))\n return walk_results\n\n\ndef get_zeroapp_class_in_module(module_):\n if 'init_app' in dir(module_):\n return None\n module_content = [item for item in dir(module_) if not item.startswith('__')]\n for item in module_content:\n class_ = getattr(module_, item)\n try:\n if issubclass(class_, zero_app.ZeroApp) and item != 'ZeroApp':\n return class_\n except Exception as e:\n pass # not a class : ignore\n return None\n\n\ndef is_class_based_module(module_):\n return get_zeroapp_class_in_module(module_) is not None\n\n\ndef is_module_dir(dir_path):\n contents = os.listdir(dir_path)\n return \"main.py\" in contents and \"do_not_load\" not in contents\n\n\ndef is_subdir(dir_path):\n contents = os.listdir(dir_path)\n return \"__init__.py\" in contents and \"main.py\" not in contents and \"do_not_load\" not in contents\n\n\nif __name__ == \"__main__\":\n from mock import Mock\n cm = Mock()\n cm.configure_mock(get_io_for_context=lambda x: (x, x))\n am = AppManager(\"apps/\", cm)\n am.o = Mock()\n am.o.configure_mock(cols=20, rows=8, char_width=6, width=128, height=64, device_mode='1', type=[\"b&w\"])\n am.load_all_apps(interactive=False)\n","sub_path":"apps/app_manager.py","file_name":"app_manager.py","file_ext":"py","file_size_in_byte":16262,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"645074488","text":"import networkx as nx\nfrom networkx.algorithms import bipartite\nimport matplotlib.pyplot as plt\nimport numpy,random\nimport tkinter as tk\nfrom PIL import Image\n\ndef input_adjen():\n n=int(input())\n arr=[]\n for i in range(n):\n arr.append([])\n for i in range(n):\n cur=input().replace(',','')\n cur=cur.replace(' ','')\n li=cur.split(\" \")\n for j in range(len(li)):\n try:\n arr[i].append(int(li[j]))\n except 
ValueError:\n                print(\"This is not a number\")\n    return arr\n\n\ndef build_new_graph(tuples,number_of_edges):\n    res=[]\n    for i in range(number_of_edges):\n        res.append([])\n        for j in range(number_of_edges):\n            res[i].append(0)\n    for i in range(len(tuples)):\n        x=tuples[i][0]\n        y=tuples[i][1]\n        for j in range(len(tuples)):\n            if i==j :\n                continue\n            else:\n                x_j=tuples[j][0]\n                y_j=tuples[j][1]\n                flag1= x==x_j or x==y_j\n                flag2= y==x_j or y==y_j\n                if flag1 or flag2:\n                    res[i][j]=1\n    return res\n\n\ndef parse_colors(dict,edges):\n    res=[]\n    temp={}\n    for i in dict:\n        ed=i\n        color=dict[ed]\n        if not color in temp:\n            temp[color]=[ed]\n        else:\n            temp[color].append(ed)\n    for i in temp:\n        res.append(temp[i])\n    temp=[]\n    for i in range(len(res)):\n        temp.append([])\n        for j in range(len(res[i])):\n            temp[i].append(edges[res[i][j]])\n    return temp\n\n\nmatrix=input_adjen()\nprint(matrix)\n\nTemp=numpy.matrix(matrix)\ngraph=nx.from_numpy_matrix(Temp)\nprint(graph.nodes())\nprint(graph.number_of_edges())\nli=graph.edges()\nprint(li)\n\nnew_graph=nx.from_numpy_matrix(numpy.matrix(build_new_graph(li,graph.number_of_edges())))\n\nprint(numpy.matrix(build_new_graph(li,graph.number_of_edges())))\n\ntest=nx.coloring.greedy_color(new_graph,strategy=nx.coloring.strategy_largest_first)\npos=nx.circular_layout(graph)\nprint(test)\nans=parse_colors(test,li)\nprint(ans)\nhromo_num=len(ans)\n\n\nlabels_of_nodes={}\nfor i in range(0,graph.number_of_nodes()):\n    labels_of_nodes[i]=(str(i+1))\nprint(labels_of_nodes)\nnx.draw_networkx_nodes(graph,pos,node_list=[0,1,2,3],node_color='g',node_size=500)\nnx.draw_networkx_labels(graph,pos,labels_of_nodes,font_size=15)\n\nfor i in range(len(ans)):\n    color='#{:06x}'.format(random.randint(0, 0xffff00))\n    nx.draw_networkx_edges(graph,pos,edgelist=ans[i],edge_color=color,edge_size=100,width=17)\nplt.show()\n","sub_path":"Дискр 8/new.py","file_name":"new.py","file_ext":"py","file_size_in_byte":2569,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"106322212","text":"import pygame, shared, tower, time, toolbox, constants, math\nenemy_array = pygame.sprite.Group()\nclass enemy(pygame.sprite.Sprite):\n\tdef __init__(self):\n\t\t'''Handles enemy properties and functions'''\n\t\t#Initialises pygame's built-in sprite class.\n\t\tpygame.sprite.Sprite.__init__(self)\n\t\tself.image = pygame.image.load('enemy.png').convert_alpha()\n\t\tself.rect = self.image.get_rect()\n\t\tself.speed = 1\n\t\tself.rect.topleft = (171, 9)\n\t\tself.directions = {\"x\":0, \"y\":0}\n\t\tself.direction = \"down\"\n\t\tself.collide = False\n\t\tself.max_health = 50.0 + (1.5 * shared.wave_number)\n\t\tself.health = self.max_health\n\t\tself.health_percentage = int(self.health/self.max_health*100)\n\t\tself.price = 0\n\t\tself.death_money_animation = None\n\t\tself.dead = False\n\n\t\t#When spawned, adds a time_spawned variable - used for turret targeting.\n\t\tself.time_spawned = time.clock()\n\t\tenemy_array.add(self)\n\tdef calculate_movement(self):\n\t\t'''Checks corner tile and applies appropriate direction to movement path'''\n\t\tfor i in range(len(shared.field.corners)):\n\t\t\tif pygame.Rect(shared.field.corners[i][1]).collidepoint(self.rect.center) == True:\n\t\t\t\tself.direction = shared.field.corners[i][2]\n\t\t\t\tself.directions[\"x\"], self.directions[\"y\"] = 0, 0\n\t\tif self.direction == 'down':\n\t\t\tself.directions[\"y\"] = self.speed\n\t\tif self.direction == 'up':\n\t\t\tself.directions[\"y\"] = -self.speed\n\t\tif self.direction == 'right':\n\t\t\tself.directions[\"x\"] = self.speed\n\t\tif self.direction == 'left':\n\t\t\tself.directions[\"x\"] = -self.speed\n\tdef display_coin(self, surface):\n\t\t'''Animates coin message after enemy dies'''\n\t\tif self.death_money_animation != None:\n\t\t\tdeath_coin, death_coin_rect = toolbox.message_to_screen(surface, \"$\" + str(self.price), constants.color[\"gold\"], self.rect.x, self.rect.y - 20 - self.death_money_animation)\n\t\t\tdeath_coin_rect.x = self.rect.x + self.rect.width/2 - death_coin_rect.width/2\n\t\t\tsurface.blit(death_coin, death_coin_rect)\n\t\t\tif self.death_money_animation > 20:\n\t\t\t\tenemy_array.remove(self)\n\tdef calculate_death(self):\n\t\t'''Checks if enemy is dead and handles it appropriately'''\n\t\tself.health_percentage = int(self.health/self.max_health*100)\n\t\tif self.health <= 0:\n\t\t\ttry:\n\t\t\t\tself.death_money_animation = 0\n\t\t\t\tself.dead = True\n\t\t\t\tshared.coin += self.price\n\t\t\texcept:\n\t\t\t\tpass\n\t\tif shared.field.home_base_rect.colliderect(self.rect) == True:\n\t\t\tenemy_array.remove(self)\n\t\t\tshared.lives_left -= 1\n\t\t\tif shared.lives_left <= 0:\n\t\t\t\t#GAME OVER!\n\t\t\t\tprint(\"Game Over. You lost. Make yourself a sandwich and flush yourself down the toilet.\")\n\tdef move(self):\n\t\t'''Moves enemy'''\n\t\tself.rect.x += self.directions['x']\n\t\tself.rect.y += self.directions[\"y\"]\n\tdef update(self, surface):\n\t\t'''Pieces all the methods together to create a functional enemy'''\n\t\tif self.dead == False:\n\t\t\tself.calculate_movement()\n\t\t\tself.calculate_death()\n\t\t\tself.move()\n\n\t\t\t#Displays enemy health (as a percentage)\n\t\t\ttext, text_rect = toolbox.message_to_screen(surface, str(self.health_percentage), constants.color[\"black\"], self.rect.x, self.rect.y -20)\n\t\t\tsurface.blit(text, text_rect)\n\n\t\t\t#Displays enemy\n\t\t\tsurface.blit(self.image, self.rect)\n\t\telse:\n\t\t\t#Animates the displayed coin\n\t\t\tif self.death_money_animation != None:\n\t\t\t\tself.death_money_animation += 1\n\t\t\tself.display_coin(surface)\n\t\t\n","sub_path":"enemy.py","file_name":"enemy.py","file_ext":"py","file_size_in_byte":3263,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"643480281","text":"# uncompyle6 version 3.7.4\n# Python bytecode 3.4 (3310)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: /home/nicolas/src/django-rest-framework-jwt/rest_framework_jwt/refreshtoken/authentication.py\n# Compiled at: 2015-11-14 11:42:20\n# Size of source mod 2**32: 1548 bytes\nfrom django.utils.translation import ugettext as _\nfrom rest_framework.authentication import TokenAuthentication, get_authorization_header\nfrom rest_framework import exceptions\nfrom rest_framework_jwt.refreshtoken.models import RefreshToken\n\nclass RefreshTokenAuthentication(TokenAuthentication):\n    __doc__ = '\\n    Subclassed from rest_framework.authentication.TokenAuthentication\\n\\n    Auth header:\\n    Authorization: RefreshToken 401f7ac837da42b97f613d789819ff93537bee6a\\n    '\n    model = RefreshToken\n\n    def authenticate(self, request):\n        auth = get_authorization_header(request).split()\n        if not auth or auth[0].lower() != b'refreshtoken':\n            return\n        if len(auth) == 1:\n            msg = _('Invalid token header. No credentials provided.')\n            raise exceptions.AuthenticationFailed(msg)\n        elif len(auth) > 2:\n            msg = _('Invalid token header. 
Token string should not contain spaces.')\n raise exceptions.AuthenticationFailed(msg)\n return self.authenticate_credentials(auth[1])\n\n def authenticate_credentials(self, key):\n try:\n token = self.model.objects.select_related('user').get(key=key)\n except self.model.DoesNotExist:\n raise exceptions.AuthenticationFailed(_('Invalid token.'))\n\n if not token.user.is_active:\n raise exceptions.AuthenticationFailed(_('User inactive or deleted.'))\n return (token.user, token)\n\n def authenticate_header(self, request):\n return 'RefreshToken'","sub_path":"pycfiles/djangorestframework_jwt_refresh_token-0.5-py2.py3-none-any/authentication.cpython-34.py","file_name":"authentication.cpython-34.py","file_ext":"py","file_size_in_byte":1861,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"322559019","text":"import torch\nimport torch.nn as nn\nimport numpy as np\nfrom scipy.stats import norm\nimport torch.nn.functional as F\n\ndef get_weights(flatten_image, o_w, o_h, oi=10, ox=4, radius=5):\n A = flatten_image.expand(len(flatten_image), len(flatten_image))\n AT = A.T\n B = A - AT\n W_F = torch.exp(torch.div(-1*(B **2), oi**2))\n\n x = torch.Tensor(range(o_w))\n y = torch.Tensor(range(o_h))\n \n if(torch.cuda.is_available()):\n x = x.cuda()\n y = y.cuda()\n\n Y_cord = x.repeat_interleave(o_h)\n X_cord = y.repeat(o_w)\n\n X_expand = X_cord.expand(len(X_cord), len(X_cord))\n Y_expand = Y_cord.expand(len(Y_cord), len(Y_cord))\n X_expandT = X_expand.T\n Y_expandT = Y_expand.T\n Xij = (X_expand - X_expandT)\n Yij = (Y_expand - Y_expandT)\n\n sq_distance_matrix = torch.hypot(Xij, Yij)\n mask = sq_distance_matrix.le(radius)\n\n C = torch.exp(torch.div(-1*(sq_distance_matrix **2), ox**2))\n W_X = torch.mul(mask, C)\n\n weights = torch.mul(W_F, W_X)\n\n return weights\n\n\ndef numerator(A, w):\n flatten_a = A.flatten()\n prob = torch.outer(flatten_a, flatten_a)\n a = torch.mul(w, prob)\n return torch.sum(a)\n\ndef denominator(A, w):\n flatten_a = A.flatten()\n prob = flatten_a.expand(len(flatten_a), len(flatten_a))\n a = torch.mul(w, prob)\n return torch.sum(a)\n\n\ndef soft_n_cut_loss(image, k, prob):\n soft_n_cut_loss = k\n\n image = torch.mean(image, dim=0)\n flatten_image = torch.flatten(image)\n weights = get_weights(flatten_image, image.shape[0], image.shape[1])\n\n for i in range(k):\n soft_n_cut_loss = soft_n_cut_loss - (numerator(prob[i,:,],weights)/denominator(prob[i,:,:],weights))\n\n return soft_n_cut_loss\n\n\ndef batch_soft_n_cut_loss(input, enc, k):\n loss = 0\n\n for i in range(input.shape[0]):\n loss += soft_n_cut_loss(input[i], k, enc[i])\n\n return loss / input.shape[0]\n","sub_path":"utils/org_soft_n_cut_loss.py","file_name":"org_soft_n_cut_loss.py","file_ext":"py","file_size_in_byte":1880,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"246099422","text":"from ..sage_helper import _within_sage, sage_method\n\nfrom .cuspCrossSection import CuspCrossSection\nfrom .exceptions import TiltInequalityNumericalVerifyError\n\nif _within_sage:\n from sage.rings.complex_interval_field import ComplexIntervalField\n\n_num_tries_canonize = 3\n\n@sage_method\ndef interval_verified_canonical_cell_decomposition(M, bits_prec = None):\n \"\"\"\n Given a cusped (possibly non-orientable) manifold M, return its canonical\n cell decomposition if it has tetrahedral cells and can be verified using\n interval arithmetics. 
Otherwise, raises an Exception.\n    \n    sage: from snappy import Manifold\n    sage: M = Manifold(\"m015\")\n    sage: interval_verified_canonical_cell_decomposition(M)\n    m015(0,0)\n\n    Has an octagonal canonical cell\n\n    sage: M = Manifold(\"m137\")\n    sage: interval_verified_canonical_cell_decomposition(M)\n    Traceback (most recent call last):\n    ...\n    TiltInequalityNumericalVerifyError: Numerical verifiaction that tilt is negative has failed: 0.?e-10 < 0\n    \n    Has a cubical canonical cell\n\n    sage: M = Manifold(\"m412\")\n    sage: interval_verified_canonical_cell_decomposition(M)\n    Traceback (most recent call last):\n    ...\n    TiltInequalityNumericalVerifyError: Numerical verifiaction that tilt is negative has failed: 0.?e-11 < 0\n    \n    \"\"\"\n\n    # Make a copy before canonizing\n    Mcopy = M.copy()\n\n    # Try to canonize\n    for i in range(_num_tries_canonize):\n        try:\n            Mcopy.canonize()\n            break\n        except:\n            # If the SnapPea kernel encounters an error, randomize.\n            Mcopy.randomize()\n\n    # Get verified shape intervals\n    shapes = Mcopy.tetrahedra_shapes('rect', intervals = True,\n                                     bits_prec = bits_prec)\n\n    # Compute cusp cross sections\n    c = CuspCrossSection(Mcopy, shapes)\n\n    # Use interval arithmetic to verify hyperbolicity\n    if bits_prec:\n        CIF = ComplexIntervalField(bits_prec)\n    else:\n        CIF = ComplexIntervalField()\n    c.check_logarithmic_edge_equations_and_positivity(CIF)\n    \n    # Normalize cusp area. This is not needed when there is only 1 cusp\n    if Mcopy.num_cusps() > 1:\n        c.normalize_cusps()\n\n    # Make sure all tilts are negative\n    for tilt in c.tilts():\n        if not (tilt < 0):\n            raise TiltInequalityNumericalVerifyError(tilt)\n\n    # Return M's canonized copy\n    return Mcopy\n","sub_path":"venv/Lib/site-packages/snappy-2.3.1-py2.7-win32.egg/snappy/verify/verifyCanonical.py","file_name":"verifyCanonical.py","file_ext":"py","file_size_in_byte":2398,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"629842540","text":"\nimport yaml\n\nclass Dict(dict):\n    __setattr__ = dict.__setitem__\n    __getattr__ = dict.__getitem__\n\ndef dict2obj(dictObj):\n    if not isinstance(dictObj, dict):\n        return dictObj\n    d = Dict()\n    for k, v in dictObj.items():\n        d[k] = dict2obj(v)\n    return d\n\ndef get_args():\n    \"\"\"Return the parameters parsed from the YAML config.\n    \"\"\"\n    f = open('../resources/param.yaml', 'r')\n    file_data = f.read()\n    f.close()\n\n    return dict2obj(yaml.safe_load(file_data))\n\n\n\nif __name__ == '__main__':\n    args = get_args()\n    print(args.model.n_state)","sub_path":"param_loader/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":528,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"444206164","text":"from django.shortcuts import render, get_object_or_404, redirect\nfrom django.db.models import Q, Count\nfrom django.views.generic import ListView, TemplateView\nfrom django.core.paginator import Paginator\nfrom django.http import HttpResponse\nfrom django.contrib.auth.decorators import login_required\nfrom taggit.models import Tag\n\nfrom .models import Post, Comment\nfrom .forms import CommentForm, UploadForm\nfrom .utils import check_duplicate, filter_tags\n\n\nclass IndexView(ListView):\n    model = Post\n    paginate_by = 3\n    results = model.objects.order_by('-published')\n    template_name = 'main/index.html'\n    \n    # In order to test the pagination, I'll set a post limit for each page. 
3 posts each page\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(results=self.results, **kwargs)\n paginator = Paginator(self.results, self.paginate_by)\n page = self.request.GET.get('page')\n page_obj = paginator.get_page(page)\n tags = filter_tags(page_obj)\n\n context['tag'] = tags\n return context\n\nclass PostView(ListView):\n model = Post\n paginate_by = 3\n results = model.objects.order_by('-published')\n template_name = 'main/index.html'\n \n # In order to test the pagination, i'll set a post limit for each page. 3 posts each page\n def get_queryset(self):\n self.q = self.request.GET.get('tags')\n if self.q:\n self.results = Post.objects.all()\n for tag in self.q.split(' '): \n self.results = self.results.filter(tags__name=tag)\n\n self.results = check_duplicate(self.results)\n\n return self.results\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(results=self.results, **kwargs)\n paginator = Paginator(self.results, self.paginate_by)\n page = self.request.GET.get('page')\n page_obj = paginator.get_page(page)\n tags = filter_tags(page_obj)\n\n context['tag'] = tags\n context['q'] = self.q\n return context\n\ndef DetailView(request, post_id):\n \n post = get_object_or_404(Post, post_id=post_id)\n tags = []\n for t in post.tags.all():\n tags.append(t)\n\n # Comment is ready. You only need to call it in the template\n comments = post.comments\n new_comment = None\n if request.method == 'POST':\n comment_form = CommentForm(data=request.POST)\n if comment_form.is_valid():\n new_comment = comment_form.save(commit=False)\n new_comment.post = post\n new_comment.save()\n comment_form = CommentForm()\n else:\n comment_form = CommentForm()\n detail = True\n\n return render(request, 'posts/post_detail.html', {'post':post, 'tag':tags, 'comments':comments, 'new_comment':\n new_comment, 'comment_form':comment_form, 'detail':detail})\n\ndef TagView(request, tags):\n q = request.GET.get('tags')\n tags = Tag.objects.filter(slug=tags).values_list('name', flat=True)\n posts = Post.objects.filter(tags__name__in=tags)\n paginator = Paginator(posts, 2)\n page_number = request.GET.get('page')\n page_obj = paginator.get_page(page_number)\n\n tag_name = filter_tags(page_obj)\n\n return render(request, 'posts/tag_specific.html', {'posts':posts, 'tag':tag_name, 'page_obj':page_obj, 'q':q})\n\n\n@login_required\ndef upload_view(request):\n uploaded = False\n template_name = 'posts/upload.html'\n if request.method == 'POST':\n upload_form = UploadForm(request.POST, request.FILES)\n if upload_form.is_valid():\n upload_form = upload_form.save()\n uploaded = True\n else:\n print(upload_form.errors)\n return HttpResponse('Invalid post details')\n else:\n upload_form = UploadForm()\n return render(request, template_name, {\n 'upload_form':upload_form,\n 'uploaded':uploaded,\n })\n","sub_path":"posts/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3948,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"416632356","text":"'''\nUnit tests for user_status.py\n'''\n# pylint: disable=W0621\nfrom unittest.mock import patch\nfrom pymongo.errors import DuplicateKeyError\nimport pytest\nimport user_status\n\n\n@pytest.fixture\ndef status():\n '''\n A sample status for testing\n '''\n return ('rando_001', 'rando', 'aljdfasj;')\n\n\n@pytest.fixture\ndef statuses():\n '''\n An empty StatusCollection\n '''\n with patch('user_status.sql'):\n faked = user_status.UserStatusCollection()\n return 
faked\n\n\n@pytest.fixture\ndef dict_keys():\n '''\n Keys for database interface\n '''\n return ('status_id', 'user_id', 'status_text')\n\n\ndef test_user_status_collection_init(dict_keys):\n '''\n Test creation of UserStatusCollection\n '''\n # pylint: disable=E1101\n with patch('user_status.sql') as mocker:\n result = user_status.UserStatusCollection()\n assert isinstance(result, user_status.UserStatusCollection)\n assert result.db_conn == mocker.DBConnection.return_value\n (result.db_conn.social['StatusTable']\n .create_index\n .called_with(dict_keys[0], unique=True))\n\n\ndef test_add_status(status, statuses, dict_keys):\n '''\n Test that add_status calls the database correctly\n '''\n status_create = {'status_id': status[0],\n 'user_id': status[1],\n 'status_text': status[2]}\n with patch('user_status.sql'):\n user_table = statuses.db_conn.social['UserTable']\n user_table.find_one.return_value = {dict_keys[1]: status[1]}\n assert statuses.add_status(*status)\n statuses.table.insert_one.assert_called_with(status_create)\n\n\ndef test_add_status_already_exists(status, statuses):\n '''\n Test duplication detection for add_status\n '''\n with patch('user_status.sql'):\n statuses.table.insert_one.side_effect = DuplicateKeyError(\"ERR_MSG\")\n assert not statuses.add_status(*status)\n\n\ndef test_add_status_user_dne(status, statuses, dict_keys):\n '''\n Test Forgein Key detection for add_status\n '''\n with patch('user_status.sql'):\n user_table = statuses.db_conn.__enter__().social['UserTable']\n user_table.find_one.return_value = None\n assert not statuses.add_status(*status)\n user_table.find_one.assert_called_with({dict_keys[1]: status[1]})\n\n\ndef test_modify_status(status, statuses, dict_keys):\n '''\n Test that modify_status calls the database correctly\n '''\n status_update = dict(zip(dict_keys, status))\n with patch('user_status.sql'):\n edit = {dict_keys[0]: status[0]}\n statuses.table.find_one.return_value = edit\n assert statuses.modify_status(*status)\n statuses.table.find_one.assert_called_with({dict_keys[0]: status[0]})\n (statuses.table.update_one.assert_called_with({dict_keys[0]: status[0]},\n {'$set': status_update}))\n\n\ndef test_modify_status_dne(status, statuses):\n '''\n Test error detection for modify_status\n '''\n with patch('user_status.sql'):\n statuses.table.find_one.return_value = None\n assert not statuses.modify_status(*status)\n\n\ndef test_delete_status(status, statuses):\n '''\n Test that delete_status calls the database correctly\n '''\n with patch('user_status.sql'):\n assert statuses.delete_status(status[0])\n statuses.table.delete_one.assert_called_with({'status_id': status[0]})\n\n\ndef test_delete_status_dne(status, statuses):\n '''\n Test error detection for delete_status\n '''\n with patch('user_status.sql'):\n statuses.table.delete_one.return_value.deleted_count = 0\n assert not statuses.delete_status(status[0])\n\n\ndef test_search_status(status, statuses, dict_keys):\n '''\n Test that search_status calls the database correctly\n '''\n with patch('user_status.sql'):\n search = dict(zip(dict_keys, status))\n statuses.table.find_one.return_value = search\n result = statuses.search_status(status[0])\n statuses.table.find_one.assert_called_with({dict_keys[0]: status[0]})\n assert result is search\n\n\ndef test_search_status_dne(status, statuses, dict_keys):\n '''\n Test error detection for search_status\n '''\n with patch('user_status.sql'):\n statuses.table.find_one.return_value = None\n result = statuses.search_status(status[0])\n assert result == 
dict(zip(dict_keys, (None, None, None)))\n\n\ndef test_search_all_status_updates(status, statuses):\n '''\n Test that search_all_status_updates calls the database correctly\n '''\n with patch('user_status.sql'):\n result = statuses.search_all_status_updates(status[1])\n find = statuses.table.find\n count = statuses.table.count_documents\n find.assert_called_with({'user_id': status[1]})\n assert result[0] is count.return_value\n assert result[1] is find.return_value\n\n\ndef test_filter_status_by_string(statuses):\n '''\n Test that filter_status_by_string calls the database correctly\n '''\n target_string = \"best\"\n with patch('user_status.sql'):\n result = statuses.filter_status_by_string(target_string)\n statuses.table.find.assert_called_with({'$text': {'$search': target_string}})\n assert result is statuses.table.find.return_value\n","sub_path":"Python/320/assignment 06/mongodb/test_user_status.py","file_name":"test_user_status.py","file_ext":"py","file_size_in_byte":5326,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"295234441","text":"from typing import List\n\nfrom vertex import Vertex\n\n\nclass Face:\n def __init__(self, v1: Vertex, v2: Vertex, v3: Vertex):\n self.v1 = v1\n self.v2 = v2\n self.v3 = v3\n\n def __eq__(self, f: 'Face') -> bool:\n if isinstance(f, Face):\n if self.normal() == f.normal():\n vertices = [f.v1, f.v2, f.v3]\n return self.v1 in vertices and self.v2 in vertices and self.v3 in vertices\n else:\n return False\n else:\n return False\n\n def __str__(self) -> str:\n return 'f ' + str(self.v1.id) + ' ' + str(self.v2.id) + ' ' + str(self.v3.id)\n\n def normal(self) -> Vertex:\n vec1 = self.v2 - self.v1\n vec2 = self.v3 - self.v1\n\n return vec1.normal(vec2)","sub_path":"metaballs/src/face.py","file_name":"face.py","file_ext":"py","file_size_in_byte":778,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"394388900","text":"import numpy as np\n\nfrom layers import (\n FullyConnectedLayer, ReLULayer,\n ConvolutionalLayer, MaxPoolingLayer, Flattener,\n softmax_with_cross_entropy, softmax\n)\n\n\nclass ConvNet:\n \"\"\"\n Implements a very simple conv net\n\n Input -> Conv[3x3] -> Relu -> Maxpool[4x4] ->\n Conv[3x3] -> Relu -> MaxPool[4x4] ->\n Flatten -> FC -> Softmax\n \"\"\"\n\n def __init__(self, input_shape, n_output_classes, conv1_channels, conv2_channels, filter_size=3):\n \"\"\"\n Initializes the neural network\n\n Arguments:\n input_shape, tuple of 3 ints - image_width, image_height, n_channels\n Will be equal to (32, 32, 3)\n n_output_classes, int - number of classes to predict\n conv1_channels, int - number of filters in the 1st conv layer\n conv2_channels, int - number of filters in the 2nd conv layer\n \"\"\"\n self.input_shape = input_shape\n self.n_output_classes = n_output_classes\n self.conv1_channels = conv1_channels\n self.conv2_channels = conv2_channels\n self.filter_size = filter_size\n self.padding = 1\n\n c1 = int((input_shape[0] - self.filter_size + 2 * self.padding) / 1) + 1\n mp1 = int((c1 - 4) / 4) + 1\n c2 = int((mp1 - self.filter_size + 2 * self.padding) / 1) + 1\n self.size_after_2maxpool = int((c2 - 4) / 4) + 1\n\n self.RL1 = ReLULayer()\n self.RL2 = ReLULayer()\n self.MaxPool1 = MaxPoolingLayer(pool_size=4, stride=4)\n self.MaxPool2 = MaxPoolingLayer(pool_size=4, stride=4)\n self.Flatten = Flattener()\n self.Conv1 = ConvolutionalLayer(in_channels=self.input_shape[-1], out_channels=conv1_channels,\n filter_size=self.filter_size, 
padding=self.padding)\n self.Conv2 = ConvolutionalLayer(in_channels=conv1_channels, out_channels=conv2_channels,\n filter_size=self.filter_size, padding=self.padding)\n self.FC = FullyConnectedLayer(n_input=conv2_channels * self.size_after_2maxpool ** 2,\n n_output=self.n_output_classes)\n\n def compute_loss_and_gradients(self, X, y):\n \"\"\"\n Computes total loss and updates parameter gradients\n on a batch of training examples\n\n Arguments:\n X, np array (batch_size, height, width, input_features) - input data\n y, np array of int (batch_size) - classes\n \"\"\"\n # Before running forward and backward pass through the model,\n # clear parameter gradients aggregated from the previous pass\n for param in self.params().values():\n param.grad = np.zeros_like(param.value)\n\n # self.Conv1.W.grad = np.zeros_like(self.Conv1.W.grad)\n # self.Conv1.B.grad = np.zeros_like(self.Conv1.B.grad)\n # self.Conv2.W.grad = np.zeros_like(self.Conv2.W.grad)\n # self.Conv2.B.grad = np.zeros_like(self.Conv2.B.grad)\n # self.FC.W.grad = np.zeros_like(self.FC.W.grad)\n # self.FC.B.grad = np.zeros_like(self.FC.B.grad)\n\n # Input -> Conv[3\n # x3] -> Relu -> Maxpool[4\n # x4] ->\n # Conv[3\n # x3] -> Relu -> MaxPool[4\n # x4] ->\n # Flatten -> FC -> Softmax\n\n x = self.Conv1.forward(X)\n x = self.RL1.forward(x)\n x = self.MaxPool1.forward(x)\n x = self.Conv2.forward(x)\n x = self.RL2.forward(x)\n x = self.MaxPool2.forward(x)\n x = self.Flatten.forward(x)\n pred = self.FC.forward(x)\n\n loss, dpred = softmax_with_cross_entropy(pred, target_index=y)\n\n d_out = self.FC.backward(dpred)\n d_out = self.Flatten.backward(d_out)\n d_out = self.MaxPool2.backward(d_out)\n d_out = self.RL2.backward(d_out)\n d_out = self.Conv2.backward(d_out)\n d_out = self.MaxPool1.backward(d_out)\n d_out = self.RL1.backward(d_out)\n _ = self.Conv1.backward(d_out)\n\n # param_ = self.Conv1.W\n # before_opt = param_.value[:2, :2]\n # print(f\"PREDICT stage Conv1_W value: \\n {before_opt} \\n\")\n # print(f\"PREDICT stage Conv1_dW: \\n {param_.grad[:2, :2]} \\n\")\n ## !! 
do not update params\n # print(f'SHAPE fc1: \\n {np.sum(self.FC1.W.grad)}')\n # print(f'SHAPE fc2: \\n {np.sum(self.FC2.W.grad)}')\n\n # result = {'fc1_w': self.FC1.W.grad,\n # 'fc1_b': self.FC1.B.grad,\n # 'fc2_w': self.FC2.W.grad,\n # 'fc2_b': self.FC2.B.grad}\n return loss\n\n def predict(self, X):\n # You can probably copy the code from previous assignment\n x = self.Conv1.forward(X)\n x = self.RL1.forward(x)\n x = self.MaxPool1.forward(x)\n x = self.Conv2.forward(x)\n x = self.RL2.forward(x)\n x = self.MaxPool2.forward(x)\n x = self.Flatten.forward(x)\n x = self.FC.forward(x)\n\n y_hat = softmax(predictions=x)\n y_hat = np.argmax(y_hat, axis=1)\n return y_hat\n\n def params(self):\n result = {'Conv1.W': self.Conv1.W,\n 'Conv1.B': self.Conv1.B,\n 'Conv2.W': self.Conv2.W,\n 'Conv2.B': self.Conv2.B,\n 'FC.W': self.FC.W,\n 'FC.B': self.FC.B\n }\n return result\n","sub_path":"assignments/assignment3/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":5309,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"512371082","text":"import numpy as np\nimport tensorflow as tf\n\n\n# the activation function\ndef g_1(x):\n assert len(x.shape) == 1\n rand = tf.random_uniform([x.shape.as_list()[0]], dtype=tf.float32)\n t = tf.nn.sigmoid(x) - rand\n return 0.5*(1 + t / (tf.abs(t) + 1e-8))\n\n\ndef g_2(x):\n return tf.nn.sigmoid(x)\n\n\ndef g(x):\n return tf.nn.leaky_relu(x)\n\n\ndef merge(inputs, weights):\n assert len(inputs.shape)==1\n assert len(weights.shape)==2\n inputs = tf.reshape(inputs, [inputs.shape.as_list()[0], 1])\n return tf.reshape(tf.matmul(weights, inputs), [weights.shape.as_list()[0]])\n\n\ndef rand_init(sizes):\n assert len(sizes)<=2\n if len(sizes)==0:\n return np.float32(np.random.rand())\n elif len(sizes)==1:\n return np.float32(np.random.rand(sizes[0]))\n elif len(sizes)==2:\n return np.float32(np.random.rand(sizes[0], sizes[1]))\n else:\n assert False\n\n\nclass RealNN(object):\n def __init__(self, feats):\n # generate weight variables\n self.weights = []\n self.biases = []\n self.in_dim = feats[0]\n self.inputs = tf.placeholder(shape=[self.in_dim], dtype=tf.float32)\n self.layers = [self.inputs]\n self.before_act = []\n self.alpha = 0.0\n self.reg = None\n self.opt = None\n self.loss = None\n self.minimizer = None\n self.sess = None\n for i in range(1, len(feats)):\n w = tf.get_variable(initializer=rand_init([feats[i], feats[i-1]]), name='L%dW' % i)\n self.weights.append(w)\n b = tf.get_variable(initializer=rand_init([feats[i]]), name='L%dB' % i)\n self.biases.append(b)\n if i==len(feats)-1:\n self.layers.append(merge(self.layers[-1], w)+b)\n else:\n self.before_act.append(merge(self.layers[-1], w)+b)\n self.layers.append(g(self.before_act[-1]))\n self.out_dim = feats[-1]\n self.outputs = self.layers[-1]\n self.truth = tf.placeholder(shape=[self.out_dim], dtype=tf.float32)\n \n def train(self, x, y, max_iter):\n self.opt = tf.train.GradientDescentOptimizer(learning_rate=1e-2)\n self.loss = tf.reduce_mean(tf.abs(self.truth - self.outputs))\n self.reg = 0.00\n for i in range(len(self.before_act)):\n self.reg = self.reg + tf.reduce_mean(tf.maximum(tf.abs(self.before_act[i])-3.0, 0))\n self.reg = self.reg / len(self.before_act)\n self.minimizer = self.opt.minimize((1-self.alpha)*self.loss + self.alpha*self.reg)\n self.sess = tf.Session()\n self.sess.run(tf.global_variables_initializer())\n saver = tf.train.Saver(tf.global_variables())\n _cnt = 0\n while _cnt < max_iter:\n ind = np.random.randint(0, len(x), [])\n 
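            # added note: this loop is plain SGD with batch size 1 -- each iteration draws one random (x, y) pair and applies a single optimizer step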
            _, _loss, _reg, _output = self.sess.run(\n                [self.minimizer, self.loss, self.reg, self.layers[-1]],\n                feed_dict={\n                    self.inputs: x[ind],\n                    self.truth: y[ind]\n                })\n            print('ITR# %d\\t LOSS=%.6f REG=%.6f' % (_cnt, _loss, _reg))\n            #print(_output)\n            _cnt += 1\n        saver.save(self.sess, 'models/model.ckpt')\n        print('model saved to path: models/....')\n\n    def infer(self, x):\n        return None\n\n\ndef int2bins(x):\n    x = np.uint8(x)\n    op = 0b10000000\n    bins = np.array([0.0] * 8)\n    for i in range(8):\n        if op & x == op:\n            bins[i]=1\n        else:\n            bins[i]=0\n        op = op >> 1\n    return bins\n\n\ndef concat(bins_1, bins_2):\n    return np.concatenate((bins_1, bins_2), axis=0)\n\n\ndef observe(size):\n    x = np.random.randint(0,256,[size,2])\n    _x = np.zeros([size, 16], dtype=np.float32)\n    _y = np.zeros([size, 2], dtype=np.float32)\n    for i in range(size):\n        _x[i] = concat(int2bins(x[i,0]), int2bins(x[i,1]))\n        if x[i,0] > x[i, 1]:\n            _y[i, 0] = 0\n            _y[i, 1] = 1\n        elif x[i, 0] <= x[i, 1]:\n            _y[i, 0] = 1\n            _y[i, 1] = 0\n        else:\n            _y[i, 0] = 1\n            _y[i, 1] = 1\n    return _x, _y\n\n\ndef check_acc(y, y_i):\n    _score = 0.0\n    for i in range(y.shape[0]):\n        if y[i,0]==y_i[i,0] and y[i,1]==y_i[i,1]:\n            _score += 1\n    return _score / y.shape[0]\n\n\nif __name__ == '__main__':\n    nn = RealNN([16, 32, 16, 8, 2])\n    x, y = observe(10)\n    print(x)\n    print(y)\n    nn.train(x, y, 100000)\n    #x, y = observe(10)\n    #y_i = nn.infer(x)\n    #check_acc(y_i, y)\n\n","sub_path":"failed-trials/real_nn.py","file_name":"real_nn.py","file_ext":"py","file_size_in_byte":4504,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"253991176","text":"from django.shortcuts import render\nfrom django.http import HttpResponse,HttpResponseRedirect\nfrom .models import Song\nfrom .forms import SongForm,SongForm1\nfrom django.utils import timezone\nfrom django.core.paginator import Paginator,EmptyPage,PageNotAnInteger\n# Create your views here.\n\n\n\ndef detail(request,pk):\n    song = Song.objects.get(pk=pk)\n    return render(request, 'mymusic/detail.html',{'song':song })\n\ndef song_list(request,pagesize=30):\n    qs = Song.objects.all()\n\n    page = request.GET.get('page')\n    song_name = request.GET.get('song_name')\n    singer_name = request.GET.get('singer_name')\n\n    # build the query conditions\n    if song_name:\n        qs = qs.filter(song_name__icontains=song_name)\n    if singer_name:\n        qs = qs.filter(singer_name__icontains=singer_name)\n    # sort conditions\n    qs = qs.order_by('-update_time')\n    paginator = Paginator(qs,pagesize)\n    try:\n        songs = paginator.page(page)\n    except PageNotAnInteger:\n        songs = paginator.page(1)\n    except EmptyPage:\n        songs = paginator.page(paginator.num_pages)\n\n    return render(request,'mymusic/s_list.html',{'s_list':songs ,'song_name':song_name,'singer_name':singer_name})\n\ndef song_add(request):\n    if request.method == 'POST':\n        form = SongForm1(request.POST)\n        \n        if form.is_valid():\n            \n            song = form.save(commit = False)\n            now = timezone.now()\n            song.create_time = now\n            song.update_time = now\n            song.save()\n            \n            return HttpResponseRedirect('/mymusic/')\n        \n    else:\n        form = SongForm1()\n    \n    return render(request,'mymusic/song_add.html',{'form':form})\n\ndef song_delete(request, pk):\n    Song.objects.filter(pk=pk).delete()\n    return HttpResponseRedirect('/mymusic/')\n\ndef song_edit(request, pk):\n    song_origin = Song.objects.get(pk=pk)\n    \n    if request.method == 'POST':\n        form = SongForm1(request.POST,instance = song_origin)\n        if form.is_valid():\n            song = form.save(commit = False)\n            \n            now = timezone.now()\n            song.update_time = now\n
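            # added note: persist the edit; only update_time is refreshed here -- create_time keeps its original value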
song.save() \n return HttpResponseRedirect('/mymusic/') \n else:\n form = SongForm1(instance = song_origin)\n \n return render(request,'mymusic/song_add.html',{'form':form})\n \n \n ","sub_path":"mymusic/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2423,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"519668769","text":"import os\nfrom utils import Scheduler\nimport pandas as pd\nimport re\nfrom urllib.parse import quote, unquote, urlencode\n\n\nclass MyScheduler(Scheduler):\n\n def set_cookie(self):\n \"\"\"set cookie first time for project if needed\"\"\"\n return\n\n def add_cookie(self):\n \"\"\"add cookie when cookie collection's empty\"\"\"\n return\n\n def set_url(self):\n url = 'https://search.jd.com/Search?'\n keywords = '电脑'\n page_count = 20\n params = [{\n 'keyword': keywords,\n 'wq': keywords,\n 'page': 1+2*page,\n 's': 1+60*page\n } for page in range(page_count)]\n\n tasks = [{\n 'uid': i,\n 'url': url + urlencode(p),\n 'finish': 0,\n 'formdata': p\n } for i, p in enumerate(params)]\n self.task_manager.set_task(tasks)\n\n\nif __name__ == '__main__':\n ms = MyScheduler('test')\n ms.reset()\n ms.set_url()\n","sub_path":"jditem/jditem/run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":955,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"188419770","text":"from django.contrib.auth import get_user_model\n\nfrom channels.db import database_sync_to_async\n\nfrom rt_chat_app.models import (\n Participant, \n ChatRoom, \n Role, \n Group, \n Message, \n Notification\n)\n\nUser = get_user_model()\n\n@database_sync_to_async\ndef get_user(self, user_id):\n return User.objects.get(id=int(user_id))\n\n@database_sync_to_async\ndef find_participant(self, user_id):\n try: \n user = User.objects.filter(id=int(user_id))\n if len(user) > 0:\n participant = Participant.objects.get(user=user[0])\n return participant\n\n except Exception as e:\n print(e)\n\n@database_sync_to_async\ndef update_channel_name(self, user_id, channel_name):\n user = User.objects.get(id=int(user_id))\n participant = Participant.objects.filter(user=user)\n\n if len(participant) > 0:\n if participant[0].channel_name != channel_name:\n participant[0].channel_name = channel_name\n participant[0].save()\n\n@database_sync_to_async\ndef create_participant(self, user_id, room_code, channel_name, group_name):\n print(\"create_participant is running right now.\")\n\n user = User.objects.get(id=int(user_id))\n host = ChatRoom.objects.filter(host=user, room_code=room_code)\n\n participant = Participant.objects.filter(user=user)\n # group = Group.objects.get(group_name=group_name)\n add_participant = None\n\n # fix .......\n is_admin = Role.objects.get(id=1)\n is_participant = Role.objects.get(id=2)\n\n if not participant.exists():\n create_participant = None\n\n if len(host) > 0:\n create_participant = Participant(user=user, role=is_admin, channel_name=channel_name)\n else:\n create_participant = Participant(user=user, role=is_participant, channel_name=channel_name)\n \n create_participant.save()\n \n else:\n role = is_admin if (len(host) > 0) else is_participant\n\n participant.update(\n channel_name=channel_name,\n role=role\n )\n\n@database_sync_to_async\ndef add_user_to_lobby(self, room_name, user_id):\n try:\n user = User.objects.get(id=user_id)\n chat_room = ChatRoom.objects.get(room_name=room_name)\n chat_room.participants.add(user)\n\n except Exception as e:\n 
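        # added note: lookup failures (missing user or chat room) are only printed, then swallowed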
print(e)\n\n@database_sync_to_async\ndef create_group(self, group_name):\n check_group = Group.objects.filter(group_name=group_name)\n\n if not check_group.exists():\n create_group = Group(group_name=group_name)\n create_group.save()\n\n get_new_group = Group.objects.get(group_name=group_name)\n # get_new_group.participants.add(host)\n\n@database_sync_to_async\ndef remove_participant(self, user_id):\n user = User.objects.get(id=int(user_id))\n participant = Participant.objects.get(user=user)\n participant.delete()\n\n@database_sync_to_async\ndef saving_message(self, user_id, message, room):\n print(room)\n user = User.objects.get(id=int(user_id))\n participant = Participant.objects.get(user=user)\n message = Message(author=participant, message=message, room=room)\n message.save()\n\n@database_sync_to_async\ndef get_latest_message(self):\n return Message.objects.last()\n\n@database_sync_to_async\ndef get_room(self, room_code):\n return ChatRoom.objects.get(room_code=room_code)\n\n@database_sync_to_async\ndef adding_participant_to_room(self, room_code, participant_id):\n print(\"Participant join the room\")\n room = ChatRoom.objects.get(room_code=room_code)\n participant = Participant.objects.get(id=participant_id)\n room.participants.add(participant)\n \n@database_sync_to_async\ndef pending_request(self, sender, receiver, room, message):\n pending_notification = Notification(\n room=room, \n sender=sender, \n receiver=receiver,\n message=message \n )\n\n pending_notification.save()\n\n return pending_notification\n\n@database_sync_to_async\ndef finding_notification(self, notification):\n return Notification.objects.get(id=notification.get(\"id\", None))\n\n@database_sync_to_async\ndef removing_notification(self, notification):\n removing_notification = Notification.objects.get(id=notification.get(\"id\", None)).delete()\n\n@database_sync_to_async\ndef is_user_connection(self, user_id, group_dict, channel_name):\n user = User.objects.get(id=user_id)\n participant = Participant.objects.get(user=user)\n\n group_lists = list(group_dict.keys())\n\n if group_lists:\n for i in range(len(group_lists)):\n channel_lists = list(group_dict[group_lists[i]].keys())\n\n if channel_name in channel_lists:\n return True\n\n return False","sub_path":"chat_app/rt_chat_app/consumer_data_handler/_database_handler.py","file_name":"_database_handler.py","file_ext":"py","file_size_in_byte":4655,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"519348154","text":"\"\"\"\nAuthor: Alan Danque\nPurpose: Py Email Alert Sender\nDate: 20210331\nusage: \t\tpy -m E:\\pyAlerts\\pyAlert \"TEST EMAIL SUBJECT TEXT\" \"TEST MESSAGE TEXT\" \"sender_email_addres_ie_adanque@eqr.com\" \"EMAIL_GROUP_NAME\"\n\"\"\"\nimport os\nimport smtplib\nimport email\nfrom email.mime.multipart import MIMEMultipart\nfrom email.mime.text import MIMEText\nimport sys\nimport time\nfrom datetime import date, datetime, timedelta\nimport yaml\nfrom yaml import load, dump\nfrom pathlib import Path\nimport pyodbc \nimport contextlib\n\nif __name__ == '__main__':\n\n def send(sender, to, server, subject='None', body='None'):\n errors={}\n error_out={}\n err = 0\n torecipients = []\n ccrecipients = []\n bccrecipients = []\n errors['starttime'] = str(datetime.now())\n message = email.message.Message()\n loopEmlist = to.split(\",\")\n for e in loopEmlist:\n emout = \"\"\n if \"to:\" in e:\n emout = e.replace('to:','')\n torecipients.append(emout)\n if \"cc:\" in e and \"bcc:\" not in e:\n emout = 
e.replace('cc:','')\n ccrecipients.append(emout)\n if \"bcc:\" in e:\n emout = e.replace('bcc:','')\n bccrecipients.append(emout)\n\n torecipients_str = \", \".join(torecipients)\n ccrecipients_str = \", \".join(ccrecipients)\n bccrecipients_str = \", \".join(bccrecipients)\n\n print(\"loopEmlist\")\n print(type(loopEmlist))\n print(loopEmlist)\n\n print(\"torecipients_str\")\n print(torecipients_str)\n print(type(torecipients_str))\n\n print(\"ccrecipients_str\")\n print(ccrecipients_str)\n print(\"bccrecipients_str\")\n print(bccrecipients_str)\n \n message['To'] = torecipients_str\n message['Cc'] = ccrecipients_str\n message['Bcc'] = bccrecipients_str\n message['From'] = sender\n message['Subject'] = subject\n message.set_payload(body)\n #server = smtplib.SMTP(server)\n try:\n server = smtplib.SMTP(server)\n server.sendmail(sender, (torecipients+ccrecipients+bccrecipients), message.as_string())\n except Exception as e:\n errval = \"Email Send: Exception! Err: \"+str(e)\n errors['ExecutionStatus'] = errval\n print(e)\n print(errval) \n #server.quit() \n return errors # Email Error \n else:\n #server.quit() \n return 3 # Email Success\n\n def executeSQL(sqlserver, sqldatabase, emailgroup, critical_type, subj, msg, sender, RetSQLAlertCode, email_mid=\"\", status_id=\"\", ret_msg=\"\", sqlcmd_type=\"\"):\n errors={}\n error_out={}\n err = 0\n errors['starttime'] = str(datetime.now())\n punc = \"\"\"!()-[]{};:'\"\\, <>./?@#$%^&*_~\"\"\"\n for ele in msg:\n if ele in punc:\n msg = msg.replace(ele, \"\") \n if sqlcmd_type == \"uP_prepEmail\":\n sqlcmd = \"declare @OUTVAL varchar(max) exec pyAlerts..[uP_prepEmail] @EMAILGROUP = '{}',@subj = '{}',@msg = '{}',@sender = '{}',@critical_type = {},@OUTVAL = @OUTVAL OUTPUT select @OUTVAL \".format(emailgroup, subj, msg, sender, critical_type)\n elif sqlcmd_type == \"uP_updtEmailLog\":\n sqlcmd = \" exec pyAlerts..uP_updtEmailLog @EMAIL_MID = {}, @STATUS_ID = {}, @ret_msg = '{}'\".format(email_mid, status_id, ret_msg)\n else: \n print(\"Invalid sqlcmd_type\")\n # Test Connection Before Execution.\n try:\n contextlib.closing(pyodbc.connect(\n 'Driver={SQL Server};'\n 'Server='+ sqlserver + ';'\n 'Database=' + sqldatabase + ';'\n 'Trusted_Connection=yes;'\n )) \n except Exception as e:\n err+=1\n ts = str(datetime.now())\n errval = \"Cmd: ODBC Connection Test Failed! 
Err: \"+str(e)\n errors['ExecutionStatus'] = errval\n print(e)\n print(errval)\n\n else:\n with contextlib.closing(pyodbc.connect(\n 'Driver={SQL Server};'\n 'Server='+ sqlserver + ';'\n 'Database=' + sqldatabase + ';'\n 'Trusted_Connection=yes;'\n )) as conn1:\n with contextlib.closing(conn1.cursor()) as cursor:\n #print(sqlcmd.format(table_name, dst_path))\n conn1.timeout = 500\n \"\"\"\n if refresh == 1:\n try:\n cursor.execute(sqlcmd)\n RetSQLAlertCode = cursor.fetchone()[0]\n except Exception as e:\n err+=1\n ts = str(datetime.now())\n errval = \"Cmd: \"+ sqlcmd +\" Err: \"+str(e)\n errors['ExecutionStatus'] = errval\n \"\"\"\n\n try:\n cursor.execute(sqlcmd)\n RetSQLAlertCode = cursor.fetchval() \n\n except Exception as e:\n err+=1\n ts = str(datetime.now())\n errval = \"Cmd: \"+ sqlcmd +\" Err: \"+str(e)\n errors['ExecutionStatus'] = errval\n else:\n ts = str(datetime.now())\n rowsaffected = cursor.rowcount\n errval =\"Rows Loaded: \" + str(rowsaffected)\n print(errval)\n errors['ExecutionStatus'] = errval\n #RetSQLAlertCode = cursor.fetchall() #RetSQLAlertCode = cursor.fetchone()[0]\n #print(\"1st RetSQLAlertCode\")\n #print(RetSQLAlertCode)\n\n conn1.commit()\n if err > 0:\n # Execution Logging\n error_out = errors\n ts = str(datetime.now())\n error_out['status'] = \"Exception! Duration: %s seconds ---\" % (time.time() - start_time) + \" Completed at: \"+ ts\n error_out['sql_server'] = sqlserver\n error_out['sql_database'] = sqldatabase\n error_out['sql_cmd'] = sqlcmd\n error_out['endtime'] = str(datetime.now())\n print(error_out)\n return(error_out)\n else:\n ts = str(datetime.now())\n error_out = errors\n error_out['status'] = \"Success! Duration: %s seconds ---\" % (time.time() - start_time) + \" Completed at: \"+ ts\n return(RetSQLAlertCode)\n\n start_time = time.time()\n mypath = \"E://pyAlerts\"\n #mypath = sys.path[0] #\"E://pyAlerts\"\n base_dir = Path(mypath)\n config_dir = base_dir.joinpath(\"Config\") \n filename = 'config.yaml'\n ymlfile = config_dir.joinpath(filename) \n # Read YAML Config\n with open(ymlfile, 'r') as stream:\n try: \n cfg = yaml.safe_load(stream)\n venvpath = cfg[\"pyAlertsCfg\"].get(\"venvpath\") \n ca_certs = cfg[\"pyAlertsCfg\"].get(\"ca_certs\") \n SMTPSERVER = cfg[\"pyAlertsCfg\"].get(\"smtpserver\") \n sqlserver = cfg[\"pyAlertsCfg\"].get(\"sqlserver\") \n sqldatabase = cfg[\"pyAlertsCfg\"].get(\"sqldatabase\") \n sender = cfg[\"pyAlertsCfg\"].get(\"sender\") \n warntext = cfg[\"pyAlertsCfg\"].get(\"warntext\") \n errortext = cfg[\"pyAlertsCfg\"].get(\"errortext\") \n defaultexceptionnotifier = cfg[\"pyAlertsCfg\"].get(\"defaultexceptionnotifier\") \n pyserver = cfg[\"pyAlertsCfg\"].get(\"pyserver\") \n except yaml.YAMLError as exc:\n print(exc)\n server = SMTPSERVER \n\n subj = sys.argv[1]\n msg = sys.argv[2]\n sender = sys.argv[3]\n emailgroup = sys.argv[4]\n RetSQLAlertCode = \"\"\n\n #EVALUATE MESSAGE FOR CRITICALITY\n errortext = errortext.split(\"~\")\n warntext= warntext.split(\"~\")\n #CHECK SUBJECT STRING\n ERRSUBJCHECK = [t for t in errortext if(t in subj.lower())]\n ERRSUBJCHK = len(ERRSUBJCHECK)\n WARNSUBJCHECK = [t for t in warntext if(t in subj.lower())]\n WARNSUBJCHK = len(WARNSUBJCHECK)\n #CHECK MESSAGE STRING\n ERRMSGCHECK = [t for t in errortext if(t in msg.lower())]\n ERRMSGCHK = len(ERRMSGCHECK)\n WARNMSGCHECK = [t for t in warntext if(t in msg.lower())]\n WARNMSGCHK = len(WARNMSGCHECK)\n critical_type = 0\n if ERRSUBJCHK != 0 or ERRMSGCHK != 0:\n critical_type = 1\n elif (WARNSUBJCHK != 0 or WARNMSGCHK != 0) and ERRSUBJCHK 
== 0 and ERRMSGCHK == 0:\n critical_type = 2\n else: \n critical_type = 4\n\n # Prepare Email Log Record\n sqlcmd_type = 'uP_prepEmail'\n email_mid=\"\"\n status_id=\"\"\n ret_msg=\"\"\n email_recipient_cfg = executeSQL(sqlserver, sqldatabase, emailgroup, critical_type, subj, msg, sender, RetSQLAlertCode, email_mid, status_id, ret_msg, sqlcmd_type)\n print(\"uP_prepEmail Status:\")\n # Verify the uP_prepEmail log entry had issues. Attempt to send email if there are issues\n checkexception = \"Exception! Duration\"\n if email_recipient_cfg in checkexception:\n print(\"Having issues connecting to SQL! Attempt to email alert\")\n subj = \"pyAlert is having issues connecting to SQL while attempting to email alert\"\n message = \"Please review pyAlert Framework Config on server:\"+ pyserver\n attemptemail = send(sender, defaultexceptionnotifier, server=server, subject=subj, body=message)\n else:\n print(\"Obtained recipient config: \" + email_recipient_cfg)\n print(\"Email Prep Complete: --- %s seconds ---\" % (time.time() - start_time) ) \n\n # Parse email recipients\n parserestext = email_recipient_cfg.split(\"|\")\n emailaddr = parserestext[0]\n sendtypesbyrecipient = parserestext[1]\n msgid = parserestext[3]\n to = emailaddr.replace('|',',') \n message = \"\"\"\\\n From: %s\n To: %s\n Subject: %s\n \n %s\n \"\"\" % (sender, to, subj, msg)\n attemptemail = send(sender, to, server=server, subject=subj, body=message)\n print(attemptemail)\n if attemptemail == 3:\n print(\"Email Sent with Success!\") \n ret_msg=\"Email Relay Sent Duration: %s secs \" % (time.time() - start_time)\n else:\n print(\"Email Failed to Send!\") \n ret_msg=str(attemptemail).replace(\"'\",\"\") \n print(ret_msg) \n attemptemail = 4\n print(\"Email Send Attempt Complete: --- %s seconds ---\" % (time.time() - start_time) )\n\n # Update Email Log History with success or any email issues\n sqlcmd_type = 'uP_updtEmailLog'\n email_mid=msgid\n status_id=attemptemail\n logupdate = executeSQL(sqlserver, sqldatabase, emailgroup, critical_type, subj, msg, sender, RetSQLAlertCode, email_mid, status_id, ret_msg, sqlcmd_type) \n print(\"uP_updtEmailLog Status:\")\n print(logupdate)\n\n print(\"Update Complete: --- %s seconds passed ---\" % (time.time() - start_time) )\n","sub_path":"pyAlert/pyAlertCmdLine.py","file_name":"pyAlertCmdLine.py","file_ext":"py","file_size_in_byte":11036,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"334792180","text":"from strategy import Strategy\n\n\nclass StrategyMinimaxPrune(Strategy):\n ''' Interface to suggest the best possible moves.\n '''\n\n def suggest_move(self, state):\n ''' (GameState) -> Move\n\n Return the best possible move available in state.\n\n Overrides Strategy.suggest_move\n '''\n\n move_dict = gen_move_dict(state)\n possible_moves = state.possible_next_moves()\n if 1.0 in move_dict.values():\n for move_key in move_dict:\n if move_dict[move_key] == 1.0:\n move = possible_moves[move_key]\n return move\n elif 0.0 in move_dict.values():\n for move_key in move_dict:\n if move_dict[move_key] == 0.0:\n move = possible_moves[move_key]\n return move\n move = possible_moves[move_dict.popitem()[0]]\n return move\n\n\ndef score(state):\n ''' (GameState) -> tup of (float, float)\n\n Precondition: state.over\n\n Return a two item tuple representing the score of next_player and\n state.opponent() respectively.\n '''\n\n outcome = state.outcome()\n next_player = state.next_player\n outcome_tup = (outcome, outcome * -1.0) if next_player == 
'p1' else\\\n (outcome * -1.0, outcome)\n return outcome_tup\n\n\ndef gen_move_dict(state, best_score=-1.0):\n ''' (GameState, str, float) -> dict of {obj: float}\n\n Return a dictionary with the indices of each move in\n state.possible_next_moves() as keys and the score of next_player\n after applying the corresponding move. best_score represents the best\n score for next_player seen so far (-1.0 by default).\n\n '''\n\n possible_moves = state.possible_next_moves()\n player_int = 0 if state.next_player == 'p1' else 1\n\n if not possible_moves:\n return {None: score(state)[player_int]}\n\n else:\n move_dict = {}\n possible_move_length = len(possible_moves)\n move_index = 0\n while (move_index in range(possible_move_length) and\n 1.0 not in move_dict.values()):\n\n applied_state = state.apply_move(possible_moves[move_index])\n applied_state_moves = applied_state.possible_next_moves()\n if applied_state_moves:\n temp_dict = {}\n n = 0\n move = 0\n while move in range(len(applied_state_moves)) and n == 0:\n\n # Look another move ahead\n further_state = applied_state.apply_move(\n applied_state_moves[move])\n further_dict = gen_move_dict(further_state, best_score)\n\n # max(scores) represents the score of the opponent after\n # next_player plays a move. So -max(scores) represents\n # the score of next_player before the move is played.\n\n # Ternary if is to ensure that move_dict_value doesn't get\n # set to -0.0\n next_player_score = -max(further_dict.values()) \\\n if max(further_dict.values()) != 0.0 \\\n else 0.0\n\n # Decide if we can stop looking in this applied state\n n = 1 if next_player_score >= (-1 * best_score) else 0\n\n # This is the dictionary that would be returned if\n # next_player called this function.\n\n # Ensure that this move is never picked if we already have\n # a move of better or the same value.\n temp_dict[move] = next_player_score if n == 0 else 1.0\n move += 1\n else:\n temp_dict = gen_move_dict(applied_state, best_score)\n scores = temp_dict.values()\n move_dict_value = -max(scores) if max(scores) != 0.0 else 0.0\n best_score = max(move_dict_value, best_score)\n move_dict[move_index] = move_dict_value\n move_index += 1\n return move_dict\n","sub_path":"strategy_minimax_prune.py","file_name":"strategy_minimax_prune.py","file_ext":"py","file_size_in_byte":4040,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"647785456","text":"import copy\nimport json\nimport pandas\nimport pathlib\n\n\nclass Lea:\n def __init__(self):\n self.question = self.load_file()\n self.words = self.question.split(\" \")\n self.all_letters = self.get_all_letters()\n self.display_df = self.make_display_df()\n self.final_df = self.make_final_df()\n\n @staticmethod\n def load_file():\n file_path = pathlib.Path().cwd() / \"lea2.json\"\n with open(file_path, \"r\") as f:\n question = json.load(f)\n return question\n\n def get_all_letters(self):\n letters = []\n for word in self.words:\n for character in word:\n letters.append(character.upper())\n return letters\n\n def make_display_df(self):\n max_len = max([len(word) for word in self.words])\n index = range(0, len(self.words))\n columns = range(0, max_len)\n return pandas.DataFrame(\"\", index=index, columns=columns)\n\n def make_final_df(self):\n df = copy.deepcopy(self.display_df)\n for i, word in enumerate(self.words):\n characters = []\n for character in word:\n characters.append(character.upper())\n blanks = [\"\"] * (len(df.columns) - len(word))\n characters.extend(blanks)\n row = 
pandas.Series(characters, index=df.columns)\n            df.iloc[i, :] = row\n        return df\n\n    def play(self):\n        print(\"\\nciao patata! let's play a game\")\n        print(\"this is a table (well, dataframe) that contains a sentence\")\n        print(self.display_df)\n        print(\"let's play something like hangman\")\n        print(\"you enter a letter at the time\")\n        print(\"if it's in the sentence you'll see it appear in the table\")\n        input_letters = []\n        while True:\n            letter = input(\"\\nplease enter a letter: \").upper()\n            input_letters.append(letter)\n            for index, final_row in self.final_df.iterrows():\n                display_row = self.display_df.iloc[index, :]\n                row = [\n                    final_letter\n                    if final_letter in input_letters else display_letter\n                    for display_letter, final_letter\n                    in zip(display_row, final_row)]\n                new_row = pandas.Series(row, index=self.display_df.columns)\n                self.display_df.iloc[index, :] = new_row\n            print(self.display_df)\n            letters_guessed = [\n                True if letter in input_letters else False\n                for letter in self.all_letters]\n            if all(letters_guessed):\n                print(\"\\ni love you baby\\n\")\n                break\n\n\nif __name__ == \"__main__\":\n    Lea().play()\n","sub_path":"lea.py","file_name":"lea.py","file_ext":"py","file_size_in_byte":2706,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"}
{"seq_id":"560664177","text":"import socket\nimport sys\n\nargs = sys.argv[1].split(':')\nhost = args[0]\nport = int(args[1])\nmsg = sys.argv[2].encode()\n\nwith socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:\n    s.connect((host, port))\n    s.sendall(msg)\n    data = s.recv(1024)\n\nprint('Received:', data.decode())","sub_path":"src/client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":295,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"}
{"seq_id":"324210167","text":"from django.conf.urls import url\nfrom . 
import views\n\nurlpatterns = [\n\turl(r'^signup/$', views.signup, name='signup'),\n url(r'^$', views.pet_status, name='pet_status'),\n url(r'^pet/new/$', views.hatch_pet, name='hatch_pet'),\n url(r'^enter_BAL/$', views.change_pet_health_based_on_BAL, name='change_pet_health_based_on_BAL'),\n url(r'^incoming_sms/$', views.incoming_sms, name='views.incoming_sms'),\n]","sub_path":"mysite/meepo/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":411,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"358163908","text":"import h5py\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom plotting_methods import plot_all\nimport sys\n\nplt.rc('text', usetex=True)\nplt.rc('font', family='serif')\nplt.rcParams.update({'font.size': 14})\n\nmodel = sys.argv[1]\nwind = sys.argv[2]\nsnap = sys.argv[3]\n\nbasic_dir = '/home/sapple/simba_sizes/profiles/ssfr/extended_profiles/'\ngv_centrals_dir = basic_dir + 'centrals/'+model+'_'+snap+'/'+wind+'/green_valley/random_orientation/'\ngv_sats_dir = basic_dir + 'satellites/'+model+'_'+snap+'/'+wind+'/green_valley/random_orientation/'\nsf_centrals_dir = basic_dir + 'centrals/'+model+'_'+snap+'/'+wind+'/star_forming/random_orientation/'\nsf_sats_dir = basic_dir + 'satellites/'+model+'_'+snap+'/'+wind+'/star_forming/random_orientation/'\ndata_dirs = [sf_centrals_dir, gv_centrals_dir, sf_sats_dir, gv_sats_dir]\n\nresults_dir = '/home/sapple/simba_sizes/profiles/plotting/plots/'\n\nxlim = [0., 1.5]\nylim = None\nxlabel = r'$ R / R_{half}$'\n\nfilename = 'sfr_data.h5'\nylabel = r'$SFR (M_{\\odot} \\textrm{yr}^{-1})$'\nsavefile = results_dir+'sfr_'+wind+'.png'\nplot_all(data_dirs, filename, xlabel, ylabel, xlim, ylim, savefile)\n\n\n\n","sub_path":"profiles/kruger/plotting/sfr_profiles.py","file_name":"sfr_profiles.py","file_ext":"py","file_size_in_byte":1131,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"582068671","text":"import data_extraction\nimport pandas as pd\nimport numpy as np\nimport json\nimport time\nimport os\n\ndef main():\n total_start = data_extraction.start_time()\n arguments = data_extraction.setup()\n\n start = data_extraction.start_time('Reading data.')\n data = pd.read_hdf(arguments.infile, columns = arguments.columns)\n data_extraction.end_time(start)\n\n start = data_extraction.start_time('Extracting windows.')\n features, positions = data_extraction.windows(data.index.values, data, arguments.window, arguments.columns)\n data_extraction.end_time(start)\n\n column_labels = []\n for column in arguments.columns:\n column_labels += [column] * arguments.window\n\n print ('Writing output.')\n data = {\n 'columns': column_labels,\n 'vectors': features, \n 'positions': positions\n }\n\n project_folder = data_extraction.project_path()\n data_folder = os.path.join(project_folder, 'data')\n processed_folder = os.path.join(data_folder, 'processed')\n if arguments.prefix:\n filename = os.path.join(processed_folder, f'{arguments.prefix}_data.json')\n else:\n filename = os.path.join(processed_folder, 'data.json')\n\n with open(filename, 'w') as outfile:\n json.dump(data, outfile, indent = 4)\n\n total_time = data_extraction.end_time(total_start, True)\n print (f'{total_time} elapsed in total.')\n\nif __name__ == '__main__':\n 
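# comment added: reads the HDF5 input, extracts sliding windows, and writes the JSON feature file\n    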
main()\n\n\n\n\n\n\n","sub_path":"src/features/plasmid_extraction.py","file_name":"plasmid_extraction.py","file_ext":"py","file_size_in_byte":1421,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"}
{"seq_id":"519529257","text":"#%%\nfrom sklearn.model_selection import train_test_split\nimport pandas as pd\n\n#%%\ndf = pd.read_csv('data\cleaned_data.csv')\ndf.head(5)\n\n#%%\n\ntrain_x, test_x = train_test_split(df, test_size=0.2, random_state=42)\ntrain_x = train_x[train_x.isFraud == 0] #where normal transactions\ntrain_x = train_x.drop(['isFraud'], axis=1) #drop the class column\n\n\ntest_y = test_x['isFraud'] #save the class column for the test set\n\ntest_x = test_x.drop(['isFraud'], axis=1) #drop the class column\n\nprint(train_x.shape)\nprint(test_x.shape)\n\ntrain_x = train_x.values #transform to ndarray\ntest_x = test_x.values\n\n#%%\n# import packages\n# matplotlib inline\nimport pandas as pd\nimport numpy as np\nfrom scipy import stats\nimport tensorflow as tf\nimport matplotlib.pyplot as plt\nimport pickle\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.metrics import confusion_matrix, precision_recall_curve\nfrom sklearn.metrics import recall_score, classification_report, auc, roc_curve\nfrom sklearn.metrics import precision_recall_fscore_support, f1_score\nfrom sklearn.preprocessing import StandardScaler\nfrom pylab import rcParams\nfrom keras.models import Model, load_model\nfrom keras.layers import Input, Dense\nfrom keras.callbacks import ModelCheckpoint, TensorBoard\nfrom keras import regularizers\nimport os\n\n\n#%%\nnb_epoch = 1000\nbatch_size = 128\ninput_dim = train_x.shape[1] #num of columns, 30\nencoding_dim = 14\nhidden_dim = int(encoding_dim / 2) #i.e. 7\nlearning_rate = 1e-7\n\ninput_layer = Input(shape=(input_dim, ))\nencoder = Dense(encoding_dim, activation=\"tanh\", activity_regularizer=regularizers.l1(learning_rate))(input_layer)\nencoder = Dense(hidden_dim, activation=\"relu\")(encoder)\ndecoder = Dense(hidden_dim, activation='tanh')(encoder)\ndecoder = Dense(input_dim, activation='relu')(decoder)\nautoencoder = Model(inputs=input_layer, outputs=decoder)\n\n#%%\nautoencoder.compile(metrics=['accuracy'],\n                    loss='mean_squared_logarithmic_error',\n                    optimizer='adam')\n\ncp = ModelCheckpoint(filepath=\"autoencoder_fraud.h5\",\n                     save_best_only=True,\n                     verbose=0)\ntry:\n    save_dir = int(max(os.listdir('.\logs'))) + 1\nexcept ValueError:\n    save_dir=1\n\ntb = TensorBoard(log_dir='./logs/' + str(save_dir) ,\n                 histogram_freq=0,\n                 write_graph=True,\n                 write_images=True)\n\nhistory = autoencoder.fit(train_x, train_x,\n                    epochs=nb_epoch,\n                    batch_size=batch_size,\n                    shuffle=True,\n                    validation_data=(test_x, test_x),\n                    verbose=1,\n                    callbacks=[cp, tb]).history\n\n#%%\nautoencoder = load_model('autoencoder_fraud.h5')\n\ntest_x_predictions = autoencoder.predict(test_x)\nmse = np.mean(np.power(test_x - test_x_predictions, 2), axis=1)\nerror_df = pd.DataFrame({'Reconstruction_error': mse,\n                        'True_class': test_y})\nerror_df.describe()\n\n#%%\n","sub_path":"code/data_preprocessing/example_test.py","file_name":"example_test.py","file_ext":"py","file_size_in_byte":2973,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"}
{"seq_id":"48711468","text":"from django.conf.urls import patterns, include, url\n# Uncomment the next two lines to enable the admin:\nfrom django.contrib import admin\nadmin.autodiscover()\n# Uncomment the next two 
lines to enable the admin:\n# from django.contrib import admin\n# admin.autodiscover()\n\nurlpatterns = patterns('',\n    # Examples:\n    #url(r'^gglplus/$','gplus.views.index')\n    # NOTE: the named-group names below were lost when the source was extracted; 'name' is a generic stand-in\n    url(r'^gglplus/(?P<name>[a-zA-Z0-9_.-]+)/$','gplus.views.index'),\n    url(r'^fbapp/(?P<name>\w+)/$','fbapp.views.index'),\n    url(r'^lkdn/$', 'lkdin.views.index'),\n    url(r'^lkdn1/$','lkdin.views.index1'),\n    url(r'^oauth/$','twitterapp.views.index1'),\n    url(r'^oauth1/$','twitterapp.views.index'),\n    # url(r'^$', 'ggl.views.home', name='home'),\n    # url(r'^ggl/', include('ggl.foo.urls')),\n\n    # Uncomment the admin/doc line below to enable admin documentation:\n    # url(r'^admin/doc/', include('django.contrib.admindocs.urls')),\n\n    # Uncomment the next line to enable the admin:\n    # url(r'^admin/', include(admin.site.urls)),\n)\n","sub_path":"socialapp-online/ggl/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1064,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"}
{"seq_id":"518031875","text":"# Copyright OTT-JAX\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nfrom typing import TYPE_CHECKING\n\nimport jax\nimport jax.numpy as jnp\n\nfrom ott import utils\n\nif TYPE_CHECKING:\n    from ott.problems.linear import linear_problem\n    from ott.solvers.linear import sinkhorn\n\n__all__ = [\"AndersonAcceleration\", \"Momentum\"]\n\n\n@utils.register_pytree_node\nclass AndersonAcceleration:\n  \"\"\"Implements Anderson acceleration for Sinkhorn.\"\"\"\n\n  # TODO(michalk8): use memory=0 as no Anderson acceleration?\n  memory: int = 2  # Number of iterates considered to form interpolation.\n  refresh_every: int = 1  # Recompute interpolation periodically.\n  ridge_identity: float = 1e-2  # Ridge used in the linear system.\n\n  def extrapolation(self, xs: jnp.ndarray, fxs: jnp.ndarray) -> jnp.ndarray:\n    \"\"\"Compute Anderson extrapolation from past observations.\"\"\"\n    # Remove -inf values to instantiate quadratic problem. 
All others\n # remain since they might be caused by a valid issue.\n fxs_clean = jnp.nan_to_num(fxs, nan=jnp.nan, posinf=jnp.inf, neginf=0.0)\n xs_clean = jnp.nan_to_num(xs, nan=jnp.nan, posinf=jnp.inf, neginf=0.0)\n residuals = fxs_clean - xs_clean\n gram_matrix = jnp.matmul(residuals.T, residuals)\n gram_matrix /= jnp.linalg.norm(gram_matrix)\n\n # Solve linear system to obtain weights\n weights = jax.scipy.sparse.linalg.cg(\n gram_matrix + self.ridge_identity * jnp.eye(xs.shape[1]),\n jnp.ones(xs.shape[1])\n )[0]\n weights /= jnp.sum(weights)\n\n # Recover linear combination and return it with NaN (caused\n # by 0 weights leading to -jnp.inf potentials, mixed with weights\n # coefficients of different signs), disambiguated to -inf.\n combination = jnp.sum(fxs * weights[None, :], axis=1)\n return jnp.where(jnp.isfinite(combination), combination, -jnp.inf)\n\n def update(\n self, state: \"sinkhorn.SinkhornState\", iteration: int,\n prob: \"linear_problem.LinearProblem\", lse_mode: bool\n ) -> \"sinkhorn.SinkhornState\":\n \"\"\"Anderson acceleration update.\n\n When using Anderson acceleration, first update the dual variable f_u with\n previous updates (if iteration count sufficiently large), then record\n new iterations in array.\n\n Anderson acceleration always happens in potentials (not scalings) space,\n regardless of the lse_mode setting. If the iteration count is large\n enough the update below will output a potential variable.\n\n Args:\n state: Sinkhorn state.\n iteration: the current iteration.\n prob: linear OT problem.\n lse_mode: whether to compute in log-sum-exp or in scalings.\n\n Returns:\n A potential variable.\n \"\"\"\n geom = prob.geom\n trigger_update = jnp.logical_and(\n iteration > self.memory, iteration % self.refresh_every == 0\n )\n fu = jnp.where(\n trigger_update, self.extrapolation(state.old_fus, state.old_mapped_fus),\n state.fu\n )\n # If the interpolation was triggered, we store it in memory\n # Otherwise we add the previous value (converting it to potential form if\n # it was initially stored in scaling form).\n old_fus = jnp.where(\n trigger_update,\n jnp.concatenate((state.old_fus[:, 1:], fu[:, None]), axis=1),\n jnp.concatenate((\n state.old_fus[:, 1:],\n (fu if lse_mode else geom.potential_from_scaling(fu))[:, None]\n ),\n axis=1)\n )\n\n # If update was triggered, ensure a scaling is returned, since the result\n # from the extrapolation was outputted in potential form.\n fu = jnp.where(\n trigger_update, fu if lse_mode else geom.scaling_from_potential(fu), fu\n )\n return state.set(fu=fu, old_fus=old_fus)\n\n def init_maps(\n self, pb, state: \"sinkhorn.SinkhornState\"\n ) -> \"sinkhorn.SinkhornState\":\n \"\"\"Initialize log matrix used in Anderson acceleration with *NaN* values.\"\"\"\n fus = jnp.ones((pb.geom.shape[0], self.memory)) * jnp.nan\n return state.set(old_fus=fus, old_mapped_fus=fus)\n\n def update_history(\n self, state: \"sinkhorn.SinkhornState\", pb, lse_mode: bool\n ) -> \"sinkhorn.SinkhornState\":\n \"\"\"Update history of mapped dual variables.\"\"\"\n f = state.fu if lse_mode else pb.geom.potential_from_scaling(state.fu)\n mapped = jnp.concatenate((state.old_mapped_fus[:, 1:], f[:, None]), axis=1)\n return state.set(old_mapped_fus=mapped)\n\n\n@utils.register_pytree_node\nclass Momentum:\n \"\"\"Momentum for Sinkhorn updates.\n\n Can be either constant :cite:`thibault:21` or adaptive :cite:`lehmann:21`.\n \"\"\"\n\n start: int = 0\n error_threshold: float = jnp.inf\n value: float = 1.0\n inner_iterations: int = 1\n\n def 
weight(self, state: \"sinkhorn.SinkhornState\", iteration: int) -> float:\n \"\"\"Compute momentum term if needed, using previously seen errors.\"\"\"\n if self.start == 0:\n return self.value\n idx = self.start // self.inner_iterations\n\n return jax.lax.cond(\n jnp.logical_and(\n iteration >= self.start,\n state.errors[idx - 1, -1] < self.error_threshold\n ), lambda state: self.lehmann(state), lambda state: self.value, state\n )\n\n def lehmann(self, state: \"sinkhorn.SinkhornState\") -> float:\n \"\"\"Momentum formula :cite:`lehmann:21`, eq. 5.\"\"\"\n idx = self.start // self.inner_iterations\n error_ratio = jnp.minimum(\n state.errors[idx - 1, -1] / state.errors[idx - 2, -1], 0.99\n )\n power = 1.0 / self.inner_iterations\n return 2.0 / (1.0 + jnp.sqrt(1.0 - error_ratio ** power))\n\n def __call__( # noqa: D102\n self,\n weight: float,\n value: jnp.ndarray,\n new_value: jnp.ndarray,\n lse_mode: bool = True\n ) -> jnp.ndarray:\n if lse_mode:\n value = jnp.where(jnp.isfinite(value), value, 0.0)\n return (1.0 - weight) * value + weight * new_value\n value = jnp.where(value > 0.0, value, 1.0)\n return value ** (1.0 - weight) * new_value ** weight\n","sub_path":"src/ott/solvers/linear/acceleration.py","file_name":"acceleration.py","file_ext":"py","file_size_in_byte":6365,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"314932806","text":"from __future__ import print_function\nimport keras\nfrom keras.datasets import mnist\nfrom keras.models import Sequential\nfrom keras.layers import Dense, Dropout, Flatten\nfrom keras.layers import Conv2D, MaxPooling2D\nfrom keras import backend as K\n\n\ndef f_mnist(params):\n \"\"\"\n num_of_conv_layers : int, (2 ~ 10)\n conv_kernel_size : int, (3 ~ 10)\n conv_output_size : int, (32 ~ 128)\n conv_dropout_rate : float, (0.1 ~ 0.99)\n maxpooling_size : int, (2 ~ 10)\n num_of_dense_layers : int, (2 ~ 5)\n dense_output_size : int, (32 ~ 128)\n dense_drop_out_rate : float, (0.1 ~ 0.99)\n learning_rate : float, (0.1, 0.01, 0.001, 0.0001)\n :param params: list of parameters\n :type params: list\n :return: fitness\n :rtype: float\n \"\"\"\n\n num_of_conv_layers = 3\n conv_kernel_size = 3\n conv_output_size = params[0]\n conv_dropout_rate = params[1]\n maxpooling_size = params[2]\n num_of_dense_layers = params[3]\n dense_output_size = params[4]\n dense_drop_out_rate = params[5]\n learning_rate = params[6]\n\n batch_size = 128\n num_classes = 10\n epochs = 5\n\n # input image dimensions\n img_rows, img_cols = 28, 28\n\n # the data, shuffled and split between train and test sets\n # ********** if there is an error in importing mnist, use below lines\n # import gzip\n # import sys\n # import pickle\n # f = gzip.open('mnist.pkl.gz', 'rb')\n # if sys.version_info < (3,):\n # data = pickle.load(f)\n # else:\n # data = pickle.load(f, encoding='bytes')\n # f.close()\n # (x_train, y_train), (x_test, y_test) = data\n\n (x_train, y_train), (x_test, y_test) = mnist.load_data()\n\n if K.image_data_format() == 'channels_first':\n x_train = x_train.reshape(x_train.shape[0], 1, img_rows, img_cols)\n x_test = x_test.reshape(x_test.shape[0], 1, img_rows, img_cols)\n input_shape = (1, img_rows, img_cols)\n else:\n x_train = x_train.reshape(x_train.shape[0], img_rows, img_cols, 1)\n x_test = x_test.reshape(x_test.shape[0], img_rows, img_cols, 1)\n input_shape = (img_rows, img_cols, 1)\n\n x_train = x_train.astype('float32')\n x_test = x_test.astype('float32')\n x_train /= 255\n x_test /= 255\n print('x_train shape:', 
x_train.shape)\n print(x_train.shape[0], 'train samples')\n print(x_test.shape[0], 'test samples')\n\n # convert class vectors to binary class matrices\n y_train = keras.utils.to_categorical(y_train, num_classes)\n y_test = keras.utils.to_categorical(y_test, num_classes)\n\n model = Sequential()\n model.add(Conv2D(32, kernel_size=(conv_kernel_size, conv_kernel_size),\n activation='relu',\n input_shape=input_shape))\n for i in range(num_of_conv_layers-1):\n model.add(Conv2D(conv_output_size, (conv_kernel_size, conv_kernel_size), activation='relu'))\n model.add(MaxPooling2D(pool_size=(maxpooling_size, maxpooling_size)))\n model.add(Dropout(conv_dropout_rate))\n model.add(Flatten())\n for i in range(num_of_dense_layers - 1):\n model.add(Dense(dense_output_size, activation='relu'))\n model.add(Dropout(dense_drop_out_rate))\n model.add(Dense(num_classes, activation='softmax'))\n\n model.compile(loss=keras.losses.categorical_crossentropy,\n optimizer=keras.optimizers.RMSprop(lr=learning_rate),\n metrics=['accuracy'])\n\n model.fit(x_train, y_train,\n batch_size=batch_size,\n epochs=epochs,\n verbose=2,\n validation_data=(x_test, y_test))\n score = model.evaluate(x_test, y_test, verbose=0)\n print('Test loss:', score[0])\n print('Test accuracy:', score[1])\n return score[1]","sub_path":"functions/mnist.py","file_name":"mnist.py","file_ext":"py","file_size_in_byte":3714,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"405776176","text":"import tkinter as tk\r\nfrom tkinter import filedialog\r\nimport os.path\r\n\r\n# this sets working directory and gets the file to convert\r\nroot = tk.Tk()\r\nroot.withdraw()\r\nbase = os.getcwd().replace(os.sep, \"/\")\r\nfile_path = filedialog.askopenfilename()\r\n# print(file_path)\r\n\r\n\r\nconverter_file_string = \"\"\r\n\r\n# this is important, the file structure was downloaded like this\r\n# so I am assuming this is how it was put in the folder\r\nconverter_location = \"Converter/bin\"\r\n\r\nfor i in range(0, 20):\r\n # current settings are to convert to an mp4\r\n current_file = \"output\" + str(i) + \".mp4\"\r\n if not os.path.exists(base + \"/\" + converter_location + \"/\" + current_file):\r\n break\r\n\r\nif file_path != \"\":\r\n # added quotes on either side in case there is spaces within the paths\r\n converter_exe = r'\"' + base + \"/\" + converter_location + \"/\" + \"ffmpeg.exe\" + r'\"'\r\n input_file = r'\"' + file_path + r'\"'\r\n \r\n # modify this if you prefer a different output, currently it outputs to the directory of the ffmpeg executable\r\n output_file = r'\"' + base + \"/\" + converter_location + \"/\" + current_file + r'\"'\r\n \r\n converter_file_string = converter_exe + \" -i \" + input_file + \" \" + output_file\r\n\r\n # print(converter_file_string)\r\nelse:\r\n quit()\r\n\r\n\r\nconverter_file_full = os.getcwd() + \"/\" + converter_location + \"/\" + \"convert.bat\"\r\n\r\n# overwrites the bat file to the new files/settings without extra hastle\r\nwith open(converter_file_full, \"w\") as f:\r\n f.write(converter_file_string)\r\n\r\n\r\nprint(\"converting file\")\r\n\r\nos.system(converter_file_full)\r\n\r\nprint(\"Done\")\r\n","sub_path":"Main.py","file_name":"Main.py","file_ext":"py","file_size_in_byte":1591,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"549186840","text":"from random import random\nfrom typing import Tuple\n\n\nclass Node:\n \"\"\"\n Treap's node\n Treap is a binary tree by key and heap by 
priority\n    \"\"\"\n    def __init__(self, key: int):\n        self.key = key\n        self.prior = random()  # heap priority\n        self.l = None\n        self.r = None\n\ndef split(root: Node, key: int) -> Tuple[Node, Node]:\n    \"\"\"\n    We split current tree into 2 trees with key:\n\n    Left tree contains all keys less than split key.\n    Right tree contains all keys greater than or equal to the split key\n    \"\"\"\n    if root is None:  # None tree is split into 2 Nones\n        return (None, None)\n    else:\n        if key < root.key:\n            \"\"\"\n            Right tree's root will be current node.\n            Now we split(with the same key) current node's left son\n            Left tree: left part of that split\n            Right tree's left son: right part of that split\n            \"\"\"\n            l, root.l = split(root.l, key)\n            return (l, root)\n        else:\n            \"\"\"\n            Just symmetric to previous case\n            \"\"\"\n            root.r, r = split(root.r, key)\n            return (root, r)\n\n\ndef merge(left: Node, right: Node) -> Node:\n    \"\"\"\n    We merge 2 trees into one.\n    Note: all left tree's keys must be less than all right tree's\n    \"\"\"\n    if (not left) or (not right):  # if at least one side is None, return the other\n        return left or right\n    elif left.prior < right.prior:\n        # print(\"left\")  # @Haor: unused?\n        left.r = merge(left.r, right)\n        return left\n    else:  # make the right node the root, then merge the left tree into right's left child\n        \"\"\"\n        Right will be root because it has more priority\n        Now we need to merge left tree and right's left son\n        \"\"\"\n        right.l = merge(left, right.l)\n        return right\n\ndef insert(root: Node, key: int) -> Node:\n    \"\"\"\n    Insert element\n\n    Split current tree with a key into l, r,\n    Insert new node into the middle\n    Merge l, node, r into root\n    \"\"\"\n    node = Node(key)\n    l, r = split(root, key)\n    return merge(merge(l, node), r)\n\ndef erase(root: Node, key: int) -> Node:\n    \"\"\"\n    Erase element\n\n    Split all nodes with keys less into l,\n    Split all nodes with keys greater into r.\n    Merge l, r\n    \"\"\"\n    l, r = split(root, key)\n    _, r = split(r, key + 1)\n    return merge(l, r)\n\n
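# Worked example (added; not part of the original file): with keys {1, 3, 5},\n# split(root, 3) yields one treap holding {1} (keys < 3) and one holding {3, 5}\n# (keys >= 3); merge() of those two treaps rebuilds a valid treap over all three\n# keys, because every key on the left is smaller than every key on the right.\n\n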
def pre_print(root: Node):\n    \"\"\"\n    Pre-order traversal: print the root, then its subtrees\n    \"\"\"\n    if not root:  # None\n        return\n    else:\n        print(root.key, end=\" \")\n        pre_print(root.l)\n        pre_print(root.r)\ndef mid_print(root: Node):\n    \"\"\"\n    In-order traversal: prints the keys in sorted order\n    \"\"\"\n    if not root:\n        return\n    else:\n        mid_print(root.l)\n        print(root.key, end=\" \")\n        mid_print(root.r)\n\ndef interactTreap():\n    \"\"\"\n    Commands:\n    + key to add key into treap\n    - key to erase all nodes with key\n\n    After each command, program prints treap\n    \"\"\"\n    root = None\n    print(\"This program reads a list of numbers: a leading '+' inserts the key, '-' erases it. Enter 'q' to quit\")\n    cmd = input()\n    while cmd != 'q':\n        if cmd[0] == \"+\":\n            root = insert(root, int(cmd[1:]))\n        elif cmd[0] == \"-\":\n            root = erase(root, int(cmd[1:]))\n        else:\n            print(\"Unknown command\")\n        mid_print(root)\n        cmd = input()\n\ndef autoTreap():\n    root = None\n    print(\"Example run:\")\n    cmds = [\"+1\", \"+3\", \"+5\", \"+17\", \"+19\", \"+2\", \"+16\", \"+4\", \"+0\", \"+4\", \"+4\", \"+4\", \"-0\", \"-3\", \"-4\", \"-5\", \"-10\", \"+0\", ]\n    for cmd in cmds:\n        print(\"\\n>>>\", cmd )\n        if cmd[0] == \"+\":\n            root = insert(root, int(cmd[1:]))\n        elif cmd[0] == \"-\":\n            root = erase(root, int(cmd[1:]))\n        mid_print(root)\n        print(\"\\n\")\n    pre_print(root)\n\nif __name__ == \"__main__\":\n    autoTreap()\n    interactTreap()\n","sub_path":"data_structures/binary_tree/treap.py","file_name":"treap.py","file_ext":"py","file_size_in_byte":3891,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"}
{"seq_id":"147141754","text":"from sum_code.scraper import reddit\nfrom sum_code.db.models import Url\nfrom sum_code.db.utils import create_session\nfrom time import sleep\nimport logging\n\nlogger = logging.getLogger(__name__)\n\nmax_urls_to_extract = 10000\nthread_iterator = reddit.RedditAutoTLDRThreadIterator(max_urls_to_extract=max_urls_to_extract)\n\nsession = create_session()\n\nfor count, thread_url in enumerate(thread_iterator):\n    session.begin_nested()\n    try:\n        logger.info('scraping thread https://www.reddit.com{}'.format(thread_url))\n        if session.query(session.query(Url).filter(Url.url.like(u'%{}'.format(thread_url))).exists()).scalar():\n            logger.info('skipping url {}'.format(thread_url))\n            continue\n\n        if count % 10 == 0:\n            logger.info('url count {} of {}'.format(count, max_urls_to_extract))\n        sleep(1)\n\n        try:\n            reddit.RedditAutoTLDRThreadScraper(thread_url=thread_url).scrape_thread(commit=True)\n        except Exception as e:\n            session.rollback()\n            logger.error('error {} on thread url {}'.format(e, thread_url))\n\n    except Exception as e:\n        session.rollback()\n        logger.error('exception {} on thread url:'.format(e))\n        try:\n            logger.error(thread_url)\n        except:\n            logger.error('error logging thread url')\n    session.commit()\n\n\n","sub_path":"sum_code/commands/get_autotldr_data.py","file_name":"get_autotldr_data.py","file_ext":"py","file_size_in_byte":1351,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"}
{"seq_id":"633420896","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Tue Aug 14 13:52:05 2018\r\n\r\n@author: henri\r\n\"\"\"\r\n\r\nimport numpy as np\r\nimport pandas as pd\r\nfrom sklearn.model_selection import train_test_split\r\nfrom sklearn.ensemble import RandomForestClassifier\r\nfrom sklearn.ensemble import RandomForestRegressor\r\nfrom sklearn import metrics\r\nimport matplotlib.pyplot as plt\r\nfrom sklearn.decomposition import PCA\r\nimport itertools\r\n\r\nnp.random.seed(1234)\r\n\r\nnames=['EngNo', 'Cycle', 'OC1', 'OC2', 'OC3', 'S1', 'S2', 'S3',\r\n       'S4', 'S5', 'S6', 'S7', 'S8', 'S9', 'S10', 'S11', 'S12', 'S13',\r\n       'S14', 'S15', 'S16', 'S17', 'S18', 'S19', 'S20', 'S21', 'RUL', 'Class']\r\n\r\n\r\n# read training data - It is the aircraft engine run-to-failure data.\r\ntrain_df = pd.read_csv('Data/train_FD001_labeled.csv', sep=\",\", header=0)\r\ntrain_df.columns = names\r\n\r\n# read test data - It is the aircraft engine operating data without failure events recorded.\r\ntest_df = pd.read_csv('Data/test_FD001.txt', sep=\" \", header=None)\r\ntest_df.drop(test_df.columns[[26, 27]], axis=1, inplace=True)\r\n
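# comment added, a likely reading: parsing the space-separated CMAPSS text file leaves two\r\n# empty trailing columns, which is why columns 26 and 27 are dropped above\r\n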
test_df.columns = ['EngNo', 'Cycle', 'OC1', 'OC2', 'OC3', 'S1', 'S2', 'S3',\r\n                   'S4', 'S5', 'S6', 'S7', 'S8', 'S9', 'S10', 'S11', 'S12', 'S13',\r\n                   'S14', 'S15', 'S16', 'S17', 'S18', 'S19', 'S20', 'S21']\r\n\r\n# read ground truth RUL\r\ntruth_df = pd.read_csv('Data/RUL_FD001.txt', sep=\" \", header=None)\r\ntruth_df.drop(truth_df.columns[[1]], axis=1, inplace=True)\r\n\r\n# ground truth dataset (RUL) to generate labels for the test data\r\n# generate column max for test data\r\nrul = pd.DataFrame(test_df.groupby('EngNo')['Cycle'].max()).reset_index()\r\nrul.columns = ['EngNo', 'max']\r\ntruth_df.columns = ['more']\r\ntruth_df['EngNo'] = truth_df.index + 1\r\ntruth_df['max'] = rul['max'] + truth_df['more']\r\ntruth_df.drop('more', axis=1, inplace=True)\r\n\r\n# generate RUL for test data\r\ntest_df = test_df.merge(truth_df, on=['EngNo'], how='left')\r\ntest_df['RUL'] = test_df['max'] - test_df['Cycle']\r\ntest_df.drop('max', axis=1, inplace=True)\r\n\r\n#test_data = pd.DataFrame(test_df.groupby('EngNo')['Cycle'].max())\r\n#test_data.columns[]\r\n\r\n# Data Filtering - Moving average\r\ndf = test_df\r\n# print(df)\r\ndata = pd.DataFrame(columns=names)\r\n\r\n#df = pd.DataFrame(dataset)\r\nfor i in range(1, 101):\r\n    # select engine\r\n    engine_no = i\r\n    data_sel = df.loc[df['EngNo'] == engine_no]\r\n\r\n    # filter\r\n    window_size = 15\r\n    rolling = data_sel.rolling(window=window_size)\r\n    rm = rolling.mean()\r\n    # delete NaN\r\n    rm = rm.drop(rm.index[0:(window_size - 1)])\r\n    data = pd.concat([data, rm])\r\n\r\n\r\ndata = data[['EngNo', 'Cycle', 'OC1', 'OC2', 'OC3', 'S1', 'S2', 'S3',\r\n             'S4', 'S5', 'S6', 'S7', 'S8', 'S9', 'S10', 'S11', 'S12', 'S13',\r\n             'S14', 'S15', 'S16', 'S17', 'S18', 'S19', 'S20', 'S21', 'RUL']]\r\n\r\ntest_df = data\r\n\r\n## Test update to try out git !!\r\n\r\ndef plot_confusion_matrix(cm, classes,\r\n                          normalize=False,\r\n                          title='Confusion matrix',\r\n                          cmap=plt.cm.Blues):\r\n    \"\"\"\r\n    This function prints and plots the confusion matrix.\r\n    Normalization can be applied by setting `normalize=True`.\r\n    \"\"\"\r\n    if normalize:\r\n        cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\r\n        print(\"Normalized confusion matrix\")\r\n    else:\r\n        print('Confusion matrix, without normalization')\r\n\r\n    print(cm)\r\n\r\n    plt.imshow(cm, interpolation='nearest', cmap=cmap)\r\n    plt.title(title)\r\n    plt.colorbar()\r\n    tick_marks = np.arange(len(classes))\r\n    plt.xticks(tick_marks, classes, rotation=45)\r\n    plt.yticks(tick_marks, classes)\r\n\r\n    fmt = '.2f' if normalize else 'd'\r\n    thresh = cm.max() / 2.\r\n    for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\r\n        plt.text(j, i, format(cm[i, j], fmt),\r\n                 horizontalalignment=\"center\",\r\n                 color=\"white\" if cm[i, j] > thresh else \"black\")\r\n\r\n    plt.tight_layout()\r\n    plt.ylabel('True health index')\r\n    plt.xlabel('Predicted health index')\r\n\r\n\r\n\r\n###################################\r\n## Data Preprocessing\r\n###################################\r\n## Data Labeling - generate column RUL\r\n#rul = pd.DataFrame(train_df.groupby('id')['cycle'].max()).reset_index()\r\n#rul.columns = ['id', 'max']\r\n#train_df = train_df.merge(rul, on=['id'], how='left')\r\n#train_df['RUL'] = train_df['max'] - train_df['cycle']\r\n#train_df.drop('max', axis=1, inplace=True)\r\n\r\n\r\n##df = train_df.loc[train_df['id'] == 90]# define input and output data\r\n##xx_test = df[['setting1', 'setting2', 'setting3', 's1', 's2', 's3',\r\n##             's4', 's5', 's6', 's7', 's8', 's9', 's10', 's11', 
's12', 's13', 's14',\r\n## 's15', 's16', 's17', 's18', 's19', 's20', 's21']]\r\n##\r\n##yy_test = df[['class']]\r\n\r\n\r\nclmns = ['S2', 'S3','S4', 'S7', 'S11', 'S12', 'S15']\r\n#clmns = ['S2', 'S3', 'S4', 'S7', 'S11', 'S12', 'S13',\r\n# 'S15', 'S17','S20', 'S21']\r\n\r\nX = train_df[clmns]\r\n\r\n# PCA ANWENDEN? keine wesentliche Verbesserung?!\r\n\r\n#pca = PCA(n_components=21)\r\n#variance = pca.fit(X)\r\n#print('Explained Variance: ', pca.explained_variance_ratio_)\r\n#\r\n#X = pca.fit_transform(X)\r\n\r\ny = train_df['Class']\r\n\r\n# split dataset into training set and test set\r\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, shuffle=True)\r\n\r\n########\r\n## TRAIN\r\n########\r\n# create a Random Forest Classifier\r\nclf = RandomForestClassifier(n_estimators=10)\r\n\r\n# train the model using the trainings sets y_pred=clf.predict(X_test)\r\ntrain_pred = clf.fit(X_train, y_train)\r\n\r\n##########\r\n## Predict\r\n##########\r\ny_pred = clf.predict(X_test)\r\n\r\n\r\n########################\r\n# evaluate and use model\r\n########################\r\n# Confusion Matrix\r\ncnf_matrix = metrics.confusion_matrix(y_test, y_pred)\r\n\r\nplt.figure()\r\nplot_confusion_matrix(cnf_matrix, classes=np.linspace(0,1,5, endpoint=True), normalize=True, title='')\r\nplt.tight_layout(4,4)\r\n\r\n# model accuracy, how often is the classifier correct?\r\nprint('############ DIAGNOSIS - CLASSIFICATION #############')\r\nprint('----------------------------')\r\nprint(\"Accuracy:\", metrics.accuracy_score(y_test, y_pred))\r\nprint('----------------------------')\r\nprint('Confusion matrix: \\nPredicted Class on X-Axis \\nTrue Class on Y-Axis ')\r\nprint('- - - - - - - - - - - - - - ')\r\nprint(cnf_matrix)\r\nprint('----------------------------')\r\nprint('Classification report: \\n', metrics.classification_report(y_test, y_pred))\r\nprint('################# RUL PROGNOSIS #####################')\r\n \r\n#############################\r\n## Second Step: RUL Prognosis\r\n#############################\r\nclmns = ['Cycle', 'S2', 'S3','S4', 'S7', 'S11', 'S12', 'S15', 'Class']\r\n\r\nreg_X = train_df[clmns]\r\n\r\nreg_y = train_df['RUL']\r\n\r\n# split dataset into training set and test set\r\nreg_X_train, reg_X_test, reg_y_train, reg_y_test = train_test_split(reg_X, reg_y, test_size=0.3, shuffle=True)\r\n\r\nselection = pd.DataFrame()\r\nselection_rul = pd.DataFrame()\r\nfor i in range(2):\r\n data_sel = reg_X_test.loc[reg_X_test['Class'] == i]\r\n rul_sel = reg_y_test.loc[reg_X_test['Class'] == i]\r\n selection = pd.concat([selection, data_sel])\r\n selection_rul = pd.concat([selection_rul, rul_sel])\r\n\r\n\r\n# create a Gaussian Classifier\r\nreg = RandomForestRegressor(n_estimators=100)\r\n\r\n# train the model using the trainings sets y_pred=clf.predict(X_test)\r\n\r\nreg.fit(reg_X_train, reg_y_train)\r\n\r\nreg_y_pred = reg.predict(reg_X_test)\r\n\r\n#Plotting\r\nplt.figure(figsize=(5,4))\r\nplt.scatter(reg_y_test, reg_y_pred, color='None', edgecolor='k')\r\nplt.plot(np.arange(0,350), np.arange(0,350), c='r')\r\nplt.xlim(0,350)\r\nplt.ylim(0,350)\r\nplt.xlabel('Target')\r\nplt.ylabel('Prediction')\r\nplt.title('Test data ')\r\nplt.tight_layout()\r\n\r\n\r\nindexis = train_df.index[train_df['EngNo']==3].tolist()\r\ntrue_values = reg_y_test.loc[indexis]\r\ntrue_values = true_values.reset_index()\r\n\r\ntrue_rul = []\r\nindex = []\r\nfor i in range(len(pd.isnull(true_values).any(1))):\r\n if not pd.isnull(true_values).any(1)[i]:\r\n index.append(true_values['index'][i])\r\n 
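# comment added: keep only the rows whose RUL value survived the rolling-average NaN trim\r\n        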
true_rul.append(true_values['RUL'][i])\r\n\r\nnew_reg_y_test = reg_y_test.reset_index()\r\n\r\nind = []\r\n#for i in index:\r\nfor i in true_values['index']:\r\n    indexa = new_reg_y_test.index[new_reg_y_test['index']==i].tolist()\r\n    try:\r\n        ind.append(indexa[0])\r\n    except:\r\n        ind.append(np.nan)\r\n    \r\npred_rul = []\r\nfor a in ind:\r\n    try:\r\n        pred_rul.append(reg_y_pred[a])\r\n    except:\r\n        pred_rul.append(np.nan)\r\n    \r\ndef nan_helper(y):\r\n    return np.isnan(y), lambda z: z.nonzero()[0]\r\n\r\n#true_ruls = []\r\n#nans, x= nan_helper(np.array(true_values['RUL']))\r\n#true_ruls[nans]= np.interp(x(nans), x(~nans), np.array(true_values['RUL'])[~nans])\r\n#\r\n#nans, x= nan_helper(pred_rul)\r\n#pred_rul[nans]= np.interp(x(nans), x(~nans), pred_rul[~nans])\r\n\r\ns1mask = np.isfinite(true_values['RUL'])\r\ns2mask = np.isfinite(pred_rul)\r\n\r\n# boolean masks need array-like operands, so use numpy containers here\r\nx = np.arange(len(pred_rul))\r\npred_rul = np.asarray(pred_rul)\r\n\r\nplt.figure()\r\nplt.plot(x[s1mask], true_values['RUL'][s1mask])\r\nplt.plot(pred_rul[s2mask])\r\n\r\nprint(\"MAE: \", metrics.mean_absolute_error(reg_y_test, reg_y_pred))\r\nprint(\"MRE: \", metrics.mean_absolute_error(reg_y_test, reg_y_pred)/205.3)\r\nprint(\"RMSE:\", np.sqrt(metrics.mean_squared_error(reg_y_test, reg_y_pred)))\r\nprint(\"R_2: \", metrics.r2_score(reg_y_test, reg_y_pred), '\\n')\r\n\r\nprint('#####################################################')\r\nprint('############ Now test some real data ################')\r\nprint('#####################################################\\n')\r\n \r\n\r\ntest_y_pred = clf.predict(test_df[['S2', 'S3','S4', 'S7', 'S11', 'S12', 'S15']])\r\ntest_df['Class']=test_y_pred\r\n\r\ntest_reg_y_pred = reg.predict(test_df[['Cycle','S2', 'S3','S4', 'S7', 'S11', 'S12', 'S15', 'Class']])\r\n\r\n#Plotting\r\nplt.figure(figsize=(5,4))\r\nplt.scatter(test_df['RUL'], test_reg_y_pred, color='None', edgecolor='k')\r\nplt.plot(np.arange(0,350), np.arange(0,350), c='r')\r\nplt.xlim(0,350)\r\nplt.ylim(0,350)\r\nplt.xlabel('Target')\r\nplt.ylabel('Prediction')\r\nplt.title('Real test data')\r\nplt.tight_layout()\r\n\r\n\r\nprint(\"MAE: \", metrics.mean_absolute_error(test_df['RUL'], test_reg_y_pred))\r\nprint(\"MRE: \", metrics.mean_absolute_error(test_df['RUL'], test_reg_y_pred)/205.5)\r\nprint(\"RMSE:\", np.sqrt(metrics.mean_squared_error(test_df['RUL'], test_reg_y_pred)))\r\nprint(\"R_2: \", metrics.r2_score(test_df['RUL'], test_reg_y_pred), '\\n')\r\n\r\n\r\n####################################\r\n#           Test number 2          #\r\n###################################\r\ntest_reg_y_pred2 = reg.predict(selection)\r\n\r\nplt.figure(figsize=(5,4))\r\nplt.scatter(selection_rul, test_reg_y_pred2, color='None', edgecolor='k')\r\nplt.plot(np.arange(0,350), np.arange(0,350), c='r')\r\nplt.xlim(0,350)\r\nplt.ylim(0,350)\r\nplt.xlabel('Target')\r\nplt.ylabel('Prediction')\r\nplt.title('Extraction of Class 1/2 of Testdata')\r\nplt.tight_layout()\r\n\r\nprint('#####################################################')\r\nprint(\"MAE: \", metrics.mean_absolute_error(selection_rul, test_reg_y_pred2))\r\nprint(\"RMSE:\", np.sqrt(metrics.mean_squared_error(selection_rul, test_reg_y_pred2)))\r\nprint(\"R_2: \", metrics.r2_score(selection_rul, test_reg_y_pred2))\r\n","sub_path":"supervised_classification.py","file_name":"supervised_classification.py","file_ext":"py","file_size_in_byte":11063,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"}
{"seq_id":"266722062","text":"import requests\r\nfrom telegram import *\r\nimport telebot\r\nfrom datetime import date\r\n\r\n\r\nclass 
BotHandler():\r\n    def __init__(self, token):\r\n        self.token = token\r\n        self.api_url = \"https://api.telegram.org/bot{}/\".format(token)\r\n\r\n    #url = \"https://api.telegram.org/bot/\"\r\n\r\n    def get_updates(self, offset=0, timeout=30):\r\n        method = 'getUpdates'\r\n        params = {'timeout': timeout, 'offset': offset}\r\n        resp = requests.get(self.api_url + method, params)\r\n        result_json = resp.json()[\"result\"]\r\n        return result_json\r\n\r\n    def send_message(self, chat_id, text):\r\n        params = {'chat_id': chat_id, 'text': text, 'parse_mode': 'HTML'}\r\n        method = 'sendMessage'\r\n        resp = requests.post(self.api_url + method, params)\r\n        return resp\r\n\r\n    def get_first_update(self):\r\n        get_result = self.get_updates()\r\n        if len(get_result) > 0:\r\n            last_update = get_result[0]\r\n        else:\r\n            last_update = None\r\n\r\n        return last_update\r\n\r\n\r\ntoken = '1281609833:AAFMVFZcn4YcbsKcK9Jv7yJCKlnDpoMhQkA' #Token of your bot\r\nmagnito_bot = BotHandler(\"1281609833:AAFMVFZcn4YcbsKcK9Jv7yJCKlnDpoMhQkA\") #Your bot instance\r\n\r\nbot=telebot.TeleBot(\"1281609833:AAFMVFZcn4YcbsKcK9Jv7yJCKlnDpoMhQkA\")\r\n\r\n\r\ndef main():\r\n    d0 = date(2021, 5, 2)\r\n    d1 = date.today()\r\n    delta = d1 - d0\r\n    new_offset = 0\r\n    print('hi, now launching...')\r\n\r\n    while True:\r\n        all_updates=magnito_bot.get_updates(new_offset)\r\n\r\n        if len(all_updates) > 0:\r\n            for current_update in all_updates:\r\n                print(current_update)\r\n                first_update_id = current_update['update_id']\r\n                if 'text' not in current_update['message']:\r\n                    first_chat_text='New member'\r\n                else:\r\n                    first_chat_text = current_update['message']['text']\r\n                first_chat_id = current_update['message']['chat']['id']\r\n                if 'first_name' in current_update['message']:\r\n                    first_chat_name = current_update['message']['chat']['first_name']\r\n                elif 'new_chat_member' in current_update['message']:\r\n                    first_chat_name = current_update['message']['new_chat_member']['username']\r\n                elif 'from' in current_update['message']:\r\n                    first_chat_name = current_update['message']['from']['first_name']\r\n                else:\r\n                    first_chat_name = \"unknown\"\r\n\r\n                if first_chat_text == '/start':\r\n                    #bot.send_message(first_chat_id,\"مرحباً بك في المساعد\")  # \"Welcome to the assistant\"\r\n                    magnito_bot.send_message(first_chat_id,\"مرحباً\")  # \"Hello\"\r\n                    new_offset = first_update_id + 1\r\n\r\n                elif first_chat_text==\"موعد العلاج\":  # \"treatment appointment\"\r\n                    for i in str(delta).split():\r\n                        if i.isdigit():\r\n                            k=int(i)+1\r\n\r\n                    if (k%2!=0):\r\n                        magnito_bot.send_message(first_chat_id,\"اليوم موعد العلاج\")  # \"The treatment session is today\"\r\n                    else:\r\n                        magnito_bot.send_message(first_chat_id,\"غداً موعد العلاج\")  # \"The treatment session is tomorrow\"\r\n                    new_offset = first_update_id + 1\r\n                else:\r\n                    #magnito_bot.send_message(first_chat_id,\" عدد الكلمات: \"+str(len(first_chat_text.split()))+\"\\nعدد الأحرف: \" +str(len(first_chat_text)) )  # word and character counts\r\n                    pass\r\n                    new_offset = first_update_id + 1\r\n\r\n\r\nif __name__ == '__main__':\r\n    try:\r\n        main()\r\n    except KeyboardInterrupt:\r\n        exit()","sub_path":"manage.py","file_name":"manage.py","file_ext":"py","file_size_in_byte":3783,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"}
{"seq_id":"205738660","text":"from django.conf.urls import patterns, include, url\nfrom django.contrib import admin\n\nadmin.autodiscover()\nurlpatterns = patterns('',\n\n    # authentication urls\n    url(r'^login/$', 'demo.views.login', name='login'),\n    url(r'^authenticate/$', 'demo.views.authenticate', name='authenticate'),\n    url(r'^logout/$', 'demo.views.logout', name='logout'),\n    
url(r'^login_success/$', 'demo.views.login_success', name='login_success'),\n    url(r'^invalid/$', 'demo.views.invalid', name='invalid'),\n\n    # registration urls\n    url(r'^register/$', 'demo.views.register', name='register'),\n    url(r'^register_success/$', 'demo.views.register_success', name='register_success'),\n    \n    # signup urls -- note: patterns handed to include() must not end in '$'\n    url(r'^', include('signups.urls')),\n\n    # blog urls\n    url(r'^blogs/', include('blog.urls')),\n    url(r'^blogs/vote/$', 'blog.views.vote', name='vote'),\n\n    # admin urls\n    url(r'^admin/', include(admin.site.urls)),\n    url(r'^markdown/', include('django_markdown.urls')),\n)\n","sub_path":"code-guild/demo/demo/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":982,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"}
{"seq_id":"20329687","text":"import os, sys\nimport time \nimport matplotlib.pyplot as plt\nimport numpy as np\n#import sklearn.datasets\n#import tensorflow as tf\nimport save_images\nimport mnist\n\nimport torch \nimport torch.autograd as autograd\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.optim as optim\n\nsys.path.append(os.getcwd())\ntorch.manual_seed(1)\nuse_cuda = torch.cuda.is_available()\n\nDIM = 64 # model dimensionality\nBATCH_SIZE = 50 # batch size\nCRITIC_ITERS = 5 # num of critic iters per gen iters\nLAMBDA = 10 # gradient penalty param\nITERS = 20000 # generator iterations\nOUTPUT_DIM = 784 # MNIST (28x28)\n\nclass Generator(nn.Module):\n    def __init__(self):\n        super(Generator, self).__init__()\n        preprocess = nn.Sequential(nn.Linear(128, 4*4*4*DIM),\n                                   nn.ReLU(True))\n        block_1 = nn.Sequential(nn.ConvTranspose2d(4*DIM, 2*DIM, 5),\n                                nn.ReLU(True))\n        block_2 = nn.Sequential(nn.ConvTranspose2d(2*DIM, DIM, 5),\n                                nn.ReLU(True))\n        deconv_out = nn.ConvTranspose2d(DIM, 1, 8, stride=2)\n        self.block_1 = block_1\n        self.block_2 = block_2\n        self.deconv_out = deconv_out\n        self.preprocess = preprocess \n        self.sigmoid = nn.Sigmoid()\n    def forward(self, inp):\n        output = self.preprocess(inp)\n        print(output.size())\n        output = output.view(-1, 4*DIM, 4, 4)\n        print(output.size())\n        output = self.block_1(output)\n        print(output.size())\n        output = output[:, :, :7, :7] \n        print(output.size())\n        output = self.block_2(output)\n        print(output.size())\n        output = self.deconv_out(output)\n        print(output.size())\n        output = self.sigmoid(output)\n        print(output.size())\n        return output.view(-1, OUTPUT_DIM)\n\n\nclass Discriminator(nn.Module):\n    def __init__(self):\n        super(Discriminator, self).__init__()\n        main = nn.Sequential(\n            nn.Linear(OUTPUT_DIM, 4*4*4*DIM),\n            nn.ReLU(True),\n            nn.Linear(4*4*4*DIM, 4*4*4*DIM),\n            nn.ReLU(True),\n            nn.Linear(4*4*4*DIM, 4*4*4*DIM),\n            nn.ReLU(True),\n            nn.Linear(4*4*4*DIM, 4*4*4*DIM),\n            nn.ReLU(True),\n            nn.Linear(4*4*4*DIM, 4*4*4*DIM),\n            nn.ReLU(True),\n            nn.Linear(4*4*4*DIM, 1),\n        )\n        self.main = main\n    def forward(self, inp):\n        return self.main(inp).view(-1) \n\ndef generate_image(frame, net_G):\n    noise = torch.randn(BATCH_SIZE, 128)\n    if use_cuda: noise = noise.cuda()\n    noise_var = autograd.Variable(noise, volatile=True)\n    samples = net_G(noise_var)\n    samples = samples.view(BATCH_SIZE, 28, 28)\n    samples = samples.cpu().data.numpy()\n    curr_dir= os.path.dirname(os.path.abspath(__file__))\n    save_images.save(samples, curr_dir+'/samples_{}.png'.format(frame))\n\n\ntrain_gen, dev_gen, test_gen = mnist.load(BATCH_SIZE, BATCH_SIZE)\n\ndef inf_train_gen():\n    while True:\n        for images, labels in train_gen():\n            yield images\n\ndef calc_grad_penalty(net_D, real_data, fake_data):\n
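    \"\"\"Docstring added for clarity: WGAN-GP gradient penalty (Gulrajani et al. 2017).\n    D is evaluated on random interpolates x_hat = alpha*x_real + (1-alpha)*x_fake and\n    deviations of the gradient norm from 1 are penalized: LAMBDA * E[(||grad D(x_hat)||_2 - 1)^2].\"\"\"\n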
    alpha = torch.rand(BATCH_SIZE, 1)\n    alpha = alpha.expand(real_data.size())\n    alpha = alpha.cuda() if use_cuda else alpha\n    interpolates = alpha * real_data + ((1-alpha) * fake_data)\n    if use_cuda: interpolates = interpolates.cuda()\n    interpolates = autograd.Variable(interpolates, requires_grad=True)\n    disc_interpolates = net_D(interpolates)\n    grads = autograd.grad(outputs=disc_interpolates,\n                          inputs=interpolates,\n                          grad_outputs=torch.ones(disc_interpolates.size()).cuda() \\\n                              if use_cuda else torch.ones(disc_interpolates.size()),\n                          create_graph=True,\n                          retain_graph=True,\n                          only_inputs=True)[0]\n    grad_penalty = ((grads.norm(2, dim=1) - 1)**2).mean() * LAMBDA\n    return grad_penalty\n\nnet_G = Generator()\nnet_D = Discriminator()\nprint(net_G)\nprint(net_D)\nif use_cuda:\n    net_D = net_D.cuda()\n    net_G = net_G.cuda()\nopt_D = optim.Adam(net_D.parameters(), lr=1e-4, betas=(0.5, 0.9))\nopt_G = optim.Adam(net_G.parameters(), lr=1e-4, betas=(0.5, 0.9))\none = torch.FloatTensor([1])\nm_one = one * -1\nif use_cuda:\n    one = one.cuda()\n    m_one = m_one.cuda()\ndata = inf_train_gen()\nfor iteration in range(ITERS):\n    start_time = time.time()\n    # update net D\n    for p in net_D.parameters(): \n        p.requires_grad = True\n    for iter_D in range(CRITIC_ITERS):\n        _ = next(data)\n        real_data = torch.Tensor(_)\n        if use_cuda: real_data = real_data.cuda()\n        real_data_var = autograd.Variable(real_data)\n        net_D.zero_grad()\n        # train with real\n        D_real = net_D(real_data_var)\n        D_real = D_real.mean()\n        D_real.backward(m_one)\n        #train with fake\n        noise = torch.randn(BATCH_SIZE, 128)\n        if use_cuda: noise = noise.cuda()\n        noise_var = autograd.Variable(noise, volatile=True)\n        fake = autograd.Variable(net_G(noise_var).data)\n        input_var = fake\n        D_fake = net_D(input_var)\n        D_fake = D_fake.mean()\n        D_fake.backward(one)\n        # train with grad penalty\n        grad_penalty = calc_grad_penalty(net_D, real_data_var.data, fake.data)\n        grad_penalty.backward()\n        D_cost = D_fake - D_real + grad_penalty\n        Wasserstein_D = D_real - D_fake\n        opt_D.step()\n    for p in net_D.parameters(): \n        p.requires_grad = False\n    net_G.zero_grad()\n    noise = torch.randn(BATCH_SIZE, 128)\n    if use_cuda: noise = noise.cuda()\n    noise_var = autograd.Variable(noise)\n    fake = net_G(noise_var)\n    G = net_D(fake)\n    G = G.mean()\n    G.backward(m_one)\n    G_cost = -G\n    opt_G.step()\n\n    print(time.time()-start_time)\n    print('Discriminator cost:', D_cost.cpu().data.numpy())\n    print('Generator cost:', G_cost.cpu().data.numpy())\n    print('Wasserstein distance:', Wasserstein_D.cpu().data.numpy())\n    \n    if iteration % 100 == 99:\n        dev_disc_costs = [] \n        for images, _ in dev_gen():\n            imgs = torch.Tensor(images)\n            if use_cuda: imgs = imgs.cuda()\n            imgs_var = autograd.Variable(imgs, volatile=True)\n            D = net_D(imgs_var)\n            _dev_disc_cost = -D.mean().cpu().data.numpy()\n            dev_disc_costs.append(_dev_disc_cost)\n        print(np.mean(dev_disc_costs))\n        generate_image(iteration, net_G)\n\n\n\n","sub_path":"pytorch/mnist/gan_mnist.py","file_name":"gan_mnist.py","file_ext":"py","file_size_in_byte":6487,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"}
{"seq_id":"123240418","text":"import logging\nimport torch\nimport torch.optim\n\ndef build_optimizer(model, optim=None, lr=None, momentum=None, reg=None, **kwargs):\n    if optim == 'sgd':\n        optim_kwargs = dict(lr=lr, momentum=momentum)\n        Optimizer = torch.optim.SGD\n    elif optim == 'adam':\n        optim_kwargs = dict(lr=lr, weight_decay=reg)\n        Optimizer = torch.optim.Adam\n    elif optim == 'rms':\n        optim_kwargs = dict(lr=lr, 
momentum=momentum)\n        Optimizer = torch.optim.RMSprop\n    else:\n        raise ValueError('unsupported optim: {}'.format(optim))\n\n    logging.info('***********')\n    logging.info('Optimizer is {}'.format(optim))\n    for k, v in optim_kwargs.items(): logging.info('{}: {}'.format(k, v))\n    logging.info('***********')\n\n    optimizer = Optimizer(model.parameters(), **optim_kwargs)\n\n    return optimizer\n
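\n# Usage sketch (added for illustration; assumes some torch.nn.Module named model):\n#     optimizer = build_optimizer(model, optim='adam', lr=1e-3, reg=1e-4)\n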
len: {}\".format(len(data)))\n \n data = client_socket.recv(1024).decode()\n conn.sendall(data.encode())\n \n\nconn.close()\n","sub_path":"image_taker_old.py","file_name":"image_taker_old.py","file_ext":"py","file_size_in_byte":1378,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"501042903","text":"# coding=utf-8\n\n\"\"\"\nAuthor: jinshuai_qiao\nDate: 2019/10/21\nDesc:\n\"\"\"\nimport datetime\n\nfrom github import Github\n\ngg = Github('q356970306@163.com', 'q58106513').get_user()\n\nprint(gg)\n\nfor repo in gg.get_repos():\n\n repo = gg.get_repo(repo.name)\n commits = repo.get_commits(\n sha='master',\n since=datetime.datetime.now() -\n datetime.timedelta(\n days=37),\n until=datetime.datetime.now())\n for cm in commits:\n print(repo.name, cm.commit.author, cm.commit.message)\n","sub_path":"do_report_github.py","file_name":"do_report_github.py","file_ext":"py","file_size_in_byte":520,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"223377283","text":"from spt3g import core, maps\nimport numpy as np\n\n__all__ = [\n \"CompactMaps\",\n \"RemoveWeights\",\n \"ApplyWeights\",\n \"FlattenPol\",\n \"MakeMapsPolarized\",\n \"MakeMapsUnpolarized\",\n \"ValidateMaps\",\n \"ExtractMaps\",\n \"InjectMapStub\",\n \"InjectMaps\",\n \"ReplicateMaps\",\n]\n\n\n@core.indexmod\ndef CompactMaps(frame, zero_nans=False):\n \"\"\"\n Compact all maps in a frame to their default sparse representation.\n Optionally remove NaN values as well. Removing NaN values will reduce\n memory use, but will remove the distinction in unweighted (or\n weight-removed) maps between unobserved regions and regions with zero\n temperature.\n \"\"\"\n for s in [\"T\", \"Q\", \"U\", \"Wunpol\", \"Wpol\"]:\n if s in frame:\n m = frame.pop(s)\n m.compact(zero_nans=zero_nans)\n frame[s] = m\n return frame\n\n\n@core.indexmod\ndef RemoveWeights(frame, zero_nans=False):\n \"\"\"\n Remove weights from input maps. If zero_nans is `True`, empty pixels are\n skipped and pixels with zero weight are set to 0 instead of NaN. Operation\n is performed in place to minimize memory use.\n \"\"\"\n if \"Wpol\" not in frame and \"Wunpol\" not in frame:\n return\n\n if not frame[\"T\"].weighted:\n return frame\n\n tmap = frame.pop(\"T\")\n\n if \"Wpol\" in frame:\n wmap = frame[\"Wpol\"]\n qmap = frame.pop(\"Q\")\n umap = frame.pop(\"U\")\n maps.remove_weights(tmap, qmap, umap, wmap, zero_nans=zero_nans)\n else:\n wmap = frame[\"Wunpol\"]\n maps.remove_weights_t(tmap, wmap, zero_nans=zero_nans)\n\n frame[\"T\"] = tmap\n if \"Wpol\" in frame:\n frame[\"Q\"] = qmap\n frame[\"U\"] = umap\n\n return frame\n\n\n@core.indexmod\ndef ApplyWeights(frame):\n \"\"\"\n Apply weights to the input maps. The operation is performed in place to\n minimize memory use.\n \"\"\"\n if \"Wpol\" not in frame and \"Wunpol\" not in frame:\n return\n\n if frame[\"T\"].weighted:\n return frame\n\n tmap = frame.pop(\"T\")\n\n if \"Wpol\" in frame:\n wmap = frame[\"Wpol\"]\n qmap = frame.pop(\"Q\")\n umap = frame.pop(\"U\")\n maps.apply_weights(tmap, qmap, umap, wmap)\n else:\n wmap = frame[\"Wunpol\"]\n maps.apply_weights_t(tmap, wmap)\n\n frame[\"T\"] = tmap\n if \"Wpol\" in frame:\n frame[\"Q\"] = qmap\n frame[\"U\"] = umap\n\n return frame\n\n\n@core.indexmod\ndef FlattenPol(frame, invert=False):\n \"\"\"\n For maps defined on the sphere the direction of the polarization angle is\n is defined relative to the direction of North. 
When making maps we follow\n this definition.\n\n For any flat sky estimators, the polarization angle is defined relative to\n the vertical axis. For some map projections the direction of north is not\n the same as the vertical axis. This function applies a rotation to the Q\n and U values to switch the curved sky Q/U definition to the flat sky Q/U\n definition.\n\n If for whatever reason you want to reverse the process set the invert\n argument to True.\n \"\"\"\n\n if \"Q\" not in frame or \"U\" not in frame:\n return\n\n qmap, umap = frame.pop(\"Q\"), frame.pop(\"U\")\n if not isinstance(qmap, maps.FlatSkyMap) or not isinstance(umap, maps.FlatSkyMap):\n return\n\n maps.flatten_pol(qmap, umap, invert=invert)\n\n frame[\"Q\"] = qmap\n frame[\"U\"] = umap\n\n return frame\n\n\n@core.indexmod\ndef MakeMapsPolarized(frame, pol_conv=maps.MapPolConv.IAU):\n \"\"\"\n Converts individual unpolarized maps to polarized versions of the same map,\n with the given polarization convention\n\n This module is only a shim that creates null Q and U maps and populates\n a properly invertible Wpol array from the TT Wunpol weights.\n \"\"\"\n if frame.type != core.G3FrameType.Map or \"Wunpol\" not in frame:\n return\n\n wgt = frame[\"Wunpol\"].TT\n del frame[\"Wunpol\"]\n\n qmap = frame[\"T\"].clone(False)\n qmap.pol_type = maps.MapPolType.Q\n frame[\"Q\"] = qmap\n umap = frame[\"T\"].clone(False)\n umap.pol_type = maps.MapPolType.U\n umap.pol_conv = pol_conv\n frame[\"U\"] = umap\n mask = maps.get_mask_map(wgt)\n\n wgt_out = maps.G3SkyMapWeights(frame[\"T\"], polarized=True)\n wgt_out.TT = wgt\n wgt_out.TQ = wgt.clone(False)\n wgt_out.TU = wgt.clone(False)\n wgt_out.QQ = mask\n wgt_out.QU = wgt.clone(False)\n wgt_out.UU = mask.clone(True)\n\n frame[\"Wpol\"] = wgt_out\n\n return frame\n\n\n@core.indexmod\ndef MakeMapsUnpolarized(frame):\n \"\"\"\n Converts individual polarized maps to temperature-only versions of the same map.\n \"\"\"\n if frame.type != core.G3FrameType.Map or \"Wpol\" not in frame:\n return\n\n wgt = frame[\"Wpol\"].TT\n del frame[\"Wpol\"]\n del frame[\"Q\"]\n del frame[\"U\"]\n\n wgt_out = maps.G3SkyMapWeights(frame[\"T\"], polarized=False)\n wgt_out.TT = wgt\n\n frame[\"Wunpol\"] = wgt_out\n\n return frame\n\n\n@core.indexmod\ndef ValidateMaps(frame, ignore_missing_weights=False):\n \"\"\"\n Validate that the input map frame has all the necessary keys.\n\n If ignore_missing_weights is False (default), a warning is issued when the\n frame contains weighted Stokes maps without a weights map. 
Set this option\n to True when feeding single bolometer map frames with common weights through\n a pipeline.\n \"\"\"\n\n if isinstance(frame, core.G3Frame) and frame.type != core.G3FrameType.Map:\n return\n\n map_id = frame.get(\"Id\", None)\n\n if \"T\" not in frame:\n core.log_fatal(\"Map frame %s: Missing T map\" % map_id, unit=\"ValidateMaps\")\n if (\"Q\" in frame and not \"U\" in frame) or (\"U\" in frame and not \"Q\" in frame):\n core.log_fatal(\"Map frame %s: Missing Q or U map\" % map_id, unit=\"ValidateMaps\")\n if \"Wpol\" in frame and \"Wunpol\" in frame:\n core.log_fatal(\n \"Map frame %s: Found both polarized and unpolarized weights\" % map_id,\n unit=\"ValidateMaps\",\n )\n\n stub = frame[\"T\"].clone(False)\n for k in [\"T\", \"Q\", \"U\", \"Wpol\", \"Wunpol\"]:\n if k not in frame:\n continue\n if not frame[k].compatible(stub):\n core.log_fatal(\n \"Map frame %s: Map %s not compatible with T map\" % (map_id, k),\n unit=\"ValidateMaps\",\n )\n if k in \"TQU\":\n if k == \"U\" and frame[k].pol_conv is maps.MapPolConv.none:\n core.log_warn(\n \"Map frame %s: U map polarization convention not set\" % map_id,\n unit=\"ValidateMaps\",\n )\n if frame[k].weighted and not ignore_missing_weights:\n if \"Wpol\" not in frame and \"Wunpol\" not in frame:\n core.log_warn(\n \"Map frame %s: Missing weights\" % map_id, unit=\"ValidateMaps\"\n )\n if k in \"QU\" and \"Wpol\" not in frame:\n core.log_warn(\n \"Map frame %s: Missing polarized weights\" % map_id,\n unit=\"ValidateMaps\",\n )\n else:\n if frame[k].polarized and (\"Q\" not in frame or \"U\" not in frame):\n core.log_fatal(\n \"Map frame %s: Missing Q or U maps\" % map_id, unit=\"ValidateMaps\"\n )\n elif not frame[k].polarized and (\"Q\" in frame or \"U\" in frame):\n core.log_fatal(\n \"Map frame %s: Found polarized maps with unpolarized weights\"\n % map_id,\n unit=\"ValidateMaps\",\n )\n\n\n@core.indexmod\nclass ExtractMaps(object):\n \"\"\"\n Cache maps that come through the pipeline. Initialize an instance of this\n module before adding to a pipeline. Any maps that pass through the pipe\n are stored in the .maps attribute of the object after the pipeline is run.\n\n Arguments\n ---------\n map_id : string\n If supplied, select only map frames that match this ID.\n copy : bool\n If True, make a copy of the map on extraction.\n ignore_missing_weights : bool\n If False (default), a warning is issued when the frame contains weighted\n Stokes maps without a weights map.
Set this option to True when feeding\n single bolometer map frames with common weights through a pipeline.\n \"\"\"\n\n def __init__(self, map_id=None, copy=False, ignore_missing_weights=False):\n self.map_id = map_id\n self.copy_ = copy\n self.ignore_missing_weights = ignore_missing_weights\n self.maps = {}\n\n def __call__(self, frame):\n if frame.type != core.G3FrameType.Map:\n return\n if self.map_id and frame[\"Id\"] != self.map_id:\n return\n\n ValidateMaps(frame, ignore_missing_weights=self.ignore_missing_weights)\n\n mid = frame[\"Id\"]\n mdict = {}\n for k in [\"T\", \"Q\", \"U\", \"Wpol\", \"Wunpol\"]:\n if k not in frame:\n continue\n mdict[k] = frame[k] if not self.copy_ else frame[k].copy()\n\n if mid not in self.maps:\n self.maps[mid] = mdict\n return\n\n if isinstance(self.maps[mid], dict):\n self.maps[mid] = [self.maps[mid], mdict]\n return\n\n self.maps[mid].append(mdict)\n\n\n@core.indexmod\nclass InjectMapStub(object):\n \"\"\"\n Inject a new map frame from a map stub.\n\n Arguments\n ---------\n map_id : string\n Id to assign to the new map frame\n map_stub : G3SkyMap instance\n Map stub from which to clone the Stokes maps and weights.\n polarized : bool\n If True, add Q and U maps to stub frame, and ensure that weights are\n polarized. Otherwise, only a T map is created.\n weighted : bool\n If True, add weights to the stub frame.\n pol_conv : MapPolConv instance\n Polarization convention to use.\n \"\"\"\n\n def __init__(\n self,\n map_id,\n map_stub,\n polarized=True,\n weighted=True,\n pol_conv=maps.MapPolConv.IAU,\n ):\n self.map_frame = core.G3Frame(core.G3FrameType.Map)\n self.map_frame[\"Id\"] = map_id\n\n map_stub = map_stub.clone(False)\n map_stub.weighted = weighted\n map_stub.pol_conv = pol_conv\n\n T = map_stub.clone(False)\n T.pol_type = maps.MapPolType.T\n self.map_frame[\"T\"] = T\n if polarized:\n Q = map_stub.clone(False)\n Q.pol_type = maps.MapPolType.Q\n self.map_frame[\"Q\"] = Q\n U = map_stub.clone(False)\n U.pol_type = maps.MapPolType.U\n self.map_frame[\"U\"] = U\n if weighted:\n W = maps.G3SkyMapWeights(map_stub, polarized)\n self.map_frame[\"Wpol\" if polarized else \"Wunpol\"] = W\n\n def __call__(self, frame):\n if self.map_frame is None:\n return\n\n map_frame = self.map_frame\n self.map_frame = None\n return [map_frame, frame]\n\n\n@core.indexmod\nclass InjectMaps(object):\n \"\"\"\n Inject a set of maps into a new map frame.\n\n Arguments\n ---------\n map_id : string\n Id to assign to the new map frame\n maps_in : list or dict\n Maps to add to the frame. If a list, contains Stokes maps with valid\n pol_type and weights. If a dict, contains Stokes and weights maps keyed\n by the standard map frame names.\n ignore_missing_weights [False] : bool\n Skip warning about missing weights.
Useful for masks.\n \"\"\"\n\n def __init__(self, map_id, maps_in, ignore_missing_weights=False):\n self.map_frame = core.G3Frame(core.G3FrameType.Map)\n self.map_frame[\"Id\"] = map_id\n\n if isinstance(maps_in, list):\n for m in maps_in:\n if isinstance(m, maps.G3SkyMap):\n k = str(m.pol_type)\n if k not in \"TQU\":\n raise ValueError(\"Input map has invalid pol_type %s\" % k)\n self.map_frame[k] = m\n elif isinstance(m, maps.G3SkyMapWeights):\n self.map_frame[\"Wpol\" if m.polarized else \"Wunpol\"] = m\n else:\n raise TypeError(\"maps_in must be G3SkyMap or G3SkyMapWeights\")\n\n elif isinstance(maps_in, dict):\n for k, m in maps_in.items():\n if k not in [\"T\", \"Q\", \"U\", \"Wpol\", \"Wunpol\"]:\n continue\n self.map_frame[k] = m\n\n else:\n raise TypeError(\"maps_in must be a list or dict\")\n\n ValidateMaps(\n self.map_frame, ignore_missing_weights=ignore_missing_weights\n )\n\n def __call__(self, frame):\n if self.map_frame is None:\n return\n\n map_frame = self.map_frame\n self.map_frame = None\n return [map_frame, frame]\n\n\n@core.indexmod\ndef ReplicateMaps(frame, input_map_id, output_map_ids, copy_weights=False):\n \"\"\"\n Clone the input map frame with Id input_map_id into new stub frames, one for\n each Id listed in output_map_ids.\n\n Arguments\n ---------\n input_map_id : string\n ID of the map frame to replicate. The input frame is discarded after\n replication.\n output_map_ids : list of strings\n List of IDs to assign to replicated map frames.\n copy_weights : bool\n If False, only the first output frame in the list includes a weights key\n (Wpol or Wunpol). If True, all output frames include a weights key.\n \"\"\"\n\n if frame.type != core.G3FrameType.Map:\n return\n\n if frame[\"Id\"] != input_map_id:\n return\n\n ValidateMaps(frame)\n\n frames = []\n\n first = True\n for oid in output_map_ids:\n fr = core.G3Frame(core.G3FrameType.Map)\n fr[\"Id\"] = oid\n if copy_weights or first:\n map_keys = [\"T\", \"Q\", \"U\", \"Wpol\", \"Wunpol\"]\n first = False\n else:\n map_keys = [\"T\", \"Q\", \"U\"]\n\n for k in map_keys:\n if k not in frame:\n continue\n fr[k] = frame[k].clone(False)\n\n frames.append(fr)\n\n return frames\n\n\n@core.indexmod\nclass CoaddMaps(object):\n \"\"\"\n Coadd maps and weights.\n\n Arguments\n ---------\n map_ids : list of str\n List of map Id's to include in the coadd. If None, any maps\n in the pipeline are included.\n output_map_id : str\n Id to assign to the output frame.\n ignore_missing_weights : bool\n If False (default), a warning is issued when the frame contains weighted\n Stokes maps without a weights map.
Set this option to True when feeding\n single bolometer map frames with common weights through a pipeline.\n \"\"\"\n\n def __init__(self, map_ids=None, output_map_id=None, ignore_missing_weights=False):\n self.coadd_frame = core.G3Frame(core.G3FrameType.Map)\n self.coadd_frame[\"Id\"] = output_map_id\n if isinstance(map_ids, str):\n map_ids = [map_ids]\n self.map_ids = map_ids\n self.ignore_missing_weights = ignore_missing_weights\n\n def __call__(self, frame):\n\n if frame.type == core.G3FrameType.EndProcessing:\n coadd = self.coadd_frame\n self.coadd_frame = None\n return [coadd, frame]\n\n if \"Id\" not in frame:\n return\n\n if self.map_ids is not None and frame[\"Id\"] not in self.map_ids:\n return\n\n ValidateMaps(frame, ignore_missing_weights=self.ignore_missing_weights)\n input_weighted = True\n if not frame[\"T\"].weighted:\n input_weighted = False\n ApplyWeights(frame)\n\n for key in [\"T\", \"Q\", \"U\", \"Wpol\", \"Wunpol\"]:\n if key not in frame:\n continue\n if key not in self.coadd_frame:\n self.coadd_frame[key] = frame[key].clone(False)\n m = self.coadd_frame.pop(key)\n m += frame[key]\n self.coadd_frame[key] = m\n\n if not input_weighted:\n RemoveWeights(frame)\n","sub_path":"maps/python/map_modules.py","file_name":"map_modules.py","file_ext":"py","file_size_in_byte":15843,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"83186609","text":"#!/usr/bin/env python3\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program.
If not, see <http://www.gnu.org/licenses/>.\n\n\n__app__ = \"fritzbox2mqtt Adapter\"\n__VERSION__ = \"0.5\"\n__DATE__ = \"19.07.2018\"\n__author__ = \"Markus Schiesser\"\n__contact__ = \"M.Schiesser@gmail.com\"\n__copyright__ = \"Copyright (C) 2018 Markus Schiesser\"\n__license__ = 'GPL v3'\n\nimport fritzbox\nimport threading\nimport time\nimport json\nimport os\nimport sys\nimport logging\nfrom configobj import ConfigObj\nimport paho.mqtt.client as mqtt\nfrom library.loghandler import loghandler\n\n\n\nclass Fritzmonitor(threading.Thread):\n\n def __init__(self, configfile):\n threading.Thread.__init__(self)\n\n self._configfile = configfile\n\n # self._log = logging.getLogger('fritzbox')\n # fh = logging.FileHandler('/home/tgdscm41/fritzbox/Fritzmonitor.log')\n # fh.setLevel(logging.DEBUG)\n #self._log.addHandler(fh)\n\n self._fritz = None\n\n def readConfig(self):\n # print('READCONFIG',self._configfile)\n _cfg = ConfigObj(self._configfile)\n\n if bool(_cfg) is False:\n print('ERROR config file not found', self._configfile)\n sys.exit()\n\n self._cfg_log = _cfg.get('LOGGING', None)\n self._cfg_ibox = _cfg.get('FRITZBOX', None)\n self._mqttCfg = _cfg.get('BROKER', None)\n # self._cfg_devices = _cfg.get('DEVICES',None)\n return True\n\n def startLogger(self):\n # print('STart Logger')\n # self._log = logging.getLogger(__name__)\n #fh = logging.FileHandler('/home/tgdscm41/fritzbox/Fritzmonitor.log')\n #fh.setLevel(logging.DEBUG)\n #self._log.addHandler(fh)\n # print(self._cfg_log)\n _mode = self._cfg_log.get('LOGMODE','PRINT')\n self._log = loghandler()\n self._log.handle(method=_mode,config=self._cfg_log)\n return True\n\n def startTR64(self):\n _host = self._cfg_ibox.get('HOST', '192.168.1.1')\n _user = self._cfg_ibox.get('USER', 'ms412')\n _password = self._cfg_ibox.get('PASSWORD', 'Swisscom10')\n\n self._fbox = fritzbox.Fritzbox()\n if not self._fbox.connect(_host, _user, _password):\n self._log.error('Failed to Connect to Fritzbox %s' % _host)\n return False\n\n return True\n\n def startCallMonitor(self):\n _host = self._cfg_ibox.get('HOST', '192.168.1.1')\n # _host = 'localhost'\n if self._fbox.cm_connect(_host):\n self._log.debug('Connected to CallMonitor with success')\n self._fbox.register_callback(self.callEvent)\n else:\n self._log.error('Failed to Connect to Fritzbox CallMonitor Interface Host: %s' % _host)\n return False\n\n return True\n\n def callEvent(self, msg):\n # print('callback',msg)\n _from = msg.get('FROM', 0)\n # print(_from)\n _name, _id = self._fbox.LookupName(_from)\n self._log.debug('Query Name from %s; Name found %s' % (_from, _name))\n # print('Name',self._fbox.LookupName(_from))\n msg['NAME'] = _name\n msg['ID'] = _id\n self.mqttPublish('CALLMONITOR', msg)\n return _name\n\n # def getPhonebook(self):\n # _host = '192.168.1.1'\n # _user = 'tgdscm41'\n # _password = 'nd%aG9im'\n\n # self._fbox = fritzbox.Fritzbox()\n # self._fbox.connect(_host, _user, _password)\n\n # self._fbox.GetPhoneBook()\n # (name,number) = self._fbox.LookupName('841953200')\n # print('Name: %s; Number: %s'%(name,number))\n # self._fbox.cm_connect('localhost')\n # self._fbox.register_callback(self.callEvent)\n\n def getPM(self):\n return self._fbox.getPM()\n\n def getCallerList(self):\n _temp = {}\n _incomming = self._fbox.incommingCalls()\n _missed = self._fbox.missedCalls()\n _outgoing = self._fbox.outgoingCalls()\n # print('Outgoing',_outgoing)\n _temp['INCOMMING'] = json.loads(_incomming)\n _temp['OUTGOING'] = json.loads(_outgoing)\n _temp['MISSED'] = json.loads(_missed)\n\n self._log.debug('getCallerList %s' %
(_temp))\n\n return _temp\n\n def callFilter(self, data):\n _list = []\n\n for item in data:\n # print('xxx',item)\n _templist = {}\n # print('xxxxxxxxxxxx',item)\n _date = item.get('Date', '')\n _name = item.get('Name', '')\n if not _name:\n _name = 'Unknown'\n _duration = item.get('Duration', '')\n _caller = item.get('Caller')\n _to = item.get('Called')\n\n if 'Anrufbeantworter' not in _name:\n # print('block')\n # else:\n _templist['DATE'] = _date\n _templist['DURATION'] = _duration\n _templist['NAME'] = _name\n _templist['CALLER'] = _caller\n _templist['TO'] = _to\n\n _list.append(_templist)\n\n return _list[:5]\n\n def mqttPublish(self, topic, data):\n _host = str(self._mqttCfg.get('HOST', 'localhost'))\n _port = int(self._mqttCfg.get('PORT', 1883))\n _channel = str(self._mqttCfg.get('PUBLISH', 'OPENHAB'))\n _deviceId = str(self._mqttCfg.get('DEVICE', 'FRITZBOX'))\n self._mqttc = mqtt.Client(str(os.getpid()), clean_session=True)\n\n # try:\n self._mqttc.connect(_host, _port, 60)\n _topic = '/' + _channel + '/' + _deviceId + '/' + topic\n self._mqttc.publish(_topic, json.dumps(data))\n # print(_topic, json.dumps(data))\n self._mqttc.loop(10)\n self._mqttc.disconnect()\n self._log.debug('message delivered to mqtt Server: %s; Topic: %s; Message: %s' % (_host, _topic, data))\n # except:\n # self._log.error('Cannot deliver message to mqtt Server')\n\n return True\n\n def run(self):\n # print('START')\n self.readConfig()\n self.startLogger()\n self._log.info('Fritzbox Call Monitor')\n while not self.startTR64():\n # print('X')\n self._log.debug('Failed to connect to Fritzbox, try again in 10 sec')\n time.sleep(10)\n\n\n # print('TEST')\n time.sleep(10)\n self.startCallMonitor()\n time.sleep(10)\n\n _saveTime = time.time() + 1\n # print(_saveTime)\n # while True:\n\n while True:\n if not self._fbox.isconnected():\n self._log.error('Lost Connection to fritzbox')\n self.startTR64()\n time.sleep(10)\n self.startCallMonitor()\n time.sleep(10)\n\n\n if _saveTime < time.time():\n _saveTime = time.time() + 300\n _result = self.getCallerList()\n for key, item in _result.items():\n self.mqttPublish(key, self.callFilter(item))\n\n self.mqttPublish('PM', self.getPM())\n\n time.sleep(1)\n\n\nif __name__ == '__main__':\n if len (sys.argv) > 1:\n _configfile = sys.argv[1]\n\n else:\n _configfile = './fritzbox2mqtt.cfg'\n print(_configfile)\n fZm = Fritzmonitor(_configfile)\n # fZm = Fritzmonitor('/home/tgdscm41/fritzbox/Fritzmonitor.cfg')\n fZm.run()\n","sub_path":"example/fritzbox2mqtt.py","file_name":"fritzbox2mqtt.py","file_ext":"py","file_size_in_byte":7747,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"364826902","text":"from bisect import bisect_left\n\ndef mergeSort(nlist):\n if len(nlist)>1:\n mid = len(nlist)//2\n lefthalf = nlist[:mid]\n righthalf = nlist[mid:]\n\n mergeSort(lefthalf)\n mergeSort(righthalf)\n i=j=k=0 \n while i < len(lefthalf) and j < len(righthalf):\n if lefthalf[i] < righthalf[j]:\n nlist[k]=lefthalf[i]\n i=i+1\n else:\n nlist[k]=righthalf[j]\n j=j+1\n k=k+1\n\n while i < len(lefthalf):\n nlist[k]=lefthalf[i]\n i=i+1\n k=k+1\n\n while j < len(righthalf):\n nlist[k]=righthalf[j]\n j=j+1\n k=k+1\n return nlist\n\ndef BinarySearch(a, x): \n i = bisect_left(a, x) \n if i != len(a) and a[i] == x: \n return i \n else: \n return -1\n \narr = [3, 13, 89, 34, 21, 44, 99, 56, 9]\nmergeSort(arr)\nx = 13 \nres = BinarySearch(arr, x) \nif res == -1: \n print(x, \"is absent\") \nelse: \n print(x, \"is present at\", res)
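 # BinarySearch assumes a sorted list (bisect_left bisects sorted input), hence the mergeSort(arr) call above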
\n","sub_path":"task_26-02.py","file_name":"task_26-02.py","file_ext":"py","file_size_in_byte":1046,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"352827610","text":"import shutil, os\r\n# variables\r\n# file Extensions\r\n_fileExtensions = {'Videos' \t : ['.mp4' , '.mkv', '.avi', '.mov', '.wmv', '.flv'],\r\n\t\t \t\t\t'Images' \t : ['.jpeg', '.jpg', '.png'],\r\n\t\t \t\t\t'Text' \t\t : ['.txt' , '.doc', '.docx'],\r\n\t\t \t\t\t'AudioFile' : ['.mp3' , '.wav', '.flac'],\r\n\t\t \t\t\t'Programming' : ['.html', '.py']\r\n\t\t \t\t }\r\n\r\n\r\n# creates the folders based on what filetypes are in the folders\r\ndef CreateFolders(_filepath):\r\n\tfor _files in os.listdir(_filepath): # get all the files in the directory\r\n\t\tfor _keys, _values in _fileExtensions.items(): # loops through the file extensions dictionary and saves the keys and values as varaibles\r\n\t\t\tfor _index in _values: # loops through all the indexes in the values lists\r\n\t\t\t\tif _files.endswith(_index): # checks if the file endswith one of the extensions\r\n\t\t\t\t\tif os.path.exists(os.path.join(_filepath, _keys)): # checks to see if the folder of the filestype(key) exists\r\n\t\t\t\t\t\tcontinue # if it does then if continues\r\n\t\t\t\t\telse:\r\n\t\t\t\t\t\tos.makedirs(os.path.join(_filepath, _keys)) # if not then its creates a file with the key as the folder name\r\n\r\n# sorts the files within the folder and moves them to the correct folder\r\ndef SortFiles(_filepath):\r\n\tfor _files in os.listdir(_filepath):\r\n\t\tfor _keys, _values in _fileExtensions.items():\r\n\t\t\tfor _index in _values:\r\n\t\t\t\tif _files.endswith(_index):\r\n\t\t\t\t\tshutil.move(os.path.join(_filepath, _files), os.path.join(_filepath, _keys))\r\n\r\n# Although the SortFiles() and CreateFolders() are very similiar, for some reason the code wont work if the are combined\r\n# If you can try and combine them be my guest \r\n\r\ndef Main():\r\n\t_filepathSource = os.getcwd()\r\n\r\n\tCreateFolders(_filepathSource)\r\n\tSortFiles(_filepathSource)\r\n\r\nMain()\r\n\r\n","sub_path":"FileSorter.py","file_name":"FileSorter.py","file_ext":"py","file_size_in_byte":1746,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"543254482","text":"# http://library.bjp.org/jspui/browse?type=title&sort_by=1&order=ASC&rpp=100&etal=0&submit_browse=Update\n\nimport scrapy\n\n\nclass BooksListSpider(scrapy.Spider):\n name = \"books_list\"\n start_urls = [\n # - Sorting by title (1)\n # - in order Ascending (ASC)\n # - Results/Page 100 (rpp)\n # - Authors/Record: All\n # - Offset 0 (first page)\n # - rpp = Results Per Page = 2642 (to get all books at once)\n # - etal = Et al. = \"et alia\" = \"and others\" = authors + contributors in this website\n 'http://library.bjp.org/jspui/browse?type=title&sort_by=1&order=ASC&rpp=2642&etal=-1&null=&offset=0'\n ]\n\n def parse(self, response):\n filename = 'books-list.html'\n with open(filename, 'wb') as f:\n f.write(response.body)\n self.log('Saved file %s' % filename)\n","sub_path":"exotic_india_library/spiders/download_books_list_webpage_spider.py","file_name":"download_books_list_webpage_spider.py","file_ext":"py","file_size_in_byte":841,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"231202567","text":"\"\"\"\n431. 
Encode N-ary Tree to Binary Tree (Hard)\n\nDesign an algorithm to encode an N-ary tree into a binary tree \nand decode the binary tree to get the original N-ary tree. An \nN-ary tree is a rooted tree in which each node has no more \nthan N children. Similarly, a binary tree is a rooted tree in \nwhich each node has no more than 2 children. There is no \nrestriction on how your encode/decode algorithm should work. \nYou just need to ensure that an N-ary tree can be encoded to \na binary tree and this binary tree can be decoded to the \noriginal N-ary tree structure.\n\nFor example, you may encode the following 3-ary tree to a \nbinary tree in this way:\n\nNote that the above is just an example which might or might \nnot work. You do not necessarily need to follow this format, so \nplease be creative and come up with different approaches yourself. \n\nNote:\n\nN is in the range of [1, 1000]\nDo not use class member/global/static variables to store \nstates. Your encode and decode algorithms should be stateless.\n\"\"\"\n\n\nclass Node(object):\n def __init__(self, val, children):\n self.val = val\n self.children = children\n\n# Definition for a binary tree node.\nclass TreeNode(object):\n def __init__(self, x):\n self.val = x\n self.left = None\n self.right = None\n\nclass Codec:\n\n def encode(self, root):\n \"\"\"Encodes an n-ary tree to a binary tree.\n \n :type root: Node\n :rtype: TreeNode\n \"\"\"\n if root is None:\n return None\n broot = TreeNode(root.val)\n node = broot\n for i, child in enumerate(root.children):\n new_node = self.encode(child)\n if i == 0:\n broot.left = new_node\n node = broot.left\n else:\n node.right = new_node\n node = node.right\n return broot\n\n def decode(self, data):\n \"\"\"Decodes your binary tree to an n-ary tree.\n \n :type data: TreeNode\n :rtype: Node\n \"\"\"\n if data is None:\n return None\n root = Node(data.val, [])\n if data.left is not None:\n new_node = self.decode(data.left)\n root.children.append(new_node)\n\n n = data.left.right\n while n is not None:\n new_node = self.decode(n)\n root.children.append(new_node)\n n = n.right\n return root\n\n\n# Your Codec object will be instantiated and called as such:\n# codec = Codec()\n# codec.decode(codec.encode(root))\n\nif __name__ == \"__main__\":\n n1 = Node(1, [])\n n2 = Node(2, [])\n n3 = Node(3, [])\n n4 = Node(4, [])\n n5 = Node(5, [])\n n6 = Node(6, [])\n n1.children = [n3, n2, n4]\n n3.children = [n5, n6]\n codec = Codec()\n b_root = codec.encode(n1)\n n_root = codec.decode(b_root)","sub_path":"python/leetcode/tree/431_nary_bin_tree.py","file_name":"431_nary_bin_tree.py","file_ext":"py","file_size_in_byte":2836,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"74150059","text":"#!/usr/bin/env python\n\n# Useful things to have\nfrom __future__ import division\nfrom math import *\nimport sys, os, re, math\n\n# Readline completion of everything :)\nimport rlcompleter, readline, atexit\ndefaultCompleter = rlcompleter.Completer()\n\nhistoryPath = os.path.expanduser(\"~/.pyhistory\")\n\ndef myCompleter(text, state):\n\tif text.strip() == \"\" and state == 0:\n\t\treturn text + \"\\t\"\n\telse:\n\t\treturn defaultCompleter.complete(text, state)\n\ndef save_history(historyPath=historyPath):\n import readline\n readline.write_history_file(historyPath)\n\nreadline.set_completer(myCompleter)\nreadline.parse_and_bind(\"tab: complete\")\n\nif os.path.exists(historyPath):\n
readline.read_history_file(historyPath)\n\natexit.register(save_history)\n\ndel rlcompleter, readline, atexit\n\n","sub_path":".pythonrc.py","file_name":".pythonrc.py","file_ext":"py","file_size_in_byte":770,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"287185640","text":"from snuba.clickhouse.columns import (\n ColumnSet,\n UInt,\n String,\n Nested,\n)\nfrom snuba.clusters.storage_sets import StorageSetKey\nfrom snuba.datasets.schemas.tables import TableSchema\nfrom snuba.datasets.schemas.join import (\n JoinConditionExpression,\n JoinCondition,\n JoinClause,\n JoinType,\n TableJoinNode,\n)\n\n\ntable1 = TableSchema(\n columns=ColumnSet(\n [\n (\"t1c1\", UInt(64)),\n (\"t1c2\", String()),\n (\"t1c3\", Nested([(\"t11c4\", UInt(64))])),\n ]\n ),\n local_table_name=\"table1\",\n dist_table_name=\"table1\",\n storage_set_key=StorageSetKey.EVENTS,\n).get_data_source()\n\ntable2 = TableSchema(\n columns=ColumnSet(\n [\n (\"t2c1\", UInt(64)),\n (\"t2c2\", String()),\n (\"t2c3\", Nested([(\"t21c4\", UInt(64))])),\n ]\n ),\n local_table_name=\"table2\",\n dist_table_name=\"table2\",\n storage_set_key=StorageSetKey.EVENTS,\n).get_data_source()\n\ntable3 = TableSchema(\n columns=ColumnSet(\n [\n (\"t3c1\", UInt(64)),\n (\"t3c2\", String()),\n (\"t3c3\", Nested([(\"t31c4\", UInt(64))])),\n ]\n ),\n local_table_name=\"table3\",\n dist_table_name=\"table3\",\n storage_set_key=StorageSetKey.EVENTS,\n).get_data_source()\n\n\nsimple_join_structure = JoinClause(\n TableJoinNode(table1.format_from(), table1.get_columns(), [], [], \"t1\"),\n TableJoinNode(table2.format_from(), table2.get_columns(), [], [], \"t2\"),\n [\n JoinCondition(\n left=JoinConditionExpression(table_alias=\"t1\", column=\"t1c1\"),\n right=JoinConditionExpression(table_alias=\"t2\", column=\"t2c2\"),\n ),\n JoinCondition(\n left=JoinConditionExpression(table_alias=\"t1\", column=\"t1c3\"),\n right=JoinConditionExpression(table_alias=\"t2\", column=\"t2c4\"),\n ),\n ],\n JoinType.INNER,\n)\n\ncomplex_join_structure = JoinClause(\n JoinClause(\n TableJoinNode(table1.format_from(), table1.get_columns(), [], [], \"t1\"),\n TableJoinNode(table2.format_from(), table2.get_columns(), [], [], \"t2\"),\n [\n JoinCondition(\n left=JoinConditionExpression(table_alias=\"t1\", column=\"t1c1\"),\n right=JoinConditionExpression(table_alias=\"t2\", column=\"t2c2\"),\n ),\n ],\n JoinType.FULL,\n ),\n TableJoinNode(table3.format_from(), table3.get_columns(), [], [], \"t3\"),\n [\n JoinCondition(\n left=JoinConditionExpression(table_alias=\"t1\", column=\"t1c1\"),\n right=JoinConditionExpression(table_alias=\"t3\", column=\"t3c3\"),\n ),\n ],\n JoinType.INNER,\n)\n","sub_path":"tests/datasets/schemas/join_examples.py","file_name":"join_examples.py","file_ext":"py","file_size_in_byte":2641,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"406304649","text":"# Part 3 - Use the binary perceptron to train classifiers to discriminate between classes\n\nimport numpy as np\nfrom DatasetHandler import DatasetHandler\nfrom Perceptron import Perceptron\nfrom Constant import Constant\n\nclass PerceptronClassification:\n def activation_score(self, data_row, weight, bias) -> int:\n activation = np.dot(data_row, weight) + bias\n return np.sign(activation)\n \n # Given a test dataset with feature + label array.
Returns the total accuracy for correct predictions\n def compute_prediction_accuracy(self, feature_dataset, label_dataset, bias, weight) -> int:\n correct_prediction = 0\n for i, data_row in enumerate(feature_dataset):\n activation_score = self.activation_score(data_row, weight, bias)\n if activation_score == label_dataset[i]:\n correct_prediction += 1\n return (correct_prediction / len(feature_dataset)) * 100\n\nfrom Constant import view_dataset\n\nif __name__ == \"__main__\":\n # Question 1 to 3\n # Loads the randomised dataset with two classes or all\n\n # Test and Train Dataset for class 1 and class 2\n train1 = DatasetHandler().extract_two_classes_from_dataset('train.data', 1, 2, randomise=False)\n test1 = DatasetHandler().extract_two_classes_from_dataset('test.data', 1, 2, randomise=False)\n\n view_dataset(train1.feature_dataset, train1.label_dataset)\n print(Constant.BREAKPOINT)\n view_dataset(test1.feature_dataset, test1.label_dataset)\n\n # Test and Train Dataset for class 2 and class 3\n train2 = DatasetHandler().extract_two_classes_from_dataset('train.data', 2, 3, randomise=False)\n test2 = DatasetHandler().extract_two_classes_from_dataset('test.data', 2, 3, randomise=False)\n\n # Test and Train Dataset between class 1 and class 3\n train3 = DatasetHandler().extract_two_classes_from_dataset('train.data', 1, 3, randomise=False)\n test3 = DatasetHandler().extract_two_classes_from_dataset('test.data', 1, 3, randomise=False)\n\n # Initialise perceptron algorithm with start weight of 0 and bias of 0\n m1 = Perceptron(0, 0)\n m2 = Perceptron(0, 0)\n m3 = Perceptron(0, 0)\n\n # Training the perceptron with 20 iterations for the three types of train dataset we have loaded\n m1.train_perceptron( train1.feature_dataset, train1.label_dataset, 20)\n m2.train_perceptron(train2.feature_dataset, train2.label_dataset, 20)\n m3.train_perceptron(train3.feature_dataset, train3.label_dataset, 20)\n\n # Initialise classification class to compute accuracy of test data\n activation_score = PerceptronClassification()\n\n # Compute the total accuracy for the three models given the train dataset\n trainAcc1 = activation_score.compute_prediction_accuracy(train1.feature_dataset, train1.label_dataset, m1.get_bias(), m1.get_weight())\n trainAcc2 = activation_score.compute_prediction_accuracy(train2.feature_dataset, train2.label_dataset, m2.get_bias(), m2.get_weight())\n trainAcc3 = activation_score.compute_prediction_accuracy(train3.feature_dataset, train3.label_dataset, m3.get_bias(), m3.get_weight())\n\n # Compute the total accuracy for the three models given the test dataset\n testAcc1 = activation_score.compute_prediction_accuracy(test1.feature_dataset, test1.label_dataset, m1.get_bias(), m1.get_weight())\n testAcc2 = activation_score.compute_prediction_accuracy(test2.feature_dataset, test2.label_dataset, m2.get_bias(), m2.get_weight())\n testAcc3 = activation_score.compute_prediction_accuracy(test3.feature_dataset, test3.label_dataset, m3.get_bias(), m3.get_weight())\n\n # print(\"The perceptron binary classification accuracy for training and testing dataset are:\")\n\n # print(Constant.BREAKPOINT)\n\n # print(f\"Train dataset with class 1 and 2 with accuracy of {trainAcc1}%\")\n # print(f\"Train dataset with class 2 and 3 with accuracy of {trainAcc2}%\")\n # print(f\"Train dataset with class 1 and 3 with accuracy of {trainAcc3}%\")\n\n # print(Constant.BREAKPOINT)\n\n # print(f\"Test dataset with class 1 and 2 with accuracy of {testAcc1}%\")\n # print(f\"Test dataset with class 2 and 3 with
accuracy of {testAcc2}%\")\n # print(f\"Test dataset with class 1 and 3 with accuracy of {testAcc3}%\")","sub_path":"PerceptronClassification.py","file_name":"PerceptronClassification.py","file_ext":"py","file_size_in_byte":4218,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"654517317","text":"\"\"\"\nsource:\n - https://github.com/the-raspberry-pi-guy/lcd\n - https://www.youtube.com/watch?v=3XLjVChVgec\n\n install:\n sudo apt-get install python3-smbus \n\"\"\"\n# import RPi.GPIO as GPIO\n# from RPLCD.gpio import CharLCD\n\nimport lcd.lcddriver as lcddriver\nimport time\nimport datetime\nfrom keypad_rpi.keypad import keypad\nimport RPi.GPIO as GPIO\n\n\nGPIO.setwarnings(False)\n\ndef GetTime():\n currentTime = datetime.datetime.now()\n return currentTime.strftime(\"%d.%m %a %H:%M\")\n\ndef PrintTime():\n display.lcd_display_string(GetTime(), 1)\n\n\ndef PrintServerStatus():\n display.lcd_display_string(\"jalan gan\", 1)\n\n\ndef PrintLocation():\n display.lcd_display_string(\"dimana kamuh???\", 1)\n\n\ndef printTime():\n display.lcd_clear()\n PrintTime()\n\ndef printLocation():\n display.lcd_clear()\n PrintLocation()\n\ndef printStatus():\n display.lcd_clear()\n PrintServerStatus()\n\ndisplay = lcddriver.lcd()\n\ntry:\n kp = keypad(columnCount = 3)\n while True:\n print(\"writing to LCD display\")\n\n digit = None\n while digit == None:\n digit = kp.getKey()\n print(digit)\n if(digit == 1):\n printStatus()\n elif(digit == 2):\n printLocation()\n elif(digit == 6):\n printTime()\n\nexcept KeyboardInterrupt:\n print(\"Cleaning up!\")\n display.lcd_clear()\n","sub_path":"modules/lcd_controller.py","file_name":"lcd_controller.py","file_ext":"py","file_size_in_byte":1366,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"191034391","text":"import os, sys, json\nfrom django.shortcuts import render, redirect\nfrom django.http.response import JsonResponse\nfrom django.views.decorators.csrf import csrf_exempt\n\n\n# db\nfrom accounts.models.user import User\nfrom accounts.models.project import Project\n\n\n@csrf_exempt\ndef change_project_title_request(request):\n\n data = json.loads(request.body.decode(\"utf-8\"))\n print(data)\n project_id = data[\"project_id\"]\n project_id = int(project_id)\n title = data[\"title\"]\n\n record_Project = Project.objects.get(id=project_id)\n record_Project.title = title\n record_Project.save()\n\n json_response = {\n \"code\" : 200\n }\n\n return JsonResponse(json_response)\n","sub_path":"accounts/src/project/change_project_title.py","file_name":"change_project_title.py","file_ext":"py","file_size_in_byte":683,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"61403744","text":"\"\"\"\nAsynchronous method\n\"\"\"\nimport aiohttp\nimport asyncio\nimport time\nfrom urllib.request import urlopen\n\nasync def crawl_one_url(url, session):\n get_request = session.get(url)\n\n res = await get_request\n txt = await res.text()\n\n get_request.close()\n\n return txt\n\n\nasync def crawl_urls(urls_to_crawl):\n session = aiohttp.ClientSession()\n\n work_to_do = list()\n for url in urls_to_crawl:\n work_to_do.append(crawl_one_url(url, session))\n\n res = await asyncio.gather(*work_to_do)\n\n await session.close()\n return res\n\n\ndef main():\n t0 = time.time()\n\n urls_to_crawl = get_urls_to_crawl()\n\n asyncio.run(crawl_urls(urls_to_crawl))\n elapsed = time.time() - t0\n print(\"\\n{} URLS downloaded in 
{:.2f}s\".format(len(urls_to_crawl), elapsed))\n\n\ndef get_urls_to_crawl():\n urls_list = list()\n urls_list.append('http://www.cnn.com/')\n urls_list.append('https://www.foxnews.com/')\n urls_list.append('https://www.bbc.com/')\n urls_list.append('https://www.dawn.com')\n urls_list.append('https://www.cnbc.com')\n urls_list.append('https://www.twitter.com')\n\n return urls_list\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"Crawler_Aynchronous.py","file_name":"Crawler_Aynchronous.py","file_ext":"py","file_size_in_byte":1171,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"62413166","text":"from PyQt5.QtWidgets import * \nfrom PyQt5 import QtCore \nfrom PyQt5.QtGui import * \nimport sys \n \nclass Window(QMainWindow): \n def __init__(self): \n super().__init__() \n \n # set the title \n self.setWindowTitle(\"Label\") \n \n # setting the geometry of window \n self.setGeometry(0, 0, 400, 300) \n \n # creating a label widget \n self.label_1 = QLabel('Normal Label', self) \n \n # moving position \n self.label_1.move(100, 100) \n \n # setting up border \n self.label_1.setStyleSheet(\"border: 1px solid black;\") \n \n # creating a label widget \n self.label_2 = QLabel('====== extra width label =====', self) \n \n # moving position \n self.label_2.move(100, 150) \n \n # setting up border \n self.label_2.setStyleSheet(\"border: 1px solid black;\") \n \n # resizing the widget \n self.label_2.resize(200, 20) \n \n # creating a label widget \n self.label_3 = QLabel('tiny label', self) \n \n # moving position \n self.label_3.move(100, 200) \n \n # setting up border \n self.label_3.setStyleSheet(\"border: 1px solid black;\") \n \n # resizing the widget \n self.label_3.resize(60, 15) \n \n \n \n # show all the widgets \n self.show() \n \n \n \n# create pyqt5 app \nApp = QApplication(sys.argv) \n \n# create the instance of our Window \nwindow = Window() \n \n# start the app \nsys.exit(App.exec()) ","sub_path":"Test/DeferentGUITest.py","file_name":"DeferentGUITest.py","file_ext":"py","file_size_in_byte":1476,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"274755989","text":"import random\r\nimport pickle\r\nimport pandas as pd\r\nimport copy\r\nimport numpy as np\r\nimport datetime\r\nimport matplotlib.pyplot as plt\r\nfrom tqdm import tqdm_notebook as tqdm\r\n\r\n\r\n\r\n\r\nplayers_df = pd.read_pickle('players_df_modified')\r\ngames_df = pd.read_pickle('games_df_modified')\r\n\r\n# assumes players_df is sorted by game_id\r\ndef find_players_from_game(players_df, game_id):\r\n start_found = 0\r\n players_from_game = pd.DataFrame(players_df.columns)\r\n players_lines = []\r\n for index, player_line in players_df.iterrows():\r\n if player_line['game_id'] == str(game_id):\r\n start_found = 1\r\n players_lines.append(player_line)\r\n if start_found == 1 and not (player_line['game_id'] == str(game_id)):\r\n break\r\n players_from_game.append(players_lines)\r\n return players_from_game\r\n\r\n\r\nplayers_away_list = []\r\nplayers_home_list = []\r\nnew_games_df = pd.DataFrame(columns=games_df.columns)\r\nnew_players_df = pd.DataFrame(columns=players_df.columns)\r\nnew_lines = []\r\nnew_player_lines = []\r\nplayers_df = players_df.astype({'game_id': 'int32'})\r\nplayers_df = players_df.sort_values(by='game_id', axis=0, ascending=True)\r\nplayers_df = players_df.astype({'game_id': 'str'})\r\n\r\nfor index, game_line in tqdm(games_df.iterrows()):\r\n players_away_list = []\r\n 
players_home_list = []\r\n # collect the original game row as well; pandas DataFrame.append returns a copy instead of modifying in place\r\n new_lines.append(game_line)\r\n for k in range(13):\r\n players_away_list.append(game_line['player_away' + str(k)])\r\n players_home_list.append(game_line['player_home' + str(k)])\r\n for k in range(100):\r\n new_game_line = copy.deepcopy(game_line)\r\n new_game_line['game_id'] = int(str(new_game_line['game_id']) + str(11211) + str(k))\r\n shuffeled_players_away_list = copy.deepcopy(players_away_list)\r\n shuffeled_players_home_list = copy.deepcopy(players_home_list)\r\n random.shuffle(shuffeled_players_away_list)\r\n random.shuffle(shuffeled_players_home_list)\r\n for k in range(13):\r\n new_game_line['player_away' + str(k)] = shuffeled_players_away_list[k]\r\n new_game_line['player_home' + str(k)] = shuffeled_players_home_list[k]\r\n new_lines.append(new_game_line)\r\n\r\n # players_from_game = players_df[players_df['game_id'] == str(game_line['game_id'])]\r\n players_from_game = find_players_from_game(players_df, game_line['game_id'])\r\n for index2, player_line in players_from_game.iterrows():\r\n new_player_line = copy.deepcopy(player_line)\r\n new_player_line['game_id'] = new_game_line['game_id']\r\n new_player_lines.append(new_player_line)\r\n\r\nnew_games_df = new_games_df.append(new_lines)\r\nnew_players_df = new_players_df.append(new_player_lines)","sub_path":"winner/test_stuff3.py","file_name":"test_stuff3.py","file_ext":"py","file_size_in_byte":2719,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"5210897","text":"__author__ = 'Josa G and Charles Carper'\r\n# Constants for the menu choices\r\nADD = 1\r\nSUBTRACT = 2\r\nMULTIPLY = 3\r\nDIVIDE = 4\r\nQUIT_CHOICE = 5\r\n# The main function.\r\ndef main():\r\n # The choice variable controls the loop\r\n # and holds the user's menu choice.\r\n choice = 0\r\n\r\n # display the menu.\r\n display_menu()\r\n while choice != QUIT_CHOICE:\r\n\r\n\r\n # Get the user's
choice.\r\n choice = int(input('\\nWhat type of operation would you like to perform? '))\r\n\r\n # Perform the selected action.\r\n if choice == ADD:\r\n num1 = int(input(\"Enter first number: \"))\r\n num2 = int(input(\"Enter second number: \"))\r\n print(num1, '+', num2, '=', num1+num2)\r\n elif choice == SUBTRACT:\r\n num1 = int(input(\"Enter first number: \"))\r\n num2 = int(input(\"Enter second number: \"))\r\n print(num1, '-', num2, '=', num1-num2)\r\n elif choice == MULTIPLY:\r\n num1 = int(input(\"Enter first number: \"))\r\n num2 = int(input(\"Enter second number: \"))\r\n print(num1, '*', num2, '=', num1*num2)\r\n elif choice == DIVIDE:\r\n num1 = int(input(\"Enter first number: \"))\r\n num2 = int(input(\"Enter second number: \"))\r\n print(num1, '/', num2, '=', num1/num2)\r\n elif choice == QUIT_CHOICE:\r\n print('\\n Thank you for using the Calc Program.\\n Program ended')\r\n\r\n else:\r\n print('Error: invalid selection.')\r\n\r\n# The display_menu function displays a menu.\r\ndef display_menu():\r\n print('Calculations Results')\r\n print(' 1) Add')\r\n print(' 2) Subtract')\r\n print(' 3) Multiply')\r\n print(' 4) Divide')\r\n print(' 5) Quit')\r\n\r\n# Call the main function.\r\nmain()","sub_path":"MidTerm-JoseGAndChuckCarper.py","file_name":"MidTerm-JoseGAndChuckCarper.py","file_ext":"py","file_size_in_byte":1762,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"407779540","text":"import requests\nfrom bs4 import BeautifulSoup as soup\nimport time\nimport json\nimport datetime\nfrom random import randint\n\n#Set the refresh rate in seconds\ninterval = 60\n#Choose either UK, US or CA\nregion = \"UK\"\n#Enter your discord webhook here\nwebhook = \"\"\n#If you want to use proxies, set useproxies to True and enter your proxies on individual lines in a file in the same relative directory called proxies.txt\nuseproxies = False\n\nif region.lower() == \"uk\":\n map = \"http://www.adidas.co.uk/static/on/demandware.static/-/Sites-CustomerFileStore/default/adidas-GB/en_GB/sitemaps/product/adidas-GB-en-gb-product.xml\"\n suggestionsbase = \"https://www.adidas.co.uk/api/suggestions/\"\nif region.lower() == \"us\":\n map = \"https://www.adidas.com/static/on/demandware.static/-/Sites-CustomerFileStore/default/adidas-US/en_US/sitemaps/product/adidas-US-en-us-product.xml\"\n suggestionsbase = \"https://www.adidas.com/api/suggestions/\"\nif region.lower() == \"ca\":\n map = \"http://www.adidas.ca/static/on/demandware.static/-/Sites-CustomerFileStore/default/adidas-CA/en_CA/sitemaps/product/adidas-CA-en-ca-product.xml\"\n suggestionsbase = \"https://www.adidas.ca/api/suggestions/\"\n\nheaders = {\n'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',\n'Accept-Encoding': 'gzip, deflate',\n'Accept-Language': 'en-US,en;q=0.9',\n'Connection': 'keep-alive',\n'Upgrade-Insecure-Requests': '1',\n'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/66.0.3359.139 Safari/537.36'}\n\ndef proxyforreq(proxy):\n\n try:\n proxy.split(\":\")[2]\n ip = proxy.split(\":\")[0]\n port = proxy.split(\":\")[1]\n userpassproxy = '%s:%s' % (ip, port)\n proxyuser = proxy.split(\":\")[2].rstrip()\n proxypass = proxy.split(\":\")[3].rstrip()\n proxies = {'http': 'http://%s:%s@%s' % (proxyuser, proxypass, userpassproxy),\n 'https': 'http://%s:%s@%s' % (proxyuser, proxypass, userpassproxy)}\n\n except:\n proxies = {'http': 'http://%s' % proxy, 'https': 'http://%s' %
proxy}\n\n return proxies\n\n\n\ndef gettitle(url, region):\n if region.lower() == \"uk\":\n base = url.split(\"/\")[3]\n if region.lower() == \"us\" or region.lower() == \"ca\":\n base = url.split(\"/\")[4]\n words = base.split(\"-\")\n baretitle = \"\"\n for word in words:\n word = word[0].capitalize() + word[1:]\n baretitle = baretitle + \" %s\"%word\n title = baretitle[1:]\n return title\n\n\ndef getdata(pid,suggestionsbase,useproxies,proxies):\n suggestionspage = suggestionsbase + pid\n if useproxies == False:\n # pass headers by keyword; the second positional argument of requests.get is params\n sug = requests.get(suggestionspage, headers=headers)\n else:\n sug = requests.get(suggestionspage, proxies=proxies, headers=headers)\n sugtext = json.loads(sug.text)\n prod = sugtext['products']\n prods = prod[0]\n price = prods['standardPrice']\n price = price.replace(\" \", \"\")\n url = prods['image']\n return price, url\n\n\ndef discordpost(title,data,url,webhook,useproxies,proxies):\n footer1 = {\n \"icon_url\": \"https://i.imgur.com/RKaFd4O.png\",\n \"text\": \"Developed by @SoleSorcerer\"\n \"\"}\n thumbnail1 = {\n \"url\": data[1]}\n fields1 = [\n {\n \"name\": \"Price:\",\n \"value\": data[0],\n \"inline\": \"true\"\n },\n {\n \"name\": \"Region:\",\n \"value\": region.upper(),\n \"inline\": \"false\"\n },\n ]\n\n embed = {\n \"title\": title,\n \"url\": url,\n \"color\": 5305409,\n \"timestamp\": str(datetime.datetime.now()),\n \"footer\": footer1,\n \"thumbnail\": thumbnail1,\n \"fields\": fields1\n }\n\n embed = [embed]\n discordjson = {\"embeds\": embed, \"username\": \"New Adi Item\"}\n if useproxies == False:\n requests.post(webhook, json=discordjson)\n else:\n requests.post(webhook, proxies=proxies, json=discordjson)\n\n\nlinks = []\n\nif useproxies == True:\n rawproxies = open('proxies.txt','r').read().splitlines()\nelse:\n rawproxies = \"\"\n\ndef loop(index,links,map,useproxies,rawproxies,region):\n foundnewlink = False\n if useproxies == False:\n html = requests.get(map, headers=headers)\n else:\n if len(rawproxies) == 0:\n print(\"Enter some proxies to use proxies\")\n if len(rawproxies) == 1:\n num = 0\n if len(rawproxies) > 1:\n # randint is inclusive of both endpoints, so cap at len - 1 to avoid an IndexError\n num = randint(0, len(rawproxies) - 1)\n\n proxies = proxyforreq(rawproxies[num])\n html = requests.get(map, proxies=proxies, headers=headers)\n pagesoup = soup(html.text, \"html.parser\")\n for item in pagesoup.find_all('loc'):\n link = item.text\n if link in links:\n pass\n else:\n if index != 0:\n if region.lower() == \"uk\" :\n pid = link.split(\"/\")[4].split(\".html\")[0]\n if region.lower() == \"us\" or region.lower() == \"ca\":\n pid = link.split(\"/\")[5].split(\".html\")[0]\n title = gettitle(link, region)\n if useproxies == False:\n data = getdata(pid, suggestionsbase, False, \"\")\n try:\n discordpost(title, data, link, webhook, False, \"\")\n except:\n print(\"Error posting to discord, make sure you have entered your webhook\")\n else:\n data = getdata(pid, suggestionsbase, True, proxies)\n try:\n discordpost(title, data, link, webhook, True, proxies)\n except:\n print(\"Error posting to discord, make sure you have entered your webhook\")\n\n print(\"--------------\\nFound new item: \\nTitle: %s\\nPrice: %s\\nLink: %s\\n---------------\\n\" %(title,data[0],link))\n links.append(link)\n foundnewlink = True\n if index == 0:\n print(\"Loaded products\")\n if foundnewlink == False:\n print(\"No new links found\")\n time.sleep(interval)\n index += 1\n
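# note: loop() calls itself once per refresh cycle; a while loop would avoid unbounded recursion on long runs\n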
loop(index,links,map,useproxies,rawproxies,region)\n\nloop(0,links,map,useproxies,rawproxies,region)\n\n\n\n\n\n","sub_path":"monitor.py","file_name":"monitor.py","file_ext":"py","file_size_in_byte":6155,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"524854043","text":"\n\nfrom xai.brain.wordbase.verbs._padlock import _PADLOCK\n\n#class header\nclass _PADLOCKS(_PADLOCK, ):\n\tdef __init__(self,): \n\t\t_PADLOCK.__init__(self)\n\t\tself.name = \"PADLOCKS\"\n\t\tself.specie = 'verbs'\n\t\tself.basic = \"padlock\"\n\t\tself.jsondata = {}\n","sub_path":"xai/brain/wordbase/verbs/_padlocks.py","file_name":"_padlocks.py","file_ext":"py","file_size_in_byte":245,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"284563094","text":"from django.contrib.auth import get_user_model\nfrom django.db import models\n\n\nUser = get_user_model()\n\n\ndef user_directory_path(instance, filename):\n # file will be uploaded to MEDIA_ROOT/name/\n return f'{instance.name}/{filename}'\n\n\nclass Product(models.Model):\n author = models.ForeignKey(\n to=User,\n related_name='fk_product_to_user',\n on_delete=models.CASCADE,\n null=True,\n )\n name = models.CharField(\n max_length=40,\n )\n\n class Category(models.TextChoices):\n Fruit = ('fruit', 'fruit')\n Vegetable = ('vegetable', 'vegetable')\n Other = ('other', 'other')\n\n category = models.CharField(\n max_length=10,\n choices=Category.choices,\n default=Category.Fruit,\n )\n\n def is_upperclass(self):\n # reference the TextChoices members on the nested class, not on the stored string value\n return self.category in {\n self.Category.Fruit,\n self.Category.Vegetable,\n self.Category.Other,\n }\n\n location = models.CharField(\n max_length=80,\n blank=True,\n default=\"\"\n )\n\n stock = models.IntegerField(\n null=True,\n )\n price = models.FloatField(\n )\n\n class Unit(models.TextChoices):\n kg = ('kg', 'kg')\n piece = ('piece', 'piece')\n\n units = models.CharField(\n max_length=10,\n choices=Unit.choices,\n default=Unit.kg,\n )\n\n def is_upperclass(self):\n return self.units in {\n self.Unit.kg,\n self.Unit.piece,\n }\n\n promotion = models.FloatField(\n null=True,\n blank=True\n )\n image = models.ImageField(\n upload_to=user_directory_path,\n null=True,\n )\n deliver_within_radius = models.IntegerField(\n null=True,\n blank=True\n )\n description = models.CharField(\n max_length=300,\n blank=True\n )\n expiration_date = models.DateField(\n null=True,\n blank=True\n )\n\n def __str__(self):\n return f\"{self.author.first_name}: {self.name[0:100]}\"\n\n","sub_path":"backend/apps/product/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":2016,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"478358424","text":"import subprocess as sb\nimport os\n\n#global shit\napp = \"youtube-dl\"\npath = \"-o '$HOME/Downloads/youtube-dl_songs/%(channel)s/%(title)s - %(channel)s.%(ext)s'\"\ntitle = \"-f 140 -o '%(title)s - %(channel)s.%(ext)s'\"\naudiodownload = \"-f 140 --extract-audio --audio-format mp3 --audio-quality 0\"\nthumbnaildownload = \"--write-thumbnail --skip-download\"\nfilename = \"--get-filename\"\n#function to check if name is present or not\ndef check_name(name):\n names = []\n #load names from the history file\n with open(\"/home/sohamch/yt-download_history.txt\",'r') as f:\n for line in f.readlines():\n l = line.strip(\"\\n\")\n names.append(l)\n f.close()\n if name in names:\n return True\n else:\n return False\n\n#generate bash file\ndef
generate_bash():\n with open(\"/home/sohamch/command.sh\",\"w\") as f:\n f.write(f\"{app} {path} {audiodownload} {link}\\n\")\n f.write(f\"{app} {path} {thumbnaildownload} {link}\\n\")\n f.write(f\"{app} {title} {link} {filename} >> $HOME/yt-download_history.txt\\n\")\n f.write(\"find ~/Downloads/youtube-dl_songs -name '*.webp' -exec magick {} {}.png \\; -exec rm {} \\;\\n\")\n f.write(\"sort -o $HOME/yt-download_history.txt $HOME/yt-download_history.txt\")\n f.close()\n\n\n\nlink = input()\n\n#get the video name\ncommand = f\"{app} {title} {link} {filename}\"\n\nname = sb.check_output(command,shell=True).decode().strip(\"\\n\")\nif check_name(name):\n print(\"[*] Song is already Downloaded\\n[*] Want to download again: \")\n x = input()\n if x == \"y\" or x ==\"Y\":\n #store data in command.sh file and execute it\n generate_bash()\n os.system(\"/home/sohamch/command.sh\")\n else:\n exit(1)\nelse:\n generate_bash()\n os.system(\"/home/sohamch/command.sh\")\n\n\n\n# with open('command.sh','w') as f:\n# f.write(f\"{app} {title} {filename} {link} | python check.py\\n\")\n# f.write(f\"{app} {path} {audiodownload} {link}\\n\")\n# f.write(f\"{app} {path} {thumbnaildownload} {link}\\n\")\n# f.write(f\"{app} {title} {filename} {link} >> yt-download_history.txt\")\n \n","sub_path":"ytdownload.py","file_name":"ytdownload.py","file_ext":"py","file_size_in_byte":2068,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"272123516","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sun Dec 13 03:32:18 2020\r\n\r\n@author: obemb\r\n\"\"\"\r\n\r\n# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sat Dec 12 01:46:17 2020\r\n\r\nSentiment Labelled Sentences Data Set\r\n\r\nData Set Information:\r\n\r\nThis dataset was created for the Paper 'From Group to Individual Labels using Deep Features', Kotzias et al., KDD 2015\r\nPlease cite the paper if you want to use it :)\r\n\r\nIt contains sentences labelled with positive or negative sentiment.\r\n\r\n=======\r\nFormat:\r\n=======\r\nsentence score\r\n\r\n\r\n=======\r\nDetails:\r\n=======\r\nScore is either 1 (for positive) or 0 (for negative)\r\nThe sentences come from three different websites/fields:\r\n\r\nimdb.com\r\namazon.com\r\nyelp.com\r\n\r\nFor each website, there exist 500 positive and 500 negative sentences. Those were selected randomly for larger datasets of reviews.\r\nWe attempted to select sentences that have a clearly positive or negative connotation, the goal was for no neutral sentences to be selected.\r\n\r\n@author: obemb\r\n\"\"\"\r\n\r\n# Step 1\r\n\r\n# Import libraries\r\nimport pandas as pd\r\nimport numpy as np\r\nprint(\"libraries installed successfully!\")\r\n\r\n\r\n# Acquire dataset/dataframe\r\ntitle = ['Review', 'Label']\r\n\r\ndf1 = pd.read_csv('amazon_cells_labelled.txt', delimiter = '\\t', names= title, quoting= 3, engine = 'python', encoding = 'latin-1')\r\n\r\n# 'sep' and 'delimiter' are aliases in pandas read_csv, so only one may be passed\r\ndf2 = pd.read_csv('imdb_labelled.txt', quoting = 3, delimiter = '\\t', names = title, engine = 'python', encoding = 'latin-1')\r\n\r\ndf3 = pd.read_csv('yelp_labelled.txt', delimiter = '\\t', names = title, quoting = 3, engine = 'python', encoding = 'latin-1')\r\n\r\ndf = pd.concat([df1, df2, df3], axis = 0, sort=False, ignore_index= True)\r\n#df = pd.concat([df_a, df3], axis = 0, sort=False, ignore_index= True)\r\ndf.reset_index(drop = True)\r\n\r\nprint(\" The shape of the dataframe is: \" , df.shape)\r\n\r\n# Step 2: Text Cleaning\r\n\r\nimport re\r\nimport nltk\r\nnltk.download('stopwords')\r\nfrom nltk.corpus import stopwords\r\nfrom nltk.stem.porter import PorterStemmer\r\ncorpus = []\r\n\r\nfor i in range( df.shape[0]):\r\n review = re.sub('[^a-zA-Z]', ' ', df['Review'][i] )\r\n review = review.lower()\r\n review = review.split()\r\n ps = PorterStemmer()\r\n all_stopwords = stopwords.words('english')\r\n all_stopwords.remove(\"isn't\")\r\n all_stopwords.remove(\"wasn't\")\r\n all_stopwords.remove(\"not\")\r\n review = [ps.stem(word) for word in review \r\n if not word in set(all_stopwords)]\r\n review = ' '.join(review)\r\n corpus.append(review)\r\n \r\n#print(corpus)\r\n\r\n# Step 3 : Create Bag of Word Model\r\n\r\nfrom sklearn.feature_extraction.text import CountVectorizer\r\ncv = CountVectorizer(max_features=3500)\r\nX = cv.fit_transform(corpus).toarray()\r\ny = df.iloc[:,-1].values\r\n\r\nprint(len(X[0]))\r\n\r\n\r\n# Step 4: Split data to training and test set\r\nfrom sklearn.model_selection import train_test_split\r\nX_train, X_test, y_train, y_test = train_test_split( X, y, test_size=0.2, random_state=42)\r\n\r\n# Train Model with Random Forest algorithm\r\n\r\n# Training the Random Forest model on the Training set\r\nfrom sklearn.ensemble import RandomForestClassifier\r\nclassifier = RandomForestClassifier(n_estimators = 200, random_state = 2)\r\nclassifier.fit(X_train, y_train)\r\n\r\n\r\n# Predicting the Test set results\r\ny_pred = classifier.predict(X_test)\r\nprint(np.concatenate((y_pred.reshape(len(y_pred),1), y_test.reshape(len(y_test),1)),1))\r\n\r\n# Making the Confusion Matrix\r\nfrom sklearn.metrics import confusion_matrix, accuracy_score\r\ncm = confusion_matrix(y_test, y_pred)\r\nprint(cm)\r\nscore = accuracy_score(y_test, y_pred)\r\n\r\nprint(\"The accuracy for the Random Forest Model is :\", score)","sub_path":"Main_RandomForest.py","file_name":"Main_RandomForest.py","file_ext":"py","file_size_in_byte":3606,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"317893681","text":"from django.urls import path\n\nfrom .
import views\n\nurlpatterns = [ \n    path('', views.hello, name='hello'),\n    path('add_post/', views.add_post, name='add_post'),\n    # NOTE: the <int:...> converters below were evidently stripped during HTML extraction; 'post_id' is an assumed parameter name\n    path('edit_post/<int:post_id>/', views.edit_post, name='edit_post'),\n    path('delete_post/<int:post_id>/', views.delete_post, name='delete_post'),\n]\n","sub_path":"hello/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":304,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"623733798","text":"__author__ = 'SRL'\n\n\nimport pandas as pd\nimport statsmodels.api as sm\nimport numpy as np\nfrom sklearn.preprocessing import StandardScaler\n\n\n\n'''\nBackward elimination\n'''\ndef backward_elimination(df,target,drop_features=[]):\n\n    # remove non-numeric columns and undesired features from the dataframe\n    df=df.select_dtypes(exclude=['object'])\n    const_feat=list(df.columns[df.nunique() <= 1])\n\n    drop_features=drop_features+const_feat\n    df = df.drop(drop_features, axis=1)\n    df.dropna(inplace=True)\n\n    # if the target is constant, return early instead of crashing\n    if target in const_feat:\n        score=[0 for feat in df.columns]\n        score = pd.Series(score,index = list(df.columns))\n\n        return None, list(df.columns), score\n\n    scaler = StandardScaler()\n    scaler.fit(df)\n    df[df.columns]=scaler.transform(df[df.columns])\n    #\n    y=df[target]\n    X = df.drop(target, axis=1)\n    \n    \n    \n    cols = list(X.columns)\n    pmax = 1\n    \n    while (len(cols)>0):\n        \n        p= []\n        X_1 = X[cols]\n        #X_1=scaler.fit_transform(X_1)\n        X_1 = sm.add_constant(X_1,has_constant='add')\n        \n        model = sm.OLS(y,X_1).fit()\n        p = pd.Series(model.pvalues.values[1:],index = cols)      \n        pmax = max(p)\n        feature_with_p_max = p.idxmax()\n        if pmax > 0.05:\n            cols.remove(feature_with_p_max)\n        else:\n            break\n\n    original_features=list(X.columns)    \n    selected_features_BE = list(cols)\n\n    score={feature:1 if feature in selected_features_BE else 0 for feature in original_features}\n    coef=pd.Series(list(score.values()), index=original_features)\n\n    return selected_features_BE,original_features,coef\n\n\n\n\n","sub_path":"Feature_Selection/feature_selection/backward_elimination.py","file_name":"backward_elimination.py","file_ext":"py","file_size_in_byte":1671,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"597091531","text":"# -*- coding: utf-8 -*-\n# -----------------------------------------------------------------------------\n# Copyright 2015 by Ecpy Authors, see AUTHORS for more details.\n#\n# Distributed under the terms of the BSD license.\n#\n# The full license is in the file LICENCE, distributed with this software.\n# -----------------------------------------------------------------------------\n\"\"\"Test utility functions found in transformers.\n\n\"\"\"\nfrom __future__ import (division, unicode_literals, print_function,\n                        absolute_import)\n\nfrom ecpy.utils.transformers import (basic_name_formatter, ids_to_unique_names)\n\n\ndef test_basic_name_formatter():\n    \"\"\"Test the base formatting.\n\n    \"\"\"\n    assert basic_name_formatter('test_test') == 'Test test'\n\n\ndef test_ids_to_unique_names():\n    \"\"\"Test the ids to names conversion.\n\n    \"\"\"\n    ids = ('ecpy.test.tester', 'ecpy.test.dummy_1', 'ecpy.dummy.dummy_1',\n           'user.test.tester')\n    assert (sorted(list(ids_to_unique_names(ids))) ==\n            sorted(('test.Dummy 1', 'dummy.Dummy 1', 'ecpy.test.Tester',\n                    'user.test.Tester')))\n\n\ndef test_ids_to_unique_names2():\n    \"\"\"Test the ids to names conversion with preformatting.\n\n    \"\"\"\n    ids = ('ecpy.test.tester', 'ecpy.test.dummy_1', 'ecpy.dummy.dummy_1',\n           'user.test.tester')\n    
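# From the assertions that follow, the preformatter runs on each id before the\n    # unique display names are derived, and the returned mapping goes from each\n    # display name back to its original id.\n    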
names = ids_to_unique_names(ids, preformatter=lambda x: x.capitalize())\n assert (sorted(names) ==\n sorted(('test.Dummy 1', 'dummy.Dummy 1', 'Ecpy.test.Tester',\n 'User.test.Tester')))\n assert names['User.test.Tester'] == ids[-1]\n\n\ndef test_ids_to_unique_names3():\n \"\"\"Test the ids to names conversion.\n\n \"\"\"\n ids = ('ecpy.test.tester', 'ecpy.test.dummy_1', 'ecpy.dummy.dummy_1',\n 'user.test.tester')\n assert (sorted(list(ids_to_unique_names(ids, reverse=True))) ==\n sorted(ids))\n","sub_path":"tests/utils/test_transformers.py","file_name":"test_transformers.py","file_ext":"py","file_size_in_byte":1876,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"86628984","text":"import sqlite3\n\nconn = sqlite3.connect(\"mydatabase.db\")\ncursor = conn.cursor()\n\ncursor.execute('''\n CREATE TABLE users(id INTEGER PRIMARY KEY, name TEXT,\n monthly_salary INTEGER, yearly_bonus INTEGER, position TEXT)\n''')\n\nconn.commit()\n","sub_path":"hackbulgaria/databases/1-sqlite-starter/create_company.py","file_name":"create_company.py","file_ext":"py","file_size_in_byte":261,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"563581145","text":"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\n\nclass CVAE(nn.Module):\n\n # define layers\n def __init__(self, conf):\n super(CVAE, self).__init__()\n self.hidden_size = conf['HIDDEN_SIZE']\n self.train_batch_size = conf['TEST_BATCHSIZE']\n self.classes = len(conf['CLASSES'])\n\n # Encoder layers\n self.enc_conv0 = nn.Conv2d(2, 64, 5, stride=2, padding=2)\n self.enc_bn0 = nn.BatchNorm2d(64)\n self.enc_conv1 = nn.Conv2d(64, 128, 5, stride=2, padding=2)\n self.enc_bn1 = nn.BatchNorm2d(128)\n self.enc_conv2 = nn.Conv2d(128, 256, 5, stride=2, padding=2)\n self.enc_bn2 = nn.BatchNorm2d(256)\n self.enc_conv3 = nn.Conv2d(256, 512, 5, stride=2, padding=2)\n self.enc_bn3 = nn.BatchNorm2d(512)\n self.enc_conv4 = nn.Conv2d(512, 1024, 3, stride=2, padding=1)\n self.enc_bn4 = nn.BatchNorm2d(1024)\n self.enc_fc1 = nn.Linear(4 * 4 * 1024, self.hidden_size * 2)\n\n # Cond encoder layers\n self.cond_enc_conv0 = nn.Conv2d(9, 64, 5, stride=2, padding=2)\n self.cond_enc_bn0 = nn.BatchNorm2d(64)\n self.cond_enc_conv1 = nn.Conv2d(64, 128, 5, stride=2, padding=2)\n self.cond_enc_bn1 = nn.BatchNorm2d(128)\n self.cond_enc_conv2 = nn.Conv2d(128, 256, 5, stride=2, padding=2)\n self.cond_enc_bn2 = nn.BatchNorm2d(256)\n self.cond_enc_conv3 = nn.Conv2d(256, 512, 5, stride=2, padding=2)\n self.cond_enc_bn3 = nn.BatchNorm2d(512)\n self.cond_enc_conv4 = nn.Conv2d(512, self.hidden_size, 3, stride=2, padding=1)\n\n # Decoder layers\n self.dec_upsamp1 = nn.Upsample(scale_factor=2, mode='bilinear', align_corners=True)\n self.dec_conv1 = nn.Conv2d(512 + self.hidden_size, 256, 5, stride=1, padding=2) # 512 (skips) + z (color emb)\n self.dec_bn1 = nn.BatchNorm2d(256)\n self.dec_upsamp2 = nn.Upsample(scale_factor=2, mode='bilinear', align_corners=True)\n self.dec_conv2 = nn.Conv2d(512, 128, 5, stride=1, padding=2) # 256 (out) + 256 (skips)\n self.dec_bn2 = nn.BatchNorm2d(128)\n self.dec_upsamp3 = nn.Upsample(scale_factor=2, mode='bilinear', align_corners=True)\n self.dec_conv3 = nn.Conv2d(256, 64, 5, stride=1, padding=2) # 128 (out) + 128 (skips)\n self.dec_bn3 = nn.BatchNorm2d(64)\n self.dec_upsamp4 = nn.Upsample(scale_factor=2, mode='bilinear', align_corners=True)\n self.dec_conv4 = nn.Conv2d(128, 64, 5, stride=1, padding=2) # final shape 64 x 64 x 2 (ab 
channels)\n self.dec_bn4 = nn.BatchNorm2d(64)\n self.dec_upsamp5 = nn.Upsample(scale_factor=2, mode='bilinear', align_corners=True)\n self.final_conv = nn.Conv2d(64, 2, 1, stride=1, padding=0)\n\n def encoder(self, x):\n \"\"\"\n :param x: AB COLOR IMAGE, shape: 2 x imgw x imgh\n :return: mu and log var for the hidden space\n \"\"\"\n x = F.relu(self.enc_conv0(x))\n x = self.enc_bn0(x)\n x = F.relu(self.enc_conv1(x))\n x = self.enc_bn1(x)\n x = F.relu(self.enc_conv2(x))\n x = self.enc_bn2(x)\n x = F.relu(self.enc_conv3(x))\n x = self.enc_bn3(x)\n x = F.relu(self.enc_conv4(x))\n x = self.enc_bn4(x)\n x = x.view(-1, 4 * 4 * 1024)\n # x = self.enc_dropout1(x)\n x = self.enc_fc1(x)\n mu = x[..., :self.hidden_size]\n logvar = x[..., self.hidden_size:]\n return mu, logvar\n\n def cond_encoder(self, x):\n \"\"\"\n :param x: GREY LEVEL OR SPECTRAL IMAGES. shape: 1 x imgw x imgh\n :return: skip activations + z hidden size\n \"\"\"\n x = F.relu(self.cond_enc_conv0(x))\n sc_feat64 = self.cond_enc_bn0(x)\n x = F.relu(self.cond_enc_conv1(x))\n sc_feat32 = self.cond_enc_bn1(x)\n x = F.relu(self.cond_enc_conv2(sc_feat32))\n sc_feat16 = self.cond_enc_bn2(x)\n x = F.relu(self.cond_enc_conv3(sc_feat16))\n sc_feat8 = self.cond_enc_bn3(x)\n # z = F.relu(self.cond_enc_conv4(sc_feat8))\n z = self.cond_enc_conv4(sc_feat8)\n return sc_feat64, sc_feat32, sc_feat16, sc_feat8, z\n\n def decoder(self, z, sc_feat64, sc_feat32, sc_feat16, sc_feat8):\n x = self.dec_upsamp1(z)\n x = torch.cat([x, sc_feat8], 1)\n x = F.relu(self.dec_conv1(x))\n x = self.dec_bn1(x)\n x = self.dec_upsamp2(x)\n x = torch.cat([x, sc_feat16], 1)\n x = F.relu(self.dec_conv2(x))\n x = self.dec_bn2(x)\n x = self.dec_upsamp3(x)\n x = torch.cat([x, sc_feat32], 1)\n x = F.relu(self.dec_conv3(x))\n x = self.dec_bn3(x)\n x = self.dec_upsamp4(x)\n x = torch.cat([x, sc_feat64], 1)\n x = F.relu(self.dec_conv4(x))\n x = self.dec_bn4(x)\n x = self.dec_upsamp5(x)\n\n x = self.final_conv(x)\n return x\n\n def forward(self, color, inputs, prediction=False):\n \"\"\"\n when training we accept color and greylevel, they are\n both encoded to z1 and z2. decoder gets z1*z2 in\n to recreate the color image. we also use skips from the b&w image encoder.\n on testing we get only the greyscale image, encoder returns z2.\n a random z1 is sampled and mul is executed. 
Finally, the result is decoded to colorize the image.\n        :param color: AB channel\n        :param inputs: L channel or spectral images\n        :param prediction: prediction flag, if true detach decoder and use fc layer\n        :return: predicted AB channel\n        \"\"\"\n\n        sc_feat64, sc_feat32, sc_feat16, sc_feat8, z_grey = self.cond_encoder(inputs)\n\n        if color is None: # TEST TIME\n            # z1 is sampled from Normal distribution,\n            # we don't have color input on testing!\n            z_rand = torch.randn(self.train_batch_size, self.hidden_size, 1, 1).repeat(1, 1, 4, 4).cuda()\n            z = z_grey * z_rand\n            return self.decoder(z, sc_feat64, sc_feat32, sc_feat16, sc_feat8), 0, 0\n        else:\n            mu, logvar = self.encoder(color)\n            stddev = torch.sqrt(torch.exp(logvar))\n            eps = torch.randn(stddev.size()).cuda()\n            z_color = torch.add(mu, torch.mul(eps, stddev))\n            z_color = z_color.reshape(-1, self.hidden_size, 1, 1).repeat(1, 1, 4, 4)\n            z = z_grey * z_color\n            return self.decoder(z, sc_feat64, sc_feat32, sc_feat16, sc_feat8), mu, logvar","sub_path":"networks/cvae_skips.py","file_name":"cvae_skips.py","file_ext":"py","file_size_in_byte":6385,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"588457754","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Tue Feb 20 11:03:35 2018\r\n\r\n@author: Brooke\r\n\"\"\"\r\n\r\n# HW 6 scripts\r\n\r\n# hw: write a for loop to look at first 10 pages:\r\n# for range 0 to 10, change l dynamically\r\n# url has page number\r\n\r\n# imports and setup \nfrom bs4 import BeautifulSoup\n# you can use either of these libraries to get html from a website\nimport requests\nimport urllib.request\nimport time\n\nimport pandas as pd\nimport scipy as sc\nimport numpy as np\n\nimport statsmodels.formula.api as sm\n\nimport matplotlib.pyplot as plt \nplt.style.use('ggplot')\n#%matplotlib inline \nplt.rcParams['figure.figsize'] = (10, 6) \n\n# %% Task 1.2\n## Your code goes here\n\n# download the first ten pages\n\nfor i in np.arange(1,11):\n\n    url = \"https://github.com/search?o=desc&p=\"+str(i)+\"&q=stars%3A%3E1&s=stars&type=Repositories\"\n\n    with urllib.request.urlopen(url) as response:\n        html = response.read()\n        html = html.decode('utf-8')\n    \n    time.sleep(10) # pause between requests to avoid hammering the server\n\n    #A simple solution is to just use ‘response.text’ to get html returned by requests.get(url) directly. 
This doesn't work on Windows.\n # html = requests.get(url)\n # html = html.text\n\n # # save the file\n filename = \"repos\"+str(i)+\".html\"\n with open(filename, 'w') as new_file:\n new_file.write(str(html.replace('\\n','').encode('utf-8')))\n \n # create one massive soup\n if i == 1:\n git_soup = BeautifulSoup(html, 'html.parser') # initialize git_soup w/ first page\n else:\n cup_soup = BeautifulSoup(html, 'html.parser') # soup for each page\n for element in cup_soup:\n git_soup.body.append(element) # massive soup that contains soups of all pages\n\n# massive soup that contains soups of all pages was created in for loop above\ngit_soup\n\n# %% Task 1.3\n\nname_list = [] # initialize lists\nurl_list = [] \ncont_list = [] \nlang_list = [] \nstars_list = []\nissues_list = []\nforks_list = []\nrmlength_list = []\n\nrepos = git_soup.find_all(\"div\",class_=\"col-8 pr-3\")\nrepos_lang = git_soup.find_all(\"div\",class_=\"d-table-cell col-2 text-gray pt-2\")\nurl = \"https://github.com\"\n\nfor i in range(100):\n name_list.append(repos[i].find(\"a\",class_=\"v-align-middle\").text)\n print(repos[i].find(\"a\",class_=\"v-align-middle\").text)\n url_tail = repos[i].find(\"a\").get(\"href\")\n repo_url = url + url_tail\n url_list.append(repo_url)\n with urllib.request.urlopen(repo_url) as response:\n repo_html = response.read()\n repo_html = repo_html.decode('utf-8')\n # if i%5==0:\n # time.sleep(5) #sleeps after every 5 request\n # # adjust frequency or time for sleeping if have issues\n with open('repo.html', 'w') as new_file:\n new_file.write(str(repo_html.replace('\\n','').encode('utf-8')))\n repo_soup = BeautifulSoup(repo_html, 'html.parser')\n cont_list.append(repo_soup.find(\"a\", href= lambda x : x and \"contributors\" in x).text.replace('\\n','').strip('contributors').strip())\n lang_list.append(repos_lang[i].text.strip())\n stars_list.append(repo_soup.find(\"a\",class_=\"social-count js-social-count\").text.strip())\n issues_list.append(repo_soup.find(\"span\",class_=\"Counter\").text)\n forks_list.append(repo_soup.find(\"a\", href= lambda x : x and \"network\" in x).text.strip())\n rmlength_list.append(len(repo_soup.find(\"div\",id=\"readme\").text.replace(\"\\n\",\"\")))\n \n# print('names:',name_list,'\\n')\n# print('URLs:',url_list,'\\n')\n# print('cont_list:',cont_list,'\\n')\n# print('lang_list:',lang_list,'\\n')\n# print('# of stars:',stars_list,'\\n')\n# print('# of issues:',issues_list,'\\n')\n# print('# of forks:',forks_list,'\\n')\n# print('RM lengths:',rmlength_list,'\\n')\n\n# %%\n\n# Convert list into DataFrame\nreposDF = pd.DataFrame({\"Repository Names\":name_list,\n \"URLs\":url_list,\n \"Contributors\":cont_list,\n \"Language\":lang_list,\n \"Stars\":stars_list,\n \"Issues\":issues_list,\n \"Forks\":forks_list,\n \"ReadMeLength\":rmlength_list})\n\nreposDF\n\n# Save dataframe to file project_info.csv\nreposDF.to_csv('project_info.csv', encoding='utf-8')\n\n# %% Task 2\n\n# this loads the data from the project_info.csv file \nproject_info1 = pd.read_csv('project_info.csv')\n# get rid of index column:\nproject_info = project_info1[['Repository Names','Contributors','Forks','Issues','Language','ReadMeLength','Stars','URLs']]\nproject_info = project_info.set_index('Repository Names')\nproject_info.head()\n\n\n# %% Task 2.1\n\nprint('Pre- data types: \\n',project_info.info(),'\\n')\nproject_info['Issues'] = project_info['Issues'].astype(str).str.replace(',','').astype(int)\nproject_info['Forks'] = 
project_info['Forks'].astype(str).str.replace(',','').astype(int)\nproject_info['Stars'] = project_info['Stars'].astype(str).str.replace(',','').astype(int)\nproject_info.loc['torvalds/linux','Contributors'] = 15000\nproject_info['Contributors'] = project_info['Contributors'].astype(str).str.replace(',','').astype(int)\nprint('Post- data types: \\n',project_info.info())\n\nproject_info.head(12)\n\n# %% Task 2.2\n\nproject_info.describe()\n\n# what are these two outliers? see explanation for this code in the last \"Your Interpretation\" section:\n# project_info.loc[project_info['Stars'] == 291631] #which repository has the max number of stars?\n# project_info = project_info[project_info['Stars'] != 291631] #mask to get rid of outliers\n# project_info = project_info[project_info['Contributors'] != 15000] #mask to get rid of outliers\n# project_info.shape\n\npi_corr = project_info.corr()\npi_corr\n\nind = np.arange(len(list(pi_corr))) + 0.5\nlabels = list(pi_corr)[1:]\nplt.pcolor(pi_corr,vmin=-1,vmax=1)\nplt.xticks(ind,list(pi_corr),rotation=90)\nplt.yticks(ind,list(pi_corr))\nplt.colorbar()\nplt.title('Heat Map of Coefficients')\n\npd.plotting.scatter_matrix(project_info, figsize=(10, 10), diagonal='kde')\nplt.show()\n\n# %% Task 2.3\npi_ols = sm.ols(formula=\"Stars ~ Forks + Contributors + Issues + ReadMeLength\", data=project_info).fit()\nprint('Proposed model \\n',pi_ols.summary(),'\\n')\n\n# pi_ols2 = sm.ols(formula=\"Stars ~ Forks\", data=project_info).fit() # yields an r-squared value > 0.5 after two outlier repositories are removed from data\n# print('Improved model \\n',pi_ols2.summary())\n\npi_ols2 = sm.ols(formula=\"Stars ~ Forks + Issues + Forks*Issues\", data=project_info).fit()\nprint('Improved model \\n',pi_ols2.summary())\n\n","sub_path":"HW6 - Copy/HW6_scripts.py","file_name":"HW6_scripts.py","file_ext":"py","file_size_in_byte":6454,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"339284559","text":"import csv\nfrom gdrivesdk.gdriveclient import GDriveClient\n\n\nclass Delete(object):\n PYTHON_COURSE_FOLDER_NAME = 'python_course'\n\n def __init__(self, args):\n self._validate_arguments(args)\n self.year = args.year\n self.type = args.type\n self.data = args.data\n self.file = args.file\n self.all = args.all\n self.gdrive_client = GDriveClient(args)\n\n def _validate_arguments(self, args):\n if args.year is None:\n raise Exception('Year not passed')\n if args.type is None:\n raise Exception('Type of folder not passed')\n if not args.all:\n if args.file is None and args.data is None:\n raise Exception('Data or file is not passed to create shared folders')\n else:\n if args.file is not None or args.data is not None:\n raise Exception('Data or file option should not be provided as -a/--all is set')\n\n def execute(self):\n type_id = self._check_expected_folders_are_created_and_return_type_id()\n folder_info = self.gdrive_client.search_and_return_folder_info(\"'{0}' in parents\".format(type_id))\n\n if self.all:\n self._delete_all_folders(folder_info)\n else:\n self._delete_folders_based_on_entries(folder_info)\n\n def _check_expected_folders_are_created_and_return_type_id(self):\n python_course_folder_id = self._check_folder_is_created_and_return_id(Delete.PYTHON_COURSE_FOLDER_NAME, 'root')\n year_folder_id = self._check_folder_is_created_and_return_id(self.year, python_course_folder_id)\n return self._check_folder_is_created_and_return_id(self.type, year_folder_id)\n\n def _check_folder_is_created_and_return_id(self, 
folder_name, parent_folder_id):\n        folder_id = self._get_folder_id_in_parent_folder(folder_name, parent_folder_id)\n        if folder_id is None:\n            raise Exception(\"Folder '{}' not found\".format(folder_name))\n        return folder_id\n\n    def _get_folder_id_in_parent_folder(self, folder_name, parent_folder_id):\n        search_query = \"name = '{0}' and '{1}' in parents\".format(folder_name, parent_folder_id)\n        folder_info = self.gdrive_client.search_and_return_folder_info(search_query)\n        if len(folder_info) == 1:\n            print('Found folder {0} which is either created or exists in trash'.format(folder_name))\n            return folder_info[folder_name]\n        elif len(folder_info) == 0:\n            return None\n        raise Exception(\"More than one '{0}' folder was found\".format(folder_name))\n\n    def _delete_all_folders(self, folder_info):\n        for folder_name, folder_id in folder_info.items():\n            self.gdrive_client.delete_folder(folder_id)\n            print(\"Deleted folder '{0}'\".format(folder_name))\n\n    def _delete_folders_based_on_entries(self, folder_info):\n        entries = self._get_entries()\n\n        for entry in entries:\n            folder_name = entry['uniqueId']\n            if folder_name in folder_info:\n                folder_id = folder_info[folder_name]\n                self.gdrive_client.delete_folder(folder_id)\n                print(\"Deleted folder '{0}'\".format(folder_name))\n            else:\n                print(\"Folder '{0}' not found. Skipping delete\".format(folder_name))\n\n    def _get_entries(self):\n        if self.file:\n            entries = self._get_student_data_from_file()\n        else:\n            split_data = self.data.split(',')\n            entries = [{'uniqueId': split_data[0], 'emailId': split_data[1]}]\n        return entries\n\n    def _get_student_data_from_file(self):\n        with open(self.file) as csv_file:\n            reader = csv.DictReader(csv_file)\n            entries = []\n            for row in reader:\n                entries.append(row)\n        return entries","sub_path":"command/gdrive/delete.py","file_name":"delete.py","file_ext":"py","file_size_in_byte":3812,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"16314015","text":"#!/usr/local/bin/python\n\nimport argparse\n\nfrom cloudtrail_and_config import get_bucket_policy\nfrom password_policy import set_pw_policy\nfrom utils import load_config\n\n\ndef main():\n    config = load_config(\"./config.cfg\")\n    conf_parser = argparse.ArgumentParser()\n    conf_parser.add_argument(\"-i\", \"--account_id\", dest=\"account_id\", required=False, help=\"12 digit aws account id\")\n    conf_parser.add_argument(\"-r\", \"--region\", dest=\"region\", default=\"eu-central-1\", required=False, help=\"region\")\n    conf_parser.add_argument(\"-f\", \"--file\", dest=\"file\", required=False, help=\"input file\")\n    conf_parser.add_argument(\"-t\", \"--trail\", action='store_true', help=\"configure cloudtrail\")\n    args = conf_parser.parse_args()\n    # --trail is a store_true flag, so it is False (never None) when absent\n    if args.file is None and args.account_id is None and not args.trail:\n        conf_parser.error(\"at least one of --account_id, --file or --trail required\")\n    if args.file:\n        accountlist = open(args.file, \"r\")\n        for account_id in accountlist:\n            set_pw_policy(config, str.strip(account_id), args.region)\n    elif args.account_id:\n        set_pw_policy(config, args.account_id, args.region)\n    else:\n        get_bucket_policy(config)\n\n\n\nif __name__ == \"__main__\":\n    main()\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1274,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"124766666","text":"#! 
/usr/bin/env python\n# -*- coding: utf-8 -*-\n# vim:fenc=utf-8\n#\n# Copyright © 2016 matsumotoyasuyuki\n#\n# Distributed under terms of the MIT license.\n\n\nfrom __future__ import print_function\nimport argparse\nimport sys\nimport os\n\nimport numpy as np\n# import six\nfrom PIL import Image\n# import pickle\n\nimport chainer\nfrom chainer import cuda\nfrom chainer import serializers\n# import chainer.functions as F\n# from chainer.links import caffe\nfrom tqdm import tqdm\n\nimport vgg_fc7\n\nparser = argparse.ArgumentParser(\n description='Predict a label for a image based on CaffeNet')\n# parser.add_argument('--dir', '-d', default=None,\n# help='Path to image directory for prediction')\nparser.add_argument('--val', '-v', default=None,\n help='path validation images directory')\n# parser.add_argument('--model', default='../caffemodel',\n# help='Path to the pretrained Caffe model')\nparser.add_argument('--mean', '-m', default='ilsvrc_2012_mean.npy',\n help='Path to the mean file')\nparser.add_argument('--gpu', '-g', type=int, default=-1,\n help='Zero-origin GPU ID (nevative value indicates CPU)')\nargs = parser.parse_args()\n\n# IID = 'n04311004'\nHOME = os.environ['HOME']\nVGG_WEIGHT_FILE = 'vgg16.h5'\n\n\n# for IID in ['n02691156', 'n02802426', 'n04311004', 'n08253141', 'n09792969',\nfor IID in ['n02802426', 'n04311004']:\n # IID = 'val'\n FEATURE_DIR = HOME + '/mnt/ILSVRC2012/feature_vgg/' + IID + '/'\n IMAGE_DIR = HOME + '/mnt/ILSVRC2012/train/' + IID + '/'\n # IMAGE_DIR = HOME + '/mnt/ILSVRC2012/misc/' + IID + '/'\n\n if args.gpu >= 0:\n cuda.check_cuda_available()\n xp = cuda.cupy if args.gpu >= 0 else np\n\n print('Loading VGG network model...', file=sys.stderr)\n func = vgg_fc7.Vgg16(train=False)\n serializers.load_hdf5(VGG_WEIGHT_FILE, func)\n print('Loaded', file=sys.stderr)\n\n if args.gpu >= 0:\n cuda.get_device(args.gpu).use()\n func.to_gpu()\n\n # in_size = 227\n in_size = 224\n mean_image = np.load(args.mean)\n\n def forward(x):\n # y, = func(inputs={'data': x}, outputs=['fc6'], train=False)\n y = func(x)\n # return F.softmax(y)\n return y\n\n cropwidth = 256 - in_size\n start = cropwidth // 2\n stop = start + in_size\n mean_image = mean_image[:, start:stop, start:stop].copy()\n\n # if args.dir:\n with open(HOME + '/mnt/ILSVRC2012/dropped/' + IID + '_vgg_dropped.txt', 'w') as ferr:\n images = os.listdir('{0}'.format(IMAGE_DIR))\n if not os.path.isdir(FEATURE_DIR):\n os.makedirs(FEATURE_DIR)\n for i in tqdm(images):\n try:\n image = np.asarray(Image.open(IMAGE_DIR + i).resize((in_size, in_size))).transpose(2, 0, 1)[::-1].astype(np.float32)\n image -= mean_image\n pred = forward(chainer.Variable(xp.asarray([image]), volatile=True))\n\n if args.gpu >= 0:\n pred = cuda.to_cpu(pred.data)\n else:\n pred = pred.data\n\n np.save(FEATURE_DIR + i.split('.')[0] + '.npy', pred)\n except ValueError:\n ferr.write(i.split('.')[0] + '\\n')\n\n if args.val:\n with open(HOME + '/mnt/imagenet/images/dropped/' + IID + '_dropped.txt', 'w') as ferr:\n images = os.listdir('{0}'.format(args.val))\n for i in tqdm(images):\n try:\n image = np.asarray(Image.open(args.val + i).resize((in_size, in_size))).transpose(2, 0, 1)[::-1].astype(np.float32)\n image -= mean_image\n pred = forward(chainer.Variable(xp.asarray([image]), volatile=True))\n\n if args.gpu >= 0:\n pred = cuda.to_cpu(pred.data)\n else:\n pred = pred.data\n\n np.save(FEATURE_DIR + i.split('.')[0] + '.npy', pred)\n except ValueError:\n ferr.write(i.split('.')[0] + 
'\\n')\n","sub_path":"net_feat/feature_extraction_imagenet_vgg.py","file_name":"feature_extraction_imagenet_vgg.py","file_ext":"py","file_size_in_byte":4022,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"417022523","text":"import urllib.request\nimport os\n\nfrom google_drive_downloader import GoogleDriveDownloader as gdd\n\ncwd = os.getcwd()\n\n\ndef _download_raw_dataset():\n    gdd.download_file_from_google_drive(file_id='19ximkMm1UEkuZ2NUHohCzQpVLOZdzDFz',\n                                        dest_path=os.path.join(cwd, \"data\\\\raw\\\\data.csv\"),\n                                        unzip=False)\n\n    print(\"Raw dataset downloaded in data\\\\raw\" + \"\\n\")\n\n\ndef main():\n    \"\"\" Download the datasets already prepared for this project.\n    \"\"\"\n    _download_raw_dataset()\n\nif __name__ == '__main__':\n    main()","sub_path":"src/make_dataset.py","file_name":"make_dataset.py","file_ext":"py","file_size_in_byte":595,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"94658193","text":"# -*- coding: utf-8 -*-\nimport math\nimport os\nimport time\nfrom datetime import datetime\nimport sys\nfrom PyQt5 import QtCore, QtWidgets\nfrom PyQt5.QtCore import QThread\nfrom PyQt5.QtWidgets import QFileDialog, QMessageBox, QApplication, QMainWindow, QWidget\n\nfrom drone import Drone\nfrom vehicleStatus import VehicleStatus\nfrom vehicleLocation import VehicleLocation\n\nfrom pyqtlet import L, MapWidget\n\nimport gi\nfrom gi.overrides import Gtk\n\ngi.require_version('Gst', '1.0')\ngi.require_version('GstVideo', '1.0')\nfrom gi.repository import Gst, GObject, GstVideo\n\nGObject.threads_init()\nGst.init(None)\n\nfrom mqttClient import MqttClient\nfrom plotCanvas import PlotCanvas\n\nfrom threadGUI import ThreadGUI\nfrom qfi import qfi_ADI, qfi_ALT, qfi_SI, qfi_HSI, qfi_VSI, qfi_TC\n\n\nclass Monitor(QMainWindow):\n    def setupUi(self, MainWindow):\n        self.drone = None\n        MainWindow.setObjectName(\"MainWindow\")\n        MainWindow.resize(1792, 1008)\n        sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Fixed)\n        sizePolicy.setHorizontalStretch(0)\n        sizePolicy.setVerticalStretch(0)\n        sizePolicy.setHeightForWidth(MainWindow.sizePolicy().hasHeightForWidth())\n        MainWindow.setSizePolicy(sizePolicy)\n        self.centralwidget = QtWidgets.QWidget(MainWindow)\n        self.centralwidget.setObjectName(\"centralwidget\")\n        self.gridLayout_1 = QtWidgets.QGridLayout(self.centralwidget)\n        self.gridLayout_1.setObjectName(\"gridLayout_1\")\n\n        # create a simple video window\n        self.container = QWidget(self)\n        self.container.setFixedSize(854,480)\n        self.container.setWindowTitle('Test1')\n\n        # container.connect('destroy', self.quit)\n        self.setCentralWidget(self.container)\n        self.winId = self.container.winId()\n        #self.resize(480, 320)\n\n        # Create GStreamer pipeline\n        self.videoPipeline()\n\n        # Create bus to get events from GStreamer pipeline\n        self.bus = self.pipeline.get_bus()\n        self.bus.add_signal_watch()\n        self.bus.enable_sync_message_emission()\n        self.bus.connect('message::error', self.on_error)\n        self.bus.connect('message::eos', self.on_eos)\n        self.bus.connect('sync-message::element', self.on_sync_message)\n\n        # uav detail ui\n        self.gridLayout = QtWidgets.QGridLayout()\n\n        self.adi = qfi_ADI.qfi_ADI(self)\n        self.adi.resize(240, 240)\n        self.adi.reinit()\n        self.gridLayout.addWidget(self.adi, 0, 0)\n\n        self.alt = qfi_ALT.qfi_ALT(self)\n        self.alt.resize(240, 240)\n\n        self.alt.reinit()\n        self.gridLayout.addWidget(self.alt, 0, 1)\n\n        self.hsi = qfi_HSI.qfi_HSI(self)\n        
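# Each qfi_* widget is a flight-instrument gauge (ADI attitude, ALT altimeter,\n        # HSI heading, SI speed, VSI vertical speed); each is sized to 240x240,\n        # reinitialized and added to the grid, then fed telemetry via updateQFI().\n        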
self.hsi.resize(240, 240)\n        self.hsi.reinit()\n        self.gridLayout.addWidget(self.hsi, 0, 2)\n\n        self.si = qfi_SI.qfi_SI(self)\n        self.si.resize(240, 240)\n        self.si.reinit()\n        self.gridLayout.addWidget(self.si, 1, 0,1,2,alignment=QtCore.Qt.AlignCenter)\n\n        self.vsi = qfi_VSI.qfi_VSI(self)\n        self.vsi.resize(240, 240)\n        self.vsi.reinit()\n        self.gridLayout.addWidget(self.vsi, 1, 1,1,2,alignment=QtCore.Qt.AlignCenter)\n\n        self.tc = qfi_TC.qfi_TC(self)\n        self.tc.resize(240, 240)\n        self.tc.reinit()\n\n        self.setLayout(self.gridLayout)\n\n        # end uav detail ui\n\n\n        self.gridLayout_1.addWidget(self.container,0,0)\n        self.gridLayout_1.addLayout(self.gridLayout,0,1)\n\n        # Map\n        self.mapWidget = MapWidget()\n        sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Preferred)\n        sizePolicy.setHorizontalStretch(1)\n        sizePolicy.setVerticalStretch(1)\n        sizePolicy.setHeightForWidth(self.mapWidget.sizePolicy().hasHeightForWidth())\n        self.mapWidget.setSizePolicy(sizePolicy)\n        self.gridLayout_1.addWidget(self.mapWidget,1,0)\n        self.map = L.map(self.mapWidget)\n\n        self.map.setView([22.305711, 114.253426], 20)\n        L.tileLayer('http://{s}.tile.osm.org/{z}/{x}/{y}.png').addTo(self.map)\n\n        self.marker = L.marker([22.305711, 114.253426])\n        self.marker.bindPopup('No connection')\n        self.map.addLayer(self.marker)\n        self.count = 0\n\n        # build the scroll area that holds the sensor graphs\n        self.scrollArea = QtWidgets.QScrollArea(self.centralwidget)\n        self.scrollArea.setVerticalScrollBarPolicy(QtCore.Qt.ScrollBarAlwaysOn)\n        self.scrollArea.setHorizontalScrollBarPolicy(QtCore.Qt.ScrollBarAlwaysOff)\n        self.scrollArea.setWidgetResizable(True)\n        self.scrollArea.setObjectName(\"scrollArea\")\n        self.scrollAreaWidgetContents = QtWidgets.QWidget()\n        self.scrollAreaWidgetContents.setGeometry(QtCore.QRect(0, 0, 849, 1024))\n        self.scrollAreaWidgetContents.setObjectName(\"scrollAreaWidgetContents\")\n        self.verticalLayout_graphs = QtWidgets.QVBoxLayout(self.scrollAreaWidgetContents)\n        self.verticalLayout_graphs.setObjectName(\"verticalLayout_graphs\")\n\n        # draw graphs\n        # 1 PM2.5\n        self.data_pm25 = []\n        self.data_pm25_time = []\n        self.data_pm25_collectTime = []\n        self.canvas_pm25 = PlotCanvas(self, width=1, height=4)\n        self.canvas_pm25.init_plot(\"PM2.5\", \"µg/m³\", \"Time(s)\")\n        self.canvas_pm25.setMinimumSize(self.canvas_pm25.size())\n        self.verticalLayout_graphs.addWidget(self.canvas_pm25)\n        # 2 PM10\n        self.data_pm10 = []\n        self.data_pm10_time = []\n        self.data_pm10_collectTime = []\n        self.canvas_pm10 = PlotCanvas(self, width=1, height=4)\n        self.canvas_pm10.init_plot(\"PM10\", \"µg/m³\", \"Time(s)\")\n        self.canvas_pm10.setMinimumSize(self.canvas_pm10.size())\n        self.verticalLayout_graphs.addWidget(self.canvas_pm10)\n        # 3 temperature\n        self.data_temp = []\n        self.data_temp_time = []\n        self.data_temp_collectTime = []\n        self.canvas_temp = PlotCanvas(self, width=1, height=4)\n        self.canvas_temp.init_plot(\"Temperature\", \"Temperature(C)\", \"Time(s)\")\n        self.canvas_temp.setMinimumSize(self.canvas_temp.size())\n        self.verticalLayout_graphs.addWidget(self.canvas_temp)\n        # 4 humidity\n        self.data_hum = []\n        self.data_hum_time = []\n        self.data_hum_collectTime = []\n        self.canvas_hum = PlotCanvas(self, width=1, height=4)\n        self.canvas_hum.init_plot(\"Humidity\", \"Humidity(%)\", \"Time(s)\")\n        self.canvas_hum.setMinimumSize(self.canvas_hum.size())\n        self.verticalLayout_graphs.addWidget(self.canvas_hum)\n\n        self.scrollArea.setWidget(self.scrollAreaWidgetContents)\n        self.gridLayout_1.addWidget(self.scrollArea,1,1)\n\n        # 
Menu\n self.menubar = QtWidgets.QMenuBar(MainWindow)\n self.menubar.setGeometry(QtCore.QRect(0, 0, 1792, 22))\n self.menubar.setObjectName(\"menubar\")\n self.menuMission = QtWidgets.QMenu(self.menubar)\n self.menuMission.setObjectName(\"menuFile\")\n self.menuConnection = QtWidgets.QMenu(self.menubar)\n self.menuConnection.setObjectName(\"menuConnection\")\n MainWindow.setMenuBar(self.menubar)\n self.statusbar = QtWidgets.QStatusBar(MainWindow)\n self.statusbar.setObjectName(\"statusbar\")\n MainWindow.setStatusBar(self.statusbar)\n\n # new mission\n self.actionNew_Mission = QtWidgets.QAction(MainWindow)\n self.actionNew_Mission.setObjectName(\"actionNew_Mission\")\n self.actionNew_Mission.triggered.connect(self.newMission)\n self.actionView_Mission = QtWidgets.QAction(MainWindow)\n self.actionView_Mission.setObjectName(\"actionView_Mission\")\n\n # save as\n self.actionSave = QtWidgets.QAction(MainWindow)\n self.actionSave.setObjectName(\"actionSave\")\n self.actionSave.triggered.connect(self.saveAs)\n\n # quit\n self.actionClose = QtWidgets.QAction(MainWindow)\n self.actionClose.setObjectName(\"actionClose\")\n self.actionClose.triggered.connect(QApplication.quit)\n\n # connect\n self.actionConnect = QtWidgets.QAction(MainWindow)\n self.actionConnect.setObjectName(\"actionConnect\")\n self.actionConnect.triggered.connect(self.connect)\n\n #disconnect\n self.actionDisconnect = QtWidgets.QAction(MainWindow)\n self.actionDisconnect.setObjectName(\"actionDisconnect\")\n self.actionDisconnect.triggered.connect(self.disconnect)\n\n self.menuMission.addAction(self.actionNew_Mission)\n self.menuMission.addAction(self.actionSave)\n self.menuMission.addAction(self.actionClose)\n self.menuConnection.addAction(self.actionConnect)\n self.menuConnection.addAction(self.actionDisconnect)\n self.menubar.addAction(self.menuMission.menuAction())\n self.menubar.addAction(self.menuConnection.menuAction())\n\n # menu button setting\n self.actionConnect.setDisabled(False)\n self.actionDisconnect.setDisabled(True)\n\n self.retranslateUi(MainWindow)\n QtCore.QMetaObject.connectSlotsByName(MainWindow)\n self.menubar.setNativeMenuBar(False) # False for current window, True for parent window\n\n # update detail\n self.updateQFI_thread = VehicleStatus()\n self.updateQFI_thread.updateQFI.connect(self.updateQFI)\n self.updateMap_thread = VehicleLocation()\n self.updateMap_thread.updateMap.connect(self.updateMap)\n\n #build thread\n self.thread1 = QThread()\n self.thread2 = QThread()\n self.updateQFI_thread.moveToThread(self.thread1)\n self.updateMap_thread.moveToThread(self.thread2)\n\n # start thread\n self.thread1.started.connect(self.updateQFI_thread.run)\n self.thread1.start()\n self.thread2.started.connect(self.updateMap_thread.run)\n self.thread2.start()\n\n MainWindow.setCentralWidget(self.centralwidget)\n\n\n @QtCore.pyqtSlot(int)\n def on_stateChanged(self, state):\n if state == MqttClient.Connected:\n print(state)\n self.client.subscribe([(\"/IoTSensor/DHT22\",0),(\"/IoTSensor/SDS011\",1)])\n\n @QtCore.pyqtSlot(str)\n def on_messageSignal(self, msg):\n val = msg\n print(val)\n type = val.split(\" \")[1].split(\"=\")[0]\n if type == \"Temperature\":\n val = val.replace(\"Time=\", \"\")\n val = val.replace(\"Temperature=\", \"\")\n val = val.replace(\"Humidity=\", \"\")\n val = val.split(\" \")\n sTime = val[0] #String format\n dTime = datetime.strptime(sTime, '%H:%M:%S') #Time format\n temp = val[1].replace(\"C\", \"\")\n hum = val[2].replace(\"%\", \"\")\n 
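# Record both readings; storeData() appends the value and derives the\n            # elapsed-seconds axis from consecutive collection timestamps.\n            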
self.storeData(self.data_temp,temp,self.data_temp_time,self.data_temp_collectTime,sTime,dTime)\n self.storeData(self.data_hum,hum, self.data_hum_time, self.data_hum_collectTime, sTime, dTime)\n elif type == \"PM25\":\n val = val.replace(\"Time=\", \"\")\n val = val.replace(\"PM25=\", \"\")\n val = val.replace(\"PM10=\", \"\")\n val = val.split(\" \")\n sTime = val[0] #String format\n dTime = datetime.strptime(sTime, '%H:%M:%S') #Time format\n pm25 = val[1]\n pm10 = val[2]\n self.storeData(self.data_pm25,pm25,self.data_pm25_time,self.data_pm25_collectTime,sTime,dTime)\n self.storeData(self.data_pm10,pm10,self.data_pm10_time,self.data_pm10_collectTime,sTime,dTime)\n self.draw()\n\n\n def storeData(self, target, data,target_time,target_collectTime, sTime,dTime):\n print(data)\n target.append(float(data))\n\n if (len(target_time) != 0):\n lTime = datetime.strptime(target_collectTime[-1], '%H:%M:%S') #lastest collect time\n timeDiff = dTime-lTime\n target_time.append(target_time[-1] + int(str(timeDiff.seconds)))\n target_collectTime.append(sTime)\n else:\n print('first')\n target_time.append(0)\n target_collectTime.append(sTime)\n\n def draw(self):\n # print(\"draw\")\n self.canvas_pm25.update_figure(self.data_pm25_time,self.data_pm25)\n self.canvas_pm10.update_figure(self.data_pm10_time, self.data_pm10)\n self.canvas_temp.update_figure(self.data_temp_time, self.data_temp)\n self.canvas_hum.update_figure(self.data_hum_time, self.data_hum)\n\n def saveAs(self):\n path = QFileDialog.getExistingDirectory(self, 'Choose Directory')\n directory = time.strftime('%d-%m-%Y') + ' ' + time.strftime('%H-%M-%S')\n\n if path != \"\":\n path = path + \"/\" + directory\n os.mkdir(path)\n record = open(path + \"/\" + 'raw_data.txt', 'a+')\n output_temp = \"\"\n output_hum = \"\"\n output_pm25 = \"\"\n output_pm10 = \"\"\n output_temp_collectTime = \"\"\n output_hum_collectTime = \"\"\n output_pm25_collectTime = \"\"\n output_pm10_collectTime = \"\"\n\n if len(self.data_temp) != 0:\n output_temp = [\"%.1f\" % number for number in self.data_temp]\n output_temp = ','.join(output_temp)\n output_temp_collectTime = ','.join(self.data_temp_collectTime)\n if len(self.data_hum) != 0:\n output_hum = [\"%.1f\" % number for number in self.data_hum]\n output_hum = ','.join(output_hum)\n output_hum_collectTime = ','.join(self.data_hum_collectTime)\n if len(self.data_pm25) != 0:\n output_pm25 = [\"%.1f\" % number for number in self.data_pm25]\n output_pm25 = ','.join(output_pm25)\n output_pm25_collectTime = ','.join(self.data_pm25_collectTime)\n if len(self.data_pm10) != 0:\n output_pm10 = [\"%.1f\" % number for number in self.data_pm10]\n output_pm10 = ','.join(output_pm10)\n output_pm10_collectTime = ','.join(self.data_pm10_collectTime)\n\n\n output = \"{\\n\" \\\n \"\\\"Temperature\\\":{\" \\\n \"\\n\\t\\\"Data\\\":[\" + output_temp + \"],\\n\\t\\\"Unit\\\":C,\" \\\n \"\\n\\t\\\"CollectedTime\\\":[\"+output_temp_collectTime+\"]\\n\\t}\"\\\n \"\\n\\\"Humidity\\\":{\"\\\n \"\\n\\t\\\"Data\\\":[\" + output_hum + \"],\\n\\t\\\"Unit\\\":%,\"\\\n \"\\n\\t\\\"CollectedTime\\\":[\"+output_hum_collectTime+\"]\\n\\t}\" \\\n \"\\n\\\"PM2.5\\\":{\" \\\n \"\\n\\t\\\"Data\\\":[\" + output_pm25 + \"],\\n\\t\\\"Unit\\\":µg/m³,\" \\\n \"\\n\\t\\\"CollectedTime\\\":[\" + output_pm25_collectTime + \"]\\n\\t}\" \\\n \"\\n\\\"PM10\\\":{\" \\\n \"\\n\\t\\\"Data\\\":[\" + output_pm10 + \"],\\n\\t\\\"Unit\\\":µg/m³,\" \\\n \"\\n\\t\\\"CollectedTime\\\":[\" + output_pm10_collectTime + \"]\\n\\t}\" \\\n \"\\n}\"\n record.write(output)\n 
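record.close()  # close the raw-data file once everything is written\n            # Export each chart as an image next to the raw data dump (outputImage is\n            # the project's PlotCanvas helper; writing an image file is assumed).\n            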
self.canvas_temp.outputImage(path + \"/\" + \"Temperature\")\n self.canvas_hum.outputImage(path + \"/\" + \"Humidity\")\n self.canvas_pm25.outputImage(path + \"/\" + \"PM2.5\")\n self.canvas_pm10.outputImage(path + \"/\" + \"PM10\")\n\n def newMission(self):\n msgBox = QMessageBox()\n msgBox.setText(\"Do you want to create a new mission now? If yes, then the current mission will be closed.\")\n msgBox.setStandardButtons(QMessageBox.Cancel | QMessageBox.Ok)\n option = msgBox.exec_()\n if option == QMessageBox.Ok:\n # os.execl(sys.executable, 'python', __file__, *sys.argv[1:])\n os.execl(sys.executable, sys.executable, *sys.argv)\n # os.execl(sys.executable, '\"{}\"'.format(sys.executable), *sys.argv)\n\n def videoPipeline(self):\n self.pipeline = Gst.Pipeline()\n self.tcpsrc = Gst.ElementFactory.make('tcpclientsrc', 'tcpsrc')\n self.tcpsrc.set_property(\"host\", '192.168.12.1')\n self.tcpsrc.set_property(\"port\", 5000)\n\n self.gdepay = Gst.ElementFactory.make('gdpdepay', 'gdepay')\n self.rdepay = Gst.ElementFactory.make('rtph264depay', 'rdepay')\n self.avdec = Gst.ElementFactory.make('avdec_h264', 'avdec')\n self.vidconvert = Gst.ElementFactory.make('videoconvert', 'vidconvert')\n self.asink = Gst.ElementFactory.make('autovideosink', 'asink')\n self.asink.set_property('sync', False)\n\n self.pipeline.add(self.tcpsrc)\n self.pipeline.add(self.gdepay)\n self.pipeline.add(self.avdec)\n self.pipeline.add(self.rdepay)\n self.pipeline.add(self.vidconvert)\n self.pipeline.add(self.asink)\n\n self.tcpsrc.link(self.gdepay)\n self.gdepay.link(self.rdepay)\n self.rdepay.link(self.avdec)\n self.avdec.link(self.vidconvert)\n self.vidconvert.link(self.asink)\n\n def on_sync_message(self, bus, message):\n if message.get_structure().get_name() == 'prepare-window-handle':\n message.src.set_property('force-aspect-ratio', True)\n message.src.set_window_handle(self.winId)\n\n def quit(self, container):\n self.pipeline.set_state(Gst.State.NULL)\n Gtk.main_quit()\n\n def on_eos(self, bus, msg):\n print('on_eos(): seeking to start of video')\n self.pipeline.seek_simple(\n Gst.Format.TIME,\n Gst.SeekFlags.FLUSH | Gst.SeekFlags.KEY_UNIT,\n 0\n )\n\n def on_error(self, bus, msg):\n print('on_error():', msg.parse_error())\n\n def start(self):\n self.pipeline.set_state(Gst.State.PLAYING)\n\n def updateQFI(self, detail):\n if(detail[\"airspeed\"] != \"\"):\n self.adi.setRoll(math.degrees(detail[\"attitude_roll\"]))\n self.adi.setPitch(math.degrees(detail[\"attitude_pitch\"]))\n self.alt.setAltitude(detail[\"altitude\"])\n self.si.setSpeed(detail[\"airspeed\"])\n self.hsi.setHeading(detail[\"heading\"])\n self.vsi.setClimbRate(detail[\"verticalSpeed\"])\n\n self.adi.viewUpdate.emit()\n self.alt.viewUpdate.emit()\n self.si.viewUpdate.emit()\n self.hsi.viewUpdate.emit()\n self.vsi.viewUpdate.emit()\n\n def updateMap(self, detail):\n\n # Working with the maps with pyqtlet\n if (detail[\"location_lat\"] != \"\"):\n self.map.removeLayer(self.marker)\n if self.count == 0:\n self.map.setView([detail[\"location_lat\"], detail[\"location_lon\"]], 20)\n self.count += 1\n if self.count % 10 == 0:\n self.map.setView([detail[\"location_lat\"], detail[\"location_lon\"]], 20)\n self.marker = L.marker([detail[\"location_lat\"], detail[\"location_lon\"]])\n self.marker.bindPopup('UAV Here')\n self.map.addLayer(self.marker)\n\n\n\n def retranslateUi(self, MainWindow):\n _translate = QtCore.QCoreApplication.translate\n MainWindow.setWindowTitle(_translate(\"MainWindow\", \"Automated Data Collecting System\"))\n 
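# The captions below are routed through Qt's translate() hook so the UI\n        # strings could be localized without touching the widget setup code.\n        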
self.menuMission.setTitle(_translate(\"MainWindow\", \"Mission\"))\n        self.menuConnection.setTitle(_translate(\"MainWindow\", \"Connection\"))\n        self.actionNew_Mission.setText(_translate(\"MainWindow\", \"New Mission\"))\n        self.actionView_Mission.setText(_translate(\"MainWindow\", \"View Mission\"))\n        self.actionSave.setText(_translate(\"MainWindow\", \"Save As\"))\n        self.actionClose.setText(_translate(\"MainWindow\", \"Quit\"))\n        self.actionConnect.setText(_translate(\"MainWindow\", \"Connect\"))\n        self.actionDisconnect.setText(_translate(\"MainWindow\", \"Disconnect\"))\n\n    def connect(self):\n        self.drone = Drone('udp:0.0.0.0:14550')\n        if self.drone.isconnect:\n            self.vehicle = self.drone.getDrone()\n            self.actionConnect.setDisabled(True)\n            self.actionDisconnect.setDisabled(False)\n\n            # MQTT start\n            self.client = MqttClient(self)\n            self.client.stateChanged.connect(self.on_stateChanged)\n            self.client.messageSignal.connect(self.on_messageSignal)\n            self.client.hostname = \"192.168.12.1\"\n            self.client.connectToHost()\n\n            # start map and QFI update threads\n            self.updateMap_thread.setVehicle(self.vehicle)\n            self.updateQFI_thread.setVehicle(self.vehicle)\n            self.start()\n\n            # Animation thread\n            t2 = ThreadGUI(self.gridLayout)\n            t2.daemon = True\n            t2.start()\n        else:\n            self.disconnect()\n\n\n    def disconnect(self):\n        self.drone.disconnectDrone()\n        self.actionConnect.setDisabled(False)\n        self.actionDisconnect.setDisabled(True)\n        self.updateMap_thread.exist = False\n        self.updateQFI_thread.exist = False\n        self.quit(self.container)\n\n        self.adi.setRoll(0)\n        self.adi.setPitch(0)\n        self.alt.setAltitude(0)\n        self.si.setSpeed(0)\n        self.hsi.setHeading(0)\n        self.vsi.setClimbRate(0)\n\n        self.adi.viewUpdate.emit()\n        self.alt.viewUpdate.emit()\n        self.si.viewUpdate.emit()\n        self.hsi.viewUpdate.emit()\n        self.vsi.viewUpdate.emit()\n\n\n\nif __name__ == '__main__':\n    app = QApplication(sys.argv)\n    MainWindow = QMainWindow()\n    ui = Monitor()\n    ui.setupUi(MainWindow)\n    MainWindow.show()\n    sys.exit(app.exec_())\n","sub_path":"Monitor/monitor.py","file_name":"monitor.py","file_ext":"py","file_size_in_byte":21190,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"634448316","text":"# Counting even and odd numbers in a list\r\nnumeros = []\r\npar = []\r\nimpar = []\r\nfor x in range(10):\r\n    numeros.append(int(input(f\"Enter number {x + 1}: \")))\r\nfor x in numeros:\r\n    if x % 2 == 0:\r\n        par.append(x)\r\n    else:\r\n        impar.append(x)\r\nprint(f\"The count of even numbers is {len(par)} and the count of odd numbers is {len(impar)}\")","sub_path":"ferias014.py","file_name":"ferias014.py","file_ext":"py","file_size_in_byte":366,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"491544332","text":"import sys\nimport queue\n\n\nclass Communication:\n    def __init__(self):\n        self.task = queue.Queue()\n        self.result = queue.Queue()\n\n\ndef _list_swap(ls, i, j):\n    cpy = ls[i]\n    ls[i] = ls[j]\n    ls[j] = cpy\n\n\ndef try_secure(func_type, func, args=(), f=None, in_child=False):\n    eva_try_again = \"\\nevaluating failed and we will try again...\\n\"\n    eva_return_direct = \"\\nevaluating failed many times and we return score 0.05 directly...\\n\"\n    spl_try_again = \"\\nsample failed and we will try again...\\n\"\n    spl_exit = \"\\nsample failed many times and we stop the program...\\n\"\n    count = 0\n\n    while True:\n        try:\n            result = func(*args)\n            break\n        except Exception as e:\n            count += 1\n            if func_type == \"eva\":\n                print(e, 
eva_try_again)\n if f:\n f.write(\"\\n\"+str(e)+eva_try_again)\n if func_type == \"spl\":\n print(e, spl_try_again)\n if count > 10:\n if func_type == \"eva\":\n print(e, eva_return_direct)\n if f:\n f.write(\"\\n\"+str(e)+eva_return_direct)\n result = 0.05\n if func_type == \"spl\":\n print(e, spl_exit)\n if in_child:\n result = None\n else:\n sys.exit(0)\n break\n return result\n","sub_path":"NAS2.2/nas/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1454,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"191595339","text":"import asyncio\nimport base64\nimport datetime\nimport io\nimport math\n\nimport aiohttp\nimport discord\nfrom discord.ext import commands\nimport lxml.html\nimport motor.motor_asyncio as amotor\nfrom PIL import Image, ImageFont, ImageDraw\nfrom urllib.parse import urlparse, parse_qs\n\nimport chickensmoothie as cs\nfrom constants import Constants\n\nmongo_client = amotor.AsyncIOMotorClient(Constants.mongodb_uri)\ndatabase = mongo_client[Constants.database_name]\ncollection = database[Constants.other_collection_name]\n\n\nclass PoundPets(commands.Cog):\n def __init__(self, bot):\n self.bot = bot\n self.session = None\n self.generating_image = False\n self.all_rare_pets = 0\n self.parsed_pets = 0\n self.stage = 1\n\n @commands.group(aliases=['ppets', 'pound-pets'])\n @commands.guild_only()\n async def pound_pets(self, ctx):\n if ctx.invoked_subcommand is None:\n if Constants.image_exists:\n cursor = collection.find({'generated': True}) # Try to get document of generated image\n document = await cursor.to_list(length=1)\n document = document[0]\n base64_string = base64.b64decode(document['image_base64'])\n output_buffer = io.BytesIO(base64_string)\n await ctx.send(file=discord.File(fp=output_buffer, filename='poundpets.png'))\n else:\n await ctx.send('No image has been generated yet! Type `,ppets get` to start generating')\n\n @pound_pets.command(aliases=['generate'])\n @commands.guild_only()\n async def get(self, ctx):\n if not self.generating_image:\n pound_data = await cs.get_pound_string()\n if pound_data[0] == 'Lost and Found' or pound_data[0] == 'Pound & Lost and Found':\n await ctx.send('The next opening is not the Pound!')\n\n elif pound_data[0] == 'Pound' and 'Pound is currently open!' in pound_data[1]:\n await ctx.send('An image cannot be generated while the pound is still open!')\n\n elif Constants.image_exists:\n await ctx.send('An image has already been created! Use `,ppets` to display it!')\n\n else:\n await ctx.send('Generating image... 
Enter the command again to view the progress')\n self.generating_image = True\n headers = { # HTTP request headers\n 'User-Agent': 'CS Pound Discord Bot Agent ' + Constants.version, # Connecting User-Agent\n 'From': Constants.contact_email\n }\n login_url = 'https://www.chickensmoothie.com/Forum/ucp.php?mode=login'\n payload = {\n 'username': Constants.username,\n 'password': Constants.password,\n 'redirect': 'index.php',\n 'sid': '',\n 'login': 'Login'\n }\n\n self.session = aiohttp.ClientSession(headers=headers)\n await self.session.post(login_url, data=payload)\n\n pound_account = Constants.pound_pets_group\n async with aiohttp.ClientSession(headers=headers) as session:\n async with session.get(pound_account) as response:\n if response.status == 200:\n connection = await response.text()\n dom = lxml.html.fromstring(connection)\n dom.make_links_absolute('https://www.chickensmoothie.com')\n\n self.stage = 1\n last_page = dom.xpath('//div[@class=\"pages\"]')[0].xpath('a/@href')[-2]\n pet_count = int(parse_qs(urlparse(last_page).query)['pageStart'][0])\n all_pets = []\n\n for i in range(40):\n page_start = pet_count - (20 * i)\n url = pound_account + '&pageStart=' + str(page_start)\n print(f'Parsing {url}')\n async with self.session.get(url) as response: # POST the variables to the base php link\n if response.status == 200: # If received response is OK\n connection = await response.text() # Get text HTML of site\n await asyncio.sleep(0.5)\n dom = lxml.html.fromstring(connection) # Convert into DOM\n pets = dom.xpath('//dl[@class=\"pet\"]')\n all_pets.extend(pets)\n\n self.all_rare_pets = 0\n rare_plus_pets = []\n for pet in all_pets:\n image_url = pet.xpath('dt//img/@src')[0]\n rarity = pet.xpath('dd[last()]//img/@alt')[0]\n try:\n adoption_date = pet.xpath('dd/span/span/text()')[0]\n except IndexError:\n print(\"RAN INTO ERROR\")\n adoption_date = \"\"\n if rarity == 'Rare' or rarity == 'Very rare' or rarity == 'OMG so rare!':\n rare_plus_pets.append((image_url, rarity, adoption_date))\n self.all_rare_pets = len(rare_plus_pets)\n\n self.stage = 2\n self.parsed_pets = 0\n image_data = []\n async with aiohttp.ClientSession() as session: # Create an AIOHTTP session\n for (image, _, _) in rare_plus_pets:\n print(image)\n async with session.post(image, headers=headers) as response:\n if response.status == 200:\n content = await response.read()\n content = io.BytesIO(content)\n image_data.append(content)\n self.parsed_pets += 1\n await asyncio.sleep(0.5)\n\n self.stage = 3\n _, max_height = generate_image(1920, 1080, image_data, rare_plus_pets)\n canvas, _ = generate_image(1920, max_height, image_data, rare_plus_pets)\n\n output_buffer = io.BytesIO() # Convert the PIL output into bytes\n canvas.save(output_buffer, 'png') # Save the bytes as a PNG format\n base64_string = base64.b64encode(output_buffer.getvalue())\n expiration_date = datetime.datetime.now() + datetime.timedelta(hours=1, seconds=cs.get_pound_time(pound_data[1]))\n await collection.insert_one({'generated': True, 'image_base64': base64_string, 'expiration_date': expiration_date})\n self.generating_image = False\n self.stage = 0\n Constants.image_exists = True\n\n else: # The command is currently generating the image\n message = 'Another user already ran this command!\\nCurrent status: '\n if self.stage == 1:\n message += 'Collecting pets to check...'\n elif self.stage == 2:\n message += f'Checking pets... 
({self.parsed_pets}/{self.all_rare_pets} pets checked)'\n elif self.stage == 3:\n message += 'Generating image...'\n await ctx.send(message)\n\n\ndef setup(bot):\n bot.add_cog(PoundPets(bot))\n bot.loop.create_task(image_expiration_check(bot))\n\n\ndef generate_image(width, height, image_data, rare_plus_pets):\n hex_colour = 'e0f6b2'\n rgb = [int(hex_colour[i:i + 2], 16) for i in (0, 2, 4)]\n rgb.append(255)\n rgba = tuple(rgb)\n\n pil_images = list(map(Image.open, image_data))\n max_width = width\n font = ImageFont.truetype('Verdana.ttf', 12) # Verdana font size 15\n canvas = Image.new('RGBA', (max_width, height), rgba)\n draw = ImageDraw.Draw(canvas) # Draw the image to PIL\n\n rare = Image.open('rarities/rare.png')\n very_rare = Image.open('rarities/veryrare.png')\n omg_so_rare = Image.open('rarities/omgsorare.png')\n\n current_width = 0\n current_max_height = 0\n y_offset = 0\n for i in pil_images:\n if current_width + i.width >= max_width: # If pasting an image will cause it to go off canvas\n current_width = 0\n y_offset += current_max_height + 31 + 15\n current_max_height = 0\n\n if i.height > current_max_height: # If pet is taller than current top height in row\n current_max_height = i.height\n\n if i.width < 106:\n paste_width = 106\n canvas.paste(i, (math.floor(current_width + ((106 - i.width) / 2)), y_offset))\n\n text_centre_offset_x = math.floor((106 - draw.textsize(rare_plus_pets[pil_images.index(i)][2], font=font)[0]) / 2)\n draw.text((current_width + text_centre_offset_x, i.height + y_offset), rare_plus_pets[pil_images.index(i)][2], fill=(0, 0, 0), font=font)\n pet_rarity = rare_plus_pets[pil_images.index(i)][1]\n if pet_rarity == 'Rare':\n canvas.paste(rare, (current_width, i.height + y_offset + 15), rare)\n elif pet_rarity == 'Very rare':\n canvas.paste(very_rare, (current_width, i.height + y_offset + 15), very_rare)\n elif pet_rarity == 'OMG so rare!':\n canvas.paste(omg_so_rare, (current_width, i.height + y_offset + 15), omg_so_rare)\n else:\n paste_width = i.width\n canvas.paste(i, (current_width, y_offset))\n\n pet_rarity = rare_plus_pets[pil_images.index(i)][1]\n pasting_width = math.floor((i.width - 106) / 2)\n text_centre_offset_x = math.floor((106 - draw.textsize(rare_plus_pets[pil_images.index(i)][2], font=font)[0]) / 2)\n draw.text((current_width + pasting_width + text_centre_offset_x, i.height + y_offset), rare_plus_pets[pil_images.index(i)][2], fill=(0, 0, 0), font=font)\n if pet_rarity == 'Rare':\n canvas.paste(rare, (current_width + pasting_width, i.height + y_offset + 15), rare)\n elif pet_rarity == 'Very rare':\n canvas.paste(very_rare, (current_width + pasting_width, i.height + y_offset + 15), very_rare)\n elif pet_rarity == 'OMG so rare!':\n canvas.paste(omg_so_rare, (current_width + pasting_width, i.height + y_offset + 15), omg_so_rare)\n\n current_width += paste_width\n total_height = y_offset + current_max_height + 31 + 15 + 30 + 50\n return canvas, total_height\n\n\nasync def image_expiration_check(bot):\n await bot.wait_until_ready() # Wait until bot has loaded before starting background task\n while not bot.is_closed(): # While bot is still running\n cursor = collection.find({'generated': True}) # Try to get document of generated image\n document_data = await cursor.to_list(length=1)\n\n if len(document_data) == 0:\n pass\n else:\n object_id = document_data[0]['_id']\n if document_data[0]['expiration_date'] < datetime.datetime.now():\n await collection.delete_one({'_id': object_id})\n Constants.image_exists = False\n else:\n Constants.image_exists 
= True\n\n await asyncio.sleep(3600) # Sleep for 1 hour\n","sub_path":"cogs/poundpets.py","file_name":"poundpets.py","file_ext":"py","file_size_in_byte":11150,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"615028369","text":"def form_matrix():\n rows = int(input())\n matrix = [\n [int(el) for el in input().split()]\n for _ in range(rows)\n ]\n return matrix\n\ndef get_diagonals(matrix):\n first_diagonal = []\n second_diagonal = []\n for i in range(len(matrix)):\n first_diagonal.append(matrix[i][i])\n for j in range(len(matrix) - 1, -1, -1):\n second_diagonal.append(matrix[::-1][j][j])\n\n return [first_diagonal,second_diagonal]\n\ndef find_absolute_diff(diagonals):\n diff = abs(sum(diagonals[0]) - sum(diagonals[1]))\n return diff\n\n\n\nmatrix = form_matrix()\ndiagonals = get_diagonals(matrix)\ndiff = find_absolute_diff(diagonals)\nprint(diff)\n\n\n","sub_path":"advanced/multidimentional lists/Diagonal Difference.py","file_name":"Diagonal Difference.py","file_ext":"py","file_size_in_byte":667,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"108358642","text":"def differentiate(equation, point):\n derivative = 0\n terms = []\n previous_delimiter_pos = 0\n\n for i in range(len(equation)):\n if (equation[i] == '-' or equation[i] == '+') and i != 0:\n terms.append(equation[previous_delimiter_pos:i])\n previous_delimiter_pos = i\n\n #grabs the last term\n if i == len(equation) - 1:\n terms.append(equation[previous_delimiter_pos:i] + equation[i])\n\n for term in terms:\n #setup variables\n coefficient = 0\n power = 0\n\n #terms with x are the only ones that impact the derivative\n x_pos = term.find('x')\n if x_pos != -1:\n\n #handle coefficient\n str_coefficient = term[:x_pos]\n if str_coefficient == '' or str_coefficient == '+':\n str_coefficient = '1'\n elif str_coefficient == '-':\n str_coefficient = '-1'\n\n coefficient = int(str_coefficient)\n\n #handle power\n carat_pos = term.find('^')\n if carat_pos != -1:\n str_power = term[carat_pos + 1:]\n else:\n str_power = '1'\n\n power = int(str_power)\n\n coefficient *= power\n power -= 1\n\n derivative = derivative + coefficient * point**power\n\n return derivative\n","sub_path":"4-kyu/differentiate-a-polynomial.py","file_name":"differentiate-a-polynomial.py","file_ext":"py","file_size_in_byte":1342,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"}
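# Illustrative usage of the differentiate() helper in the last record above
# (hand-picked, made-up inputs in the format its parser expects: integer
# coefficients, 'x', an optional '^power', and terms joined by '+'/'-').
# d/dx(3x^2 + 2x - 5) = 6x + 2, which at point = 2 gives 14:
print(differentiate("3x^2+2x-5", 2))  # -> 14
# d/dx(x^3) = 3x^2, which at point = 3 gives 27:
print(differentiate("x^3", 3))        # -> 27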