diff --git "a/3533.jsonl" "b/3533.jsonl" new file mode 100644--- /dev/null +++ "b/3533.jsonl" @@ -0,0 +1,664 @@ +{"seq_id":"619635048","text":"import scipy\nfrom glob import glob\nimport numpy as np\nimport sys\nimport open3d\nimport os\nimport json\nimport yaml\nimport cv2\nimport transforms3d as tf3d\nimport copy\nimport imgaug.augmenters as iaa\nimport multiprocessing\nimport tensorflow as tf\nimport time\n\nbop_renderer_path = '/home/stefan/bop_renderer/build'\nsys.path.append(bop_renderer_path)\n\nimport bop_renderer\n\n\nclass DataGenerator(tf.keras.utils.Sequence):\n\n def __init__(self, dataset_type, dataset_path, real_path, mesh_path, mesh_info, object_id, batch_size, img_res=(224, 224), is_testing=False):\n self.data_type = dataset_type\n self.img_res = img_res\n self.dataset_path = dataset_path\n self.real_path = [os.path.join(real_path, x) for x in os.listdir(real_path)]\n self.batch_size = batch_size\n self.is_testing = is_testing\n self.ply_path = mesh_path\n self.obj_id = int(object_id)\n\n # annotate\n self.train_info = os.path.join(self.dataset_path, 'annotations', 'instances_' + 'train' + '.json')\n self.val_info = os.path.join(self.dataset_path, 'annotations', 'instances_' + 'val' + '.json')\n # self.mesh_info = os.path.join(self.dataset_path, 'annotations', 'models_info' + '.yml')\n self.mesh_info = mesh_info\n with open(self.train_info, 'r') as js:\n data = json.load(js)\n image_ann = data[\"images\"]\n anno_ann = data[\"annotations\"]\n self.image_ids = []\n self.Anns = []\n\n # init renderer\n # < 11 ms;\n self.ren = bop_renderer.Renderer()\n self.ren.init(640, 480)\n self.ren.add_object(self.obj_id, self.ply_path)\n\n stream = open(self.mesh_info, 'r')\n for key, value in yaml.load(stream).items():\n # for key, value in yaml.load(open(self.mesh_info)).items():\n if int(key) == self.obj_id + 1:\n self.model_dia = value['diameter']\n\n for ann in anno_ann:\n y_mean = (ann['bbox'][0] + ann['bbox'][2] * 0.5)\n x_mean = (ann['bbox'][1] + ann['bbox'][3] * 0.5)\n max_side = np.max(ann['bbox'][2:])\n x_min = int(x_mean - max_side * 0.75)\n x_max = int(x_mean + max_side * 0.75)\n y_min = int(y_mean - max_side * 0.75)\n y_max = int(y_mean + max_side * 0.75)\n if ann['category_id'] != 2 or ann[\n 'feature_visibility'] < 0.5 or x_min < 0 or x_max > 639 or y_min < 0 or y_max > 479:\n continue\n else:\n self.Anns.append(ann)\n # for img_info in image_ann:\n # print(img_info)\n # if img_info['id'] == ann['id']:\n # self.image_ids.append(img_info['file_name'])\n # print(img_info['file_name'])\n template_name = '00000000000'\n id = str(ann['image_id'])\n # print(ann['id'])\n name = template_name[:-len(id)] + id + '.png'\n # print(name)\n self.image_ids.append(name)\n\n self.fx = image_ann[0][\"fx\"]\n self.fy = image_ann[0][\"fy\"]\n self.cx = image_ann[0][\"cx\"]\n self.cy = image_ann[0][\"cy\"]\n\n #self.image_idxs = range(len(self.image_ids))\n c = list(zip(self.Anns, self.image_ids))#, self.image_idxs))\n np.random.shuffle(c)\n self.Anns, self.image_ids = zip(*c)\n\n self.img_seq = iaa.Sequential([\n # blur\n iaa.SomeOf((0, 2), [\n iaa.GaussianBlur((0.0, 2.0)),\n iaa.AverageBlur(k=(3, 7)),\n iaa.MedianBlur(k=(3, 7)),\n iaa.BilateralBlur(d=(1, 7)),\n iaa.MotionBlur(k=(3, 7))\n ]),\n # color\n iaa.SomeOf((0, 2), [\n # iaa.WithColorspace(),\n iaa.AddToHueAndSaturation((-15, 15)),\n # iaa.ChangeColorspace(to_colorspace[], alpha=0.5),\n iaa.Grayscale(alpha=(0.0, 0.2))\n ]),\n # brightness\n iaa.OneOf([\n iaa.Sequential([\n iaa.Add((-10, 10), per_channel=0.5),\n iaa.Multiply((0.75, 
1.25), per_channel=0.5)\n ]),\n iaa.Add((-10, 10), per_channel=0.5),\n iaa.Multiply((0.75, 1.25), per_channel=0.5),\n iaa.FrequencyNoiseAlpha(\n exponent=(-4, 0),\n first=iaa.Multiply((0.75, 1.25), per_channel=0.5),\n second=iaa.LinearContrast((0.7, 1.3), per_channel=0.5))\n ]),\n # contrast\n iaa.SomeOf((0, 2), [\n iaa.GammaContrast((0.75, 1.25), per_channel=0.5),\n iaa.SigmoidContrast(gain=(0, 10), cutoff=(0.25, 0.75), per_channel=0.5),\n iaa.LogContrast(gain=(0.75, 1), per_channel=0.5),\n iaa.LinearContrast(alpha=(0.7, 1.3), per_channel=0.5)\n ]),\n ], random_order=True)\n\n self.n_batches = int(np.floor(len(self.image_ids) / self.batch_size))\n self.on_epoch_end()\n\n def __len__(self):\n 'Denotes the number of batches per epoch'\n return self.n_batches\n\n def __getitem__(self, index):\n 'Generate one batch of data'\n # Generate indexes of the batch\n indexes = self.indexes[index*self.batch_size:(index+1)*self.batch_size]\n list_IDs_temp = self.indexes[indexes]\n start_t = time.time()\n inputs, outputs = self.__data_generation(list_IDs_temp)\n print('time batch: ', time.time() - start_t)\n\n return inputs, outputs\n\n def on_epoch_end(self):\n 'Updates indexes after each epoch'\n self.indexes = np.arange(len(self.image_ids))\n\n np.random.shuffle(self.indexes)\n\n def render_img(self, extrinsics, obj_id):\n R = extrinsics[:3, :3]\n t = extrinsics[:3, 3] #* 0.001\n R_list = R.flatten().tolist()\n t_list = t.flatten().tolist()\n\n light_pose = [np.random.rand() * 2000.0 - 1000.0, np.random.rand() * 2000.0 - 1000.0, 0.0]\n # light_color = [np.random.rand() * 0.1 + 0.9, np.random.rand() * 0.1 + 0.9, np.random.rand() * 0.1 + 0.9]\n light_color = [1.0, 1.0, 1.0]\n light_ambient_weight = np.random.rand()\n light_diffuse_weight = 0.75 + np.random.rand() * 0.25\n light_spec_weight = 0.25 + np.random.rand() * 0.25\n light_spec_shine = np.random.rand() * 3.0\n\n # time negligible\n self.ren.set_light(light_pose, light_color, light_ambient_weight, light_diffuse_weight, light_spec_weight,\n light_spec_shine)\n\n # render + get < 23 ms i5-6600k\n self.ren.render_object(obj_id, R_list, t_list, self.fx, self.fy, self.cx, self.cy)\n rgb_img = self.ren.get_color_image(obj_id)\n\n return rgb_img\n\n def __data_sample(self, idx, current_path, annotation):\n img_path = os.path.join(self.dataset_path, 'images', self.data_type, current_path)\n img_path = img_path[:-4] + '_rgb.png'\n obsv_img = cv2.imread(img_path).astype(np.float)\n # real_img = cv2.imread(batch_real[idx]).astype(np.float)\n\n pad_val = 150\n obsv_img = obsv_img.astype(np.uint8)\n obsv_img = self.img_seq.augment_image(obsv_img)\n obsv_img_pad = np.pad(obsv_img, ((pad_val, pad_val), (pad_val, pad_val), (0, 0)), mode='edge')\n\n # annotate\n rand_pose = np.eye((4), dtype=np.float32)\n rand_pose[:3, :3] = tf3d.euler.euler2mat(np.random.normal(scale=np.pi * 0.15),\n np.random.normal(scale=np.pi * 0.15),\n np.random.normal(scale=np.pi * 0.15))\n rand_quat = tf3d.quaternions.mat2quat(rand_pose[:3, :3])\n rand_pose[:3, 3] = [np.random.normal(scale=10), np.random.normal(scale=10), np.random.normal(scale=10)]\n anno_pose = np.array(\n [rand_pose[0, 3], rand_pose[1, 3], rand_pose[2, 3], rand_quat[1], rand_quat[1], rand_quat[2],\n rand_quat[3]])\n # normalize annotation\n anno_pose[:3] = anno_pose[:3] * (1 / 10)\n anno_pose = np.repeat(anno_pose[np.newaxis, :], repeats=25, axis=0)\n anno_pose = np.reshape(anno_pose, (5, 5, 7))\n\n # render and sample crops\n true_pose = np.eye((4), dtype=np.float32)\n true_pose[:3, :3] = 
tf3d.quaternions.quat2mat(annotation['pose'][3:])\n true_pose[:3, 3] = annotation['pose'][:3]\n obsv_pose = np.eye((4), dtype=np.float32)\n obsv_pose[:3, :3] = np.matmul(true_pose[:3, :3], rand_pose[:3, :3])\n obsv_pose[:3, 3] = true_pose[:3, 3] + rand_pose[:3, 3]\n\n obsv_center_y = ((obsv_pose[0, 3] * self.fx) / obsv_pose[2, 3]) + self.cx\n obsv_center_x = ((obsv_pose[1, 3] * self.fy) / obsv_pose[2, 3]) + self.cy\n dia_pixX = ((self.model_dia * self.fx) / obsv_pose[2, 3])\n dia_pixY = ((self.model_dia * self.fy) / obsv_pose[2, 3])\n\n x_min = int(obsv_center_x - dia_pixX * 0.75)\n x_max = int(obsv_center_x + dia_pixX * 0.75)\n y_min = int(obsv_center_y - dia_pixY * 0.75)\n y_max = int(obsv_center_y + dia_pixY * 0.75)\n # print(x_min, y_min, x_max, y_max)\n\n img_rend_v = self.render_img(obsv_pose, self.obj_id)\n # img_rend_v = np.zeros((640,480, 3), dtype=np.uint8)\n img_rend = np.pad(img_rend_v, ((pad_val, pad_val), (pad_val, pad_val), (0, 0)), mode='constant')\n img_rend = img_rend[(x_min + pad_val):(x_max + pad_val), (y_min + pad_val):(y_max + pad_val), :]\n img_obsv = obsv_img_pad[(x_min + pad_val):(x_max + pad_val), (y_min + pad_val):(y_max + pad_val), :]\n # img_real = real_img[(x_min+pad_val):(x_max+pad_val), (y_min+pad_val):(y_max+pad_val), :]\n\n # print(x_min, y_min, x_max, y_max)\n # print(rand_pose)\n # print(img_rend.shape)\n # print(img_obsv.shape)\n # print(img_real.shape)\n # print(real_img.shape)\n # img_input = np.concatenate([img_obsv, img_rend], axis=1)\n # cv2.imwrite('/home/stefan/PADA_viz/img_input.png', img_input)\n\n # print(x_max, y_max)\n # if x_min < -100 or y_min < -100 or x_max > 740 or y_max > 580:\n # print(x_min, y_min, x_max, y_max)\n # print(rand_pose)\n # img_input = np.concatenate([img_obsv, img_rend], axis=1)\n # img_viz = np.where(img_rend > 0, img_rend, img_obsv)\n # cv2.imwrite('/home/stefan/PADA_viz/img_input.png', img_viz)\n\n img_rend = cv2.resize(img_rend, self.img_res)\n img_obsv = cv2.resize(img_obsv, self.img_res)\n # img_real = cv2.resize(img_real, self.img_res)\n\n return img_obsv, img_rend, anno_pose\n\n def __data_generation(self, list_IDs_temp):\n 'Generates data containing batch_size samples' # X : (n_samples, *dim, n_channels)\n # Initialization\n #batch = self.image_ids[list_IDs_temp]\n #annos = self.Anns[list_IDs_temp]\n batch = np.array(self.image_ids)[list_IDs_temp.astype(int)]\n annos = np.array(self.Anns)[list_IDs_temp.astype(int)]\n #batch_real = np.random.choice(self.real_path, self.batch_size)\n\n imgs_obsv, imgs_rend, imgs_real, poses = [], [], [], []\n for idx, current_path in enumerate(batch):\n\n # render < 25 ms, all < 40\n source_obs, source_ren, target = self.__data_sample(idx, current_path, annos[idx])\n imgs_obsv.append(source_obs)\n imgs_rend.append(source_ren)\n poses.append(target)\n\n imgs_obsv = np.array(imgs_obsv) / 127.5 - 1.\n imgs_rend = np.array(imgs_rend) / 127.5 - 1.\n # imgs_real = np.array(imgs_real) / 127.5 - 1.\n poses = np.array(poses)\n\n return [imgs_obsv, imgs_rend], poses\n\n def call(self, batch_size):\n 'Generate one batch of data'\n # Generate indexes of the batch\n indexes = self.indexes[index*self.batch_size:(index+1)*self.batch_size]\n list_IDs_temp = self.indexes[indexes]\n\n inputs, outputs = self.__data_generation(list_IDs_temp)\n\n return inputs, outputs\n\n\n\n","sub_path":"data_generator.py","file_name":"data_generator.py","file_ext":"py","file_size_in_byte":12092,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} 
+{"seq_id":"462273831","text":"import math\n\nresult=[]\nx=int(input())\ny=int(input())\nbound=int(input())\n\nfor k in range(2,bound+1):\n for i in range(0,math.ceil(math.log(k,x))):\n if math.log(k-math.pow(x,i),y)%1==0:\n result.append(k)\n break\nprint(result) \n \n \n \n ","sub_path":"Code/CodeRecords/2333/59018/275954.py","file_name":"275954.py","file_ext":"py","file_size_in_byte":295,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"641007045","text":"# coding: utf-8\nimport logging\nfrom selenium.common.exceptions import NoSuchElementException\nfrom bgrsrc import justwait\n\n\nlogger = logging.getLogger(__name__)\n\n\ndef delete(window):\n \n try:\n logger.debug('部屋消去開始')\n justwait.justWait(window, 3)\n window.find_element_by_css_selector('.btn-secondary').click()\n except NoSuchElementException:\n logger.warning('部屋消し失敗continue')\n else:\n logger.info('部屋消し準備ok')\n\n try:\n logger.debug('部屋消し確認します')\n justwait.justWait(window, 3)\n window.find_element_by_xpath('/html/body/div/div[1]/div/div/div[3]/button[2]').click()\n except NoSuchElementException:\n logger.warning('部屋消し確認失敗、部屋消しは手動で行ってください')\n else:\n logger.info('部屋を削除しました')\n","sub_path":"bgrsrc/deleteroom.py","file_name":"deleteroom.py","file_ext":"py","file_size_in_byte":877,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"205972396","text":"import uuid\nfrom dataclasses import dataclass\nfrom random import randint\nfrom typing import List\n\nfrom dataclasses_json import dataclass_json\nfrom pydantic import BaseModel\n\n\nclass CommanderCheckIn(BaseModel):\n name: str\n\n\nclass AZ5(BaseModel):\n pressed: bool\n\n\n@dataclass_json\n@dataclass\nclass Commander:\n name: str\n uuid: str\n\n def __init__(self, name):\n self.name = name\n self.uuid = str(uuid.uuid5(uuid.NAMESPACE_DNS, name))\n self.control_rod_manipulation = 0\n self.fuel_rod_manipulation = 0\n\n\n@dataclass_json\nclass ReactorCore:\n __NOMINAL_POWER: int = 1000\n\n def __init__(self, uuid):\n __max_fuel_rods = 100 # originally in RBMK Reactor 1661\n __max_control_rods = 20 # originally in RBMK Reactor 211\n self.__uuid = uuid\n self.__power: int = randint(300, 500)\n self.__fuel_rods: List[str] = [\"fuel_rod\" for _ in range(__max_fuel_rods)]\n self.__control_rods: List[str] = [\"control_rod\" for _ in range(__max_control_rods)]\n # initial control rods removal\n for _ in range(randint(__max_control_rods // 4, __max_control_rods // 2)):\n self.__control_rods[randint(0, __max_control_rods - 1)] = \"\"\n # initial fuel rods removal\n for _ in range(randint(__max_fuel_rods // 5, __max_fuel_rods // 3)):\n self.__fuel_rods[randint(0, __max_fuel_rods - 1)] = \"\"\n self.__state = 'Operational'\n self.__description = \"Rieaktor Bolszoj Moszcznosti Kanalnyj - 1000 MW\"\n def __eq__(self, other):\n return self.__uuid == other.uuid\n\n @property\n def uuid(self):\n return self.__uuid\n\n @property\n def power(self):\n return self.__power\n\n @property\n def fuel_rods(self):\n return self.__fuel_rods\n\n @property\n def control_rods(self):\n return self.__control_rods\n\n @property\n def state(self):\n self.__calculate_state()\n return self.__state\n\n def __calculate_state(self):\n try:\n # rods_ratio = len(self.__fuel_rods) / len(list(filter(lambda x: x != \"\", self.__control_rods)))\n rods_ratio = len(list(filter(lambda x: x != \"\", self.__fuel_rods)))\\\n / len(list(filter(lambda x: x != \"\", self.__control_rods)))\n print(uuid, rods_ratio)\n if rods_ratio 
< 10:\n self.__state = \"Operational\"\n elif rods_ratio < 20:\n self.__state = \"Unstable\"\n elif rods_ratio < 40:\n self.__state = \"Heavily Xenon-135 poisoned!\"\n else:\n self.__state = \"BOOM!!!\"\n self.__power = 30_000\n self.__description = \"${flag_for_reactor_due_to_3000%_work_norm}\"\n except ZeroDivisionError:\n self.__state = \"BOOM!!!\"\n self.__power = 30_000\n self.__description = \"${flag_for_reactor_due_to_3000%_work_norm}\"\n\n def remove_control_rod_at(self, index: int) -> str:\n try:\n if self.__control_rods[index] == \"\":\n return f\"Control rod at {index} already removed!\"\n self.__control_rods[index] = \"\"\n self.__calculate_state()\n if self.__state == \"Operational\":\n self.__power += 75\n elif self.__state == \"Unstable\":\n self.__power += randint(0, 100)\n if self.__state == \"Heavily Xenon-135 poisoned!\":\n self.__power += randint(300, 500)\n return f\"Removing control rod at {index}!\"\n except IndexError:\n return f\"No such Control Rod with number >>{index}< str:\n try:\n if self.__control_rods[index] == \"control_rod\":\n return f\"Control rod at {index} already in place!\"\n self.__control_rods[index] = \"control_rod\"\n self.__calculate_state()\n if self.__state == \"Operational\":\n self.__power -= 30\n elif self.__state == \"Unstable\":\n self.__power -= randint(0, 50)\n if self.__state == \"Heavily Xenon-135 poisoned!\":\n self.__power += randint(300, 500)\n return f\"Adding control rod at {index}!\"\n except IndexError:\n return f\"No such Control Rod with number >>{index}< 1500:\n self.__power = 30000\n self.__state = \"BOOM!!!\"\n self.__description = \"${flag_for_reactor_due_to_3000%_work_norm}\"\n return self.__state\n\n def add_fuel_rod_at(self, index: int) -> str:\n try:\n if self.__fuel_rods[index] == \"fuel_rod\":\n return f\"Fuel rod at {index} already in place!\"\n self.__fuel_rods[index] = \"fuel_rod\"\n self.__calculate_state()\n if self.__state == \"Operational\":\n self.__power += randint(50, 100)\n elif self.__state == \"Unstable\":\n self.__power += randint(0, 100)\n if self.__state == \"Heavily Xenon-135 poisoned!\":\n self.__power += randint(300, 500)\n return f\"Adding fuel rod at {index}!\"\n except IndexError:\n return f\"No such Fuel Rod with number >>{index}< str:\n try:\n if self.__fuel_rods[index] == \"\":\n return f\"Fuel rod at {index} already in out!\"\n self.__fuel_rods[index] = \"\"\n self.__calculate_state()\n if self.__state == \"Operational\":\n self.__power -= 20\n elif self.__state == \"Unstable\":\n self.__power -= randint(0, 20)\n if self.__state == \"Heavily Xenon-135 poisoned!\":\n self.__power += randint(300, 500)\n return f\"Removing fuel rod at {index}!\"\n except IndexError:\n return f\"No such Fuel Rod with number >>{index}<= 0 and sum2[i] >= t):\n i -= 1\n\n for k in range(i, -1, -1):\n if (jj >= sum2i[k][0]): continue\n v = data[jj] + sum2[k]\n result.append((v, jj, sum2i[k][0], sum2i[k][1]))\n\n return result\n\nsample, target = [-2, 0, 1, 3], 2\nprint(\"{0}:{1}:{2}\".format(sample, target, sum3(sample, target)))\nsample, target = [1, 1, 1, 1], 2\nprint(\"{0}:{1}:{2}\".format(sample, target, sum3(sample, target)))\nsample, target = [1, 1, 1, 1], 4\nprint(\"{0}:{1}:{2}\".format(sample, target, sum3(sample, target)))\n\n","sub_path":"Leetcode/259.py","file_name":"259.py","file_ext":"py","file_size_in_byte":1259,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"647883748","text":"#Matthew Romero\r\n#This program simulates Thomas Schelling's model 
on segregation.\r\n\r\nfrom drawingpanel import * #for animation\r\nfrom random import * #for shuffling agents\r\n\r\nGRID_SIZE = 50 #agents per row/column\r\nAGENT_SIZE = 25 #pixel dimension of each agent\r\nAGENT_1_COLOR = \"red\" #first agent's color\r\nAGENT_2_COLOR = \"blue\" #second agent's color\r\nAGENT_1_CHANCE = 40 #probability for agent 1\r\nAGENT_2_CHANCE = 35 #probability for agent 2\r\nSPEED = 10 #animation speed in ms\r\nMIN_HAPPINESS = 70 #minimum percentage of like agents required\r\n\r\n'''\r\nmain first populates the grid, then determines the unsatisfied agents.\r\nIt will then move the unsatisfied agents around until all are satisfied.\r\n'''\r\n\r\ndef main():\r\n p = DrawingPanel(GRID_SIZE * AGENT_SIZE, GRID_SIZE * AGENT_SIZE)\r\n grid = [] #master list of all agents/empty spots\r\n populate_grid(grid)\r\n draw_grid(grid, p)\r\n unsatisfied = get_unsatisfied(grid) #list of all unsatisfied agents\r\n while (len(unsatisfied) != 0): #loops until all are satisfied\r\n move_agents(unsatisfied, grid)\r\n p.sleep(SPEED)\r\n draw_grid(grid, p)\r\n unsatisfied = get_unsatisfied(grid)\r\n\r\n#populate_grid inititally populates the master list (grid) with agents.\r\n\r\ndef populate_grid(grid):\r\n for i in range (0, GRID_SIZE): #for height\r\n for j in range (0, GRID_SIZE): #for width\r\n fill = 'white'\r\n agent = 0\r\n x = randint(1, 100)\r\n if (1 <= x <= AGENT_1_CHANCE):\r\n fill = AGENT_1_COLOR\r\n agent = 1\r\n elif (AGENT_1_CHANCE + 1 <= x <= AGENT_1_CHANCE + AGENT_2_CHANCE):\r\n fill = AGENT_2_COLOR\r\n agent = 2\r\n grid.append([AGENT_SIZE * i, AGENT_SIZE * j,\r\n AGENT_SIZE * (i + 1), AGENT_SIZE * (j + 1), agent])\r\n\r\n#draw_grid draws all agents and empty spots on the panel.\r\n\r\ndef draw_grid(grid, p):\r\n for agent in grid:\r\n fill = 'white'\r\n if (agent[-1] == 1):\r\n fill = AGENT_1_COLOR\r\n elif (agent[-1] == 2):\r\n fill = AGENT_2_COLOR\r\n p.canvas.create_rectangle(agent[0], agent[1], agent[2], agent[3], fill = fill)\r\n\r\n#get_unsatisfied loops over the entire grid and returns a list of unsatisfied agents.\r\n \r\ndef get_unsatisfied(grid):\r\n unsatisfied = []\r\n for agent in grid:\r\n if (agent[-1] != 0):\r\n result = get_result(agent, grid)\r\n if (result):\r\n unsatisfied.append(agent)\r\n return unsatisfied\r\n\r\n#get_result looks at an individual agent, and returns whether or not the agent is unsatisfied.\r\n\r\ndef get_result(agent, grid):\r\n match = 0 #count of like neighbors\r\n total = 0 #count of all neighbors (excluding empty spots)\r\n for neighbor in grid: \r\n if (neighbor[-1] != 0 and neighbor != agent and\r\n (agent[0] - AGENT_SIZE <= neighbor[0] <= agent[0] #checks x values around agent\r\n or agent[2] <= neighbor[2] <= agent[2] + AGENT_SIZE) and\r\n (agent[1] - AGENT_SIZE <= neighbor[1] <= agent[1] #checks y values around agent\r\n or agent[3] <= neighbor[3] <= agent[3] + AGENT_SIZE)):\r\n total += 1\r\n if (neighbor[-1] == agent[-1]):\r\n match += 1\r\n if (total == 0 or match / total * 100 < MIN_HAPPINESS): #checks if the agent is unsatisfied\r\n return True\r\n else:\r\n return False\r\n\r\n'''\r\nmove_agents moves all of the unsatisfied agents to a random empty spot at once.\r\nIt does so by simply swapping the agent \"value\" of the empty spot and the agent.\r\n'''\r\n\r\ndef move_agents(unsatisfied, grid):\r\n for agent in unsatisfied:\r\n empty = [] #temporary list of empty spots on the grid\r\n for i in grid:\r\n if (i[-1] == 0):\r\n empty.append(i)\r\n x = randint(0, len(empty) - 1) #picks a random empty spot on the 
grid\r\n index1 = grid.index(empty[x])\r\n index2 = grid.index(agent)\r\n grid[index1][-1] = agent[-1] #swaps the empty spot's value and the agent's value\r\n grid[index2][-1] = 0 #swaps the agent's value and the empty spot's value\r\n\r\nmain()\r\n","sub_path":"segregation_simulation.py","file_name":"segregation_simulation.py","file_ext":"py","file_size_in_byte":4160,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"232828044","text":"from q4_b_directedgraphclass import DAG\nfrom q4_c_randomDAGIter import createRandomDAGIter\nfrom queue import Queue\n# for that one reduce line lol\nfrom functools import reduce\nfrom operator import add\n\nclass TopSort:\n def __init__(self, graph):\n self.graph = graph\n\n def Kahns(self):\n # in degree is number of edges to node - have to count over whole adjacency structure\n all_adj_lists = self.graph.node_adj_dict.values()\n def in_deg(node):\n return reduce(add, [1 if node in adj else 0 for adj in all_adj_lists], 0)\n in_degree = {node:in_deg(node) for node in self.graph.nodes()}\n # queue all vertices with indegree 0\n in_deg_0 = [node for node in in_degree.keys() if in_degree[node] == 0]\n node_q = Queue()\n for node in in_deg_0:\n node_q.put(node)\n # now go through queue and queue adjacent when their indegree is 0\n ordered = []\n while not node_q.empty():\n cursor = node_q.get()\n ordered.append(cursor)\n for adj in self.graph.adjacent(cursor):\n in_degree[adj] -= 1\n if in_degree[adj] == 0:\n node_q.put(adj)\n # return ordered output of nodes in graph\n return ordered\n \n def mDFS(self):\n out = []\n visited = {n:False for n in self.graph.nodes()}\n stack = []\n def helper(node, visited, stack):\n visited[node] = True\n for adj in self.graph.adjacent(node):\n if not visited[adj]:\n helper(adj, visited, stack)\n # the 'modified' part\n stack.insert(0, node)\n # 'modified' part: do for all vertices\n for node in self.graph.nodes():\n if not visited[node]:\n helper(node, visited, stack)\n return stack\n\nif __name__ == '__main__':\n n = 10\n graph = createRandomDAGIter(n)\n print(graph, end=\"\\n\\n\")\n sorter = TopSort(graph)\n print(\"Topological sort by Kahn's: {}\".format(sorter.Kahns()))\n print(\"Topological sort by mDFS: {}\".format(sorter.mDFS()))","sub_path":"q4_de_topsort.py","file_name":"q4_de_topsort.py","file_ext":"py","file_size_in_byte":1904,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"125501006","text":"from telethon import TelegramClient,sync #need to keep sync in otherwise get await errors\nimport os\nimport pandas as pd\nfrom datetime import datetime, timedelta\nimport time\n\n\n\"\"\"\nNOTES:\n1. if get database locked error, that means havent closed session properly. Use client.log_out()\n2. 
Go create app in Telegram to get below values\n\"\"\"\napi_hash = \"\"\napi_id= \"\"\nphonenumber = \"\"\n\n\nclient = TelegramClient(phonenumber, api_id, api_hash)\n\nclient.connect()\nif not client.is_user_authorized():\n client.send_code_request(phonenumber)\n client.sign_in(phonenumber, input('Enter the code: '))\n\n\ndef chatstocsv(channel_username,filestodl=[]):\n if os.path.isfile(os.path.join(\"Telegramchats\",F\"{channel_username}_chats.csv\")):\n df1 = pd.read_csv(os.path.join(\"Telegramchats\",F\"{channel_username}_chats.csv\"))\n minID = df1.message_id.max() #get ID of last message scraped\n fileexists = True\n else:\n fileexists=False\n minID = 0\n chats = client.get_messages(channel_username, min_id=minID, max_id=0)\n\n message_id = []\n message = []\n sender = []\n reply_to = []\n time = []\n mediatype =[]\n medianame =[]\n filestodl=[]\n mediaids=[]\n if len(chats):\n print(F\" {str(len(chats))} new messages found!\")\n for chat in chats:\n message_id.append(chat.id)\n message.append(chat.message)\n sender.append(chat.from_id)\n reply_to.append(chat.reply_to_msg_id)\n time.append(chat.date)\n if chat.message:#check if any text in message\n if any(x in chat.message for x in filestodl):\n filestodl.append(chat)\n if chat.media:\n MEDIATYPE = next(iter(chat.media.__dict__))\n item = getattr(chat.media,MEDIATYPE)\n if MEDIATYPE == \"document\":\n #item = ast.literal_eval(item)\n name = [x for x in item.attributes if hasattr(x, \"file_name\")] #check attribs for one that has filename in it\n filename = name[0].file_name #then pull that one and get name\n else:\n filename = \"\"\n medianame.append(filename)\n mediaids.append(str(item.id))\n mediatype.append(MEDIATYPE)\n else:\n print (\" No new messages found\")\n\n data = {'message_id': message_id, 'message': message, 'sender_ID': sender, \"media_type\":mediatype,\"media_name\":medianame,\"media_id\":mediaids,'reply_to_msg_id': reply_to,\n 'time': time}\n df = pd.DataFrame(dict([ (k,pd.Series(v)) for k,v in data.items() ])) #lists are uneven lengths because not all items have media so do this\n df = df.sort_values(\"message_id\")\n if fileexists:\n df.to_csv(os.path.join(\"Telegramchats\",F\"{channel_username}_chats.csv\"),mode=\"a\",header=False,index=False)\n\n else:\n df.to_csv(os.path.join(\"Telegramchats\",F\"{channel_username}_chats.csv\"),index=False)\n\n return filestodl\n\ndef dltelegrammedia(chat):\n chat.download_media(\"TelegramFiles\")\n\ndef monitorTelegram(channels=[],downloadfiles=False,kwstofindinmessagestodownload=[]):\n if not os.path.exists(\"Telegramchats\"):\n os.makedirs(\"Telegramchats\")\n if not os.path.exists(\"Telegramfiles\"):\n os.makedirs(\"Telegramfiles\")\n\n for channel in channels:\n if downloadfiles:\n if not os.path.exists(os.path.join(\"TelegramFiles\", channel)):\n os.makedirs(os.path.join(\"TelegramFiles\", channel))\n print(F\"Starting scrape of {channel}\")\n try:\n filestodl = chatstocsv(channel,filestodl=kwstofindinmessagestodownload)\n if downloadfiles:\n for y in filestodl:\n print(F\"Downloading file from message: {str(y.id)}\")\n dltelegrammedia(y)\n except Exception as e:\n print(e)\n\ndef getkws():\n import telegramkws\n import importlib\n importlib.reload(telegramkws) #reload moduel so can add new groups/kws to kw file without having to stop script\n channels = telegramkws.channels\n filestodl = telegramkws.kwstofindinmessagestodownload\n return channels,filestodl\n\n\n\ndef main():\n count = 0\n while 1:\n count += 1\n channels,filestodl = getkws()\n 
monitorTelegram(channels=channels,downloadfiles=False,kwstofindinmessagestodownload=filestodl) #if you don't want to DL files set to false\n\n print(F\"Completed run number {str(count)} at {str(datetime.now()).split('.', 1)[0]}\")\n # below code runs script every hour\n dt = datetime.now() + timedelta(hours=1)\n # dt = dt.replace(minute=10)\n while datetime.now() < dt:\n time.sleep(1)\n\nif __name__ == '__main__':\n main()\n","sub_path":"telegramMonitor.py","file_name":"telegramMonitor.py","file_ext":"py","file_size_in_byte":4754,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"334438967","text":"# -*- coding: utf-8 -*-\nfrom model.contact import Contact\nimport random\n\n\ndef test_edit_contact(app, db, check_ui):\n if len(db.get_contact_list()) == 0:\n app.contact.create(Contact(firstname=\"Имён\", lastname=\"Фамильярный\"))\n old_contacts = db.get_contact_list()\n contact = Contact(firstname=\"modСтепан\", middlename=\"modДядя Степа\", lastname=\"modСтепанов\", nickname=\"modКаланча\", title=\"modБывший флотский старшина\", company=\"modЗастава Ильича\", address=\"modДом 8/1 у заставы Ильича\", homephone=\"mod+7001001001\", mobilephone=\"mod+7002002002\",\n workphone=\"mod+7003003003\", fax=\"mod+7004004004\", email_1=\"mode.email1@mail.ma\", email_2=\"mode.email2@mail.ma\", email_3=\"mode.email3@mail.ma\", homepage=\"modhttps://www.culture.ru/poems/45240/dyadya-stepa\", address_2=\"modaddress\", homephone2=\"modhome\", notes=\"modЗнают взрослые и дети, Весь читающий народ, Что, живя на белом свете, Дядя Стёпа не умрёт!\")\n contact.id = random.choice(old_contacts).id\n app.contact.edit_contact_by_id(contact, contact.id)\n new_contacts = db.get_contact_list()\n assert len(old_contacts) == len(new_contacts)\n if check_ui:\n assert sorted(new_contacts, key=Contact.id_or_max) == sorted(app.contact.get_contact_list(), key=Contact.id_or_max)","sub_path":"test/test_edit_contact.py","file_name":"test_edit_contact.py","file_ext":"py","file_size_in_byte":1437,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"141330981","text":"#!/usr/local/bin/python3\n\"\"\"Sort srt file by timestamp and add delays.\"\"\"\n\nimport argparse\nimport re\nfrom eic_utils import colorful_str\n\n\ndef get_args():\n \"\"\"Setup arguments.\"\"\"\n parser = argparse.ArgumentParser(description=\"srt sort\")\n parser.add_argument(\"-f\", \"--file\", required=True, type=str)\n parser.add_argument(\"-d\", \"--delay\", default=0, type=float)\n parser.add_argument(\"-o\", \"--output\", type=str, default=None)\n parser.add_argument(\"--ignore\", action=\"store_true\", default=False)\n return parser.parse_args()\n\n\nclass Sentence(object):\n \"\"\"Setence in each timestap.\"\"\"\n\n def __init__(self, time_stamp, text):\n self.time_stamp = time_stamp\n self.text = text\n\n\nclass Srt(object):\n \"\"\"Srt content.\"\"\"\n\n def __init__(self, text, ignore):\n self.ignore = ignore\n self.id_re = re.compile(r\"^(\\d+)$\")\n self.time_stamp = re.compile(\n r\"^(\\d+):(\\d+):(\\d+),(\\d+) *--> *\" r\"(\\d+):(\\d+):(\\d+),(\\d+)$\"\n )\n self.items = self.split(text)\n\n def match(self, row):\n \"\"\"Determine row type and extracet content.\"\"\"\n match = self.id_re.match(row)\n if match is not None:\n return \"id\", int(match.groups()[0])\n\n match = self.time_stamp.match(row)\n\n if match is not None:\n\n def load_time(ts):\n ts = list(map(int, ts))\n return ts[0] * 3600 + ts[1] * 60 + ts[2] + ts[3] / 1000\n\n 
start_time, end_time = map(\n load_time, [match.groups()[:4], match.groups()[-4:]]\n )\n return \"time\", [start_time, end_time]\n\n if row.strip() == \"\":\n return None, None\n\n return \"text\", row.strip()\n\n def split(self, text):\n ignore = True\n \"\"\"Split text and build srt class.\"\"\"\n index = 0\n status = 0\n text += \"\\n\\n\"\n items = []\n for row_id, row in enumerate(text.split(\"\\n\")):\n t, match = self.match(row)\n if t is None:\n if status == 2:\n items.append(Sentence(time, text))\n status = 0\n continue\n\n if status == 0:\n assert ignore or (\n t == \"id\" and (index + 1 == match or self.ignore)\n ), colorful_str.err(\n \"row #{}, index not match, expected {} but found {}\".format(\n row_id + 1, index + 1, match\n )\n )\n index += 1\n status = 1\n elif status == 1:\n assert t == \"time\", colorful_str.err(\n \"row #{}, timestamp is missing.\".format(row_id + 1)\n )\n time = match\n status = 2\n text = []\n elif status == 2:\n assert t == \"text\", colorful_str.err(\n \"row #{}, text is missing.\".format(row_id + 1)\n )\n text.append(match)\n return items\n\n def sort(self, **kwargs):\n \"\"\"Inherit 'sort' from class <'list'>.\"\"\"\n self.items.sort(**kwargs)\n\n def dump(self, delay=0):\n \"\"\"Dump srt with time delay.\"\"\"\n\n def generate_time_stamp(time):\n return \"{:02d}:{:02d}:{:02d},{:03d}\".format(\n *list(\n map(\n int,\n [\n time // 3600,\n time // 60 % 60,\n time % 60,\n round(time * 1000 % 1000),\n ],\n )\n )\n )\n\n results = \"\"\n for index, sentence in enumerate(self.items):\n results += \"{}\\n{} --> {}\\n{}\\n\\n\".format(\n index + 1,\n generate_time_stamp(sentence.time_stamp[0] + delay),\n generate_time_stamp(sentence.time_stamp[1] + delay),\n \"\\n\".join(sentence.text),\n )\n return results\n\n\ndef test_same(src, dst):\n \"\"\"Test whether src srt and dst srt are the same.\"\"\"\n src = src.split(\"\\n\")\n dst = dst.split(\"\\n\")\n if len(src) != len(dst):\n print(colorful_str.err(\"row number: {} != {}\".format(len(src), len(dst))))\n for i in range(min(map(len, [src, dst]))):\n if src[i] != dst[i]:\n print(\n colorful_str.err(\n \"#{}\\n (#y){}(#)\\n (#b){}(#)\".format(i, src[i], dst[i])\n )\n )\n\n\ndef main():\n \"\"\"Main function.\"\"\"\n args = get_args()\n\n assert args.file.lower().endswith(\"srt\"), colorful_str.err(\n \"file must end with .srt\"\n )\n\n with open(args.file, \"r\", encoding=\"utf-8\") as f:\n src = f.read()\n # avoid effect of dark magic of file system\n src = src.replace(\"\\ufeff\", \"\")\n\n srt = Srt(src, args.ignore)\n test_same(src, srt.dump())\n\n srt.sort(key=lambda x: x.time_stamp[0])\n dst = srt.dump(delay=args.delay)\n\n if args.output is None:\n args.output = \"{}_{}.srt\".format(args.file[:-4], args.delay)\n\n with open(args.output, \"w\") as f:\n f.write(dst)\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"fastrun/source/srt_sort.py","file_name":"srt_sort.py","file_ext":"py","file_size_in_byte":5220,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"350697858","text":"import os\nfrom datetime import datetime\nfrom matplotlib import pyplot\n\nimport asserts\nfrom data.handle import config\n\n\nTIME, X, Y, Z = 0, 2, 3, 4\nLABELS = {X: 'AccX', Y: 'AccY', Z: 'AccZ'}\n\nclass ImageConverter:\n def __init__(self, date: str, data: dict, base_dir: str = config.PNG_FILE_PATH):\n self.dev_id, self.summary, self.results = data.get('dev_id'), date, data.get('raw').get('results')[0]\n self.series = self.results['series'][0]\n self.values = 
self.series['values']\n        self.base_dir = os.path.join(base_dir, date)\n        asserts.assert_path_dir(self.base_dir)\n\n        self.__extract()\n\n    def __extract(self):\n        '''\n        x_axis_values are timestamp on graph\n        y_axis_values are acc X, acc Y, acc Z\n        '''\n        self.x_axis_values = []\n        self.y_axis_values = {X: [], Y: [], Z: []}\n\n        for value in self.values:\n            self.x_axis_values.append(datetime.fromtimestamp(value[TIME] / (10 ** 6)))\n            for column in (X, Y, Z):\n                self.y_axis_values[column].append(value[column])\n\n    def __draw(self, axis: int):\n        pyplot.rcParams[\"figure.figsize\"] = (10, 4)\n        pyplot.rcParams['lines.linewidth'] = 0.5\n        pyplot.rcParams['lines.color'] = 'r'\n        pyplot.rcParams['axes.grid'] = True\n\n        pyplot.plot(self.x_axis_values, self.y_axis_values[axis])\n        pyplot.gcf().autofmt_xdate()\n        pyplot.xlabel(\"Timestamp\")\n        pyplot.ylabel(LABELS[axis])\n        pyplot.title(f'{self.dev_id}_{self.summary}_{LABELS[axis]}')\n\n        fig = pyplot.gcf()\n        fig_filename = f'{self.dev_id}_{self.summary}_{LABELS[axis]}.png'\n        fig_path = os.path.join(self.base_dir, fig_filename)\n        fig.savefig(fig_path)\n        print('create a figure: ', fig_path)\n\n        pyplot.clf()\n\n        return fig_path\n\n    def get_graph_images(self, specific_axis: str = None):\n        axes = [X, Y, Z]\n        if specific_axis is not None:\n            if specific_axis == 'X': axes = [X]\n            elif specific_axis == 'Y': axes = [Y]\n            elif specific_axis == 'Z': axes = [Z]\n            else:\n                # error handling\n                raise Exception('incorrect arg')\n\n        images = [self.__draw(axis) for axis in axes]\n\n        return images","sub_path":"VisualizeServer/data/handle/image_converter.py","file_name":"image_converter.py","file_ext":"py","file_size_in_byte":2282,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"484810102","text":"# Quicksort\n# Complexity: O(n log n) to O(n^2)\ndef sort(target_array):\n    global cnt\n    left = []\n    right = []\n    middle = []\n    pivot_index = int(len(target_array) / 2)\n    pivot_value = target_array[pivot_index]\n\n    for element in target_array:\n        if pivot_value > element:\n            left.append(element)\n        elif pivot_value < element:\n            right.append(element)\n        else:\n            middle.append(element)\n        cnt += 1\n    if len(left) > 0:\n        left = sort(left)\n    if len(right) > 0:\n        right = sort(right)\n\n    return left + middle + right\n\nif __name__ == '__main__':\n    import random\n    # data = [6, 15, 4, 2, 8, 5, 11, 9, 7, 13, 1]\n    # data = [8, 4, 3, 7, 6, 5, 2, 1]\n    # Generate a random sample array (20 values drawn from the range up to 30)\n    data = random.sample(range(30), k=20)\n    print(\"Before sort:\", data)\n    # print(\"Data length:\", len(data))\n    cnt = 0\n    data = sort(data)\n    print(\"After sort:\", data)\n    print(str(cnt) + \" iterations\")","sub_path":"05_数学関連/03_ソート/sort_quick.py","file_name":"sort_quick.py","file_ext":"py","file_size_in_byte":1029,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"497013766","text":"#\n# @lc app=leetcode.cn id=688 lang=python3\n#\n# [688] Knight Probability in Chessboard\n#\n# https://leetcode-cn.com/problems/knight-probability-in-chessboard/description/\n#\n# algorithms\n# Medium (50.35%)\n# Likes:    104\n# Dislikes: 0\n# Total Accepted:    6.1K\n# Total Submissions: 12.1K\n# Testcase Example:  '3\\n2\\n0\\n0'\n#\n# On an NxN chessboard, rows and columns are numbered from 0, so the top-left square is (0, 0) and the bottom-right square is (N-1, N-1).\n#\n# A knight starts at (r, c) and plans to make exactly K moves.\n#\n# As in standard chess, each knight move first goes 2 squares horizontally or vertically, then 1 square in the perpendicular direction, giving 8\n# possible destinations.\n#\n# On each move the knight picks one of these destinations (including ones off the board) independently and uniformly at random, until it has moved K times or has jumped off the board.\n#\n# Return the probability that the knight is still on the board after it stops moving.\n#\n# Example:\n#\n# Input: 3, 2, 0, 0\n# Output: 0.0625\n# Explanation:\n# The inputs are N, K, r, c in that order.\n# On the 1st move there are exactly 2 moves that keep the knight on the board (to (1,2) or (2,1)). For each of those cases, on the 2nd move there are again exactly 2 moves that keep the knight\n# on the board.\n# So the probability that the knight is still on the board at the end is 0.0625.\n#\n# Note:\n#\n# N is in the range [1, 25]\n# K is in the range [0, 100]\n# The knight always starts on the board\n#\n\n# @lc code=start\nclass Solution:\n    def knightProbability(self, N: int, K: int, r: int, c: int) -> float:\n        if K == 0: return 1\n        if N < 3: return 0\n\n        move = [[2,1],[2,-1],[-2,1],[-2,-1],[1,2],[1,-2],[-1,2],[-1,-2]]\n\n        '''\n        # Recursion times out; already fails at N=8, K=30\n        def prob1step(x,y,K):\n            if K == 0: return 1\n            prob = 0\n            for i, j in move:\n                if 0 <= x+i < N and 0 <= y+j < N:\n                    prob += prob1step(x+i,y+j,K-1) * 0.125\n            return prob\n        \n        return prob1step(r,c,K)\n        '''\n        # Bottom-up dynamic programming: dp[i][j][k] is the probability of still being on the board after k moves from (i, j), built from the k-1 step probabilities of the reachable squares\n        dp1 = [[1] * N for _ in range(N)]  # probability of being on the board after 0 moves\n        dp2 = [[0] * N for _ in range(N)]\n        \n        for _ in range(K-1):  # K-1 steps; the final step is handled separately below\n            for i in range(N):\n                for j in range(N):\n                    for x, y in move:\n                        if 0 <= x+i < N and 0 <= y+j < N:\n                            dp2[i][j] += dp1[x+i][y+j]/8\n            dp1 = dp2.copy()\n            dp2 = [[0] * N for _ in range(N)]\n        \n        # final step\n        res = 0\n        for x, y in move:\n            if 0 <= x+r < N and 0 <= y+c < N:\n                res += dp1[x+r][y+c]/8\n        return res\n#s = Solution()\n#s.knightProbability(3,3,0,0)\n# @lc code=end\n\n","sub_path":"688.马-在棋盘上的概率.py","file_name":"688.马-在棋盘上的概率.py","file_ext":"py","file_size_in_byte":3067,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"595269458","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Apr  5 01:44:46 2019\n\n@author: thuanngo tungft\n\n\"\"\"\nfrom flask_restful import Resource\nfrom source.utilities.utils import Utils\n\nclass Segmentation(Resource):\n    @classmethod\n    def make_api(cls, request, es,_index_seg, _doc_seg):\n        cls.request = request\n        cls.es = es\n        cls._index_seg = _index_seg\n        cls._doc_seg = _doc_seg\n        return cls\n# Search a segmentation by segID\n    def get(self, segID):\n        res = self.es.search(index=self._index_seg, body={ \"query\": { \"ids\" : { \"values\" : [segID] } } })\n        res = Utils.getActive(res)\n        if (len(res) > 0):\n            return res, 200\n        return \"Segmentation not found\", 404\n\n# Create a new seg, unique segID\n    def post(self, segID):\n        res = self.es.search(index=self._index_seg, body={ \"query\": { \"ids\" : { \"values\" : [segID] } } })\n        res = Utils.getActive(res)\n        if (len(res) > 0):\n            return \"segID already exists\", 400\n        inp = self.request.json\n        inp['active'] = 1\n        res = self.es.index(index=self._index_seg, doc_type=self._doc_seg, id = segID, body=inp)\n        return res[\"result\"], 201\n\n# Update a existing seg\n    def put(self, segID):\n        res = self.es.search(index=self._index_seg, body={ \"query\": { \"ids\" : { \"values\" : [segID] } } })\n        res = Utils.getActive(res)\n        if (len(res) == 0):\n            return \"segID not found\", 400\n        inp = self.request.json\n        inp['active'] = res[0]['_source']['active']\n        res = self.es.index(index=self._index_seg, doc_type=self._doc_seg, id = segID, body=inp)\n        return res[\"result\"], 202\n    \n# Delete a existing seg\n    def delete(self, segID):\n        res = self.es.search(index=self._index_seg, body={ \"query\": { \"ids\" : { \"values\" : [segID] } } })\n        res = Utils.getActive(res)\n        if (len(res) == 0):\n            return \"segID not found\", 400\n        res[0]['_source']['active'] = 0\n        res = self.es.index(index=self._index_seg, doc_type=self._doc_seg, id = segID, body=res[0]['_source'])\n        return \"Deleted.\", 202\n\n# Get all segmentation\nclass SegAll(Resource):\n    @classmethod\n    def make_api(cls, es,_index_seg):\n        cls.es = es\n        cls._index_seg = _index_seg\n        return cls\n    def get(self):\n        _query = {\"query\": { \"match_all\": { } } }\n        res = self.es.search(index=self._index_seg, body=_query)\n        res = Utils.getActive(res)\n        if (len(res) > 0):\n            return res, 200\n        return \"segID not found\", 400\n    \n#EXECUTE a seg - run a query in ES\nclass ExecuteSegmentation(Resource):\n    @classmethod\n    def make_api(cls, es,_index_seg,_index,_index_query):\n        cls.es = es\n        cls._index_seg = _index_seg\n        cls._index = _index\n        cls._index_query = _index_query\n    def get(self, segID):\n        res = self.es.search(index=self._index_seg, body={ \"query\": { \"ids\" : { \"values\" : [segID] } } })\n        res = Utils.getActive(res)\n        if (len(res) == 0):\n            return \"segID not found\", 400\n        queryID = res[0][\"_source\"][\"queryID\"]\n        return Utils.exeQuery(queryID, self.es,self._index, self._index_query)\n","sub_path":"rule_engine_ver_0.1/source/model/segmentation.py","file_name":"segmentation.py","file_ext":"py","file_size_in_byte":3208,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"298131160","text":"# -*- coding: utf-8 -*-\n\n# @Author = 'CanYi.Lin'\n\n# @Date = 2019-02-15 13:56\n\n\n\n\nfrom hashlib import sha1\n\n\n\nsha1Obj = sha1()\nfile_path = '希腊圣托里尼岛风景5K壁纸的副本.jpg'\n\n\n\"\"\"Dangerous code example\"\"\"\nwith open(file_path, 'rb') as f:\n    sha1Obj.update(f.read())\n# or\nwith open(file_path, 'rb') as f:\n    for line in f.readlines():\n        print(line)\n\n\n\"\"\"\nNeither approach causes any trouble when reading small files, but as soon as you read a large file they can easily raise a MemoryError, i.e. run out of memory.\n\nWhy Memory Error?\n\nLet's first look at these two methods:\n\nWith the default argument size=-1, the read method reads until EOF, so when the file size exceeds the available memory an out-of-memory error naturally occurs.\n\nThe read method\nread([size]) reads size bytes starting from the file's current position; with no size argument it reads until the end of the file, returning a string object.\nLikewise, readlines builds a list - a list, not an iterator - so all of the content is held in memory, which also leads to an out-of-memory error.\n\nThe readlines method\nThis method reads one line of content at a time, so it uses little memory while reading and is better suited to large files; it returns a string object.\n\n\"\"\"\n\n\n\"\"\"Correct usage\"\"\"\n'''For binary files the following style is recommended; you can specify how many bytes the buffer holds yourself. Obviously, the bigger the buffer, the faster the read.'''\nwith open(file_path, 'rb') as f:\n    while True:\n        buf = f.read(1024)\n        if buf:\n            sha1Obj.update(buf)\n        else:\n            break","sub_path":"Chapter-write/chapter01.py","file_name":"chapter01.py","file_ext":"py","file_size_in_byte":1553,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"139639752","text":"import tensorflow as tf\r\n\r\nfrom bot_code.models.atbas import nnatba\r\n\r\nCUDNN = \"cudnn\"\r\nBASIC = \"basic\"\r\n\r\n\r\nclass RNNAtba(nnatba.NNAtba):\r\n\r\n    num_hidden_1 = 500  # 1st layer num features\r\n    labels = None\r\n    num_layers = 5\r\n    hidden_size = 300\r\n    rnn_mode = CUDNN\r\n    num_steps = 10\r\n    batch_size = 1\r\n    keep_prob = 0.5\r\n    init_scale = .99\r\n\r\n    \"\"\"\"\r\n    This will be the example model framework with the needed functions but none of the code inside them\r\n    You can copy this to implement your own model\r\n    \"\"\"\r\n    def __init__(self, session, state_dim, num_actions, player_index=-1, action_handler=None, is_training=False,\r\n                 optimizer=tf.train.GradientDescentOptimizer(learning_rate=0.1), summary_writer=None, summary_every=100,\r\n                 config_file=None):\r\n\r\n        super().__init__(session, state_dim, num_actions, player_index, action_handler, is_training, optimizer,\r\n                         summary_writer, summary_every,config_file)\r\n\r\n    def _create_model(self, model_input):\r\n        self.create_weights()\r\n        hidden_layers = self.input_encoder(model_input)\r\n        hidden_layers = tf.expand_dims(hidden_layers, 0)\r\n        output, last_state = self._build_rnn_graph(inputs=hidden_layers, config=None, is_training=self.is_training)\r\n        with 
tf.variable_scope('RNN'):\r\n output_w = tf.get_variable('output_w', [self.hidden_size, output.get_shape()[1]])\r\n output_b = tf.get_variable('output_b', [output.get_shape()[1]])\r\n\r\n output = tf.reshape(output, [-1, self.hidden_size])\r\n output = tf.nn.xw_plus_b(output, output_w, output_b)\r\n\r\n logits = self.rnn_decoder(output)\r\n return self.action_handler.create_model_output(logits), logits\r\n\r\n def create_weights(self):\r\n self.weights = {\r\n 'inputW': tf.Variable(tf.random_normal([self.state_feature_dim, self.hidden_size]), name='inputW'),\r\n 'rnn_outputW': tf.Variable(tf.random_normal([self.hidden_size, self.num_hidden_1]), name='rnn_outputW'),\r\n 'out': tf.Variable(tf.random_normal([self.num_hidden_1, self.num_actions]), name='outputW'),\r\n }\r\n self.biases = {\r\n 'inputB': tf.Variable(tf.random_normal([self.hidden_size]), name='inputB'),\r\n 'rnn_outputB': tf.Variable(tf.random_normal([self.num_hidden_1]), name='rnn_outputB'),\r\n 'out': tf.Variable(tf.random_normal([self.num_actions]), name='outputB'),\r\n }\r\n\r\n def input_encoder(self, input):\r\n # Encoder Hidden layer with sigmoid activation #1\r\n inputs = tf.nn.relu(tf.add(tf.matmul(input, self.weights['inputW']), self.biases['inputB']), name='input_layer')\r\n return inputs\r\n\r\n def rnn_decoder(self, rnn_out):\r\n # Encoder Hidden layer with sigmoid activation #2\r\n hidden_layer_1 = tf.nn.relu(tf.add(tf.matmul(rnn_out, self.weights['rnn_outputW']), self.biases['rnn_outputB']), name='rnn_out')\r\n\r\n # Encoder Hidden layer with sigmoid activation #3\r\n return tf.nn.sigmoid(tf.add(tf.matmul(hidden_layer_1, self.weights['out']), self.biases['out']), name='logits')\r\n\r\n def _build_rnn_graph(self, inputs, config, is_training):\r\n if self.rnn_mode == CUDNN:\r\n return self._build_rnn_graph_cudnn(inputs, config, is_training)\r\n else:\r\n return self._build_rnn_graph_lstm(inputs, config, is_training)\r\n\r\n def _build_rnn_graph_cudnn(self, inputs, config, is_training):\r\n \"\"\"Build the inference graph using CUDNN cell.\"\"\"\r\n inputs = tf.transpose(inputs, [1, 0, 2])\r\n self._cell = tf.contrib.cudnn_rnn.CudnnLSTM(\r\n num_layers=self.num_layers,\r\n num_units=self.hidden_size,\r\n input_size=self.hidden_size,\r\n dropout=1 - self.keep_prob if is_training else 0)\r\n params_size_t = self._cell.params_size()\r\n self._rnn_params = tf.get_variable(\r\n \"lstm_params\",\r\n initializer=tf.random_uniform(\r\n [params_size_t], -self.init_scale, self.init_scale),\r\n validate_shape=False)\r\n c = tf.zeros([self.num_layers, self.batch_size, self.hidden_size],\r\n tf.float32)\r\n h = tf.zeros([self.num_layers, self.batch_size, self.hidden_size],\r\n tf.float32)\r\n self._initial_state = (tf.contrib.rnn.LSTMStateTuple(h=h, c=c),)\r\n outputs, h, c = self._cell(inputs, h, c, self._rnn_params, is_training)\r\n outputs = tf.transpose(outputs, [1, 0, 2])\r\n outputs = tf.reshape(outputs, [-1, self.hidden_size])\r\n return outputs, (tf.contrib.rnn.LSTMStateTuple(h=h, c=c),)\r\n\r\n def _get_lstm_cell(self, config, is_training):\r\n if self.rnn_mode == BASIC:\r\n return tf.contrib.rnn.BasicLSTMCell(\r\n self.hidden_size, forget_bias=0.0, state_is_tuple=True,\r\n reuse=not is_training)\r\n if self.rnn_mode == BLOCK:\r\n return tf.contrib.rnn.LSTMBlockCell(\r\n self.hidden_size, forget_bias=0.0)\r\n raise ValueError(\"rnn_mode %s not supported\" % self.rnn_mode)\r\n\r\n def _build_rnn_graph_lstm(self, inputs, config, is_training):\r\n \"\"\"Build the inference graph using canonical LSTM cells.\"\"\"\r\n # 
Slightly better results can be obtained with forget gate biases\r\n # initialized to 1 but the hyperparameters of the model would need to be\r\n # different than reported in the paper.\r\n def make_cell():\r\n cell = self._get_lstm_cell(config, is_training)\r\n if is_training and self.keep_prob < 1:\r\n cell = tf.contrib.rnn.DropoutWrapper(\r\n cell, output_keep_prob=self.keep_prob)\r\n return cell\r\n\r\n cell = tf.contrib.rnn.MultiRNNCell(\r\n [make_cell() for _ in range(self.num_layers)], state_is_tuple=True)\r\n\r\n self._initial_state = cell.zero_state(self.batch_size, data_type())\r\n state = self._initial_state\r\n # Simplified version of tensorflow_models/tutorials/rnn/rnn.py's rnn().\r\n # This builds an unrolled LSTM for tutorial purposes only.\r\n # In general, use the rnn() or state_saving_rnn() from rnn.py.\r\n #\r\n # The alternative version of the code below is:\r\n #\r\n # inputs = tf.unstack(inputs, num=num_steps, axis=1)\r\n # outputs, state = tf.contrib.rnn.static_rnn(cell, inputs,\r\n # initial_state=self._initial_state)\r\n outputs = []\r\n with tf.variable_scope(\"RNN\"):\r\n for time_step in range(self.num_steps):\r\n if time_step > 0: tf.get_variable_scope().reuse_variables()\r\n (cell_output, state) = cell(inputs[:, time_step, :], state)\r\n outputs.append(cell_output)\r\n output = tf.reshape(tf.concat(outputs, 1), [-1, self.hidden_size])\r\n return output, state\r\n\r\n def get_model_name(self):\r\n return 'rnnatba' + ('_split' if self.action_handler.is_split_mode else '')\r\n","sub_path":"bot_code/models/atbas/rnn_atba.py","file_name":"rnn_atba.py","file_ext":"py","file_size_in_byte":7021,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"639672253","text":"#!/usr/bin/env python\nfrom __future__ import print_function\nimport argparse\nfrom datetime import datetime\ntry:\n import twitter_bot_utils as tbu\nexcept ImportError:\n import tweepy\n\n\ndef last_tweet(api, screen_name):\n '''Hours since last tweet. Returns float.'''\n try:\n created_at = api.user_timeline(screen_name, count=1)[0].created_at\n now = datetime.now()\n\n return (now - created_at).total_seconds() / 3600.\n except (TypeError, IndexError):\n return False\n\n\ndef notify(api, screen_name, elapsed=False):\n if elapsed:\n text = \"I'm not working. It's been {} hours since my last tweet. Fix me!\".format(int(elapsed))\n else:\n text = \"I'm not working. Fix me!\"\n\n api.send_direct_message(screen_name=screen_name, text=text)\n\n\ndef whatsup(api, screen_name, hours, recipient=None):\n elapsed = last_tweet(api, screen_name)\n\n if elapsed > hours:\n if recipient:\n notify(api, recipient, elapsed)\n else:\n print('no tweets from', screen_name, 'in the last', int(elapsed), 'hours')\n\n\ndef main():\n parser = argparse.ArgumentParser()\n\n parser.add_argument('--screen_name', default=None, required=False, help='screen name to check')\n parser.add_argument('--hours', type=int, default=24,\n help=\"Gaps of this many hours are a problem (default: 24)\")\n parser.add_argument('--notify', dest='recipient', metavar='USER', type=str, default=None,\n help='user to notify when screen_name is down')\n\n try:\n tbu\n parser.add_argument('-c', '--config', dest='config_file', metavar='PATH', default=None, type=str,\n help='bots config file (json or yaml). 
All bots in the file will be checked.')\n except NameError:\n pass\n\n parser.add_argument('--key', type=str, help='access token')\n parser.add_argument('--secret', type=str, help='access token secret')\n parser.add_argument('--consumer-key', metavar='KEY', type=str, help='consumer key (aka consumer token)')\n parser.add_argument('--consumer-secret', metavar='SECRET', type=str, help='consumer secret')\n\n args = parser.parse_args()\n\n if getattr(args, 'config_file'):\n bots = tbu.confighelper.parse(args.config_file).get('users').keys()\n\n for bot in bots:\n api = tbu.API(screen_name=bot, config_file=args.config_file)\n\n if api.config.get('whatsupbot') is False:\n continue\n\n hours = api.config.get('whatsupbot', {}).get('hours', args.hours)\n whatsup(api, bot, hours, args.recipient)\n\n else:\n try:\n api = tbu.API(args)\n except NameError:\n auth = tweepy.OAuthHandler(args.consumer_key, args.consumer_secret) \n auth.set_access_token(args.key, args.secret)\n api = tweepy.API(auth)\n\n whatsup(api, args.screen_name, args.hours, args.recipient)\n\nif __name__ == '__main__':\n main()\n","sub_path":"whatsupbot.py","file_name":"whatsupbot.py","file_ext":"py","file_size_in_byte":2968,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"168811183","text":"\n\n\n\nimport matplotlib.pyplot as plt\nimport numpy as np\n\ndef f(x,y): #diffrential equation function\n F= (y/x) - ((y/x)**2)\n return F\n\n\n\ndef Eulr_diff(y,xi,xf,h): #rk4 y is a np array/list containing initial value\n #xi initial x , xf final x, h step size\n \n #h = (xf-xi)/n\n x = xi\n n= int(np.floor((xf-xi)/h))#number of mesh points\n \n for i in range(0,n-1): \n x = x + h\n y.append(y[i] + h*f(x,y[i]))\n #print(y)\n \n xp = np.linspace(xi,xf,n)\n yp = xp/(1+ np.log(xp))\n\n plt.scatter(xp,y,c='k',label='Euler method') #marker='o'\n plt.plot(xp,yp,'b',label='Exact solution')\n plt.xlabel('t')\n plt.ylabel('y(t)')\n plt.legend()\n plt.grid()\n plt.show()\n \n err=abs(yp-y)\n rel=abs(((yp-y)/yp))\n plt.plot(xp,err)\n plt.title('Absolute error')\n plt.grid()\n plt.xlabel('t')\n plt.ylabel('abs error(t)')\n plt.show()\n plt.plot(xp,rel)\n plt.title('Relative error')\n plt.xlabel('t')\n plt.ylabel('rel error(t)')\n plt.grid()\n plt.show()\n \n print('Absolute Error:',err)\n print('Relative Error:',rel)\n \n \n \n \n return()\n\n\ny = [1]\nxi=1\nxf=2\nh=0.1\n\nEulr_diff(y,xi,xf,h)\n\n\n\n\n\n\n\n","sub_path":"q2.py","file_name":"q2.py","file_ext":"py","file_size_in_byte":1228,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"44594779","text":"from tkinter import * \nfrom tkinter import ttk \nfrom widgets.AppWidgets import *\nfrom AppPalette import *\n\nclass InstSettingsPage(Frame):\n def __init__(self, pages, instruments, midiMaster, *args, **kwargs):\n Frame.__init__(self, *args, **kwargs)\n self.config(bg=AppPalette.Blue)\n self.__instruments = instruments\n self.__instruments.onMidiInUpdate(self.midiInHandler)\n self.__instruments.onInstrumentUpdate(self.instrumentChangeHandler)\n\n self._sliders = {}\n self._sliderLabels = []\n self.__pages = pages\n\n self.__midiMaster = midiMaster\n #self.__midiMaster.onUpdate(self.midiInHandler)\n \n self.__canvasTop = Canvas(self, width=self['width'], height=50, bg=AppPalette.DarkBlue,highlightthickness=0) \n self.__canvasBottom = Canvas(self, width=self['width'], height=self['height']-50, bg=AppPalette.Blue,highlightthickness=0) \n\n 
self.__setupForNewInstrument(self.__instruments.currentInstrument)\n\n self.__populateTopCanvas(self.__canvasTop)\n self.__populateBottomCanvas(self.__canvasBottom)\n\n self.__canvasTop.pack(side=\"top\", expand=YES)\n self.__canvasBottom.pack(side=\"bottom\", expand=YES)#3fill=BOTH, expand=YES)\n\n def show(self):\n self.__updateAllSliders()\n #self.__populateBottomCanvas(self.__canvasBottom)\n self.lift()\n\n def midiInHandler(self, controlName, value):\n #print(f\"NextPage midiInHandler-{controlName}-{value}\")\n slider = self._sliders.get(controlName, None)\n\n if slider is not None:\n slider.setToValue(value)\n\n def instrumentChangeHandler(self, instrumentName):\n self.__setupForNewInstrument(instrumentName)\n self.__populateBottomCanvas(self.__canvasBottom)\n\n def __setupForNewInstrument(self, instrumentName):\n self.__sliderCount = len(self.__instruments.getCurentSettings())\n self.__slidersPos = 0\n\n def __btnCallback(self, event):\n self.__pages[\"mainPage\"].show()\n\n def __settingsShiftLeftCallback(self, event): \n newSlidersPos = self.__slidersPos + 2\n #5 is the number of sliders eveer on the screen\n if (newSlidersPos + 5) >= self.__sliderCount:\n newSlidersPos = self.__sliderCount - 5\n self.__moveSliders(newSlidersPos, -1)\n \n def __settingsShiftRightCallback(self, event): \n newSlidersPos = self.__slidersPos - 2\n #5 is the number of sliders eveer on the screen\n if newSlidersPos < 0:\n newSlidersPos = 0\n self.__moveSliders(newSlidersPos, 1)\n\n def __moveSliders(self, newSlidersPos, directionMultiplier):\n sliderSpacing = 75\n dif = -(newSlidersPos - self.__slidersPos) * sliderSpacing #* directionMultiplier \n #print(f\"Cur pos: {self.__slidersPos} New pos: {newSlidersPos} Dif: {dif}\")\n for slider in self._sliders.values():\n slider.moveHorizontally(dif)\n labelNames = list(self._sliders.keys())\n i=0\n for lbl in self._sliderLabels:\n lbl.config(text = labelNames[i+newSlidersPos])\n i = i+1\n #curXPos = int(lbl.winfo_x()+(lbl.winfo_width()/2))\n #lbl.place(x=curXPos+dif)\n self.__slidersPos = newSlidersPos\n\n def __populateTopCanvas(self, canvas):\n titleLbl = Label(canvas, text=\"SP1X\")\n titleLbl.config(fg=AppPalette.White, bg=AppPalette.DarkBlue, font='Helvetica 28 bold')\n titleLbl.place(x=275, y=25, anchor=\"center\")\n\n MenuBtn(canvas, 20, 10, 120, 30, \"Back\", self.__btnCallback)\n\n def __populateBottomCanvas(self, canvas):\n sliderStartXPos = 60\n sliderSpacing = 75\n\n #remove existing sliders and labels\n self.__canvasBottom.delete(\"all\")\n self._sliders.clear() \n for lbl in self._sliderLabels:\n lbl.destroy()\n self._sliderLabels.clear()\n\n #draw sliders\n i = 0\n print(\"slides for\")\n print(self.__instruments.getCurentSettings())\n for name, value in self.__instruments.getCurentSettings():\n print(f\"{name}-{value}\")\n if i < 5:\n lbl = LowerLabel(canvas, text=name)\n lbl.place(x = (i*sliderSpacing)+sliderStartXPos+25,y = 13, anchor=\"center\")\n self._sliderLabels.append(lbl)\n xPos = (i*sliderSpacing)+sliderStartXPos\n \n rangeData = self.__instruments.getControlRangeData(name)\n #print(f\"rangeData is {rangeData}\")\n slider = StandardMidiSliderControl(canvas, xPos, 26, rangeData[0], rangeData[1], float(value))\n slider.OnUpdate(self.__handleSliderChange)\n self._sliders[name] = slider \n i = i+1\n print(\"done\")\n canvas.create_rectangle(0, 0, 50, 300, fill=AppPalette.Blue, outline=\"\")\n canvas.create_rectangle(430, 0, 430+50, 300, fill=AppPalette.Blue, outline=\"\")\n MenuBtn(canvas, 10, 66, 30, 150, \"<\", 
self.__settingsShiftRightCallback)\n MenuBtn(canvas, 435, 66, 30, 150, \">\", self.__settingsShiftLeftCallback)\n \n \n def __updateAllSliders(self):\n i = 0\n for name, value in self.__instruments.getCurentSettings():\n if i > 4:\n break\n slider = self._sliders.get(name, None)\n\n if slider is not None:\n slider.setToValue(float(value))\n\n def __handleSliderChange(self, obj, event):\n for key, value in self._sliders.items():\n if value == obj:\n #print (f\"Found {key}, {event}\")\n self.__instruments.setInstrumentSetting(key, event)\n #self.__midiMaster.sendControlOutputForControlName(key, event)","sub_path":"gui/synthgui/pages/SettingsPage.py","file_name":"SettingsPage.py","file_ext":"py","file_size_in_byte":5749,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"532459510","text":"from openerp import api\nfrom openerp.exceptions import ValidationError\nfrom openerp.osv import fields, osv\nfrom openerp.tools.translate import _\nfrom datetime import date, time\n\nclass patient_info(osv.osv):\n _name = \"patient.info\"\n # _rec_name = 'patient_id'\n\n def name_get(self, cr, uid, ids, context=None):\n if not ids:\n return []\n res = []\n for elmt in self.browse(cr, uid, ids, context=context):\n name = elmt.name\n try:\n name = name + ' ' + str(elmt.patient_id) if elmt.patient_id is not False else '---'\n res.append((elmt.id, name))\n except ValueError:\n pass\n return res\n\n # def name_search(self, name, args=None, operator='ilike', limit=100):\n # import pdb\n # pdb.set_trace()\n\n # args = args or []\n # recs = self.browse()\n # if not recs:\n # recs = self.search([('patient_id', operator, name)] + args, limit=limit)\n # return 1\n\n def _testname(self,cr,uid,ids,field_name, arg, context=None):\n result={}\n tes_id =[]\n abc=[]\n patient_id=self.browse(cr,uid,ids,context=None)\n for items in patient_id:\n abc.append(items.id)\n\n\n\n bill_id=self.pool.get('bill.register').search(cr,uid,[('patient_name', '=', abc)],context=None)\n test_history = self.pool.get('bill.register').browse(cr, uid, bill_id, context=None)\n xyz=[]\n for testname in test_history:\n for datas in testname.bill_register_line_id.name:\n xyz.append(datas)\n abcd = []\n for item in xyz:\n for items in self.browse(cr,uid,ids,context=None):\n # import pdb\n # pdb.set_trace()\n abcd.append(item.name)\n result[items.id]=abcd\n\n return result\n\n @api.multi\n @api.constrains('mobile')\n def _check_mobile(self):\n\n for rec in self:\n\n if rec.mobile and len(rec.mobile) != 11:\n\n raise ValidationError(_(\"Mobile Number Should be 11 digit\"))\n\n return True\n\n\n\n\n\n # for item in test_history:\n # abcd.append(item.name)\n\n # bill_obj=self.pool.get('bill.register').browse(self,uid,ids,context)\n # for item in bill_obj:\n # if item.testid:\n # tes_id.append(item.testid)\n # tes_obj=self.pool.get('abc.model').browse(self,uid,tes_id,context)\n # biil_history={}\n # for record in bill_obj.bill_register_line_id:\n # price=record.price\n # biil_history[record.id]=price\n # return biil_history\n\n\n\n _columns = {\n\n 'mobile': fields.char(\"Mobile No\"),\n 'patient_id': fields.char(\"Patient Id\", readonly=True),\n 'name':fields.char(\"Name\", required=True),\n 'age':fields.char('Age'),\n 'address':fields.char('Address',required=True),\n 'sex': fields.selection([('male', 'Male'), ('female', 'Female'),('others','Others')], string='Sex', default='male'),\n 'bills':fields.one2many('bill.register','patient_name','Bill History',required=False),\n 
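The _check_mobile constraint in the patient record above rejects only values whose length differs from 11, so an 11-character string containing letters still passes. A stricter plain-Python digit check, as a standalone sketch (the helper name is hypothetical, not part of the Odoo model):

import re

def is_valid_mobile(mobile):
    # exactly 11 characters, all of them digits
    return bool(re.fullmatch(r"\d{11}", mobile or ""))

assert is_valid_mobile("01712345678")
assert not is_valid_mobile("0171234567a")   # right length, wrong content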
'testname':fields.function(_testname,string=\"Test Name\",type='char'),\n 'state': fields.selection(\n [('created', 'Created'), ('notcreated', 'Notcreated')],\n 'Status', default='notcreated', readonly=True),\n # 'is_patient':fields.function(_ispatient,string=\"Is Patient\",type='boolean')\n }\n _sql_constraints = [\n ('code_mobile_uniq', 'Check(1=1)', 'The mobile number already exist !')\n ]\n\n def create(self, cr, uid, vals, context=None):\n # mobile_number = None\n # mobile_number = vals.get('mobile')\n # if len(mobile_number) <11:\n # raise osv.except_osv(_('Error!'), _('Mobile number should minimum 11 digit'))\n # if len(mobile_number)>11:\n # x=mobile_number.split()\n # number=x[0]\n # if((number[0]=='0' or number[1]=='0' or number[2]=='0' or number[3]=='0') and len(number)>11):\n # number=number[:-1]\n # back = len(number) - 11\n # if len(number)>11:\n # listnumber=[]\n # for item in range(len(number)-1,back-1,-1):\n # singlenumber=number[item]\n # listnumber.append(singlenumber)\n # reverse_number=''.join(listnumber)\n # final_number=reverse_number[::-1]\n # vals['mobile'] = final_number\n # else:\n # vals['mobile']=number\n\n # import pdb\n # pdb.set_trace()\n\n\n\n\n\n stored_id=super(patient_info, self).create(cr, uid, vals, context=context)\n if stored_id is not None:\n name_text = 'P-0' + str(stored_id)\n cr.execute('update patient_info set patient_id=%s where id=%s', (name_text, stored_id))\n cr.execute('update patient_info set state=%s where id=%s', ('created', stored_id))\n cr.commit()\n\n return stored_id\n\n def write(self, cr, uid, ids, vals, context=None):\n change_patient= self.browse(cr, uid, ids, context)\n if \"age\" in vals:\n newage=vals['age']\n # query=\"select name from opd_ticket where patient_id=%\"\n # cr.execute(query, (ids))\n # import pdb\n # pdb.set_trace()\n\n return super(patient_info, self).write(cr, uid, ids, vals, context=context)\n\n @api.model\n def name_search(self, name, args=None, operator='ilike', limit=100):\n args = args or []\n recs = self.browse()\n # test_val=self.search([])\n\n if name:\n recs = self.search([('patient_id', '=', name)] + args, limit=limit)\n\n # import pdb\n # pdb.set_trace()\n if not recs:\n recs = self.search([('patient_id', operator, name)] + args, limit=limit)\n if not recs:\n recs = self.search([('name', operator, name)] + args, limit=limit)\n if not recs:\n recs = self.search([('mobile', operator,name)] + args, limit=limit)\n\n return recs.name_get()\n","sub_path":"Patients/patient_info.py","file_name":"patient_info.py","file_ext":"py","file_size_in_byte":6105,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"350297777","text":"import random\nfrom math import tanh, cosh, exp\n\nalpha = 0.8\n\nclass Connection:\n def __init__(self, w, dw):\n self.w = w\n self.dw = dw\n\nclass Neuron:\n def __init__(self, myInd, numInp):\n self.myInd = myInd\n self.numInp = numInp\n self.inputVal = 1\n self.outputVal = 1\n self.connections = []\n self.error = 0\n for nNum in range(self.numInp):\n self.connections.append(Connection(w=randomWeight(), dw=0))\n\n def activationFunc(self, x):\n # return (x / (abs(x) + 1) + 1) / 2\n return 1 / (1 + exp(-x))\n\n def dActivationFunc(self, x):\n # return 0.5 / (1 + abs(x)) ** 2\n return self.activationFunc(x) * (1 - self.activationFunc(x))\n\n def calcVal(self, prevLayer):\n summ = 0\n for nNum in range(self.numInp):\n summ += prevLayer[nNum].outputVal * self.connections[nNum].w\n\n self.setVal(summ)\n\n def setVal(self, x):\n 
self.inputVal = x\n self.outputVal = self.activationFunc(x)\n\n def updateConnections(self, prevLayer):\n for nNum in range(self.numInp):\n self.connections[nNum].dw += alpha * self.error * prevLayer[nNum].outputVal\n\n def applyUpdates(self):\n for nNum in range(self.numInp):\n self.connections[nNum].w += self.connections[nNum].dw\n self.connections[nNum].dw = 0\n\nclass Net:\n def __init__(self, nInp, nHid, nOut):\n self.inpLayer = []\n self.hidLayer = []\n self.outLayer = []\n self.nInp = nInp + 1\n self.nHid = nHid + 1\n self.nOut = nOut\n\n for nNum in range(self.nInp):\n self.inpLayer.append(Neuron(nNum, 0))\n for nNum in range(self.nHid):\n self.hidLayer.append(Neuron(nNum, self.nInp))\n for nNum in range(self.nOut):\n self.outLayer.append(Neuron(nNum, self.nHid))\n\n\n def feedForw(self, inpVal):\n assert len(inpVal) == self.nInp - 1\n for nNum in range(self.nInp - 1):\n self.inpLayer[nNum].outputVal = (inpVal[nNum])\n for nNum in range(self.nHid - 1):\n self.hidLayer[nNum].calcVal(self.inpLayer)\n for nNum in range(self.nOut):\n self.outLayer[nNum].calcVal(self.hidLayer)\n\n def backProp(self, targetVal):\n assert len(targetVal) == self.nOut\n\n for nNum in range(self.nOut):\n error = (targetVal[nNum] - self.outLayer[nNum].outputVal) * self.outLayer[nNum].dActivationFunc(self.outLayer[nNum].inputVal)\n self.outLayer[nNum].error = error\n\n for nNum in range(self.nHid):\n sigm_in = 0\n for nextNNum in range(self.nOut):\n sigm_in += self.outLayer[nextNNum].error * self.outLayer[nextNNum].connections[nNum].w\n\n self.hidLayer[nNum].error = sigm_in * self.hidLayer[nNum].dActivationFunc(self.hidLayer[nNum].inputVal)\n\n for hidN in self.hidLayer:\n hidN.updateConnections(self.inpLayer)\n for outN in self.outLayer:\n outN.updateConnections(self.hidLayer)\n\n def applyUpdates(self):\n for hidN in self.hidLayer:\n hidN.applyUpdates()\n for outN in self.outLayer:\n outN.applyUpdates()\n\n def out(self):\n for f in self.inpLayer:\n print(f.outputVal, end=\" \")\n print()\n for s in self.hidLayer:\n print(s.outputVal, end=\" \")\n print()\n for t in self.outLayer:\n print(t.outputVal, end=\" \")\n print()\n\n def learn(self, inp, tar):\n self.feedForw(inp)\n self.backProp(tar)\n\n def test(self, inp, debug=True):\n if debug:\n print(\"Testing on\", inp)\n self.feedForw(inp)\n if debug:\n print(\"Got result\", [x.outputVal for x in self.outLayer])\n return [x.outputVal for x in self.outLayer]\n\n def backup(self):\n with open(\"backup.txt\", \"w\") as f:\n f.write(str(self.nInp - 1) + \" \" + str(self.nHid - 1) + \" \" + str(self.nOut) + \"\\n\")\n\n f.write(\"\\n\")\n for neuron in self.hidLayer:\n for connection in neuron.connections: \n f.write(str(connection.w) + \" \")\n f.write(\"\\n\")\n f.write(\"\\n\")\n for neuron in self.outLayer:\n for connection in neuron.connections: \n f.write(str(connection.w) + \" \")\n f.write(\"\\n\")\n\ndef fromFile():\n filename = \"backup.txt\"\n with open(filename, \"r\") as f:\n nInp, nHid, nOut = map(int, f.readline().split())\n net = Net(nInp, nHid, nOut)\n f.readline()\n for nNum in range(nHid + 1):\n values = f.readline().split()\n for prevN in range(nInp + 1):\n net.hidLayer[nNum].connections[prevN] = Connection(float(values[prevN]), 0)\n f.readline()\n for nNum in range(nOut):\n values = f.readline().split()\n for prevN in range(nHid + 1):\n net.outLayer[nNum].connections[prevN] = Connection(float(values[prevN]), 0)\n return net\n\ndef randomWeight():\n n = 1000\n return random.randint(0, n) / (130 * n)\n # return random.randint(0, n) / (130 * 
n)\n\ndef readTr():\n f = open(\"out_tr.txt\", \"r\")\n lines = f.readlines()\n tests = []\n for line in lines:\n ans, arr = line.split()\n ans = int(ans)\n arr = list(map(int, list(arr)))\n tests.append((ans, arr))\n return tests\n\ndef readTe():\n f = open(\"out_te_All.txt\", \"r\")\n lines = f.readlines()\n tests = []\n for line in lines:\n ans, arr = line.split()\n ans = int(ans)\n arr = list(map(int, list(arr)))\n tests.append((ans, arr))\n return tests\n\ndef testImage(i):\n print(\"Test with image #%d\" % (i + 1))\n ans, inp = test[i]\n print(\"True answer is %d\" % ans)\n res = net.test(inp, debug=False)\n print(\"Got answer\", res)\n resAr = [(res[x], x) for x in range(10)]\n resAr.sort(reverse=True)\n print(\"I think the answer is %d, confidense - %f\" % (resAr[0][1], resAr[0][0]))\n print()\n return resAr[0][1], ans\n\nnet = Net(784, 100, 10)\n# net = fromFile()\n\nprint(\"Reading data...\")\ntrain = readTr()\ntest = readTe()\nprint(\"Read\")\n\nn = 500\ne = 0\ngo = True\nwhile go:\n print(\"=========================\")\n for i in range(n):\n trainExercice = random.choice(train)\n print(\"\\rEpoche %d, test %d\" % (e + 1, i + 1), end=\"\")\n ans, inp = trainExercice\n ansArr = [0 for i in range(10)]\n ansArr[ans] = 1\n net.learn(inp, ansArr)\n net.applyUpdates()\n print(\"\\nEpoche ended!\")\n e += 1\n net.backup()\n\n print(\"Starting testing\")\n nCorrectAns = 0\n nTests = 500\n for i in range(nTests):\n imageInd = random.randint(0, len(test) - 1)\n neuronNetAns, correctAns = testImage(imageInd)\n if neuronNetAns == correctAns:\n nCorrectAns += 1\n print(\"Testing complete, success - %s%%\" % (nCorrectAns * 100 / nTests))\n print()\n","sub_path":"v1.py","file_name":"v1.py","file_ext":"py","file_size_in_byte":6986,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"460790496","text":"# Copyright 2012 Davoud Taghawi-Nejad\n#\n# Module Author: Davoud Taghawi-Nejad\n#\n# ABCE is open-source software. If you are using ABCE for your research you\n# are requested the quote the use of this software.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may not\n# use this file except in compliance with the License and quotation of the\n# author. You may obtain a copy of the License at\n# http://www.apache.org/licenses/LICENSE-2.0\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations under\n# the License.\n\"\"\"\nThe :class:`abceagent.Agent` class is the basic class for creating your agent.\nIt automatically handles the\npossession of goods of an agent. In order to produce/transforme goods you\nneed to also subclass the :class:`abceagent.Firm` [1]_ or to create a\nconsumer the :class:`abceagent.Household`.\n\nFor detailed documentation on:\n\nTrading:\n see :class:`abceagent.Trade`\n\nLogging and data creation:\n see :class:`abceagent.Database` and :doc:`simulation_results`\n\nMessaging between agents:\n see :class:`abceagent.Messaging`.\n\n.. autoexception:: abce.NotEnoughGoods\n\n.. 
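The Neuron class in the network record above encodes the logistic-sigmoid derivative through the identity s'(x) = s(x) * (1 - s(x)), which is what keeps backpropagation cheap: the derivative reuses the already-computed activation. A standalone numeric check of that identity:

import numpy as np

def sigmoid(x):
    return 1.0 / (1.0 + np.exp(-x))

def dsigmoid(x):
    # same identity as Neuron.dActivationFunc above
    s = sigmoid(x)
    return s * (1.0 - s)

x = np.linspace(-4.0, 4.0, 9)
h = 1e-6
numeric = (sigmoid(x + h) - sigmoid(x - h)) / (2.0 * h)   # central difference
print(np.max(np.abs(numeric - dsigmoid(x))))              # ~1e-10, identity holds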
[1] or :class:`abceagent.FirmMultiTechnologies` for complex technologies.\n\"\"\"\nfrom collections import OrderedDict\n\n\nclass Database:\n \"\"\" The database class \"\"\"\n def __init__(self, id, agent_parameters, simulation_parameters, group, trade_logging,\n database, check_unchecked_msgs, expiring, perishable, resource_endowment, start_round=None):\n super(Database, self).__init__(id, agent_parameters, simulation_parameters, group, trade_logging, database,\n check_unchecked_msgs, expiring, perishable, resource_endowment, start_round)\n\n def log(self, action_name, data_to_log):\n \"\"\" With log you can write the models data. Log can save variable\n states and and the working of individual functions such as production,\n consumption, give, but not trade(as its handled automatically). Sending\n a dictionary instead of several using several log statements with a\n single variable is faster.\n\n Args:\n 'name'(string):\n the name of the current action/method the agent executes\n\n data_to_log:\n a variable or a dictionary with data to log in the the database\n\n Example::\n\n self.log('profit', profit)\n\n self.log('employment_and_rent',\n {'employment': self['LAB'],\n 'rent': self['CAP'],\n 'composite': self.composite})\n\n self.log(self.produce_use_everything())\n\n See also:\n :meth:`~abecagent.Database.log_nested`:\n handles nested dictianaries\n\n :meth:`~abecagent.Database.log_change`:\n loges the change from last round\n\n :meth:`~abecagent.Database.observe_begin`:\n\n \"\"\"\n if self.log_this_round:\n try:\n data_to_write = {'%s_%s' % (str(action_name), str(\n key)): data_to_log[key] for key in data_to_log}\n except TypeError:\n data_to_write = {str(action_name): data_to_log}\n\n self.database_connection.put(\n [\"log\",\n self.group,\n self.id,\n self.round,\n data_to_write,\n action_name])\n\n def log_change(self, action_name, data_to_log):\n \"\"\" This command logs the change in the variable from the round before.\n Important, use only once with the same action_name.\n\n Args:\n 'name'(string):\n the name of the current action/method the agent executes\n data_to_log:\n a dictianary with data for the database\n\n Examples::\n\n self.log_change('profit', {'money': self['money']]})\n self.log_change('inputs',\n {'money': self.possessions(['money', 'gold', 'CAP', 'LAB')]})\n \"\"\"\n if self.log_this_round:\n data_to_write = {}\n try:\n for key in data_to_log:\n data_to_write['%s_change_%s' % (\n action_name, key)] = (\n data_to_log[key] -\n self._data_to_log_1[action_name][key])\n except KeyError:\n for key in data_to_log:\n data_to_write['%s_change_%s' %\n (action_name, key)] = data_to_log[key]\n data_to_write['id'] = self.id\n self.database_connection.put(\n [\"log\", self.group, data_to_write, str(self.round)])\n\n self._data_to_log_1[action_name] = data_to_log\n\n def observe_begin(self, action_name, data_to_observe):\n \"\"\" observe_begin and observe_end, observe the change of a variable.\n observe_begin(...), takes a list of variables to be observed.\n observe_end(...) writes the change in this variables into the log file\n\n you can use nested observe_begin / observe_end combinations\n\n Args:\n 'name'(string):\n the name of the current action/method the agent executes\n data_to_log:\n a dictianary with data for the database\n\n Example::\n\n self.log('production', {'composite': self.composite,\n self.sector: self.final_product[self.sector]})\n\n ... 
different method ...\n\n self.log('employment_and_rent', {\n 'employment': self['LAB'],\n 'rent': self['CAP']})\n \"\"\"\n if self.log_this_round:\n self._data_to_observe[action_name] = data_to_observe\n\n def observe_end(self, action_name, data_to_observe):\n \"\"\" This command puts in a database called log, whatever values you\n want values need to be delivered as a dictionary:\n\n Args:\n 'name'(string):\n the name of the current action/method the agent executes\n data_to_log:\n a dictianary with data for the database\n\n Example::\n\n self.log('production', {'composite': self.composite,\n self.sector: self.final_product[self.sector]})\n\n ... different method ...\n\n self.log('employment_and_rent', {\n 'employment': self['LAB'],\n 'rent':self['CAP']})\n \"\"\"\n if self.log_this_round:\n before = self._data_to_observe.pop(action_name)\n data_to_write = {}\n for key in data_to_observe:\n data_to_write['%s_delta_%s' % (action_name, key)] = \\\n data_to_observe[key] - before[key]\n data_to_write['id'] = self.id\n self.database_connection.put(\n [\"log\", self.group, data_to_write, str(self.round)])\n\n def _common_log(self, variables, possessions, functions, lengths):\n ret = OrderedDict()\n for var in variables:\n ret[var] = self.__dict__[var]\n for pos in possessions:\n ret[pos] = self._inventory[pos]\n for name, func in functions.items():\n ret[name] = func(self)\n for length in lengths:\n ret['len_' + length] = len(self.__dict__[length])\n return ret\n\n def _agg_log(self, variables, possessions, functions, lengths):\n if self.log_this_round:\n data_to_write = self._common_log(variables,\n possessions,\n functions,\n lengths)\n self.database_connection.put([\"snapshot_agg\",\n str(self.round),\n self.group,\n data_to_write])\n\n def _panel_log(self, variables, possessions, functions, lengths, serial):\n if self.log_this_round:\n data_to_write = self._common_log(variables,\n possessions,\n functions,\n lengths)\n self.database_connection.put([\"log\",\n self.group,\n self.id,\n str(self.round),\n data_to_write,\n serial])\n\n def custom_log(self, method, *args, **kwargs):\n \"\"\" send custom logging commands to database plugin, see :ref:`Database Plugins` \"\"\"\n self.database_connection.put([method, args, kwargs])\n","sub_path":"abce/database.py","file_name":"database.py","file_ext":"py","file_size_in_byte":8972,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"468306595","text":"import uuid\nfrom flask import g\n\nfrom .util import random_string, get_request_json, http\nfrom .global_obj import database as db\nfrom .global_obj import blueprint as bp\nfrom .model import User\nfrom .authentication import salt_available_char, server_hash\n\n\n@bp.route(\"/user\", methods=[\"POST\"])\ndef create_user():\n schema = {\n \"$schema\": \"http://json-schema.org/draft-07/schema#\",\n \"description\": \"create a new user\",\n \"type\": \"object\",\n \"properties\": {\n \"email\": {\n \"description\": \"account email for login\",\n \"type\": \"string\",\n \"format\": \"email\"\n },\n \"shortName\": {\n \"description\": \"short user name for login\",\n \"type\": \"string\"\n },\n \"realPersonInfo\": {\n \"description\": \"information about identity in reality\",\n \"type\": \"object\"\n },\n \"extraInfo\": {\n \"description\": \"extra information to store\",\n \"type\": \"object\"\n },\n \"authentication\": {\n \"description\": \"authentication information\",\n \"type\": \"object\",\n \"properties\": {\n \"salt\": {\n \"description\": 
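The observe_begin/observe_end pair above works by snapshotting the watched values, then emitting one '<action>_delta_<key>' entry per key when the matching end call arrives. The same bookkeeping as a standalone sketch (the class name is hypothetical):

class DeltaObserver:
    def __init__(self):
        self._before = {}

    def begin(self, action, values):
        # snapshot the watched quantities at the start of the action
        self._before[action] = dict(values)

    def end(self, action, values):
        before = self._before.pop(action)
        # one delta entry per watched key, mirroring '%s_delta_%s' above
        return {'%s_delta_%s' % (action, k): values[k] - before[k] for k in values}

obs = DeltaObserver()
obs.begin('production', {'money': 100})
print(obs.end('production', {'money': 140}))   # {'production_delta_money': 40}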
\"random salt generated at client\",\n \"type\": \"object\",\n \"properties\": {\n \"front\": {\n \"type\": \"string\"\n },\n \"back\": {\n \"type\": \"string\"\n }\n },\n \"required\": [\"front\", \"back\"]\n },\n \"hashResult\": {\n \"type\": \"string\"\n }\n },\n \"required\": [\"salt\", \"hashResult\"]\n }\n },\n \"required\": [\"email\", \"shortName\", \"realPersonInfo\", \"extraInfo\", \"authentication\"],\n \"additionalProperties\": False\n }\n instance = get_request_json(schema=schema)\n\n user = User()\n for key in [\"email\", \"shortName\", \"realPersonInfo\", \"extraInfo\"]:\n value = instance[key]\n setattr(user, key, value)\n\n server_random_salt = {\n \"front\": random_string(salt_available_char, 8),\n \"back\": random_string(salt_available_char, 8)\n }\n server_hash_result = server_hash(server_random_salt, instance[\"authentication\"][\"hashResult\"])\n user.authentication = {\n \"clientSalt\": instance[\"authentication\"][\"salt\"],\n \"serverSalt\": server_random_salt,\n \"hashResult\": server_hash_result\n }\n\n user.createTime = g.request_datetime\n user.uuid = uuid.uuid1()\n db.session.add(user)\n db.session.commit()\n\n return http.Success({\n \"userID\": user.id\n })\n","sub_path":"ccnuoj_webapi/src/user.py","file_name":"user.py","file_ext":"py","file_size_in_byte":2883,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"488346660","text":"import re\n\nurls=set()\n\nstr1='http://mail.baidu.com'\nstr2='http://music.douban.com'\nstr3='http://mail.douban.com'\nstr4='http://baike.dd.baidu.com'\nstr5='./test.html'\n\nstrs=[str1,str2,str3,str4,str5]\n\nkeystr='baidu.com'\n\nfor str in strs:\n if keystr in str or 'http' not in str:\n print('found inner:',str)\n else:\n print('found outer',str)\n urls.remove(str)\n\nstrurls='\\n'.join(urls)\n\nprint(strurls)","sub_path":"spider/strmatch.py","file_name":"strmatch.py","file_ext":"py","file_size_in_byte":421,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"392121560","text":"#!/usr/bin/python\nimport sys\n\nclass Solution:\n \"\"\"\n @param: triangle: a list of lists of integers\n @return: An integer, minimum path sum\n \"\"\"\n def minimumTotal(self, triangle):\n # write your code here\n result = triangle[-1][:]\n for r in range(len(triangle)-2, -1, -1):\n for i in range(len(triangle[r])):\n result[i] = min(result[i]+triangle[r][i], result[i+1]+triangle[r][i])\n return result[0]\n\ndef main():\n aa = Solution()\n return 0\n\nif __name__ == \"__main__\":\n sys.exit(main())","sub_path":"LintCode/triangle.py","file_name":"triangle.py","file_ext":"py","file_size_in_byte":556,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"368932897","text":"\n#verilen sayının faktöriyelini hesaplama...\n\ndef factorial(n):\n return n*factorial(n-1) if n>1 else 1\n\n\ndef factorial(Z):\n total = 1\n for i in range(1, Z+1):\n total *= i\n return total\n\n\n\n\n\ndef factorial(Z):\n\tif Z == 0:\n\t\treturn 1\n\telif Z == 1:\n\t\treturn 1\n\telif Z > 1:\n\t\treturn Z * factorial(Z-1)\n\telse:\n\t\treturn None\n\ndef factorial(Z):\n n = 1\n if int(Z) > 0:\n for i in range(1,int(Z)+1):\n n = n*i\n return n\n else:\n return 1\n\n\n\ndef factorial(Z):\n\tif Z == 0:\n\t\treturn 1\n\tprod = 1\n\tfor i in range(1,Z+1):\n\t\tprod*=i\n\treturn prod\n\n\n\n\ndef factorial(n):\n\tif n==0: return 1\n\treturn n*factorial(n-1)\n\n\n\n\n\n\ndef factorial(Z):\n\treturn 1 if Z <= 1 else 
Z*factorial(Z-1)\n\n\n\n\n\ndef factorial(Z):\n\tr=1\n\tdef rf(num):\n\t\tnonlocal r\n\t\tif num==0:\n\t\t\treturn r\n\t\telse:\n\t\t\tr=r*num\n\t\t\tnum=num-1\n\t\t\treturn rf(num)\n\treturn rf(Z)\n","sub_path":"python_exercises/faktoriyel_calc.py","file_name":"faktoriyel_calc.py","file_ext":"py","file_size_in_byte":831,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"394332106","text":"\"\"\"\nDeveloper - Vinit Nayak\nMaintainer - \"\"\nVersion - v1.0\nDate - June 2012\nTest Case ID - otc_3\nTest Description - '****To check - Contents on Homepage ' \"\"\"\n\nfrom constants import *\nfrom testconfiguration import *\nfrom lib import *\n\n\nclass otc_3(unittest.TestCase):\n \n def setUp(self):\n initiate_setup(self)\n self.resultFile.write(\"Test Areas - Element Inspection\\n\")\n\n def test_entry_via_drop_down(self):\n wd_handle = self.wd_handle\n resultFile = self.resultFile\n wd_handle.get(HOMEPAGE)\n #Check for main \"Amazon MP3\" header title\n try:\n mp3_header = wd_handle.find_element_by_xpath(HOMEPAGE_STRING_XPATH)\n if mp3_header.text == HOMEPAGE_STRING:\n resultFile.write(\" Homepage string present.\\n\")\n except:\n resultFile.write(TEST_FAILED + \" Homepage string NOT present.\\n\")\n #Check for \"Browse MP3s\" header on left panel\n try:\n browse_mp3s = wd_handle.find_element_by_xpath(HOMEPAGE_BROWSE_SIGN_UP)\n if browse_mp3s.text == HOMEPAGE_BROWSE_MP3s_STRING:\n resultFile.write(\" Homepage string present.\\n\")\n except:\n resultFile.write(\" Homepage string NOT present.\\n\")\n \n\n if mp3_header and browse_mp3s:\n resultFile.write(TEST_PASSED)\n else:\n resultFile.write(TEST_FAILED)\n\n def tearDown(self):\n self.filename = __file__\n initiate_teardown(self)\n\nif __name__ == \"__main__\":\n unittest.main()","sub_path":"Selenium/otc_3.py","file_name":"otc_3.py","file_ext":"py","file_size_in_byte":1584,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"194524306","text":"from Japanese.src_updation.play_songs1 import trial_play_main_jap\nfrom English.src_updation.play_songs1 import trial_play_main_eng\nimport Japanese.src_updation.play_songs1\nimport English.src_updation.play_songs1\n\nimport pymysql\n\nmydb = pymysql.connect(\n host=\"localhost\",\n user=\"root\",\n passwd=\"\",\n database=\"headphones_demo\",\n autocommit=True\n)\n\nif __name__=='__main__':\n mycursor = mydb.cursor()\n mycursor.execute(\"select language from controls where id=0\")\n value = list(mycursor.fetchall())[0][0]\n while True:\n mycursor.execute(\"select language from controls where id=0\")\n value = list(mycursor.fetchall())[0][0]\n if value == 1:\n trial_play_main_eng()\n elif value == 2:\n trial_play_main_jap()\n else:\n print(\"No language selected\")\n continue\n","sub_path":"start_file.py","file_name":"start_file.py","file_ext":"py","file_size_in_byte":852,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"393888093","text":"#!/usr/bin/python3\n# coding: utf-8\nfrom flask import Blueprint, jsonify\nfrom flask import request\n\nfrom mainapp.models import TCategory\nimport db\n\nfrom mainapp import serializor\n\nblue = Blueprint('category_blue', __name__)\n\n\n@blue.route('/all_/', methods=('GET',))\ndef all_category():\n # [, <>, ]\n cate_list = db.session.query(TCategory).filter_by(parent_id=0).all()\n\n datas = {\n 'status': 0,\n 'data': {\n 'cates': serializor.dumps(cate_list),\n 'childs': 
serializor.dumps(cate_list[0].childens)\n }\n }\n\n return jsonify(datas)\n","sub_path":"mainapp/views/category_api.py","file_name":"category_api.py","file_ext":"py","file_size_in_byte":595,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"547799306","text":"#!/usr/bin/python3\n\nfrom gmxsmdscript import *\nimport sys\nimport argparse\nimport os\n\nparser = argparse.ArgumentParser()\nparser.add_argument(\"-s\", dest=\"structure\", required=True,\n help=\"Structure filename directory '/home/patgen/Documentos/Dynamics/ProjectName'\", type=lambda f: open(f))\nparser.add_argument(\"-f\", dest=\"folder\", required=True,\n help=\"Desired project folder name\")\nparser.add_argument(\"-mdt\", dest=\"mdtime\", required=True,\n help=\"MD simulation time in nsteps (2 * 500000 = 1000 ps (1 ns)\")\nparser.add_argument(\"-l\", dest=\"ligname\", required=False,\n help=\"Ligand filename .gro\")\nparser.add_argument(\"-grps\", dest=\"grps\", required=True,\n help=\"TC-TGRPS for simulation (ligand name ex: DLG)\")\nargs = parser.parse_args()\n\ndef yes_or_no(question):\n reply = str(input(question+' (y/n): ')).lower().strip()\n if reply[0] == 'y':\n return 1\n elif reply[0] == 'n':\n raise SystemExit\n else:\n return yes_or_no(\"Would you like to run the simulation? (y/n) \")\n\nprint(\"This Script was made by Heitor Sampaio\")\nwhile True:\n print(\"The pourpose of this script is to run a Simple MD simulation...\")\n if(yes_or_no('Would you like to run the simulation? ')):\n break\nprint(\"done\")\n\nprint(args.structure.name)\nprint(args.folder)\nprint(args.mdtime)\nprint(args.ligname)\nprint(args.grps)\n\nwith system(args.folder):\n\n # System preparation\n pdb2gmx(\n ff = 'gromos53a6',\n water = 'spce',\n f = [args.structure.name],\n o = 'protein.gro',\n p = 'topol.top',\n ignh = 'true'\n )\n\n def yes_or_no(question):\n reply = str(input(question+' (y/n): ')).lower().strip()\n if reply[0] == 'y':\n return 1\n elif reply[0] == 'n':\n raise SystemExit\n else:\n return yes_or_no(\"Are you combine the protein and ligand to complex.gro and add the Protein-Ligand Topologies to topol.top? (y/n) \")\n\n print(\n \"Please combine the protein.gro and the ligand.gro to a complex using the following command 'python combineGRO.py protein.gro ligand.gro' and then \"\n )\n while True:\n print(\"Combine the Protein-Ligand Topologies to topol.top as showed in tutorial\")\n if(yes_or_no('Are you combine the protein and ligand to complex.gro and add the Protein-Ligand Topologies to topol.top? ')):\n break\n print(\"done\")\n\n editconf(\n f = 'complex.gro',\n o = 'pbc.gro',\n bt = 'cubic',\n d = 1.0\n )\n\n solvate(\n cp = 'pbc.gro',\n cs = 'spc216.gro',\n o = 'sol.gro',\n p = 'topol.top'\n )\n\n grompp(\n maxwarn = 2,\n f = MDP['ions.mdp'],\n c = 'sol.gro',\n o = 'ions.tpr',\n p = 'topol.top'\n )\n\n genion(\n s = 'ions.tpr',\n o = 'ions.gro',\n neutral = 1,\n p = \"topol.top\",\n stdin = \"\"\"\n SOL\n \"\"\"\n )\n\n # Steep descent energy minimization\n grompp(\n maxwarn = 2,\n f = MDP['em.mdp'],\n c = 'ions.gro',\n o = 'em.tpr',\n p = 'topol.top',\n )\n mdrun(deffnm = 'em' ,\n cpi = 'md.cpt',\n #append = 'true',\n #nt = 20,\n #pinoffset = 1,\n #pinstride = 2,\n #gpu_id = \"01\",\n v = 'true',\n #resethway = 'true',\n #pin = 'on',\n #nb = 'gpu'\n )\n\n energy(\n f = 'em.edr',\n o = 'potential.xvg',\n stdin = \"\"\"\n 10 0\n \"\"\"\n )\n\n make_ndx(\n f = [args.ligname],\n o = 'index_lig.ndx',\n stdin = '''\n 0 & ! 
a H*\n q | q\n '''\n )\n\n genrestr(\n f = [args.ligname],\n n = 'index_lig.ndx',\n o = 'posre_lig.itp',\n fc = [1000 , 1000 , 1000],\n stdin = \"\"\"\n 0\n \"\"\"\n )\n\n def yes_or_no(question):\n reply = str(input(question+' (y/n): ')).lower().strip()\n if reply[0] == 'y':\n return 1\n elif reply[0] == 'n':\n raise SystemExit\n else:\n return yes_or_no(\"Are you add the ligand position restrain to topol.top? (y/n) \")\n\n print(\n \"Please add the ligand position restrain to topol.top \"\n )\n while True:\n print(\"Please add the ligand position restrain to topol.top as showed in tutorial\")\n if(yes_or_no('Are you add the ligand position restrain to topol.top? ')):\n break\n print(\"done\")\n\n make_ndx(\n f = 'em.gro',\n o = 'index.ndx',\n stdin = '''\n 1 | 13\n q | q\n '''\n )\n\n #nvt\n grompp(\n f = MDP['nvt.mdp', {\n 'tc-grps' : [args.grps , 'Water_and_ions'] ,\n }],\n c = 'em.gro',\n o = 'nvt.tpr',\n p = 'topol.top',\n\t r = 'em.gro',\n n = 'index.ndx',\n maxwarn = 2\n )\n mdrun(deffnm = 'nvt',\n cpi = 'md.cpt',\n #append = 'true',\n #nt = 20,\n #pinoffset = 1,\n #pinstride = 2,\n #gpu_id = \"01\",\n v = 'true',\n #resethway = 'true',\n #pin = 'on',\n #nb = 'gpu'\n )\n energy(\n f = 'nvt.edr',\n o = 'temperature.xvg',\n stdin = \"\"\"\n 15 0\n \"\"\"\n )\n\n #npt\n grompp(\n f = MDP['npt.mdp', {\n 'tc-grps' : [args.grps , 'Water_and_ions'] ,\n }],\n c = 'nvt.gro',\n o = 'npt.tpr',\n p = 'topol.top',\n t = 'nvt.cpt',\n\t r = 'nvt.gro',\n n = 'index.ndx',\n maxwarn = 2\n )\n mdrun(deffnm = 'npt' ,\n cpi = 'md.cpt',\n #append = 'true',\n #nt = 20,\n #pinoffset = 1,\n #pinstride = 2,\n #gpu_id = \"01\",\n v = 'true',\n #resethway = 'true',\n #pin = 'on',\n #nb = 'gpu'\n )\n energy(\n f = 'npt.edr',\n o = 'pressure.xvg',\n stdin = \"\"\"\n 16 0\n \"\"\"\n )\n energy(\n f = 'npt.edr',\n o = 'density.xvg',\n stdin = \"\"\"\n 22 0\n \"\"\"\n )\n\n # Molecular dynamics\n grompp(\n f = MDP['md.mdp', {\n 'nsteps' : args.mdtime ,\n 'tc-grps' : [args.grps , 'Water_and_ions'] ,\n }],\n c = 'npt.gro',\n o = 'md.tpr',\n p = 'topol.top',\n t = 'npt.cpt',\n\t r = 'npt.gro',\n n = 'index.ndx',\n maxwarn = 2\n )\n mdrun(deffnm = 'md',\n cpi = 'md.cpt',\n #append = 'true',\n #nt = 20,\n #pinoffset = 1,\n #pinstride = 2,\n #gpu_id = \"01\",\n v = 'true',\n #resethway = 'true',\n #pin = 'on',\n #nb = 'gpu'\n )\n trjconv(\n s = 'md.tpr',\n f = 'md.xtc',\n o = 'md_noPBC.xtc',\n pbc = 'mol',\n ur = 'compact',\n stdin = \"\"\"\n 0\n \"\"\"\n )\n rms(\n s = 'md.tpr',\n f = 'md_noPBC.xtc',\n o = 'rmsd.xvg',\n tu = 'ns',\n stdin = \"\"\"\n 4 4\n \"\"\"\n )\n gyrate(\n s = 'md.tpr',\n f = 'md_noPBC.xtc',\n o = 'gyrate.xvg',\n stdin = \"\"\"\n 1\n \"\"\"\n )\n sasa(\n s = 'md.tpr',\n f = 'md_noPBC.xtc',\n o = 'sasa.xvg',\n stdin = \"\"\"\n 1\n \"\"\"\n )\n rmsf(\n s = 'md.tpr',\n f = 'md_noPBC.xtc',\n o = 'rmsf.xvg',\n res = 'true',\n stdin = \"\"\"\n 1\n \"\"\"\n )\n rmsf(\n s = 'md.tpr',\n f = 'md_noPBC.xtc',\n o = 'bfactor.xvg',\n oq = 'bfactor.pdb',\n stdin = \"\"\"\n 1\n \"\"\"\n )\n hbond(\n s = 'md.tpr',\n f = 'md_noPBC.xtc',\n dist = 'hbond.xvg',\n g = 'hbond.log',\n stdin = \"\"\"\n 1 1\n \"\"\"\n )\n","sub_path":"script/complex/complex_nogpu.py","file_name":"complex_nogpu.py","file_ext":"py","file_size_in_byte":7831,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"633155728","text":"from django.conf.urls import url\n\nfrom . 
import views\n\nurlpatterns = [\n url(r'^$', views.contact, name='contact'),\n url(r'^feedback/$', views.feedback, name='feedback'),\n url(r'^gullkorn/$', views.feedback, {'template':'gullkorn.html', 'send_to':'redaktor@nabla.ntnu.no'}, name='gullkorn'),\n url(r'^success/$', views.success, name='success'),\n]\n","sub_path":"nablapps/contact/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":357,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"434313465","text":"from __future__ import unicode_literals\n\nimport codecs\nimport collections\nimport copy\nfrom functools import wraps\nimport os\nimport six\nimport shutil\nimport tempfile\nfrom unittest import TestCase\n\nfrom click.testing import CliRunner\n\nfrom taxi.backends import BaseBackend, PushEntryFailed, PushEntriesFailed\nfrom taxi.backends.registry import backends_registry\nfrom taxi.commands.base import cli\nfrom taxi.projects import ProjectsDb\nfrom taxi.utils.file import expand_date\n\n\nclass TestBackendEntryPoint(object):\n \"\"\"\n Dedicated backend for tests. Entries with the alias `fail` will fail when\n trying to push them.\n \"\"\"\n class TestBackend(BaseBackend):\n def __init__(self, *args, **kwargs):\n super(TestBackendEntryPoint.TestBackend, self).__init__(\n *args, **kwargs\n )\n self.entries = []\n\n def push_entry(self, date, entry):\n self.entries.append(entry)\n\n if entry.alias == 'fail':\n raise PushEntryFailed()\n\n def post_push_entries(self):\n failed_entries = {}\n\n for entry in self.entries:\n if entry.alias == 'post_push_fail':\n failed_entries[entry] = 'foobar'\n\n if failed_entries:\n raise PushEntriesFailed(entries=failed_entries)\n\n def load(self):\n return self.TestBackend\n\n\nclass CommandTestCase(TestCase):\n def setUp(self):\n _, self.config_file = tempfile.mkstemp()\n _, self.entries_file = tempfile.mkstemp()\n self.taxi_dir = tempfile.mkdtemp()\n # Keep the original entry points to restore them in tearDown\n self.backends_original_entry_points = backends_registry._entry_points\n\n # Hot swap the entry points from the backends registry with our own\n # test backend. 
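The setUp comment above describes swapping the registry's entry points for test doubles rather than registering them through setup.py. The same swap-and-restore pattern, reduced to a sketch (the registry attribute and names are illustrative, mirroring backends_registry._entry_points above):

class FakeBackend:
    def push_entry(self, date, entry):
        pass   # record or assert on the entry as needed

def with_fake_registry(registry, body):
    original = registry._entry_points
    registry._entry_points = {'test': FakeBackend()}
    try:
        return body()   # code under test resolves backends via the registry
    finally:
        registry._entry_points = original   # restore even if the body raises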
This avoids having to register the test backend in the\n # setup.py file\n backends_registry._entry_points = {\n 'test': TestBackendEntryPoint(),\n 'dummy': TestBackendEntryPoint(),\n }\n\n # Create an empty projects db file\n projects_db_file = os.path.join(self.taxi_dir,\n ProjectsDb.PROJECTS_FILE)\n with open(projects_db_file, 'w') as f:\n f.close()\n\n existing_settings = (self._settings\n if hasattr(self, '_settings')\n else {})\n self._settings = recursive_update({\n 'default': {\n 'date_format': '%d/%m/%Y',\n 'editor': '/bin/true',\n 'file': self.entries_file,\n 'use_colors': '0'\n },\n 'backends': {\n 'test': 'test:///',\n 'local': 'dummy:///',\n },\n 'test_aliases': {\n 'alias_1': '123/456',\n 'fail': '456/789',\n 'post_push_fail': '456/789',\n },\n }, existing_settings)\n\n def tearDown(self):\n backends_registry._entry_points = self.backends_original_entry_points\n entries_file = expand_date(self.entries_file)\n\n os.remove(self.config_file)\n if os.path.exists(entries_file):\n os.remove(entries_file)\n shutil.rmtree(self.taxi_dir)\n\n def assertLineIn(self, line, content):\n \"\"\"\n Check that the given line is present in the given content, ignoring\n whitespace differences.\n \"\"\"\n def remove_spaces(text):\n chars_to_strip = [' ', '\\t']\n\n for char in chars_to_strip:\n text = text.replace(char, '')\n\n return text\n\n self.assertIn(\n remove_spaces(line),\n remove_spaces(content),\n \"Line `%s` not found in `%s`\" % (line, content)\n )\n\n\n def settings(self, *args, **kwargs):\n \"\"\"\n Context manager to temporarily override the settings:\n\n with self.settings({'default': {'file': '/tmp/test.txt'}}):\n ...\n \"\"\"\n return override_settings(*args, container=self, **kwargs)\n\n def write_config(self, config):\n with open(self.config_file, 'w') as f:\n for (section, params) in six.iteritems(config):\n f.write(\"[%s]\\n\" % section)\n\n for (param, value) in six.iteritems(params):\n f.write(\"%s = %s\\n\" % (param, value))\n\n def write_entries(self, contents):\n with codecs.open(expand_date(self.entries_file), 'a', 'utf-8') as f:\n f.write(contents)\n\n def read_entries(self):\n with codecs.open(expand_date(self.entries_file), 'r', 'utf-8') as f:\n contents = f.read()\n\n return contents\n\n def run_command(self, command_name, args=None, input=None,\n write_config=True):\n \"\"\"\n Run the given taxi command with the given arguments and options. The\n output of the command is returned as a string.\n\n Args:\n command_name -- The name of the command to run\n args -- An optional list of arguments for the command\n \"\"\"\n if args is None:\n args = []\n\n if write_config:\n self.write_config(self._settings)\n\n args.insert(0, command_name)\n args.insert(0, '--taxi-dir=%s' % self.taxi_dir)\n args.insert(0, '--config=%s' % self.config_file)\n\n runner = CliRunner()\n result = runner.invoke(cli, args, input=input, standalone_mode=False)\n\n if result.exception:\n raise result.exception\n\n return result.output\n\n\nclass override_settings(object):\n \"\"\"\n Allow to temporarily override settings in the given portion of code. 
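Of the three usage modes this settings-override helper supports, the context-manager form is the shortest to read. A hedged usage sketch, assuming a CommandTestCase subclass is in scope; the command name and assertion are illustrative only:

class ExampleTest(CommandTestCase):
    def test_reads_alternate_file(self):
        # temporarily point the 'file' setting somewhere else
        with self.settings({'default': {'file': '/tmp/alt_entries.txt'}}):
            output = self.run_command('status')
        # self._settings is restored once the with-block exits
        self.assertTrue(output is not None)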
Can be\n used as a class decorator, a method decorator or as a context manager.\n Example usage:\n\n @override_settings({'default': {'file': '/tmp/test'txt'}})\n class MyTestClass(CommandTestCase):\n ...\n \"\"\"\n def __init__(self, settings=None, container=None):\n self.settings = settings or {}\n # The container is the test instance that holds the settings\n self.container = container\n\n def __call__(self, func):\n # Class decorator\n if isinstance(func, type):\n self.container = func\n self.enable()\n\n return func\n else:\n # Method decorator. This only works on bound methods since we use\n # the first arg as the settings container\n @wraps(func)\n def inner(*args, **kwargs):\n self.container = args[0]\n\n with self:\n return func(*args, **kwargs)\n\n return inner\n\n def enable(self):\n # The _settings attribute might not exist, eg. if it's used as a class\n # decorator\n if not hasattr(self.container, '_settings'):\n self.container._settings = {}\n\n # Keep a copy of the current settings and override them\n self.original_settings = copy.deepcopy(self.container._settings)\n for section, settings in six.iteritems(self.settings):\n if section not in self.container._settings:\n self.container._settings[section] = {}\n\n for setting, value in six.iteritems(settings):\n self.container._settings[section][setting] = value\n\n def disable(self):\n self.container._settings = self.original_settings\n\n def __enter__(self):\n self.enable()\n\n def __exit__(self, exc_type, exc_value, traceback):\n self.disable()\n\n\ndef recursive_update(d, u):\n \"\"\"\n Recursive dict update.\n \"\"\"\n for k, v in six.iteritems(u):\n if isinstance(v, collections.Mapping):\n r = recursive_update(d.get(k, {}), v)\n d[k] = r\n else:\n d[k] = u[k]\n return d\n","sub_path":"tests/commands/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":7816,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"45155414","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Tue Aug 7 16:20:44 2018\r\n@author: ABittar\r\n\"\"\"\r\n\r\nimport os\r\nfrom ehost_functions import load_mentions_with_attributes\r\n\r\ndef batch_process_directory(pin, full_key=True):\r\n \"\"\"\r\n Get all annotations from the corpus and store a mapping of file names to \r\n annotations.\r\n \"\"\"\r\n global_annotations = {}\r\n \r\n d_list = [os.path.join(pin, os.path.join(d, 'saved')) for d in os.listdir(pin) if 'config' not in d and 'annotationadmin' not in d and '.' 
not in d]\r\n\r\n for d in d_list:\r\n f_list = [os.path.join(d,f) for f in os.listdir(d) if f.endswith('knowtator.xml')]\r\n \r\n for f in f_list:\r\n curr_annotations = load_mentions_with_attributes(f, full_key=full_key)\r\n global_annotations.update(curr_annotations)\r\n \r\n return global_annotations\r\n\r\ndef get_value(dic, key):\r\n if (key in dic):\r\n return dic[key]\r\n else:\r\n return \"null\"\r\n\r\ndef get_value2(dic, key):\r\n if (key in dic):\r\n return dic[key]\r\n else:\r\n return \"null\"\r\n\r\n\r\n#### MANUAL ANNOTATIONS #####\r\n#annotator = 'Jack'\r\n#dir_1 = \"T:/Natalia Viani/annotation_onset_mention/\"+annotator+\"/first_ref_extended/\"\r\n#batches = list(range(1,21))\r\n#batches = [str(b) for b in batches]\r\n\r\n#annotator = 'RS'\r\n#dir_1 = \"T:/Natalia Viani/annotation_onset_mention/\"+annotator+\"/first_referrals/\"\r\n#batches=['1']\r\n\r\nannotator = 'GATE'\r\ndir_1 = \"T:/Natalia Viani/annotation_prescription/\"+annotator+\"/drug_annotation/Schizophrenia\\\\\"\r\nbatches = list(range(1,6))\r\nbatches = [str(b) for b in batches]\r\n\r\nwrite_file = True\r\nif(write_file):\r\n #text_file = open(\"./first_referrals_extended_onset/output_\"+annotator+\".txt\", \"w\")\r\n #text_file = open(\"./first_referrals_0diff/output_\"+annotator+\".txt\", \"w\") \r\n text_file = open(\"./prescriptions/Schizophrenia/output_\"+annotator+\"_drug_anno_batch1to5\"+\".txt\", \"w\")\r\n \r\nfor batch in batches:\r\n if(os.path.isdir(dir_1+'batch_'+batch)):\r\n \r\n #read corpus file\r\n pin = dir_1+'batch_'+batch\r\n d_list = [os.path.join(pin, os.path.join(d, 'corpus')) for d in os.listdir(pin) if 'config' not in d and 'annotationadmin' not in d and '.' not in d]\r\n all_files = []\r\n for d in d_list:\r\n f_list = [os.path.join(d,f) for f in os.listdir(d) if f.endswith('.txt')]\r\n for f in f_list:\r\n all_files.append(f.split(\"\\\\\")[-3]+\"\\\\\"+f.split(\"\\\\\")[-1].split(\".\")[0])\r\n \r\n annotations = batch_process_directory(pin, full_key=True)\r\n #annotations = batch_process_directory(dir_1+'set_0_diff_batch'+batch, full_key=True)\r\n saved_files = []\r\n for path, annotationList in annotations.items():\r\n docPath = path.split('/')[5]\r\n batch = docPath.split('\\\\')[1]\r\n pat = docPath.split('\\\\')[2]\r\n docName = docPath.split('\\\\')[4].split('.')[0]\r\n if(write_file):\r\n for key, value in annotationList.items():\r\n line = batch+\"\\t\"+pat+\"\\t\"+docName+\"\\t\"+value['start']+\"\\t\"+value['end']+\"\\t\"+get_value2(value, 'Subject')+\"\\t\"+get_value2(value, 'drug')+\"\\t\"+get_value2(value, '1_INITIATION')+\"\\t\"+get_value2(value, '1_INITIATION_time')+\"\\t\"+get_value2(value, '2_CESSATION')+\"\\t\"+get_value2(value, '2_CESSATION_time')+\"\\t\"+get_value(value, 'Modality')+\"\\t\"+get_value(value, 'dose_unit')+\"\\t\"+get_value(value, 'dose_value')+\"\\t\"+get_value(value, 'frequency')+\"\\t\"+get_value(value,'interval')+\"\\t\"+get_value(value,'route')+\"\\t\"+get_value(value,'when')+\"\\t\"+get_value(value,'type')+\"\\t\"+value['text']+\"\\t\"+get_value(value,'value')\r\n if(value['comment'] is not None):\r\n line = line+\"\\t\"+value['comment']\r\n text_file.write(line+\"\\n\")\r\n saved_files.append(pat+\"\\\\\"+docName)\r\n \r\n saved_files = list(dict.fromkeys(saved_files))\r\n missing = [s for s in all_files if s not in saved_files]\r\n print(batch, missing)\r\n \r\nif(write_file): \r\n text_file.close()\r\n \r\n \r\n'''\r\n###SUTIME OUTPUT###\r\nannotator = 'SUTime'\r\ndir_1 = \"S:/All 
apps/Development/Natalia/annotation_projects/first_referrals_2018/extraction_1_no_overlap_extended/\"\r\nbatches = list(range(1,21))\r\nbatches = [str(b) for b in batches]\r\n\r\n#annotator = 'SUTime'\r\n#dir_1 = \"S:/All apps/Development/Natalia/annotation_projects/early_services/set_0_diff/\"\r\n#batches=['RP','RS']\r\n\r\nwrite_file_SUTime = True\r\nif(write_file_SUTime):\r\n text_file = open(\"./first_referrals_extended_onset/output_\"+annotator+\".txt\", \"w\")\r\n #text_file = open(\"./early_services_0diff/output_\"+annotator+\".txt\", \"w\")\r\n\r\n for batch in batches:\r\n annotations = batch_process_directory(dir_1+'batch_'+batch+\"_preannotated\", full_key=True)\r\n for path, annotationList in annotations.items():\r\n docPath = path.split('/')[6]\r\n batch = path.split('/')[7]\r\n pat = batch.split('\\\\')[1]\r\n docName = batch.split('\\\\')[3].split('.')[0]\r\n for key, value in annotationList.items():\r\n ann_class = value['class']\r\n if(ann_class==\"UNDEF\"):\r\n line = ''\r\n text = value['text']\r\n text = text.replace(\"\\n\",\" \")\r\n line = batch.split('\\\\')[0]+\"\\t\"+pat+\"\\t\"+docName+\"\\t\"+value['start']+\"\\t\"+value['end']+\"\\t\"+text+\"\\t\"+get_value(value, 'value')\r\n if(value['comment'] is not None):\r\n line = line+\"\\t\"+value['comment']\r\n text_file.write(line+\"\\n\")\r\n \r\n text_file.close()\r\n'''","sub_path":"python scripts/ehost_annotation_reader_prescription_all_attributesv2_drug_anno.py","file_name":"ehost_annotation_reader_prescription_all_attributesv2_drug_anno.py","file_ext":"py","file_size_in_byte":5658,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"553449401","text":"def solve(inp_arr):\n ans = 0\n dx = inp_arr[2] - inp_arr[0]\n dy = inp_arr[3] - inp_arr[1]\n n = dx + dy - 1\n m = min(dx, dy)\n if m == 0:\n ans = 1\n elif (dx, dy) == (1, 1):\n ans = 2\n else:\n if n == 2*m - 1:\n ans = m*m + 1\n else:\n ans = m*(n + 1 - m) + 1\n return ans\n\n\n# fin = open('input.txt', 'r')\n# fout = open('output.txt', 'w')\n\nt = int(input())\n# t = int(fin.readline())\nfor ___ in range(t):\n # inp_arr = [int(k) for k in fin.readline().split(' ')]\n inp_arr = [int(k) for k in input().split(' ')]\n answer = solve(inp_arr)\n # fout.write(str(answer) + '\\n')\n print(answer)\n\n# fin.close()\n# fout.close()","sub_path":"Round_645_Div_2/C/C_1.py","file_name":"C_1.py","file_ext":"py","file_size_in_byte":694,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"117830330","text":"import os, sys\nCW_DIR = os.getcwd()\nBASE_DIR = os.path.dirname(os.path.abspath(__file__))\nCONFIG_PATH = os.path.abspath(os.path.join(BASE_DIR, \"configs/train.yaml\"))\nsys.path.append(os.path.abspath(os.path.join(BASE_DIR, \"../../../\"))) # for package path\n\nimport hydra\nimport omegaconf\nimport numpy as np\nfrom tqdm import tqdm\nfrom tensorboardX import SummaryWriter\n\nimport torch\nfrom torch.utils.data import DataLoader\n\n# tools\nfrom torchpcp.utils import pytorch_tools\nfrom torchpcp.utils.monitor import dict2logger, fix_path_in_configs\nfrom torchpcp.utils.metrics import MultiAssessmentMeter, LossMeter\n\n# env\nfrom model_env import processing, save_params\nfrom model_env import (get_writer, get_model, get_dataset, get_losses, \n get_optimizer, get_scheduler)\n\n@hydra.main(config_path=CONFIG_PATH)\ndef main(cfg:omegaconf.DictConfig):\n # fix paths\n cfg = fix_path_in_configs(CW_DIR, cfg, [[\"dataset\",\"root\"]])\n\n # set a seed \n 
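The training script below delegates seeding to pytorch_tools.set_seed. What such a helper typically covers, as a sketch (torchpcp's actual implementation may differ):

import random
import numpy as np
import torch

def set_seed_sketch(seed, device='cuda', reproducibility=True):
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)              # seeds CPU (and CUDA on recent torch)
    if device != 'cpu':
        torch.cuda.manual_seed_all(seed)
    if reproducibility:
        # trade speed for determinism in cuDNN kernels
        torch.backends.cudnn.deterministic = True
        torch.backends.cudnn.benchmark = False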
pytorch_tools.set_seed(\n cfg.general.seed, \n cfg.general.device, \n cfg.general.reproducibility\n )\n\n # set a device\n cfg.general.device = pytorch_tools.select_device(cfg.general.device)\n\n model = get_model(cfg)\n dataset = get_dataset(cfg)\n criterion = get_losses(cfg)\n optimizer = get_optimizer(cfg, model)\n scheduler = get_scheduler(cfg, optimizer)\n\n # get a logger\n writer = get_writer(cfg)\n\n # start training\n loader = tqdm(range(cfg.general.start_epoch, cfg.general.epochs), \n desc=\"Training\", ncols=70)\n for epoch in loader:\n # print('Epoch {}/{}:'.format(epoch, cfg.general.epochs))\n\n # training\n train_log = train(\n cfg, \n model, \n dataset[\"train\"], \n optimizer, \n criterion, \n scheduler\n )\n\n dict2logger(train_log, writer, epoch, cfg.writer.name)\n\n # save params and model\n if (epoch+1) % cfg.general.save_epoch == 0 and \\\n cfg.general.save_epoch != -1:\n save_params(\"model.path.tar\", epoch+1, cfg, model, optimizer, \n scheduler)\n\n print('Epoch {}/{}:'.format(cfg.general.epochs, cfg.general.epochs))\n save_params(\"f_model.path.tar\", cfg.general.epochs, cfg, model, optimizer, \n scheduler)\n\n writer.close()\n\n print(\"Finish training.\")\n\n# training\ndef train(cfg, model, dataset, optimizer, criterion, scheduler, publisher=\"train\"):\n model.train()\n loader = DataLoader(\n #Subset(dataset[\"train\"],range(320)),\n dataset,\n batch_size=cfg.general.batch_size,\n num_workers=cfg.loader.nworkers,\n pin_memory=True,\n shuffle=True,\n )\n # loader = tqdm(loader, ncols=100, desc=publisher)\n\n acc_meter = MultiAssessmentMeter(\n num_classes=dataset.num_classes, \n metrics=[\"class\",\"overall\",\"iou\"]\n )\n batch_loss = LossMeter()\n meters = (acc_meter, batch_loss)\n\n for data in loader:\n optimizer.zero_grad()\n\n loss = processing(model, criterion, data, meters, cfg.general.device)\n\n loss.backward()\n optimizer.step()\n\n # nan\n if torch.isnan(loss):\n print(\"Train loss is nan.\")\n exit()\n\n # update lr step\n scheduler.step()\n\n # get epoch loss and acc\n train_loss = batch_loss.compute()\n train_acc = acc_meter.compute()\n # print(\n # '-> Train loss: {} mAcc: {} iou: {} oAcc: {}'.format(\n # train_loss, \n # train_acc[\"class\"],\n # train_acc[\"iou\"],\n # train_acc[\"overall\"]\n # )\n # )\n\n # save loss and acc to tensorboard\n lr = scheduler.get_last_lr()[0]\n log_dict = {\n \"lr\": lr,\n \"train/loss\": train_loss,\n \"train/mAcc\": train_acc[\"class\"],\n \"train/oAcc\": train_acc[\"overall\"],\n \"train/IoU\": train_acc[\"iou\"]\n }\n\n return log_dict\n\n\nif __name__ == \"__main__\":\n main()\n\n","sub_path":"examples/PointNet2ASIS/InsSemSeg/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":3954,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"511291628","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\n# Copyright 2014, SIL International\n# All rights reserved.\n#\n# This library is free software; you can redistribute it and/or modify\n# it under the terms of the GNU Lesser General Public License as published\n# by the Free Software Foundation; either version 2.1 of License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU\n# Lesser General Public License for more details.\n#\n# You should also have received a copy of the GNU Lesser General Public\n# License along with this library in the file named \"LICENSE\".\n# If not, write to the Free Software Foundation, 51 Franklin Street,\n# suite 500, Boston, MA 02110-1335, USA or visit their web page on the \n# internet at http://www.fsf.org/licenses/lgpl.html.\n\n\nimport os, sys, StringIO\n\n# Load Rapuma modules\nfrom rapuma.core.tools import Tools\nfrom rapuma.core.user_config import UserConfig\nfrom rapuma.project.proj_setup import ProjDelete\nfrom rapuma.core.proj_data import ProjData\nfrom rapuma.core.proj_local import ProjLocal\n\n# Load GUI modules\nfrom PySide import QtGui, QtCore\nfrom PySide.QtGui import QDialog, QApplication, QMessageBox, \\\n QListWidgetItem, QFileDialog, \\\n QRadioButton\nfrom PySide.QtCore import QPropertyAnimation\nfrom rapuma.dialog import menu_project_cloud_dlg\n\nclass MenuProjectCloudCtrl (QDialog, QPropertyAnimation, menu_project_cloud_dlg.Ui_MenuProjectCloud) :\n\n def __init__ (self, guiSettings, userConfig, parent=None) :\n '''Initialize and start up the UI'''\n\n super(MenuProjectCloudCtrl, self).__init__(parent)\n\n # Setup the GUI\n self.setupUi(self)\n self.connectionActions()\n self.completed = False\n self.tools = Tools()\n self.guiSettings = guiSettings\n self.userConfig = userConfig\n self.local = ProjLocal(self.guiSettings.currentPid)\n self.lineEditProjectLocal.setText(self.local.projHome)\n self.populateProjects()\n\n\n def populateProjects (self) :\n '''Populate the combo box list.'''\n\n self.comboBoxCloudProjects = QtGui.QComboBox()\n dirs = os.listdir(self.local.userCloudStorage)\n dirs.sort()\n for d in dirs :\n self.comboBoxCloudProjects.addItem(d)\n\n\n def main (self) :\n '''This function shows the main dialog'''\n\n self.show()\n\n\n def connectionActions (self) :\n '''Connect to form buttons.'''\n\n self.pushButtonOk.clicked.connect(self.okClicked)\n self.pushButtonLocalBrowse.clicked.connect(self.findProjPath)\n\n\n def findProjPath (self) :\n '''Call a basic find folder widget to get the path we want.'''\n\n dialog = QFileDialog(self, \"Find a Folder\")\n dialog.setDirectory(self.local.projHome)\n dialog.setFileMode(QFileDialog.Directory)\n dialog.setOption(QFileDialog.ShowDirsOnly)\n if dialog.exec_() :\n # When the folder is found, change the right line edit box\n self.lineEditProjectLocal.setText(dialog.selectedFiles()[0])\n\n\n def okClicked (self) :\n '''Execute the OK button.'''\n\n # Look at the radio buttons in the Action group\n # (This was taken from: \n # http://stackoverflow.com/questions/2089897/finding-checked-qradiobutton-among-many-into-a-qvboxlayout )\n actionContents = self.groupBoxAction.layout()\n for i in range(0, actionContents.count()) :\n widget = actionContents.itemAt(i).widget()\n # Find the radio buttons\n if (widget!=0) and (type(widget) is QRadioButton) :\n # Do an action according to wich one was selected\n if i == 0 and widget.isChecked() :\n ProjData(self.guiSettings.currentPid).pushToCloud(self.checkBoxFlush.isChecked())\n elif i == 2 and widget.isChecked() :\n ProjData(self.guiSettings.currentPid).pullFromCloud(self.checkBoxBackup.isChecked(), self.lineEditProjectLocal.text())\n elif i == 4 and widget.isChecked() :\n if os.path.exists(os.path.join(self.local.userCloudStorage, self.comboBoxCloudProjects.currentText())) :\n self.guiSettings.currentPid = self.comboBoxCloudProjects.currentText()\n self.guiSettings.currentGid = ''\n 
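The okClicked walk above scans the action group's layout and type-checks each widget before reading isChecked(). The same scan factored into a helper, as a sketch (widget setup omitted; None/isinstance reads better than the original's `widget != 0` and `type(...) is` tests):

from PySide.QtGui import QRadioButton

def checked_radio_index(group_box):
    layout = group_box.layout()
    for i in range(layout.count()):
        widget = layout.itemAt(i).widget()
        # skip non-widget layout items and anything that is not a radio button
        if widget is not None and isinstance(widget, QRadioButton) and widget.isChecked():
            return i
    return -1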
self.guiSettings.currentCid = ''\n self.guiSettings.setBookmarks()\n ProjData(self.guiSettings.currentPid).pullFromCloud(False, self.lineEditProjectLocal.text())\n self.completed = True\n else :\n QMessageBox.warning(self, \"Error!\", \"\"\"
Cloud ID not valid.\"\"\")\n\n self.close()\n\n\n###############################################################################\n############################## Dialog Starts Here #############################\n###############################################################################\n\nif __name__ == '__main__' :\n\n app = QApplication(sys.argv)\n window = MenuProjectCloudCtrl()\n window.main()\n sys.exit(app.exec_())\n\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"lib/rapuma/dialog/menu_project_cloud_ctrl.py","file_name":"menu_project_cloud_ctrl.py","file_ext":"py","file_size_in_byte":5629,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"89011755","text":"#-*-coding:utf8-*-\n#@Time: 2019-05-30 14:38\n#@Author: caohui\n#@File: 206. 反转链表.py\n#@SoftWare:PyCharm\n\n# Definition for singly-linked list.\n# class ListNode:\n# def __init__(self, x):\n# self.val = x\n# self.next = None\n\n\n\n# coding = utf-8\nclass Node:\n def __init__(self, data=None, next=None):\n self.data = data\n self.next = next\n\n\ndef rev(link):\n pre = link\n cur = link.next\n pre.next = None\n while cur:\n temp = cur.next\n cur.next = pre\n pre = cur\n cur = temp\n return pre\n\n\nif __name__ == '__main__':\n link = Node(1, Node(2, Node(3, Node(4, Node(5, Node(6, Node(7, Node(8, Node(9)))))))))\n root = rev(link)\n while root:\n print(root.data)\n root = root.next\n\n\n# Definition for singly-linked list.\n# class ListNode:\n# def __init__(self, x):\n# self.val = x\n# self.next = None\n\nclass Solution:\n def reverseList(self, head: ListNode) -> ListNode:\n if head is None or head.next is None:\n return head\n pre = head\n cur = head.next\n pre.next = None\n while cur:\n temp = cur.next\n cur.next = pre\n pre = cur\n cur = temp\n return pre\n\n\n\n","sub_path":"LeetCode/206. 反转链表.py","file_name":"206. 
反转链表.py","file_ext":"py","file_size_in_byte":1258,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"196990242","text":"import time\nimport sys\nimport os\nimport shutil\nimport MeCab\n\ntagger = MeCab.Tagger('-F\\s%f[6] -U\\s%m -E\\\\n -u ncnc.dic -d /usr/lib/mecab/dic/mecab-ipadic-neologd')\n\n\ndlist=os.listdir(\"Text\")\nfor dfldr in dlist:\n    if os.path.isfile(\"Text/\"+dfldr)==False:\n        dflst=os.listdir(\"Text/\"+dfldr)\n        for dfile in dflst:\n            dfnm,ext=os.path.splitext(dfile)\n            if ext==\".txt\":\n                fi=open(\"Text/\"+dfldr+\"/\"+dfile,\"r\")\n                if os.path.exists(\"WText/\"+dfldr)==False:\n                    os.mkdir(\"WText/\"+dfldr)\n                fo=open(\"WText/\"+dfldr+\"/\"+dfile,\"w\")\n                line = fi.readline()\n                while line:\n                    result = tagger.parse(line)\n                    fo.write(result[1:]) # skip first \\s\n                    line = fi.readline()\n                fi.close()\n                fo.close()\n","sub_path":"WIkiwakati.py","file_name":"WIkiwakati.py","file_ext":"py","file_size_in_byte":870,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"508650438","text":"from utility import Utility\nfrom datetime import datetime\nimport pandas as pd\nfrom timeseries import TimeSeriesInterval as tsi\nimport numpy as np\n\n# Network Parameters\nnetworkParameters = {}\nnetworkParameters['size'] = 2000\nnetworkParameters['initialTransient'] = 50\nnetworkParameters['spectralRadius'] = 0.79\nnetworkParameters['inputScaling'] = 0.5\nnetworkParameters['reservoirScaling'] = 0.5\nnetworkParameters['leakingRate'] = 0.3\nnetworkParameters['inputConnectivity'] = 0.7\nnetworkParameters['reservoirConnectivity'] = 0.1\nnetworkParameters['arbitraryDepth'] = 24 * 90 # Depth of 90 days\n\n# Dataset\ndirectoryName = \"Datasets/\"\nprofileName = \"Jeep\"\ndatasetFileName = directoryName + profileName + \"_time_interaction_rate.csv\"\n\n# Horizon - used to split the training and testing\ndaysOfHorizon = 14 # 14 days ahead\nhorizon = 24*daysOfHorizon\nutil = Utility.SeriesUtility()\n\n# Step 1 - Convert the dataset into pandas series\nseries = util.convertDatasetsToSeries(datasetFileName)\n\n# Step 2 - Re-sample the series (to hourly)\nresampledSeries = util.resampleSeriesSum(series, \"H\")\ndel series\n\n# Step 3 - Filter recent data\nyearsOfData = 3\nrecentCount = yearsOfData * 365 * 24 + horizon\nfilteredSeries = util.filterRecent(resampledSeries, recentCount)\ndel resampledSeries\n\n# Step 4 - Scale the series\nnormalizedSeries = util.scaleSeries(filteredSeries)\ndel filteredSeries\n\n# Step 5 - Split into training, validation and testing series\ntrainingSeries, testingSeries = util.splitIntoTrainingAndTestingSeries(normalizedSeries, horizon)\navailableSeries = trainingSeries\ntrainingSeries, validationSeries = util.splitIntoTrainingAndValidationSeries(trainingSeries, trainingSetRatio=0.9)\n\n# Step 6 - Form the feature and target vectors for training\nbestFeaturesIndices, bestFeatures, targetVectors = util.getBestFeatures(trainingSeries, validationSeries, networkParameters)\n\n\n# Step 7 - Train the network\nutil.trainESNWithoutTuning(size=networkParameters['size'], featureVectors=bestFeatures, targetVectors=targetVectors,\n                            initialTransient=networkParameters['initialTransient'],\n                            inputConnectivity=networkParameters['inputConnectivity'], reservoirConnectivity=networkParameters['reservoirConnectivity'],\n                            inputScaling=networkParameters['inputScaling'], reservoirScaling=networkParameters['reservoirScaling'],\n                            spectralRadius=networkParameters['spectralRadius'], 
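# ESN leaky-integrator coefficient: leakingRate (next argument) sets how strongly each reservoir unit low-pass filters its own history; values near 0 give slow dynamics, 1 disables leaking.\n                            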
leakingRate=networkParameters['leakingRate'])\n\n\n# Step 8 - Predict the future\npredictedSeries = util.predict(util.esn, availableSeries, networkParameters['arbitraryDepth'], horizon, bestFeaturesIndices)\n\n\n# Step 9 - De-scale the series\nactualSeries = util.descaleSeries(testingSeries)\npredictedSeries = util.descaleSeries(predictedSeries)\n\n\n# Step 10 - Plot the results\ndetails = profileName + \"_yearsOfData_\" + str(yearsOfData) + \"_horizon_\" + str(daysOfHorizon) + \"_network_size_\" + str(networkParameters['size'])\nutil.plotSeries(\"Outputs/Outputs_\" + str(datetime.now()) + details,\n [actualSeries, predictedSeries], [\"Actual Output\", \"Predicted Output\"], \"Facebook Own Posts Interaction Rate - \"+profileName, \"Interaction Rate\")\n","sub_path":"scripts/experiments/facebook/OwnPosts_InteractionRate/WithTuning/predictSeries_Feature_Selected(Using_Interval).py","file_name":"predictSeries_Feature_Selected(Using_Interval).py","file_ext":"py","file_size_in_byte":3202,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"88009869","text":"# coding: utf-8\n#\n# Copyright 2015 Palantir Technologies, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom __future__ import absolute_import, division, print_function\n\nimport json\nimport sys\n\nfrom typedjsonrpc.errors import Error, InternalError, get_status_code_from_error_code\n\n\nclass TestInternalError(object):\n\n def test_from_error_not_serializable(self):\n class NonSerializableObject(object):\n pass\n try:\n e = Exception()\n e.random = NonSerializableObject()\n raise e\n except Exception:\n wrapped_exc = InternalError.from_error(sys.exc_info(), json.JSONEncoder)\n json.dumps(wrapped_exc.as_error_object())\n\n def test_from_error_serializable(self):\n try:\n e = Exception()\n e.random = {\"foo\": \"bar\"}\n raise e\n except Exception:\n wrapped_exc = InternalError.from_error(sys.exc_info(), json.JSONEncoder)\n json.dumps(wrapped_exc.as_error_object())\n\n\ndef test_get_status_code_from_error_code():\n for type_ in [Error] + Error.__subclasses__():\n status_code = get_status_code_from_error_code(type_.code)\n assert type_.status_code == status_code\n","sub_path":"tests/test_errors.py","file_name":"test_errors.py","file_ext":"py","file_size_in_byte":1725,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"227635577","text":"from boy_ai import*\r\nimport game_framework\r\nimport random\r\nfrom pico2d import*\r\nimport title_state\r\nimport json\r\n\r\n\r\nname = 'MainState'\r\n\r\ndef get_frame_time():\r\n global current_time\r\n\r\n frame_time=get_time()-current_time\r\n current_time+=frame_time\r\n return frame_time\r\n \r\ndef handle_events():\r\n global running\r\n global team\r\n events = get_events()\r\n for event in events:\r\n global team\r\n if event.type==SDL_QUIT:\r\n running=False\r\n elif event.type==SDL_KEYDOWN and event.key==SDLK_ESCAPE:\r\n running=False\r\n else:\r\n team[n].handle_event(event)\r\n 
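\r\n# create_team() below is data-driven: it parses a JSON roster string and maps each member's \"StartState\" name onto a Boy state constant through player_state_table. A minimal sketch of the same pattern (names here are illustrative only):\r\n#   roster = json.loads('{\"Ann\": {\"StartState\": \"LEFT_RUN\", \"x\": 0, \"y\": 0}}')\r\n#   for who, cfg in roster.items():\r\n#       b = Boy()\r\n#       b.name, b.x, b.y = who, cfg['x'], cfg['y']\r\n#       b.state = player_state_table[cfg['StartState']]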
\r\ndef create_team():\r\n\r\n    team_data_json= '{\"Tiffany\":{\"StartState\":\"LEFT_RUN\",\"x\":100,\"y\":100},\"Yuna\":{\"StartState\":\"RIGHT_RUN\",\"x\":100,\"y\":100},\"Sunny\":{\"StartState\":\"LEFT_STAND\",\"x\":100,\"y\":100},\"Yuri\":{\"StartState\":\"RIGHT_STAND\",\"x\":100,\"y\":100},\"Jessica\":{\"StartState\":\"LEFT_RUN\",\"x\":100,\"y\":100}}'\r\n    \r\n    player_state_table={\r\n        \"LEFT_RUN\":Boy.LEFT_RUN,\r\n        \"RIGHT_RUN\":Boy.RIGHT_RUN,\r\n        \"LEFT_STAND\":Boy.LEFT_STAND,\r\n        \"RIGHT_STAND\":Boy.RIGHT_STAND\r\n    }\r\n    team_data=json.loads(team_data_json)\r\n\r\n    team=[]\r\n    for name in team_data:\r\n        player=Boy()\r\n        player.name=name\r\n        player.x=team_data[name]['x']\r\n        player.y=team_data[name]['y']\r\n        player.state=player_state_table[team_data[name]['StartState']]\r\n        team.append(player)\r\n\r\n    return team \r\n\r\ndef handle_mouseevents():\r\n    global running,n,team\r\n    select()\r\n    events=get_events()\r\n    global team\r\n    for event in events:\r\n        if event.type==SDL_MOUSEMOTION:\r\n            team[n].x,team[n].y,=event.x,600-event.y \r\n\r\ncurrent_time = 0.0\r\nframe_time=0.0\r\n\r\n\r\n\r\ndef enter():\r\n    global team,grass,boy,n,running,image,font,char_font\r\n    #team=[Boy() for i in range(member)]\r\n    team=create_team()\r\n    grass = Grass()\r\n    font=load_font('ENCR10B.TTF',50)\r\n    char_font=load_font('ENCR10B.TTF',25)\r\n    running=True\r\n    image=None\r\n    n=0 # select the first team member by default; n is read before any key event can set it\r\n\r\n    \r\n\r\ndef exit():\r\n    global team, grass\r\n    del(team)\r\n    del(grass)\r\n\r\ndef get_frame_time():\r\n\r\n    global current_time,frame_time\r\n    frame_time = get_time() - current_time\r\n    current_time += frame_time\r\n    return frame_time\r\n\r\ndef update():\r\n    global team\r\n    for i in range(member):\r\n        team[i].update(frame_time)\r\n    \r\ndef draw():\r\n    global team,current_time,n\r\n    current_time=get_time() \r\n    frame_time=get_time()-current_time\r\n\r\n    while running:\r\n        update()\r\n        clear_canvas()\r\n        grass.draw()\r\n        font.draw(730,560,str(n))\r\n        char_font.draw(team[n].x-50,team[n].y+50,'This Boy')\r\n        char_font.draw(team[n].x-50,team[n].y-15,str(n))\r\n        for i in range(member):\r\n            team[i].draw()\r\n        frame_time=get_frame_time() \r\n        update_canvas()\r\n        handle_mouseevents()\r\n        delay(0.01)\r\n    delay(0.03)\r\n\r\ndef pause():\r\n    pass\r\n\r\ndef resume():\r\n    pass\r\n\r\ndef select():\r\n    global running,n,team\r\n    events=get_events()\r\n    for event in events:\r\n        if event.type == SDL_KEYDOWN and event.key == SDLK_UP:\r\n            n+=1\r\n            print(n)\r\n            if n>member-1:\r\n                n=0\r\n        elif event.type == SDL_KEYDOWN and event.key == SDLK_DOWN:\r\n            n-=1\r\n            print(n)\r\n            if n<=-1:\r\n                n=member-1\r\n        elif event.type == SDL_KEYDOWN and event.key == SDLK_ESCAPE:\r\n            running=False\r\n            game_framework.change_state(title_state)\r\n        else:\r\n            team[n].handle_event(event) \r\n    \r\n","sub_path":"homework/jin_woo_won/main_state.py","file_name":"main_state.py","file_ext":"py","file_size_in_byte":3622,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"362926074","text":"from flask import Flask, render_template\nimport requests\nimport json\napp = Flask(__name__)\n\n@app.route(\"/\", methods=['GET'])\ndef zyte():\n    zyterequest = requests.get('https://api.github.com/orgs/scrapinghub/repos?sort=name&direction=asc&per_page=10')\n    data = json.loads(zyterequest.content)\n\n    return render_template('index.html', data=data)\n\nif __name__ == \"__main__\":\n    
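# 0.0.0.0 binds the development server on all interfaces so it is reachable from outside a container/VM; prefer a specific host and a real WSGI server in production.\n    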
app.run(host='0.0.0.0')","sub_path":"zyterepos/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":404,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"249721875","text":"from django.contrib.auth import logout\nfrom django.http import HttpResponseRedirect\nfrom django.shortcuts import get_object_or_404\n\nimport jingo\n\nimport amo\nfrom bandwagon.models import Collection\n\nfrom .models import UserProfile\nfrom .signals import logged_out\n\n\ndef profile(request, user_id):\n \"\"\"user profile display page\"\"\"\n user = get_object_or_404(UserProfile, id=user_id)\n\n # get user's own and favorite collections, if they allowed that\n if user.display_collections:\n own_coll = Collection.objects.filter(\n collectionuser__user=user,\n collectionuser__role=amo.COLLECTION_ROLE_ADMIN,\n listed=True).order_by('name')\n else:\n own_coll = []\n if user.display_collections_fav:\n fav_coll = Collection.objects.filter(\n collectionsubscription__user=user,\n listed=True).order_by('name')\n else:\n fav_coll = []\n\n return jingo.render(request, 'users/profile.html',\n {'profile': user, 'own_coll': own_coll,\n 'fav_coll': fav_coll})\n\n\ndef logout_view(request):\n # XXX - we should redirect to /en-US/firefox by default\n redir = request.REQUEST.get('to', '/')\n logout(request)\n # fire logged out signal so we can be decoupled from cake\n response = HttpResponseRedirect(redir)\n logged_out.send(None, request=request, response=response)\n return response\n","sub_path":"apps/users/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1416,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"25747805","text":"__author__ = 'din'\n\nfrom setuptools import setup, find_packages\n\ncommands = {'console_scripts':['fooc=foo_c.gofooc:main',\n 'barc=bar_c.gobarc:main',\n 'quxc=goquxc:main']}\nsetup(\n install_requires=[\"qux_b==4.0.0\"],\n name = \"qux_c\",\n version = \"4.0.0\",\n packages = find_packages(),\n py_modules = ['goquxc'],\n entry_points = commands\n)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":430,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"131827549","text":"#!/usr/bin/env python3\n\n# Use the argparse library to simplify the command-line argument handling\n# https://docs.python.org/3/library/argparse.html\n# https://pymotw.com/3/argparse/\nimport argparse\n\n# Use the json library to simplify the handling of JSON\n# https://docs.python.org/3/library/json.html\n# https://www.w3schools.com/python/python_json.asp\nimport json\n\n# Use the elasticsearch library to simplify accessing\n# https://elasticsearch-py.readthedocs.io/en/6.3.1/\nfrom elasticsearch import Elasticsearch\n\n# This might help to simplify things, but not necessarily\n# https://pypi.org/project/elasticsearch-dsl/\nimport elasticsearch_dsl\n\n# Used to verify address given\n# https://docs.python.org/3/library/ipaddress.html\nimport ipaddress\n\n# This will parse the command-line arguments for our program\n# Very simple example of JSON being used:\n# python apthunter.py -wa '{\"color\":\"Red\"}'\nparser = argparse.ArgumentParser(description=\"A program to hunt for Advanced Persistent Threats (APT). It provides a command-line wrapper for the Federated Security Module (FSM) to query the indicies which contain sensor data in Elasticsearch.\",\n\t prog=\"apthunter\", \n\tepilog=\"Copyright 2019 Wade W. 
Wesolowsky\")\nparser.add_argument(\"-s\",\n\t\"--server\",\n\thelp=\"IP address of the Elasticsearch server. The default server is 127.0.0.1.\",\n\tdefault=\"127.0.0.1\",\n\tdest=\"server_IP\",\n\taction=\"store\")\nparser.add_argument(\"-p\",\n\t\"--port\",\n\thelp=\"Port the Elasticsearch server is running on. The default port is 9200.\",\n\ttype=int,\n\tdefault=9200,\n\tdest=\"port\",\n\taction=\"store\")\nparser.add_argument(\"-ht\",\n\t\"--honeytrap\",\n\thelp=\"Search the honeytrap* index in Elasticsearch. This index will contain HoneyTrap status messages and Honeypot connection attempts. The argument expects the JSON body for the query.\",\n\ttype=json.loads,\n\tdest=\"honeytrap_JSON\",\n\taction=\"store\")\nparser.add_argument(\"-log\",\n\t\"--logstash\",\n\thelp=\"Search the logstash-* index in Elasticsearch. This index will contain Zeek network traffic monitoring information. The argument expects the JSON body for the query.\",\n\ttype=json.loads,\n\tdest=\"logstash_JSON\",\n\taction=\"store\")\nparser.add_argument(\"-pf\",\n\t\"--pfsense\",\n\thelp=\"Search the pfsense-* index in Elasticsearch. This index will contain firewall status messages and Snort alerts. The argument expects the JSON body for the query.\",\n\ttype=json.loads,\n\tdest=\"pfsense_JSON\",\n\taction=\"store\")\nparser.add_argument(\"-ss\",\n\t\"--sweetsecurity\",\n\thelp=\"Search the sweet_security index in Elasticsearch. This index will contain detected device information and port scans. The argument expects the JSON body for the query.\",\n\ttype=json.loads,\n\tdest=\"sweetsecurity_JSON\",\n\taction=\"store\")\nparser.add_argument(\"-ssa\",\n\t\"--sweetsecurityalerts\",\n\thelp=\"Search the sweet_security_alerts index in Elasticsearch. This index will contain new (unique) Sweet Security log events. The argument expects the JSON body for the query.\",\n\ttype=json.loads,\n\tdest=\"sweetsecurityalerts_JSON\",\n\taction=\"store\")\nparser.add_argument(\"-t\",\n\t\"--tardis\",\n\thelp=\"Search the tardis index in Elasticsearch. This index will contain historical hosts, IP addresses, and websites. The argument expects the JSON body of the query.\",\n\ttype=json.loads,\n\tdest=\"tardis_JSON\",\n\taction=\"store\")\nparser.add_argument(\"-wa\",\n\t\"--wazuhalerts\",\n\thelp=\"Search the wazuh-alerts-3.x-* index in Elasticsearch. This index will contain log events above the alert thresholds in Wazuh. The argument expects the JSON body of the query.\",\n\ttype=json.loads,\n\tdest=\"wazuhalerts_JSON\",\n\taction=\"store\")\nparser.add_argument(\"-wm\",\n\t\"--wazuhmonitoring\",\n\thelp=\"Search the wazuh-monitoring-3.x-* index in Elasticsearch. This index will contain all Wazuh monitoring logs. The argument expects the JSON body of the query.\",\n\ttype=json.loads,\n\tdest=\"wazuhmonitoring_JSON\",\n\taction=\"store\")\nparser.add_argument(\"-win\",\n\t\"--winlogbeat\",\n\thelp=\"Search the winlogbeat-* index in Elasticsearch. This index will contain Windows Even logs. 
The argument expects the JSON body of the query.\",\n\ttype=json.loads,\n\tdest=\"winlogbeat_JSON\",\n\taction=\"store\")\n# Store_true will set verbose to True if the argument is specified\nparser.add_argument(\"--verbose\",\n\thelp=\"Outputs useful information to find errors.\",\n\tdefault=False,\n\tdest=\"verbose\",\n\taction=\"store_true\")\n\n# This gets the arguments!\nargs = parser.parse_args()\n\n# Check the IP server address\ntry:\n    ip = ipaddress.ip_address(args.server_IP)\n    #correct IP address found!\nexcept ValueError:\n    print(f\"Address is invalid: {args.server_IP}\")\n    raise SystemExit\n\n# Check the port given\nif (args.port < 1 or args.port > 65535):\n    print(\"Port is invalid: %i\" % args.port)\n    raise SystemExit\n\n# Open an Elasticsearch connection using the argument\nes = Elasticsearch([\n    {'host': args.server_IP, 'port': args.port, 'url_prefix': 'es', 'use_ssl': False},\n])\n\n# Query es at the specified index using the body JSON query\n#res = es.search(index=\"test-index\", body={\"query\": {\"match_all\": {}}})\n\n# Are the arguments initialized?\n# https://stackoverflow.com/questions/30487767/check-if-argparse-optional-argument-is-set-or-not\n\n# Stores the result(s) of the queries\nresults = []\nsensors_used = 0\n\nif args.honeytrap_JSON is not None:\n    try:\n        temp = es.search(index=\"honeytrap\", body=args.honeytrap_JSON)\n        results.append(temp)\n        sensors_used += 1\n    except:\n        #error\n        args.verbose and print(\"honeytrap query failed\")\n\nif args.logstash_JSON is not None:\n    try:\n        temp = es.search(index=\"logstash-*\", body=args.logstash_JSON)\n        results.append(temp)\n        sensors_used += 1\n    except:\n        #error\n        args.verbose and print(\"logstash query failed\")\n\nif args.pfsense_JSON is not None:\n    try:\n        temp = es.search(index=\"pfsense-*\", body=args.pfsense_JSON)\n        results.append(temp)\n        sensors_used += 1\n    except:\n        #error\n        args.verbose and print(\"pfsense query failed\")\n\nif args.sweetsecurity_JSON is not None:\n    try:\n        temp = es.search(index=\"sweet_security\", body=args.sweetsecurity_JSON)\n        results.append(temp)\n        sensors_used += 1\n    except:\n        #error\n        args.verbose and print(\"sweet_security query failed\")\n\nif args.sweetsecurityalerts_JSON is not None:\n    try:\n        temp = es.search(index=\"sweet_security_alerts\", body=args.sweetsecurityalerts_JSON)\n        results.append(temp)\n        sensors_used += 1\n    except:\n        #error\n        args.verbose and print(\"sweet_security_alerts query failed\")\n\nif args.tardis_JSON is not None:\n    try:\n        temp = es.search(index=\"tardis\", body=args.tardis_JSON)\n        results.append(temp)\n        sensors_used += 1\n    except:\n        #error\n        args.verbose and print(\"tardis query failed\")\n\nif args.wazuhalerts_JSON is not None:\n    try:\n        temp = es.search(index=\"wazuh-alerts-3.x-*\", body=args.wazuhalerts_JSON)\n        results.append(temp)\n        sensors_used += 1\n    except:\n        #error\n        args.verbose and print(\"wazuh-alerts query failed\")\n\nif args.wazuhmonitoring_JSON is not None:\n    try:\n        temp = es.search(index=\"wazuh-monitoring-3.x-*\", body=args.wazuhmonitoring_JSON)\n        results.append(temp)\n        sensors_used += 1\n    except:\n        #error\n        args.verbose and print(\"wazuh-monitoring query failed\")\n\nif args.winlogbeat_JSON is not None:\n    try:\n        temp = es.search(index=\"winlogbeat-*\", body=args.winlogbeat_JSON)\n        results.append(temp)\n        sensors_used += 1\n    except:\n        #error\n        args.verbose and print(\"winlogbeat query failed\")\n\n\n# Output the results, recommendation was to use f-string\n# https://saralgyaan.com/posts/f-string-in-python-usage-guide/\n\n# Hit count total (very, very simple metric) to determine presence of APTs\nif len(results) > 0:\n    hits = 0\n    for result in results:\n        hits = hits + result['hits']['total']\n    per_index = hits / sensors_used\n    print(f\"APT Query Hits: {hits}\")\n    print(f\"Hits Per Sensor: {per_index}\")\nelse:\n    print(\"No APT Detected from Queries\")\n\n\n","sub_path":"apthunter.py","file_name":"apthunter.py","file_ext":"py","file_size_in_byte":8048,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"175741796","text":"import pandas as pd\nimport numpy as np\nfrom random import randint\nfrom sklearn.preprocessing import normalize\n\n\ndef get_points(data,num_points,num_days):\n    # first, make sure all of the successful points are included\n    s = data[data.target==1].index.tolist()\n    s = list(filter(lambda x: x>num_days,s))\n    s.extend([randint(num_days,data.shape[0]-12) for _ in range(num_points)])\n    return s\n\ndef get_data_point(index,num_days,data):\n\n    data_point = np.empty(int(num_days/5 * 2 + 3))\n\n    #Posts\n    posts = data.post_number.values[index-50:index]\n    post_averages = np.mean(posts.reshape(-1, 5), axis=1).reshape(1,-1)\n    npa = normalize(post_averages.reshape(1,-1))\n    data_point[0:10] = npa.flatten()\n\n    #posts_significance_factor\n    data_point[10] = data[data.post_number < posts.mean()].shape[0]/data.shape[0]\n\n    #Volume\n    vol = data.dollar_volume.values[index-50:index]\n    vol_averages = np.mean(vol.reshape(-1, 5), axis=1).reshape(1,-1)\n    va = normalize(vol_averages.reshape(1,-1))\n    data_point[11:21] = va.flatten()\n\n    #vol_significance_factor\n    data_point[21] = data[data.dollar_volume < vol.mean()].shape[0]/data.shape[0]\n\n    data_point[22] = data.loc[index].target\n\n    return data_point\n\n\n\nif __name__ == '__main__':\n\n\n\n# NUMBER OF RANDOM POINTS FROM EACH DATASET\n    num_points = 1000\n\n# FEATURES\n\n    # X number of days before prediction\n    num_days = 50\n\n    #MESSAGE BOARD POSTS\n    # Normalize weekly data\n    # Significance Factor\n\n    #DOLLAR VOLUME\n    # Normalize weekly data\n    # Significance Factor\n\n    #BREAKOUT BOARDS\n    #Coming as soon as data is available\n\n    #MOST READ BOARDS\n    #Coming as soon as data is available\n\n    first = True\n    df = pd.read_json('data/stock_list.json')\n    stocks = df.columns.tolist()\n\n    for stock in stocks:\n        print (stock)\n        data = pd.read_csv('data/data/'+stock+'.csv')\n        points = get_points(data,num_points,num_days)\n        for point in points:\n            if first:\n                training_data = get_data_point(point,num_days,data)\n                first=False\n            else:\n                training_data = np.vstack([training_data,get_data_point(point,num_days,data)])\n\n    np.savetxt('data/train.csv',training_data,delimiter=\",\")\n","sub_path":"src/data/training_data.py","file_name":"training_data.py","file_ext":"py","file_size_in_byte":2277,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"445910074","text":"import pandas as pd \nimport numpy as np \nfrom itertools import product\nimport gc\nfrom kaggler.data_io import save_data, load_data, save_array, load_array\n\n#flags\ndebug = False\nclip_y_values = True\nremove_unused_shops = True\nremove_unused_categories = True\nremove_categoricals = True\n\nprint('[INFO] Reading dataset')\nif debug:\n    sales = pd.read_csv('../datasets/sales_train.csv', parse_dates=['date'],date_parser=lambda x: pd.to_datetime(x, format=\"%d.%m.%Y\"), nrows=10000)\nelse:\n    sales = pd.read_csv('../datasets/sales_train.csv', parse_dates=['date'],date_parser=lambda x: pd.to_datetime(x, format=\"%d.%m.%Y\"))\n\ntest_df = 
pd.read_csv('../datasets/test.csv', index_col=0)\nitems = pd.read_csv('../datasets/items.csv')\ncategories = pd.read_csv('../datasets/item_categories.csv')\n\nindex_cols = [ 'shop_id', 'item_id', 'date_block_num']\n\nprint('[INFO] Preparing shop/item matrix')\n# For every month we create a grid from all shops/items combinations from that month\ngrid = [] \nfor block_num in sales['date_block_num'].unique():\n cur_shops = sales[sales['date_block_num']==block_num]['shop_id'].unique()\n cur_items = sales[sales['date_block_num']==block_num]['item_id'].unique()\n grid.append(np.array(list(product(*[cur_shops, cur_items, [block_num]])),dtype='int32'))\n\n#turn the grid into pandas dataframe\ngrid = pd.DataFrame(np.vstack(grid), columns = index_cols,dtype=np.int32)\n\n#get aggregated values for (shop_id, item_id, month)\ngb = sales.groupby(index_cols,as_index=False).agg({'item_cnt_day':'sum'}).rename(columns={'item_cnt_day':'target'})\n\n#join aggregated data to the grid\ndata = pd.merge(grid,gb,how='left',on=index_cols).fillna(0)\n#sort the data\ndata.sort_values(['date_block_num','shop_id','item_id'],inplace=True)\n\ndel gb\ndel grid\ngc.collect()\n\nif clip_y_values:\n print('[INFO] Clipping target values to (0,20)')\n data.target = data.target.clip(0,20)\n\nprint('[INFO] Adding month and year columns')\n# add month and year columns\ntemp = sales.groupby('date_block_num', as_index=False).first()\ntemp['month'] = temp.date.dt.month.astype(int)\ntemp['year'] = temp.date.dt.year.astype(int)\ndata = pd.merge(data, temp.loc[:,['year','month','date_block_num']], on=['date_block_num'], how='left')\n\ndel temp\ngc.collect()\n\nprint('[INFO] Merging train and test set for feature engineering')\n# prepare test set\ntest_df['date_block_num'] = 34\ntest_df['month'] = 11\ntest_df['year'] = 2015\n\nsize_of_training_data = len(data)\n# we concatenate both so we can do feature engineering\ndata = pd.concat([data, test_df], ignore_index=True)\n# reorder columns\ndata = data.loc[:,['year','month','date_block_num','shop_id', 'item_id','target']]\n\n\nprint('[INFO] Creating store bins')\nZ = sales.groupby('shop_id', as_index=False).agg({'item_cnt_day': 'sum'})\nbig_stores = Z.loc[np.where(Z.item_cnt_day >= 100000)[0],['shop_id']]\nlarge_stores = Z.loc[np.where((Z.item_cnt_day < 100000) & (Z.item_cnt_day >= 50000))[0],['shop_id']]\nmedium_stores = Z.loc[np.where((Z.item_cnt_day < 50000) & (Z.item_cnt_day >= 20000))[0],['shop_id']]\nsmall_stores = Z.loc[np.where(Z.item_cnt_day < 20000)[0],['shop_id']]\n\nbig_stores['store_size'] = 'x-large'\nlarge_stores['store_size'] = 'large'\nmedium_stores['store_size'] = 'medium'\nsmall_stores['store_size'] = 'small'\n\nstores = pd.DataFrame()\nstores = stores.append(big_stores)\nstores = stores.append(large_stores)\nstores = stores.append(medium_stores)\nstores = stores.append(small_stores)\n\ndata = pd.merge(data, stores, on=['shop_id'], how='left')\n\ndel Z\ndel stores\ngc.collect()\n\nif remove_unused_shops:\n print('[INFO] Checking unused shops')\n shops_to_remove = sales[~sales['shop_id'].isin(test_df['shop_id'].unique())].shop_id.unique()\n print(\"...found {} shop_ids not present in test dataset\".format(len(shops_to_remove)))\n gc.collect()\n\nprint('[INFO] Adding Category Id information')\ntrain = sales.merge(items, on='item_id', how='left')\ntrain = train.merge(categories, on='item_category_id', how='left')\ntest = test_df.merge(items, on='item_id', how='left')\ntest = test.merge(categories, on='item_category_id', how='left')\n\ntemp = 
train.loc[:,['item_id','item_category_id']].groupby('item_id', as_index=False).first()\ndata = pd.merge(data, temp, on=['item_id'], how='left')\n\nif remove_unused_categories:\n print('[INFO] Checking unused categories')\n categories_to_remove = train[~train['item_category_id'].isin(test['item_category_id'].unique())].item_category_id.unique() \n print(\"...found {} item_category_ids not present in test dataset\".format(len(categories_to_remove)))\n gc.collect()\n\ndel temp\ndel train\ndel test\ngc.collect()\n\nprint('[INFO] Adding item_cnt_last_month')\nv = data.groupby(['date_block_num','shop_id', 'item_id'], as_index=False).agg({'target':'first'}).rename(columns={\"target\": \"item_cnt_last_month\"})\nv['date_block_num'] = v['date_block_num'] + 1\ndata = data.merge(v, on=['date_block_num','shop_id', 'item_id'],how='left')\ndata.loc[:,'item_cnt_last_month'].fillna(0,inplace=True)\n\ndel v\ngc.collect()\n\nprint('[INFO] Adding item_cnt_up_to_last_month')\nv = data.groupby(['date_block_num','shop_id', 'item_id']).agg({'target' : 'sum'}).groupby(level=[1,2]).cumsum().reset_index().rename(columns={\"target\" : \"item_cnt_up_to_last_month\"})\ndata['item_cnt_up_to_last_month'] = v['item_cnt_up_to_last_month'] - data['target']\n\ndel v\ngc.collect()\n\n\n# splitting train and test set\nprint('[INFO] Splitting train and test data')\nX_test = data.loc[size_of_training_data:,:].reset_index(drop=True)\nX_train = data.loc[:size_of_training_data-1,:].reset_index(drop=True)\n \nprint('... train size: ' + str(len(X_train)))\nprint('... test size: ' + str(len(X_test)))\n\nfrom ndl.utils.feature_engineering import MeanEncoding, SumEncoding, StdEncoding, VarEncoding, MeanEncodingSmoothing, MeanEncodingLOO, MeanEncodingExpanding, MeanEncodingKFold\nprint('[INFO] Adding MeanEncodings')\n\ngroup_by_cols = [\n ['item_id'],\n ['shop_id'],\n ['shop_id','item_id'],\n ['item_category_id'],\n ['shop_id', 'item_category_id'],\n ['date_block_num'],\n ['date_block_num','item_id'],\n ['date_block_num','shop_id']\n ]\n\nfor g in group_by_cols:\n X_train, X_test, column_name, corr = MeanEncodingKFold(X_train, X_test, g, 'target')\n print(\"Result of MeanEncoding for\", column_name , \": \", corr)\n X_train, X_test, column_name, corr = SumEncoding(X_train, X_test, g, 'target')\n print(\"Result of SumEncoding for\", column_name , \": \", corr)\n X_train, X_test, column_name, corr = VarEncoding(X_train, X_test, g, 'target')\n \n\nif remove_unused_shops:\n print('[INFO] Removing unused shops')\n original_size = len(X_train)\n shops_to_remove = sales[~sales['shop_id'].isin(test_df['shop_id'].unique())].shop_id.unique()\n print(\"...found {} shop_ids not present in test dataset\".format(len(shops_to_remove)))\n X_train = X_train.drop(np.where(X_train.shop_id.isin(shops_to_remove))[0]).reset_index(drop=True)\n print(\"...removed {} rows\".format(original_size - len(X_train)))\n del shops_to_remove\n gc.collect()\n\nif remove_unused_categories:\n train = sales.merge(items, on='item_id', how='left')\n train = train.merge(categories, on='item_category_id', how='left')\n test = test_df.merge(items, on='item_id', how='left')\n test = test.merge(categories, on='item_category_id', how='left')\n print('[INFO] Removing unused item categories')\n original_size = len(X_train)\n categories_to_remove = train[~train['item_category_id'].isin(test['item_category_id'].unique())].item_category_id.unique() \n print(\"...found {} item_category_ids not present in test dataset\".format(len(categories_to_remove)))\n X_train = 
X_train.drop(np.where(X_train.item_category_id.isin(categories_to_remove))[0]).reset_index(drop=True)\n print(\"...removed {} rows\".format(original_size - len(X_train)))\n del train\n del test\n del categories_to_remove\n gc.collect()\n\nif remove_categoricals:\n categorical = ['shop_id','item_id','item_category_id', 'store_size','date_block_num']\n X_train.drop(categorical, axis=1, inplace=True)\n X_test.drop(categorical, axis=1, inplace=True)\n\ndel data\ngc.collect()\n\n# check result\nfrom pandas import HDFStore\nimport os\nos.makedirs('../datasets/', exist_ok=True)\nhdf = HDFStore('../datasets/fe.h5')\nprint('\\n[INFO] Saving train and test dataset to fe.h5 (datasets folder)\\n')\nhdf.put('train', X_train, format='table', data_columns=True)\nhdf.put('test', X_test, format='table', data_columns=True)\nhdf.close()\n\n\n\n\n\n","sub_path":"feature_engineering.py","file_name":"feature_engineering.py","file_ext":"py","file_size_in_byte":8448,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"568738535","text":"import torch\nimport torch.nn as nn\nimport torch.utils.data as Data\nimport torchvision\n\ntorch.manual_seed(1)\n\nEPOCH = 1\nBATCH_SIZE = 50\nLR = 0.001\nDOWNLOAD_MNIST = False\n\ntrain_data = torchvision.datasets.MNIST(\n root='./mnist',\n train=True,\n transform=torchvision.transforms.ToTensor(),\n download=DOWNLOAD_MNIST\n)\ntest_data = torchvision.datasets.MNIST(\n root='./mnist', train=False\n)\n\ntrain_loader = Data.DataLoader(\n dataset=train_data, batch_size=BATCH_SIZE,\n shuffle=True,\n)\n\ntrain_x = torch.unsqueeze(train_data.train_data, dim=1).type(\n torch.FloatTensor)[:2000] / 255\ntrain_y = train_data.train_labels[:2000]\n\ntest_x = torch.unsqueeze(test_data.test_data, dim=1).type(torch.FloatTensor)[\n :2000] / 255\ntest_y = test_data.test_labels[:2000]\n\n\nclass CNN(nn.Module):\n def __init__(self):\n super(CNN, self).__init__()\n self.conv1 = nn.Sequential(\n nn.Conv2d(in_channels=1, out_channels=16,\n kernel_size=5, stride=1, padding=2),\n nn.ReLU(),\n nn.MaxPool2d(kernel_size=2)\n )\n self.conv2 = nn.Sequential(\n nn.Conv2d(in_channels=16, out_channels=32,\n kernel_size=5, stride=1, padding=2),\n nn.ReLU(),\n nn.MaxPool2d(kernel_size=2)\n )\n self.out = nn.Linear(32 * 7 * 7, 10)\n\n def forward(self, x):\n x = self.conv1(x)\n x = self.conv2(x)\n x = x.view(x.size(0), -1)\n output = self.out(x)\n return output\n\n\ncnn = CNN()\nprint(cnn)\n\noptimizer = torch.optim.Adam(cnn.parameters(), lr=LR)\nloss_func = nn.CrossEntropyLoss()\n\nfor epoch in range(EPOCH):\n for step, (b_x, b_y) in enumerate(train_loader):\n output = cnn(b_x)\n loss = loss_func(output, b_y)\n\n # print('train_loader', train_loader)\n # print('acc', acc)\n current_output = cnn(train_x)\n current_pred = torch.max(current_output, 1)[1].data.numpy().squeeze()\n real_labels = train_y.numpy()\n acc = sum(current_pred == real_labels) / 2000\n\n print('loss', loss.data.numpy(), '|acc:', acc)\n\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n\ntest_output = cnn(test_x[:2000])\npred_y = torch.max(test_output, 1)[1].data.numpy().squeeze()\nreal_out = test_y[:2000].numpy()\nprint(pred_y, 'prediction number')\nprint(real_out, 'real number')\nprint('test_set_acc:', sum(pred_y == real_out) / 2000)\n","sub_path":"a.py","file_name":"a.py","file_ext":"py","file_size_in_byte":2444,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"400526747","text":"from flask import 
Flask,flash,render_template,request,redirect\nfrom forms import IniciosesionForm,OlvidarForm,RegistrarForm\nimport yagmail as yagmail\nimport utils\nfrom flask import session\nfrom usuario import Usuario\n\n\napp=Flask(__name__)\napp.secret_key=b'_5#y2L\"F4Q8z\\n\\xec]/'\n\n@app.route(\"/\")\ndef pagina():\n    return render_template(\"page1.html\")\n\n@app.route(\"/registrarse\", methods=('POST','GET'))\ndef Registrarse():\n    session.clear()\n    form=RegistrarForm()\n    form1=IniciosesionForm()\n    if request.method == 'POST':\n        name= request.form.get('name') \n        email = request.form.get('email')\n        username = request.form.get('user')\n        password = request.form.get('contra')\n        \n        if not utils.isUsernameValid(name):\n            flash(\"Nombre invalido\")\n            return render_template('registrarse.html',form=form)\n        if not utils.isEmailValid(email):\n            flash(\"Correo invalido\")\n            return render_template('registrarse.html',form=form)\n        if not utils.isUsernameValid(username):\n            flash(\"Usuario invalido\")\n            return render_template('registrarse.html',form=form) \n        if not utils.isPasswordValid(password):\n            flash(\"Contraseña invalida\")\n            return render_template('registrarse.html',form=form) \n        usuario = Usuario(None, username, name, None, email, password, None, None)\n        result = usuario.registrar()\n        yag = yagmail.SMTP('mintic2022@gmail.com', 'HolamundoMintic2020') #SMTP, the mail-sending protocol \n        yag.send(to=email, subject='Activa tu cuenta', contents='Bienvenido, usa el link para activar tu cuenta')\n        flash(\"Cuenta creada con éxito\")\n        return render_template('Iniciosesion.html', form= form1)\n    return render_template(\"registrarse.html\",form=form)\n\n@app.route(\"/Olvidar\",methods=('POST','GET'))\ndef olvidarcontraseña():\n    olv=OlvidarForm()\n    if request.method=='POST':\n        correo=request.form.get('correo')\n\n        if not utils.isEmailValid(correo):\n            flash('Formato de correo invalido')\n            return render_template('Olvidar.html',olv=olv)\n        yag=yagmail.SMTP('mintic2022@gmail.com','HolamundoMintic2020')\n        yag.send(to=correo,subject='Recuperación de contraseña',contents='Se le ha asignado la siguiente contraseña para su cuenta')\n\n        return redirect('Iniciosesion')\n    return render_template(\"Olvidar.html\", olv=olv)\n\n@app.route(\"/Galeria\")\ndef galeria():\n    return render_template(\"Galeria.html\")\n\n@app.route(\"/Iniciosesion\", methods=('GET','POST'))\ndef iniciarsesion():\n    form=IniciosesionForm()\n    if form.is_submitted():\n        result=request.form\n        return render_template('index.html',result=result)\n\n    return render_template(\"Iniciosesion.html\",form=form) ","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2744,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"9162491","text":"import matplotlib.pyplot as plt\n\nimport numpy as np\n\nimport serial\n\nimport time\n\n\nT = 10.0/0.1; # time interval\n\nt = np.arange(0,10,0.1) # time vector\n\n\nserdev = '/dev/ttyACM0'\n\ns = serial.Serial(serdev,115200)\n\nACC_X = []\nACC_Y = []\nACC_Z = []\nevent = []\n\nfor i in range(int(T)):\n\n    line=s.readline() # Read an echo string from K66F terminated with '\\n'\n\n    num = list(map(float,line.split()))\n\n    # print (num)\n\n    ACC_X.append(num[0])\n    ACC_Y.append(num[1])\n    ACC_Z.append(num[2])\n    event.append(num[3])\n\n\nfig, ax = plt.subplots(2, 1)\n\nax[0].plot(t, ACC_X, color=\"blue\", label=\"x\")\n\nax[0].plot(t, ACC_Y, color=\"red\", label=\"y\")\n\nax[0].plot(t, ACC_Z, color=\"green\", label=\"z\")\n\nax[0].legend(loc='lower left')\n\nax[0].set_xlabel('Time')\n\nax[0].set_ylabel('ACC 
Vector')\n\nax[1].stem(t, event) \n\nax[1].set_xlabel('Time')\n\nax[1].set_ylabel('Tilt')\n\nplt.show()\n\ns.close()","sub_path":"logger/ACC.py","file_name":"ACC.py","file_ext":"py","file_size_in_byte":880,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"209371250","text":"import json\nimport csv\n\ndata = []\n\nfor age_grp in range(9, 13):\n    i = 1\n    prev_points = 0\n    prev_placement = 1\n    with open(f\"{age_grp}kl.txt\") as f:\n        for row in csv.reader(f, dialect=\"excel-tab\"):\n            print(row)\n            name = row[0]\n            cls = int(row[2])\n            score = int(row[5])\n            school = row[1]\n            if score == prev_points:\n                placement = prev_placement\n            else:\n                placement = prev_placement = i\n                prev_points = score\n            data.append({\"name\":name,\"class\":cls,\"age_group\":str(age_grp),\"score\":score,\"school\":school,\"placement\":placement})\n            i += 1 # advance the candidate placement for the next contestant\n\nwith open(\"../2019.json\",'w') as f:\n    d = {\n        \"age_groups\": [{\"name\":str(x),\"min_class\":x,\"max_class\":x}for x in range(9,13)],\n        \"scores\": data\n    }\n    json.dump(d, f)\n","sub_path":"matemaatika/2019/extract.py","file_name":"extract.py","file_ext":"py","file_size_in_byte":845,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"284304531","text":"from typing import Any, Optional\nfrom app import schemas, models\nfrom app.crud.base import CRUDBase\nfrom sqlalchemy.orm import Session\n\n\nclass CRUDDirectory(\n    CRUDBase[\n        models.Directory,\n        schemas.DirectoryCreateExtended,\n        schemas.DirectoryUpdate,\n        schemas.DirectoryReturn,\n    ]\n):\n    def get_multi_by_server(\n        self,\n        db: Session,\n        *,\n        server: models.Server,\n        filtration: schemas.FilterData[Any] = schemas.FilterData[Any]()\n    ) -> tuple[int, list[models.Directory]]:\n        query = db.query(self.model).filter(self.model.server == server)\n        return self._filter_multi_query(query, filtration)\n\n    def get_by_location_and_server(\n        self, db: Session, *, server: models.Server, location: str\n    ) -> Optional[models.Directory]:\n        return (\n            db.query(self.model)\n            .filter(self.model.server == server, self.model.location == location)\n            .first()\n        )\n","sub_path":"app/crud/directory.py","file_name":"directory.py","file_ext":"py","file_size_in_byte":973,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"426722737","text":"\"\"\"\nTests for textures.\n\"\"\"\nimport arcade\n\nSCREEN_WIDTH = 800\nSCREEN_HEIGHT = 600\nLINE_HEIGHT = 20\nCHARACTER_SCALING = 0.5\n\n\nclass MyTestWindow(arcade.Window):\n\n    def __init__(self, width, height, title):\n        super().__init__(width, height, title)\n\n        arcade.set_background_color(arcade.color.AMAZON)\n\n        self.texture = arcade.load_texture(\":resources:images/space_shooter/playerShip1_orange.png\")\n\n    def on_draw(self):\n        arcade.start_render()\n\n        scale = .6\n        arcade.draw_texture_rectangle(540, 120,\n                                      self.texture.image.width * scale,\n                                      self.texture.image.height * scale,\n                                      self.texture, angle=45)\n\n        arcade.draw_lrwh_rectangle_textured(10, 400, 64, 64, self.texture)\n\n        for i in range(15):\n            arcade.draw_scaled_texture_rectangle(i * 50 + 20, 220,\n                                                 self.texture,\n                                                 scale,\n                                                 angle=45, alpha=i * 15)\n\n\ndef test_textured_rects():\n    window = MyTestWindow(SCREEN_WIDTH, SCREEN_HEIGHT, \"Test Textures\")\n    window.test()\n    window.close()\n    
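# Drop arcade's module-level texture cache so textures loaded here cannot leak into later tests in the same process.\n    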
arcade.cleanup_texture_cache()\n","sub_path":"tests/unit2/test_textured_rects.py","file_name":"test_textured_rects.py","file_ext":"py","file_size_in_byte":1280,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"541990554","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Mon Jan 22 19:22:18 2018\r\n\r\n@author: Yuto\r\n\"\"\"\r\n\r\nimport tensorflow as tf\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nfrom numpy.random import multivariate_normal, permutation\r\nimport pandas as pd\r\nfrom pandas import DataFrame, Series\r\n\r\nnp.random.seed(20180122)\r\ntf.set_random_seed(20180122)\r\n\r\ndef generate_datablock(n,mu,var,t):\r\n data = multivariate_normal(mu,np.eye(2) * var, n)\r\n df = DataFrame(data,columns=['x1','x2'])\r\n df['t'] = t\r\n return df\r\n\r\ndf0 = generate_datablock(15,[7,7],22,0)\r\ndf1 = generate_datablock(15,[22,7],22,0)\r\ndf2 = generate_datablock(10,[7,22],22,0)\r\ndf3 = generate_datablock(25,[20,20],22,1)\r\n\r\ndf = pd.concat([df0,df1,df2,df3], ignore_index=True)\r\ntrain_set = df.reindex(permutation(df.index)).reset_index(drop=True)\r\n\r\ntrain_x = train_set[['x1','x2']].as_matrix()\r\ntrain_t = train_set['t'].as_matrix().reshape([len(train_set),1])\r\n\r\n\"\"\"\r\np120\r\n\"\"\"","sub_path":"tensor6.py","file_name":"tensor6.py","file_ext":"py","file_size_in_byte":943,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"396596796","text":"import urllib.request\nimport urllib.parse\nimport urllib.error\nfrom bs4 import BeautifulSoup\nimport ssl\nimport csv\nimport sys\n\n\ndef scrape(url, counter):\n\t# For ignoring SSL certificate errors\n\t# ctx = ssl.create_default_context()\n\t# ctx.check_hostname = False\n\t# ctx.verify_mode = ssl.CERT_NONE\n\n\tproduct_json = dict()\n\n\tuser_agent = 'Mozilla/5.0 (Windows NT 6.1; Win64; x64)'\n\tvalues = {'name': 'Michael Foord' + str(counter),\n\t\t\t 'location': 'Northampton',\n\t\t\t 'language': 'Python'}\n\theaders = {'User-Agent': user_agent}\n\n\tdata = urllib.parse.urlencode(values)\n\tdata = data.encode('ascii')\n\treq = urllib.request.Request(url, data, headers)\n\n\ttry:\n\t\thtml = urllib.request.urlopen(req).read()\n\t\t# html = urllib.request.urlopen(url).read()\n\t\tsoup = BeautifulSoup(html, 'html5lib')\n\n\t\t# This block of code will help extract the Prodcut Title of the item\n\n\t\tfor spans in soup.findAll('span', attrs={'id': 'productTitle'}):\n\t\t\tname_of_product = spans.text.strip()\n\t\t\tproduct_json['title'] = name_of_product\n\t\t\tbreak\n\n\t\t# This block of code will help extract the image of the item\n\n\t\tfor divs in soup.findAll('div', attrs={'class': 'imgTagWrapper', 'id': 'imgTagWrapperId'}):\n\t\t\tfor img_tag in divs.findAll('img', attrs={'id': 'landingImage'\n\t\t\t\t\t\t\t\t\t\t\t\t\t }):\n\t\t\t\t# product_json['image'] = img_tag['src']\n\t\t\t\tproduct_json['image'] = img_tag['data-a-dynamic-image'][img_tag['data-a-dynamic-image'].find('\"')+1:img_tag['data-a-dynamic-image'].find('\":')]\n\t\t\t\tbreak\n\n\t\t# This block of code will help extract top specifications and details of the product\n\n\t\tproduct_json['description'] = []\n\t\tfor div in soup.findAll('div', attrs={'id': 'feature-bullets'}):\n\t\t\tfor li_tags in div.findAll('li'):\n\t\t\t\tfor spans in li_tags.findAll('span',\n\t\t\t\t\t\t\t\t\t\t\t attrs={'class': 'a-list-item'}, text=True,\n\t\t\t\t\t\t\t\t\t\t\t 
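# BeautifulSoup's recursive=False (next argument) restricts the search to direct children of the <li>, so spans nested deeper in the markup are not collected twice.\n\t\t\t\t\t\t\t\t\t\t\t 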
recursive=False):\n\t\t\t\t\tproduct_json['description'].append(spans.text.strip())\n\n\texcept IOError as err:\n\t\tprint(\"Error opening \" + url)\n\n\treturn product_json\n\n\nif __name__ == \"__main__\":\n\tinstances = []\n\tinput_csv_path = sys.argv[1]\n\twith open(input_csv_path) as csv_file:\n\t\treader = csv.DictReader(csv_file)\n\t\tfor row in reader:\n\t\t\tinstances.append(row)\n\n\tcsv_columns = ['sample_id', 'user', 'query', 'product', 'attention_weight', 'drem_explanation', 'drem_attn_explanation', 'previous_reviews', 'title', 'image', 'description']\n\n\n\twith open('mturk-batch-input.csv', 'w') as csvfile:\n\t\twriter = csv.DictWriter(csvfile, fieldnames=csv_columns)\n\t\twriter.writeheader()\n\t\tproduct_json_map = dict()\n\t\tcounter = 0\n\t\tfor instance in instances:\n\t\t\tif instance['product'] not in product_json_map:\n\t\t\t\tjson_data = scrape(\"https://www.amazon.com/dp/\" + instance['product'] + \"/\", counter)\n\t\t\t\tproduct_json_map[instance['product']] = json_data\n\t\t\telse:\n\t\t\t\tjson_data = product_json_map[instance['product']]\n\n\t\t\tif json_data and json_data['title']:\n\t\t\t\tinstance['title'] = json_data['title']\n\t\t\t\tinstance['image'] = json_data['image'] if 'image' in json_data else \"\"\n\t\t\t\tinstance['description'] = json_data['description'] if 'description' in json_data else \"\"\n\t\t\t\twriter.writerow(instance)\n\t\t\t\tprint('Scraped product ' + str(counter) + ' : ' + instance['product'])\n\n\t\t\tcounter+=1\n","sub_path":"utils/web_scrapper.py","file_name":"web_scrapper.py","file_ext":"py","file_size_in_byte":3167,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"51958705","text":"\nimport re\nimport time\n\n\ndef main():\n display_instructions()\n user_input = get_user_input()\n\n while user_input is not False:\n handle_user_input(user_input)\n user_input = get_user_input()\n\n print(\"Shutting down...\")\n time.sleep(3)\n\n\ndef display_instructions():\n welcome = \"Welcome to the Hash Program!\"\n instructions = \"Please enter a word or sentence to be hashed\"\n explain_test = \"To see specific test cases please enter #test\"\n\n print(welcome)\n print(instructions)\n print(explain_test)\n\n\ndef get_letter_map():\n letter_values = {\n \"a\": 1,\n \"b\": 2,\n \"c\": 3,\n \"d\": 4,\n \"e\": 5,\n \"f\": 6,\n \"g\": 7,\n \"h\": 8,\n \"i\": 9,\n \"j\": 10,\n \"k\": 11,\n \"l\": 12,\n \"m\": 13,\n \"n\": 14,\n \"o\": 15,\n \"p\": 16,\n \"q\": 17,\n \"r\": 18,\n \"s\": 19,\n \"t\": 20,\n \"u\": 21,\n \"v\": 22,\n \"w\": 23,\n \"x\": 24,\n \"y\": 25,\n \"z\": 26,\n \" \": 31\n }\n\n return letter_values\n\n\ndef get_user_input():\n prompt = \"\\nPlease enter the value you would like to hash (or type exit to quit): \"\n quit_pattern = re.compile(\"[eE]xit|[qQ]uit\")\n\n try:\n user_input = raw_input(prompt)\n\n if quit_pattern.search(user_input):\n return False\n else:\n return user_input\n except IOError:\n print(\"Input has failed. 
Shutting down.\")\n time.sleep(3)\n exit(1)\n\n\ndef handle_user_input(user_input):\n\n if user_input.find(\"#test\") > -1:\n display_test_cases()\n else:\n print(get_hash_value(user_input))\n\n\ndef get_hash_value(string):\n letter_map = get_letter_map()\n\n string = sanitize_string(string)\n\n hash_value = 0\n\n for letter in string:\n hash_value += letter_map.get(letter.lower())\n\n hash_value %= 31\n\n return hash_value\n\n\ndef sanitize_string(string):\n\n # find symbols that will break the hash but keep the spaces since that is part of the hash\n symbol_pattern = \"[^\\s\\w]\"\n # find numbers since those will break the hash as well\n number_pattern = \"\\d\"\n\n # replace all symbols and numbers with blank\n string = re.sub(symbol_pattern, \"\", string)\n string = re.sub(number_pattern, \"\", string)\n\n return string\n\n\ndef display_test_cases():\n description = \"Please see the table below. The string in the left column should match the value in the right column\"\n test_case_table = {\n \"cat\": 24,\n \"Don't Panic\": 3,\n \"book\": 12,\n }\n\n print(description)\n\n for string, hash_value in test_case_table.items():\n print(\"| {: <15}\" + \"|\" + \"{: >5} |\").format(string, hash_value)\n\n\nmain()\n","sub_path":"hash.py","file_name":"hash.py","file_ext":"py","file_size_in_byte":2712,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"358581091","text":"import numpy as np\nimport scipy.optimize as opt\nimport matplotlib.pyplot as plt\n\nclass MotionOptimizer:\n def __init__(self):\n pass\n\n def define_LP_matrix(self, q0, qw, qf, max_acc, horizon, nb_waypoint):\n # define c, A_ub, b_ub, A_eq, b_eq\n # such that x = [q10, ..., q1H | q20, ..., q2H | ... | q60, ..., q6H | dt1**2, dt2**2] (6*H + 2)\n # c = [0, ..., 0, n, H-n] (6*H + 2)\n # A_ub = [1, -2, 1, 0, ..., 0, 0, 0, 0 | -a, 0] (6*2*(H-2) x 6*H+2)\n # [0, 1, -2, 1, ..., 0, 0, 0, 0 | -a, 0]\n # [0, 0, 1, -2, ..., 0, 0, 0, 0 | -a, 0]\n # [0, 0, 0, 1, ..., 0, 0, 0, 0 | -a, 0]\n # [ ... ]\n # [0, 0, 0, 0, ..., -2, 1, 0, 0 | 0,-a]\n # [0, 0, 0, 0, ..., 1, -2, 1, 0 | 0,-a]\n # [0, 0, 0, 0, ..., 0, 1, -2, 1 | 0,-a]\n #\n # [-1, 2, -1, 0, ..., 0, 0, 0, 0 | -a, 0]\n # [0, -1, 2, -1, ..., 0, 0, 0, 0 | -a, 0]\n # [0, 0, -1, 2, ..., 0, 0, 0, 0 | -a, 0]\n # [0, 0, 0, -1, ..., 0, 0, 0, 0 | -a, 0]\n # [ ... ]\n # [0, 0, 0, 0, ..., 2, -1, 0, 0 | 0,-a]\n # [0, 0, 0, 0, ..., -1, 2, -1, 0 | 0,-a]\n # [0, 0, 0, 0, ..., 0, -1, 2,-1 | 0,-a]\n # b_ub = [0, ..., 0] (6*2*(H-2))\n # A_eq = [1, 0, ..., 0 | 0, ..., 0, 0 | ... | 0, ..., 0, 0 | 0, 0] (6*5, 6*H+2)\n # [0, ..., 0, 0 | 1, 0, ..., 0 | ... | 0, ..., 0, 0 | 0, 0]\n # [ ... ]\n # [0, ..., 0, 0 | 0, ..., 0, 0 | ... | 1, 0, ..., 0 | 0, 0] (q0 condition)\n # [ ... ]\n # [0, ..., 0, 1 | 0, ..., 0, 0 | ... | 0, 0, ..., 0 | 0, 0]\n # [0, ..., 0, 0 | 0, ..., 0, 1 | ... | 0, 0, ..., 0 | 0, 0]\n # [ ... ]\n # [0, ..., 0, 0 | 0, ..., 0, 0 | ... | 0, 0, ..., 1 | 0, 0] (qf condition)\n # [ ... ]\n # b_eq = [q10, q20, ..., q60 | q1w, q2w, ..., q6w | q1f, q2f, ..., q6f | 0, ... 
0 ] (6*5)\n\n H = horizon\n n = nb_waypoint\n\n # objective matrix to minimize time\n c = np.zeros(6*H + 2, dtype=np.float)\n c[-2] = n\n c[-1] = H - n\n\n # Inequality\n # constraint 1: -a_max <= (q(n)-2*q(n-1)+q(n-2))/dt^2 <= a_max\n A1 = (np.diag(np.ones(H), k=0) + np.diag(-2*np.ones(H-1), k=1) + np.diag(np.ones(H - 2), k=2))[:-2]\n A2 = (np.diag(-np.ones(H), k=0) + np.diag(2*np.ones(H-1), k=1) + np.diag(-np.ones(H - 2), k=2))[:-2]\n Az = np.zeros_like(A1)\n As = np.zeros((6, H - 2, 2)) # abs. max acceleration for each joint\n for i in range(6):\n As[i, :n - 1, 0] = -max_acc[i]\n As[i, n - 1:, 1] = -max_acc[i]\n\n A_ub = np.block([[A1, Az, Az, Az, Az, Az, As[0]],\n [Az, A1, Az, Az, Az, Az, As[1]],\n [Az, Az, A1, Az, Az, Az, As[2]],\n [Az, Az, Az, A1, Az, Az, As[3]],\n [Az, Az, Az, Az, A1, Az, As[4]],\n [Az, Az, Az, Az, Az, A1, As[5]],\n\n [A2, Az, Az, Az, Az, Az, As[0]],\n [Az, A2, Az, Az, Az, Az, As[1]],\n [Az, Az, A2, Az, Az, Az, As[2]],\n [Az, Az, Az, A2, Az, Az, As[3]],\n [Az, Az, Az, Az, A2, Az, As[4]],\n [Az, Az, Az, Az, Az, A2, As[5]]])\n b_ub = np.zeros(np.shape(A_ub)[0]).T\n\n # Equality\n A_eq = []\n b_eq = []\n\n # constraint 1: q(t0) = q0\n for i in range(6):\n const = np.zeros(6 * H + 2)\n const[i * H + 0] = 1.0\n A_eq.append(const)\n b_eq.append(q0[i])\n\n # constraint 2: q(tw) = qw (waypoint)\n for i in range(6):\n const = np.zeros(6 * H + 2)\n const[i * H + n - 1] = 1.0\n A_eq.append(const)\n b_eq.append(qw[i])\n\n # constraint 3: q(tf) = qf\n for i in range(6):\n const = np.zeros(6 * H + 2)\n const[i * H + H - 1] = 1.0\n A_eq.append(const)\n b_eq.append(qf[i])\n\n # constraint 4: v(t0) = 0\n for i in range(6):\n const = np.zeros(6 * H + 2)\n const[i * H + 0] = 1.0\n const[i * H + 1] = -1.0\n A_eq.append(const)\n b_eq.append(0.0)\n\n # constraint 5: v(tf) = 0\n for i in range(6):\n const = np.zeros(6 * H + 2)\n const[i * H + H - 2] = 1.0\n const[i * H + H - 1] = -1.0\n A_eq.append(const)\n b_eq.append(0.0)\n A_eq = np.array(A_eq)\n return c, A_ub, b_ub, A_eq, b_eq\n\n def define_QP_matrix(self, q0, qw, qf, horizon, nb_waypoint):\n # define Q, c, A_eq, b_eq\n # such that\n # x = [q10, ..., q1H | ... | q60, ..., q6H | a1*dt1**2, ..., a6*dt1**2 | a1*dt2**2, ... a6*dt2**2] (6*H+12)\n # Q = [[0., 0., 0., ..., 0., 0., 0.], (6*H+12 x 6*H+12)\n # [0., 0., 0., ..., 0., 0., 0.],\n # [0., 0., 0., ..., 0., 0., 0.],\n # ...,\n # [0., 0., 0., ..., 1., 0., 0.],\n # [0., 0., 0., ..., 0., 1., 0.],\n # [0., 0., 0., ..., 0., 0., 1.]])\n # c = [0, ..., 0] (6*H+12)\n # A_eq = [[1., -2., 1., ..., 0., 0., 0.], (6*(H-2)+6*5, 6*H+12)\n # [0., 1., -2., ..., 0., 0., 0.],\n # [0., 0., 1., ..., 0., 0., 0.],\n # ...,\n # [0., 0., 0., ..., 0., 0., 0.],\n # [0., 0., 0., ..., 0., 0., 0.],\n # [0., 0., 0., ..., 0., 0., 0.]]) (including q0, qw, qf, v0, vf condition)\n # b_eq = [0, ..., 0 | q10, q20, ..., q60 | q1w, q2w, ..., q6w | q1f, q2f, ..., q6f | 0, ... 0 ] (6*(H-2)+6*5)\n\n H = horizon\n\n # objective matrix to minimize smoothness\n v1 = np.zeros(6 * H, dtype=np.float)\n v2 = np.ones(12, dtype=np.float)\n v = np.concatenate((v1, v2))\n Q = np.diag(v, k=0)\n c = np.zeros(6 * H + 12, dtype=np.float)\n\n # Equality\n # constraint 1: q(n)-2*q(n-1)+q(n-2) - a*dt^2 = 0\n A1 = (np.diag(np.ones(H), k=0) + np.diag(-2 * np.ones(H - 1), k=1) + np.diag(np.ones(H - 2), k=2))[:-2]\n Az = np.zeros_like(A1)\n As = np.zeros((6, H - 2, 12)) # abs. 
max acceleration for each joint\n for i in range(6):\n As[i, :H // 2 - 1, i] = -1\n As[i, H // 2 - 1:, i + 6] = -1\n A_eq = np.block([[A1, Az, Az, Az, Az, Az, As[0]],\n [Az, A1, Az, Az, Az, Az, As[1]],\n [Az, Az, A1, Az, Az, Az, As[2]],\n [Az, Az, Az, A1, Az, Az, As[3]],\n [Az, Az, Az, Az, A1, Az, As[4]],\n [Az, Az, Az, Az, Az, A1, As[5]]])\n b_eq = np.zeros(np.shape(A_eq)[0])\n\n # constraint 1: q(t0) = q0\n for i in range(6):\n const = np.zeros(6 * H + 12)\n const[i * H + 0] = 1.0\n A_eq = np.insert(A_eq, len(A_eq), const, axis=0)\n b_eq = np.insert(b_eq, len(b_eq), q0[i])\n\n # constraint 2: q(tw) = qw (waypoint)\n for i in range(6):\n const = np.zeros(6 * H + 12)\n const[i*H + nb_waypoint-1] = 1.0\n A_eq = np.insert(A_eq, len(A_eq), const, axis=0)\n b_eq = np.insert(b_eq, len(b_eq), qw[i])\n\n # constraint 3: q(tf) = qf\n for i in range(6):\n const = np.zeros(6 * H + 12)\n const[i * H + H - 1] = 1.0\n A_eq = np.insert(A_eq, len(A_eq), const, axis=0)\n b_eq = np.insert(b_eq, len(b_eq), qf[i])\n\n # constraint 4: v(t0) = 0\n for i in range(6):\n const = np.zeros(6 * H + 12)\n const[i * H + 0] = 1.0\n const[i * H + 1] = -1.0\n A_eq = np.insert(A_eq, len(A_eq), const, axis=0)\n b_eq = np.insert(b_eq, len(b_eq), 0.0)\n\n # constraint 5: v(tf) = 0\n for i in range(6):\n const = np.zeros(6 * H + 12)\n const[i * H + H - 2] = 1.0\n const[i * H + H - 1] = -1.0\n A_eq = np.insert(A_eq, len(A_eq), const, axis=0)\n b_eq = np.insert(b_eq, len(b_eq), 0.0)\n return Q, c, A_eq, b_eq\n\n def solve_LP(self, c, A_ub, b_ub, A_eq, b_eq):\n # arg min cT*x\n # such that A_ub*x <= b_ub\n # A_eq*x = b_eq\n result = opt.linprog(c, A_ub=A_ub, b_ub=b_ub, A_eq=A_eq, b_eq=b_eq, bounds=(None, None))\n return result\n\n def solve_QP(self, Q, c, A_eq, b_eq):\n # arg min (1/2)*xT*Q*x + cT*x\n # such that A_eq*x = b_eq\n # x >= 0\n A = np.block([[Q, A_eq.T], [A_eq, np.eye(np.shape(A_eq)[0])]])\n b = np.concatenate((-c, b_eq))\n result = np.linalg.lstsq(A, b)\n return result[0][:np.shape(Q)[0]]\n\n def optimize_motion(self, q0, qw, qf, max_acc, horizon1, horizon2, visualize=False):\n # First optimization\n H1 = horizon1\n n1 = H1//2\n c, A_ub, b_ub, A_eq, b_eq = self.define_LP_matrix(q0, qw, qf, max_acc=max_acc, horizon=H1, nb_waypoint=n1)\n result1 = self.solve_LP(c, A_ub, b_ub, A_eq, b_eq)\n\n # Process the first result\n dt1 = np.sqrt(result1.x[-2])\n dt2 = np.sqrt(result1.x[-1])\n print (dt1, dt2)\n # x = result.x[:-2].reshape(6, -1).T # not used\n # print(\"dt1=\", dt1)\n # print(\"dt2=\", dt2)\n total = n1*dt1 + (H1-n1)*dt2\n t1_ratio = (n1)*dt1 / total\n t2_ratio = (H1-n1)*dt2 / total\n print(\"[Result of the 1st optimization]\")\n # print(\"total time =\", total)\n print(\"t1_ratio =\", t1_ratio)\n print(\"t2_ratio =\", t2_ratio)\n n1 = int(horizon2*t1_ratio)\n n2 = horizon2-n1\n print(\"n_ratio =\", n1, \":\", n2)\n\n # Second optimization\n H2 = horizon2\n Q, c, A_eq, b_eq = self.define_QP_matrix(q0, qw, qf, horizon=H2, nb_waypoint=n1)\n result2 = self.solve_QP(Q, c, A_eq, b_eq)\n\n # Process the second result\n # x = [q10, ..., q1H | ... | q60, ..., q6H | a1*dt1**2, ..., a6*dt1**2 | a1*dt2**2, ... 
qs = result2[:6*H2].reshape(-1, H2).T\n adt1sq = result2[-12:-6]\n adt2sq = result2[-6:]\n a = np.array(max_acc)\n dt1 = max(np.sqrt(abs(adt1sq/a)))\n dt2 = max(np.sqrt(abs(adt2sq/a)))\n print(\"[Result of the 2nd optimization]\")\n # print(\"qs =\", qs)\n print(\"dt1 =\", dt1)\n print(\"dt2 =\", dt2)\n\n # time horizon\n t1 = np.arange(n1)*dt1\n t2 = t1[-1] + np.arange(1, n2+1)*dt2\n time = np.concatenate((t1, t2), axis=0)\n\n if visualize:\n self.plot_joint(time, qs, time[n1], qs[n1,:])\n return qs, time\n\n def plot_joint(self, t, qs, t_border, qs_border):\n # Create plot\n # plt.title('joint angle')\n ax = plt.subplot(611)\n plt.plot(t, qs[:, 0] * 180. / np.pi, 'b.-', t_border, qs_border[0] * 180. / np.pi, 'ro-')\n plt.legend(loc='upper center', bbox_to_anchor=(0.9, 2))\n # plt.legend(['q_des', 'q_grey', 'q_red'], ncol=3, bbox_to_anchor=(0.5, 1), loc=\"lower center\")\n # plt.ylim([35, 62])\n ax.set_xticklabels([])\n plt.ylabel(r'q1 ($^\circ$)')\n\n ax = plt.subplot(612)\n plt.plot(t, qs[:, 1] * 180. / np.pi, 'b.-', t_border, qs_border[1] * 180. / np.pi, 'ro-')\n # plt.ylim([-10, 12])\n ax.set_xticklabels([])\n plt.ylabel(r'q2 ($^\circ$)')\n\n ax = plt.subplot(613)\n plt.plot(t, qs[:, 2], 'b.-', t_border, qs_border[2], 'ro-')\n # plt.ylim([0.14, 0.23])\n ax.set_xticklabels([])\n plt.ylabel('q3 (m)')\n\n ax = plt.subplot(614)\n plt.plot(t, qs[:, 3] * 180. / np.pi, 'b.-', t_border, qs_border[3] * 180. / np.pi, 'ro-')\n # plt.ylim([-90, 70])\n ax.set_xticklabels([])\n plt.ylabel(r'q4 ($^\circ$)')\n\n ax = plt.subplot(615)\n plt.plot(t, qs[:, 4] * 180. / np.pi, 'b.-', t_border, qs_border[4] * 180. / np.pi, 'ro-')\n # plt.ylim([-60, 60])\n ax.set_xticklabels([])\n plt.ylabel(r'q5 ($^\circ$)')\n\n plt.subplot(616)\n plt.plot(t, qs[:, 5] * 180. 
/ np.pi, 'ro-')\n # plt.ylim([-60, 60])\n # plt.legend(['desired', 'actual'])\n plt.ylabel('q6 ($^\\circ$)')\n plt.xlabel('sample number')\n plt.show()","sub_path":"trash/MotionOptimizer_joint.py","file_name":"MotionOptimizer_joint.py","file_ext":"py","file_size_in_byte":12985,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"96150347","text":"# Willis Kirkham\n# 11084378\n# wrk027\n# C317 - a2q6\n\n# Synopsis:\n\n\nclass Variable:\n \"\"\" A representation of a variable, which has a domain and a current value that is assigned or unassigned \"\"\"\n\n def __init__(self, domain):\n \"\"\" Initialize the variable \"\"\"\n\n self.current = None\n self.domain = domain\n\n def assign(self, value):\n \"\"\" Assign the variable a value, which also means the domain becomes empty \"\"\"\n\n self.current = value\n self.domain = []\n\n def copy(self):\n \"\"\" Make a copy of the variable \"\"\"\n var_cp = Variable(self.domain)\n if self.current != None:\n var_cp.assign(self.current)\n return var_cp\n\nclass LatinSqState:\n \"\"\" Class for Latin Square states\"\"\"\n\n def __init__(self, variables, blanks):\n \"\"\" Represent the problem as a collection of Variables \"\"\"\n\n self.vars = variables\n self.blanks = blanks\n\n\nclass LatinSqProblem:\n \"\"\" Class for Latin Square problems\"\"\"\n\n def __init__(self, square):\n \"\"\" Initialize the problem \"\"\"\n self.square = square\n self.order = len(square)\n\n def create_initial_state(self):\n \"\"\" Creates a state from the square defined the problem \"\"\"\n\n order = len(self.square)\n variables = {}\n blanks = []\n\n # convert the square into variables and assignments\n for row_num, row in enumerate(self.square):\n for col_num, entry in enumerate(row):\n\n # create the variable\n new_var = Variable(range(1, order + 1))\n\n # build list of identifiers of unassigned variables\n if entry == 0:\n blanks.append((row_num, col_num))\n else:\n new_var.assign(entry)\n\n # put the variable in the dict\n variables.update({(row_num, col_num): new_var})\n\n return LatinSqState(variables, blanks)\n\n def is_goal(self, state):\n \"\"\"Checks if state is a true Latin Square.\"\"\"\n\n # if there are blanks, it's not a state\n if len(state.blanks) > 0:\n return False\n\n # TESTING\n # print_sq = self.var_to_square(state)\n # if self.equal_sq(print_sq):\n # break1 = 1\n # self.print_sq(print_sq)\n\n new_sq = []\n\n # Check rows, and construct a square without blanks\n for row_num, row in enumerate(self.square):\n\n row_cp = row.copy()\n\n\n # get the keys of the blanks in the current row\n row_blank_keys = []\n for col_num, entry in enumerate(row_cp):\n if entry == 0:\n row_blank_keys.append((row_num, col_num))\n\n # For every blank key, update the new row with the corresponding variable assignment\n for key in row_blank_keys:\n\n # get the variable in the state with that key\n curr_var = state.vars.get(key)\n\n # get the assignment in that variable\n val = curr_var.current\n\n # get the column of the variable\n col = key[1]\n\n # fill in the blank in the copy of the row with the assignment in the variable\n row_cp[col] = val\n\n # do the actual check for latin squareness on this row\n # set() removes duplicates, so if the length changes, there are duplicates, and its not a latin square\n if len(row_cp) != len(set(row_cp)):\n return False\n\n # add the filled in row to the new square\n new_sq.append(row_cp)\n\n # check columns (we already constructed the filled in square, so just perform the check)\n for 
i in range(0, self.order):\n col = [row[i] for row in new_sq] # construct column by grabbing i-th entry of each row\n if len(col) != len(set(col)):\n return False\n\n return True\n\n def actions(self, state):\n \"\"\" Creates a list of actions for state.\n An unassigned variable can be filled in with any number in the domain of the variable\"\"\"\n\n if len(state.blanks) == 0:\n return []\n\n # assume the blank is the first one in the list, and get the key\n first_bl_key = state.blanks[0]\n\n # get the variable at that key\n first_bl_var = state.vars.get(first_bl_key)\n\n # build list of actions, where an action is a tuple of the entry position and the integer to put there\n actions = []\n for i in first_bl_var.domain:\n actions.append((first_bl_key, i))\n\n return actions\n\n def result(self, state, action):\n \"\"\"Creates a new State object with the blank filled in, as described in action.\"\"\"\n\n # make copy of variables\n new_vars = {}\n new_blanks = []\n\n # copy vars, perform the action, and make new blanks\n for key, var in state.vars.items():\n\n var_cp = var.copy()\n\n if var.current == None:\n if key == action[0]:\n # found the unassigned variable we want to perform the action on\n var_cp.assign(action[1])\n else:\n # didn't find the unassigned variable we want, so add to list of unassigned variables\n new_blanks.append(key)\n\n # add the new variable\n new_vars.update({key: var_cp})\n\n # make the new state\n new_state = LatinSqState(new_vars, new_blanks)\n\n return new_state\n\n\n\n ######## Functions for Testing! ########\n\n def var_to_square(self, state):\n \"\"\" converts state with variables to a square \"\"\"\n\n # have: variables with position and value\n\n new_sq = []\n new_sq.append([])\n\n curr_row = 0\n\n for key, var in state.vars.items():\n\n if key[0] > curr_row:\n new_sq.append([])\n curr_row += 1\n\n new_sq[curr_row].append(var.current)\n\n return new_sq\n\n def print_sq(self, square):\n \"\"\" print a square \"\"\"\n\n with open('test_out.txt', 'a') as myfile:\n\n myfile.write(str(self.order) + '\\n')\n for row in square:\n for entry in row:\n myfile.write(str(entry) + ' ')\n myfile.write('\\n')\n myfile.write('\\n')\n\n def equal_sq(self, square):\n \"\"\" check if the square is equal to the hard coded square \"\"\"\n\n test_sq = []\n test_sq.append([1, 4, 2, 3])\n test_sq.append([4, 1, 3, 2])\n test_sq.append([3, 2, 1, 4])\n test_sq.append([2, 3, 4, 1])\n\n result = True\n\n for index, row in enumerate(square):\n if row != test_sq[index]:\n result = False\n\n return result","sub_path":"a3/a3q2.py","file_name":"a3q2.py","file_ext":"py","file_size_in_byte":6820,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"277870628","text":"#Code written by Parker Thurston with help from Class (With some extra help from Hannah Young)\n'''\n p5.py: Illustrates the payroll module.\n'''\n\nfrom payroll import *\nimport os, os.path, shutil\n\nPAY_LOGFILE = 'paylog.txt'\nemployees = []\n\ndef load_employees():\n with open(\"employees.csv\", \"r\") as reader:\n reader.readline()\n\n while reader:\n emp = reader.readline().strip().split(',')\n if emp[0] == \"\":\n return\n new_emp = Employee(emp[0], emp[1], emp[2], emp[3], emp[4], emp[5], emp[6])\n\n if emp[7] == \"3\": #Hourly Emp\n new_emp.make_hourly(emp[10])\n elif emp[7] == \"2\": #Commissioned Emp\n new_emp.make_commissioned(emp[8], emp[9])\n else: # Salaried Emp\n new_emp.make_salaried(emp[8])\n\n employees.append(new_emp)\n\ndef process_timecards():\n with 
open(\"timecards.csv\", \"r\") as reader:\n \n while reader:\n timecard = reader.readline().strip().split(',')\n if timecard[0] ==\"\":\n return\n\n employ = find_employee_by_id(timecard[0])\n if employ != None:\n for i in range(1,len(timecard)):\n employ.classification.add_timecard(timecard[i])\n\n\ndef process_receipts():\n with open(\"receipts.csv\", \"r\") as reader:\n\n while reader:\n receipt = reader.readline().strip().split(',')\n if receipt[0] == \"\":\n return\n employ = find_employee_by_id(receipt[0])\n if employ != None:\n for i in range(1,len(receipt)):\n employ.classification.add_receipt(receipt[i])\n \n\n\ndef run_payroll():\n if os.path.exists(PAY_LOGFILE): # pay_log_file is a global variable holding ‘payroll.txt’\n os.remove(PAY_LOGFILE)\n for emp in employees: # employees is the global list of Employee objects\n emp.issue_payment()\n\n\ndef find_employee_by_id(number):\n for emp in employees:\n emp_id = emp.get_employee_id()\n if emp_id == number:\n return emp\n return None\n \n\n \ndef main():\n load_employees()\n process_timecards()\n process_receipts()\n run_payroll()\n\n # Save copy of payroll file; delete old file\n shutil.copyfile(PAY_LOGFILE, 'paylog_old.txt')\n if os.path.exists(PAY_LOGFILE):\n os.remove(PAY_LOGFILE)\n\n # Change Issie Scholard to Salaried by changing the Employee object:\n emp = find_employee_by_id('51-4678119')\n emp.make_salaried(134386.51)\n emp.issue_payment()\n\n # Change Reynard,Lorenzin to Commissioned; add some receipts\n emp = find_employee_by_id('11-0469486')\n emp.make_commissioned(50005.50, 27)\n clas = emp.classification\n clas.add_receipt(1109.73)\n clas.add_receipt(746.10)\n emp.issue_payment()\n\n # Change Jed Netti to Hourly; add some hour entries\n emp = find_employee_by_id('68-9609244')\n emp.make_hourly(47)\n clas = emp.classification\n clas.add_timecard(8.0)\n clas.add_timecard(8.0)\n clas.add_timecard(8.0)\n clas.add_timecard(8.0)\n clas.add_timecard(8.0)\n emp.issue_payment()\n\nif __name__ == '__main__':\n main()\n","sub_path":"Project 5/p5ParkerT.py","file_name":"p5ParkerT.py","file_ext":"py","file_size_in_byte":3227,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"459662802","text":"from argparse import Namespace\n\nfrom torch.nn import functional as F\nfrom torch.utils.data import DataLoader, random_split\nfrom torch.optim import Adam\nfrom torch.optim.lr_scheduler import StepLR, ReduceLROnPlateau\n\nfrom torchnlp.metrics import get_accuracy, get_token_accuracy\n\nfrom pytorch_lightning import Trainer\n\nfrom .dataset.intent_entity_dataset import RasaIntentEntityDataset\nfrom .model.models import EmbeddingTransformer\n\nimport os, sys\nimport multiprocessing\nimport dill\n\nimport torch\nimport torch.nn as nn\nimport pytorch_lightning as pl\n\n\nclass DualIntentEntityTransformer(pl.LightningModule):\n def __init__(self, hparams):\n super().__init__()\n\n self.hparams = hparams\n\n if hasattr(self.hparams, 'tokenizer'):\n self.dataset = RasaIntentEntityDataset(markdown_lines=self.hparams.nlu_data, tokenizer=self.hparams.tokenizer)\n else:\n self.dataset = RasaIntentEntityDataset(markdown_lines=self.hparams.nlu_data, tokenizer=None)\n\n self.model = EmbeddingTransformer(\n vocab_size=self.dataset.get_vocab_size(),\n seq_len=self.dataset.get_seq_len(),\n intent_class_num=len(self.dataset.get_intent_idx()),\n entity_class_num=len(self.dataset.get_entity_idx()),\n num_encoder_layers=self.hparams.num_encoder_layers,\n )\n\n self.train_ratio = self.hparams.train_ratio\n 
self.batch_size = self.hparams.batch_size\n self.optimizer = self.hparams.optimizer\n self.intent_optimizer_lr = self.hparams.intent_optimizer_lr\n self.entity_optimizer_lr = self.hparams.entity_optimizer_lr\n self.loss_fn = nn.CrossEntropyLoss()\n\n def forward(self, x):\n return self.model(x)\n\n def prepare_data(self):\n train_length = int(len(self.dataset) * self.train_ratio)\n\n self.train_dataset, self.val_dataset = random_split(\n self.dataset, [train_length, len(self.dataset) - train_length],\n )\n\n def train_dataloader(self):\n train_loader = DataLoader(\n self.train_dataset,\n batch_size=self.batch_size,\n num_workers=multiprocessing.cpu_count(),\n )\n return train_loader\n\n def val_dataloader(self):\n val_loader = DataLoader(\n self.val_dataset,\n batch_size=self.batch_size,\n num_workers=multiprocessing.cpu_count(),\n )\n return val_loader\n\n def configure_optimizers(self):\n intent_optimizer = eval(\n f\"{self.optimizer}(self.parameters(), lr={self.intent_optimizer_lr})\"\n )\n entity_optimizer = eval(\n f\"{self.optimizer}(self.parameters(), lr={self.entity_optimizer_lr})\"\n )\n\n return (\n [intent_optimizer, entity_optimizer],\n # [StepLR(intent_optimizer, step_size=1),StepLR(entity_optimizer, step_size=1),],\n [\n ReduceLROnPlateau(intent_optimizer, patience=1),\n ReduceLROnPlateau(entity_optimizer, patience=1),\n ],\n )\n\n def training_step(self, batch, batch_idx, optimizer_idx):\n self.model.train()\n\n tokens, intent_idx, entity_idx = batch\n\n intent_pred, entity_pred = self.forward(tokens)\n\n intent_acc = get_accuracy(intent_idx.cpu(), intent_pred.max(1)[1].cpu())[0]\n entity_acc = get_token_accuracy(\n entity_idx.cpu(),\n entity_pred.max(2)[1].cpu(),\n ignore_index=self.dataset.pad_token_id,\n )[0]\n\n tensorboard_logs = {\n \"train/intent/acc\": intent_acc,\n \"train/entity/acc\": entity_acc,\n }\n\n if optimizer_idx == 0:\n intent_loss = self.loss_fn(intent_pred, intent_idx.squeeze(1))\n tensorboard_logs[\"train/intent/loss\"] = intent_loss\n return {\n \"loss\": intent_loss,\n \"log\": tensorboard_logs,\n }\n if optimizer_idx == 1:\n entity_loss = self.loss_fn(entity_pred.transpose(1, 2), entity_idx.long())\n tensorboard_logs[\"train/entity/loss\"] = entity_loss\n return {\n \"loss\": entity_loss,\n \"log\": tensorboard_logs,\n }\n\n def validation_step(self, batch, batch_idx):\n self.model.eval()\n\n tokens, intent_idx, entity_idx = batch\n\n intent_pred, entity_pred = self.forward(tokens)\n\n intent_acc = get_accuracy(intent_idx.cpu(), intent_pred.max(1)[1].cpu())[0]\n entity_acc = get_token_accuracy(\n entity_idx.cpu(),\n entity_pred.max(2)[1].cpu(),\n ignore_index=self.dataset.pad_token_id,\n )[0]\n\n intent_loss = self.loss_fn(intent_pred, intent_idx.squeeze(1))\n entity_loss = self.loss_fn(\n entity_pred.transpose(1, 2), entity_idx.long()\n ) # , ignore_index=0)\n\n return {\n \"val_intent_acc\": torch.Tensor([intent_acc]),\n \"val_entity_acc\": torch.Tensor([entity_acc]),\n \"val_intent_loss\": intent_loss,\n \"val_entity_loss\": entity_loss,\n \"val_loss\": intent_loss + entity_loss,\n }\n\n def validation_end(self, outputs):\n avg_loss = torch.stack([x[\"val_loss\"] for x in outputs]).mean()\n avg_intent_acc = torch.stack([x[\"val_intent_acc\"] for x in outputs]).mean()\n avg_entity_acc = torch.stack([x[\"val_entity_acc\"] for x in outputs]).mean()\n\n tensorboard_logs = {\n \"val/loss\": avg_loss,\n \"val/intent_acc\": avg_intent_acc,\n \"val/entity_acc\": avg_entity_acc,\n }\n\n return {\n \"val_loss\": avg_loss,\n \"log\": tensorboard_logs,\n 
\"progress_bar\": tensorboard_logs,\n }\n","sub_path":"DIET/DIET_lightning_model.py","file_name":"DIET_lightning_model.py","file_ext":"py","file_size_in_byte":5720,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"495355838","text":"import requests\nimport sys\nimport os\nfrom bs4 import BeautifulSoup, SoupStrainer\n\nwanted_tags = ['a', 'p', 'ul', 'ol', 'li', 'h1', 'h2', 'h3', 'h4', 'h5', 'h6']\nsaved_tabs = []\nheaders = {'user-agent': 'Mozilla/5.0 (X11; Linux i686; rv:64.0) Gecko/20100101 Firefox/64.0'}\nproxy = {'http': 'http://' + '212.56.218.90:40572'}\ntabs_folder = sys.argv[1]\n\n\ntry:\n os.mkdir(tabs_folder)\nexcept FileExistsError:\n # print(f'The folder {tabs_folder} already exists')\n pass\n\n\ndef make_valid_url(url: str):\n if '.' not in url:\n if 'com' in url:\n url = url[:-3] + '.com'\n else:\n url += '.com'\n if 'https://' in url:\n pass\n else:\n url = 'https://' + url\n return url\n\n\ndef make_file_name(url: str):\n url = url.strip('https://')\n url_split = url.split('.')\n url_split.pop()\n file_name = '.'.join(url_split)\n return file_name\n\n\ndef show_site(url):\n r = requests.get(url)\n strainer = SoupStrainer(wanted_tags)\n strained_soup = BeautifulSoup(r.text, 'html.parser', parse_only=strainer)\n site = [st for st in strained_soup.stripped_strings]\n text = '\\n'.join(site)\n print(text)\n file_name = make_file_name(url)\n with open(os.path.join(tabs_folder, file_name), 'w', encoding='utf-8') as f:\n f.write(text)\n saved_tabs.append(file_name)\n\n\ndef main():\n user_input = ''\n while user_input != 'exit':\n user_input = input()\n if user_input == 'exit':\n break\n elif user_input in saved_tabs:\n with open(os.path.join(tabs_folder, user_input), 'r', encoding='utf-8') as f:\n print(f.read())\n else:\n url = make_valid_url(user_input)\n show_site(url)\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"browser.py","file_name":"browser.py","file_ext":"py","file_size_in_byte":1767,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"304724318","text":"import discord\n\nTOKEN = 'NzA0NjI3NDY0MDM0NTgyNTM4.Xqf5iA.oZG7lTxvXikyJPwMw7kCgrOFwsI'\nclient = discord.Client()\n\n# @client.event\n# async def on_ready():\n# print(f'{client.user} подключен к Discord!')\n# for guild in client.guilds:\n# print(\n# f'{client.user} подключились к чату:\\n'\n# f'{guild.name}(id: {guild.id})'\n# )\n\nclass YLBotClient(discord.Client):\n async def on_ready(self):\n print(f'{self.user} has connected to Discord!')\n for guild in self.guilds:\n print(\n f'{self.user} подключились к чату:\\n'\n f'{guild.name}(id: {guild.id})')\n\n async def on_member_join(self, member):\n await member.create_dm()\n await member.dm_channel.send(\n f'Привет, {member.name}!'\n )\n\n async def on_message(self, message):\n if message.author == self.user:\n return\n if \"привет\" in message.content.lower():\n await message.channel.send(\"И тебе привет\")\n await message.channel.send(\"Спасибо за сообщение\")\n\n\nclient = YLBotClient()\nclient.run(TOKEN)\n\n\n#client.run(TOKEN)\n","sub_path":"botyl112.py","file_name":"botyl112.py","file_ext":"py","file_size_in_byte":1222,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"195301217","text":"import pandas as pd\r\n\r\ndf = pd.read_csv(\"dists.csv\", encoding=\"utf8\")\r\nordinals =[\"first\", \"second\", \"third\", \"fourth\", \"fifth\",\r\n \"sixth\", \"seventh\", \"eigth\", \"ninth\", 
\"tenth\",\r\n \"eleventh\", \"twelvth\", \"thirteenth\"] \r\ndf.index = ordinals\r\n\r\nprint(df[0:3]) # 不含 3 使用預設的索引去搜索\r\nprint(df[\"sixth\":\"eleventh\"]) # 含 \"eleventh\" #使用自建的索引去搜索\r\ndf[0:3].to_html(\"Ch9_3_1a_01.html\")\r\ndf[\"sixth\":\"eleventh\"].to_html(\"Ch9_3_1a_02.html\")","sub_path":"第九章 Pandas套件/Ch9_3_1a.py","file_name":"Ch9_3_1a.py","file_ext":"py","file_size_in_byte":503,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"179374114","text":"def fakulteta(n):\n # print('Računam fakulteto od', n)\n if n <= 1:\n # print('Enostaven primer!')\n return 1\n else:\n # print('Aha, moram najprej izračunati fakulteto od', n - 1)\n fakulteta_prejsnjega = fakulteta(n - 1)\n # print('Rezultat je', fakulteta_prejsnjega)\n # print('Zdaj pa le še zmnožim z', n)\n return n * fakulteta_prejsnjega\n\ndef neustavljiva_fakulteta(n):\n return n * neustavljiva_fakulteta(n - 1)\n\ndef fibonacci(n):\n print('Računam fibonacci', n)\n if n == 0:\n return 0\n elif n == 1:\n return 1\n else:\n return fibonacci(n - 1) + fibonacci(n - 2)\n\ndef stevilo_samoglasnikov(niz):\n samoglasniki = 'aeiouAEIOU'\n if niz == '':\n return 0\n else:\n samoglasniki_v_preostanku = stevilo_samoglasnikov(niz[1:])\n if niz[0] in samoglasniki:\n return 1 + samoglasniki_v_preostanku\n else:\n return samoglasniki_v_preostanku\n","sub_path":"datoteke-s-predavanj/2017-18/02-rekurzija/rekurzija-a.py","file_name":"rekurzija-a.py","file_ext":"py","file_size_in_byte":974,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"36682292","text":"# 9. Среди натуральных чисел, которые были введены, найти наибольшее по сумме цифр.\n# Вывести на экран это число и сумму его цифр.\n\nprint('Введите несколько натуральных чисел или 0')\n\nmax_sum = 0\nnum = 0\n\nwhile True:\n x = input('Число: ')\n if x == '0':\n break\n else:\n cur_sum = 0\n for i in x:\n cur_sum += int(i)\n if cur_sum > max_sum:\n max_sum = cur_sum\n num = int(x)\n\nprint(f'В веденной вами последовательности чисел {num} имеет наибольшую сумму цифр, которая равна {max_sum}')\n","sub_path":"lesson_2/task_7.py","file_name":"task_7.py","file_ext":"py","file_size_in_byte":761,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"206041804","text":"import csv\n\ndef get_movie_data(data_path):\n the_data = []\n with open(data_path) as file:\n \treader = csv.reader(file, delimiter = \"|\")\n \tfor row in reader:\n \t\tthe_data.append(row)\n movie_data = {int(row[0]):Movie(row[1],row[5:]) for row in the_data}\n return movie_data\n\ndef get_user_data(data_path):\n user_data = []\n with open(data_path) as file:\n \treader = csv.reader(file, delimiter = \"\\t\")\n \tfor row in reader:\n \t\tuser_data.append(row)\n return user_data\n\ndef get_user_info(data_path):\n user_info = {}\n with open(data_path) as file:\n \treader = csv.reader(file, delimiter = \"|\")\n \tfor row in reader:\n \t\tuser_info[int(row[0])] = int(row[1])\n return user_info\n\ndef get_user_mov_rating(user_data):\n user_mov_rat = {}\n for user in user_data:\n if int(user[0]) not in user_mov_rat:\n user_mov_rat[int(user[0])] = [(int(user[1]),int(user[2]))]\n else:\n user_mov_rat[int(user[0])].append((int(user[1]),int(user[2])))\n return user_mov_rat\n\ndef get_user_movs(user_data):\n user_movs = {}\n for user in user_data:\n if int(user[0]) not in user_movs:\n user_movs[int(user[0])] = [int(user[1])]\n else:\n user_movs[int(user[0])].append(int(user[1]))\n return 
user_movs\n\ndef get_movie_ratings(user_data):\n mov_ratings = {}\n for movie in user_data:\n if int(movie[1]) not in mov_ratings:\n mov_ratings[int(movie[1])] = [int(movie[2])]\n else:\n mov_ratings[int(movie[1])].append(int(movie[2]))\n return mov_ratings\n\nclass Movie:\n def __init__(self,title,genres):\n self.title = title\n self.genre_list = genres\n self.genres = self.find_genres()\n\n def find_genres(self):\n return [i for i in range(19) if self.genre_list[i] == '1']\n\n def __repr__(self):\n return self.title\n","sub_path":"data_handling.py","file_name":"data_handling.py","file_ext":"py","file_size_in_byte":1876,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"197308772","text":"#import pykitti\r\nimport os\r\nimport cv2\r\nimport numpy as np\r\nimport utils\r\n\r\nfiledir='../'\r\nmodel=['object','tracking']\r\nobj_train_dir=['calib','image_2','label_2','velodyne']\r\nobj_test_dir=['calib','image_2','velodyne']\r\n\r\n#tracking_seq='0000' #train: 0000-0020, test: 0000-0028\r\nshow_flag=True # visualize\r\ntracking_train_seq=20 #20 #20\r\ntracking_test_seq=28\r\n#original\r\noriginal_class_dic = {'Car': 0, 'Van': 1, 'Truck': 2, 'Pedestrian': 3, 'Person_sitting': 4, 'Cyclist': 5, 'Tram': 6}\r\n#combined\r\ncombine_class_dic = {'Car': 0, 'Van': 0, 'Truck': 0, 'Pedestrian': 1, 'Person_sitting': 1, 'Cyclist': 2, 'Tram': 0}\r\n\r\ndef tracking_data(path,mode='training'):\r\n for i in range(0,tracking_train_seq+1):\r\n seq_id=str(i).zfill(4)\r\n if mode=='training':\r\n if not os.path.exists(os.path.join(path, mode,'front_view_label')):\r\n os.mkdir(os.path.join(path, mode,'front_view_label'))\r\n\r\n save_path=os.path.join(path, mode,'front_view_label',seq_id)\r\n path_image=os.path.join(path, mode,'image_02',seq_id)\r\n path_velo=os.path.join(path, mode,'velodyne',seq_id)\r\n path_oxts=os.path.join(path, mode,'oxts',seq_id+'.txt')\r\n path_label=os.path.join(path, mode,'label_02',seq_id+'.txt')\r\n path_det=os.path.join(path, mode,'det_02',seq_id)\r\n path_calib=os.path.join(path, mode,'calib',seq_id+'.txt')\r\n\r\n process(impath=path_image,velopath=path_velo,oxtspath=path_oxts,labelpath=path_label,calibpath=path_calib,savepath=save_path)\r\n else:\r\n path_image = os.path.join(path, mode, 'image_02', seq_id)\r\n path_velo = os.path.join(path, mode, 'velodyne', seq_id)\r\n path_oxts = os.path.join(path, mode, 'oxts', seq_id+'.txt')\r\n #path_label = os.path.join(path, mode, 'label_02', seq_id) #no in test\r\n path_det = os.path.join(path, mode, 'det_02', seq_id)\r\n path_calib = os.path.join(path, mode, 'calib', seq_id+'.txt')\r\n\r\ndef object_data(path,mode='training'):\r\n #for i in range(0,tracking_train_seq+1):\r\n #seq_id=str(i).zfill(4)\r\n if mode=='training':\r\n if not os.path.exists(os.path.join(path, mode,'front_view_label')):\r\n os.mkdir(os.path.join(path, mode,'front_view_label'))\r\n\r\n save_path=os.path.join(path, mode,'front_view_label')\r\n path_image=os.path.join(path, mode,'image_2')\r\n path_velo=os.path.join(path, mode,'velodyne')\r\n path_label=os.path.join(path, mode,'label_2')\r\n #path_det=os.path.join(path, mode,'det_2')\r\n path_calib=os.path.join(path, mode,'calib')\r\n\r\n process_object(impath=path_image,velopath=path_velo,labelpath=path_label,calibpath=path_calib,savepath=save_path)\r\n else:\r\n path_image = os.path.join(path, mode, 'image_2')\r\n path_velo = os.path.join(path, mode, 'velodyne')\r\n #path_label = os.path.join(path, mode, 'label_2', seq_id) #no in test\r\n #path_det = 
os.path.join(path, mode, 'det_2', seq_id)\r\n path_calib = os.path.join(path, mode, 'calib')\r\n\r\ndef verify_tracking(path,mode='training'):\r\n for i in range(1,tracking_train_seq+1):\r\n seq_id=str(i).zfill(4)\r\n if mode=='training':\r\n if not os.path.exists(os.path.join(path, mode,'front_view_label')):\r\n os.mkdir(os.path.join(path, mode,'front_view_label'))\r\n\r\n save_path=os.path.join(path, mode,'front_view_label',seq_id)\r\n path_image=os.path.join(path, mode,'image_02',seq_id)\r\n path_velo=os.path.join(path, mode,'velodyne',seq_id)\r\n path_oxts=os.path.join(path, mode,'oxts',seq_id+'.txt')\r\n path_label=os.path.join(path, mode,'label_02',seq_id+'.txt')\r\n path_det=os.path.join(path, mode,'det_02',seq_id)\r\n path_calib=os.path.join(path, mode,'calib',seq_id+'.txt')\r\n\r\n verify_process(impath=path_image,velopath=path_velo,oxtspath=path_oxts,labelpath=path_label,calibpath=path_calib,savepath=save_path)\r\n else:\r\n path_image = os.path.join(path, mode, 'image_02', seq_id)\r\n path_velo = os.path.join(path, mode, 'velodyne', seq_id)\r\n path_oxts = os.path.join(path, mode, 'oxts', seq_id+'.txt')\r\n #path_label = os.path.join(path, mode, 'label_02', seq_id) #no in test\r\n path_det = os.path.join(path, mode, 'det_02', seq_id)\r\n path_calib = os.path.join(path, mode, 'calib', seq_id+'.txt')\r\n\r\ndef verify_object(path,mode='training'):\r\n if mode=='training':\r\n save_path=os.path.join(path, mode,'front_view_label')\r\n path_image=os.path.join(path, mode,'image_2')\r\n path_label=os.path.join(path, mode,'label_2')\r\n #print(save_path)\r\n #print(path_image)\r\n #print(path_label)\r\n num=len(os.listdir(path_image))\r\n for idx in range(0,num):\r\n img=cv2.imread(path_image+'/'+str(idx).zfill(6)+'.png')\r\n #print(path_image+'/'+str(idx).zfill(6)+'.png')\r\n [h,w,c]=img.shape\r\n label_old=os.path.join(path_label,str(idx).zfill(6)+'.txt')\r\n label_new=os.path.join(save_path,str(idx).zfill(6)+'.txt')\r\n file = open(label_old, 'r')\r\n old = file.readlines()\r\n file.close()\r\n\r\n for o in old:\r\n #print(o)\r\n o=o.strip('\\n').split(' ')\r\n xmin = int(float(o[4]))\r\n ymin = int(float(o[5]))\r\n xmax = int(float(o[6]))\r\n ymax = int(float(o[7]))\r\n img = cv2.rectangle(img, (xmin, ymin), (xmax, ymax), (255, 0, 0,), 2)\r\n\r\n file = open(label_new, 'r')\r\n new = file.readlines()\r\n file.close()\r\n\r\n for n in new:\r\n n=n.strip('\\n').split(' ')\r\n xmid = float(n[1])\r\n ymid = float(n[2])\r\n width = float(n[3])\r\n height = float(n[4])\r\n xmin = int((xmid - width / 2) * w)\r\n xmax = int((xmid + width / 2) * w)\r\n ymin = int((ymid - height / 2) * h)\r\n ymax = int((ymid + height / 2) * h)\r\n img = cv2.rectangle(img, (xmin + 5, ymin + 5), (xmax - 5, ymax - 5), (0, 0, 255,), 2)\r\n cv2.imshow(\"\",img)\r\n cv2.waitKey()\r\n else:\r\n path_image = os.path.join(path, mode, 'image_02')\r\n path_velo = os.path.join(path, mode, 'velodyne')\r\n path_det = os.path.join(path, mode, 'det_02')\r\n path_calib = os.path.join(path, mode, 'calib')\r\n\r\ndef verify_process(impath,oxtspath,velopath,labelpath,calibpath,savepath):\r\n if not os.path.exists(savepath):\r\n os.mkdir(savepath)\r\n #single frame do not need imu, imu contains pose calculation\r\n imglist=os.listdir(impath) # total frame\r\n tframe=len(imglist)\r\n labellist=load_tracking_label(labelpath)\r\n for idx in range(0,tframe):\r\n #output_label(savepath, str(idx).zfill(6), None, None)\r\n img=cv2.imread(os.path.join(impath,str(idx).zfill(6))+'.png')\r\n [h,w,c]=img.shape\r\n for j in labellist:\r\n if 
j[0][-1]==str(idx):\r\n for cl in j:\r\n #img=img\r\n img=cv2.rectangle(img,(cl[3],cl[4]),(cl[5],cl[6]),(255,0,0,),2)\r\n #print(os.path.exists(os.path.join(impath,str(idx).zfill(6))+'.png'))\r\n newlabelpath=os.path.join(impath,str(idx).zfill(6)).replace('image_02','front_view_label')+'.txt'\r\n #print(newlabelpath)\r\n file=open(newlabelpath,'r')\r\n newlabels=file.readlines()\r\n #print(len(newlabels))\r\n file.close()\r\n for nl in newlabels:\r\n nl=nl.strip('\\n').split(' ')\r\n xmid=float(nl[1])\r\n ymid=float(nl[2])\r\n width=float(nl[3])\r\n height=float(nl[4])\r\n xmin=int((xmid-width/2)*w)\r\n xmax=int((xmid+width/2)*w)\r\n ymin = int((ymid - height / 2) * h)\r\n ymax = int((ymid + height / 2) * h)\r\n #print((xmin, ymin), (xmax, ymax))\r\n img = cv2.rectangle(img, (xmin+5, ymin+5), (xmax-5, ymax-5), (0, 0, 255,), 2)\r\n cv2.imshow(\"\",img)\r\n cv2.waitKey()\r\n\r\ndef process_object(impath,velopath,labelpath,calibpath,savepath):\r\n if not os.path.exists(savepath):\r\n os.mkdir(savepath)\r\n imglist=os.listdir(impath) # total frame\r\n tframe=len(imglist)\r\n #velolist=os.listdir(velopath)\r\n for idx in range(0,tframe):\r\n img=cv2.imread(os.path.join(impath,str(idx).zfill(6)+'.png'))\r\n csavepath=os.path.join(savepath,str(idx).zfill(6)+'.txt')\r\n #print(csavepath)\r\n #'''\r\n f_new_label=open(csavepath,'w')\r\n [h,w,c]=img.shape\r\n clabel=os.path.join(labelpath,str(idx).zfill(6)+'.txt')\r\n f_label=open(clabel,'r')\r\n cl=f_label.readlines()\r\n f_label.close()\r\n for l in cl:\r\n l=l.strip('\\n').split(' ')\r\n #print(l)\r\n cls=l[0]\r\n coolusion=l[2] # 0 = visible, 1 = partly occluded, 2 = fully occluded, 3 = unknown\r\n if cls!='DontCare' and cls!='Misc' and (coolusion=='0' or coolusion=='1'):\r\n angle=float(l[3])\r\n #xmin = int(float(l[4]))\r\n #xmax = int(float(l[5]))\r\n #ymin = int(float(l[6]))\r\n #ymax = int(float(l[7]))\r\n xmid = str((float(l[4]) + float(l[6])) / 2 / w)\r\n ymid = str((float(l[5]) + float(l[7])) / 2 / h)\r\n width = str(abs(float(l[6]) - float(l[4])) / w)\r\n height = str(abs(float(l[7]) - float(l[5])) / h)\r\n coord=xmid+' '+ymid+' '+width+' '+height\r\n #print(coord)\r\n #f_new_label.write(str(original_class_dic.get(l[0]))+' '+coord+'\\n') # 6 classes\r\n f_new_label.write(str(original_class_dic.get(l[0])) + ' ' + coord + '\\n') #3 classes\r\n\r\n f_new_label.close()\r\n\r\ndef process(impath,oxtspath,velopath,labelpath,calibpath,savepath):\r\n if not os.path.exists(savepath):\r\n os.mkdir(savepath)\r\n #single frame do not need imu, imu contains pose calculation\r\n imglist=os.listdir(impath) # total frame\r\n tframe=len(imglist)\r\n #calib=load_calib(calibpath)\r\n imu=load_oxts(oxtspath)\r\n velolist=os.listdir(velopath)\r\n labellist=load_tracking_label(labelpath)\r\n #print(len(imglist),len(velolist),len(labellist))\r\n #iter according to image list, create empty files\r\n for idx in range(0,tframe): #\r\n output_label(savepath, str(idx).zfill(6), None, None)\r\n # iter according to label list for non-empty labels\r\n for idx in labellist:#range(0,tframe):\r\n #print(idx)\r\n #print('i:',i)\r\n #print(os.path.join(impath, idx[0][-1].zfill(6)+'.png'))\r\n im = cv2.imread(os.path.join(impath, idx[0][-1].zfill(6)+'.png'))\r\n #print('input:',savepath, idx[0][-1].zfill(6), idx, im.shape)\r\n output_label(savepath, idx[0][-1].zfill(6), idx, im.shape)\r\n\r\ndef transcoord(l,s):\r\n xmid=str(float(l[3]+l[5])/2/s[1])\r\n ymid=str(float(l[4]+l[6])/2/s[0])\r\n width=str(abs(l[5]-l[3])/s[1])\r\n height=str(abs(l[6]-l[4])/s[0])\r\n return 
xmid+\" \"+ymid+\" \"+width+\" \"+height\r\n\r\ndef output_label(savepath,img_id,llist,size):\r\n\r\n filename=os.path.join(savepath,img_id+'.txt')\r\n if llist==None or size==None:\r\n f = open(filename, 'w')\r\n f.close()\r\n else:\r\n #print(filename)\r\n #print(llist)\r\n #'''\r\n f=open(filename,'w')\r\n if llist != 'empty':\r\n for l in llist:\r\n coord=transcoord(l,size)\r\n #print(original_class_dic.get(l[0]),coord) # 0=car 1=people\r\n #print(combine_class_dic.get(l[0]), coord) # 0=car 1=people 2=cyclist\r\n #f.write(str(original_class_dic.get(l[0]))+' '+coord+'\\n')# 6 classes\r\n f.write(str(combine_class_dic.get(l[0]))+' '+coord+'\\n') #3 classes\r\n f.close()\r\n #'''\r\n\r\n\r\ndef load_oxts(path):\r\n assert os.path.exists(path)\r\n imu=utils.load_oxts_packets_and_poses(path)\r\n\r\n\r\ndef load_object_label(path):\r\n assert os.path.exists(path)\r\n with open(path, 'r') as f:\r\n labels = []\r\n last_frame = 0\r\n current = []\r\n for line in f.readlines():\r\n line=line.split(' ')\r\n if line[1]=='-1' and line[2]=='DontCare':# ignored objects\r\n continue\r\n if line[4]=='2' or line[4]=='3': #0 = visible, 1 = partly occluded, 2 = fully occluded, 3 = unknown\r\n continue\r\n #print(line)\r\n frame_id=line[0]\r\n cal = line[2]\r\n occlusion = int(float(line[3]))\r\n angle = float(line[5])\r\n x1 = int(float(line[6]))\r\n y1 = int(float(line[7]))\r\n x2 = int(float(line[8]))\r\n y2 = int(float(line[9]))\r\n if frame_id==last_frame:\r\n #if current:\r\n current.append([cal,occlusion,angle,x1,y1,x2,y2,frame_id])\r\n else:\r\n if current:\r\n labels.append(current)\r\n current=[]\r\n #else:\r\n # labels.append('empty')\r\n last_frame=frame_id\r\n current.append([cal, occlusion, angle, x1, y1, x2, y2,frame_id])\r\n if current:\r\n labels.append(current)\r\n #print(labels[426])\r\n return(labels)\r\n\r\ndef load_tracking_label(path):\r\n assert os.path.exists(path)\r\n with open(path, 'r') as f:\r\n labels = []\r\n last_frame = 0\r\n current = []\r\n for line in f.readlines():\r\n line=line.split(' ')\r\n if line[2]=='DontCare' or line[2]=='Misc': #remove ignored objects\r\n continue\r\n if line[4]=='2' or line[4]=='3': #remove occlude objects\r\n continue\r\n\r\n #print(line)\r\n frame_id=line[0]\r\n cal = line[2]\r\n occlusion = int(float(line[3]))\r\n angle = float(line[5])\r\n x1 = int(float(line[6]))\r\n y1 = int(float(line[7]))\r\n x2 = int(float(line[8]))\r\n y2 = int(float(line[9]))\r\n if frame_id==last_frame:\r\n #if current:\r\n current.append([cal,occlusion,angle,x1,y1,x2,y2,frame_id])\r\n else:\r\n if current:\r\n labels.append(current)\r\n current=[]\r\n #else:\r\n # labels.append('empty')\r\n last_frame=frame_id\r\n current.append([cal, occlusion, angle, x1, y1, x2, y2,frame_id])\r\n if current:\r\n labels.append(current)\r\n #print(labels[426])\r\n return(labels)\r\n\r\ndef load_calib(path):\r\n \"\"\"Load and compute intrinsic and extrinsic calibration parameters.\"\"\"\r\n # We'll build the calibration parameters as a dictionary, then\r\n # convert it to a namedtuple to prevent it from being modified later\r\n data = {}\r\n\r\n # Load the calibration file\r\n #calib_filepath = os.path.join(self.sequence_path + '.txt', 'calib.txt')\r\n filedata = utils.read_calib_file(path)\r\n\r\n # Create 3x4 projection matrices\r\n P_rect_00 = np.reshape(filedata['P0'], (3, 4))\r\n P_rect_10 = np.reshape(filedata['P1'], (3, 4))\r\n P_rect_20 = np.reshape(filedata['P2'], (3, 4))\r\n P_rect_30 = np.reshape(filedata['P3'], (3, 4))\r\n\r\n data['P_rect_00'] = P_rect_00\r\n 
data['P_rect_10'] = P_rect_10\r\n data['P_rect_20'] = P_rect_20\r\n data['P_rect_30'] = P_rect_30\r\n\r\n # Compute the rectified extrinsics from cam0 to camN\r\n T1 = np.eye(4)\r\n T1[0, 3] = P_rect_10[0, 3] / P_rect_10[0, 0]\r\n T2 = np.eye(4)\r\n T2[0, 3] = P_rect_20[0, 3] / P_rect_20[0, 0]\r\n T3 = np.eye(4)\r\n T3[0, 3] = P_rect_30[0, 3] / P_rect_30[0, 0]\r\n\r\n # Compute the velodyne to rectified camera coordinate transforms\r\n data['T_cam0_velo'] = np.reshape(filedata['Tr'], (3, 4))\r\n data['T_cam0_velo'] = np.vstack([data['T_cam0_velo'], [0, 0, 0, 1]])\r\n data['T_cam1_velo'] = T1.dot(data['T_cam0_velo'])\r\n data['T_cam2_velo'] = T2.dot(data['T_cam0_velo'])\r\n data['T_cam3_velo'] = T3.dot(data['T_cam0_velo'])\r\n\r\n # Compute the camera intrinsics\r\n data['K_cam0'] = P_rect_00[0:3, 0:3]\r\n data['K_cam1'] = P_rect_10[0:3, 0:3]\r\n data['K_cam2'] = P_rect_20[0:3, 0:3]\r\n data['K_cam3'] = P_rect_30[0:3, 0:3]\r\n\r\n # Compute the stereo baselines in meters by projecting the origin of\r\n # each camera frame into the velodyne frame and computing the distances\r\n # between them\r\n p_cam = np.array([0, 0, 0, 1])\r\n p_velo0 = np.linalg.inv(data['T_cam0_velo']).dot(p_cam)\r\n p_velo1 = np.linalg.inv(data['T_cam1_velo']).dot(p_cam)\r\n p_velo2 = np.linalg.inv(data['T_cam2_velo']).dot(p_cam)\r\n p_velo3 = np.linalg.inv(data['T_cam3_velo']).dot(p_cam)\r\n\r\n data['b_gray'] = np.linalg.norm(p_velo1 - p_velo0) # gray baseline\r\n data['b_rgb'] = np.linalg.norm(p_velo3 - p_velo2) # rgb baseline\r\n return data\r\n\r\n# generate labels\r\n\r\ntracking_data(os.path.join(filedir,model[1]))\r\n\r\n#verify labels\r\n#verify_tracking(os.path.join(filedir,model[1]))\r\n\r\n#object_data(os.path.join(filedir,model[0]))\r\n#verify_object(os.path.join(filedir,model[0]))\r\n","sub_path":"labeler.py","file_name":"labeler.py","file_ext":"py","file_size_in_byte":17348,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"29917455","text":"#############################START LICENSE##########################################\n# Copyright (C) 2019 Pedro Martinez\n#\n# # This program is free software: you can redistribute it and/or modify\n# # it under the terms of the GNU Affero General Public License as published\n# # by the Free Software Foundation, either version 3 of the License, or\n# # (at your option) any later version (the \"AGPL-3.0+\").\n#\n# # This program is distributed in the hope that it will be useful,\n# # but WITHOUT ANY WARRANTY; without even the implied warranty of\n# # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# # GNU Affero General Public License and the additional terms for more\n# # details.\n#\n# # You should have received a copy of the GNU Affero General Public License\n# # along with this program. If not, see .\n#\n# # ADDITIONAL TERMS are also included as allowed by Section 7 of the GNU\n# # Affero General Public License. These additional terms are Sections 1, 5,\n# # 6, 7, 8, and 9 from the Apache License, Version 2.0 (the \"Apache-2.0\")\n# # where all references to the definition \"License\" are instead defined to\n# # mean the AGPL-3.0+.\n#\n# # You should have received a copy of the Apache-2.0 along with this\n# # program. 
If not, see .\n#############################END LICENSE##########################################\n\n\n###########################################################################################\n#\n# Script name: qc-lightrad\n#\n# Description: This script performs automated EPID QC of the QC-3 phantom developed in Manitoba.\n# There are other tools out there that do this but generally the ROI are fixed whereas this script\n# aims to dynamically identify them using machine vision and the BBs in the phantom.\n#\n# Example usage: python qc-lightrad \"/file/\"\n#\n# Using MED-TEC MT-IAD-1 phantom\n#\n# Author: Pedro Martinez\n# pedro.enrique.83@gmail.com\n# 5877000722\n# Date:2019-04-09\n#\n###########################################################################################\n\nimport argparse\nimport os\nfrom datetime import datetime\nfrom sys import platform\nfrom tqdm import tqdm\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom matplotlib.backends.backend_pdf import PdfPages\nfrom PIL import Image\nfrom skimage.feature import blob_log\nimport pydicom\nimport roi_sel_old as roi\nimport utils as u\nimport inquirer\nfrom timeit import default_timer as timer\nfrom outlier import find_outlier_pixels\n\n\ndef argmax2d(X):\n n, m = X.shape\n x_ = np.ravel(X)\n k = np.argmax(x_)\n i, j = k // m, k % m\n return i, j\n\n\ndef read_dicom(filenm):\n dataset = pydicom.dcmread(filenm)\n now = datetime.now()\n\n ArrayDicom = np.zeros(\n (dataset.Rows, dataset.Columns), dtype=dataset.pixel_array.dtype\n )\n ArrayDicom = dataset.pixel_array\n SID = dataset.RTImageSID\n print(\"array_shape=\", np.shape(ArrayDicom))\n height = np.shape(ArrayDicom)[0]\n width = np.shape(ArrayDicom)[1]\n dx = 1 / (SID * (1 / dataset.ImagePlanePixelSpacing[0]) / 1000)\n dy = 1 / (SID * (1 / dataset.ImagePlanePixelSpacing[1]) / 1000)\n print(\"pixel spacing row [mm]=\", dx)\n print(\"pixel spacing col [mm]=\", dy)\n\n # creating the figure extent based on the image dimensions, we divide by 10 to get the units in cm\n extent = (\n 0,\n 0 + (ArrayDicom.shape[1] * dx / 10),\n 0 + (ArrayDicom.shape[0] * dy / 10),\n 0,\n )\n\n # creating the figure extent list for the BB images\n list_extent = []\n\n print('np.shape_dim0',np.shape(ArrayDicom)[0])\n if np.shape(ArrayDicom)[0] == 768: #Clinac\n ioptn=1\n elif np.shape(ArrayDicom)[0] == 1190: #Edge EPID\n ioptn=2\n elif np.shape(ArrayDicom)[0] == 1280: ##Varian XI (Edmonton)\n ioptn=3\n\n intercept = dataset[0x0028, 0x1052].value\n slope = dataset[0x0028, 0x1053].value\n ArrayDicom = ArrayDicom * slope + intercept\n\n\n\n if ioptn == 1:\n height, width = ArrayDicom.shape\n ArrayDicom_mod = ArrayDicom[:, width // 2 - height // 2 : width // 2 + height // 2]\n else:\n ArrayDicom_mod = ArrayDicom\n\n # we take a diagonal profile to avoid phantom artifacts\n # im_profile = ArrayDicom_mod.diagonal()\n\n # test to make sure image is displayed correctly BBs are high amplitude against dark background\n ctr_pixel = ArrayDicom_mod[height // 2, width // 2]\n corner_pixel = ArrayDicom_mod[0, 0]\n\n\n # processing of hot BBs\n hot_pixels, ArrayDicom = find_outlier_pixels(ArrayDicom)\n\n # fig = plt.figure()\n # plt.imshow(fixed_image)\n # # plt.plot(profile_horz_inv)\n # plt.show(block=True)\n\n # print('max value', np.max(ArrayDicom))\n # #where do maximum values occur\n # print(argmax2d(ArrayDicom))\n # print(ArrayDicom[argmax2d(ArrayDicom)[0],argmax2d(ArrayDicom)[1]])\n\n\n ArrayDicom = u.norm01(ArrayDicom)\n\n if ctr_pixel > corner_pixel:\n ArrayDicom = 
u.range_invert(ArrayDicom)\n\n\n\n\n #Here we need to select the correct ROIs for the image processing since in the FC-2 phantom the BBs are in the corners we will select more than 1 ROI per-BB maybe??\n questions = [\n inquirer.List(\n \"type\",\n message=\"Select the phantom\",\n choices=[\n \"IsoAlign\",\n \"FC-2\",\n \"GP-1\",\n ],\n )\n ]\n answers = inquirer.prompt(questions)\n print(answers[\"type\"])\n print('ioptn',ioptn)\n\n if answers[\"type\"] == \"IsoAlign\":\n profiles, imcirclist, xdet, ydet, list_extent = roi.roi_sel_IsoAlign(ArrayDicom,ioptn,dx,dy)\n elif answers[\"type\"] == \"FC-2\":\n profiles, imcirclist, xdet, ydet, list_extent = roi.roi_sel_FC2(ArrayDicom,ioptn,dx,dy)\n elif answers[\"type\"] == \"GP-1\":\n start = timer()\n profiles, imcirclist, xdet, ydet, list_extent = roi.roi_sel_GP1(ArrayDicom,ioptn,dx,dy)\n dt = timer() - start\n print(\"File processed in %f s\" % dt)\n\n\n\n \n # tolerance levels to change at will\n tol = 1.0 # tolearance level\n act = 2.0 # action level\n\n\n\n k = 0\n fig = plt.figure(figsize=(8, 12)) # this figure will hold the BBs\n plt.subplots_adjust(hspace=0.35)\n\n # creating the page to write the results\n dirname = os.path.dirname(filenm)\n\n\n if platform == \"linux\":\n output_flnm=dirname+ \"/\"+ now.strftime(\"%d-%m-%Y_%H:%M_\")+ dataset[0x0008, 0x1010].value+ \"_Lightrad_report.pdf\"\n elif platform == \"win32\":\n output_flnm=dataset[0x0008, 0x1010].value+ \"_Lightrad_report.pdf\"\n\n print('Writing to:',output_flnm)\n\n with PdfPages(\n output_flnm\n ) as pdf:\n Page = plt.figure(figsize=(4, 5))\n Page.text(0.45, 0.9, \"Report\", size=18)\n if answers[\"type\"] == \"IsoAlign\":\n phantom_distance = 3.0 # distance from the BB to the edge of the phantom in mm\n kk = 0 # counter for data points\n for profile in profiles:\n profile_inv = u.range_invert(profile)\n _, index = u.find_nearest(profile_inv, 0.5) # find the 50% amplitude point\n # value_near, index = find_nearest(profile, 0.5) # find the 50% amplitude point\n if ( # pylint: disable = consider-using-in\n k == 0 or k == 1 or k == 4 or k == 5\n ): # there are the BBs in the horizontal\n offset_value_y = round(\n abs((ydet[k] - index) * (dy / 10)) - phantom_distance, 2\n )\n\n txt = str(offset_value_y)\n # print('offset_value_y=', offset_value_y)\n if abs(offset_value_y) <= tol:\n Page.text(\n 0.1,\n 0.8 - kk / 10,\n \"Point\" + str(kk + 1) + \" offset=\" + txt + \" mm\",\n color=\"g\",\n )\n elif abs(offset_value_y) > tol and abs(offset_value_y) <= act:\n Page.text(\n 0.1,\n 0.8 - kk / 10,\n \"Point\" + str(kk + 1) + \" offset=\" + txt + \" mm\",\n color=\"y\",\n )\n else:\n Page.text(\n 0.1,\n 0.8 - kk / 10,\n \"Point\" + str(kk + 1) + \" offset=\" + txt + \" mm\",\n color=\"r\",\n )\n kk = kk + 1\n\n ax = fig.add_subplot(\n 4, 2, k + 1\n ) # plotting all the figures in a single plot\n\n ax.imshow(\n np.array(imcirclist[k], dtype=np.uint8) / 255,\n extent=list_extent[k],\n origin=\"upper\",\n )\n ax.scatter(\n list_extent[k][0] + xdet[k] * dx / 100,\n list_extent[k][3] + ydet[k] * dy / 100,\n s=30,\n marker=\"P\",\n color=\"y\",\n )\n ax.set_title(\"Bib=\" + str(k + 1))\n ax.axhline(\n list_extent[k][3] + index * dy / 100, color=\"r\", linestyle=\"--\"\n )\n ax.set_xlabel(\"x distance [cm]\")\n ax.set_ylabel(\"y distance [cm]\")\n else:\n offset_value_x = round(\n abs((xdet[k] - index) * (dx / 10)) - phantom_distance, 2\n )\n\n txt = str(offset_value_x)\n if abs(offset_value_x) <= tol:\n # print('1')\n Page.text(\n 0.1,\n 0.8 - kk / 10,\n \"Point\" + str(kk + 1) + \" 
offset=\" + txt + \" mm\",\n color=\"g\",\n )\n elif abs(offset_value_x) > tol and abs(offset_value_x) <= act:\n # print('2')\n Page.text(\n 0.1,\n 0.8 - kk / 10,\n \"Point\" + str(kk + 1) + \" offset=\" + txt + \" mm\",\n color=\"y\",\n )\n else:\n # print('3')xdet[0]\n Page.text(\n 0.1,\n 0.8 - kk / 10,\n \"Point\" + str(kk + 1) + \" offset=\" + txt + \" mm\",\n color=\"r\",\n )\n kk = kk + 1\n\n ax = fig.add_subplot(\n 4, 2, k + 1\n ) # plotting all the figures in a single plot\n\n ax.imshow(\n np.array(imcirclist[k], dtype=np.uint8) / 255,\n extent=list_extent[k],\n origin=\"upper\",\n )\n ax.scatter(\n list_extent[k][0] + xdet[k] * dx / 100,\n list_extent[k][3] + ydet[k] * dy / 100,\n s=30,\n marker=\"P\",\n color=\"y\",\n )\n ax.set_title(\"Bib=\" + str(k + 1))\n ax.axvline(\n list_extent[k][0] + index * dx / 100, color=\"r\", linestyle=\"--\"\n )\n ax.set_xlabel(\"x distance [cm]\")\n ax.set_ylabel(\"y distance [cm]\")\n\n k = k + 1\n elif answers[\"type\"] == \"GP-1\":\n phantom_distance = 10.0 # distance from the BB to the edge of the phantom in mm\n kk = 0 # counter for data points\n for profile in profiles:\n profile_inv = u.range_invert(profile)\n _, index = u.find_nearest(profile_inv, 0.5) # find the 50% amplitude point\n # value_near, index = find_nearest(profile, 0.5) # find the 50% amplitude point\n if ( # pylint: disable = consider-using-in\n k == 0 or k == 2 \n ): # there are the BBs in the horizontal\n offset_value_y = round(\n abs((ydet[k] - index) * (dy / 10)) - phantom_distance, 2\n )\n\n txt = str(offset_value_y)\n # print('offset_value_y=', offset_value_y)\n if abs(offset_value_y) <= tol:\n Page.text(\n 0.1,\n 0.8 - kk / 10,\n \"Point\" + str(kk + 1) + \" offset=\" + txt + \" mm\",\n color=\"g\",\n )\n elif abs(offset_value_y) > tol and abs(offset_value_y) <= act:\n Page.text(\n 0.1,\n 0.8 - kk / 10,\n \"Point\" + str(kk + 1) + \" offset=\" + txt + \" mm\",\n color=\"y\",\n )\n else:\n Page.text(\n 0.1,\n 0.8 - kk / 10,\n \"Point\" + str(kk + 1) + \" offset=\" + txt + \" mm\",\n color=\"r\",\n )\n kk = kk + 1\n\n ax = fig.add_subplot(\n 4, 2, k + 1\n ) # plotting all the figures in a single plot\n\n ax.imshow(\n np.array(imcirclist[k], dtype=np.uint8) / 255,\n extent=list_extent[k],\n origin=\"upper\",\n )\n ax.scatter(\n list_extent[k][0] + xdet[k] * dx / 100,\n list_extent[k][3] + ydet[k] * dy / 100,\n s=30,\n marker=\"P\",\n color=\"y\",\n )\n ax.set_title(\"Bib=\" + str(k + 1))\n ax.axhline(\n list_extent[k][3] + index * dy / 100, color=\"r\", linestyle=\"--\"\n )\n ax.set_xlabel(\"x distance [cm]\")\n ax.set_ylabel(\"y distance [cm]\")\n else:\n offset_value_x = round(\n abs((xdet[k] - index) * (dx / 10)) - phantom_distance, 2\n )\n\n txt = str(offset_value_x)\n if abs(offset_value_x) <= tol:\n # print('1')\n Page.text(\n 0.1,\n 0.8 - kk / 10,\n \"Point\" + str(kk + 1) + \" offset=\" + txt + \" mm\",\n color=\"g\",\n )\n elif abs(offset_value_x) > tol and abs(offset_value_x) <= act:\n # print('2')\n Page.text(\n 0.1,\n 0.8 - kk / 10,\n \"Point\" + str(kk + 1) + \" offset=\" + txt + \" mm\",\n color=\"y\",\n )\n else:\n # print('3')\n Page.text(\n 0.1,\n 0.8 - kk / 10,\n \"Point\" + str(kk + 1) + \" offset=\" + txt + \" mm\",\n color=\"r\",\n )\n kk = kk + 1\n\n ax = fig.add_subplot(\n 4, 2, k + 1\n ) # plotting all the figures in a single plot\n\n ax.imshow(\n np.array(imcirclist[k], dtype=np.uint8) / 255,\n extent=list_extent[k],\n origin=\"upper\",\n )\n ax.scatter(\n list_extent[k][0] + xdet[k] * dx / 100,\n list_extent[k][3] + ydet[k] * dy / 100,\n s=30,\n 
marker=\"P\",\n color=\"y\",\n )\n ax.set_title(\"Bib=\" + str(k + 1))\n ax.axvline(\n list_extent[k][0] + index * dx / 100, color=\"r\", linestyle=\"--\"\n )\n ax.set_xlabel(\"x distance [cm]\")\n ax.set_ylabel(\"y distance [cm]\")\n\n k = k + 1\n elif answers[\"type\"] == \"FC-2\":\n phantom_distance = 7.0710678 # distance from the BB to the edge of the phantom in mm\n\n for kk in range(0,4):\n profile_inv1 = u.range_invert(profiles[2*kk])\n profile_inv2 = u.range_invert(profiles[2*kk+1])\n _, index_b = u.find_nearest(profile_inv1, 0.5) # find the 50% amplitude point in the x direction\n _, index_a = u.find_nearest(profile_inv2, 0.5) # find the 50% amplitude point in the y direction\n # value_near, index = find_nearest(profile, 0.5) # find the 50% amplitude point\n\n offset_value_y = round(abs((ydet[kk] - index_b) * (dy / 10)) - phantom_distance, 2)\n offset_value_x = round(abs((xdet[kk] - index_a) * (dx / 10)) - phantom_distance, 2)\n\n\n txt_x = str(offset_value_x)\n txt_y = str(offset_value_y)\n\n\n if abs(offset_value_y) <= tol:\n Page.text(\n 0.1,\n 0.8 - 2*kk / 10,\n \"Point \" + str(kk + 1) + \" vertical offset=\" + txt_y + \" mm\",\n color=\"g\",\n )\n elif abs(offset_value_y) > tol and abs(offset_value_y) <= act:\n Page.text(\n 0.1,\n 0.8 - 2*kk / 10,\n \"Point \" + str(kk + 1) + \" vertical offset=\" + txt_y + \" mm\",\n color=\"y\",\n )\n else:\n Page.text(\n 0.1,\n 0.8 - 2*kk / 10,\n \"Point \" + str(kk + 1) + \" vertical offset=\" + txt_y + \" mm\",\n color=\"r\",\n )\n\n\n\n\n\n if abs(offset_value_x) <= tol:\n Page.text(\n 0.1,\n 0.8 - (2*kk + 1) / 10,\n \"Point \" + str(kk + 1) + \" horizontal offset=\" + txt_x + \" mm\",\n color=\"g\",\n )\n elif abs(offset_value_x) > tol and abs(offset_value_x) <= act:\n Page.text(\n 0.1,\n 0.8 - (2*kk + 1) / 10,\n \"Point \" + str(kk + 1) + \" horizontal offset=\" + txt_x + \" mm\",\n color=\"y\",\n )\n else:\n Page.text(\n 0.1,\n 0.8 - (2*kk + 1) / 10,\n \"Point \" + str(kk + 1) + \" horizontal offset=\" + txt_x + \" mm\",\n color=\"r\",\n )\n\n\n\n\n\n\n \n\n ax = fig.add_subplot(\n 4, 2, kk + 1\n ) # plotting all the figures in a single plot\n\n ax.imshow(\n np.array(imcirclist[kk], dtype=np.uint8) / 255,\n extent=list_extent[kk],\n origin=\"upper\",\n )\n ax.scatter(\n list_extent[kk][0] + xdet[kk] * dx / 100,\n list_extent[kk][3] + ydet[kk] * dy / 100,\n s=30,\n marker=\"P\",\n color=\"y\",\n )\n ax.set_title(\"Bib=\" + str(kk + 1))\n ax.axhline(\n list_extent[kk][3] + index_b * dy / 100, color=\"r\", linestyle=\"--\"\n )\n\n ax.axvline(\n list_extent[kk][0] + index_a * dx / 100, color=\"r\", linestyle=\"--\"\n )\n\n ax.set_xlabel(\"x distance [cm]\")\n ax.set_ylabel(\"y distance [cm]\")\n\n\n pdf.savefig()\n pdf.savefig(fig)\n\n # we now need to select a horizontal and a vertical profile to find the edge of the field from an image\n # for the field size calculation\n im = Image.fromarray(255 * ArrayDicom)\n\n if ioptn == 1:\n PROFILE = {\n \"horizontal\": 270,\n \"vertical\": 430,\n } # location to extract the horizontal and vertical profiles if this is a linac\n elif ioptn == 2:\n PROFILE = {\n \"horizontal\": 470,\n \"vertical\": 510,\n } # location to extract the horizontal and vertical profiles if this is a TrueBeam Edge of Varian XI\n elif ioptn == 3:\n PROFILE = {\n \"horizontal\": 470,\n \"vertical\": 470,\n } # location to extract the horizontal and vertical profiles if this is a TrueBeam Edge of Varian XI\n \n\n profilehorz = (\n np.array(im, dtype=np.uint8)[PROFILE[\"horizontal\"], :] / 255\n ) # we need to change these 
limits on a less specific criteria\n profilevert = np.array(im, dtype=np.uint8)[:, PROFILE[\"vertical\"]] / 255\n profile_horz_inv = u.range_invert(profilehorz)\n profile_vrt_inv = u.range_invert(profilevert)\n\n _, index_top = u.find_nearest(\n profilevert[0 : height // 2], 0.5\n ) # finding the edge of the field on the top\n # fig = plt.figure() WORKING HERE\n # plt.imshow(ArrayDicom)\n # plt.plot(profile_horz_inv)\n # plt.show(block=True)\n # exit(0)\n _, index_bot = u.find_nearest(\n profilevert[height // 2 : height], 0.5\n ) # finding the edge of the field on the bottom\n\n _, index_l = u.find_nearest(\n profilehorz[0 : width // 2], 0.5\n ) # finding the edge of the field on the left\n _, index_r = u.find_nearest(\n profilehorz[width // 2 : width], 0.5\n ) # finding the edge of the field on the right\n\n fig2 = plt.figure(\n figsize=(7, 5)\n ) # this figure will show the vertical and horizontal calculated field size\n ax = fig2.subplots()\n ax.imshow(ArrayDicom, extent=extent, origin=\"upper\")\n ax.set_xlabel(\"x distance [cm]\")\n ax.set_ylabel(\"y distance [cm]\")\n\n # adding a vertical arrow; annotate's old s= keyword is gone in Matplotlib >= 3.6, so use text=\n ax.annotate(\n text=\"\",\n xy=(PROFILE[\"vertical\"] * dx / 10, index_top * dy / 10),\n xytext=(PROFILE[\"vertical\"] * dx / 10, (height // 2 + index_bot) * dy / 10),\n arrowprops=dict(arrowstyle=\"<->\", color=\"r\"),\n ) # example on how to plot a double headed arrow\n ax.text(\n (PROFILE[\"vertical\"] + 10) * dx / 10,\n (height // 2) * dy / 10,\n \"Vfs=\"\n + str(round((height // 2 + index_bot - index_top) * dy / 10, 2))\n + \"cm\",\n rotation=90,\n fontsize=14,\n color=\"r\",\n )\n\n # adding a horizontal arrow\n # print(index_l*dx, index_l, PROFILE['horizontal']*dy, PROFILE['horizontal'])\n ax.annotate(\n text=\"\",\n xy=(index_l * dx / 10, PROFILE[\"horizontal\"] * dy / 10),\n xytext=((width // 2 + index_r) * dx / 10, PROFILE[\"horizontal\"] * dy / 10),\n arrowprops=dict(arrowstyle=\"<->\", color=\"r\"),\n ) # example on how to plot a double headed arrow\n ax.text(\n (width // 2) * dx / 10,\n (PROFILE[\"horizontal\"] - 10) * dy / 10,\n \"Hfs=\" + str(round((width // 2 + index_r - index_l) * dx / 10, 2)) + \"cm\",\n rotation=0,\n fontsize=14,\n color=\"r\",\n )\n\n pdf.savefig(fig2)\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument(\"file\", type=str, help=\"Input the Light/Rad file\")\n args = parser.parse_args()\n\n filename = args.file\n\n read_dicom(filename)\n","sub_path":"src/qc-lightrad-old.py","file_name":"qc-lightrad-old.py","file_ext":"py","file_size_in_byte":24521,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"178186947","text":"from django.urls import path\n\nfrom .views import (\n ProjectsAddView,\n ProjectsEditView,\n ProjectsDetail,\n TaskAddEdit,\n TaskDetail,\n SubTaskAddEdit\n)\n\nurlpatterns = [\n path('add', ProjectsAddView.as_view(), name='project-add-view'),\n path('edit/<int:pk>/', ProjectsEditView.as_view(), name='project-edit-view'),\n path('detail/<int:pk>/', ProjectsDetail.as_view(), name='project-detail-view'),\n path(\n '<int:pk>/task-add-edit/',\n TaskAddEdit.as_view(),\n name='task-add-edit'\n ),\n path(\n '<int:pk>/task-add-edit/<int:task_id>/subtask/',\n SubTaskAddEdit.as_view(),\n name='subtask-add-edit'\n ),\n path('task/detail/<int:pk>/', TaskDetail.as_view(), name='task-detail-view'),\n]\n","sub_path":"projecttask/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":789,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
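A note on the URL patterns in the record above: angle-bracket converters such as <int:pk> are easily swallowed when code passes through an HTML pipeline, which is why segments like 'edit/' and '/task-add-edit//subtask/' looked garbled; the restored converter names (<int:pk>, <int:task_id>) are a best-effort reconstruction, not verbatim originals. The rule they follow is standard Django routing: each converter captures one path segment and must match a keyword argument of the view, as in this minimal, self-contained sketch (view and route names here are illustrative):

from django.http import HttpResponse
from django.urls import path

def project_detail(request, pk):
    # pk arrives as an int because the <int:pk> converter parsed it
    return HttpResponse(f'project {pk}')

urlpatterns = [
    path('detail/<int:pk>/', project_detail, name='project-detail'),
]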
+{"seq_id":"409587730","text":"from django.db import transaction\n\nfrom misc.api import InternalAPI\nfrom misc.const import *\nfrom organization.models import Organization\nfrom .models import User, Team\n\n\nclass UserAPI:\n @staticmethod\n def update(user, target_user_id, name=None, chinese_name=None, phone=None):\n try:\n target_user = User.objects.get(id=target_user_id)\n\n if target_user != user:\n return False, ARK_ERRMSG_CONTENT[1201]\n\n if name is not None:\n target_user.name = name\n\n if chinese_name is not None:\n target_user.chinese_name = chinese_name\n\n if phone is not None:\n target_user.phone = phone\n\n target_user.save()\n\n return True, None\n except Exception as e:\n return False, str(e)\n\n @staticmethod\n def get(user, target_user_id):\n try:\n target_user = User.objects.get(id=target_user_id)\n\n if user == target_user:\n return True, None, target_user\n elif user.roles.filter(resource_type=RS_SYS, name=RO_SYS_ADMIN)\\\n .exists():\n return True, None, target_user\n # when the two users belong to the same organization\n elif target_user.organizations & user.organizations:\n return True, None, target_user\n\n return False, ARK_ERRMSG_CONTENT[1201], None\n except Exception as e:\n return False, str(e), None\n\n @staticmethod\n def all(user):\n try:\n if user.roles.filter(\n resource_type=RS_SYS,\n name=RO_SYS_ADMIN\n ).exists():\n return True, None, User.objects.all()\n else:\n ret = list()\n user_orgs = user.organizations\n if user_orgs.count() != 0:\n for user_org in user_orgs:\n if user.roles.filter(\n resource_type=RS_ORG,\n resource_id=user_org.id,\n name=RO_ORG_ADMIN\n ).exists():\n ret.extend(user_org.users)\n # organization members\n else:\n users = user_org.users.exclude(\n roles__name=RO_ORG_ADMIN\n )\n ret.extend(users)\n #ret.extend([user])\n else:\n ret_set = set(ret)\n ret_list = list(ret_set)\n return True, None, ret_list\n else:\n return True, None, [user]\n except Exception as e:\n return False, str(e), None\n\n # @staticmethod\n # def filter(user):\n # pass\n\n\nclass TeamAPI:\n @staticmethod\n def create(user, organization_id, name, description=None):\n try:\n errmsg = list()\n if organization_id == None:\n errmsg.append('Organization ID cannot be empty')\n if name == None or len(name) == 0:\n errmsg.append('Team name cannot be empty')\n if len(errmsg) != 0:\n return False, ','.join(errmsg), None\n\n pm_list = InternalAPI.get_user_permissions_on_resource(\n user, RS_ORG, organization_id\n )\n\n if not pm_list[PM_CREATE_TEAM]:\n return False, ARK_ERRMSG_CONTENT[1201]\n\n org = Organization.objects.get(id=organization_id)\n team = Team(name=name, organization=org)\n\n if description is not None:\n team.description = description\n\n with transaction.atomic():\n team.save()\n InternalAPI.update_resource_and_roles_relationship(\n RS_TEAM, team.id\n )\n\n return True, None\n except Exception as e:\n return False, str(e)\n\n @staticmethod\n def delete(user, team_id):\n try:\n team = Team.objects.get(id=team_id)\n pm_list = InternalAPI.get_user_permissions_on_resource(\n user, RS_TEAM, team.id\n )\n\n if not pm_list[PM_DELETE_TEAM]:\n return False, ARK_ERRMSG_CONTENT[1201]\n\n if User.objects.filter(roles__team=team).\\\n exclude(roles__name__in=[RO_ORG_ADMIN, RO_SYS_ADMIN])\\\n .exists():\n return False, 'There are still regular users in this team'\n\n #if team.user_set.exists():\n #return False, 'users belong to team exist'\n\n Team.objects.filter(id=team.id).delete()\n\n return True, None\n except Exception as e:\n return False, str(e)\n\n @staticmethod\n def update(user, team_id, name=None, description=None):\n try:\n errmsg = list()\n if team_id == None:\n 
errmsg.append('Team ID cannot be empty')\n team = Team.objects.get(id=team_id)\n pm_list = InternalAPI.get_user_permissions_on_resource(\n user, RS_TEAM, team.id\n )\n\n if not pm_list[PM_UPDATE_TEAM]:\n return False, ARK_ERRMSG_CONTENT[1201]\n\n if name is not None:\n team.name = name\n\n if description is not None:\n team.description = description\n team.save()\n return True, None\n except Exception as e:\n return False, str(e)\n\n @staticmethod\n def get(user, team_id):\n try:\n team = Team.objects.get(id=team_id)\n #pm_list = InternalAPI.get_user_permissions_on_resource(\n #user, RS_ORG, team.organization.id\n #)\n pm_list = InternalAPI.get_user_permissions_on_resource(\n user, RS_TEAM, team.id\n )\n if not pm_list[PM_RETRIEVE_TEAM]:\n return False, ARK_ERRMSG_CONTENT[1201], None\n\n return True, None, team\n except Exception as e:\n return False, str(e), None\n\n @staticmethod\n def all(user):\n # try:\n # if user.roles.filter(resource_type=RS_SYS, name=RO_SYS_ADMIN).exists():\n # return True, None, Team.objects.all()\n # elif user.organization and user.roles.filter(resource_type=RS_ORG,\n # resource_id=user.organization.id, name=RO_ORG_ADMIN):\n # return True, None, user.organization.team_set.all()\n # elif user.team:\n # teams = [user.team]\n # else:\n # teams = []\n # return True, None, teams\n # except Exception as e:\n # return False, str(e), None\n try:\n if user.roles.filter(\n resource_type=RS_SYS,\n name=RO_SYS_ADMIN\n ).exists():\n return True, None, Team.objects.all()\n else:\n user_teams = user.teams\n return True, None, user_teams\n except Exception as e:\n return False, str(e), None\n\n @staticmethod\n def get_team_users(user, team_id):\n '''\n Return all users in a team\n :param user:\n :param team_id:\n :return:\n '''\n try:\n team = Team.objects.get(id=team_id)\n # pm_list = InternalAPI.get_user_permissions_on_resource(\n # user, RS_ORG, team.organization.id\n # )\n pm_list = InternalAPI.get_user_permissions_on_resource(\n user, RS_TEAM, team.id\n )\n if pm_list[PM_RETRIEVE_TEAM]:\n return True, None, team.users\n return True, None, [user]\n except Exception as e:\n return False, str(e), None\n\n @staticmethod\n def get_team_permission(user, team_id):\n '''\n A user can view the roles that the users they manage hold on other resources\n :param user: the caller\n :param target_user_id: the user being viewed\n :return:\n '''\n pm_list_sys = InternalAPI.get_user_permissions_on_resource(user, RS_SYS)\n status, errmsg, team = TeamAPI.get(user, team_id)\n if not status:\n return False, 'Failed to get the team', None\n pm_list_org = []\n if team.organization:\n pm_list_org = InternalAPI.get_user_permissions_on_resource(\n user=user,\n resource_type=RS_ORG,\n resource_id=team.organization.id\n )\n # system admin or organization admin\n if pm_list_sys[PM_RETRIEVE_SYSTEM_ROLE] or (\n len(pm_list_org) != 0 and\n pm_list_org[PM_RETRIEVE_ORGANIZATION_ROLE]):\n ret = {}\n return True, None, ret\n return False, ARK_ERRMSG_CONTENT[1201], None\n\n # @staticmethod\n # def filter(user):\n # pass\n","sub_path":"DorneWeb/user/api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":9030,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"538725192","text":"##############################################################\r\n# FILE : largest_and_smallest.py\r\n# WRITER : itai shopen\r\n# EXERCISE : intro2cs ex2 2017-2018\r\n# DESCRIPTION: A simple program that gets an input of three numbers and gives\r\n# back the largest and smallest\r\n#############################################################\r\n\r\n\r\ndef largest_and_smallest(x, y, z):\r\n # this function finds 
the largest and smallest number out of the\r\n # three inputs\r\n num1 = int(x)\r\n num2 = int(y)\r\n num3 = int(z)\r\n if num1 >= num2:\r\n if num1 >= num3:\r\n if num3 >= num2:\r\n return num1, num2\r\n else:\r\n return num1, num3\r\n else:\r\n return num3, num2\r\n else:\r\n if num2 >= num3:\r\n if num3 >= num1:\r\n return num2, num1\r\n else:\r\n return num2, num3\r\n else:\r\n return num3, num1\r\n\r\n\r\n","sub_path":"ex02/largest_and_smallest.py","file_name":"largest_and_smallest.py","file_ext":"py","file_size_in_byte":942,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"254976242","text":"import gl\nfrom database.joueur import Joueur\n\n\n# @Ninja :\n# :regional_indicator_a: +2 if he was not hit on the previous turn, for 1 turn\n# :regional_indicator_d: +1 and gives -1 attack to an enemy of your choice, for 1 turn\n\nclass Ninja(Joueur):\n\n async def attaque(self, ctx):\n if self.hasBeenHit:\n await ctx.send(\"Arrrgh, I was hit last turn, no bonus\")\n else:\n await ctx.send(\"Nobody hit me last turn\")\n await ctx.send(\"+2 attack for me\")\n self.temp_atk_modifier += 2\n try:\n hit, enemy = await self.throwdice(ctx=ctx)\n if hit is True:\n enemy.hp -= 1\n gl.session.commit()\n except Exception:\n pass\n\n async def defend(self, ctx):\n self.temp_def_modifier += 1\n await self.takeAtkToSomeone(ctx=ctx, num=-1)\n\n __mapper_args__ = {\n 'polymorphic_identity': 'Ninja'\n }\n","sub_path":"src/database/roles/aventurier/Ninja.py","file_name":"Ninja.py","file_ext":"py","file_size_in_byte":972,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"537210620","text":"\"\"\"\nEx071 \"ATM simulator\": create a program that simulates the operation of an ATM. At the start,\nit asks the user for the amount to withdraw (an integer), and the program reports how many bills of each\ndenomination will be dispensed.\nNOTE: assume the ATM holds bills of 50, 20, 10 and 1.\n\n\"\"\"\nprint('-=-=-=ATM=-=-=-')\nvalor = int(input('How much do you want to withdraw? R$'))\ntotal = valor\nced = 50\ntotced = 0\nwhile True:\n if total >= ced:\n total -= ced\n totced += 1\n else:\n if totced > 0:\n print(f'Total of {totced} bills of R${ced}')\n if ced == 50:\n ced = 20\n elif ced == 20:\n ced = 10\n elif ced == 10:\n ced = 1\n totced = 0\n if total == 0:\n break\nprint('Finishing...')","sub_path":"Python/ex071.py","file_name":"ex071.py","file_ext":"py","file_size_in_byte":843,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"341734128","text":"#! 
/usr/bin/env python\n# -*- coding:utf-8 -*-\n\nimport smtplib\nfrom email.mime.text import MIMEText\nimport argparse\nimport subprocess\n\nclass EmailSender:\n def __init__(self, user, pwd, tosend):\n self._user = user\n self._pwd = pwd\n self._tosend = tosend\n\n def SetSmtpServer(self, smtpserver, smtpserver_port):\n self._smtpserver = smtpserver\n self._smtpserver_port = smtpserver_port\n\n def sendmail(self, title, message):\n msg = MIMEText(message)\n msg[\"Subject\"] = title\n msg[\"From\"] = self._user\n msg[\"To\"] = self._tosend\n try:\n s = smtplib.SMTP_SSL(self._smtpserver, self._smtpserver_port)\n s.login(self._user, self._pwd)\n s.sendmail(self._user, self._tosend, msg.as_string())\n s.quit()\n print(\"Success!\")\n except smtplib.SMTPException as e:\n print(\"Failed, %s\" % e)\nparser=argparse.ArgumentParser()\nparser.add_argument(\"-m\",\"--msg\",help=\"the message you want to send\")\n\nemailsender=EmailSender(\"from_email\",\"key\",\"to_email\")\nemailsender.SetSmtpServer(\"smtp.exmail.qq.com\", 465)\n\nargs=parser.parse_args()\n\nif args.msg:\n emailsender.sendmail(\"nodename:crontab info\",args.msg)\nelse:\n info = subprocess.Popen(\"machine reboot\", shell=True, stdout=subprocess.PIPE).stdout.read()\n emailsender.sendmail(\"nodename:ip addr alter\", info.decode())\n\n\"\"\"\n@author:raymond\n@file:ipmail.py\n@time:2018/3/18 13:52\n\"\"\"\n","sub_path":"iponchange/ipmail.py","file_name":"ipmail.py","file_ext":"py","file_size_in_byte":1458,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"119588857","text":"from pyspark import SparkContext\nfrom pyspark.streaming import StreamingContext\nimport re\n\nif __name__ == \"__main__\":\n # Create a local StreamingContext with two working threads and a batch interval of 1 second\n sc = SparkContext()\n ssc = StreamingContext(sc, 1)\n\n # Create a DStream that will connect to hostname:port, like localhost:9999\n lines = ssc.socketTextStream(\"devenv\", 9999)\n\n # Split each line into words\n words = lines.flatMap(lambda line: re.compile(r\"\\W+\").split(line.lower()))\n\n # Count each word in each batch\n pairs = words.map(lambda word: (word, 1))\n word_counts = pairs.reduceByKey(lambda x, y: x + y)\n\n # Print the first 30 elements of each RDD generated in this DStream to the console\n word_counts.pprint(30)\n\n ssc.start() # Start the computation\n ssc.awaitTermination() # Wait for the computation to terminate\n","sub_path":"proj_spark_streaming/network_wordcount.py","file_name":"network_wordcount.py","file_ext":"py","file_size_in_byte":887,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"613770144","text":"from ctree import Tree\n\nclass Parser(object):\n\n def __init__(self, scanner):\n self.scanner = scanner\n\n def compile(self):\n\n if (self._gen_tree()):\n self._process_tree()\n\n def _gen_tree(self):\n s = self.scanner\n\n if (s.next() != 'BEGIN'):\n print(\"Error: the program must begin with keyword BEGIN. line: %d\" % s.line)\n return False\n\n self.tree = Tree('ROOT')\n self.current_node = self.tree.root\n\n status = \"C\" #C-Continue, E-Error\n\n while status == \"C\":\n\n term = s.next()\n\n if (term == None):\n break\n\n if (self.current_node == None):\n print(\"Error: unexpected term %s. The program has already ended. 
line: %d\" % (term, s.line))\n return False\n\n if (term == 'OP'):\n status = self._op()\n\n elif (term == 'LOOP'):\n status = self._loop()\n\n elif (term == 'END'):\n status = self._end()\n\n else:\n print(\"Error: unexpected term %s. line: %d\" % (term, s.line))\n status = \"E\"\n\n if (status == \"E\"):\n return False\n\n if (self.current_node):\n print(\"Error: unexpected end. line: %d\" % (s.line))\n return False\n\n return True\n\n def _process_tree(self):\n self.tree.preorder()\n\n def _op(self):\n s = self.scanner\n number = s.next()\n\n try:\n number = int(number)\n except ValueError:\n print(\"Error: unexpected term %s, expected number. line: %d\" % (number, s.line))\n return \"E\"\n\n self.current_node.add_child(\"OP %d\" % number)\n print(\"new OP\", self.current_node.value)\n\n return \"C\"\n\n def _loop(self):\n\n s = self.scanner\n number = s.next()\n\n if (number != \"n\"):\n try:\n number = int(number)\n except ValueError:\n print(\"Error: unexpected term %s, expected number or n. line: %d\" % (number, s.line))\n return \"E\"\n\n self.current_node = self.current_node.add_child(\"LOOP %s\" % number)\n print('new node', self.current_node.value);\n\n return \"C\" \n\n def _end(self):\n\n self.current_node = self.current_node.parent\n print('return to last node', (self.current_node and self.current_node.value) or 'END')\n\n return \"C\"\n","sub_path":"cparser.py","file_name":"cparser.py","file_ext":"py","file_size_in_byte":2457,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"144700800","text":"import numpy as np\nimport cv2 as cv\n\n# front and back\ncap=cv.VideoCapture(0)\n\nwhile(1):\n # Processing each frame\n ret,frame=cap.read()\n # convert to gray\n gray=cv.cvtColor(frame,cv.COLOR_BGR2GRAY)\n # show every frame\n cv.imshow('frame',gray)\n # quit\n if cv.waitKey(0) &0xFF==ord('q'):\n break\n\ncap.release()\ncv.destroyAllWindows()","sub_path":"python_cv/youtube_sandex/Video_display/video_capture.py","file_name":"video_capture.py","file_ext":"py","file_size_in_byte":362,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"156435833","text":"from fire_detection.models import Categoria, Subcategoria\n\n# Creo un procesador de contextos para todas las vistas\ndef custom_proc(request):\n\ttodasCategorias = Categoria.objects.filter(active=True).order_by('orden')\n\ttodasSubcategorias = []\n\tfor categoria in todasCategorias:\n\t\tsubcategorias = Subcategoria.objects.filter(categoria_id=categoria.id, active=True).order_by('orden')\n\t\ttupla = (categoria, subcategorias)\n\t\ttodasSubcategorias.append(tupla)\n\treturn {\n\t\t'todasSubcategorias': todasSubcategorias,\n\t\t}","sub_path":"fire_detection/context_processors.py","file_name":"context_processors.py","file_ext":"py","file_size_in_byte":509,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"594868573","text":"#!/usr/bin/env python\nimport datetime\nfrom urllib import unquote_plus\nimport random\nfrom wsgiref.handlers import CGIHandler\n\ndef calcoffsets(n):\n\tallposs = []\n\tfor a in range(2,n):\n\t\tfor b in range(a+1,n):\n\t\t\tfor c in range(b+1,n):\n\t\t\t\tdists = [-1] * n\n\t\t\t\titers = 1;\n\t\t\t\tprevlist = [0]\n\t\t\t\twhile -1 in dists:\n\t\t\t\t\t#print \"iters = \",str(iters)\n\t\t\t\t\tnextlist = []\n\t\t\t\t\tfor i in prevlist:\n\t\t\t\t\t\tt0 = (i+1) % n\n\t\t\t\t\t\tt1 = (i+a) % n\n\t\t\t\t\t\tt2 = (i+b) % n\n\t\t\t\t\t\tt3 = (i+c) % 
n\n\t\t\t\t\t\tif dists[t0]==-1:\n\t\t\t\t\t\t\tdists[t0] = iters\n\t\t\t\t\t\t\tnextlist.append(t0)\n\t\t\t\t\t\tif dists[t1]==-1:\n\t\t\t\t\t\t\tdists[t1] = iters\n\t\t\t\t\t\t\tnextlist.append(t1)\n\t\t\t\t\t\tif dists[t2]==-1:\n\t\t\t\t\t\t\tdists[t2] = iters\n\t\t\t\t\t\t\tnextlist.append(t2)\n\t\t\t\t\t\tif dists[t3]==-1:\n\t\t\t\t\t\t\tdists[t3] = iters\n\t\t\t\t\t\t\tnextlist.append(t3)\n\t\t\t\t\titers += 1\n\t\t\t\t\tprevlist = nextlist\n\t\t\t\tallposs.append(dists)\n\t\t\t\ttotal = sum(dists) - dists[0]\n#\t\t\t\tprint \"total =\",total, \"\\ttotal/self =\", float(total)/float(dists[0]), \"\\tself =\",dists[0], \"\\toffsets =\",[1,a,b,c],dists\n\t# Now we have all the possible lists\n\tbesttotal = 999999999\n\tbestlist = []\n\tbestrat = 0\n\tfor list in allposs:\n\t\ttot = sum(list) - list[0]\n\t\tif tot < besttotal:\n\t\t\tbestrat = float(tot)/float(list[0])\n\t\t\tbesttotal = tot\n\t\t\tbestlist = list\n\t\telif tot == besttotal:\n\t\t\trat = float(tot)/float(list[0])\n\t\t\tif rat < bestrat:\n\t\t\t\tbestrat = rat\n\t\t\t\tbestlist = list\n#\tsys.stderr.write(\"Best one found:\")\n\tbestoffsets = []\n\tfor i in xrange(len(bestlist)):\n\t\tif bestlist[i] == 1:\n\t\t\tbestoffsets.append(i)\n#\tprint \"total =\",besttotal,\"\\ttotal/self =\", float(besttotal)/float(bestlist[0]), \"\\tself =\",bestlist[0], \"\\n\",bestlist,\"\\nOffsets: \",bestoffsets \n\treturn (bestoffsets, bestrat)\n\ndef genpage(start_response, output):\n\t# Send post reply\n\tstatus = '200 OK'\n\tresponse_headers = [('Content-type', 'text/plain'),\n\t\t\t\t\t ('Content-Length', str(len(output)))]\n\tstart_response(status, response_headers)\n\treturn [output]\n\n\n\n# WSGI stuff\ndef application(environ, start_response):\n\tif environ[\"REQUEST_METHOD\"] == \"POST\":\n\t\tbody = environ['wsgi.input'].read(int(environ['CONTENT_LENGTH']))\n\t\tparameter_pairs = body.split('&')\n\t\tparameter = dict()\n\t\tfor parameter_pair in parameter_pairs:\n\t\t\tparameter_pair = parameter_pair.split('=')\n\t\t\tparameter[unquote_plus(parameter_pair[0])] = unquote_plus(parameter_pair[1])\n#\t\toutput = repr(parameter)\n\n\t\t# TODO: IMPLEMENT CODE HERE\n\t\tplayers = parameter[\"players\"]\n\n\n\t\tretval = \"\"\n\t\tpeople = {}\n\t\tlines = players.split('\\n')\n\t\tfor line in lines:\n\t\t\ttry:\n\t\t\t\t(codename, realname) = line.strip().split(':')\n\t\t\texcept:\n\t\t\t\tcontinue\n\t\t\tif codename in people.keys() and people[codename] != realname:\n\t\t\t\treturn genpage(start_response, \"Error: duplicate codename %s refers to players %s and %s. Please check your input and resubmit.\" % (codename, realname, people[codename]))\n\t\t\tif realname in people.values():\n\t\t\t\treturn genpage(start_response, \"Error: duplicate real name %s has multiple codenames. Please check your input and resubmit.\" % (realname))\n\t\t\tpeople[codename] = realname\n\t\t\t\n\t\t\t#retval = retval + codename + \"\\n\"\n\t\t(offsets, ratio) = calcoffsets(len(people))\n\t\tcodenames = people.keys()\n\t\trandom.shuffle(codenames)\n\t\tif len(codenames) < 10:\n\t\t\treturn genpage(start_response, \"Error: too few people. This game really sucks when there's not enough players, and %s doesn't cut it. Go for at least 10 players.\" % len(codenames))\n\t\tretval = retval + \"Game notes:\\n\" + (\"There are %d people playing. 
Each person has 4 targets, at circle offsets %s.\\n\" % (len(codenames), offsets)) #+ (\"The ratio of minimum distance to duplicate over distance to self is %s.\" % str(ratio))\n\t\tretval = retval + \"Target lists follow.\\n\\n\\n\\n\"\n\t\tfor i in xrange(len(codenames)):\n\t\t\tcodename = codenames[i]\n\t\t\trealname = people[codename]\n\t\t\ttargets = map( lambda x: ((x+i) % len(codenames)) , offsets)\n\t\t\ttargetnames = map( lambda x: codenames[x] , targets)\n\t\t\tretval = retval + realname + (\" (%s) \" % codename) + \"has targets:\\n\" + \"\\t\".join(targetnames) + \"\\n\\n\"\n\n\t\treturn genpage(start_response, retval)\n\n\telse:\n\t\tresponse_headers = [(\"Allow\", \"POST\")]\n\t\tstatus = \"405 Method not allowed\"\n\t\tstart_response(status, response_headers)\n\t\treturn [\"\"]\n\nif __name__ == \"__main__\":\n CGIHandler().run(application)\n","sub_path":"gengame.py","file_name":"gengame.py","file_ext":"py","file_size_in_byte":4224,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"771782","text":"import numpy as np\nimport sys\n\nimport hashlib\n\ndef pairs(a,b):\n for index in range(min(len(a), len(b))):\n yield (a[index], b[index])\n\ndef convert_a(x):\n out = -127 + x\n out = (out[::2]**2 + out[1::2]**2)**0.5\n return out\n\ndef convert_b(x):\n out = np.zeros(int(len(x)/2.0))\n for i in range(0,len(x),2):\n out[int(i/2)] = ( (int(x[i]) -127)**2 + (int(x[i+1]) -127)**2 )**0.5\n return out\n\ndef do_sha224(x):\n hashed = hashlib.sha224(x)\n hashed = hashed.hexdigest()\n return hashed\n\ndef smooth_line(x,y, iter):\n x1 = []\n y1 = []\n for i in range(len(x)):\n x1.append(x[i])\n y1.append(y[i])\n\n b, a, rxy, Rsquared, y_hat, y_residuum = linear_regression(x1,y1)\n\n print(\"smoothing\", i, len(x), len(y), len(x1), len(y1), b, a, rxy, Rsquared, np.argmax(np.abs(y_residuum)))\n print(np.argmax(np.abs(y_residuum)), np.max(np.abs(y_residuum)))\n print(np.sum(np.abs(y_residuum)))#, np.abs(y_residuum))\n #print(x1\n #print(y1\n for i in range(iter):\n del x1[np.argmax(np.abs(y_residuum))]\n del y1[np.argmax(np.abs(y_residuum))]\n del y_residuum[np.argmax(np.abs(y_residuum))]\n #print(x1\n #print(y1\n print(\"\")\n b, a, rxy, Rsquared, y_hat, y_residuum = linear_regression(x1,y1)\n # conditions:\n # no negative slope for time!\n # slope must be near to 1!\n\n y_hat1 = []\n for i in range(len(x)):\n y_hat1.append(a + b * x[i])\n\n return x, y_hat1\n\n\ndef linear_regression(x, y):\n x_sum = np.sum(x)\n y_sum = np.sum(y)\n x_mean = np.mean(x)\n y_mean = np.mean(y)\n\n Sx = x - x_mean\n Sy = y - y_mean\n\n Sxy = Sx * Sy\n Sxx = Sx * Sx\n Syy = Sy * Sy\n\n Sxy_sum = np.sum(Sxy)\n Sxx_sum = np.sum(Sxx)\n Syy_sum = np.sum(Syy)\n\n b = Sxy_sum / Sxx_sum # slope\n a = y_sum / len(y) - (b * x_sum / len(x)) # achsenabschnitt der regr. 
geraden\n\n rxy = Sxy_sum / (Sxx_sum * Syy_sum)**0.5 # empirische korelation\n Rsquared = rxy**2 # bestimtheitsmass\n\n y_hat = []\n y_residuum = []\n for i in range(len(x)):\n y_hat.append(a + b * x[i])\n y_residuum.append(y[i] - y_hat[i])\n\n return b, a, rxy, Rsquared, y_hat, y_residuum\n\ndef find_line_properties(points):\n m = float(points[1][1] - points[0][1]) / (points[1][0] - points[0][0] + sys.float_info.epsilon) # slope (gradient) of the line\n c = points[1][1] - m * points[1][0] # y-intercept of the line\n return m, c\n\ndef find_intercept_point(m, c, x0, y0):\n # intersection point with the model\n x = (x0 + m*y0 - m*c)/(1 + m**2)\n y = (m*x0 + (m**2)*y0 - (m**2)*c)/(1 + m**2) + c\n\n return x, y\n\ndef bad_sphere_center(long, lat, alt):\n import math\n\n dots = np.zeros((len(long),4))\n for i in range(len(long)):\n dots[i][0] = long[i]\n dots[i][1] = lat[i]\n dots[i][2] = alt[i][1]\n dots[i][3] = i\n print(dots)\n\n iterations = 40000\n distance_allowed = 250\n\n counterbest = 0\n Rearth = 6371000.0\n\n found_dots = 0\n\n for l in range(2000):\n np.random.shuffle(dots)\n dots_sum = 0\n\n x0 = (Rearth + dots[0][2]) * math.cos(dots[0][1]) * math.cos(dots[0][0])\n y0 = (Rearth + dots[0][2]) * math.cos(dots[0][1]) * math.sin(dots[0][0])\n z0 = (Rearth + dots[0][2]) * math.sin(dots[0][1])\n\n index = []\n index.append(dots[0][3])\n\n for i in range(1, len(long)):\n x1 = (Rearth + dots[i][2]) * math.cos(dots[i][1]) * math.cos(dots[i][0])\n y1 = (Rearth + dots[i][2]) * math.cos(dots[i][1]) * math.sin(dots[i][0])\n z1 = (Rearth + dots[i][2]) * math.sin(dots[i][1])\n\n distance = ((x0-x1)**2 + (y0-y1)**2 + (z0-z1)**2)**0.5\n\n if distance < distance_allowed:\n dots_sum += 1\n index.append(dots[i][3])\n\n if found_dots < dots_sum:\n out4 = []\n found_dots = dots_sum\n dots_center = [dots[0][0], dots[0][1], dots[0][2], dots[0][3]]\n for i in range(len(dots)):\n yes = 0\n for j in range(len(index)):\n if dots[i][3] == index[j]:\n yes = 1\n\n if yes == 1:\n out4.append(dots[i][3])\n\n print(\"center\", dots_center, found_dots)\n return out4\n\ndef bad_ransac(x, y):\n\n dots = np.zeros((len(x),2), dtype=np.float64)\n for i in range(len(x)):\n dots[i][0] = float(x[i])\n dots[i][1] = float(y[i])\n #print(dots[i][0], dots[i][1])\n print(\"dots\")\n #print(dots)\n\n iterations1 = 70000\n iterations = iterations1\n distance_allowed = 1.0\n ppp = 12\n\n counterbest = 0\n\n while iterations >= 0:\n np.random.shuffle(dots)\n\n m, c = find_line_properties(dots[0:2])\n '''\n xx = [dots[0][0], dots[1][0]]\n yy = [dots[0][1], dots[1][1]]\n\n if xx[1] - xx[0] == 0:\n print(xx[1] - xx[0])\n tmp = linregress(xx,yy)\n #print(tmp)\n m = tmp.slope\n c = tmp.intercept\n '''\n\n\n counter = 0\n for i in range(2,len(dots)):\n x1, y1 = find_intercept_point(m, c, dots[i][0], dots[i][1])\n distance = ((x1-dots[i][0])**2 + (y1-dots[i][1])**2)**0.5\n #print(dots[i][0], dots[i][1], distance)\n\n if distance < distance_allowed:\n # didn't work that good with noisy measurements\n #if m*2048000.0 < 1.5 and m*2048000.0 > 0.5:\n counter += 1\n\n #print(\"counter\", counter, len(x)-2\n\n #if float(counter)/float(len(x)-2) > 0.2 and counterbest < counter:\n if counterbest < counter and np.abs(m*2048000.0 - 1.0) < 1.0:\n #print(\"hgggg\"\n dotsbest = dots[0:2]\n mbest = m\n cbest = c\n counterbest = counter\n\n iterations -= 1\n if iterations <= 1 and counter <= 1 and ppp > 0:\n iterations = iterations1\n ppp -= 1\n\n y_new = []\n\n passed = 0\n if counter > 0:\n print(\"counter\", counter, \"ppp\", ppp, mbest, cbest, 
dotsbest[0][0], dotsbest[0][1],dotsbest[1][0], dotsbest[1][1])\n for i in range(len(x)):\n y_new.append(np.round(cbest + mbest*x[i]))\n\n #print(\"final linear properties\", cbest, mbest, mbest*2048000.0, float(counterbest)/float(len(x)-2), counterbest)\n passed = 1\n else:\n for i in range(len(x)):\n #print(\"counter\", counter\n y_new.append(float(y[i]))\n\n #print(\"final normal properties\", counter)\n passed = 0\n\n return x, y_new, passed\n\ndef bad_ransac_lin(x, y):\n distance_allowed = 1.0\n counterbest = 0\n\n for i in range(len(x)-1):\n for j in range(i+1, len(x)):\n #print(i,j)\n dots = [[x[i],y[i]],[x[j],y[j]]]\n m, c = find_line_properties(dots)\n\n\n counter = 0\n for k in range(len(x)):\n x1, y1 = find_intercept_point(m, c, x[k], y[k])\n distance = ((x1-x[k])**2 + (y1-y[k])**2)**0.5\n #print(dots[i][0], dots[i][1], distance)\n\n if distance < distance_allowed:\n # didn't work that good with noisy measurements\n #if m*2048000.0 < 1.5 and m*2048000.0 > 0.5:\n counter += 1\n\n #if float(counter)/float(len(x)-2) > 0.2 and counterbest < counter:\n if counterbest < counter and np.abs(m*2048000.0 - 1.0) < 1.0:\n #print(\"hgggg\"\n dotsbest = dots\n mbest = m\n cbest = c\n counterbest = counter\n\n y_new = []\n\n passed = 0\n if counter > 0:\n print(\"counter\", counter, \"ppp\", mbest, cbest, dotsbest[0][0], dotsbest[0][1],dotsbest[1][0], dotsbest[1][1])\n for i in range(len(x)):\n y_new.append(np.round(cbest + mbest*x[i]))\n\n #print(\"final linear properties\", cbest, mbest, mbest*2048000.0, float(counterbest)/float(len(x)-2), counterbest)\n passed = 1\n else:\n for i in range(len(x)):\n #print(\"counter\", counter\n y_new.append(float(y[i]))\n\n #print(\"final normal properties\", counter)\n passed = 0\n print(passed)\n return x, y_new, passed","sub_path":"software/do.py","file_name":"do.py","file_ext":"py","file_size_in_byte":8250,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"29696654","text":"#!/usr/bin/python2.5\n# Copyright 2011 JatLeGo Inc. 
All Rights Reserved.\n# Author: andyzh1314@gmail.com (Andy Zhau)\n\n\nfrom website.framework.change import change\n\n\nclass JememberLinkEntry(change.Entry):\n\n hash_link = change.Required(1, basestring)\n origin_link = change.Required(2, basestring)\n tags = change.Repeated(3, basestring)\n description = change.Optional(4, basestring)\n\n @classmethod\n def from_jemember(cls, jemember, full=False):\n result = JememberLinkEntry(hash_link=jemember.hash_link,\n origin_link=jemember.origin_link)\n if full:\n result.tags.extend(jemember.tags)\n result.description = jemember.description\n return result\n\n @classmethod\n def from_jemember_user(cls, ju):\n return [cls.from_jemember(jemember)\n for jemember in ju.jemembers.create_order()]\n","sub_path":"website/jetlag3/views/jemember.py","file_name":"jemember.py","file_ext":"py","file_size_in_byte":839,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"137066411","text":"import io,os,struct,glob,codecs,math,re\r\nlst='FONT.PAK_list.txt'\r\nhsize=os.path.getsize(lst)\r\nsrc =lst.replace('_list.txt','')\r\ndirname = src+'_unpacked\\\\'\r\nsrc='o_'+src\r\nfl = open(src,'wb')\r\nfilename = os.path.basename(src)\r\nfls=open(lst,'r')\r\nfile_num = int(fls.readline().replace('\\n',''))\r\nfl.seek(4)\r\nfl.write(struct.pack(' 3:\n\t\tclientParams = urllib.parse.parse_qsl(postData)\n\t\tclientSID = clientParams[0][1]\n\t\tclientSKey = clientParams[1][1][:-1] #[:-1] removes the last char '\\''\n\t\t#print(\"{:s} : {:s}\".format(clientSID, clientSKey))\n\telse:\n\t\tprint('GetProfile Failed')\n\t\treturn 'nope'\n\n\tpostParams = urllib.parse.urlencode({'s_id': clientSID, 's_key': clientSKey}).encode('utf-8')\n\tlegitPostReply = urllib.request.urlopen(\"https://api.playwarz.com/WarZ/api/api_GetProfile1.aspx\", postParams)\n\n\tcacheFile(clientSID, 'getprofile', legitPostReply.read())\n\n\tif 'wsgi.file_wrapper' in environ:\n\t\treturn environ['wsgi.file_wrapper'](cacheHandle(clientSID, 'getprofile'), 1024)\n\telse:\n\t\treturn iter(lambda: legitPostReply.read(1024), '')\n","sub_path":"getprofile.wsgi","file_name":"getprofile.wsgi","file_ext":"wsgi","file_size_in_byte":1457,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"422377607","text":"import boto3\nimport os\nimport time\nfrom clustering import do_clustering\n\ndef update_medoids(minutes,meters,minsam):\n medoid_list = {}\n\n points = do_clustering(minutes,meters,minsam)\n points_list = []\n for point in points:\n point_dict = {}\n point_dict['SS'] = [str(coord) for coord in point]\n points_list.append(point_dict)\n\n medoid_list['tstamp'] = {'S': str(time.time())}\n medoid_list['points'] = {'L': points_list}\n medoid_list['cluster_id'] = {'S': 'activo'}\n\n print('\\nItem for DynamoDB:\\n', medoid_list)\n\n dynamodb_rs = boto3.resource('dynamodb',\n aws_access_key_id=os.environ['AWS_ACCESS_KEY_ID'],\n aws_secret_access_key=os.environ['AWS_SECRET_ACCESS_KEY'],\n region_name=os.environ['AWS_DEFAULT_REGION'])\n\n dynamodb_cl = boto3.client('dynamodb',\n aws_access_key_id=os.environ['AWS_ACCESS_KEY_ID'],\n aws_secret_access_key=os.environ['AWS_SECRET_ACCESS_KEY'],\n region_name=os.environ['AWS_DEFAULT_REGION'])\n\n table = dynamodb_rs.Table('tinkuy-clusters-qas')\n response = table.delete_item(Key={'cluster_id': 'activo'})\n dynamodb_cl.put_item(TableName='tinkuy-clusters-qas', Item=medoid_list)\n\n print(\"Medoids updated in dynamo\", 
response)\n","sub_path":"src/update_medoids.py","file_name":"update_medoids.py","file_ext":"py","file_size_in_byte":1391,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"69945915","text":"import cv2\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n# Grayscale\ndef BGR2GRAY(img):\n\t# Grayscale\n\tgray = 0.2126 * img[..., 2] + 0.7152 * img[..., 1] + 0.0722 * img[..., 0]\n\treturn gray\n\n# Bi-Linear interpolation\ndef bl_interpolate(img, ax=1., ay=1.):\n\tif len(img.shape) > 2:\n\t\tH, W, C = img.shape\n\telse:\n\t\tH, W = img.shape\n\t\tC = 1\n\n\taH = int(ay * H)\n\taW = int(ax * W)\n\n\t# get position of resized image\n\ty = np.arange(aH).repeat(aW).reshape(aW, -1)\n\tx = np.tile(np.arange(aW), (aH, 1))\n\n\t# get position of original position\n\ty = (y / ay)\n\tx = (x / ax)\n\n\tix = np.floor(x).astype(np.int)\n\tiy = np.floor(y).astype(np.int)\n\n\tix = np.minimum(ix, W-2)\n\tiy = np.minimum(iy, H-2)\n\n\t# get distance \n\tdx = x - ix\n\tdy = y - iy\n\n\tif C > 1:\n\t\tdx = np.repeat(np.expand_dims(dx, axis=-1), C, axis=-1)\n\t\tdy = np.repeat(np.expand_dims(dy, axis=-1), C, axis=-1)\n\n\t# interpolation\n\tout = (1-dx) * (1-dy) * img[iy, ix] + dx * (1 - dy) * img[iy, ix+1] + (1 - dx) * dy * img[iy+1, ix] + dx * dy * img[iy+1, ix+1]\n\n\tout = np.clip(out, 0, 255)\n\tout = out.astype(np.uint8)\n\n\treturn out\n\n\n# Read image\nimg = cv2.imread(\"imori.jpg\").astype(np.float)\n\ngray = BGR2GRAY(img)\n\n# Bilinear interpolation\nout = bl_interpolate(gray.astype(np.float32), ax=0.5, ay=0.5)\n\n# Bilinear interpolation\nout = bl_interpolate(out, ax=2., ay=2.)\n\nout = np.abs(out - gray)\n\nout = out / out.max() * 255\n\nout = out.astype(np.uint8)\n\n# Save result\ncv2.imshow(\"result\", out)\ncv2.waitKey(0)\ncv2.imwrite(\"out.jpg\", out)\n","sub_path":"Question_71_80/answers/answer_74.py","file_name":"answer_74.py","file_ext":"py","file_size_in_byte":1475,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"509150149","text":"import FWCore.ParameterSet.Config as cms\n\nprocess = cms.Process(\"L1DTTrigPhase2Prod\")\n\nprocess.load(\"Geometry.CMSCommonData.cmsIdealGeometryXML_cff\")\nprocess.load(\"Geometry.DTGeometry.dtGeometry_cfi\")\nprocess.DTGeometryESModule.applyAlignment = False\n\nprocess.load(\"L1Trigger.DTPhase2Trigger.dtTriggerPhase2PrimitiveDigis_cfi\")\nprocess.load(\"Configuration.StandardSequences.FrontierConditions_GlobalTag_cff\")\nprocess.load(\"Configuration.StandardSequences.MagneticField_AutoFromDBCurrent_cff\")\nprocess.GlobalTag.globaltag = \"80X_dataRun2_2016SeptRepro_v7\"\n\nprocess.load(\"Phase2L1Trigger.CalibratedDigis.CalibratedDigis_cfi\")\nprocess.load(\"L1Trigger.DTPhase2Trigger.dtTriggerPhase2PrimitiveDigis_cfi\")\n\nprocess.source = cms.Source(\"PoolSource\",fileNames = cms.untracked.vstring('file:/eos/cms/store/user/folguera/P2L1TUpgrade/digis_segments_Run2016BSingleMuonRAW-RECO_camilo.root'))\nprocess.maxEvents = cms.untracked.PSet(input = cms.untracked.int32(10))\nprocess.dtTriggerPhase2PrimitiveDigis.dump = True\nprocess.dtTriggerPhase2PrimitiveDigis.debug = False\nprocess.dtTriggerPhase2PrimitiveDigis.grouping_code = cms.untracked.int32(0)\n\nprocess.out = cms.OutputModule(\"PoolOutputModule\",\n outputCommands = cms.untracked.vstring('keep *'),\n fileName = cms.untracked.string('DTTriggerPhase2Primitives.root')\n)\n\nprocess.p = cms.Path(process.CalibratedDigis*process.dtTriggerPhase2PrimitiveDigis)\nprocess.this_is_the_end = 
cms.EndPath(process.out)\n\n\n\n\n\n\n","sub_path":"L1Trigger/DTPhase2Trigger/test/test_primitivesPhase2Prod_over_dTDigis_and_4Dsegments_cfg.py","file_name":"test_primitivesPhase2Prod_over_dTDigis_and_4Dsegments_cfg.py","file_ext":"py","file_size_in_byte":1506,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"87600744","text":"import numpy as np\nimport matplotlib.pyplot as plt\nimport math\nimport os\n\ndef lecture_matrice_X_et_Y_from_file(nom_fic):\n os.chdir('E:/Onedrive/Documents/Lonaparte-PC/Codes/learn_python/INF_1_BE_4')\n if not nom_fic in os.listdir():\n print(\"pb d'ouverture du fichier\"+nom_fic)\n matrice_X_et_Y=[]\n return matrice_X_et_Y\n mat_tous_les_caracteres = open(nom_fic).read()\n matrix_cars= [item.split() for item in mat_tous_les_caracteres.split('\\n')[:-1]]\n matrice_X_et_Y= [[int(row[i]) for i in range(len(row))] for row in matrix_cars]\n return matrice_X_et_Y\n\ndef prerparer_donnees(nom_fic):\n global matrice_X\n global matrice_cible_y\n global L_Gray_repr_binaire_de_0_a_9\n global vecteur_chiffre_0_9_cible_y\n global matrice_Y\n\n matrice_X_et_Y=lecture_matrice_X_et_Y_from_file(nom_fic)\n\n if(matrice_X_et_Y==[]):\n print(\"pb constitution de la matrice_X_et_Y\")\n quit()\n\n matrice_X=np.array([row[:-1] for row in matrice_X_et_Y])\n matrice_Y=np.array([row[-1] for row in matrice_X_et_Y])\n matrice_cible_y = np.array([code_Gray_repr_binaire_de_0_a_9(i) for i in matrice_Y])\n\ndef code_Gray_repr_binaire_de_0_a_9(num):\n if(num==0):return [0,0,0,0]\n if(num==1):return [0,0,0,1]\n if(num==2):return [0,0,1,1]\n if(num==3):return [0,0,1,0]\n if(num==4):return [0,1,1,0]\n if(num==5):return [0,1,1,1]\n if(num==6):return [0,1,0,1]\n if(num==7):return [0,1,0,0]\n if(num==8):return [1,1,0,0]\n if(num==9):return [1,1,0,1]\n\nprerparer_donnees('train.data')\nX= matrice_X\ny= matrice_cible_y\nprerparer_donnees('test.data')\nX_test= matrice_X\ny_real= matrice_cible_y\nprint(\"Le Resultat reel:\")\nprint(y_real)\n#print(matrice_X)\n#print(matrice_Y)\n#print(matrice_cible_y)\n\ndef sigmoide_et_sa_derivee (x,deriv =False) :\n if ( deriv ==True ) :\n return x*(1-x)\n return 1/(1+np.exp(-x))\ndef exemple_et_sa_derivee (x,deriv =False):\n if ( deriv ==True ) :\n return 1/(1+np.exp(-x))\n return np.log(1+np.exp(-x),math.e)\ndef hyperboliaue_et_sa_derivee (x,deriv =False):\n if ( deriv ==True ) :\n return 1-x*x\n return 2/(1+np.exp(-2*x))-1\ndef gaussienne_et_sa_derivee (x,deriv =False):\n if ( deriv ==True ) :\n return -2*x*np.exp(-x*x)\n return np.exp(-x*x)\ndef ArcTangente_et_sa_derivee (x,deriv =False):\n if ( deriv ==True ) :\n return 1/(1+x*x)\n return np.arctan(x)\n\nnp.random.seed (1)\nnoeud=128 # Le nombre de couches cachees\nnoeud1=8\n\nsynapse0 = 2*np.random.random((48,noeud))-1\nsynapse01 = 2*np.random.random((noeud,noeud1))-1\nsynapse1 = 2*np.random.random((noeud1,4))-1\n#synapse0 = np.random.random((3,noeud))\n#synapse1 = np.random.random((noeud,1))\n#synapse0 = math.pi*np.random.random((3,noeud))-math.pi/2\n#synapse1 = math.pi*np.random.random((noeud,1))-math.pi/2\n\ncouche_entree = X\nnb_iterations = 100000\nalpha=0.1\nerr = []\nfor j in range (nb_iterations) :\n couche_cachee = sigmoide_et_sa_derivee (np.dot(couche_entree,synapse0))\n couche_cachee1 = sigmoide_et_sa_derivee (np.dot(couche_cachee,synapse01))\n couche_sortie = sigmoide_et_sa_derivee (np.dot(couche_cachee1,synapse1))\n erreur_couche_sortie = y-couche_sortie\n moy=np.mean(np.abs(erreur_couche_sortie))\n err.append(abs(moy))\n if 
j%(nb_iterations//10)==0:\n print ( \"Moyenne Erreur couche sortie : \" + str (moy))\n delta_couche_sortie = erreur_couche_sortie * sigmoide_et_sa_derivee (couche_sortie,deriv=True)\n error_couche_cachee1 = delta_couche_sortie.dot(synapse1.T)\n delta_couche_cachee1 = error_couche_cachee1 * sigmoide_et_sa_derivee (couche_cachee1,deriv =True)\n error_couche_cachee = delta_couche_cachee1.dot(synapse01.T)\n delta_couche_cachee = error_couche_cachee * sigmoide_et_sa_derivee (couche_cachee,deriv =True)\n synapse1 += couche_cachee1.T.dot(delta_couche_sortie )*alpha\n synapse01 += couche_cachee.T.dot(delta_couche_cachee1)*alpha\n synapse0 += couche_entree.T.dot(delta_couche_cachee)*alpha\nprint ( \"Resultat de l'apprentissage : \" )\nprint ( couche_sortie )\n\nmyvec=np.array([range(len(err)),err])\nplt.plot(myvec[0,],myvec[1,])\nplt.show()\n\n#############TEST#################\ncache_test=sigmoide_et_sa_derivee (np.dot(X_test,synapse0))\ncache1_test=sigmoide_et_sa_derivee (np.dot(cache_test,synapse01))\nY_test=sigmoide_et_sa_derivee (np.dot(cache1_test,synapse1))\nprint(\"Resultat de test:\")\nprint(Y_test)\n","sub_path":"INF_1_BE_4/Section_VI_une_autre_exemple.py","file_name":"Section_VI_une_autre_exemple.py","file_ext":"py","file_size_in_byte":4434,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"363901223","text":"# -*- coding: utf-8 -*-\n'''\nCreated on 2016-10-20\n\n@author: hustcc\n'''\n\nfrom app.wraps.login_wrap import login_required\nfrom app import app\nfrom app.utils import ResponseUtil, RequestUtil, StringUtil\nfrom app.database.model import WebHook, Server\n\n\n# get webhook list\n@app.route('/api/webhook/list', methods=['GET'])\n@login_required()\ndef api_webhook_list():\n # login user\n user_id = RequestUtil.get_login_user().get('id', '')\n\n webhooks = WebHook.query.filter_by(user_id=user_id, deleted=False).all()\n webhooks = [webhook.dict(True) for webhook in webhooks]\n\n return ResponseUtil.standard_response(1, webhooks)\n\n\n# new webhook\n@app.route('/api/webhook/new', methods=['POST'])\n@login_required()\ndef api_webhook_new():\n # login user\n user_id = RequestUtil.get_login_user().get('id', '')\n server_id = RequestUtil.get_parameter('server_id', '')\n # server must be added by yourself\n if not Server.query.filter_by(id=server_id, user_id=user_id).first():\n return ResponseUtil.standard_response(0, 'Permition deny!')\n\n repo = RequestUtil.get_parameter('repo', '')\n branch = RequestUtil.get_parameter('branch', '')\n shell = RequestUtil.get_parameter('shell', '')\n\n if not all((repo, branch, shell, server_id)):\n return ResponseUtil.standard_response(0, 'Form data can not be blank!')\n \n webhook_id = RequestUtil.get_parameter('id', '')\n if webhook_id:\n # update webhook\n # you can only update the webhook which you create.\n webhook = WebHook.query.filter_by(id=webhook_id, user_id=user_id).first()\n if not webhook:\n return ResponseUtil.standard_response(0, 'WebHook is not exist!')\n webhook.repo = repo\n webhook.branch = branch\n webhook.shell = shell\n webhook.server_id = server_id\n else:\n # new webhook\n webhook = WebHook(repo=repo, branch=branch, shell=shell, server_id=server_id,\n user_id=user_id, key=StringUtil.md5_token())\n\n webhook.save()\n\n return ResponseUtil.standard_response(1, webhook.dict(with_key=True))\n\n\n@app.route('/api/webhook/delete', methods=['POST'])\n@login_required()\ndef api_webhook_delete():\n # login user\n user_id = RequestUtil.get_login_user().get('id', '')\n webhook_id = 
RequestUtil.get_parameter('webhook_id', '')\n\n webhook = WebHook.query.filter_by(user_id=user_id, id=webhook_id).first()\n if not webhook:\n return ResponseUtil.standard_response(0, 'Permition deny!')\n\n webhook.deleted = True\n webhook.save()\n\n return ResponseUtil.standard_response(1, 'Success')\n","sub_path":"app/views/webhook.py","file_name":"webhook.py","file_ext":"py","file_size_in_byte":2594,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"447123867","text":"\nimport pandas as pd\nfrom hydroDL.data import usgs, gageII, gridMET, ntn, GLASS, transform, dbBasin\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom hydroDL.post import axplot, figplot\nfrom hydroDL import kPath, utils\nimport json\nimport os\nimport importlib\nfrom hydroDL.master import basinFull\nfrom hydroDL.app.waterQuality import WRTDS\n\ndataName = 'G200N'\ntrainSet = 'rmR20'\ntestSet = 'pkR20'\n# trainSet = 'B10'\n# testSet = 'A10'\nlabel = 'QFPRT2C'\noutName = '{}-{}-{}'.format(dataName, label, trainSet)\n\nDF = dbBasin.DataFrameBasin(dataName)\nyP, ycP = basinFull.testModel(\n outName, DF=DF, testSet=testSet, ep=500)\n\n# deal with mean and std\ncodeLst = usgs.newC\nyOut = np.ndarray(yP.shape)\nfor k, code in enumerate(codeLst):\n m = DF.g[:, DF.varG.index(code+'-M')]\n s = DF.g[:, DF.varG.index(code+'-S')]\n data = yP[:, :, k]\n yOut[:, :, k] = data*s+m\n\n# WRTDS\n# yW = WRTDS.testWRTDS(dataName, trainSet, testSet, codeLst)\ndirRoot = os.path.join(kPath.dirWQ, 'modelStat', 'WRTDS-dbBasin')\nfileName = '{}-{}-{}'.format(dataName, trainSet, testSet)\nyW = np.load(os.path.join(dirRoot, fileName)+'.npz')['arr_0']\n\n# correlation matrix\nd1 = dbBasin.DataModelBasin(DF, subset=trainSet, varY=codeLst)\nd2 = dbBasin.DataModelBasin(DF, subset=testSet, varY=codeLst)\nsiteNoLst = DF.siteNoLst\nmat1 = np.full([len(siteNoLst), len(codeLst), 4], np.nan)\nmat2 = np.full([len(siteNoLst), len(codeLst), 4], np.nan)\nfor indS, siteNo in enumerate(siteNoLst):\n for indC, code in enumerate(codeLst):\n n1 = np.sum(~np.isnan(d1.Y[:, indS, indC]), axis=0)\n n2 = np.sum(~np.isnan(d2.Y[:, indS, indC]), axis=0)\n if n1 >= 160 and n2 >= 40:\n stat = utils.stat.calStat(yOut[:, indS, indC], d2.Y[:, indS, indC])\n stat2 = utils.stat.calStat(yW[:, indS, indC], d2.Y[:, indS, indC])\n mat1[indS, indC, 0] = stat['Bias']\n mat1[indS, indC, 1] = stat['RMSE']\n mat1[indS, indC, 2] = stat['NSE']\n mat1[indS, indC, 3] = stat['Corr']\n mat2[indS, indC, 0] = stat2['Bias']\n mat2[indS, indC, 1] = stat2['RMSE']\n mat2[indS, indC, 2] = stat2['NSE']\n mat2[indS, indC, 3] = stat2['Corr']\n\nindT1 = np.where(~np.isnan(d1.Y[:, indS, indC]))\nindT2 = np.where(~np.isnan(d2.Y[:, indS, indC]))\n\nyOut[indT2, indS, indC]\nyW[indT2, indS, indC]\n\n# select sites\ndictSiteName = 'dict{}.json'.format(dataName[:4])\ndirSel = os.path.join(kPath.dirData, 'USGS', 'inventory', 'siteSel')\nwith open(os.path.join(dirSel, dictSiteName)) as f:\n dictSite = json.load(f)\n\nstatStrLst = ['Bias', 'RMSE', 'NSE', 'Corr']\ndataPlot = list()\nlabelLst = [usgs.codePdf.loc[code]['shortName'] +\n '\\n'+code for code in codeLst]\nfor k, statStr in enumerate(statStrLst):\n temp = list()\n for ic, code in enumerate(codeLst):\n [a, b], _ = utils.rmNan([mat1[:, ic, k], mat2[:, ic, k]])\n temp.append([a, b])\n sharey = False if statStr in ['Bias', 'RMSE'] else True\n fig, axes = figplot.boxPlot(temp, widths=0.5, figsize=(12, 4),\n label2=['LSTM', 'WRTDS'], label1=labelLst,\n sharey=sharey)\n if statStr == 'Bias':\n for ax in 
axes:\n _ = ax.axhline(0)\n fig.show()\n\n#\n# DF2 = dbBasin.DataFrameBasin('G400')\n\n# labelLst = [usgs.codePdf.loc[code]['shortName'] + code for code in codeLst]\n# d1 = dbBasin.DataModelBasin(DF2, subset=trainSet, varY=codeLst)\n# d2 = dbBasin.DataModelBasin(DF2, subset=testSet, varY=codeLst)\n# k = 60\n# dataPlot = [yW[:, k, :], d1.Y[:, k, :], d2.Y[:, k, :]]\n# cLst = ['red', 'grey', 'black']\n# fig, axes = figplot.multiTS(\n# DF.t, dataPlot, labelLst=labelLst, cLst=cLst)\n# fig.show()\n\n","sub_path":"app/wqFull/result/box.py","file_name":"box.py","file_ext":"py","file_size_in_byte":3695,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"360391183","text":"import os\nimport time\nimport pickle\nimport checkRange\nfrom selenium import webdriver\nfrom operator import itemgetter\n\n\ndef main(keyword, extension):\n\n driver = webdriver.Firefox(executable_path=\"geckodriver\") # initiate a driver, in this case Firefox\n driver.get(\"https://www.github.com/login\")\n\n if not os.path.exists('crawler_results'):\n os.makedirs('crawler_results')\n\n # log in\n username_field = driver.find_element_by_name('login') # get the username field\n password_field = driver.find_element_by_name('password') # get the password field\n #username_field.send_keys(\"\") # enter in your username\n #password_field.send_keys(\"\") # enter in your password\n username_field.send_keys(\"\") # enter in your username\n password_field.send_keys(\"\") # enter in your password\n password_field.submit() # submit it\n\n time.sleep(5)\n\n #cookie\n pickle.dump(driver.get_cookies(), open(\"QuoraCookies.pkl\", \"wb\"))\n\n time.sleep(5)#per dare il tempo ai cookie\n\n #searching\n #idea: usare la size per raffinare la ricerca ed andare sotto i 1000 risultati\n\n lb = checkRange.checkRange() #lower bound iniziale per la dimensione calcolato sul file con i risultati che già abbiamo in modo da non dover ripartire sempre dall'inizio\n\n if(lb == 0): #upperd bound inziale\n ub = 500\n else:\n ub = lb*2\n\n while(True):\n #result file\n file = open('crawler_results/result.txt', 'ab')# a per adding, w per write ogni volta che riapre\n page = 1\n lb = finder(lb, ub, page, driver, file, keyword, extension)\n if(lb==ub):\n ub = ub*2\n file.close()\n if(ub>100000):\n file.close()\n break\n\n update()\n\n\ndef update():\n file = open('crawler_results/result.txt', 'rb') #file originale con la lista disordinata\n\n objs = []\n while True:\n try:\n o = pickle.load(file)\n except EOFError:\n break\n objs.append(o)\n\n\n file = open('crawler_results/result_ordered.txt', 'wb') #nuovo file con la lista ordinata\n\n newlist = []\n for highelem in objs:\n for elem in highelem:\n print(elem)\n elem['owner_name'] = elem['repository_name'][0:elem['repository_name'].find('/')]\n newlist.append(elem)\n\n newlist = sorted(newlist, key=itemgetter('owner_name'))\n pickle.dump(newlist, file)\n\n\ndef finder(lb, ub, page, driver, file, keyword, extension):\n\n lista = []\n\n while(True):\n\n time.sleep(2)\n driver.get(\"https://github.com/search?p=\"+str(page)+\"&q=\"+keyword+\" extension%3A\"+extension+\" size%3A%22\"+str(lb)+\"..\"+str(ub)+\"%22&type=Code&utf8=%E2%9C%93\")\n for cookie in pickle.load(open(\"QuoraCookies.pkl\", \"rb\")): #cookie\n new_cookie={}\n new_cookie['name'] = cookie['name']\n new_cookie['value'] = cookie['value']\n 
driver.add_cookie(new_cookie)\n\n################################################################################################################################################\n#inizio blocco ricerca del numero dei risultati\n try:\n #dovrebbe risolvere il problema dell'abuse\n for num_results in driver.find_elements_by_class_name('d-flex'):#cerca dentro la div d-flex, dove stanno i nomi delle repository\n try: #abuse managing\n if(num_results.text.find('Showing')!=-1):#cerco nella pagina il numero di risultati\n first_index = num_results.text.find('Showing')+8\n last_index = num_results.text.find('available')-1\n number_of_results = num_results.text[first_index:last_index]\n aux = number_of_results.replace(\",\", \"\")\n number_of_results = aux\n break\n if(num_results.text.find('code')!=-1):\n last_index = num_results.text.find('code')-1\n number_of_results = num_results.text[0:last_index]\n aux = number_of_results.replace(\",\", \"\")\n number_of_results = aux\n break\n except:\n time.sleep(10)\n continue\n except:\n time.sleep(10)\n continue\n\n if(int(number_of_results)>1000):\n print(number_of_results)\n ub = int(ub - ((ub-lb)/2))\n print(ub)\n continue\n#fine blocco\n################################################################################################################################################\n\n################################################################################################################################################\n#inizio blocco ricerca nome repository, file\n\n results = driver.find_elements_by_class_name('d-inline-block')#nome della div class\n\n for elem in results:\n if(elem.text.find(\".\"+extension)==-1):\n continue\n else:\n extension_index = elem.text.find('.'+extension)+len(extension)+1#pulizia stringa\n var_string = elem.text[0:extension_index]\n index = var_string.find(' ')\n pythonDictionary = {'repository_name':var_string[0:index],'range':ub,'file_name':elem.find_element_by_link_text(var_string[index+3:extension_index]).text ,'file_path':elem.find_element_by_link_text(var_string[index+3:extension_index]).get_attribute('title') }\n lista.append(pythonDictionary)\n\n#fine blocco\n################################################################################################################################################\n\n try:#quando ho raggiunto l'ultima pagina e non riesco a trovare la casellina con la pagina\n current_page = driver.find_element_by_class_name('current')\n except:\n return ub\n\n #if int(current_page.text) == 2:#debug\n # break\n if int(current_page.text) == 30:\n time.sleep(30)\n if int(current_page.text) == 60:\n time.sleep(30)\n if int(current_page.text) == 90:\n time.sleep(30)\n if int(current_page.text) > 100:\n break\n\n page += 1\n\n pickle.dump(lista, file)\n file.close()\n return ub\n\n#main(\"eclass\", \"ecore\") #debug\n#main(\"eclipse\",\"mtl\") #debug\n\n#update() #debug\n","sub_path":"Crawler-Model/search.py","file_name":"search.py","file_ext":"py","file_size_in_byte":6452,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"179927598","text":"import info\nfrom Package.CMakePackageBase import *\n\n\nclass subinfo(info.infoclass):\n def setTargets(self):\n for ver in ['1.15.1', '1.18.0', '1.19.1']:\n self.targets[ver] = \\\n 'http://kcat.strangesoft.net/openal-releases/openal-soft-' + ver + '.tar.bz2'\n self.targetInstSrc[ver] = 'openal-soft-' + ver\n self.patchToApply['1.15.1'] = ('openal-soft-1.15.1-20130411.diff', 1)\n 
self.targetDigests['1.15.1'] = 'a0e73a46740c52ccbde38a3912c5b0fd72679ec8'\n self.description = 'a library for audio support'\n self.defaultTarget = '1.19.1'\n\n def setDependencies(self):\n self.runtimeDependencies[\"virtual/base\"] = None\n\n\nclass Package(CMakePackageBase):\n def __init__(self, **args):\n CMakePackageBase.__init__(self)\n","sub_path":"libs/openal-soft/openal-soft.py","file_name":"openal-soft.py","file_ext":"py","file_size_in_byte":798,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"338566538","text":"# ------------------------------------------------------------------------------\n#\n# Project: Master Inventory \n# Authors: Fabian Schindler \n#\n# ------------------------------------------------------------------------------\n# Copyright (C) 2016 European Space Agency\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in all\n# copies of this Software or works derived from this Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\n# THE SOFTWARE.\n# ------------------------------------------------------------------------------\n\n\nfrom django.core.management.base import BaseCommand, CommandError\n\nfrom minv.commands import MinvCommand\nfrom minv.inventory.backup import restore\n\n\nclass Command(MinvCommand):\n option_list = BaseCommand.option_list + ()\n\n require_group = \"minv_g_app_administrators\"\n\n args = ''\n\n help = (\n 'Restore the contents of the backup archive. '\n 'Requires membership of group \"minv_g_app_administrators\".'\n )\n\n def handle_authorized(self, *args, **options):\n if len(args) < 1:\n raise CommandError(\"No backup filename specified.\")\n\n filename = args[0]\n\n try:\n restore(filename)\n self.info(\"Successfully restored backup '%s'.\" % filename)\n except Exception as exc:\n self.error(\n \"Unable to restore backup '%s'. 
Error was: %s\"\n % (filename, exc)\n )\n if options.get(\"traceback\"):\n raise\n","sub_path":"minv/inventory/management/commands/restore.py","file_name":"restore.py","file_ext":"py","file_size_in_byte":2441,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"277657938","text":"from tkinter import *\nfrom pandas import *\nimport funcs\n\ndef clear():\n '''Clearing screen'''\n if len(array_points) > 0:\n try:\n for i in range(len(array_points)):\n canvas.delete(array_points[i])\n except:\n errors(' Точек нет!')\n try:\n canvas.delete(triangle_a)\n canvas.delete(in_circle_a)\n except:\n print('')\n\ndef errors(error_text):\n '''Outputing errors'''\n error = Tk()\n error.title('Ошибка!')\n error.geometry('500x80')\n\n lb_error_no_pts = Label(error, text = error_text, fg = 'red')\n lb_error_no_pts.pack(expand = 1)\n error.bind('', lambda event: error.destroy())\n\n error.mainloop()\n\ndef table_pts(all_points):\n '''Outputing table with all points'''\n global table\n\n if len(all_points) > 0:\n size = '350x' + str(len(all_points) * 50)\n\n table = Tk()\n table.title('Таблица точек')\n table.geometry(size)\n\n tb = DataFrame(all_points, columns = ['x', 'y'])\n lb = Label(table, text = str(tb))\n lb.place(x = 0, y = 0)\n table.bind('', lambda event: table.destroy())\n\n table.mainloop()\n else:\n errors('Таблица пуста!')\n\ndef create_pts(k, all_points):\n '''Building and drawing all points'''\n\n for i in range(len(all_points)):\n pts_text = canvas.create_text((540 + all_points[i][0] * k), 320 - all_points[i][1] * k,\n text = (str(i) + '.' + '(' + str(all_points[i][0]) + ', '\n + str(all_points[i][1]) + ')'), fill = 'black')\n pts = canvas.create_oval((500 + all_points[i][0] * k - 5), (300 - all_points[i][1] * k - 5),\n (500 + all_points[i][0] * k + 5),\n (300 - all_points[i][1] * k + 5), outline = 'red', fill = 'red', width = 1)\n array_points.append(pts)\n array_points.append(pts_text)\n\ndef get_pts(all_points):\n '''Drawing points and triangle with square'''\n global in_circle_a\n global triangle_a\n global pts\n global answer\n global k\n global text_x\n global text_y\n\n if len(all_points) > 2:\n calc_pts, trian_square = funcs.find_answer(all_points)\n #print(calc_pts)\n\n k = 50\n max = -1000000\n\n if 0 not in calc_pts:\n for i in range(len(calc_pts)):\n for j in range(len(calc_pts[i])):\n #print(abs(calc_pts[i][j]))\n if abs(calc_pts[i][j]) > max:\n max = abs(calc_pts[i][j])\n\n\n k = 270 / max\n #if (max >= 500):\n # k = 0.3\n #elif (max >= 20 and max <= 40):\n # k = 3\n #elif (max <= 7):\n # k = 30\n\n #print(k, max)\n\n if 0 not in calc_pts:\n answer = Tk()\n answer.title('Результаты')\n answer.geometry('350x200')\n answer.resizable(False, False)\n\n center_ic = funcs.center_in_circle(calc_pts)\n radius_ic = funcs.find_radius_in_circle(calc_pts)\n #print(radius_ic)\n\n x1 = 500 + (center_ic[0] - radius_ic) * k\n y1 = 300 - (center_ic[1] - radius_ic) * k\n x2 = 500 + (center_ic[0] + radius_ic) * k\n y2 = 300 - (center_ic[1] + radius_ic) * k\n\n #print(center_ic[0], i, x2, y2)\n\n #print(x1, y1, x2, y2)\n\n triangle_x1 = 500 + calc_pts[0][0] * k\n triangle_y1 = 300 - calc_pts[0][1] * k\n triangle_x2 = 500 + calc_pts[1][0] * k\n triangle_y2 = 300 - calc_pts[1][1] * k\n triangle_x3 = 500 + calc_pts[2][0] * k\n triangle_y3 = 300 - calc_pts[2][1] * k\n\n a, b, c = funcs.sides_len(calc_pts[0], calc_pts[1], calc_pts[2])\n\n triangle_a = canvas.create_polygon([triangle_x1, triangle_y1],\n [triangle_x2, triangle_y2],\n [triangle_x3, 
triangle_y3], fill = 'green')\n #print(x1[0])\n\n in_circle_a = canvas.create_oval(x1[0], y1[0], x2[0], y2[0], outline = 'blue',\n fill = 'blue', width = 2)\n lb = Label(answer, text = 'Результирующие точки')\n lb.place(x = 90, y = 10)\n\n indexes = [0, 0, 0]\n\n for i in range(len(calc_pts)):\n indexes[i] = all_points.index(calc_pts[i])\n\n in_circle = funcs.square_in_circle(calc_pts[0], calc_pts[1], calc_pts[2])\n tr_square = funcs.triangle_square(a, b, c)\n\n table_result_points = DataFrame(calc_pts, columns = ['x', 'y'], index = indexes)\n res_points = Label(answer, text = str(table_result_points))\n res_points.place(x = 150, y = 30)\n\n lb_square_in_circle = Label(answer, text = 'Площадь вписанной окружности: ')\n lb_square_in_circle.place(x = 10, y = 130)\n square_inwr_circle = Label(answer, text = str(round(in_circle, 3)))\n square_inwr_circle.place(x = 270, y = 130)\n\n lb_square_triangle = Label(answer, text = 'Площадь треугольника: ')\n lb_square_triangle.place(x = 10, y = 150)\n square_tr = Label(answer, text = str(round(tr_square, 3)))\n square_tr.place(x = 270, y = 150)\n square_tr = 0\n\n lb_square_difference = Label(answer, text = 'Разность площадей: ')\n lb_square_difference.place(x = 10, y = 170)\n square_diff = Label(answer, text = str(round(abs(tr_square - in_circle), 3)))\n square_diff.place(x = 270, y = 170)\n\n create_pts(k, all_points)\n answer.bind('', lambda event: answer.destroy())\n\n answer.mainloop()\n else:\n create_pts(k, all_points)\n errors('Треугольник вырожденный!')\n elif len(all_points) > 0 and len(all_points) < 3:\n create_pts(50, all_points)\n errors('Недостаточное количество точек для построения треугольника!')\n\ndef input_data(points_array):\n '''Filtering information that was inputed'''\n points_array = points_array.split(';')\n\n for i in range(len(points_array)):\n if points_array[i] != '':\n if len(points_array[i].split(' ')) <= 1 and len(points_array[i].split(',')) == 2:\n if points_array[i].split(',')[0] != '' and points_array[i].split(',')[1] != '' \\\n and [float(points_array[i].split(',')[0]), float(points_array[i].split(',')[1])] \\\n not in all_points:\n all_points.append([float(points_array[i].split(',')[0]), float(points_array[i].split(',')[1])])\n q = -500\n\n # for x in range(25):\n # k = x * 40\n # canvas.create_line(k + 20, 600, k + 20, 0, width = 1, fill = 'grey')\n # q += 40\n #\n # q = -300\n #\n # for y in range(15):\n # k = y * 40\n # canvas.create_line(0, k + 20, 1000, k + 20, width = 1, fill = 'grey')\n # q += 40\n\n canvas.create_line(500, 0, 500, 600, width = 2, arrow = FIRST, fill = 'black')\n canvas.create_line(0, 300, 1000, 300, width = 2, arrow = LAST, fill = 'black')\n canvas.create_text(490, 310, text = '0', fill = 'black')\n\ndef start(points_array):\n '''Calculating triangle'''\n canvas.delete('all')\n input_data(points_array)\n get_pts(all_points)\n\ndef delete_all_pts():\n '''Deleting all points and clearing the screen'''\n clear()\n all_points.clear()\n\ndef add_and_reset(points_array):\n '''Adding points and recalculating'''\n if points_array != '':\n clear()\n start(points_array)\n else:\n errors('Вы ничего не ввели!')\n\ndef delete_pts(points_array):\n '''Deleting uneccessary points'''\n #if points_array != '':\n #print(len(points_array))\n if len(points_array) > 1:\n points_array = list(points_array.split(','))\n points_arr = []\n points_arr.append([float(points_array[0]), float(points_array[1])])\n count = 0\n\n #print(points_arr)\n for i in range(len(points_arr)):\n if points_arr[i] in all_points:\n 
all_points.remove(points_arr[i])\n count += 1\n if count > 0:\n clear()\n get_pts(all_points)\n else:\n errors('Такой точки нет!')\n else:\n errors('Вы ничего не ввели!')\n\ndef edit_points(points_array):\n '''Editing points by user'''\n #print(len(points_array))\n if len(points_array) > 5:\n points_array = list(points_array.split(':'))\n delete_array = []\n for i in range(len(points_array)):\n if points_array[i] != '':\n delete_array.append([float(points_array[i].split(',')[0]),\n float(points_array[i].split(',')[1])])\n if delete_array[0] in all_points:\n all_points[all_points.index(delete_array[0])] = delete_array[1]\n clear()\n get_pts(all_points)\n else:\n errors('Такой точки нет!')\n else:\n errors('Вы ничего не ввели!')\n\n# ==============================================================================\n\ndef key_input(var):\n '''Filtering inputing in Entry windows'''\n\n new = var.get()\n check = False\n FindDot = False\n FindSign = False\n FindZero = False\n\n for i in range(len(new)):\n if (i != 0 and new[i] in \"+-\"):\n FindSign = True\n if (not FindDot and i != 0 and new[i] == '0' and new[0] == '0') or\\\n (not FindDot and i > 1 and new[i] == '0' and new[0] in '+-' and\\\n new[1] == '0'):\n FindZero = True\n else:\n FindZero = False\n if (not (new[i] in \"1234567890-+.\")) or\\\n (i > 0 and new[i] in \"+-.\" and new[i-1] in \"+-.\") or\\\n (FindDot and new[i] == '.') or (FindSign and new[i] in '+-') or\\\n (FindZero and new[i] == '0'):\n check = True\n break\n #if new == '-0':\n # new == 0\n if len(new) == 1 and new[0] == '+':\n break\n if new[i] == '.':\n FindDot = True\n if new[i] in '+-':\n FindDot = False\n if new[i] == '-' or new[i] == '+':\n FindSign = True\n if new == \"\" or new == \"-\" or not check:\n key_input.old = new\n else:\n var.set(key_input.old)\nkey_input.old = \"\"\n\n# ==============================================================================\n\ndef main():\n '''Main function'''\n\n root = Tk()\n root.title('Lab #1 by Ilyasov')\n root.geometry('1280x600')\n root.resizable(False, False)\n\n s_1 = StringVar()\n s_2 = StringVar()\n s_3 = StringVar()\n s_4 = StringVar()\n s_5 = StringVar()\n s_6 = StringVar()\n s_7 = StringVar()\n s_8 = StringVar()\n s_9 = StringVar()\n s_10 = StringVar()\n s_11 = StringVar()\n s_12 = StringVar()\n\n s_1.trace('w', lambda nm, idx, mode, var = s_1: key_input(var))\n s_2.trace('w', lambda nm, idx, mode, var = s_2: key_input(var))\n s_3.trace('w', lambda nm, idx, mode, var = s_3: key_input(var))\n s_4.trace('w', lambda nm, idx, mode, var = s_4: key_input(var))\n s_5.trace('w', lambda nm, idx, mode, var = s_5: key_input(var))\n s_6.trace('w', lambda nm, idx, mode, var = s_6: key_input(var))\n s_7.trace('w', lambda nm, idx, mode, var = s_7: key_input(var))\n s_8.trace('w', lambda nm, idx, mode, var = s_8: key_input(var))\n s_9.trace('w', lambda nm, idx, mode, var = s_9: key_input(var))\n s_10.trace('w', lambda nm, idx, mode, var = s_10: key_input(var))\n s_11.trace('w', lambda nm, idx, mode, var = s_11: key_input(var))\n s_12.trace('w', lambda nm, idx, mode, var = s_12: key_input(var))\n\n global canvas\n canvas = Canvas(root, width = 1000, height = 600, bg = 'white')\n canvas.pack(side = 'left')\n\n global all_points\n all_points = []\n\n global array_points\n array_points = []\n\n # q = -500\n #\n # for x in range(25):\n # k = x * 40\n # canvas.create_line(k + 20, 600, k + 20, 0, width = 1, fill = 'grey')\n # q += 40\n #\n # q = -300\n #\n # for y in range(15):\n # k = y * 40\n # canvas.create_line(0, k + 20, 1000, k + 20, 
width = 1, fill = 'grey')\n # q += 40\n\n\n canvas.create_line(500, 0, 500, 600, width = 2, arrow = FIRST, fill = 'black')\n canvas.create_line(0, 300, 1000, 300, width = 2, arrow = LAST, fill = 'black')\n canvas.create_text(490, 310, text = '0', fill = 'black')\n\n label_points = Label(root, text = 'X')\n label_points.place(x = 1060, y = 10)\n\n label_points = Label(root, text = 'Y')\n label_points.place(x = 1200, y = 10)\n\n entry_pts_x1 = Entry(root, width = 11, textvariable = s_1)\n entry_pts_x1.place(x = 1020, y = 30)\n\n entry_pts_y1 = Entry(root, width = 11, textvariable = s_2)\n entry_pts_y1.place(x = 1160, y = 30)\n\n entry_pts_x2 = Entry(root, width = 11, textvariable = s_3)\n entry_pts_x2.place(x = 1020, y = 60)\n\n entry_pts_y2 = Entry(root, width = 11, textvariable = s_4)\n entry_pts_y2.place(x = 1160, y = 60)\n\n entry_pts_x3 = Entry(root, width = 11, textvariable = s_5)\n entry_pts_x3.place(x = 1020, y = 90)\n\n entry_pts_y3 = Entry(root, width = 11, textvariable = s_6)\n entry_pts_y3.place(x = 1160, y = 90)\n\n button_find_result = Button(root, text = 'Найти', width = 26)\n button_find_result.bind('', lambda event: start(str(entry_pts_x1.get()) + ',' + str(entry_pts_y1.get()) + ';' + str(entry_pts_x2.get())\n + ',' + str(entry_pts_y2.get()) + ';' + str(entry_pts_x3.get()) + ',' + str(entry_pts_y3.get())))\n button_find_result.place(x = 1020, y = 120)\n\n button_add_pts = Button(root, text = 'Добавить точки', width = 26)\n button_add_pts.bind('', lambda event: add_and_reset(str(entry_pts_x1.get()) + ',' + str(entry_pts_y1.get()) + ';' + str(entry_pts_x2.get())\n + ',' + str(entry_pts_y2.get()) + ';' + str(entry_pts_x3.get()) + ',' + str(entry_pts_y3.get())))\n button_add_pts.place(x = 1020, y = 160)\n\n button_tab = Button(root, text = 'Вывести таблицу значений', width = 26)\n button_tab.bind('', lambda event: table_pts(all_points))\n button_tab.place(x = 1020, y = 200)\n\n button_clean = Button(root, text = 'Удалить все точки', width = 26)\n button_clean.bind('', lambda event: delete_all_pts())\n button_clean.place(x = 1020, y = 240)\n\n label_points = Label(root, text = '_________________________________')\n label_points.place(x = 1020, y = 275)\n\n entry_pts_x = Entry(root, width = 11, textvariable = s_7)\n entry_pts_x.place(x = 1020, y = 330)\n\n entry_pts_y = Entry(root, width = 11, textvariable = s_8)\n entry_pts_y.place(x = 1160, y = 330)\n\n label_points = Label(root, text = 'X')\n label_points.place(x = 1060, y = 310)\n\n label_points = Label(root, text = 'Y')\n label_points.place(x = 1200, y = 310)\n\n button_delete = Button(root, text = 'Удалить точку', width = 26)\n button_delete.bind('', lambda event: delete_pts(str(entry_pts_x.get())\n + ',' + str(entry_pts_y.get())))\n button_delete.place(x = 1020, y = 360)\n\n label_points = Label(root, text = '_________________________________')\n label_points.place(x = 1020, y = 390)\n\n label_points = Label(root, text = 'Старые значения: ')\n label_points.place(x = 1020, y = 420)\n\n label_points = Label(root, text = 'X')\n label_points.place(x = 1060, y = 440)\n\n label_points = Label(root, text = 'Y')\n label_points.place(x = 1200, y = 440)\n\n entry_points_x4 = Entry(root, width = 11, textvariable = s_9)\n entry_points_x4.place(x = 1020, y = 460)\n\n entry_points_y4 = Entry(root, width = 11, textvariable = s_10)\n entry_points_y4.place(x = 1160, y = 460)\n\n label_points = Label(root, text = 'Новые значения: ')\n label_points.place(x = 1020, y = 490)\n\n label_points = Label(root, text = 'X')\n label_points.place(x = 
1060, y = 510)\n\n label_points = Label(root, text='Y')\n label_points.place(x = 1200, y = 510)\n\n entry_points_x5 = Entry(root, width = 11, textvariable = s_11)\n entry_points_x5.place(x = 1020, y = 530)\n\n entry_points_y5 = Entry(root, width = 11, textvariable = s_12)\n entry_points_y5.place(x = 1160, y = 530)\n\n btn_add = Button(root, text = 'Редактировать точку', width = 26)\n btn_add.bind('', lambda event: edit_points(str(entry_points_x4.get()) + ',' + str(entry_points_y4.get()) +\n ':' + str(entry_points_x5.get()) + ',' + str(entry_points_y5.get())))\n btn_add.place(x = 1020, y = 560)\n root.bind('', lambda event: root.destroy())\n\n root.mainloop()\n\nif __name__ == '__main__':\n main()\n","sub_path":"lab_01/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":17134,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"37377037","text":"import numpy as np\nfrom sklearn import metrics\nfrom functools import partial\nimport scipy as sp\n\n\nclass OptimizedRounder(object):\n def __init__(self):\n self.coef_ = 0\n\n def _kappa_loss(self, coef, X, y):\n X_p = np.copy(X)\n for i, pred in enumerate(X_p):\n if pred < coef[0]:\n X_p[i] = 0\n elif pred >= coef[0] and pred < coef[1]:\n X_p[i] = 1\n elif pred >= coef[1] and pred < coef[2]:\n X_p[i] = 2\n elif pred >= coef[2] and pred < coef[3]:\n X_p[i] = 3\n else:\n X_p[i] = 4\n\n ll = metrics.cohen_kappa_score(y, X_p, weights='quadratic')\n return -ll\n\n def fit(self, X, y):\n loss_partial = partial(self._kappa_loss, X=X, y=y)\n initial_coef = [0.5, 1.5, 2.5, 3.5]\n self.coef_ = sp.optimize.minimize(loss_partial, initial_coef, method='nelder-mead')\n print(-loss_partial(self.coef_['x']))\n\n def predict(self, X, coef):\n X_p = np.copy(X)\n for i, pred in enumerate(X_p):\n if pred < coef[0]:\n X_p[i] = 0\n elif pred >= coef[0] and pred < coef[1]:\n X_p[i] = 1\n elif pred >= coef[1] and pred < coef[2]:\n X_p[i] = 2\n elif pred >= coef[2] and pred < coef[3]:\n X_p[i] = 3\n else:\n X_p[i] = 4\n return X_p\n\n def coefficients(self):\n return self.coef_['x']\n","sub_path":"scorer.py","file_name":"scorer.py","file_ext":"py","file_size_in_byte":1498,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"324468085","text":"from lib.exception import NotFoundError\nfrom lib.utils.utils import Singleton\n\n\nclass Collection(metaclass=Singleton):\n def __init__(self, mapper):\n self._objects = {}\n self._mapper = mapper\n\n def __iter__(self):\n return iter(self._objects.values())\n\n def __getitem__(self, i_object):\n return self._objects.get(int(i_object))\n\n def __contains__(self, item):\n return item in self._objects\n\n def _load_all(self):\n res_list = []\n\n results = self._mapper.get_ids()\n\n for res in results:\n res_id = res['id']\n self._load_if_not_exist(res_id)\n try:\n res_list.append(self[res_id])\n except:\n pass\n\n return res_list\n\n def _load_if_not_exist(self, i_object):\n if not self[i_object]:\n new_objects = self._mapper.get(i_object)\n for new_object in new_objects:\n self._objects[new_object.get_id()] = new_object\n\n @staticmethod\n def get_tags():\n pass\n\n def get_ids(self):\n return self._mapper.get_ids()\n\n def get(self, i_object=None, is_dict=True, **kwargs):\n res_list = []\n\n if i_object:\n self._load_if_not_exist(i_object)\n if self[i_object]:\n res_list.append(self[i_object])\n else:\n res_list = self._load_all()\n res_list = self.filter(res_list, **kwargs)\n if is_dict:\n for res_index in 
range(len(res_list)):\n res_list[res_index] = res_list[res_index].get_dict()\n return res_list\n\n def create(self, new_object, insert_id=False):\n self._mapper.insert(new_object, insert_id=insert_id)\n self._objects[new_object.get_id()] = new_object\n\n def delete(self, i_object):\n if i_object in self:\n del self._objects[i_object]\n self._mapper.delete(i_object)\n\n def update(self, i_object, data):\n self._load_if_not_exist(i_object)\n if i_object in self:\n self[i_object].update(data)\n self._mapper.update(self[i_object])\n else:\n raise NotFoundError('Company not found')\n\n def filter(self, search_list=None, **kwargs):\n res_list = []\n compare_list = []\n search_list = search_list if search_list else self._load_all()\n for key, value in kwargs.items():\n if isinstance(value, list):\n value = value[0]\n value = str(value)\n if '_' in key:\n sign, attr = key.split('_', maxsplit=1)\n compare_list.append({attr: {'sign': sign, 'value': value}})\n else:\n compare_list.append({key: {'value': value}})\n search_list = search_list if search_list else self._load_all()\n for obj in search_list:\n if obj.compare(attributes=compare_list):\n res_list.append(obj)\n return res_list\n","sub_path":"lab2/lib/utils/Collection.py","file_name":"Collection.py","file_ext":"py","file_size_in_byte":2950,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"557298477","text":"# linefrac.py\n\n\"\"\" \ncalculate the fractional contributions of lines in a given band, if all the flux in the band is contributed by lines \n\"\"\"\nimport numpy as np\nimport astropy.constants as const\nimport astropy.units as u\n\ndef fline_over_fnuband(fl, wl, Tl, fs, ws, Ts):\n\t\"\"\"\n\tcalculate the ratio flux_line / Fnu_band,\n\n\t\t{c / (wl * Tl)} * {( fl * wl * Tl) / sum[ fs * ws * Ts] }\n\n\twhere fl, wl, Tl are the flux, wavelength, and normalized filter transmission func of the target line. \n\twhere fs, ws, Ts are the flux, wavelength, and normalized filter transmission func of all the lines in band. 
\n\n\tParams\n\t------\n\tfl (float): \n\t\tflux of line\n\twl (float)\n\t\twavelength of line\n\tTl (float):\n\t\tnormalized filter transmission function at line\n\n\t\\\\ followings are the same but array of all the lines in band\n\tfs (array of float)\n\tws (array of float)\n\tTs (array of float)\n\n\tReturn\n\t------\n\tratio (quantity of unit Hz)\n\t\"\"\"\n\n\t# sanity check\n\tcheck_no_unit(fl)\n\tcheck_no_unit(fs)\n\tcheck_no_unit(Tl)\n\tcheck_no_unit(Ts)\n\twl = check_either_no_unit_or_specificunit(wl, unit=u.AA)\n\tws = check_either_no_unit_or_specificunit(ws, unit=u.AA)\n\n\t# calculation\n\tfirstterm = const.c / (Tl * wl * u.AA)\n\tsecondterm = FWT(fl, wl, Tl) / sumFWT(fs, ws, Ts)\n\n\treturn firstterm * secondterm\n\n\ndef check_no_unit(quan):\n\ttry:\n\t\tquan.unit\n\texcept:\n\t\treturn quan\n\telse: \n\t\traise Exception(\"Expecting no unit\")\n\n\ndef check_either_no_unit_or_specificunit(quan, unit=u.AA):\n\ttry:\n\t\tquan.unit\n\texcept:\n\t\treturn quan\n\telse: \n\t\tif quan.unit != unit: \n\t\t\traise Exception(\"wavelength unit incorrect\")\n\t\telse:\n\t\t\treturn np.array(quan)\n\n\ndef FWT(f, w, T):\n\treturn f*w*T\n\n\ndef sumFWT(fs, ws, Ts):\n\treturn np.sum( fs * ws * Ts)","sub_path":"bubbleimg/spector/linefrac.py","file_name":"linefrac.py","file_ext":"py","file_size_in_byte":1687,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"464341259","text":"# -*- coding: UTF-8 -*-\nfrom tkinter import *\nimport tkinter.font\nimport time\nfrom time import strftime\n#from darksky import forecast\nimport pytz\nfrom datetime import date, timedelta, datetime, timezone\nimport pandas as pd\n#from newsapi.articles import Articles\nimport datetime\nimport pickle\nimport os.path\n#from googleapiclient.discovery import build\n#from google_auth_oauthlib.flow import InstalledAppFlow\n#from google.auth.transport.requests import Request\nimport feedparser\nimport requests, json \nWIDTH = 800\nHEIGHT = 600\n\n# Weather API credentials\n#key = '93a522f375502ea4e4a091c06d034ff1'\n#ORANGE = 45.415907100000005, (-73.48298489999999)\n\n# News API credentials\n#apikey = '455e01c84ca44ff387187f10f202bed3'\nzipcode='J5R'\ncountrycode='ca'\nunits='metric'\nopenweathermap_appkey='c9cdea1f63b108c6311423e7fe4686a9'\nlatitude=45.415907100000005\nlongitude=-73.48298489999999\nexclude='hourly,minutely'\nunits='metric'\nweather_api_url='https://api.openweathermap.org/data/2.5/onecall?lat={lat}&lon={lon}&exclude={ex}&appid={appkey}&units={units}'.format(lat=latitude,appkey=openweathermap_appkey,lon=longitude,units=units,ex=exclude)\nrweather_api_url='https://api.openweathermap.org/data/2.5/onecall?lat1={lat}&lon1={lon}&exclude1={ex}&appid=1{appkey}&units={units}'.format(lat=latitude,appkey=openweathermap_appkey,lon=longitude,units=units,ex=exclude)\nprint(weather_api_url)\nclass GUI(Frame):\n\n def __init__(self, master):\n Frame.__init__(self, master)\n\n self.largeFont = tkinter.font.Font(family=\"Piboto\", size=70)\n self.mediumFont = tkinter.font.Font(family=\"Calibri\", size=40)\n self.normalFont = tkinter.font.Font(family=\"Piboto Light\", size=20)\n self.enNewsFont = tkinter.font.Font(family=\"Calibri Light\", size=20)\n\n def setupGUI(self):\n self.grid(row=0, column=0)\n persianFont = tkinter.font.Font(family=\"Traditional Arabic\", size=20)\n # Weather & news frame to contain weather/news info\n # For weather, column 0 = info, column 1 = icon\n today_weather_frame = Frame(self, width=400, height=500, bg='black')\n 
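# current-conditions frame: text labels go in columns 0 and 2, the weather icon in column 1\n 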
today_weather_frame.grid(row=0, column=0, sticky=W)\n GUI.weather_label0 = Label(today_weather_frame, text=\"Loading weather...\", fg='white', bg='black',\n font=self.mediumFont, justify=LEFT)\n GUI.weather_label0.grid(row=0, column=2, sticky=NW)\n \n GUI.weather_label1 = Label(today_weather_frame, text=\"Loading weather...\", fg='white', bg='black',\n font=self.mediumFont, justify=LEFT)\n GUI.weather_label1.grid(row=0, column=0, sticky=NW)\n \n GUI.weather_label_feels = Label(today_weather_frame, text=\"Feels Like...\", fg='white', bg='black',\n font=self.normalFont, justify=LEFT)\n GUI.weather_label_feels.grid(row=1, column=0, sticky=NW)\n \n GUI.weather_label_humidity = Label(today_weather_frame, text=\"Humidity...\", fg='white', bg='black',\n font=self.normalFont, justify=LEFT)\n GUI.weather_label_humidity.grid(row=1, column=2, sticky=NW)\n \n GUI.weather_label_wind = Label(today_weather_frame, text=\"Wind...\", fg='white', bg='black',\n font=self.normalFont, justify=LEFT)\n GUI.weather_label_wind.grid(row=2, column=0, sticky=NW)\n\n GUI.weather_label_pressure = Label(today_weather_frame, text=\"Pressure...\", fg='white', bg='black',\n font=self.normalFont, justify=LEFT)\n GUI.weather_label_pressure.grid(row=2, column=2, sticky=NW) \n\n GUI.weather_label_sunrise = Label(today_weather_frame, text=\"Sunrise...\", fg='white', bg='black',\n font=self.normalFont, justify=LEFT)\n GUI.weather_label_sunrise.grid(row=3, column=0, sticky=NW)\n\n GUI.weather_label_sunset = Label(today_weather_frame, text=\"Sunset...\", fg='white', bg='black',\n font=self.normalFont, justify=LEFT)\n GUI.weather_label_sunset.grid(row=3, column=2, sticky=NW) \n \n GUI.weather_label_cloudiness = Label(today_weather_frame, text=\"Cloudiness...\", fg='white', bg='black',\n font=self.normalFont, justify=LEFT)\n GUI.weather_label_cloudiness.grid(row=4, column=0, sticky=NW)\n\n GUI.weather_label_visibility = Label(today_weather_frame, text=\"Visibility...\", fg='white', bg='black',\n font=self.normalFont, justify=LEFT)\n GUI.weather_label_visibility.grid(row=4, column=2, sticky=NW) \n\n\n \n # Frame and labels to hold the forecast\n weather_news_frame = Frame(self, width=200, height=500, bg='black')\n weather_news_frame.grid(row=1, column=0, sticky=W)\n\n GUI.weather_label2 = Label(weather_news_frame, text=\"Loading weather...\", fg='white', bg='black',\n font=self.normalFont, justify=LEFT)\n GUI.weather_label2.grid(row=1, column=0, sticky=W)\n GUI.weather_label3 = Label(weather_news_frame, text=\"Loading weather...\", fg='white', bg='black',\n font=self.normalFont, justify=LEFT)\n GUI.weather_label3.grid(row=2, column=0, sticky=W)\n GUI.weather_label4 = Label(weather_news_frame, text=\"Loading weather...\", fg='white', bg='black',\n font=self.normalFont, justify=LEFT)\n GUI.weather_label4.grid(row=3, column=0, sticky=W)\n GUI.weather_label5 = Label(weather_news_frame, text=\"Loading weather...\", fg='white', bg='black',\n font=self.normalFont, justify=LEFT)\n GUI.weather_label5.grid(row=4, column=0, sticky=W)\n GUI.weather_label6 = Label(weather_news_frame, text=\"Loading weather...\", fg='white', bg='black',\n font=self.normalFont, justify=LEFT)\n GUI.weather_label6.grid(row=5, column=0, sticky=W)\n GUI.weather_label7 = Label(weather_news_frame, text=\"Loading weather...\", fg='white', bg='black',\n font=self.normalFont, justify=LEFT)\n GUI.weather_label7.grid(row=6, column=0, sticky=W)\n GUI.weather_label8 = Label(weather_news_frame, text=\"Loading weather...\", fg='white', bg='black',\n font=self.normalFont, 
justify=LEFT)\n GUI.weather_label8.grid(row=7, column=0, sticky=W)\n \n \n icon = PhotoImage(file=\"weather_icons/02d.gif\")\n icon = icon.subsample(10)\n\n # Set up labels to hold weather icons\n GUI.icon_label = Label(today_weather_frame, borderwidth=0, image=icon ,background='black')\n GUI.icon_label.photo = icon\n GUI.icon_label.grid(row=0, column=1, sticky=W)\n GUI.icon_label2 = Label(weather_news_frame, borderwidth=0, image=icon)\n GUI.icon_label2.grid(row=1, column=1, sticky=W)\n GUI.icon_label3 = Label(weather_news_frame, borderwidth=0, image=icon)\n GUI.icon_label3.grid(row=2, column=1, sticky=W)\n GUI.icon_label4 = Label(weather_news_frame, borderwidth=0, image=icon)\n GUI.icon_label4.grid(row=3, column=1, sticky=W)\n GUI.icon_label5 = Label(weather_news_frame, borderwidth=0, image=icon)\n GUI.icon_label5.grid(row=4, column=1, sticky=W)\n GUI.icon_label6 = Label(weather_news_frame, borderwidth=0, image=icon)\n GUI.icon_label6.grid(row=5, column=1, sticky=W)\n GUI.icon_label7 = Label(weather_news_frame, borderwidth=0, image=icon)\n GUI.icon_label7.grid(row=6, column=1, sticky=W)\n GUI.icon_label8 = Label(weather_news_frame, borderwidth=0, image=icon)\n GUI.icon_label8.grid(row=7, column=1, sticky=W)\n \n\n\n\n # Adjust this width for spacing\n frame_placeholder = Frame(self, width=WIDTH/2.65, height=10, bg='black')\n frame_placeholder.grid(row=0, column=1)\n\n\n\n\n \n\n\n self.configure(background='black')\n\n \n \n def updateWeatherDetails(self):\n response = requests.get(weather_api_url) \n dict_weather = response.json()\n if \"current\" in dict_weather:\n cur_weather=dict_weather[\"current\"]\n cur_feels=cur_weather[\"feels_like\"]\n cur_humidity=cur_weather[\"humidity\"]\n cur_temp=cur_weather[\"temp\"]\n cur_dew=cur_weather[\"dew_point\"]\n cur_pressure=cur_weather[\"pressure\"]\n cur_visibility=cur_weather[\"visibility\"]\n cur_wind_speed=cur_weather[\"wind_speed\"]\n today_icon=dict_weather[\"current\"][\"weather\"][0][\"icon\"]\n today_sunset=cur_weather[\"sunset\"]\n today_sunrise=cur_weather[\"sunrise\"]\n sunrise_utc_time = datetime.datetime.fromtimestamp(today_sunrise, timezone.utc)\n sunrise_local_time = sunrise_utc_time.astimezone()\n sunrise=sunrise_local_time.strftime(\"%H:%M\")\n sunset_utc_time = datetime.datetime.fromtimestamp(today_sunset, timezone.utc)\n sunset_local_time = sunset_utc_time.astimezone()\n sunset=sunset_local_time.strftime(\"%H:%M\")\n #GUI.icon_label.photo = today_icon \n icon_path = 'weather_icons/'\n today_icon += '.png'\n icon_path += today_icon\n print(icon_path)\n icon = PhotoImage(file='weather_icons/drone.gif') \n GUI.icon_label.configure(image=icon)\n GUI.icon_label.photo = icon \n else:\n print(\"bye\")\n \n #weather_url='http://api.openweathermap.org/data/2.5/weather?zip={zipcode},{countrycode}&appid={appkey}&units={units}'.format(zipcode=zipcode, countrycode=countrycode, units=units, appkey=openweathermap_appkey)\n #response = requests.get(weather_url) \n #dict_base = response.json()\n # if dict_base[\"cod\"] != \"404\": \n \n # # store the value of \"main\" \n # dict_main = dict_base[\"main\"] \n # wind=dict_base[\"wind\"]\n # wind_speed=round(wind[\"speed\"]*3.6)\n # current_visibility = round(dict_base[\"visibility\"]/1000)\n # current_city=dict_base[\"name\"]\n # dict_weather = dict_base[\"weather\"] \n # weather_description = dict_weather[0][\"description\"] \n\n # current_temperature = dict_main[\"temp\"] \n # feels_like_temperature = round(dict_main[\"feels_like\"] )\n # current_pressure = dict_main[\"pressure\"] \n # 
current_humidiy = dict_main[\"humidity\"] \n # #sunset=dict_base[\"sys\"][\"sunrise\"] \n # #sunset=datetime.datetime.utcfromtimestamp(dict_base[\"sys\"][\"sunrise\"]).strftime('%Y-%m-%dT%H:%M:%SZ')\n \n # sunrise_utc_time = datetime.datetime.fromtimestamp(dict_base[\"sys\"][\"sunrise\"], timezone.utc)\n # sunrise_local_time = sunrise_utc_time.astimezone()\n # sunrise=sunrise_local_time.strftime(\"%H:%M\")\n\n # sunset_utc_time = datetime.datetime.fromtimestamp(dict_base[\"sys\"][\"sunset\"], timezone.utc)\n # sunset_local_time = sunset_utc_time.astimezone()\n # sunset=sunset_local_time.strftime(\"%H:%M\")\n \n # #sunset=dict_base[\"sys\"][\"sunset\"] \n # cloudiness=dict_base[\"clouds\"][\"all\"] \n # if not cloudiness:\n # cloudiness=0\n # # print(\" Temperature (in Celsius unit): \" +\n # # str(round(current_temperature)) + \n # # \"\\n Atmospheric pressure (in hPa unit):\" +\n # # str(current_pressure) +\n # # \"\\n Humidity: \" +\n # # str(current_humidiy) + \"%\"\n # # \"\\n Description: \" +\n # # str(weather_description)) \n # # print(\" Visibility:\"+str(round(a,2))+\" KM\")\n # else: \n # print(\" City Not Found \") \n # GUI.weather_label_wind.configure(text='Wind Speed: '+ str(wind_speed) + \" KM/H\")\n # GUI.weather_label1.configure(text='Today in '+current_city+\": \"+ str(round(current_temperature)) + \"| \")\n # GUI.weather_label_humidity.configure(text='Humidity: '+ str(round(current_humidiy)) + \" %\")\n # GUI.weather_label_pressure.configure(text='Pressure: '+ str(current_pressure) + \" hPa\")\n # GUI.weather_label_feels.configure(text='Feels Like: '+ str(feels_like_temperature))\n # GUI.weather_label_sunrise.configure(text='Sunrise: '+ str(sunrise))\n # GUI.weather_label_sunset.configure(text='Sunset: '+ str(sunset))\n # GUI.weather_label_visibility.configure(text='Visibility: '+ str(current_visibility)+ \" KM\")\n # GUI.weather_label_cloudiness.configure(text='Cloudiness: '+ str(cloudiness) + \" %\")\n # #GUI.weather_label_feels.configure(text='Feels Like: '+ str(current_pressure) + \" hPa\")\n window.after(600000, mirror.updateWeatherDetails)\n \n def updateWeather(self):\n # Updates the weather information\n weekday = date.today()\n daily_summary = ''\n weather_today = ''\n weather_list = []\n today_icon = ''\n icons_list = []\n\n # Gets weather info\n counter = 0\n with forecast(key, *ORANGE) as orange:\n daily_summary += orange.daily.summary\n for day in orange.daily:\n day = dict(day=date.strftime(weekday, '%a'),\n sum=day.summary,\n tempMin=round((day.temperatureMin-32)*5/9),\n tempMax=round((day.temperatureMax-32)*5/9),\n icon=day.icon\n )\n # Save each of these in a list to display to GUI\n if counter == 0:\n weather_today += ('MAX {tempMax} | MIN {tempMin}'.format(**day))\n today_icon = ('{icon}'.format(**day))\n weekday += timedelta(days=1)\n counter += 1\n else:\n weather_list.append('{day}: MAX {tempMax} | MIN {tempMin}'.format(**day))\n icons_list.append('{icon}'.format(**day))\n weekday += timedelta(days=1)\n counter += 1\n\n GUI.weather_label0.configure(text=weather_today)\n\n # Set icon for weather today\n icon_path = 'weather_icons/'\n today_icon += '.gif'\n icon_path += today_icon\n icon = PhotoImage(file=icon_path)\n icon = icon.subsample(9)\n GUI.icon_label.configure(image=icon)\n GUI.icon_label.photo = icon\n\n # Push updated weather info to each label along with icons\n for x in range(0, len(weather_list)):\n temp_icon_path = 'weather_icons/'\n temp_icon_name = icons_list[x]\n temp_icon_name += '.gif'\n temp_icon_path += temp_icon_name\n temp_icon 
= PhotoImage(file=temp_icon_path)\n temp_icon = temp_icon.subsample(15)\n\n if x == 0:\n GUI.weather_label2.configure(text='•'+weather_list[x])\n GUI.icon_label2.configure(image=temp_icon)\n GUI.icon_label2.photo = temp_icon\n if x == 1:\n GUI.weather_label3.configure(text='•'+weather_list[x])\n GUI.icon_label3.configure(image=temp_icon)\n GUI.icon_label3.photo = temp_icon\n if x == 2:\n GUI.weather_label4.configure(text='•'+weather_list[x])\n GUI.icon_label4.configure(image=temp_icon)\n GUI.icon_label4.photo = temp_icon\n if x == 3:\n GUI.weather_label5.configure(text='•'+weather_list[x])\n GUI.icon_label5.configure(image=temp_icon)\n GUI.icon_label5.photo = temp_icon\n if x == 4:\n GUI.weather_label6.configure(text='•'+weather_list[x])\n GUI.icon_label6.configure(image=temp_icon)\n GUI.icon_label6.photo = temp_icon\n if x == 5:\n GUI.weather_label7.configure(text='•'+weather_list[x])\n GUI.icon_label7.configure(image=temp_icon)\n GUI.icon_label7.photo = temp_icon\n if x == 6:\n GUI.weather_label8.configure(text='•'+weather_list[x])\n GUI.icon_label8.configure(image=temp_icon)\n GUI.icon_label8.photo = temp_icon\n\n window.after(600000, mirror.updateWeather)\n\n\n \n creds = None\n if os.path.exists('token.pickle'):\n with open('token.pickle', 'rb') as token:\n creds = pickle.load(token)\n # If there are no (valid) credentials available, let the user log in.\n if not creds or not creds.valid:\n if creds and creds.expired and creds.refresh_token:\n creds.refresh(Request())\n else:\n flow = InstalledAppFlow.from_client_secrets_file(\n 'credentials.json', SCOPES)\n creds = flow.run_local_server()\n # Save the credentials for the next run\n with open('token.pickle', 'wb') as token:\n pickle.dump(creds, token)\n\n service = build('calendar', 'v3', credentials=creds)\n \n # Call the Calendar API\n now = datetime.datetime.utcnow().isoformat() + 'Z' # 'Z' indicates UTC time\n nextDate = datetime.datetime.utcnow() + datetime.timedelta(weeks=+2)\n nextDate= nextDate.isoformat()+ 'Z'\n events_result = service.events().list(calendarId='primary', timeMin=now, timeMax=nextDate,\n maxResults=7, singleEvents=True,\n orderBy='startTime').execute()\n events = events_result.get('items', [])\n event_list = []\n if not events:\n print('No upcoming events found.')\n for event in events:\n event_str = ''\n start = event['start'].get('dateTime', event['start'].get('date'))\n start = start[0:10] +\" \"+ start[11:16] # Remove unnecessary characters at end of string\n year = start.find('-')\n start_day = datetime.datetime.strptime(start, '%Y-%m-%d %H:%M').strftime('%a %b %d %H:%M')\n event_date = start[year + 1:year + 6]\n summary = event['summary'].encode('ascii', 'ignore').decode('ascii') # Remove emojis\n event_str += summary + ' | ' + start_day\n event_list.append(event_str)\n\n # Update calendar text\n event_delta= 5 - len(event_list)\n i=0\n if event_delta > 0:\n while i < event_delta+1:\n event_list.append(\"\") \n i=i+1 \n GUI.calendar_label1.configure(text=event_list[0]) \n GUI.calendar_label2.configure(text=event_list[1])\n GUI.calendar_label3.configure(text=event_list[2])\n GUI.calendar_label4.configure(text=event_list[3])\n GUI.calendar_label5.configure(text=event_list[4])\n \n if event_delta==5:\n GUI.calendar_label1.configure(text=\"No upcoming events!\") \n \n window.after(600000, mirror.updateCalendar)\n\ndef close_escape(event=None):\n print('Smart mirror closed')\n window.destroy()\n\n\nwindow = Tk()\nwindow.title(\"Smart 
Mirror\")\nwindow.geometry('800x600')\nwindow.configure(background='black')\n\n#Removes borders from GUI and implements quit via esc\nwindow.overrideredirect(1)\nwindow.overrideredirect(0)\nwindow.attributes(\"-fullscreen\", True)\nwindow.wm_attributes(\"-topmost\", 1)\nwindow.focus_set()\n\nwindow.bind(\"\", close_escape)\n\nmirror = GUI(window)\nmirror.setupGUI()\nwindow.after(1000, mirror.updateWeatherDetails)\n#window.after(1000, mirror.updateWeather)\n#window.after(1000, mirror.updateNews)\n#window.after(1000, mirror.updateNews_en)\n#window.after(1000, mirror.updateCalendar)\nwindow.mainloop()\n","sub_path":"weather_test.py","file_name":"weather_test.py","file_ext":"py","file_size_in_byte":20011,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"472586791","text":"# coding: utf-8\n# author: dlyapun\n\nfrom django.contrib.auth.decorators import login_required\nfrom profile.models import CustomUser\nfrom django.http import Http404, HttpResponseRedirect\nfrom django.shortcuts import render_to_response, redirect, render\nfrom django.views.generic import ListView, DetailView\nfrom force_blog.forms import BlogPostForm\nfrom force_blog.models import BlogPost, Category\nfrom django.template import RequestContext\nfrom django.core.exceptions import ObjectDoesNotExist\nimport time\n\n\nDISABLE = 0\nENABLE = 1\n\n\nclass BlogPostListView(ListView):\n model = BlogPost\n context_object_name = 'blog_posts'\n\n paginate_by = 6\n\n def get_queryset(self):\n if not self.request.user.is_authenticated():\n qs = BlogPost.objects.select_related(\n 'owner', 'owner__user').prefetch_related('category').exclude(state=0)\n return qs\n else:\n profile = CustomUser.objects.get(user=self.request.user)\n if profile.moderator or profile.goverment or profile.user.is_superuser:\n qs = BlogPost.objects.select_related(\n 'owner', 'owner__user').prefetch_related('category')\n else:\n qs = BlogPost.objects.select_related(\n 'owner', 'owner__user').prefetch_related('category').exclude(state=0)\n return qs\n\n def get_context_data(self, **kwargs):\n context = super(BlogPostListView, self).get_context_data(**kwargs)\n return context\n\n\nclass BlogPostListViewTag(BlogPostListView):\n\n def get_queryset(self, **kwargs):\n category = self.kwargs['category_id']\n return super(BlogPostListViewTag, self).get_queryset().filter(category=category)\n\n\nclass BlogPostArhiveListView(ListView):\n model = BlogPost\n context_object_name = 'blog_posts'\n\n template_name = 'force_blog/arhive_news.html'\n\n paginate_by = 50\n\n def get_queryset(self):\n qs = BlogPost.objects.select_related('owner', 'owner__user').prefetch_related(\n 'category').exclude(state=0)\n return qs\n\n\ndef blog_detail(request, pk):\n blogpost = BlogPost.objects.get(id=pk)\n\n if blogpost.state == DISABLE:\n try:\n profile = CustomUser.objects.get(user=request.user)\n if blogpost.owner.user == profile or profile.moderator or profile.goverment or profile.user.is_superuser:\n data = {'blogpost': blogpost, }\n return render_to_response('force_blog/blogpost_detail.html',\n data,\n context_instance=RequestContext(request))\n else:\n return redirect('/')\n except:\n return redirect('/')\n\n else:\n data = {'blogpost': blogpost, }\n return render_to_response('force_blog/blogpost_detail.html',\n data,\n context_instance=RequestContext(request))\n\n\n@login_required\ndef blog_edit(request, blog_id):\n profile = CustomUser.objects.get(user=request.user)\n categorys = Category.objects.all()\n\n if not profile.moderator and 
profile.goverment or profile.user.is_superuser):\n return redirect('/login/')\n\n blog = BlogPost.objects.get(id=blog_id)\n\n if request.method == \"POST\":\n form = BlogPostForm(request.POST, instance=blog)\n form.is_valid()\n try:\n tags = form.cleaned_data['category']\n except KeyError:\n new_tag = Category.objects.create(\n category=request.POST['category'])\n\n if form.is_valid():\n for x in categorys:\n blog.category.remove(x)\n\n for x in tags:\n try:\n tag = Category.objects.get(category=x)\n except ObjectDoesNotExist:\n pass\n\n try:\n blog.category.add(tag)\n except TypeError:\n pass\n\n form.save()\n url = u'/blog/%s' % blog_id\n return redirect(url)\n\n else:\n form = BlogPostForm(instance=blog)\n\n data = {'form': form, 'blog': blog, 'categorys': categorys}\n return render_to_response('force_blog/blogpost_edit.html',\n data,\n context_instance=RequestContext(request))\n\n\n@login_required\ndef blog_new(request):\n profile = CustomUser.objects.get(user=request.user)\n categorys = Category.objects.all()\n form = BlogPostForm()\n\n if not (profile.moderator or profile.goverment or profile.user.is_superuser):\n return redirect('/login/')\n\n if request.method == \"POST\":\n form = BlogPostForm(request.POST)\n form.is_valid()\n if form.is_valid():\n\n try:\n tags = form.cleaned_data['category']\n except KeyError:\n new_tag = Category.objects.create(\n category=request.POST['category'])\n\n form.save_with_owner(owner=profile)\n blog = BlogPost.objects.first()\n for x in tags:\n tag = Category.objects.get(category=x)\n blog.category.add(tag)\n\n url = u'/blog/%s' % blog.id\n return redirect(url)\n\n data = {'form': form, 'categorys': categorys}\n return render_to_response('force_blog/blogpost_new.html',\n data,\n context_instance=RequestContext(request))\n\n\ndef blog_backup(blog, user):\n # blog_backup = blog\n # blog_backup.title = blog.title + ', user: ' + str(user.user) + ', date' + time.ctime()\n # blog_backup.state = 0\n # blog_backup.save()\n # blog_edit = BlogEdit(user_edit=user)\n # blog_edit.save()\n pass\n\n\n@login_required\ndef blog_hidden(request, action, blog_id):\n profile = CustomUser.objects.get(user=request.user)\n\n if not (profile.moderator or profile.goverment or profile.user.is_superuser):\n return redirect('/login/')\n else:\n blog = BlogPost.objects.get(id=int(blog_id))\n if action == \"hide\":\n blog.state = BlogPost.DISABLE\n elif action == \"show\":\n blog.state = BlogPost.ENABLE\n else:\n pass\n\n blog.save()\n return redirect(blog.get_absolute_url())\n","sub_path":"force_blog/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":6291,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"146576909","text":"from kafka import KafkaProducer\nfrom intelligence import ContextES\nfrom intelligence import ContextFile\nfrom intelligence import Intel\nfrom variables import api_keys, config\nimport argparse\nimport json\n\n\nclass NoKeyError(Exception):\n pass\n\n\nclass IntelProducer:\n def __init__(self, server=config['kafka']['host'], **kwargs):\n self.kwargs = kwargs\n self.producer = KafkaProducer(bootstrap_servers=server,\n value_serializer=lambda x: json.dumps(x).encode('utf-8'))\n \n def produce_enrichment(self, key, value, records):\n for record in records:\n record[key] = value\n self.producer.send(self.kwargs['topic'], record).get(timeout=10)\n\n def greynoise(self):\n all_ips = ContextES().get_unique_extips()\n for ip in all_ips:\n records = Intel(ip=ip).greynoise()['records']\n 
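# stamp each record with the queried ip and push it to the configured kafka topic\n 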
produce_records = self.produce_enrichment('src_ip', ip, records)\n \n def virustotal(self):\n all_indicators = ContextFile().get_indicators()\n for indicator in all_indicators:\n records = Intel(md5=indicator).virustotal()['records']\n produce_records = self.produce_enrichment('md5', indicator, records)\n\n def xforce(self):\n all_indicators = ContextFile().get_indicators()\n for indicator in all_indicators:\n records = Intel(md5=indicator).xforce()['records']\n produce_records = self.produce_enrichment('md5', indicator, records)\n \n def hybridanalysis(self):\n all_indicators = ContextFile().get_indicators()\n records = Intel(md5=all_indicators).hybridanalysis()['records']\n for indicator in records:\n produce_records = self.produce_enrichment('md5', indicator['md5'], [indicator])\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(description='Verify iocs/footpring against public Intel sources')\n parser.add_argument('-s', '--source', type=str, help='What intel source to query', required=True,\n choices=['xforce', 'greynoise', 'virustotal', 'hybridanalysis'])\n args = parser.parse_args()\n if not api_keys[args.source]['key']: raise NoKeyError('No key specified')\n intel_producer = getattr(IntelProducer(topic=api_keys[args.source]['topic']), args.source)\n verify_intel = intel_producer()\n\n\n","sub_path":"elkserver/intel/producer.py","file_name":"producer.py","file_ext":"py","file_size_in_byte":2310,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"350415573","text":"import re\n\nimport requests\nfrom jsonview.views import JsonView\n\nfrom django.conf import settings\nfrom django.contrib.auth import get_user_model\nfrom django.contrib.auth.decorators import login_required\nfrom django.core.exceptions import ImproperlyConfigured\nfrom django.shortcuts import get_object_or_404\nfrom django.shortcuts import redirect\nfrom django.shortcuts import render\nfrom django.utils.decorators import method_decorator\nfrom django.views import View\nfrom django.views.generic import ListView\n\nfrom keybase_proofs.models import KeybaseProof\n\n\ndef fullmatch(regex, string, flags=0):\n \"\"\"Emulate python-3.4 re.fullmatch().\"\"\"\n return re.match(\"(?:\" + regex + r\")\\Z\", string, flags=flags)\n\n\ndef get_domain():\n domain = getattr(settings, 'KEYBASE_PROOFS_DOMAIN')\n if not domain:\n raise ImproperlyConfigured('KEYBASE_PROOFS_DOMAIN must be set when using.')\n return domain\n\n\ndef verify_proof(user, sig_hash, kb_username):\n \"\"\"\n Check the proof status in Keybase via the `sig/check_proof` endpoint.\n Returns a tuple (valid_proof, proof_live) indicating if the signature is\n valid for the given domain/kb_username/username combination and if the\n proof is publicly verifiable to the Keybase client/server respectively.\n\n Before storing a signature `valid_proof=True` must hold. 
Services can\n optionally check the status of signatures periodically and verify\n `proof_live=True` as well.\n \"\"\"\n\n domain = get_domain()\n endpoint = \"https://keybase.io/_/api/1.0/sig/check_proof.json\"\n try:\n r = requests.get(endpoint, params={\n 'domain': domain,\n 'username': user.username,\n 'kb_username': kb_username,\n 'sig_hash': sig_hash,\n })\n if r.status_code != 200:\n print(\"Invalid response:\", r)\n return False, False\n r_json = r.json()\n return r_json.get('valid_proof', False), r_json.get('proof_live', False)\n except Exception:\n return False, False\n\n\nclass KeybaseProofProfileView(ListView):\n \"\"\"\n Example endpoint for showing existing keybase proofs on a user's profile.\n Can be integrated into an existing profile page in production apps.\n \"\"\"\n model = KeybaseProof\n template_name = 'keybase_proofs/profile.html'\n\n def get_context_data(self, **kwargs):\n context = super(KeybaseProofProfileView, self).get_context_data(**kwargs)\n context['domain'] = get_domain()\n return context\n\n def get_queryset(self):\n username = self.kwargs.get('username', '')\n user = get_object_or_404(get_user_model(), username=username)\n queryset = self.model.objects.filter(user=user)\n return queryset\n\n\nclass KeybaseProofListView(JsonView, KeybaseProofProfileView):\n \"\"\"\n API endpoint that the Keybase servers/clients will use to validate proofs.\n Returns a json list of proofs for the given username. Must be a publicly\n accessible url.\n \"\"\"\n\n def get_context_data(self, **kwargs):\n # Will throw a 404 for missing users (from the URL parameter) or return\n # an empty queryset for a valid username.\n queryset = self.get_queryset()\n return {\n 'keybase_sigs': [x.to_dict() for x in queryset]\n }\n\n\n@method_decorator(login_required, name='dispatch')\nclass KeybaseProofView(View):\n \"\"\"\n Handles posting a new keybase proof. On GET requests the user can confirm\n the `kb_username` and `sig_hash` posted in the GET parameters from the\n keybase client. 
Upon confirmation, the user can POST these to store and\n allow the keybase servers/clients to verify the signature.\n\n Users can post multiple kb_usernames, and update the sig_hash if they wish\n to repost the proof.\n\n `sig_hash` must validate as a hex string\n \"\"\"\n template_name = 'keybase_proofs/profile_confirm.html'\n\n def _is_hex(self, s):\n return fullmatch(r'^[0-9a-fA-F]+$', s or \"\") is not None\n\n def get_redirect_url(self, **kwargs):\n return \"https://keybase.io/_/proof_creation_success?kb_ua={kb_ua}&kb_username={kb_username}&sig_hash={sig_hash}&username={username}&domain={domain}\".format(**kwargs)\n\n def _validate(self, sig_hash, kb_username):\n error = None\n if not (sig_hash and kb_username):\n error = 'both sig_hash and kb_username query parameters are required'\n elif not self._is_hex(sig_hash):\n error = 'sig_hash must be a hex string'\n return error\n\n def get(self, request, *args, **kwargs):\n sig_hash = request.GET.get('sig_hash')\n kb_username = request.GET.get('kb_username')\n kb_ua = request.GET.get('kb_ua')\n error = self._validate(sig_hash, kb_username)\n return render(request, self.template_name, {\n 'sig_hash': sig_hash,\n 'kb_username': kb_username,\n 'kb_ua': kb_ua,\n 'error': error\n })\n\n def post(self, request, *args, **kwargs):\n sig_hash = request.POST.get('sig_hash')\n kb_username = request.POST.get('kb_username')\n kb_ua = request.POST.get('kb_ua')\n error = self._validate(sig_hash, kb_username)\n if error is None:\n valid_proof, _ = verify_proof(request.user, sig_hash, kb_username)\n if not valid_proof:\n error = \"Invalid signature, please retry\"\n else:\n kb_proof, created = KeybaseProof.objects.get_or_create(\n user=request.user, kb_username=kb_username)\n kb_proof.is_verified = valid_proof\n kb_proof.sig_hash = sig_hash\n kb_proof.save()\n return redirect(self.get_redirect_url(**{\n 'kb_ua': kb_ua,\n 'kb_username': kb_username,\n 'sig_hash': sig_hash,\n 'username': request.user.username,\n 'domain': get_domain(),\n }), permanent=True)\n\n return render(request, self.template_name, {'error': error}, status=400)\n","sub_path":"keybase_proofs/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":6043,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"281586038","text":"import random\n\nfrom faker import Faker\n\nfrom common.constants import ProfileConstants\n\nfake_ru = Faker(\"Ru-ru\")\n\n\nclass EditRequiredData:\n def __init__(self, firstname=None, lastname=None, email=None):\n self.firstname = firstname\n self.lastname = lastname\n self.email = email\n\n @staticmethod\n def random():\n firstname = fake_ru.first_name()\n lastname = fake_ru.last_name()\n email = fake_ru.email()\n return EditRequiredData(firstname, lastname, email)\n\n\nclass EditOptionalData:\n def __init__(\n self,\n username=\"mem\",\n moodle_net_profile=None,\n city=None,\n description=None,\n picture_description=None,\n firstname_pho=None,\n surname_pho=None,\n middle_name=None,\n alternate_name=None,\n tags=None,\n id_number=None,\n institution=None,\n departament=None,\n phone=None,\n mobile_phone=None,\n address=None,\n ):\n self.username = username\n self.moodle_net_profile = moodle_net_profile\n self.city = city\n self.description = description\n self.picture_description = picture_description\n self.firstname_pho = firstname_pho\n self.surname_pho = surname_pho\n self.middle_name = middle_name\n self.alternate_name = alternate_name\n self.tags = tags\n self.id_number = id_number\n 
self.institution = institution\n self.departament = departament\n self.phone = phone\n self.mobile_phone = mobile_phone\n self.address = address\n\n @staticmethod\n def random():\n username = \"mem\"\n moodle_net_profile = fake_ru.url()\n city = fake_ru.city()\n description = fake_ru.text(max_nb_chars=200)\n picture_description = fake_ru.text(max_nb_chars=100)\n firstname_pho = fake_ru.first_name()\n surname_pho = fake_ru.last_name()\n middle_name = fake_ru.name()\n alternate_name = fake_ru.name()\n tags = fake_ru.name()\n id_number = fake_ru.random_number()\n institution = fake_ru.name()\n departament = fake_ru.name()\n phone = fake_ru.random_number()\n mobile_phone = fake_ru.random_number()\n address = fake_ru.name()\n return EditOptionalData(\n username,\n moodle_net_profile,\n city,\n description,\n picture_description,\n firstname_pho,\n surname_pho,\n middle_name,\n alternate_name,\n tags,\n id_number,\n institution,\n departament,\n phone,\n mobile_phone,\n address,\n )\n\n\nclass EditEmailTimeCountry:\n def __init__(\n self,\n email_display=None,\n country_code=None,\n timezone=None\n ):\n self.email_display = email_display\n self.country_code = country_code\n self.timezone = timezone\n\n @staticmethod\n def random():\n email_display = random.choice(\n list(ProfileConstants.EMAIL_DISPLAY_CHOICE.values())\n )\n country_code = fake_ru.country_code()\n timezone = random.choice(ProfileConstants.TIMEZONE_CHOICE)\n return EditEmailTimeCountry(email_display, country_code, timezone)\n","sub_path":"models/profile.py","file_name":"profile.py","file_ext":"py","file_size_in_byte":3309,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"43314312","text":"import json\nimport psycopg2\nimport requests\nfrom datetime import datetime, timedelta\n\nCONNECTION = json.load(open('connection.json'))\nPSQL_CONNECT = psycopg2.connect(\n host=CONNECTION['host'],\n port=CONNECTION['port'],\n database=CONNECTION['database'],\n user=CONNECTION['user'],\n password=CONNECTION['password']\n)\nCUR = PSQL_CONNECT.cursor()\nLISTED_COMPANY = requests.get('https://www.idx.co.id/umbraco/Surface/Helper/GetEmiten?emitenType=s').json()\n \ndef main():\n\n data = []\n for ticker in LISTED_COMPANY:\n url = f'https://www.idx.co.id/umbraco/Surface/ListedCompany/GetCompanyProfilesDetail?emitenType=&kodeEmiten={ticker.get(\"KodeEmiten\")}&language=id-id'\n response = requests.get(url)\n data.append(response.json())\n\n for d in data:\n try:\n CUR.execute(\n \"\"\"\n INSERT INTO company_profile (\n ticker_code,\n company_address,\n sector,\n subsector,\n listing_date,\n website,\n count_secretary,\n count_director,\n count_commissioner,\n count_audit_committee,\n count_stakeholder,\n count_subsidiary,\n count_dividend,\n created_at\n ) VALUES (\n %s, %s, %s, %s, %s, %s, %s,\n %s, %s, %s, %s, %s, %s, %s\n )\n \"\"\",\n (\n d.get('Search').get('KodeEmiten'),\n d.get('Profiles')[0].get('Alamat').upper(),\n d.get('Profiles')[0].get('Sektor').upper(),\n d.get('Profiles')[0].get('SubSektor').upper(),\n datetime.strptime(d.get('Profiles')[0].get('TanggalPencatatan')[:10], '%Y-%m-%d').date(),\n d.get('Profiles')[0].get('Website').lower(),\n len(d.get('Sekretaris')),\n len(d.get('Direktur')),\n len(d.get('Komisaris')),\n len(d.get('KomiteAudit')),\n len(d.get('PemegangSaham')),\n len(d.get('AnakPerusahaan')),\n len(d.get('Dividen')),\n datetime.now() + timedelta(hours = 7)\n )\n )\n PSQL_CONNECT.commit()\n except psycopg2.errors.UniqueViolation:\n PSQL_CONNECT.rollback()\n pass\n\n 
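# all tickers processed; release the cursor and the connection\n 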
CUR.close()\n PSQL_CONNECT.close()\n \nif __name__ == \"__main__\":\n main()\n","sub_path":"src/get_company_profile.py","file_name":"get_company_profile.py","file_ext":"py","file_size_in_byte":2622,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"234383617","text":"import json\nfrom flask import Flask, render_template\n\nbaseFilePath = \"data/\"\n\ndef readJSONDataSet():\n dataset = []\n for i in range(2, 68):\n filename = 'output' + str(i) + '.json'\n if filename:\n path = baseFilePath + \"raw/\" + filename\n with open(path, 'r') as f:\n datastore = json.load(f)\n dataset.append(datastore)\n return json.dumps(dataset)\n\napp = Flask(__name__)\n\n\n@app.route('/')\ndef main():\n return render_template(\"dashboard.html\", dataset=readJSONDataSet())\n\n\nif __name__ == '__main__':\n app.run()\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":586,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"215327599","text":"from secrets_example import *\nimport requests\n\ndef get_stories(section):\n baseurl = \"https://api.nytimes.com/svc/topstories/v2/\"\n\n extendedurl = baseurl + section + '.json'\n\n params={'api-key': api_key}\n\n return requests.get(extendedurl,params).json()\n\ndef get_headlines(nyt_results_dict):\n results = nyt_results_dict['results']\n headlines = []\n\n for r in results:\n title = r['title']\n url = r['url']\n #headlines.append(r['title'])\n headlines.append(str(title)+' ('+str(url)+')')\n return headlines","sub_path":"functions.py","file_name":"functions.py","file_ext":"py","file_size_in_byte":549,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"287823886","text":"import os\nimport multiprocessing\n\n\nmpimin = 1\nmpimax = 2\nmpistep = 1\nompmin = 4\nompmax = 12\nompstep = 4\n\n\ndef test(mpi=1, omp=1, intel=1):\n os.environ['OMP_NUM_THREADS'] = str(omp)\n os.environ['MKL_NUM_THREADS'] = str(intel)\n print('export OMP_NUM_THREADS='+str(int(omp)))\n print('export MKL_NUM_THREADS='+str(int(intel)))\n\n # intel iomp5 must source compilervars.sh intel64 to work properly\n cmd = 'mpiexec -n ' + \\\n str(mpi)+' ./TestStokes3D.X -S 2 -T 32 -P 0 -R 0'\n print(cmd)\n os.system(cmd)\n\n cmd = 'mpiexec -n ' + \\\n str(mpi)+' ./TestStokes3D.X -S 2 -T 32 -P 1 -R 0'\n print(cmd)\n os.system(cmd)\n\n cmd = 'mpiexec -n ' + \\\n str(mpi)+' ./TestStokes3D.X -S 2 -T 32 -P 4 -R 0'\n print(cmd)\n os.system(cmd)\n\n cmd = 'mpiexec -n ' + \\\n str(mpi)+' ./TestStokes3D.X -S 1 -T 32 -P 7 -R 0'\n print(cmd)\n os.system(cmd)\n\n return\n\n\ncpunumber = multiprocessing.cpu_count()\n\nfor mpi in range(mpimin, mpimax, mpistep):\n for omp in range(ompmin, ompmax, ompstep):\n if mpi*omp > cpunumber:\n continue\n test(mpi, omp, 1)\n","sub_path":"Test/StokesFMM3D/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":1114,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"253611619","text":"import sys\nimport torch\nfrom torch import nn\nfrom torch.autograd import Variable\nimport torchvision\nfrom .net_1d import cnn_1d,lstm,resnet_1d,multi_scale_resnet_1d,micro_multi_scale_resnet_1d,mlp\nfrom .net_2d import densenet,dfcnn,resnet,squeezenet,multi_scale_resnet,mobilenet,lightcnn\nfrom .ipmc import EarID,MV_Emotion\nfrom .autoencoder import autoencoder\nfrom .domain import dann,dann_base,dann_lstm\n\n\ndef creatnet(opt):\n name = opt.model_name\n 
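# pick the architecture that matches opt.model_name\n 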
#---------------------------------autoencoder---------------------------------\n if name =='autoencoder':\n net = autoencoder.Autoencoder(opt.input_nc, opt.feature, opt.label, opt.finesize)\n\n #---------------------------------domain---------------------------------\n elif name == 'dann':\n net = dann.DANN(opt.input_nc,opt.label,opt.domain_num)\n elif name == 'dann_base':\n net = dann_base.DANNBase()\n elif name == 'dann_lstm':\n net = dann_lstm.DANN(opt.lstm_inputsize, opt.lstm_timestep, opt.input_nc, opt.label, opt.domain_num)\n\n #---------------------------------IPMC Custom---------------------------------\n elif name == 'EarID':\n net = EarID.EarID(opt.label)\n elif name == 'MV_Emotion':\n net = MV_Emotion.MV_Emotion(opt.label)\n\n #---------------------------------classify_1d---------------------------------\n #mlp\n elif name =='mlp':\n net = mlp.mlp(opt.input_nc, opt.label, opt.finesize)\n #lstm\n elif name =='lstm':\n net = lstm.lstm(opt.lstm_inputsize,opt.lstm_timestep,input_nc=opt.input_nc,num_classes=opt.label)\n #cnn\n elif name == 'cnn_1d':\n net = cnn_1d.cnn(opt.input_nc,num_classes=opt.label)\n elif name == 'resnet18_1d':\n net = resnet_1d.resnet18()\n net.conv1 = nn.Conv1d(opt.input_nc, 64, 7, 2, 3, bias=False)\n net.fc = nn.Linear(512, opt.label)\n elif name == 'resnet34_1d':\n net = resnet_1d.resnet34()\n net.conv1 = nn.Conv1d(opt.input_nc, 64, 7, 2, 3, bias=False)\n net.fc = nn.Linear(512, opt.label)\n elif name == 'multi_scale_resnet_1d':\n net = multi_scale_resnet_1d.Multi_Scale_ResNet(inchannel=opt.input_nc, num_classes=opt.label)\n elif name == 'micro_multi_scale_resnet_1d':\n net = micro_multi_scale_resnet_1d.Multi_Scale_ResNet(inchannel=opt.input_nc, num_classes=opt.label)\n\n #---------------------------------classify_2d---------------------------------\n elif name == 'light':\n net = lightcnn.LightCNN(input_nc=opt.input_nc, num_classes=opt.label)\n elif name == 'dfcnn':\n net = dfcnn.dfcnn(num_classes = opt.label, input_nc = opt.input_nc)\n elif name == 'multi_scale_resnet':\n net = multi_scale_resnet.Multi_Scale_ResNet(input_nc = opt.input_nc, num_classes=opt.label)\n \n elif name in ['resnet101','resnet50','resnet18']:\n if name =='resnet101':\n net = resnet.resnet101(pretrained=True)\n net.fc = nn.Linear(2048, opt.label)\n elif name =='resnet50':\n net = resnet.resnet50(pretrained=True)\n net.fc = nn.Linear(2048, opt.label)\n elif name =='resnet18':\n net = resnet.resnet18(pretrained=True)\n net.fc = nn.Linear(512, opt.label)\n net.conv1 = nn.Conv2d(opt.input_nc, 64, 7, 2, 3, bias=False) \n \n elif 'densenet' in name:\n if name =='densenet121':\n net = densenet.densenet121(pretrained=False,num_classes = opt.label)\n elif name == 'densenet201':\n net = densenet.densenet201(pretrained=False,num_classes = opt.label)\n net.features.conv0 = nn.Conv2d(opt.input_nc, 64, kernel_size=7, stride=2, padding=3, bias=False) \n \n elif name == 'squeezenet':\n net = squeezenet.squeezenet1_1(pretrained=False,num_classes = opt.label,inchannel = opt.input_nc)\n\n elif name == 'mobilenet':\n net = mobilenet.mobilenet_v2(pretrained=True)\n net.features[0][0] = nn.Conv2d(opt.input_nc, 32, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), bias=False)\n net.classifier[1] = nn.Linear(in_features=1280, out_features=opt.label, bias=True)\n\n if opt.mode in ['classify_2d','domain']:\n exp = torch.rand(opt.batchsize, opt.input_nc, opt.img_shape[0], opt.img_shape[1])\n else:\n exp = torch.rand(opt.batchsize,opt.input_nc,opt.finesize)\n return 
net,exp","sub_path":"models/creatnet.py","file_name":"creatnet.py","file_ext":"py","file_size_in_byte":4318,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"40523396","text":"s = 999\np=[]\nq=[]\nfor i in range(1, s+1):\n if i % 3 == 0:\n p.append(i)\n if i % 5 ==0:\n q.append(i)\n\nm = set(p + q)\nprint(p)\nprint(q)\nprint(m)\nprint (sum(m))\n\n \n","sub_path":"multiples.py","file_name":"multiples.py","file_ext":"py","file_size_in_byte":183,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"35014220","text":"#!/usr/bin/env python\n# vim: set fileencoding=utf-8 :\n#\n# Author: Alisue (lambdalisue@hashnote.net)\n# URL: http://hashnote.net/\n# Date: 2013-10-04\n#\n# (C) 2013 hashnote.net, Alisue\n#\nfrom functools import wraps\nfrom functools import update_wrapper\nimport matplotlib.pyplot as pl\nfrom graf.styles.color import color_cycle\n\n\"\"\"matplotlib.pyplot commands which will be treated as command\"\"\"\n_pyplot_commands = [\n 'title', 'suptitle', 'grid',\n 'xlabel', 'xticks', 'xlim', 'twinx',\n 'ylabel', 'yticks', 'ylim', 'twiny',\n 'tick_params', 'axes',\n 'show', 'savefig',\n 'text', 'box',\n]\n\"\"\"matplotlib.pyplot PLOT commands which will be treated as command\"\"\"\n_pyplot_plot_commands = [\n 'plot', 'errorbar', 'bar', 'axhline', 'axvline',\n]\n\n\ndef original_color_cycle(fn):\n \"\"\"\n A decorator to force using own color_cycle instead of matplotlib\n color_cycle. All method related to plotting should be wrapped with this\n decorator\n \"\"\"\n def wrapper(*args, **kwargs):\n if kwargs.get('color', None) is None:\n kwargs['color'] = next(color_cycle)\n return fn(*args, **kwargs)\n return update_wrapper(wrapper, fn)\n\n@wraps(pl.figure)\ndef figure(*args, **kwargs):\n # reset color cycle\n color_cycle.reset()\n return pl.figure(*args, **kwargs)\n\n@wraps(pl.subplot)\ndef subplot(*args, **kwargs):\n # reset color cycle\n color_cycle.reset()\n return pl.subplot(*args, **kwargs)\n\n@wraps(pl.legend)\ndef legend(*args, **kwargs):\n if len(args) == 0:\n # automatically create handles and labels from\n # all axes in the current figure\n handles = []\n labels = []\n fig = pl.gcf()\n for ax in fig.axes:\n _handles, _labels = ax.get_legend_handles_labels()\n handles += _handles\n labels += _labels\n args = [handles, labels]\n return pl.legend(*args, **kwargs)\n\n# Create an plugin registry for loader class\ndef __plugin__(registry):\n # register alternative commands\n registry.register('figure', figure)\n registry.register('subplot', subplot)\n registry.register('legend', legend)\n # register commands\n for name in _pyplot_commands:\n fn = getattr(pl, name)\n registry.register(name, fn)\n # register commands related to plotting\n for name in _pyplot_plot_commands:\n fn = getattr(pl, name)\n registry.register(name, original_color_cycle(fn))\n__plugin__.manually = True\n","sub_path":"graf/src/graf/builtins/pyplot.py","file_name":"pyplot.py","file_ext":"py","file_size_in_byte":2447,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"163560606","text":"import numpy as np\nfrom collections import defaultdict\nfrom operator import itemgetter\ndataset_filename = \"/Users/kddr/PycharmProjects/affinity_dataset.txt\"\nX = np.loadtxt(dataset_filename)\nn_samples, n_features = X.shape\nprint(\"This dataset has {0} samples and {1} features\".format(n_samples, 
n_features))\nvalid_rules=defaultdict(int)\ninvalid_rules=defaultdict(int)\nnum_occurances=defaultdict(int)\nfeatures = [\"bread\", \"milk\", \"cheese\", \"apples\", \"bananas\"]\nfor sample in X:\n for premis in range(n_features):\n if sample[premis]==0:\n continue\n num_occurances[premis]+=1\n for conclusion in range(n_features):\n if premis==conclusion:\n continue\n if sample[conclusion]==1:\n valid_rules[(premis,conclusion)]+=1\n else:\n invalid_rules[(premis,conclusion)]+=1\nsupport =valid_rules\nconfidence=defaultdict(float)\nfor premis,conclusion in valid_rules.keys():\n rule=(premis,conclusion)\n confidence[rule]=valid_rules[rule]/num_occurances[premis]\n\n\n#for premise, conclusion in confidence:\n# premise_name = features[premise]\n# conclusion_name = features[conclusion]\n# print(\"Rule: If a person buys {0} they will also buy {1}\".format(premise_name, conclusion_name))\n# print(\" - Confidence: {0:.3f}\".format(confidence[(premise, conclusion)]))\n# print(\" - Support: {0}\".format(support[(premise, conclusion)]))\n# print(\"\")\n\n\ndef print_rule(premise,conclusion,support,confidence,features):\n premise_name=features[premise]\n conclusion_name=features[conclusion]\n print(\"Rule: If a person buys {0} they will also buy {1}\".format(premise_name,conclusion_name))\n print(\"-Support:{0}\".format(support[(premise,conclusion)]))\n print(\"-Confidence:{0:.3f}\".format(confidence[(premise,conclusion)]))\npremise = 3\nconclusion = 1\n#print_rule(premise, conclusion, support, confidence, features)\n\nsorted_support=sorted(support.items(),key=itemgetter(1),reverse=True)\nfor index in range(5):\n print(\"Rule #{0}\".format(index+1))\n premise,conclusion=sorted_support[index][0]\n print_rule(premise,conclusion,support,confidence,features)\nsorted_confidence=sorted(confidence.items(),key=itemgetter(1),reverse=True)\nfor index in range(5):\n print(\"Rule #{0}\".format(index+1))\n premise,conclusion=sorted_confidence[index][0]\n print_rule(premise,conclusion,support,confidence,features)","sub_path":"dataminingTest.py","file_name":"dataminingTest.py","file_ext":"py","file_size_in_byte":2396,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"89943677","text":"import math\nimport scipy.sparse as sparse\nimport numpy as np\n\nclass ConstrainedRandomWalk:\n def __init__(self, time_series, m, t):\n self.time_series = time_series\n self.embedding_dimension = m\n self.permutation_to_vertex = {}\n self.permutation_to_zvectors = {}\n self.vertex_to_permutation = {}\n self.lag = t\n self.z_vectors = self.generate_z_vectors(time_series, m, t)\n self.adjacency_matrix = self.build_adjacency_matrix(time_series, m, t)\n self.restored_dynamics = []\n\n def build_adjacency_matrix(self, time_series, m, t):\n # stdlib factorial; the np.math alias is deprecated and removed in newer NumPy\n adjacency_matrix = sparse.dok_matrix((math.factorial(m), math.factorial(m)), dtype=np.intc)\n\n for idx, z_vector_current in enumerate(self.z_vectors):\n if idx + 1 < self.z_vectors.shape[0]: # the last vertex in the graph may be omitted, but it will be isolated in such a case\n permutation_current = self.get_permutation(z_vector_current)\n permutation_next = self.get_permutation(self.z_vectors[idx + 1])\n vertex_current_idx = self.map_permutation_to_vertex(permutation_current)\n vertex_next_idx = self.map_permutation_to_vertex(permutation_next)\n\n if adjacency_matrix[vertex_current_idx, vertex_next_idx] is None:\n adjacency_matrix[vertex_current_idx, vertex_next_idx] = 1\n else :\n adjacency_matrix[vertex_current_idx, vertex_next_idx] += 1\n\n return 
adjacency_matrix\n\n def generate_z_vectors(self, time_series, m, t):\n z_vectors = []\n\n for i in range(time_series.size):\n z_vector = np.empty(m, dtype=float) # builtin float: np.float was removed from NumPy\n z_vector[0] = time_series[i]\n\n for j in range(1, m):\n if i + j * t < time_series.size:\n z_vector[j] = time_series[i + j * t]\n else:\n z_vector = None\n\n if z_vector is not None:\n z_vectors.append(z_vector)\n return np.array(z_vectors)\n\n def get_permutation(self, z_vector):\n indexes = np.argsort(np.argsort((-z_vector)))\n\n if tuple(indexes) in self.permutation_to_zvectors:\n self.permutation_to_zvectors[tuple(indexes)].append(z_vector)\n else:\n self.permutation_to_zvectors[tuple(indexes)] = [z_vector]\n\n return indexes\n\n def map_permutation_to_vertex(self, permutation):\n permutation_tuple = tuple(permutation)\n ret_val = -1\n\n if permutation_tuple in self.permutation_to_vertex:\n ret_val = self.permutation_to_vertex[permutation_tuple]\n else:\n ret_val = self.permutation_to_vertex[permutation_tuple] = len(self.permutation_to_vertex) + 1\n self.vertex_to_permutation[ret_val] = permutation_tuple\n\n if ret_val == -1:\n raise ValueError('ret_val is -1')\n\n return ret_val\n\n def constrained_random_walk(self, length = 1000):\n init_seed = np.random.randint(0, self.z_vectors.shape[0] - self.lag)\n restored_dynamics = self.init_restored_dynamics(init_seed)\n ptr = self.lag - 1\n blocked_nodes = [[] for _ in range(length)] # independent lists; [[]] * length would alias one shared list\n\n while ptr < length:\n print(ptr)\n current_node = restored_dynamics[ptr]\n allowable_transitions = []\n\n for i in range(ptr+1, len(blocked_nodes)):\n blocked_nodes[i] = []\n\n all_possible_transitions = self.get_all_possible_transitions(current_node)\n\n for transition in all_possible_transitions:\n if self.is_allowable_transition(restored_dynamics[ptr - self.lag + 1], transition):\n allowable_transitions.append(transition)\n\n allowable_transitions = self.remove_blocked_nodes(allowable_transitions, blocked_nodes[ptr])\n\n if len(allowable_transitions) > 0:\n ptr += 1\n next_node = self.choose_transition(current_node, allowable_transitions)\n if ptr >= len(restored_dynamics):\n restored_dynamics.append(next_node)\n else:\n restored_dynamics[ptr] = next_node\n else:\n ptr -= 1\n blocked_nodes[ptr].append(current_node)\n\n if ptr < self.lag:\n raise ValueError('No path can be found from initial seed')\n\n self.restored_dynamics = restored_dynamics\n return restored_dynamics\n\n def init_restored_dynamics(self, init_seed):\n restored_dynamics = []\n for i in range(self.lag):\n restored_dynamics.append(tuple(self.get_permutation(self.z_vectors[init_seed + i])))\n\n return restored_dynamics\n\n def get_all_possible_transitions(self, current_node):\n transitions = []\n i = self.permutation_to_vertex[tuple(current_node)]\n for key in self.adjacency_matrix.keys():\n if key[0] == i:\n transitions.append(self.vertex_to_permutation[key[1]])\n return transitions\n\n def is_allowable_transition(self, previous_node, next_node):\n for i in range(1, len(previous_node)):\n if i + 1 == len(previous_node):\n return 1\n if (previous_node[i] - previous_node[i+1]) * (next_node[i-1] - next_node[i]) < 0:\n return 0\n return 1\n\n def remove_blocked_nodes(self, allowable_transitions, blocked_nodes):\n np_allowable = np.array(allowable_transitions)\n np_blocked = np.array(blocked_nodes)\n constrained_transitions = []\n\n for allowable in np_allowable:\n to_add = True\n for blocked in np_blocked:\n if np.array_equal(blocked, allowable):\n to_add = False\n break\n if to_add:\n 
constrained_transitions.append(tuple(allowable.tolist()))\n\n return constrained_transitions\n\n def choose_transition(self, current_node, allowable_transitions):\n weights = []\n current_row_idx = self.permutation_to_vertex[current_node]\n for transition in allowable_transitions:\n weights.append(self.adjacency_matrix.get((current_row_idx, self.permutation_to_vertex[transition])))\n\n idx = np.random.choice(len(weights), p=weights/np.sum(weights))\n return allowable_transitions[idx]\n\n def regenerate_time_series(self, length):\n restored_dynamics = self.constrained_random_walk(length)\n regenerated_time_series = []\n\n for node in restored_dynamics:\n z_vectors = self.permutation_to_zvectors[node]\n idx = np.random.randint(len(z_vectors))\n regenerated_time_series.append(z_vectors[idx][0])\n\n return np.array(regenerated_time_series)\n\n","sub_path":"ConstrainedRandomWalk.py","file_name":"ConstrainedRandomWalk.py","file_ext":"py","file_size_in_byte":6050,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"259711050","text":"from django.shortcuts import render\nfrom rest_framework import mixins, viewsets\n\nfrom app.models import Article\nfrom app.serializers import ArticleSerializer\n\n\nclass ArticleView(viewsets.GenericViewSet,\n mixins.ListModelMixin,\n mixins.DestroyModelMixin,\n mixins.UpdateModelMixin,\n mixins.RetrieveModelMixin,\n mixins.CreateModelMixin):\n\n # query the data\n queryset = Article.objects.filter(is_delete=0)\n # serializer\n serializer_class = ArticleSerializer\n\n def perform_destroy(self, instance):\n instance.is_delete = 1\n instance.save()\n\n\ndef all_article(request):\n if request.method == 'GET':\n # articels = Article.objects.all()\n return render(request, 'articles.html')\n","sub_path":"qf_1806/1.django/day09/代码/day09/app/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":796,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"81217264","text":"import dlib\nimport cv2\nfrom imutils import face_utils\nimport numpy as np\nimport argparse\nimport imutils\n\ndef run(image):\n # initialize dlib's face detector (HOG-based) and then create\n # the facial landmark predictor\n detector = dlib.get_frontal_face_detector()\n predictor = dlib.shape_predictor(\"shape_predictor_68_face_landmarks.dat\")\n\n # load the input image, resize it, and convert it to grayscale\n #image = cv2.imread(image)\n image = imutils.resize(image, width=500)\n gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\n\n # detect faces in the grayscale image\n rects = detector(gray, 1)\n\n # loop over the face detections\n for (i, rect) in enumerate(rects):\n # determine the facial landmarks for the face region, then\n # convert the landmark (x, y)-coordinates to a NumPy array\n shape = predictor(gray, rect)\n shape = face_utils.shape_to_np(shape)\n\n # loop over the face parts individually\n for (name, (i, j)) in face_utils.FACIAL_LANDMARKS_IDXS.items():\n # clone the original image so we can draw on it, then\n # display the name of the face part on the image\n clone = image.copy()\n # loop over the subset of facial landmarks, drawing the\n # specific face part\n xMax = 0\n yMax = 0\n for (x, y) in shape[i:j]:\n cv2.circle(clone, (x, y), 1, (0, 0, 255), -1)\n if x > xMax:\n xMax = x\n if y > yMax:\n yMax = y\n\n # extract the ROI of the face region as a separate image\n (x, y, w, h) = cv2.boundingRect(np.array([shape[i:j]]))\n differenceY = yMax-y\n differenceX = xMax-x\n x -= differenceX // 4 # floor division keeps the box coordinates ints under Python 3\n 
w += differenceX // 2\n y -= differenceY // 4\n h += differenceY // 2\n roi = image[y:y + h, x:x + w]\n roi = imutils.resize(roi, width=250, inter=cv2.INTER_CUBIC)\n cv2.imwrite(\"ROI.png\", roi)\n\n # show the particular face part\n #cv2.imshow(\"ROI\", roi)\n #cv2.imshow(\"Image\", clone)\n #cv2.waitKey(0)\n break\n\n","sub_path":"FaceRecognition.py","file_name":"FaceRecognition.py","file_ext":"py","file_size_in_byte":2673,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"285823888","text":"import os\r\nimport webapp2\r\n\r\nimport jinja2\r\n\r\njinja_environment = jinja2.Environment(autoescape=True,\r\n loader=jinja2.FileSystemLoader(os.path.join(os.path.dirname(__file__), 'templates')))\r\n\r\nclass MainPage(webapp2.RequestHandler):\r\n def get(self):\r\n template_values = {\r\n 'name': 'Serendipo',\r\n 'verb': 'extremely happy'\r\n }\r\n\r\n template = jinja_environment.get_template('1234.html')\r\n self.response.out.write(template.render(template_values))\r\n\r\napp = webapp2.WSGIApplication([('/', MainPage)],\r\n debug=True)","sub_path":"Untitled-1.py","file_name":"Untitled-1.py","file_ext":"py","file_size_in_byte":596,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"401488249","text":"import pickle\nimport numpy as np\n\nimport theano\nimport theano.tensor as T\nimport matplotlib.pyplot as plt\n\nfrom shared_modeling_simulation import get_likelihoods, post_from_lik\nfrom modeling_helpers import load_data, get_save_dir_and_save_id, print_logp_info\nimport pymc3 as pm\n\n\n# Switches for this script\nrun_on_cluster = False\nverbose = False\nfile_name_suff = 'betperswirew'\n\nupper = 1000\n\n# Which data should be fitted?\nfitted_data_name = 'humans' # 'humans', 'simulations'\nkids_and_teens_only = False\nadults_only = False\n\n# Sampling details\nn_samples = 50\nn_tune = 10\nn_cores = 1\nn_chains = 1\ntarget_accept = 0.8\n\n# Load to-be-fitted data\nn_subj, rewards, choices, group, n_groups = load_data(run_on_cluster, fitted_data_name, kids_and_teens_only, adults_only, verbose)\npersev_bonus = 2 * choices - 1 # recode as -1 for left and +1 for right\npersev_bonus = np.concatenate([np.zeros((1, n_subj)), persev_bonus]) # add 0 bonus for first trial\n\nrewards = theano.shared(np.asarray(rewards, dtype='int32'))\nchoices = theano.shared(np.asarray(choices, dtype='int32'))\ngroup = theano.shared(np.asarray(group, dtype='int32'))\npersev_bonus = theano.shared(np.asarray(persev_bonus, dtype='int32'))\n\n# Prepare things for saving\nsave_dir, save_id = get_save_dir_and_save_id(run_on_cluster, file_name_suff, fitted_data_name, n_samples)\n\n# Fit models\nmodel_dict = {}\nprint(\"Compiling models for {0} with {1} samples and {2} tuning steps...\\n\".format(fitted_data_name, n_samples, n_tune))\n\nwith pm.Model() as model:\n\n # Get population-level and individual parameters\n eps = T.as_tensor_variable(0)\n p_noisy = 1e-5 * T.as_tensor_variable(1)\n\n beta_mu_mu = pm.Bound(pm.Normal, lower=0)('beta_mu_mu', mu=1, sd=2)\n beta_mu_sd = pm.HalfNormal('beta_mu_sd', sd=2)\n beta_sd_sd = pm.HalfNormal('beta_sd_sd', sd=1)\n beta_mu = pm.Bound(pm.Normal, lower=0)('beta_mu', mu=beta_mu_mu, sd=beta_mu_sd, shape=n_groups)\n beta_sd = pm.HalfNormal('beta_sd', sd=beta_sd_sd, shape=n_groups)\n beta_offset = pm.Normal('beta_offset', mu=0, sd=1, shape=n_subj)\n\n persev_mu_mu = pm.Bound(pm.Normal, lower=-1, upper=1)('persev_mu_mu', mu=0, sd=0.5)\n persev_mu_sd = pm.HalfNormal('persev_mu_sd', sd=0.5)\n 
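# Note on the parameterization used throughout this block: each subject-level
# parameter is assembled as mu[group] + offset * sd[group] with
# offset ~ Normal(0, 1) (the non-centered form), which has the same
# distribution as Normal(mu, sd) but a geometry that NUTS samples more
# easily in hierarchical models. A standalone sanity check (illustrative
# only, not part of the original script):
#
#     import numpy as np
#     rng = np.random.default_rng(0)
#     mu, sd = 1.0, 2.0
#     noncentered = mu + rng.standard_normal(100000) * sd
#     centered = rng.normal(mu, sd, 100000)
#     print(noncentered.mean(), noncentered.std())  # ~1.0, ~2.0
#     print(centered.mean(), centered.std())        # ~1.0, ~2.0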
persev_sd_sd = pm.HalfNormal('persev_sd_sd', sd=0.5)\n persev_mu = pm.Bound(pm.Normal, lower=-1, upper=1)('persev_mu', mu=persev_mu_mu, sd=persev_mu_sd, shape=n_groups)\n persev_sd = pm.HalfNormal('persev_sd', sd=persev_sd_sd, shape=n_groups)\n persev_offset = pm.Normal('persev_offset', mu=0, sd=1, shape=n_subj)\n\n p_switch_mu_mu = pm.Bound(pm.Normal, lower=0, upper=1)('p_switch_mu_mu', mu=0.1, sd=0.5)\n p_switch_mu_sd = pm.HalfNormal('p_switch_mu_sd', sd=0.5)\n p_switch_sd_sd = pm.HalfNormal('p_switch_sd_sd', sd=0.5)\n p_switch_mu = pm.Bound(pm.Normal, lower=0, upper=1)('p_switch_mu', mu=p_switch_mu_mu, sd=p_switch_mu_sd, shape=n_groups)\n p_switch_sd = pm.HalfNormal('p_switch_sd', sd=p_switch_sd_sd, shape=n_groups)\n p_switch_offset = pm.Normal('p_switch_offset', mu=0, sd=1, shape=n_subj)\n\n p_reward_mu_mu = pm.Bound(pm.Normal, lower=0, upper=1)('p_reward_mu_mu', mu=0.75, sd=0.5)\n p_reward_mu_sd = pm.HalfNormal('p_reward_mu_sd', sd=0.5)\n p_reward_sd_sd = pm.HalfNormal('p_reward_sd_sd', sd=0.5)\n p_reward_mu = pm.Bound(pm.Normal, lower=0, upper=1)('p_reward_mu', mu=p_reward_mu_mu, sd=p_reward_mu_sd, shape=n_groups)\n p_reward_sd = pm.HalfNormal('p_reward_sd', sd=p_reward_sd_sd, shape=n_groups)\n p_reward_offset = pm.Normal('p_reward_offset', mu=0, sd=1, shape=n_subj)\n\n # Individual parameters\n beta = pm.Deterministic('beta', beta_mu[group] + beta_offset * beta_sd[group])\n persev = pm.Deterministic('persev', persev_mu[group] + persev_offset * persev_sd[group])\n p_switch = pm.Deterministic('p_switch', p_switch_mu[group] + p_switch_offset * p_switch_sd[group])\n p_reward = pm.Deterministic('p_reward', p_reward_mu[group] + p_reward_offset * p_reward_sd[group])\n\n scaled_persev_bonus = persev_bonus * persev.reshape((1, n_subj))\n T.printing.Print('choices')(choices)\n T.printing.Print('scaled_persev_bonus')(scaled_persev_bonus)\n\n # Group differences\n beta_mu_diff01 = pm.Deterministic('beta_mu_diff01', beta_mu[0] - beta_mu[1])\n beta_mu_diff02 = pm.Deterministic('beta_mu_diff02', beta_mu[0] - beta_mu[2])\n beta_mu_diff12 = pm.Deterministic('beta_mu_diff12', beta_mu[1] - beta_mu[2])\n persev_mu_diff01 = pm.Deterministic('persev_mu_diff01', persev_mu[0] - persev_mu[1])\n persev_mu_diff02 = pm.Deterministic('persev_mu_diff02', persev_mu[0] - persev_mu[2])\n persev_mu_diff12 = pm.Deterministic('persev_mu_diff12', persev_mu[1] - persev_mu[2])\n p_switch_mu_diff01 = pm.Deterministic('p_switch_mu_diff01', p_switch_mu[0] - p_switch_mu[1])\n p_switch_mu_diff02 = pm.Deterministic('p_switch_mu_diff02', p_switch_mu[0] - p_switch_mu[2])\n p_switch_mu_diff12 = pm.Deterministic('p_switch_mu_diff12', p_switch_mu[1] - p_switch_mu[2])\n p_reward_mu_diff01 = pm.Deterministic('p_reward_mu_diff01', p_reward_mu[0] - p_reward_mu[1])\n p_reward_mu_diff02 = pm.Deterministic('p_reward_mu_diff02', p_reward_mu[0] - p_reward_mu[2])\n p_reward_mu_diff12 = pm.Deterministic('p_reward_mu_diff12', p_reward_mu[1] - p_reward_mu[2])\n\n # Get likelihoods\n lik_cor, lik_inc = get_likelihoods(rewards, choices, p_reward, p_noisy)\n\n # Get posterior, calculate probability of subsequent trial, add eps noise\n p_right = 0.5 * T.ones(n_subj, dtype='int32')\n p_right, _ = theano.scan(fn=post_from_lik,\n sequences=[lik_cor, lik_inc, scaled_persev_bonus],\n outputs_info=[p_right],\n non_sequences=[p_switch, eps, beta])\n\n # Add initial p=0.5 at the beginning of p_right\n initial_p = 0.5 * T.ones((1, n_subj))\n p_right = T.concatenate([initial_p, p_right[:-1]], axis=0)\n\n # Use Bernoulli to sample responses\n model_choices = 
pm.Bernoulli('model_choices', p=p_right, observed=choices)\n\n # Check model logp and RV logps (will crash if they are nan or -inf)\n # if verbose:\n # print_logp_info(model)\n\n # Sample the model\n trace = pm.sample(n_samples, tune=n_tune, chains=n_chains, cores=n_cores, nuts_kwargs=dict(target_accept=.8))\n\n# Get results\nmodel_summary = pm.summary(trace)\n\nif not run_on_cluster:\n pm.traceplot(trace)\n plt.savefig(save_dir + 'plot_' + save_id + '.png')\n\n# Save results\nprint('Saving trace, trace plot, model, and model summary to {0}{1}...\\n'.format(save_dir, save_id))\nwith open(save_dir + save_id + '.pickle', 'wb') as handle:\n pickle.dump({'trace': trace, 'model': model, 'summary': model_summary},\n handle, protocol=pickle.HIGHEST_PROTOCOL)\n","sub_path":"models/PSBayesModelNonCentered.py","file_name":"PSBayesModelNonCentered.py","file_ext":"py","file_size_in_byte":6708,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"611803011","text":"from django.urls import reverse\nfrom django.utils.translation import gettext_lazy as _\nfrom django.template.defaultfilters import truncatechars\nfrom cradmin_legacy.viewhelpers import objecttable\nfrom cradmin_legacy.viewhelpers import create\nfrom cradmin_legacy.viewhelpers import update\nfrom cradmin_legacy.viewhelpers import delete\nfrom cradmin_legacy import crapp\nfrom crispy_forms import layout\n\nfrom trix.trix_core import models as trix_models\nfrom trix.trix_admin import formfields\n\n\nclass PermalinkQuerysetForRoleMixin(object):\n def get_queryset_for_role(self, course):\n return self.model.objects.filter(course=course)\\\n .prefetch_related('tags')\n\n\nclass TitleColumn(objecttable.MultiActionColumn):\n modelfield = 'title'\n\n def get_buttons(self, permalink):\n return [\n objecttable.Button(\n label=_('Edit'),\n url=self.reverse_appurl('edit', args=[permalink.id])),\n objecttable.Button(\n label=_('View'),\n url=reverse('trix_student_permalink', args=[permalink.id])),\n objecttable.Button(\n label=_('Delete'),\n url=self.reverse_appurl('delete', args=[permalink.id]),\n buttonclass=\"btn btn-sm btn-danger\"),\n ]\n\n\nclass DescriptionIntroColumn(objecttable.PlainTextColumn):\n modelfield = 'description'\n\n def render_value(self, permalink):\n return truncatechars(permalink.description, 50)\n\n\nclass TagsColumn(objecttable.PlainTextColumn):\n modelfield = 'tags'\n\n def is_sortable(self):\n return False\n\n def render_value(self, permalink):\n return ', '.join(tag.tag for tag in permalink.tags.all())\n\n\nclass PermalinkListView(PermalinkQuerysetForRoleMixin, objecttable.ObjectTableView):\n model = trix_models.Permalink\n columns = [\n TitleColumn,\n TagsColumn,\n DescriptionIntroColumn\n ]\n searchfields = [\n 'title',\n 'tags__tag',\n 'description',\n ]\n\n def get_buttons(self):\n app = self.request.cradmin_app\n return [\n objecttable.Button(label=_('Create'), url=app.reverse_appurl('create')),\n ]\n\n\nclass PermalinkCreateUpdateMixin(object):\n model = trix_models.Permalink\n roleid_field = 'course'\n\n def get_field_layout(self):\n return [\n layout.Div('title', css_class=\"trix-focusfield\"),\n # layout.Fieldset(_('Organize'), 'tags'),\n layout.Div('tags', css_class=\"trix-focusfield\"),\n layout.Div('description', css_class=\"trix-focusfield\"),\n ]\n\n def get_form(self, *args, **kwargs):\n form = super(PermalinkCreateUpdateMixin, self).get_form(*args, **kwargs)\n form.fields['tags'] = formfields.ManyToManyTagInputField(required=False)\n return form\n\n def 
save_object(self, form, commit=True):\n permalink = super(PermalinkCreateUpdateMixin, self).save_object(form, commit=commit)\n if commit:\n # Replace the tags with the new tags\n permalink.tags.clear()\n for tag in form.cleaned_data['tags']:\n permalink.tags.add(tag)\n return permalink\n\n def form_saved(self, permalink):\n course = self.request.cradmin_role\n if not permalink.tags.filter(tag=course.course_tag).exists():\n permalink.tags.add(course.course_tag)\n\n\nclass PermalinkCreateView(PermalinkCreateUpdateMixin, create.CreateView):\n \"\"\"\n View used to create new permalinks.\n \"\"\"\n\n\nclass PermalinkUpdateView(PermalinkQuerysetForRoleMixin,\n PermalinkCreateUpdateMixin,\n update.UpdateView):\n \"\"\"\n View used to create edit existing permalinks.\n \"\"\"\n\n\nclass PermalinkDeleteView(PermalinkQuerysetForRoleMixin, delete.DeleteView):\n \"\"\"\n View used to delete existing permalinks.\n \"\"\"\n model = trix_models.Permalink\n template_name = \"trix_admin/delete.django.html\"\n\n\nclass App(crapp.App):\n appurls = [\n crapp.Url(\n r'^$',\n PermalinkListView.as_view(),\n name=crapp.INDEXVIEW_NAME),\n crapp.Url(\n r'^create$',\n PermalinkCreateView.as_view(),\n name=\"create\"),\n crapp.Url(\n r'^edit/(?P\\d+)$',\n PermalinkUpdateView.as_view(),\n name=\"edit\"),\n crapp.Url(\n r'^delete/(?P\\d+)$',\n PermalinkDeleteView.as_view(),\n name=\"delete\")\n ]\n","sub_path":"trix/trix_admin/views/permalinks.py","file_name":"permalinks.py","file_ext":"py","file_size_in_byte":4492,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"593384369","text":"\"\"\"\r\nChiral representation is used:\r\nd || psi_1 || || 0 m - E + V(x) || || psi_1 ||\r\n-- || || = || || * || ||\r\ndx || psi_2 || || m + E - V(x) 0 || || psi_2 ||\r\n\r\nV(x) = - e^2 * sum_by_impurities( Z_i / (|x-a_i| + delta) )\r\n\r\nChange of variable:\r\nr(x) = sum_by_impurities( Z_i * sign(x-a_i) * ln( (|x-a_i| + delta) / delta ) )\r\n\r\n\"\"\"\r\n\r\nimport os, re, sys, datetime, time\r\nimport numpy as np\r\nfrom scipy import special\r\nfrom scipy.optimize import root\r\nfrom recordclass import recordclass\r\nimport multiprocessing as mp\r\nimport matplotlib.pyplot as plt\r\n\r\n\r\nclass Crystal:\r\n \"\"\"Class that represents 1D finite crystal given by impurity positions\r\n and their charges.\r\n \"\"\"\r\n\r\n \"---Some constants---\"\r\n SPEED_OF_LIGHT = 299792458\r\n ELECTRIC_CONSTANT = 8.854187817e-12\r\n ELECTRON_MASS = 9.10938356e-31\r\n PLANK_CONSTANT = 6.626070040e-34\r\n ELEMENTARY_CHARGE = 1.6021766208e-19\r\n GAMMA = 5.059074323e-8\r\n\r\n FINE_STRUCTURE_CONSTANT = ELEMENTARY_CHARGE**2 / (2 * ELECTRIC_CONSTANT *\r\n SPEED_OF_LIGHT *\r\n PLANK_CONSTANT)\r\n\r\n # m * c / h_bar\r\n X_MULT = 2 * np.pi * ELECTRON_MASS * SPEED_OF_LIGHT / PLANK_CONSTANT\r\n\r\n # regularization parameter (given in compton length units)\r\n DELTA = 0.01\r\n\r\n def __init__(self, A, Z, Z_p):\r\n \"\"\"Constructor.\r\n\r\n Args:\r\n A: impurity positions.\r\n Z: impurity charges in elementary charge units.\r\n Z_p: charge of particle in elementary charge units.\r\n\r\n \"\"\"\r\n self.A = A\r\n self.Z = Z\r\n self.Z_p = Z_p\r\n\r\n @staticmethod\r\n def log_transform(x, scale, origin):\r\n \"\"\"Logarithmic transform of variable x.\r\n\r\n Args:\r\n x: point of evaluation.\r\n scale: scaling factor of x.\r\n origin: point at which logarithm argument turns into 0.\r\n\r\n Returns:\r\n Transformed variable.\r\n\r\n \"\"\"\r\n return -np.log(np.abs(x - origin) * scale)\r\n\r\n def 
delta_func(self, e0, e):\r\n \"\"\"Smeared delta function.\r\n\r\n Args:\r\n e0: functions origin.\r\n e: energy of evaluation.\r\n\r\n Returns:\r\n Delta function given by Lorentzian function.\r\n\r\n \"\"\"\r\n return self.GAMMA / (np.pi * ((e0 - e) ** 2 + self.GAMMA ** 2))\r\n\r\n @staticmethod\r\n def interpolate(y_l, x_l, y_r, x_r, x):\r\n \"\"\"Linear interpolation.\r\n\r\n Args:\r\n y_l: function value at left point.\r\n x_l: left point.\r\n y_r: function value at right point.\r\n x_r: right point.\r\n x: point of evaluation.\r\n\r\n Returns:\r\n Linearly interpolated value of function at point x.\r\n\r\n \"\"\"\r\n return y_l + (x - x_l) * (y_r - y_l) / (x_r - x_l)\r\n\r\n @staticmethod\r\n def find_neighbours(l, val):\r\n \"\"\"Find neighbour values in the list l for a given value val.\r\n\r\n Args:\r\n l: list of values.\r\n val: value for which neighbours should be found.\r\n\r\n Returns:\r\n Pair of left and right neighbours.\r\n\r\n \"\"\"\r\n left = 0\r\n right = len(l) - 1\r\n\r\n while left != right - 1:\r\n curr = (left + right) // 2\r\n if l[curr] > val:\r\n right = curr\r\n else:\r\n left = curr\r\n\r\n return left, right\r\n\r\n @staticmethod\r\n def WhittakerW(k: float, m: float, x: float) -> float:\r\n \"\"\"Gives the Whittaker function W_{k,m}(x).\r\n\r\n Args:\r\n k, m: parameters.\r\n x : point of evaluation.\r\n\r\n Returns:\r\n Value of Whittaker function W_{k,m} function at point x.\r\n\r\n \"\"\"\r\n return np.exp(-x / 2) * pow(x, m + 0.5) * special.hyperu(0.5 + m - k,\r\n 1 + 2 * m, x)\r\n\r\n @staticmethod\r\n def WhittakerW_der(k, m, x):\r\n \"\"\"Gives x-derivative of Whittaker function.\r\n\r\n Args:\r\n k, m: parameters.\r\n x: point of evaluation.\r\n\r\n Returns:\r\n Value of x-derivative Whittaker function W_{k,m} function at point x.\r\n\r\n \"\"\"\r\n return ((k - 0.5 * x) * Crystal.WhittakerW(k, m, x) -\r\n (m * m - pow(k - 0.5, 2)) * Crystal.WhittakerW(k - 1, m, x)) / x\r\n\r\n def V(self, x):\r\n \"\"\"\"Calculates the potential of impurities at given position x.\r\n\r\n Args:\r\n x: point of evaluation.\r\n\r\n Returns:\r\n Value of total potential of impurities in given point.\r\n\r\n \"\"\"\r\n return self.Z_p * self.FINE_STRUCTURE_CONSTANT * \\\r\n sum([z / (np.abs(x - a) + self.DELTA) for a, z in\r\n zip(self.A, self.Z)])\r\n\r\n @staticmethod\r\n def max_range_for_e(e, N_nodes):\r\n \"\"\"Calculates maximum value of x to gain necessary accuracy.\r\n\r\n Args:\r\n e: energy of particle in electron rest energy units.\r\n N_nodes: number of desired wave function nodes.\r\n\r\n Returns:\r\n Maximum range of calculation along x-axis.\r\n\r\n \"\"\"\r\n return 10 * pow(N_nodes, 0.33) / np.sqrt(1 - e * e) if e > 0 else 10\r\n\r\n @staticmethod\r\n def normalize(psi, x):\r\n \"\"\"Normalizes wave functions such that the total probability is 1.\r\n\r\n Args:\r\n psi: vector of particle's wave function components.\r\n x: list of grid points.\r\n\r\n \"\"\"\r\n N = len(x)\r\n s = sum([(psi[i][0][0] ** 2 + psi[i][1][0] ** 2) *\r\n (x[i + 1] - x[i]) for i in range(N - 1)])\r\n psi /= np.sqrt(s)\r\n\r\n def h(self, x, e):\r\n \"\"\"Calculates at given point 2x2 matrix at the rhs of the equation\r\n given in the description.\r\n\r\n Args:\r\n x: point of evaluation.\r\n e: energy of particle in electron rest energy units.\r\n\r\n Returns:\r\n 2x2 matrix.\r\n\r\n \"\"\"\r\n res = np.zeros((2, 2))\r\n\r\n mult = 1 / self.dr_by_dx(x)\r\n V_ = self.V(x)\r\n\r\n res[0, 1] = (1 - e + V_) * mult\r\n res[1, 0] = (1 + e - V_) * mult\r\n\r\n return res\r\n\r\n def 
x_2_r(self, x):\r\n \"\"\"Turn x into r.\r\n\r\n Args:\r\n x - point of evaluation.\r\n\r\n Returns:\r\n r(x).\r\n\r\n \"\"\"\r\n return sum([np.abs(z) * np.sign(x - a) *\r\n np.log((np.abs(x - a) + self.DELTA) / self.DELTA)\r\n for a, z in zip(self.A, self.Z)])\r\n\r\n def r_2_x(self, r):\r\n \"\"\"Turn r into x.\r\n\r\n Args:\r\n r - point of evaluation.\r\n\r\n Returns:\r\n x(r).\r\n\r\n \"\"\"\r\n return root(lambda x, r: self.x_2_r(x) - r, 0, r).x[0]\r\n\r\n @staticmethod\r\n def nodes_count(l):\r\n \"\"\"Count the number of sign change in the given list.\r\n\r\n Args:\r\n l: list of numbers.\r\n\r\n Returns:\r\n Number of sign change in the given list.\r\n\r\n \"\"\"\r\n count = 0\r\n for i in range(1, len(l)):\r\n count += np.sign(l[i]) * np.sign(l[i - 1]) < 0\r\n\r\n return count\r\n\r\n @staticmethod\r\n def round_seconds(current_datetime):\r\n \"\"\"Round seconds of the given datetime to integer value\r\n and return datetime as a string.\r\n\r\n Args:\r\n current_datetime: current date and time.\r\n\r\n Returns:\r\n Datetime separated by '_' as a string.\r\n\r\n \"\"\"\r\n s = str(current_datetime.date()) + '_'\r\n s += str(current_datetime.time().hour) + '-'\r\n s += str(current_datetime.time().minute) + '-'\r\n s += str(int(current_datetime.time().second))\r\n return s\r\n\r\n def RK4(self, psi, x, dr, e):\r\n \"\"\"Solving the equation given in description by the Runge-Kutta method.\r\n\r\n Args:\r\n psi: vector of particle's wave function components.\r\n x: grid in x variable.\r\n dr: grid step in r variable.\r\n e: energy of particle in electron rest energy units.\r\n\r\n \"\"\"\r\n N = len(x)\r\n for i in range(2, N, 2):\r\n k1 = dr * np.dot(self.h(x[i - 2], e), psi[i // 2 - 1])\r\n k2 = dr * np.dot(self.h(x[i - 1], e), psi[i // 2 - 1] + k1 / 2)\r\n k3 = dr * np.dot(self.h(x[i - 1], e), psi[i // 2 - 1] + k2 / 2)\r\n k4 = dr * np.dot(self.h(x[i], e), psi[i // 2 - 1] + k3)\r\n psi[i // 2] = psi[i // 2 - 1] + (k1 + 2 * k2 + 2 * k3 + k4) / 6\r\n\r\n def dr_by_dx(self, x):\r\n \"\"\"Calculate a derivative of r(x) at given point.\r\n\r\n Args:\r\n x - point of evaluation.\r\n\r\n Returns:\r\n r'(x).\r\n\r\n \"\"\"\r\n return sum([np.abs(z) / (np.abs(x - a) + self.DELTA) \\\r\n for a, z in zip(self.A, self.Z)])\r\n\r\n def calculate(self, N, N_nodes):\r\n \"\"\"Find energy and wave function for given conditions.\r\n\r\n Args:\r\n N: number of grid points.\r\n N_nodes: number of desired wave function nodes.\r\n\r\n Returns:\r\n Tuple of energy, energy error, wave function vector, r-grid and x-grid.\r\n\r\n \"\"\"\r\n \"---Set initial bounds for energy---\"\r\n e_bounds = Crystal.Bounds(up=1.0, down=-1.0)\r\n\r\n \"---'shoot' until energy is converged---\"\r\n while True:\r\n \"--Set energy--\"\r\n e = 0.5 * (e_bounds.up + e_bounds.down)\r\n\r\n \"---Calculate wave function for given energy---\"\r\n # set default number of grid points if not given\r\n if not N: N = 250 * len(self.A)\r\n psi, x, r = self.calculate_for_given_energy(e, N_nodes, N)\r\n\r\n \"---Count nodes for upper and lower components---\"\r\n count_1 = self.nodes_count([psi[i][0][0] for i in range(N)])\r\n count_2 = self.nodes_count([psi[i][1][0] for i in range(N)])\r\n\r\n \"---Reassign energy bounds based on node counts---\"\r\n if count_1 >= N_nodes + 1:\r\n if count_2 >= N_nodes:\r\n e_bounds.up = e\r\n elif count_2 == N_nodes - 1:\r\n e_bounds.down = e\r\n else:\r\n e_bounds.down = e\r\n elif count_1 == N_nodes:\r\n if count_2 >= N_nodes:\r\n e_bounds.up = e\r\n else:\r\n e_bounds.down = e\r\n else:\r\n 
e_bounds.down = e\r\n\r\n \"---Break if converged---\"\r\n if e == 0.5 * (e_bounds.up + e_bounds.down):\r\n break\r\n\r\n \"---Plot results---\"\r\n # print(e)\r\n # plt.plot(x, [psi[i][0][0] for i in range(N)], 'r')\r\n # plt.show(block=False); plt.pause(0.1); plt.clf()\r\n\r\n \"---Normalize wave functions---\"\r\n self.normalize(psi, x)\r\n\r\n return e, (e_bounds.up - e_bounds.down) / 2, psi, r, x\r\n\r\n def calculate_for_given_energy(self, e, N_nodes, N=None):\r\n \"\"\"Calculate wave functions for given energy.\r\n\r\n Args:\r\n e: particle energy.\r\n N_nodes: number of desired wave function nodes.\r\n N: number of grid points.\r\n\r\n Returns:\r\n Tuple of wave function vector, x-grid and r-grid.\r\n\r\n \"\"\"\r\n \"---Set grid---\"\r\n if not N: N = 250 * len(self.A)\r\n x_max = self.max_range_for_e(e, N_nodes) + max(self.A)\r\n x_min = -self.max_range_for_e(e, N_nodes) + min(self.A)\r\n r_max = self.x_2_r(x_max)\r\n r_min = self.x_2_r(x_min)\r\n r = np.linspace(r_min, r_max, 2 * N - 1)\r\n x = [self.r_2_x(rr) for rr in r]\r\n psi = np.zeros((N, 2, 1))\r\n\r\n \"---Set initial conditions---\"\r\n korr = pow(1 - e * e, 0.5)\r\n p = 2 * e * self.Z_p * self.FINE_STRUCTURE_CONSTANT * sum(self.Z)\r\n s = 2 * e * self.Z_p * self.FINE_STRUCTURE_CONSTANT * \\\r\n sum([z * (a + self.DELTA) for a, z in zip(self.A, self.Z)]) + \\\r\n pow(self.Z_p * self.FINE_STRUCTURE_CONSTANT * sum(self.Z), 2)\r\n kappa = 0.5 * p * e / korr\r\n mu = -0.5 * pow(1 - 4 * s, 0.5)\r\n psi[0] = [[0.1], [0.1]]\r\n if np.abs(e) <= 1:\r\n psi[0][1][0] = psi[0][0][0] * (2 * korr / (1 - e + self.V(x_min))) * \\\r\n (self.WhittakerW_der(kappa, mu, 2 * np.abs(x_min) * korr)/\r\n self.WhittakerW(kappa, mu, 2 * np.abs(x_min) * korr))\r\n psi[0][1][0] = 0.1\r\n\r\n \"---Solve equation for given energy---\"\r\n self.RK4(psi, x, r[2] - r[0], e)\r\n\r\n return psi, x[::2], r[::2]\r\n\r\n def find_energy(self, num, e_bounds):\r\n '''Find bound state energy in given region.\r\n\r\n Args:\r\n e_bounds: energy region.\r\n num: number of steps in energy region [-1, 1].\r\n\r\n Returns:\r\n A list of found energies in given energy region.\r\n\r\n '''\r\n energy_list = []\r\n initial_e_grid = np.linspace(e_bounds.down, e_bounds.up, num + 1)\r\n\r\n for i in range(num):\r\n # set energy interval\r\n e_bounds_temp = Crystal.Bounds(\r\n down=initial_e_grid[i],\r\n up=initial_e_grid[i + 1]\r\n )\r\n\r\n \"--Find bound state energy if wave function sign differs on boundaries--\"\r\n while True:\r\n # calculate fo energy interval boundaries\r\n psi_1, x_1, _ = self.calculate_for_given_energy(e_bounds_temp.down, 1)\r\n psi_2, x_2, _ = self.calculate_for_given_energy(e_bounds_temp.up, 1)\r\n\r\n # check signs\r\n if np.sign(psi_1[-1][0][0]) * np.sign(psi_2[-1][0][0]) > 0:\r\n break\r\n\r\n e = (e_bounds_temp.down + e_bounds_temp.up) / 2\r\n psi, x, _ = self.calculate_for_given_energy(e, 1)\r\n\r\n if np.sign(psi[-1][0][0]) * np.sign(psi_1[-1][0][0]) > 0:\r\n e_bounds_temp.down = e\r\n else:\r\n e_bounds_temp.up = e\r\n\r\n if e_bounds_temp.up - e_bounds_temp.down < 1e-15:\r\n energy_list.append(e)\r\n break\r\n\r\n return energy_list\r\n\r\n \"---Ancillary function to calculate and save data for LDOS---\"\r\n\r\n def calculate_and_save(self, i, data_dir):\r\n \"\"\"Calculate and save LDOS data for given level.\r\n\r\n Args:\r\n i: level number.\r\n data_dir: directory to save data to.\r\n\r\n \"\"\"\r\n '---Solve equation---'\r\n (e, de, psi, r, x) = self.calculate(None, i)\r\n\r\n \"---Print results---\"\r\n print('Level: ' + str(i))\r\n 
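# Note on the energy search performed by calculate() above: it is a bisection
# 'shooting' method -- the [e_down, e_up] bracket is halved according to the
# wave-function node counts until the midpoint can no longer be represented
# strictly between the bounds. A standalone sketch of that termination rule
# (illustrative only; assumes f changes sign exactly once on the bracket):
#
#     def bisect_to_machine_precision(f, lo, hi):
#         while True:
#             mid = 0.5 * (lo + hi)
#             if f(mid) > 0:
#                 hi = mid
#             else:
#                 lo = mid
#             if mid == 0.5 * (lo + hi):  # bracket exhausted at float precision
#                 return mid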
print(' Energy = ', e)\r\n print(' Energy error = ', de)\r\n\r\n \"---Save results to file---\"\r\n # save level\r\n self.save_level(e, i, psi, x)\r\n # save data for LDOS\r\n with open(data_dir + '/' + str(i) + '.txt', 'w') as out:\r\n out.write(str(e) + '\\n')\r\n out.write(' '.join([str(x_i) for x_i in x]) + '\\n')\r\n out.write(\r\n ' '.join([str(psi_i[0][0] ** 2 + psi_i[1][0] ** 2) for psi_i in psi]))\r\n\r\n def data_for_LDOS(self, N_nodes, LDOS_dir=None):\r\n \"\"\"Calculate and save data needed for LDOS plotting.\r\n\r\n Args:\r\n N_nodes: number of desired wave function nodes.\r\n LDOS_dir: directory for LDOS data.\r\n\r\n \"\"\"\r\n # use default name if not given\r\n if not LDOS_dir:\r\n LDOS_dir = self.crystal_dir() + '/LDOS/data'\r\n\r\n \"---Calculate and save data in parallel---\"\r\n cpu_count = mp.cpu_count()\r\n for chunk in Crystal.chunks(range(1, N_nodes + 1), cpu_count):\r\n processes = []\r\n for i in chunk:\r\n proc = mp.Process(target=self.calculate_and_save, args=(i, LDOS_dir))\r\n processes.append(proc)\r\n proc.start()\r\n for proc in processes:\r\n proc.join()\r\n\r\n @staticmethod\r\n def chunks(l, n):\r\n \"\"\"Yield successive n-sized chunks from l.\"\"\"\r\n for i in range(0, len(l), n):\r\n yield l[i:i + n]\r\n\r\n def plot_LDOS(self, level_rng, filenames=None):\r\n \"\"\"Plot local density of states (LDOS).\r\n\r\n Args:\r\n filenames: list of files with data about levels.\r\n\r\n \"\"\"\r\n # Tk().withdraw()\r\n e_grid_N = 1000\r\n e_lst = []\r\n x_lst = []\r\n rho_lst = []\r\n\r\n if not filenames:\r\n data_dir = self.crystal_dir() + '/LDOS/data/'\r\n filenames = [data_dir + f for f in os.listdir(data_dir)\r\n if os.path.isfile(data_dir + f) and\r\n int(f.split('.')[0]) in level_rng]\r\n delta = self.DELTA\r\n else:\r\n A = []\r\n Z = []\r\n for pair in re.findall('\\((.*?)\\)', filenames[0], re.DOTALL):\r\n a, z = pair[1:-1].split(',')\r\n A.append(float(a))\r\n Z.append(float(z))\r\n delta = float(\r\n re.findall('delta=(.*?)/', self.crystal_dir(), re.DOTALL)[0])\r\n\r\n if len(filenames) == 0:\r\n sys.exit()\r\n\r\n print(\"Charges: \", [(a, z) for a, z in zip(self.A, self.Z)])\r\n print(\"Delta = \", delta)\r\n print(\"\\n\")\r\n\r\n for filename in filenames:\r\n with open(filename, 'r') as inp:\r\n e = float(inp.readline())\r\n x = [float(s) for s in inp.readline().split()]\r\n rho = [float(s) for s in inp.readline().split()]\r\n\r\n e_lst.append(e)\r\n x_lst.append(x)\r\n rho_lst.append(rho)\r\n\r\n print('Finished reading.')\r\n max_range_idx = 0\r\n max_range = 0\r\n for i, val in enumerate(x_lst):\r\n if val[-1] > max_range:\r\n max_range = val[-1]\r\n max_range_idx = i\r\n X = x_lst[max_range_idx] # sorted(list(set(X)))\r\n delta_e = max(e_lst) - min(e_lst)\r\n e_grid = np.linspace(min(e_lst) - 0.001 * delta_e,\r\n max(e_lst) + 0.001 * delta_e, e_grid_N)\r\n\r\n N = len(X)\r\n num_of_levels = len(e_lst)\r\n for level in range(num_of_levels):\r\n new_rho = []\r\n for i in range(N):\r\n if X[i] < x_lst[level][0] or X[i] > x_lst[level][-1]:\r\n new_rho.append(0.0)\r\n elif X[i] in x_lst[level]:\r\n idx = x_lst[level].index(X[i])\r\n new_rho.append(rho_lst[level][idx])\r\n else:\r\n (left, right) = Crystal.find_neighbours(x_lst[level], X[i])\r\n new_rho.append(Crystal.interpolate(rho_lst[level][left],\r\n x_lst[level][left],\r\n rho_lst[level][right],\r\n x_lst[level][right], X[i]))\r\n\r\n rho_lst[level] = new_rho[:]\r\n print('Finished recalculation.')\r\n\r\n LDOS = np.zeros((e_grid_N, N))\r\n\r\n for e_i in range(e_grid_N):\r\n for x_i in 
range(N):\r\n for level in range(num_of_levels):\r\n LDOS[e_i][x_i] += rho_lst[level][x_i] * \\\r\n Crystal.delta_func(self, e_lst[level], e_grid[e_i])\r\n\r\n # log(LDOS)\r\n for e_i in range(e_grid_N):\r\n for x_i in range(N):\r\n LDOS[e_i][x_i] = np.log(1 + LDOS[e_i][x_i])\r\n print('Finished computing LDOS.')\r\n\r\n \"---Plot LDOS as color plot---\"\r\n # e_grid = [log_transform(e, 10, 1) for e in e_grid]\r\n plt.contourf(X, e_grid, LDOS, 1000)\r\n # plt.colorbar()\r\n plt.title(r'$\\delta = {0} \\lambdabar_c$'.format(delta),\r\n fontsize=30, verticalalignment='bottom')\r\n plt.xlabel(r'$x$', fontsize=30, labelpad=-5)\r\n plt.ylabel(r'$\\epsilon$ ', rotation='horizontal',\r\n verticalalignment='bottom', horizontalalignment='right',\r\n fontsize=30)\r\n # plt.yscale('log')\r\n plt.show()\r\n\r\n \"New data type\"\r\n Bounds = recordclass('Bounds', \"up down\")\r\n\r\n def plot_wave_function(self, psi, x):\r\n \"\"\"Plot given wave function.\r\n\r\n Args:\r\n psi: vector of particle's wave function components.\r\n x: grid in x variable.\r\n\r\n \"\"\"\r\n N = len(x)\r\n plt.plot(x, [psi[i][0][0] for i in range(N)], 'r')\r\n plt.plot(x, [psi[i][1][0] for i in range(N)], 'b')\r\n plt.title(r'$\\delta = {0} \\lambdabar_c$'.format(self.DELTA),\r\n fontsize=30, verticalalignment='bottom')\r\n plt.xlabel(r'$x (\\lambdabar_c)$', fontsize=30)\r\n plt.ylabel(r'$\\psi$ ', rotation='horizontal',\r\n verticalalignment='bottom', horizontalalignment='right',\r\n fontsize=30)\r\n plt.show()\r\n\r\n @staticmethod\r\n def plot_level(path: str):\r\n \"\"\"Plot wave function by data from file.\r\n\r\n Args:\r\n path: path of file with data.\r\n\r\n \"\"\"\r\n e, n, psi, x, A, Z = Crystal.read_level(path)\r\n N = len(x)\r\n dlt = float(re.findall('delta=(.*?)/', path, re.DOTALL)[0])\r\n plt.plot(x, [psi[i][0][0] for i in range(N)], 'r')\r\n plt.plot(x, [psi[i][1][0] for i in range(N)], 'b')\r\n plt.title(r'$\\delta = {0} \\lambdabar_c, \\epsilon = {1}$'.format(dlt, e),\r\n fontsize=30, verticalalignment='bottom')\r\n plt.xlabel(r'$x (\\lambdabar_c)$', fontsize=30)\r\n plt.ylabel(r'$\\psi$ ', rotation='horizontal',\r\n verticalalignment='bottom', horizontalalignment='right',\r\n fontsize=30)\r\n plt.show()\r\n\r\n def save_level(self, e, n, psi, x):\r\n \"\"\"Save information about the energy level n.\r\n\r\n Args:\r\n e: particle energy.\r\n n: level number.\r\n psi: vector of particle's wave function components.\r\n x: grid in x variable.\r\n\r\n \"\"\"\r\n with open(self.crystal_dir() + '/levels/data/' + str(n) + '.txt',\r\n 'w') as out:\r\n out.write(str(e) + '\\n')\r\n out.write(str(len(x)) + '\\n')\r\n out.write(' '.join(['({},{})'.format(a, z) for a, z in\r\n zip(self.A, self.Z)]) + '\\n')\r\n for psi_i, x_i in zip(psi, x):\r\n out.write(\r\n ' '.join([str(x_i), str(psi_i[0][0]), str(psi_i[1][0])]) + '\\n')\r\n\r\n @staticmethod\r\n def read_level(path: str):\r\n \"\"\"Read information about the energy level n.\r\n\r\n Args:\r\n path: path to file with data.\r\n\r\n Returns:\r\n Tuple of energy, level number, wave function vector,x-grid,\r\n list of impurity positions and list of impurity charges.\r\n\r\n \"\"\"\r\n n = int(path.split('/')[-1].split('.')[0])\r\n with open(path, 'r') as inp:\r\n e = float(inp.readline())\r\n N = int(inp.readline())\r\n\r\n A = []\r\n Z = []\r\n for s in inp.readline()[:-1].split():\r\n a, z = s[1:-1].split(',')\r\n A.append(float(a))\r\n Z.append(float(z))\r\n\r\n psi = []\r\n x = []\r\n for i in range(N):\r\n x_i, psi_1, psi_2 = [float(var) for var in 
inp.readline()[:-1].split()]\r\n psi.append([[psi_1], [psi_2]])\r\n x.append(x_i)\r\n\r\n return e, n, psi, x, A, Z\r\n\r\n def crystal_dir(self):\r\n \"\"\"Return crystal directory path, where all the information is stored.\r\n\r\n Returns:\r\n Relative directory path.\r\n\r\n \"\"\"\r\n return './Results' + '/alpha=' + str(self.FINE_STRUCTURE_CONSTANT) + \\\r\n '/delta=' + str(self.DELTA) + '/' + \\\r\n ' '.join(['({},{})'.format(a, z) for a, z in zip(self.A, self.Z)])\r\n\r\n @staticmethod\r\n def create_dir(path):\r\n \"\"\"Create directory with given path if doesn't exist.\r\n\r\n Args:\r\n path: directory path.\r\n\r\n \"\"\"\r\n if not os.path.exists(path):\r\n os.mkdir(path)\r\n\r\n def create_file_tree(self):\r\n \"\"\"Create file tree to store information.\r\n\r\n Returns:\r\n Relative directory path.\r\n\r\n \"\"\"\r\n upper_dir = './Results'\r\n self.create_dir('./Results')\r\n\r\n upper_dir += '/alpha=' + str(self.FINE_STRUCTURE_CONSTANT)\r\n self.create_dir(upper_dir)\r\n\r\n upper_dir += '/delta=' + str(self.DELTA)\r\n self.create_dir(upper_dir)\r\n\r\n upper_dir += '/' + ' '.join(['({},{})'.format(a, z) for a, z in\r\n zip(self.A, self.Z)])\r\n self.create_dir(upper_dir)\r\n\r\n self.create_dir(upper_dir + '/levels')\r\n self.create_dir(upper_dir + '/levels/data')\r\n self.create_dir(upper_dir + '/levels/plots')\r\n\r\n self.create_dir(upper_dir + '/LDOS')\r\n self.create_dir(upper_dir + '/LDOS/data')\r\n self.create_dir(upper_dir + '/LDOS/plots')\r\n\r\n return upper_dir\r\n","sub_path":"_1D_Dirac_Crystal.py","file_name":"_1D_Dirac_Crystal.py","file_ext":"py","file_size_in_byte":22457,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"134635004","text":"import cwlbrowser.workflow as wf\nimport cwlbrowser.input_output as io\n\n\ndef printAttr(attribute, attributeName, subjectName):\n\tprint(subjectName + \" \" + attributeName + \":\")\n\tprint(\"-----------------\")\n\tfor item in attribute:\n\t\tif(isinstance(item, dict)):\n\t\t\tprint(item[\"id\"])\n\t\telse:\n\t\t\tprint(item)\n\tprint(\"\\n\")\n\n\ndef compare(name1, name2,attributeX, attributeY, attributeName):\n\tcount1 = 0\n\tcount2 = 0\n\tdifference = 0\n\tfor input_ in attributeX:\n\t\tcount1 = count1 + 1\n\tfor input_2 in attributeY:\n\t\tcount2 = count2 + 1\n\tif(count1 >= count2):\n\t\tdifference = count1 - count2\n\telse:\n\t\tdifference = count2 - count1\n\tprint(\"-------------------------------------\")\n\tprint(\"--------- \" + attributeName + \" comparison------------\")\n\tprint(\"-------------------------------------\")\n\tprint(name1 + \"| no. of \" + attributeName+ \": \" + str(count1))\n\tprint(name2 + \"| no. 
of \" + attributeName+ \": \" + str(count2))\n\tprint(\"Difference: \" + str(difference))\n\tprint(\"-------------------------------------\")\n\tprint(\"\\n\")\n\n\ndef createInputOutputArray(elements) :\n\ttemp =[]\n\tif(isinstance(elements, dict)) :\n\t\tfor key, value in elements.items() :\n\t\t\tobj = io.InputOutput(value, name=key) \n\t\t\ttemp.append(obj)\n\telif (isinstance(elements, list)) :\n\t\tfor e in elements :\n\t\t\tobj = io.InputOutput(e) \n\t\t\ttemp.append(obj)\n\telse :\n\t\ttemp = []\n\treturn temp\n\n\ndef constructLink(parentLink, link) :\n\tparentlhs, parentrhs = parentLink.rsplit('/', 1)\n\tif('/' in link) :\n\t\tlhs, rhs = link.split('/', 1)\n\t\tif(lhs == '..') :\n\t\t\troot, path = parentlhs.rsplit('/', 1)\n\t\t\tfinalLink = root + '/' + rhs\n\t\telse :\n\t\t\tfinalLink = \"\"\n\telse :\n\t\tfinalLink = parentlhs + '/' + link\n\treturn finalLink\n\n\n\n\n\n\n\n\t\n\n","sub_path":"cwlbrowser/util.py","file_name":"util.py","file_ext":"py","file_size_in_byte":1662,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"194444465","text":"#!/usr/bin/env python3\n\nimport sys\nimport math\n\ndef get_fall_time(height):\n gravity = 9.8\n time = math.sqrt((2 * height) / gravity) \n\n return time\n\ndef isVulnerable(tower_height, tower_x, tower_y, target_x, target_y):\n velocity = 300\n\n time_in_air = get_fall_time(tower_height)\n \n tower_range = time_in_air * velocity\n\n delta_x = tower_x - target_x\n delta_y = tower_y - target_y\n\n separation = math.sqrt((delta_x * delta_x) + (delta_y * delta_y))\n\n if separation < tower_range:\n print(\"Target is in range!\")\n else:\n print(\"Target is out of tower range :(\") \n \ntower_height = 10\nget_fall_time(tower_height)\n\nisVulnerable(tower_height, 1000, 1200, 10, 2)\n\n","sub_path":"class1/assignment5/gravity.py","file_name":"gravity.py","file_ext":"py","file_size_in_byte":706,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"650275433","text":"#! 
python3\n\n\"\"\"\nThis script creates http server running on localhost on given port that forwards requests to upstream\n\"\"\"\nimport sys\nimport argparse\nimport os\nfrom http.server import HTTPServer, CGIHTTPRequestHandler\nfrom socketserver import ThreadingMixIn\nimport http.client\n\ndef eprint(*args, **kwargs):\n \"This function prints message to error output\"\n print(*args, file=sys.stderr, **kwargs)\n\n\ndef verbose_print(*args, **kwargs):\n \"This function prints message in verbose mode\"\n if VERBOSE:\n print(*args, file=sys.stderr, **kwargs)\n\n\ndef parse_args():\n \"This function parses command-line arguments and does basic checks\"\n parser = argparse.ArgumentParser()\n parser.add_argument(\"port\", type=int, help=\"Server port\")\n parser.add_argument(\"root_dir\", help=\"Root dir for serving content\")\n parser.add_argument(\"-v\", action='store_true', help=\"Activation of verbose mode\")\n\n args = parser.parse_args()\n\n return args.port, args.root_dir, args.v\n\n\nclass CGIHandler(CGIHTTPRequestHandler):\n _root_dir = None\n _cgi_args = None\n\n def do_GET(self):\n self._serve_request(self._get_full_path())\n\n\n def do_POST(self):\n self._serve_request(self._get_full_path())\n\n\n def _get_full_path(self):\n self.path, _, self._cgi_args = self.path.partition('?')\n verbose_print(self.path + \":\" + self._cgi_args)\n return os.path.normpath(self._root_dir + self.path)\n\n\n def _serve_request(self, file_path):\n if(os.path.isfile(file_path)):\n verbose_print(\"file <{}> exists\".format(file_path))\n if file_path.endswith('.cgi'):\n verbose_print(\"file <{}> is CGI script\".format(file_path))\n self._execute_cgi(file_path)\n else:\n self._send_file(file_path)\n\n elif(os.path.isdir(file_path)):\n verbose_print(\"path <{}> is dir\".format(file_path))\n self.send_error(http.client.FORBIDDEN, explain='URL points to directory'.format(file_path))\n else:\n verbose_print(\"path <{}> does not exist\".format(file_path))\n self.send_error(http.client.NOT_FOUND, explain='path \"{}\" does not exist'.format(file_path))\n\n\n def _send_file(self, file_path):\n content_type = 'application/octet-stream'\n if file_path.endswith('.txt'):\n content_type = 'text/plain'\n with open(file_path, 'rb') as file:\n self.send_response(http.client.OK)\n self.send_header(\"Content-Type\", content_type)\n self.send_header(\"Content-Disposition\", 'attachment; filename=\"{}\"'.format(os.path.basename(file_path)))\n self.send_header(\"Content-Length\", os.path.getsize(file_path))\n self.end_headers()\n while True:\n data = file.read(1024)\n if not data:\n break\n self.wfile.write(data)\n\n\n def _execute_cgi(self, file_path):\n relpath = os.path.relpath(file_path)\n cgi_file = os.path.basename(relpath)\n if(self._cgi_args):\n cgi_file += '?' 
+ self._cgi_args\n self.cgi_info = os.path.dirname(relpath), cgi_file \n self.run_cgi()\n\n\nclass ThreadedHTTPServer(ThreadingMixIn, HTTPServer):\n \"\"\"Handle requests in a separate thread.\"\"\"\n\n\ndef run_server(port, root_dir):\n CGIHandler._root_dir = os.path.abspath(root_dir)\n server = ThreadedHTTPServer(('localhost', port), CGIHandler)\n server.serve_forever()\n\n\nif __name__ == \"__main__\":\n PORT, ROOT_DIR, VERBOSE = parse_args()\n run_server(PORT, ROOT_DIR)\n","sub_path":"10-cgi/serve.py","file_name":"serve.py","file_ext":"py","file_size_in_byte":3305,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"594084585","text":"# encoding=utf-8\n\nimport logging\nimport os\nimport re\n\n\n# from google.appengine.dist import use_library\nimport jinja2\nimport webapp2\n# use_library('django', '0.96')\n\n# from django.utils import simplejson\nimport json as simplejson # stdlib stand-in for the commented-out simplejson import used below\nfrom google.appengine.api import users\n# from google.appengine.ext import webapp\n# from google.appengine.ext.webapp import template\n\nimport issues\nimport model\n\nimport gaetracker\n\n\n\ndef parse_labels(labels):\n labels = list(set(re.split('[, ]+', labels)))\n return sorted([l for l in labels if l])\n\n\nclass Action(webapp2.RequestHandler):\n def reply(self, response, content_type='text/html'):\n self.response.headers['Content-Type'] = content_type + '; charset=utf-8'\n self.response.write(response)\n \n def render(self, data):\n template = gaetracker.JINJA_ENVIRONMENT.get_template(self.template)\n data['path'] = self.request.path\n data['user'] = users.get_current_user()\n \n self.reply(template.render(data))\n \n \n\n# Action subclasses webapp2.RequestHandler directly, so handlers below use\n# self.request / self.redirect (the old self.rh indirection was never assigned)\nclass SubmitAction(Action):\n template = 'submit.html'\n\n def get(self):\n issue = self.get_issue()\n if self.request.get('labels'):\n issue.labels.append(self.request.get('labels'))\n self.render({\n 'issue': issue,\n })\n\n def post(self):\n data = dict([(x, self.request.get(x)) for x in self.request.arguments()])\n if 'labels' in data:\n data['labels'] = parse_labels(data['labels'])\n if not data.get('id'):\n user = users.get_current_user()\n if user:\n data['author'] = user.email()\n issue = issues.update(data)\n self.redirect(self.request.path + '?action=' + DEFAULT_ACTION)\n #self.redirect(self.request.path + '?action=view&id=' + str(issue.id))\n\n def get_issue(self):\n issue = model.TrackerIssue()\n issue.labels = [ 'Open' ]\n user = users.get_current_user()\n if user is not None:\n issue.author = user\n issue.owner = user\n return issue\n\n\nclass EditAction(SubmitAction):\n template = 'edit.html'\n\n def get_issue(self):\n issue_id = int(self.request.get('id'))\n issue = model.TrackerIssue.gql('WHERE id = :1', issue_id).get()\n if issue is None:\n raise Exception('Issue %u does not exist.' 
% issue_id)\n return issue\n\n\nclass ViewAction(Action):\n template = 'view.html'\n\n def get(self):\n issue_id = int(self.request.get('id'))\n issue = issues.get_issue_by_id(issue_id)\n self.render({\n 'issue': issue,\n 'labels': sorted(issue.labels, key=lambda l: ('-' not in l, l.lower())),\n 'resolved': 'Closed' in issue.labels,\n 'comments': model.TrackerIssueComment.gql('WHERE issue_id = :1 ORDER BY date_created', issue_id).fetch(100),\n })\n\n\nclass CommentAction(Action):\n def post(self):\n labels = parse_labels(self.request.get('labels'))\n\n for l in ('Open', 'Closed'):\n if l in labels:\n labels.remove(l)\n\n if self.request.get('resolved'):\n labels.append('Closed')\n else:\n labels.append('Open')\n\n issue_id = int(self.request.get('id', '0'))\n issues.add_comment(issue_id, users.get_current_user(), self.request.get('text'), labels=labels)\n self.redirect(self.request.path + '?action=view&id=' + str(issue_id))\n\n\nclass ListAction(Action):\n template = 'list.html'\n\n def get(self):\n issues_ = issues.find_issues()\n self.render({\n 'issues': issues_\n })\n\n\nclass ExportAction(Action):\n def get(self):\n data = issues.export_json(self.request.get('label') or None)\n self.reply(data)\n\n\nclass ImportAction(Action):\n template = 'import.html'\n\n def get(self):\n self.render({ })\n\n def post(self):\n data = simplejson.loads(self.request.get('dump'))\n issues.import_all(data)\n self.redirect(self.request.path)\n\n\nclass ImportOneAction(Action):\n def post(self):\n issue = issues.update(simplejson.loads(self.request.get('data')), create=True)\n logging.info('Issue %u imported.' % issue.id)\n\n\nclass FixPriorityAction(Action):\n def get(self):\n for issue in issues.find_issues():\n labels = list(issue.labels)\n issues.fix_priority_labels(issue)\n if labels != issue.labels:\n issue.put()\n\n\nhandlers = [\n ('/', ListAction),\n ('/comment', CommentAction),\n ('/edit', EditAction),\n ('/export', ExportAction),\n ('/fixpriority', FixPriorityAction),\n ('/import', ImportAction),\n ('/import-one', ImportOneAction),\n ('/submit', SubmitAction),\n ('/view', ViewAction),\n]\n","sub_path":"gaetracker/handlers.py","file_name":"handlers.py","file_ext":"py","file_size_in_byte":4788,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"63955892","text":"#!/usr/bin/python\nfrom sys import argv\nimport matplotlib.patches as mpatches\nimport matplotlib.pyplot as plt\n\nfrom math import log\nfrom subprocess import Popen, PIPE\n\n#\n# Class N\n#\n# | |\n# t | c |\n# i | o |\n# m | m |\n# e | p |\n# | s |\n# +------------------- +---------------------\n# nitem nitem\n#\n# 1. Class\n# 2. Dim\n# 3. 
Nitem\n#\n\nclass X: pass\n\ndef call(func, args): # without star\n func(*args)\n\ndef med(l):\n return sum(l)/float(len(l))\n\ndef getClasses(table):\n d = {}\n for l in table:\n d[l[1]] = 1\n ks = d.keys();\n ks.sort()\n return ks\n\ndef getNitems(table, c, d):\n di = {}\n for l in table:\n if l[1] == c and l[2] == d:\n di[l[0]] = 1\n ks = di.keys();\n ks.sort()\n return ks\n\ndef getDims(table, c):\n d = {}\n for l in table:\n if l[1] == c:\n d[l[2]] = 1\n ks = d.keys();\n ks.sort()\n return ks\n\n# Returns a dictionary with the data:\n# dic[classe][ndim][nitem]['t']: average time for 'this'\n# dic[classe][ndim][nitem]['c']: average comparison for 'this'\ndef getDic(table):\n cs = getClasses(table)\n dic = {}\n for c in cs:\n dic[c] = {}\n ds = getDims(table, c)\n for d in ds:\n dic[c][d] = {}\n ns = getNitems(table, c, d)\n for n in ns:\n dic[c][d][n] = X()\n ts, cs = ([], [])\n for l in table:\n if l[0] == n and l[1] == c and l[2] == d:\n cs += [l[5]]\n ts += [l[6]]\n dic[c][d][n] = {}\n dic[c][d][n]['t'] = med(ts)\n dic[c][d][n]['c'] = med(cs)\n cs = dic.keys()\n cs.sort()\n ds = dic[cs[0]].keys()\n ds.sort()\n ns = dic[cs[0]][ds[0]].keys()\n ns.sort()\n return (dic, cs, ds, ns)\n\n\ndef lblDim(dim):\n cdic = {0: 'black', 1: 'green', 2: 'blue', 3: 'yellow'}\n # setting label\n if dim == 0: lbl = 'list'\n elif dim == 1: lbl = 'AVL'\n else: lbl = str(dim) + 'd-tree'\n # setting color\n return lbl\n\n\ndef myplot(c, dic, ls, field, title, ylbl):\n (ns, cs, ds) = ls\n for d in ds:\n plt.plot(ns, [dic[c][d][n][field] for n in ns], '-', label=lblDim(d), linewidth=2.0)\n plt.legend(loc='upper right')\n plt.title('Class ' + str(c) + ' - ' + title)\n plt.yscale('logit')\n plt.ylabel(ylbl)\n plt.xlabel('size')\n plt.savefig(field+str(c)+'.png')\n\n\ndef plotLineTime(c, dic, di):\n s = ''\n s += 'set term png\\n'\n s += 'set output \\'' + di + '/out' + str(c) + '.png\\'\\n'\n s += 'set title \\'Classe ' + str(c) + '\\'\\n'\n s += 'set logscale y\\n'\n s += 'set grid\\n'\n s += 'plot '\n ps = []\n ds = dic[c].keys()\n ds.sort()\n for d in ds:\n ps += [\" '-' lw 3 with lines t '\" + lblDim(d) + \"'\"]\n s += ','.join(ps) + '\\n'\n for d in ds:\n ns = dic[c][d].keys()\n ns.sort()\n for n in ns:\n s += str(n) + ' ' + str(dic[c][d][n].t) + \"\\n\"\n s += \"e\\n\"\n #print(s)\n p = Popen(['gnuplot'], stdin=PIPE)\n p.communicate(s)\n f = open('/home/mbaroni/Desktop/out' + str(c) + '.txt', 'w')\n f.write(s)\n f.close()\n\n# c: classe de instancia\n# dic: dicionario com os dados\n# ds: lista de dimensoes\n# ns: lista de nitens\ndef plotBars(c, dic, dout, tipo):\n s = ''\n\n ds = dic[c].keys(); ds.sort()\n ns = dic[c][ds[0]].keys(); ns.sort()\n\n tit_dic= {}\n tit_dic['t'] = 'Tempo'\n tit_dic['c'] = 'N-Comparacoes'\n titulo = tit_dic[tipo]\n\n ###########################\n # Configurando o plot #\n ###########################\n # titulo do grafico\n s += 'set title \\\"5O-KP: ' + titulo + ' para classe ' + str(c) + '\\\"\\n'\n\n # titulo do eixo y\n s += 'set ylabel \\\"' + titulo + '\\\"\\n'\n\n # tipo de grafico\n s += 'set style data histogram\\n'\n\n # borda das barras\n s += 'set style fill solid border\\n'\n\n # escala logaritmica no y\n s += 'set logscale y\\n'\n\n # grid no fundo\n s += 'set grid\\n'\n\n # estilo do grafico\n s += 'set style histogram clustered\\n'\n\n # borda das barras \n s += 'set style fill solid border 1\\n'\n\n # tipo de saida\n s += 'set term pngcairo font \\'Arial bold\\'\\n'\n\n # arquivo de saida\n s += \"set output \\\"\" + dout + '/classe' + str(c) + '-' + 
titulo.lower() + \".png\\\"\\n\"\n\n ###########################\n # Escrevendo os dados #\n ###########################\n s += '$data << EOD\\n'\n # header\n s += '000 ' + ' '.join(map(lblDim, ds)) + '\\n'\n for n in ns:\n s += str(n) + ' '\n for d in ds:\n s += ( '%.3f' % dic[c][d][n][tipo]) + ' '\n s += '\\n'\n s += 'EOD\\n'\n\n ###########################\n # Plotando #\n ###########################\n s += 'plot for [COL=2:%d] \"$data\" using COL:xticlabels(1) title columnheader' % (len(ds) + 1)\n\n return s\n\n\n# execute gnuplot with ',\n '')\n\n # Replace jQuery CDN with full code.\n with open(includes + 'jquery.min.js', 'r') as f:\n jquery = f.read()\n contents = contents.replace(\n '',\n '')\n\n # Remove MathJax CDN as it is unnecessary\n contents = contents.replace(\n '', '')\n\n # Inject jQuery code to hide the code input boxes\n contents = contents.replace(\n '', \"\")\n\n # Deal with how crap IE is.\n contents = contents.replace(\n '', '\\n')\n\n if str(file).find('Daily') != -1:\n if is_modnet:\n output_file = output_folder + 'modnet-daily-report-' + date + '.html'\n elif is_merge:\n output_file = output_folder + 'merge-daily-report-' + date + '.html'\n else:\n output_file = output_folder + 'daily-report-' + date + '.html'\n elif str(file).find('Weekly') != -1:\n if is_modnet:\n output_file = output_folder + 'modnet-weekly-report-' + date + '.html'\n elif is_merge:\n output_file = output_folder + 'merge-weekly-report-' + date + '.html'\n else:\n output_file = output_folder + 'weekly-report-' + date + '.html'\n elif str(file).find('Monthly') != -1:\n if is_modnet:\n output_file = output_folder + 'modnet-monthly-report-' + date + '.html'\n elif is_merge:\n output_file = output_folder + 'merge-monthly-report-' + date + '.html'\n else:\n output_file = output_folder + 'monthly-report-' + date + '.html'\n else:\n raise TypeError('The only supported report types are \"daily\" and \"weekly')\n\n with open(output_file, 'w') as f:\n f.write(contents)\n return output_file\n\n\n@click.group()\n@click.version_option(version=0.1)\ndef main():\n \"\"\"A command line wrapper around the trending module.\"\"\"\n pass\n\n\n@main.command(short_help='correct notebook output')\n@click.argument('file', type=click.Path(exists=True))\n@click.argument('output_folder', type=click.Path(exists=True))\n@click.argument('date')\ndef clean_html(file, output_folder, date):\n \"\"\"FILE: The path to the output of the notebook\n DATE: The date the report is for. 
For weekly the date of the monday is recommended.\"\"\"\n if not re.search('[0-9]{4}-[0-9]{2}-[0-9]{2}', date):\n click.echo('The date must be in \"yyyy-mm-dd\" format.')\n exit()\n output_file = correct_html(file, output_folder, date)\n if click.confirm('Do you want to view the corrected output?'):\n click.launch(output_file)\n\n\n@main.command(short_help='generate the daily report')\n@click.argument('file', type=click.Path(exists=True))\ndef daily(file):\n \"\"\"FILE: The path to that days data\"\"\"\n raise NotImplementedError\n\n\n@main.command(short_help='generate the weekly report')\n@click.argument('monday_file', type=click.Path(exists=True))\ndef weekly(monday_file):\n \"\"\"MONDAY_FILE: The path to the monday data\"\"\"\n raise NotImplementedError\n\n\n@main.command(short_help='generate the monthly report')\n@click.argument('month_number')\n@click.argument('year', default=str(datetime.now().year))\ndef monthly(month_number, year):\n \"\"\"MONTH_NUMBER: The number of the month\n YEAR: The year to grab the data from\"\"\"\n try:\n month_number = int(month_number)\n except ValueError:\n click.echo('The month entered is invalid')\n exit()\n if month_number < 1 or month_number > 12:\n click.echo('The month entered is invalid')\n exit()\n\n try:\n year = int(year)\n except ValueError:\n click.echo('The year entered is invalid')\n exit()\n if year < 2015:\n click.echo('No data from before 2015 is present')\n exit()\n raise NotImplementedError\n","sub_path":"trending/command_line.py","file_name":"command_line.py","file_ext":"py","file_size_in_byte":5764,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"315873377","text":"\nCOMMA = ','\nNEWLINE = '\\n'\nCOMMA_JOIN = \"{0},{1}\"\nWRITEABLE = 'w'\n\ndef coroutine(func):\n \"\"\"\n A decorator to start coroutines\n\n :param:\n \n - `func`: A coroutine function.\n \"\"\"\n def wrap(*args, **kwargs):\n coroutine_func = func(*args, **kwargs)\n coroutine_func.next()\n return coroutine_func\n return wrap\n\n@coroutine\ndef broadcast(targets):\n \"\"\"\n A coroutine to broadcast input.\n \n :param:\n\n - `targets`: A list of coroutines to send output to.\n \"\"\"\n while True:\n line = (yield)\n for target in targets:\n target.send(line)\n return\n\n@coroutine\ndef comma_join(target, input_count):\n \"\"\"\n This outputs the data in the opposite order that it's received.\n This way the source of the data pipeline is output first.\n \n :param:\n\n - `target`: A coroutine to send output to.\n - `input_count`: number of inputs before creating line to send.\n \"\"\"\n inputs = range(input_count)\n while True:\n line = COMMA.join(reversed([(yield) for source in inputs]))\n target.send(line)\n return\n\n@coroutine\ndef output(target_file): \n \"\"\"\n Writes input to the target file\n \n :param:\n\n - `target_file`: A file-like object to write output to.\n \"\"\"\n while True:\n line = (yield)\n if not line.endswith(NEWLINE):\n line += NEWLINE\n target_file.write(line)\n return\n\n@coroutine\ndef comma_append(source, target):\n \"\"\"\n Joins a source stream output and incoming strings with commas\n\n :param:\n\n - `source`: iterable of strings\n - `target`: target to send joined strings\n \"\"\"\n for line in source:\n line_2 = (yield)\n target.send(COMMA_JOIN.format(line.rstrip(NEWLINE), line_2))\n return\n\n@coroutine\ndef file_output(file_object):\n \"\"\"\n Writes strings to a file, making sure there's a newline at the end\n\n :param:\n\n - `file_object`: opened, writable file or name of file to 
open\n \"\"\"\n #if not type(file_object) is FileType:\n # file_object = open(file_object, WRITEABLE)\n # python 3 lacks the FileType\n while True:\n line = (yield)\n line = line.rstrip(NEWLINE) + NEWLINE\n file_object.write(line)","sub_path":"iperflexer/coroutine.py","file_name":"coroutine.py","file_ext":"py","file_size_in_byte":2240,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"249763953","text":"#!/usr/bin/env python\n# -*- encoding: utf-8\n\"\"\"\nStart/stop all the instances in an autoscaling group.\n\nUsage: toggle_asg.py (--start | --stop | --enable-overnight | --disable-overnight) [--type=(p2 | t2)] [--namespace=]\n toggle_asg.py [--type=(p2 | t2)] [--namespace=]\n\n\nActions:\n --start Start the autoscaling group (set the desired count to 1).\n --stop Stop the autoscaling group (set the desired count to 0).\n --enable-overnight Keeps the autoscaling group desired count at 1 (disables auto scale down)\n --disable-overnight Allows the automatic scaling action to be set desired count to 0 (enables auto scale down)\n --type=(p2 | t2) AWS Instance type (valid values: p2,t2) defaults to t2\n\n\"\"\"\n\nimport boto3\nimport docopt\n\nfrom asg_utils import discover_asg, set_asg_size\n\n\ndef update_desired_capacity_for_scheduled_action(\n client, scaling_group_name, scheduled_action_name, desired_capacity\n):\n \"\"\"Updates the desired capacity for a scheduled action.\n\n Keyword arguments:\n client -- boto3 autoscaling client\n scaling_group_name -- name of the autoscaling group to modify\n scheduled_action_name -- name of the scheduled action to modify\n desired_capacity -- desired capacity of scheduled action\n \"\"\"\n\n action_str = (\n \"Setting desired capacity of ScheduledAction {scheduled_action_name} to {desired_capacity}.\"\n ).format(\n scheduled_action_name=scheduled_action_name, desired_capacity=desired_capacity\n )\n\n print(action_str)\n\n client.put_scheduled_update_group_action(\n AutoScalingGroupName=scaling_group_name,\n ScheduledActionName=scheduled_action_name,\n Recurrence=\"0 20 * * *\",\n DesiredCapacity=desired_capacity,\n )\n\n\ndef get_status(client, tag_name, scheduled_action_name):\n \"\"\"Gets the status of an autoscaling group.\n\n Keyword arguments:\n client -- boto3 autoscaling client\n tag_name -- tags used to identify autoscaling group\n scheduled_action_name -- name of the scheduled action to get status for\n \"\"\"\n scaling_group = discover_asg(asg_client=client, tag_name=tag_name)\n\n desired_capacity = scaling_group[\"DesiredCapacity\"]\n instance_count = len(scaling_group[\"Instances\"])\n scaling_group_name = scaling_group[\"AutoScalingGroupName\"]\n\n response = client.describe_scheduled_actions(\n AutoScalingGroupName=scaling_group_name,\n ScheduledActionNames=[scheduled_action_name],\n )\n\n scheduled_desired_capacity = response[\"ScheduledUpdateGroupActions\"][0][\n \"DesiredCapacity\"\n ]\n\n return {\n \"scaling_group_name\": scaling_group_name,\n \"desired_capacity\": desired_capacity,\n \"instance_count\": instance_count,\n \"scheduled_desired_capacity\": scheduled_desired_capacity,\n }\n\n\ndef print_status(status):\n \"\"\"Prints the status of an autoscaling group.\n\n Keyword arguments:\n status -- status as returned from the get_status function\n \"\"\"\n instance_str = (\n 'The desired size of autoscaling group \"{name}\" is {desired}.\\n\\nThere are currently {count} running.\\n'\n ).format(\n name=status[\"scaling_group_name\"],\n desired=status[\"desired_capacity\"],\n 
count=status[\"instance_count\"],\n )\n\n if status[\"scheduled_desired_capacity\"] == 1:\n scheduled_action_str = \"The instances will continue running overnight.\\n\"\n else:\n scheduled_action_str = \"The instances will be turned off at 8pm this evening.\\n\"\n\n print(instance_str)\n print(scheduled_action_str)\n\n\nif __name__ == \"__main__\":\n args = docopt.docopt(__doc__)\n\n instance_type = args[\"--type\"] or \"t2\"\n namespace = args[\"--namespace\"] or \"notebook\"\n tag_name = \"jupyter-%s-%s\" % (instance_type, namespace)\n scheduled_action_name = \"ensure_down\"\n\n client = boto3.client(\"autoscaling\")\n scaling_group_name = discover_asg(asg_client=client, tag_name=tag_name)[\n \"AutoScalingGroupName\"\n ]\n\n if args[\"--start\"]:\n set_asg_size(asg_client=client, asg_name=scaling_group_name, desired_size=1)\n\n if args[\"--stop\"]:\n set_asg_size(asg_client=client, asg_name=scaling_group_name, desired_size=0)\n\n if args[\"--enable-overnight\"]:\n update_desired_capacity_for_scheduled_action(\n client=client,\n scaling_group_name=scaling_group_name,\n scheduled_action_name=scheduled_action_name,\n desired_capacity=1,\n )\n\n if args[\"--disable-overnight\"]:\n update_desired_capacity_for_scheduled_action(\n client=client,\n scaling_group_name=scaling_group_name,\n scheduled_action_name=scheduled_action_name,\n desired_capacity=0,\n )\n\n print(\"---\\n\")\n\n status = get_status(client, tag_name, scheduled_action_name)\n print_status(status)\n","sub_path":"data_science/scripts/toggle_asg.py","file_name":"toggle_asg.py","file_ext":"py","file_size_in_byte":4858,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"410232200","text":"# vim: tabstop=4 shiftwidth=4 softtabstop=4\n# Copyright (c) 2013 The Johns Hopkins University/Applied Physics Laboratory\n# All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\n\"\"\"\nA mock implementation of a key manager. This module should NOT be used for\nanything but integration testing.\n\"\"\"\n\nimport array\nimport uuid\n\nfrom nova import exception\nfrom nova.keymgr import key\nfrom nova.keymgr import key_mgr\nfrom nova.openstack.common import log as logging\nfrom nova import utils\n\n\nLOG = logging.getLogger(__name__)\n\n\nclass MockKeyManager(key_mgr.KeyManager):\n \"\"\"\n This mock key manager implementation supports all the methods specified\n by the key manager interface. This implementation creates a single key in\n response to all invocations of create_key. Side effects (e.g., raising\n exceptions) for each method are handled as specified by the key manager\n interface.\n\n This class should NOT be used for anything but integration testing because\n the same key is created for all invocations of create_key and keys are not\n stored persistently.\n \"\"\"\n def __init__(self):\n self.keys = {}\n\n def create_key(self, ctxt, **kwargs):\n \"\"\"Creates a key.\n\n This implementation returns a UUID for the created key. 
A\n NotAuthorized exception is raised if the specified context is None.\n \"\"\"\n if ctxt is None:\n raise exception.NotAuthorized()\n\n # generate the key\n key_length = kwargs.get('key_length', 256)\n # hex digit => 4 bits\n hex_string = utils.generate_password(length=key_length / 4,\n symbolgroups='0123456789ABCDEF')\n\n _bytes = array.array('B', hex_string.decode('hex')).tolist()\n _key = key.SymmetricKey('AES', _bytes)\n\n return self.store_key(ctxt, _key)\n\n def store_key(self, ctxt, key, **kwargs):\n \"\"\"Stores (i.e., registers) a key with the key manager.\n\n This implementation does nothing -- i.e., the specified key is\n discarded.\n \"\"\"\n if ctxt is None:\n raise exception.NotAuthorized()\n\n # generate UUID and ensure that it isn't in use\n key_id = uuid.uuid4()\n while key_id in self.keys:\n key_id = uuid.uuid4()\n\n self.keys[key_id] = key\n\n return key_id\n\n def get_key(self, ctxt, key_id, **kwargs):\n \"\"\"Retrieves the key identified by the specified id.\n\n This implementation returns a fixed key that is associated with the\n UUID returned by the create_key method. A NotAuthorized exception is\n raised if the specified context is None; a KeyError is raised if the\n UUID is invalid.\n \"\"\"\n if ctxt is None:\n raise exception.NotAuthorized()\n\n return self.keys[key_id]\n\n def delete_key(self, ctxt, key_id, **kwargs):\n \"\"\"Deletes the key identified by the specified id.\n\n This implementation intentionally does nothing except raise a\n NotAuthorized exception is the context is None or a KeyError if the\n UUID is invalid.\n \"\"\"\n if ctxt is None:\n raise exception.NotAuthorized()\n\n del self.keys[key_id]\n","sub_path":"nova/tests/keymgr/mock_key_mgr.py","file_name":"mock_key_mgr.py","file_ext":"py","file_size_in_byte":3746,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"583923215","text":"import pandas as pd\nimport json\nimport os\nimport numpy as np\nimport cv2\n\n\ndef annotations_to_yolo(df, images_folder, output_path):\n data = {}\n for idx, row in df.iterrows():\n file_name = row['img_name'].split('.')[0]\n img = cv2.imread(os.path.join(images_folder, f\"{file_name}.jpg\"))\n height, width = 640, 640\n box_width = (float(row['x2']) - float(row['x1'])) / width\n box_height = (float(row['y2']) - float(row['y1'])) / height\n x1 = row['x1'] / width\n y1 = row['y1'] / height\n\n if file_name not in data:\n data[file_name] = [[0, x1, y1, box_width, box_height]]\n else:\n data[file_name].append([0, x1, y1, box_width, box_height])\n\n for key, bbox_list in data.items():\n with open(os.path.join(output_path, f\"{key}.txt\"), \"w\") as f:\n for box in bbox_list:\n f.write(' '.join(str(i) for i in box))\n f.write('\\n')\n f.close()\n\n\ndef parse_csv_to_dict(csv_path, images_folder):\n out_data = {'images': [], 'annotations': []}\n csv_data = pd.read_csv(csv_path)\n img_id_history = []\n for idx, row in csv_data.iterrows():\n img_id = int(row['img_name'].split('.')[0])\n file_name = row['img_name']\n img = cv2.imread(os.path.join(images_folder, file_name))\n height, width = img.shape[:2]\n if img_id not in img_id_history:\n img_id_history.append(img_id)\n out_data['images'].append({'id': img_id,\n 'license': 1,\n 'file_name': f\"{file_name}\",\n \"height\": height,\n \"width\": width,\n \"date_captured\": \"null\"})\n x, y = row['x1'], row['y1']\n h, w = row['y2'] - row['y1'], row['x2'] - row['x1']\n out_data['annotations'].append({\"id\": img_id,\n \"image_id\": img_id,\n \"category_id\": 0,\n 'bbox': 
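# COCO convention: bbox is [x_min, y_min, width, height] in pixels\n 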
[x, y, w, h],\n 'segmentation': [],\n 'area': float(w*h),\n 'iscrowd': 0})\n return out_data\n\n\ndef export_labels_yolo_format(images_df, labels_df, labels_folder):\n if not os.path.isdir(labels_folder):\n os.makedirs(labels_folder)\n for idx, row in images_df.iterrows():\n file_name = row['file_name'].split('.')[0]\n label_file_path = os.path.join(labels_folder, f\"{file_name}.txt\")\n matching_labels = labels_df.loc[labels_df['id'] == row['id']]\n with open(label_file_path, 'a') as f:\n for jdx, label_row in matching_labels.iterrows():\n line = f\"0 {label_row['bbox'][0] / row['width']} {label_row['bbox'][1] / row['height']} \" \\\n f\"{label_row['bbox'][2] / row['width']} {label_row['bbox'][3] / row['height']}\\n\"\n f.write(line)\n f.close()\n\n\ndef write_info_to_json(json_path, data_dict):\n out_dict = {}\n out_dict['info'] = {'year': \"2021\",\n \"version\": 1.0,\n \"description\": \"Retail pricing detection\",\n \"contributor\": \"AmichayFeldman\",\n \"url\": \"\",\n \"date_created\": \"2021-04-06T20:38:00\"}\n out_dict['licenses'] = [{\"url\": \"\",\n \"id\": 1,\n \"name\": \"Normal\"}]\n out_dict['categories'] = [{\"id\": 1,\n \"name\": \"bottle\",\n \"supercategory\": \"bottle\"}]\n out_dict['images'] = data_dict['images']\n out_dict['annotations'] = data_dict['annotations']\n\n with open(json_path, 'w') as fout:\n json.dump(out_dict, fout, indent=4)\n\n\nif __name__ == '__main__':\n csv_path = '/home/amichay/DL/RetailPricingDetection/data/annotations.csv'\n images_folder = '/home/amichay/DL/RetailPricingDetection/data/images'\n json_out_path = '/home/amichay/DL/RetailPricingDetection/data/labels.json'\n yolo_format_labels = '/home/amichay/DL/RetailPricingDetection/data/labels'\n\n annotations_to_yolo(df = pd.read_csv('/home/amichay/DL/RetailPricingDetection/data/annotations.csv'),\n images_folder=images_folder,\n output_path='/home/amichay/DL/RetailPricingDetection/data/labels')\n\n\n #\n # data = parse_csv_to_dict(csv_path=csv_path, images_folder=images_folder)\n # write_info_to_json(json_path=json_out_path, data_dict=data)\n # export_labels_yolo_format(images_df=pd.DataFrame(data['images']), labels_df=pd.DataFrame(data['annotations']),\n # labels_folder=yolo_format_labels)\n #\n\n import pandas as pd\n import os\n import shutil\n\n train_data = pd.read_csv('/home/amichay/DL/RetailPricingDetection/data/train_ids.csv')\n val_data = pd.read_csv('/home/amichay/DL/RetailPricingDetection/data/val_ids.csv')\n\n for idx, row in train_data.iterrows():\n img_path = os.path.join('/home/amichay/DL/RetailPricingDetection/data/images', \"{:04}.jpg\".format(row['img_id']))\n target_path = os.path.join('/home/amichay/DL/RetailPricingDetection/data/images/train2', \"{:04}.jpg\".format(row['img_id']))\n shutil.copy2(src=img_path, dst=target_path)\n\n label_path = os.path.join('/home/amichay/DL/RetailPricingDetection/data/labels', \"{:04}.txt\".format(row['img_id']))\n label_target_path = os.path.join('/home/amichay/DL/RetailPricingDetection/data/labels/train2', \"{:04}.txt\".format(row['img_id']))\n shutil.copy2(src=label_path, dst=label_target_path)","sub_path":"Utils/CsvToCoco.py","file_name":"CsvToCoco.py","file_ext":"py","file_size_in_byte":5738,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"635887710","text":"from pydm import Display\nfrom PyQt5 import QtGui, QtCore, QtWidgets\nfrom PyQt5.QtWidgets import (QWidgetItem, QCheckBox, QPushButton, QLineEdit,\n QGroupBox, QVBoxLayout, QMessageBox, QWidget,\n QLabel, QFrame, QComboBox, 
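# note: several of these widget imports appear unused in this module\n 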
QRadioButton, QGridLayout,\n QColorDialog)\nfrom pydm.widgets import PyDMDrawingRectangle, PyDMLabel\nimport epics\nfrom epics import caget, caput\nfrom functools import partial\n\n\nclass pvCommunicator(Display):\n\t\n\tdef __init__(self, parent=None, args=None):\n\t\tsuper(pvCommunicator, self).__init__(parent=parent, args=args)\n\t\tself.connectElements()\n\t\t\n\tdef ui_filename(self):\n\t\treturn 'pvCommunicator.ui'\n\n\t# Function associates specific PV with each corresponding QLineEdit Object\n\t# Then establishes connection to readValue() function when user enters a value\n\tdef connectElements(self):\n\t\tself.ui.inputPV1.PV = 'SIOC:SYS0:ML07:AO011'\n\t\tself.ui.inputPV1.returnPressed.connect(lambda:self.readValue())\n\n\t\tself.ui.inputPV2.PV = 'SIOC:SYS0:ML07:AO012'\n\t\tself.ui.inputPV2.returnPressed.connect(lambda:self.readValue())\n\n\t\tself.ui.inputPV3.PV = 'SIOC:SYS0:ML07:AO013'\n\t\tself.ui.inputPV3.returnPressed.connect(lambda:self.readValue())\n\n\t\tself.ui.inputPV4.PV = 'SIOC:SYS0:ML07:AO014'\n\t\tself.ui.inputPV4.returnPressed.connect(lambda:self.readValue())\n\n\t\tself.ui.inputPV5.PV = 'SIOC:SYS0:ML07:AO015'\n\t\tself.ui.inputPV5.returnPressed.connect(lambda:self.readValue())\n\n\t\tself.ui.inputPV6.PV = 'SIOC:SYS0:ML07:AO016'\n\t\tself.ui.inputPV6.returnPressed.connect(lambda:self.readValue())\n\n\t\tself.ui.inputPV7.PV = 'SIOC:SYS0:ML07:AO017'\n\t\tself.ui.inputPV7.returnPressed.connect(lambda:self.readValue())\n\n\t\tself.ui.inputPV8.PV = 'SIOC:SYS0:ML07:AO018'\n\t\tself.ui.inputPV8.returnPressed.connect(lambda:self.readValue())\n\n\n\t\tself.ui.cm2inputPV1.PV = 'SIOC:SYS0:ML07:AO019'\n\t\tself.ui.cm2inputPV1.returnPressed.connect(lambda:self.readValue())\n\n\t\tself.ui.cm2inputPV2.PV = 'SIOC:SYS0:ML07:AO020'\n\t\tself.ui.cm2inputPV2.returnPressed.connect(lambda:self.readValue())\n\n\t\tself.ui.cm2inputPV3.PV = 'SIOC:SYS0:ML07:AO021'\n\t\tself.ui.cm2inputPV3.returnPressed.connect(lambda:self.readValue())\n\n\t\tself.ui.cm2inputPV4.PV = 'SIOC:SYS0:ML07:AO022'\n\t\tself.ui.cm2inputPV4.returnPressed.connect(lambda:self.readValue())\n\n\t\n\t# self.sender() identifies which QLineEdit object was typed into\n\t# changes PV value to corresponding user input\t\n\tdef readValue(self):\n\t\tlineInfo = self.sender()\n\t\tuserInput = lineInfo.text()\n\t\tprint(userInput)\n\t\tcaput(lineInfo.PV, userInput)\n\n\n","sub_path":"CodeTests/pvCommunicator.py","file_name":"pvCommunicator.py","file_ext":"py","file_size_in_byte":2530,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"632273316","text":"import flickr \n\nfrom models import *\nfrom menya.models import Menya, Amateka, Ubuhanga, Ikoranabuhanga\nfrom abana.models import Kumenya, Kwishuli, Bahandi\nfrom inkuru.models import Ndende, Ishushanyije, Namateka\nfrom amakuru.models import Siporo, Ibivugwa, Amakuru\nfrom urubuga.models import Ibitekerezo, Tubwire\nfrom ubuvanganzo.models import Abahanzi, Ubuvanganzo\n\nfrom django.shortcuts import render_to_response, render, get_object_or_404\n\n\ndef home(request):\n # from flickr photos, get one tagged 'main'\n # import pdb; pdb.set_trace()\n main = flickr.api.walk(user_id=flickr.user_id, tags=\"main\", sort=\"date-posted-desc\")\n #get all photos tagged 'favorite'\n favorites = flickr.api.walk(user_id=flickr.user_id, tags=\"favorite\", sort = \"date-posted-desc\")\n images = []\n # The following sizes are for testing\n sizes = ['495x374', '495x374']\n \n try:\n main_photo = list(iter(main))[0]\n 
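# 'b' is assumed to be Flickr's large-size suffix, following the standard Flickr size codes\n 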
images.append((flickr.get_url(main_photo, 'b'), sizes[0], main_photo.get('title')))\n \n # This occurs when the main photo is the same with the favorite\n for i, favorite in enumerate(favorites):\n if main_photo.get('id') != favorite.get('id'):\n images.append((flickr.get_url(favorite, 'b'), sizes[i % len(sizes)], favorite.get('title')))\n except:\n # When main_photo is different from favorite, this occurs\n for i, favorite in enumerate(favorites):\n images.append((flickr.get_url(favorite, 'b'), sizes[i % len(sizes)], favorite.get('title')))\n\n recent = Menya.objects.filter(is_active=True).order_by('-created_on')[:5]\n context = dict(images=images, recent=recent)\n return render(request, 'public/home.html', context)\n\n#### +++++++++++++++++++++++ Les suivants composent le rubrique UBUMENYI\n\ndef ubumenyi(request):\n posts_ama = Amateka.objects.filter(is_active=True).order_by('-created_on')\n posts_me = Menya.objects.filter(is_active=True).order_by('-created_on')\n posts_ubu = Ubuhanga.objects.filter(is_active=True).order_by('-created_on')\n posts_iko = Ikoranabuhanga.objects.filter(is_active=True).order_by('-created_on')\n context = dict(posts_ama=posts_ama, posts_me=posts_me, posts_ubu=posts_ubu, posts_iko=posts_iko)\n return render(request, 'public/ubumenyi.html', context)\n\ndef menya(request):\n posts = Menya.objects.filter(is_active=True).order_by('-created_on')\n context = dict(posts=posts)\n return render(request, 'public/menya.html',context)\n\ndef amateka(request):\n posts = Amateka.objects.filter(is_active=True).order_by('-created_on')\n context = dict(posts=posts)\n return render(request, 'public/amateka.html', context)\n\ndef ubuhanga(request):\n posts = Ubuhanga.objects.filter(is_active=True).order_by('-created_on')\n context = dict(posts=posts)\n return render(request, 'public/ubuhanga.html', context)\n\ndef ikoranabuhanga(request):\n posts = Ikoranabuhanga.objects.filter(is_active=True).order_by('-created_on')\n context = dict(posts=posts)\n return render(request, 'public/ikoranabuhanga.html', context)\n\n#### ++++++++++++++++++++++ Les suivants composent le rubrique ABANA\n\ndef abana(request):\n posts_ba = Bahandi.objects.filter(is_active=True).order_by('-created_on')[:1]\n posts_ku = Kumenya.objects.filter(is_active=True).order_by('-created_on')[:1]\n posts_kwi = Kwishuli.objects.filter(is_active=True).order_by('-created_on')[:1]\n context = dict(posts_ba=posts_ba, posts_ku=posts_ku, posts_kwi=posts_kwi)\n return render(request, 'public/abana.html', context)\n\ndef bahandi(request):\n posts = Bahandi.objects.filter(is_active=True).order_by('-created_on')\n context = dict(posts=posts)\n return render(request, 'public/bahandi.html', context)\n\ndef kumenya(request):\n posts = Kumenya.objects.filter(is_active=True).order_by('-created_on')\n context = dict (posts=posts)\n return render(request, 'public/kumenya.html', context)\n\ndef kwishuli(request):\n posts= Kwishuli.objects.filter(is_active=True).order_by('-created_on')\n context = dict(posts=posts)\n return render(request, 'public/kwishuli.html', context)\n\n#### ++++++++++++++++++++++++ Les suivants composent le rubrique INKURU\n\ndef inkuru(request):\n ndende = Ndende.objects.filter(is_active=True).order_by('-created_on')[:1]\n namateka = Namateka.objects.filter(is_active=True).order_by('-created_on')[:1]\n ishushanyije = Ishushanyije.objects.filter(is_active=True).order_by('-created_on')[:1]\n context = dict(ndende=ndende, namateka=namateka, ishushanyije=ishushanyije)\n return render(request, 'public/inkuru.html', 
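# same context-then-render pattern as the other section views\n 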
context)\n\ndef inkuruNdende(request):\n posts = Ndende.objects.filter(is_active=True).order_by('-created_on')\n context = dict(posts=posts)\n return render(request, 'public/ndende.html', context)\n\ndef inkuruIshushanyije(request):\n posts = Ishushanyije.objects.filter(is_active=True).order_by('-created_on')\n context = dict(posts=posts)\n return render(request, 'public/ishushanyije.html', context)\n\ndef inkuruNamateka(request):\n posts = Namateka.objects.filter(is_active=True).order_by('-created_on')\n context = dict(posts=posts)\n return render(request, 'public/namateka.html', context)\n\n\n#### +++++++++++++++++++++ Les suivants composent le rubrique AMAKURU\n\ndef amakurus(request):\n posts_sip = Siporo.objects.filter(is_active=True).order_by('-created_on')[:1]\n posts_ibi = Ibivugwa.objects.filter(is_active=True).order_by('-created_on')[:1]\n posts_ama = Amakuru.objects.filter(is_active=True).order_by('-created_on')[:1]\n context = dict(posts_sip=posts_sip, posts_ibi=posts_ibi, posts_ama=posts_ama)\n return render(request, 'public/amakurus.html', context)\n\ndef siporo(request):\n posts_sipo = Siporo.objects.filter(is_active=True).order_by('-created_on')\n context = dict(posts_sipo=posts_sipo)\n return render(request, 'public/siporo.html', context)\n\ndef ibivugwa(request):\n posts_ibiv = Ibivugwa.objects.filter(is_active=True).order_by('-created_on')\n context = dict(posts_ibiv=posts_ibiv)\n return render(request, 'public/ibivugwa.html', context)\n\ndef amakuru(request):\n posts_amak = Amakuru.objects.filter(is_active=True).order_by('-created_on')\n context = dict(posts_amak=posts_amak)\n return render(request, 'public/amakuru.html', context)\n\n\n##### ++++++++++++++++++++++ Les suivants composent le rubrique URUBUGA\n\ndef urubuga(request):\n posts_ibit = Ibitekerezo.objects.filter(is_active=True).order_by('-created_on')[:1]\n posts_tubw = Tubwire.objects.filter(is_active=True).order_by('-created_on')[:1]\n context = dict(posts_ibit=posts_ibit, posts_tubw=posts_tubw)\n return render(request, 'public/urubuga.html', context)\n\ndef ibitekerezo(request):\n posts_ibi = Ibitekerezo.objects.filter(is_active=True).order_by('-created_on')[:1]\n context = dict(posts_ibi=posts_ibi)\n return render(request, 'public/ibitekerezo.html', context)\n\ndef tubwire(request):\n posts_tub = Tubwire.objects.filter(is_active=True).order_by('-created_on')[:1]\n context = dict(posts_tub=posts_tub)\n return render(request, 'public/tubwire.html', context)\n\n#### +++++++++++++++++++++++ Les suivants composent le rubrique Ubuvanganzo\n\ndef ubuvanganzos(request):\n posts_ubuv = Ubuvanganzo.objects.filter(is_active=True).order_by('-created_on')[:1]\n posts_abah = Abahanzi.objects.filter(is_active=True).order_by('-created_on')[:1]\n context = dict(posts_abah=posts_abah, posts_ubuv=posts_ubuv)\n return render(request, 'public/ubuvanganzos.html', context)\n\ndef abahanzi(request):\n posts_aba = Abahanzi.objects.filter(is_active=True).order_by('-created_on')[:1]\n context = dict(posts_aba=posts_aba)\n return render(request, 'public/abahanzi.html', context)\n\ndef ubuvanganzo(request):\n posts_ubuv = Ubuvanganzo.objects.filter(is_active=True).order_by('-created_on')[:1]\n context = dict(posts_ubuv=posts_ubuv)\n return render(request, 'public/ubuvanganzo.html', context)\n","sub_path":"ibaba/public/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":7965,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"224960891","text":"# -*- coding: utf-8 
-*-\n\"\"\"\nCreated on Sun May 03 09:38:03 2015\n\n@author: Mirko\n\"\"\"\nimport os, glob, json, re\nimport openhydro as oh\nfrom openhydro.Exceptions import *\n\ndef sql(structure, operation, where=None, replace=None, like=None, where_in=None):\n \"\"\"\n \"\"\"\n # save old path\n old_path = os.getcwd()\n \n # change to file directory\n os.chdir(os.path.join(oh.__base__, 'queries/', oh.__profile__))\n for path in glob.glob('*.build'):\n build = json.loads(open(path, 'r').read())\n # load operation sql from structure \n try:\n obj = build[structure]\n try:\n found = obj[operation]\n except:\n # operation for structure not found\n continue # next file \n except:\n # structure not found\n continue # next file\n \n # set old path again\n os.chdir(old_path)\n \n # check if a sql statement was found\n try:\n sql = found\n except:\n return False\n \n # check for replacments\n pattern = re.compile(\"###_.*?_###\")\n if len(pattern.findall(sql)) > 0:\n if replace is not None:\n # another check necessary to put where in replace\n if where_in is not None or like is not None and not '###_where_###' in replace:\n replace['###_where_###'] = True\n sql = _reg_replace(sql, replace, where, like, where_in)\n else:\n raise ConnectException(\"the SQL statements includes replacement patterns, but no replace dict was passed.\\n SQL:{0}\\nPatterns:{1}\".format(sql, pattern.findall(sql)))\n else:\n # _reg_replace handles where, if not used handle where here\n sql = \"{0} {1}\".format(sql,_build_where(where, like, where_in)) \n \n return sql\n\n\n\ndef _build_where(where=None, like=None, where_in=None):\n \"\"\"\n Create where clause from dictionary and return.\n \"\"\"\n sql = \"\"\n # handle where using =\n if where is not None: \n if isinstance(where, dict):\n w = \" AND \".join([\"\\\"{0}\\\"=\\'{1}\\'\".format(e, where[e]) for e in where])\n sql = \" WHERE {0}\".format(w) \n else:\n raise TypeError(\"where has to be of type dict, found {0}.\".format(where.__class__))\n # handle where LIKE \n if like is not None:\n if isinstance(like, dict):\n l = \" AND \".join([\"\\\"{0}\\\" LIKE \\'{1}\\'\".format(e, like[e]) for e in like])\n if sql == \"\":\n sql = \" WHERE {0}\".format(l)\n else:\n sql = \"{0} AND {1}\".format(sql, l)\n else:\n raise TypeError(\"like has to be of type dict, found {0}.\".format(like.__class__))\n \n # handle where IN ()\n if where_in is not None:\n if isinstance(where_in, dict):\n i = \" AND \".join([\"\\\"{0}\\\" IN ({1})\".format(e, \",\".join([\"'{0}'\".format(ei) for ei in where_in[e]])) for e in where_in])\n if sql == \"\":\n sql = \" WHERE {0}\".format(i)\n else:\n sql = \"{0} AND {1}\".format(sql, i)\n else:\n raise TypeError(\"where_in has to be of type dict, found {0}.\".format(where_in.__class__))\n \n return sql\n\n\n\ndef _reg_replace(sql, replace, where=None, like=None, where_in=None):\n \"\"\"\n Replace replacement pattern like ###_pattern_### in the sql string by value \n of key pattern in the replacement dict.\n \"\"\"\n # check data type\n if not isinstance(replace, dict):\n raise TypeError(\"replace has to be a dictionary of patter:replacement pairs, found {0}.\".format(replace.__class__))\n \n # create regular expression\n pattern = re.compile(\"###_.*?_###\")\n \n # handle where\n # this is necessary if the sql string has code after where\n w = _build_where(where, like, where_in)\n if \"###_where_###\" in sql:\n sql = sql.replace(\"###_where_###\", w)\n else:\n # append\n sql = \"{0} {1}\".format(sql, w) \n \n \n for rep in pattern.findall(sql):\n try:\n sql = 
sql.replace(rep, replace[rep])\n except KeyError:\n continue\n \n # check if sql does still contain pattern:\n if len(pattern.findall(sql)) > 0:\n raise ConnectException(\"The SQL statement does still contain replacement pattern not defined in replace.\\n\\n SQL:{0}\\n\\nReplace:{1}\".format(sql, replace))\n else:\n return sql\n ","sub_path":"queries/SqlBuilder.py","file_name":"SqlBuilder.py","file_ext":"py","file_size_in_byte":4433,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"384812279","text":"from otree.api import Currency as c, currency_range\nfrom ._builtin import Page, WaitPage\nfrom .models import Constants\nfrom datetime import datetime\n\n\nclass MyPage(Page):\n\n def get_timeout_seconds(self):\n target = datetime(2120, 10, 6, 22, 50)\n today = datetime.now()\n countdown = target - today\n time_left = countdown.total_seconds()\n return time_left\n\n\npage_sequence = [\n MyPage,\n]\n","sub_path":"countdown/pages.py","file_name":"pages.py","file_ext":"py","file_size_in_byte":425,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"85406507","text":"# coding=utf-8\nimport sys\nsys.path.insert(0, r'../Application')\nimport labo2\nimport re\nimport os\n\n\nif (len(sys.argv) < 2):\n\traise Exception(\"Usage: rapportmaker outputfile\")\nif os.path.exists(sys.argv[1]):\n\traise Exception(\"File already exists !\")\n\nstats = labo2.run(False, True, \"data.json\", 1000000)\n\nformat = \"%0.4f\"\nformat2 = \"%0.9f\"\n\n\n\n_vars = {\n\t\"basic_moy_unif\": stats[\"basic\"][\"samplers\"][\"UniformSampler\"][\"average\"],\n\t\"basic_moy_pref\": stats[\"basic\"][\"samplers\"][\"ImportanceSampler\"][\"average\"],\n\t\"basic_moy_cont\": stats[\"basic\"][\"samplers\"][\"UniformControlledSampler\"][\"average\"],\n\n\t\"main_eval_moy\": stats[\"main\"][\"evaluation\"][\"value\"],\n\n\t\"main_moy_unif\": stats[\"main\"][\"samplers\"][\"UniformSampler\"][\"average\"],\n\t\"main_moy_pref\": stats[\"main\"][\"samplers\"][\"ImportanceSampler\"][\"average\"],\n\t\"main_moy_cont\": stats[\"main\"][\"samplers\"][\"UniformControlledSampler\"][\"average\"],\n\n\t\"main_std_unif\": stats[\"main\"][\"samplers\"][\"UniformSampler\"][\"std_deviation\"],\n\t\"main_std_pref\": stats[\"main\"][\"samplers\"][\"ImportanceSampler\"][\"std_deviation\"],\n\t\"main_std_cont\": stats[\"main\"][\"samplers\"][\"UniformControlledSampler\"][\"std_deviation\"],\n\n\t\"comp_tim_unif\": stats[\"comp\"][\"samplers\"][\"UniformSampler\"][\"time\"],\n\t\"comp_tim_pref\": stats[\"comp\"][\"samplers\"][\"ImportanceSampler\"][\"time\"],\n\t\"comp_tim_cont\": stats[\"comp\"][\"samplers\"][\"UniformControlledSampler\"][\"time\"],\n\n\t\"comp_std_unif\": float(format2%stats[\"comp\"][\"samplers\"][\"UniformSampler\"][\"std_deviation\"]),\n\t\"comp_std_pref\": float(format2%stats[\"comp\"][\"samplers\"][\"ImportanceSampler\"][\"std_deviation\"]),\n\t\"comp_std_cont\": float(format2%stats[\"comp\"][\"samplers\"][\"UniformControlledSampler\"][\"std_deviation\"]),\n\n}\n\nconf_intervals = {\n\t\"main_int_unif\": stats[\"main\"][\"samplers\"][\"UniformSampler\"][\"conf_interval\"],\n\t\"main_int_pref\": stats[\"main\"][\"samplers\"][\"ImportanceSampler\"][\"conf_interval\"],\n\t\"main_int_cont\": stats[\"main\"][\"samplers\"][\"UniformControlledSampler\"][\"conf_interval\"],\n\n\t\"comp_int_unif\": stats[\"comp\"][\"samplers\"][\"UniformSampler\"][\"conf_interval\"],\n\t\"comp_int_pref\": 
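# each value is an (a, b) tuple whose endpoints get rounded in the loop below\n\t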
stats[\"comp\"][\"samplers\"][\"ImportanceSampler\"][\"conf_interval\"],\n\t\"comp_int_cont\": stats[\"comp\"][\"samplers\"][\"UniformControlledSampler\"][\"conf_interval\"],\n}\n\n\nfor key in conf_intervals:\n\t(a, b) = conf_intervals[key]\n\t_vars[key] = float(format%a), float(format%b)\n\n\n\n\nwith open(\"rapport.md\", 'r') as _input:\n\twith open(sys.argv[1], 'w') as _output:\n\t\tfor line in _input:\n\t\t\tl = line\n\t\t\tvars_to_replace = re.findall(\"§(.*?)§\", l)\n\t\t\tfor replacing in vars_to_replace:\n\t\t\t\ttoReplace = \"§\" + replacing + \"§\"\n\t\t\t\tl = re.sub(toReplace, str(\" \" + str(_vars[replacing])), l, 1)\n\t\t\t_output.write(l)\n\n","sub_path":"Labo2/rapportmaker.py","file_name":"rapportmaker.py","file_ext":"py","file_size_in_byte":2635,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"98325437","text":"import array\nimport copy\nimport ctypes\nfrom numpy.testing import assert_array_equal, assert_array_almost_equal\nimport numpy\n\nimport caproto as ca\nimport inspect\nimport pytest\n\n\nip = '255.255.255.255'\n\n\nparameter_values = {\n 'address': [ip],\n 'client_address': [ip],\n 'ip': [ip],\n 'repeater_address': [ip],\n\n 'access_rights': [3],\n 'beacon_id': [3],\n 'cid': [2],\n 'data_count': [1],\n 'data_type': [ca.DBR_LONG.DBR_ID],\n 'error_message': ['error msg'],\n 'header': [9],\n 'high': [27],\n 'ioid': [3],\n 'low': [8],\n 'mask': [3],\n 'metadata': [None],\n 'name': ['name'],\n 'original_request': [ca.CreateChanRequest('test', 0, 1)],\n 'payload': [ca.DBR_LONG(5)],\n 'port': [4321],\n 'priority': [0],\n 'server_port': [1234],\n 'sid': [2],\n 'status': [99],\n 'status_code': [999],\n 'subscriptionid': [5],\n 'to': [7],\n 'validate': [1],\n 'data': [[1], ],\n 'version': [13],\n}\n\nall_commands = set(ca._commands._commands) - set([ca.Message])\n\n\ndef _np_hack(buf):\n try:\n return buf.tobytes()\n except AttributeError:\n return bytes(buf)\n\n\n@pytest.mark.parametrize('cmd', all_commands)\ndef test_serialize(cmd):\n print()\n print('--- {} ---'.format(cmd))\n sig = inspect.signature(cmd)\n bind_args = {}\n for param in sig.parameters.keys():\n # TODO all combinations of those in the list\n bind_args[param] = parameter_values[param][0]\n\n ba = sig.bind(**bind_args)\n\n print(cmd, ba.arguments)\n inst = cmd(*ba.args, **ba.kwargs)\n\n role = (ca.CLIENT if cmd.DIRECTION is ca.REQUEST\n else ca.SERVER)\n\n print('inst', bytes(inst))\n print(' ', inst)\n print(' ', bytes(inst.header))\n print(' ', inst.buffers)\n print(' ', inst.header.payload_size)\n print(' dt ', inst.header.data_type)\n\n wire_inst = ca.read_datagram(bytes(inst), ('addr', 0), role)[0]\n print('wire', bytes(wire_inst))\n print(' ', wire_inst)\n print(' ', bytes(wire_inst.header))\n print(' ', wire_inst.buffers)\n print(' ', wire_inst.header.payload_size)\n assert bytes(wire_inst.header) == bytes(inst.header)\n print(' dt ', wire_inst.header.data_type)\n assert wire_inst == inst\n\n # Smoke test properties.\n signature = inspect.signature(type(inst))\n for arg in signature.parameters:\n getattr(inst, arg)\n getattr(wire_inst, arg)\n len(inst)\n inst.nbytes\n\n\ndef make_channels(cli_circuit, srv_circuit, data_type, data_count, name='a'):\n cid = cli_circuit.new_channel_id()\n sid = srv_circuit.new_channel_id()\n\n cli_channel = ca.ClientChannel(name, cli_circuit, cid)\n srv_channel = ca.ServerChannel(name, srv_circuit, cid)\n req = cli_channel.create()\n cli_circuit.send(req)\n commands, num_bytes_needed = 
srv_circuit.recv(bytes(req))\n for command in commands:\n srv_circuit.process_command(command)\n res = srv_channel.create(data_type, data_count, sid)\n srv_circuit.send(res)\n commands, num_bytes_needed = cli_circuit.recv(bytes(res))\n for command in commands:\n cli_circuit.process_command(command)\n return cli_channel, srv_channel\n\n\n# Define big-endian arrays for use below in test_reads and test_writes.\nint_arr = array.array('h', [7, 21, 2, 4, 5])\nint_arr.byteswap()\nfloat_arr = array.array('f', [7, 21.1, 3.1])\nfloat_arr.byteswap()\nlong_arr = array.array('i', [7, 21, 2, 4, 5])\nlong_arr.byteswap()\ndouble_arr = array.array('d', [7, 21.1, 3.1])\ndouble_arr.byteswap()\n\npayloads = [\n # data_type, data_count, data, metadata\n (ca.ChannelType.INT, 1, (7,), None),\n (ca.ChannelType.INT, 5, (7, 21, 2, 4, 5), None),\n (ca.ChannelType.INT, 5, int_arr, None),\n (ca.ChannelType.INT, 5, bytes(int_arr), None),\n (ca.ChannelType.INT, 5, numpy.array([7, 21, 2, 4, 5], dtype='i2'), None),\n\n (ca.ChannelType.FLOAT, 1, (7,), None),\n (ca.ChannelType.FLOAT, 3, (7, 21.1, 3.1), None),\n (ca.ChannelType.FLOAT, 3, float_arr, None),\n (ca.ChannelType.FLOAT, 3, bytes(float_arr), None),\n (ca.ChannelType.FLOAT, 3, numpy.array([7, 21.1, 3.1], dtype='f4'), None),\n\n (ca.ChannelType.LONG, 1, (7,), None),\n (ca.ChannelType.LONG, 2, (7, 21), None),\n (ca.ChannelType.LONG, 5, numpy.array([7, 21, 2, 4, 5], dtype='i4'), None),\n (ca.ChannelType.LONG, 5, long_arr, None),\n (ca.ChannelType.LONG, 5, bytes(long_arr), None),\n\n (ca.ChannelType.DOUBLE, 1, (7,), None),\n (ca.ChannelType.DOUBLE, 6, (7, 21.1, 7, 7, 2.1, 1.1), None),\n (ca.ChannelType.DOUBLE, 3, numpy.array([7, 21.1, 3.1], dtype='f8'), None),\n (ca.ChannelType.DOUBLE, 3, double_arr, None),\n (ca.ChannelType.DOUBLE, 3, bytes(double_arr), None),\n\n (ca.ChannelType.TIME_DOUBLE, 1, (7,),\n ca.DBR_TIME_DOUBLE(1, 0, ca.TimeStamp(3, 5))),\n (ca.ChannelType.TIME_DOUBLE, 1, (7,), (1, 0, ca.TimeStamp(3, 5))),\n (ca.ChannelType.TIME_DOUBLE, 2, (7, 3.4), (1, 0, ca.TimeStamp(3, 5))),\n\n (ca.ChannelType.STRING, 1, b'abc'.ljust(40, b'\\x00'), None),\n (ca.ChannelType.STRING, 3, 3 * b'abc'.ljust(40, b'\\x00'), None),\n (ca.ChannelType.STRING, 3, numpy.array(['abc', 'def'], '>S40'), None),\n (ca.ChannelType.STRING, 3, numpy.array(['abc', 'def'], 'S40'), None),\n (ca.ChannelType.CHAR, 1, b'z', None),\n (ca.ChannelType.CHAR, 3, b'abc', None),\n]\n\n\n@pytest.mark.parametrize('data_type, data_count, data, metadata', payloads)\ndef test_reads(circuit_pair, data_type, data_count, data, metadata):\n\n cli_circuit, srv_circuit = circuit_pair\n cli_channel, srv_channel = make_channels(*circuit_pair, data_type,\n data_count)\n\n req = ca.ReadNotifyRequest(data_count=data_count, data_type=data_type,\n ioid=0, sid=0)\n buffers_to_send = cli_circuit.send(req)\n # Socket transport would happen here. Calling bytes() simulates\n # serialization over the socket.\n commands, _ = srv_circuit.recv(*(_np_hack(buf) for buf in buffers_to_send))\n for command in commands:\n srv_circuit.process_command(command)\n res = ca.ReadNotifyResponse(data=data, metadata=metadata,\n data_count=data_count, data_type=data_type,\n ioid=0, status=1)\n buffers_to_send = srv_circuit.send(res)\n # Socket transport would happen here. 
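In this unit test there is no real socket. 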
Calling bytes() simulates\n # serialization over the socket.\n commands, _ = cli_circuit.recv(*(_np_hack(buf) for buf in buffers_to_send))\n res_received, = commands\n cli_circuit.process_command(res_received)\n\n if isinstance(data, array.ArrayType):\n # Before comparing array.array (which exposes the byteorder naively)\n # with a numpy.ndarray (which buries the byteorder in dtype), flip\n # the byte order to little-endian.\n expected = copy.deepcopy(data)\n expected.byteswap()\n assert_array_almost_equal(res_received.data, expected)\n elif isinstance(data, bytes):\n assert data == _np_hack(res_received.data)\n else:\n try:\n assert_array_equal(res_received.data, data) # for strings\n except AssertionError:\n assert_array_almost_equal(res_received.data, data) # for floats\n\n\n@pytest.mark.parametrize('data_type, data_count, data, metadata', payloads)\ndef test_writes(circuit_pair, data_type, data_count, data, metadata):\n\n cli_circuit, srv_circuit = circuit_pair\n cli_channel, srv_channel = make_channels(*circuit_pair, 5, 1)\n\n req = ca.WriteNotifyRequest(data=data, metadata=metadata,\n data_count=data_count,\n data_type=data_type, ioid=0, sid=0)\n buffers_to_send = cli_circuit.send(req)\n # Socket transport would happen here. Calling bytes() simulates\n # serialization over the socket.\n commands, _ = srv_circuit.recv(*(_np_hack(buf) for buf in buffers_to_send))\n for command in commands:\n srv_circuit.process_command(command)\n req_received, = commands\n res = ca.WriteNotifyResponse(data_count=data_count, data_type=data_type,\n ioid=0, status=1)\n buffers_to_send = srv_circuit.send(res)\n\n # Socket transport would happen here. Calling bytes() simulates\n # serialization over the socket.\n commands, _ = cli_circuit.recv(*(_np_hack(buf) for buf in buffers_to_send))\n for command in commands:\n cli_circuit.process_command(command)\n\n if isinstance(data, array.ArrayType):\n # Before comparing array.array (which exposes the byteorder naively)\n # with a numpy.ndarray (which buries the byteorder in dtype), flip\n # the byte order to little-endian.\n expected = copy.deepcopy(data)\n expected.byteswap()\n assert_array_almost_equal(req_received.data, expected)\n elif isinstance(data, bytes):\n assert data == _np_hack(req_received.data)\n else:\n try:\n assert_array_equal(req_received.data, data) # for strings\n except AssertionError:\n assert_array_almost_equal(req_received.data, data) # for floats\n\n\ndef test_extended():\n req = ca.ReadNotifyRequest(data_type=5, data_count=1000000, sid=0, ioid=0)\n assert req.header.data_count == 1000000\n assert req.data_count == 1000000\n assert isinstance(req.header, ca.ExtendedMessageHeader)\n\n\ndef test_bytelen():\n with pytest.raises(ca.CaprotoNotImplementedError):\n ca.bytelen([1, 2, 3])\n assert ca.bytelen(b'abc') == 3\n assert ca.bytelen(bytearray(b'abc')) == 3\n assert ca.bytelen(array.array('d', [1, 2, 3])) == 3 * 8\n assert ca.bytelen(numpy.array([1, 2, 3], 'f8')) == 3 * 8\n assert ca.bytelen(memoryview(b'abc')) == 3\n assert ca.bytelen(ctypes.c_uint(1)) == 4\n\n\ndef test_overlong_strings():\n with pytest.raises(ca.CaprotoValueError):\n ca.SearchRequest(name='a' * (ca.MAX_RECORD_LENGTH + 1), cid=0, version=13)\n\n\nskip_ext_headers = [\n 'MessageHeader',\n 'ExtendedMessageHeader',\n\n # TODO: these have no args, and cannot get an extended header\n 'EchoRequestHeader',\n 'EchoResponseHeader',\n 'EventsOffRequestHeader',\n 'EventsOnRequestHeader',\n 'ReadSyncRequestHeader',\n]\n\n\nall_headers = [header_name\n for header_name in 
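# inspect every name exported by ca._headers\n 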
dir(ca._headers)\n if header_name.endswith('Header')\n and not header_name.startswith('_')\n and header_name not in skip_ext_headers]\n\n\n@pytest.mark.parametrize('header_name', all_headers)\ndef test_extended_headers_smoke(header_name):\n header = getattr(ca, header_name)\n sig = inspect.signature(header)\n\n regular_bind_args = {}\n extended_bind_args = {}\n for param in sig.parameters.keys():\n regular_bind_args[param] = 0\n extended_bind_args[param] = 2 ** 32\n\n reg_args = sig.bind(**regular_bind_args)\n reg_hdr = header(*reg_args.args, **reg_args.kwargs)\n\n ext_args = sig.bind(**extended_bind_args)\n ext_hdr = header(*ext_args.args, **ext_args.kwargs)\n\n print(reg_hdr)\n assert isinstance(reg_hdr, ca.MessageHeader)\n print(ext_hdr)\n assert isinstance(ext_hdr, ca.ExtendedMessageHeader)\n","sub_path":"caproto/tests/test_serialization.py","file_name":"test_serialization.py","file_ext":"py","file_size_in_byte":11058,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"335482485","text":"#!/usr/bin/python\n'''\n (C) Copyright 2017-2019 Intel Corporation.\n\n Licensed under the Apache License, Version 2.0 (the \"License\");\n you may not use this file except in compliance with the License.\n You may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\n Unless required by applicable law or agreed to in writing, software\n distributed under the License is distributed on an \"AS IS\" BASIS,\n WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n See the License for the specific language governing permissions and\n limitations under the License.\n\n GOVERNMENT LICENSE RIGHTS-OPEN SOURCE SOFTWARE\n The Government's rights to use, modify, reproduce, release, perform, display,\n or disclose this software are subject to the terms of the Apache License as\n provided in Contract No. 
B609815.\n Any reproduction of computer software, computer software documentation, or\n portions thereof marked with this legend must also reproduce the markings.\n'''\nfrom __future__ import print_function\n\nimport subprocess\n\ndef check_for_pool(host, uuid):\n \"\"\"\n Function to check if pool folder exist on server\n Args:\n host: Server host name\n uuid: Pool uuid to check if exists\n return:\n resp: subprocess return code\n \"\"\"\n cmd = \"test -e /mnt/daos/\" + uuid\n resp = subprocess.call([\"ssh\", host, cmd])\n if resp == 0:\n print ('%s exists' %uuid)\n else:\n print ('%s does not exist' %uuid)\n return resp\n\ndef cleanup_pools(hosts):\n \"\"\"\n To cleanup the pool and content from /mnt/daps/\n Args:\n hosts[list]: Lists of servers name\n return\"\n None\n \"\"\"\n for host in hosts:\n cmd = \"rm -rf /mnt/daos/*\"\n subprocess.call([\"ssh\", host, cmd])\n","sub_path":"src/tests/ftest/util/check_for_pool.py","file_name":"check_for_pool.py","file_ext":"py","file_size_in_byte":1783,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"493771162","text":"\n\n#calss header\nclass _CRACKDOWN():\n\tdef __init__(self,): \n\t\tself.name = \"CRACKDOWN\"\n\t\tself.definitions = [u'a situation in which someone starts to deal with bad or illegal behaviour in a more severe way: ']\n\n\t\tself.parents = []\n\t\tself.childen = []\n\t\tself.properties = []\n\t\tself.jsondata = {}\n\n\n\t\tself.specie = 'nouns'\n\n\n\tdef run(self, obj1 = [], obj2 = []):\n\t\treturn self.jsondata\n","sub_path":"xai/brain/wordbase/nouns/_crackdown.py","file_name":"_crackdown.py","file_ext":"py","file_size_in_byte":382,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"450834375","text":"import pymysql\n# config is not a real module, it is just a user file with the MySQL credentials\nimport config\n\n\n# basis for class taken from\n# https://medium.com/@vipinc.007/python-a-database-interaction-class-using-pymysql-3338fb90f38c\nclass MySQLModule:\n # MySQL database credentials stored in user created config file\n # To use this code yourself, you need to get rid of the import config\n # statement and replace the parameters of the connect statement with\n # your MySQL database credentials\n def __connect__(self):\n self.connection = pymysql.connect(host=config.HOST,\n user=config.USER,\n password=config.PASS,\n charset=config.CHARSET)\n self.cursor = self.connection.cursor()\n\n def __disconnect__(self):\n self.connection.close()\n\n # gets the results of a valid MySQL command\n def fetch(self, cmd, commit=False, args=()):\n self.execute(cmd, commit, args)\n fetch_result = self.cursor.fetchall()\n return fetch_result\n\n # executes a MySQL command\n def execute(self, cmd, commit=False, args=()):\n self.__connect__()\n exception_found = False\n if len(args) == 0:\n try:\n self.cursor.execute(cmd)\n except pymysql.err.IntegrityError:\n print(\"Error executing command: \" + cmd)\n exception_found = True\n else:\n try:\n self.cursor.execute(cmd, args)\n except pymysql.err.IntegrityError:\n print(\"Error executing command: \" + cmd + \" with args: %s\" % (args,))\n exception_found = True\n # only if the command needs to be committed\n if commit:\n if not exception_found:\n self.connection.commit()\n self.__disconnect__()\n\n def insert_ping_val(self, ping):\n cmd = \"INSERT INTO ping_database.ping_data (time_value, ping_value) VALUES\" \\\n \"(%s, %s)\"\n self.execute(cmd, True, (ping[0], ping[1],))\n\n def 
get_ping_values(self):\n cmd = \"SELECT * FROM ping_database.ping_data\"\n return self.fetch(cmd)\n\n def init_database(self):\n cmd = \"CREATE DATABASE IF NOT EXISTS ping_database\"\n self.execute(cmd)\n\n cmd = \"DROP TABLE IF EXISTS ping_database.ping_data\"\n self.execute(cmd)\n\n cmd = \"CREATE TABLE ping_database.ping_data (time_value INT(5), ping_value INT(5))\"\n\n self.execute(cmd)\n\n\nif __name__ == '__main__':\n module = MySQLModule()\n\n module.init_database()\n","sub_path":"mysql_mod.py","file_name":"mysql_mod.py","file_ext":"py","file_size_in_byte":2633,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"322777290","text":"#\n# @lc app=leetcode.cn id=88 lang=python3\n#\n# [88] Merge Sorted Array\n#\n\n# @lc code=start\nclass Solution:\n def merge(self, nums1: List[int], m: int, nums2: List[int], n: int) -> None:\n \"\"\"\n Do not return anything, modify nums1 in-place instead.\n \"\"\"\n # # Method 1: copy nums2 into the tail of nums1, then sort\n # if nums2:\n # nums1[-n:] = nums2[:]\n # nums1.sort()\n\n # # Method 2: forward two-pointer merge using a copy of nums1\n # nums = nums1[:m]\n # p1, p2 = 0, 0\n # idx = 0\n # while p1 < m and p2 < n:\n # if nums[p1] < nums2[p2]:\n # nums1[idx] = nums[p1]\n # p1 += 1\n # else:\n # nums1[idx] = nums2[p2]\n # p2 += 1\n # idx += 1\n \n # if p1 < m:\n # nums1[p1+p2: ] = nums[p1:]\n # if p2 < n:\n # nums1[p1+p2: ] = nums2[p2:]\n\n # Method 3: backward two-pointer merge, fully in place\n p1, p2 = m-1, n-1\n idx = m + n - 1\n\n while p1 >= 0 and p2 >= 0:\n if nums1[p1] > nums2[p2]:\n nums1[idx] = nums1[p1]\n p1 -= 1\n else:\n nums1[idx] = nums2[p2]\n p2 -= 1\n idx -= 1\n\n nums1[: p2+1] = nums2[: p2+1]\n# @lc code=end\n\n","sub_path":"Week_01/本周作业/简单/88.合并两个有序数组.py","file_name":"88.合并两个有序数组.py","file_ext":"py","file_size_in_byte":1221,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"430594800","text":"#!/usr/bin/python\n\nimport os\nimport sys\n\nif __name__ == '__main__':\n\ttarget_dir = sys.argv[1]\n\n\tlist_dir = os.listdir(target_dir)\n\topen_output_file = open(os.path.join(target_dir, 'output.txt'), 'w')\n\tfor files in list_dir :\n\t\tfir_col = files\n\t\topen_files = open(target_dir+'/'+files, 'r')\n\t\tlines = open_files.readlines()\n\t\tlong_line = ''\n\t\tfor line in lines:\n\t\t\tlong_line += line.strip()\n\n\t\topen_output_file.write(fir_col +'\\t' + long_line + '\\n')\n\topen_output_file.close()\n\n","sub_path":"merge_files.py","file_name":"merge_files.py","file_ext":"py","file_size_in_byte":477,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"271169452","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\nimport filebrowser.fields\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ]\n\n operations = [\n migrations.CreateModel(\n name='Slide',\n fields=[\n ('id', models.AutoField(serialize=False, auto_created=True, primary_key=True, verbose_name='ID')),\n ('photo', filebrowser.fields.FileBrowseField(max_length=255, verbose_name='Графический файл')),\n ('photo_order', models.IntegerField(verbose_name='Индекс сортировки')),\n ],\n options={\n },\n bases=(models.Model,),\n ),\n ]\n","sub_path":"apps/first_page/migrations/0001_initial.py","file_name":"0001_initial.py","file_ext":"py","file_size_in_byte":750,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"64830826","text":"\n\n#1. 
Write a program in Python to find out the characters in a string\n# which are uppercase using list comprehension.\nx = [letter for letter in \"PaVaN\" if letter.isupper() ]\nprint(x) \n\n\n#2. Write a program to construct a dictionary from the two lists containing the names of students and\n# their corresponding subjects. The dictionary should map the students with their respective subjects.\n# Let’s see how to do this using for loops and dictionary comprehension.\n# students = ['Smit', 'Jaya', 'Rayyan'] subjects = ['CSE', 'Networking', 'Operating System'] \n# Expected output: {‘Smit’ : ’CSE’ , ’Jaya’ : ’Networking’ , ’Rayyan’ : ’Operating System’}\nstudents = ['Smit', 'Jaya', 'Rayyan']\nsubjects = ['CSE', 'Networking', 'Operating System'] \noutput = dict(zip(students,subjects))\nprint(output)\n\n#4. Write a program in Python using generators to reverse the string. Input: String = “Consultadd Training”\nstring = \"Consultadd Training\"\ndef gen(string):\n s1 = \"\"\n for c in string:\n s1 = c + s1\n yield s1\n\nreversedLetter = gen(string)\nfor i in range(len(string)):\n print(next(reversedLetter))\n\n\n#5. Write an example on decorators.\n\ndef myDecorator(functionName):\n def innerFunc():\n print(\"Decorator Started:\")\n functionName()\n print(\"Decorator ended\") \n return innerFunc\n \n@myDecorator \ndef myfunction():\n print(\"This function is using decorator.\")\n\n","sub_path":"taskSix.py","file_name":"taskSix.py","file_ext":"py","file_size_in_byte":1432,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"634436350","text":"#!/usr/bin/env python3\n\nimport sys\nimport csv\n\nout = csv.writer(sys.stdout)\nout.writerow(['geoid', 'variable', 'value'])\n\nfor f in sys.argv[1:]:\n # progress tracking\n sys.stderr.write(f)\n sys.stderr.write('\\n')\n sys.stderr.flush()\n \n reader = csv.DictReader(open(f, 'r'))\n\n for row in reader:\n geoid = row['GEOID']\n keys = list(set(row.keys()) - set(['GEOID']))\n\n for k in keys:\n out.writerow([geoid, k, row[k]])\n\n\n","sub_path":"acs/bin/pivot.py","file_name":"pivot.py","file_ext":"py","file_size_in_byte":468,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"555840129","text":"from django.db import models\n\n# Create your models here.\n\nclass Comment(models.Model):\n name = models.CharField(max_length=100) # name of the commenting user\n email = models.EmailField(max_length=255) # email address\n url = models.URLField(blank=True) # personal website\n text = models.TextField() # comment body\n created_time = models.DateTimeField(auto_now_add=True) # time the comment was created;\n # auto_now_add: when the comment is saved to the database, created_time is automatically set to the current time\n\n post = models.ForeignKey('blog.Post') # each comment is attached to one article (Post)\n\n def __str__(self):\n return self.text[:20]\n","sub_path":"blogproject/comments/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":637,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"120957301","text":"# coding=utf-8\nimport os\nimport re\nimport shutil\nimport subprocess\nimport urllib\nimport urllib2\n\nimport pymysql\nimport scrapy\nfrom PIL import Image\nfrom scrapy.loader import ItemLoader\nfrom scrapy.loader.processors import MapCompose\nfrom w3lib.html import replace_escape_chars\nfrom scrapy.conf import settings\nfrom resthome.items import ResthomeItem\n\nfrom resthome.MysqlFactory import MysqlFactory\n\n\nclass ResthomeSpider(scrapy.Spider):\n name = \"ResthomeSpider\"\n start_urls = [\n \"http://www.yanglao.com.cn/resthome\"\n ]\n 
url_base = \"http://www.yanglao.com.cn\"\n url_login = url_base + \"/user/login\"\n first = True\n\n # def process_login(self, response):\n # img_src = response.css(\"#yw0::attr(src)\").extract()\n # csrf = response.css(\"#userForm>input[name='YII_CSRF_TOKEN']::attr(value)\").extract()[0]\n #\n # if os.path.isfile(\"captcha.png\"):\n # os.remove(\"captcha.png\")\n #\n # urllib.urlretrieve(self.url_base + img_src[0], \"captcha.png\")\n #\n # opener = urllib2.build_opener()\n # opener.addheaders.append(('Cookie', response.headers['Set-Cookie']))\n #\n # username = settings.get('USERNAME')\n # password = settings.get('PASSWORD')\n # # Image.open(\"captcha.png\").show()\n # Image.open(opener.open(self.url_base + img_src[0]).fp).show()\n # captcha = raw_input(\"验证码:\")\n #\n # return scrapy.FormRequest.from_response(response,\n # formxpath='//form[@id=\\'userForm\\']',\n # formdata={\n # 'UserLoginForm[name]': username,\n # 'UserLoginForm[password]': password,\n # 'UserLoginForm[verifyCode]': captcha,\n # 'UserLoginForm[rememberMe]': '0',\n # 'yt0': u'登录',\n # 'YII_CSRF_TOKEN': csrf\n # },\n # callback=self.check_login_response)\n #\n # def check_login_response(self, response):\n # print response.body\n # pass\n\n # def parse_next_page(self, response):\n def parse(self, response):\n if self.first:\n self.clear_db_and_files()\n self.first = False\n\n next_page = response.css(\"#yw0 div.pager ul.pages li[class='']\").xpath(\n u\"a[contains(.,'下一页')]/@href\").extract()\n for item in response.css(\"#yw0 ul li a.pic\").xpath(\"@href\"):\n yield scrapy.Request(self.url_base + item.extract(), callback=self.parse_rest_home)\n if next_page:\n yield scrapy.Request(self.url_base + next_page[0], callback=self.parse)\n\n # def parse(self, response):\n # yield scrapy.Request(self.url_login, callback=self.process_login)\n\n def parse_rest_home(self, response):\n item = ResthomeItem()\n item['name'] = response.css(\".inst-summary h1::text\").extract()\n pcr = \"\".join(response.xpath(\n u\"//div[@class='base-info']/div[@class='cont']/ul/li[contains(.,'所在地区')]/text()\").extract()).split(\"-\")\n item['province'] = pcr[0].strip()\n item['city'] = pcr[1].strip()\n item['region'] = pcr[2].strip()\n item['address'] = response.xpath(\n u\"//div[@class='contact-info']/div[@class='cont']/ul/li[re:test(.,'.*地.*址.*')]/text()\").extract()\n item['nature'] = response.xpath(\n u\"//div[@class='base-info']/div[@class='cont']/ul/li[contains(.,'机构性质')]/text()\").extract()\n charge_range = \"\".join(response.xpath(\n u\"//div[@class='base-info']/div[@class='cont']/ul/li[contains(.,'收费区间')]/text()\").extract()).split(\n \"-\")\n item['min_charge'] = charge_range[0].strip()\n item['max_charge'] = charge_range[1].strip()\n item['beds_count'] = response.xpath(\n u\"//div[@class='base-info']/div[@class='cont']/ul/li[contains(.,'床位数')]/text()\").extract()\n item['acceptable_person'] = response.xpath(\n u\"//div[@class='base-info']/div[@class='cont']/ul/li[contains(.,'收住对象')]/text()\").extract()\n item['extra_service'] = response.xpath(\n u\"//div[@class='base-info']/div[@class='cont']/ul/li[contains(.,'特色服务')]/text()\").extract()\n item['type'] = response.xpath(\n u\"//div[@class='base-info']/div[@class='cont']/ul/li[contains(.,'机构类型')]/text()\").extract()\n item['floor_area'] = response.xpath(\n u\"//div[@class='base-info']/div[@class='cont']/ul/li[contains(.,'占地面积')]/text()\").extract()\n item['founding_time'] = response.xpath(\n u\"//div[@class='base-info']/div[@class='cont']/ul/li[contains(.,'成立时间')]/text()\").extract()\n 
item['contract'] = response.xpath(\n u\"//div[@class='contact-info']/div[@class='cont']/ul/li[re:test(.,'.*联.*系.*人.*')]/text()\").extract()\n item['phone'] = response.xpath(u\"//div[@class='inst-summary']/ul/li[re:test(.,'.*电.*话.*')]/text()\").extract()\n item['postcode'] = response.xpath(\n u\"//div[@class='contact-info']/div[@class='cont']/ul/li[re:test(.,'.*邮.*编.*')]/text()\").extract()\n\n item['email'] = response.xpath(\n u\"//div[@class='contact-info']/div[@class='cont']/ul/li[re:test(.,'.*邮.*箱.*')]/text()\").extract()\n item['website'] = response.xpath(\n u\"//div[@class='contact-info']/div[@class='cont']/ul/li[re:test(.,'.*网.*址.*')]//a/@href\").extract()\n item['introduce'] = response.css(\"div.inst-intro>div.cont\").extract()\n item['service_content'] = response.css(\"div.service-content>div.cont\").extract()\n item['charge_standard'] = response.css(\"div.inst-charge>div.cont\").extract()\n item['facility'] = response.css(\"div.facilities>div.cont\").extract()\n item['facility_pic'] = response.css(\"div.facilities>div.cont img::attr(src)\").extract()\n item['room_note'] = response.css(\"div.inst-notes>div.cont\").extract()\n item['pic_thumb'] = response.css(\"div.inst-photos>div.cont>ul>li>a::attr(href)\").extract()\n item['pic'] = []\n for src in item['pic_thumb']:\n yield scrapy.Request(self.url_base + src, self.parse_big_pic, meta={\"item\": item})\n\n def parse_big_pic(self, response):\n item = response.meta['item']\n src = response.css(\"#bigpics>img::attr(src)\").extract()[0]\n item['pic'].append(re.sub(\"/water_\", \"/\", src))\n if len(item['pic']) == len(item['pic_thumb']):\n return item\n\n def clear_db_and_files(self):\n conn = MysqlFactory.createConn(settings)\n cursor = conn.cursor()\n cursor.execute(\"\"\"\n SET FOREIGN_KEY_CHECKS = 0;\n truncate table t_resthome_pic;\n truncate table t_resthome;\n truncate table t_pic;\n SET FOREIGN_KEY_CHECKS = 1;\"\"\")\n conn.commit()\n conn.close()\n shutil.rmtree(settings.get('IMAGES_STORE'), ignore_errors=True)\n pass\n","sub_path":"resthome/resthome/spiders/resthomeSpider.py","file_name":"resthomeSpider.py","file_ext":"py","file_size_in_byte":7273,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"231461394","text":"include_rules = [\n \"+content/public\",\n\n \"+crypto\",\n \"+net\",\n \"+sandbox\",\n \"+skia\",\n \"+ui\",\n \"+v8\",\n \"+webkit\",\n\n # Allow inclusion of third-party code.\n \"+third_party/skia\",\n \"+third_party/WebKit/Source/Platform/chromium\",\n \"+third_party/WebKit/Source/WebKit/chromium\",\n\n # Files generated during Crosswalk build.\n \"+grit/xwalk_resources.h\",\n]\n\nvars = {\n}\n\ndeps = {\n}\n\nhooks = [\n {\n # Fetch Crosswalk dependencies.\n \"pattern\": \".\",\n \"action\": [\"python\", \"src/xwalk/tools/fetch_deps.py\", \"-v\"],\n },\n {\n # A change to a .gyp, .gypi, or to GYP itself should run the generator.\n \"pattern\": \".\",\n \"action\": [\"python\", \"src/xwalk/gyp_xwalk\"],\n }\n]\n","sub_path":"DEPS","file_name":"DEPS","file_ext":"","file_size_in_byte":678,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"6280578","text":"import numpy as np\nfrom lungmask import mask as segmentator\nimport cv2\nfrom diplom_backend.service.preprocess import resize_image_ar\n\ndef get_volume_lesion(mask_lung, mask_lesion):\n\n # mask_lung = [cv2.resize(slice, (224, 224)) for slice in mask_lung]\n # mask_lung = np.array(mask_lung)\n mask_lung = resize_image_ar(mask_lung)\n\n 
print(mask_lung.shape, mask_lesion.shape)\n unique_value_lung = np.unique(mask_lung, return_counts=True)[1]\n count_pixel_lung = unique_value_lung[1] + unique_value_lung[2]\n print('count_pixel_lung = ', count_pixel_lung, 'unique_value_lung = ', unique_value_lung)\n\n uniq_val_mask = np.unique(mask_lesion, return_counts=True)[1]\n count_pixel_lesion = uniq_val_mask[1]\n print('count_pixel_lesion = ', count_pixel_lesion, 'uniq_val_mask = ', uniq_val_mask)\n\n\n cols_by_2 = int(mask_lesion.shape[2] / 2)\n count_pixel_left_lesion = np.unique(mask_lesion[:, :, 0:cols_by_2], return_counts=True)[1][1]\n count_pixel_right_lesion = np.unique(mask_lesion[:, :, cols_by_2:], return_counts=True)[1][1]\n print('count_pixel_left_lesion =', count_pixel_left_lesion, 'count_pixel_right_lesion = ', count_pixel_right_lesion)\n\n\n # volume = (count_pixel_lesion / count_pixel_lung) * 100\n # print('volume = ', volume)\n\n\n volume_left = (count_pixel_left_lesion / unique_value_lung[1]) * 100\n volume_right = (count_pixel_right_lesion / unique_value_lung[2]) * 100\n volume = volume_left + volume_right\n\n\n return {\n 'lung': np.round(volume, 5),\n 'left': np.round(volume_left, 5),\n 'right': np.round(volume_right, 5)\n }\n\ndef segmentation_lung(ct):\n segmentation = segmentator.apply(ct)\n return segmentation\n","sub_path":"backend/diplom_server/diplom_backend/service/segment_lung.py","file_name":"segment_lung.py","file_ext":"py","file_size_in_byte":1741,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"329361309","text":"'''Train CIFAR10 with PyTorch.'''\nfrom __future__ import print_function\n\nimport torch\nimport torch.nn as nn\nimport torch.optim as optim\nimport torch.nn.functional as F\nimport torch.backends.cudnn as cudnn\nimport torchvision\nimport torchvision.transforms as transforms\n\nimport matplotlib\nmatplotlib.use(\"pdf\")\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport os\nimport argparse\nimport sys\nimport models\nimport importlib\nimport logging\nimport re\nfrom datetime import datetime\n\nparser = argparse.ArgumentParser(description='PyTorch CIFAR10 Training')\nparser.add_argument('--optimizer', '--opt', default='sgd', type=str,\n help='sgd variants (sgd, adam, amsgrad, adagrad, adadelta, rmsprop)')\nparser.add_argument('--lr', default=0.1, type=float, help='learning rate')\nparser.add_argument('--net-type', default='CifarResNetBasic', type=str,\n help='the type of net (ResNetBasic or ResNetBottleneck or CifarResNetBasic or CifarPlainNetBasic)')\nparser.add_argument('--num-blocks', default='3-3-3', type=str, help='starting net')\nparser.add_argument('--batch-size', default=128, type=int, help='batch size')\nparser.add_argument('--epochs', default=200, type=int, help='the number of epochs')\nparser.add_argument('--print-freq', default=3910, type=int, help='the frequency to print')\nparser.add_argument('--resume', '-r', action='store_true', help='resume from checkpoint')\nargs = parser.parse_args()\n\nnet_arch = list(map(int, args.num_blocks.split('-')))\nsave_path = os.path.join('./results', datetime.now().strftime('%Y-%m-%d_%H-%M-%S'))\nif not os.path.exists(save_path):\n os.makedirs(save_path)\nelse:\n raise OSError('Directory {%s} exists. Use a new one.' 
% save_path)\nlogging.basicConfig(filename=os.path.join(save_path, 'log.txt'), level=logging.INFO)\nlogger = logging.getLogger('main')\nlogger.addHandler(logging.StreamHandler())\nlogger.info(\"Saving to %s\", save_path)\nlogger.info(\"Running arguments: %s\", args)\nbest_acc = 0 # best test accuracy\ntrained_batchs = 0\n\n\ndef decay_learning_rate(optimizer):\n \"\"\"Sets the learning rate to the initial LR decayed by 10\"\"\"\n for param_group in optimizer.param_groups:\n param_group['lr'] = param_group['lr'] * 0.1\n\n# Training\ndef train(epoch, net, loader, optimizer, criterion, device='cuda'):\n logger.info('\\nEpoch: %d' % epoch)\n net.train()\n train_loss = 0\n correct = 0\n total = 0\n global trained_batchs\n for batch_idx, (inputs, targets) in enumerate(loader):\n inputs, targets = inputs.to(device), targets.to(device)\n optimizer.zero_grad()\n outputs = net(inputs)\n loss = criterion(outputs, targets)\n loss.backward()\n optimizer.step()\n\n train_loss += loss.item()\n _, predicted = outputs.max(1)\n total += targets.size(0)\n correct += predicted.eq(targets).sum().item()\n\n if 0 == batch_idx % 100 or batch_idx == len(loader) - 1:\n logger.info('(%d/%d) ==> Loss: %.3f | Acc: %.3f%% (%d/%d)'\n % (\n batch_idx + 1, len(loader), train_loss / (batch_idx + 1), 100. * correct / total, correct,\n total))\n trained_batchs += 1\n return train_loss / len(loader), 100. * correct / total\n\ndef test(epoch, net, loader, criterion, device='cuda'):\n global best_acc\n net.eval()\n test_loss = 0\n correct = 0\n total = 0\n with torch.no_grad():\n for batch_idx, (inputs, targets) in enumerate(loader):\n inputs, targets = inputs.to(device), targets.to(device)\n outputs = net(inputs)\n loss = criterion(outputs, targets)\n\n test_loss += loss.item()\n _, predicted = outputs.max(1)\n total += targets.size(0)\n correct += predicted.eq(targets).sum().item()\n\n if 0 == batch_idx % 100 or batch_idx == len(loader) - 1:\n logger.info('(%d/%d) ==> Loss: %.3f | Acc: %.3f%% (%d/%d)'\n % (batch_idx+1, len(loader), test_loss/(batch_idx+1), 100.*correct/total, correct, total))\n\n # Save checkpoint.\n acc = 100.*correct/total\n if acc > best_acc:\n logger.info('Saving the best model %.3f @ %d ...' 
% (acc, epoch))\n state = {\n 'net': net.state_dict(),\n 'acc': acc,\n 'epoch': epoch,\n }\n if not os.path.isdir('checkpoint'):\n os.mkdir('checkpoint')\n torch.save(state, './checkpoint/ckpt.t7')\n best_acc = acc\n\n return test_loss / len(loader), acc\n\ndef plot_curves(curves, save_path):\n # plotting\n clr1 = (0.5, 0., 0.)\n clr2 = (0.0, 0.5, 0.)\n fig, ax1 = plt.subplots()\n ax2 = ax1.twinx()\n ax1.set_xlabel('epoch')\n ax1.set_ylabel('Loss', color=clr1)\n ax1.tick_params(axis='y', colors=clr1)\n ax2.set_ylabel('Accuracy (%)', color=clr2)\n ax2.tick_params(axis='y', colors=clr2)\n\n markersize = 12\n ax1.semilogy(curves[:, 0], curves[:, 1], '--', color=clr1,\n markersize=markersize)\n ax1.semilogy(curves[:, 0], curves[:, 3], '-', color=clr1,\n markersize=markersize)\n ax2.plot(curves[:, 0], curves[:, 2], '--', color=clr2,\n markersize=markersize)\n ax2.plot(curves[:, 0], curves[:, 4], '-', color=clr2,\n markersize=markersize)\n\n # ax2.set_ylim(bottom=40, top=100)\n ax1.legend(('Train loss', 'Val loss'), loc='lower right')\n ax2.legend(('Train accuracy', 'Val accuracy', 'Val moving avg'), loc='lower left')\n fig.savefig(os.path.join(save_path, 'curves-vs-epochs.pdf'))\n\ndef get_optimizer(net):\n wd = 5e-4\n if 'sgd' == args.optimizer:\n optimizer = optim.SGD(net.parameters(), lr=args.lr, momentum=0.9, weight_decay=wd)\n elif 'adam' == args.optimizer:\n optimizer = optim.Adam(net.parameters(), lr=args.lr, weight_decay=wd)\n elif 'amsgrad' == args.optimizer:\n optimizer = optim.Adam(net.parameters(), lr=args.lr, weight_decay=wd, amsgrad=True)\n elif 'adagrad' == args.optimizer:\n optimizer = optim.Adagrad(net.parameters(), lr=args.lr, weight_decay=wd)\n elif 'adadelta' == args.optimizer:\n optimizer = optim.Adadelta(net.parameters(), weight_decay=wd)\n elif 'rmsprop' == args.optimizer:\n optimizer = optim.RMSprop(net.parameters(), lr=args.lr, alpha=0.99, weight_decay=wd)\n else:\n logger.fatal('Unknown --optimizer')\n raise ValueError('Unknown --optimizer')\n return optimizer\n\ndef main():\n device = 'cuda' # if torch.cuda.is_available() else 'cpu'\n start_epoch = 0 # start from epoch 0 or last checkpoint epoch\n\n # Data\n logger.info('==> Preparing data..')\n transform_train = transforms.Compose([\n transforms.RandomCrop(32, padding=4),\n transforms.RandomHorizontalFlip(),\n transforms.ToTensor(),\n transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),\n ])\n\n transform_test = transforms.Compose([\n transforms.ToTensor(),\n transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),\n ])\n\n trainset = torchvision.datasets.CIFAR10(root='./data', train=True, download=True, transform=transform_train)\n trainloader = torch.utils.data.DataLoader(trainset, batch_size=args.batch_size, shuffle=True, num_workers=2)\n\n testset = torchvision.datasets.CIFAR10(root='./data', train=False, download=True, transform=transform_test)\n testloader = torch.utils.data.DataLoader(testset, batch_size=100, shuffle=False, num_workers=2)\n\n classes = ('plane', 'car', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck')\n # Model\n logger.info('==> Building model..')\n net = getattr(models, args.net_type)(net_arch)\n # net = VGG('VGG19')\n # net = ResNet18()\n # net = CifarResNetBasic([1,1,1])\n # net = PreActResNet18()\n # net = GoogLeNet()\n # net = DenseNet121()\n # net = ResNeXt29_2x64d()\n # net = MobileNet()\n # net = MobileNetV2()\n # net = DPN92()\n # net = ShuffleNetG2()\n # net = SENet18()\n\n net = net.to(device)\n if device == 'cuda':\n net = 
torch.nn.DataParallel(net)\n cudnn.benchmark = True\n\n if args.resume:\n # Load checkpoint.\n logger.info('==> Resuming from checkpoint..')\n assert os.path.isdir('checkpoint'), 'Error: no checkpoint directory found!'\n checkpoint = torch.load('./checkpoint/ckpt.t7')\n net.load_state_dict(checkpoint['net'])\n best_acc = checkpoint['acc']\n start_epoch = checkpoint['epoch']\n\n criterion = nn.CrossEntropyLoss()\n optimizer = get_optimizer(net)\n\n curves = np.zeros((args.epochs, 5))\n for epoch in range(start_epoch, start_epoch+args.epochs):\n if epoch == args.epochs / 2 or epoch == args.epochs * 3 / 4:\n logger.info('======> decaying learning rate')\n decay_learning_rate(optimizer)\n\n curves[epoch, 0] = epoch\n curves[epoch, 1], curves[epoch, 2] = train(epoch, net, trainloader, optimizer, criterion)\n curves[epoch, 3], curves[epoch, 4] = test(epoch, net, testloader, criterion)\n if epoch % 5 == 0:\n plot_curves(curves[:epoch+1, :], save_path)\n\n plot_curves(curves, save_path)\n logger.info('curves: \\n {}'.format(np.array_str(curves)))\n np.savetxt(os.path.join(save_path, 'curves.dat'), curves)\n\n\n\nif __name__ == '__main__':\n main()\n logger.info('Done!')","sub_path":"cifar10/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":9365,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"149368201","text":"import pandas as pd\nimport numpy as np\nfrom geopy.distance import geodesic\n\n\ndef closest_city(epicenter):\n df=pd.read_csv('static/files/Mexican_cities.csv')\n \n distances = []\n lat = df['Lat']\n lon = df['Lon']\n \n for i in range(len(df)):\n distances.append(geodesic(epicenter, (lat[i],lon[i])).km)\n\n dist = int(round(np.array(distances).min()))\n city = df['City'][np.array(distances).argmin()]\n \n return city, dist","sub_path":"earthquake/static/python_scripts/closest_city.py","file_name":"closest_city.py","file_ext":"py","file_size_in_byte":452,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"501950956","text":"import os\nimport sys\nfrom typing import List\n\nfrom javalang.tree import MethodDeclaration, ClassDeclaration\nimport traceback\n\ncwd = os.path.dirname(os.path.abspath(__file__))\nsys.path.insert(1, os.path.join(cwd, '..', 'perfec2'))\n\nimport perfec2\n\nrepos_root = '/Users/sroman/repos'\nrepos = {\n # 'address-api': \"A REST API for operations on Addresses, Address Types and their relationships in AMS. Addresses are used to geographically locate a Person or Organization or for operations on identification purposes. Each Address has a mandatory Address Type.\",\n # 'ams-search-api': \"A REST API for operations on searching AMS entities by specifying one or more attribute values; e.g.: \"\n # \"\\\"all Persons with last name like 'Johns%'\\\" or \\\"all franchises with manufacturer 'Ford'\\\"\\n\"\n # 'apppolicy-api': \"A REST API for operations on Application Policies and their relationships in AMS. An AppPolicy grants a user access to\\n\"\n # \" an online marketplace (such as m.com, OVE, or Simulcast) on behalf of an Organization (dealership). Can also represent\\n\n # \"other application-level AMS permissions\"\n # 'contact-api': \"A REST API for operations on Contacts, Contact Types and their relationships in AMS. Contacts are lines of communication\\n\"\n # \" to Organizations and Persons such as telephone numbers, email addresses, etc. Each Contact has a mandatory Contact Type. 
\\n\"\n # 'identifier-api': \"A REST API for operations on Identifiers, Identifier Types and their relationships in AMS. Identifiers are used\\n\"\n # \" in conjunction with an Identifier Type to uniquely identify an Organization or Person. An entity may have many Identifiers \\n\"\n # \"but never of the same Identifier Type. Pairs of Identifier/IdentifierType are always unique.\"\n # 'issue-api': \"A REST API for operations on Issues and their relationships in AMS. Issues track actions required of an Organization or Person \\n\"\n # \"such as the submission of legal or identification documents or any condition preventing a person from buying or selling at \\n\"\n # \"auction or online.\"\n # 'organization-api': \"A REST API for operations on Organizations and their relationships in AMS. Organizations (dealerships), may be \\n\"\n # \"related to Organization Groups, Issues, Contacts and can grant Roles to Persons.\",\n # 'organizationgroup-api': \"A REST API for operations on OrganizationGroups and their relationships in AMS. Organization Groups (franchise groups)\\n\"\n # \"may be related to Persons, Organizations, and Application Policies. A relationship between an Organization Group\\n\"\n # \"and Organization (OrganizationGroupMember) may have its own attributes.\",\n 'person-api': \"A REST API for operations on Persons and their relationships in AMS. Persons may be related \\n\"\n \"to Identifiers, Contacts, Issues, Addresses, and can have Roles assigned to them by Organizations.\",\n # 'role-api': \"A REST API for operations on Roles and their relationships in AMS. Roles grant permissions to Persons on behalf\"\n # \" of Organizations.\",\n # 'tag-api': \"A REST API for operations on Tags and their relationships in AMS. Tags may be related \\n\"\n # \"to Organizations to help establish hierarchies or add other metadata.\",\n # 'authorization-api': \"An API for verifying authorization with AMS.\",\n # 'authentication-api': \"An API for authenticating with AMS.\"\n}\n\n\ndef props_for_method(m: MethodDeclaration) -> dict:\n if perfec2.util.is_getbyuid_web_endpoint(m):\n print(f'{m.name} is_getbyuid_web_endpoint')\n return {\n 'nickname': '\"Retrieve xxx\"',\n 'value': '\"Retrieve an existing xxx\"',\n 'notes': '\"Retrieves an existing xxx by its uid. \"',\n 'response': 'Object.class',\n 'code': 200\n }\n elif perfec2.util.is_deletebyuid_web_endpoint(m):\n print(f'{m.name} is_deletebyuid_web_endpoint')\n return {\n 'nickname': '\"Delete xxx\"',\n 'value': '\"Delete an existing xxx\"',\n 'notes': '\"Deletes an existing xxx by its uid. \"',\n 'response': 'Void.class',\n 'code': 204\n }\n elif perfec2.util.is_updatebyuid_web_endpoint(m):\n print(f'{m.name} is_updatebyuid_web_endpoint')\n return {\n 'nickname': '\"Update xxx\"',\n 'value': '\"Update an existing xxx\"',\n 'notes': '\"Updates an existing xxx by its uid with the supplied properties. Unknown properties are ignored.\"',\n 'response': 'Object.class',\n 'code': 200\n }\n elif perfec2.util.is_create_web_endpoint(m):\n print(f'{m.name} is_create_web_endpoint')\n return {\n 'nickname': '\"Create xxx\"',\n 'value': '\"Create a new xxx entity\"',\n 'notes': '\"Creates a new xxx with the supplied properties. Unknown properties are ignored. 
\"',\n            'response': 'Object.class',\n            'code': 201\n        }\n    elif perfec2.util.is_associate_web_endpoint(m):\n        print(f'{m.name} is_associate_web_endpoint')\n        return {\n            'nickname': '\"Associate xxx and yyy\"',\n            'value': '\"Associate existing xxx and yyy\"',\n            'notes': '\"Creates a relationship between existing xxx and yyy. \"',\n            'response': 'Object.class',\n            'code': 200\n        }\n    elif perfec2.util.is_disassociate_web_endpoint(m):\n        print(f'{m.name} is_disassociate_web_endpoint')\n        return {\n            'nickname': '\"Disassociate xxx and yyy\"',\n            'value': '\"Disassociate existing xxx and yyy\"',\n            'notes': '\"Deletes the relationship between existing xxx and yyy. \"',\n            'response': 'Void.class',\n            'code': 204\n        }\n    elif perfec2.util.is_create_and_associate_web_endpoint(m):\n        print(f'{m.name} is_create_and_associate_web_endpoint')\n        return {\n            'nickname': '\"Create xxx and associate yyy\"',\n            'value': '\"Create xxx and associate with an existing yyy\"',\n            'notes': '\"Creates a new xxx and associates it with an existing yyy. \"',\n            'response': 'Object.class',\n            'code': 201\n        }\n    elif perfec2.util.is_get_associations_web_endpoint(m):\n        print(f'{m.name} is_get_associations_web_endpoint')\n        return {\n            'nickname': '\"Retrieve xxx associated yyy\"',\n            'value': '\"Retrieve existing yyys associated with xxx\"',\n            'notes': '\"Retrieves existing yyys associated with existing xxx. \"',\n            'response': 'Object.class',\n            'code': 200\n        }\n    elif perfec2.util.is_get_type_by_key_web_endpoint(m):\n        print(f'{m.name} is_get_type_by_key_web_endpoint')\n        return {\n            'nickname': '\"Retrieve xxx Type by key\"',\n            'value': '\"Retrieve existing xxx Type by key\"',\n            'notes': '\"Retrieves existing xxx Type with a given key. \"',\n            'response': 'Object.class',\n            'code': 200\n        }\n    elif perfec2.util.is_delete_type_by_key_web_endpoint(m):\n        print(f'{m.name} is_delete_type_by_key_web_endpoint')\n        return {\n            'nickname': '\"Delete xxx Type by key\"',\n            'value': '\"Delete existing xxx Type by key\"',\n            'notes': '\"Deletes existing xxx Type with a given key. \"',\n            'response': 'Object.class',\n            'code': 204\n        }\n    elif perfec2.util.is_update_type_by_key_web_endpoint(m):\n        print(f'{m.name} is_update_type_by_key_web_endpoint')\n        return {\n            'nickname': '\"Update xxx Type by key\"',\n            'value': '\"Update existing xxx Type by key\"',\n            'notes': '\"Updates existing xxx Type with a given key. \"',\n            'response': 'Object.class',\n            'code': 200\n        }\n    elif perfec2.util.is_create_type_by_key_web_endpoint(m):\n        print(f'{m.name} is_create_type_by_key_web_endpoint')\n        return {\n            'nickname': '\"Create new xxx Type by key\"',\n            'value': '\"Create a new xxx Type by key\"',\n            'notes': '\"Creates a new xxx Type with a given key. 
\"',\n 'response': 'Object.class',\n 'code': 201\n }\n elif perfec2.util.is_associate_type_with_entity_web_endpoint(m):\n print(f'{m.name} is_associate_type_with_entity_web_endpoint')\n return {\n 'nickname': '\"Associate xxx Type with xxx\"',\n 'value': '\"Associate an existing xxx Type with an existing xxx\"',\n 'notes': '\"Associates an existing xxx Type with an existing xxx.\"',\n 'response': 'Object.class',\n 'code': 200\n }\n elif perfec2.util.is_disassociate_type_with_entity_web_endpoint(m):\n print(f'{m.name} is_disassociate_type_with_entity_web_endpoint')\n return {\n 'nickname': '\"Disassociate xxx Type from xxx\"',\n 'value': '\"Disassociate an existing xxx Type from an existing xxx\"',\n 'notes': '\"Disassociates an existing xxx Type from an existing xxx.\"',\n 'response': 'Object.class',\n 'code': 204\n }\n elif perfec2.util.is_get_type_associations_web_endpoint(m):\n print(f'{m.name} is_get_type_associations_web_endpoint')\n return {\n 'nickname': '\"Retrieve xxx by xxx Type key\"',\n 'value': '\"Retrieve existing xxxs of given xxx Type by key\"',\n 'notes': '\"Retrieves all xxx associated with existing xxx Type by key. \"',\n 'response': 'Object.class',\n 'code': 200\n }\n else:\n print(f'{m.name} is unknown <<<========================')\n return {\n 'nickname': '\"\"',\n 'value': '\"\"',\n 'notes': '\"\"',\n 'response': 'Object.class',\n 'code': 999\n }\n\n\ndef request_mapping_props(m: MethodDeclaration) -> dict:\n if perfec2.util.method_has_annotation(m, 'PostMapping'):\n post_mapping = perfec2.util.get_method_annotation(m, 'PostMapping')\n path = perfec2.util.get_annotation_property(post_mapping)\n return {'value': path, 'method': 'RequestMethod.POST'}\n\n if perfec2.util.method_has_annotation(m, 'GetMapping'):\n post_mapping = perfec2.util.get_method_annotation(m, 'GetMapping')\n path = perfec2.util.get_annotation_property(post_mapping)\n return {'value': path, 'method': 'RequestMethod.GET'}\n\n if perfec2.util.method_has_annotation(m, 'PutMapping'):\n post_mapping = perfec2.util.get_method_annotation(m, 'PutMapping')\n path = perfec2.util.get_annotation_property(post_mapping)\n return {'value': path, 'method': 'RequestMethod.PUT'}\n\n\ndef use_RequestMapping_only(lines: [str]) -> [str]:\n perfec2.replace_annotation_on_members(perfec2.this_clazz(), 'methods', lines,\n 'PostMapping', 'RequestMapping', request_mapping_props, oneline=True)\n\ndef use_RestController_only(lines: [str]) -> [str]:\n if perfec2.get_class_annotation(perfec2.this_clazz(), 'Controller'):\n perfec2.remove_node_annotation(perfec2.this_clazz(), 'Controller', lines)\n perfec2.add_node_annotation(perfec2.this_clazz(), 'RestController', lines)\n\n\ndef add_link_to_readme():\n # todo\n pass\n\n\n# fixme some methods are using @PutMapping or @GetMapping instead of @RequestMapping\ndef refactor_controller(lines: List[str]) -> List[str]:\n \"\"\"\n within this method we have access to the lines of the file being processed\n and the compilation unit itself\n :param lines: provided by perfec2.process_file()\n :return:\n \"\"\"\n clazz = perfec2.this_clazz()\n print(f'annotating {clazz.name}')\n use_RestController_only(lines)\n use_RequestMapping_only(lines)\n do_swagger_annotations(lines)\n class_RequestMapping_produces_consumes(lines)\n\n return lines\n\n\ndef do_swagger_annotations(lines):\n perfec2.annotate_methods_having_annotation(perfec2.this_clazz(), lines, 'RequestMapping', 'ApiOperation', props_for_method)\n perfec2.add_import('io.swagger.annotations.ApiOperation', lines)\n\n\ndef 
class_RequestMapping_produces_consumes(lines):\n # fix class-level annotation\n try:\n req_mapping_anno = perfec2.get_class_annotation(perfec2.this_clazz(), 'RequestMapping')\n path = perfec2.util.get_annotation_property(req_mapping_anno)\n new_anno_props = {\n 'value': path,\n 'produces': '\"application/json\"',\n 'consumes': '\"application/json\"',\n }\n perfec2.remove_node_annotation(perfec2.this_clazz(), 'RequestMapping', lines)\n perfec2.add_node_annotation(perfec2.this_clazz(), 'RequestMapping', lines, anno_props=new_anno_props, oneline=True)\n except Exception as e:\n print(traceback.format_exc())\n\n\ndef refactor_pom(xml_path: str, repo: str):\n new_plugins_lines = [\n '\\n',\n ' io.swagger.codegen.v3\\n',\n ' swagger-codegen-maven-plugin\\n',\n ' 3.0.14\\n',\n ' \\n',\n ' \\n',\n ' process-classes\\n',\n ' \\n',\n ' generate\\n',\n ' \\n',\n ' \\n',\n ' ${apidocs.directory}/swagger.json\\n',\n ' html\\n',\n ' ${apidocs.directory}\\n',\n ' \\n',\n ' \\n',\n ' \\n',\n '\\n',\n '\\n',\n ' com.github.kongchen\\n',\n ' swagger-maven-plugin\\n',\n ' ${swagger-maven-plugin-version}\\n',\n ' \\n',\n ' \\n',\n ' \\n',\n ' true\\n',\n ' com.coxautoinc.acctmgmt\\n',\n ' \\n',\n ' ${name}\\n',\n ' ${build.version}\\n',\n ' \\n',\n ' {}\\n'.format(repos[repo]),\n ' \\n',\n ' \\n',\n ' accmt@coxautoinc.com\\n',\n ' Cox Auto Account Management Development Team\\n',\n ' Please contact the product group for Account Management\\n',\n ' \\n',\n ' \\n',\n ' http://www.coxautoinc.com\\n',\n ' Cox Automotive, Inc.\\n',\n ' \\n',\n ' \\n',\n ' ${apidocs.directory}\\n',\n ' \\n',\n ' \\n',\n ' \\n',\n ' \\n',\n ' \\n',\n ' compile\\n',\n ' \\n',\n ' generate\\n',\n ' \\n',\n ' \\n',\n ' \\n',\n '\\n']\n new_properties_lines = [\n '3.1.7\\n',\n '1.5.24\\n',\n '${project.basedir}/src/main/resources/static/${name}\\n'\n ]\n with open(xml_path, 'r+') as pomfile:\n lines = pomfile.readlines()\n\n for i, line in enumerate(lines):\n if '' in line:\n lines = perfec2.util.match_indentation_and_insert(new_properties_lines, i, lines, indent_mod=4)\n break\n\n for i, line in enumerate(lines):\n if '' in line:\n lines = perfec2.util.match_indentation_and_insert(new_plugins_lines, i, lines, indent_mod=4)\n break\n\n perfec2.util.write_file(lines, pomfile)\n\n\ndef main():\n for repo in repos:\n full_dir = f'{repos_root}/{repo}'\n for root, dirs, files in os.walk(full_dir):\n for f in files:\n if f.endswith('Controller.java'):\n perfec2.process_file(os.path.join(root, f), refactor_controller)\n elif f == 'pom.xml':\n refactor_pom(os.path.join(root, f), repo)\n # perfec2.util.mvn_clean_install(full_dir)\n # add_link_to_readme(full_dir)\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"addswagger.py","file_name":"addswagger.py","file_ext":"py","file_size_in_byte":17116,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"296669408","text":"import heapq\nimport adjcn_graph\n\ngraph = adjcn_graph.AdjGraph()\n\nfor _ in range(int(input(\"\"))):\n node, adj_node, weight = input().split()\n graph.add_node(node, int(weight), adj_node)\n graph.add_node(adj_node, int(weight), node) # This is an undirected graph.\n \ndef dijkstra(graph: graph.graph, source_node: str):\n #stnc = source_to_node_costs\n stnc = {node:9999999999999 for node in list(graph.keys())} #setting the initial value as inf distance to the source\n visited_nodes = set() # A set to track the visited nodes\n \n # Inserting all the edges to source_node -> adj_nodes\n edges = [(weight, source_node, adj_node) 
for weight, adj_node in graph[source_node]]\n # Transforming those edges into a Priority Queue\n heapq.heapify(edges)\n visited_nodes.add(source_node) # Adding the source_node into visited_node\n stnc[source_node] = 0\n count = 0\n while edges: # While the edges are not empty\n count += 1\n #print(f\"While loop run time {count}\")\n edge = heapq.heappop(edges) # edge[0] = weight, edge[1] = source_node, edge[2] = adj_node\n weight = edge[0]\n src_node = edge[1]\n adj_node = edge[2]\n \n if stnc[src_node] + weight < stnc[adj_node] and adj_node not in visited_nodes: \n stnc[adj_node] = stnc[src_node] + weight # picking up the smallest value (node to source)for a node\n # Adding the edges of adjacent nodes to the priority queue\n [heapq.heappush(edges, (weight, adj_node, next_node)) for weight, next_node in graph[adj_node] if next_node not in visited_nodes]\n visited_nodes.add(src_node)\n print(\"All node cost from source A\")\n print(stnc)\n \ndijkstra(graph.graph, \"A\")","sub_path":"11/submission/dijkstra_submission.py","file_name":"dijkstra_submission.py","file_ext":"py","file_size_in_byte":1727,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"159859538","text":"import pygame\nfrom Dreadline import Image\nfrom Character_Selection import Button\nfrom Guide import *\nfrom Type_Guide import *\n\n\npygame.init()\n\nblack = (0, 0, 0)\n\ndisplay_width = 1280\ndisplay_height = 720\ncaption = 'Guide'\n\nlogo = pygame.image.load(\"Chara_select\\Pic\\dreadline_logo.png\")\npygame.display.set_icon(logo)\npygame.display.set_caption(caption)\nscreen = pygame.display.set_mode((display_width, display_height))\nscreen.fill(black)\n\n\nclass GuideImage(Image):\n def __init__(self, pos_x, pos_y, image_name):\n self.pos_x = pos_x\n self.pos_y = pos_y\n self.bg_image = 'Picture\\Guide\\%s.png' % image_name\n self.draw()\n\n\nclass NextPageButton(Button):\n def __init__(self, pos_x, pos_y, image_name):\n self.pos_x = pos_x\n self.pos_y = pos_y\n self.width = 70\n self.height = 70\n self.bg_image = 'Picture\\Guide\\%s.png' % image_name\n self.draw()\n\n\nclass StatusGuide:\n def __init__(self):\n screen.fill(black)\n\n self.status_guide = GuideImage(0, 0, 'Status_guide')\n self.next = NextPageButton(1210, 655, 'modern_right_arrow')\n\n def draw(self):\n pygame.display.update()\n\n def mouse_click_event_handle(self, x, y):\n print('mouse click down', x, y)\n if self.next.bg_image_area.collidepoint(x, y):\n print('click next page!')\n LastPageGuide().main()\n\n\nclass PageController:\n def __init__(self):\n print('init page controller')\n self.guide = StatusGuide()\n\n def get_guide(self):\n self.guide.draw()\n\n def get_guide_mouse_click_action(self, x, y):\n self.guide.mouse_click_event_handle(x, y)\n\n\nclass SecondPageGuide:\n def __init__(self):\n pygame.init()\n self.running = True\n self.clock = pygame.time.Clock()\n self.FPS = 30\n self.page_controller = PageController()\n self.page_controller.get_guide()\n\n def main(self):\n while self.running:\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n self.running = False\n if event.type == pygame.MOUSEBUTTONDOWN:\n x, y = event.pos\n self.page_controller.get_guide_mouse_click_action(x, y)\n self.clock.tick(self.FPS)\n\n\nif __name__ == \"__main__\":\n SecondPageGuide().main()\n","sub_path":"Status_Guide.py","file_name":"Status_Guide.py","file_ext":"py","file_size_in_byte":2352,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} 
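The dijkstra_submission.py record above takes a lazy-deletion approach: every relaxation pushes a fresh entry onto the heap, and stale entries are filtered later by the "adj_node not in visited_nodes" test. For comparison, here is a minimal textbook sketch of the same algorithm that instead discards a popped entry whenever its recorded cost is already worse than the best known distance. It assumes the same {node: [(weight, adj_node), ...]} adjacency shape that AdjGraph appears to build; the function name dijkstra_reference, the INF constant and the demo graph are illustrative, not part of any record in this file.

import heapq

INF = float('inf')

def dijkstra_reference(graph, source):
    # best known cost from source to every node; the source itself costs 0
    dist = {node: INF for node in graph}
    dist[source] = 0
    heap = [(0, source)]  # entries are (cost-so-far, node)
    while heap:
        cost, node = heapq.heappop(heap)
        if cost > dist[node]:
            continue  # stale entry, superseded by a cheaper relaxation
        for weight, neighbor in graph[node]:
            candidate = cost + weight
            if candidate < dist[neighbor]:
                dist[neighbor] = candidate
                heapq.heappush(heap, (candidate, neighbor))
    return dist

# usage on a tiny hand-built directed graph (illustrative)
demo = {"A": [(1, "B"), (4, "C")], "B": [(2, "C")], "C": []}
print(dijkstra_reference(demo, "A"))  # {'A': 0, 'B': 1, 'C': 3}

The cost comparison against dist[node] makes each popped duplicate a no-op, so no separate visited set is needed and every node is finalized the first time it is popped with its optimal cost.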
+{"seq_id":"172784787","text":"# conversionist.py\n# \n# Copyright 2014 Jacob Swart\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.\nimport pickle, daswesen, timeit\nn='[library conversionist]'\n\ndef convertMap(mapx,area=None):\n\tall_tiles=[]\n\ttilesets=[]\n\ttry:\n\t\tmapx=mapx.c_map\n\texcept:\n\t\tpass\n\tif area==None:\n\t\tfor key,value in mapx.tilesets.items():\n\t\t\tmockup=True\n\t\t\ttry:\n\t\t\t\tvalue.is_wesen_mockup\n\t\t\texcept:\n\t\t\t\tmockup=False\n\t\t\ttilesets.append((mockup,key))\n\t\tfor tile in mapx.tiles:\n\t\t\tiswesen=True\n\t\t\ttry:\n\t\t\t\ttile.is_wesen\n\t\t\texcept:\n\t\t\t\tiswesen=False\n\t\t\tif iswesen:\n\t\t\t\t#print(tile.name)\n\t\t\t\ttile.index=0\n\t\t\t\ttile.tileset_name=tile.name\n\t\t\t\tent_id=tile.id\n\t\t\telse:\n\t\t\t\tent_id=0\n\t\t\ttry:\n\t\t\t\ttile.owner\n\t\t\texcept:\n\t\t\t\ttile.owner=None\n\t\t\tall_tiles.append((iswesen,tile.tileset_name,tile.l_index,tile.c_layer_indexes,tile.index,tile.rect.x,tile.rect.y,ent_id,tile.owner))\n\telse:\n\t\tfor key,value in mapx.tilesets.items():\n\t\t\tmockup=True\n\t\t\ttry:\n\t\t\t\tvalue.is_wesen_mockup\n\t\t\texcept:\n\t\t\t\tmockup=False\n\t\t\ttilesets.append((mockup,key))\n\t\tfor tile in mapx.tiles:\n\t\t\tif tile.rect.colliderect(area):\n\t\t\t\tiswesen=True\n\t\t\t\ttry:\n\t\t\t\t\ttile.is_wesen\n\t\t\t\texcept:\n\t\t\t\t\tiswesen=False\n\t\t\t\tif iswesen:\n\t\t\t\t\t#print(tile.name)\n\t\t\t\t\ttile.index=0\n\t\t\t\t\ttile.tileset_name=tile.name\n\t\t\t\t\tent_id=tile.id\n\t\t\t\telse:\n\t\t\t\t\tent_id=0\n\t\t\t\ttry:\n\t\t\t\t\ttile.owner\n\t\t\t\texcept:\n\t\t\t\t\ttile.owner=None\n\t\t\t\tall_tiles.append((iswesen,tile.tileset_name,tile.l_index,tile.c_layer_indexes,tile.index,tile.rect.x,tile.rect.y,ent_id,tile.owner))\n\n\tbytes_data=pickle.dumps((all_tiles,tilesets))\n\treturn bytes_data\ndef reverseConvertMap(data,mapobj,sender):\n\tt0=timeit.default_timer()\n\t#print(data)\n\tdata=pickle.loads(data)\n\t#print(data)\n\ttiles=data[0]\n\t#print('TILE_DATA',tiles)\n\ttilesets=data[1]\n\tfor tileset in tilesets:\n\t\tis_mockup,name=tileset\n\t\tif not is_mockup:\n\t\t\tinthing=False\n\t\t\tfor key,foo in mapobj.tilesets.items():\n\t\t\t\tif key==name:\n\t\t\t\t\tinthing=True\n\t\t\tif not inthing:\n\t\t\t\tmapobj.load_tileset(name)\n\t\telse:\n\t\t\tinthing=False\n\t\t\tfor key,foo in mapobj.tilesets.items():\n\t\t\t\tif key==name:\n\t\t\t\t\tinthing=True\n\t\t\tif not inthing:\n\t\t\t\tmapobj.load_entdef(name)\n\tfor tile in tiles:\n\t\tiswesen,tilesetname,layerindex,clayerindexes,tileindex,tilex,tiley,ent_id,owner=tile\n\t\tif not iswesen:\n\t\t\tclayers=[]\n\t\t\tfor index in clayerindexes:\n\t\t\t\tclayers.append(mapobj.collisions_l[int(index)-1])\n\t\t\tmapobj.add_tile(mapobj.tilesets[tilesetname].tiles[int(tileindex)],mapobj.layers_l[int(layerindex)-1],layerindex,(int(tilex),int(tiley)),clayerindexes,clayers)\n\t\telse:\n\t\t\tent=daswesen.load_wesen(tilesetname,(int(tilex),int(tiley)),mapobj.layers_l,mapobj.collisions_l,mapobj.reqs_update,sender,ent_id)\n\t\t\tif owner!=None:\n\t\t\t\tent.owner=owner\n\t\t\tmapobj.tiles.append(ent)\n\t#print('IN THING:',tiles)\n\tt1=timeit.default_timer()\n\tprint('Exec reverseConvertMap time %s' % 
str(t1-t0))\n\n\n\n\n\n","sub_path":"conversionist.py","file_name":"conversionist.py","file_ext":"py","file_size_in_byte":2938,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"473237135","text":"import const\nfrom database import Database\n\nclass Command:\n def __init__(self, user):\n self.user = user\n command = self.get()\n\n if command == None:\n self.create()\n\n self.update_att()\n\n def default(self):\n return {\"user\": self.user, \"command\": \"\", \"status\": 0, \"cancel_status\": 0, \"element\": 0}\n\n def create(self):\n db = Database(const.mongo_server, const.mongo_port)\n ret = db.insert(const.database_name, const.lastc_collection, self.default())\n\n del db\n\n return ret\n\n def get(self):\n db = Database(const.mongo_server, const.mongo_port)\n ret = db.get_single(const.database_name, const.lastc_collection, {\"user\": self.user})\n\n del db\n\n return ret\n\n def update_att(self):\n command = self.get()\n\n self.command = command['command']\n self.status = command['status']\n self.cancel_status = command['cancel_status']\n self.user = command['user']\n self.element = command['element']\n\n def update(self, command, status=0, cancel_status=0, element=0):\n db = Database(const.mongo_server, const.mongo_port)\n db.update(const.database_name, const.lastc_collection, {\"user\": self.user}, {\"command\" : command,\n \"status\" : status,\n \"cancel_status\" : cancel_status,\n \"element\" : element})\n del db\n","sub_path":"command.py","file_name":"command.py","file_ext":"py","file_size_in_byte":1658,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"397717582","text":"#!/usr/bin/env python3\nfrom chibi.config import basic_config\nfrom chibi.file import Chibi_path\nfrom chibi_command.centos import Yum\nfrom chibi_command.echo import cowsay\n\n\nbasic_config()\nfile_check_path = Chibi_path( '~/provision_installed' )\nfile_check = file_check_path.open()\n\n\nversion_to_check = \"logstash\\n\".format( file=__file__, )\n\n\nif __name__ == \"__main__\" and not version_to_check in file_check:\n cowsay( \"Starting install for filebeat\" )\n\n result = Yum.install( 'logstash' )\n if not result:\n raise Exception(\n f\"no se puedo instalar logstash {vars(result)}\" )\n\n file_check.append( version_to_check )\n cowsay( \"Ending install for filebeat\" )\n","sub_path":"provision/logstash/install.py","file_name":"install.py","file_ext":"py","file_size_in_byte":685,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"303653282","text":"from linode import util\n\ndef make(id, client, parent_id=None, cls=None):\n \"\"\"\n Makes an api object based on an id. 
The type depends on the mapping.\n \"\"\"\n from linode.objects import DerivedBase\n if cls:\n if issubclass(cls, DerivedBase):\n return cls(client, id, parent_id)\n else:\n return cls(client, id)\n return None\n\ndef make_list(json_arr, client, parent_id=None, cls=None):\n result = []\n\n for obj in json_arr:\n id_val = None\n\n if 'id' in obj:\n id_val = obj['id']\n elif hasattr(cls, 'id_attribute') and getattr(cls, 'id_attribute') in obj:\n id_val = obj[getattr(cls, 'id_attribute')]\n else:\n continue\n o = make(id_val, client, parent_id=parent_id, cls=cls)\n o._populate(obj)\n result.append(o)\n\n return result\n\ndef make_paginated_list(json, key, client, parent_id=None, page_url=None, cls=None):\n l = make_list(json[key], client, parent_id=parent_id, cls=cls)\n p = util.PaginatedList(client, page_url if page_url else key, page=l, \\\n max_pages=json['total_pages'], total_items=json['total_results'], parent_id=parent_id, \\\n key=key)\n return p\n","sub_path":"linode/mappings.py","file_name":"mappings.py","file_ext":"py","file_size_in_byte":1214,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"416819863","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nSpyder Editor\r\n\r\nPsychopy based eyetracker interface - Example.\r\n\r\n\"\"\"\r\n\r\ndef connect_eyetracker(sample_rate = 300):\r\n from psychopy.iohub import launchHubServer\r\n \r\n iohub_config = {'eyetracker.hw.tobii.EyeTracker':\r\n {'name': 'tracker',\r\n 'runtime_settings': {'sampling_rate': sample_rate}}\r\n }\r\n \r\n io = launchHubServer(**iohub_config) \r\n return io\r\n\r\n#io = connect_eyetracker()\r\n\r\n# Get the eye tracker device.\r\n#tracker = io.devices.tracker\r\n\r\ndef calibrate(tracker):\r\n r = tracker.runSetupProcedure()\r\n \r\n if r:\r\n print('calibration success')\r\n else:\r\n print('calibration unsuccessful')\r\n\r\n\r\ndef stream(tracker, duration):\r\n \"\"\"\r\n\r\n Parameters\r\n ----------\r\n tracker : io.devices.tracker attribute\r\n DESCRIPTION. Object - the connection to a Tobii-Studio Eyetracker\r\n duration : Float\r\n DESCRIPTION. Duration in seconds to report stream of gaze coordinates \r\n\r\n Yields\r\n ------\r\n TUPLE\r\n (Left Eye Gaze Screen Coordinate in X (Pixels),\r\n Left Eye Gaze Screen Coordinate in Y (Pixels),\r\n Time)\r\n\r\n \"\"\"\r\n import time\r\n \r\n # Check for and print any eye tracker events received...\r\n tracker.setRecordingState(True)\r\n\r\n stime = time.time()\r\n #print(stime)\r\n while time.time()-stime < duration:\r\n #print(time.time())\r\n for e in tracker.getEvents(asType='dict'):\r\n #logic to remove bad samples\r\n \r\n yield (e['left_gaze_x'], e['left_gaze_y'], e['time'])\r\n\r\n\r\ndef __main__(duration = 5, # 5 seconds for demo \r\n calibrate_system = False,\r\n sample_rate = 300):\r\n \"\"\"\r\n Parameters\r\n ----------\r\n duration : Float\r\n Duration in seconds to report stream of gaze coordinates\r\n calibrate : Bool, optional\r\n Whether to calibrate the eyetracking system or not. 
\r\n The default is False.\r\n sample_rate : Int, optional\r\n The sampling frequency of the eyetracker in Hertz\r\n The default is 300 (Maximum).\r\n Common options: 300, 250, 200, 120, 100 \r\n\r\n Returns\r\n -------\r\n None.\r\n\r\n \"\"\"\r\n \r\n io = connect_eyetracker(sample_rate = sample_rate)\r\n \r\n tracker = io.devices.tracker\r\n \r\n if calibrate_system:\r\n calibrate(tracker)\r\n \r\n for event in stream(tracker, duration):\r\n print(event) #do some stuff\r\n \r\n io.quit()","sub_path":"Eyetracker_Interface/psychopy_working_example.py","file_name":"psychopy_working_example.py","file_ext":"py","file_size_in_byte":2467,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"434272108","text":"import csv\r\n\r\n\r\npath = \"d:/Eutils/\"\r\n\r\n\r\ndef get_id(path):\r\n with open(path+\"geop_idlist.txt\", encoding=\"utf-8\") as f:\r\n idlist = eval(f.read())\r\n\r\n return idlist\r\n\r\n\r\ndef create_csv(idlist, path):\r\n with open(path+\"geovirna.csv\", \"w\",\r\n newline=\"\", encoding=\"utf-8\") as csvfile:\r\n writer = csv.writer(csvfile)\r\n writer.writerow(['geo_id', 'pubmed_id', 'virus', 'host',\r\n 'time', 'treatment', 'data', 'year', 'species'])\r\n for id in idlist:\r\n c = [id]\r\n print(c)\r\n writer.writerow(c)\r\n\r\n\r\ndef main():\r\n idlist = get_id(path)\r\n create_csv(idlist, path)\r\n\r\n\r\nif __name__ == \"__main__\":\r\n main()\r\n","sub_path":"geop_idcsv.py","file_name":"geop_idcsv.py","file_ext":"py","file_size_in_byte":710,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"103838191","text":"\"\"\"\nTexasHoldem Game\n\"\"\"\nfrom ontology.interop.Ontology.Contract import Migrate\nfrom ontology.interop.System.Storage import GetContext, Get, Put\nfrom ontology.interop.System.Runtime import CheckWitness, GetTime, Notify\nfrom ontology.interop.Ontology.Runtime import GetCurrentBlockHash, Base58ToAddress\nfrom ontology.builtins import concat, sha256\n\n\nAdmin = Base58ToAddress('address')\nPLAYER_LAST_CHECK_IN_DAY = \"P1\"\nGAME_SALT_KEY = \"G1\"\nDaySeconds = 86400\n\n\ndef Main(operation, args):\n ######################### for Admin to invoke Begin #########################\n if operation == \"startGame\":\n assert (len(args) == 3)\n pokerHashList = args[0]\n playerList = args[1]\n gameId = args[2]\n return startGame(pokerHashList, playerList, gameId)\n if operation == \"endGame\":\n assert (len(args) == 2)\n gameId = args[0]\n salt = args[1]\n return endGame(gameId, salt)\n if operation == \"migrateContract\":\n assert (len(args) == 7)\n code = args[0]\n needStorage = args[1]\n name = args[2]\n version = args[3]\n author = args[4]\n email = args[5]\n description = args[6]\n return migrateContract(code, needStorage, name, version, author, email, description)\n ######################### for Admin to invoke End #########################\n\n ######################### for Player to invoke Begin #########################\n if operation == \"checkIn\":\n assert (len(args) == 1)\n player = args[0]\n return checkIn(player)\n if operation == \"ifCheckIn\":\n assert (len(args) == 1)\n account = args[0]\n return ifCheckIn(account)\n if operation == \"projectReward\":\n assert (len(args) == 2)\n player = args[0]\n projectId = args[1]\n return projectReward(player, projectId)\n if operation == \"getSaltAfterEnd\":\n assert (len(args) == 1)\n gameId = args[0]\n return getSaltAfterEnd(gameId)\n if operation == \"checkPokerHash\":\n assert (len(args) == 2)\n gameId = 
args[0]\n        pokerNum = args[1]\n        return checkPokerHash(gameId, pokerNum)\n    ######################### for Player to invoke End #########################\n    return False\n\n\n######################### for Admin to invoke Begin #########################\ndef startGame(pokerHashList, playerList, gameId):\n    \"\"\"\n    admin sends params to start the game\n    algorithm: pokeHash = abs(sha256(pokerNum) ^ sha256(salt))\n    :param pokerHashList: [pokerHash1, pokerHash2, pokerHash3, ..., pokerHash52] send a list including 52 poker hashes\n    :param playerList: [address1, address2, address3...] send all player addresses in this game\n    :param gameId: game's id\n    :return: bool\n    \"\"\"\n    assert (CheckWitness(Admin))\n    playerNum = len(playerList)\n    pokerNum = len(pokerHashList)\n    helperRandom = abs(GetCurrentBlockHash()) % pokerNum\n    # deal poker to player\n    playerPokerList = []\n\n    tmp = 0\n    while tmp < playerNum:\n        # deal first poker\n        playerHandPokerList = []\n        poker = pokerHashList[helperRandom]\n        pokerHashList.remove(helperRandom)\n        playerHandPokerList.append(poker)\n        pokerNum = Sub(pokerNum, 1)\n\n        # deal second poker\n        helperRandom = abs(sha256(concat(helperRandom, poker))) % pokerNum\n        poker = pokerHashList[helperRandom]\n        pokerHashList.remove(helperRandom)\n        playerHandPokerList.append(poker)\n        pokerNum = Sub(pokerNum, 1)\n\n        helperRandom = abs(sha256(concat(helperRandom, poker))) % pokerNum\n        tmp += 1\n        playerPokerList.append(playerHandPokerList)\n\n    # deal common poker\n    commonPokerList = []\n    commonPokerNum = 0\n    while commonPokerNum < 5:\n        poker = pokerHashList[helperRandom]\n        pokerHashList.remove(helperRandom)\n        commonPokerList.append(poker)\n        pokerNum = Sub(pokerNum, 1)\n        helperRandom = abs(sha256(concat(helperRandom, poker))) % pokerNum\n        commonPokerNum += 1\n\n    Notify([\"startGame\", pokerHashList, commonPokerList, playerPokerList, gameId])\n    return True\n\n\ndef endGame(gameId, salt):\n    \"\"\"\n    send game id and salt to end the game and reveal the salt\n    :param gameId: game's id\n    :param salt: salt number\n    :return: bool\n    \"\"\"\n    assert (CheckWitness(Admin))\n    # set salt\n    Put(GetContext(), concatKey(gameId, GAME_SALT_KEY), salt)\n    Notify([\"endGame\", gameId, salt])\n    return True\n\n\ndef migrateContract(code, needStorage, name, version, author, email, description):\n    \"\"\"\n    admin can migrate contract\n    :param code: new contract's avm code\n    :param needStorage: whether storage is needed, default true\n    :param name: contract's name\n    :param version: contract's version\n    :param author: contract's author\n    :param email: contract's email\n    :param description: contract's description\n    :return: bool\n    \"\"\"\n    assert (CheckWitness(Admin))\n    res = Migrate(code, needStorage, name, version, author, email, description)\n    assert (res)\n    Notify([\"Migrate Contract successfully\"])\n    return True\n######################### for Admin to invoke End #########################\n\n\n######################### for Player to invoke Begin #########################\ndef checkIn(player):\n    \"\"\"\n    check in function\n    :param player: player's account address\n    :return: bool\n    \"\"\"\n    assert (CheckWitness(player))\n    checkInDays = ifCheckIn(player)\n    # ifCheckIn returns a day index when a new check-in is allowed,\n    # True when the player has already checked in today and False on error\n    assert (checkInDays)\n    assert (checkInDays != True)\n    Put(GetContext(), concatKey(PLAYER_LAST_CHECK_IN_DAY, player), checkInDays)\n\n    Notify([\"checkIn\", player, checkInDays])\n    return True\n\n\ndef ifCheckIn(player):\n    \"\"\"\n    :param player: player's account address\n    :return: return == False => can NOT check in.\n             return == True => already checked in today.\n             return > 0 => can check in.\n    \"\"\"\n    lastTimeCheckIn = Get(GetContext(), concatKey(PLAYER_LAST_CHECK_IN_DAY, 
\n    now = Add(GetTime(), Mul(8, 3600))  # to UTC8\n    days = Div(now, DaySeconds)\n    if not lastTimeCheckIn:\n        return days\n    if days == lastTimeCheckIn:\n        return True\n    if days > lastTimeCheckIn:\n        return days\n    else:\n        return False\n\n\ndef projectReward(player, projectId):\n    \"\"\"\n    :param player: player's account address\n    :param projectId: project's id\n    :return: bool\n    \"\"\"\n    assert (CheckWitness(player))\n    Notify([\"getReward\", player, projectId])\n    return True\n\n\ndef getSaltAfterEnd(gameId):\n    \"\"\"\n    can only get a game's salt after it has ended\n    :param gameId: game's id\n    :return: salt number\n    \"\"\"\n    salt = Get(GetContext(), concatKey(gameId, GAME_SALT_KEY))\n    return salt\n\n\ndef checkPokerHash(gameId, pokerNum):\n    \"\"\"\n    can only check a hash after the game has ended\n    :param gameId: game's id\n    :param pokerNum: player's poker num\n    :return: pokeHash\n    \"\"\"\n    assert (pokerNum >= 1)\n    assert (pokerNum <= 52)\n    salt = getSaltAfterEnd(gameId)\n    pokeHash = abs(sha256(pokerNum) ^ sha256(salt))\n    return pokeHash\n######################### for Player to invoke End #########################\n\n\n######################### Utility Methods Start #########################\n\"\"\"\nhttps://github.com/ONT-Avocados/python-template/blob/master/libs/SafeMath.py\n\"\"\"\n\n\ndef Add(a, b):\n    \"\"\"\n    Adds two numbers, throws on overflow.\n    \"\"\"\n    c = a + b\n    assert (c >= a)\n    return c\n\n\ndef Sub(a, b):\n    \"\"\"\n    Subtracts two numbers, throws on underflow (i.e. if the subtrahend is greater than the minuend).\n    :param a: operand a\n    :param b: operand b\n    :return: a - b, or revert the transaction if b > a.\n    \"\"\"\n    assert(a>=b)\n    return a-b\n\n\ndef Mul(a, b):\n    \"\"\"\n    Multiplies two numbers, throws on overflow.\n    :param a: operand a\n    :param b: operand b\n    :return: a * b, or revert the transaction on overflow.\n    \"\"\"\n    if a == 0:\n        return 0\n    c = a * b\n    assert (c / a == b)\n    return c\n\n\ndef Div(a, b):\n    \"\"\"\n    Integer division of two numbers, truncating the quotient.\n    \"\"\"\n    assert (b > 0)\n    c = a / b\n    return c\n\n\ndef concatKey(str1, str2):\n    \"\"\"\n    connect str1 and str2 together as a key\n    :param str1: string1\n    :param str2: string2\n    :return: string1_string2\n    \"\"\"\n    return concat(concat(str1, '_'), str2)\n\n######################### Utility Methods End #########################\n","sub_path":"tmp-TexasHoldem.py","file_name":"tmp-TexasHoldem.py","file_ext":"py","file_size_in_byte":8249,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"334634944","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# @Time    : 13 Aug 2018, 2:25 PM\n# @Author  : dongyouyuan\n# @email   : 15099977712@163.com\n# @File    : helpers.py\n# @Software: PyCharm\n# Provides shared utility helpers\n\n\nimport requests\nimport random\nimport json\nimport time\nimport operator\nimport datetime\nimport base64\nfrom configuer import Config\nfrom configuer.const import Const\n\n\nclass Helper(object):\n\n    @staticmethod\n    def get_random_header(mall):\n        \"\"\"\n        Get a random request header\n        :return:\n        \"\"\"\n        headers = Const.ALL_HEADERS.get(mall, [])\n\n        if headers:\n            i = random.randint(0, len(headers)-1)\n            return headers[i]\n        raise Exception(\"Please add '{}' headers to const.Const.ALL_HEADERS\".format(mall))\n\n    @staticmethod\n    def get_random_proxy():\n        \"\"\"\n        Get a random proxy\n        :return:\n        \"\"\"\n        # three request attempts\n        for i in range(3):\n            proxies = requests.get(url=Config.PROXIES_URL).text\n            proxie_list = json.loads(proxies)\n            if proxie_list:\n                i = random.randint(0, len(proxie_list)-1)\n                return proxie_list[i]\n        raise Exception(\"Proxy request failed\")
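# --- Aside on the commit/reveal scheme in tmp-TexasHoldem.py above: after
# endGame() stores the salt, anyone can recompute abs(sha256(num) ^ sha256(salt))
# off-chain and compare it against the hashes dealt in startGame(). A minimal
# hashlib sketch; how the NeoVM serializes its arguments before hashing is an
# assumption here (this version hashes the decimal string form):
import hashlib

def off_chain_poker_hash(poker_num, salt):
    h_num = int.from_bytes(hashlib.sha256(str(poker_num).encode()).digest(), 'big')
    h_salt = int.from_bytes(hashlib.sha256(str(salt).encode()).digest(), 'big')
    return abs(h_num ^ h_salt)

# usage: off_chain_poker_hash(17, salt) should match the contract's
# checkPokerHash(gameId, 17) once the salt has been revealed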
Exception(\"Proxies requests disable\")\n\n @staticmethod\n def get_proxy_authorization():\n \"\"\"\n 获取代理验证信息(用户,密码)\n :return:\n \"\"\"\n proxy_user_pass = b'shein:helloshein'\n encode_user_pass = base64.b64encode(proxy_user_pass).decode()\n return 'Basic ' + encode_user_pass\n\n @staticmethod\n def to_string(s):\n \"\"\"\n 转化成字符串\n :param s:\n :return:\n \"\"\"\n return str(s)\n\n @staticmethod\n def to_int(s, base=10):\n \"\"\"\n 字符串转换成整型,对于不能转换的返回0\n :param s: 需要转换的字符串\n :param base: 多少进制,默认是10进制。如果是16进制,可以写0x或者0X\n :return: int\n \"\"\"\n try:\n ns = '%s' % s\n ns = ns.split('.')[0]\n return int(ns, base)\n except ValueError:\n # 忽略错误\n pass\n # print('to int error')\n return 0\n\n @staticmethod\n def to_float(s):\n \"\"\"\n 字符串转换成整型,对于不能转换的返回0\n :param s: 需要转换的字符串\n :return: float\n \"\"\"\n try:\n ns = '%s' % s\n return float(ns)\n except ValueError:\n # 忽略错误\n pass\n # print('to int error')\n return 0.0\n\n @staticmethod\n def timestamp_to_datatime(timestamp, format='%Y-%m-%d %H:%M:%S'):\n \"\"\"\n 时间戳转换成格式化的时间\n :param timestamp: 时间错\n :param format: 格式\n :return:\n \"\"\"\n if timestamp is None or not timestamp:\n return ''\n return datetime.datetime.fromtimestamp(int(timestamp)).strftime(format)\n\n @staticmethod\n def datatime_to_timestamp(data_time, format='%Y-%m-%d %H:%M'):\n \"\"\"\n :param data_time:\n :param format:\n :return:\n \"\"\"\n if not data_time:\n return Helper.to_int(time.time())\n else:\n try:\n time_array = time.strptime(data_time, format)\n time_stamp = int(time.mktime(time_array))\n return time_stamp\n except Exception as error_msg:\n print(error_msg)\n return 0\n\n @staticmethod\n def split_list(target, size):\n \"\"\"\n 截取list,每段最大为interval个\n :param target: [1,2,3,4,5,6,7,8,9,10] 目标列表\n :param size: 4 每段最大数\n :return: [[1, 2, 3, 4], [5, 6, 7, 8], [9, 10]]\n \"\"\"\n len_target = len(target)\n rounds = int(len_target / size) # 除数\n remainder = int(len_target % size) # 余数\n list_result = list()\n if rounds == 0:\n list_result.append(target)\n else:\n # 使用除数计算\n for index in range(rounds):\n start_index = index * size\n end_index = (index + 1) * size\n list_result.append(target[start_index:end_index])\n # 判断是否有余数\n if remainder != 0:\n start_index = (index + 1) * size\n end_index = (index + 1) * size + remainder\n list_result.append(target[start_index:end_index])\n return list_result\n\n @staticmethod\n def pagination(total, size):\n \"\"\"\n 提供分页功能,\n :param total: 21 总数\n :param size: 10 每页大小\n :return: [{'start': 0, 'end': 10}, {'start': 10, 'end': 20}, {'start': 20, 'end': 21}]\n \"\"\"\n rounds = int(total / size) # 除数\n remainder = int(total % size) # 余数\n list_result = list()\n if rounds == 0:\n tmp_dict = dict()\n tmp_dict['start'] = 0\n tmp_dict['end'] = total\n list_result.append(tmp_dict)\n else:\n # 使用除数计算\n for index in range(rounds):\n start_index = index * size\n end_index = (index + 1) * size\n\n tmp_dict = dict()\n tmp_dict['start'] = start_index\n tmp_dict['end'] = end_index\n list_result.append(tmp_dict)\n # 判断是否有余数\n if remainder != 0:\n start_index = (index + 1) * size\n end_index = (index + 1) * size + remainder\n tmp_dict = dict()\n tmp_dict['start'] = start_index\n tmp_dict['end'] = end_index\n list_result.append(tmp_dict)\n return list_result\n\n @staticmethod\n def sorted_for_list_dict(list_dict, key, reverse=False):\n \"\"\"\n 为列表排序,按照元素中dict的key\n :param list_dict:\n :param key:\n :param reverse:排序方式\n :return:\n \"\"\"\n return sorted(list_dict, key=operator.itemgetter(key), reverse=reverse)\n\n @staticmethod\n def 
    def iso_time_to_datetime(iso_time, format=\"%Y-%m-%d %H:%M:%S\"):\n        \"\"\"\n        Convert MongoDB's ISO time format into a datetime object\n        :param iso_time:\n        :param format:\n        :return:\n        \"\"\"\n        return datetime.datetime.strptime(iso_time, format)\n\n    @staticmethod\n    def get_datetime_from_timestamp(timestamp=None, region=None):\n        \"\"\"\n        Get a datetime (defaults to the current time)\n        :param timestamp: timestamp\n        :return:\n        \"\"\"\n        # UTC time\n        if region == 'utc':\n            if timestamp:\n                return datetime.datetime.utcfromtimestamp(timestamp)\n            else:\n                return datetime.datetime.utcfromtimestamp(time.time())\n        else:\n            if timestamp:\n                return datetime.datetime.fromtimestamp(timestamp)\n            else:\n                return datetime.datetime.fromtimestamp(time.time())\n\n    @staticmethod\n    def get_history_date_time(days=0, format=\"%Y-%m-%d\"):\n        \"\"\"\n        Get the whole-day date (00:00:00) that lies n days before today\n        :param days: day offset\n        :return:\n        \"\"\"\n        res_date = (datetime.date.today() - datetime.timedelta(days=days)).strftime(format)\n        return res_date\n\n\nif __name__ == \"__main__\":\n    print(Helper.get_datetime_from_timestamp())\n","sub_path":"common/helpers.py","file_name":"helpers.py","file_ext":"py","file_size_in_byte":7487,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"67887657","text":"from astar import Astar\nfrom sudokustate import SudokuState\nclass AstarSudoku(Astar):\n\tdef buildChildren(self, parentState):\n\t\ts = 0\n\t\tchildren = set()\n\t\t\"\"\"PRINTING\"\"\"\n\t\t#print(parentState)\n\t\t#print(range(parentState.arraySize))\n\t\tfor i in range(parentState.arraySize):\n\t\t\tfor j in range(parentState.arraySize):\n\t\t\t\tif(parentState.at(i,j) == 0):\n\t\t\t\t\t\"\"\"PRINTING\"\"\"\n\t\t\t\t\t#print('adding children for',i+1,j+1)\n\t\t\t\t\t#print(parentState.getQuad(i,j))\n\t\t\t\t\t#print(parentState.getRow(i))\n\t\t\t\t\t#print(parentState.getColumn(j))\n\t\t\t\t\t#print(parentState.getPossibleNumbers(i,j))\n\t\t\t\t\tfor k in parentState.getPossibleNumbers(i,j):\n\t\t\t\t\t\ts += 1\n\t\t\t\t\t\tchild = SudokuState.newChild(parentState,i,j,k)\n\t\t\t\t\t\tchildren.add(child)\n\t\t\t\t\t\"\"\"PRINTING\"\"\"\n\t\t\t\t\t#print('added',s,'children')\n\t\t\t\t\treturn children\n\t\t\t\t\t\n\n\t\t","sub_path":"astarsudoku.py","file_name":"astarsudoku.py","file_ext":"py","file_size_in_byte":798,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"66162382","text":"\"\"\"\nDESCRIPTION:\n    The tool is for creating secondary controllers on top of a main rig. These controllers will be attached to the main\n    mesh of your rig.\n\nUSAGE:\n    You may go to this link for more detail >>\n    https://youtu.be/IqMGcvEzJCk\n\nAUTHOR:\n    Adien Dendra\n\nCONTACT:\n    adprojects.animation@gmail.com | hello@adiendendra.com\n\nVERSION:\n    1.0 - 10 November 2020 - Initial Release\n\nLICENSE:\n    Copyright (C) 2020 Adien Dendra - hello@adiendendra.com\n    This is a commercial license; it can not be copied and/or\n    distributed without the express permission of Adien Dendra\n\nADDITIONAL NOTES:\n\tThis is a small industry. 
Please do not purchased or pirates the scripts from the outside of the parameters describes.\n\tBe a fair and support me in order for me to continue developing the upcoming rigging stuff.\n\"\"\"\nfrom functools import partial\n\nimport pymel.core as pm\n\nSHAPE_CTRL = [[-1.0, 1.0, 1.0], [-1.0, 1.0, -1.0], [1.0, 1.0, -1.0], [1.0, 1.0, 1.0], [-1.0, 1.0, 1.0],\n [-1.0, -1.0, 1.0],\n [-1.0, -1.0, -1.0], [-1.0, 1.0, -1.0], [-1.0, 1.0, 1.0], [-1.0, -1.0, 1.0], [1.0, -1.0, 1.0],\n [1.0, 1.0, 1.0],\n [1.0, 1.0, -1.0], [1.0, -1.0, -1.0], [1.0, -1.0, 1.0], [1.0, -1.0, -1.0], [-1.0, -1.0, -1.0]]\n\nlayout = 400\npercentage = 0.01 * layout\non_selector = 0\nmethod_radio_button = 1\n\n\ndef ad_show_ui():\n adien_tweak_ctrl = 'AD_TweakControllerTool'\n pm.window(adien_tweak_ctrl, exists=True)\n if pm.window(adien_tweak_ctrl, exists=True):\n pm.deleteUI(adien_tweak_ctrl)\n with pm.window(adien_tweak_ctrl, title='AD Tweak Controller Tool', width=400, height=400):\n with pm.tabLayout('tab', width=410, height=160):\n # CREATE TAB\n with pm.columnLayout('Create Tweak Controller', rowSpacing=1 * percentage, w=layout,\n co=('both', 1 * percentage)):\n pm.separator(h=10, st=\"in\", w=layout)\n with pm.rowLayout(nc=3, columnAttach=[(1, 'right', 0), (2, 'left', 1 * percentage),\n (3, 'left', 1 * percentage)],\n cw3=(29.5 * percentage, 30 * percentage, 30 * percentage,\n )\n ):\n pm.text('Tweak Method:')\n method_radio_button = pm.radioCollection()\n method_joint_hierarchy = pm.radioButton(label='Hierarchy Joint',\n onCommand=lambda x: ad_on_selection_button(1)\n )\n pm.radioButton(label='Non-Hierarchy Joint',\n onCommand=lambda x: ad_on_selection_button(2)\n )\n\n ad_defining_object_text_field(define_object='List_Tweak_Joint', label=\"List Tweak Joint:\",\n multiple_selection=True)\n\n pm.radioCollection(method_radio_button, edit=True, select=method_joint_hierarchy)\n\n ad_defining_object_text_field(define_object='Tweak_Mesh', label=\"Tweak Mesh:\")\n\n ad_defining_object_text_field(define_object='Main_Mesh', label=\"Main Mesh:\")\n\n # ad_defining_object_text_field(define_object='Static_Joint', label=\"Static Joint:\")\n with pm.rowColumnLayout(nc=2, rowSpacing=(2, 1 * percentage),\n co=(1 * percentage, 'both', 1 * percentage),\n cw=[(1, 3 * percentage), (2, 93 * percentage)]):\n pm.checkBox(label='',\n cc=partial(ad_enabling_disabling_ui, ['Scale_Connection']),\n value=False)\n ad_defining_object_text_field(define_object='Scale_Connection', label=\"Scale Connection:\",\n add_feature=True, enable=False)\n\n with pm.rowLayout(nc=2, cw2=(48 * percentage, 48 * percentage), cl2=('center', 'left'),\n columnAttach=[(1, 'both', 0.5 * percentage), (2, 'both', 0.5 * percentage)]):\n pm.button(l=\"Clear All Define Objects\", bgc=(1, 1, 0),\n c=partial(ad_clearing_all_text_field, 'List_Tweak_Joint', 'Tweak_Mesh',\n 'Main_Mesh', 'Scale_Connection'))\n\n # create button to delete last pair of text fields\n pm.button(\"create_tweak_controller\", l=\"Create Tweak Controller\", bgc=(0, 0.5, 0),\n c=partial(ad_create_tweak_controller)\n )\n pm.separator(h=10, st=\"in\", w=layout)\n with pm.rowLayout(nc=2, cw2=(49 * percentage, 49 * percentage), cl2=('center', 'center'),\n columnAttach=[(1, 'both', 2 * percentage), (2, 'both', 2 * percentage)]):\n pm.text(l='Adien Dendra | 11/2020', al='left')\n pm.text(\n l='find out how to use it! 
>> ',\n hl=True,\n al='right')\n\n # EDIT TAB\n with pm.columnLayout('Reconnect Tweak Controller', rowSpacing=1 * percentage, w=layout,\n co=('both', 1 * percentage), adj=1):\n pm.separator(h=10, st=\"in\", w=layout)\n\n ad_defining_object_text_field(define_object='Reconnect_List_Tweak_Joint', label=\"List Tweak Joint:\",\n multiple_selection=True)\n ad_defining_object_text_field(define_object='Reconnect_Tweak_Mesh', label=\"Tweak Mesh:\")\n\n with pm.rowLayout(nc=3, cw3=(32 * percentage, 32 * percentage, 32 * percentage),\n columnAttach=[(1, 'both', 0 * percentage), (2, 'both', 0 * percentage),\n (3, 'both', 0 * percentage)]\n ):\n pm.button(l=\"Clear Define Objects\", bgc=(1, 1, 0),\n c=partial(ad_clearing_all_text_field, 'Reconnect_List_Tweak_Joint',\n 'Reconnect_Tweak_Mesh'))\n # create button to delete last pair of text fields\n pm.button('disconnect_tweak_controller', l=\"Disconnect Tweak Ctrl\", bgc=(0.5, 0, 0),\n c=partial(ad_disconnect_tweak_controller)\n )\n # create button to delete last pair of text fields\n pm.button(\"reconnect_tweak_controller\", l=\"Reconnect Tweak Ctrl\", bgc=(0, 0, 0.5),\n c=partial(ad_reconnect_tweak_controller)\n )\n pm.separator(h=10, st=\"in\", w=layout)\n with pm.rowLayout(nc=2, cw2=(49 * percentage, 49 * percentage), cl2=('center', 'center'),\n columnAttach=[(1, 'both', 2 * percentage), (2, 'both', 2 * percentage)]):\n pm.text(l='Adien Dendra | 11/2020', al='left')\n pm.text(\n l='find out how to use it! >> ',\n hl=True,\n al='right')\n pm.setParent(u=True)\n pm.showWindow()\n\n\ndef ad_enabling_disabling_ui(object, value, *args):\n # query for enabling and disabling layout\n for item in object:\n objectType = pm.objectTypeUI(item)\n if objectType == 'rowGroupLayout':\n pm.textFieldButtonGrp(item, edit=True, enable=value, tx='')\n else:\n pass\n\n\ndef ad_adding_object_sel_to_textfield(text_input, *args):\n # elect and add object\n select = pm.ls(sl=True, l=True, tr=True)\n if len(select) == 1:\n object_selection = select[0]\n pm.textFieldButtonGrp(text_input, e=True, tx=object_selection)\n else:\n pm.error(\"please select one object!\")\n\n\ndef ad_adding_multiple_object_sel_to_texfield(text_input, *args):\n select = pm.ls(sl=True, l=True, tr=True)\n list_joint = (','.join([item.name() for item in select]))\n pm.textFieldButtonGrp(text_input, e=True, tx=str(list_joint))\n\n\ndef ad_on_selection_button(on):\n # save the current shape selection into global variable\n global on_selector\n on_selector = on\n ad_clearing_all_text_field(['List_Tweak_Joint'])\n\n\ndef ad_hierarchy_joint(*args):\n hierarchy = []\n # query object with value on shape selector status\n\n if on_selector == 1:\n hierarchy = True\n elif on_selector == 2:\n hierarchy = False\n else:\n pass\n\n return hierarchy\n\n\ndef ad_clearing_all_text_field(*args):\n # clearing object text field\n for object in args:\n if object:\n pm.textFieldButtonGrp(object, edit=True, tx='')\n else:\n pass\n\n\ndef ad_defining_object_text_field(define_object, label, add_feature=False, multiple_selection=False, *args, **kwargs):\n if not add_feature:\n # if object doesn't has checkbox\n if multiple_selection:\n pm.textFieldButtonGrp(define_object, label=label, cal=(1, \"right\"),\n cw3=(30 * percentage, 58 * percentage, 15 * percentage),\n cat=[(1, 'right', 2), (2, 'both', 2), (3, 'left', 2)],\n bl=\"<<\",\n bc=partial(ad_adding_multiple_object_sel_to_texfield, define_object,\n ))\n else:\n pm.textFieldButtonGrp(define_object, label=label, cal=(1, \"right\"),\n cw3=(30 * percentage, 58 * percentage, 15 * 
percentage),\n cat=[(1, 'right', 2), (2, 'both', 2), (3, 'left', 2)],\n bl=\"<<\",\n bc=partial(ad_adding_object_sel_to_textfield, define_object,\n ))\n else:\n if multiple_selection:\n pm.textFieldButtonGrp(define_object, label=label, cal=(1, \"right\"),\n cw3=(27 * percentage, 58 * percentage, 15 * percentage),\n cat=[(1, 'right', 2), (2, 'both', 2), (3, 'left', 2)],\n bl=\"<<\",\n bc=partial(ad_adding_multiple_object_sel_to_texfield, define_object),\n **kwargs)\n else:\n pm.textFieldButtonGrp(define_object, label=label, cal=(1, \"right\"),\n cw3=(27 * percentage, 58 * percentage, 15 * percentage),\n cat=[(1, 'right', 2), (2, 'both', 2), (3, 'left', 2)],\n bl=\"<<\",\n bc=partial(ad_adding_object_sel_to_textfield, define_object),\n **kwargs)\n\n\ndef ad_query_textfield_object(object_define, *args):\n text = []\n if pm.textFieldButtonGrp(object_define, q=True, en=True):\n if pm.textFieldButtonGrp(object_define, q=True, tx=True):\n text = pm.textFieldButtonGrp(object_define, q=True, tx=True)\n if pm.ls(text):\n text = pm.textFieldButtonGrp(object_define, q=True, tx=True)\n else:\n pm.error(\"'%s' has wrong input object name. There is no object with name '%s'!\" % (object_define, text))\n else:\n pm.error(\"'%s' can not be empty!\" % object_define)\n else:\n pass\n return text, object_define\n\n\ndef ad_query_list_textfield_object(object_define, *args):\n listing_object = []\n if pm.textFieldButtonGrp(object_define, q=True, en=True):\n if pm.textFieldButtonGrp(object_define, q=True, tx=True):\n text = pm.textFieldButtonGrp(object_define, q=True, tx=True)\n listing = text.split(',')\n set_duplicate = set([x for x in listing if listing.count(x) > 1])\n if set_duplicate:\n for item in list(set_duplicate):\n pm.error(\"'%s' is duplicate object!\" % item)\n else:\n for item in listing:\n if pm.ls(item):\n listing_object.append(item)\n else:\n pm.error(\"'%s' has wrong input object name. 
There is no object with name '%s'!\" % (\n object_define, item))\n else:\n pm.error(\"'%s' can not be empty!\" % object_define)\n else:\n pass\n\n return listing_object, object_define\n\n\ndef ad_create_tweak_controller(*args):\n # text group define\n joints = ad_query_list_textfield_object('List_Tweak_Joint')[0]\n tweak_mesh = ad_query_list_textfield_object('Tweak_Mesh')[0]\n main_mesh = ad_query_textfield_object('Main_Mesh')[0]\n scale_connection = ad_query_textfield_object('Scale_Connection')[0]\n\n # checking the skin with joint\n ad_query_object_skin_influence(tweak_mesh, joints)\n # query skin exists on mesh\n ad_query_skin_name(tweak_mesh)\n\n # checking the method type\n list_joint = pm.ls(joints)\n\n if ad_hierarchy_joint():\n ad_tweak_hierarchy_condition(list_joint, main_mesh, scale_connection, tweak_mesh)\n else:\n ad_tweak_non_hierarchy_condition(list_joint, main_mesh, scale_connection, tweak_mesh)\n\n\ndef ad_tweak_non_hierarchy_condition(list_joint, main_mesh, scale_connection, tweak_mesh):\n for joint in list_joint:\n parents = joint.getParent()\n children = joint.getChildren()\n if pm.ls(parents, type='joint') or pm.ls(children, type='joint'):\n pm.error(\"Joint '%s' shouldn't have a hierarchy, check your Tweak Method option!\" % joint)\n else:\n if pm.objExists(\"%s.AD_Parent_Grp\" % joint):\n if pm.listConnections(joint + '.AD_Parent_Grp'):\n pm.error(\"Tweak controller '%s' already added!\" % joint)\n else:\n ad_create_tweak_non_hierarchy_controller(joint, main_mesh, scale_connection, tweak_mesh)\n else:\n ad_create_tweak_non_hierarchy_controller(joint, main_mesh, scale_connection, tweak_mesh)\n\n\ndef ad_tweak_hierarchy_condition(list_joint, main_mesh, scale_connection, tweak_mesh):\n if len(list_joint) > 1:\n # create exception\n for joint in list_joint:\n if pm.objExists(\"%s.AD_Parent_Grp\" % joint):\n if pm.listConnections(joint + '.AD_Parent_Grp'):\n pm.error(\"Tweak controller '%s' already added!\" % joint)\n else:\n continue\n else:\n continue\n\n for joint_child in list_joint[:-1]:\n children = joint_child.getChildren()\n if not pm.ls(children, type='joint'):\n pm.error(\"Joint '%s' should have a hierarchy, check your Tweak Method option!\" % joint_child)\n else:\n continue\n for joint_parent in list_joint[1:]:\n parent = joint_parent.getParent()\n if not pm.ls(parent, type='joint'):\n pm.error(\"Joint '%s' should have a hierarchy, check your Tweak Method option!\" % joint_parent)\n else:\n continue\n\n # create group\n all_grp = ad_create_group('transform', \"%s_%s\" % ('AllTweakCtrl_HJ', 'grp'))\n follicle_grp = ad_create_group('transform', \"%s_%s\" % ('FollicleTweakCtrl_HJ', 'grp'))\n drive_grp = ad_create_group('transform', \"%s_%s\" % ('DriverTweakCtrl_HJ', 'grp'))\n ctrl_grp = ad_create_group('transform', \"%s_%s\" % ('ControllerTweakCtrl_HJ', 'grp'))\n driver_group = ad_group_object_outside('DriverHJ', list_joint)\n\n # create controller\n controller = ad_create_controller(object_list=list_joint,\n ctrl_color=20,\n )\n # parenting to group\n pm.parent(controller['group'][0], ctrl_grp)\n pm.parent(driver_group[0], drive_grp)\n pm.parent(follicle_grp, drive_grp, ctrl_grp, all_grp)\n\n # looping the object\n for joint, driver, controller in zip(list_joint, driver_group, controller['group']):\n ad_create_tweak_hierarchy_controller(joint, main_mesh, scale_connection, tweak_mesh, driver, controller,\n follicle_grp)\n\n else:\n for joint in list_joint:\n if pm.objExists(\"%s.AD_Parent_Grp\" % joint):\n if pm.listConnections(joint + '.AD_Parent_Grp'):\n 
pm.error(\"Tweak controller '%s' already added!\" % joint)\n else:\n pm.error(\n \"Joint '%s' shouldn't as a single joint input, check your Tweak Method option!\" % list_joint[0])\n\n else:\n pm.error(\n \"Joint '%s' shouldn't as a single joint input, check your Tweak Method option!\" % list_joint[0])\n\n\ndef ad_create_tweak_hierarchy_controller(joints, main_mesh, scale_connection, tweak_mesh, driver, controller,\n follicle_grp):\n # create follicle\n follicles = ad_follicle_set(joints, main_mesh)\n\n # add attribute\n ad_add_attr_message(joints, driver)\n\n # connected the bind pre matrix\n pm.connectAttr('%s.worldInverseMatrix[0]' % driver,\n '%s.bindPreMatrix[%d]' % (\n ad_query_skin_name(tweak_mesh), ad_skin_matrix_list_from_joint(joints)))\n\n if pm.textFieldButtonGrp('Scale_Connection', q=True, en=True):\n ad_scale_constraint(scale_connection, follicles['folTrans'])\n\n # hide visibility\n pm.setAttr(follicles['folShape'] + '.lodVisibility', 0)\n pm.setAttr(joints + '.drawStyle', 2)\n\n # lock_attr\n pm.setAttr(follicles['folTrans'] + '.translate', l=1)\n pm.setAttr(follicles['folTrans'] + '.rotate', l=1)\n pm.setAttr(follicles['folTrans'] + '.scale', l=1)\n pm.setAttr('%s.%s' % (follicles['folTrans'], 'v'), l=True, k=False)\n pm.setAttr('%s.%s' % (follicles['folTrans'], 'pu'), l=True, k=False)\n pm.setAttr('%s.%s' % (follicles['folTrans'], 'pv'), l=True, k=False)\n\n pm.parent(follicles['folTrans'], follicle_grp)\n # constraining\n ad_parent_scale_constraint(follicles['folTrans'], driver)\n\n # connect attribute\n pm.connectAttr(driver + '.translate', controller + '.translate')\n pm.connectAttr(driver + '.rotate', controller + '.rotate')\n pm.connectAttr(driver + '.scale', controller + '.scale')\n\n\ndef ad_create_tweak_non_hierarchy_controller(joint, main_mesh, scale_connection, tweak_mesh):\n all_grp = ad_create_group('transform', \"%s_%s\" % ('AllTweakCtrl_NonHJ', 'grp'))\n controller = ad_create_controller(object_list=[joint],\n ctrl_color=18,\n )\n\n # for list_joint, group_ctrl in zip(list_joint, controller['group']):\n # create follicle\n follicles = ad_follicle_set(joint, main_mesh)\n\n # # parent the object to follicle\n pm.parent(controller['group'], follicles['folTrans'])\n\n # for scl in scaleObj:\n if pm.textFieldButtonGrp('Scale_Connection', q=True, en=True):\n ad_scale_constraint(scale_connection, follicles['folTrans'])\n\n # add attribute\n ad_add_attr_message(joint, controller['group'][0])\n\n pm.setAttr(follicles['folShape'] + '.lodVisibility', 0)\n pm.setAttr(joint + '.drawStyle', 2)\n\n pm.setAttr(follicles['folTrans'] + '.translate', l=1)\n pm.setAttr(follicles['folTrans'] + '.rotate', l=1)\n pm.setAttr('%s.%s' % (follicles['folTrans'], 'v'), l=True, k=False)\n pm.setAttr('%s.%s' % (follicles['folTrans'], 'pu'), l=True, k=False)\n pm.setAttr('%s.%s' % (follicles['folTrans'], 'pv'), l=True, k=False)\n\n pm.setAttr(controller['group'][0] + '.translate', l=1)\n pm.setAttr(controller['group'][0] + '.rotate', l=1)\n pm.setAttr(controller['group'][0] + '.scale', l=1)\n\n pm.connectAttr('%s.worldInverseMatrix[0]' % ad_array_tweak_folder(follicles['folTrans']),\n '%s.bindPreMatrix[%d]' % (\n ad_query_skin_name(tweak_mesh), ad_skin_matrix_list_from_joint(joint)))\n\n pm.parent(follicles['folTrans'], all_grp)\n\n print\n \"Tweak controller '%s' has been created!\" % joint\n\n\n################################################# reconnect tweak controller ##################################################\ndef ad_reconnect_tweak_controller(*args):\n 
ad_reconnect_or_disconnect_looping(reconnect=True)\n\n\ndef ad_disconnect_tweak_controller(*args):\n ad_reconnect_or_disconnect_looping(reconnect=False)\n\n\ndef ad_reconnect_or_disconnect_looping(reconnect):\n joints = ad_query_list_textfield_object('Reconnect_List_Tweak_Joint')[0]\n tweak_mesh = ad_query_list_textfield_object('Reconnect_Tweak_Mesh')[0]\n\n # checking the skin with joint\n ad_query_object_skin_influence(tweak_mesh, joints)\n # query skin exists on mesh\n ad_query_skin_name(tweak_mesh)\n\n for joint in joints:\n\n # listing the connection exists\n grp_driver = pm.listConnections(joint + '.AD_Parent_Grp', s=1)\n\n # query whether it has contain parent hook\n if not grp_driver:\n pm.error(\n \"Tweak controller '%s' doesn't exists! Create setup first or delete existing setup before create tweaker.\" % joint)\n\n # query list connection\n list_connection = pm.listConnections('%s.worldInverseMatrix[0]' % grp_driver[0], p=1)\n\n # got error if it has connection\n if reconnect:\n if list_connection:\n pm.warning(\"Tweak controller '%s' already has connected! Skip that tweak controller.\" % joint)\n\n else:\n # connect the bpm transform to skin bind pre matrix\n pm.connectAttr('%s.worldInverseMatrix[0]' % grp_driver[0],\n '%s.bindPreMatrix[%d]' % (\n ad_query_skin_name(tweak_mesh), ad_skin_matrix_list_from_joint(joint)))\n print(\"Tweak controller '%s' is reconnected!\" % joint)\n else:\n # condition if it has connection\n if list_connection:\n pm.disconnectAttr('%s.worldInverseMatrix[0]' % grp_driver[0], list_connection[0])\n print(\"Tweak controller '%s' is disconnected!\" % joint)\n\n else:\n pm.warning(\n \"Tweak controller '%s' already has disconnected! Skip that tweak controller.\" % joint)\n\n\n############################################## function tweak controller ###############################################\ndef ad_suffix_name(obj):\n objs = obj.split('|')[-1:]\n for l in objs:\n get_len = l.split('_')\n if len(get_len) > 1:\n get_suffix_name = get_len[1]\n return get_suffix_name\n else:\n get_suffix_no = l.replace(l, '')\n return get_suffix_no\n\n\ndef ad_group_object(grp_name_list, obj_base):\n list_relatives = pm.listRelatives(obj_base, ap=1)\n\n cGrp = ad_grouping_parent(grp_name_list, ad_prefix_name(obj_base), ad_suffix_name(obj_base).title())\n\n if list_relatives == None:\n pm.parent(obj_base, cGrp[-1])\n else:\n # parent group offset to list relatives\n pm.parent(cGrp[0], list_relatives)\n # parent obj to grp offset\n pm.parent(obj_base, cGrp[-1])\n\n return cGrp\n\n\ndef ad_group_object_outside(add_suffix, obj_base):\n # create group hierarchy\n grps = []\n for i in obj_base:\n grps.append(pm.createNode('transform', n=\"%s%s_%s\" % (ad_prefix_name(i), add_suffix, 'grp')))\n\n for i, item in enumerate(obj_base):\n pm.delete(pm.parentConstraint(item, grps[i], mo=0))\n\n if i > 0:\n pm.parent(grps[i], grps[i - 1])\n\n return grps\n\n\ndef ad_parent_scale_constraint(obj_base, obj_target, mo=1):\n ad_parent_constraint(obj_base, obj_target, mo=mo)\n ad_scale_constraint(obj_base, obj_target)\n\n\ndef ad_query_skin_name(obj):\n # get the skincluster name\n relatives = pm.listRelatives(obj, type=\"shape\")\n skin_cluster = pm.listConnections(relatives, type=\"skinCluster\")\n if not skin_cluster:\n return pm.error(\n \"Please add bind skin to '%s' before create or reconnect or disconnect tweak controller!\" % obj[0])\n else:\n return skin_cluster[0]\n\n\ndef ad_query_object_skin_influence(object_mesh, joints):\n skin_cluster = ad_query_skin_name(object_mesh)\n 
query_object_influence = pm.skinCluster(skin_cluster, query=True, inf=True)\n for joint in joints:\n if not joint in query_object_influence:\n pm.error(\"Joint '%s' doesn't have influence bind skin to '%s' mesh!\" % (joint, object_mesh[0]))\n\n return query_object_influence\n\n\n# CREATE FOLLICLE BASED ON OBJECT (JOINT OR TRANSFORM) SELECTED\ndef ad_create_follicle_selection(obj_select, obj_mesh, prefix=None, suffix=None):\n obj_mesh = pm.listRelatives(obj_mesh, s=1)[0]\n\n closest_node = None\n # If the inputSurface is of type 'nurbsSurface', connect the surface to the closest node\n if pm.objectType(obj_mesh) == 'nurbsSurface':\n closest_node = pm.createNode('closestPointOnSurface')\n pm.connectAttr((obj_mesh + '.local'), (closest_node + '.inputSurface'))\n\n # If the inputSurface is of type 'mesh', connect the surface to the closest node\n elif pm.objectType(obj_mesh) == 'mesh':\n closest_node = pm.createNode('closestPointOnMesh')\n pm.connectAttr((obj_mesh + '.outMesh'), (closest_node + '.inMesh'))\n else:\n pm.error('please check your type object. Object must be either nurbs or mesh')\n\n # query object selection\n xform = pm.xform(obj_select, ws=True, t=True, q=True)\n\n # set the position of node according to the loc\n pm.setAttr(closest_node + '.inPositionX', xform[0])\n pm.setAttr(closest_node + '.inPositionY', xform[1])\n pm.setAttr(closest_node + '.inPositionZ', xform[2])\n\n # create follicle\n follicle_node = pm.createNode('follicle')\n\n # query the transform follicle\n follicle_transform = pm.listRelatives(follicle_node, type='transform', p=True)\n\n # CONNECTING THE FOLLICLE\n pm.connectAttr(follicle_node + '.outRotate', follicle_transform[0] + '.rotate')\n pm.connectAttr(follicle_node + '.outTranslate', follicle_transform[0] + '.translate')\n\n # connect the world matrix mesh to the follicle shape\n pm.connectAttr(obj_mesh + '.worldMatrix[0]', follicle_node + '.inputWorldMatrix')\n\n # connect the output mesh of mesh to input mesh follicle\n if pm.objectType(obj_mesh) == 'nurbsSurface':\n pm.connectAttr((obj_mesh + '.local'), (follicle_node + '.inputSurface'))\n\n # If the inputSurface is of type 'mesh', connect the surface to the follicle\n if pm.objectType(obj_mesh) == 'mesh':\n pm.connectAttr(obj_mesh + '.outMesh', follicle_node + '.inputMesh')\n\n # turn off the simulation follicle\n pm.setAttr(follicle_node + '.simulationMethod', 0)\n\n # get u and v output closest point on mesh node\n par_u = pm.getAttr(closest_node + '.result.parameterU')\n par_v = pm.getAttr(closest_node + '.result.parameterV')\n\n # connect output closest point on mesh node to follicle\n pm.setAttr(follicle_node + '.parameterU', par_u)\n pm.setAttr(follicle_node + '.parameterV', par_v)\n\n # deleting node\n pm.delete(closest_node)\n\n # rename follicle\n if prefix or suffix:\n follicle_transform = pm.rename(follicle_transform, '%s_%s' % (ad_prefix_name(prefix), suffix))\n else:\n follicle_transform = pm.rename(follicle_transform, '%s_%s' % (ad_prefix_name(obj_select), 'fol'))\n\n # listing the shape of follicle\n follicle_shape = pm.listRelatives(follicle_transform, s=1)[0]\n pm.setAttr(follicle_shape + '.rsp', k=False)\n pm.setAttr(follicle_shape + '.ptl', k=False)\n pm.setAttr(follicle_shape + '.sim', k=False)\n pm.setAttr(follicle_shape + '.sdr', k=False)\n pm.setAttr(follicle_shape + '.fld', k=False)\n pm.setAttr(follicle_shape + '.ovd', k=False)\n pm.setAttr(follicle_shape + '.cld', k=False)\n pm.setAttr(follicle_shape + '.dmp', k=False)\n pm.setAttr(follicle_shape + '.stf', k=False)\n 
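# --- Aside on the follicle setup above: closestPointOnMesh converts the
# joint's world position into the (parameterU, parameterV) pair that pins the
# follicle to the surface. A quick usage sketch; it assumes a Maya scene that
# actually contains the hypothetical nodes 'cheek_jnt' and 'body_geo':
import pymel.core as pm

fol_trans, fol_shape = ad_create_follicle_selection('cheek_jnt', 'body_geo')
# the follicle shape now stores where on the surface the joint sat
print(pm.getAttr(fol_shape + '.parameterU'), pm.getAttr(fol_shape + '.parameterV'))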
pm.setAttr(follicle_shape + '.lfl', k=False)\n pm.setAttr(follicle_shape + '.cwm', k=False)\n pm.setAttr(follicle_shape + '.sct', k=False)\n pm.setAttr(follicle_shape + '.ad', k=False)\n pm.setAttr(follicle_shape + '.dml', k=False)\n pm.setAttr(follicle_shape + '.ctf', k=False)\n pm.setAttr(follicle_shape + '.brd', k=False)\n pm.setAttr(follicle_shape + '.cbl', k=False)\n pm.setAttr(follicle_shape + '.cr', k=False)\n pm.setAttr(follicle_shape + '.cg', k=False)\n pm.setAttr(follicle_shape + '.fsl', k=False)\n pm.setAttr(follicle_shape + '.sgl', k=False)\n pm.setAttr(follicle_shape + '.sdn', k=False)\n pm.setAttr(follicle_shape + '.dgr', k=False)\n pm.setAttr(follicle_shape + '.cw', k=False)\n pm.setAttr(follicle_shape + '.cml', k=False)\n pm.setAttr(follicle_shape + '.cb', k=False)\n\n return follicle_transform, follicle_shape\n\n\ndef ad_follicle_set(obj_select, obj_mesh, prefix=None, suffix=None, ):\n grps = []\n grps.extend(ad_create_follicle_selection(obj_select, obj_mesh, prefix=prefix, suffix=suffix))\n return {'folTrans': grps[0],\n 'folShape': grps[1]}\n\n\ndef ad_joint_destination_matrix(obj):\n list_connection = pm.listConnections(obj + '.worldMatrix[0]', p=True)\n return list_connection\n\n\ndef ad_skin_matrix_list_from_joint(obj):\n for item in ad_joint_destination_matrix(obj):\n split = item.split('.')[1:]\n integer = int((split[0].split('[')[-1][:-1]))\n return integer\n\n\ndef ad_array_tweak_folder(obj):\n list = pm.ls(obj)\n for node in list:\n children = node.getChildren()\n return children[1]\n\n\ndef ad_create_group(node, name):\n if not pm.objExists(name):\n node_name = pm.createNode(node, n=name)\n return node_name\n else:\n return name\n\n\ndef ad_create_ctrl(shape):\n ctrl = pm.curve(d=1, p=shape)\n return ctrl\n\n\ndef ad_grouping_parent(groups, prefix, suffix, number=''):\n # create group hierarchy\n grps = []\n for i in range(len(groups)):\n grps.append(pm.createNode('transform', n=\"%s%s%s%s_%s\" % (prefix, suffix, groups[i], number, 'grp')))\n if i > 0:\n pm.parent(grps[i], grps[i - 1])\n return grps\n\n\ndef ad_prefix_name(obj):\n if '_' in obj:\n get_prefix_name = obj.split('_')[:-1]\n joining = '_'.join(get_prefix_name)\n return joining\n else:\n print\n obj\n\n\ndef ad_set_color(ctrl, color):\n list_relatives = pm.listRelatives(ctrl, s=1)[0]\n pm.setAttr(list_relatives + '.ove', 1)\n pm.setAttr(list_relatives + '.ovc', color)\n return list_relatives\n\n\ndef ad_add_attr_message(obj_target, obj):\n if pm.objExists('%s.AD_Parent_Grp' % obj_target):\n attr = pm.connectAttr('%s.message' % obj, '%s.AD_Parent_Grp' % obj_target)\n else:\n pm.addAttr(obj_target, ln='AD_Parent_Grp', at='message')\n attr = pm.connectAttr('%s.message' % obj, '%s.AD_Parent_Grp' % obj_target)\n return attr\n\n\ndef ctrl_attributes(rename_controller, ctrl_color, grp_parent):\n ad_set_color(rename_controller, ctrl_color)\n ad_add_attr_message(rename_controller, grp_parent[0])\n\n\ndef ad_parent_constraint(obj_base, obj_target, mo=1):\n par_constraint = pm.parentConstraint(obj_base, obj_target, mo=mo)\n split = par_constraint.split('_')\n x = '_'.join(split[:-1])\n n = x.replace(x, x + '_pac')\n pm.rename(par_constraint, n)\n\n\ndef ad_scale_constraint(obj_base, obj_target, mo=1):\n scale_constraint = pm.scaleConstraint(obj_base, obj_target, mo=mo)\n split = scale_constraint.split('_')\n x = '_'.join(split[:-1])\n n = x.replace(x, x + '_sc')\n pm.rename(scale_constraint, n)\n\n\ndef ad_create_controller(object_list=None,\n groups_ctrl=['Zro', 'Offset'],\n ctrl_color=20,\n shape=SHAPE_CTRL,\n 
):\n    controllers = []\n    parent_groups = []\n    for number, obj in enumerate(object_list):\n        # create control\n        creating_ctrl = ad_create_ctrl(shape)\n        controller = pm.rename(creating_ctrl, '%s_%s' % (ad_prefix_name(obj), 'ctrl'))\n        parent_group = ad_grouping_parent(groups_ctrl, ad_prefix_name(obj), 'ctrl'.title())\n\n        # ctrl_grp = group_ctrl(obj, 'ctrl', groups_ctrl, ctrl)\n        pm.parent(controller, parent_group[-1])\n\n        controllers.append(controller)\n        parent_groups.append(parent_group[0])\n\n        # add control attributes\n        ctrl_attributes(controller, ctrl_color, parent_group)\n\n        # match position the group ctrl as the obj\n        pm.delete(pm.parentConstraint(obj, parent_group[0], mo=0))\n\n        # connection parent\n        # query list relatives\n        list_relatives_parent = pm.listRelatives(obj, p=1)\n        pm.parent(obj, controller)\n\n        if list_relatives_parent:\n            # parent ctrl group to list relatives\n            pm.parent(parent_group[0], list_relatives_parent[0])\n\n    return {'ctrl': controllers,\n            'group': parent_groups}\n","sub_path":"ui/ad_tweak_controller.py","file_name":"ad_tweak_controller.py","file_ext":"py","file_size_in_byte":33351,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"322369983","text":"\nx = True\ni = 0\n\nwhile x is True:\n    try:\n        a = float(input('Enter a float number'))  # float() raises ValueError on bad input\n        assert isinstance(a, float)\n        x = False\n    except (ValueError, AssertionError):\n        print('Entered value is not a number')\n        i += 1\n        if i==4: break\n    else:\n        print('You have entered {}'.format(a))\n    finally:\n        a = 23\n        print(a)\n        print(a/3)\n\n\n","sub_path":"Assert.py","file_name":"Assert.py","file_ext":"py","file_size_in_byte":359,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"250014714","text":"import theano\nimport lasagne\nimport numpy as np\nimport math\nimport Visualization.underoverfitting\n\n\ndef train_and_predict(train_data, train_truths, dev_data, dev_truths, create_nn, dev_func=None, **nn_kwargs):\n    print(\"Device: \" + theano.config.device)\n    assert 'output_num_units' in nn_kwargs.keys()\n    assert 'input_shape' not in nn_kwargs.keys()\n    assert 'on_epoch_finished' not in nn_kwargs.keys()\n\n    result = []\n\n    def dev(nn2, history):\n        train_accuracy = history[-1]['valid_accuracy']\n        dev_accuracy = get_accuracy(nn2.layers_[0], nn2.layers_[-1], dev_data, dev_truths, 100)\n        result.append((train_accuracy, dev_accuracy))\n\n        if dev_func is not None:\n            dev_func(result)\n\n    assert len(set(train_truths)) <= nn_kwargs['output_num_units']\n    nn = create_nn(**nn_kwargs, on_epoch_finished=[dev], input_shape=train_data.shape)\n    nn.fit(train_data, train_truths)\n\n    return result\n\n\ndef train_and_predict_and_plot(train_data, train_truths, dev_data, dev_truths, create_nn, **nn_kwargs):\n    def plot(accuracies):\n        pts = Visualization.underoverfitting.scale_batch(accuracies)\n        Visualization.underoverfitting.plot(pts)\n\n    return train_and_predict(train_data, train_truths, dev_data, dev_truths, create_nn, plot, **nn_kwargs)\n\n\ndef get_accuracy(input_layer, output_layer, data, truths, dev_batch_size):\n    dev_batch_size = dev_batch_size if dev_batch_size != 0 else data.shape[0]\n    n_batches = int(math.ceil(data.shape[0] / dev_batch_size))\n\n    compute_output = theano.function([input_layer.input_var],\n                                     lasagne.layers.get_output(output_layer, deterministic=True))\n\n    val_predictions = []\n    for batch_index in range(0, n_batches):\n        dev_batch = data[dev_batch_size * batch_index: dev_batch_size * (batch_index + 1)]\n        val_output = compute_output(dev_batch)\n
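# --- Aside on the batching above: int(math.ceil(n / batch_size)) passes also
# cover the final short batch. The same accuracy bookkeeping in plain numpy,
# free of theano/lasagne, just to make the arithmetic concrete:
import math
import numpy as np

def batched_accuracy(predict_fn, data, truths, batch_size):
    n_batches = int(math.ceil(data.shape[0] / batch_size))
    preds = []
    for b in range(n_batches):
        chunk = data[b * batch_size:(b + 1) * batch_size]
        preds.extend(np.argmax(predict_fn(chunk), axis=1))
    return np.mean(np.asarray(preds) == truths)

# e.g. batched_accuracy(lambda x: x, np.eye(5), np.arange(5), 2) returns 1.0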
        val_predictions.extend(np.argmax(val_output, axis=1))\n\n    accuracy = np.mean(np.asarray(val_predictions) == truths)\n    return accuracy\n\n\ndef train(nn, data):\n    train_data, train_truths, dev_data, dev_truths = data\n\n    nn.fit(train_data, train_truths)\n\n    train_accuracy = nn.train_history_[-1]['valid_accuracy']\n    dev_accuracy = get_accuracy(nn.layers_[0], nn.layers_[-1], dev_data, dev_truths, 100)\n    return train_accuracy, dev_accuracy\n","sub_path":"underoverfitting.py","file_name":"underoverfitting.py","file_ext":"py","file_size_in_byte":2322,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"613957529","text":"import sys\nimport time\nfrom color import Color\nfrom textwrap import wrap\n\nclass GameObj:\n    def __init__(self, name, desc, holding=None, isDark=False):\n        desc = wrap(desc, 50)\n        self.name = name\n        self.desc = '\\n'.join(desc)\n        self.holding = holding if holding is not None else []  # avoid a shared mutable default\n        self.isDark = isDark\n        self.seen = False\n\n    def crawlText(self, text, delay=0.01):\n        text = wrap(text, 50)\n        text = '\\n\\r'.join(text)\n        for ln in text:\n            sys.stdout.write(ln)\n            sys.stdout.flush()\n            time.sleep(delay)\n        print('\\n')\n\n    def showHelp(self):\n        print(f'{Color.RED}go{Color.END} [{Color.PURPLE}north, east, west, south{Color.END}] {Color.GREEN}$ Move in that direction (ex. go north){Color.END}')\n        print(f'{Color.RED}get{Color.END} [{Color.RED}item{Color.END}] {Color.GREEN}$ Pick up an item you see (ex. get Rubber Duck){Color.END}')\n        print(f'{Color.RED}drop{Color.END} [{Color.RED}item{Color.END}] {Color.GREEN}$ Drop an item you are holding (ex. drop Flashlight){Color.END}')\n        print(f'{Color.RED}use{Color.END} [{Color.RED}item{Color.END}] {Color.GREEN}$ Use an item you are holding (ex. use Flashlight){Color.END}')\n        print(f'{Color.RED}look{Color.END} {Color.GREEN}$ Observe your surroundings{Color.END}')\n        print(f'{Color.RED}q{Color.END} {Color.GREEN}$ Quit{Color.END}')\n\n    def showItems(self):\n        if self.isDark:\n            return None\n        items = [i.name for i in self.holding]\n        itemString = ', '.join(items)\n        if len(items) > 0:\n            itemString = f'{Color.RED}{itemString}{Color.END}'\n            return itemString\n        else: return None\n\n    def quitGame(self):\n        print('\\n')\n        self.crawlText(f'{Color.RED}Your mind feels electric, the taste of copper fills your mouth, and you wonder:')\n        self.crawlText(f'{Color.PURPLE} \"Is this real? 
Am I dreaming this moment?\"\\n\\f\\t\\t{Color.END}')\n sys.exit()\n\n def __str__(self):\n return f'++ {self.name}\\n\\f{self.desc} ++'","sub_path":"src/gameObj.py","file_name":"gameObj.py","file_ext":"py","file_size_in_byte":2039,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"313594570","text":"n1 = int(input())\nn2 = int(input())\nopcode = input()\nresult = 0\nparity = \"\"\nif opcode == \"+\" or opcode == \"-\" or opcode == \"*\":\n if opcode == \"+\":\n result = n1 + n2\n elif opcode == \"-\":\n result = n1 - n2\n elif opcode == \"*\":\n result = n1 * n2\n if result % 2 == 0:\n parity = \"even\"\n else:\n parity = \"odd\"\n print(f\"{n1} {opcode} {n2} = {result} - {parity}\")\nelif opcode == \"/\":\n if n2 == 0:\n print(f\"Cannot divide {n1} by zero\")\n else:\n result = n1 / n2\n print(f\"{n1} / {n2} = {result:.2f}\")\nelif opcode == \"%\":\n if n2 == 0:\n print(f\"Cannot divide {n1} by zero\")\n else:\n result = n1 % n2\n print(f\"{n1} % {n2} = {result}\")\n","sub_path":"exercises/03 - Conditional Statements Advanced/06 - Operations Between Numbers.py","file_name":"06 - Operations Between Numbers.py","file_ext":"py","file_size_in_byte":727,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"451852821","text":"from __future__ import print_function\nfrom keras.callbacks import EarlyStopping\nfrom keras.preprocessing.text import Tokenizer\nfrom keras.preprocessing.sequence import pad_sequences\nfrom keras.models import Model\nfrom keras.layers import Input, LSTM, Dense, Embedding\nimport numpy as np\nfrom keras.utils import to_categorical\nimport matplotlib.pyplot as plt\nimport pickle\nimport re\nimport unicodedata\n\ndef plot_history(model):\n plt.plot(model.history.history['loss'])\n plt.plot(model.history.history['val_loss'])\n plt.title('model loss')\n plt.ylabel('loss')\n plt.xlabel('epoch')\n plt.legend(['train', 'test'], loc='upper left')\n plt.show()\n # plt.imsave('plot.jpg')\n\n\ndef remove_long_sentences(train_data, test_data, num_samples):\n new_train = []\n new_test = []\n new_index = 0\n for index in range(len(train_data)):\n if len(train_data[index]) < 30:\n new_train.append(train_data[index])\n new_test.append(test_data[index])\n new_index += 1\n if new_index == num_samples:\n break\n del train_data\n del test_data\n return new_train, new_test\n\n\ndef convert_binary_matrix(data, num_tokens):\n return to_categorical(data, num_classes=num_tokens)\n\n\ndef preproc(docs, max_length, name):\n token_model = Tokenizer()\n token_model.fit_on_texts(docs)\n vocab_size = len(token_model.word_index) + 1\n # integer encode the documents\n encoded_docs = token_model.texts_to_sequences(docs)\n print(encoded_docs[:10])\n # pad documents to a max length of 4 words\n # max_length = 50\n padded_docs = pad_sequences(encoded_docs, maxlen=max_length, padding='post')\n print(padded_docs[:10])\n with open(name, 'wb') as handle:\n pickle.dump(token_model, handle, protocol=pickle.HIGHEST_PROTOCOL)\n # load the whole embedding into memory\n return vocab_size, padded_docs, token_model\n\ndef no_accent_vietnamese(source,target):\n # s = s.decode('utf-8')\n target = re.sub(u'Đ', 'D', target)\n target = re.sub(u'đ', 'd', target)\n no_accent_string=unicodedata.normalize('NFKD', target).encode('ASCII', 'ignore')\n if (target == source):\n return True\n else:\n return False\n\n\nSTART_SEQ = 'startseq'\nEND_SEQ = 'endseq'\nmax_length = 20\nbatch_size = 128 # Batch size for training.\nepochs = 
50 # Number of epochs to train for.\nlatent_dim = 2048 # Latent dimensionality of the encoding space.\nnum_samples = 25000 # Number of samples to train on.\n# Path to the data txt file on disk.\ndata_path = 'fra.txt'\n\nwith open('target.txt', encoding='utf-8') as f:\n source_data = f.readlines()\n# you may also want to remove whitespace characters like `\\n` at the end of each line\nsource_data = [str(x.strip()) for x in source_data]\n\nwith open('source.txt', encoding='utf-8') as f:\n target_data = f.readlines()\n# you may also want to remove whitespace characters like `\\n` at the end of each line\ntarget_data = [(START_SEQ + ' ' + str(x.strip()) + ' ' + END_SEQ) for x in target_data]\n# train_data, test_data = remove_long_sentences(train_data, test_data, num_samples)\n# Vectorize the data.\nsource_data, target_data = remove_long_sentences(source_data, target_data, num_samples)\nnum_decoder_tokens, decoder_input_data, decoder_token_model = preproc(target_data, max_length, 'decoder')\nnum_encoder_tokens, encoder_input_data, encoder_token_model = preproc(source_data, max_length, 'encoder')\ndecoder_target_data = np.c_[decoder_input_data[:, 1:], np.zeros(num_samples)]\n# del source_data\n# del target_data\n# encoder_input_data=convert_binary_matrix(encoder_input_data,num_encoder_tokens)\n# decoder_input_data=convert_binary_matrix(decoder_input_data,num_decoder_tokens)\ndecoder_target_data = convert_binary_matrix(decoder_target_data, num_decoder_tokens)\n\n# Define an input sequence and process it.\nencoder_inputs = Input(shape=(None,))\nembed_input = Embedding(num_encoder_tokens, latent_dim)(encoder_inputs)\nencoder_1 = LSTM(latent_dim, return_state=True, return_sequences=True)\nencoder_outputs_1 = encoder_1(embed_input)\ny_1, state_h_1, state_c_1 = encoder_outputs_1\n# We discard `encoder_outputs` and only keep the states.\nencoder_states_1 = [state_h_1, state_c_1]\n\nencoder_2 = LSTM(latent_dim, return_state=True)\ny_2, state_h_2, state_c_2 = encoder_2(y_1)\n# We discard `encoder_outputs` and only keep the states.\nencoder_states_2 = [state_h_2, state_c_2]\n\n# Set up the decoder, using `encoder_states` as initial state.\ndecoder_inputs = Input(shape=(None,))\nembed_target = Embedding(num_decoder_tokens, latent_dim)(decoder_inputs)\n# # We set up our decoder to return full output sequences,\n# # and to return internal states as well. 
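# --- Aside on preproc() above: the fitted Tokenizer is pickled under the
# plain file names 'decoder' and 'encoder', so an inference script can restore
# the exact word index instead of refitting. A minimal reload sketch (the
# input line is hypothetical; maxlen matches the max_length set above):
import pickle
from keras.preprocessing.sequence import pad_sequences

with open('encoder', 'rb') as handle:
    enc_tokens = pickle.load(handle)
seqs = enc_tokens.texts_to_sequences(['toi yeu em'])
padded = pad_sequences(seqs, maxlen=20, padding='post')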
We don't use the\n# # return states in the training model, but we will use them in inference.\ndecoder_lstm_1 = LSTM(latent_dim, return_sequences=True, return_state=True)\ndecoder_outputs_1, _, _ = decoder_lstm_1(embed_target,\n initial_state=encoder_states_1)\ndecoder_lstm_2 = LSTM(latent_dim, return_sequences=True, return_state=True)\ndecoder_outputs_2, _, _ = decoder_lstm_2(decoder_outputs_1,\n initial_state=encoder_states_2)\ndecoder_dense = Dense(num_decoder_tokens, activation='softmax')\ndecoder_outputs = decoder_dense(decoder_outputs_2)\n# decoder_inputs = Input(shape=(None,))\n# x = Embedding(num_decoder_tokens, latent_dim)(decoder_inputs)\n# x = LSTM(latent_dim, return_sequences=True)(x, initial_state=encoder_states)\n# decoder_outputs = Dense(num_decoder_tokens, activation='softmax')(x)\n\n# Define the model that will turn\n# `encoder_input_data` & `decoder_input_data` into `decoder_target_data`\nmodel = Model([encoder_inputs, decoder_inputs], decoder_outputs)\n\n# Run training\nearly_stopping = EarlyStopping(monitor='val_loss', patience=5)\nmodel.compile(optimizer='rmsprop', loss='categorical_crossentropy')\nmodel.summary()\nmodel.fit([encoder_input_data, decoder_input_data], decoder_target_data,\n batch_size=batch_size,\n epochs=epochs,\n validation_split=0.1, callbacks=[early_stopping])\nplot_history(model)\n# for i in range(epochs):\n# for index in range(int(len(input_texts) / batch_size) + 1):\n# input_batch = input_texts[index:(index * batch_size)]\n# output_batch = target_texts[index:(index * batch_size)]\n# encoder_input_data, decoder_input_data, decoder_target_data = loop_batch(input_batch, output_batch,\n# input_token_index, target_token_index)\n# loss = model.train_on_batch([encoder_input_data, decoder_input_data], decoder_target_data)\n# print(\"Epoch : \" + str(i) + \" Batch : \" + str(index) + \" Loss : \" + str(loss))\n# Save model\nmodel.save('s2s_ex.h5')\n\n# Next: inference mode (sampling).\n# Here's the drill:\n# 1) encode input and retrieve initial decoder state\n# 2) run one step of decoder with this initial state\n# and a \"start of sequence\" token as target.\n# Output will be the next target token\n# 3) Repeat with the current target token and current states\n\n# Define sampling models\nencoder_model_1 = Model(encoder_inputs, encoder_outputs_1)\ny_1 = Input(shape=(None, latent_dim))\n# encoder_y_1, encoder_h_1,encoder_c_1=encoder_outputs_1\n# encoder_model_2 = Model([encoder_y_1], encoder_states_2)\n_, encoder_state_h_2, encoder_state_c_2 = encoder_2(y_1)\nencoder_states_2 = [encoder_state_h_2, encoder_state_c_2]\nencoder_model_2 = Model([y_1], encoder_states_2)\n\ndecoder_state_input_h = Input(shape=(latent_dim,))\ndecoder_state_input_c = Input(shape=(latent_dim,))\ndecoder_states_inputs = [decoder_state_input_h, decoder_state_input_c]\nembed_output = Embedding(num_decoder_tokens, latent_dim)(decoder_inputs)\ndecoder_outputs_1, decoder_state_h_1, decoder_state_c_1 = decoder_lstm_1(\n embed_output, initial_state=decoder_states_inputs)\ndecoder_model_1 = Model(\n [decoder_inputs] + decoder_states_inputs,\n [decoder_outputs_1] + [decoder_state_h_1] + [decoder_state_c_1])\n\ndecoder_outputs_2, decoder_state_h_2, decoder_state_c_2 = decoder_lstm_2(\n y_1, initial_state=decoder_states_inputs)\ndecoder_outputs_2 = decoder_dense(decoder_outputs_2)\ndecoder_model_2 = Model(\n [y_1] + decoder_states_inputs,\n [decoder_outputs_2] + [decoder_state_h_2] + [decoder_state_c_2])\n\n\ndef get_words_from_token_model(token_model, index):\n for key, value in 
token_model.word_index.items():\n if value == index:\n return key\n\n\ndef decode_input_seq(input_seq):\n encoded_sentence = ''\n for index in input_seq[0]:\n encoded_sentence += get_words_from_token_model(encoder_token_model, index)\n return encoded_sentence\n\n\ndef decode_sequence(input_seq):\n # Encode the input as state vectors.\n encoder_y_1, encoder_h_1, encoder_c_1 = encoder_model_1.predict(input_seq)\n encoder_states_1 = [encoder_h_1, encoder_c_1]\n encoder_states_2 = encoder_model_2.predict(encoder_y_1)\n\n # Generate empty target sequence of length 1.\n # target_seq = np.zeros((1, 1, num_decoder_tokens))\n # Populate the first character of target sequence with the start character.\n # target_seq[0, 0, decoder_token_model.word_index[START_SEQ]] = 1\n target_seq = np.zeros((1, 1))\n target_seq[0, 0] = decoder_token_model.word_index[START_SEQ]\n\n # Sampling loop for a batch of sequences\n # (to simplify, here we assume a batch of size 1).\n stop_condition = False\n decoded_sentence = ''\n decoder_state_1_input = encoder_states_1\n decoder_state_2_input = encoder_states_2\n while not stop_condition:\n decoder_output_1, decoder_state_h_1, decoder_state_c_1 = decoder_model_1.predict(\n [target_seq] + decoder_state_1_input)\n output_tokens, decoder_state_h_2, decoder_state_c_2 = decoder_model_2.predict(\n [decoder_output_1] + decoder_state_2_input)\n\n # Sample a token\n sampled_token_index = np.argmax(output_tokens[0, -1, :])\n sampled_char = get_words_from_token_model(decoder_token_model, sampled_token_index)\n if sampled_char == None:\n sampled_char = \"\"\n decoded_sentence += sampled_char\n decoded_sentence += \" \"\n\n # Exit condition: either hit max length\n # or find stop character.\n if (sampled_char == END_SEQ or\n len(decoded_sentence) > max_length + 2):\n stop_condition = True\n\n # Update the target sequence (of length 1).\n target_seq = np.zeros((1, 1))\n target_seq[0, 0] = sampled_token_index\n\n # Update states\n decoder_state_1_input = [decoder_state_h_1, decoder_state_c_1]\n decoder_state_2_input = [decoder_state_h_2, decoder_state_c_2]\n\n return decoded_sentence\n\n\nfor seq_index in range(100):\n # Take one sequence (part of the training test)\n # for trying out decoding.\n input_seq = encoder_input_data[seq_index: seq_index + 1]\n decoded_sentence = decode_sequence(input_seq)\n print('-')\n print('Input sentence:', source_data[seq_index])\n print('Decoded sentence:', decoded_sentence)\n","sub_path":"seq2seq/word_embed/2_LSTM_all_.py","file_name":"2_LSTM_all_.py","file_ext":"py","file_size_in_byte":11037,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"421695577","text":"import pickle as pkl\nfrom xml.etree import ElementTree\nimport csv\nimport re\nfrom format import *\nimport argparse\n\nnumber = re.compile(r'NUMBER_([0-9]*)')\nanswer = re.compile(r'ANSWER_([0-9]*)')\natt = re.compile(r'ATTENTION-CHECK.*')\n\n\n\ndef convert(path_pkl, path_out, keep_attention = True, add_hitid=False):\n\n pik = pkl.load(open(path_pkl, 'rb'))\n #print(pik)\n with open(path_out, 'w', newline='') as csvfile:\n spamwriter = csv.writer(csvfile, delimiter=',')\n column_names = ['UID', 'ANSWER', 'ANNOTATOR']\n if add_hitid:\n column_names.append('HITId')\n spamwriter.writerow(column_names)\n workers = {}\n comments = []\n times = [['HITId', 'WorkerId', 'Timer']]\n att_chks = [['UID', 'ANSWER', 'ANNOTATOR']]\n\n for hit in pik:\n hit_id = None\n for assign in hit['Assignments']:\n hit_id = assign['HITId']\n\n 
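# --- Aside on the NUMBER_i / ANSWER_i pairing above: the two regexes join an
# assignment's free-text fields back into (question, answer, worker) rows via
# the shared trailing index. A tiny self-contained check of that logic (field
# names follow the convert() loop; the worker id is hypothetical):
import re

number = re.compile(r'NUMBER_([0-9]*)')
answer = re.compile(r'ANSWER_([0-9]*)')

fields = [('NUMBER_3', 'How tall is it?'), ('ANSWER_3', 'about 2m')]
rows = {}
for key, text in fields:
    m = number.match(key) or answer.match(key)
    rows.setdefault(m.group(1), ['', '', 'WORKER_X'])
    rows[m.group(1)][0 if number.match(key) else 1] = text
print(rows)  # {'3': ['How tall is it?', 'about 2m', 'WORKER_X']}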
comments.append(hit_id + '\\n')\n worker = assign['WorkerId']\n times.append([hit_id, worker, assign['SubmitTime'] - assign['AcceptTime']])\n root = ElementTree.fromstring(assign['Answer'])\n lis = {}\n for i in range(len(root)):\n l = []\n for j in range(len(root[i])):\n l.append(root[i][j].text)\n match_n = number.match(l[0])\n if match_n is not None:\n if match_n[1] in lis.keys():\n lis[match_n[1]][0] = l[1] if l[1] != \"ATTENTION-CHECK\" else f\"{l[1]}-{assign['HITId']}-{match_n[1]}\"\n else:\n lis[match_n[1]] = [l[1] if l[1] != \"ATTENTION-CHECK\" else f\"{l[1]}-{assign['HITId']}-{match_n[1]}\", '', worker]\n if add_hitid:\n if len(lis[match_n[1]]) < 4:\n lis[match_n[1]].append(hit_id)\n else:\n match_a = answer.match(l[0])\n if match_a is not None:\n if match_a[1] in lis.keys():\n lis[match_a[1]][1] = l[1] if l[1] != \"ATTENTION-CHECK\" else f\"{l[1]}-{assign['HITId']}-{match_a[1]}\"\n else:\n lis[match_a[1]] = ['', l[1] if l[1] != \"ATTENTION-CHECK\" else f\"{l[1]}-{assign['HITId']}-{match_a[1]}\", worker]\n if add_hitid:\n if len(lis[match_a[1]]) < 4:\n lis[match_a[1]].append(hit_id)\n else:\n if l[0] == 'comment':\n comments.append(f\"{worker}, {l[1]}\\n\")\n if l[0] == 'participant':\n workers[worker] = l[1]\n print(lis)\n\n for k, v in lis.items():\n if keep_attention:\n spamwriter.writerow(v)\n else:\n m = att.match(v[0])\n if m is None:\n spamwriter.writerow(v)\n else:\n att_chks.append(v)\n\n write_csv('attention_check.csv', att_chks)\n f = open(f\"comments.txt\", 'w')\n f.writelines(comments)\n f.close()\n pkl.dump(workers, open('workers.pkl', 'wb'))\n write_csv('timers.csv', times)\n\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('path_in', type=str, help=\"Path of the .pkl containing the retrieved hits\")\n parser.add_argument('path_out', type=str, help=\"Path of .csv where the converted data will be written\")\n parser.add_argument('-a', '--keep_attention', action='store_true', default=False, help=\"Whether to keep the attention checks in the output or in a seperate file (default: False, which means attention in a seperate file)\")\n parser.add_argument('-i', '--hit_id', action='store_true', default=False, help=\"Whether to add hit_id to the csv (default : False)\")\n args = parser.parse_args()\n convert(args.path_in, args.path_out, keep_attention=args.keep_attention, add_hitid=args.hit_id)","sub_path":"python/format/retrieve_to_csv.py","file_name":"retrieve_to_csv.py","file_ext":"py","file_size_in_byte":4281,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"102975214","text":"from PyQt5 import QtWidgets\n\nfrom point_spectra_gui.ui.SpecDeriv import Ui_Form\nfrom point_spectra_gui.util.Modules import Modules\nfrom point_spectra_gui.util.spectral_data import spectral_data\nfrom libpyhat.transform.deriv import deriv\n\nclass SpecDeriv(Ui_Form, Modules):\n def setupUi(self, Form):\n super().setupUi(Form)\n Modules.setupUi(self, Form)\n\n def get_widget(self):\n return self.formGroupBox\n\n def connectWidgets(self):\n self.setComboBox(self.chooseDataToDerivComboBox, self.datakeys)\n\n def run(self):\n datakey = self.chooseDataToDerivComboBox.currentText()\n new_datakey = datakey + ' - Derivative'\n self.datakeys.append(new_datakey)\n self.data[new_datakey].deriv()\n print(\"Derivative Applied\")\n\n\nif __name__ == \"__main__\":\n import sys\n\n app = QtWidgets.QApplication(sys.argv)\n\n Form = QtWidgets.QWidget()\n ui = SpecDeriv()\n ui.setupUi(Form)\n Form.show()\n 
sys.exit(app.exec_())\n","sub_path":"point_spectra_gui/core/SpecDeriv.py","file_name":"SpecDeriv.py","file_ext":"py","file_size_in_byte":983,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"228859882","text":"# coding: utf-8\n'''\nTrain a (set of) WAC models. Modified template for a specific parameter\nsetting.\n'''\n\nfrom __future__ import division\nimport sys\nimport argparse\nimport configparser\nimport json\nimport os\nfrom os.path import isfile\nfrom joblib import Parallel, delayed\nimport numpy as np\nimport pandas as pd\nfrom sklearn import linear_model\n\nsys.path.append('../../Utils')\nfrom utils import print_timestamped_message\nsys.path.append('../WAC_Utils')\nfrom wac_utils import filter_X_by_filelist, filter_refdf_by_filelist\nfrom wac_utils import filter_relational_expr\nfrom wac_utils import create_word2den, make_X_id_index, make_mask_matrix\nfrom wac_utils import train_this_word\n\n# The first features in the image feature Xs encode the region ID\nID_FEATS = 3\n\nN_JOBS = 2 # how many threads to run in parallel during training\n\n\ndef main(config):\n basename = os.path.splitext(os.path.basename(__file__))[0]\n print_timestamped_message('Starting to train model %s'\n % (basename))\n\n outfile_base = config.get('runtime', 'out_dir') + '/' + basename\n if isfile(outfile_base + '.npz'):\n print('%s exists. Will not overwrite. ABORTING.' % (outfile_base + '.npz'))\n return\n\n # Model description:\n model = {\n 'rcorp': 'refcoco', # ref corpus\n 'cnn': 'vgg19-fc2', # CNN used for vision feats\n 'rel': 'excl', # exclude relational expressions\n 'wrdl': 'min', # wordlist: minimal n occurrences...\n 'wprm': 40, # ... 40 times\n 'clsf': 'logreg-l1', # logistic regression, l1 regularized\n 'nneg': 20000, # maximally 20k neg instances\n 'nsrc': 'randmax', # ... randomly selected\n 'notes': ''\n }\n classifier = linear_model.LogisticRegression\n classf_params = {'penalty': 'l1', 'warm_start': True}\n\n dsgv_home = config.get('DSGV-PATHS', 'dsgv_home')\n preproc_path = dsgv_home + '/Preproc/PreprocOut/'\n feats_path = dsgv_home + '/ExtractFeats/ExtractOut/'\n\n # ========================= DATA =================================\n print_timestamped_message('loading up data.', indent=4)\n\n with open(preproc_path + 'refcoco_splits.json', 'r') as f:\n rc_splits = json.load(f)\n\n # Image features\n X = np.load(feats_path + 'mscoco_bbdf_vgg19-fc2.npz')['arr_0']\n X_t = filter_X_by_filelist(X, rc_splits['train'])\n\n # Referring expressions\n refcoco_refdf = pd.read_json(preproc_path + 'refcoco_refdf.json.gz',\n typ='frame', orient='split',\n compression='gzip')\n refdf_train = filter_refdf_by_filelist(refcoco_refdf, rc_splits['train'])\n\n refdf_train = filter_relational_expr(refdf_train)\n\n # ======================= Intermediate ==============================\n print_timestamped_message('creating intermediate data structures',\n indent=4)\n word2den = create_word2den(refdf_train)\n X_idx = make_X_id_index(X_t)\n mask_matrix = make_mask_matrix(X_t, X_idx, word2den, word2den.keys())\n\n # ======================= Wordlist ==============================\n print_timestamped_message('selecting words to train models for',\n indent=4)\n min_freq = model['wprm']\n counts = mask_matrix.sum(axis=1)\n wordlist = np.array(list(word2den.keys()))[counts > min_freq]\n\n # ======================= TRAIN ==============================\n print_timestamped_message('and training the %d WACs!' 
% (len(wordlist)),\n indent=4)\n\n wacs = Parallel(n_jobs=N_JOBS, require='sharedmem', prefer='threads')\\\n (delayed(train_this_word)(X_t, word2den, mask_matrix,\n model['nneg'],\n classifier, classf_params,\n this_word)\n for this_word in wordlist)\n print('') # newline, because train_this_word prints . as progress bar\n\n # ======================= SAVE ==============================\n print_timestamped_message('writing to disk',\n indent=4)\n weight_matrix = np.stack([np.append(this_wac.coef_, this_wac.intercept_)\n for this_wac in [w[3] for w in wacs]])\n wordinfo = [e[:-1] for e in wacs]\n with open(outfile_base + '.json', 'w') as f:\n json.dump((model, wordinfo), f)\n np.savez_compressed(outfile_base + '.npz', weight_matrix)\n\n print_timestamped_message('DONE!')\n\n\n#\n#\n#\n# ======== MAIN =========\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(\n description='Train a (set of) WAC model(s)')\n parser.add_argument('-c', '--config_file',\n help='''\n path to config file specifying data paths.\n default: '../../Config/default.cfg' ''',\n default='../../Config/default.cfg')\n parser.add_argument('-o', '--out_dir',\n help='''\n where to put the resulting files.\n default: './ModelsOut' ''')\n args = parser.parse_args()\n\n config = configparser.ConfigParser()\n\n try:\n with open(args.config_file, 'r', encoding='utf-8') as f:\n config.read_file(f)\n except IOError:\n print('no config file found at %s' % (args.config_file))\n sys.exit(1)\n\n if args.out_dir:\n out_dir = args.out_dir\n elif config.has_option('DSGV-PATHS', 'train_out_dir'):\n out_dir = config.get('DSGV-PATHS', 'train_out_dir')\n else:\n out_dir = './ModelsOut'\n\n config.add_section('runtime')\n config.set('runtime', 'out_dir', out_dir)\n\n main(config)\n","sub_path":"WACs/TrainWACs/mod02_refcoco_vgg.py","file_name":"mod02_refcoco_vgg.py","file_ext":"py","file_size_in_byte":5807,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"395098055","text":"import re\n\nsymbols = ['{', '}', '(', ')', '[', ']', '.', ',', ';', '+', '-', '*', '/', '&', '|', '<', '>', '=', '~']\n\n\ndef __is_int(s):\n try:\n int(s)\n return True\n except ValueError:\n return False\n\n\ndef __is_keyword(s):\n return s in ['class', 'constructor', 'function', 'method', 'field', 'static', 'var', 'int', 'char', 'boolean',\n 'void', 'true', 'false', 'null', 'this', 'let', 'do', 'if', 'else', 'while', 'return']\n\n\ndef __is_string(s):\n return len(s) > 2 and s[0] == '\"' and s.find('\"', 1) != -1\n\n\ndef __is_symbol(s):\n return s in symbols\n\n\ndef __is_identifier(s):\n return re.fullmatch('[a-zA-Z_]\\w*', s) is not None\n\n\ndef __clear_comments(s):\n is_string = False\n is_slash_comment = False\n is_star_comment = False\n out_string = \"\"\n for i, c in enumerate(s):\n if s[i] == '\"' and not (is_slash_comment or is_star_comment):\n is_string = not is_string\n\n if not is_string and not (is_slash_comment or is_star_comment):\n # Slash comment type\n if s[i:i + 2] == '//':\n is_slash_comment = True\n\n # Star comment type\n if s[i:i + 2] == '/*':\n is_star_comment = True\n\n if not (is_star_comment or is_slash_comment):\n out_string += c\n\n if not is_string:\n # Closing one line comment\n if is_slash_comment and c == '\\n':\n is_slash_comment = False\n\n # Closing slash comment\n elif is_star_comment and s[i - 1:i + 1] == '*/':\n is_star_comment = False\n out_string += \" \"\n\n return out_string\n\n\ndef tokenize(str_input):\n str_output = \"<tokens>\" + \"\\n\"\n pat = 
'(\".*?\"|[^a-zA-Z0-9_]|{})'.format('|'.join('\\{}'.format(k) for k in symbols))\n for t in [k.strip() for k in re.split(pat, __clear_comments(str_input)) if k.strip() != \"\"]:\n str_output += __get_token(t) + \"\\n\"\n\n str_output += \"</tokens>\" + \"\\n\"\n return str_output\n\n\ndef __get_token(t):\n s = \"<{0}> {1} </{0}>\"\n if __is_keyword(t):\n return s.format(\"keyword\", t)\n elif __is_symbol(t):\n if t in ['>', '<', '\"\"', \"&\"]:\n return s.format(\"symbol\", {\n '>': \"&gt;\",\n '<': \"&lt;\",\n '\"\"': \"&quot;\",\n '&': \"&amp;\"\n }[t])\n else:\n return s.format(\"symbol\", t)\n elif __is_int(t):\n return s.format(\"integerConstant\", t)\n elif __is_string(t):\n return s.format(\"stringConstant\",\n t[1:t.find('\"', 1)].replace('&', '&amp;').replace('<', '&lt;').replace('>', '&gt;'))\n elif __is_identifier(t):\n return s.format(\"identifier\", t)\n","sub_path":"projects/10/Syntax analysis/Tokenizer.py","file_name":"Tokenizer.py","file_ext":"py","file_size_in_byte":2722,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"259754800","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Apr 17 13:08:56 2020\n\n@author: sega01\n\"\"\"\n\nf = open(\"msos.txt\", \"r\")\nmsos=[]\nfor x in f:\n a=x.split()\n l=[]\n for new in a[2:]:\n l.append(int(new))\n msos.append(l)\nprint(msos)","sub_path":"PM_docker-compose/PM_module/classes/test_read.py","file_name":"test_read.py","file_ext":"py","file_size_in_byte":236,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"479824867","text":"__author__ = 'Luke Merrett'\n\nimport argparse\nfrom analytics import playtime\n\ndef get_cmd_arguments():\n parser = argparse.ArgumentParser()\n\n parser.add_argument(\n '-np', '--never-played',\n dest='never_played',\n action='store_true',\n help='Only returns games you\\'ve never played'\n )\n\n parser.add_argument(\n '-i', '--installed',\n dest='installed',\n action='store_true',\n help='Only returns games that are currently installed'\n )\n\n return parser.parse_args()\n\nif __name__ == '__main__':\n args = get_cmd_arguments()\n\n game_found, game_to_play = playtime.choose_a_random_game_to_play(\n args.never_played,\n args.installed\n )\n\n if not game_found:\n print('Couldn\\'t find a game to play!')\n else:\n print('You should totally play: \"' + game_to_play + '\"')\n input()\n","sub_path":"random_game_chooser.py","file_name":"random_game_chooser.py","file_ext":"py","file_size_in_byte":875,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"17224596","text":"import datetime\n\nfrom sqlalchemy import func\nfrom flask import Blueprint, request, render_template\n\nfrom app import app, db\n\nfrom .models import Post, Tag, Comment, tags\nfrom .forms import CommentForm\n\nmod_posts = Blueprint('posts', __name__, url_prefix='/posts')\n\ndef sidebar_data():\n recent = Post.query.order_by(\n Post.publish_date.desc()\n ).limit(5).all()\n\n # SQL func library:\n # http://docs.sqlalchemy.org/en/rel_1_0/core/sqlelement.html#sqlalchemy.sql.expression.func\n top_tags = db.session.query(\n Tag, func.count(tags.c.post_id).label('total')\n ).join(\n tags\n ).group_by(Tag).order_by('total DESC').limit(5).all()\n\n return recent, top_tags\n\n\n@mod_posts.route('/')\n@mod_posts.route('/<int:page>')\ndef home(page=1):\n posts = Post.query.order_by(\n Post.publish_date.desc()\n ).paginate(page, 10)\n recent, top_tags = sidebar_data()\n\n return render_template('posts/home.html', posts=posts, recent=recent,\n 
top_tags=top_tags)\n\n\n@mod_posts.route('/post/<int:post_id>', methods=('GET', 'POST'))\ndef post(post_id):\n form = CommentForm()\n \n if form.validate_on_submit():\n new_comment = Comment()\n new_comment.name = form.name.data\n new_comment.text = form.text.data\n new_comment.post_id = post_id\n new_comment.date = datetime.datetime.now()\n\n db.session.add(new_comment)\n db.session.commit()\n\n post = Post.query.get_or_404(post_id)\n tags = post.tags\n comments = post.comments.order_by(Comment.date.desc()).all()\n recent, top_tags = sidebar_data()\n\n return render_template('posts/post.html', post=post, tags=tags, comments=comments,\n recent=recent, top_tags=top_tags, form=form)\n\n\n@mod_posts.route('/tag/<string:tag_name>')\ndef tag(tag_name):\n tag = Tag.query.filter_by(title=tag_name).first_or_404()\n posts = tag.posts.order_by(Post.publish_date.desc()).all()\n recent, top_tags = sidebar_data()\n\n return render_template('posts/tag.html', tag=tag, posts=posts, recent=recent,\n top_tags=top_tags)\n\n","sub_path":"app/posts/controllers.py","file_name":"controllers.py","file_ext":"py","file_size_in_byte":2104,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"341603861","text":"from django import forms\nfrom django.utils import timezone\n\nfrom .models import Cpu, CpuCore, Memory, Storage, StoragePartition\n\nimport json\n\n\nclass BaseForm(forms.Form):\n time = forms.DateTimeField(input_formats=['%Y%m%d %H:%M:%S'])\n\n def __init__(self, *args, **kwargs):\n self.user = kwargs.pop('user')\n\n super(BaseForm, self).__init__(*args, **kwargs)\n\n def clean_mac_addr(self):\n mac_addr = self.cleaned_data['mac_addr'].split(':')\n\n if len(mac_addr) != 6:\n raise forms.ValidationError('Invalid format.')\n\n return '-'.join(mac_addr)\n\n\nclass CpuForm(BaseForm, forms.ModelForm):\n core = forms.CharField()\n\n class Meta:\n model = Cpu\n exclude = ('received_at', )\n\n def clean_core(self):\n core_list = self.cleaned_data['core']\n try:\n core_list = json.loads(core_list)\n for core in core_list:\n core['usage'] = float(core['usage'])\n except:\n raise forms.ValidationError('Invalid format.')\n\n return core_list\n\n def save(self, commit=True):\n new_cpu = super(CpuForm, self).save(commit=False)\n new_cpu.user = self.user\n new_cpu.save()\n for i, core in enumerate(self.cleaned_data['core']):\n CpuCore.objects.create(num=i, cpu=new_cpu, usage=core['usage'])\n\n return new_cpu\n\n\nclass MemoryForm(BaseForm, forms.ModelForm):\n class Meta:\n model = Memory\n exclude = ('received_at', )\n\n def save(self, commit=True):\n new_mem = super(MemoryForm, self).save(commit=False)\n new_mem.user = self.user\n new_mem.save()\n\n return new_mem\n\n\nclass StorageForm(BaseForm, forms.ModelForm):\n partition = forms.CharField()\n\n class Meta:\n model = Storage\n exclude = ('received_at', )\n\n def clean_partition(self):\n partition_list = self.cleaned_data['partition']\n try:\n partition_list = json.loads(partition_list)\n for partition in partition_list:\n partition['total'] = float(partition['total'])\n partition['used'] = float(partition['used'])\n partition['unused'] = float(partition['unused'])\n except:\n raise forms.ValidationError('Invalid format.')\n\n return partition_list\n\n def save(self, commit=True):\n new_storage = super(StorageForm, self).save(commit=False)\n new_storage.user = self.user\n new_storage.save()\n for partition in self.cleaned_data['partition']:\n StoragePartition.objects.create(\n storage=new_storage,\n mount=partition['mount'],\n total=partition['total'],\n 
used=partition['used'],\n unused=partition['unused'],\n )\n\n return new_storage\n","sub_path":"client_api/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":2816,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"301725285","text":"\nfrom bootstrap_datepicker.widgets import DatePicker\nfrom django import forms\nfrom .models import lporecords\n\nclass Lpoform(forms.ModelForm):\n \n class Meta:\n model = lporecords\n fields = '__all__'\n labels = {\n 'order_desc':'Order Description',\n 'order_to':'Vendor',\n 'order_no':'Order Number',\n 'order_value':'Order Value', \n }\n \n def __init__(self, *args, **kwargs):\n super(Lpoform,self).__init__(*args, **kwargs)\n self.fields['currency'].empty_label = 'Select Currency'\n self.fields['currency'].required = True\n self.fields['order_date'].required = True\n self.fields['validity_date'].required = True\n ","sub_path":"lpo_app/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":745,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"120861431","text":"import sys\n# Find jVMC package\nsys.path.append(sys.path[0] + \"/..\")\n\nimport jax\nfrom jax.config import config\nconfig.update(\"jax_enable_x64\", True)\nfrom jax import jit, grad, vmap\nfrom jax import numpy as jnp\nfrom jax import random\nfrom jax.tree_util import tree_flatten, tree_unflatten\nfrom jax.flatten_util import ravel_pytree\nimport flax\nfrom flax import nn\nimport numpy as np\n\nimport jVMC\nimport jVMC.global_defs as global_defs\nfrom jVMC.nets import CpxRBM\nfrom jVMC.nets import RBM\nimport jVMC.mpi_wrapper as mpi\n\nfrom functools import partial\nimport collections\nimport time\n\n\ndef flat_gradient(fun, params, arg):\n gr = grad(lambda p, y: jnp.real(fun.apply(p, y)))(params, arg)\n gr = tree_flatten(jax.tree_util.tree_map(lambda x: x.ravel(), gr))[0]\n gi = grad(lambda p, y: jnp.imag(fun.apply(p, y)))(params, arg)\n gi = tree_flatten(jax.tree_util.tree_map(lambda x: x.ravel(), gi))[0]\n return jnp.concatenate(gr) + 1.j * jnp.concatenate(gi)\n\n\ndef flat_gradient_real(fun, params, arg):\n g = grad(lambda p, y: jnp.real(fun.apply(p, y)))(params, arg)\n g = tree_flatten(jax.tree_util.tree_map(lambda x: x.ravel(), g))[0]\n return jnp.concatenate(g)\n\n\nclass NQS:\n \"\"\"Wrapper class providing basic functionality of variational states.\n\n This class can operate in two modi:\n\n #. Single-network ansatz\n\n Quantum state of the form :math:`\\psi_\\\\theta(s)\\equiv\\exp(r_\\\\theta(s))`, \\\n where the network :math:`r_\\\\theta` is\n\n a) holomorphic, i.e., parametrized by complex valued parameters :math:`\\\\theta`.\n\n b) real, i.e., parametrized by real valued parameters :math:`\\\\theta`.\n\n #. Two-network ansatz\n\n Quantum state of the form \n :math:`\\psi_\\\\theta(s)\\equiv\\exp(r_{\\\\theta_r}(s)+i\\\\varphi_{\\\\theta_\\\\phi}(s))` \\\n with an amplitude network :math:`r_{\\\\theta_{r}}` and a phase network \\\n :math:`\\\\varphi_{\\\\theta_\\phi}` \\\n parametrized by real valued parameters :math:`\\\\theta_r,\\\\theta_\\\\phi`.\n\n Initializer arguments:\n\n * ``nets``: Variational network or tuple of networks.\n * ``batchSize``: Batch size for batched network evaluation. 
Choice \\\n of this parameter impacts performance: with too small values performance \\\n is limited by memory access overheads, too large values can lead \\\n to \"out of memory\" issues.\n * ``seed``: Seed for the PRNG to initialize the network parameters.\n \"\"\"\n\n def __init__(self, nets, batchSize=1000, seed=1234):\n \"\"\"Initializes NQS class.\n\n This class can operate in two modi:\n\n #. Single-network ansatz\n\n Quantum state of the form :math:`\\psi_\\\\theta(s)\\equiv\\exp(r_\\\\theta(s))`, \\\n where the network :math:`r_\\\\theta` is\n\n a) holomorphic, i.e., parametrized by complex valued parameters :math:`\\\\theta`.\n\n b) real, i.e., parametrized by real valued parameters :math:`\\\\theta`.\n\n #. Two-network ansatz\n\n Quantum state of the form \n :math:`\\psi_\\\\theta(s)\\equiv\\exp(r_{\\\\theta_r}(s)+i\\\\varphi_{\\\\theta_\\\\phi}(s))` \\\n with an amplitude network :math:`r_{\\\\theta_{r}}` and a phase network \\\n :math:`\\\\varphi_{\\\\theta_\\phi}` \\\n parametrized by real valued parameters :math:`\\\\theta_r,\\\\theta_\\\\phi`.\n\n Args:\n\n * ``nets``: Variational network or tuple of networks.\\\n A network has to be registered as pytree node and provide \\\n a ``__call__`` function for evaluation.\n * ``batchSize``: Batch size for batched network evaluation. Choice \\\n of this parameter impacts performance: with too small values performance \\\n is limited by memory access overheads, too large values can lead \\\n to \"out of memory\" issues.\n * ``seed``: Seed for the PRNG to initialize the network parameters.\n \"\"\"\n\n # The net arguments have to be instances of flax.nn.Model\n self.realNets = False\n self.holomorphic = False\n self.flat_gradient_function = flat_gradient_real\n\n self.initialized = False\n self.seed = seed\n self.parameters = None\n\n if not isinstance(nets, collections.abc.Iterable):\n self.net = nets\n else:\n self.net = list(nets)\n self.realNets = True\n\n\n # Check whether wave function can generate samples\n self._isGenerator = False\n sampleNet = None\n if self.realNets:\n sampleNet = self.net[0]\n else:\n sampleNet = self.net\n\n if \"sample\" in dir(sampleNet):\n if callable(sampleNet.sample):\n self._isGenerator = True\n\n self.batchSize = batchSize\n\n # Need to keep handles of jit'd functions to avoid recompilation\n self.evalJitdNet1 = global_defs.pmap_for_my_devices(self._eval, in_axes=(None, None, 0, None), static_broadcasted_argnums=(0, 3))\n self.evalJitdNet2 = global_defs.pmap_for_my_devices(self._eval, in_axes=(None, None, 0, None), static_broadcasted_argnums=(0, 3))\n self.evalJitdReal = global_defs.pmap_for_my_devices(self._eval_real, in_axes=(None, None, 0, None), static_broadcasted_argnums=(0, 3))\n self._get_gradients_net1_pmapd = global_defs.pmap_for_my_devices(self._get_gradients, in_axes=(None, None, 0, None, None), static_broadcasted_argnums=(0, 3, 4))\n self._get_gradients_net2_pmapd = global_defs.pmap_for_my_devices(self._get_gradients, in_axes=(None, None, 0, None, None), static_broadcasted_argnums=(0, 3, 4))\n self._append_gradients = global_defs.pmap_for_my_devices(lambda x, y: jnp.concatenate((x[:, :], 1.j * y[:, :]), axis=1), in_axes=(0, 0))\n self._sample_jitd = {}\n\n # ** end def __init__\n\n\n def init_net(self, s):\n\n if not self.initialized:\n \n if not isinstance(self.net, collections.abc.Iterable):\n \n self.parameters = self.net.init(jax.random.PRNGKey(self.seed), s[0,0,...])\n \n if np.concatenate([p.ravel() for p in tree_flatten(self.parameters)[0]]).dtype == np.complex128:\n 
self.holomorphic = True\n else:\n self.flat_gradient_function = flat_gradient\n\n self.paramShapes = [(p.size, p.shape) for p in tree_flatten(self.parameters)[0]]\n self.netTreeDef = jax.tree_util.tree_structure(self.parameters)\n self.numParameters = jnp.sum(jnp.array([p.size for p in tree_flatten(self.parameters)[0]]))\n\n else:\n \n keys = jax.random.split(jax.random.PRNGKey(self.seed), 2)\n self.parameters = [n.init(k, s[0,0,...]) for k,n in zip(keys, self.net)]\n\n self.paramShapes = [[(p.size, p.shape) for p in tree_flatten(params)[0]] for params in self.parameters]\n self.netTreeDef = [jax.tree_util.tree_structure(params) for params in self.parameters]\n self.numParameters1 = jnp.sum(jnp.array([p.size for p in tree_flatten(self.parameters[0])[0]]))\n self.numParameters2 = jnp.sum(jnp.array([p.size for p in tree_flatten(self.parameters[1])[0]]))\n self.numParameters = self.numParameters1 + self.numParameters2\n \n\n self.initialized = True\n\n # ** end init_net\n\n\n def __call__(self, s):\n \"\"\"Evaluate variational wave function.\n\n Compute the logarithmic wave function coefficients :math:`\\ln\\psi(s)` for \\\n computational configurations :math:`s`.\n\n Args:\n\n * ``s``: Array of computational basis states.\n\n Returns:\n Logarithmic wave function coefficients :math:`\\ln\\psi(s)`.\n\n :meta public:\n \"\"\"\n\n self.init_net(s)\n\n if self.realNets:\n logMod = self.evalJitdNet1(self.net[0], self.parameters[0], s, self.batchSize)\n phase = self.evalJitdNet2(self.net[1], self.parameters[1], s, self.batchSize)\n return logMod + 1.j * phase\n else:\n return self.evalJitdNet1(self.net, self.parameters, s, self.batchSize)\n\n # ** end def __call__\n\n def real_coefficients(self, s):\n \"\"\"Evaluate real part of variational wave function.\n\n Compute the real part of the logarithmic wave function coefficients, \\\n :math:`\\\\text{Re}(\\ln\\psi(s))`, for computational configurations :math:`s`.\n\n Args:\n\n * ``s``: Array of computational basis states.\n\n Returns:\n Real part of logarithmic wave function coefficients \\\n :math:`\\\\text{Re}(\\ln\\psi(s))`.\n \"\"\"\n\n if self.realNets:\n return self.evalJitdNet1(self.net[0], self.parameters[0], s, self.batchSize)\n else:\n return self.evalJitdReal(self.net, self.parameters, s, self.batchSize)\n\n # ** end def real_coefficients\n\n def get_sampler_net(self):\n \"\"\"Returns net used for sampling\n\n Returns:\n Variational network that yields :math:`\\\\text{Re}(\\log\\psi(s))`.\n \"\"\"\n\n if self.realNets:\n return self.net[0], self.parameters[0]\n else:\n return self.net, self.parameters\n\n # ** end def get_sampler_net\n\n def _get_gradients(self, net, params, s, batchSize, flat_grad):\n\n def create_batches(s, b):\n\n append = b * ((s.shape[0] + b - 1) // b) - s.shape[0]\n pads = [(0, append), ] + [(0, 0)] * (len(s.shape) - 1)\n\n return jnp.pad(s, pads).reshape((-1, b) + s.shape[1:])\n\n sb = create_batches(s, batchSize)\n\n def scan_fun(c, x):\n return c, jax.vmap(lambda y: flat_grad(net, params, y), in_axes=(0,))(x)\n\n g = jax.lax.scan(scan_fun, None, sb)[1]\n\n g = g.reshape((-1,) + g.shape[2:])\n\n return g[:s.shape[0]]\n\n\n def gradients(self, s):\n \"\"\"Compute gradients of logarithmic wave function.\n\n Compute gradient of the logarithmic wave function coefficients, \\\n :math:`\\\\nabla\\ln\\psi(s)`, for computational configurations :math:`s`.\n\n Args:\n\n * ``s``: Array of computational basis states.\n\n Returns:\n A vector containing derivatives :math:`\\partial_{\\\\theta_k}\\ln\\psi(s)` \\\n with respect to 
each variational parameter :math:`\\\\theta_k` for each \\\n input configuration :math:`s`.\n \"\"\"\n \n self.init_net(s)\n\n if self.realNets: # FOR REAL NETS\n\n gradOut1 = self._get_gradients_net1_pmapd(self.net[0], self.parameters[0], s, self.batchSize, self.flat_gradient_function)\n gradOut2 = self._get_gradients_net2_pmapd(self.net[1], self.parameters[1], s, self.batchSize, self.flat_gradient_function)\n\n return self._append_gradients(gradOut1, gradOut2)\n\n else: # FOR COMPLEX NET\n\n gradOut = self._get_gradients_net1_pmapd(self.net, self.parameters, s, self.batchSize, self.flat_gradient_function)\n\n if self.holomorphic:\n return self._append_gradients(gradOut, gradOut)\n\n return gradOut\n\n # ** end def gradients\n\n\n def update_parameters(self, deltaP):\n \"\"\"Update variational parameters.\n\n Sets new values of all variational parameters by adding given values.\n\n If parameters are not initialized, parameters are set to ``deltaP``.\n\n Args:\n\n * ``deltaP``: Values to be added to variational parameters.\n \"\"\"\n\n if not self.initialized:\n self.set_parameters(deltaP)\n\n if self.realNets: # FOR REAL NETS\n\n # Reshape parameter update according to net tree structure\n newParams = self._param_unflatten_real(deltaP)\n\n # Update model parameters\n for netId in [0, 1]:\n self.parameters[netId] = jax.tree_util.tree_multimap(\n jax.lax.add, self.parameters[netId],\n newParams[netId]\n )\n\n else: # FOR COMPLEX NET\n\n # Compute new parameters\n newParams = jax.tree_util.tree_multimap(\n jax.lax.add, self.parameters,\n self._param_unflatten_cpx(deltaP)\n )\n\n # Update model parameters\n self.parameters = newParams\n\n # ** end def update_parameters\n\n\n def set_parameters(self, P):\n \"\"\"Set variational parameters.\n\n Sets new values of all variational parameters.\n\n Args:\n\n * ``P``: New values of variational parameters.\n \"\"\"\n\n if not self.initialized:\n raise RuntimeError(\"Error in NQS.set_parameters(): Network not initialized. 
Evaluate net on example input for initialization.\")\n\n if self.realNets: # FOR REAL NETS\n\n newP = self._param_unflatten_real(P)\n\n # Update model parameters\n for netId in [0, 1]:\n self.parameters[netId] = newP[netId]\n\n else: # FOR COMPLEX NET\n\n # Update model parameters\n self.parameters = self._param_unflatten_cpx(P)\n\n # ** end def set_parameters\n\n def _param_unflatten_real(self, P):\n\n # Reshape parameter update according to net tree structure\n PTreeShape = [[], []]\n start = 0\n for netId in [0, 1]:\n for s in self.paramShapes[netId]:\n PTreeShape[netId].append(P[start:start + s[0]].reshape(s[1]))\n start += s[0]\n\n # Return unflattened parameters\n return (tree_unflatten(self.netTreeDef[0], PTreeShape[0]), tree_unflatten(self.netTreeDef[1], PTreeShape[1]))\n\n # ** end def _param_unflatten_real\n\n\n def _param_unflatten_cpx(self, P):\n\n if self.holomorphic:\n # Get complex-valued parameter update vector\n P = P[:self.numParameters] + 1.j * P[self.numParameters:]\n\n # Reshape parameter update according to net tree structure\n PTreeShape = []\n start = 0\n for s in self.paramShapes:\n PTreeShape.append(P[start:start + s[0]].reshape(s[1]))\n start += s[0]\n\n # Return unflattened parameters\n return tree_unflatten(self.netTreeDef, PTreeShape)\n\n # ** end def _param_unflatten_cpx\n\n\n def get_parameters(self):\n \"\"\"Get variational parameters.\n\n Returns:\n Array holding current values of all variational parameters.\n \"\"\"\n\n if not self.initialized:\n\n return None\n\n\n if self.realNets: # FOR REAL NETS\n\n paramOut = jnp.empty(self.numParameters, dtype=global_defs.tReal)\n\n start = 0\n for netId in [0, 1]:\n parameters, _ = tree_flatten(self.parameters[netId])\n # Flatten parameters to give a single vector\n for p in parameters:\n numParams = p.size\n paramOut = paramOut.at[start:start + numParams].set(p.reshape(-1))\n start += numParams\n\n return paramOut\n\n else: # FOR COMPLEX NET\n\n paramOut = jnp.concatenate([p.ravel() for p in tree_flatten(self.parameters)[0]])\n\n if self.holomorphic:\n paramOut = jnp.concatenate([paramOut.real, paramOut.imag])\n\n return paramOut\n\n # ** end def set_parameters\n\n def sample(self, numSamples, key, parameters=None):\n\n if self._isGenerator:\n net, params = self.get_sampler_net()\n\n if parameters is not None:\n params = parameters\n\n numSamplesStr = str(numSamples)\n\n # check whether _get_samples is already compiled for given number of samples\n if not numSamplesStr in self._sample_jitd:\n self._sample_jitd[numSamplesStr] = global_defs.pmap_for_my_devices(lambda p, n, x: net.apply(p, n, x, method=net.sample),\n static_broadcasted_argnums=(1,), in_axes=(None, None, 0))\n\n samples = self._sample_jitd[numSamplesStr](params, numSamples, key)\n\n return samples\n\n return None\n\n # ** end def sample\n\n def _sample(self, net, params, numSamples, key):\n\n return net.apply(params, numSamples, key, method=net.sample)\n\n @property\n def is_generator(self):\n return self._isGenerator\n\n def _eval_real(self, net, params, s, batchSize):\n def create_batches(configs, b):\n\n append = b * ((configs.shape[0] + b - 1) // b) - configs.shape[0]\n pads = [(0, append), ] + [(0, 0)] * (len(configs.shape) - 1)\n\n return jnp.pad(configs, pads).reshape((-1, b) + configs.shape[1:])\n\n sb = create_batches(s, batchSize)\n\n def scan_fun(c, x):\n return c, jax.vmap(lambda y: jnp.real(net.apply(params, y)), in_axes=(0,))(x)\n\n res = jax.lax.scan(scan_fun, None, jnp.array(sb))[1].reshape((-1,))\n\n return res[:s.shape[0]]\n\n def 
_eval(self, net, params, s, batchSize):\n\n def create_batches(configs, b):\n\n append = b * ((configs.shape[0] + b - 1) // b) - configs.shape[0]\n pads = [(0, append), ] + [(0, 0)] * (len(configs.shape) - 1)\n\n return jnp.pad(configs, pads).reshape((-1, b) + configs.shape[1:])\n\n sb = create_batches(s, batchSize)\n\n def scan_fun(c, x):\n return c, jax.vmap(lambda y: net.apply(params, y), in_axes=(0,))(x)\n\n res = jax.lax.scan(scan_fun, None, jnp.array(sb))[1].reshape((-1,))\n\n return res[:s.shape[0]]\n\n\n# ** end class NQS\n\n\n# Register NQS class as new pytree node\n\n# def flatten_nqs(nqs):\n# auxReal = nqs.realNets\n# if auxReal:\n# flatNet1, auxNet1 = jax.tree_util.tree_flatten(nqs.net[0])\n# flatNet2, auxNet2 = jax.tree_util.tree_flatten(nqs.net[1])\n# return (flatNet1, flatNet2), (auxReal, auxNet1, auxNet2)\n# else:\n# flatNet, auxNet = jax.tree_util.tree_flatten(nqs.net)\n# return (flatNet,), (auxReal, auxNet)\n#\n# def unflatten_nqs(aux,treeData):\n# if aux[0]:\n# net1 = jax.tree_util.tree_unflatten(aux[1], treeData[0])\n# net2 = jax.tree_util.tree_unflatten(aux[2], treeData[1])\n# return NQS(net1, net2)\n# else:\n# net = jax.tree_util.tree_unflatten(aux[1], treeData[0])\n# return NQS(net)\n#\n#jax.tree_util.register_pytree_node(NQS, flatten_nqs, unflatten_nqs)\n\n\n# Register NQS class for flax serialization\n\ndef nqs_to_state_dict(nqs):\n\n stateDict = {}\n if nqs.realNets:\n stateDict['net1'] = flax.serialization.to_state_dict(nqs.net[0])\n stateDict['net2'] = flax.serialization.to_state_dict(nqs.net[1])\n else:\n stateDict['net'] = flax.serialization.to_state_dict(nqs.net)\n\n return stateDict\n\n\ndef nqs_from_state_dict(nqs, stateDict):\n\n if nqs.realNets:\n # NQS.__init__ takes a single 'nets' argument, so the two restored nets\n # must be passed as one tuple (otherwise the second net would be consumed as batchSize).\n return NQS(\n (flax.serialization.from_state_dict(nqs.net[0], stateDict['net1']),\n flax.serialization.from_state_dict(nqs.net[1], stateDict['net2']))\n )\n else:\n return NQS(\n flax.serialization.from_state_dict(nqs.net, stateDict['net'])\n )\n\n\nflax.serialization.register_serialization_state(NQS, nqs_to_state_dict, nqs_from_state_dict)\n\n","sub_path":"jVMC/vqs.py","file_name":"vqs.py","file_ext":"py","file_size_in_byte":19326,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"368497680","text":"\nfrom __future__ import print_function, absolute_import\n\nimport os\nimport sys\nimport glob\nimport shutil\nimport logging\nimport fnmatch\nimport tarfile\nimport argparse\nfrom collections import deque\n\nfrom build.command import _BuildCommand\n\n\nclass WriteCommand(_BuildCommand):\n \"\"\"\n Write text to a file\n \"\"\"\n alias = 'WRITE'\n\n def description(self):\n return 'Write data to a file'\n\n\n def populate_parser(self, parser):\n \"\"\"\n We need the text to commit as well as the file we're outputting\n it to.\n \"\"\"\n parser.add_argument(\n '-a', '--append',\n action='store_true',\n help='Should we just append to the file. 
(Defaults to overwrite)'\n )\n parser.add_argument(\n 'content',\n help='The content to store within our file'\n )\n parser.add_argument(\n 'file',\n help='The file to write our information to'\n )\n\n\n def run(self, build_file):\n \"\"\"\n Run the write command (but not the wrong one bum-dum-chhh)\n \"\"\"\n open_type = 'a' if self.data.append else 'w'\n if not os.path.isfile(self.data.file):\n open_type = 'w' # Nothing to append to\n\n logging.info('Writing to file: {}'.format(self.data.file))\n\n try:\n with open(self.data.file, open_type) as f:\n f.write(self.data.content)\n except OSError as e:\n raise RuntimeError(\"Could not open file: {} - {}\".format(\n self.data.file, str(e)\n ))\n\n\nclass ReadCommand(_BuildCommand):\n \"\"\"\n The other side of the WRITE command. Push information into a property\n that can be injected into other variables later\n \"\"\"\n alias = 'READ'\n\n def description(self):\n return 'Read data from a file - Careful! This is not built for large files'\n\n\n def populate_parser(self, parser):\n \"\"\"\n We require a file to read and the property to inject the read\n information into\n \"\"\"\n parser.add_argument(\n '-b', '--read-bytes',\n action='store_true',\n help='Open the file as a bytes object'\n )\n parser.add_argument(\n 'file',\n help='The file that we want to read from'\n )\n parser.add_argument(\n 'property',\n help='The property name to store the new information into'\n )\n\n\n def run(self, build_file):\n \"\"\"\n Read our file!\n \"\"\"\n read_type = 'rb' if self.data.read_bytes else 'r'\n\n logging.info('Reading file: {} -> PROPERTY({})'.format(\n self.data.file, self.data.property\n ))\n\n d = None\n try:\n with open(self.data.file, read_type) as f:\n d = f.read()\n except OSError as e:\n raise RuntimeError(\"Could not read file: {} - {}\".format(\n self.data.file, str(e)\n ))\n\n build_file.add_attribute(self.data.property, d)\n\n\nclass MkDirCommand(_BuildCommand):\n \"\"\"\n Creating directories in a platform agnostic way\n \"\"\"\n alias = 'MKDIR'\n\n def description(self):\n return 'Create directories where required'\n\n\n def populate_parser(self, parser):\n \"\"\"\n Just a few arguments to supplement creating directories\n \"\"\"\n parser.add_argument(\n '-s', '--skip-exist',\n action='store_true',\n help='Continue if the directory already exists'\n )\n parser.add_argument(\n 'path',\n help='The directory(ies) to create. Recursive by default'\n )\n\n\n def run(self, build_file):\n \"\"\"\n Make some dirs!\n \"\"\"\n if os.path.isdir(self.data.path):\n if not self.data.skip_exist:\n raise RuntimeError(\n ('Directory already exists! 
{} ' \\\n '(Consider passing --skip-exist to ignore)') \\\n .format(self.data.path)\n )\n return # All done\n\n # If we've made it here, we have something to make\n os.makedirs(self.data.path)\n\n\nclass DelCommand(_BuildCommand):\n \"\"\"\n File deletion command\n \"\"\"\n alias = 'DEL'\n\n def description(self):\n return 'Delete files/folders'\n\n\n def populate_parser(self, parser):\n \"\"\"\n Pretty simple\n \"\"\"\n parser.add_argument(\n 'path',\n help='Files to remove (accepts * patterns)'\n )\n\n parser.add_argument(\n '-x', '--exclude',\n action='append',\n help='Patterns to ignore when deleting'\n )\n\n\n def run(self, build_file):\n \"\"\"\n Time to run!\n \"\"\"\n to_remove = glob.glob(self.data.path)\n\n for rem in to_remove:\n if not os.path.exists(rem):\n continue\n\n # Skip anything matching an exclude pattern. (The original inner\n # 'continue' only skipped the pattern loop, so excluded files were\n # still deleted.)\n if any(fnmatch.fnmatch(rem, pattern) for pattern in (self.data.exclude or [])):\n continue\n\n if os.path.isdir(rem):\n shutil.rmtree(rem)\n else:\n os.unlink(rem)\n\n\nclass ZipCommand(_BuildCommand):\n \"\"\"\n Zip up files/directories\n \"\"\"\n alias = 'ZIP'\n\n def description(self):\n return '(Un)Zip files/directories - recursive by default'\n\n\n def populate_parser(self, parser):\n \"\"\"\n A few options to give the user preference over\n what to do\n \"\"\"\n parser.add_argument(\n 'archive',\n help='The name of the archive to create'\n )\n\n parser.add_argument(\n '-x', '--extract',\n action='store_true',\n help='Extract the archive (use -o to declare output location)'\n )\n\n parser.add_argument(\n '-o', '--output',\n help='Directory to move files into. Directory must exist'\n )\n\n parser.add_argument(\n '-e', '--exclude',\n help='File patterns to ignore (unix pattern matching ok)',\n action='append'\n )\n\n parser.add_argument(\n '-f', '--file',\n help='File or directory to include/unpack (can be used multiple times)',\n action='append'\n )\n\n parser.add_argument(\n '-r', '--root',\n help='Alternate root location to use. 
Default is the common prefix'\n )\n\n parser.add_argument(\n '-a', '--append',\n help='If the archive already exists, just append to it.',\n action='store_true'\n )\n\n parser.add_argument(\n '-n', '--noisey',\n action='store_true',\n help='Log files being manipulated'\n )\n\n\n def _common_prefix(self, files):\n \"\"\"\n Platform agnostic way to determine the common prefix in a set\n of paths\n :param files: list[str] of files to check on\n :return: str\n \"\"\"\n return os.path.commonprefix(files)\n\n\n def run(self, build_file):\n \"\"\"\n Run the command, zipping up files as needed\n \"\"\"\n from common import compression\n\n raw_files = self.data.file\n\n if self.data.extract:\n compression.unzip_files(\n archive=self.data.archive,\n files=raw_files or [],\n ignore=self.data.exclude or [],\n output=self.data.output or os.getcwd(),\n noisey=self.data.noisey\n )\n\n else:\n files = []\n for file_info in (raw_files or []):\n files.extend(glob.glob(file_info))\n\n # Make sure we have absolute paths\n ready_files = list(map(os.path.abspath, files))\n\n # With said paths, make sure we all have the same slash direction\n ready_files = list(map(lambda x: x.replace('\\\\', '/'), ready_files))\n\n def _dir(f):\n if os.path.isdir(f):\n return f\n return os.path.dirname(f)\n\n root = self.data.root\n cleaned = [_dir(r) for r in ready_files]\n if root is None:\n root = self._common_prefix(cleaned)\n\n if self.data.noisey:\n logging.debug('Cleaned Directories: {}'.format('\\n'.join(cleaned)))\n logging.info('Zip root: {}'.format(root))\n\n name = self.data.archive\n if not name.endswith('.zip'):\n name += '.zip'\n\n compression.zip_files(\n name=name,\n files=ready_files,\n root=root,\n mode='a' if self.data.append else 'w',\n ignore=self.data.exclude or [],\n noisey=self.data.noisey\n )\n\n\nclass TarCommand(_BuildCommand):\n \"\"\"\n tar up files/directories\n \"\"\"\n alias = 'TAR'\n\n def description(self):\n return '(Un)tar files/directories - recursive by default'\n\n\n def populate_parser(self, parser):\n \"\"\"\n A few options to give the user preference over\n what to do\n \"\"\"\n parser.add_argument(\n 'archive',\n help='The name of the archive to create'\n )\n\n parser.add_argument(\n '-x', '--extract',\n action='store_true',\n help='Extract the archive (use -o to declare output location)'\n )\n\n parser.add_argument(\n '-o', '--output',\n help='Directory to move files into. Directory must exist'\n )\n\n parser.add_argument(\n '-e', '--exclude',\n help='File patterns to ignore (unix pattern matching ok)',\n action='append'\n )\n\n parser.add_argument(\n '-f', '--file',\n help='File or directory to include/unpack (can be used multiple times)',\n action='append'\n )\n\n parser.add_argument(\n '-r', '--root',\n help='Alternate root location to use. 
Default is the common prefix'\n )\n\n parser.add_argument(\n '-a', '--append',\n help='If the archive already exists, just append to it.',\n action='store_true'\n )\n\n parser.add_argument(\n '-n', '--noisey',\n action='store_true',\n help='Log files being manipulated'\n )\n\n\n def _common_prefix(self, files):\n \"\"\"\n Platform agnostic way to determine the common prefix in a set\n of paths\n :param files: list[str] of files to check on\n :return: str\n \"\"\"\n return os.path.commonprefix(files)\n\n\n def run(self, build_file):\n \"\"\"\n Run the command, tarring up files as needed\n \"\"\"\n from common import compression\n\n raw_files = self.data.file\n\n if self.data.extract:\n compression.untar_files(\n archive=self.data.archive,\n files=raw_files or [],\n ignore=self.data.exclude or [],\n output=self.data.output or os.getcwd(),\n noisey=self.data.noisey\n )\n\n else:\n\n files = []\n for file_info in (raw_files or []):\n files.extend(glob.glob(file_info))\n\n # Make sure we have absolute paths\n ready_files = list(map(os.path.abspath, files))\n\n # With said paths, make sure we all have the same slash direction\n ready_files = list(map(lambda x: x.replace('\\\\', '/'), ready_files))\n\n def _dir(f):\n if os.path.isdir(f):\n return f\n return os.path.dirname(f)\n\n root = self.data.root\n cleaned = [_dir(r) for r in ready_files]\n if root is None:\n root = self._common_prefix(cleaned)\n\n if self.data.noisey:\n logging.debug('Cleaned Directories: {}'.format('\\n'.join(cleaned)))\n logging.info('Tar root: {}'.format(root))\n\n name = self.data.archive\n if not any(name.endswith(g) for g in ('.tar', '.tar.gz', '.tgz')):\n name += '.tar.gz'\n\n compression.tar_files(\n name=name,\n files=ready_files,\n root=root,\n mode='a' if self.data.append else 'w',\n ignore=self.data.exclude or [],\n noisey=self.data.noisey\n )\n","sub_path":"src/build/commands/fileio.py","file_name":"fileio.py","file_ext":"py","file_size_in_byte":12462,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"451556636","text":"# -*- coding: utf-8 -*-\n\nfrom maintable import *\nfrom conveyor import *\nfrom PIL import Image, ImageTk\nfrom tkinter import messagebox\nimport sys\nimport os\n\n\nclass App(Frame):\n picture_image = [] # shape images for the main table\n alphabet_image = [] # alphabet images for the main table\n resized_picture = [] # shape images for the conveyor\n continue_game = 1\n \n def __init__(self, master, n):\n super(App, self).__init__()\n self.master = master # use root as the parent\n self.n = n\n self.create_images() # read the images and store them\n\n # create the Maintable frame widget\n self.table = Maintable(self, self.picture_image, self.alphabet_image, self.n)\n self.table.grid(row=0, column=0, pady=(10,20))\n\n # create the Conveyor frame widget\n self.conveyor = Conveyor(self, self.resized_picture, self.n)\n self.conveyor.grid(row=1, column=0)\n\n # reads the image files, creates image objects and stores them in lists\n # the created image lists are never modified afterwards; randomization is done via indices\n def create_images(self):\n self.picture_image = list(Image.open(\"picture\\\\%d.JPG\" % (i+1)) for i in range(self.n*self.n))\n self.alphabet_image = list(PhotoImage(file=\"alphabet\\\\%d.GIF\" % (i+1)) for i in range(self.n*self.n))\n self.resized_picture = list(self.picture_image[i].resize((50,50), Image.ANTIALIAS) for i in range(self.n*self.n))\n self.resized_picture = list(ImageTk.PhotoImage(self.resized_picture[i]) for i in range(self.n*self.n))\n self.picture_image = list(ImageTk.PhotoImage(self.picture_image[i]) for i in 
range(self.n*self.n))\n\n # handle the end of the game\n def quit_game(self, win):\n if win:\n messagebox.showinfo(\"Game Over\", \"You succeeded\")\n else:\n messagebox.showinfo(\"Game Over\", \"You failed\")\n\n result = messagebox.askquestion(\"Restart\", \"Do you want to play again?\", icon='warning')\n if result == 'no':\n App.continue_game = 0\n \n self.conveyor.quit()\n self.table.quit()\n self.quit()\n\n\n \n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2243,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"297966109","text":"# Calculate function\ndef ecal(enumber):\n\tmoney = 0\n\tif enumber < 0:\n\t\tprint(\"The electrical number cannot be negative!\")\n\t\treturn 0\n\tif enumber <= 50:\n\t\tmoney = enumber * 4\n\t\treturn money\n\tif enumber <= 100:\n\t\tmoney = 50 * 4 + (enumber - 50) * 3\n\t\treturn money\n\telse:\n\t\tmoney = 50 * 4 + 50 * 3 + (enumber - 100) * 2\n\t\treturn money\n\nprint(\"Enter the electric number of first family:\")\nef1 = float(input())\nmf1 = ecal(ef1)\n\nprint(\"Enter the electric number of second family:\")\nef2 = float(input())\nmf2 = ecal(ef2)\n\nprint(\"Family \\t| F1 \\t\\t| F2\")\nprint(\"enumber | %f \\t| %f\"%(ef1,ef2))\nprint(\"money \\t| %f \\t| %f\"%(mf1,mf2))\n","sub_path":"PythonCode/Maths/ex3.py","file_name":"ex3.py","file_ext":"py","file_size_in_byte":623,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"171422851","text":"import django_rq\nfrom django.shortcuts import render\nfrom rest_framework import status\nfrom rest_framework.decorators import api_view\nfrom rest_framework.response import Response\nfrom dnasearch_app.models import DnaSearch\nfrom dnasearch_app.models import DnaSearchRequest\nfrom dnasearch_app.serializers import DnaSearchSerializer\nfrom dnasearch_app.serializers import DnaSearchRequestSerializer\nfrom datetime import datetime\nfrom dnasearch_app.dna_search_task import dna_search_task\n\nVALID_DNA_CHARS: set = {'A', 'C', 'T', 'G'}\n\n\n# endpoint to fetch dna searches for the current logged in user\n@api_view(['GET'])\ndef get_user_searches(request) -> Response:\n current_user_id = request.user.id\n current_user_searches = DnaSearch.objects \\\n .filter(user_id=current_user_id) \\\n .order_by('-started_at')\n\n dna_search_serializer: DnaSearchSerializer = DnaSearchSerializer(current_user_searches, many=True, context={'request': request})\n\n return Response(dna_search_serializer.data)\n\n\n# endpoint to start a new dna search for the current logged in user\n@api_view(['POST'])\ndef start_dna_search(request) -> Response:\n # validate DnaSearchRequest\n dna_search_request_serializer: DnaSearchRequestSerializer = DnaSearchRequestSerializer(data=request.data)\n\n if not dna_search_request_serializer.is_valid():\n return Response(dna_search_request_serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n\n dna_search_request: DnaSearchRequest = dna_search_request_serializer.save()\n\n # normalize + validate search string\n normalized_search_string: str = normalize_search_string(dna_search_request.search_string)\n\n error_response: Response = validate_search_string(normalized_search_string)\n if error_response is not None:\n return error_response\n\n # transform to DnaSearch\n dna_search: DnaSearch = DnaSearch(\n search_state='SEARCHING',\n user_id=request.user,\n started_at=datetime.now(),\n search_string=normalized_search_string,\n )\n\n # write to DB first so we can have an ID to hand to RQ\n dna_search.save()\n\n # 
enqueue to RQ\n queue = django_rq.get_queue('default', autocommit=True, is_async=True, default_timeout=360)\n queue.enqueue(dna_search_task, dna_search)\n\n # return success and enqueued search\n dna_search_serializer = DnaSearchSerializer(dna_search, context={'request': request})\n return Response(dna_search_serializer.data, status=status.HTTP_200_OK)\n\n\ndef normalize_search_string(search_string: str) -> str:\n return search_string.strip().upper()\n\n\ndef validate_search_string(search_string: str) -> Response:\n for c in search_string:\n if not c in VALID_DNA_CHARS:\n return Response(status=status.HTTP_400_BAD_REQUEST)\n return None\n","sub_path":"dnasearch/dnasearch_app/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2773,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"322183809","text":"from jinja2 import Template, Environment, FileSystemLoader\n\ndef render_template(path_template, output=None, filters={}, **kw):\n env = Environment(loader=FileSystemLoader('./'))\n\n for key,value in filters.items():\n env.filters[key] = value\n \n template = env.get_template(path_template)\n rendered_html = template.render(**kw)\n\n if output is not None:\n with open(output, 'w+', encoding='UTF-8') as file:\n file.write(rendered_html)\n\n return rendered_html\n","sub_path":"jinja_renderer.py","file_name":"jinja_renderer.py","file_ext":"py","file_size_in_byte":497,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"415093846","text":"from abc import ABC, abstractmethod\nfrom enum import Enum, auto\nfrom typing import List, Optional, Tuple, Union\n\nfrom starkware.cairo.lang.compiler.ast.cairo_types import TypeFelt\nfrom starkware.cairo.lang.compiler.ast.code_elements import (\n CodeElement, CodeElementTemporaryVariable)\nfrom starkware.cairo.lang.compiler.ast.expr import (\n ExprConst, ExprDeref, Expression, ExprIdentifier, ExprNeg, ExprOperator, ExprReg)\nfrom starkware.cairo.lang.compiler.ast.types import TypedIdentifier\nfrom starkware.cairo.lang.compiler.error_handling import Location\nfrom starkware.cairo.lang.compiler.instruction import Register\nfrom starkware.cairo.lang.compiler.instruction_builder import (\n InstructionBuilderError, _parse_offset, _parse_register_offset)\nfrom starkware.cairo.lang.compiler.preprocessor.preprocessor_error import PreprocessorError\nfrom starkware.cairo.lang.compiler.references import translate_ap\n\n\nclass SimplicityLevel(Enum):\n # An expression of the form [reg + offset].\n DEREF = 0\n # An expression of the form DEREF or [reg + offset] + offset.\n DEREF_OFFSET = auto()\n # An expression of the form DEREF or a constant.\n DEREF_CONST = auto()\n # An expression of the form DEREF_CONST or with one operation.\n OPERATION = auto()\n\n\nclass CompoundExpressionContext(ABC):\n @abstractmethod\n def new_tempvar_name(self) -> str:\n \"\"\"\n Allocates a new tempvar name.\n \"\"\"\n\n @abstractmethod\n def get_fp_val(self, location: Optional[Location]) -> Expression:\n \"\"\"\n Returns an expression with the current value of fp.\n Usually, this should resolve the expression \"__fp__\".\n \"\"\"\n\n\ndef is_simple_deref(expr: Expression):\n \"\"\"\n Returns True if expr is of the form [reg + offset].\n \"\"\"\n if not isinstance(expr, ExprDeref):\n return False\n try:\n # Try to write expr.addr as \"reg + off\".\n _parse_register_offset(expr.addr)\n return True\n except InstructionBuilderError:\n return False\n\n\nclass 
CompoundExpressionVisitor:\n \"\"\"\n Helper class for process_compound_expressions().\n Don't use this class directly, use process_compound_expressions() instead.\n Rewrites expressions by adding temporary variables (tempvar).\n \"\"\"\n\n def __init__(self, context: CompoundExpressionContext):\n \"\"\"\n Constructs a CompoundExpressionVisitor.\n \"\"\"\n self.code_elements: List[CodeElement] = []\n self.context = context\n # Number of variables created so far.\n self.n_vars: int = 0\n\n def rewrite(self, expr: Expression, sim: SimplicityLevel):\n \"\"\"\n Rewrites the given expression in the given simplicity level.\n For example, the expression \"5\" will be left unchanged for DEREF_CONST and OPERATION, but\n will be replaced by a variable for DEREF. The expression \"[ap] + 6\" will be left unchanged\n for OPERATION but will be replaced by a variable for DEREF and DEREF_CONST.\n \"\"\"\n funcname = f'rewrite_{type(expr).__name__}'\n return getattr(self, funcname)(expr, sim)\n\n def rewrite_ExprConst(self, expr: ExprConst, sim: SimplicityLevel):\n if sim in [SimplicityLevel.DEREF_CONST, SimplicityLevel.OPERATION]:\n return expr\n return self.wrap(expr)\n\n def rewrite_ExprReg(self, expr: ExprReg, sim: SimplicityLevel):\n if expr.reg is Register.AP:\n raise PreprocessorError(\n 'ap may only be used in an expression of the form [ap + ].',\n location=expr.location)\n elif expr.reg is Register.FP:\n return self.rewrite(expr=self.context.get_fp_val(expr.location), sim=sim)\n else:\n raise NotImplementedError(f'Unknown register {expr.reg}.')\n\n def rewrite_ExprOperator(self, expr: ExprOperator, sim: SimplicityLevel):\n expr = ExprOperator(\n a=self.rewrite(expr.a, SimplicityLevel.DEREF),\n op=expr.op,\n b=self.rewrite(expr.b, SimplicityLevel.DEREF_CONST),\n location=expr.location)\n\n if sim is SimplicityLevel.OPERATION:\n return expr\n\n if sim is SimplicityLevel.DEREF_OFFSET:\n # Check if it's of the form [reg + offset] + offset.\n try:\n inner_expr, offset = _parse_offset(expr)\n except InstructionBuilderError:\n pass\n else:\n assert inner_expr == expr.a\n return expr\n\n return self.wrap(expr)\n\n def rewrite_ExprNeg(self, expr: ExprNeg, sim: SimplicityLevel):\n # Treat \"-val\" as \"val * (-1)\".\n return self.rewrite(ExprOperator(\n a=expr.val,\n op='*',\n b=ExprConst(val=-1, location=expr.location),\n location=expr.location), sim)\n\n def rewrite_ExprDeref(self, expr: ExprDeref, sim: SimplicityLevel):\n if is_simple_deref(expr):\n # This is already a simple expression, just return it.\n return expr\n\n expr = ExprDeref(\n addr=self.rewrite(expr.addr, SimplicityLevel.DEREF_OFFSET), location=expr.location)\n return expr if sim is SimplicityLevel.OPERATION else self.wrap(expr)\n\n def wrap(self, expr: Expression) -> ExprIdentifier:\n identifier = ExprIdentifier(name=self.context.new_tempvar_name(), location=expr.location)\n\n expr = self.translate_ap(expr)\n self.n_vars += 1\n\n self.code_elements.append(CodeElementTemporaryVariable(\n typed_identifier=TypedIdentifier(\n identifier=identifier, expr_type=TypeFelt(location=expr.location)),\n expr=expr,\n location=expr.location))\n return identifier\n\n def translate_ap(self, expr):\n return translate_ap(expr, self.n_vars)\n\n\ndef process_compound_expressions(\n exprs: List[Expression], simplicity: Union[SimplicityLevel, List[SimplicityLevel]],\n context: CompoundExpressionContext) \\\n -> Tuple[List[CodeElement], List[Expression], Optional[Expression]]:\n \"\"\"\n Rewrites the given list of expressions, by adding temporary variables, in 
the required\n simplicity levels.\n For example, in SimplicityLevel.OPERATION, the expression \"[[ap + 3]]\" will be left unchanged,\n and \"[ap] + [ap] + [ap]\" will be replaced by \"__temp0 + [ap]\" where __temp0 is a new temporary\n variable.\n\n 'simplicity' may be one SimplicityLevel for all the expressions or a list of SimplicityLevel\n for each expression separately.\n Returns a list of code elements with the temporary variables, the list of simplified expressions,\n and the first expression from exprs which required simplification (or None if all the\n expressions were already at the required simplicity level).\n \"\"\"\n if isinstance(simplicity, SimplicityLevel):\n simplicity = [simplicity] * len(exprs)\n assert isinstance(simplicity, list) and len(simplicity) == len(exprs)\n\n visitor = CompoundExpressionVisitor(context=context)\n # First, visit all of the expressions.\n simplified_exprs = []\n first_compound_expr = None\n for expr, sim in zip(exprs, simplicity):\n simplified_exprs.append(visitor.rewrite(expr, sim))\n # Check if this is the first compound expression.\n if len(visitor.code_elements) > 0 and first_compound_expr is None:\n first_compound_expr = expr\n\n # Second, translate ap according to the total number of instructions.\n simplified_exprs = [visitor.translate_ap(expr) for expr in simplified_exprs]\n return visitor.code_elements, simplified_exprs, first_compound_expr\n\n\ndef process_compound_assert(\n expr_a: Expression, expr_b: Expression, context: CompoundExpressionContext):\n \"\"\"\n A version of process_compound_expressions() for assert instructions. Takes two expressions\n and returns them simplified to levels [DEREF, OPERATION] or [OPERATION, DEREF],\n so that the following will hold:\n 1. If expr_a = expr_b or expr_b = expr_a is already a valid Cairo instruction, it will not be\n modified.\n 2. 
If expr_a is assignable (that is, a dereference), then the assignment is treated as\n right-to-left: the right-hand side is simplified to DEREF to avoid unnecessary\n computations on the left-hand side (this allows the assignment without hints).\n \"\"\"\n\n if is_simple_deref(expr_a):\n # In this case, the left-hand side does not require any simplification, so there is room\n # for complexity on the right-hand side.\n simplicity = [SimplicityLevel.DEREF, SimplicityLevel.OPERATION]\n else:\n # Left-hand side is already too complicated for DEREF.\n simplicity = [SimplicityLevel.OPERATION, SimplicityLevel.DEREF]\n\n code_elements, exprs, _ = process_compound_expressions(\n exprs=[expr_a, expr_b], simplicity=simplicity, context=context)\n return code_elements, exprs\n","sub_path":"src/starkware/cairo/lang/compiler/preprocessor/compound_expressions.py","file_name":"compound_expressions.py","file_ext":"py","file_size_in_byte":8889,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"389572560","text":"# uncompyle6 version 3.7.4\n# Python bytecode 2.7 (62211)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: build/bdist.linux-x86_64/egg/probe_plotting2/plotting.py\n# Compiled at: 2017-03-23 09:14:00\nimport matplotlib.pyplot as plt\nbokeh2matplotlib_colors = {'blue': 'b', \n 'green': 'g', \n 'red': 'r', \n 'cyan': 'c', \n 'magenta': 'm', \n 'yellow': 'y', \n 'black': 'k', \n 'white': 'w'}\nbokeh2matplotlib_markers = {'circle': 'o', \n 'square': 's', \n 'triangle': 'v', \n 'asterisk': '*', \n 'inverted_triangle': '^', \n 'diamond': 'D', \n 'cross': 'x'}\n\ndef plot_one_line(x, y, y_errorbar=None, figsize=(15, 9), x_label='', y_label='', y2_label='', line_color=None, data_label='', line_style='-', line_width=2, xlim=None, ylim=None, y2lim=None, show=False, fig=None, y_secondary=False, ticks_font_size=18, label_font_size=18, legend_font_size=18, pad=10, border_thickness=2.0, ticks_length=5.0, ticks_width=2.0, legend_loc='best', fill_alpha=0.3, legend_ncol=1, ylabelpad=None, y2labelpad=None, xlabelpad=None):\n if line_color is None:\n line_color = 'blue'\n if fig is None:\n fig = {'fig': None, 'ax': None, \n 'ax2': None, \n 'lns': list(), \n 'labs': list()}\n if fig['fig'] is None:\n fig['fig'], fig['ax'] = plt.subplots(1, 1, figsize=figsize)\n current_ax = fig['ax']\n if y_secondary and fig['ax2'] is None:\n fig['ax2'] = plt.twinx(fig['ax'])\n current_ax = fig['ax2']\n ln = current_ax.plot(x, y, color=line_color, linestyle=line_style, linewidth=line_width, label=data_label)\n if y_errorbar is not None:\n current_ax.fill_between(x, y - y_errorbar / 2.0, y + y_errorbar / 2.0, facecolor=line_color, alpha=fill_alpha)\n lab = ln[0].get_label()\n if data_label not in ('', None):\n fig['lns'].append(ln[0])\n fig['labs'].append(lab)\n if show:\n if xlim:\n fig['ax'].set_xlim(xlim)\n if ylim:\n fig['ax'].set_ylim(ylim)\n if y2lim:\n fig['ax2'].set_ylim(y2lim)\n fig['ax'].set_xlabel(x_label, fontsize=label_font_size)\n fig['ax'].set_ylabel(y_label, fontsize=label_font_size)\n if fig['lns'] != []:\n current_ax.legend(fig['lns'], fig['labs'], loc=legend_loc, fontsize=legend_font_size, ncol=legend_ncol)\n fig['ax'].tick_params(axis='both', which='major', labelsize=ticks_font_size)\n fig['ax'].tick_params(direction='inout', pad=pad)\n [ i.set_linewidth(border_thickness) for i in fig['ax'].spines.values() ]\n fig['ax'].tick_params('both', direction='in', length=ticks_length, width=ticks_width, 
which='major')\n if xlabelpad:\n fig['ax'].xaxis.labelpad = xlabelpad\n if ylabelpad:\n fig['ax'].yaxis.labelpad = ylabelpad\n if fig['ax2'] is not None:\n fig['ax2'].set_ylabel(y2_label, fontsize=label_font_size)\n fig['ax2'].tick_params(axis='both', which='major', labelsize=ticks_font_size)\n fig['ax2'].tick_params(direction='inout', pad=pad)\n [ i.set_linewidth(border_thickness) for i in fig['ax2'].spines.itervalues() ]\n fig['ax2'].tick_params('both', direction='in', length=ticks_length, width=ticks_width, which='major')\n if y2labelpad:\n fig['ax2'].yaxis.labelpad = y2labelpad\n return fig\n\n\ndef plot_one_scatter(x, y, err_x=None, err_y=None, figsize=(15, 9), x_label='', y_label='', data_label='', size=1, show=False, fig=None, marker='circle', alpha=1.0, fill_color='blue', line_color='blue', legend_fill_alpha=0.5, xlim=None, ylim=None, ticks_font_size=20, label_font_size=40, legend_font_size=20, pad=20, border_thickness=3.0, ticks_length=5.0, ticks_width=2.0, legend_loc='best', err_line_width=2.0, err_every=1, err_capthick=2.0, err_capsize=2.0):\n assert marker in bokeh2matplotlib_markers.keys(), ('only these markers are supported: {}').format(bokeh2matplotlib_markers.keys())\n if line_color is None:\n line_color = 'blue'\n if fill_color is None:\n fill_color = 'blue'\n assert line_color in bokeh2matplotlib_colors.keys(), ('only these colors are supported: {}').format(bokeh2matplotlib_colors.keys())\n if fig is None:\n f, a = plt.subplots(1, 1, figsize=figsize)\n fig = {'fig': f, \n 'ax': a}\n if err_x is not None or err_y is not None:\n fig['ax'].errorbar(x, y, xerr=err_x, yerr=err_y, ecolor=bokeh2matplotlib_colors[line_color], elinewidth=err_line_width, errorevery=err_every, ms=size, alpha=alpha, fmt=bokeh2matplotlib_markers[marker], label=data_label, markerfacecolor=bokeh2matplotlib_colors[fill_color], markeredgecolor=bokeh2matplotlib_colors[line_color], capthick=err_capthick, capsize=err_capsize)\n else:\n fig['ax'].scatter(x, y, s=size, alpha=alpha, marker=bokeh2matplotlib_markers[marker], label=data_label, c=bokeh2matplotlib_colors[fill_color], edgecolors=bokeh2matplotlib_colors[line_color])\n if show:\n if xlim:\n fig['ax'].set_xlim(xlim)\n if ylim:\n fig['ax'].set_ylim(ylim)\n fig['ax'].set_xlabel(x_label, fontsize=label_font_size)\n fig['ax'].set_ylabel(y_label, fontsize=label_font_size)\n fig['ax'].legend(loc=legend_loc, fontsize=legend_font_size)\n fig['ax'].tick_params(axis='both', which='major', labelsize=ticks_font_size)\n fig['ax'].tick_params(direction='inout', pad=pad)\n [ i.set_linewidth(border_thickness) for i in fig['ax'].spines.itervalues() ]\n fig['ax'].tick_params('both', direction='in', length=ticks_length, width=ticks_width, which='major')\n return fig","sub_path":"pycfiles/probe_plotting2-0.0.1-py2.7/plotting.py","file_name":"plotting.py","file_ext":"py","file_size_in_byte":5654,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"461838329","text":"# uncompyle6 version 3.7.4\n# Python bytecode 3.6 (3379)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: build/bdist.macosx-10.7-x86_64/egg/airflow/migrations/versions/939bb1e647c8_task_reschedule_fk_on_cascade_delete.py\n# Compiled at: 2019-09-11 03:47:34\n# Size of source mod 2**32: 1986 bytes\n\"\"\"task reschedule fk on cascade delete\n\nRevision ID: 939bb1e647c8\nRevises: 4ebbffe0a39a\nCreate Date: 2019-02-04 20:21:50.669751\n\n\"\"\"\nfrom alembic import op\nrevision = '939bb1e647c8'\ndown_revision 
= 'dd4ecb8fbee3'\nbranch_labels = None\ndepends_on = None\n\ndef upgrade():\n with op.batch_alter_table('task_reschedule') as (batch_op):\n batch_op.drop_constraint('task_reschedule_dag_task_date_fkey',\n type_='foreignkey')\n batch_op.create_foreign_key('task_reschedule_dag_task_date_fkey',\n 'task_instance',\n [\n 'task_id', 'dag_id', 'execution_date'],\n [\n 'task_id', 'dag_id', 'execution_date'],\n ondelete='CASCADE')\n\n\ndef downgrade():\n with op.batch_alter_table('task_reschedule') as (batch_op):\n batch_op.drop_constraint('task_reschedule_dag_task_date_fkey',\n type_='foreignkey')\n batch_op.create_foreign_key('task_reschedule_dag_task_date_fkey', 'task_instance', [\n 'task_id', 'dag_id', 'execution_date'], [\n 'task_id', 'dag_id', 'execution_date'])","sub_path":"pycfiles/apache_airflow_arup-1.10.5-py3.6/939bb1e647c8_task_reschedule_fk_on_cascade_delete.cpython-36.py","file_name":"939bb1e647c8_task_reschedule_fk_on_cascade_delete.cpython-36.py","file_ext":"py","file_size_in_byte":1411,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"467682998","text":"#!/usr/bin/env python\n# ------------------------------------------------------------------------------------------------------%\n# Created by \"Thieu Nguyen\" at 18:23, 06/04/2020 %\n# %\n# Email: nguyenthieu2102@gmail.com %\n# Homepage: https://www.researchgate.net/profile/Thieu_Nguyen6 %\n# Github: https://github.com/thieunguyen5991 %\n# -------------------------------------------------------------------------------------------------------%\n\nfrom model.root.root_base import RootBase\nimport time\n\n\nclass RootRnn(RootBase):\n def __init__(self, root_base_paras=None, root_rnn_paras=None):\n RootBase.__init__(self, root_base_paras)\n self.epoch = root_rnn_paras[\"epoch\"]\n self.batch_size = root_rnn_paras[\"batch_size\"]\n self.learning_rate = root_rnn_paras[\"learning_rate\"]\n self.activations = root_rnn_paras[\"activations\"]\n self.optimizer = root_rnn_paras[\"optimizer\"]\n self.loss = root_rnn_paras[\"loss\"]\n self.dropouts = root_rnn_paras[\"dropouts\"]\n if root_rnn_paras[\"hidden_sizes\"][-1]:\n self.hidden_sizes = root_rnn_paras[\"hidden_sizes\"][:-1]\n else:\n num_hid = len(root_rnn_paras[\"hidden_sizes\"]) - 1\n self.hidden_sizes = [(num_hid - i) * root_base_paras[\"sliding\"] * root_base_paras[\"feature_size\"] + 1 for i in range(num_hid)]\n\n def _forecasting__(self):\n y_pred = self.model.predict(self.X_test)\n pred_inverse = self.scaler.inverse_transform(y_pred)\n real_inverse = self.scaler.inverse_transform(self.y_test)\n return real_inverse, pred_inverse, self.y_test, y_pred\n\n def _running__(self):\n self.time_system = time.time()\n self._preprocessing_3d__()\n self.time_total_train = time.time()\n self._training__()\n self.time_total_train = round(time.time() - self.time_total_train, 4)\n self.time_epoch = round(self.time_total_train / self.epoch, 4)\n self.time_predict = time.time()\n y_true_unscaled, y_pred_unscaled, y_true_scaled, y_pred_scaled = self._forecasting__()\n self.time_predict = round(time.time() - self.time_predict, 8)\n self.time_system = round(time.time() - self.time_system, 4)\n self._save_results__(y_true_unscaled, y_pred_unscaled, y_true_scaled, y_pred_scaled, self.loss_train, self.n_runs)\n","sub_path":"model/root/traditional/root_rnn.py","file_name":"root_rnn.py","file_ext":"py","file_size_in_byte":2645,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"647529426","text":"#\n# 
Complete the pageCount function below.\n#\nclass Page:\n def __init__(self, left, right):\n self.leftPage = left\n self.rightPage = right\n\nclass Book:\n def __init__(self, pageNumber):\n self.pages = []\n if pageNumber > 0:\n firstPage = Page(0, 1)\n self.pages.append(firstPage)\n for i in range(2, pageNumber + 1, 2):\n if (i < pageNumber):\n self.pages.append(Page(i, i + 1))\n else:\n self.pages.append(Page(i, -1))\n\n\n\ndef pageCount(n, p):\n book = Book(n)\n frontCount = 0 # how many page turns when flipping from the front\n backCount = 0 # how many page turns when flipping from the back\n for page in book.pages: # if we start from the front\n if page.leftPage == p or page.rightPage == p:\n break\n frontCount += 1\n book.pages.reverse()\n for page in book.pages:\n if page.leftPage == p or page.rightPage == p:\n break\n backCount += 1\n return min(frontCount, backCount)\n\nprint(pageCount(83246, 78132))","sub_path":"page-number.py","file_name":"page-number.py","file_ext":"py","file_size_in_byte":1060,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"55413919","text":"# -*- coding: utf-8 -*-\n# Api Documents Url https://www.bithumb.com/u1/US127\n\n# from api_token import *\n\nfrom common.Exchange import Exchange\nfrom common.order import Order\n\nimport urllib\ntry:\n import urllib2\nexcept ImportError:\n import urllib.request as urllib2\n\nimport hmac\nimport hashlib\nimport json\n\nclass BITHUMB(Exchange):\n \"\"\"docstring for bithumb.com\"\"\"\n def __init__(self, api_key, secret):\n self.api_key = api_key\n self.secret = secret\n self.base_api_url = \"https://api.bithumb.com\"\n self.__currencies = None\n super(BITHUMB, self).__init__()\n self.name = str.upper('BITHUMB')\n self.trading_fee = 0.002\n self.timestamp = 0\n\n # @property\n # implemented abstract functions\n\n # def unauthenticated_request(url_suffix):\n # url_request_object = urllib2.Request(\"https://api.bithumb.com/%s\" % (url_suffix))\n # response = urllib2.urlopen(url_request_object)\n # response_json = {}\n # try:\n # response_content = response.read()\n # response_json = json.loads(response_content)\n # return response_json\n # finally:\n # response.close()\n # return \"failed\"\n\n @property\n def get_currencies(self):\n if self.__currencies is None:\n # tickers = self.unauthenticated_request('public/ticker/all')\n # self.__currencies = [m for m in tickers['data'].keys() if type(tickers['data'][m]) == dict]\n tickers = self.unauthenticated_request('public/ticker/all')['data']\n self.__currencies = [str.lower(m) for m in tickers.keys() if type(tickers[m]) == dict]\n return self.__currencies\n\n\n def get_orderbook(self, currency=None):\n marketdata = {}\n orderBook = { \"bids\" : [], \"offers\" : [] }\n if currency is None:\n tmpData = self.unauthenticated_request('public/orderbook/all')['data']\n self.timestamp = int(tmpData['timestamp'])\n\n for currency in self.get_currencies:\n marketdata[currency] = tmpData[currency]\n else:\n marketdata[currency] = self.unauthenticated_request('public/orderbook/%s' % currency)['data']\n self.timestamp = int(marketdata[currency]['timestamp'])\n\n for currency in marketdata.keys():\n # if int(marketdata[currency]['timestamp']) > int(self.timestamp):\n\n for bid in marketdata[currency]['bids']:\n o = Order(currency, int(bid['price']), float(bid['quantity']))\n orderBook['bids'].append(o)\n\n for ask in marketdata[currency]['asks']:\n o = Order(currency, int(ask['price']), float(ask['quantity']))\n orderBook['offers'].append(o)\n return orderBook\n\n # def 
get_highest_bid(self, currency='all'):\n # return -1\n\n # def get_lowest_offer(self, currency='all'):\n # return -1\n\n\n # TODO: 인증 rq 완료후 진행\n def get_balance(self, currency):\n # data = self.authenticated_request('account/balance/')\n # return float(data['wallet']['available'])\n return -1\n\n def get_all_balances(self):\n # data = self.authenticated_request('wallet/all/', \"getwallets\")\n # balances = {k:float(v[\"a\"]) for k,v in data[\"wallets\"].items()}\n # return balances\n return -1\n\n def submit_order(self, gc, gv, rc, rv):\n pass\n #order_request = self.authenticated_request('market/%s/' % (working_pair), \"neworder\", {'order_type':order_type, 'rate':rate, 'quantity':amount,})\n\n def confirm_order(self, orderID):\n# get_order_request = self.authenticated_request('market/%s/' % (working_pair),\"getorder\",{'order_id':new_order_request['order']['id']})\n# print get_order_request\n # TODO\n pass\n\n # Bithumb specific methods\n\n # Public API / GET Method\n # 90 requests per minute\n def unauthenticated_request(self, url_suffix):\n url_request_object = urllib2.Request(\"%s/%s\" % (self.base_api_url, url_suffix))\n response = urllib2.urlopen(url_request_object)\n response_json = {}\n try:\n response_content = response.read()\n response_json = json.loads(response_content)\n return response_json\n finally:\n response.close()\n return \"failed\"\n\n # Private API / POST Method\n # Account/Order/Transaction API, 6 requests per second\n def authenticated_request(self, url_suffix, method, post_args={}):\n nonce = 1000\n try:\n f = open('coins-e_nonce', 'r')\n nonce = int(f.readline())\n f.close()\n finally:\n f = open('coins-e_nonce', 'w')\n nonce += 1\n f.write(str(nonce))\n f.close()\n\n post_args['method'] = method\n post_args['nonce'] = nonce\n try:\n post_data = urllib.urlencode(post_args)\n except:\n post_data = urllib.parse.urlencode(post_args)\n\n required_sign = hmac.new(self.secret, post_data, hashlib.sha512).hexdigest()\n headers = {}\n headers['key'] = self.api_key\n headers['sign'] = required_sign\n url_request_object = urllib2.Request(\"%s/%s\" % (self.base_api_url, url_suffix),\n post_data,\n headers)\n response = urllib2.urlopen(url_request_object)\n\n\n try:\n response_content = response.read()\n response_json = json.loads(response_content)\n if not response_json['status']:\n print(response_content)\n print(\"request failed\")\n print(response_json['message'])\n\n return response_json\n finally:\n response.close()\n return \"failed\"\n\n","sub_path":"singleton_abc/xchg_kr/Bithumb.py","file_name":"Bithumb.py","file_ext":"py","file_size_in_byte":5826,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"507381489","text":"import requests\nimport json\nimport config\nimport mysql.connector\n\ncnx = mysql.connector .connect(\n host = config.host,\n user = config.user,\n passwd = config.pw,\n database = 'yelp_tea'\n\n)\n\ncursor = cnx.cursor()\n\nurl = 'https://api.yelp.com/v3/businesses/search'\n\nclient_id = 'U1F8RNabNLQ1xoiZ4aU0eA'\napi_key = 'MWsKI2UOfPh3dk-fo9clIPR1qpgWtoMYqJ9ASIpzS4A0USmzJ3SuFwKeSHBGviPWf_hFBj940yamvqe4ssszQO59x2kh--IWRu5CeUeoSRBnjYuuDCs-bdk42GPpXXYx'\n\nheaders = {\n 'Authorization': 'Bearer {}'.format(api_key),\n }\n\nterm = \"Tea\"\nlocation = \"Alphabet City\"\nradius = 1000\nlimit = 50\n\nurl_params = {\n\n \"term\": term.replace(' ', '+'),\n \"location\": location.replace(' ', '+'),\n \"radius\" : radius,\n \"limit\": limit\n\n }\n\ndef yelp_call(url_params, headers, url):\n print(headers)\n 
response = requests.get(url, headers=headers, params=url_params)\n data = response.json()\n return data['businesses']\n\ndef parse_data(businesses):\n parsed_data = []\n for business in businesses:\n if 'price' in business.keys():\n biz_tuple = (business['id'], business['name'], business['rating'], business['review_count'], business['price'])\n parsed_data.append(biz_tuple)\n else:\n biz_tuple = (business['id'], business['name'], None, business['rating'], business['review_count'])\n parsed_data.append(biz_tuple)\n return parsed_data\n\ndef db_insert(cnx, cursor, parsed_results):\n add_business = (\"\"\"INSERT INTO yelp_businesses\n (business_id, name, rating, review_count, price)\n VALUES (%s, %s, %s, %s, %s)\"\"\")\n cursor.executemany(add_business, parsed_results)\n cnx.commit()\n\ndef paginate(url_params, headers, url, max=266):\n cur = 0\n while cur < max:\n url_params['offset'] = cur\n businesses = yelp_call(url_params, headers, url)\n parsed_results = parse_data(businesses)\n db_insert(cnx, cursor, parsed_results)\n cur += 50\n\n#cnx = mysql.connector .connect(\n# host = config.host,\n# user = config.user,\n# passwd = config.pw\n#)\n\npaginate(url_params, headers, url)\n","sub_path":"Mod_2/SQL/b_etl.py","file_name":"b_etl.py","file_ext":"py","file_size_in_byte":2154,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"10128492","text":"import os\nimport typing\n\nimport pytest\nfrom pymatgen.analysis.phase_diagram import PhaseDiagram\n\nfrom mp_api.client.routes.thermo import ThermoRester\nfrom emmet.core.thermo import ThermoType\n\n\n@pytest.fixture\ndef rester():\n rester = ThermoRester()\n yield rester\n rester.session.close()\n\n\nexcluded_params = [\n \"sort_fields\",\n \"chunk_size\",\n \"num_chunks\",\n \"all_fields\",\n \"fields\",\n \"equilibrium_reaction_energy\",\n]\n\nsub_doc_fields = [] # type: list\n\nalt_name_dict = {\n \"formula\": \"formula_pretty\",\n \"material_ids\": \"material_id\",\n \"thermo_ids\": \"thermo_id\",\n \"thermo_types\": \"thermo_type\",\n \"total_energy\": \"energy_per_atom\",\n \"formation_energy\": \"formation_energy_per_atom\",\n \"uncorrected_energy\": \"uncorrected_energy_per_atom\",\n \"equilibrium_reaction_energy\": \"equilibrium_reaction_energy_per_atom\",\n \"num_elements\": \"nelements\",\n \"num_sites\": \"nsites\",\n} # type: dict\n\ncustom_field_tests = {\n \"material_ids\": [\"mp-149\"],\n \"thermo_ids\": [\"mp-149_GGA_GGA+U\"],\n \"thermo_types\": [ThermoType.GGA_GGA_U],\n \"formula\": \"SiO2\",\n \"chemsys\": \"Si-O\",\n} # type: dict\n\n\n@pytest.mark.skipif(os.environ.get(\"MP_API_KEY\", None) is None, reason=\"No API key found.\")\ndef test_client(rester):\n search_method = rester.search\n\n if search_method is not None:\n # Get list of parameters\n param_tuples = list(typing.get_type_hints(search_method).items())\n\n # Query API for each numeric and boolean parameter and check if returned\n for entry in param_tuples:\n param = entry[0]\n print(param)\n if param not in excluded_params:\n param_type = entry[1].__args__[0]\n q = None\n\n if param_type == typing.Tuple[int, int]:\n project_field = alt_name_dict.get(param, None)\n q = {\n param: (-100, 100),\n \"chunk_size\": 1,\n \"num_chunks\": 1,\n \"fields\": [project_field if project_field is not None else param],\n }\n elif param_type == typing.Tuple[float, float]:\n project_field = alt_name_dict.get(param, None)\n q = {\n param: (-100.12, 100.12),\n \"chunk_size\": 1,\n \"num_chunks\": 1,\n \"fields\": [project_field if 
project_field is not None else param],\n }\n elif param_type is bool:\n project_field = alt_name_dict.get(param, None)\n q = {\n param: False,\n \"chunk_size\": 1,\n \"num_chunks\": 1,\n \"fields\": [project_field if project_field is not None else param],\n }\n elif param in custom_field_tests:\n project_field = alt_name_dict.get(param, None)\n q = {\n param: custom_field_tests[param],\n \"chunk_size\": 1,\n \"num_chunks\": 1,\n \"fields\": [project_field if project_field is not None else param],\n }\n\n doc = search_method(**q)[0].dict()\n for sub_field in sub_doc_fields:\n if sub_field in doc:\n doc = doc[sub_field]\n\n assert doc[project_field if project_field is not None else param] is not None\n\n\ndef test_get_phase_diagram_from_chemsys():\n # Test that a phase diagram is returned\n\n assert isinstance(ThermoRester().get_phase_diagram_from_chemsys(\"Hf-Pm\"), PhaseDiagram)\n","sub_path":"tests/test_thermo.py","file_name":"test_thermo.py","file_ext":"py","file_size_in_byte":3735,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"4906241","text":"\"\"\"\nCopyright (C) 2019 NVIDIA Corporation. All rights reserved.\nLicensed under the CC BY-NC-SA 4.0 license (https://creativecommons.org/licenses/by-nc-sa/4.0/legalcode).\n#\n# BSD 3-Clause License\n#\n# Copyright (c) 2017 xxxx\n# All rights reserved.\n# Copyright 2021 Huawei Technologies Co., Ltd\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n#\n# * Redistributions of source code must retain the above copyright notice, this\n# list of conditions and the following disclaimer.\n#\n# * Redistributions in binary form must reproduce the above copyright notice,\n# this list of conditions and the following disclaimer in the documentation\n# and/or other materials provided with the distribution.\n#\n# * Neither the name of the copyright holder nor the names of its\n# contributors may be used to endorse or promote products derived from\n# this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n# DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE\n# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\n# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR\n# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\n# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,\n# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n# ============================================================================\n#\n\"\"\"\n\nimport torch.nn as nn\nimport numpy as np\nimport torch.nn.functional as F\nfrom models.networks.base_network import BaseNetwork\nfrom models.networks.normalization import get_nonspade_norm_layer\nimport util.util as util\n\n\nclass MultiscaleDiscriminator(BaseNetwork):\n @staticmethod\n def modify_commandline_options(parser, is_train):\n parser.add_argument('--netD_subarch', type=str, default='n_layer',\n help='architecture of each discriminator')\n parser.add_argument('--num_D', type=int, default=2,\n help='number of discriminators to be used in multiscale')\n opt, _ = parser.parse_known_args()\n\n # define properties of each discriminator of the multiscale discriminator\n subnetD = util.find_class_in_module(opt.netD_subarch + 'discriminator',\n 'models.networks.discriminator')\n subnetD.modify_commandline_options(parser, is_train)\n\n return parser\n\n def __init__(self, opt):\n super().__init__()\n self.opt = opt\n\n for i in range(opt.num_D):\n subnetD = self.create_single_discriminator(opt)\n self.add_module('discriminator_%d' % i, subnetD)\n\n def create_single_discriminator(self, opt):\n subarch = opt.netD_subarch\n if subarch == 'n_layer':\n netD = NLayerDiscriminator(opt)\n else:\n raise ValueError('unrecognized discriminator subarchitecture %s' % subarch)\n return netD\n\n def downsample(self, input):\n return F.avg_pool2d(input, kernel_size=3,\n stride=2, padding=[1, 1],\n count_include_pad=False)\n\n # Returns list of lists of discriminator outputs.\n # The final result is of size opt.num_D x opt.n_layers_D\n def forward(self, input):\n result = []\n get_intermediate_features = not self.opt.no_ganFeat_loss\n for name, D in self.named_children():\n out = D(input)\n if not get_intermediate_features:\n out = [out]\n result.append(out)\n input = self.downsample(input)\n\n return result\n\n\n# Defines the PatchGAN discriminator with the specified arguments.\nclass NLayerDiscriminator(BaseNetwork):\n @staticmethod\n def modify_commandline_options(parser, is_train):\n parser.add_argument('--n_layers_D', type=int, default=4,\n help='# layers in each discriminator')\n return parser\n\n def __init__(self, opt):\n super().__init__()\n self.opt = opt\n\n kw = 4\n padw = int(np.ceil((kw - 1.0) / 2))\n nf = opt.ndf\n input_nc = self.compute_D_input_nc(opt)\n\n norm_layer = get_nonspade_norm_layer(opt, opt.norm_D)\n sequence = [[nn.Conv2d(input_nc, nf, kernel_size=kw, stride=2, padding=padw),\n nn.LeakyReLU(0.2, False)]]\n\n for n in range(1, opt.n_layers_D):\n nf_prev = nf\n nf = min(nf * 2, 512)\n stride = 1 if n == opt.n_layers_D - 1 else 2\n sequence += [[norm_layer(nn.Conv2d(nf_prev, nf, kernel_size=kw,\n stride=stride, padding=padw)),\n nn.LeakyReLU(0.2, False)\n ]]\n\n sequence += [[nn.Conv2d(nf, 1, kernel_size=kw, stride=1, padding=padw)]]\n\n # We divide the layers into groups to extract intermediate layer outputs\n for n in range(len(sequence)):\n 
self.add_module('model' + str(n), nn.Sequential(*sequence[n]))\n\n def compute_D_input_nc(self, opt):\n input_nc = opt.label_nc + opt.output_nc\n if opt.contain_dontcare_label:\n input_nc += 1\n if not opt.no_instance:\n input_nc += 1\n return input_nc\n\n def forward(self, input):\n results = [input]\n for submodel in self.children():\n intermediate_output = submodel(results[-1])\n results.append(intermediate_output)\n\n get_intermediate_features = not self.opt.no_ganFeat_loss\n if get_intermediate_features:\n return results[1:]\n else:\n return results[-1]\n","sub_path":"PyTorch/dev/cv/image_classification/SmartSketch_ID1046_for_PyTorch/backend/SPADE/models/networks/discriminator.py","file_name":"discriminator.py","file_ext":"py","file_size_in_byte":6070,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"458171001","text":"from django.forms import Form,fields,ModelForm\nfrom django.forms import widgets as wd\nfrom app01 import models\n\nclass QuestionForm(Form):\n name = fields.CharField(\n required=True, # 表示此项必填\n max_length=18,\n label=\"问题\",\n label_suffix=\":\",\n error_messages={ # 出现错误时对应的提示信息\n 'max_length': '问题名称不能超过18字',\n 'required': '问题名称不能为空',\n },\n widget=wd.TextInput(\n attrs={'class':\"dfinput question_name\",'placeholder': '请输入问题名称'}\n )\n\n )\n\n type = fields.ChoiceField(\n choices=(),\n # initial=1,\n label=\"类型\",\n label_suffix=\":\",\n widget=wd.Select(attrs={'class':\"dfinput question_type\"})\n )\n\n def __init__(self, *args, **kwargs):\n super(QuestionForm, self).__init__(*args, **kwargs)\n\n self.fields['type'].widget.choices = models.Question.question_types\n if kwargs.get('initial'):\n self.fields['type'].initial = kwargs['initial']['type']\n self.fields['name'].widget.attrs['value'] = kwargs['initial']['value']\n\n\nclass OptionModelForm(ModelForm):\n class Meta:\n model = models.Option\n fields = ['name','score']\n labels = {\n 'name':'内容',\n 'score':'分值',\n }\n error_messages = {\n 'name':{\n 'required':'选项名称不能为空',\n 'invalid':'选项名称类型错误',\n },\n 'score': {\n 'required': '选项值不能为空',\n 'invalid': '选项值类型错误',\n }\n }\n\n widgets = {\n 'name':wd.TextInput(\n attrs={\n 'class':'dfinput-sm option_value '\n }\n ),\n 'score': wd.NumberInput(\n attrs={\n 'class': 'dfinput-sm option_key'\n }\n )\n }\n\n\nclass SurveyModelForm(ModelForm):\n class Meta:\n model = models.SurveyInfo\n fields = ['title', 'cls']\n\n labels = {\n 'title': '问卷名称',\n 'cls': '所属班级',\n }\n\n widgets = {\n 'title':wd.TextInput(\n attrs={\n 'class':'dfinput'\n }\n ),\n 'cls': wd.Select(\n attrs={\n 'class': 'dfinput'\n }\n )\n }\n","sub_path":"app01/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":2529,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"345594069","text":"# _*_ coding: utf-8 _*_\n\n# Copyright (c) 2020 NMC Developers.\n# Distributed under the terms of the GPL V3 License.\n\n\"\"\"\n检索特定日期和范围的CFSR再分析数据, 并绘制相关的天气图.\n\"\"\"\n\nimport sys\nimport requests\nimport datetime\nfrom multiprocessing import Process, Manager\nimport numpy as np\nimport xarray as xr\nimport streamlit as st\n\n# set page title\nst.set_page_config(page_title=\"历史天气图分析\", layout=\"wide\")\n\nfrom nmc_met_io.retrieve_cmadaas import cmadaas_obs_by_time\nfrom nmc_met_graphics.util import get_map_regions\nfrom nmc_met_graphics.web import SessionState, ipyplot\n\nsys.path.append('.')\nimport draw_maps\n\n# set session state\nstate = SessionState.get(img1=None, img2=None, img3=None)\n\n\ndef 
main():\n # application title\n st.title(\"历史天气图分析\")\n st.header('——检索1979年1月1日以来的地面观测日值和天气图')\n\n # application information\n st.sidebar.image('http://image.nmc.cn/assets/img/index/nmc_logo_3.png', width=300)\n st.sidebar.markdown('**天气预报技术研发室**')\n\n # Input data date\n data_date = st.sidebar.date_input(\n \"输入日期:\", datetime.date(2016, 7, 19),\n min_value=datetime.date(1979, 1, 1),\n max_value=datetime.date.today() - datetime.timedelta(days=2))\n\n # Input data time\n data_time = st.sidebar.selectbox('选择时刻(UTC):', ('00', '06', '12', '18'), index=2)\n\n # construct datetime string\n datetime_str = data_date.strftime('%Y%m%d') + data_time\n date_obj = datetime.datetime.strptime(datetime_str,'%Y%m%d%H')\n\n # subset data\n map_regions = get_map_regions()\n select_items = list(map_regions.keys())\n select_items.append('自定义')\n select_item = st.sidebar.selectbox('选择空间范围:', select_items)\n if select_item == '自定义':\n map_region_str = st.sidebar.text_input('输入空间范围[West, East, South, North]:', '70, 140, 10, 65')\n map_region = list(map(float, map_region_str.split(\", \")))\n else:\n map_region = map_regions[select_item]\n\n # draw the figures\n if st.sidebar.button('绘制天气图'):\n # load data\n st.info('载入CFSR数据, http://thredds.atmos.albany.edu:8080/thredds/dodsC/ (taking 30s)')\n data = load_variables(date_obj, map_region=map_region)\n \n # draw the synoptic compsite\n fig = draw_maps.draw_composite_map(\n date_obj, data['t850'], data['u200'], data['v200'], data['u500'], \n data['v500'], data['mslp'], data['gh500'], data['u850'], data['v850'], data['pwat'])\n state.img1 = fig\n\n # draw weather analysis maps\n manager = Manager()\n return_dict = manager.dict()\n p = Process(target=draw_maps.draw_weather_analysis, args=(date_obj, data, map_region, return_dict))\n p.start()\n p.join()\n if return_dict[0] is not None:\n state.img2 = return_dict[0]\n else:\n st.info('制作各层天气图失效!')\n state.img2 = None\n return_dict.clear()\n manager.shutdown()\n p.close()\n del data\n\n # load observation data\n obs_data = cmadaas_obs_by_time(\n date_obj.strftime('%Y%m%d000000'), data_code=\"SURF_CHN_MUL_DAY\", sta_levels=\"011,012,013\",\n elements=\"Station_Id_C,Lat,Lon,Alti,TEM_Max,TEM_Min,VIS_Min,PRE_Time_0808,WIN_S_Max\")\n if obs_data is not None:\n state.img3 = draw_maps.draw_observation(obs_data, date_obj, map_region)\n else:\n state.img3 = None\n\n if state.img1 is None or state.img2 is None:\n st.info('请点击左侧**绘制天气图**按钮生成或更新图像.')\n else:\n # display observation\n if state.img3 is not None:\n st.markdown(\n '''\n ------\n ### 选择站点实况观测''')\n options = [key for key in state.img3.keys()]\n option = st.selectbox('', options, 0)\n st.plotly_chart(state.img3[option], use_container_width=False)\n\n # display synoptic composite\n st.markdown(\n '''\n ------\n ### 环流形势综合图''')\n st.pyplot(state.img1)\n\n # display weather analysis maps\n st.markdown(\n '''\n ------\n ### 点击天气图弹出放大''')\n images = np.asarray([*state.img2.values()], dtype=object)\n labels = np.asarray([*state.img2.keys()])\n html = ipyplot.display_image_gallery(images, labels, img_width=200)\n st.markdown(html, unsafe_allow_html=True)\n # options = [key for key in state.img2.keys()]\n #options = st.multiselect(\n # '', options, ['500hPa_height', 'precipitable_water', 'mean_sea_level_pressure'])\n #for option in options:\n # st.image(state.img2[option], use_column_width=True)\n\n st.sidebar.markdown(\n '''\n ------\n [CFSR](https://climatedataguide.ucar.edu/climate-data/climate-forecast-system-reanalysis-cfsr)(CLIMATE FORECAST SYSTEM REANALYSIS)\n 
再分析资料是NCEP提供的第三代再分析产品. 本程序从Albany大学Thredds服务器检索指定日期时刻及范围的CFSR数据(从1979年1月1日以来每日4次, 全球范围), \n 并绘制高空环流形势天气图. \n ''')\n\n\ndef read_variable(varname, date_obj):\n \"\"\"\n read varaible from load or remote thredds server.\n \"\"\"\n # To make parsing the date easier, convert it into a datetime object\n # and get it into various formats\n yyyy = date_obj.year\n\n filepath_html = \"http://10.28.49.118:8080/thredds/dodsC/CFSR/%s/%s.%s.0p5.anl.nc.html\"%(yyyy,'%s',yyyy)\n filepath_local = \"http://10.28.49.118:8080/thredds/dodsC/CFSR/%s/%s.%s.0p5.anl.nc\"%(yyyy,'%s',yyyy)\n filepath_remote = \"http://thredds.atmos.albany.edu:8080/thredds/dodsC/CFSR/%s/%s.%s.0p5.anl.nc\"%(yyyy,'%s',yyyy)\n\n result = requests.get(filepath_html%(varname))\n if result.status_code == 200:\n data = xr.open_dataset(filepath_local%(varname))\n else:\n data = xr.open_dataset(filepath_remote%(varname))\n data = data.sel(time=date_obj)\n\n return data\n\n\ndef load_variables(date_obj, map_region=[50, 160, 6, 60]):\n \"\"\"\n Load the variables from UAlbany's opendap server\n\n Args:\n date_obj (datetime): a datetime object\n \"\"\"\n\n # construct sub region\n sub_region = {'lon':slice(map_region[0], map_region[1]),\n 'lat':slice(map_region[2], map_region[3])}\n\n # Subset and load data\n subdata = {}\n\n # wind field\n my_bar = st.progress(0)\n data = read_variable('u', date_obj)\n subdata['u200'] = data['u'].sel(lev=200, **sub_region).load() ; my_bar.progress(4)\n subdata['u500'] = data['u'].sel(lev=500, **sub_region).load() ; my_bar.progress(8)\n subdata['u700'] = data['u'].sel(lev=700, **sub_region).load() ; my_bar.progress(12)\n subdata['u850'] = data['u'].sel(lev=850, **sub_region).load() ; my_bar.progress(16)\n subdata['u925'] = data['u'].sel(lev=925, **sub_region).load() ; my_bar.progress(20)\n data = read_variable('v', date_obj)\n subdata['v200'] = data['v'].sel(lev=200, **sub_region).load() ; my_bar.progress(24)\n subdata['v500'] = data['v'].sel(lev=500, **sub_region).load() ; my_bar.progress(28)\n subdata['v700'] = data['v'].sel(lev=700, **sub_region).load() ; my_bar.progress(32)\n subdata['v850'] = data['v'].sel(lev=850, **sub_region).load() ; my_bar.progress(36)\n subdata['v925'] = data['v'].sel(lev=925, **sub_region).load() ; my_bar.progress(40)\n\n # vertical velocity field\n data = read_variable('w', date_obj)\n subdata['w700'] = data['w'].sel(lev=700, **sub_region).load() ; my_bar.progress(45)\n\n # pressure on pv surface\n data = read_variable('pres_pv', date_obj)\n subdata['pres_pv2'] = data['pres_pv'].sel(lev=2.0E-6, **sub_region).load() ; my_bar.progress(50)\n subdata['pres_pv2'].metpy.convert_units('hPa')\n\n # geopotential height\n data = read_variable('g', date_obj)\n subdata['gh200'] = data['g'].sel(lev=200, **sub_region).load() ; my_bar.progress(55)\n subdata['gh500'] = data['g'].sel(lev=500, **sub_region).load() ; my_bar.progress(60)\n subdata['gh700'] = data['g'].sel(lev=700, **sub_region).load() ; my_bar.progress(62)\n\n # high temperature\n data = read_variable('t', date_obj)\n subdata['t500'] = data['t'].sel(lev=500, **sub_region).load() ; my_bar.progress(64)\n subdata['t700'] = data['t'].sel(lev=700, **sub_region).load() ; my_bar.progress(66)\n subdata['t850'] = data['t'].sel(lev=850, **sub_region).load() ; my_bar.progress(68)\n subdata['t925'] = data['t'].sel(lev=925, **sub_region).load() ; my_bar.progress(70)\n subdata['t500'].metpy.convert_units('degC')\n subdata['t700'].metpy.convert_units('degC')\n subdata['t850'].metpy.convert_units('degC')\n 
subdata['t925'].metpy.convert_units('degC')\n \n # high moisture field\n data = read_variable('q', date_obj)\n subdata['q700'] = data['q'].sel(lev=700, **sub_region).load() ; my_bar.progress(75)\n subdata['q850'] = data['q'].sel(lev=850, **sub_region).load() ; my_bar.progress(80)\n subdata['q925'] = data['q'].sel(lev=925, **sub_region).load() ; my_bar.progress(85)\n\n # mean sea level pressure\n data = read_variable('pmsl', date_obj)\n subdata['mslp'] = data['pmsl'].sel(**sub_region).load() ; my_bar.progress(90)\n subdata['mslp'].metpy.convert_units('hPa')\n\n # precipitable water\n data = read_variable('pwat', date_obj)\n subdata['pwat'] = data['pwat'].sel(**sub_region).load() ; my_bar.progress(100)\n subdata['pwat'].metpy.convert_units('mm')\n\n # surface temperature, 2018-, this data is wrong.\n #data = read_variable('tsfc', date_obj)\n #subdata['tsfc'] = data['tsfc'].sel(**sub_region).load() ; my_bar.progress(100)\n #subdata['tsfc'].metpy.convert_units('degC')\n\n return subdata\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"apps/reanalysis_maps/back/app_20210702.py","file_name":"app_20210702.py","file_ext":"py","file_size_in_byte":10032,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"118929951","text":"# -*- coding: utf-8 -*-\n#\nfrom project import app\nfrom flask import abort, request, make_response\nfrom ..models import session\nfrom ..models.Form import Form\nfrom ..models.Input import Input\nfrom sqlalchemy import *\nfrom sqlalchemy.exc import ProgrammingError\nimport urllib.parse\nimport json\n\n\ndef getTrackSqlConnection(forcedSqlConn = None):\n\tif forcedSqlConn != None:\n\t\ttrackSqlConnexion = forcedSqlConn\n\telse:\n\t\twith open (\"project/config/config.json\", \"r\") as myfile:\n\t\t\tdata = json.loads(myfile.read())\n\n\t\ttrackSqlConnexion = data[\"sql\"][\"urlTrack\"] if 'sql' in data and 'urlTrack' in data['sql'] else abort(make_response('config.json file has no url referencing a Referential Track Database !', 400))\n\t\ttrackSqlConnexion = urllib.parse.quote_plus(trackSqlConnexion)\n\t\ttrackSqlConnexion = \"mssql+pyodbc:///?odbc_connect=%s\" % trackSqlConnexion\n\n\t#print(\"trackSqlConnexion = \" + trackSqlConnexion)\n\ttry:\n\t\ttrackEngine = create_engine(trackSqlConnexion)\n\texcept ProgrammingError:\n\t\tabort(make_response('Could not open database !', 400))\n\telse:\n\t\treturn trackEngine\n\tfinally:\n\t\tpass\n\n\treturn None\n\n@app.route('/Track/getData', methods = ['POST'])\ndef getData():\n\tif 'datas' in request.json:\n\t\ttoret = {}\n\t\ttrackEngine = getTrackSqlConnection()\n\n\t\tif (trackEngine != None):\n\t\t\tfor key, value in request.json[\"datas\"].items():\n\t\t\t\ttoret[key] = {}\n\t\t\t\ttable, column = value.split(\":\")\n\t\t\t\tresult = trackEngine.execute(\"SELECT DISTINCT [\"+table[:4]+\"_\"+column+\"] FROM [\"+table+\"]\")\n\t\t\t\ttry:\n\t\t\t\t\tfor row in result:\n\t\t\t\t\t\ttoret[key][row[0]] = row[0]\n\t\t\t\texcept:\n\t\t\t\t\tprint('iteration stopped')\n\t\t\treturn json.dumps(toret, ensure_ascii=False)\n\telse:\n\t\tabort(make_response('No datas given !', 400))\n\n@app.route('/unities//', methods = ['GET'])\n@app.route('/getTrackUnities/', methods = ['GET'])\ndef getTrackUnities(lang, context):\n\ttoret = {}\n\ttrackEngine = getTrackSqlConnection()\n\n\tif (trackEngine != None):\n\t\ttoret[\"unities\"] = []\n\t\tresult = trackEngine.execute(\"SELECT DISTINCT [TUni_Label\"+lang+\"] FROM [TUnite] ORDER BY [TUni_Label\"+lang+\"]\")\n\t\tfor row in 
result:\n\t\t\ttoret[\"unities\"].append(row[0])\n\t\treturn json.dumps(toret, ensure_ascii=False)\n\n@app.route('/TrackTypes/<lang>', methods = ['GET'])\ndef getTrackTypes(lang):\n\ttoret = {}\n\ttrackEngine = getTrackSqlConnection()\n\n\tif (trackEngine != None):\n\t\ttoret[\"types\"] = []\n\t\tresult = trackEngine.execute(\"SELECT DISTINCT [TTyp_Nom_Label\"+lang+\"] FROM [TType] ORDER BY [TTyp_Nom_Label\"+lang+\"]\")\n\t\tfor row in result:\n\t\t\ttoret[\"types\"].append(row[0])\n\t\treturn json.dumps(toret, ensure_ascii=False)\n\n\n@app.route('/Track/FormWeightWFBID/<formbuilderID>', methods = ['GET'])\ndef getTrackFormWeightWFBID(formbuilderID):\n\tmyForm = session.query(Form).filter_by(pk_Form = formbuilderID).first()\n\tif myForm != None:\n\t\treturn (getTrackFormWeight(myForm.originalID))\n\telse:\n\t\tabort(make_response('The form with ID ' + formbuilderID + ' could not be found !', 400))\n\n@app.route('/Track/FormWeight/<originalID>', methods = ['GET'])\ndef getTrackFormWeight(originalID):\n\ttoret = {}\n\n\twith open (\"project/config/config.json\", \"r\") as myfile:\n\t\tdata = json.loads(myfile.read())\n\n\ttrackDatabases = data[\"refsDB\"][\"track\"] if 'refsDB' in data and 'track' in data['refsDB'] else abort(make_response('config.json file is incomplete and not referencing any Track Databases !', 400))\n\n\ttoret[\"FormWeight\"] = {}\n\tfor itemDB in trackDatabases:\n\t\ttrackSqlConnexion = \"DRIVER={SQL Server};Server=TRACK\\CORE;Database=\" + itemDB + \";UID=FormBuilderUser;PWD=fbuser42;\"\n\t\ttrackSqlConnexion = urllib.parse.quote_plus(trackSqlConnexion)\n\t\ttrackSqlConnexion = \"mssql+pyodbc:///?odbc_connect=%s\" % trackSqlConnexion\n\n\t\ttrackEngine = getTrackSqlConnection(trackSqlConnexion)\n\n\t\tif (trackEngine != None):\n\t\t\ttry:\n\t\t\t\tresult = trackEngine.execute(\"SELECT count(*) FROM [TSaisie] WHERE TSai_FK_TPro_ID = \" + str(originalID))\n\t\t\texcept ProgrammingError:\n\t\t\t\tabort(make_response('Could not open database ' + itemDB + ' ! You might not have the proper rights ? 
Or the Database is misreferenced ?', 400))\n\n\t\t\ttoret[\"FormWeight\"][itemDB] = result.fetchone()[0]\n\n\treturn json.dumps(toret, ensure_ascii=False)\n\n@app.route('/Track/InputWeightWFBID/<formbuilderID>', methods = ['GET'])\ndef getTrackInputWeightWFBID(formbuilderID):\n\tmyInput = session.query(Input).filter_by(pk_Input = formbuilderID).first()\n\tif myInput != None:\n\t\treturn (getTrackInputWeight(myInput.originalID))\n\telse:\n\t\tabort(make_response('The input with ID ' + formbuilderID + ' could not be found !', 400))\n\n@app.route('/Track/InputWeight/<originalID>', methods = ['GET'])\ndef getTrackInputWeight(originalID):\n\ttoret = {}\n\n\twith open (\"project/config/config.json\", \"r\") as myfile:\n\t\tdata = json.loads(myfile.read())\n\n\ttrackDatabases = data[\"refsDB\"][\"track\"] if 'refsDB' in data and 'track' in data['refsDB'] else abort(make_response('config.json file is incomplete and not referencing any Track Databases !', 400))\n\n\ttoret[\"InputWeight\"] = {}\n\tfor itemDB in trackDatabases:\n\t\ttrackSqlConnexion = \"DRIVER={SQL Server};Server=TRACK\\CORE;Database=\" + itemDB + \";UID=FormBuilderUser;PWD=fbuser42;\"\n\t\ttrackSqlConnexion = urllib.parse.quote_plus(trackSqlConnexion)\n\t\ttrackSqlConnexion = \"mssql+pyodbc:///?odbc_connect=%s\" % trackSqlConnexion\n\n\t\ttrackEngine = getTrackSqlConnection(trackSqlConnexion)\n\n\t\tif (trackEngine != None):\n\t\t\ttry:\n\t\t\t\ttotresult = 0\n\n\t\t\t\ttotresult = trackEngine.execute(\"SELECT count(*) from [TDChar] where TDCh_FK_TObs_ID = \" + str(originalID)).fetchone()[0]\n\t\t\t\t\n\t\t\t\tif (totresult == 0):\n\t\t\t\t\ttotresult = trackEngine.execute(\"SELECT count(*) from [TDEntier] where TDEn_FK_TObs_ID = \" + str(originalID)).fetchone()[0]\n\n\t\t\t\tif (totresult == 0):\n\t\t\t\t\ttotresult = trackEngine.execute(\"SELECT count(*) from [TDDate] where TDDa_FK_TObs_ID = \" + str(originalID)).fetchone()[0]\n\t\t\t\t\n\t\t\t\tif (totresult == 0):\n\t\t\t\t\ttotresult = trackEngine.execute(\"SELECT count(*) from [TDReel] where TDRe_FK_TObs_ID = \" + str(originalID)).fetchone()[0]\n\t\t\t\n\t\t\texcept ProgrammingError:\n\t\t\t\tabort(make_response('Could not open database ' + itemDB + ' ! You might not have the proper rights ? 
Or the Database is misreferenced ?', 400))\n\n\t\t\ttoret[\"InputWeight\"][itemDB] = totresult\n\n\treturn json.dumps(toret, ensure_ascii=False)\n","sub_path":"project/controllers/TrackController.py","file_name":"TrackController.py","file_ext":"py","file_size_in_byte":6261,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"181942633","text":"\r\nfrom copy import deepcopy\r\nfrom itertools import product\r\nimport pandas as pd\r\nimport numpy as np\r\n\r\n\r\n\r\ndef make_unit_list():\r\n '''\r\n Makes a list of dataframes of the SMILES of different types of building block units\r\n Contains: smi_filename, smiles, sTDDFTxtb_HOMO, calibrated B3LYP_HOMO, A_or_D, fused_ring_count\r\n\r\n Returns\r\n -------\r\n units: list\r\n list of dataframes containing SMILES, filenames, HOMO, A_or_D, and fused ring count\r\n [left_terminals, fused_cores, right_terminals]\r\n '''\r\n\r\n cores = pd.read_csv('../../building_blocks/cores_with_ringcount.csv', index_col=0)\r\n left_terminals = pd.read_csv('../../building_blocks/left_terminals_with_ringcount.csv', index_col=0)\r\n right_terminals = pd.read_csv('../../building_blocks/right_terminals_with_ringcount.csv', index_col=0)\r\n\r\n # cores with 4 or more fused rings\r\n fused_cores = cores[cores['fused_ring_count'] > 3]\r\n # drops the old index\r\n fused_cores = fused_cores.reset_index(drop=True)\r\n\r\n # list of dataframes containing SMILES for terminal and fused cores\r\n units = [left_terminals, fused_cores, right_terminals]\r\n\r\n return units\r\n\r\ndef make_NFA_str(temp_NFA, unit_list):\r\n '''\r\n Makes the SMILES of the new molecule from the indices selected\r\n\r\n Parameters\r\n -----------\r\n temp_NFA: list\r\n list of indices in order of: [left terminal, core, right terminal]\r\n unit_list: list\r\n list of dataframes containing SMILES of each unit. 
[left_terminals, fused_cores, right_terminals]\r\n \r\n Returns\r\n ----------\r\n SMILES: str\r\n SMILES string of the new molecule\r\n '''\r\n\r\n left_term = temp_NFA[0]\r\n core = temp_NFA[1]\r\n right_term = temp_NFA[2]\r\n SMILES = unit_list[0].iloc[left_term][1] + unit_list[1].iloc[core][1] + unit_list[2].iloc[right_term][1]\r\n\r\n return SMILES\r\n\r\ndef make_filename(NFA):\r\n '''\r\n Makes a string of the indices for use as a file name\r\n\r\n Parameters\r\n ----------\r\n NFA: list\r\n list of indices: [term, core, term_R]\r\n\r\n Returns\r\n --------\r\n file_name: str\r\n filename in the format: LT_C_RT \r\n where LT is index of left terminal, C is index of core, and RT is index of right terminal\r\n '''\r\n\r\n # capture building block indexes as strings for file naming\r\n left_term = str(NFA[0])\r\n core = str(NFA[1])\r\n right_term = str(NFA[2])\r\n\r\n # make file name string\r\n file_name = '%s_%s_%s' % (left_term, core, right_term)\r\n\r\n return file_name","sub_path":"GA/GA_1/old_rf_ann_model/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":2525,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"249330167","text":"import os\nimport cv2\nimport torch\nimport numpy as np\nfrom PIL import Image\nfrom utils.config import CONFIG\nfrom torchvision import transforms\n\ndef img_transform():\n data_transform = transforms.Compose([\n transforms.Normalize(mean=[0.485, 0.456, 0.406],\n std=[0.229, 0.224, 0.225])\n ])\n return data_transform\n\n\ndef model_save(gen, dis, gen_optimizer, dis_optimizer, epoch, max_save=5, verbose=True):\n if (verbose):\n print(\"saving the model in epoch {}\".format(epoch))\n \n writePath = os.path.join(CONFIG['CHECKPOINT_DIR'] + \"model_epoch{}.pth\".format(epoch))\n\n if (len(CONFIG['GPU'])>1):\n torch.save({\n 'epoch': epoch,\n 'gen_state_dict': gen.module.state_dict(),\n 'dis_state_dict': dis.module.state_dict(),\n 'gen_optimizer': gen_optimizer.state_dict(),\n 'dis_optimizer': dis_optimizer.state_dict()\n }, writePath)\n\n else:\n torch.save({\n 'epoch': epoch,\n 'gen_state_dict': gen.state_dict(),\n 'dis_state_dict': dis.state_dict(),\n 'gen_optimizer': gen_optimizer.state_dict(),\n 'dis_optimizer': dis_optimizer.state_dict()\n }, writePath)\n\n \n # Check the model and only preserve the last max_save(default:5) models\n flist = os.listdir(CONFIG['CHECKPOINT_DIR'])\n if (len(flist) > max_save):\n for f in flist:\n if (f == 'model_epoch' + str(epoch-max_save*CONFIG['SAVE_LAG']) + '.pth'):\n os.remove(os.path.join(CONFIG['CHECKPOINT_DIR'], f))\n break\n \n\ndef train_log(logs, mode='train', verbose=True):\n \"\"\"\n Write the train log to file\n \"\"\"\n with open(CONFIG['LOG_DIR'] + mode + '_log.txt','a') as f:\n for _, item in enumerate(logs, 0):\n log = \"{}: {:.4f}\\t\".format(item, logs[item])\n f.write(log)\n \n if (verbose):\n print(log, end='')\n \n f.write(\"\\n\")\n \n if (verbose):\n print('\\n', end='')\n \n \n \ndef sample_save(image, ori_mask, ori_edge, fake_output, epoch, verbose=True):\n \"\"\"\n Save a batch to image with format [ground_truth, mask, output_of_gan]\n \"\"\"\n if (verbose):\n print(\"saving a batch of sample for validation in epoch{}\".format(epoch))\n \n for i in range(image.shape[0]):\n # gt = post_process(ground_truth[i,:,:,:])\n img = post_process(image[i,:,:,:])\n mask = post_process(ori_mask[i,:,:,:])\n edge = post_process(ori_edge[i,:,:,:])\n output = post_process(fake_output[i,:,:,:], inverseTrans=False)\n img1 = np.hstack((img, 
1-mask/255))\n img2 = np.hstack((img*(mask/255)+(255-edge)*(1-mask/255), output))\n img_combinbe = np.vstack((img1, img2))\n cv2.imwrite(os.path.join(CONFIG['SAMPLE_DIR'], \n 'sample_'+str(epoch)+'_sample'+str(i)+'.jpg'), img_combinbe[:,:,::-1])\n \n \ndef post_process(img, inverseTrans=CONFIG['NORM']):\n \n img = img.to('cpu')\n \n if (inverseTrans):\n mean = torch.tensor([ [[-2.1179]], [[-2.0357]], [[-1.8044]] ])\n std = torch.tensor([ [[0.017124]], [[0.0175070]], [[0.017429]] ])\n img = (img - mean)/std\n \n else:\n img *= 255\n\n torch.clamp(img, 0, 255)\n \n img = img.to(torch.uint8)\n ch, h, w = img.shape\n if (ch == 1):\n img = img.expand(3, h, w)\n \n img = img.numpy()\n img = img.transpose([1,2,0])\n\n return img\n\ndef clear_legacy(clear_model=False):\n # Clear the history log\n flist = os.listdir(CONFIG['LOG_DIR'])\n for f in flist:\n os.remove(CONFIG['LOG_DIR'] + f)\n \n # Clear the sample validation history\n flist = os.listdir(CONFIG['SAMPLE_DIR'])\n for f in flist:\n os.remove(CONFIG['SAMPLE_DIR'] + f)\n \n if(clear_model):\n # Clear history model\n flist = os.listdir(CONFIG['CHECKPOINT_DIR'])\n for f in flist:\n os.remove(CONFIG['CHECKPOINT_DIR'] + f)\n\ndef PSNR(img1, img2):\n \"\"\"\n Caculate PSNR between two tensor (float32), and 3 channel rgb image will be converted to gray\n Note that 255*[0.299*r + 0.587*g + 0.144*b] = 76.245*r + 149.685*g + 36.720*b\n \"\"\"\n assert(img1.shape == img2.shape)\n mse = 0.\n eps = 1e-8\n\n if (img1.shape[0] == 3):\n gray1 = 76.245 * img1[:, 1, :, :] + 149.685 * img1[:, 1, :, :] + 36.720 * img1[:, 2, :, :] \n gray2 = 76.245 * img2[:, 0, :, :] + 149.685 * img2[:, 1, :, :] + 36.720 * img2[:, 2, :, :]\n elif (img1.shape[0] == 1):\n gray1 = img1*255\n gray2 = img2*255\n \n mse = torch.mean((gray1 - gray2)**2)\n psnr = 10 * torch.log10(255.0**2 / (mse + eps))\n return psnr.view(-1)","sub_path":"utils/tools.py","file_name":"tools.py","file_ext":"py","file_size_in_byte":4770,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"283300283","text":"from __future__ import division\nimport matplotlib.pyplot as plt\nimport math\nfrom math import pi as pi\nimport datetime as dt\nfrom datetime import datetime\nimport statistics\nimport numpy as np\nimport scipy.optimize\nfrom scipy.optimize import leastsq\nsources = ['','variation/temporal/EU','variation/temporal/ASIA','variation/temporal/US']\nlabels = ['All', 'Europe', 'Asia & Aus.', 'North America']\n\nplt.title(\"Mean diurnal shift across each observer location category\")\nplt.ylabel(\"Average detection count\")\nplt.xlabel(\"Time from midnight (hours)\")\nplt.subplots_adjust(top=0.93,bottom=0.12)\nplt.xlim((0,24))\n\nfor src in sources:\n plotTimes = range(0,24)\n plotData = [[] for i in range(0,24)]\n maxs=[]\n mins=[]\n\n with open('/home/cwp/EMC/lib/analysis/'+src+'plotTimes.txt', 'r') as f:\n prelimTime = f.readlines()\n time = []\n for i in range(len(prelimTime)):\n time.append(datetime.strptime(prelimTime[i].split('\\n')[0],\"%Y-%m-%d %H:%M:%S\"))\n\n with open('/home/cwp/EMC/lib/analysis/'+src+'plotData.txt', 'r') as f:\n prelimData = f.readlines()\n data = []\n for i in range(len(prelimData)):\n newData = prelimData[i].split('\\n')[0]\n newData = [int(x) for x in newData.split(',')]\n data.append(newData)\n\n for i in range(len(data)):\n if time[i].hour in plotTimes:\n plotData[plotTimes.index(time[i].hour)].extend(data[i])\n\n for i in range(len(plotTimes)):\n mean = statistics.mean(plotData[i])\n stdev = 
statistics.stdev(plotData[i])\n error = stdev/math.sqrt(len(plotData[i]))\n plotData[i] = mean\n maxs.append(error)\n\n plotData = np.array(plotData)\n plotTimes = np.array(plotTimes)\n\n np.append(plotTimes, 24)\n plotTimes = np.append(plotTimes, 24)\n plotData = np.insert(plotData, 0, plotData[-1])\n maxs = np.insert(maxs, 0, maxs[-1])\n #mins = np.insert(mins, 0, mins[-1])\n plt.errorbar(plotTimes, plotData, yerr=[maxs, maxs], label=labels[sources.index(src)])\n #plt.plot(plotTimes, plotData, label=\"Original data\")\n\n #yy = [0.1*math.sin(-0.1+int(x)*2*math.pi/24)+0.5 for x in plotTimes]\n guess_mean = np.mean(plotData*2*pi/24)\n guess_std = 3*np.std(plotData*2*pi/24)/(2**0.5)\n guess_phase = 0\n\n optimize_func = lambda x: x[0]*np.sin(plotTimes*2*pi/24+x[1])+x[2]-plotData\n\n result = leastsq(optimize_func, [guess_std,guess_phase,guess_mean], full_output=True)\n\n est_std,est_phase,est_mean = result[0]\n\n s_sq = (result[2]['fvec']**2).sum()/(len(plotData)-len(result[0]))\n\n fit = np.sqrt(np.diagonal(result[1]*s_sq))\n #print(fit)\n\n data_fit = est_std*np.sin(plotTimes*2*pi/24+est_phase)+est_mean\n\n\n #Work out the fit?\n\n #def func(x, a, b, c, d):\n #return (a*x**3)+(b*x**2)+(c*x)+d\n\n #params = scipy.optimize.curve_fit(func, plotTimes, plotData)[0]\n\n #poly_fit = [(params[0]*x**3)+(params[1]*x**2)+(params[2]*x)+params[3] for x in plotTimes]\n\n #print(est_std, est_phase, est_mean)\n \"\"\"\n plt.plot(plotTimes, data_fit, 'r', label=\"Fitted sine curve\")\n #plt.plot(plotTimes, yy, 'k')\n #plt.plot(plotTimes, poly_fit, 'g', label=\"Fitted cubic\")\n plt.legend()\n \"\"\"\n #plt.savefig('/home/cwp/EMC/plots/general/diurnal_shift_fit.png')\n\nplt.legend()\nplt.show()\n#plt.savefig('/home/cwp/EMC/plots/general/all_diurnal_shifts.png',dpi=500)\n","sub_path":"lib/analysis/plotShift.py","file_name":"plotShift.py","file_ext":"py","file_size_in_byte":3372,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"116553582","text":"import unittest\n\nfrom soccer import create_app, db\nfrom soccer.model import parse_json, Match, Team, Event, Group\n\n\nclass ParserModelTestCase(unittest.TestCase):\n def setUp(self):\n self.app = create_app('testing')\n self.app_context = self.app.app_context()\n self.app_context.push()\n db.create_all()\n\n def tearDown(self):\n db.session.remove()\n db.drop_all()\n self.app_context.pop()\n\n def test_parser_em(self):\n runner = self.app.test_cli_runner()\n result = runner.invoke(parse_json, ['soccer/assets/2018_matches.json'])\n self.assertEqual(result.exit_code, 0)\n self.assertEqual(result.output, 'parsing done\\n')\n matches = Match.query.all()\n self.assertEqual(len(matches), 36)\n teams = Team.query.all()\n self.assertEqual(len(teams), 24)\n events = Event.query.all()\n self.assertEqual(len(events), 1)\n groups = Group.query.all()\n self.assertEqual(len(groups), 6)\n\n def test_parser_bl(self):\n runner = self.app.test_cli_runner()\n result = runner.invoke(parse_json, ['soccer/assets/2002_matches.json'])\n self.assertEqual(result.exit_code, 0)\n self.assertEqual(result.output, 'parsing done\\n')\n","sub_path":"tests/test_model parser.py","file_name":"test_model parser.py","file_ext":"py","file_size_in_byte":1259,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"575723148","text":"import copy\nimport json\nfrom app import db\n\nclass Token(object):\n @classmethod\n def load_contract(cls):\n contract_file = './config/%s.json' % cls.__tablename__\n 
with open(contract_file) as f:\n cls.contract_cfg = json.load(f)\n\n\ndef __eq__(self, other):\n if not other:\n return False\n return (self.circulation, self.deposit) == (other.circulation, other.deposit)\n\nclass_template = {\n 'iden' : db.Column(db.Integer, primary_key=True, autoincrement=True),\n\n # the time when the statistics was executed\n 'timestamp' : db.Column(db.Integer),\n\n # the block height of mvs\n 'heightM' : db.Column(db.Integer),\n\n # the asset quantity in circulation (except the 'crosschain' address) on metaverse chain\n 'circulation' : db.Column(db.BigInteger),#db.Column(db.Numeric(64, 18), nullable=False),\n\n # the block height of ethereum\n 'heightE' : db.Column(db.Integer),\n\n # the token quantity locked by the swap address on ethereum chain\n 'deposit' : db.Column(db.BigInteger),#db.Column(db.Numeric(64, 18), nullable=False),\n\n '__eq__':__eq__\n}\n\ndef init_db():\n tokens = ['EDU', ]\n ret = []\n for token in tokens:\n classdict = {'__tablename__': token }\n classdict.update( copy.deepcopy(class_template) )\n classobj = type(token, (db.Model, Token), classdict)\n classobj.load_contract()\n ret.append(classobj)\n\n # db.drop_all()\n db.create_all()\n return ret\n\n","sub_path":"models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":1464,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"300136539","text":"# -*- coding: utf-8 -*-\n\nimport os\nimport sys\n\nPROJECT_PATH = os.path.abspath(os.path.dirname(__file__))\n\nsys.path.insert(0, os.path.join(PROJECT_PATH, \"apps\"))\nsys.path.insert(0, os.path.join(PROJECT_PATH, \"vendors\"))\nsys.path.insert(0, os.path.join(PROJECT_PATH, \"libs\"))\n\nSITE_ID = 1\n\n# If you set this to False, Django will make some optimizations so as not\n# to load the internationalization machinery.\nUSE_I18N = True\n\n# If you set this to False, Django will not format dates, numbers and\n# calendars according to the current locale\nUSE_L10N = True\n\n\n\n# URL that handles the media served from MEDIA_ROOT. 
Make sure to use a\n# trailing slash.\n# Examples: \"http://media.lawrence.com/media/\", \"http://example.com/media/\"\nMEDIA_URL = '/media/'\n\n\n\n# URL prefix for static files.\n# Example: \"http://media.lawrence.com/static/\"\nSTATIC_URL = '/static/'\n\n# URL prefix for admin static files -- CSS, JavaScript and images.\n# Make sure to use a trailing slash.\n# Examples: \"http://foo.com/static/admin/\", \"/static/admin/\".\nADMIN_MEDIA_PREFIX = '/static/admin/'\n\n\n# List of finder classes that know how to find static files in\n# various locations.\nSTATICFILES_FINDERS = (\n 'django.contrib.staticfiles.finders.FileSystemFinder',\n 'django.contrib.staticfiles.finders.AppDirectoriesFinder',\n # 'django.contrib.staticfiles.finders.DefaultStorageFinder',\n)\n\n\n\n# List of callables that know how to import templates from various sources.\nTEMPLATE_LOADERS = (\n # ('django.template.loaders.cached.Loader', (\n 'django.template.loaders.filesystem.Loader',\n 'django.template.loaders.app_directories.Loader',\n # )),\n # 'django.template.loaders.eggs.Loader',\n)\n\n\n# TEMPLATE_LOADERS = (\n# ('django.template.loaders.cached.Loader', (\n# 'django.template.loaders.filesystem.Loader',\n# 'django.template.loaders.app_directories.Loader',\n# )),\n# 'django.template.loaders.eggs.Loader',\n# )\n\n\nTEMPLATE_CONTEXT_PROCESSORS = (\n 'django.contrib.auth.context_processors.auth',\n 'django.core.context_processors.debug',\n 'django.core.context_processors.i18n',\n 'django.core.context_processors.media',\n 'django.core.context_processors.static',\n 'django.core.context_processors.request',\n 'social_auth.context_processors.social_auth_by_type_backends',\n #'django.contrib.messages.context_processors.messages',\n)\n\n\n\nMIDDLEWARE_CLASSES = (\n 'localeurl.middleware.LocaleURLMiddleware',\n 'oauth2_provider.middleware.OauthSessionMiddleware',\n # 'django.contrib.sessions.middleware.SessionMiddleware',\n 'django.middleware.common.CommonMiddleware',\n 'django.middleware.csrf.CsrfViewMiddleware',\n 'django.contrib.auth.middleware.AuthenticationMiddleware',\n 'django.contrib.messages.middleware.MessageMiddleware',\n 'django_sorting.middleware.SortingMiddleware',\n 'oauth2_provider.middleware.OauthUserMiddleware',\n # 'statistics.middleware.RequestLog',\n # 'debug_toolbar.middleware.DebugToolbarMiddleware',\n)\n\nROOT_URLCONF = 'libcms.urls'\nWSGI_APPLICATION = 'libcms.wsgi.application'\n\nAUTHENTICATION_BACKENDS = (\n 'django.contrib.auth.backends.ModelBackend',\n 'sso.backend.SSOBackend',\n 'sso_ruslan.backend.RuslanAuthBackend',\n 'oauth2_provider.backend.OauthUserBackend',\n 'guardian.backends.ObjectPermissionBackend',\n)\n\nINSTALLED_APPS = (\n 'django.contrib.auth',\n 'django.contrib.contenttypes',\n 'django.contrib.sessions',\n 'django.contrib.sites',\n 'django.contrib.messages',\n 'django.contrib.staticfiles',\n 'django.contrib.admin',\n 'huey.contrib.djhuey',\n # 'django.contrib.admindocs',\n\n # vendor apps\n 'localeurl',\n 'mptt',\n 'guardian',\n 'captcha',\n # 'debug_toolbar',\n 'django_sorting',\n # cms apps\n 'core',\n 'accounts',\n 'profile',\n # 'social_auth',\n 'filebrowser',\n 'menu',\n 'pages',\n 'news',\n 'events',\n 'extsystems',\n 'ldap_sync',\n 'participants',\n 'participant_news',\n 'participant_events',\n 'participant_pages',\n 'participant_site',\n 'participant_menu',\n 'participant_photopolls',\n 'participant_banners',\n 'professionals_pages',\n 'professionals_news',\n 'professionals',\n 'personal',\n 'ruslan_users',\n 'ask_librarian',\n 'ssearch',\n 'statistics',\n 'rbooks',\n 
'forum',\n # 'urt',\n 'orders',\n 'polls',\n 'ruslan_cabinet',\n 'external_orders',\n 'zgate',\n 'mydocs',\n 'guestbook',\n 'newinlib',\n 'attacher',\n 'oauth2_provider',\n 'sso',\n 'sso_ruslan',\n 'sso_esia',\n 'sso_tatedu',\n 'subscribe',\n 'recommended_reading',\n 'bootstrap3',\n)\n\nEMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'\n\n\n# префикс для системы кеширования\nKEY_PREFIX = 'libcms'\n\n# guardian settings\nANONYMOUS_USER_ID = -1\n\nLOGIN_REDIRECT_URL = \"/\"\n\nDEBUG_TOOLBAR_PANELS = [\n # 'debug_toolbar.panels.versions.VersionsPanel',\n # 'debug_toolbar.panels.timer.TimerPanel',\n # 'debug_toolbar.panels.settings.SettingsPanel',\n # 'debug_toolbar.panels.headers.HeadersPanel',\n # 'debug_toolbar.panels.request.RequestPanel',\n 'debug_toolbar.panels.sql.SQLPanel',\n # 'debug_toolbar.panels.staticfiles.StaticFilesPanel',\n # 'debug_toolbar.panels.templates.TemplatesPanel',\n # 'debug_toolbar.panels.cache.CachePanel',\n # 'debug_toolbar.panels.signals.SignalsPanel',\n # 'debug_toolbar.panels.logging.LoggingPanel',\n # 'debug_toolbar.panels.redirects.RedirectsPanel',\n]\n\nLOCALE_INDEPENDENT_PATHS = (\n r'^/esia_sso/redirect',\n r'^/statistics/api/',\n r'^/participants/api/',\n r'^/robots.txt/',\n)\n\nPARTICIPANTS_SHOW_ORG_TYPES = ['library', 'school']\n\nLOCALE_PATHS = (\n os.path.join(PROJECT_PATH, 'locale/'),\n)\n\nfrom local_settings import *\n\n","sub_path":"libcms/settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":5755,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"579150799","text":"__author__ = 'markadm'\nimport json\n\ndef equal_items(left, right):\n\n if left == None and right == None:\n return True\n\n if isinstance(right, basestring):\n jsonList = '{\"list\":' + right.replace(\"'\", '\"') + '}'\n\n try:\n right = json.loads(jsonList)['list']\n except:\n print('Could not read the \"expected value\"')\n\n if not isinstance(left, list) or not isinstance(right, list):\n return False\n\n return sorted(left) == sorted(right)\n","sub_path":"extractors/equalItemsComparator.py","file_name":"equalItemsComparator.py","file_ext":"py","file_size_in_byte":493,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"526137126","text":"# -*- coding: utf-8 -*-\nimport scrapy\nimport re\nimport time\nimport random\nimport hashlib\nfrom quotetutorial.items import ScrapyItem\nimport requests\nimport os\n\n\n# 国家知识产权局\nclass SipoSpider(scrapy.Spider):\n index = 25\n name = 'sipo'\n allowed_domains = ['www.sipo.gov.cn']\n start_urls = [\n 'http://www.sipo.gov.cn/gztz/index.htm',\n 'http://www.sipo.gov.cn/zfgg/index.htm',\n 'http://www.sipo.gov.cn/szywn/index.htm',\n 'http://www.sipo.gov.cn/dtxx/index.htm',\n 'http://www.sipo.gov.cn/mtsd/index.htm'\n ]\n category_index = {'gztz': '1', 'zfgg': '2', 'szywn': '3', 'dtxx': '4', 'mtsd': '5'}\n category_desc = {'gztz': '通知公告-工作通知', 'zfgg': '通知公告-政府公告', 'szywn': '时政要闻', 'dtxx': '地方动态', 'mtsd': '媒体视点' }\n url_descs = ['通知公告-工作通知', '通知公告-政府公告','时政要闻','地方动态','媒体视点' ]\n base_url = 'http://www.sipo.gov.cn'\n\n def parse(self, response):\n print(\"**********\", response.url)\n if response.status == 200:\n id_prefix = ''\n cate = ''\n if response.url in self.start_urls:\n cate_index = self.start_urls.index(response.url)\n id_prefix = str(self.index) + '-' + str(cate_index + 1)\n cate = self.url_descs[cate_index]\n\n fenye_num = response.css('form[name=\"pageForm\"] select option:last-child 
::text').extract_first()\n if fenye_num:\n for page in range(1, int(fenye_num)):\n url = response.url.replace('index.htm', 'index%s.htm' % page)\n yield scrapy.Request(url, meta={'id_prefix': id_prefix, 'category': cate}, callback=self.parse)\n else:\n id_prefix = response.meta['id_prefix']\n cate = response.meta['category']\n info_list = response.css('div.index_articl_list ul > li')\n for info in info_list:\n date = info.css('span::text').extract_first().strip()\n title = info.css('a::attr(\"title\")').extract_first().strip()\n href = response.url[0:response.url.rfind('/') + 1] + info.css('a::attr(\"href\")').extract_first()\n\n yield scrapy.Request(href, meta={'id_prefix': id_prefix,\n 'category': cate,\n 'title': title, 'date': date}, callback=self.parse_content)\n time.sleep(random.randint(1, 6))\n\n def parse_content(self, response):\n print(\"############\", response.url)\n if response.status == 200:\n md5 = hashlib.md5()\n md5.update(response.url.encode(encoding='utf-8'))\n item = ScrapyItem()\n id_prefix = response.meta['id_prefix']\n item['id'] = id_prefix + \"-\" + md5.hexdigest()\n category = response.meta['category']\n item['category'] = category\n item['title'] = response.meta['title']\n item['published_date'] = response.meta['date']\n item['source'] = ''\n details = response.css('div.index_art_con p::text').extract()\n if details:\n dd = ''.join(details)\n item['content'] = dd.replace(u'\\u3000', '').replace(u' ', '').replace(u'\\t', '').replace(u'\\xa0',\n '').replace(\n u'-->', '').replace(u';', '').replace(u'\\n', '').strip()\n item['view_count'] = '0'\n item['url'] = response.url\n attach_path_arra = []\n attach_arra = []\n atta_arra = response.css('a[href*=\".xls\"]') + response.css('a[href*=\".doc\"]') + response.css(\n 'a[href*=\".pdf\"]') + response.css('a[href*=\".zip\"]') + response.css('a[href*=\".rar\"]')\n for attch in atta_arra:\n save_path = ''\n href_attch = attch.css('::attr(\"href\")').extract_first()\n if href_attch.startswith('http'):\n attch_url = href_attch\n else:\n attch_url = self.base_url + href_attch[href_attch.find('/'):]\n attach_path_arra.append(attch_url)\n attch_name = attch.css('::text').extract_first()\n if not attch_name:\n attch_name = attch_url[attch_url.rfind(\"/\") + 1:]\n attach_arra.append(attch_name)\n if attch_name.rfind('.') == -1:\n save_path = save_path + attch_name + attch_url[attch_url.rfind('.'):]\n else:\n save_path = save_path + attch_name\n self.download_file(attch_url, save_path)\n item['attchment_path'] = ','.join(attach_path_arra)\n item['attchment'] = ','.join(attach_arra)\n # print(item)\n yield item\n\n def download_file(self, url, local_path):\n if os.path.exists(local_path):\n print(\"the %s exist\" % local_path)\n else:\n print(\"start download the file\", url)\n r = requests.get(url)\n print('down loading status ', r.status_code)\n with open(local_path, \"wb\") as code:\n code.write(r.content)\n","sub_path":"quotetutorial/quotetutorial/spiders/sipo.py","file_name":"sipo.py","file_ext":"py","file_size_in_byte":5338,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"528061474","text":"import os\nimport cv2 as cv\nimport numpy as np\n\nos.chdir('16-contours\\\\images')\nimg_color = cv.imread('test.jpg', cv.IMREAD_COLOR)\nimg_gray = cv.cvtColor(img_color, cv.COLOR_BGR2GRAY)\nret, img_binary = cv.threshold(img_gray, 150, 255, cv.THRESH_BINARY_INV)\nkernel = cv.getStructuringElement(cv.MORPH_ELLIPSE, (5, 5))\nimg_binary = cv.morphologyEx(img_binary, cv.MORPH_CLOSE, 
kernel)\n\ncontours, hierarchy = cv.findContours(img_binary, cv.RETR_LIST, \n cv.CHAIN_APPROX_SIMPLE)\n\ncv.drawContours(img_color, contours, 0, (0,0,255), 2)\ncv.drawContours(img_color, contours, 1, (0,255,0), 2)\n\nfor contour in contours :\n # 물체 회전을 고려하지 않은\n x,y, w,h = cv.boundingRect(contour)\n print(type(cv.boundingRect(contour))) # \n print(cv.boundingRect(contour)) # (66, 102, 276, 357)\n print('x: ',x, 'y : ', y, 'w : ',w, 'h : ', h) # x: 66 y : 102 w : 276 h : 357\n cv.rectangle(img_color, (x,y), (x+w,y+h), (255,255,0), 2)\n\n # 물체의 회전을 고려함 \n rect = cv.minAreaRect(contour)\n print(type(rect)) # \n print(rect)\n # ((160.07183837890625, 293.6353759765625), (410.744384765625, 117.58914947509766), 48.012786865234375)\n\n box = cv.boxPoints(rect)\n print('box1 : ',box)\n # [[-21.016571 180.31491 ]\n # [ 66.38675 101.65193 ]\n # [341.16025 406.95584 ]\n # [253.75693 485.61884 ]]\n\n box = np.int0(box)\n # [[-21 180]\n # [ 66 101]\n # [341 406]\n # [253 485]]\n print('box2 : ',box)\n cv.drawContours(img_color, [box], 0, (255,0,255), 2)\n\ncv.imshow('result', img_color)\ncv.waitKey(0)\n\ncv.destroyAllWindows()","sub_path":"OpenCV/16-contours/106.externalRect.py","file_name":"106.externalRect.py","file_ext":"py","file_size_in_byte":1643,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"406435632","text":"'''\nGiven an array arr[] of n integers, construct a Product Array prod[] (of same size)\nsuch that prod[i] is equal to the product of all the elements of arr[] except arr[i] without using division.\nExample: arr[] = {10, 3, 5, 6, 2}. So , prod[] = {180, 600, 360, 300, 900}\n'''\ndef product_array_puzzle(array):\n runningProduct = 1\n size = len(array)\n result = [1 for x in range(0,size)]\n for i in range(0,size):\n result[i] = runningProduct\n runningProduct *= array[i]\n runningProduct = 1\n for i in range(size-1,-1,-1):\n result[i] *= runningProduct\n runningProduct *= array[i]\n return result\n\n\n\n#Testing\narrays = [[2,1,6,3],[10, 3, 5, 6, 2], [1,3,4,6,2]]\nfor array in arrays:\n print(product_array_puzzle(array))\n","sub_path":"product_array_puzzle.py","file_name":"product_array_puzzle.py","file_ext":"py","file_size_in_byte":761,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"379389181","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport re\n\nregex = re.compile(r'\\(.*\\(.*\\).*\\)')\n\nwith open('numerical.f90','r') as inFile:\n with open('OutputLineNumbers', 'w') as outFile:\n\n # loop each line of input\n for line_i, line in enumerate(inFile, 1):\n\n # check if regex matched\n if regex.search( line ):\n # if so, write it the output file, with inFile line no.\n outFile.write( \"%d| %s\" % (line_i, line) )\n","sub_path":"check.py","file_name":"check.py","file_ext":"py","file_size_in_byte":444,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"224617107","text":"#!/usr/bin/python3 -O\n\n\"\"\"\n\nTwitter Data Sentiment Analyzer Neural Network Model for Text Classification.\n\nOwner : paul-tqh-nguyen\n\nCreated : 10/30/2019\n\nFile Name : models.py\n\nFile Organization:\n* Imports\n* Misc Utilities\n* Models\n* Data Loading Utilities\n* Classifier Classes\n* Main Runner\n\n\"\"\"\n\n###########\n# Imports #\n###########\n\n# @todo verify that these are all used\nimport torch\nimport torch.nn as nn\nfrom torch.utils.data import DataLoader, TensorDataset\nimport pandas as pd\nimport sklearn.utils\nfrom 
tqdm import tqdm\nimport warnings\nimport os\nimport csv\nimport math\nimport statistics\nimport time\nfrom contextlib import contextmanager\nfrom string import punctuation\nfrom functools import reduce\nfrom collections import OrderedDict\nfrom typing import List\n\n##################\n# Misc Utilities #\n##################\n\n@contextmanager\ndef timer(section_name=None, exitCallback=None):\n start_time = time.time()\n yield\n end_time = time.time()\n elapsed_time = end_time - start_time\n if exitCallback != None:\n exitCallback(elapsed_time)\n elif section_name:\n pass\n #print('Execution of \"{section_name}\" took {elapsed_time} seconds.'.format(section_name=section_name, elapsed_time=elapsed_time))\n else:\n print('Execution took {elapsed_time} seconds.'.format(elapsed_time=elapsed_time))\n\n\n##########\n# Models #\n##########\n\nclass SentimentAnalysisNetwork(nn.Module):\n def __init__(self, vocabulary_size,\n embedding_size=400,\n lstm_hidden_size=300,\n number_of_lstm_layers=2,\n drop_out_probability=0.2) -> None:\n super().__init__()\n # @todo make this not have to be batch_first\n \n self.number_of_lstm_layers=number_of_lstm_layers\n self.lstm_hidden_size=lstm_hidden_size\n #Embedding and LSTM layers\n self.embedding=nn.Embedding(vocabulary_size, embedding_size)\n self.lstm=nn.LSTM(embedding_size, lstm_hidden_size, number_of_lstm_layers, dropout=drop_out_probability, batch_first=True)\n #dropout layer\n self.dropout=nn.Dropout(0.3) # @todo do we need this?\n #Linear and sigmoid layer\n self.fc=nn.Linear(lstm_hidden_size, 1)\n self.sigmoid=nn.Sigmoid()\n\n def forward(self, x): # @todo rename this input ; add types\n batch_size=x.size()\n #Embadding and LSTM output\n embedd=self.embedding(x)\n lstm_out, _ = self.lstm(embedd)\n #stack up the lstm output\n lstm_out=lstm_out.contiguous().view(-1, self.lstm_hidden_size)\n #dropout and fully connected layers\n out=self.dropout(lstm_out)\n out=self.fc(out)\n sig_out=self.sigmoid(out)\n sig_out=sig_out.view(batch_size)\n sig_out=sig_out[:, -1] # takes just the last state?\n return sig_out\n\n##########################\n# Data Loading Utilities #\n##########################\n\ndef normalize_string(input_string: str) -> str:\n normalized_string = input_string\n normalized_string = ''.join(filter(lambda character: character not in punctuation, normalized_string))\n normalized_string = ' '.join(filter(lambda word: word is not '', normalized_string.split()))\n normalized_string = normalized_string.lower()\n return normalized_string\n\nCURRENT_FILE_PATH = os.path.abspath(os.path.dirname(__file__))\nTEXT_DATA_LOCATION = os.path.join(CURRENT_FILE_PATH, \"./data/reviews.txt\")\nLABEL_DATA_LOCATION = os.path.join(CURRENT_FILE_PATH, \"./data/labels.txt\")\n\ndef sentiment_to_id(sentiment_label: str) -> int:\n id = 0 if sentiment_label == \"negative\" else 1 if sentiment_label == \"positive\" else None\n assert id is not None, \"Input data not handled correctly.\"\n return id\n\nTRAINING_DATA_SENTIMENT_COLUMN_NAME = \"label\"\nTRAINING_DATA_TEXT_COLUMN_NAME = \"text\"\n\ndef get_normalized_training_dataframe() -> pd.DataFrame:\n global TRAINING_DATA_LOCATION\n texts_dataframe = pd.read_csv(TEXT_DATA_LOCATION, names=[TRAINING_DATA_TEXT_COLUMN_NAME], quoting=csv.QUOTE_NONE)\n texts_series = texts_dataframe[TRAINING_DATA_TEXT_COLUMN_NAME]\n tqdm.pandas(desc=\"Normalizing texts\")\n normalized_texts_series = texts_series.progress_apply(normalize_string)\n tqdm.pandas(desc=\"Loading labels\")\n labels_dataframe = pd.read_csv(LABEL_DATA_LOCATION, 
names=[TRAINING_DATA_SENTIMENT_COLUMN_NAME], quoting=csv.QUOTE_NONE)\n labels_series = labels_dataframe[TRAINING_DATA_SENTIMENT_COLUMN_NAME]\n label_ids_series = labels_series.progress_apply(sentiment_to_id)\n training_dataframe = pd.DataFrame({\n TRAINING_DATA_TEXT_COLUMN_NAME: normalized_texts_series,\n TRAINING_DATA_SENTIMENT_COLUMN_NAME: label_ids_series,\n })\n return training_dataframe\n\nUNKNOWN_WORD_ID = 0\nPADDING_TOKEN_ID = 1\nSPECIAL_TOKENS = [UNKNOWN_WORD_ID, PADDING_TOKEN_ID]\n\ndef vocabulary_from_training_dataframe(training_dataframe: pd.DataFrame): # @todo add return types\n global UNKNOWN_WORD_ID\n global SPECIAL_TOKENS\n texts_series = training_dataframe[TRAINING_DATA_TEXT_COLUMN_NAME]\n all_text = \" \".join(texts_series)\n all_words = all_text.split()\n all_words_progress_bar = tqdm(all_words)\n all_words_progress_bar.set_description(\"Determining unique vocabulary\")\n vocabulary = set(all_words_progress_bar)\n number_of_special_tokens = len(SPECIAL_TOKENS)\n vocabulary_to_id_mapping = {word : index+number_of_special_tokens for index, word in enumerate(vocabulary)}\n word_tokenizer = lambda word: UNKNOWN_WORD_ID if word not in vocabulary_to_id_mapping else vocabulary_to_id_mapping[word]\n vocabulary_size = len(vocabulary) + number_of_special_tokens\n return vocabulary, word_tokenizer, vocabulary_size\n\ndef tokenized_training_dataframe_from_normalized_training_dataframe(normalized_training_dataframe: pd.DataFrame, word_tokenizer) -> pd.DataFrame:\n sentence_tokenizer = lambda sentence: list(map(word_tokenizer, sentence.split()))\n texts_series = normalized_training_dataframe[TRAINING_DATA_TEXT_COLUMN_NAME]\n tokenized_texts_series = texts_series.apply(sentence_tokenizer)\n tokenized_training_dataframe = normalized_training_dataframe.copy()\n tokenized_training_dataframe.loc[:, TRAINING_DATA_TEXT_COLUMN_NAME] = tokenized_texts_series\n return tokenized_training_dataframe\n\ndef pad_tokens(tokens: List[int], final_length: int) -> List[int]:\n global PADDING_TOKEN_ID\n number_of_padding_tokens_needed = final_length-len(tokens)\n if number_of_padding_tokens_needed < 0:\n padded_tokens = tokens[:final_length]\n else:\n padded_tokens = tokens + [PADDING_TOKEN_ID]*number_of_padding_tokens_needed\n return padded_tokens\n\ndef pad_training_tokenized_texts_series(training_tokenized_texts_series: pd.Series, final_length: int) -> pd.Series:\n padder = lambda tokens: pad_tokens(tokens, final_length)\n padded_training_tokenized_texts_series = training_tokenized_texts_series.apply(padder)\n return padded_training_tokenized_texts_series\n\nTRAINING_DATA_PORTION = 0.90\nVALIDATION_DATA_PORTION = 1 - TRAINING_DATA_PORTION\n\nMAX_TEXT_WORD_LENGTH = None # @todo use this\n\nRANDOMNESS_SEED = 1\n\ndef get_training_and_validation_dataloaders(batch_size: int): # @todo add return types\n # @todo abstract out chunks below\n global TRAINING_DATA_PORTION\n global VALIDATION_DATA_PORTION\n global TRAINING_DATA_SENTIMENT_COLUMN_NAME\n global TRAINING_DATA_TEXT_COLUMN_NAME\n global MAX_TEXT_WORD_LENGTH\n normalized_training_dataframe = get_normalized_training_dataframe()\n normalized_training_dataframe = sklearn.utils.shuffle(normalized_training_dataframe, random_state=RANDOMNESS_SEED).reset_index(drop=True)\n # normalized_training_dataframe = normalized_training_dataframe.truncate(after=200) # @todo remove this\n vocabulary, word_tokenizer, vocabulary_size = vocabulary_from_training_dataframe(normalized_training_dataframe)\n tokenized_training_dataframe = 
tokenized_training_dataframe_from_normalized_training_dataframe(normalized_training_dataframe, word_tokenizer)\n text_lengths_series = tokenized_training_dataframe[TRAINING_DATA_TEXT_COLUMN_NAME].apply(len)\n unpadded_max_text_word_length = max(text_lengths_series)\n if unpadded_max_text_word_length > 1024:\n warnings.warn(\"Got an unexpectedly large input text size of {max_text_word_length}\".format(max_text_word_length=unpadded_max_text_word_length))\n unpadded_median_text_word_length = statistics.median(text_lengths_series)\n MAX_TEXT_WORD_LENGTH = 2**math.ceil(math.log2(unpadded_median_text_word_length))\n print(\"Max Sequence Length: {max_sequence_length}\".format(max_sequence_length=MAX_TEXT_WORD_LENGTH))\n all_training_tokenized_texts_series = tokenized_training_dataframe[TRAINING_DATA_TEXT_COLUMN_NAME]\n all_training_padded_tokenized_texts_series = pad_training_tokenized_texts_series(all_training_tokenized_texts_series, MAX_TEXT_WORD_LENGTH)\n all_training_sentiments_series = tokenized_training_dataframe[TRAINING_DATA_SENTIMENT_COLUMN_NAME]\n all_training_data_size = len(all_training_sentiments_series)\n training_length = round(TRAINING_DATA_PORTION * all_training_data_size)\n validation_length = round(VALIDATION_DATA_PORTION * all_training_data_size)\n training_inputs = all_training_padded_tokenized_texts_series[:training_length]\n training_outputs = all_training_sentiments_series[:training_length]\n validation_inputs = all_training_padded_tokenized_texts_series[training_length:]\n validation_outputs = all_training_sentiments_series[training_length:]\n assert len(validation_inputs) == validation_length and len(validation_outputs) == validation_length\n training_inputs = torch.LongTensor(list(training_inputs))\n training_outputs = torch.FloatTensor(list(training_outputs))\n validation_inputs = torch.LongTensor(list(validation_inputs))\n validation_outputs = torch.FloatTensor(list(validation_outputs))\n training_dataset = TensorDataset(training_inputs, training_outputs)\n validation_dataset = TensorDataset(validation_inputs, validation_outputs)\n training_dataloader = DataLoader(training_dataset, batch_size=batch_size)\n validation_dataloader = DataLoader(validation_dataset, batch_size=batch_size)\n return training_dataloader, validation_dataloader, vocabulary_size\n\n######################\n# Classifier Classes #\n######################\n\nclass SentimentAnalysisClassifier():\n def __init__(self, # @todo add types\n batch_size=1,\n embedding_size=400,\n lstm_hidden_size=256,\n number_of_lstm_layers=2,\n drop_out_probability=0.2,\n learning_rate=1e-3,\n number_of_iterations_between_updates=1000,\n ) -> None:\n self.training_dataloader, self.validation_dataloader, vocabulary_size = get_training_and_validation_dataloaders(batch_size)\n self.loss_function = nn.BCELoss()\n self.model = SentimentAnalysisNetwork(\n vocabulary_size, \n embedding_size=embedding_size,\n lstm_hidden_size=lstm_hidden_size,\n number_of_lstm_layers=number_of_lstm_layers,\n drop_out_probability=drop_out_probability)\n self.optimizer = torch.optim.Adam(self.model.parameters(), lr=learning_rate)\n self.number_of_iterations_between_updates = number_of_iterations_between_updates\n self.number_of_completed_epochs = 0\n self.most_recent_epoch_training_loss = 0\n\n def train(self, number_of_epochs_to_train: int) -> None:\n self.model.train()\n for _ in range(number_of_epochs_to_train):\n self.print_epoch_training_preamble()\n self.note_validation_statistics()\n self.most_recent_epoch_training_loss = 0\n 
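# the loss total reset just above is divided by the running batch count inside the loop below, so the tqdm description shows a mean rather than a summed loss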
training_process_bar = tqdm(enumerate(self.training_dataloader), total=len(self.training_dataloader))\n for training_input_index, (inputs, labels) in training_process_bar:\n self.train_one_iteration(inputs, labels)\n mean_training_loss = round(self.most_recent_epoch_training_loss/(1+training_input_index), 8)\n training_process_bar.set_description(\"Mean training loss: {:0.8f}\".format(mean_training_loss))\n if 0 == (training_input_index % self.number_of_iterations_between_updates) and training_input_index != 0:\n self.note_validation_statistics()\n self.print_epoch_training_postamble()\n self.number_of_completed_epochs += 1\n return None\n \n def train_one_iteration(self, inputs, labels) -> None: # @todo add input types\n self.model.zero_grad()\n predicted_label = self.model(inputs).squeeze()\n label = labels.float().squeeze()\n loss = self.loss_function(predicted_label, label) # @todo make this support batch sizes that are not 1\n self.most_recent_epoch_training_loss += float(loss)\n loss.backward()\n nn.utils.clip_grad_norm_(self.model.parameters(), MAX_GRADIENT_NORM)\n self.optimizer.step()\n return None\n \n def print_epoch_training_preamble(self) -> None:\n print()\n print(\"Training for epoch {number_of_completed_epochs}\".format(number_of_completed_epochs=self.number_of_completed_epochs))\n return None\n \n def print_epoch_training_postamble(self) -> None:\n print(\"Training for epoch {number_of_completed_epochs}.\".format(number_of_completed_epochs=self.number_of_completed_epochs))\n print(\"Total loss for epoch {number_of_completed_epochs}: {loss}\".format(number_of_completed_epochs=self.number_of_completed_epochs, loss=self.most_recent_epoch_training_loss))\n print(\"Mean loss for epoch {number_of_completed_epochs}: {loss}\".format(\n number_of_completed_epochs=self.number_of_completed_epochs,\n loss=self.most_recent_epoch_training_loss/len(self.training_dataloader)))\n print()\n return None\n \n def note_validation_statistics(self) -> None:\n number_of_validation_datapoints = len(self.validation_dataloader.dataset)\n total_validation_loss = 0\n total_number_of_incorrect_results = number_of_validation_datapoints\n self.model.eval()\n validation_data_progress_bar = tqdm(self.validation_dataloader, total=len(self.validation_dataloader))\n validation_data_progress_bar.set_description(\"Calculating validation statistics...\")\n for validation_inputs, validation_labels in validation_data_progress_bar:\n validation_labels = validation_labels.float()\n validation_label = validation_labels.squeeze()\n predicted_validation_labels = self.model(validation_inputs)\n predicted_validation_label = predicted_validation_labels.squeeze()\n validation_loss = self.loss_function(predicted_validation_label, validation_label) # @todo make this support batch sizes that are not 1\n total_validation_loss += float(validation_loss)\n number_of_correct_results = torch.sum(validation_labels.eq(torch.round(predicted_validation_labels))).squeeze()\n total_number_of_incorrect_results -= number_of_correct_results\n mean_validation_loss = total_validation_loss / number_of_validation_datapoints\n self.model.train()\n # @todo make this look nicer\n print()\n print(\" Mean Validation Loss: {:.6f}\".format(mean_validation_loss))\n print(\" Incorrect Results: {} / {}\".format(total_number_of_incorrect_results, number_of_validation_datapoints))\n print()\n return None\n\n###############\n# Main Runner #\n###############\n\n# @todo turn these into CLI parameters\n\nBATCH_SIZE = 64\nEMBEDDING_SIZE = 400\nLSTM_HIDDEN_SIZE 
= 256\nNUMBER_OF_LSTM_LAYERS = 2\nDROP_OUT_PROBABILITY = 0.2\nLEARNING_RATE = 1e-3\nMAX_GRADIENT_NORM = 5\nNUMBER_OF_EPOCHS = 9999 # @todo change this\nNUMBER_OF_ITERATIONS_BETWEEN_UPDATES = 5000000000000\n\ndef main() -> None:\n global BATCH_SIZE \n global EMBEDDING_SIZE\n global LSTM_HIDDEN_SIZE\n global NUMBER_OF_LSTM_LAYERS\n global DROP_OUT_PROBABILITY\n global LEARNING_RATE\n global MAX_GRADIENT_NORM\n global NUMBER_OF_EPOCHS\n global NUMBER_OF_ITERATIONS_BETWEEN_UPDATES\n print(\"Initializing classifier...\")\n classifier = SentimentAnalysisClassifier(\n batch_size=BATCH_SIZE,\n embedding_size=EMBEDDING_SIZE,\n lstm_hidden_size=LSTM_HIDDEN_SIZE,\n number_of_lstm_layers=NUMBER_OF_LSTM_LAYERS,\n drop_out_probability=DROP_OUT_PROBABILITY,\n learning_rate=LEARNING_RATE,\n number_of_iterations_between_updates=NUMBER_OF_ITERATIONS_BETWEEN_UPDATES,\n )\n print(\"Starting training...\")\n classifier.train(NUMBER_OF_EPOCHS)\n #################################################\n print(\"This module contains sentiment analysis models for Twitter text classification.\")\n\nif __name__ == '__main__':\n main()\n","sub_path":"nlp_dump/unsuccessful_attempts/sentiment_analysis_twitter/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":16853,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"209600744","text":"from Testing.IncludeURLTest import IncludeURLTest\n\n\nclass IncludeURLTest2(IncludeURLTest):\n \"\"\"This is the second part of the URL test code.\n\n It gets included into the IncludeURLTest, and calls methods\n on other servlets to verify the references continue to work.\n \"\"\"\n\n def writeBody(self):\n self.writeln('')\n name = self.__class__.__name__\n self.writeln(f'
<h2>{name}</h2>')\n module = self.__module__\n self.writeln(f'<p>class = <code>{name}</code>,'\n f' module= <code>{module}</code></p>')\n doc = self.__class__.__doc__.replace('\\n\\n', '</p><p>')\n self.writeln(f'<p>{doc}</p>
')\n self.writeStatus()\n self.cmos(\n \"/Testing/\", \"serverSidePath\",\n \"Expect to see the serverSidePath of the Testing/Main module.\")\n self.writeln('')\n","sub_path":"webware/Testing/Dir/IncludeURLTest2.py","file_name":"IncludeURLTest2.py","file_ext":"py","file_size_in_byte":911,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"141949483","text":"#! python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Feb 23 13:47:27 2017\n\"\"\"\n\nimport sys\nfrom scipy.misc import imread\nfrom scipy.misc import imsave\n\nIN1 = imread(sys.argv[1])\nOUT = imread(sys.argv[2])\n\nINDEX = OUT.shape\nfor i in range(INDEX[0]):\n for j in range(INDEX[1]):\n same = 1\n for k in range(INDEX[2]):\n if IN1[i][j][k] != OUT[i][j][k]:\n same = 0\n break\n if same == 1:\n for k in range(INDEX[2]):\n OUT[i][j][k] = 0\nimsave('ans_two.png', OUT)\n","sub_path":"hw0/Q2.py","file_name":"Q2.py","file_ext":"py","file_size_in_byte":542,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"609755824","text":"# import the Queue class here\nfrom Study.ADT.queue import Queue\n\n# define variables for arrival time and service time (you may change these later if you want)\ncustomer_arrival_time = 10\ncustomer_service_time = 20\n\n# create a new queue\na = Queue()\n\n# define variables to keep track of the current time and service time (initialise them to 0)\ncur_time = 0\nser_time = 0\n\n# other required variables\ncustomer_id = 1\n\n# this variable controls the access of a customer in the service area\n\n# it should be turned to True when the customer needs to be served; it remains False otherwise\nlock=False\n\n# simulation will run for 60 time units (you may change it later if you want)\nfor i in range(1,121):\n # if the customer has not arrived... increment time unit by 1\n if i%10==0: # # increment time based upon the variable# otherwise say that the customer with some unique ID has arrived\n print(\"The customer with id \" + str(customer_id) + \" has arrived \"+str(i))\n # reset the time counter\n\n\n # add customer to queue\n a.append(customer_id)\n # change the customer ID\n customer_id += 1\n # open the lock to the service area\n lock = True\n\n # if the customer has been served\n if lock==True and ser_time==20:\n b=a.serve()\n ser_time=1\n # \n # remove customer from queue and print which customer was served\n\n print(\"The customer with id \" + str(b) + \" has been served \"+str(i))\n # reset service time\n\n # lock the area\n if a.count==0:\n lock=False\n # otherwise increment service time\n elif lock==True :\n ser_time+=1\n\n# increment time based upon the variable\n","sub_path":"Study/ADT/Module 3/customer_service.py","file_name":"customer_service.py","file_ext":"py","file_size_in_byte":1790,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"190985450","text":"# Copyright 2011 OpenStack Foundation\n# Copyright 2013 Rackspace Hosting\n# Copyright 2013 Hewlett-Packard Development Company, L.P.\n# All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. 
You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nimport testtools\nimport mock\n\nfrom troveclient.v1 import management\nfrom troveclient import base\n\n\"\"\"\nUnit tests for management.py\n\"\"\"\n\n\nclass RootHistoryTest(testtools.TestCase):\n\n def setUp(self):\n super(RootHistoryTest, self).setUp()\n self.orig__init = management.RootHistory.__init__\n management.RootHistory.__init__ = mock.Mock(return_value=None)\n\n def tearDown(self):\n super(RootHistoryTest, self).tearDown()\n management.RootHistory.__init__ = self.orig__init\n\n def test___repr__(self):\n root_history = management.RootHistory()\n root_history.id = \"1\"\n root_history.created = \"ct\"\n root_history.user = \"tu\"\n self.assertEqual('',\n root_history.__repr__())\n\n\nclass ManagementTest(testtools.TestCase):\n\n def setUp(self):\n super(ManagementTest, self).setUp()\n self.orig__init = management.Management.__init__\n management.Management.__init__ = mock.Mock(return_value=None)\n self.management = management.Management()\n self.management.api = mock.Mock()\n self.management.api.client = mock.Mock()\n\n self.orig_hist__init = management.RootHistory.__init__\n self.orig_base_getid = base.getid\n base.getid = mock.Mock(return_value=\"instance1\")\n\n def tearDown(self):\n super(ManagementTest, self).tearDown()\n management.Management.__init__ = self.orig__init\n management.RootHistory.__init__ = self.orig_hist__init\n base.getid = self.orig_base_getid\n\n def test_show(self):\n def side_effect_func(path, instance):\n return path, instance\n self.management._get = mock.Mock(side_effect=side_effect_func)\n p, i = self.management.show(1)\n self.assertEqual(('/mgmt/instances/instance1', 'instance'), (p, i))\n\n def test_index(self):\n page_mock = mock.Mock()\n self.management._paginated = page_mock\n self.management.index(deleted=True)\n page_mock.assert_called_with('/mgmt/instances?deleted=true',\n 'instances', None, None)\n self.management.index(deleted=False)\n page_mock.assert_called_with('/mgmt/instances?deleted=false',\n 'instances', None, None)\n self.management.index(deleted=True, limit=10, marker=\"foo\")\n page_mock.assert_called_with('/mgmt/instances?deleted=true',\n 'instances', 10, \"foo\")\n\n def test_root_enabled_history(self):\n self.management.api.client.get = mock.Mock(return_value=('resp', None))\n self.assertRaises(Exception,\n self.management.root_enabled_history, \"instance\")\n body = {'root_history': 'rh'}\n self.management.api.client.get = mock.Mock(return_value=('resp', body))\n management.RootHistory.__init__ = mock.Mock(return_value=None)\n rh = self.management.root_enabled_history(\"instance\")\n self.assertTrue(isinstance(rh, management.RootHistory))\n\n def test__action(self):\n resp = mock.Mock()\n self.management.api.client.post = mock.Mock(\n return_value=(resp, 'body')\n )\n resp.status_code = 200\n self.management._action(1, 'body')\n self.assertEqual(1, self.management.api.client.post.call_count)\n resp.status_code = 400\n self.assertRaises(Exception, self.management._action, 1, 'body')\n self.assertEqual(2, self.management.api.client.post.call_count)\n\n def _mock_action(self):\n self.body_ = \"\"\n\n def 
side_effect_func(instance_id, body):\n self.body_ = body\n self.management._action = mock.Mock(side_effect=side_effect_func)\n\n def test_stop(self):\n self._mock_action()\n self.management.stop(1)\n self.assertEqual(1, self.management._action.call_count)\n self.assertEqual({'stop': {}}, self.body_)\n\n def test_reboot(self):\n self._mock_action()\n self.management.reboot(1)\n self.assertEqual(1, self.management._action.call_count)\n self.assertEqual({'reboot': {}}, self.body_)\n\n def test_migrate(self):\n self._mock_action()\n self.management.migrate(1)\n self.assertEqual(1, self.management._action.call_count)\n self.assertEqual({'migrate': {}}, self.body_)\n\n def test_migrate_to_host(self):\n hostname = 'hostname2'\n self._mock_action()\n self.management.migrate(1, host=hostname)\n self.assertEqual(1, self.management._action.call_count)\n self.assertEqual({'migrate': {'host': hostname}}, self.body_)\n\n def test_update(self):\n self._mock_action()\n self.management.update(1)\n self.assertEqual(1, self.management._action.call_count)\n self.assertEqual({'update': {}}, self.body_)\n\n def test_reset_task_status(self):\n self._mock_action()\n self.management.reset_task_status(1)\n self.assertEqual(1, self.management._action.call_count)\n self.assertEqual({'reset-task-status': {}}, self.body_)\n\n\nclass MgmtFlavorsTest(testtools.TestCase):\n\n def setUp(self):\n super(MgmtFlavorsTest, self).setUp()\n self.orig__init = management.MgmtFlavors.__init__\n management.MgmtFlavors.__init__ = mock.Mock(return_value=None)\n self.flavors = management.MgmtFlavors()\n self.flavors.api = mock.Mock()\n self.flavors.api.client = mock.Mock()\n self.flavors.resource_class = mock.Mock(return_value=\"flavor-1\")\n self.orig_base_getid = base.getid\n base.getid = mock.Mock(return_value=\"flavor1\")\n\n def tearDown(self):\n super(MgmtFlavorsTest, self).tearDown()\n management.MgmtFlavors.__init__ = self.orig__init\n base.getid = self.orig_base_getid\n\n def test_create(self):\n def side_effect_func(path, body, inst):\n return path, body, inst\n\n self.flavors._create = mock.Mock(side_effect=side_effect_func)\n p, b, i = self.flavors.create(\"test-name\", 1024, 30, 2, 1)\n self.assertEqual(\"/mgmt/flavors\", p)\n self.assertEqual(\"flavor\", i)\n self.assertEqual(\"test-name\", b[\"flavor\"][\"name\"])\n self.assertEqual(1024, b[\"flavor\"][\"ram\"])\n self.assertEqual(2, b[\"flavor\"][\"vcpu\"])\n self.assertEqual(1, b[\"flavor\"][\"flavor_id\"])\n","sub_path":"horizon-new/virtualenv/lib/python2.6/site-packages/troveclient/tests/test_management.py","file_name":"test_management.py","file_ext":"py","file_size_in_byte":7109,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"456722819","text":"from clint.textui import progress\nfrom collections import OrderedDict\n\ndef even(n):\n\treturn int(n/2)\n\ndef odd(n):\n\treturn int((3*n)+1)\n\ndef cal(n):\n\tif(n % 2 == 0):\n\t\treturn even(n)\n\telse:\n\t\treturn odd(n)\n\ntable = {}\nfor n in progress.bar(range(1, 1000001, 1)):\n\tprint()\t#bar bug fix\n\n\tif(n not in table):\n\t\ttable[n] = 1\n\tm = cal(n)\n\n\twhile m is not 1:\n\t\tm = cal(m)\n\t\ttable[n] += 1\n\t\n\tif m == 1: \n\t\ttable[n] += 1\n\n\nprint(\"Produced the following: (ascending order)\")\nod = OrderedDict(sorted(table.items(), key=lambda t: t[1]))\nprint(od)","sub_path":"python/ap014.py","file_name":"ap014.py","file_ext":"py","file_size_in_byte":535,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} 
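The ap014.py record just above rebuilds every Collatz chain from scratch, and its loop tests integer equality with "is not", which only terminates by accident of CPython's small-integer caching; "!=" is the correct comparison. As a point of comparison, a memoised sketch of the same idea follows. It is not part of any dataset record, and the helper name collatz_length and its dictionary cache are illustrative assumptions:

def collatz_length(n, cache={1: 1}):
    # The mutable default argument deliberately persists the cache across calls.
    # Walk the chain until a value with a known length is reached,
    # then fill in the cache for every value seen on the way down.
    path = []
    while n not in cache:
        path.append(n)
        n = n // 2 if n % 2 == 0 else 3 * n + 1
    length = cache[n]
    for m in reversed(path):
        length += 1
        cache[m] = length
    return length

Over range(1, 1000001) this touches each chain element roughly once in total, rather than once per starting value as in the record above.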
+{"seq_id":"616938577","text":"# Kieran Parikh - CS701 Senior Seminar\n# Transformer adapted for Authorship Attribution\n# Code adapted from \"The Annotated Transformer\" (http://nlp.seas.harvard.edu/2018/04/03/attention.html)\n\nimport numpy as np\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport math, copy, time\nfrom torch.autograd import Variable\n#import matplotlib.pyplot as plt\n#import seaborn\nfrom torchtext import data, datasets\nimport spacy\n#seaborn.set_context(context=\"talk\")\nimport pdb\nimport pandas as pd\nimport argparse\n\n### MODEL DEFINITION ###\n# Overall Architecture\nclass EncoderDecoder(nn.Module):\n \"\"\"\n A standard Encoder-Decoder architecture. Base for this and many \n other models.\n \"\"\"\n def __init__(self, encoder, decoder, src_embed, tgt_embed, generator):\n super(EncoderDecoder, self).__init__()\n self.encoder = encoder\n self.decoder = decoder\n self.src_embed = src_embed\n self.tgt_embed = tgt_embed\n self.generator = generator\n \n def forward(self, src, tgt, src_mask, tgt_mask):\n \"Take in and process masked src and target sequences.\"\n return self.decode(self.encode(src, src_mask), src_mask,\n tgt, tgt_mask)\n \n def encode(self, src, src_mask):\n return self.encoder(self.src_embed(src), src_mask)\n \n def decode(self, memory, src_mask, tgt, tgt_mask):\n return self.decoder(self.tgt_embed(tgt), memory, src_mask, tgt_mask)\n\nclass Generator(nn.Module):\n \"Define standard linear + softmax generation step.\"\n def __init__(self, d_model, vocab):\n super(Generator, self).__init__()\n self.proj = nn.Linear(d_model, vocab)\n\n def forward(self, x):\n return F.log_softmax(self.proj(x), dim=-1)\n\ndef clones(module, N):\n \"Produce N identical layers.\"\n return nn.ModuleList([copy.deepcopy(module) for _ in range(N)])\n\n# Encoder Architecture\nclass Encoder(nn.Module):\n \"Core encoder is a stack of N layers\"\n def __init__(self, layer, N):\n super(Encoder, self).__init__()\n self.layers = clones(layer, N)\n self.norm = LayerNorm(layer.size)\n \n def forward(self, x, mask):\n \"Pass the input (and mask) through each layer in turn.\"\n for layer in self.layers:\n x = layer(x, mask)\n return self.norm(x)\n\nclass LayerNorm(nn.Module):\n \"Construct a layernorm module.\"\n def __init__(self, features, eps=1e-6):\n super(LayerNorm, self).__init__()\n self.a_2 = nn.Parameter(torch.ones(features))\n self.b_2 = nn.Parameter(torch.zeros(features))\n self.eps = eps\n\n def forward(self, x):\n mean = x.mean(-1, keepdim=True)\n std = x.std(-1, keepdim=True)\n return self.a_2 * (x - mean) / (std + self.eps) + self.b_2\n\nclass SublayerConnection(nn.Module):\n \"\"\"\n A residual connection followed by a layer norm.\n Note for code simplicity the norm is first as opposed to last.\n \"\"\"\n def __init__(self, size, dropout):\n super(SublayerConnection, self).__init__()\n self.norm = LayerNorm(size)\n self.dropout = nn.Dropout(dropout)\n\n def forward(self, x, sublayer):\n \"Apply residual connection to any sublayer with the same size.\"\n return x + self.dropout(sublayer(self.norm(x)))\n\nclass EncoderLayer(nn.Module):\n \"Encoder is made up of self-attn and feed forward (defined below)\"\n def __init__(self, size, self_attn, feed_forward, dropout):\n super(EncoderLayer, self).__init__()\n self.self_attn = self_attn\n self.feed_forward = feed_forward\n self.sublayer = clones(SublayerConnection(size, dropout), 2)\n self.size = size\n\n def forward(self, x, mask):\n \"Follow Figure 1 (left) for connections.\"\n x = self.sublayer[0](x, 
lambda x: self.self_attn(x, x, x, mask))\n return self.sublayer[1](x, self.feed_forward)\n\n# Decoder Architecture\nclass Decoder(nn.Module):\n \"Generic N layer decoder with masking.\"\n def __init__(self, layer, N):\n super(Decoder, self).__init__()\n self.layers = clones(layer, N)\n self.norm = LayerNorm(layer.size)\n \n def forward(self, x, memory, src_mask, tgt_mask):\n for layer in self.layers:\n x = layer(x, memory, src_mask, tgt_mask)\n return self.norm(x)\n\nclass DecoderLayer(nn.Module):\n \"Decoder is made of self-attn, src-attn, and feed forward (defined below)\"\n def __init__(self, size, self_attn, src_attn, feed_forward, dropout):\n super(DecoderLayer, self).__init__()\n self.size = size\n self.self_attn = self_attn\n self.src_attn = src_attn\n self.feed_forward = feed_forward\n self.sublayer = clones(SublayerConnection(size, dropout), 3)\n \n def forward(self, x, memory, src_mask, tgt_mask):\n \"Follow Figure 1 (right) for connections.\"\n m = memory\n x = self.sublayer[0](x, lambda x: self.self_attn(x, x, x, tgt_mask))\n x = self.sublayer[1](x, lambda x: self.src_attn(x, m, m, src_mask))\n return self.sublayer[2](x, self.feed_forward)\n\ndef subsequent_mask(size):\n \"Mask out subsequent positions.\"\n attn_shape = (1, size, size)\n subsequent_mask = np.triu(np.ones(attn_shape), k=1).astype('uint8')\n return torch.from_numpy(subsequent_mask) == 0\n\n# Attention Mechanism\ndef attention(query, key, value, mask=None, dropout=None):\n \"Compute 'Scaled Dot Product Attention'\"\n d_k = query.size(-1)\n scores = torch.matmul(query, key.transpose(-2, -1)) \\\n / math.sqrt(d_k)\n if mask is not None:\n scores = scores.masked_fill(mask == 0, -1e9)\n p_attn = F.softmax(scores, dim = -1)\n if dropout is not None:\n p_attn = dropout(p_attn)\n return torch.matmul(p_attn, value), p_attn\n\nclass MultiHeadedAttention(nn.Module):\n def __init__(self, h, d_model, dropout=0.1):\n \"Take in model size and number of heads.\"\n super(MultiHeadedAttention, self).__init__()\n assert d_model % h == 0\n # We assume d_v always equals d_k\n self.d_k = d_model // h\n self.h = h\n self.linears = clones(nn.Linear(d_model, d_model), 4)\n self.attn = None\n self.dropout = nn.Dropout(p=dropout)\n \n def forward(self, query, key, value, mask=None):\n \"Implements multiple attention heads.\"\n if mask is not None:\n # Same mask applied to all h heads.\n mask = mask.unsqueeze(1)\n nbatches = query.size(0)\n \n # 1) Do all the linear projections in batch from d_model => h x d_k \n query, key, value = \\\n [l(x).view(nbatches, -1, self.h, self.d_k).transpose(1, 2)\n for l, x in zip(self.linears, (query, key, value))]\n \n # 2) Apply attention on all the projected vectors in batch. \n x, self.attn = attention(query, key, value, mask=mask, \n dropout=self.dropout)\n \n # 3) \"Concat\" using a view and apply a final linear. 
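# the transpose + view below undoes the per-head split from step 1, giving one d_model-wide vector per position; the final linear then mixes information across the h heads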
\n x = x.transpose(1, 2).contiguous() \\\n .view(nbatches, -1, self.h * self.d_k)\n return self.linears[-1](x)\n\n# Position Wise Feed-Forward Network\nclass PositionwiseFeedForward(nn.Module):\n \"Implements FFN equation.\"\n def __init__(self, d_model, d_ff, dropout=0.1):\n super(PositionwiseFeedForward, self).__init__()\n self.w_1 = nn.Linear(d_model, d_ff)\n self.w_2 = nn.Linear(d_ff, d_model)\n self.dropout = nn.Dropout(dropout)\n\n def forward(self, x):\n return self.w_2(self.dropout(F.relu(self.w_1(x))))\n\n# Word Embeddings\nclass Embeddings(nn.Module):\n def __init__(self, d_model, vocab):\n super(Embeddings, self).__init__()\n self.lut = nn.Embedding(vocab, d_model)\n self.d_model = d_model\n\n def forward(self, x):\n return self.lut(x) * math.sqrt(self.d_model)\n\n# Positional Encodings\nclass PositionalEncoding(nn.Module):\n \"Implement the PE function.\"\n def __init__(self, d_model, dropout, max_len=5000):\n super(PositionalEncoding, self).__init__()\n self.dropout = nn.Dropout(p=dropout)\n \n # Compute the positional encodings once in log space.\n pe = torch.zeros(max_len, d_model)\n position = torch.arange(0., max_len).unsqueeze(1)\n div_term = torch.exp(torch.arange(0., d_model, 2) * -(math.log(10000.0) / d_model))\n pe[:, 0::2] = torch.sin(position * div_term)\n pe[:, 1::2] = torch.cos(position * div_term)\n pe = pe.unsqueeze(0)\n self.register_buffer('pe', pe)\n \n def forward(self, x):\n x = x + Variable(self.pe[:, :x.size(1)], \n requires_grad=False)\n return self.dropout(x)\n\n### END MODEL DEFINITION ###\n\n### TRAINING MECHANICS ###\n# Batches\nclass Batch:\n \"Object for holding a batch of data with mask during training.\"\n def __init__(self, src, trg=None, pad=0):\n self.src = src\n self.src_mask = (src != pad).unsqueeze(-2)\n if trg is not None:\n self.trg = trg[:, :-1]\n self.trg_y = trg[:, 1:]\n self.trg_mask = \\\n self.make_std_mask(self.trg, pad)\n self.ntokens = (self.trg_y != pad).data.sum()\n \n @staticmethod\n def make_std_mask(tgt, pad):\n \"Create a mask to hide padding and future words.\"\n tgt_mask = (tgt != pad).unsqueeze(-2)\n tgt_mask = tgt_mask & Variable(\n subsequent_mask(tgt.size(-1)).type_as(tgt_mask.data))\n return tgt_mask\n\n# Train epoch\ndef run_epoch(data_iter, model, loss_compute):\n \"Standard Training and Logging Function\"\n start = time.time()\n total_tokens = 0\n total_loss = 0\n tokens = 0\n for i, batch in enumerate(data_iter):\n out = model.forward(batch.src, batch.trg, \n batch.src_mask, batch.trg_mask)\n loss = loss_compute(out, batch.trg_y, batch.ntokens)\n total_loss += loss\n total_tokens += batch.ntokens.float()\n tokens += batch.ntokens\n if i % 50 == 1:\n elapsed = time.time() - start\n if elapsed < 1:\n elapsed = 1\n print(\"Epoch Step: %d Loss: %f Tokens per Sec: %f\" % (i, loss / batch.ntokens.float(), tokens / elapsed))\n start = time.time()\n tokens = 0\n return total_loss / total_tokens\n\n# Batching helper function \nglobal max_src_in_batch, max_tgt_in_batch\ndef batch_size_fn(new, count, sofar):\n \"Keep augmenting batch and calculate total number of tokens + padding.\"\n global max_src_in_batch, max_tgt_in_batch\n if count == 1:\n max_src_in_batch = 0\n max_tgt_in_batch = 0\n max_src_in_batch = max(max_src_in_batch, len(new.src))\n max_tgt_in_batch = max(max_tgt_in_batch, len(new.trg) + 2)\n src_elements = count * max_src_in_batch\n tgt_elements = count * max_tgt_in_batch\n return max(src_elements, tgt_elements)\n\n# Optimizer \nclass NoamOpt:\n \"Optim wrapper that implements rate.\"\n def __init__(self, 
model_size, factor, warmup, optimizer):\n self.optimizer = optimizer\n self._step = 0\n self.warmup = warmup\n self.factor = factor\n self.model_size = model_size\n self._rate = 0\n \n def step(self):\n \"Update parameters and rate\"\n self._step += 1\n rate = self.rate()\n for p in self.optimizer.param_groups:\n p['lr'] = rate\n self._rate = rate\n self.optimizer.step()\n \n def rate(self, step = None):\n \"Implement `lrate` above\"\n if step is None:\n step = self._step\n return self.factor * \\\n (self.model_size ** (-0.5) *\n min(step ** (-0.5), step * self.warmup ** (-1.5)))\n \ndef get_std_opt(model):\n return NoamOpt(model.src_embed[0].d_model, 2, 4000,\n torch.optim.Adam(model.parameters(), lr=0, betas=(0.9, 0.98), eps=1e-9))\n\n# Regularization\nclass LabelSmoothing(nn.Module):\n \"Implement label smoothing.\"\n def __init__(self, size, padding_idx, smoothing=0.0):\n super(LabelSmoothing, self).__init__()\n self.criterion = nn.KLDivLoss(size_average=False)\n self.padding_idx = padding_idx\n self.confidence = 1.0 - smoothing\n self.smoothing = smoothing\n self.size = size\n self.true_dist = None\n \n def forward(self, x, target):\n assert x.size(1) == self.size\n true_dist = x.data.clone()\n true_dist.fill_(self.smoothing / (self.size - 2))\n true_dist.scatter_(1, target.data.unsqueeze(1), self.confidence)\n true_dist[:, self.padding_idx] = 0\n #mask = torch.nonzero(target.data == self.padding_idx)\n #if mask.dim() > 0:\n # true_dist.index_fill_(0, mask.squeeze(), 0.0)\n self.true_dist = true_dist\n return self.criterion(x, Variable(true_dist, requires_grad=False))\n\n# Loss function \nclass SimpleLossCompute:\n \"A simple loss compute and train function.\"\n def __init__(self, generator, criterion, opt=None):\n self.generator = generator\n self.criterion = criterion\n self.opt = opt\n \n def __call__(self, x, y, norm):\n x = self.generator(x)\n loss = self.criterion(x.contiguous().view(-1, x.size(-1)), \n y.contiguous().view(-1)) / norm\n loss.backward()\n if self.opt is not None:\n self.opt.step()\n self.opt.optimizer.zero_grad()\n return loss.data[0] * norm\n\nclass MultiGPULossCompute:\n \"A multi-gpu loss compute and train function.\"\n def __init__(self, generator, criterion, devices, opt=None, chunk_size=5):\n # Send out to different gpus.\n self.generator = generator\n self.criterion = nn.parallel.replicate(criterion, \n devices=devices)\n self.opt = opt\n self.devices = devices\n self.chunk_size = chunk_size\n \n def __call__(self, out, targets, normalize):\n total = 0.0\n generator = nn.parallel.replicate(self.generator, \n devices=self.devices)\n out_scatter = nn.parallel.scatter(out, \n target_gpus=self.devices)\n out_grad = [[] for _ in out_scatter]\n targets = nn.parallel.scatter(targets, \n target_gpus=self.devices)\n\n # Divide generating into chunks.\n chunk_size = self.chunk_size\n for i in range(0, out_scatter[0].size(1), chunk_size):\n # Predict distributions\n out_column = [[Variable(o[:, i:i+chunk_size].data, \n requires_grad=self.opt is not None)] \n for o in out_scatter]\n gen = nn.parallel.parallel_apply(generator, out_column)\n\n # Compute loss. 
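# each chunk is flattened to (positions, vocab) logits versus (positions,) targets so the criterion replicas can be applied per device via parallel_apply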
\n y = [(g.contiguous().view(-1, g.size(-1)), \n t[:, i:i+chunk_size].contiguous().view(-1)) \n for g, t in zip(gen, targets)]\n loss = nn.parallel.parallel_apply(self.criterion, y)\n\n # Sum and normalize loss\n l = nn.parallel.gather(loss, \n target_device=self.devices[0])\n l = l.sum()[0] / normalize.float()\n total += l.data[0]\n\n # Backprop loss to output of transformer\n if self.opt is not None:\n l.backward()\n for j, l in enumerate(loss):\n out_grad[j].append(out_column[j][0].grad.data.clone())\n\n # Backprop all loss through transformer. \n if self.opt is not None:\n out_grad = [Variable(torch.cat(og, dim=1)) for og in out_grad]\n o1 = out\n o2 = nn.parallel.gather(out_grad, \n target_device=self.devices[0])\n o1.backward(gradient=o2)\n self.opt.step()\n self.opt.optimizer.zero_grad()\n return total * normalize.float()\n\n\n# Greedy decoder\ndef greedy_decode(model, src, src_mask, max_len, start_symbol):\n memory = model.encode(src, src_mask)\n ys = torch.ones(1, 1).fill_(start_symbol).type_as(src.data)\n for i in range(max_len-1):\n out = model.decode(memory, src_mask, \n Variable(ys), \n Variable(subsequent_mask(ys.size(1))\n .type_as(src.data)))\n prob = model.generator(out[:, -1])\n _, next_word = torch.max(prob, dim = 1)\n next_word = next_word.data[0]\n ys = torch.cat([ys, \n torch.ones(1, 1).type_as(src.data).fill_(next_word)], dim=1)\n return ys\n\n# Custom batch iterator\nclass MyIterator(data.Iterator):\n def create_batches(self):\n if self.train:\n def pool(d, random_shuffler):\n for p in data.batch(d, self.batch_size * 100):\n p_batch = data.batch(\n sorted(p, key=self.sort_key),\n self.batch_size, self.batch_size_fn)\n for b in random_shuffler(list(p_batch)):\n yield b\n self.batches = pool(self.data(), self.random_shuffler)\n \n else:\n self.batches = []\n for b in data.batch(self.data(), self.batch_size,\n self.batch_size_fn):\n self.batches.append(sorted(b, key=self.sort_key))\n\ndef rebatch(pad_idx, batch):\n \"Fix order in torchtext to match ours\"\n src, trg = batch.src.transpose(0, 1), batch.trg.transpose(0,1)\n return Batch(src, trg, pad_idx)\n\n### END TRAINING MECHANICS ###\n\n# Helper function to produce a model\ndef make_model(src_vocab, tgt_vocab, N=6, \n d_model=512, d_ff=2048, h=8, dropout=0.1):\n \"Helper: Construct a model from hyperparameters.\"\n c = copy.deepcopy\n attn = MultiHeadedAttention(h, d_model)\n ff = PositionwiseFeedForward(d_model, d_ff, dropout)\n position = PositionalEncoding(d_model, dropout)\n model = EncoderDecoder(\n Encoder(EncoderLayer(d_model, c(attn), c(ff), dropout), N),\n Decoder(DecoderLayer(d_model, c(attn), c(attn), \n c(ff), dropout), N),\n nn.Sequential(Embeddings(d_model, src_vocab), c(position)),\n nn.Sequential(Embeddings(d_model, tgt_vocab), c(position)),\n Generator(d_model, tgt_vocab))\n \n # This was important from their (Vaswani et Al.) code. 
\n # Initialize parameters with Glorot / fan_avg.\n for p in model.parameters():\n if p.dim() > 1:\n nn.init.xavier_uniform_(p)\n return model\n\n### LOAD DATASET AND TRAIN MODEL ###\n# Parse command line args\nparser = argparse.ArgumentParser(description='Train transformer for aeuthorship attribution.')\nparser.add_argument('--stack_layers', type=int, default=6, help='num encoder layers')\nparser.add_argument('--epochs', type=int, default=10, help='num training epochs')\nparser.add_argument('--batch_size', type=int, default=3500, help='num training epochs')\nparser.add_argument('--dataset', type=str, default=\"C50\", help='num training epochs')\nargs = parser.parse_args()\n\n# Load dataset\ntrain_file = args.dataset+'-trainData.csv'\ntest_file = args.dataset+'-testData.csv'\n\nspacy_en = spacy.load('en')\ndef tokenize_en(text):\n return [tok.text for tok in spacy_en.tokenizer(text)]\n\nBOS_WORD = ''\nEOS_WORD = ''\nBLANK_WORD = \"\"\nSRC = data.Field(tokenize=tokenize_en, pad_token=BLANK_WORD)\nTGT = data.Field(init_token = BOS_WORD, eos_token = EOS_WORD, pad_token=BLANK_WORD) \n\ntrain, test = data.TabularDataset.splits(path='../data', train=train_file, test=test_file, format='csv',\n fields=[('src', SRC), ('trg', TGT)])\n\nprint(\"Dataset loaded\")\nprint(\"Train examples: \" + str(len(train)))\n\n# Build vocabulary\nSRC.build_vocab(train)\nTGT.build_vocab(train)\n\nprint(\"Vocabulary built\")\nprint(\"Source vocab length: \" + str(len(SRC.vocab)))\nprint(\"Target vocab length: \" + str(len(TGT.vocab)))\n\ndevices = [0]\nmodel = make_model(len(SRC.vocab), len(TGT.vocab), N=args.stack_layers)\nmodel = model.cuda()\npad_idx = TGT.vocab.stoi[\"\"]\ncriterion = LabelSmoothing(size=len(TGT.vocab), padding_idx=pad_idx, smoothing=0.1)\ncriterion = criterion.cuda()\n\n\nBATCH_SIZE = args.batch_size\ntrain_iter = MyIterator(train, batch_size=BATCH_SIZE, device=torch.device('cuda:0'),\n repeat=False, sort_key=lambda x: (len(x.src), len(x.trg)),\n batch_size_fn=batch_size_fn, train=True)\n\nmodel_par = nn.DataParallel(model, device_ids=devices)\n\n# Train model\nmodel_opt = NoamOpt(model.src_embed[0].d_model, 1, 2000,\n torch.optim.Adam(model.parameters(), lr=0, betas=(0.9, 0.98), eps=1e-9))\n\neval_epochs = [0,4,9,14,24,39, 49, 59, 69, 79, 89, 99]\neval_accs = {}\n\nwith open('../data/'+test_file, newline='', encoding=\"utf-8\") as csvfile:\n dataset = pd.read_csv(csvfile, delimiter=',', names = ['text', 'author'], encoding='utf-8')\n texts = dataset['text'].tolist()\n labels = dataset['author'].tolist()\n\n\nprint(\"Training...\")\nfor epoch in range(args.epochs):\n print(\"Epoch \" + str(epoch) + \":\")\n model.train()\n model_par.train()\n run_epoch((rebatch(pad_idx, b) for b in train_iter), \n #model,\n #SimpleLossCompute(model.generator, criterion, opt=model_opt))\n model_par, \n MultiGPULossCompute(model.generator, criterion, devices=devices, opt=model_opt))\n model_par.eval()\n\n if epoch in eval_epochs:\n model.eval()\n correct = 0\n\n for sentence, label in zip(texts, labels):\n sent = tokenize_en(sentence)\n src = torch.LongTensor([[SRC.vocab.stoi[w] for w in sent]])\n src = Variable(src)\n src = src.cuda()\n src_mask = (src != SRC.vocab.stoi[\"\"]).unsqueeze(-2)\n out = greedy_decode(model, src, src_mask.cuda(), \n max_len=3, start_symbol=TGT.vocab.stoi[\"\"])\n print(\"Pred:\", end=\"\\t\")\n trans = \"\"\n for i in range(1, out.size(1)):\n sym = TGT.vocab.itos[out[0, i]]\n if sym == \"\": break\n trans += sym + \" \"\n print(trans)\n print(label)\n\n try:\n pred = int(trans)\n except 
ValueError:\n pred = -1\n\n if pred == label:\n correct += 1\n\n acc = float(correct)/float(len(labels))\n eval_accs[str(epoch)] = acc\n print(acc)\n\nfor epoch in eval_epochs:\n if str(epoch) in eval_accs:\n print(\" Epoch \" + str(epoch) + \": \" + str(eval_accs[str(epoch)]))\n\n# save trained model\ntorch.save(model, 'model.pt')\n\n#model = torch.load('model.pt')\n\n","sub_path":"transformer-aa.py","file_name":"transformer-aa.py","file_ext":"py","file_size_in_byte":22791,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"618054012","text":"\"\"\"Tests for nextbus\"\"\"\nimport nextbus\nfrom nextbus.model import Agency, Route\n\nimport unittest\n\nclass AgencyListTest(unittest.TestCase):\n def test_agencyList(self):\n #agency list should always return a list of agencies\n agencies = nextbus.agency_list()\n self.assertTrue(len(agencies) > 0, \"Agencies list should not be empty\")\n self.assertTrue(isinstance(agencies[0], Agency), \"agency_list not returning a list of Agencies\")\n # if willing to break 2.6, can use assertIsInstance instead\n\n def test_routeList(self):\n #route list should always return a list of routes filtered by agency.\n routes = nextbus.route_list(agency='sf-muni')\n self.assertTrue(len(routes) > 0,\n \"Routes list should not be empty\")\n self.assertTrue(isinstance(routes[0], Route),\n \"route_list not returning a list of Routes\")\n self.assertTrue(\"F-Market & Wharves\" in [route.title for route in routes],\n \"A route is missing from the route list\")\n self.assertFalse(\"Sandwich\" in [route.title for route in routes],\n \"route_list is including multiple agencies\")\n\n def test_routeConfig(self):\n #route config should return a route object by the given route tag\n route = nextbus.route_config(agency='sf-muni', route='1')\n self.assertTrue(route, \"Route is empty\")\n self.assertTrue(isinstance(route, Route), \"route_config is not returning a Route\")\n self.assertEqual(route.title, \"1-California\",\n \"Route title is not right (expected '{0}', got '{1}'\".format(\"1-California\", route.title))\n\n\n","sub_path":"nextbus/test_nextbus.py","file_name":"test_nextbus.py","file_ext":"py","file_size_in_byte":1692,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"428540270","text":"import random\n\nuser_wins = 0\ncomputer_wins = 0\n\noptions = [\"rock\", \"paper\", \"scissors\"]\n\nwhile True:\n user_input = input(\"Type Rock/Paper/Scissors or Q to quit: \").lower()\n\n if user_input == \"q\":\n break\n\n if user_input not in options:\n print(\"Invalid input, please try again\")\n continue\n\n random_number = random.randint(0, 2)\n computer_choice = options[random_number] \n # 0 is rock\n # 1 is paper\n # 2 is scissors\n print(\"computer picked\", computer_choice +\".\")\n\n if computer_choice == user_input:\n print(\"The game is a tie\")\n\n elif computer_choice == \"paper\" and user_input == \"rock\":\n print(\"Computer wins\")\n computer_wins += 1\n\n elif computer_choice == \"scissors\" and user_input == \"rock\":\n print(\"You win\")\n user_wins += 1\n\n elif computer_choice == \"rock\" and user_input == \"paper\":\n print(\"You win\")\n user_wins += 1\n \n else:\n print(\"Computer wins\")\n computer_wins += 1\n\nprint(\"User wins\",user_wins)\nprint(\"Computer 
wins\",computer_wins)","sub_path":"Rock_Paper_Scissors.py","file_name":"Rock_Paper_Scissors.py","file_ext":"py","file_size_in_byte":1071,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"527510260","text":"\nimport pygame\nfrom utils import vector\n\nclass Vect(vector.Vector):\n pass\n\nclass Sphere:\n def __init__(self,pos,filename):\n pygame.sprite.Sprite.__init__(self)\n self.rect.x = pos[0]\n self.rect.y = pos[1]\n self.image = pygame.image.load(filename).convert()\n self.rect = self.image.get_rect()\n self.image.set_colorkey(WHITE)\n\nBLACK = [0, 0, 0]\nWHITE = [255, 255, 255]\nGREEN = [0, 255, 0]\nRED = [255, 0, 0]\nBLUE = [0,0,255]\nAQUA = [0,255,255]\nYELLOW = [255,255,0]\n\npygame.init()\n\nsize = [1200, 660]\nscreen = pygame.display.set_mode(size)\n\npygame.display.set_caption('New Game')\n\n\ndone = False\nclock = pygame.time.Clock()\n\nwhile not done:\n\n# --- Event Processing\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n done = True\n '''elif event.type == pygame.MOUSEBUTTONDOWN:\n BLACK[0] += 90\n BLACK[1] += 90\n BLACK[2] += 90\n\n elif event.type == pygame.MOUSEBUTTONUP:\n\n BLACK[0] -= 80\n BLACK[1] -= 90\n BLACK[2] -= 90'''\n\n\n screen.fill(BLACK)\n\n pygame.display.flip()\n clock.tick(40)\n\npygame.quit()\n","sub_path":".idea/Game.py","file_name":"Game.py","file_ext":"py","file_size_in_byte":1218,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"224630796","text":"def extend_action_labels(video_action_data):\n dense_annots = {}\n for row_idx, action_row in video_action_data.iterrows():\n start_frame = action_row['start_frame']\n stop_frame = action_row['stop_frame']\n\n narration = action_row['narration']\n # all_nouns = action_row['all_nouns']\n # noun = action_row['noun']\n # verb = action_row['verb']\n for frame_idx in range(start_frame, stop_frame + 1):\n dense_annots[frame_idx] = narration\n return dense_annots\n\n\ndef get_annot_adv(frame_idx, action_labels, cmapping, span=100, extent=7):\n colors = []\n for adv_idx in range(frame_idx - span, frame_idx + span):\n if adv_idx in action_labels:\n action = action_labels[adv_idx]\n color = cmapping[action]\n else:\n color = [0, 0, 0, 1]\n colors.append(color)\n colors = np.array([[color,] * extent for color in colors]).reshape(len(colors) * extent, 4)\n colors = colors[:, :3].reshape((-1, *colors[:, :3].shape)).repeat(extent * 10, 0)\n return colors\n","sub_path":"epic/labelutils.py","file_name":"labelutils.py","file_ext":"py","file_size_in_byte":1067,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"241959587","text":"x = (input().split())\na = int(input())\nsetN = set()\nfor i in range(a):\n n = input().split()\n for j in n:\n setN.add(j)\nsetO = set()\nfor i in x:\n if not(i in setN):\n setO.add(i)\nprint(*(sorted(setO,reverse=True)),sep = \" \")","sub_path":"midterm/07.py","file_name":"07.py","file_ext":"py","file_size_in_byte":244,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"586073384","text":"import cv2\r\ndef video_show():\r\n cap = cv2.VideoCapture(r\"D:\\1_2.avi\")\r\n count = 0\r\n while(True):\r\n # Capture frame-by-frame\r\n ret, frame = cap.read()\r\n\r\n #frame = cv2.cvtColor(frame,cv2.COLOR_BGR2GRAY)\r\n\r\n cv2.imshow('frame',frame)\r\n\r\n pic_path = format(\"E:\\\\work\\\\pic_test\\\\pic_%d.jpg\"%count)\r\n cv2.imwrite(pic_path,frame)\r\n count += 1\r\n\r\n 
if cv2.waitKey(30) & 0xFF == ord('q'):\r\n break\r\n # When everything done, release the capture\r\n cap.release()\r\n cv2.destroyAllWindows()\r\n\r\nif __name__=='__main__':\r\n video_show()","sub_path":"pytorch/yolo_v3_test/avi_read.py","file_name":"avi_read.py","file_ext":"py","file_size_in_byte":606,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"372033540","text":"import socket\r\n\r\nsocket = socket.socket()\r\nprint(\"Intentando conectar a Servidor...\")\r\nwhile True:\r\n socket.connect((\"192.168.0.10\", 44000))\r\n break\r\n\r\nprint(\"Conexion establecida\")\r\n\r\nwhile True:\r\n mensaje = input(\"Escribe tu mensaje: \")\r\n paquete = mensaje.encode('utf-8')\r\n if mensaje == \"Adios\":\r\n \tsocket.send(paquete)\r\n \tbreak\r\n socket.send(paquete)\r\n\r\ns.close()\r\nprint(\"Adios Mundo\")","sub_path":"cliente.py","file_name":"cliente.py","file_ext":"py","file_size_in_byte":434,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"620740768","text":"#encoding=utf8\n#Author=LclMichael\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom PIL import Image\n\n\nfrom scipy import misc\n\ndef read_from_file(file_path):\n image = Image.open(file_path)\n image = image.convert(\"RGB\")\n return np.array(image)\n\ndef get_image_size(file_path):\n image = Image.open(file_path)\n size = image.size\n return [size[1], size[0]]\n\ndef show_image(img, num=0):\n plt.figure(num)\n plt.imshow(img)\n plt.show()\n\ndef show_images(img_list):\n for index, img in enumerate(img_list):\n show_image(img, index)\n\n#重新resize图像大小,size为[height, width]\ndef resize_image(img, size):\n return misc.imresize(img, size)\n\n#裁剪图片\ndef crop_image(img, y1, x1, y2, x2):\n return img[y1:y2, x1:x2]\n\n\ndef get_images_mean(paths):\n all_mean = [0, 0, 0]\n for path in paths:\n image = read_from_file(path)\n all_mean += np.mean(image, axis=(0,1))\n print(\"\\r\" + \"all mean: {}\".format(all_mean), end=\"\")\n print(\"\")\n return all_mean / len(paths)\n\ndef process_image(path, new_size, mean, is_crop=True):\n img = read_from_file(path)\n if is_crop:\n width = img.shape[1]\n heigth = img.shape[0]\n min_edge = min(width,heigth)\n xx = int((width - min_edge) / 2.0)\n yy = int((heigth - min_edge) / 2.0)\n img = img[yy : yy + min_edge, xx: xx + min_edge]\n img = np.subtract(img, mean)\n img = resize_image(img, new_size)\n img = np.array(img, dtype=np.float32)\n\n return img\n\ndef process_images(paths, new_size, mean, is_crop=True):\n image_list = []\n for path in paths:\n image_list.append(process_image(path, new_size, mean, is_crop))\n\n return image_list\n\n\n","sub_path":"util/image_utils.py","file_name":"image_utils.py","file_ext":"py","file_size_in_byte":1702,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"210323719","text":"import numpy as np\nimport torch\nfrom bbox_iou import bbox_iou\nfrom bbox2loc import bbox2loc\nclass ProposalTargetCreator:\n '''给rpn的proposal 分配ground truth,采样n_sample个roi\n '''\n def __init__(self,n_sample=128,pos_ratio=0.25,pos_iou_thresh=0.5,neg_iou_thresh_hi=0.5,neg_iou_thresh_lo=0.0,loc_normalize_mean=(0.,0.,0.,0.),loc_normalize_std=(0.1,0.1,0.2,0.2),device=torch.device('cpu')):\n self.n_sample=n_sample\n self.pos_ratio = pos_ratio\n self.pos_iou_thresh=pos_iou_thresh\n self.neg_iou_thresh_hi = neg_iou_thresh_hi\n self.neg_iou_thresh_lo = neg_iou_thresh_lo\n 
self.loc_normalize_mean=torch.tensor(loc_normalize_mean,device=device)\n self.loc_normalize_std=torch.tensor(loc_normalize_std,device=device)\n def __call__(self,roi,bbox,label):\n iou = bbox_iou(roi,bbox)\n # gt_assignment = iou.argmax(dim=1)\n max_iou,gt_assignment = iou.max(dim=1)\n gt_roi_label = label[gt_assignment] + 1 # shift every label up by one so background goes from -1 to 0\n\n # select foreground RoIs\n pos_index = (max_iou>=self.pos_iou_thresh).nonzero().reshape(-1)\n pos_roi_per_this_image = self.pos_ratio*self.n_sample\n pos_roi_per_this_image = int(min(pos_roi_per_this_image,pos_index.shape[0]))\n if pos_index.shape[0]>0:\n pos_index = np.random.choice(pos_index.cpu().numpy(),size=pos_roi_per_this_image,replace=False)\n\n # select background RoIs\n neg_index = ((max_iou<self.neg_iou_thresh_hi) & (max_iou>=self.neg_iou_thresh_lo)).nonzero().reshape(-1)\n neg_roi_per_this_image = self.n_sample-pos_roi_per_this_image\n neg_roi_per_this_image = int(min(neg_roi_per_this_image,neg_index.shape[0]))\n if neg_index.shape[0]>0:\n neg_index=np.random.choice(neg_index.cpu().numpy(),size = neg_roi_per_this_image,replace=False)\n \n keep_index = np.concatenate([pos_index,neg_index])\n gt_roi_label = gt_roi_label[keep_index]\n gt_roi_label[pos_roi_per_this_image:]=0 # negative samples get label 0\n sample_roi = roi[keep_index]\n\n gt_roi_loc = bbox2loc(sample_roi,bbox[gt_assignment[keep_index]])\n gt_roi_loc = ((gt_roi_loc-self.loc_normalize_mean)/self.loc_normalize_std)\n return sample_roi,gt_roi_loc,gt_roi_label\n\n","sub_path":"proposal_target_creator.py","file_name":"proposal_target_creator.py","file_ext":"py","file_size_in_byte":2269,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"78646322","text":"import urllib.request, urllib.parse, urllib.error\nimport twurl\nimport ssl\nimport pandas as pd\nimport json\nimport os\nimport sqlite3\n\n# Ignore SSL certificate errors\nctx = ssl.create_default_context()\nctx.check_hostname = False\nctx.verify_mode = ssl.CERT_NONE\n\nTWITTER_URL = 'https://api.twitter.com/1.1/statuses/user_timeline.json'\ntree = '/home/ceewick/cs/py4e/twitter/'\nacct = 'realdonaldtrump'\nurl = twurl.augment(TWITTER_URL,\n {'screen_name': acct})\nc = urllib.request.urlopen(url, context=ctx)\ndata = c.read().decode()\n\n## Create/Export JSON file of tweets\nwith open(os.path.join(tree, 'tTweets.json'),'w') as file:\n js = json.loads(data)\n json.dump(js,file,indent=4,sort_keys=True)\nprint('{}'.format('JSON output file created'))\n\n## JSON to Pandas\ndf = pd.DataFrame.from_dict(js)\ndf2 = pd.Series()\nprint('{}'.format('Pandas DataFrame created'))\n\n# ## Pandas to CSV\ndf.to_csv('{}.csv'.format('tTweets'), sep=',')\nprint('{}'.format('CSV output file created'))\n\n## Pandas DataFrame to SQLite3 Database\n''' issues here. 
Need to give dataTypes'''\n# with sqlite3.connect('tTweets.db') as conn:\n# df.to_sql(name='tTweets', con=conn, if_exists=append)\n\n\n","sub_path":"extractTrump.py","file_name":"extractTrump.py","file_ext":"py","file_size_in_byte":1180,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"248060401","text":"import sys\nimport os\nimport shutil\nimport uuid\nimport subprocess as sp\nfrom multiprocessing import Pool\n\nimport numpy as np\n\n\nqc_rem = \"\"\"\\\n$rem\nJOBTYPE force\nSYMMETRY false\nSYM_IGNORE true\nQM_MM true\nBASIS 6-31G*\nSCF_CONVERGENCE 7\nMETHOD B3LYP\nSKIP_CHARGE_SELF_INTERACT true\n$end\n\"\"\"\n\n\nELEMENTS = [\"None\", 'H', 'He',\n 'Li', 'Be', 'B', 'C', 'N', 'O', 'F', 'Ne',\n 'Na', 'Mg', 'Al', 'Si', 'P', 'S', 'Cl', 'Ar',\n 'K', 'Ca', 'Sc', 'Ti', 'V', 'Cr', 'Mn', 'Fe', 'Co', 'Ni', 'Cu', 'Zn', 'Ga', 'Ge', 'As', 'Se', 'Br', 'Kr',\n 'Rb', 'Sr', 'Y', 'Zr', 'Nb', 'Mo', 'Tc', 'Ru', 'Rh', 'Pd', 'Ag', 'Cd', 'In', 'Sn', 'Sb', 'Te', 'I', 'Xe',\n 'Cs', 'Ba', 'La', 'Ce', 'Pr', 'Nd', 'Pm', 'Sm', 'Eu', 'Gd', 'Tb', 'Dy', 'Ho', 'Er', 'Tm', 'Yb',\n 'Lu', 'Hf', 'Ta', 'W', 'Re', 'Os', 'Ir', 'Pt', 'Au', 'Hg', 'Tl', 'Pb', 'Bi']\n\n\ndef get_element_symbols(atomic_number):\n try:\n return [ELEMENTS[i] for i in atomic_number]\n except TypeError:\n return ELEMENTS[atomic_number]\n\n\ndef get_qchem_input(qc_inp, qm_pos, qm_elem_num, charge, mult):\n assert len(qm_pos) == len(qm_elem_num)\n qm_element = get_element_symbols(qm_elem_num)\n with open(qc_inp, 'w') as f:\n f.write(\"$molecule\\n\")\n f.write(\"%d %d\\n\" % (charge, mult))\n for i in range(len(qm_pos)):\n f.write(\"\".join([\"%-3s\" % qm_element[i],\n \"%25.16f\" % qm_pos[i, 0],\n \"%25.16f\" % qm_pos[i, 1],\n \"%25.16f\" % qm_pos[i, 2], \"\\n\"]))\n f.write(\"$end\" + \"\\n\\n\")\n\n f.write(\"$external_charges\\n\")\n f.write(\"$end\\n\\n\")\n\n f.write(qc_rem + \"\\n\")\n\n\ndef get_qchem_force(qm_pos, qm_elem_num, charge, mult):\n cwd = \"/dev/shm/run_\" + str(uuid.uuid4())\n if not os.path.exists(cwd):\n os.mkdir(cwd)\n get_qchem_input(cwd + \"/qchem.inp\", qm_pos, qm_elem_num, charge, mult)\n\n cmdline = f\"cd {cwd}; \"\n cmdline += f\"QCSCRATCH={cwd} qchem qchem.inp qchem.out save > qchem_run.log\"\n proc = sp.Popen(args=cmdline, shell=True)\n proc.wait()\n\n force = np.fromfile(cwd + \"/save/131.0\", dtype=\"f8\").reshape(-1, 3)\n energy = np.fromfile(cwd + \"/save/99.0\", dtype=\"f8\", count=2)[1]\n\n shutil.rmtree(cwd)\n\n return force, energy\n\n\ndef get_qchem_training_set(coords, qm_elem_num, charge, mult):\n args = ((qm_pos, qm_elem_num, charge, mult) for qm_pos in coords)\n with Pool() as p:\n res = p.starmap(get_qchem_force, args)\n forces = []\n energies = []\n for r in res:\n forces.append(r[0])\n energies.append(r[1])\n forces = np.array(forces)\n energies = np.array(energies)\n\n return forces, energies\n","sub_path":"src/training_set.py","file_name":"training_set.py","file_ext":"py","file_size_in_byte":2695,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"83396077","text":"from core.check_export_sbg.service import handler as check_export_handler\n# from core.check_export_sbg.service import get_inputfile_accession\nimport pytest\nfrom ..conftest import valid_env\nimport json\nfrom core import ff_utils\n\n\n@pytest.fixture\ndef fastqc_payload(): # pylint: disable=fixme\n return {\"ff_meta\":\n {\"run_platform\": \"SBG\",\n \"uuid\": \"d8e1b6a8-71d4-46e8-93cb-e07416252ca5\",\n \"parameters\": [],\n 
\"workflow\": \"2324ad76-ff37-4157-8bcc-3ce72b7dace9\",\n \"title\": \"fastqc-0-11-4-1 run 2017-02-16 02:49:29.566538\",\n \"sbg_import_ids\": [\"pAqEn5E2N5zL0X32gaRwogs8nrZ9LGJg\"],\n \"award\": \"1U01CA200059-01\",\n \"sbg_task_id\": \"\",\n \"lab\": \"4dn-dcic-lab\",\n \"sbg_mounted_volume_ids\": [\"4dn_s30zdfj3xs\",\n \"4dn_s3ijocpds2\"],\n \"run_status\": \"output_files_transferring\",\n \"input_files\": [{\"workflow_argument_name\": \"input_fastq\",\n \"value\": \"0048955c-7cb6-4e56-a4d8-56fad52f688c\"}],\n \"sbg_export_ids\": [],\n \"output_files\": [{\"workflow_argument_name\": \"report_zip\",\n \"export_id\": \"Z3X8ylki1QIKleYiccGu8V7ethxPBSfm\",\n \"value\": \"f3716210-0593-498a-bc66-c9d883bd3171\",\n \"upload_key\": \"d8e1b6a8-71d4-46e8-93cb-e07416252ca5/_1_4DNFIW7Q5UDL_fastqc.zip\"}]},\n \"workflow\": {\"import_id_list\": [\"pAqEn5E2N5zL0X32gaRwogs8nrZ9LGJg\"],\n \"app_name\": \"fastqc-0-11-4-1\",\n \"task_id\": \"06121cfb-39a8-47a1-a0bf-852a9053cec0\",\n \"task_input\": {\"project\": \"4dn-dcic/dev\",\n \"inputs\": {\"input_fastq\": {\"path\": \"58a5133ae4b0bd9c28204633\",\n \"class\": \"File\",\n \"name\": \"4DNFIW7Q5UDL.fastq.gz\"}},\n \"app\": \"4dn-dcic/dev/fastqc-0-11-4-1\",\n \"name\": \"fastqc-0-11-4-1\"},\n \"volume_list\": [{\"id\": \"4dn-labor/4dn_s30zdfj3xs\",\n \"name\": \"4dn_s30zdfj3xs\"},\n {\"id\": \"4dn-labor/4dn_s3ijocpds2\",\n \"name\": \"4dn_s3ijocpds2\"}],\n \"output_volume_id\": \"4dn-labor/4dn_s3ijocpds2\",\n \"export_report\": [{\"workflow_argument_name\": \"report_zip\",\n \"export_id\": \"Z3X8ylki1QIKleYiccGu8V7ethxPBSfm\",\n \"value\": \"f3716210-0593-498a-bc66-c9d883bd3171\",\n \"upload_key\":\n \"d8e1b6a8-71d4-46e8-93cb-e07416252ca5/_1_4DNFIW7Q5UDL_fastqc.zip\"}],\n # pylint: disable=fixme, line-too-long\n \"project_id\": \"4dn-dcic/dev\",\n \"export_id_list\": [\"Z3X8ylki1QIKleYiccGu8V7ethxPBSfm\",\n \"Z3X8ylki1QIKleYiccGu8V7ethxPBSfm\"]}} # pylint: disable=fixme\n\n\n@valid_env\n@pytest.mark.webtest\ndef test_check_export_fastqc_e2e(fastqc_payload, ff_keys, tibanna_env):\n # lets make sure we have a valid fastqc file\n fastqs = ff_utils.get_metadata(\"/search/?type=FileFastq&limit=1\", ff_keys)['@graph'][0]\n fastqc_payload['ff_meta']\n\n filename = fastqs['upload_key'].split('/')[1]\n fastqc_payload['workflow']['task_input']['inputs']['input_fastq']['name'] = filename\n fastqc_payload['workflow']['export_report'][0]['value'] = fastqs['uuid']\n fastqc_payload['ff_meta']['output_files'][0]['value'] = fastqs['uuid']\n\n try:\n fastqc_payload.update(tibanna_env)\n ret = check_export_handler(fastqc_payload, None)\n except Exception as e:\n if \"409\" in e:\n # duplicate UUID, just ignore that\n return\n if \"NoSuchKey\" in e.message:\n pytest.skip(\"file not on s3 ignorring test\")\n return\n raise e\n ret = check_export_handler(fastqc_payload, None)\n assert json.dumps(ret)\n assert ret['workflow']\n\n # sbg = sbg_utils.create_sbg_workflow(**ret['workflow'])\n # accession = get_inputfile_accession(sbg, input_file_name='input_fastq')\n # original_file = ff_utils.get_metadata(accession, ff_keys)\n\n\n@valid_env\n@pytest.mark.webtest\ndef test_check_export_sbg_e2e(check_export_event_data, tibanna_env):\n try:\n check_export_event_data.update(tibanna_env)\n ret = check_export_handler(check_export_event_data, None)\n except KeyError as key_e:\n pytest.skip('Data issue, skipping test: %s' % key_e)\n except Exception as e:\n if type(e) is AssertionError:\n # data issue I think\n return\n elif \"409\" in e:\n # duplicate UUID, just ignore that\n return\n 
raise e\n assert json.dumps(ret)\n assert ret['workflow']\n # assert ret['ff_meta']['output_files']\n # assert ret['ff_meta']['sbg_export_ids']\n","sub_path":"tests/core/check_export_sbg/test_service.py","file_name":"test_service.py","file_ext":"py","file_size_in_byte":5214,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"134839049","text":"#! /usr/bin/env python\nimport rospy\n\nfrom std_msgs.msg import Header\nfrom darknet_ros_msgs.msg import BoundingBoxes\nfrom butia_vision_msgs.msg import Description, Recognitions\nfrom butia_vision_msgs.srv import ListClasses, ListClassesResponse\n\nclass YoloRecognition():\n def __init__(self):\n self.readParameters()\n\n self.bounding_boxes_sub = rospy.Subscriber(self.bounding_boxes_topic, BoundingBoxes, self.yoloRecognitionCallback, queue_size=self.bounding_boxes_qs)\n \n self.recognized_objects_pub = rospy.Publisher(self.object_recognition_topic, Recognitions, queue_size=self.object_recognition_qs)\n self.recognized_people_pub = rospy.Publisher(self.people_detection_topic, Recognitions, queue_size=self.people_detection_qs)\n self.object_list_updated_pub = rospy.Publisher(self.object_list_updated_topic, Header, queue_size=self.object_list_updated_qs)\n\n self.list_objects_server = rospy.Service(self.list_objects_service, ListClasses, self.getObjectList)\n\n self.object_list_updated_header = Header()\n self.object_list_updated_header.stamp = rospy.Time.now()\n self.object_list_updated_header.frame_id = \"objects_list\"\n\n self.object_list_updated_pub.publish(self.object_list_updated_header)\n \n def getObjectList(self, req):\n objects_list = []\n for key, value in self.possible_classes.items():\n for elem in value:\n objects_list.append(key + '/' + elem)\n return ListClassesResponse(objects_list)\n\n def yoloRecognitionCallback(self, bbs):\n rospy.loginfo('Image ID: ' + str(bbs.image_header.seq))\n\n bbs_l = bbs.bounding_boxes\n\n objects = []\n people = []\n\n for bb in bbs_l:\n if bb.Class in self.possible_classes['people'] and bb.probability >= self.threshold:\n person = Description()\n person.label_class = 'people' + '/' + bb.Class\n person.probability = bb.probability\n person.bounding_box.minX = bb.xmin\n person.bounding_box.minY = bb.ymin\n person.bounding_box.width = bb.xmax - bb.xmin\n person.bounding_box.height = bb.ymax - bb.ymin\n people.append(person)\n\n elif bb.Class in [val for sublist in self.possible_classes.values() for val in sublist] and bb.probability >= self.threshold:\n object_d = Description()\n index = 0\n i = 0\n for value in self.possible_classes.values():\n if bb.Class in value:\n index = i\n i += 1\n object_d.label_class = list(self.possible_classes.keys())[index] + '/' + bb.Class\n object_d.probability = bb.probability\n object_d.bounding_box.minX = bb.xmin\n object_d.bounding_box.minY = bb.ymin\n object_d.bounding_box.width = bb.xmax - bb.xmin\n object_d.bounding_box.height = bb.ymax - bb.ymin\n objects.append(object_d)\n\n objects_msg = Recognitions()\n people_msg = Recognitions()\n\n if len(objects) > 0:\n objects_msg.header = bbs.header\n objects_msg.image_header = bbs.image_header\n objects_msg.descriptions = objects\n self.recognized_objects_pub.publish(objects_msg)\n\n if len(people) > 0:\n people_msg.header = bbs.header\n people_msg.image_header = bbs.image_header\n people_msg.descriptions = people\n self.recognized_people_pub.publish(people_msg)\n \n\n\n def readParameters(self):\n self.bounding_boxes_topic = 
rospy.get_param(\"/object_recognition/subscribers/bounding_boxes/topic\", \"/darknet_ros/bounding_boxes\")\n self.bounding_boxes_qs = rospy.get_param(\"/object_recognition/subscribers/bounding_boxes/queue_size\", 1)\n\n self.object_recognition_topic = rospy.get_param(\"/object_recognition/publishers/object_recognition/topic\", \"/butia_vision/or/object_recognition\")\n self.object_recognition_qs = rospy.get_param(\"/object_recognition/publishers/object_recognition/queue_size\", 1)\n\n self.people_detection_topic = rospy.get_param(\"/object_recognition/publishers/people_detection/topic\", \"/butia_vision/or/people_detection\")\n self.people_detection_qs = rospy.get_param(\"/object_recognition/publishers/people_detection/queue_size\", 1)\n\n self.object_list_updated_topic = rospy.get_param(\"/object_recognition/publishers/object_list_updated/topic\", \"/butia_vision/or/object_list_updated\")\n self.object_list_updated_qs = rospy.get_param(\"/object_recognition/publishers/object_list_updated/queue_size\", 1)\n\n self.list_objects_service = rospy.get_param(\"/object_recognition/servers/list_objects/service\", \"/butia_vision/or/list_objects\")\n\n self.threshold = rospy.get_param(\"/object_recognition/threshold\", 0.5)\n \n self.possible_classes = dict(rospy.get_param(\"/object_recognition/possible_classes\"))","sub_path":"object_recognition/scripts/yolo_recognition/yolo_recognition.py","file_name":"yolo_recognition.py","file_ext":"py","file_size_in_byte":5055,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"354971997","text":"from __future__ import unicode_literals\n#encoding:utf8\nimport pandas as pd\nfrom pymongo import MongoClient\n\ndb = MongoClient('localhost:27017')['tfm']\nnames = [\"movie_id\" , \"movie_title\" , \"release_date\" , \"video_release_date\" ,\n \"imdb_url\" , \"unknown\" , \"action\" , \"adventure\" , \"animation\" ,\n \"children's\" , \"comedy\" , \"crime\" , \"documentary\" , \"drama\" , \"fantasy\" ,\n \"film-Noir\" , \"horror\" , \"musical\" , \"mystery\" , \"romance\" , \"sci_fi\" ,\n \"thriller\" , \"war\" , \"western\" ]\ndf = pd.read_csv('../ml-100k/u.item', sep='|', names=names)\n\nfor index,row in df.iterrows():\n document = dict()\n for field, value in row.iteritems():\n document[field] = value if not isinstance(value, basestring) else value.decode('utf8', errors='ignore')\n db.movies.insert_one(document)\n\ndf2 = pd.read_csv('../ml-100k/u.data', sep='\\t', names=['user_id', 'item_id', 'rating', 'timestamp'])\n\nfor index,row in df2.iterrows():\n document = dict()\n for field, value in row.iteritems():\n document[field] = value if not isinstance(value, basestring) else value.decode('utf8', errors='ignore')\n db.ratings.insert_one(document)\n","sub_path":"import_data.py","file_name":"import_data.py","file_ext":"py","file_size_in_byte":1179,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"64186037","text":"import psycopg2\r\nimport uuid\r\nfrom zipfile import ZipFile\r\nimport pathlib\r\n\r\nconn = psycopg2.connect(dbname=\"gxgeo\", user=\"gxgeo\", password=\"asdfasdf\", host='192.168.99.100', port='15432')\r\n\r\ncur = conn.cursor()\r\n\r\npak_ids = (\r\n '23ecdbb1b5efa4166f0895de0a36060915c3f9527e55fd8b64c0d1038574f1d3',\r\n 'cec20703efc542420d43531ee2985cc5606225c6c0d1c082bf88d2bbb4253555',\r\n '16080eb5405e05b3b18d5d16e4f6cce80e6c01ee32e83bc47d85bcf6486b1aad'\r\n)\r\n\r\ncur.execute(\"create temp table t_doc_pack as select * from doc_packs where 
uuid_v4 in %s;\", (pak_ids,))\r\ncur.execute(\"create temp table t_docs as select * from docs where doc_pack in %s;\", (pak_ids,))\r\n\r\n\r\n\r\ncur.execute('select uuid_v4, uri from t_docs where doc_pack in %s;', (pak_ids,))\r\n\r\nnew_docs = []\r\nfileset = set()\r\nfor row in cur:\r\n new_id = str(uuid.uuid4())\r\n fileset.add(row[1])\r\n new_uri = '.'.join([*row[1].split('.')[:-1], 'zip'] )\r\n new_docs.append((new_id, new_uri, 'zip', row[0]) )\r\n\r\nfor doc in new_docs:\r\n cur.execute('update t_docs set uuid_v4 = %s, uri=%s, file_type=%s where uuid_v4=%s', doc)\r\n\r\nnew_keeper = str(uuid.uuid4())\r\n\r\nfor pak_id in pak_ids:\r\n new_id = str(uuid.uuid4())\r\n cur.execute('update t_doc_pack set uuid_v4 = %s , doc_keeper = %s where uuid_v4=%s', (new_id, new_keeper, pak_id,) )\r\n cur.execute('update t_docs set doc_pack = %s , doc_keeper = %s where doc_pack=%s', (new_id, new_keeper, pak_id,) )\r\ncur.execute('insert into doc_dir select doc_keeper, archive_type, specialty from t_doc_pack;')\r\ncur.execute('insert into pqx_syskey (kid, kparent, kname) values (%s, %s, %s)', (new_keeper, 'b1aa1890-5c1c-48c7-ac06-e526891da829', '新测试单位'))\r\n\r\n\r\ncur.execute('select uuid_v4, uri, file_type from t_docs;')\r\nfor row in cur:\r\n print(row)\r\n\r\nrepo_path = '../docker/data/REPOSITORY/sample_repo'\r\n\r\n# for filename in fileset:\r\n# source = pathlib.Path(repo_path, filename)\r\n# dest = pathlib.Path(source.stem + '.zip')\r\n# with ZipFile(str(dest), 'w') as myzip:\r\n# myzip.write(str(source))\r\n\r\n\r\ncur.execute('insert into doc_packs select * from t_doc_pack')\r\ncur.execute('insert into docs select * from t_docs')\r\n\r\nconn.commit()\r\n\r\nconn.close()\r\n","sub_path":"python_services/gis/gen_test_data.py","file_name":"gen_test_data.py","file_ext":"py","file_size_in_byte":2184,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"632433453","text":"from keras.datasets import mnist\nfrom keras import models\nfrom keras import layers\nfrom keras.utils import to_categorical\n\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n(iTrain, lTrain), (iTest, lTest) = mnist.load_data()\n\n#reshapes the training values so that they will fit easier into the network, 1D array of pixel values between 0 and 1\niTrain = iTrain.reshape((60000, 28, 28, 1))\niTrain = iTrain.astype('float32') / 255\n\niTest = iTest.reshape((10000, 28, 28, 1))\niTest = iTest.astype('float32') / 255\n\nlTrain = to_categorical(lTrain)\nlTest = to_categorical(lTest)\n\n#Sets up the convolutional network, input shape shrinks as it goes in\nmodel = models.Sequential()\nmodel.add(layers.Conv2D(32,(3,3), activation='relu', input_shape=(28,28,1)))\nmodel.add(layers.MaxPooling2D((2,2)))\nmodel.add(layers.Conv2D(64,(3,3), activation='relu'))\nmodel.add(layers.MaxPooling2D((2,2)))\nmodel.add(layers.Conv2D(64,(3,3), activation='relu'))\n\n#Adds a classifier of densely connected networks that uses vectors (similar to earlier projects)\n#first must flatten output of last 3d layer\nmodel.add(layers.Flatten())\nmodel.add(layers.Dense(64, activation='relu'))\nmodel.add(layers.Dense(10, activation='softmax'))\n\n\nmodel.compile(optimizer='rmsprop',\n loss='categorical_crossentropy',\n metrics=['accuracy'])\nmodel.fit(iTrain, lTrain, epochs=7, batch_size=64)\n\ntLoss, tAcc = model.evaluate(iTest, 
lTest)\n","sub_path":"NumberAnalysis.py","file_name":"NumberAnalysis.py","file_ext":"py","file_size_in_byte":1417,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"46129302","text":"#!/usr/bin/env python3\nimport sys\nimport os\nfrom virtualenvapi.manage import VirtualEnvironment\nimport logging\nimport strings\n\nlogger = logging.getLogger(__name__)\nlogger.setLevel(logging.DEBUG)\n\n\nclass DjangoProject(object):\n def __init__(self, project_name, default_app_name=None,\n overwrite_root_dir=None):\n self.overwrite_root_dir = overwrite_root_dir\n self.project_name = self.normalize_project_name(project_name)\n self.default_app_name = default_app_name\n\n if not default_app_name:\n self.default_app_name = self.project_name + \"_app\"\n\n self.parent_dir = os.getcwd()\n self.root_server_directory = os.path.join(self.parent_dir,\n self.project_name)\n\n self.venv_path = os.path.join(self.root_server_directory,\n \"venv\")\n\n def create(self):\n \"\"\"\n \"\"\"\n try:\n self.create_root_server_directory()\n self.create_virtualenv()\n self.start_project()\n self.create_apps_dir()\n self.activate_venv()\n self.create_default_app()\n\n except Exception as exception:\n logger.error(exception)\n exit(1)\n\n def create_root_server_directory(self):\n if os.path.isdir(self.root_server_directory):\n if not self.overwrite_root_dir:\n logger.error(strings.ROOT_DIR_ALREADY_EXISTS_NO_OVERWRITE)\n exit(1)\n else:\n logger.info(strings.ROOT_DIR_ALREADY_EXISTS_OVERWRITE)\n try:\n os.makedirs(self.root_server_directory)\n except OSError as error:\n logger.error(strings.ROOT_DIR_CREATION_ERROR + \": ({0})\"\n .format(error))\n exit(1)\n else:\n logger.info(strings.ROOT_DIR_CREATION_OK)\n\n def create_virtualenv(self):\n \"\"\"\n \"\"\"\n venv = VirtualEnvironment(self.venv_path, python=\"python3\")\n venv.open_or_create()\n\n # TODO: Move all workarounds to the \"too long pathname\" bug to specific\n # functions\n # os.rename(\"venv/bin/pip3\", \"venv/bin/pip3-old\")\n # os.remove(\"venv/bin/pip\")\n # python_latest_filename = \"\"\n # for file in os.listdir('venv/bin'):\n # if file.startswith(\"pip3.\"):\n # python_latest_filename = file\n # os.remove(\"venv/bin/\" + file)\n\n # pip_fix_shell_script = (\"#!/bin/bash\" + \"\\n\" +\n # os.path.abspath('.') + \"/venv/bin/python \" +\n # os.path.abspath('.') + \"/venv/bin/pip3-old $@\")\n\n # pip_fix_shell_script = (\"#!/bin/bash\" + \"\\n\" +\n # \"./python pip3-old $@\")\n\n # with open(\"venv/bin/pip\", \"w\") as fd:\n # fd.write(pip_fix_shell_script)\n\n # os.chmod(\"venv/bin/pip\", 0o775)\n\n # os.symlink(\"pip\", \"venv/bin/pip3\")\n # os.symlink(\"pip\", \"venv/bin/\" + python_latest_filename)\n\n venv.install(\"django\")\n\n # TODO: use the mkpppp3 fix for the ptipython prompt-toolkit bug.\n\n def start_project(self):\n python_path = os.path.join(self.venv_path, \"bin/python3\")\n django_admin_path = os.path.join(self.venv_path, \"bin/django-admin.py\")\n os.system(\"{0} {1} startproject {2} {2}\".format(python_path,\n django_admin_path,\n self.project_name))\n\n def activate_venv(self):\n activate_path = os.path.join(self.venv_path, \"bin/activate\")\n os.system(\". 
{0}\".format(activate_path))\n\n def create_default_app(self):\n python_path = os.path.join(self.venv_path, \"bin/python3\")\n django_admin_path = os.path.join(self.venv_path, \"bin/django-admin.py\")\n apps_path = os.path.join(self.root_server_directory,\n \"apps\",\n self.default_app_name)\n os.makedirs(apps_path)\n os.system(\"{0} {1} startapp {2} {3}\".format(python_path,\n django_admin_path,\n self.default_app_name,\n apps_path))\n\n self.update_root_settings()\n self.update_root_urls()\n self.create_app_urls()\n self.update_app_views()\n self.create_app_templates()\n\n def create_apps_dir(self):\n apps_path = os.path.join(self.root_server_directory, \"apps\")\n os.makedirs(apps_path)\n open(os.path.join(apps_path, \"__init__.py\"), 'a').close()\n\n def update_root_settings(self):\n root_settings_path = os.path.join(self.root_server_directory,\n self.project_name,\n \"settings.py\") \n input_file = open(root_settings_path, 'r').readlines()\n write_file = open(root_settings_path, 'w')\n for line in input_file:\n write_file.write(line)\n if 'INSTALLED_APPS = [' in line:\n new_line = \" 'apps.{}',\".format(self.default_app_name)\n write_file.write(new_line + \"\\n\")\n write_file.close()\n\n def update_root_urls(self):\n root_urls_path = os.path.join(self.root_server_directory,\n self.project_name,\n \"urls.py\")\n input_file = open(root_urls_path, 'r').readlines()\n write_file = open(root_urls_path, 'w')\n for line in input_file:\n write_file.write(line)\n if 'from django.urls import path' in line:\n new_line = (\"from django.urls import include\" + '\\n' +\n \"from django.conf.urls import url\")\n write_file.write(new_line + \"\\n\")\n\n elif 'urlpatterns = [' in line:\n new_line = (\" url(r'^', include('apps.{}.urls')),\"\n .format(self.default_app_name))\n write_file.write(new_line + \"\\n\")\n write_file.close()\n\n def create_app_urls(self):\n app_urls_path = os.path.join(self.root_server_directory,\n \"apps\",\n self.default_app_name,\n \"urls.py\")\n \n app_urls_txt = (\"from django.conf.urls import url\" + '\\n' +\n \"from . import views\" + '\\n' + '\\n' +\n \"urlpatterns = [\" + '\\n' +\n \" url(r'^$', views.index)]\")\n\n with open(app_urls_path, \"w\") as fd:\n fd.write(app_urls_txt)\n\n def update_app_views(self):\n app_views_path = os.path.join(self.root_server_directory,\n \"apps\",\n self.default_app_name,\n \"views.py\")\n \n input_file = open(app_views_path, 'r').readlines()\n write_file = open(app_views_path, 'w')\n for line in input_file:\n write_file.write(line)\n if 'from django.shortcuts import render' in line:\n\n new_line = (\"def index(request):\\n\" +\n \" context = {'email': 'john@nowhere.net',\\n\" +\n \" 'name': 'John'}\\n\" +\n \" return render(request, '{}/index.html',\\n\".format(self.default_app_name) + # noqa\n \" context)\")\n\n write_file.write(new_line + \"\\n\")\n\n elif 'urlpatterns = [' in line:\n new_line = (\" url(r'^', include('apps.{}.urls')),\"\n .format(self.default_app_name))\n write_file.write(new_line + \"\\n\")\n write_file.close()\n\n def create_app_templates(self):\n app_templates_path = os.path.join(self.root_server_directory,\n \"apps\",\n self.default_app_name,\n \"templates\",\n self.default_app_name)\n\n os.makedirs(app_templates_path)\n\n index_template = (\"\\n\\n\\n\" +\n \"Index\\n\" +\n \"\\n\\n

{{name}}

\\n\" +\n \"

{{email}}

\\n\")\n\n with open(os.path.join(app_templates_path, \"index.html\"), \"w\") as fd:\n fd.write(index_template)\n\n app_static_path = os.path.join(self.root_server_directory,\n \"apps\",\n self.default_app_name,\n \"static\",\n self.default_app_name)\n\n os.makedirs(app_static_path)\n\n def normalize_project_name(self, project_name: str) -> str:\n invalid_chars = ['.', '-', ' ']\n for invalid_char in invalid_chars:\n if invalid_char in project_name:\n logger.info(\"Replacing invalid char ({0}) in project name.\"\n .format(invalid_char))\n project_name = project_name.replace(invalid_char, '_')\n return project_name\n\n\ndef main():\n if len(sys.argv) != 2:\n print(\"Usage: {0} \".format(sys.argv[0]))\n print(\"Example: {0} my-project\".format(sys.argv[0]))\n exit(1)\n\n project = DjangoProject(sys.argv[1])\n project.create()\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"django_project_starter.py","file_name":"django_project_starter.py","file_ext":"py","file_size_in_byte":9716,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"503196411","text":"# ---\n# jupyter:\n# jupytext:\n# formats: py:light\n# text_representation:\n# extension: .py\n# format_name: light\n# format_version: '1.5'\n# jupytext_version: 1.11.2\n# kernelspec:\n# display_name: Python 3\n# language: python\n# name: python3\n# ---\n\nimport pandas as pd\nimport numpy as np\nimport os\n\nfrom dash.exceptions import PreventUpdate\n\nfrom Historic_Crypto import HistoricalData\n\nimport os\nimport pandas as pd\n\naudiolizer_temp_dir = os.environ.get('AUDIOLIZER_TEMP', './history/')\nprint('audiolizer temp data:', audiolizer_temp_dir)\n\ngranularity = 300 # seconds\n\nstart_date = pd.to_datetime('2021-01-01-00-00')\nticker = 'BTC-USD'\n\n\n# +\ndef get_history(ticker, start_date, end_date = None, granularity=granularity):\n start_date = pd.to_datetime(start_date).tz_localize(None)\n \n today = pd.Timestamp.now().tz_localize(None)\n if end_date is None:\n end_date = today\n else:\n end_date = min(today, pd.to_datetime(end_date).tz_localize(None))\n \n fnames = []\n for int_ in pd.interval_range(start_date,\n end_date):\n fname = audiolizer_temp_dir + '/{}-{}.csv'.format(\n ticker, int_.left.strftime('%Y-%m-%d'))\n\n if not os.path.exists(fname):\n int_df = HistoricalData(ticker,\n granularity,\n int_.left.strftime('%Y-%m-%d-%H-%M'),\n int_.right.strftime('%Y-%m-%d-%H-%M'),\n ).retrieve_data()\n int_df.to_csv(fname)\n fnames.append(fname)\n return pd.concat(map(lambda file: pd.read_csv(file,index_col='time', parse_dates=True),\n fnames)).drop_duplicates()\n\n\nnew = get_history(ticker, start_date)\nnew\n\n# +\nimport audiogen_p3\nimport itertools\nimport sys\n\ndef beeper(freq, amplitude = 1):\n return (amplitude*_ for _ in audiogen_p3.beep(freq))\n\ntry:\n audiogen_p3.sampler.play(itertools.chain(\n *[beeper(100*(1+i), 1./(1+i)) for i in range(8)]\n ))\nexcept:\n print('could not play sampler')\n pass\n# -\n\nimport dash_html_components as html\n\nfrom psidash.psidash import load_app\n\nimport dash_core_components as dcc\n\nfrom datetime import datetime\n\nimport plotly.graph_objs as go\n\n\n# +\ndef refactor(df, frequency = '1W'):\n low = df.low.groupby(pd.Grouper(freq=frequency)).min()\n \n high = df.high.groupby(pd.Grouper(freq=frequency)).max()\n \n close = df.close.groupby(pd.Grouper(freq=frequency)).last()\n \n open_ = df.open.groupby(pd.Grouper(freq=frequency)).first()\n \n volume = df.volume.groupby(pd.Grouper(freq=frequency)).sum()\n \n return 
pd.DataFrame(dict(low=low, high=high, open=open_, close=close, volume=volume))\n\ndef candlestick_plot(df): \n return go.Figure(data=[\n go.Bar(\n x=df.index,\n y=df.volume,\n marker_color='rgba(158,202,225,.5)',\n yaxis='y2'),\n go.Candlestick(\n x=df.index,\n open=df.open,\n high=df.high,\n low=df.low,\n close=df.close),\n ],\n \n layout=dict(yaxis=dict(title='BTC price [USD]'),\n yaxis2=dict(\n title='BTC volume [BTC]',\n overlaying = 'y',\n side='right'),\n dragmode='select',\n ))\n\n\n\n# -\n\nfrom psidash.psidash import get_callbacks, load_conf, load_dash, load_components\n\n# +\nA4 = 440 # tuning\nC0 = A4*pow(2, -4.75)\n\nfrequencies = dict(\n# A4 = A4,\n# C0 = C0,\n A0 = 27.5,\n C2 = 65.40639,\n C3 = 130.8128,\n C4 = 262,\n C5 = 523.2511,\n C6 = 1046.502,\n C7 = 2093.005,\n C8 = 4186, # high C on piano\n)\n# -\n\nfrequency_marks = {np.log10(v): k for k,v in frequencies.items()}\nfrequency_marks\n\n# +\nfrom math import log2\n\ndef pitch(freq):\n \"\"\"convert from frequency to pitch\n \n Borrowed from John D. Cook https://www.johndcook.com/blog/2016/02/10/musical-pitch-notation/\n \"\"\"\n name = [\"C\", \"C#\", \"D\", \"D#\", \"E\", \"F\", \"F#\", \"G\", \"G#\", \"A\", \"A#\", \"B\"]\n h = round(12*log2(freq/C0))\n octave = h // 12\n n = h % 12\n return name[n] + str(octave)\n\ndef freq(note, A4=A4):\n \"\"\" convert from pitch to frequency\n \n based on https://gist.github.com/CGrassin/26a1fdf4fc5de788da9b376ff717516e\n \"\"\"\n notes = ['A', 'A#', 'B', 'C', 'C#', 'D', 'D#', 'E', 'F', 'F#', 'G', 'G#']\n\n octave = int(note[2]) if len(note) == 3 else int(note[1])\n \n keyNumber = notes.index(note[0:-1]);\n \n if (keyNumber < 3) :\n keyNumber = keyNumber + 12 + ((octave - 1) * 12) + 1; \n else:\n keyNumber = keyNumber + ((octave - 1) * 12) + 1; \n\n return A4 * 2** ((keyNumber- 49) / 12)\n\n\n# -\n\ndef get_beats(start, end, freq):\n return len(pd.date_range(pd.to_datetime(start),\n pd.to_datetime(end) + pd.Timedelta(freq),\n freq=freq, closed=None,\n ))\n\n\n# +\ndef merge_pitches(beeps, amp_min):\n merged = []\n last_freq = 0\n last_amp = 0\n for freq, amp, dur in beeps:\n if freq == last_freq:\n if merged[-1][1] < amp_min:\n merged[-1][1] = (amp + last_amp)/2 # todo: use moving average\n merged[-1][2] += dur\n continue\n merged.append([freq, amp, dur])\n last_freq = freq\n last_amp = amp\n return merged\n\ndef quiet(beeps, min_amp):\n silenced = []\n for freq, amp, dur in beeps:\n if amp < min_amp:\n amp = 0\n silenced.append((freq, amp, dur))\n return silenced \n\n\n# +\nconf = load_conf('../audiolizer.yaml')\napp = load_dash(__name__, conf['app'], conf.get('import'))\napp.layout = load_components(conf['layout'], conf.get('import'))\n\nif 'callbacks' in conf:\n callbacks = get_callbacks(app, conf['callbacks'])\n\n\ndef update_graph(start, end, frequency):\n start = pd.to_datetime(start)\n end = pd.to_datetime(end)\n \n new_ = new[start:end]\n return candlestick_plot(refactor(new_, frequency))\n\ndef beeper(freq, amplitude=1, duration=.25):\n return (amplitude*_ for _ in audiogen_p3.beep(freq, duration))\n\ndef get_frequency(price, min_price, max_price, log_frequency_range):\n return np.interp(price, [min_price, max_price], [10**_ for _ in log_frequency_range])\n\n@callbacks.slider_marks\ndef update_marks(url):\n return frequency_marks\n\n@callbacks.play\ndef play(start, end, cadence, log_freq_range, mode, drop_quantile, beat_quantile, tempo, toggle_merge, silence, selectedData):\n\n new = get_history(ticker, start, end)\n start_, end_ = new.index[[0, -1]]\n\n if toggle_merge:\n 
merged = 'merged'\n else:\n merged = ''\n if silence:\n silences = 'rests'\n else:\n silences = ''\n \n fname = 'BTC_{}_{}_{}_{}_{}_{}_{}_{}_{}_{}_{}.wav'.format(\n start_.date(),\n end_.date(),\n cadence,\n *['{}'.format(pitch(10**_).replace('#','sharp')) for _ in log_freq_range],\n mode,\n drop_quantile,\n beat_quantile,\n '{}bpm'.format(tempo),\n merged,\n silences,\n )\n \n duration = 60./tempo # length of the beat in seconds (tempo in beats per minute)\n \n play_time=''\n \n if selectedData is not None:\n start_select, end_select = selectedData['range']['x']\n # need the number of beats from beginning to start_select\n start_time = duration*(get_beats(start_, start_select, cadence)-1)\n # number of beats from beginning to end_select\n end_time = duration*(get_beats(start_, end_select, cadence)-1)\n total_time = duration*(get_beats(start_, end_, cadence)-1)\n# print('selected start, end time, total time:', start_time, end_time, total_time)\n play_time='#t={},{}'.format(start_time, end_time)\n# print(start_select, end_select, play_time)\n \n new_ = refactor(new[start_:end_], cadence)\n \n if os.path.exists(fname):\n return candlestick_plot(new_), app.get_asset_url(fname)+play_time\n \n \n \n# assert get_beats(*new_.index[[0,-1]], cadence) == len(new_)\n \n max_vol = new_.volume.max() # normalizes peak amplitude\n min_close = new_.close.min() # sets lower frequency bound\n max_close = new_.close.max() # sets upper frequency bound\n \n amp_min = beat_quantile/100 # threshold amplitude to merge beats\n min_vol = new_.volume.quantile(drop_quantile/100)\n \n if mode == 'tone':\n beeps = [(get_frequency(close_, min_close, max_close, log_freq_range),\n volume_/max_vol,\n duration) for close_, volume_ in new_[['close', 'volume']].values]\n \n elif mode == 'pitch':\n beeps = [(freq(pitch(get_frequency(close_, min_close, max_close, log_freq_range))),\n volume_/max_vol,\n duration) for close_, volume_ in new_[['close', 'volume']].values]\n if toggle_merge:\n beeps = merge_pitches(beeps, amp_min)\n if silence:\n beeps = quiet(beeps, min_vol/max_vol)\n \n# print(mode, 'unique frequencies:', len(np.unique([_[0] for _ in beeps])))\n \n audio = [beeper(*beep) for beep in beeps]\n \n with open('assets/'+fname, \"wb\") as f:\n audiogen_p3.sampler.write_wav(f, itertools.chain(*audio))\n \n return candlestick_plot(new_), app.get_asset_url(fname)+play_time\n \n\nif __name__ == '__main__':\n app.run_server(host='0.0.0.0', port=8051, mode='external', debug=True, dev_tools_hot_reload=False)\n# -\n\n","sub_path":"audiolizer/audiolizer.py","file_name":"audiolizer.py","file_ext":"py","file_size_in_byte":9587,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"227501509","text":"from collections import defaultdict\nimport multiprocessing\nimport os\nimport random\nimport sqlite3\nimport uuid\n\nlogger = multiprocessing.get_logger()\n\n# FIXME these are hardcoded in some SQL statements below. 
SQLite does not\n# seem to have the concept of variables...\n\n# Status\nINITIALIZED = 0\nASSIGNED = 1\nSUCCESSFUL = 2\nFAILED = 3\nABORTED = 4\nPUBLISHED = 6\nMERGING = 7\nMERGED = 8\n\n# Job type\nPROCESS = 0\nMERGE = 1\n\nclass JobitStore:\n def __init__(self, config):\n self.uuid = str(uuid.uuid4()).replace('-', '')\n self.config = config\n self.db_path = os.path.join(config['workdir'], \"lobster.db\")\n self.db = sqlite3.connect(self.db_path)\n\n # Use four databases: one for jobits, jobs, hosts, datasets each\n self.db.execute(\"\"\"create table if not exists datasets(\n id integer primary key autoincrement,\n dataset text,\n label text,\n path text,\n release text,\n global_tag text,\n publish_label text,\n pset_hash text default null,\n cfg text,\n uuid text,\n jobsize text,\n file_based int,\n empty_source int,\n jobits integer,\n masked_lumis int default 0,\n jobits_running int default 0,\n jobits_done int default 0,\n jobits_left int default 0,\n events int default 0,\n merged int default 0)\"\"\")\n self.db.execute(\"\"\"create table if not exists jobs(\n id integer primary key autoincrement,\n job int,\n type int,\n host text,\n dataset int,\n published_file_block text,\n status int default 0,\n exit_code int,\n submissions int default 0,\n jobits int default 0,\n jobits_processed int default 0,\n events_read int default 0,\n events_written int default 0,\n time_submit int,\n time_transfer_in_start int,\n time_transfer_in_end int,\n time_wrapper_start int,\n time_wrapper_ready int,\n time_stage_in_end int,\n time_prologue_end int,\n time_file_requested int,\n time_file_opened int,\n time_file_processing int,\n time_processing_end int,\n time_epilogue_end int,\n time_stage_out_end int,\n time_transfer_out_start int,\n time_transfer_out_end int,\n time_retrieved int,\n time_on_worker int,\n time_total_on_worker int,\n time_cpu int,\n cache_start_size int,\n cache_end_size int,\n cache int,\n bytes_received int,\n bytes_sent int,\n bytes_output int default 0,\n bytes_bare_output int default 0,\n foreign key(dataset) references datasets(id))\"\"\")\n\n self.db.commit()\n\n try:\n cur = self.db.execute(\"select max(id) from jobs\")\n count = int(cur.fetchone()[0])\n except:\n pass\n\n def disconnect(self):\n self.db.close()\n\n def register(self, dataset_cfg, dataset_info, filemap):\n label = dataset_cfg['label']\n unique_args = dataset_cfg.get('unique parameters', [None])\n\n cur = self.db.cursor()\n cur.execute(\"\"\"insert into datasets\n (dataset,\n label,\n path,\n release,\n global_tag,\n publish_label,\n cfg,\n uuid,\n file_based,\n empty_source,\n jobsize,\n jobits,\n masked_lumis,\n jobits_left,\n events)\n values (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)\"\"\", (\n dataset_cfg.get('dataset', label),\n label,\n os.path.join(self.config['stageout location'], label),\n os.path.basename(os.environ['LOCALRT']),\n dataset_cfg.get('global tag'),\n dataset_cfg.get('publish label', dataset_cfg['label']).replace('-', '_'), #TODO: more lexical checks #TODO: publish label check\n dataset_cfg.get('cmssw config'),\n self.uuid,\n dataset_info.file_based,\n dataset_info.empty_source,\n dataset_info.jobsize,\n dataset_info.total_lumis * len(unique_args),\n dataset_info.masked_lumis,\n dataset_info.total_lumis * len(unique_args),\n dataset_info.total_events))\n dset_id = cur.lastrowid\n\n self.db.execute(\"\"\"create table if not exists files_{0}(\n id integer primary key autoincrement,\n filename text,\n skipped int default 0,\n jobits int,\n jobits_done int default 0,\n 
jobits_running int default 0,\n events int,\n events_read int default 0,\n bytes int default 0)\"\"\".format(label))\n\n cur.execute(\"\"\"create table if not exists jobits_{0}(\n id integer primary key autoincrement,\n job integer,\n run integer,\n lumi integer,\n file integer,\n status integer default 0,\n arg text,\n foreign key(job) references jobs(id),\n foreign key(file) references files_{0}(id))\"\"\".format(label))\n\n for file in dataset_info.files:\n file_lumis = len(dataset_info.lumis[file])\n cur.execute(\n \"\"\"insert into files_{0}(jobits, events, filename, bytes) values (?, ?, ?, ?)\"\"\".format(label), (\n file_lumis * len(unique_args),\n dataset_info.event_counts[file],\n filemap(file),\n dataset_info.filesizes[file]))\n file_id = cur.lastrowid\n\n for arg in unique_args:\n columns = [(file_id, run, lumi, arg) for (run, lumi) in dataset_info.lumis[file]]\n self.db.executemany(\"insert into jobits_{0}(file, run, lumi, arg) values (?, ?, ?, ?)\".format(label), columns)\n\n self.db.execute(\"create index if not exists index_filename_{0} on files_{0}(filename)\".format(label))\n self.db.execute(\"create index if not exists index_events_{0} on jobits_{0}(run, lumi)\".format(label))\n self.db.execute(\"create index if not exists index_files_{0} on jobits_{0}(file)\".format(label))\n\n self.db.commit()\n\n def pop_jobits(self, num=1):\n \"\"\"\n Create a predetermined number of jobs. The task these are\n created for is drawn randomly from all unfinished tasks.\n\n Arguments:\n num: the number of jobs to be created (default 1)\n Returns:\n a list containing an id, dataset label, file information (id,\n filename), lumi information (id, file id, run, lumi)\n \"\"\"\n\n rows = [xs for xs in self.db.execute(\"\"\"\n select label, id, jobits - jobits_done - jobits_running, jobsize, empty_source\n from datasets\n where jobits_done + jobits_running < jobits\"\"\")]\n if len(rows) == 0:\n return []\n\n dataset, dataset_id, remaining, jobsize, empty_source = random.choice(rows)\n size = [int(jobsize)] * num\n\n fileinfo = list(self.db.execute(\"\"\"select id, filename\n from files_{0}\n where jobits_done + jobits_running < jobits\n order by skipped asc\"\"\".format(dataset)))\n files = [x for (x, y) in fileinfo]\n fileinfo = dict(fileinfo)\n\n rows = []\n for i in range(0, len(files), 40):\n chunk = files[i:i + 40]\n rows.extend(self.db.execute(\"\"\"\n select id, file, run, lumi, arg\n from jobits_{0}\n where file in ({1}) and status not in (1, 2, 6, 7, 8)\n \"\"\".format(dataset, ', '.join('?' for _ in chunk)), chunk))\n\n # files and lumis for individual jobs\n files = set()\n jobits = []\n\n # lumi veto to avoid duplicated processing\n all_lumis = set()\n\n # job container and current job size\n jobs = []\n current_size = 0\n\n for id, file, run, lumi, arg in rows:\n if (run, lumi) in all_lumis:\n continue\n\n if current_size == 0:\n if len(size) == 0:\n break\n cur = self.db.cursor()\n cur.execute(\"insert into jobs(dataset, status, type) values (?, 1, 0)\", (dataset_id,))\n job_id = cur.lastrowid\n\n if lumi > 0:\n all_lumis.add((run, lumi))\n for (ls_id, ls_file, ls_run, ls_lumi) in self.db.execute(\"\"\"\n select id, file, run, lumi\n from jobits_{0}\n where run=? and lumi=? 
and status not in (1, 2, 6, 7, 8)\"\"\".format(dataset), (run, lumi)):\n jobits.append((ls_id, ls_file, ls_run, ls_lumi))\n files.add(ls_file)\n else:\n jobits.append((id, file, run, lumi))\n files.add(file)\n\n current_size += 1\n\n if current_size == size[0]:\n jobs.append((\n str(job_id),\n dataset,\n [(id, fileinfo[id]) for id in files],\n jobits,\n arg,\n empty_source,\n False))\n\n files = set()\n jobits = []\n\n current_size = 0\n size.pop(0)\n\n if current_size > 0:\n jobs.append((\n str(job_id),\n dataset,\n [(id, fileinfo[id]) for id in files],\n jobits,\n arg,\n empty_source,\n False))\n\n dataset_update = []\n file_update = defaultdict(int)\n job_update = defaultdict(int)\n jobit_update = []\n\n for (job, label, files, jobits, arg, empty_source, merge) in jobs:\n dataset_update += jobits\n job_update[job] = len(jobits)\n jobit_update += [(job, id) for (id, file, run, lumi) in jobits]\n for (id, filename) in files:\n file_update[id] += len(filter(lambda tpl: tpl[1] == id, jobits))\n\n self.db.execute(\n \"update datasets set jobits_running=(jobits_running + ?) where id=?\",\n (len(dataset_update), dataset_id))\n\n self.db.executemany(\"update files_{0} set jobits_running=(jobits_running + ?) where id=?\".format(dataset),\n [(v, k) for (k, v) in file_update.items()])\n self.db.executemany(\"update jobs set jobits=? where id=?\",\n [(v, k) for (k, v) in job_update.items()])\n self.db.executemany(\"update jobits_{0} set status=1, job=? where id=?\".format(dataset),\n jobit_update)\n\n self.db.commit()\n\n return jobs if len(jobit_update) > 0 else []\n\n def reset_jobits(self):\n with self.db as db:\n ids = [id for (id,) in db.execute(\"select id from jobs where status=1\")]\n db.execute(\"update datasets set jobits_running=0, merged=0\")\n db.execute(\"update jobs set status=4 where status=1\")\n db.execute(\"update jobs set status=2 where status=7\")\n for (label, dset_id) in db.execute(\"select label, id from datasets\"):\n db.execute(\"update files_{0} set jobits_running=0\".format(label))\n db.execute(\"update jobits_{0} set status=4 where status=1\".format(label))\n db.execute(\"update jobits_{0} set status=2 where status=7\".format(label))\n self.update_dataset_stats(label)\n\n db.commit()\n\n return ids\n\n def update_jobits(self, jobinfos):\n job_updates = []\n\n for ((dset, jobit_source), updates) in jobinfos.items():\n file_updates = []\n jobit_updates = []\n jobit_generic_updates = []\n\n for (job_update, file_update, jobit_update) in updates:\n job_updates.append(job_update)\n file_updates += file_update\n\n # jobits either fail or are successful\n # FIXME this should really go into the job handler\n if jobit_source == 'jobs':\n jobit_status = SUCCESSFUL if job_update[-2] == FAILED else MERGED\n else:\n jobit_status = FAILED if job_update[-2] == FAILED else SUCCESSFUL\n\n jobit_updates += jobit_update\n # the last entry in the job_update is the id\n jobit_generic_updates.append((jobit_status, job_update[-1]))\n\n # update all jobits of the jobs\n self.db.executemany(\"\"\"update {0} set\n status=?\n where job=?\"\"\".format(jobit_source),\n jobit_generic_updates)\n\n # update selected, missed jobits\n self.db.executemany(\"\"\"update {0} set\n status=?\n where id=?\"\"\".format(jobit_source),\n jobit_updates)\n\n # update files in the dataset\n if len(file_updates) > 0:\n self.db.executemany(\"\"\"update files_{0} set\n jobits_running=(select count(*) from jobits_{0} where status==1 and file=files_{0}.id),\n jobits_done=(select count(*) from jobits_{0} where status==2 
and file=files_{0}.id),\n events_read=(events_read + ?),\n skipped=(skipped + ?)\n where id=?\"\"\".format(dset),\n file_updates)\n\n self.db.executemany(\"\"\"update jobs set\n host=?,\n exit_code=?,\n submissions=?,\n time_submit=?,\n time_transfer_in_start=?,\n time_transfer_in_end=?,\n time_wrapper_start=?,\n time_wrapper_ready=?,\n time_stage_in_end=?,\n time_prologue_end=?,\n time_file_requested=?,\n time_file_opened=?,\n time_file_processing=?,\n time_processing_end=?,\n time_epilogue_end=?,\n time_stage_out_end=?,\n time_transfer_out_start=?,\n time_transfer_out_end=?,\n time_retrieved=?,\n time_on_worker=?,\n time_total_on_worker=?,\n time_cpu=?,\n cache_start_size=?,\n cache_end_size=?,\n cache=?,\n bytes_received=?,\n bytes_sent=?,\n bytes_output=?,\n bytes_bare_output=?,\n jobits_processed=(jobits - ?),\n events_read=?,\n events_written=?,\n status=?\n where id=?\"\"\",\n job_updates)\n\n for label, _ in jobinfos.keys():\n self.update_dataset_stats(label)\n\n self.db.commit()\n\n def update_dataset_stats(self, label):\n self.db.execute(\"\"\"\n update datasets set\n jobits_running=(select count(*) from jobits_{0} where status in (1, 7)),\n jobits_done=(select count(*) from jobits_{0} where status in (2, 6, 8)),\n jobits_left=(select count(*) from jobits_{0} where status not in (1, 2, 6, 7, 8))\n where label=?\"\"\".format(label), (label,))\n\n def merged(self):\n unmerged = self.db.execute(\"select count(*) from datasets where merged <> 1\").fetchone()[0]\n return unmerged == 0\n\n def unfinished_jobits(self):\n cur = self.db.execute(\"select sum(jobits - jobits_done) from datasets\")\n return cur.fetchone()[0]\n\n def running_jobits(self):\n cur = self.db.execute(\"select sum(jobits_running) from datasets\")\n return cur.fetchone()[0]\n\n def dataset_info(self, label):\n cur = self.db.execute(\"\"\"select dataset,\n path,\n release,\n global_tag,\n publish_label,\n cfg,\n pset_hash,\n id,\n uuid\n from datasets\n where label=?\"\"\", (label,))\n\n return cur.fetchone()\n\n def pop_unmerged_jobs(self, bytes, num=1):\n \"\"\"Create merging jobs.\n\n This creates `num` merge jobs with a maximal size of `bytes`.\n \"\"\"\n\n if bytes <= 0:\n return []\n\n rows = self.db.execute(\"\"\"\n select label, id, jobits_done == jobits\n from datasets\n where\n merged <> 1 and\n jobits_done * 10 >= jobits\n and (select count(*) from jobs where dataset=datasets.id and status=2) > 0\n \"\"\").fetchall()\n\n if len(rows) == 0:\n logger.debug(\"no merge possibility found\")\n return []\n\n dataset, dset_id, jobits_complete = random.choice(rows)\n\n logger.debug(\"trying to merge jobs from {0}\".format(dataset))\n\n # Select the finished processing jobs from the task\n rows = self.db.execute(\"\"\"\n select id, jobits, bytes_bare_output\n from jobs\n where status=? and dataset=? 
and type=0\n order by bytes_bare_output desc\"\"\", (SUCCESSFUL, dset_id)).fetchall()\n\n # If we don't have enough rows, or the smallest two jobs can't be\n # merged, set this up so that the loop below is not evaluated and we\n # skip to the check if the merge for this dataset is complete for\n # the given maximum size.\n if len(rows) < 2 or rows[-2][2] + rows[-1][2] > bytes:\n rows = []\n else:\n minsize = rows[-1][2]\n\n class Merge(object):\n def __init__(self, job, jobits, size, maxsize):\n self.jobs = [job]\n self.jobits = jobits\n self.size = size\n self.maxsize = maxsize\n def __cmp__(self, other):\n return cmp(self.size, other.size)\n def add(self, job, jobits, size):\n if self.size + size > self.maxsize:\n return False\n self.size += size\n self.jobits += jobits\n self.jobs.append(job)\n return True\n def left(self):\n return self.maxsize - self.size\n\n candidates = []\n for job, jobits, size in rows:\n # Try to add the current job to a merge, in increasing order of\n # size left\n for merge in reversed(sorted(candidates)):\n if merge.add(job, jobits, size):\n break\n else:\n # If we're too large to merge, we're skipped\n if size + minsize <= bytes:\n candidates.append(Merge(job, jobits, size, bytes))\n\n merges = []\n for merge in reversed(sorted(candidates)):\n if len(merge.jobs) == 1:\n continue\n # For one iteration only: merge if we are either close enough\n # to the target size (TODO maybe this threshold should be\n # configurable? FIXME it's a magic number, anyways) or we are\n # done processing the task, when we merge everything we can.\n if jobits_complete or merge.size >= bytes * 0.9:\n merges.append(merge)\n\n logger.debug(\"created {0} merge jobs\".format(len(merges)))\n\n if len(merges) == 0 and jobits_complete:\n rows = self.db.execute(\"\"\"select count(*) from jobs where status=1 and dataset=?\"\"\", (dset_id,)).fetchone()\n if rows[0] == 0:\n logger.debug(\"fully merged {0}\".format(dataset))\n self.db.execute(\"\"\"update datasets set merged=1 where id=?\"\"\", (dset_id,))\n self.db.commit()\n return []\n\n res = []\n merge_update = []\n for merge in merges:\n merge_id = self.db.execute(\"\"\"\n insert into\n jobs(dataset, jobits, status, type)\n values (?, ?, ?, ?)\"\"\", (dset_id, merge.jobits, ASSIGNED, MERGE)).lastrowid\n logger.debug(\"inserted merge job {0} with jobs {1}\".format(merge_id, \", \".join(map(str, merge.jobs))))\n res += [(str(merge_id), dataset, [], [(id, None, -1, -1) for id in merge.jobs], \"\", False, True)]\n merge_update += [(merge_id, id) for id in merge.jobs]\n\n self.db.executemany(\"update jobs set status=7, job=? 
where id=?\", merge_update)\n self.update_dataset_stats(dataset)\n\n self.db.commit()\n\n return res\n\n def update_published(self, block):\n unmerged = [(name, job) for (name, job, merge_job) in block]\n merged = [(name, merge_job) for (name, job, merge_job) in block]\n jobit_update = [job for (name, job, merge_job) in block]\n\n self.db.executemany(\"\"\"update jobs\n set status=6,\n published_file_block=?\n where id=?\"\"\", unmerged)\n\n self.db.executemany(\"\"\"update jobs\n set status=6,\n published_file_block=?\n where job=?\"\"\", unmerged)\n\n for job, dataset in self.db.execute(\"\"\"select jobs.id,\n datasets.label\n from jobs, datasets\n where jobs.id in ({0})\n and jobs.dataset=datasets.id\"\"\".format(\", \".join(jobit_update))):\n self.db.execute(\"update jobits_{0} set status=6 where job=?\".format(dataset), (job,))\n\n self.db.commit()\n\n def successful_jobs(self, label):\n dset_id = self.db.execute(\"select id from datasets where label=?\", (label,)).fetchone()[0]\n\n cur = self.db.execute(\"\"\"\n select id, type\n from jobs\n where status=2 and dataset=?\n \"\"\", (dset_id,))\n\n return cur\n\n def merged_jobs(self, label):\n dset_id = self.db.execute(\"select id from datasets where label=?\", (label,)).fetchone()[0]\n\n cur = self.db.execute(\"\"\"select id, type\n from jobs\n where status=8 and dataset=?\n \"\"\", (dset_id,))\n\n return cur\n\n def failed_jobs(self, label):\n dset_id = self.db.execute(\"select id from datasets where label=?\", (label,)).fetchone()[0]\n\n cur = self.db.execute(\"\"\"select id, type\n from jobs\n where status in (3, 4) and dataset=?\n \"\"\", (dset_id,))\n\n return cur\n\n def update_pset_hash(self, pset_hash, dataset):\n self.db.execute(\"update datasets set pset_hash=? where label=?\", (pset_hash, dataset))\n\n self.db.commit()\n\n def update_missing(self, jobs):\n for job, dataset in self.db.execute(\"\"\"select jobs.id,\n datasets.label\n from jobs, datasets\n where jobs.id in ({0})\n and jobs.dataset=datasets.id\"\"\".format(\", \".join(map(str, jobs)))):\n self.db.execute(\"update jobits_{0} set status=3 where job=?\".format(dataset), (job,))\n\n # update jobs to be failed\n self.db.executemany(\"update jobs set status=3 where id=?\", [(job,) for job in jobs])\n # reset merged jobs from merging\n self.db.executemany(\"update jobs set status=2 where job=?\", [(job,) for job in jobs])\n\n self.db.commit()\n\n","sub_path":"lobster/cmssw/jobit.py","file_name":"jobit.py","file_ext":"py","file_size_in_byte":23571,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"460722445","text":"import time\nfrom time import sleep\nfrom bs4 import BeautifulSoup\nimport pandas as pd\nimport sys, io\nfrom selenium import webdriver\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.webdriver.common.proxy import *\nimport csv\nimport timeit\nfrom multiprocessing import Pool\n#number of reviews - initial 40 + multiples of 40. (eg. 
0 -> 40 + 40(0) = 40, 10 -> 40 + 40(10) = 440)\nmultiples_of_40 = 100\n\ndriver = webdriver.Firefox(executable_path='/usr/local/bin/geckodriver') #Initiate driver\n\n#create a urls array to store app store urls\nurls = []\n\n#\"depression\" apps\ndriver.get(\"https://play.google.com/store/search?q=depression&c=apps\")\n\n#loop the number of times we want to fetch urls\nfor i in range(20):\n try:\n show_more_button = driver.find_element_by_id(\"show-more-button\")\n show_more_button.click()\n sleep(3)\n\n except Exception:\n driver.execute_script(\"window.scrollTo(0, document.body.scrollHeight);\")\n sleep(3)\n\n\npage = driver.page_source\nsoup_expatistan = BeautifulSoup(page, \"html.parser\")\n\nfor i in soup_expatistan.find_all('div', attrs={'class' : 'cover'}):\n ##print (i.a['href'])\n urls.append(\"https://play.google.com\" + i.a['href'] + \"&showAllReviews=true\")\n\ndownload_dir = \"depression-apps.csv\" #where you want the file to be downloaded to \n\ncsv_file = open(download_dir, \"w\") #\"w\" indicates that you're writing strings to the file\ncsv_writer=csv.writer(csv_file) #create writer\n\n#column names for title row\ncsv_writer.writerow((\"appname\", \"rating\", \"ratingcount\", \"developer\", \"apptype\", \"reviewer\", \"date\", \"reviewer_rating\", \"thumbsup\", \"review\"))\n\nstart = timeit.default_timer()\n\n\nfor url in urls:\n try:\n show_more_button_available = False\n count = 0\n driver.get(url)\n\n #loop the number of times we want to fetch reviews\n for i in range(multiples_of_40):\n try:\n #if the \"Show More\" button is present, then attempt to click on it to load more reviews\n show_more_button = driver.find_element_by_class_name(\"RveJvd\")\n show_more_button.click()\n ##print (\"show more button\")\n show_more_button_available = False\n count = 0\n \n except Exception:\n ##print (\"exception scroll to \")\n ##print (show_more_button_available)\n if (show_more_button_available) and (count > 3):\n break\n SCROLL_PAUSE_TIME = 1\n\n # Get scroll height\n last_height = driver.execute_script(\"return document.body.scrollHeight\")\n\n while True:\n # Scroll down to bottom\n driver.execute_script(\"window.scrollTo(0, document.body.scrollHeight);\")\n\n # Wait to load page\n sleep(SCROLL_PAUSE_TIME)\n\n # Calculate new scroll height and compare with last scroll height\n new_height = driver.execute_script(\"return document.body.scrollHeight\")\n #print (new_height)\n \n if new_height == last_height:\n show_more_button_available = True\n count += 1\n break\n last_height = new_height\n\n page = driver.page_source #access page source\n\n soup_expatistan = BeautifulSoup(page, \"html.parser\") #get page source html code\n\n #app info\n appname = str(soup_expatistan.find(\"h1\", class_=\"AHFaub\").span.string)\n rating = str(soup_expatistan.find(\"div\", class_=\"pf5lIe\").next_element.attrs[\"aria-label\"].split(\" \")[1]) \n ratingcount = str(soup_expatistan.find(\"span\", class_=\"AYi5wd TBRnV\").text) \n developer = str(soup_expatistan.find(\"a\", class_=\"hrTbp R8zArc\").text) \n apptype = str(soup_expatistan.find(\"a\", itemprop=\"genre\").text)\n\n #User review box\n page = driver.page_source #access page source\n soup_expatistan = BeautifulSoup(page, \"html.parser\") #get page source html code\n expand_pages = soup_expatistan.find_all(\"div\", class_=\"d15Mdf bAhLNe\")\n\n print (\"Rating - \" + url)\n # iterate through pages\n for count, expand_page in enumerate(expand_pages):\n reviewer = expand_page.find(\"span\", class_=\"X43Kjb\").string \n date = 
expand_page.find(\"span\", class_=\"p2TkOb\").string \n reviewer_rating = str(expand_page.find(\"div\", class_=\"pf5lIe\").next_element.attrs[\"aria-label\"].split(\" \")[1])\n thumbsup = str(expand_page.find(\"div\", class_=\"jUL89d y92BAb\").text) \n fullreview = expand_page.find(\"button\", jsname=\"gxjVle\")\n if fullreview is not None:\n review = expand_page.find(\"span\", jsname=\"fbQN7e\").text\n review = \"Review Text %s: %s\" %(count+1, review)\n review = review.replace(\"\\n\", \" \")\n else:\n review = expand_page.find(\"span\", jsname=\"bN97Pc\").text\n review = \"Review Text %s: %s\" %(count+1, review)\n review = review.replace(\"\\n\", \" \")\n\n # write rows into csv file\n csv_writer.writerow([appname, rating, ratingcount, developer, apptype, reviewer, date, reviewer_rating, thumbsup, review])\n except Exception:\n driver.get(url) #get URLs\n page = driver.page_source #access page source\n\n soup_expatistan = BeautifulSoup(page, \"html.parser\") #get page source html code\n\n # app info\n appname = str(soup_expatistan.find(\"h1\", class_=\"AHFaub\").span.string)\n print (\"No Rating - \" + url)\n csv_writer.writerow([appname])\n \n csv_writer.writerow([])\n\nstop = timeit.default_timer()\nprint (stop - start) \ndriver.quit()\n\n","sub_path":"Scraping Tool/depression-apps.py","file_name":"depression-apps.py","file_ext":"py","file_size_in_byte":5841,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"114750533","text":"\"\"\"\"\"\"\n\nimport platform\n\nfrom setuptools import find_packages, setup, Extension\nfrom setuptools.command.build_ext import build_ext\nfrom setuptools.command.install import install\n\n\nclass custom_build_ext(build_ext):\n def build_extensions(self):\n self.compiler.set_executable(\"compiler_so\", \"g++\")\n self.compiler.set_executable(\"compiler_cxx\", \"g++\")\n self.compiler.set_executable(\"linker_so\", \"g++\")\n build_ext.build_extensions(self)\n\n def get_ext_filename(self, ext_name):\n return f\"{ext_name}.so\"\n\n\nclass build_ext_first(install):\n def run(self):\n self.run_command(\"build_ext\")\n super(build_ext_first, self).run()\n\n\nPACKAGENAME = \"openke\"\nVERSION = \"2021.9.20\"\nPYTHON_REQUIRES = \">=3.9\"\nINSTALL_REQUIRES = [\n \"setuptools\",\n \"torch>=1.9.0\",\n \"tqdm\",\n \"sklearn\"\n]\n\nextra_compile_args = [\"-fPIC\"]\nextra_link_args = [\"-shared\"]\n\nif platform.system() == 'Linux':\n try:\n if platform.architecture()[0] == '64bit':\n extra_compile_args = ['-pthread', '-march=native']\n else:\n extra_compile_args = ['-pthread', '-march=pentium4']\n except KeyError:\n extra_compile_args = ['-pthread', '-march=pentium4']\n extra_link_args = ['-pthread', '-export-dynamic']\n\nsetup(\n name=PACKAGENAME,\n version=VERSION,\n description=\"OpenKE: An Open Toolkit for Knowledge Embedding\",\n url=\"git@github.com:occamzrazor/OpenKE.git\",\n author=\"Han, Xu and Cao, Shulin and Lv Xin and Lin, Yankai and Liu, Zhiyuan and Sun, Maosong and Li, Juanzi\",\n packages=find_packages(exclude=[\"benchmarks\", \"examples\"]),\n ext_modules=[\n Extension(\n \"openke/release/Base\",\n sources=[\"openke/base/Base.cpp\"],\n extra_compile_args=extra_compile_args,\n extra_link_args=extra_link_args\n )\n ],\n zip_safe=False,\n cmdclass={\n \"install\": build_ext_first,\n \"build_ext\": custom_build_ext,\n },\n install_requires=INSTALL_REQUIRES,\n 
python_requires=PYTHON_REQUIRES,\n)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":2036,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"327132629","text":"__version__ = \"0.3.1\"\n\nfrom soxs.simput import \\\n write_photon_list, \\\n read_simput_catalog\n\nfrom soxs.spectra import \\\n Spectrum, \\\n ApecGenerator, \\\n ConvolvedSpectrum\n\nfrom soxs.instrument import \\\n add_instrument_to_registry, \\\n show_instrument_registry, \\\n write_instrument_json, \\\n instrument_simulator, \\\n get_instrument_from_registry, \\\n AuxiliaryResponseFile, \\\n RedistributionMatrixFile\n\nfrom soxs.background import \\\n add_background_to_registry, \\\n show_background_registry, \\\n BackgroundSpectrum, \\\n ConvolvedBackgroundSpectrum\n\nfrom soxs.spatial import \\\n PointSourceModel, \\\n RadialFunctionModel, \\\n RadialArrayModel, \\\n RadialFileModel, \\\n AnnulusModel, \\\n BetaModel, \\\n FillFOVModel, \\\n RectangleModel","sub_path":"soxs/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":789,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"363944873","text":"from conans import ConanFile, CMake, tools\nimport os\nimport platform\nimport shutil\n\nclass LibfacedetectionConan(ConanFile):\n name = 'libfacedetection'\n\n # libfacedetection has no tagged releases, so just use package_version.\n source_version = '1'\n package_version = '1'\n version = '%s-%s' % (source_version, package_version)\n\n build_requires = (\n 'llvm/5.0.2-1@vuo/stable',\n 'macos-sdk/11.0-0@vuo/stable',\n )\n settings = 'os', 'compiler', 'build_type', 'arch'\n url = 'https://github.com/ShiqiYu/libfacedetection'\n license = 'https://github.com/ShiqiYu/libfacedetection/blob/master/LICENSE'\n description = 'A library for face detection in images'\n source_dir = 'libfacedetection'\n\n build_x86_dir = '_build_x86'\n build_x86avx2_dir = '_build_x86avx2'\n build_arm_dir = '_build_arm'\n install_x86_dir = '_install_x86'\n install_x86avx2_dir = '_install_x86avx2'\n install_arm_dir = '_install_arm'\n\n generators = 'cmake'\n\n def source(self):\n self.run(\"git clone https://github.com/ShiqiYu/libfacedetection.git\")\n with tools.chdir(self.source_dir):\n self.run(\"git checkout 1ed30e7\")\n\n self.run('mv %s/LICENSE %s/%s.txt' % (self.source_dir, self.source_dir, self.name))\n\n def build(self):\n cmake = CMake(self)\n\n cmake.definitions['CMAKE_BUILD_TYPE'] = 'Release'\n cmake.definitions['BUILD_SHARED_LIBS'] = 'ON'\n cmake.definitions['CMAKE_CXX_COMPILER'] = self.deps_cpp_info['llvm'].rootpath + '/bin/clang++'\n cmake.definitions['CMAKE_C_COMPILER'] = self.deps_cpp_info['llvm'].rootpath + '/bin/clang'\n cmake.definitions['CMAKE_C_FLAGS'] = cmake.definitions['CMAKE_CXX_FLAGS'] = '-Oz -DNDEBUG'\n cmake.definitions['CMAKE_CXX_FLAGS'] += ' -stdlib=libc++'\n cmake.definitions['CMAKE_OSX_DEPLOYMENT_TARGET'] = '10.11'\n cmake.definitions['CMAKE_OSX_SYSROOT'] = self.deps_cpp_info['macos-sdk'].rootpath\n\n cmake.definitions['CMAKE_INSTALL_NAME_DIR'] = '@rpath'\n cmake.definitions['CMAKE_BUILD_WITH_INSTALL_NAME_DIR'] = 'ON'\n\n self.output.info(\"=== Build for x86_64 ===\")\n cmake.definitions['CMAKE_INSTALL_PREFIX'] = '%s/%s' % (os.getcwd(), self.install_x86_dir)\n cmake.definitions['CMAKE_OSX_ARCHITECTURES'] = 'x86_64'\n tools.mkdir(self.build_x86_dir)\n with tools.chdir(self.build_x86_dir):\n cmake.definitions['ENABLE_AVX2'] = 'OFF'\n 
cmake.definitions['ENABLE_AVX512'] = 'OFF'\n cmake.definitions['ENABLE_NEON'] = 'OFF'\n cmake.configure(source_dir='../%s' % self.source_dir, build_dir='.')\n cmake.build()\n cmake.install()\n\n self.output.info(\"=== Build for x86_64-avx2 ===\")\n cmake.definitions['CMAKE_INSTALL_PREFIX'] = '%s/%s' % (os.getcwd(), self.install_x86avx2_dir)\n tools.mkdir(self.build_x86avx2_dir)\n with tools.chdir(self.build_x86avx2_dir):\n cmake.definitions['CMAKE_OSX_ARCHITECTURES'] = 'x86_64'\n cmake.definitions['ENABLE_AVX2'] = 'ON'\n cmake.definitions['ENABLE_AVX512'] = 'OFF'\n cmake.definitions['ENABLE_NEON'] = 'OFF'\n cmake.configure(source_dir='../%s' % self.source_dir, build_dir='.')\n cmake.build()\n cmake.install()\n\n self.output.info(\"=== Build for arm64 ===\")\n cmake.definitions['CMAKE_INSTALL_PREFIX'] = '%s/%s' % (os.getcwd(), self.install_arm_dir)\n tools.mkdir(self.build_arm_dir)\n with tools.chdir(self.build_arm_dir):\n cmake.definitions['CMAKE_OSX_ARCHITECTURES'] = 'arm64'\n cmake.definitions['ENABLE_AVX2'] = 'OFF'\n cmake.definitions['ENABLE_AVX512'] = 'OFF'\n cmake.definitions['ENABLE_NEON'] = 'ON'\n cmake.configure(source_dir='../%s' % self.source_dir, build_dir='.')\n cmake.build()\n cmake.install()\n\n def package(self):\n if platform.system() == 'Darwin':\n libext = 'dylib'\n elif platform.system() == 'Linux':\n libext = 'so'\n else:\n raise Exception('Unknown platform \"%s\"' % platform.system())\n\n self.copy('*.h', src='%s/include/facedetection' % self.install_x86_dir, dst='include')\n\n with tools.chdir('%s/lib' % self.install_x86_dir):\n shutil.move('libfacedetection.%s' % libext, 'libfacedetection_x86.%s' % libext)\n self.run('install_name_tool -id @rpath/libfacedetection_x86.dylib libfacedetection_x86.dylib')\n self.copy('libfacedetection_x86.%s' % libext, src='%s/lib' % self.install_x86_dir, dst='lib')\n\n with tools.chdir('%s/lib' % self.install_x86avx2_dir):\n shutil.move('libfacedetection.%s' % libext, 'libfacedetection_x86avx2.%s' % libext)\n self.run('install_name_tool -id @rpath/libfacedetection_x86avx2.dylib libfacedetection_x86avx2.dylib')\n self.copy('libfacedetection_x86avx2.%s' % libext, src='%s/lib' % self.install_x86avx2_dir, dst='lib')\n\n with tools.chdir('%s/lib' % self.install_arm_dir):\n shutil.move('libfacedetection.%s' % libext, 'libfacedetection_arm.%s' % libext)\n self.run('install_name_tool -id @rpath/libfacedetection_arm.dylib libfacedetection_arm.dylib')\n self.copy('libfacedetection_arm.%s' % libext, src='%s/lib' % self.install_arm_dir, dst='lib')\n\n self.copy('%s.txt' % self.name, src=self.source_dir, dst='license')\n\n def package_info(self):\n # Don't set self.cpp_info.libs, since the client needs to decide which of the 2 libraries to use.\n self.cpp_info.includedirs = ['include']\n","sub_path":"conanfile.py","file_name":"conanfile.py","file_ext":"py","file_size_in_byte":5616,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"491085001","text":"def main():\n N, K = map(int, input().split())\n S = input()\n\n if S[K-1] == 'A':\n char = 'a'\n elif S[K-1] == 'B':\n char = 'b'\n elif S[K-1] == 'C':\n char = 'c'\n\n print(S[:K-1] + char + S[K:N])\n\n\nif __name__ == '__main__':\n main()","sub_path":"abc126/abc126_a.py","file_name":"abc126_a.py","file_ext":"py","file_size_in_byte":268,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"162908612","text":"#! 
/usr/bin/env python\n\n\"\"\"Comparison of algorithms.\n\nComparison of bfs, dfs (right and left handed), dijkstra and A* algorithms over\na list of maps. The comparison is made between the route length found, the\nnumber of cells accessed over the map and the execution time.\n\"\"\"\n\nimport os\nimport sys\nimport argparse\nimport time\nfrom collections import Counter\nfrom enum import Enum\n\nfrom utils import CharMap, UserInputException, get_route, OUTPUT_MODE, Output\nfrom bfs.bfs import bfs\nfrom dfs.dfs import dfs\nfrom dijkstra.dijkstra import dijkstra, CharMapCost\nfrom astar.astar import astar\nfrom astar.astar import CharMapCost as CharMapCost2\n\n__author__ = \"Pedro Arias Perez\"\n\n\nLOCAL_PATH = os.path.dirname(os.path.abspath(__file__)).split(\"extra\")[0]\nFILE_NAME = LOCAL_PATH + \"maps/{0}/{0}.csv\"\n\nRESULTS = []\nBFS_ACC = [0, 0, 0]\nDFS_ACCR = [0, 0, 0]\nDFS_ACCL = [0, 0, 0]\nDIJK_ACC = [0, 0, 0]\nA_ACC = [0, 0, 0]\n\n\nclass Algorithms(Enum):\n \"\"\"Algorithms compared.\"\"\"\n BFS = 0\n DFSr = 1\n DFSl = 2\n DIJK = 3\n ASTAR = 4\n\ndef run_bfs(map):\n \"\"\"\n Execs bfs silenced (no output is printed), registering start and end time.\n\n map: a map (CharMap)\n\n return: results: route length, number of cells accessed and time ([int, int, float])\n \"\"\"\n sys.stdout = open(os.devnull, 'w') # silence\n t0 = time.time()\n goalParentId = bfs(map)\n route = get_route(map.nodes, goalParentId)\n tf = time.time()\n sys.stdout = sys.__stdout__\n result = [len(route), map.n_checked, round((tf-t0), 5)]\n return result\n\ndef run_dfs(map, is_clockwise=True):\n \"\"\"\n Execs dfs silenced (no output is printed), registering start and end time.\n\n map: a map (CharMap)\n is_clockwise: right or left handed (bool)\n\n return: results: route length, number of cells accessed and time ([int, int, float])\n \"\"\"\n sys.stdout = open(os.devnull, 'w') # silence\n t0 = time.time()\n goalParentId = dfs(map, is_clockwise)\n route = get_route(map.nodes, goalParentId)\n tf = time.time()\n sys.stdout = sys.__stdout__\n result = [len(route), map.n_checked, round((tf-t0), 5)]\n return result\n\ndef run_dijkstra(map):\n \"\"\"\n Execs dijkstra silenced (no output is printed), registering start and end time.\n\n map: a map (CharMapCost)\n\n return: results: route length, number of cells accessed and time ([int, int, float])\n \"\"\"\n sys.stdout = open(os.devnull, 'w') # silence\n t0 = time.time()\n goalParentId = dijkstra(map)\n route = get_route(map.closed_nodes, goalParentId)\n tf = time.time()\n sys.stdout = sys.__stdout__\n result = [len(route), map.n_checked, round((tf-t0), 5)]\n return result\n\ndef run_astar(map):\n \"\"\"\n Execs a* silenced (no output is printed), registering start and end time.\n\n map: a map (CharMapCost)\n\n return: results: route length, number of cells accessed and time ([int, int, float])\n \"\"\"\n sys.stdout = open(os.devnull, 'w') # silence\n t0 = time.time()\n goalParentId = astar(map)\n route = get_route(map.closed_nodes, goalParentId)\n tf = time.time()\n sys.stdout = sys.__stdout__\n result = [len(route), map.n_checked, round((tf-t0), 5)]\n return result\n\ndef do_compare(map_name, start, end, only_uniform):\n \"\"\"\n Compares the uninformed algorithms in a specific map and prints the partial results.\n\n map_name: the map name (str)\n start: start point ([int, int])\n end: end point ([int, int])\n only_uniform: true to only compare BFS and DFS (bool)\n \"\"\"\n map = CharMap(FILE_NAME.format(map_name), start, end)\n\n results = []\n\n 
print(\"%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\")\n print(\"%%\\t\\t{0}\\t\\t%%\".format(map_name))\n print(\"%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\")\n print()\n print(\" \\tRoute \\t Cells \\t\")\n print(\"Alg.\\tLength\\tChecked\\t Time\")\n print(\"--------------------------------\")\n\n bfs_result = run_bfs(map)\n print(\"{0}\\t{1}\\t{2}\\t{3}\".format(\"BFS\", *bfs_result))\n BFS_ACC[0] += bfs_result[0]\n BFS_ACC[1] += bfs_result[1]\n BFS_ACC[2] += round(bfs_result[2], 5)\n results.append(bfs_result)\n\n # time.sleep(1)\n map.reset()\n\n dfsr_result = run_dfs(map)\n print(\"{0}\\t{1}\\t{2}\\t{3}\".format(\"DFSr\", *dfsr_result))\n DFS_ACCR[0] += dfsr_result[0]\n DFS_ACCR[1] += dfsr_result[1]\n DFS_ACCR[2] += round(dfsr_result[2], 5)\n results.append(dfsr_result)\n\n # time.sleep(1)\n map.reset()\n\n dfsl_result = run_dfs(map, False)\n print(\"{0}\\t{1}\\t{2}\\t{3}\".format(\"DFSl\", *dfsl_result))\n DFS_ACCL[0] += dfsl_result[0]\n DFS_ACCL[1] += dfsl_result[1]\n DFS_ACCL[2] += round(dfsl_result[2], 5)\n results.append(dfsl_result)\n\n map.reset()\n # time.sleep(1)\n\n if not only_uniform:\n map_cost = CharMapCost(FILE_NAME.format(map_name), start, end)\n\n dijkstra_result = run_dijkstra(map_cost)\n print(\"{0}\\t{1}\\t{2}\\t{3}\".format(\"DIJK\", *dijkstra_result))\n DIJK_ACC[0] += dijkstra_result[0]\n DIJK_ACC[1] += dijkstra_result[1]\n DIJK_ACC[2] += round(dijkstra_result[2], 5)\n results.append(dijkstra_result)\n\n map_cost.reset()\n # time.sleep(1)\n\n map_cost2 = CharMapCost2(FILE_NAME.format(map_name), start, end)\n\n astar_result = run_astar(map_cost2)\n print(\"{0}\\t{1}\\t{2}\\t{3}\".format(\"ASTAR\", *astar_result))\n A_ACC[0] += astar_result[0]\n A_ACC[1] += astar_result[1]\n A_ACC[2] += round(astar_result[2], 5)\n results.append(astar_result)\n\n map_cost2.reset()\n # time.sleep(1)\n\n shortest, less_cell, fastest = get_best(results)\n\n print()\n print(\"Shortest -------> {0} ({1})\".format(*shortest))\n print(\"Less checks ----> {0} ({1})\".format(*less_cell))\n print(\"Fastest --------> {0} ({1})\".format(*fastest))\n print()\n\n RESULTS.append([map_name, shortest[0], less_cell[0], fastest[0]])\n time.sleep(1)\n\ndef get_best(results):\n \"\"\"\n Gets shortest route, less cells accessed and fastest algorithm.\n\n results: Nx3 matrix, being N number of algorithms compared. 
Each row is made up of route length (int), number of cells accessed (int) and execution time (float)\n\n return: [shortest alg (str), route length (int)], [less accessed alg (str), number of accesses (int)], [fastest alg (str), time (float)]\n \"\"\"\n routes = [item[0] for item in results] # get route lengths from results matrix\n cells = [item[1] for item in results] # get number of accesses from results matrix\n times = [item[2] for item in results] # get execution times from results matrix\n\n # get list of enums (alg) with min values and format it as csv\n shortest = (\"\".join(f\"{i}, \" for i in [Algorithms(i).name for i, val in enumerate(routes) if val == min(routes)])[:-2], min(routes))\n less_cell = (\"\".join(f\"{i}, \" for i in [Algorithms(i).name for i, val in enumerate(cells) if val == min(cells)])[:-2], min(cells))\n fastest = (\"\".join(f\"{i}, \" for i in [Algorithms(i).name for i, val in enumerate(times) if val == min(times)])[:-2], min(times))\n return shortest, less_cell, fastest\n\ndef print_final_results():\n \"\"\"\n Prints final and accumulative results.\n \"\"\"\n print(\"%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\")\n print(\"%% TOTAL %%\")\n print(\"%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\")\n print()\n print(\" \\tRoute \\t Cells \\t\")\n print(\"Alg.\\tLength\\tChecked\\t Time\")\n print(\"--------------------------------\")\n print(\"{0}\\t{1}\\t{2}\\t{3}\".format(\"BFS\", *BFS_ACC))\n print(\"{0}\\t{1}\\t{2}\\t{3}\".format(\"DFSR\", *DFS_ACCR))\n print(\"{0}\\t{1}\\t{2}\\t{3}\".format(\"DFSL\", *DFS_ACCL))\n if DIJK_ACC:\n print(\"{0}\\t{1}\\t{2}\\t{3}\".format(\"DIJK\", *DIJK_ACC))\n if A_ACC:\n print(\"{0}\\t{1}\\t{2}\\t{3}\".format(\"ASTAR\", *A_ACC))\n print()\n print(\"%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\")\n print(\"%% RANKINGS %%\")\n print(\"%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\")\n print()\n print(\" LESS\")\n print(\" SHORTEST CHECKS FASTEST\")\n print(\"--------------------------------\")\n\n shortests = []\n less_cells = []\n fastests = []\n for entry in RESULTS:\n shortests.append(entry[1])\n less_cells.append(entry[2])\n fastests.append(entry[3])\n print(\"{0}\\t{1}\\t{2}\\t{3}\".format(*entry))\n print(\"--------------------------------\")\n print()\n\n short_rank = Counter(shortests).most_common(3)\n cells_rank = Counter(less_cells).most_common(3)\n fast_rank = Counter(fastests).most_common(3)\n\n print(\"\\tSHORTEST ROUTE\\t\")\n print(\"--------------------------------\")\n for i, entry in enumerate(short_rank):\n print(\"{0}º: {1} ({2})\".format(i+1, *entry))\n print()\n\n print(\"\\tLESS CELLS CHECKED\")\n print(\"--------------------------------\")\n for i, entry in enumerate(cells_rank):\n print(\"{0}º: {1} ({2})\".format(i+1, *entry))\n print()\n\n print(\"\\tFASTEST ALGORITHM\")\n print(\"--------------------------------\")\n for i, entry in enumerate(fast_rank):\n print(\"{0}º: {1} ({2})\".format(i+1, *entry))\n print()\n\n\ndef main(only_uniform):\n maps = [[\"map1\", [2, 2], [7, 2]],\n [\"map2\", [2, 2], [10, 7]],\n [\"map3\", [4, 10], [4, 14]],\n [\"map4\", [4, 10], [4, 14]],\n [\"map5\", [4, 15], [4, 9]],\n [\"map6\", [2, 2], [10, 17]],\n [\"map7\", [3, 9], [3, 15]],\n [\"map8\", [2, 2], [10, 17]],\n [\"map9\", [3, 9], [3, 15]],\n [\"map10\", [3, 9], [3, 15]],\n [\"map11\", [3, 15], [3, 9]]\n ]\n\n labs = [[\"lab1\", [2, 2], [15, 15]]]\n\n for map in maps:\n do_compare(*map, only_uniform)\n\n print_final_results()\n\n\nif __name__ == \"__main__\":\n # Command line argument parser, try: python3 compare.py -h\n parser = 
argparse.ArgumentParser(description=\"Algorithm Comparison.\")\n parser.add_argument('-u', action='store_true', help='compare only uniform algorithms')\n args = parser.parse_args()\n\n main(args.u)\n","sub_path":"extra/compare.py","file_name":"compare.py","file_ext":"py","file_size_in_byte":9960,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"158036785","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Feb 3 19:14:57 2017\n\n@author: bmitchell\n\"\"\"\nnames = [\"Dennis Ritchie\", \"Alan Kay\", \"John Backus\", \"James Gosling\"]\nnames.sort(key=lambda name: name.split()[0])\nnameString = \", \".join(names)\nprint(nameString)\n","sub_path":"NameSort.py","file_name":"NameSort.py","file_ext":"py","file_size_in_byte":252,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"456996425","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\nimport djangolettings.validators\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ]\n\n operations = [\n migrations.CreateModel(\n name='Room',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('name', models.CharField(help_text='What to call this room', unique=True, max_length=100)),\n ('slug', models.SlugField(help_text='Will be used in URLs to identify this room - must be fairly short, and should be entirely lowercase', unique=True, validators=[djangolettings.validators.validate_lowercase])),\n ('details', models.TextField(help_text='Give a brief (1 or 2 paragraph) description of this room - Markdown supported here')),\n ('extended_details', models.TextField(help_text='Any other details relevant to this room - this will only be shown in conjunction with the details above. Markdown supported here', blank=True)),\n ('seats', models.IntegerField(help_text='The number of people this room can accommodate')),\n ],\n options={\n 'ordering': ['-seats'],\n },\n ),\n ]\n","sub_path":"djangolettings/migrations/0001_initial.py","file_name":"0001_initial.py","file_ext":"py","file_size_in_byte":1329,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"344504274","text":"class Solution:\n def reverseWords(self, str):\n def reverse(s):\n tmp = \"\"\n for i in range(len(s) - 1, -1, -1):\n tmp = tmp + s[i]\n return tmp\n\n strList = str.split(\" \")\n res = []\n for i in strList:\n res.append(reverse(i))\n str2 = \" \".join(res)\n return reverse(str2)\n","sub_path":"Solutions/186. 
Reverse Words in a String II/186.py","file_name":"186.py","file_ext":"py","file_size_in_byte":368,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"540342997","text":"from celery import Celery\nfrom celery_once import QueueOnce\nfrom time import sleep\nBROKER_URL = 'redis://localhost:6379/0'\ncelery = Celery('tasks', broker=BROKER_URL)\ncelery.conf.ONCE_REDIS_URL = 'redis://localhost:6379/1'\ncelery.conf.ONCE_DEFAULT_TIMEOUT = 60 * 60\n\n\n@celery.task(base=QueueOnce)\ndef slow_task():\n sleep(30)\n return \"Done!\"\n\n\nslow_task.delay()\nslow_task.delay()\n","sub_path":"celery-once/sample.py","file_name":"sample.py","file_ext":"py","file_size_in_byte":385,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"537442291","text":"import cv2\nimport numpy as np\nimport dlib\n\nimg = cv2.imread(\"images/angie.jpg\")\nimg_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\nmask = np.zeros_like(img_gray)\n\ndetector = dlib.get_frontal_face_detector()\npredictor = dlib.shape_predictor(\"shape_predictor_68_face_landmarks.dat\")\nfaces = detector(img_gray)\nfor face in faces:\n landmarks = predictor(img_gray, face)\n landmarks_points = []\n for n in range(0, 68):\n x = landmarks.part(n).x\n y = landmarks.part(n).y\n landmarks_points.append((x, y))\n\n cv2.circle(img, (x, y), 3, (0, 0, 255), -1)\n\n points = np.array(landmarks_points, np.int32)\n convexhull = cv2.convexHull(points)\n cv2.polylines(img, [convexhull], True, (255, 0, 0), 3)\n cv2.fillConvexPoly(mask, convexhull, 255)\n\n # Delaunay triangulation\n rect = cv2.boundingRect(convexhull)\n (x, y, w, h) = rect\n cv2.rectangle(img, (x, y), (x+w, y+h), (0, 255, 0))\n subdiv = cv2.Subdiv2D(rect)\n subdiv.insert(landmarks_points)\n triangles = subdiv.getTriangleList()\n triangles = np.array(triangles, dtype=np.int32)\n\n for t in triangles:\n pt1 = (t[0], t[1])\n pt2 = (t[2], t[3])\n pt3 = (t[4], t[5])\n\n cv2.line(img, pt1, pt2, (0, 0, 255), 2)\n cv2.line(img, pt2, pt3, (0, 0, 255), 2)\n cv2.line(img, pt1, pt3, (0, 0, 255), 2)\n\n face_image_1 = cv2.bitwise_and(img, img, mask=mask)\n\ncv2.imshow(\"Image 1\", img)\n# cv2.imshow(\"Face image 1\", face_image_1)\n# cv2.imshow(\"Mask\", mask)\ncv2.waitKey(0)\ncv2.destroyAllWindows()\n","sub_path":"clase5_tp/facial-landmarks/triangulation_face.py","file_name":"triangulation_face.py","file_ext":"py","file_size_in_byte":1529,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"415892356","text":"#python3\n#Record wav data from the microphone, run a DFT on it, and display a graph\n##Change -> run the DFT automatically without the user entering a wav file name (l84)\nimport pyaudio\nimport wave\nimport numpy as np\nfrom datetime import datetime\nimport cmath as cm #module for math functions on complex numbers\nimport matplotlib.pyplot as plt\n\nchunk = 1024\nFORMAT = pyaudio.paInt16\nCHANNELS = 1\nRATE = 44100\nRECORD_SECONDS = input(\"How long? 
*second => \") #entered by the user##must be set before the stream opens, otherwise an input overflow occurs##many thanks to Prof. Inoue\nthreshold = 0.02\n\ndef wave_load(filename):\n # open wave file\n wf = wave.open(filename,'r')\n channels = wf.getnchannels() # added\n print(wf.getparams())\n\n # load wave data\n chunk_size = wf.getnframes()\n amp = (2**8) ** wf.getsampwidth() / 2\n data = wf.readframes(chunk_size) # read binary\n data = np.frombuffer(data,'int16') # convert to int\n data = data / amp # normalize amplitude\n\n return data\n\n\ndef DFT(data):\n res = []\n N = len(data)\n for k in range(N): #for each frequency\n w = cm.exp(-1j * 2 * cm.pi * k / float(N))\n X_k = 0\n for n in range(N): #take the sum of signal * weight\n X_k += data[n] * (w ** n)\n res.append(abs(X_k)) #abs() takes the absolute value\n return res\n\np = pyaudio.PyAudio()\nstream = p.open( format = FORMAT, channels = CHANNELS, rate = RATE, input = True, frames_per_buffer = chunk)\ncnt = 0\n\ndata = stream.read(chunk)\nx = np.frombuffer(data, dtype=\"int16\") / 32768.0\n\n#record the microphone output and save it as a wav file\nif x.max() > threshold: #threshold value\n\n filename = datetime.today().strftime(\"%Y%m%d%H%M%S\") + \".wav\"\n print(cnt, filename)\n\n all = []\n all.append(data)\n for i in range(0, int(RATE / chunk * int(RECORD_SECONDS))):\n data = stream.read(chunk)\n all.append(data)\n data = b''.join(all)\n\n out = wave.open(filename,'w')\n out.setnchannels(CHANNELS)\n out.setsampwidth(2)\n out.setframerate(RATE)\n out.writeframes(data)\n out.close()\n\n print(\"Saved.\")\n\n cnt += 1\n#if cnt > 5:\n# break\n\nstream.close()\np.terminate()\n\nif __name__ == '__main__':\n # load waveform data\n fs = 8000\n wave1 = wave_load(filename)\n\n # DFT. Slow, so only use part of the data\n dt1 = DFT(wave1[10000:11024])\n\n # build the frequency list\n # divide the sampling frequency by the number of samples\n frq = np.arange(1024) * fs / 1024\n\n # display the graph\n plt.subplot(2,1,1) ##<- the third argument is probably the plot number\n plt.title('name')\n plt.plot( frq, dt1) ##<- the first is the x axis, the second is the y axis\n plt.show()\n","sub_path":"RedBeryl/rcdwDFT2.py","file_name":"rcdwDFT2.py","file_ext":"py","file_size_in_byte":2798,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"421005512","text":"import requests\r\nimport re\r\nfrom bs4 import BeautifulSoup as bs\r\nfrom time import sleep\r\nimport urllib.request\r\nimport urllib.parse\r\nimport json\r\nfrom pprint import pprint\r\n\r\nminers=[]\r\nstats=[]\r\n\r\nrecord = {}\r\n\r\nfor i in range(100):\r\n\tpage='https://ardor.tools/ardor/nxt?requestType=getBlocks&firstIndex='+ str(100*i) + '&lastIndex=' + str(100*(i+1))\r\n\tpage=requests.get(page)\r\n\thtml = page.text\r\n\tdata = json.loads(html)\r\n\r\n\tfor j in range(100):\r\n\t\tkey = data[\"blocks\"][j][\"generatorRS\"]\r\n\t\tif key in record:\r\n\t\t\trecord[key] += 1\r\n\t\telse:\r\n\t\t\trecord[key] = 1\r\n\tprint(i)\r\nfor key in record:\r\n\tprint(key)\r\n\r\nfor key in record:\r\n\tprint(record[key])","sub_path":"ardor.py","file_name":"ardor.py","file_ext":"py","file_size_in_byte":647,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"458916215","text":"import getpass\nimport os\n\nimport pandas as pd\n\nfrom cdis_pipe_utils import df_util\nfrom cdis_pipe_utils import pipe_util\nfrom cdis_pipe_utils import time_util\n\ndef all_tsv_to_df(tsv_path, logger):\n logger.info('all_tsv_to_df open: %s' % tsv_path)\n data_dict = dict()\n with open(tsv_path, 'r') as tsv_open:\n i = 0\n for line in tsv_open:\n line = line.strip('\\n')\n line_split = line.split('\\t')\n data_dict[i] = line_split\n i += 1\n logger.info('data_dict=\\n%s' % data_dict)\n df = pd.DataFrame.from_dict(data_dict, 
orient='index')\n logger.info('df=\\n%s' % df)\n return df\n\ndef samtools_idxstat_to_df(uuid, bam_path, idx_outfile, logger):\n logger.info('storing idx_outfile %s to db' % idx_outfile)\n df = all_tsv_to_df(idx_outfile, logger)\n df.columns = ['NAME', 'LENGTH', 'ALIGNED_READS', 'UNALIGNED_READS']\n logger.info('df=\\n%s' % df)\n return df\n\n\ndef samtools_idxstats(uuid, bam_path, input_state, engine, logger):\n step_dir = os.getcwd()\n bam_name = os.path.basename(bam_path)\n bam_base, bam_ext = os.path.splitext(bam_name)\n idx_outfile = 'samtools_idxstats_' + bam_base + '.txt'\n if pipe_util.already_step(step_dir, 'samtools_' + bam_base + '_idxstats', logger):\n logger.info('already completed step `samtools idxstats` of: %s' % bam_path)\n else:\n logger.info('running step `samtools idxstats` of: %s' % bam_name)\n\n home_dir = os.path.join('/home', getpass.getuser()) #cwltool sets HOME to /var/spool/cwl, so need to be explicit\n samtools_path = os.path.join(home_dir, '.local', 'bin', 'samtools')\n\n cmd = [samtools_path, 'idxstats', bam_path ]\n idxstats_output = pipe_util.do_command(cmd, logger)\n with open(idx_outfile, 'w') as idx_outfile_open:\n for aline in idxstats_output.decode().format():\n idx_outfile_open.write(aline)\n\n #save time/mem to db\n df = time_util.store_time(uuid, cmd, idxstats_output, logger)\n df['bam_name'] = bam_name\n df['input_state'] = input_state\n unique_key_dict = {'uuid': uuid, 'bam_name': bam_name}\n table_name = 'time_mem_samtools_idxstats'\n df_util.save_df_to_sqlalchemy(df, unique_key_dict, table_name, engine, logger)\n pipe_util.create_already_step(step_dir, 'samtools_' + bam_base + '_idxstats', logger)\n logger.info('completed running step `samtools idxstats` of: %s' % bam_name)\n\n\n #save stats to db\n if pipe_util.already_step(step_dir, 'samtools_' + bam_base + '_idxstats_db', logger):\n logger.info('already stored `samtools idxstats` to db: %s' % idx_outfile)\n else:\n logger.info('storing `samtools idxstats` to db: %s' % idx_outfile)\n df = samtools_idxstat_to_df(uuid, bam_path, idx_outfile, logger)\n df['uuid'] = uuid\n df['bam_name'] = bam_name\n df['input_state'] = input_state\n table_name = 'samtools_idxstats'\n unique_key_dict = {'uuid': uuid, 'bam_name': bam_name}\n df_util.save_df_to_sqlalchemy(df, unique_key_dict, table_name, engine, logger)\n pipe_util.create_already_step(step_dir, 'samtools_' + bam_base + '_idxstats_db', logger)\n logger.info('completed storing `samtools idxstats` to db: %s' % idx_outfile)\n return\n","sub_path":"samtools_tool/tools/samtools_idxstats.py","file_name":"samtools_idxstats.py","file_ext":"py","file_size_in_byte":3304,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"365510465","text":"class Point:\n\n\n def assign(self, x, y, z):\n self.x = x\n self.y = y\n self.z = z\n\n\n def printPoint(self):\n print(self.x, self.y, self.z)\n\n\np1 = Point()\np1.assign(2, 3, 5)\np1.printPoint()\n\np2 = Point()\np2.assign(6, 2, -4)\np2.printPoint()\n","sub_path":"Classes and Objects/prog03.py","file_name":"prog03.py","file_ext":"py","file_size_in_byte":269,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"550226025","text":"from django.urls import path, re_path\nfrom accounts import views\n\nurlpatterns = [\n path('', views.index, name='home'),\n path('login/', views.handleLogin, name='login'),\n path('register/', views.handleSignUp, name='signup'),\n path('faker/', views.handle_fake, name='faker1'),\n 
path('scfaker/', views.student_course_fake, name='scfaker1'),\n path('logout/', views.handleLogout, name='logout')\n # re_path(r'^run/$', views.runCode, name='run')\n ]","sub_path":"src/accounts/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":493,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"604451838","text":"from rest_framework import views, permissions\nfrom rest_framework.response import Response\nfrom django.utils.translation import ugettext_lazy as _\nfrom utils4geek.base.renderers import PlainTextRenderer\nfrom backend.settings import LOG_FILE_ABS_PATH, PROJECT_ROOT, SITE_ASSOCIATION_IOS_ROOT\nimport json\nimport mimetypes\nimport os\nimport stat\nimport re\n\nfrom django.http import ( HttpResponse,\n HttpResponseNotModified, StreamingHttpResponse)\nfrom django.utils.http import http_date, parse_http_date\nimport logging\n\nlog = logging.getLogger(__name__)\n\n\n\n# Create your views here.\n\n\nclass LogViewSet(views.APIView):\n permission_classes = (permissions.IsAdminUser,)\n\n def get(self, _):\n log_file = open(LOG_FILE_ABS_PATH, 'r')\n text = log_file.read()\n log_file.close()\n return Response(text, status=200)\n\n\nclass CredentialIOSView(views.APIView):\n \"\"\"\n IOS Credentials for universal links\n \"\"\"\n def get(self, _):\n file = open(SITE_ASSOCIATION_IOS_ROOT, 'r')\n text = json.loads(file.read())\n file.close()\n return Response(text, status=200)\n\n\nclass LogFileViewSet(views.APIView):\n permission_classes = (permissions.IsAdminUser,)\n renderer_classes = (PlainTextRenderer,)\n\n def get(self, *args, **kwargs):\n filename = self.kwargs.get(\"filename\")\n DIRNAME = os.path.join(PROJECT_ROOT, \"logs\")\n DIRNAME = os.path.join(DIRNAME, filename)\n log_file = open(DIRNAME, mode=\"r\", errors=\"ignore\")\n text = log_file.read().encode(\"utf-8\")\n log_file.close()\n return Response(text, status=200)\n\n\nclass ServeMediaFileView(views.APIView):\n renderer_classes = (PlainTextRenderer,)\n\n def get(self, *args, **kwargs):\n filename = self.kwargs.get(\"filename\")\n DIRNAME = os.path.join(PROJECT_ROOT, \"media\")\n log.info(DIRNAME)\n\n fullpath = os.path.join(DIRNAME, filename)\n\n if not os.path.exists(fullpath):\n text = _(\"File does not exist\")\n return Response(text, status=404)\n\n response = serve(self.request, fullpath)\n return response\n\n\ndef serve(request, fullpath):\n \"\"\"\n Serve static files below a given point in the directory structure.\n To use, put a URL pattern such as::\n (r'^(?P<path>.*)$', 'django.views.static.serve', {'document_root': '/path/to/my/files/'})\n in your URLconf. You must provide the ``document_root`` param. You may\n also set ``show_indexes`` to ``True`` if you'd like to serve a basic index\n of the directory. 
This index view will use the template hardcoded below,\n but if you'd like to override it, you can create a template called\n ``static/directory_index.html``.\n \"\"\"\n\n # Respect the If-Modified-Since header.\n statobj = os.stat(fullpath)\n if not was_modified_since(request.META.get('HTTP_IF_MODIFIED_SINCE'),\n statobj.st_mtime, statobj.st_size):\n return HttpResponseNotModified()\n content_type, encoding = mimetypes.guess_type(fullpath)\n content_type = content_type or 'application/octet-stream'\n ranged_file = RangedFileReader(open(fullpath, 'rb'))\n response = StreamingHttpResponse(ranged_file,\n content_type=content_type)\n response[\"Last-Modified\"] = http_date(statobj.st_mtime)\n if stat.S_ISREG(statobj.st_mode):\n size = statobj.st_size\n response[\"Content-Length\"] = size\n response[\"Accept-Ranges\"] = \"bytes\"\n # Respect the Range header.\n if \"HTTP_RANGE\" in request.META:\n try:\n ranges = parse_range_header(request.META['HTTP_RANGE'], size)\n except ValueError:\n ranges = None\n # only handle syntactically valid headers, that are simple (no\n # multipart byteranges)\n if ranges is not None and len(ranges) == 1:\n start, stop = ranges[0]\n if stop > size:\n # requested range not satisfiable\n return HttpResponse(status=416)\n ranged_file.start = start\n ranged_file.stop = stop\n response[\"Content-Range\"] = \"bytes %d-%d/%d\" % (start, stop - 1, size)\n response[\"Content-Length\"] = stop - start\n response.status_code = 206\n if encoding:\n response[\"Content-Encoding\"] = encoding\n return response\n\n\ndef was_modified_since(header=None, mtime=0, size=0):\n \"\"\"\n Was something modified since the user last downloaded it?\n header\n This is the value of the If-Modified-Since header. If this is None,\n I'll just return True.\n mtime\n This is the modification time of the item we're talking about.\n size\n This is the size of the item we're talking about.\n \"\"\"\n try:\n if header is None:\n raise ValueError\n matches = re.match(r\"^([^;]+)(; length=([0-9]+))?$\", header,\n re.IGNORECASE)\n header_mtime = parse_http_date(matches.group(1))\n header_len = matches.group(3)\n if header_len and int(header_len) != size:\n raise ValueError\n if int(mtime) > header_mtime:\n raise ValueError\n except (AttributeError, ValueError, OverflowError):\n return True\n return False\n\n\ndef parse_range_header(header, resource_size):\n \"\"\"\n Parses a range header into a list of two-tuples (start, stop) where `start`\n is the starting byte of the range (inclusive) and `stop` is the ending byte\n position of the range (exclusive).\n Returns None if the value of the header is not syntactically valid.\n \"\"\"\n if not header or '=' not in header:\n return None\n\n ranges = []\n units, range_ = header.split('=', 1)\n units = units.strip().lower()\n\n if units != \"bytes\":\n return None\n\n for val in range_.split(\",\"):\n val = val.strip()\n if '-' not in val:\n return None\n\n if val.startswith(\"-\"):\n # suffix-byte-range-spec: this form specifies the last N bytes of an\n # entity-body\n start = resource_size + int(val)\n if start < 0:\n start = 0\n stop = resource_size\n else:\n # byte-range-spec: first-byte-pos \"-\" [last-byte-pos]\n start, stop = val.split(\"-\", 1)\n start = int(start)\n # the +1 is here since we want the stopping point to be exclusive, whereas in\n # the HTTP spec, the last-byte-pos is inclusive\n stop = int(stop)+1 if stop else resource_size\n if start >= stop:\n return None\n\n ranges.append((start, stop))\n\n return ranges\n\n\nclass RangedFileReader:\n 
\"\"\"\n Wraps a file like object with an iterator that runs over part (or all) of\n the file defined by start and stop. Blocks of block_size will be returned\n from the starting position, up to, but not including the stop point.\n \"\"\"\n block_size = 8192\n def __init__(self, file_like, start=0, stop=float(\"inf\"), block_size=None):\n self.f = file_like\n self.block_size = block_size or RangedFileReader.block_size\n self.start = start\n self.stop = stop\n\n def __iter__(self):\n self.f.seek(self.start)\n position = self.start\n while position < self.stop:\n data = self.f.read(min(self.block_size, self.stop - position))\n if not data:\n break\n\n yield data\n position += self.block_size","sub_path":"zonia-backend-master/utils4geek/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":7537,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"492769342","text":"# -*- coding: utf-8 -*-\r\n\r\nfrom __future__ import unicode_literals\r\nfrom common import get_regs, get_compile_regs\r\nimport re\r\nimport os\r\n\r\nREGS = get_compile_regs(get_regs('equity'))\r\nDICT_PATH = './dict/'\r\n\r\n\r\ndef _get_module_path(path):\r\n real_path = os.path.join(os.getcwd(), os.path.dirname(__file__), path)\r\n return os.path.normpath(real_path)\r\n\r\n\r\ndef get_equity(document, mode=2):\r\n\r\n if int(mode) == 1:\r\n return equity_2(document, 1.5)\r\n\r\n if int(mode) == 2:\r\n return equity_2(document)\r\n\r\n\r\ndef equity_1(document):\r\n '''\r\n Extract the equity stake percentage\r\n :param document: chunk containing sentence, words, labels\r\n :return: dict {\"status\":1,\"equity\":[10%]} or {\"status\":0,\"equity\":[20%]} => 1 means keyword-based extraction, 0 means full-text extraction\r\n '''\r\n global REGS\r\n equity = []\r\n # equity tag\r\n equityTag = re.compile(u\"(\\d+)[%%]+\")\r\n # sliding window\r\n window = 3\r\n for sentence in document.sentences:\r\n # first locate the position of the percentage\r\n hasEquity = re.search(equityTag, sentence.raw)\r\n if hasEquity:\r\n # start and end of the match\r\n start, end = hasEquity.span()\r\n if 9 < int(hasEquity.group(1)) < 40:\r\n\r\n if start < window:\r\n start = window\r\n if end > len(sentence.raw) - window:\r\n end = len(sentence.raw) - window\r\n # check, in priority order, whether the window words contain equity keywords\r\n for reg in REGS:\r\n if re.search(reg, sentence.raw[start - window: end + window]):\r\n return {\"status\": 1, \"equity\": hasEquity.group()}\r\n\r\n # if no keyword is detected\r\n equity.append(hasEquity.group())\r\n\r\n return {\"status\": 0, \"equity\": equity}\r\n\r\n\r\ndef equity_2(document, baseLine=0.0):\r\n '''\r\n Extract the equity ratio\r\n :param document:\r\n :return:\r\n '''\r\n bagofWords = load_bagWords('equity')\r\n windowWord = cutWindowWord(document)\r\n\r\n reg = re.compile(u'(\\d+%)')\r\n result = []\r\n for words in windowWord:\r\n\r\n regCont = re.search(reg, ''.join(words))\r\n if not regCont:\r\n continue\r\n tagTemp = regCont.group()\r\n\r\n if len(tagTemp) == 5:\r\n tagCont = '%d~%d%s' % (int(tagTemp[:2]), int(tagTemp[2:4]), '%')\r\n elif len(tagTemp) == 4:\r\n tagCont = '%d~%d%s' % (int(tagTemp[:1]), int(tagTemp[1:3]), '%')\r\n else:\r\n tagCont = tagTemp\r\n weight = 0.0\r\n for word in words:\r\n if word in bagofWords:\r\n weight += bagofWords[word]\r\n if weight < baseLine:\r\n continue\r\n temp = (tagCont, weight)\r\n result.append(temp)\r\n\r\n return result\r\n\r\n\r\ndef load_bagWords(tag):\r\n '''\r\n Load the bag of words\r\n :param tag:\r\n :return:\r\n '''\r\n bagofWords = {}\r\n with open(_get_module_path('%s%s_bagWords.txt' % (DICT_PATH, tag)), 'rb') as f:\r\n for line in f.readlines():\r\n cont = line.decode('utf-8').strip().split()\r\n try:\r\n 
bagofWords[cont[0]] = float(cont[1])\r\n except:\r\n continue\r\n return bagofWords\r\n\r\n\r\ndef cutWindowWord(document):\r\n '''\r\n Extract window words\r\n :param document:\r\n :return:\r\n '''\r\n result = []\r\n stopWords = load_stopwords(_get_module_path('%sstopwords.txt' % DICT_PATH))\r\n wordList = []\r\n for sentence in document.sentences:\r\n wordList.extend([w.raw for w in sentence.words])\r\n wordList, index = findIndex(wordList, [u'%'])\r\n windows = 10\r\n for i in xrange(len(index)):\r\n start = index[i] - windows\r\n end = index[i] + windows\r\n\r\n if start < 0:\r\n start = 0\r\n if '%' in wordList[start:index[i] - 1]:\r\n start = index[i - 1] + 1\r\n\r\n if end > len(wordList):\r\n end = len(wordList)\r\n if '%' in wordList[index[i] + 1:end]:\r\n end = index[i + 1] - 1\r\n # exclude plain 10%\r\n try:\r\n if float(wordList[index[i] - 1]) % 1 == 0 and 9.0 < float(wordList[index[i] - 1]) < 40.0:\r\n\r\n temp = removeStopWords(wordList[start:end], stopWords)\r\n result.append(temp)\r\n except:\r\n continue\r\n\r\n return result\r\n\r\n\r\ndef findIndex(inList, value):\r\n\r\n index = [i for i, a in enumerate(inList) if a in value]\r\n # handle the 10%-20% case\r\n Del = []\r\n for i in xrange(len(index)):\r\n if i > 0:\r\n if index[i] - index[i - 1] < 4:\r\n Del.append(i - 1)\r\n inList[index[i - 1]] = '-'\r\n while Del:\r\n index.pop(Del[-1])\r\n Del.remove(Del[-1])\r\n return inList, index\r\n\r\n\r\ndef removeStopWords(inList, stopWords):\r\n wordList = []\r\n for word in list(inList):\r\n word = word.strip()\r\n if word != '' and word not in stopWords:\r\n wordList.append(word)\r\n return wordList\r\n\r\n\r\ndef load_stopwords(path):\r\n '''\r\n Load stopwords\r\n :param path:\r\n :return:\r\n '''\r\n stopwords = set()\r\n with open(path, 'rb') as f:\r\n for line in f:\r\n word = line.strip().decode('utf-8')\r\n stopwords.add(word)\r\n return stopwords\r\n","sub_path":"ibot-kernel/ibot/extractor/equity.py","file_name":"equity.py","file_ext":"py","file_size_in_byte":5221,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"16685751","text":"# Universal Power System Controller\n# USAID Middle East Water Security Initiative\n#\n# Developed by: Nathan Webster\n# Primary Investigator: Nathan Johnson\n#\n# Version History (mm_dd_yyyy)\n# 1.00 07_13_2018_NW\n#\n######################################################\n# Import Libraries\nimport logging\nimport sqlite3\nimport sched\nimport time\nimport zipfile\nimport os\nfrom datetime import datetime\nimport Parameters\nfrom CSV_Manager import *\n\n# Setup event logging\nlogger = logging.getLogger(__name__)\nlogger.setLevel(logging.INFO)\nfh = logging.FileHandler('UPS_Event.log')\nch = logging.StreamHandler()\nformatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')\nfh.setFormatter(formatter)\nch.setFormatter(formatter)\nlogger.addHandler(fh)\nlogger.addHandler(ch)\n\ndef Archive_Controller_Main(arg):\n\n # Setup Archive scheduler\n Archive_Sched = sched.scheduler(time.time, time.sleep)\n\n while True:\n # Schedule the SQL controller and run\n Archive_Sched.enter(Parameters.Archive_Controller_Interval, 1, Archive_Controller, (\"\",))\n Archive_Sched.run()\n\ndef Archive_Controller(arg):\n\n CSV_Write()\n\n # Create archive directory for current time\n Current_time = datetime.now()\n directory = '/home/pi/datalogger/Archive/'\n if not os.path.exists(directory):\n os.makedirs(directory)\n\n # Create archive\n zf_name = str(Current_time)+'_Archive'\n zf = zipfile.ZipFile(zf_name,'w')\n\n # Write files to zip archive 
and compress\n try:\n with zipfile.ZipFile(zf_name,'w') as zf:\n zf.write('UPS_DB.sql', compress_type=zipfile.ZIP_STORED)# Write sql database to zip file\n zf.write('UPS_Messages.log', compress_type=zipfile.ZIP_STORED)# Write log to zip file\n zf.write('UPS_DB.csv', compress_type=zipfile.ZIP_STORED)# Write csv file to zip file\n\n except:\n logger.error('Could not write files to zip archive')\n\n try:\n os.remove('UPS_Messages.log')# Delete log file\n os.remove('UPS_DB.csv')# Delete csv file\n\n try:\n conn = sqlite3.connect('UPS_DB.db')\n c = conn.cursor()\n c.execute(\"DELETE FROM UPS_DB WHERE Date <= date('now','-1 day')\")# Delete sql records older than one day\n conn.close()\n except:\n logger.error('Could not update SQL database')\n except:\n logger.error('Could not delete log and csv files')","sub_path":"dev/07_14_2018/Archive_Controller.py","file_name":"Archive_Controller.py","file_ext":"py","file_size_in_byte":2359,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"254348211","text":"from types import MethodType\nimport numpy as np\n\nfrom UQpy.Distributions.baseclass import Copula\nfrom UQpy.Distributions.baseclass import DistributionContinuous1D, DistributionND, DistributionDiscrete1D\n\n\n########################################################################################################################\n# Multivariate Continuous Distributions\n########################################################################################################################\n\nclass JointCopula(DistributionND):\n \"\"\"\n Define a joint distribution from a list of marginals and a copula to introduce dependency. ``JointCopula`` is a\n child class of ``DistributionND``.\n\n **Inputs:**\n\n * **marginals** (`list`):\n `list` of ``DistributionContinuous1D`` or ``DistributionDiscrete1D`` objects that define the marginals\n\n * **copula** (`object`):\n object of class ``Copula``\n\n A ``JointCopula`` distribution may possess ``cdf``, ``pdf`` and ``log_pdf`` methods if the copula allows for it\n (i.e., if the copula possesses the necessary ``evaluate_cdf`` and ``evaluate_pdf`` methods).\n\n The parameters of the distribution are only stored as attributes of the marginals/copula objects. However, the\n ``get_params`` and ``update_params`` methods can still be used for the joint. Note that each parameter of the joint\n is assigned a unique string identifier as `key_index` - where `key` is the parameter name and `index` the index of\n the marginal (e.g., location parameter of the 2nd marginal is identified as `loc_1`); and `key_c` for copula\n parameters.\n\n \"\"\"\n def __init__(self, marginals, copula):\n super().__init__()\n self.order_params = []\n for i, m in enumerate(marginals):\n self.order_params.extend([key + '_' + str(i) for key in m.order_params])\n self.order_params.extend([key + '_c' for key in copula.order_params])\n\n # Check and save the marginals\n self.marginals = marginals\n if not (isinstance(self.marginals, list)\n and all(isinstance(d, (DistributionContinuous1D, DistributionDiscrete1D)) for d in self.marginals)):\n raise ValueError('Input marginals must be a list of 1d continuous Distribution objects.')\n\n # Check the copula. 
Also, all the marginals should have a cdf method\n self.copula = copula\n if not isinstance(self.copula, Copula):\n raise ValueError('The input copula should be a Copula object.')\n if not all(hasattr(m, 'cdf') for m in self.marginals):\n raise ValueError('All the marginals should have a cdf method in order to define a joint with copula.')\n self.copula.check_marginals(marginals=self.marginals)\n\n # Check if methods should exist; if yes, define them and bind them to the object\n if hasattr(self.copula, 'evaluate_cdf'):\n def joint_cdf(dist, x):\n x = dist._check_x_dimension(x)\n # Compute cdf of independent marginals\n unif = np.array([marg.cdf(x[:, ind_m]) for ind_m, marg in enumerate(dist.marginals)]).T\n # Compute copula\n cdf_val = dist.copula.evaluate_cdf(unif=unif)\n return cdf_val\n self.cdf = MethodType(joint_cdf, self)\n\n if all(hasattr(m, 'pdf') for m in self.marginals) and hasattr(self.copula, 'evaluate_pdf'):\n def joint_pdf(dist, x):\n x = dist._check_x_dimension(x)\n # Compute pdf of independent marginals\n pdf_val = np.prod(np.array([marg.pdf(x[:, ind_m])\n for ind_m, marg in enumerate(dist.marginals)]), axis=0)\n # Add copula term\n unif = np.array([marg.cdf(x[:, ind_m]) for ind_m, marg in enumerate(dist.marginals)]).T\n c_ = dist.copula.evaluate_pdf(unif=unif)\n return c_ * pdf_val\n self.pdf = MethodType(joint_pdf, self)\n\n if all(hasattr(m, 'log_pdf') for m in self.marginals) and hasattr(self.copula, 'evaluate_pdf'):\n def joint_log_pdf(dist, x):\n x = dist._check_x_dimension(x)\n # Compute pdf of independent marginals\n logpdf_val = np.sum(np.array([marg.log_pdf(x[:, ind_m])\n for ind_m, marg in enumerate(dist.marginals)]), axis=0)\n # Add copula term\n unif = np.array([marg.cdf(x[:, ind_m]) for ind_m, marg in enumerate(dist.marginals)]).T\n c_ = dist.copula.evaluate_pdf(unif=unif)\n return np.log(c_) + logpdf_val\n self.log_pdf = MethodType(joint_log_pdf, self)\n\n def get_params(self):\n \"\"\"\n Return the parameters of a ``Distributions`` object.\n\n To update the parameters of a ``JointInd`` or a ``JointCopula`` distribution, each parameter is assigned a\n unique string identifier as `key_index` - where `key` is the parameter name and `index` the index of the\n marginal (e.g., location parameter of the 2nd marginal is identified as `loc_1`).\n\n **Output/Returns:**\n\n * (`dict`):\n Parameters of the distribution.\n\n \"\"\"\n params = {}\n for i, m in enumerate(self.marginals):\n for key, value in m.get_params().items():\n params[key + '_' + str(i)] = value\n for key, value in self.copula.get_params().items():\n params[key + '_c'] = value\n return params\n\n def update_params(self, **kwargs):\n \"\"\"\n Update the parameters of a ``Distributions`` object.\n\n To update the parameters of a ``JointInd`` or a ``JointCopula`` distribution, each parameter is assigned a\n unique string identifier as `key_index` - where `key` is the parameter name and `index` the index of the\n marginal (e.g., location parameter of the 2nd marginal is identified as `loc_1`).\n\n **Input:**\n\n * keyword arguments:\n Parameters to be updated\n\n \"\"\"\n # check arguments\n all_keys = self.get_params().keys()\n # update the marginal parameters\n for key_indexed, value in kwargs.items():\n if key_indexed not in all_keys:\n raise ValueError('Unrecognized keyword argument ' + key_indexed)\n key_split = key_indexed.split('_')\n key, index = '_'.join(key_split[:-1]), key_split[-1]\n if index == 'c':\n self.copula.params[key] = value\n else:\n self.marginals[int(index)].params[key] = 
value","sub_path":"src/UQpy/Distributions/collection/joint_copula.py","file_name":"joint_copula.py","file_ext":"py","file_size_in_byte":6615,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"167045987","text":"from tkinter import *\nfrom tkinter import scrolledtext\n\nclass Root(Tk):\n def __init__(self):\n super(Root, self).__init__()\n self.title(\"Tkinter Scroll Text\")\n self.minsize(640,400)\n\n self.scrollTextCtr()\n\n def scrollTextCtr(self):\n scroll_w = 30\n scroll_h = 10\n\n scrollText = scrolledtext.ScrolledText(self, width = scroll_w, height = scroll_h, wrap=WORD)\n scrollText.grid(column=0, row=0, columnspan=3)\nroot = Root()\nroot.mainloop()","sub_path":"tk_lesson09.py","file_name":"tk_lesson09.py","file_ext":"py","file_size_in_byte":495,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"268681776","text":"# encoding: utf-8\n\nimport sys\nsys.path.append('..')\nimport pandas as pd\nimport numpy as np\nimport datetime\nfrom common.util import targets\n\n\nclass transfer_iter():\n # 初期化\n def __init__(self, _df_buy, _df_act, _df_pln, _calc_date=None, _calc_factor=None, _atime_flag=None, _verbose=False):\n\n self.verbose_ = _verbose\n self.calc_factor_ = _calc_factor\n self.atime_flag_ = _atime_flag\n\n # Deep Copy\n self.df_act = _df_act.copy()\n self.df_pln = _df_pln.copy()\n self.df_buy = _df_buy.copy()\n\n if self.atime_flag_ == 'Aタイムなし':\n self.df_pln = self.df_pln.loc[self.df_pln.timerank != 'A',]\n self.df_buy = self.df_buy.loc[self.df_buy.timerank != 'A',]\n\n # Data Type Formatting\n self.df_act.date = pd.to_datetime(self.df_act.date)\n self.df_pln.date = pd.to_datetime(self.df_pln.date)\n self.df_buy.date = pd.to_datetime(self.df_buy.date)\n self.df_pln.waku_no = self.df_pln.waku_no.astype(str)\n self.df_buy.waku_no = self.df_buy.waku_no.astype(str)\n\n if _calc_date is not None:\n self.calc_date_ = datetime.datetime.strptime(_calc_date, \"%Y/%m/%d\")\n else:\n self.calc_date_ = self.df_act.date.max()\n self.calc_date_ = datetime.datetime.strptime(self.calc_date_, \"%Y/%m/%d\")\n\n if self.verbose_ is True:\n print('| Set calculation date with {}'.format(self.calc_date_))\n\n # 計算結果カラムの追加\n if len(self.df_pln.columns) == 16:\n self.df_pln['timing'] = self.calc_factor_\n self.df_pln['atime_flag'] = self.atime_flag_\n self.df_pln['brand_before'] = self.df_pln.brand\n self.df_pln['brand_after'] = self.df_pln.brand\n\n # ブランドリスト、1Wスキップ期間も含む /////////// 前後いれかえ ////////////\n self.brand_list_ = self.df_pln.brand_after.unique()\n\n # 予測GRPデータによる達成率の計算には7日間の準備期間を含める\n # self.df_pln = self.df_pln.loc[self.df_pln.date > self.calc_date_,]////////////\n\n\n # if self.verbose is True:\n # print('| List of brands: {}'.format(self.brand_list_))\n\n\n # ブランドごとの達成率を計算する関数、要素は世帯GRPベースで、達成率(%)=(実績GRP+予測GRP)/(買付GRP)\n # 実績GRP : from self.df_act(1週目終了後、2週目終了後のそれぞれのアクチュアル)\n # 予測GRP : from self.df_pln(ブランド変更のたびにブランド情報を再帰的に更新する)\n # 買付GRP : from self.df_buy(初期時点の買付GRP)\n def calc_achive_rate(self):\n self.ach_dict_ = {}\n self.avg_ach_rt_ = 0.0\n a_, b_ = 0, 0\n\n # 達成率の計算には1W飛ばさない'df_pln'を使う\n for i, b in enumerate(self.brand_list_):\n grp_act_ = self.df_act.loc[(self.df_act.target == '世帯') * (self.df_act.brand == b), 'actual_grp'].sum()\n grp_pln_ = self.df_pln.loc[(self.df_pln.target == '世帯') * (self.df_pln.brand_after == b), 'plan_grp'].sum()\n grp_buy_ = self.df_buy.loc[(self.df_buy.target == '世帯') * (self.df_buy.brand == b), 'buy_grp'].sum()\n\n try:\n self.ach_dict_[b] = 
(grp_act_ + grp_pln_) / grp_buy_\r\n a_ += grp_act_ + grp_pln_\r\n b_ += grp_buy_\r\n except:\r\n self.ach_dict_[b] = 0\r\n\r\n # a_ += grp_act_ + grp_pln_\r\n # b_ += grp_buy_\r\n\r\n # Brand with the maximum achievement rate\r\n if i == 0:\r\n self.max_brand_ = b\r\n elif self.ach_dict_[b] > self.ach_dict_[self.max_brand_]:\r\n self.max_brand_ = b\r\n\r\n # Brand with the minimum achievement rate\r\n if i == 0:\r\n self.min_brand_ = b\r\n elif self.ach_dict_[b] < self.ach_dict_[self.min_brand_]:\r\n self.min_brand_ = b\r\n\r\n # Identify the brands with the max/min achievement rates (this approach does not work well...)\r\n # self.max_brand_ = max(self.ach_dict_)\r\n # self.min_brand_ = min(self.ach_dict_)\r\n\r\n # self.ach_dict_['max_brand'] = self.max_brand_\r\n # self.ach_dict_['min_brand'] = self.min_brand_\r\n # self.ach_dict_['max_target'] = targets[self.max_brand_]\r\n\r\n # Compute the average (it does not change)\r\n try:\r\n self.avg_ach_rt_ = a_ / b_\r\n except:\r\n self.avg_ach_rt_ = 0\r\n\r\n return self\r\n\r\n\r\n # Check whether deviations from the average exceed the threshold\r\n def validation(self, _threshold=0.05):\r\n self.threshold_ = _threshold\r\n self.volatility_ = []\r\n self.validation_ = None\r\n\r\n for k, v in self.ach_dict_.items():\r\n vol = v - self.avg_ach_rt_\r\n self.volatility_.append(vol)\r\n\r\n if self.verbose_ is True:\r\n print('| - {} {} ({})'.format(k, round(v * 100, 1), round(vol * 100, 1)))\r\n\r\n self.vol_max_ = max(self.volatility_)\r\n self.vol_min_ = min(self.volatility_)\r\n self.vol_range_ = self.vol_max_ - self.vol_min_\r\n self.validation_ = 'OK' if self.vol_range_ < self.threshold_ else 'NG'\r\n\r\n print('| AVG ACH: {}, VOL_MAX: {}, VOL_MIN: {}, RANGE: {}, VALID: {}'.format(\r\n round(self.avg_ach_rt_ * 100, 1),\r\n round(self.vol_max_ * 100, 1),\r\n round(self.vol_min_ * 100, 1),\r\n round(self.vol_range_ * 100, 1),\r\n self.validation_))\r\n\r\n return self\r\n\r\n\r\n # Compute the target content rate\r\n def calc_content_rate(self):\r\n\r\n if self.verbose_ is True:\r\n print('| Brand with max achievement rate: {} (target: {})'.format(self.max_brand_, targets[self.max_brand_]))\r\n\r\n self.cont_rt_ = {}\r\n self.waku_max_ = None\r\n\r\n # Restrict to the brand with the max achievement rate; brand changes apply from one week after the calculation base date onward\r\n skip_1w = self.df_pln.date > self.calc_date_ + datetime.timedelta(days=7)\r\n max_brd = self.df_pln.brand_after == self.max_brand_\r\n\r\n df_mb = self.df_pln.loc[skip_1w * max_brd, ['target','brand_after','waku_no','plan_grp']]\r\n\r\n waku_list = df_mb.waku_no.unique()\r\n waku_list = [\"0122\",\"0124\"]\r\n\r\n df_mb_fam = df_mb.loc[df_mb.target == '世帯',]\r\n df_mb_tar = df_mb.loc[df_mb.target == targets[self.max_brand_],]\r\n\r\n try:\r\n # Target content rate of this brand before the brand change\r\n self.cont_rt_['all'] = df_mb_tar.loc[:,'plan_grp'].sum() / df_mb_fam.loc[:,'plan_grp'].sum()\r\n except:\r\n # If it cannot be computed, fall back to the maximum value 1\r\n self.cont_rt_['all'] = 1.0\r\n\r\n # Target content rate of this brand when each slot is excluded\r\n for i, w in enumerate(waku_list):\r\n try:\r\n self.cont_rt_[w] = df_mb_tar.loc[df_mb_tar.waku_no != w, 'plan_grp'].sum() \\\r\n / df_mb_fam.loc[df_mb_fam.waku_no != w, 'plan_grp'].sum()\r\n except:\r\n self.cont_rt_[w] = 1.0\r\n\r\n # Find the slot No whose exclusion makes this brand's target content rate most conservative (largest)\r\n if i == 0:\r\n self.waku_max_ = w\r\n elif self.cont_rt_[w] > self.cont_rt_[self.waku_max_]:\r\n self.waku_max_ = w\r\n\r\n # Sort\r\n # self.cont_rt_ = dict(sorted(self.cont_rt_.items(), key=lambda x:x[0]))\r\n\r\n return self.max_brand_, self.waku_max_, self.cont_rt_[self.waku_max_]\r\n\r\n\r\n # Brand change\r\n def replace_brand(self, _replace=False):\r\n\r\n if self.verbose_ is True:\r\n print('| We should replace the brand on {} ({} -> {})'.format(self.waku_max_, self.max_brand_, self.min_brand_))\r\n\r\n # For the slot found by calc_content_rate, change the brand to the one with the minimum achievement rate: max brand -> min brand\r\n self.df_pln_wk = self.df_pln.copy()\r\n\r\n # Identify the records to change\r\n skip_1w = self.df_pln_wk.date > self.calc_date_ + datetime.timedelta(days=7)\r\n max_brd = self.df_pln_wk.brand_after 
== self.max_brand_\r\n max_wak = self.df_pln_wk.waku_no == self.waku_max_\r\n ref_flg = skip_1w * max_brd * max_wak\r\n\r\n self.df_pln_wk.loc[ref_flg, 'brand_after'] = self.min_brand_\r\n # Only the 1-week-skipped portion is subject to the brand change\r\n # if ref_flg is True:\r\n # self.df_pln_wk.loc[ref_flg, 'brand_after'] = self.min_brand_\r\n # else:\r\n # print('| There is no waku that should be replaced.')\r\n\r\n return self.df_pln_wk, ref_flg\r\n","sub_path":"common/func_k.py","file_name":"func_k.py","file_ext":"py","file_size_in_byte":8749,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"300264321","text":"transiçõesV = []\r\ntransiçõesP = []\r\nlistageral = []\r\nsoma = 0\r\nsomaV = 0\r\nsomaP = 0\r\nmarcador = 0\r\nwhile marcador < 16:\r\n transição = input(\"what is the transfer type\").lower()\r\n if transição == str(transição) and (transição == \"v\" or transição == \"p\"):\r\n valor = int(input(\"enter the transfer amount:\"))\r\n if transição == \"p\":transiçõesP += [valor] ; somaP += valor\r\n elif transição == \"v\":transiçõesV += [valor] ; somaV += valor\r\n marcador += 1\r\n soma += valor\r\n listageral += [valor]\r\n maior = valor\r\n menor = valor\r\n else:print(\"invalid transfer type\")\r\nfor i in listageral:\r\n if maior < i:maior = i\r\n elif menor > i:menor = i\r\n\r\nprint(\"values transferred via V:{}, and sum {}\".format(transiçõesV,somaV))\r\nprint(\"values transferred via P:{}, and sum {}\".format(transiçõesP,somaP))\r\nprint(\"total sum across all methods:{}\".format(somaV+somaP))\r\nprint(\"smallest value detected:{}\".format(menor))\r\nprint(\"largest value detected:{}\".format(maior))","sub_path":"algoritimo 8.py","file_name":"algoritimo 8.py","file_ext":"py","file_size_in_byte":1093,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"484266988","text":"import random\nimport unittest\n\nfrom TM1py.Objects import MDXView\nfrom TM1py.Services import TM1Service\nfrom TM1py.Utils import Utils\n\n# Configuration for tests\naddress = 'localhost'\nport = 8001\nuser = 'admin'\npwd = 'apple'\nssl = False\ndimension_prefix = 'TM1py_unittest_dimension_{}'\n\n\nclass TestOtherMethods(unittest.TestCase):\n tm1 = TM1Service(address=address, port=port, user=user, password=pwd, ssl=ssl)\n\n def test1_execute_mdx(self):\n cube_names = self.tm1.cubes.get_all_names()\n cube_name = cube_names[random.randrange(0, len(cube_names))]\n _, public_views = self.tm1.cubes.views.get_all(cube_name=cube_name)\n # if no views on cube. Recursion\n if len(public_views) == 0:\n self.test1_execute_mdx()\n else:\n # random public view on random cube\n view = public_views[random.randrange(0, len(public_views))]\n # if random view is MDXView. Recursion\n if isinstance(view, MDXView):\n self.test1_execute_mdx()\n else:\n # if native view has no dimensions on the columns. 
Recursion\n if len(view._columns) == 0:\n self.test1_execute_mdx()\n else:\n # sum up all numeric cells in Native View\n data_native_view = self.tm1.cubes.cells.get_view_content(cube_name, view.name, private=False)\n sum_native_view = sum(\n [float(cell['Value']) for cell in data_native_view.values() if str(cell['Value']).isdigit()])\n\n # get mdx from native view\n mdx = view.as_MDX\n # sum up all numeric cells in the response of the mdx query\n data_mdx = self.tm1.cubes.cells.execute_mdx(mdx)\n sum_mdx = sum([float(cell['Value']) for cell in data_mdx.values() if str(cell['Value']).isdigit()])\n\n # test it !\n self.assertEqual(sum_mdx, sum_native_view)\n\n def test2_read_cube_name_from_mdx(self):\n all_cube_names = self.tm1.cubes.get_all_names()\n all_cube_names_normalized = [cube_name.upper().replace(\" \", \"\") for cube_name in all_cube_names]\n for cube_name in all_cube_names:\n private_views, public_views = self.tm1.cubes.views.get_all(cube_name)\n for view in private_views + public_views:\n mdx = view.MDX\n cube_name = Utils.read_cube_name_from_mdx(mdx)\n self.assertIn(cube_name, all_cube_names_normalized)\n\n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"Tests/Other.py","file_name":"Other.py","file_ext":"py","file_size_in_byte":2613,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"91132796","text":"# Imports\nimport sqlite3\n\n# Queries\nquery1 = 'SELECT COUNT(character_id) \\\n FROM charactercreator_character;'\nquery2a = 'SELECT COUNT(character_ptr_id) \\\n FROM charactercreator_mage;'\nquery2b = 'SELECT COUNT(character_ptr_id) \\\n FROM charactercreator_thief;'\nquery2c = 'SELECT COUNT(character_ptr_id) \\\n FROM charactercreator_cleric;'\nquery2d = 'SELECT COUNT(character_ptr_id) \\\n FROM charactercreator_fighter;'\nquery2e = 'SELECT COUNT(mage_ptr_id) \\\n FROM charactercreator_necromancer;'\nquery3 = 'SELECT COUNT(item_id) \\\n FROM armory_item;'\nquery4a = 'SELECT COUNT(name) \\\n FROM armory_item \\\n INNER JOIN armory_weapon \\\n ON armory_item.item_id = armory_weapon .item_ptr_id;'\nquery4b = 'SELECT COUNT(name) \\\n FROM armory_item \\\n WHERE item_id \\\n NOT IN \\\n (SELECT item_ptr_id \\\n FROM armory_weapon);'\nquery5 = 'SELECT cc.name, COUNT(ai.name) \\\n FROM charactercreator_character AS cc, \\\n armory_item AS ai, \\\n charactercreator_character_inventory AS cci \\\n WHERE cc.character_id = cci.character_id \\\n AND ai.item_id = cci.item_id \\\n GROUP BY ai.name \\\n ORDER BY COUNT(ai.name) DESC \\\n LIMIT 20;'\nquery6 = 'SELECT cc.name, COUNT(ai.name) \\\n FROM charactercreator_character AS cc, \\\n armory_item AS ai, \\\n charactercreator_character_inventory AS cci, \\\n armory_weapon AS aw \\\n WHERE cc.character_id = cci.character_id \\\n AND ai.item_id = cci.item_id \\\n AND ai.item_id = aw.item_ptr_id \\\n GROUP BY ai.name \\\n ORDER BY COUNT(ai.name) DESC \\\n LIMIT 20;'\nquery7 = 'SELECT AVG(COUNT) \\\n FROM \\\n (SELECT COUNT(ai.name) AS COUNT \\\n FROM charactercreator_character AS cc, \\\n armory_item AS ai, \\\n charactercreator_character_inventory AS cci \\\n WHERE cc.character_id = cci.character_id \\\n AND ai.item_id = cci.item_id \\\n GROUP BY ai.name\\\n ) ;'\nquery8 = 'SELECT AVG(COUNT) \\\n FROM \\\n (SELECT COUNT(ai.name) AS COUNT \\\n FROM charactercreator_character AS cc, \\\n armory_item AS ai, \\\n charactercreator_character_inventory AS cci, \\\n armory_weapon AS aw \\\n WHERE cc.character_id = cci.character_id \\\n AND ai.item_id = cci.item_id \\\n AND ai.item_id = 
aw.item_ptr_id \\\n GROUP BY ai.name\\\n );'\n\n\nconn = sqlite3.connect('rpg_db.sqlite3')\n\n# Execute queries\ncurs1 = conn.cursor()\nprint('Total characters =', curs1.execute(query1).fetchall())\n\ncurs2a = conn.cursor()\nprint('Total mage =', curs2a.execute(query2a).fetchall())\n\ncurs2b = conn.cursor()\nprint('Total thief =', curs2b.execute(query2b).fetchall())\n\ncurs2c = conn.cursor()\nprint('Total cleric =', curs2c.execute(query2c).fetchall())\n\ncurs2d = conn.cursor()\nprint('Total fighter =', curs2d.execute(query2d).fetchall())\n\ncurs2e = conn.cursor()\nprint('Total necromancer =', curs2e.execute(query2e).fetchall())\n\ncurs3 = conn.cursor()\nprint('Total items =', curs3.execute(query3).fetchall())\n\ncurs4a = conn.cursor()\nprint('Total items are weapons =', curs4a.execute(query4a).fetchall())\n\ncurs4b = conn.cursor()\nprint('Total items not weapons =', curs4b.execute(query4b).fetchall())\n\ncurs5 = conn.cursor()\nprint('Items each character has - top 20 =', curs5.execute(query5).fetchall())\n\ncurs6 = conn.cursor()\nprint('Weapons each character has - top 20 =', curs6.execute(query6).fetchall())\n\ncurs7 = conn.cursor()\nprint('Average items each character has =', curs7.execute(query7).fetchall())\n\ncurs8 = conn.cursor()\nprint('Average weapons each character has =', curs8.execute(query8).fetchall())\n\n# Close cursors and commit\ncurs1.close()\ncurs2a.close()\ncurs2b.close()\ncurs2c.close()\ncurs2d.close()\ncurs2e.close()\ncurs3.close()\ncurs4a.close()\ncurs4b.close()\ncurs5.close()\ncurs6.close()\ncurs7.close()\ncurs8.close()\nconn.commit()\n","sub_path":"module1-introduction-to-sql/rpg_queries.py","file_name":"rpg_queries.py","file_ext":"py","file_size_in_byte":4258,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"419640495","text":"import json\r\n\r\nimport requests\r\n\r\nimport secrets\r\n\r\n\r\ndef get_musictest(q, mtype='album'):\r\n client_key = secrets.client_key\r\n client_secret = secrets.client_secret\r\n\r\n grant_type='client_credentials'\r\n\r\n body_params={'grant_type':grant_type}\r\n #Code for OAuth starts\r\n url = 'https://accounts.spotify.com/api/token'\r\n #auth = OAuth1(client_key, client_secret)\r\n resp=requests.post(url, data=body_params, auth= (client_key, client_secret))\r\n # resp=requests.post(url, headers={'Authorization' : 'Basic ' + client_key + ':' +client_secret},\r\n # data={'grant_type':'client_credentials'})\r\n json_resp=json.loads(resp.text)\r\n token=json_resp['access_token']\r\n data=requests.get(\"https://api.spotify.com/v1/search\", headers={\"Authorization\": \"Bearer \" +token}, params={\"type\":mtype, 'q':q})#requests.get('https://api.spotify.com/v1/albums/0sNOF9WDwhWunNAHPD3Baj', headers={'authorization': 'Bearer' + token})\r\n #print(data.text)\r\n return data\r\ntest=get_musictest(q=\"Chris Brown\", mtype='artist')\r\nprint(test.text)\r\n","sub_path":"FinalprojectBhagwagar/helper.py","file_name":"helper.py","file_ext":"py","file_size_in_byte":1055,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"61852432","text":"# -*- coding: UTF-8 -*- \nimport src.util.pyMysql as pymysql\n\n\ntry:\n results = pymysql.execute_select(\"SELECT * from intbee_activity \")\n for row in results:\n name = row[2]\n print (\"name=%s\" % name)\n db = pymysql.get_db_conn()\n print(pymysql.execute_select_count_on_db(db, \"select count(*) from intbee_activity \"))\nexcept Exception as e:\n print 
(e)\n\n","sub_path":"src/demo/PyMySqlDemo.py","file_name":"PyMySqlDemo.py","file_ext":"py","file_size_in_byte":377,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"296951630","text":"# LinkedListRec class\r\n#\r\n# CSC148 Fall 2014, University of Toronto\r\n# Instructor: David Liu\r\n\"\"\"LinkedListRec class.\r\n\r\nThis is the recursive implementation of a linked list\r\nfor Week 5 of the course.\r\nNote the structural differences between this implementation\r\nand the node-based implementation of Week 4. Even though\r\nboth classes have the same public interface, how they\r\nimplement their methods are quite different!\r\n\r\nThis contains all of the linked list methods from the week:\r\nlecture material, labs, and the exercise.\r\n\"\"\"\r\n\r\n\r\nclass EmptyValue:\r\n \"\"\"Dummy class used to represent the \"first\" item\r\n of an empty list.\r\n\r\n This is created so that we can make linked lists\r\n that contain None.\r\n \"\"\"\r\n pass\r\n\r\n\r\nclass LinkedListRec:\r\n \"\"\"Linked List with a recursive implementation.\r\n\r\n Note that there is no \"Node\" class with this implementation.\r\n\r\n Attributes:\r\n - first (object): the first item stored in this list,\r\n or EmptyValue if this list is empty\r\n - rest (LinkedListRec): a list containing the other items\r\n in this list, or None if this list is empty\r\n \"\"\"\r\n\r\n def __init__(self, items):\r\n \"\"\" (LinkedListRec, list) -> NoneType\r\n\r\n Create a new linked list containing the elements in items.\r\n If list is empty, self.first initialized to EmptyValue.\r\n \"\"\"\r\n if len(items) == 0:\r\n self.first = EmptyValue\r\n self.rest = None\r\n else:\r\n self.first = items[0]\r\n self.rest = LinkedListRec(items[1:])\r\n\r\n # Non-mutating methods\r\n def is_empty(self):\r\n \"\"\" (LinkedListRec) -> bool\r\n Return True if this list is empty.\r\n \"\"\"\r\n return self.first == EmptyValue\r\n\r\n def __len__(self):\r\n \"\"\" (LinkedListRec) -> int\r\n Return the number of items stored in this list.\r\n \"\"\"\r\n if self.is_empty():\r\n return 0\r\n else:\r\n return self.rest.__len__() + 1\r\n \r\n \r\n def __getitem__(self, index):\r\n \"\"\" (LinkedListRec, int) -> object\r\n\r\n Return the item at position index in self.\r\n Raise IndexError if index is >= the length of self.\r\n \"\"\"\r\n\r\n if self.is_empty() or index >= len(self):\r\n raise IndexError\r\n elif index == 0:\r\n return self.first\r\n else:\r\n return self.rest.__getitem__(index - 1)\r\n # Or equivalently, self.rest[index - 1]\r\n\r\n def __contains__(self, item):\r\n \"\"\" (LinkedListRec, object) -> bool\r\n Return True if item is contained in this list.\r\n \"\"\"\r\n if self.is_empty():\r\n return False\r\n elif self.first == item:\r\n return True\r\n else:\r\n return self.rest.__contains__(item)\r\n \r\n\r\n # Mutating methods\r\n def remove_first(self):\r\n \"\"\" (LinkedListRec) -> NoneType\r\n\r\n Remove the first item in self.\r\n Raise an IndexError if self is empty.\r\n \"\"\"\r\n if self.is_empty():\r\n raise IndexError\r\n else:\r\n self.first = self.rest.first\r\n self.rest = self.rest.rest\r\n\r\n def remove(self, index):\r\n \"\"\" (LinkedListRec, int) -> NoneType\r\n\r\n Remove item at position index from self.\r\n Raise an IndexError if index is out of bounds.\r\n \"\"\"\r\n if self.is_empty():\r\n raise IndexError\r\n elif self.__len__() - 1 < index:\r\n raise IndexError\r\n elif index == 0:\r\n self.remove_first()\r\n else:\r\n self.rest.remove(index - 
1)\r\n\r\n def insert_first(self, item):\r\n \"\"\" (LinkedListRec, object) -> NoneType\r\n\r\n Insert item at the front of the list.\r\n Note that this should work even if the list\r\n is empty!\r\n \"\"\"\r\n temp = LinkedListRec([])\r\n temp.first = self.first\r\n temp.rest = self.rest\r\n self.first = item\r\n self.rest = temp\r\n\r\n def insert(self, index, item):\r\n \"\"\" (LinkedListRec, int, object) -> NoneType\r\n\r\n Insert item at position index in this list.\r\n Raise an IndexError if index > len(self).\r\n But note that it is possible to insert an item\r\n at the *end* of a list (when index == len(self)).\r\n \"\"\"\r\n # Hint: take a look at remove and think about\r\n # what the base cases and recursive steps are.\r\n if index > len(self):\r\n raise IndexError\r\n elif index == 0:\r\n self.insert_first(item)\r\n else:\r\n self.rest.insert(index - 1, item)\r\n","sub_path":"uoft/CSC148H1F Intro to Comp Sci/@week5_recursions/@@Lab5/linkedlistrec.py","file_name":"linkedlistrec.py","file_ext":"py","file_size_in_byte":4636,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"424396682","text":"import os\nimport csv\n\ndate = []\nrevenue = []\nprofit_loss = []\nlast_profit = 0\nbreak_line = \"------------------------------\"\n\ncsvpath = os.path.join(\".\", \"Resources\", \"budget_data.csv\")\n\nwith open(csvpath) as csvfile:\n \n csv_reader = csv.reader(csvfile, delimiter = \",\")\n\n csv_header = next(csv_reader)\n\n total_month = 0\n for row in csv_reader:\n date.append(row[0])\n revenue.append(row[1])\n pl_diff = int(row[1]) - int(last_profit)\n last_profit = row[1]\n profit_loss.append(pl_diff)\n total_month += 1\n\n total_revenue = 0\n for rev_row in revenue:\n total_revenue += int(rev_row)\n \n total_profit = 0\n for pro_row in profit_loss:\n total_profit += int(pro_row)\n \n aver_change = round(total_profit/total_month, 2)\n\n \n great_inc = max(profit_loss)\n inc_index = profit_loss.index(great_inc)\n month_inc = date[inc_index]\n\n great_dec = min(profit_loss)\n dec_index = profit_loss.index(great_dec)\n month_dec = date[dec_index]\n\n\n print(\"Financial Analysis\")\n print(break_line)\n print(\"Total Months: \" + str(total_month))\n print(\"Total Revenue: $\" + str(total_revenue))\n print(\"Total Profit: $\" + str(total_profit))\n print(\"Average Change: $\" + str(aver_change))\n print(\"Greatest Increase in Profits: \" + str(month_inc) + \" \" + str(great_inc))\n print(\"Greatest Decrease in Profits: \" + str(month_dec) + \" \" + str(great_dec))\n\noutput_result = os.path.join(\".\", \"analysis\", \"result.txt\")\n\nwith open(output_result, \"w\") as txt_file:\n txt_file.write(\"Financial Analysis\" + \"\\n\") \n txt_file.write(break_line + \"\\n\")\n txt_file.write(\"Total Months: \" + str(total_month) + \"\\n\")\n txt_file.write(\"Total Revenue: $\" + str(total_revenue) + \"\\n\")\n txt_file.write(\"Total Profit: $\" + str(total_profit) + \"\\n\")\n txt_file.write(\"Average Change: $\" + str(aver_change) + \"\\n\")\n txt_file.write(\"Greatest Increase in Profits: \" + str(month_inc) + \" \" + str(great_inc) + \"\\n\")\n txt_file.write(\"Greatest Decrease in Profits: \" + str(month_dec) + \" \" + str(great_dec) + \"\\n\")\n ","sub_path":"PyBank/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2108,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"83690588","text":"from memory import Memory\nfrom time import sleep\n\nif __name__ == 
\"__main__\":\n '''Runs an instance of memory!'''\n # Initialize the memory object\n game = Memory()\n\n # Clear Screen\n game.clear()\n\n # Display title and instructions\n game.display_title()\n game.display_instructions()\n\n # Wait for the user to read the instructions and press enter when ready.\n input(\"Press enter to start... \")\n\n # Display the board\n game.display()\n\n # Start the game loop\n while (not game.check_win()):\n # Get two cards from the user\n while(len(game.selected) != 2):\n # Prompt for a move\n prompt = \"Enter move (m row column) or q to quit or h for help: \"\n command = input(prompt)\n info = command.split()\n # Quit if the first item is a q\n if len(info) == 0:\n print(\"Please make a move!\")\n elif info[0] == \"q\":\n game.clear()\n quit()\n # Post instructions again if the first item is an h\n elif info[0] == \"h\":\n game.display_instructions()\n print(\"Try again when ready!\")\n # Try to make the move described\n elif info[0] == \"m\" and len(info) == 3:\n # Make sure that there are two integers provided\n try:\n row = int(info[1])\n col = int(info[2])\n except ValueError:\n print(\"Please follow the proper syntax!\")\n continue\n # Try to make a move if syntactically correct\n result = game.move(row, col)\n if result == False:\n print(\"Try Again!\")\n continue\n else:\n game.display()\n else:\n print(\"Please follow the proper syntax!\")\n # After getting the two cards check for a match.\n result = game.check_match()\n game.display()\n if result == True:\n print(\"There was a match!\")\n else:\n print(\"No Match!\")\n game.display_win()\n","sub_path":"play_memory.py","file_name":"play_memory.py","file_ext":"py","file_size_in_byte":2158,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"499067303","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\"This module created for downloading topics from portal\"\"\"\nimport urllib\nimport json\n\nurl = \"https://www.reddit.com/r/python.json\"\nresponse = urllib.urlopen(url)\ndata_json = json.loads(response.read())\n\n\ndef red_dit():\n \"\"\"Main function\"\"\"\n\n def inner_func(data):\n \"\"\"Returns new topic from the list of topics\"\"\"\n yield outer_function(data)\n\n return inner_func\n\n\ncontainer = []\n\n\ndef outer_function(data):\n \"\"\"Performs search in the JSON\"\"\"\n global container\n for element in data:\n if element == u'title':\n container.append(data[element])\n if isinstance(element, (list, dict)):\n outer_function(element)\n if isinstance(data, dict) and isinstance(data[element], (list, dict)):\n outer_function(data[element])\n return container\n\n\nred21 = red_dit()\nvar = red21(data_json).next()\nfor title in var:\n print (title)\n","sub_path":"Basic functions and generators/pytask1_subtask2_Generator_for_downloading_topics.py","file_name":"pytask1_subtask2_Generator_for_downloading_topics.py","file_ext":"py","file_size_in_byte":947,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"404327936","text":"#!/usr/bin/python3\nimport random\nimport sys\nfrom PyQt4.QtGui import *\nfrom PyQt4.QtCore import *\nfrom PyQt4 import uic\nfrom time import sleep\n\nclass srtScreen(QDialog):\n def __init__ (self,parent=None):\n super(srtScreen,self).__init__(parent)\n uic.loadUi('srt.ui',self)\n self.pushButton.clicked.connect(self.start)\n QApplication.setStyle(QStyleFactory.create(\"Cleanlooks\"))\n # QApplication.setStyle(QStyleFactory.create(\"Plastique\"))\n\n def start 
(self):\n lista = []\n for x in range(3):\n lista.append(random.randint(1,9))\n tempo = 0\n\n while lista:\n self.label.setText(\"
<html><head\\/><body><p><span>
Running process {0}<\\/span><\\/p><\\/body><\\/html>\".format(lista.index(min(lista))+1))\n try:\n self.progressBar_1.setValue(lista[0])\n except:\n self.progressBar_1.setValue(0)\n try:\n self.progressBar_2.setValue(lista[1])\n except:\n self.progressBar_2.setValue(0)\n try:\n self.progressBar_3.setValue(lista[2])\n except:\n self.progressBar_3.setValue(0)\n try:\n self.progressBar_4.setValue(lista[3])\n except:\n self.progressBar_4.setValue(0)\n try:\n self.progressBar_5.setValue(lista[4])\n except:\n self.progressBar_5.setValue(0)\n #print (\"Running process {0}\".format(lista.index(min(lista))+1))\n lista[lista.index(min(lista))] = min(lista)-1\n if min(lista) == 0:\n lista.remove(min(lista))\n sleep (0.3)\n tempo+=1\n if tempo%5 == 0:\n lista.append(random.randint(1,9))\n #print(lista)\n #print(lista)\n self.progressBar_1.setValue(0)\n self.label.setText(\"
<html><head\\/><body><p><span>
Demo finished!<\\/span><\\/p><\\/body><\\/html>\")\n\nif __name__ == '__main__':\n start = QApplication(sys.argv)\n app = srtScreen()\n app.show()\n start.exec_()\n","sub_path":"srt.py","file_name":"srt.py","file_ext":"py","file_size_in_byte":2231,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"506123862","text":"import re\nimport urllib.request\n\ndef getlink(url):\n # Pretend to be a browser\n headers=(\"User-Agent\",\"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/38.0.2125.122 Safiri/537.36 SE 2.X MetaSr 1.0\")\n opener=urllib.request.build_opener()\n opener.addheaders = [headers]\n # Install the opener globally\n urllib.request.install_opener(opener)\n file=urllib.request.urlopen(url)\n data=str(file.read())\n # Build the link regex as needed\n pat='(https?://[^\\s)\";]+\\.(\\w|/)*)'\n link=re.compile(pat).findall(data)\n # Deduplicate\n link=list(set(link))\n return link\n\n# URL of the page to crawl\nurl=\"http://blog.csdn.net\"\nlinklist=getlink(url)\nfor link in linklist:\n print(link[0])\n","sub_path":"Spider/测试urllib的代码.py","file_name":"测试urllib的代码.py","file_ext":"py","file_size_in_byte":733,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"619660462","text":"from .base import BaseHandler\nfrom baselayer.app.access import auth_or_token\nfrom ..models import User\n\nimport tornado.web\n\n\nclass ProfileHandler(BaseHandler):\n @auth_or_token\n def get(self):\n \"\"\"\n ---\n description: Retrieve user profile\n responses:\n 200:\n content:\n application/json:\n schema: SingleUser\n \"\"\"\n user = (User.query.filter(User.username == self.current_user.username)\n .first())\n user_roles = [role.id for role in user.roles]\n user_acls = [acl.id for acl in user.acls]\n user_tokens = [{'id': token.id,\n 'name': token.name,\n 'acls': [acl.id for acl in token.acls],\n 'created_at': token.created_at}\n for token in user.tokens]\n return self.success(data={'username': self.current_user.username,\n 'roles': user_roles,\n 'acls': user_acls,\n 'tokens': user_tokens})\n","sub_path":"skyportal/handlers/profile.py","file_name":"profile.py","file_ext":"py","file_size_in_byte":1105,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"20028656","text":"from classes.action import action\nfrom classes.battle import manager\n\nimport classes.enemy as ce\n\n\nclass BattleAction(action.Action):\n def __init__(self, type, loot, enemy):\n super().__init__(type)\n\n self.loot = loot\n self.enemy = self._setup_enemies(enemy)\n\n self.bm = None\n\n def start(self, players):\n self.bm = manager.BattleManager([p for p in players.values()], self.enemy)\n\n players, succ = self.bm.run()\n\n loot = self.loot if succ else []\n return players, loot, succ\n\n def _setup_enemies(self, enemy):\n done = []\n all = []\n for n, nme in enumerate(enemy):\n name = nme\n if nme in done:\n name += f' {n}'\n en = ce.CLASSES[nme](name=name)\n all.append(en)\n done.append(nme)\n return all\n\n\ndef main():\n from classes.playerc import playerclass\n p = playerclass.Player('ME', 'Human', 'Mage')\n print(p)\n ps = {p.name: p}\n loot = ['Runic Staff of Horror']\n enemy = ['Basic Goblin', 'Basic Goblin', 'Tank Goblin']\n\n ac = BattleAction('battle', loot, enemy)\n res = ac.start(ps)\n print(res)\n\n\nif __name__ == '__main__':\n 
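# demo: build one player, run a scripted goblin battle, and print the (players, loot, success) result\n 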
main()\n\n","sub_path":"classes/action/battle.py","file_name":"battle.py","file_ext":"py","file_size_in_byte":1215,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"606764516","text":"# Write a program that reads 5 integers into a vector, then displays and sums them.\n\nvetor = []\ncont = 0\nsoma = 0\n\nprint(vetor)\n\nwhile cont < 5:\n num = int(input('Enter the number:'))\n cont += 1\n vetor.append(num)\n\n soma = soma + num\n\nprint(vetor)\nprint(soma)\n\n","sub_path":"Vetores com while/5_programa_ler_vetores_e_mostra.py","file_name":"5_programa_ler_vetores_e_mostra.py","file_ext":"py","file_size_in_byte":279,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"646858645","text":"# import libraries\nimport os\nimport shutil\nimport sys\n\n# Declare variables\noriginal_file = \"list.txt\"\nenvironment_code = \"p\"\ncountry_code = \"us\"\ntenant_name = \"afa\"\n\n\ndef main():\n i = 0\n # create empty list for azcopy commands\n commands_list = []\n # check if the base folder exists; delete it and create a new one\n base_folder = \"blob_migration\"\n # Delete all contents of a directory using shutil.rmtree() and handle exceptions\n try:\n shutil.rmtree(base_folder)\n print('Folder ' + base_folder + ' has been deleted')\n except:\n print('Error while deleting blob_migration directory')\n #Create new base folder\n base_folder = create_folder(\"blob_migration\")\n # open the original_file and read content\n num_lines = sum(1 for line in open(original_file))\n f = open(original_file, \"r\")\n if f.mode == \"r\":\n # readlines reads the individual lines\n line = f.readlines()\n while i < num_lines:\n for x in line:\n # split line per \"\\\\\" character and create splitted list\n splitted_list = x.split(\"\\\\\")\n command_string = \".\\\\azcopy.exe copy \\\"\\\\\\\\\" + splitted_list[2] + \"\\\\\" + splitted_list[3] + \"\\\\\" + \\\n splitted_list[4] + \"\\\\\" + splitted_list[5] + \"\\\\\" + splitted_list[\n 6] + \"\\\\\\\"\" + \" \\\"https://igloo\" + environment_code + country_code + tenant_name + \"binariessa\" + container_hex(\n splitted_list[6][:2]) + \".blob.core.windows.net/\" + splitted_list[\n 5] + \"/\\\" --list-of-files \\\"C:\\\\temp\\\\blob_migration\\\\\" + country_code.upper() + tenant_name.upper() + \"\\\\\" + \\\n splitted_list[5] + \"\\\\\" + splitted_list[6] + \".txt\\\"\"\n\n if command_string not in commands_list:\n commands_list.append(command_string)\n\n dest_folder = create_folder(\"blob_migration\\\\\" + splitted_list[5])\n dest_file = \"blob_migration\\\\\" + splitted_list[5] + \"\\\\\" + splitted_list[6] + \".txt\"\n progress(i, num_lines, status=\"Reading files\")\n w = open(dest_file, \"a+\")\n w.write(splitted_list[7] + \"/\" + splitted_list[8])\n w.close()\n i +=1\n for command in commands_list:\n print(command)\n\n\ndef container_hex(item):\n sa_list_01 = [\"00\", \"01\", \"02\", \"03\", \"04\", \"05\", \"06\", \"07\", \"08\", \"09\", \"0a\", \"0b\", \"0c\", \"0d\", \"0e\", \"0f\"]\n sa_list_02 = [\"1\", \"10\", \"11\", \"12\", \"13\", \"14\", \"15\", \"16\", \"17\", \"18\", \"19\", \"1a\", \"1b\", \"1c\", \"1d\", \"1e\", \"1f\"]\n sa_list_03 = [\"20\", \"21\", \"22\", \"23\", \"24\", \"25\", \"26\", \"27\", \"28\", \"29\", \"2a\", \"2b\", \"2c\", \"2d\", \"2e\", \"2f\"]\n sa_list_04 = [\"30\", \"31\", \"32\", \"33\", \"34\", \"35\", \"36\", \"37\", \"38\", \"39\", \"3a\", \"3b\", \"3c\", \"3d\", \"3e\", \"3f\"]\n sa_list_05 = [\"40\", \"41\", \"42\", \"43\", \"44\", 
\"45\", \"46\", \"47\", \"48\", \"49\", \"4a\", \"4b\", \"4c\", \"4d\", \"4e\", \"4f\"]\n sa_list_06 = [\"50\", \"51\", \"52\", \"53\", \"54\", \"55\", \"56\", \"57\", \"58\", \"59\", \"5a\", \"5b\", \"5c\", \"5d\", \"5e\", \"5f\"]\n sa_list_07 = [\"60\", \"61\", \"62\", \"63\", \"64\", \"65\", \"66\", \"67\", \"68\", \"69\", \"6a\", \"6b\", \"6c\", \"6d\", \"6e\", \"6f\"]\n sa_list_08 = [\"70\", \"71\", \"72\", \"73\", \"74\", \"75\", \"76\", \"77\", \"78\", \"79\", \"7a\", \"7b\", \"7c\", \"7d\", \"7e\", \"7f\"]\n sa_list_09 = [\"80\", \"81\", \"82\", \"83\", \"84\", \"85\", \"86\", \"87\", \"88\", \"89\", \"8a\", \"8b\", \"8c\", \"8d\", \"8e\", \"8f\"]\n sa_list_10 = [\"90\", \"91\", \"92\", \"93\", \"94\", \"95\", \"96\", \"97\", \"98\", \"99\", \"9a\", \"9b\", \"9c\", \"9d\", \"9e\", \"9f\"]\n sa_list_11 = [\"a0\", \"a1\", \"a2\", \"a3\", \"a4\", \"a5\", \"a6\", \"a7\", \"a8\", \"a9\", \"aa\", \"ab\", \"ac\", \"ad\", \"ae\", \"af\"]\n sa_list_12 = [\"b0\", \"b1\", \"b2\", \"b3\", \"b4\", \"b5\", \"b6\", \"b7\", \"b8\", \"b9\", \"ba\", \"bb\", \"bc\", \"bd\", \"be\", \"bf\"]\n sa_list_13 = [\"c0\", \"c1\", \"c2\", \"c3\", \"c4\", \"c5\", \"c6\", \"c7\", \"c8\", \"c9\", \"ca\", \"cb\", \"cc\", \"cd\", \"ce\", \"cf\"]\n sa_list_14 = [\"d0\", \"d1\", \"d2\", \"d3\", \"d4\", \"d5\", \"d6\", \"d7\", \"d8\", \"d9\", \"da\", \"db\", \"dc\", \"dd\", \"de\", \"df\"]\n sa_list_15 = [\"e0\", \"e1\", \"e2\", \"e3\", \"e4\", \"e5\", \"e6\", \"e7\", \"e8\", \"e9\", \"ea\", \"eb\", \"ec\", \"ed\", \"ee\", \"ef\"]\n sa_list_16 = [\"f0\", \"f1\", \"f2\", \"f3\", \"f4\", \"f5\", \"f6\", \"f7\", \"f8\", \"f9\", \"fa\", \"fb\", \"fc\", \"fd\", \"fe\", \"ff\"]\n\n if item in sa_list_01:\n sa_number = \"01\"\n elif item in sa_list_02:\n sa_number = \"02\"\n elif item in sa_list_03:\n sa_number = \"03\"\n elif item in sa_list_04:\n sa_number = \"04\"\n elif item in sa_list_05:\n sa_number = \"05\"\n elif item in sa_list_06:\n sa_number = \"06\"\n elif item in sa_list_07:\n sa_number = \"07\"\n elif item in sa_list_08:\n sa_number = \"08\"\n elif item in sa_list_09:\n sa_number = \"09\"\n elif item in sa_list_10:\n sa_number = \"10\"\n elif item in sa_list_11:\n sa_number = \"11\"\n elif item in sa_list_12:\n sa_number = \"12\"\n elif item in sa_list_13:\n sa_number = \"13\"\n elif item in sa_list_14:\n sa_number = \"14\"\n elif item in sa_list_15:\n sa_number = \"15\"\n elif item in sa_list_16:\n sa_number = \"16\"\n else:\n sa_number = \"not_defined\"\n return sa_number\n\ndef create_folder(folder_name):\n if not os.path.exists(folder_name):\n os.mkdir(folder_name)\n print(\"Directory \" + folder_name + \" created \\n\\r\")\n\ndef progress(count, total, status=''):\n bar_len = 60\n filled_len = int(round(bar_len * count / float(total)))\n percents = round(100.0 * count / float(total), 1)\n bar = '=' * filled_len + '-' * (bar_len - filled_len)\n sys.stdout.write('[%s] %s%s ...%s\\r' % (bar, percents, '%', status))\n sys.stdout.flush()\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"python/dfs_to_blob_converter/dfs_to_blob_converter.py","file_name":"dfs_to_blob_converter.py","file_ext":"py","file_size_in_byte":5685,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"532122833","text":"from tkinter import *\nimport covid as cv\n\nroot = Tk()\nroot.geometry(\"300x300\")\nroot.title(\" COVID-19 \")\nstates = ['india', 'maharashtra', 'karnataka', 'andhra pradesh', 'tamil nadu', 'kerala', 'delhi', 'uttar pradesh', 'west bengal', 'odisha', 'rajasthan', 
'telangana', 'chhattisgarh', 'haryana', 'bihar', 'gujarat', 'madhya pradesh', 'assam', 'punjab', 'jammu and kashmir', 'jharkhand',\n 'uttarakhand', 'himachal pradesh', 'goa', 'puducherry', 'tripura', 'manipur', 'total', 'chandigarh', 'arunachal pradesh', 'meghalaya', 'nagaland', 'ladakh', 'sikkim', 'andaman and nicobar islands', 'mizoram', 'dadra and nagar haveli and daman and diu', 'lakshadweep']\n\n\ndef Take_input():\n Output.delete('1.0', END)\n INPUT = inputtxt.get(\"1.0\", \"end-1c\")\n INPUT = INPUT.lower()\n print(INPUT)\n if INPUT not in states:\n Output.insert(END, 'Please enter an Indian state.')\n else:\n Output.insert(END, cv.fetch(INPUT))\n\n\nl = Label(text=\"Enter India/State\")\ninputtxt = Text(root, height=1,\n width=32,\n bg=\"snow2\")\n\nOutput = Text(root, height=12,\n width=32,\n bg=\"linen\")\nOutput.insert(END, cv.fetch('india'))\n\nDisplay = Button(root, height=2,\n width=20,\n text=\"Fetch\",\n command=lambda: Take_input())\nl2 = Label(text=\"Check docs for list of all available states\")\n\nl.pack()\ninputtxt.pack()\nDisplay.pack()\nOutput.pack()\nl2.pack()\n\nmainloop()\n","sub_path":"covid-19-gui.py","file_name":"covid-19-gui.py","file_ext":"py","file_size_in_byte":1465,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"419982961","text":"from tabulate import *\nimport requests\nimport os\nimport logging\nfrom bs4 import BeautifulSoup\n\n\nm2 = 'http://www.groupon.com/deals/gg-into-focus-camera-lens-mugs-1'\n\n#Deal Page Data\ndef dpagedata(url):\n result_set = []\n resp = requests.get(url)\n soup = BeautifulSoup(resp.text)\n \n title = soup.title.text\n result_set.append(soup.title.text.replace(' | Groupon', '').strip())# drop the ' | Groupon' suffix (str.strip would remove individual characters)\n #print title\n des = soup.find(\"div\", class_= \"row hide-diy-gift-card-msg pitch-container\")\n #print des.div.text\n result_set.append(des.div.text)\n nutshell = soup.find(\"div\", class_= \"nutshell highlights \")\n #print nutshell.p.text\n result_set.append(nutshell.p.text)\n return result_set\n\n# Actual Fn\nx = dpagedata(m2)\n\n#Defining a Dict\noutdata = []\nz = {}\nfor i in x:\n outdata.append([line for line in i.split('\\n') if line.strip() != ''])\n\nfor i,j in enumerate(outdata):\n z[m2] = x\n\n\n","sub_path":"base/dealpagecatcher.py","file_name":"dealpagecatcher.py","file_ext":"py","file_size_in_byte":893,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"576130222","text":"from pandas import read_csv\nfrom numpy import mean\nfrom sklearn.metrics import mean_absolute_error\nfrom matplotlib import pyplot\ndata = read_csv('../../data/korea/covid/TimeProvince.csv', header=0, index_col=0)\n\n# get 17 provinces\nprovinces = data['province'].unique()\n\n# For each province\nfor province in provinces:\n\t# prepare situation\n\tX = data.loc[data['province'] == province]\n\tX = X['confirmed']\n\twindow = 3\n\thistory = [X[i] for i in range(window)]\n\ttest = [X[i] for i in range(window, len(X))]\n\tpredictions = list()\n\n\t# walk forward over time steps in test\n\tfor t in range(len(test)):\n\t\tlength = len(history)\n\t\tyhat = mean([history[i] for i in range(length-window,length)])\n\t\tobs = test[t]\n\t\tpredictions.append(yhat)\n\t\thistory.append(obs)\n\t\t# print('predicted=%f, expected=%f' % (yhat, obs))\n\n\terror = mean_absolute_error(test, predictions)\n\tprint(f'{province}\\t%.3f' % error)\n\n\t# plot\n\tpyplot.title(province)\n\tpyplot.plot(test)\n\tpyplot.plot(predictions, 
color='red')\n\tpyplot.savefig(f'output_test/{province}.png')\n\tpyplot.close()\n\tpyplot.show()\n","sub_path":"code/prediction/moving_average_17_regions_test.py","file_name":"moving_average_17_regions_test.py","file_ext":"py","file_size_in_byte":1053,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"463039894","text":"def pow(a, n):\n if n == 0:\n return 1\n elif n % 2 == 1:\n return pow(a, n-1) * a\n else:\n return pow(a**2, n//2)\n\n\nprint(pow(3, 4))\nlst = [1, 2, 3, 5]\n","sub_path":"Quick pow/pow.py","file_name":"pow.py","file_ext":"py","file_size_in_byte":178,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"591927296","text":"import ssl\nimport dns\nimport dns.resolver\nimport sys, getopt\nimport json\nimport geoip2.webservice\nimport geoip2.database\nimport requests\nimport time\nimport subprocess\nfrom pprint import pprint\nfrom datetime import datetime\nfrom ripe.atlas.cousteau import (\n Ping,\n Traceroute,\n AtlasSource,\n AtlasCreateRequest,\n AtlasResultsRequest\n)\n\npathToDb = \"Resources/GeoLite2-City.mmdb\"\npathToAsnDb = \"Resources/GeoLite2-ASN.mmdb\"\n\n#some code adopted from RIPE Atlas cousteau tutorial. https://github.com/RIPE-NCC/ripe-atlas-cousteau\ndef send_ripe_measurement_request(domain, apikey):\n traceroute1 = Traceroute(\n af=4,\n target=domain,\n description=\"testing\",\n protocol=\"ICMP\",\n )\n\n vantage1 = AtlasSource(\n type=\"country\",\n value=\"KE\",\n requested=3,\n tags={\"include\": [\"system-ipv4-works\"]}\n )\n\n vantage2 = AtlasSource(\n type=\"country\",\n value=\"SN\", # , SENEGAL],\n requested=3,\n tags={\"include\": [\"system-ipv4-works\"]}\n )\n\n vantage3 = AtlasSource(\n type=\"country\",\n value=\"CA\", # , CANADA],\n requested=3,\n tags={\"include\": [\"system-ipv4-works\"]}\n )\n\n vantage4 = AtlasSource(\n type=\"country\",\n value=\"GB\", # , UNITED KINGDOM],\n requested=3,\n tags={\"include\": [\"system-ipv4-works\"]}\n )\n\n vantage5 = AtlasSource(\n type=\"country\",\n value=\"US\", # , USA],\n requested=3,\n tags={\"include\": [\"system-ipv4-works\"]}\n )\n vantage6 = AtlasSource(\n type=\"country\",\n value=\"BR\", # , Brazil \"CA\", \"PL\", \"AU\", \"AR\"],\n requested=3,\n tags={\"include\": [\"system-ipv4-works\"]}\n )\n vantage7 = AtlasSource(\n type=\"country\",\n value=\"RU\", # , RUSSIA\n requested=3,\n tags={\"include\": [\"system-ipv4-works\"]}\n )\n vantage8 = AtlasSource(\n type=\"country\",\n value=\"CN\", # , CHINA\n requested=3,\n tags={\"include\": [\"system-ipv4-works\"]}\n )\n\n atlas_request = AtlasCreateRequest(\n start_time=datetime.utcnow(),\n key=apikey,\n measurements=[traceroute1],\n sources=[vantage1, vantage2, vantage3, vantage4, vantage5, vantage6, vantage7, vantage8],\n is_oneoff=True\n )\n\n is_success, response = atlas_request.create()\n result_id = []\n if is_success:\n result_id = response['measurements']\n print(result_id)\n return result_id\n else:\n print(\"Measurement request was not successful\")\n return []\n\n\ndef fetch_ripe_result(result_id, output_file):\n # fetch the result\n kwargs = {\n \"msm_id\": result_id,\n }\n\n # while not_fetched:\n is_success, results = AtlasResultsRequest(**kwargs).create()\n if is_success:\n probenumber = 1\n print(len(results))\n for res in results:\n print(probenumber, \":vantage address: \", res['src_addr'])\n probenumber += 1\n with open(output_file, 'w') as outfile:\n json.dump(results, outfile)\n\n\ndef tracecdns(trace_data):\n filename = 
trace_data\n asn_path = []\n with geoip2.database.Reader(pathToDb) as reader:\n with geoip2.database.Reader(pathToAsnDb) as asn_reader:\n with open(filename) as json_file:\n data = json.load(json_file)\n probenumber = 1\n for result in data:\n try:\n asn_response = asn_reader.asn(result['src_addr'])\n asn = asn_response.autonomous_system_number\n asn_path.append(str(asn))\n except:\n asn = \"vantage ASN unknown\"\n asn_path.append(asn)\n print(\"Result number\", probenumber, \"from address\", result['src_addr'], \"from ASN\", asn)\n probenumber += 1\n for tracert in result['result']:\n\n if tracert['result'][1] != {\"x\": \"*\"}:\n hop_ip = tracert['result'][1]['from']\n # geolocate the ip\n try:\n asn_response = asn_reader.asn(hop_ip)\n response = reader.city(hop_ip)\n asn = asn_response.autonomous_system_number\n asn_path.append(\"->\" + str(asn))\n city = response.city.name\n print(hop_ip, city, asn)\n except:\n print(\"IP not in the ASN database\")\n print(\"ASN path taken from address\", result['src_addr'], \"to destination:\")\n showTrafficPath(list(dict.fromkeys(asn_path)))\n asn_path.clear()\n print(\"\")\n\n\ndef showTrafficPath(asntrace):\n for asn in asntrace:\n print(asn, end=\"\")\n print('\\n')\n\n\ndef executemeasurements():\n a = 'www.google.com'\n wits = 'www.wits.ac.za'\n apikey = \"eb6310a8-2bb2-487d-a277-dd32e6c5256b\"\n\n googleresultid = send_ripe_measurement_request(a, apikey)\n witsresultsid = send_ripe_measurement_request(wits, apikey)\n print(\"Wait 20 minutes to get results from RIPE...\")\n time.sleep(1200)\n if len(googleresultid) != 0:\n fetch_ripe_result(googleresultid[0], \"google_trace.txt\")\n else:\n print(\"Results for\", a, \"could not be fetched\")\n\n if len(witsresultsid) != 0:\n fetch_ripe_result(witsresultsid[0], \"wits_trace.txt\")\n else:\n print(\"Results for\", wits, \"could not be fetched\")\n\n tracecdns(\"google_trace.txt\")\n tracecdns(\"wits_trace.txt\")\n\n\ndef get_location(domain):\n answers = dns.resolver.query(domain, 'A')\n for rdata in answers:\n ip = rdata.to_text()\n print(\"The IPv4 of this website is \", domain, ip)\n with geoip2.webservice.Client(393383, 'kmt9mF2RPrwP5I7A') as client:\n response = client.insights(ip)\n print(\"The Location where hosting of this website: Latitude\", domain, response.location.longitude,\n \"Longitude: \", response.location.latitude)\n print(\"The city of this website server\", domain, response.city.name)\n print(\"The country name where the server of this\", domain, response.country.name)\n\n\ndef get_records(domain):\n \"\"\"\n Get all the records associated to domain parameter.\n :param domain: \n :return: \n \"\"\"\n idzs = ['A', 'AAAA', 'NS', 'MX', 'CNAME', 'TXT', 'SOA']\n print(\"The Website being tested is: \", domain)\n for a in idzs:\n try:\n answers = dns.resolver.query(domain, a)\n\n for rdata in answers:\n print(\"The DNS Record: \", a, ':', rdata.to_text())\n\n except Exception as e:\n print(e) # or pass\n\n\ndef gethttpheaders(domain):\n r = requests.get(domain)\n print(\"Headers for this Domain: \", domain, r.headers.keys())\n print(\"The request performed is: \", r.request.headers)\n print(\"The cookies available for this domain: \", domain, r.cookies)\n\n\n# Testing the functions\na = 'https://www.google.com'\nb = 'https://www.wits.ac.za'\nget_records('google.com')\ncname = dns.resolver.query('analytics.google.com', 'CNAME')\nfor rdata in cname:\n print(\"The canonical name of the analytics.google.com is\", rdata.to_text())\nprint(\" \")\nprint(\" 
\")\nget_records('wits.ac.za')\ncname = dns.resolver.query('ftp.wits.ac.za', 'CNAME')\nfor rdata in cname:\n print(\"The canonical name of ftp.wits.ac.za is\", rdata.to_text())\n\nprint(\" \")\nprint(\" \")\nget_location('google.com')\nget_location('wits.ac.za')\nprint(\" \")\nprint(\" \")\ngethttpheaders(a)\ngethttpheaders(b)\nexecutemeasurements()\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":7707,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"506799902","text":"#===========================================================])\r\n# Python Version: 3.5.2 |)\r\n# |)\r\n# Python/Tkinter Phonebook Drill |)\r\n# |)\r\n# Author: The Tech Academy |)\r\n# |)\r\n# Co-Author: N.C.Wood (nicholas.cameron.wood@gmail.com) |)\r\n# |)\r\n# This code was written and tested to work with Windows 10 |)\r\n# |)\r\n#===========================================================])\r\n\r\nimport os\r\nfrom tkinter import *\r\nfrom tkinter import messagebox\r\nimport tkinter as tk\r\nimport sqlite3\r\n\r\nimport phoneBook_main\r\nimport phoneBook_gui\r\n\r\n#=============================\r\ndef center_window(self, w, h): \r\n # get screen width and height \r\n screen_width = self.master.winfo_screenwidth() \r\n screen_height = self.master.winfo_screenheight() \r\n # calculate x and y coords to center app \r\n x = int((screen_width/2) - (w/2))\r\n y = int((screen_height/2) - (h/2))\r\n centerGeo = self.master.geometry('{}x{}+{}+{}'.format(w, h, x, y))\r\n return centerGeo\r\n\r\n\r\ndef ask_quit(self):\r\n if messagebox.askokcancel(\"Exit program\", \"Okay to exit application?\"):\r\n self.master.destroy()\r\n os._exit(0)\r\n#==================\r\n\r\n \r\ndef create_db(self):\r\n conn = sqlite3.connect('phonebook.db')\r\n with conn:\r\n cur = conn.cursor()\r\n cur.execute(\"\"\"CREATE TABLE if not exists tbl_phonebook( \\\r\n ID INTEGER PRIMARY KEY AUTOINCREMENT, \\\r\n col_fname TEXT, \\\r\n col_lname TEXT, \\\r\n col_fullname TEXT, \\\r\n col_phone TEXT, \\\r\n col_email TEXT \\\r\n );\"\"\")\r\n conn.commit()\r\n conn.close()\r\n first_run(self)\r\n\r\ndef first_run(self):\r\n data = ('John', 'Doe', 'John Doe', '111-111-1111', 'jdoe@email.com')\r\n conn = sqlite3.connect('phonebook.db')\r\n with conn:\r\n cur = conn.cursor()\r\n cur,count = count_records(cur)\r\n if count < 1:\r\n cur.execute(\"\"\"INSERT INTO tbl_phonebook (col_fname,col_lname,col_fullname,col_phone,col_email) VALUES (?,?,?,?,?)\"\"\", (data))\r\n conn.commit()\r\n conn.close()\r\n\r\ndef count_records(cur):\r\n count = \"\"\r\n cur.execute(\"\"\"SELECT COUNT(*) FROM tbl_phonebook\"\"\")\r\n count = cur.fetchone()[0]\r\n return cur, count\r\n\r\n# Select item in listbox\r\ndef onSelect(self,event):\r\n varList = event.widget\r\n select = varList.curselection()[0]\r\n value = varList.get(select)\r\n conn = sqlite3.connect('phonebook.db')\r\n with conn:\r\n cursor = conn.cursor()\r\n cursor.execute(\"\"\"SELECT col_fname,col_lname,col_phone,col_email FROM tbl_phonebook WHERE col_fullname = (?)\"\"\",[value])\r\n varBody = cursor.fetchall()\r\n for data in varBody:\r\n\r\n self.entry_fname.delete(0,END)\r\n self.entry_lname.delete(0,END)\r\n self.entry_phone.delete(0,END)\r\n self.entry_email.delete(0,END)\r\n\r\n\r\n self.entry_fname.insert(0,data[0])\r\n self.entry_lname.insert(0,data[1])\r\n self.entry_phone.insert(0,data[2])\r\n self.entry_email.insert(0,data[3])\r\n \r\n \r\n \r\n\r\ndef addToList(self):\r\n var_fname = self.entry_fname.get()\r\n var_lname = 
self.entry_lname.get()\r\n var_fname = var_fname.strip()\r\n var_lname = var_lname.strip()\r\n var_fname = var_fname.title()\r\n var_lname = var_lname.title()\r\n var_fullname = (\"{} {}\".format(var_fname,var_lname))\r\n print(\"var_fullname: {}\".format(var_fullname))\r\n var_phone = self.entry_phone.get().strip()\r\n var_email = self.entry_email.get().strip()\r\n if \"@\" not in var_email or \".\" not in var_email:\r\n print(\"Incorrect email format!\")\r\n if (len(var_fname) > 0) and (len(var_lname) > 0) and (len(var_phone) > 0) and (len(var_email) > 0):\r\n conn = sqlite3.connect('phonebook.db')\r\n with conn:\r\n cursor = conn.cursor()\r\n cursor.execute(\"\"\"SELECT COUNT(col_fullname) FROM tbl_phonebook WHERE col_fullname = '{}'\"\"\".format(var_fullname))\r\n count = cursor.fetchone()[0]\r\n chkName = count\r\n if chkName == 0:\r\n print(\"chkName: {}\".format(chkName))\r\n cursor.execute(\"\"\"INSERT INTO tbl_phonebook (col_fname,col_lname,col_fullname,col_phone,col_email) VALUES (?,?,?,?,?)\"\"\",(var_fname,var_lname,var_fullname,var_phone,var_email))\r\n self.listbox_info.insert(END, var_fullname)\r\n onClear(self)\r\n else:\r\n messagebox.showerror(\"Name Error\", \"'{}' already exists in the database! Please choose a different name.\".format(var_fullname))\r\n onClear(self)\r\n conn.commit()\r\n conn.close()\r\n else:\r\n messagebox.showerror(\"Missing Text Error\", \"Please verify that there is data in all four fields.\")\r\n\r\n\r\ndef onDelete(self):\r\n var_select = self.listbox_info.get(self.listbox_info.curselection())\r\n conn = sqlite3.connect('phonebook.db')\r\n with conn:\r\n cur = conn.cursor()\r\n cur.execute(\"\"\"SELECT COUNT(*) FROM tbl_phonebook\"\"\")\r\n count = cur.fetchone()[0]\r\n if count > 1:\r\n confirm = messagebox.askokcancel(\"Delete Confirmation\", \" All information associated with ({}) \\nwill be permanently deleted\".format(var_select))\r\n if confirm:\r\n conn = sqlite3.connect('phonebook.db')\r\n with conn:\r\n cursor = conn.cursor()\r\n cursor.execute(\"\"\"DELETE FROM tbl_phonebook WHERE col_fullname = '{}'\"\"\".format(var_select))\r\n onDeleted(self)\r\n\r\n conn.commit()\r\n else:\r\n confirm = messagebox.showerror(\"Last Record Area\", \"({}) is the last record in the database and cannot be deleted at this time\".format(var_select))\r\n conn.close()\r\n\r\n\r\ndef onDeleted(self):\r\n self.entry_fname.delete(0,END)\r\n self.entry_lname.delete(0,END)\r\n self.entry_phone.delete(0,END)\r\n self.entry_email.delete(0,END)\r\n\r\n try:\r\n index = self.listbox_info.curselection()[0]\r\n self.listbox_info.delete(index)\r\n\r\n except IndexError:\r\n pass\r\n\r\n\r\ndef onClear(self):\r\n self.entry_fname.delete(0,END)\r\n self.entry_lname.delete(0,END)\r\n self.entry_phone.delete(0,END)\r\n self.entry_email.delete(0,END)\r\n\r\n\r\ndef onRefresh(self):\r\n self.listbox_info.delete(0,END)\r\n conn = sqlite3.connect('phonebook.db')\r\n with conn:\r\n cursor = conn.cursor()\r\n cursor.execute(\"\"\"SELECT COUNT(*) FROM tbl_phonebook\"\"\")\r\n count = cursor.fetchone()[0]\r\n i = 0\r\n while i < count:\r\n cursor.execute(\"\"\"SELECT col_fullname FROM tbl_phonebook\"\"\")\r\n varList = cursor.fetchall() [i]\r\n for item in varList:\r\n self.listbox_info.insert(0,str(item))\r\n i = i +1\r\n conn.close()\r\n\r\n\r\ndef onUpdate(self):\r\n try:\r\n var_select = self.listbox_info.curselection()[0]\r\n var_value = self.listbox_info.get(var_select)\r\n except:\r\n messagebox.showinfo(\"Missing selection\",\"No name was selected from the list box. 
\\nCancelling the Update request.\")\r\n return\r\n\r\n var_phone = self.entry_phone.get().strip()\r\n var_email = self.entry_email.get().strip()\r\n if (len(var_phone) > 0) and (len(var_email) > 0):\r\n conn = sqlite3.connect('phonebook.db')\r\n with conn:\r\n cur = conn.cursor()\r\n cur.execute(\"\"\"SELECT COUNT(col_phone) FROM tbl_phonebook WHERE col_phone = '{}'\"\"\".format(var_phone))\r\n count = cur.fetchone()[0]\r\n print(count)\r\n cur.execute(\"\"\"SELECT COUNT(col_email) FROM tbl_phonebook WHERE col_email = '{}'\"\"\".format(var_email))\r\n count2 = cur.fetchone()[0]\r\n print(count2)\r\n if count == 0 or count2 == 0:\r\n response = messagebox.askokcancel(\"Update Request\", \"The following changes ({}) and ({}) will be implemented for ({}). \\nProceed with the update request?\".format(var_phone, var_email, var_value))\r\n \r\n if response:\r\n with conn:\r\n cursor = conn.cursor()\r\n cursor.execute(\"\"\"UPDATE tbl_phonebook SET col_phone = '{0}',col_email = '{1}' WHERE col_fullname = '{2}'\"\"\".format(var_phone,var_email, var_value))\r\n onClear(self)\r\n conn.commit()\r\n else:\r\n messagebox.showinfo(\"Cancel request\",\"No changes have been made to ({})\".format(var_value))\r\n else:\r\n messagebox.showinfo(\"No changes detected\",\"Both ({}) and ({}) \\nalready exist in the database for this name.\\n\\nYour update request has been cancelled.\".format(var_phone, var_email))\r\n onClear(self)\r\n conn.close()\r\n else:\r\n messagebox.showerror(\"Missing information\",\"Please select a name from the list. \\nThen edit the phone or email information.\")\r\n onClear(self)\r\n\r\n\r\n\r\nif __name__ == \"__main__\":\r\n pass\r\n","sub_path":"phoneBook_func.py","file_name":"phoneBook_func.py","file_ext":"py","file_size_in_byte":10535,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"510058208","text":"# Did this code successfully run on Leetcode : Yes\n# Any problem you faced while coding this : No\n# Your code here along with comments explaining your approach\n\n# Method1 using DP:\n# TC = O(n^2) | Sc= O(n)\nclass Solution:\n def maxEnvelopes(self, en: List[List[int]]) -> int:\n if len(en) == 0:\n return 0\n dp = [1 for _ in range(len(en))]\n max_ = 1 \n en = sorted(en, key = lambda x : (x[0],x[1]))\n \n for i in range(1, len(dp)):\n for j in range(0, i):\n if en[i][1] > en[j][1] and en[i][0] != en[j][0]:\n dp[i] = max(dp[i], dp[j] + 1 )\n \n for num in dp:\n if num > max_:\n max_ = num\n return max_\n\n\n# Method 2 using Binary Search\n# Tc = O(nlogn) | Sc = O(n)\nclass Solution:\n def maxEnvelopes(self, envelopes: List[List[int]]) -> int:\n if len(envelopes) == 0 :\n return 0 \n \n envelopes = sorted(envelopes, key = lambda x : (x[0], -x[1]))\n print(envelopes)\n arr = [1 for _ in range(len(envelopes))]\n len_ = 1 \n arr[0] = envelopes[0]\n \n for i in range(1, len(envelopes)):\n if envelopes[i][1] > arr[len_ -1][1] and envelopes[i][0] != arr[len_ -1][0]:\n len_ += 1 \n arr[len_ - 1] = envelopes[i]\n else:\n indx = self.binarySearch(arr, 0, len_ -1, envelopes[i])\n arr[indx] = envelopes[i]\n print(arr)\n return len_\n \n def binarySearch(self, nums, low, high, target):\n while low <= high:\n mid = (low+high)//2\n \n if nums[mid] == target:\n return mid \n \n elif nums[mid][1] < 
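Editor's note on the phonebook record above: `messagebox` must be imported explicitly, since `from tkinter import *` does not provide it, and every query that splices a name in via `.format()` breaks on values containing a quote. A minimal sketch of the parameterized form, assuming the same `phonebook.db` / `tbl_phonebook` schema; the helper names are illustrative:

```python
import sqlite3
from tkinter import messagebox  # not provided by "from tkinter import *"

# Illustrative helpers: the sqlite3 driver fills ? placeholders itself,
# so a name such as "O'Brien" can neither break the statement nor inject SQL.
def count_by_name(fullname):
    conn = sqlite3.connect('phonebook.db')
    with conn:  # commits or rolls back the transaction
        count = conn.execute(
            "SELECT COUNT(*) FROM tbl_phonebook WHERE col_fullname = ?",
            (fullname,)).fetchone()[0]
    conn.close()
    return count

def delete_by_name(fullname):
    conn = sqlite3.connect('phonebook.db')
    with conn:
        conn.execute("DELETE FROM tbl_phonebook WHERE col_fullname = ?",
                     (fullname,))
    conn.close()
```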
target[1] and nums[mid][0] != target[0]:\n low = mid + 1 \n \n else:\n high = mid -1 \n \n return low ","sub_path":"RussianDollsEnvelope.py","file_name":"RussianDollsEnvelope.py","file_ext":"py","file_size_in_byte":1918,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"211813216","text":"import pandas, pandas_ext, param, features as ft\nimport useful_stuff\nfrom useful_stuff import *\n\n\n# list of ftures to be defined by this module\nfeature_list = [ft.Feature('post', 'is_stim')\n\t, ft.Feature('post', 'is_resp')\n\t, ft.Feature('user_post', 'like_is_stim')\n\t, ft.Feature('user_post', 'like_is_resp')\n\t, ft.Feature('user', 'is_stim')\n\t, ft.Feature('user', 'is_resp')\n\t, ft.Feature('blog', 'is_stim')\n\t, ft.Feature('blog', 'is_resp')\n]\n\n# setup function to load data (from store) needed to calculate each Feature \ndef setup_func(feature_list, store, recreate_setup):\n\t\n\t# Load universe of posts, set stim and resp\n\tpost_df = store.post.load_df(store.post.keys())\n\tpost_df['is_stim'] = (post_df.is_test == False) & (post_df.week < datetime(2012, 8, 13))\n\tpost_df['is_resp'] = (post_df.is_test == True) \n\n\t# Load univers of likes, set stim and resp\n\tlike_df = store.user_post.load_df(store.user_post.keys())\n\tlike_df = like_df[like_df.is_like]\n\t\n\tlike_df.add_column(name = 'post_is_stim', column = post_df.is_stim, on_level = 'post_id')\n\tlike_df['like_is_stim'] = (like_df.like_week < datetime(2012, 8, 13)) & (like_df.post_is_stim)\n\t\n\tlike_df.add_column(name = 'post_is_resp', column = post_df.is_resp, on_level = 'post_id')\n\tlike_df['like_is_resp'] = (like_df.like_week == datetime(2012, 8, 13)) & (like_df.post_is_resp)\n\n\t# Load universe of users, set stim and resp\n\tuser_df = store.user.load_df(store.user.keys())\n\t\n\tuser_df['is_stim'] = user_df.is_test\n\tuser_df.is_stim = user_df.is_stim.fillna(False)\n\t\n\tuser_df['is_resp'] = user_df.is_stim\n\n\t# Save blog data\n\tblog_df = store.blog.load_df(store.blog.keys())\n\t\n\tstim_posts = post_df.groupby('blog_id')['is_stim'].agg(sum)\n\tblog_df['is_stim'] = stim_posts > 0\n\tblog_df.is_stim = blog_df.is_stim.fillna(False)\n\t\n\tresp_posts = post_df.groupby('blog_id')['is_resp'].agg(sum)\n\tblog_df['is_resp'] = resp_posts > 0\n\tblog_df.is_resp = blog_df.is_resp.fillna(False)\n\t\n\treturn {'user': user_df, 'blog': blog_df, 'post': post_df, 'user_post': like_df}\n\n\n\n# build function to calculate/retrieve fture from setup data\t\ndef build_func(feature, data):\n\n\tsource_df = data[feature.scope]\n\treturn source_df[feature.name]\n\n# define fset: fture set template\nfset = ft.FeatureSet(\"prod_stim_resp\", feature_list, setup_func, build_func)\nfset.save(ft.prod_store, overwrite = True)\n","sub_path":"kaggle/predict-wordpress-likes/carter_s/python/f3_prod_stim_resp.py","file_name":"f3_prod_stim_resp.py","file_ext":"py","file_size_in_byte":2319,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"476331098","text":"#!/usr/bin/env python \n#coding=utf-8\n\n\"\"\" 2016/04/05\n\nCopyright (c) 2015 Xu Zhihao (Howe). All rights reserved.\n\nThis program is free software; you can redistribute it and/or modify\n\nThis programm is tested on kuboki base turtlebot. 
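The binary-search method in the envelope record above hand-rolls longest-increasing-subsequence bookkeeping over whole `[width, height]` pairs. For comparison, the usual O(n log n) formulation keeps only candidate heights and lets `bisect` do the search; this is a sketch of that standard variant, not the record's exact code:

```python
import bisect

def max_envelopes(envelopes):
    # Sort widths ascending; for equal widths sort heights descending,
    # so two envelopes of the same width can never both be chosen.
    envelopes.sort(key=lambda e: (e[0], -e[1]))
    tails = []  # tails[i] = smallest height that can end a chain of length i+1
    for _, h in envelopes:
        i = bisect.bisect_left(tails, h)
        if i == len(tails):
            tails.append(h)
        else:
            tails[i] = h
    return len(tails)

print(max_envelopes([[5, 4], [6, 4], [6, 7], [2, 3]]))  # -> 3
```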
\n\n\"\"\"\n\nimport getpass,PyKDL,collections,numpy,math,tf,rospy\nfrom geometry_msgs.msg import Pose,Twist\nfrom nav_msgs.msg import Path\n\n#turn_speed in rad/s,radius in meter, duration in sec\n\n\n#twist by frame difference\ndef twist_frame(target_frame,source_frame):\n #pub = rospy.Publisher('/cmd_vel_mux/input/teleop', Twist, queue_size=1)\n pub = rospy.Publisher('/cmd_vel', Twist, queue_size=1)\n move_cmd= Twist() \n listener = tf.TransformListener()\n achieve_goal=True\n while not rospy.is_shutdown() and achieve_goal:\n try:\n (trans,rot)=listener.lookupTransform(target_frame,source_frame,rospy.Time(0))\n except(tf.LookupException, tf.ConnectivityException, tf.ExtrapolationException) as error:\n continue\n threld=0.01\n if trans>(threld,threld,threld):\n linear = 0.5 * math.sqrt(trans[0] ** 2 + trans[1] ** 2)\n angular = 4*math.atan2(trans[1], trans[0])\n else:\n linear=0.0\n euler=tf.transformations.euler_from_quaternion(rot)\n angular=euler[2]\n move_cmd.linear.x = linear\n move_cmd.angular.z = angular\n pub.publish(move_cmd)\n if linear==0 and abs(angular)<=0.001:\n linear=0.0\n angular=0.0\n move_cmd.linear.x = linear\n move_cmd.angular.z = angular\n pub.publish(move_cmd)\n achieve_goal=False\n break\n \n#记录plan\ndef plan_recorder():\n path=rospy.wait_for_message('/move_base/NavfnROS/plan', Path)\n path_poses=path.poses\n return path_poses\n \n\ndef quat_to_angle(quat):\n rot = PyKDL.Rotation.Quaternion(quat[0], quat[1], quat[2], quat[3])\n return rot.GetRPY()[2]\n\ndef angle_to_quat(angle):#in rad\n rot = PyKDL.Rotation.RotZ(angle)\n return rot.GetQuaternion()\n \n# 产生一个朝向当前目标点的角度\ndef angle_generater(sub_point,pre_point):\n if pre_point.x<sub_point.x:\n angle= math.atan((sub_point.y-pre_point.y)/(sub_point.x-pre_point.x))\n if pre_point.x>sub_point.x:\n angle= math.pi+math.atan((sub_point.y-pre_point.y)/(sub_point.x-pre_point.x))\n if pre_point.x==sub_point.x:\n angle= math.pi+math.pi/2\n return angle\n","sub_path":"4月月报/code/move_reference.py","file_name":"move_reference.py","file_ext":"py","file_size_in_byte":2189,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"237909562","text":"# Запросите у пользователя значения выручки и издержек фирмы. Определите, с каким финансовым результатом работает\n# фирма. Например, прибыль — выручка больше издержек, или убыток — издержки больше выручки.\n# Выведите соответствующее сообщение.\n# Если фирма отработала с прибылью, вычислите рентабельность выручки.\n# Это отношение прибыли к выручке. 
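The x-comparison branches of `angle_generater` above (reconstructed here, since the text between `<` and `>` had been stripped like an HTML tag) collapse into a single `math.atan2` call, which handles all four quadrants plus the vertical case, up to the per-branch pi offsets the original applies. A minimal sketch, assuming the points expose `.x`/`.y` attributes:

```python
import math

def heading_to(sub_point, pre_point):
    # atan2 returns the heading in (-pi, pi]; the original's branch-wise
    # math.pi offsets amount to the same quadrant correction.
    return math.atan2(sub_point.y - pre_point.y, sub_point.x - pre_point.x)
```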
Далее запросите численность сотрудников фирмы и определите прибыль фирмы\n# в расчёте на одного сотрудника.\n\nproceeds = float(input(\"Введите выручку организации: \"))\ncosts = float(input(\"Введите сумму расходов организации: \"))\nif proceeds > costs:\n print(\"За выбранный период организация отработала с прибылью\")\n rent = costs / proceeds\n amount = int(input(\"введите количество сотрудников организации: \"))\n result = (proceeds - costs) / amount\n print(\"Прибыль на одного сотрудника составила\", result, \"рублей\")\nelse:\n print(\"За выбранный период организация отработала в убыток\")\n\n","sub_path":"lesson1/task5.py","file_name":"task5.py","file_ext":"py","file_size_in_byte":1551,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"70889371","text":"# -*- coding: utf-8 -*-\nimport json\nimport nltk\nimport re\nfrom nltk.corpus import stopwords\nfrom classifier import *\nfrom nltk import word_tokenize\nfrom nltk.util import ngrams\nfrom collections import Counter\nfrom nltk.corpus import stopwords\n\nstop = stopwords.words('english')\ndef ie_preprocess(document):\n document = ' '.join([i for i in document.split() if i not in stop])\n sentences = nltk.sent_tokenize(document)\n sentences = [nltk.word_tokenize(sent) for sent in sentences]\n sentences = [nltk.pos_tag(sent) for sent in sentences]\n return sentences\n\ndef RemovePunctuation(line):\n\ts = \"!#$%&()*+,-./:;<=>?@[\\]^_`{'~}\"\n\ts += '\"1234567890'\n\tfor x in s:\n\t\tline = line.replace(x, '')\n\treturn line\n\ndef extract_names(document,namesdic):\n\tsentences = ie_preprocess(document)\n\tfor tagged_sentence in sentences:\n\t\tfor chunk in nltk.ne_chunk(tagged_sentence):\n\t\t\tif type(chunk) == nltk.tree.Tree:\n\t\t\t\tif chunk.label() == 'PERSON':\n\t\t\t\t\t# names.append(' '.join([c[0] for c in chunk]))\n\t\t\t\t\ttemp = ' '.join([c[0] for c in chunk[0:2]])\n\t\t\t\t\t# A little bit change, set length to 2 which shows in [0:2]\n\t\t\t\t\tif len(temp.split())==2:\n\n\t\t\t\t\t\tif temp in namesdic:\n\t\t\t\t\t\t\tnamesdic[temp] += 1\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tnamesdic[temp] = 1\n\ndef GetJoker(file):\n\n\tj_file=open(file)\n\tj_str=j_file.read()\n\tj_data=json.loads(j_str)\n\tnamesdic={}\n\tjokeword = ['joke', 'lmao', 'lol', 'hhh', 'funny', '233','haha']\n\tfor i in j_data:\n\t\tfor j in jokeword:\n\t\t\tif j in i['text'].lower():\n\t\t\t\textract_names(i['text'],namesdic)\n\ttemp = sorted(namesdic.items(), key=lambda x: x[1], reverse=True)\n\tfor i in temp[0:20]:\n\t\tif 'golden' in i[0].lower():\n\t\t\tcontinue\n\t\tprint('The person who always said joke was ',i[0])\n\t\treturn i[0]\t\n\tj_file.close()\n\treturn temp\n\n\n\n\ndef getJoke(file):\n\tj_file=open(file)\n\n\tj_str=j_file.read()\n\tj_data=json.loads(j_str)\n\tstr1=''\n\tjokeword = ['joke', 'lmao', 'lol', 'hhh', 'funny', '233','haha']\n\tfor i in j_data:\n\t\tfor j in jokeword:\n\t\t\tif j in i['text'].lower():\n\t\t\t\tstr1 += RemovePunctuation(i['text'].lower())\n\t\t\t\ttoken = nltk.word_tokenize(str1)\n\t\t\t\ttengram = ngrams(token, 10)\n\t\t\t\telegram = ngrams(token, 11)\n\t# trigram = ngrams(token, 4)\n\t\t\t\tGG = Counter(tengram) + Counter(elegram) # + Counter(trigram)\n\tprint(GG.most_common(30))\n\n","sub_path":"joke.py","file_name":"joke.py","file_ext":"py","file_size_in_byte":2243,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"14004389","text":"import pyb\nimport upcd8544\nfrom machine import SPI,Pin\nfrom pyb import 
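The joke-mining record above tallies 10- and 11-grams with `nltk.util.ngrams` plus `collections.Counter`. The same counting pattern on a toy string, with whitespace tokenization standing in for `nltk.word_tokenize` so the sketch has no NLTK dependency:

```python
from collections import Counter

# Dependency-free sketch of the n-gram tally in getJoke above:
# zip over shifted token lists yields the n-grams, Counter tallies them.
def ngram_counts(text, n):
    tokens = text.lower().split()
    return Counter(zip(*(tokens[i:] for i in range(n))))

print(ngram_counts("ha ha ha very funny ha ha", 2).most_common(1))
# -> [(('ha', 'ha'), 3)]
```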
UART\nfrom ubinascii import hexlify\nfrom ubinascii import *#以上为声明使用到的类库\n\n\nleds = [pyb.LED(i) for i in range(1,5)]\nP,L,SHUCHU=0,0,0\nSPI = pyb.SPI(1) #DIN=>X8-MOSI/CLK=>X6-SCK\n#DIN =>SPI(1).MOSI 'X8' data flow (Master out, Slave in)\n#CLK =>SPI(1).SCK 'X6' SPI clock\nRST = pyb.Pin('X20')\nCE = pyb.Pin('X19')\nDC = pyb.Pin('X18')\nLIGHT = pyb.Pin('X17')\n#以上为初始化显示屏的函数,虽然这次没有用到显示,但是备用\nlcd_5110 = upcd8544.PCD8544(SPI, RST, CE, DC, LIGHT)\ncount_=0\nN2 = Pin('Y3', Pin.OUT_PP)\nN1 = Pin('Y6', Pin.OUT_PP)#定义通信系统启动引脚\nN1.low()\npyb.delay(2000)\nN1.high()\npyb.delay(10000)#拉高拉低引脚,启动通信系统\nu2 = UART(4, 115200)#定义串口4,设置 波特率为115200\nK=5#设置一个选择变量K\nwhile (K==5):#这个循环是为了设置通信区域模式为透传模式。\n u2.write('AT+CIPMODE=1\\r\\n')\n pyb.delay(500)\n if(u2.any()>0):\n print('透传')\n _dataRead=u2.readall()\n print('透传',_dataRead.decode('utf-8'))\n if(_dataRead.find(b'OK')>-1):\n K=0\n pyb.delay(20)\n#这个语句是为了搭建通信连接\nu2.write('AT+CIPSTART=\"TCP\",\"139.196.109.178\",30000\\r\\n')\npyb.delay(10000)\nprint('123')\n#这里是为了判断通信连接是否已经建立起来,如果没有建立起来通信的连接,则等待。\nwhile (K==0):\n pyb.delay(3000)\n if(u2.any()>0):\n _dataRead=u2.readall()\n print('oo',_dataRead)\n #这个判断是为了判断是否已经和服务器建立起连接来\n if(_dataRead.find(b'CONNECT OK')>-1):\n #开发板已经和服务器建立起连接来,则改变选择变量的值,使其进入下一个循环\n K=1\n pyb.LED(1).on()\n#这个循环是执行数据传输命令的执行所在,在这个循环中进行各种数据的裁剪拼接和发送。\nwhile (K==1):\n print('DOU')\n #u2.write('+++') 此时整个系统进入透传通信模式,想要退出,则发送‘+++’,即可 #退出;\n #u2.write('ATO0') 想让系统从指令模式进入透传模式,则发送‘ATO0’,则进入透传;\n #pyb.delay(1500)\n pyb.LED(2).off()\n pyb.LED(3).off()\n pyb.LED(2).on()\n u2.write('TPGPS,1234567890abcde,36.67191670,119.17200000,201701120825,25,50,END')\n #这个报文详细格式参照服务平台示例报文格式。\n #把这格式里面的经纬度数据换成从定位系统获取到的经纬度,就可以实时定位了。\n pyb.delay(13000)#延时一下时间,官方提供的测试平台有上传频率限制\n if(u2.any()>0):\n#这个返回仅适用于官方提供的服务平台\n _dataRead=u2.readall()\n print('1212',_dataRead)\n pyb.LED(3).on()\n pyb.delay(10000)","sub_path":"TPYBoard-v70x-master/03.GPRS功能测试/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2875,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"71784410","text":"\"\"\"Datasets of Halide pipelines.\"\"\"\nimport logging\n\nimport os\n# import json\nimport time\nimport numpy as np\nimport scipy.sparse as sp\nimport torch as th\nimport msgpack\n\nimport torch.utils.data as data\nfrom torch.utils.data.dataloader import DataLoader\n\nfrom graphscheduler.func_dag import FuncDAG\n\nLOG = logging.getLogger(__name__)\n\nfrom .constants import SCHEDULE_F\n\nclass MSGPackDataset(data.Dataset):\n def __init__(self, root):\n super(MSGPackDataset, self).__init__()\n self.root = root\n self.files = sorted([f for f in os.listdir(self.root) if os.path.splitext(f)[-1] == \".mp\"])\n\n def __len__(self):\n return len(self.files)\n\n def __getitem__(self, idx):\n try:\n path = os.path.join(self.root, self.files[idx])\n with open(path, \"rb\") as fid:\n data = msgpack.load(fid)\n except Exception as e:\n print(\"Exception when loading {}\".format(path))\n raise e\n return data\n\n\nclass DAGDataset(MSGPackDataset):\n \"\"\"Pipelines, represented as DAG of Func stages.\"\"\"\n\n def __getitem__(self, idx):\n data = super(DAGDataset, self).__getitem__(idx)\n\n start = time.time()\n graph = FuncDAG(features=data[b\"features\"][b\"stage\"], nodes=data[b\"dag\"][b\"nodes\"], edges=data[b\"dag\"][b\"edges\"])\n elapsed = time.time() - start\n LOG.debug(\"graph creation {} ms\".format(elapsed*1000))\n\n sample = {\n \"graph\": graph,\n \"runtime\": data[b\"time\"],\n \"root_runtime\": data[b\"time_root\"],\n \"pipeline_seed\": 
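The GPRS record above drives the modem by writing AT commands and polling `u2.any()` in three separately hand-written loops. A small helper can factor that pattern out; this is a sketch only, assuming the MicroPython `pyb` environment the module already uses (`UART.write/any/read`, `pyb.millis`, `pyb.delay`), and `send_at` is an illustrative name:

```python
import pyb

def send_at(uart, cmd, expect=b'OK', timeout_ms=3000):
    # Write one AT command, then poll the UART until the expected token
    # shows up or the deadline passes. Returns (success, raw_reply).
    uart.write(cmd + '\r\n')
    deadline = pyb.millis() + timeout_ms
    buf = b''
    while pyb.millis() < deadline:
        if uart.any():
            buf += uart.read()
            if expect in buf:
                return True, buf
        pyb.delay(20)
    return False, buf
```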
data[b\"pipeline_seed\"],\n \"schedule_seed\": data[b\"schedule_seed\"],\n }\n return sample\n\n\nclass AdjacencyDataset(MSGPackDataset):\n \"\"\"Pipeline graphs represented as features + adjancency matrix\"\"\"\n\n def __getitem__(self, idx):\n data = super(AdjacencyDataset, self).__getitem__(idx)\n\n features = data[b\"features\"][b\"stage\"]\n nodes = data[b\"dag\"][b\"nodes\"]\n edges = data[b\"dag\"][b\"edges\"]\n graph = FuncDAG(features=features, nodes=nodes, edges=edges)\n\n feats, adj = graph.adjacency_repr()\n\n adj = self.normalize(adj.numpy())\n adj = th.from_numpy(adj.astype(np.float32))\n\n runtime = data[b\"time\"]\n\n return feats, adj, th.tensor(runtime).float()\n\n def normalize(self, mx):\n \"\"\"Row-normalize sparse matrix\"\"\"\n rowsum = np.array(mx.sum(1))\n r_inv = np.power(rowsum, -1).flatten()\n r_inv[np.isinf(r_inv)] = 0.\n r_mat_inv = sp.diags(r_inv)\n mx = r_mat_inv.dot(mx)\n return mx\n\n return sample\n\n\nclass FlatDataset(data.Dataset):\n \"\"\"Pipelines, represented as arrays of features.\"\"\"\n def __init__(self, root):\n super(FlatDataset, self).__init__()\n self.root = root\n self.files = sorted([f for f in os.listdir(self.root) if os.path.splitext(f)[-1] == \".mp\"])\n\n def __len__(self):\n return len(self.files)\n\n def __getitem__(self, idx):\n path = os.path.join(self.root, self.files[idx])\n with open(path, \"rb\") as fid:\n data = msgpack.load(fid)\n\n pipe_f = []\n sched_f = []\n for f in data[b\"features\"][b\"stage\"]:\n pipe_f.append(np.array(f[b\"pipeline\"]))\n sched_f.append(np.array(f[b\"schedule\"][:SCHEDULE_F]))\n\n pipeline_feature = np.stack(pipe_f, 1)\n schedule_feature = np.log(1.0 + np.stack(sched_f, 1))\n runtime = data[b\"time\"]\n length = pipeline_feature.shape[1]\n pipe_id = data[b\"pipeline_seed\"]\n\n return pipeline_feature, schedule_feature, runtime, length, pipe_id\n\n\ndef graph_collate_fn(lst, mdl):\n ret = []\n for sample in lst:\n g = sample[\"graph\"]\n c = th.tensor(sample[\"runtime\"])\n c_root = th.tensor(sample[\"root_runtime\"])\n p = th.tensor(sample[\"pipeline_seed\"])\n s = th.tensor(sample[\"schedule_seed\"])\n\n mdl.initialize_graph(g)\n\n ret.append((g, c, c_root, p, s))\n return ret\n\n\ndef flat_collate_fn(data):\n '''\n Creates mini-batch tensors from the list of tuples (feature, runtime).\n Features are of variable length : num_funcs * num_features so we need to pad them.\n Args:\n data: list of tuple (feature, runtime). 
\n - feature: torch tensor of shape (num_funcs * num_features, 1).\n - runtime: torch tensor of shape (1)\n Returns:\n feature: torch tensor of shape (batch_size, max_num_funcs * num_features, 1).\n runtime: torch tensor of shape (batch_size, 1).\n - max_num_funcs is the maximum number of funcs over all pipelines in this batch\n '''\n min_length = 22 # minimum number of functions required (zero pad to this length in needed)\n # figure out max num funcs over all pipelines in this batch\n lengths = [tup[3] for tup in data]\n\n max_num_stages = max(min_length, max(lengths))\n\n # TODO(mgharbi): how are the 7 types aggregated?\n\n # assuming default type for zeros is float (7 dimensions for pipe feats, 1 for each type)\n batch_pipeline_features = np.zeros((len(data), 57, 7, max_num_stages), dtype=np.float32)\n batch_schedule_features = np.zeros((len(data), SCHEDULE_F, 1, max_num_stages), dtype=np.float32) \n batch_runtimes = np.zeros((len(data), 1), dtype=np.float32)\n batch_ids = [tup[4] for tup in data]\n batch_lengths = np.array([tup[3] for tup in data]).astype(np.float32)\n\n for i, (pipeline_feature, schedule_feature, runtime, length, _id) in enumerate(data):\n lpad = (max_num_stages - lengths[i]) // 2\n rpad = max_num_stages - lpad - lengths[i]\n\n if lpad != 0:\n batch_pipeline_features[i, ..., :lpad] = 0.0\n batch_schedule_features[i, ..., :lpad] = 0.0\n\n batch_pipeline_features[i, ..., lpad:(lpad+lengths[i])] = np.reshape(pipeline_feature, (57, 7, lengths[i]))\n batch_schedule_features[i, ..., lpad:(lpad+lengths[i])] = np.reshape(schedule_feature, (SCHEDULE_F, 1, lengths[i]))\n\n if rpad != 0:\n batch_pipeline_features[i, ..., -rpad] = 0.0\n batch_schedule_features[i, ..., -rpad] = 0.0\n\n batch_runtimes[i] = runtime\n\n # hack: clamp runtimes to avoid dead relus\n '''\n max_runtime = 250\n batch_runtimes[batch_runtimes > max_runtime] = max_runtime\n '''\n\n pipeline_features_t = th.from_numpy(batch_pipeline_features)\n schedule_features_t = th.from_numpy(batch_schedule_features) \n runtimes_t = th.from_numpy(batch_runtimes) \n lengths_t = th.from_numpy(batch_lengths)\n\n return pipeline_features_t, schedule_features_t, runtimes_t, batch_ids, lengths_t \n\n\nclass DAGDataLoader(DataLoader):\n def __init__(self, data, mdl, batch_size=1, num_workers=0, shuffle=False):\n super(DAGDataLoader, self).__init__(\n data, batch_size=batch_size, num_workers=num_workers,\n shuffle=shuffle,\n collate_fn=lambda l: graph_collate_fn(l, mdl))\n\n\nclass FlatDataLoader(DataLoader):\n def __init__(self, data, batch_size=1, num_workers=0, shuffle=False):\n super(FlatDataLoader, self).__init__(\n data, batch_size=batch_size, num_workers=num_workers,\n shuffle=shuffle,\n collate_fn=flat_collate_fn)\n","sub_path":"graphscheduler/dataset.py","file_name":"dataset.py","file_ext":"py","file_size_in_byte":6713,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"163635343","text":"import subprocess\nimport serial\nimport os\nimport xml.etree.ElementTree as ET\nimport time\n\nSAS_ADDR_SCAN = \"90B11C4E9B88\"\nSAS_ADDR_HIGH = \"5\" + SAS_ADDR_SCAN[:6] + \"3\"\nSAS_ADDR_LOW = SAS_ADDR_SCAN[6:] + \"FF\"\nORIGIN_XML = \"G5_Exp_X00_00_20140910_Version_1_14.xml\"\nCMD1 = \"xmfg -b temp.xml sas2xMfg.xsd TEMP.bin\"\nCMD2 = \"TTPMACRO.EXE /I XMODEM.ttl\"\nCOM_PORT = \"COM8\"\n\nfile_list_1 = [\"temp.xml\", \"out.xml\", \"peppered.xml\", \"temp.bin\"]\nfor i in file_list_1:\n\tif(os.path.exists(i) == True):\n\t\tos.remove(i)\n\nfile_list_2 = [ORIGIN_XML, \"xmfg.exe\", 
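`AdjacencyDataset.normalize` above row-normalizes the adjacency matrix, patching the infinities that `np.power(rowsum, -1)` produces on all-zero rows. The same row scaling in isolation, using `np.divide`'s `where=` guard instead; scipy assumed, input matrix made up for the demo:

```python
import numpy as np
import scipy.sparse as sp

def row_normalize(mx):
    # Scale every row to sum to 1; zero rows keep a 0 inverse instead of inf.
    rowsum = np.array(mx.sum(1)).flatten()
    r_inv = np.divide(1.0, rowsum, out=np.zeros_like(rowsum, dtype=float),
                      where=rowsum != 0)
    return sp.diags(r_inv).dot(mx)

A = sp.csr_matrix(np.array([[1., 1., 0.], [0., 0., 0.], [2., 2., 4.]]))
print(row_normalize(A).toarray())
# rows: [0.5 0.5 0.], [0. 0. 0.], [0.25 0.25 0.5]
```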
\"sas2xMfg.xsd\", \"TTPMACRO.EXE\", \"XMODEM.ttl\"]\nfor i in file_list_2:\n\tif(os.path.exists(i) == False):\n\t\tprint(\"Couldn't find %s\"%(i))\n\nprint(\"SAS Address --> High: %s, Low: %s\"%(SAS_ADDR_HIGH, SAS_ADDR_LOW))\ntree = ET.parse(ORIGIN_XML)\nroot = tree.getroot()\n\nmfg_major = int(root.find(\"pageMfgImageVersion\").find(\"mpMajor\").text, 16)\nmfg_minor = int(root.find(\"pageMfgImageVersion\").find(\"mpMinor\").text, 16)\nprint(\"MFG Version: %d.%d\"%(mfg_major, mfg_minor))\n\nroot.find(\"pageSasAddr\").find(\"wwnHigh\").text = SAS_ADDR_HIGH\nroot.find(\"pageSasAddr\").find(\"wwnLow\").text = SAS_ADDR_LOW\nroot.find(\"pageReportGeneral\").find(\"enclLogicalIdHigh\").text = SAS_ADDR_HIGH\nroot.find(\"pageReportGeneral\").find(\"enclLogicalIdLow\").text = SAS_ADDR_LOW\nroot.find(\"pageSasAddr\").find(\"wwnHigh\").set(\"QUERY\", \"FALSE\")\nroot.find(\"pageSasAddr\").find(\"wwnLow\").set(\"QUERY\", \"FALSE\")\nroot.find(\"pageReportGeneral\").find(\"enclLogicalIdHigh\").set(\"QUERY\", \"FALSE\")\nroot.find(\"pageReportGeneral\").find(\"enclLogicalIdLow\").set(\"QUERY\", \"FALSE\")\n\ntree.write(\"temp.xml\")\n\nret = subprocess.call(CMD1)\nif(ret != 0):\n\tprint(\"Create XML File Fail!!\")\n\ndef COM_CMD(cmd):\n\tser.write((cmd+\"\\r\").encode())\n\tret = ser.readlines()\n\tfor i in range(len(ret)):\n\t\tret[i] = ret[i].decode().strip()\n\t\tprint(ret[i])\n\nCMD_LIST = [\"rev\", \"showmfg\", \"sasaddr\", \"phyinfo\"]\nser = serial.Serial(COM_PORT, 38400, timeout = 1)\nfor i in CMD_LIST:\n\tCOM_CMD(i)\nser.close()\n\nret = subprocess.call(CMD2)\nif(ret != 0):\n\tprint(\"Program MFG File Fail!!\")\n\ntime.sleep(1)\n\nser = serial.Serial(COM_PORT, 38400, timeout = 1)\nfor i in CMD_LIST:\n\tCOM_CMD(i)\nser.close()","sub_path":"Module/G5_SAS_EXP.py","file_name":"G5_SAS_EXP.py","file_ext":"py","file_size_in_byte":2141,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"170614621","text":"from mpl_toolkits.mplot3d import Axes3D\nfrom matplotlib.collections import PolyCollection\nfrom matplotlib.colors import colorConverter\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport scipy.io as spi\n\nraw_data = spi.loadmat('../input/XJlaman_4_1.mat')\nx = raw_data['lamanxiangjing']\n# 取两个样品的数据\nyy = x.T[:,:]\nxx = np.arange(1,2091)\n\nfig = plt.figure()\nax = fig.add_subplot(111, projection='3d')\nfor i in np.arange(0,4):\n xs = np.arange(2090)\n ys = yy[i]\n ax.bar(xs, ys, zs=i, zdir='y',alpha=0.8)\n # ax.plot(xs,ys,zs=z,zdir='y',alpha=0.8)\n\nax.set_xlabel('X')\nax.set_ylabel('Y')\nax.set_zlabel('Z')\n\nplt.show()","sub_path":"MatPlotLib/3D_三维图形.py","file_name":"3D_三维图形.py","file_ext":"py","file_size_in_byte":643,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"430388885","text":"# uncompyle6 version 3.7.4\n# Python bytecode 3.5 (3351)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: build/bdist.macosx-10.6-x86_64/egg/aruana/same_words.py\n# Compiled at: 2019-02-06 03:52:08\n# Size of source mod 2**32: 4814 bytes\npt_br = {' abs ': ' abraços ', \n ' agr ': ' agora ', \n ' amg ': ' amigo ', \n ' bb ': ' bebê ', \n ' bj ': ' beijo ', \n ' bjs ': ' beijos ', \n ' blz ': ' beleza ', \n ' bpn ': ' bom para nós ', \n ' brb ': ' volto já ', \n ' clr ': ' caralho ', \n ' clrh ': ' caralho ', \n ' cmg ': ' comigo ', \n ' cm ': ' com ', \n ' ctz ': ' certeza ', \n ' c* ': ' cu ', \n ' cvs ': ' conversa ', \n ' d+ ': ' demais ', \n 
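The SAS expander script above derives the two WWN halves from the scanned 12-hex-digit address purely by string slicing; traced by hand with the same constant (reading the leading "5" as an NAA-5 prefix is an interpretation, not stated in the source):

```python
SAS_ADDR_SCAN = "90B11C4E9B88"                 # 12 hex digits from the label
SAS_ADDR_HIGH = "5" + SAS_ADDR_SCAN[:6] + "3"  # -> "590B11C3"
SAS_ADDR_LOW = SAS_ADDR_SCAN[6:] + "FF"        # -> "4E9B88FF"
print(SAS_ADDR_HIGH, SAS_ADDR_LOW)
```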
' dlc ': ' delícia ', \n ' dlç ': ' delícia ', \n ' dm ': ' mensagem privada ', \n ' dmr ': ' demorou ', \n ' dnv ': ' de novo ', \n ' dms ': ' demais ', \n ' doq ': ' do que ', \n ' dps ': ' depois ', \n ' duma ': ' de uma ', \n ' erridê ': ' rodado ', \n ' erride ': ' rodado ', \n ' euem ': ' eu hein ', \n ' fdp ': ' filho da puta ', \n ' fdpta ': ' filho da puta ', \n ' glr ': ' galera ', \n ' gnt ': ' gente ', \n ' hj ': ' hoje ', \n ' ja ': ' já ', \n ' k ': ' risos ', \n ' krai ': ' caralho ', \n ' kg ': ' quilo ', \n ' kgs ': ' quilos ', \n ' kk ': ' risos ', \n ' l1xo ': ' lixo ', \n ' lol ': ' risos ', \n ' miga ': ' amiga ', \n ' migo ': ' amigo ', \n ' mlc ': ' maluco ', \n ' mlk ': ' moleque ', \n ' mlks ': ' moleques ', \n ' mnh ': ' minha ', \n ' mp ': ' ministério público ', \n ' msm ': ' mesmo ', \n ' mta ': ' muita ', \n ' muleke ': ' moleque ', \n ' mulekes ': ' moleques ', \n ' nº ': ' número ', \n ' n° ': ' número ', \n ' n ': ' não ', \n ' ñ ': ' não ', \n ' ng ': ' ninguém ', \n ' np ': ' sem problemas ', \n ' ngm ': ' ninguém ', \n ' obg ': ' obrigado ', \n ' omg ': ' meu deus ', \n ' oq ': ' o que ', \n ' pfv ': ' por favor ', \n ' pls ': ' por favor ', \n ' plmns ': ' pelo menos ', \n ' pdc ': ' pode crer ', \n ' ppk ': ' pepeca ', \n ' pprt ': ' papo reto ', \n ' pq ': ' porque ', \n ' pqp ': ' puta que pariu ', \n ' pra ': ' para ', \n ' pro ': ' para o ', \n ' q ': ' que ', \n ' qd ': ' quando ', \n ' qdo ': ' quando ', \n ' qndo ': ' quando ', \n ' rd ': ' rodado ', \n ' rs ': ' risos ', \n ' sdd ': ' saudade ', \n ' sdds ': ' saudades ', \n ' sfd ': ' safado ', \n ' slc ': ' você é louco ', \n ' smdd ': ' sem maldade ', \n ' so ': ' só ', \n ' smp ': ' sempre ', \n ' sqn ': ' só que não ', \n ' sr ': ' senhor ', \n ' tá ': ' está ', \n ' ta ': ' está ', \n ' tava ': ' estava ', \n ' tb ': ' também ', \n ' tbm ': ' também ', \n ' td ': ' tudo ', \n ' tdb ': ' tudo bom ', \n ' tds ': ' todos ', \n ' tlg ': ' está ligado ', \n ' tmj ': ' estamos juntos ', \n ' tnc ': ' tomar no cu ', \n ' tô ': ' estou ', \n ' to ': ' estou ', \n ' tomannk ': ' tomar no cu ', \n ' tqr ': ' tem que respeitar ', \n ' v4g4bund0 ': ' vagabundo ', \n ' vdb ': ' vai dar bom ', \n ' vdd ': ' verdade ', \n ' vei ': ' velho ', \n ' vc ': ' você ', \n ' vcs ': ' vocês ', \n ' vrdd ': ' verdade ', \n ' vsf ': ' vá se foder ', \n ' wpp ': ' whatsapp '}\nen = {\" i'm \": ' I am ', \n \" i'm'a \": ' I am about to ', \n \" i'm'o \": ' I am going to ', \n ' ive ': ' I have ', \n \" he's \": ' he is ', \n \" she's \": ' she is ', \n \" that's \": ' that is ', \n \" what's \": ' what is ', \n \" where's \": ' where is ', \n \" haven't \": ' have not ', \n \" mustn't've \": ' must not have ', \n \" shouldn't've \": ' should not have ', \n \" shouldn't \": ' should not ', \n ' ur ': ' you are ', \n \" somebody's \": ' somebody is ', \n \" someone's \": ' someone is ', \n \" something's \": ' something is ', \n \" that's \": ' that is ', \n \" 'tis \": ' it is ', \n \" 'twas \": ' it was ', \n \" wasn't \": ' was not ', \n \" we'd've \": ' we would have ', \n \" weren't \": ' were not ', \n \" when's \": ' when is ', \n \" who's \": ' who is ', \n \" there's \": ' there is ', \n \" why's \": ' why is ', \n \" let's \": ' let us ', \n \" how'd \": ' how did ', \n \" needn't \": ' need not ', \n \" ne'er \": ' never ', \n \" o'clock \": ' of the clock ', \n \" o'er \": ' over ', \n \" o'l \": ' old ', \n \" y'all \": ' you all ', \n \" shan't \": ' shall not ', \n \"'ll\": ' will', \n \"'d've\": ' 
would have', \n \"'ve\": ' have', \n \"'re\": ' are', \n \"'d\": ' would', \n \" won't \": ' will not ', \n \" mayn't \": ' may not ', \n \" mightn't \": ' might not ', \n \" mustn't \": ' must not ', \n \" aren't \": ' are not ', \n \" isn't \": ' is not ', \n \" wouldn't \": ' would not ', \n \" can't \": ' can not ', \n ' cannot ': ' can not ', \n \" don't \": ' do not ', \n \" didn't \": ' did not ', \n \" doesn't \": ' does not ', \n \" it's \": ' it is ', \n \" weren't \": ' were not ', \n ' okay ': ' ok ', \n \" c'mon \": ' come on ', \n \"in'\": ' ing', \n \" everyone's \": ' everyone is ', \n \"'s\": ' s', \n \" ain't \": ' am not ', \n \" amn't \": ' am not ', \n \" cain't \": ' can not ', \n \" 'cause \": ' because ', \n \" could've \": ' could have ', \n \" couldn't've \": ' could not have ', \n \" daren't \": ' dare not ', \n \" daresn't \": ' dare not ', \n \" e'er \": ' ever ', \n ' finna ': ' fixing to ', \n ' gimme ': ' give me ', \n ' gonna ': ' going to ', \n \" gon't \": ' go not ', \n ' gotta ': ' got to ', \n \" hadn't \": ' had not ', \n \" hasn't \": ' has not '}\nfr = {\" c'est \": ' ce est ', \n \"d'\": 'de ', \n \"j'\": 'je ', \n \"s'\": 'se ', \n \"qu'\": 'se ', \n \"t'\": 'te '}","sub_path":"pycfiles/Aruana-1.1.1-py3.5/same_words.cpython-35.py","file_name":"same_words.cpython-35.py","file_ext":"py","file_size_in_byte":5173,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"9907009","text":"import numpy as np\nimport transforms3d.euler as txe\nfrom typing import Dict, Union\nfrom media_processing_lib.image import imgResize\n\nfrom .carla_h5_paths_reader import CarlaH5PathsReader\nfrom ....utilities import h5ReadDict, npGetInfo\n\n# TODO: All norms now put data in [0 : 1]. 
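The `pt_br`/`en`/`fr` tables above map slang and contractions to expanded forms but are never applied in this record. Applying such a table is a straight substring pass; replacing longer keys first keeps overlapping entries such as `"'d've"` from being clobbered by the shorter `"'d"`. A minimal sketch with a two-entry demo table:

```python
def expand(text, table):
    # Longest keys first, so "'d've" wins over "'d".
    for key in sorted(table, key=len, reverse=True):
        text = text.replace(key, table[key])
    return text

en_demo = {" can't ": ' can not ', "'ll": ' will'}
print(expand(" i can't say they'll come ", en_demo))
# -> " i can not say they will come "
```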
We should look at the rederObj and if some dims want other range, transform\n# the data to that range.\n\ndef rgbNorm(x : np.ndarray, readerObj:CarlaH5PathsReader) -> np.ndarray:\n\t# x [MBx854x854x3] => [MBx256x256x3] :: [0 : 255]\n\tx = imgResize(x, height=readerObj.desiredShape[0], width=readerObj.desiredShape[1], \\\n\t\tresizeLib=\"opencv\", onlyUint8=False)\n\t# x :: [0 : 255] => [0: 1]\n\tx = x.astype(np.float32) / 255\n\treturn x\n\ndef wireframeNorm(x : np.ndarray, readerObj:CarlaH5PathsReader) -> np.ndarray:\n\t# Wireframe is stored as RGB with 3 identical channels\n\t# x :: [0 : 255] ; (MB, H, W, 3) => (MB, H, W)\n\tx = x[..., 0]\n\t# Binarize it x :: [0 : 255] => [0 : 1]\n\tx = (x > 0).astype(np.uint8)\n\t# Resize it to desired shape\n\tx = imgResize(x, interpolation=\"nearest\", height=readerObj.desiredShape[0], \\\n\t\twidth=readerObj.desiredShape[1], resizeLib=\"opencv\", onlyUint8=False)\n\treturn x.astype(np.float32)\n\ndef wireframeRegressionNorm(x : np.ndarray, readerObj : CarlaH5PathsReader) -> np.ndarray:\n\treturn rgbNorm(x, readerObj)\n\ndef halftoneNorm(x : np.ndarray, readerObj:CarlaH5PathsReader) -> np.ndarray:\n\treturn rgbNorm(x, readerObj)\n\ndef depthNorm(x : np.ndarray, readerObj:CarlaH5PathsReader) -> np.ndarray:\n\tdepthStats = {\"min\" : 0, \"max\" : readerObj.hyperParameters[\"maxDepthMeters\"]}\n\n\tx = imgResize(x, height=readerObj.desiredShape[0], width=readerObj.desiredShape[1], \\\n\t\tresizeLib=\"opencv\", onlyUint8=False)\n\t# Depth is stored in [0 : 1] representing up to 1000m from simulator\n\tx = np.clip(x * 1000, depthStats[\"min\"], depthStats[\"max\"])\n\tx = (x - depthStats[\"min\"]) / (depthStats[\"max\"] - depthStats[\"min\"])\n\treturn np.expand_dims(x, axis=-1)\n\ndef poseNorm(x : np.ndarray, readerObj:CarlaH5PathsReader) -> np.ndarray:\n\tdef positionNorm(x : np.ndarray, readerObj:CarlaH5PathsReader) -> np.ndarray:\n\t\tif readerObj.hyperParameters[\"positionNormalization\"] == \"min_max\":\n\t\t\tpositionStats = h5ReadDict(readerObj.dataset[\"others\"][\"dataStatistics\"][\"position\"])\n\t\t\tminPos, maxPos = positionStats[\"min\"][0 : 3], positionStats[\"max\"][0 : 3]\n\t\t\tx = (x - minPos) / (maxPos - minPos)\n\t\telif readerObj.hyperParameters[\"positionNormalization\"] == \"none\":\n\t\t\tpass\n\n\t\treturn x\n\n\tdef orientationNorm(x : np.ndarray, readerObj:CarlaH5PathsReader) -> np.ndarray:\n\t\tassert readerObj.hyperParameters[\"orientationRepresentation\"] == \"euler\"\n\n\t\tif readerObj.hyperParameters[\"orientationNormalization\"] == \"min_max\":\n\t\t\t# -180 : 180 => [-1 : 1]\n\t\t\tx = x / 180\n\t\t\t# [-1 : 1] => [0 : 1]\n\t\t\tx = (x + 1) / 2\n\t\telif readerObj.hyperParameters[\"orientationNormalization\"] == \"none\":\n\t\t\tpass\n\t\telif readerObj.hyperParameters[\"orientationNormalization\"] == \"sin_cos\":\n\t\t\t# -180 : 180 => [-1 : 1]\n\t\t\tx = x / 180\n\t\t\t# [-1 : 1] => [-pi : pi]\n\t\t\tx = x * np.pi\n\t\t\t# [-pi : pi] => [-1 : 1]\n\t\t\txSin = np.sin(x)\n\t\t\txCos = np.cos(x)\n\t\t\tx = np.concatenate([xSin, xCos], axis=-1)\n\t\treturn x\n\n\n\tposition = positionNorm(x[..., 0 : 3], readerObj)\n\torientation = orientationNorm(x[..., 3 : ], readerObj)\n\treturn np.concatenate([position, orientation], axis=-1)\n\ndef opticalFlowNorm(x : np.ndarray, readerObj:CarlaH5PathsReader) -> np.ndarray:\n\t# Optical flow is in [-1:1] and 100% of percentage. 
Result is in [0:1] using only [-x%:x%] of data.\n\tdef opticalFlowPercentageTransform(x, opticalFlowPercentage):\n\t\t# x :: [0 : 1], centered in 0\n\t\tx = (x - 0.5) * 2\n\t\t# x :: [-1 : 1], centered in 0\n\t\topticalFlowPercentage = np.array(opticalFlowPercentage) / 100\n\t\tflow_x = np.expand_dims(x[..., 0], axis=-1)\n\t\tflow_y = np.expand_dims(x[..., 1], axis=-1)\n\t\t# flow_x :: [-x% : x%], flow_y :: [-y% : y%]\n\t\tflow_x = np.clip(flow_x, -opticalFlowPercentage[0], opticalFlowPercentage[0])\n\t\tflow_y = np.clip(flow_y, -opticalFlowPercentage[1], opticalFlowPercentage[1])\n\t\t# flow_x in [0 : 2*x%], flow_y :: [0 : 2*y%]\n\t\tflow_x += opticalFlowPercentage[0]\n\t\tflow_y += opticalFlowPercentage[1]\n\t\t# flow_x :: [0 : 1], flow_y :: [0 : 1]\n\t\tflow_x *= 1 / (2 * opticalFlowPercentage[0])\n\t\tflow_y *= 1 / (2 * opticalFlowPercentage[1])\n\t\t# flow :: [0 : 1]\n\t\tflow = np.concatenate([flow_x, flow_y], axis=-1).astype(np.float32)\n\t\treturn flow\n\n\tdef opticalFlowMagnitude(x):\n\t\t# flow :: [-1 : 1] => [0 : sqrt(2)]\n\t\tx = np.hypot(x[..., 0], x[..., 1])\n\t\t# norm :: [0 : sqrt(2)] => [0 : 1]\n\t\tx = x / np.sqrt(2)\n\t\treturn np.expand_dims(x, axis=-1)\n\n\t# x :: [0 : 1] => [-1 : 1]\n\tx = (x - 0.5) * 2\n\tx = imgResize(x, height=readerObj.desiredShape[0], width=readerObj.desiredShape[1], \\\n\t\tresizeLib=\"opencv\", onlyUint8=False)\n\n\t# if readerObj.hyperParameters[\"opticalFlowPercentage\"] != (100, 100):\n\t# \tx = opticalFlowPercentageTransform(x, readerObj.hyperParameters[\"opticalFlowPercentage\"])\n\n\tif readerObj.hyperParameters[\"opticalFlowMode\"] == \"xy\":\n\t\treturn x\n\telif readerObj.hyperParameters[\"opticalFlowMode\"] == \"magnitude\":\n\t\treturn opticalFlowMagnitude(x)\n\telif readerObj.hyperParameters[\"opticalFlowMode\"] == \"xy_plus_magnitude\":\n\t\treturn np.concatenate([x, opticalFlowMagnitude(x)], axis=-1)\n\tassert False\n\n# def opticalFlowNorm(x : np.ndarray, readerObj:CarlaH5PathsReader) -> np.ndarray:\n# \t# Data in [0 : 1]\n# \twidth, height = readerObj.desiredShape\n# \tx = imgResize(x, height=height, width=width, resizeLib=\"opencv\", onlyUint8=False)\n# \tx[..., 0] = np.float32(np.int32(x[..., 0] * width))\n# \tx[..., 1] = np.float32(np.int32(x[..., 1] * height))\n# \treturn x\n\ndef normalNorm(x:np.ndarray, readerObj:CarlaH5PathsReader) -> np.ndarray:\n\tx = imgResize(x, height=readerObj.desiredShape[0], width=readerObj.desiredShape[1], \\\n\t\tresizeLib=\"opencv\", onlyUint8=False)\n\t# Normals are stored as [0 - 255] on 3 channels, representing orientation of the 3 axes.\n\tx = x.astype(np.float32) / 255\n\treturn x\n\ndef semanticSegmentationNorm(x:np.ndarray, readerObj:CarlaH5PathsReader) -> np.ndarray:\n\tlabelKeys = list({\n\t\t(0, 0, 0): \"Unlabeled\",\n\t\t(70, 70, 70): \"Building\",\n\t\t(153, 153, 190): \"Fence\",\n\t\t(160, 170, 250): \"Other\",\n\t\t(60, 20, 220): \"Pedestrian\",\n\t\t(153, 153, 153): \"Pole\",\n\t\t(50, 234, 157): \"Road line\",\n\t\t(128, 64, 128): \"Road\",\n\t\t(232, 35, 244): \"Sidewalk\",\n\t\t(35, 142, 107): \"Vegetation\",\n\t\t(142, 0, 0): \"Car\",\n\t\t(156, 102, 102): \"Wall\",\n\t\t(0, 220, 220): \"Traffic sign\"\n\t}.keys())\n\tNC = len(labelKeys)\n\n\tif readerObj.hyperParameters[\"semanticNumClasses\"] == NC and x.shape[-1] == NC:\n\t\tassert x.min() >= 0 and x.max() <= 1\n\telse:\n\t\tsumLabelKeys = list(map(lambda x : x[0] + x[1] * 256 + x[2] * 256 * 256, labelKeys))\n\t\tx = x.astype(np.uint32)\n\t\tx = x[..., 0] + x[..., 1] * 256 + x[..., 2] * 256 * 256\n\t\tfor i in 
range(NC):\n\t\t\tx[x == sumLabelKeys[i]] = i\n\t\tx = x.astype(np.uint8)\n\t\tx = imgResize(x, interpolation=\"nearest\", height=readerObj.desiredShape[0], \\\n\t\t\twidth=readerObj.desiredShape[1], resizeLib=\"opencv\", onlyUint8=False)\n\t\t# Some fancy way of doing one-hot encoding.\n\t\tx = np.eye(NC)[x].astype(np.float32)\n\n\treturn x","sub_path":"neural_wrappers/readers/datasets/carla/normalizers.py","file_name":"normalizers.py","file_ext":"py","file_size_in_byte":7043,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"583538294","text":"import operator\nfrom gtnlplib import scorer\nfrom gtnlplib import preproc\nfrom gtnlplib import clf_base\nfrom gtnlplib.constants import DEV_FILE, OFFSET, TRAIN_FILE\n\nfrom gtnlplib.constants import START_TAG, END_TAG\n\nargmax = lambda x : max(x.iteritems(),key=operator.itemgetter(1))[0]\ndef classifierTagger(words,featfunc,weights,all_tags):\n \"\"\"\n :param words: list of words\n :param features: function from lists of words and tags to list of features\n :param weights: defaultdict of weights\n :param all_tags: list of permissible tags\n :returns list of tags\n \"\"\"\n def inner_product(weights, features):\n ans = 0.0\n for key in features:\n if key in weights:\n ans += features[key] * weights[key]\n return ans\n \n ''' viterbi tagger \n out = []\n opt = []\n prev_link = []\n cur_stats = {}\n cur_links = {} \n for tag in all_tags:\n feat = featfunc(words, tag, START_TAG, 0)\n cur_stats[tag] = inner_product(weights, feat)\n cur_links[tag] = START_TAG\n prev_link.append(cur_links)\n opt.append(cur_stats)\n \n for m in range(1, len(words)):\n cur_stats = {}\n cur_links = {}\n for cur_tag in all_tags:\n cur_stats[cur_tag] = {} \n for prev_tag in all_tags:\n feat = featfunc(words, cur_tag, prev_tag, m)\n cur_stats[cur_tag][prev_tag] = opt[m - 1][prev_tag] + inner_product(weights, feat)\n cur_links[cur_tag] = argmax(cur_stats[cur_tag])\n cur_stats[cur_tag] = cur_stats[cur_tag][cur_links[cur_tag]] \n opt.append(cur_stats)\n prev_link.append(cur_links)\n \n last_stats = {}\n for prev_tag in all_tags:\n feat = featfunc(words, END_TAG, prev_tag, len(words))\n last_stats[prev_tag] = inner_product(weights, feat) + opt[len(words) - 1][prev_tag]\n \n last_tag = argmax(last_stats)\n out.append(last_tag)\n for m in reversed(xrange(len(words))):\n if m == 0:\n break\n prev_tag = prev_link[m][last_tag]\n out.append(prev_tag)\n last_tag = prev_tag\n return out[::-1]\n '''\n \n out = []\n for m in range(len(words)):\n cur_stats = {}\n for tag in all_tags:\n feat = featfunc(words, tag, 'X', m)\n cur_stats[tag] = inner_product(weights, feat)\n out.append(argmax(cur_stats))\n return out\n\ndef evalTagger(tagger,outfilename,testfile=DEV_FILE):\n \"\"\"Calculate confusion_matrix for a given tagger\n\n Parameters:\n tagger -- Function mapping (words, possible_tags) to an optimal\n sequence of tags for the words\n outfilename -- Filename to write tagger predictions to\n testfile -- (optional) Filename containing true labels\n\n Returns:\n confusion_matrix -- dict of occurences of (true_label, pred_label)\n \"\"\"\n alltags = set()\n for i,(words, tags) in enumerate(preproc.conllSeqGenerator(TRAIN_FILE)):\n for tag in tags:\n alltags.add(tag)\n with open(outfilename,'w') as outfile:\n for words,_ in preproc.conllSeqGenerator(testfile):\n pred_tags = tagger(words,alltags)\n for tag in pred_tags:\n print >>outfile, tag\n print >>outfile, \"\"\n return scorer.getConfusion(testfile,outfilename) #run the scorer on the 
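`poseNorm` in the normalizers record above offers a `sin_cos` orientation encoding; its payoff is that the angle can be decoded with `arctan2` and never crosses a discontinuity at plus or minus 180 degrees. A round-trip check with numpy, sample angles illustrative:

```python
import numpy as np

deg = np.array([-180.0, -90.0, 0.0, 45.0, 180.0])
rad = deg / 180.0 * np.pi                 # same degrees-to-radians step as poseNorm
enc = np.stack([np.sin(rad), np.cos(rad)], axis=-1)
dec = np.degrees(np.arctan2(enc[..., 0], enc[..., 1]))
print(dec)  # matches deg up to the +/-180 wrap and float error
```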
prediction file\n","sub_path":"psets/ps4/gtnlplib/tagger_base.py","file_name":"tagger_base.py","file_ext":"py","file_size_in_byte":3395,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"642128868","text":"#parse file into strings - upper/lower/punction\n#for loop in the list add, or else: count --> counting\n#create dictionary (key = word; value = count)\n#make dictionary.keys into list\n#sort list (alpha)\n#(make dictionary.values into a list...?)\n#write file\nimport collections\n\ndef clean_file_into_str_list():\n\twith open('LoremIpsum.txt') as my_file:\n\t\tmy_data = my_file.read().decode(\"utf-8-sig\").encode(\"utf-8\")\n\t\tmy_revised_data = my_data.replace(',', '').replace('.', '')\n\t\tmy_data_list = my_revised_data.split(' ')\n\t\tnew_list = []\n\t\tfor item in my_data_list:\n\t\t\tnew_list.append(item.lower())\n\t\treturn new_list\n\ndef create_word_dict(new_list):\n\tword_dictionary = {}\n\tfor word in new_list:\n\t\tif word in word_dictionary:\n\t\t\tword_dictionary[word] += 1\n\t\telse:\n\t\t\tword_dictionary[word] = 1\n\treturn word_dictionary\n\ndef create_alpha_ordered_str(word_dictionary):\n\treturn collections.OrderedDict(sorted(word_dictionary.items(), key=lambda t: t[0]))\n\ndef create_numeric_ordered_str(word_dictionary):\n\treturn collections.OrderedDict(sorted(word_dictionary.items(), key=lambda t: t[1]))\n\ndef write_to_file(input_dictionary):\n\twith open('output.txt', 'w+') as my_file:\n\t\tfor key in input_dictionary:\n\t\t\toutput_text = key + \": \" + str(input_dictionary[key]) + '\\n'\n\t\t\tmy_file.write(output_text)\n\t\n\ndef main():\n\tnew_list = clean_file_into_str_list()\n\tword_dictionary = create_word_dict(new_list)\n\tnew_alpha_dict = create_alpha_ordered_str(word_dictionary)\n\tnew_num_dict = create_numeric_ordered_str(word_dictionary)\n\t# write_to_file(new_alpha_dict)\n\twrite_to_file(new_num_dict)\n\nif __name__ == '__main__':\n\tmain()","sub_path":"LoremIpsumParsing.py","file_name":"LoremIpsumParsing.py","file_ext":"py","file_size_in_byte":1603,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"202577968","text":"# relevant indeces in the html data\nindeces = ([0,2,4]+range(7,21))\n\n# dataframe column names\ncol_names = ['week','date','result','location','opponent', 'Ptscored','Ptallowed', \\\n '1stdowns', 'yards', 'Pyards', 'Ryards', 'TO', \\\n 'opp_1stdowns','yards_allowed', 'opp_Pyards', 'opp_Ryards', 'opp_TO']\n\n# map from month name to number\nmonth_num_dict = {'August':8, 'September':9, 'October':10, 'November':11, \\\n 'December': 12, 'January': 1, 'February':2, 'nan':'nan'}\n\n# season to start gathering data\nstart_season = 2002\n\n# final season\nend_season = 2014\n\n\n","sub_path":"aux_info.py","file_name":"aux_info.py","file_ext":"py","file_size_in_byte":590,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"582766094","text":"import socket\nimport logging\nimport sys\nimport os\nfrom datetime import datetime\nimport shutil\nfrom termcolor import colored\nimport math\n\nfrom torch.optim.lr_scheduler import _LRScheduler\nimport torch\n\ndef weights_init(m, deepth=0, max_depth=2):\n if deepth > max_depth:\n return\n if isinstance(m, torch.nn.Conv2d):\n torch.nn.init.kaiming_uniform_(m.weight.data)\n if m.bias is not None:\n torch.nn.init.constant_(m.bias.data, 0)\n elif isinstance(m, torch.nn.Linear):\n m.weight.data.normal_(0, 0.01)\n if m.bias is not None:\n 
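`classifierTagger` in the pset record above scores each tag per token by an inner product of weights and features and keeps the argmax. The same loop on a toy weight vector; the feature strings are invented for illustration, not the pset's `featfunc` output:

```python
# Sparse dot product of weights and features, argmax over tags.
weights = {('N', 'suffix=s'): 0.5, ('N', 'word=dogs'): 0.8, ('V', 'prev=to'): 1.2}

def score(tag, feats):
    return sum(weights.get((tag, f), 0.0) for f in feats)

feats = ['suffix=s', 'word=dogs']
print(max(['N', 'V'], key=lambda t: score(t, feats)))  # -> N
```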
m.bias.data.zero_()\n elif isinstance(m, torch.nn.BatchNorm2d):\n return\n elif isinstance(m, torch.nn.ReLU):\n return\n elif isinstance(m, torch.nn.Module):\n deepth += 1\n for m_ in m.modules():\n weights_init(m_, deepth)\n else:\n raise ValueError(\"%s is unk\" % m.__class__.__name__)\n\nclass CosineDecayLR(_LRScheduler):\n def __init__(self, optimizer, T_max, alpha=1e-4,\n t_mul=2, lr_mul=0.9,\n last_epoch=-1,\n warmup_step=300,\n logger=None):\n self.T_max = T_max\n self.alpha = alpha\n self.t_mul = t_mul\n self.lr_mul = lr_mul\n self.warmup_step = warmup_step\n self.logger = logger\n self.last_restart_step = 0\n self.flag = True\n super(CosineDecayLR, self).__init__(optimizer, last_epoch)\n\n self.min_lrs = [b_lr * alpha for b_lr in self.base_lrs]\n self.rise_lrs = [1.0 * (b - m) / self.warmup_step \n for (b, m) in zip(self.base_lrs, self.min_lrs)]\n\n def get_lr(self):\n T_cur = self.last_epoch - self.last_restart_step\n assert T_cur >= 0\n if T_cur <= self.warmup_step and (not self.flag):\n base_lrs = [min_lr + rise_lr * T_cur\n for (base_lr, min_lr, rise_lr) in \n zip(self.base_lrs, self.min_lrs, self.rise_lrs)]\n if T_cur == self.warmup_step:\n self.last_restart_step = self.last_epoch\n self.flag = True\n else:\n base_lrs = [self.alpha + (base_lr - self.alpha) *\n (1 + math.cos(math.pi * self.last_epoch / self.T_max)) / 2\n for base_lr in self.base_lrs]\n if T_cur == self.T_max:\n self.last_restart_step = self.last_epoch\n self.min_lrs = [b_lr * self.alpha for b_lr in self.base_lrs]\n self.base_lrs = [b_lr * self.lr_mul for b_lr in self.base_lrs]\n self.rise_lrs = [1.0 * (b - m) / self.warmup_step \n for (b, m) in zip(self.base_lrs, self.min_lrs)]\n self.T_max = int(self.T_max * self.t_mul)\n self.flag = False\n \n return base_lrs\n\nclass AvgrageMeter(object):\n\n def __init__(self, name=''):\n self.reset()\n self.name = name\n\n def reset(self):\n self.avg = 0\n self.sum = 0\n self.cnt = 0\n\n def update(self, val, n=1):\n self.sum += val * n\n self.cnt += n\n self.avg = self.sum / self.cnt\n \n def __str__(self):\n return \"%s: %.5f\" % (self.name, self.avg)\n \n def __repr__(self):\n return self.__str__()\n \n def register_func(self, f):\n self._func = f\n \n def cal(self, *args, **kwargs):\n if not hasattr(self, '_func'):\n raise ValueError(\"Call register_func before this function\")\n return self._func(*args, **kwargs)\n\ndef get_ip():\n try:\n s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n s.connect(('8.8.8.8', 80))\n ip = s.getsockname()[0]\n finally:\n s.close()\n return ip\nip = get_ip()\n\nclass _MyFormatter(logging.Formatter):\n \"\"\"Copy from tensorpack.\n \"\"\"\n def format(self, record):\n date = colored('IP:%s '%str(ip), 'yellow') + colored('[%(asctime)s @%(filename)s:%(lineno)d]', 'green')\n msg = '%(message)s'\n if record.levelno == logging.WARNING:\n fmt = date + ' ' + colored('WRN', 'red', attrs=['blink']) + ' ' + msg\n elif record.levelno == logging.ERROR or record.levelno == logging.CRITICAL:\n fmt = date + ' ' + colored('ERR', 'red', attrs=['blink', 'underline']) + ' ' + msg\n else:\n fmt = date + ' ' + msg\n if hasattr(self, '_style'):\n # Python3 compatibility\n self._style._fmt = fmt\n self._fmt = fmt\n return super(_MyFormatter, self).format(record)\n\ndef _getlogger():\n logger = logging.getLogger()\n logger.setLevel(logging.INFO)\n handler = logging.StreamHandler(sys.stdout)\n handler.setFormatter(_MyFormatter(datefmt='%m%d %H:%M:%S'))\n logger.addHandler(handler)\n return logger\n_logger = _getlogger()\ndef _get_time_str():\n return 
datetime.now().strftime('%m%d-%H%M%S')\ndef _set_file(path):\n if os.path.isfile(path):\n backup_name = path + '.' + _get_time_str()\n shutil.move(path, backup_name)\n _logger.info(\"Existing log file '{}' backuped to '{}'\".\n format(path, backup_name))\n hdl = logging.FileHandler(filename=path,\n encoding='utf-8', mode='w')\n hdl.setFormatter(_MyFormatter(datefmt='%m%d %H:%M:%S'))\n _logger.addHandler(hdl)\n\n\n","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":4909,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"615361124","text":"'''\nCreated on 2018-07-16\n\n@author: Kevin Köck\n'''\n\n\"\"\"\nexample config:\n{\n package: .machine.battery\n component: Battery\n constructor_args: {\n adc: 0 # ADC pin number or ADC object (even Amux pin object)\n voltage_max: 14 # maximum voltage of the battery\n voltage_min: 10.5 # minimum voltage of the battery\n multiplier_adc: 2.5 # calculate the needed multiplier to get from the voltage read by adc to the real voltage\n cutoff_pin: null # optional, pin number or object of a pin that will cut off the power if pin.value(1) \n precision_voltage: 2 # optional, the precision of the voltage published by mqtt\n # interval: 600 # optional, defaults to 600s, interval in which voltage gets published\n # mqtt_topic: null # optional, defaults to //battery\n # interval_watching: 1 # optional, the interval in which the voltage will be checked, defaults to 1s\n # friendly_name: null # optional, friendly name shown in homeassistant gui with mqtt discovery\n # friendly_name_abs: null # optional, friendly name for absolute voltage \n }\n}\n\"\"\"\n\n__version__ = \"0.2\"\n__updated__ = \"2018-08-18\"\n\nfrom pysmartnode import config\nfrom pysmartnode import logging\nimport uasyncio as asyncio\nimport gc\nimport machine\nfrom pysmartnode.components.machine.pin import Pin\nfrom pysmartnode.components.machine.adc import ADC\nfrom pysmartnode.utils.component import Component, DISCOVERY_SENSOR\nimport time\n\n_component_name = \"Battery\"\n_component_type = \"sensor\"\n\n_log = logging.getLogger(_component_name)\n_mqtt = config.getMQTT()\ngc.collect()\n\n\nclass Battery(Component):\n def __init__(self, adc, voltage_max, voltage_min, multiplier_adc, cutoff_pin=None,\n precision_voltage=2, interval_watching=1,\n interval=None, mqtt_topic=None, friendly_name=None, friendly_name_abs=None):\n super().__init__()\n self._interval = interval or config.INTERVAL_SEND_SENSOR\n self._interval_watching = interval_watching\n self._topic = mqtt_topic or _mqtt.getDeviceTopic(_component_name)\n self._precision = int(precision_voltage)\n self._adc = ADC(adc) # unified ADC interface\n self._voltage_max = voltage_max\n self._voltage_min = voltage_min\n self._multiplier = multiplier_adc\n self._cutoff_pin = None if cutoff_pin is None else (Pin(cutoff_pin, machine.Pin.OUT))\n if self._cutoff_pin is not None:\n self._cutoff_pin.value(0)\n self._frn = friendly_name\n self._frn_abs = friendly_name_abs\n gc.collect()\n self._event_low = None\n self._event_high = None\n\n def getVoltageMax(self):\n \"\"\"Getter for consumers\"\"\"\n return self._voltage_max\n\n def getVoltageMin(self):\n \"\"\"Getter for consumers\"\"\"\n return self._voltage_min\n\n async def _read(self, publish=True):\n try:\n value = self._adc.readVoltage()\n except Exception as e:\n _log.error(\"Error reading sensor {!s}: {!s}\".format(_component_name, e))\n return None\n if value is not None:\n value *= self._multiplier\n value = 
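Between warm restarts, `CosineDecayLR` above evaluates the standard cosine schedule. Isolated, with illustrative numbers:

```python
import math

# lr(t) = alpha + (base_lr - alpha) * (1 + cos(pi * t / T_max)) / 2,
# the expression applied in CosineDecayLR.get_lr between restarts.
def cosine_lr(step, base_lr, t_max, alpha=1e-4):
    return alpha + (base_lr - alpha) * (1 + math.cos(math.pi * step / t_max)) / 2

for step in (0, 500, 1000):
    print(step, round(cosine_lr(step, 0.1, 1000), 5))
# 0 -> 0.1 (base), 500 -> ~0.05, 1000 -> alpha
```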
round(value, self._precision)\n if value is None:\n _log.warn(\"Sensor {!s} got no value\".format(_component_name))\n elif publish:\n await _mqtt.publish(self._topic, (\"{0:.\" + str(self._precision) + \"f}\").format(value))\n return value\n\n async def voltage(self, publish=True):\n return await self._read(publish=publish)\n\n async def _init(self):\n await super()._init()\n interval = self._interval\n interval_watching = self._interval_watching\n t = time.ticks_ms()\n while True:\n # reset enets on next reading so consumers don't need to do it as there might be multiple consumers awaiting\n if self._event_low is not None:\n self._event_low.release()\n if self._event_high is not None:\n self._event_high.release()\n if time.ticks_ms() > t:\n # publish interval\n voltage = self._read()\n t = time.ticks_ms() + interval\n else:\n voltage = self._read(publish=False)\n if voltage > self._voltage_max:\n if self._event_high is not None:\n self._event_high.set(data=voltage)\n # no log as consumer has to take care of logging or doing something\n else:\n _log.warn(\"Battery voltage of {!s} exceeds maximum of {!s}\".format(voltage, self._voltage_max))\n elif voltage < self._voltage_min:\n if self._event_low is not None:\n self._event_low.set(data=voltage)\n # no log as consumer has to take care of logging or doing something\n else:\n _log.warn(\"Battery voltage of {!s} lower than minimum of {!s}\".format(voltage, self._voltage_min))\n if self._cutoff_pin is not None:\n if self._cutoff_pin.value() == 1:\n _log.critical(\"Cutting off power did not work!\")\n self._cutoff_pin.value(0) # trying again\n await asyncio.sleep(1)\n else:\n _log.warn(\"Cutting off power\")\n await asyncio.sleep(5) # time to send all logs and for consumers to get done\n self._cutoff_pin.value(1)\n await asyncio.sleep(interval_watching)\n\n def registerEventHigh(self, event):\n self._event_high = event\n\n def registerEventLow(self, event):\n self._event_low = event\n\n async def _discovery(self):\n sens = DISCOVERY_SENSOR.format(\"battery\", # device_class\n \"%\", # unit_of_measurement\n \"{{ value_json.relative }}\") # value_template\n await self._publishDiscovery(_component_type, self._topic, _component_name + \"r\", sens,\n self._frn or \"Battery %\")\n sens = '\"unit_of_meas\":\"V\",' \\\n '\"val_tpl\":\"{{ value_json.absolute }}\",' \\\n '\"ic\":\"mdi:car-battery\"'\n await self._publishDiscovery(_component_type, self._topic, _component_name + \"a\", sens,\n self._frn_abs or \"Battery Volt\")\n del sens\n gc.collect()\n","sub_path":"pysmartnode/components/sensors/battery.py","file_name":"battery.py","file_ext":"py","file_size_in_byte":6508,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"544038392","text":"import logging\nimport zipfile\nimport progressbar\n\nfrom itertools import chain\nfrom multiprocessing import Pool, Value\n\nbar = progressbar.ProgressBar()\ncounter = Value(\"i\", 0)\n\n\nclass Crawler:\n \"\"\"Zipfile files crawler\n \"\"\"\n\n def __init__(self, processes, files, map_func, reduce_func):\n self.processes = processes\n self.files = files\n self.map_func = map_func\n self.reduce_func = reduce_func\n\n # Setup a progress bar\n bar.max_value = len(self.files)\n bar.interval = 1.0\n\n def __call__(self):\n \"\"\"Start N processes that will open a Zip file and analyze it's content\n\n If processes == 1 run foreground\n \"\"\"\n if self.processes > 1:\n with Pool(self.processes) as p:\n mapped = p.map(self._run, self.files)\n else:\n mapped = 
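`Battery._read` above turns the raw ADC voltage into a battery reading with just a divider multiplier and a rounding step. A stand-alone rerun; values mirror the example config in the record's docstring (`multiplier_adc: 2.5`, `precision_voltage: 2`), and the ADC reading is made up:

```python
multiplier = 2.5   # divider ratio from the config example
precision = 2

def battery_voltage(adc_volts):
    # Scale the measured ADC voltage back up to the pack voltage.
    return round(adc_volts * multiplier, precision)

print(battery_voltage(4.8))  # -> 12.0
```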
(self._run(f) for f in self.files)\n\n return self.reduce_func(chain.from_iterable(mapped))\n\n def _run(self, filepath):\n logger = logging.getLogger('analyzer')\n\n mapped = []\n try:\n # Open zipfile\n with zipfile.ZipFile(filepath, \"r\") as zippedfile:\n\n # Only read files that are txt, to avoid duplicates results\n iter_filer = (\n f for f in zippedfile.filelist\n if \"txt\" in f.filename\n )\n for inner_zip_file in iter_filer:\n with zippedfile.open(inner_zip_file) as f:\n try:\n lines = f.read().decode(\"utf-8\")\n except UnicodeDecodeError:\n # If can't decode probably is an attached file\n logger.debug(\"Can't decode file {} in {}\".format(\n inner_zip_file.filename,\n filepath\n ))\n continue\n\n mapped.append(self.map_func(lines))\n\n except zipfile.BadZipFile:\n logger.debug(\"Can't read file\".format(filepath))\n return []\n else:\n return mapped\n finally:\n counter.value += 1\n bar.update(counter.value)\n","sub_path":"analyzer/crawler.py","file_name":"crawler.py","file_ext":"py","file_size_in_byte":2249,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"24033962","text":"import pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom mpl_toolkits.mplot3d import Axes3D\n\ndef num_coefficients_2(d):\n t = 0\n for n in range(d+1):\n for i in range(n+1):\n for j in range(n+1):\n if i+j==n:\n t = t+1\n return t\n\ndef calculate_model_function(deg,data, p):\n result = np.zeros(data.shape[0]) \n k=0\n for n in range(deg+1):\n for i in range(n+1):\n result += p[k]*(data[:,0]**i)*(data[:,1]**(n-i))\n k+=1\n return result\n\ndef linearize(deg,data, p0):\n f0 = calculate_model_function(deg,data,p0)\n J = np.zeros((len(f0), len(p0)))\n epsilon = 1e-6\n for i in range(len(p0)):\n p0[i] += epsilon\n fi = calculate_model_function(deg,data,p0)\n p0[i] -= epsilon\n di = (fi - f0)/epsilon\n J[:,i] = di\n return f0,J\n \ndef calculate_update(y,f0,J):\n l=1e-2\n N = np.matmul(J.T,J) + l*np.eye(J.shape[1])\n r = y-f0\n n = np.matmul(J.T,r)\n dp = np.linalg.solve(N,n) \n return dp\n\n\ndef main():\n bike = pd.read_csv(\"day.csv\")\n data = np.array(bike[[\"temp\",\"windspeed\"]])\n target = np.array(bike[\"casual\"])\n\n plt.close(\"all\")\n\n max_iter = 10\n for deg in range(5):\n p0 = np.zeros(num_coefficients_2(deg))\n for i in range(max_iter):\n f0,J = linearize(deg,data, p0)\n dp = calculate_update(target,f0,J)\n p0 += dp\n\n x, y = np.meshgrid(np.arange(np.min(data[:,0]), np.max(data[:,0]), 0.1), \n np.arange(np.min(data[:,1]), np.max(data[:,1]), 0.1)) \n test_data = np.array([x.flatten(),y.flatten()]).transpose() \n test_target = calculate_model_function(deg,test_data, p0)\n\n fig = plt.figure()\n ax = fig.add_subplot(111, projection='3d')\n ax.scatter(data[:,0],data[:,1],target,c='r')\n ax.plot_surface(x,y,test_target.reshape(x.shape))\n plt.show()\n \nmain()\n","sub_path":"Machine_Learning/Lab_10/Lab_ML_10.py","file_name":"Lab_ML_10.py","file_ext":"py","file_size_in_byte":1989,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"294628896","text":"\r\n# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Wed Feb 20 11:58:35 2019\r\n\r\n@author: ROHIT SINGH\r\n\"\"\"\r\n\r\nimport datetime\r\nimport hashlib\r\nimport json\r\nfrom flask import Flask, jsonify, request\r\nimport requests\r\nfrom uuid import uuid4\r\nfrom urllib.parse import urlparse\r\n\r\n\r\n\r\n\r\nclass Blockchain:\r\n \r\n def __init__(self):\r\n self.chain= []\r\n self.transactions=[]\r\n 
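`calculate_update` in the curve-fitting lab record solves the damped normal equations, one Levenberg-Marquardt-style step: (J^T J + lambda I) dp = J^T (y - f0). The same step on a tiny hand-made system:

```python
import numpy as np

def lm_step(y, f0, J, lam=1e-2):
    # Damped normal equations, as in calculate_update above.
    N = J.T @ J + lam * np.eye(J.shape[1])
    return np.linalg.solve(N, J.T @ (y - f0))

J = np.array([[1.0, 0.0], [0.0, 2.0], [1.0, 1.0]])
y = np.array([1.0, 2.0, 2.0])
print(lm_step(y, np.zeros(3), J))
```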
self.create_block(proof=1,previous_hash='0')\r\n self.nodes=set()\r\n def create_block(self,proof,previous_hash):\r\n block={'index': len(self.chain)+1,'timestamp': str(datetime.datetime.now()),\r\n 'proof': proof,\r\n 'transactions':self.transactions,\r\n 'previous_hash':previous_hash}\r\n self.transactions = []\r\n self.chain.append(block)\r\n return block\r\n \r\n def get_previous_block(self):\r\n return self.chain[-1]\r\n def proofofwork(self,previous_proof):\r\n new_proof=1\r\n check_proof=False\r\n while check_proof is False:\r\n hash_operation = hashlib.sha256(str(new_proof**5/new_proof**2-previous_proof**3).encode()).hexdigest()\r\n if hash_operation[:4]=='0000':\r\n check_proof= True\r\n else:\r\n new_proof+=1;\r\n \r\n return new_proof\r\n \r\n def hash(self,block):\r\n encoded_block=json.dumps(block,sort_keys=True).encode()\r\n return hashlib.sha256(encoded_block).hexdigest()\r\n def is_chain_valid(self,chain):\r\n block_index=1\r\n previous_block=chain[0]\r\n while block_index < len(chain):\r\n block=chain[block_index]\r\n if block['previous_hash']!=self.hash(previous_block):\r\n return False\r\n previous_proof = previous_block['proof']\r\n proof=block['proof']\r\n hash_operation = hashlib.sha256(str(proof**5/proof**2-previous_proof**3).encode()).hexdigest()\r\n if hash_operation[:4]!='0000':\r\n return False\r\n previous_block=block\r\n block_index+=1\r\n return True\r\n def add_transaction(self,sender,reciever,amount):\r\n self.transactions.append({'sender':sender,'reciever':reciever,'amount':amount})\r\n previous_block = self.get_previous_block()\r\n return previous_block['index']+1\r\n def add_node(self,address):\r\n parsed_url=urlparse(address)\r\n self.nodes.add(parsed_url.netloc)\r\n \r\n def replace_chain(self):\r\n network = self.nodes\r\n longest_chain=None\r\n max_length = len(self.chain)\r\n for node in network:\r\n response = requests.get(f'http://{node}/get_chain')\r\n if response.status_code == 200:\r\n length = response.json()['length']\r\n chain = response.json()['chain']\r\n if length>max_length and self.is_chain_valid(chain):\r\n max_length=length\r\n longest_chain=chain\r\n if longest_chain:\r\n self.chain = longest_chain\r\n return True\r\n return False\r\napp=Flask(__name__)\r\n\r\nnode_address = str(uuid4()).replace('-','')\r\n\r\nblockchain= Blockchain()\r\n#http://127.0.0.1:5000\r\n@app.route('/mine_block',methods=['GET'])\r\ndef mine_block():\r\n previous_block=blockchain.get_previous_block()\r\n prev_proof=previous_block['proof']\r\n proof=blockchain.proofofwork(prev_proof)\r\n prev_hash=blockchain.hash(previous_block)\r\n blockchain.add_transaction(sender=node_address,reciever='Rohit',amount=1)\r\n block=blockchain.create_block(proof,prev_hash)\r\n \r\n response = {'message':'Congrats You Just Mined a Block!',\r\n 'index':block['index'],\r\n 'timestamp':block['timestamp'],\r\n 'proof':block['proof'],\r\n 'transactions':block['transactions'],\r\n 'previous_hash':block['previous_hash']\r\n }\r\n return jsonify(response),200\r\n@app.route('/get_chain',methods=['GET'])\r\ndef get_chain():\r\n response= {'chain':blockchain.chain,'length':len(blockchain.chain)}\r\n return jsonify(response),200\r\n@app.route('/is_valid',methods=['GET'])\r\ndef is_valid():\r\n \r\n isvalid=blockchain.is_chain_valid(blockchain.chain)\r\n if isvalid is True:\r\n mess = 'The chain is valid'\r\n else:\r\n mess = 'Invalid Mine'\r\n response = {\r\n 'Validity':mess\r\n \r\n }\r\n return jsonify(response),200\r\n\r\n@app.route('/add_transaction',methods=['POST'])\r\ndef 
add_transaction():\r\n    json = request.get_json()\r\n    transaction_keys=['sender','reciever','amount']\r\n    if not all (key in json for key in transaction_keys):\r\n        return 'Some problem in json file',400\r\n    \r\n    index = blockchain.add_transaction(json['sender'],json['reciever'],json['amount'])\r\n    response = {'message':f'This transaction will be added to Block {index}'}\r\n    return jsonify(response),201\r\n\r\n@app.route('/connect_node',methods=['POST'])\r\ndef connect_node():\r\n    json = request.get_json()\r\n    nodes=json.get('nodes')\r\n    if nodes is None:\r\n        return "No node",400\r\n    for node in nodes:\r\n        blockchain.add_node(node)\r\n    response = {'message':'All the nodes are now connected. The Blockchain has the following nodes','total_nodes':list(blockchain.nodes)}\r\n    return jsonify(response),201\r\n\r\n@app.route('/replace_chain',methods=['GET'])\r\ndef replace_chain():\r\n    \r\n    is_chain_replaced=blockchain.replace_chain()\r\n    if is_chain_replaced :\r\n        response={'message' : 'The node had a different chain','new_chain':blockchain.chain}\r\n    else:\r\n        response={'message' : 'The chain is the largest one.','actual_chain':blockchain.chain}\r\n    \r\n    return jsonify(response),200\r\n\r\napp.run(host='0.0.0.0',port=5003)\r\n","sub_path":"BC/mod2/mynode.py","file_name":"mynode.py","file_ext":"py","file_size_in_byte":5763,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"241717937","text":""""\n\nQuestion:\nDefine a class with a generator which can iterate the numbers, which are divisible by 7, between a given range 0 and n.\n\nHints:\nConsider using yield\n\n"""\nclass generator:\n    def putNumber(self, n):\n        i = 0\n        while i < n:\n            j = i\n            i = i + 1\n            if j % 7 == 0:\n                yield j\n\ngen = generator()\n\nfor i in gen.putNumber(100):\n    print(i)","sub_path":"class_generator.py","file_name":"class_generator.py","file_ext":"py","file_size_in_byte":400,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"36485298","text":"import socket\r\nimport ssl\r\n\r\nhostname = 'www.python.org'\r\n# PROTOCOL_TLS_CLIENT requires valid cert chain and hostname\r\ncontext = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)\r\n\r\ncontext.load_verify_locations('path/to/cabundle.pem')\r\nwith socket.socket(socket.AF_INET, socket.SOCK_STREAM, 0) as sock:\r\n    with context.wrap_socket(sock, server_hostname=hostname) as ssock:\r\n        print(ssock.version())\r\n","sub_path":"9.2.py","file_name":"9.2.py","file_ext":"py","file_size_in_byte":373,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"451038031","text":"import json\nfrom urllib.request import urlopen\nfrom urllib.error import HTTPError, URLError\nfrom datetime import datetime\nfrom pymongo import MongoClient, UpdateOne\n\n\ndef get_jsonparsed_data(url):\n    while True:\n        try:\n            response = urlopen(url)\n            data = response.read().decode("utf-8")\n            return json.loads(data)\n        except HTTPError:\n            save_error("HTTPError", url)\n            return False\n        except URLError:\n            save_error("URLError", url)\n            return False\n\n\ndef save_error(e, url):\n    fe = open('./error.log', 'a')\n    error = f"Bad request to:{url}, type error:{e}\\n"\n    fe.write(error)\n    fe.close()\n\n\ndef save_data(url, service):\n    cars_data = get_jsonparsed_data(url)\n    if cars_data:\n        cars = cars_data["cars"]\n        connection = MongoClient("mongodb://localhost:27017/carsharing")\n        db = connection.carsharing\n        bulk_list = []\n        for car in cars:\n            car_id = car["Id"]\n            # if service == 'vezuha':\n            #     
vezuha_url = f'http://service.vezuha.club//api/rates?carId={car_id}'\n # vezuha_data = get_jsonparsed_data(vezuha_url)\n # if vezuha_data:\n # car_id = vezuha_data[\"perMinuteRates\"][0][\"id\"]\n data = {\n \"car_id\": car_id,\n \"brand\": car[\"Brand\"],\n \"model\": car[\"Model\"],\n \"color\": car[\"Color\"],\n \"timestemp\": datetime.now().strftime('%Y-%m-%d %H:%M:%S'),\n \"lat\": car[\"Lat\"],\n \"lon\": car[\"Lon\"],\n \"service\": service\n }\n bulk_list.append(UpdateOne({'car_id': data['car_id']}, {'$set': data}, upsert=True))\n db['cars'].bulk_write(bulk_list)\n connection.close()\n return True\n else:\n return False\n\n\nurls = [\n \"https://drivetime.cartrek.online/api/cars\",\n \"http://service.vezuha.club//api/cars\"\n]\n\nfor url in urls:\n service = 'anytime'\n if \"vezuha\" in url:\n service = 'vezuha'\n save_data(url, service)","sub_path":"scrape.py","file_name":"scrape.py","file_ext":"py","file_size_in_byte":2090,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"70740408","text":"if __name__ == \"__main__\":\n n = int(input(\"How many numbers ?\\t\"))\n nums = [int(x) for x in input(\"Enter {} numbers\\t\".format(n)).split()] \n\n for i in range(n-1): \n\n #set changed to false, false means no swapping of elements occured\n changed = False\n\n for j in range(n-i-1):\n if(nums[j] > nums[j+1]):\n nums[j],nums[j+1] = nums[j+1],nums[j] \n changed = True #some element is swapped\n\n #if no element is swapped then no need for further looping , break outer loop\n if not changed : \n break\n\n for i in nums:\n print(i,end=' ')\n \n print()\n\n ","sub_path":"sorting/bubble-sort.py","file_name":"bubble-sort.py","file_ext":"py","file_size_in_byte":697,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"533272248","text":"#!/usr/bin/env python\n# -*- encoding: utf-8 -*-\n# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n# @file: base/flasky.py\n# Copyright (c) 2013 Korepwx. All rights reserved.\n# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n# Contributors:\n# Korepwx 2013-08-18\n# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n# Use of this source code is governed by BSD license that can be found in the\n# LICENSE file.\n\n# Model the behaviours of Flask to fit MyBlog.\n#\n# These hacks may depend on internal details of flask, which may differ from\n# version to version. 
Hacks including error-free flask global object access,\n# pure Flask application without static & templates, default actions before\n# requests and many more.\n\nimport sysconf\nimport babel\nimport functools\nfrom flask import Flask, Blueprint, request, redirect\n\nfrom thirdparty.proxies import CallbackProxy\nfrom base import util\nfrom base import i18n\nfrom base.log import getLogger\nfrom base.session import get_session_interface\nfrom base.context import Page, User\n\nclass FlaskApp(Flask):\n    '''\n    Override default Flask constructor to disable static & template folder.\n    Also, this customized app supports a local url_for method.\n    '''\n    def __init__(self, *args, **kwargs):\n        if ('static_folder' not in kwargs):\n            kwargs['static_folder'] = None\n        if ('template_folder' not in kwargs):\n            kwargs['template_folder'] = None\n        Flask.__init__(self, *args, **kwargs)\n\n    def url_for(self, endpoint, method='GET', **kwargs):\n        return self.url_map.bind('', '/').build(endpoint, kwargs)\n\nclass FlaskBlueprint(Blueprint):\n    '''\n    Override default Blueprint constructor to disable static & template folder.\n    '''\n    def __init__(self, *args, **kwargs):\n        if ('static_folder' not in kwargs):\n            kwargs['static_folder'] = None\n        if ('template_folder' not in kwargs):\n            kwargs['template_folder'] = None\n        Blueprint.__init__(self, *args, **kwargs)\n\ndef boot_flask_app(app):\n    '''\n    Initialize Flask app instance for myblog.\n\n    This method should be applied on all Flask app instances. For global\n    application instances created by plugins when the module is loaded,\n    this method should be called later when the plugin is inited.\n\n    This method will register boot_flask_request to app.before_request.\n    '''\n    # Update configurations according to sysconf.\n    cookie_domain = sysconf.SITE_DOMAIN\n    if (sysconf.MULTISITE['enable']):\n        cookie_domain = '.' 
+ cookie_domain\n    app.config.update(\n        # debug settings.\n        DEBUG=sysconf.DEBUG,\n        # client session settings.\n        SECRET_KEY=sysconf.SECRET_KEY,\n        SESSION_COOKIE_DOMAIN=cookie_domain,\n        SESSION_COOKIE_PATH='/',\n        PERMANENT_SESSION_LIFETIME=sysconf.PERMANENT_SESSION_LIFETIME,\n    )\n    # Register handlers\n    app.session_interface = get_session_interface()\n    app.before_request(boot_flask_request)\n\ndef boot_flask_request():\n    request.page = Page()\n    request.user = User()\n","sub_path":"src/base/flasky.py","file_name":"flasky.py","file_ext":"py","file_size_in_byte":3129,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"142498692","text":"# import pyplot from Matplotlib as plt\n# draw a simple line chart\nfrom matplotlib import pyplot as plt\n\nyears=[1950,1960,1970,1980,1990,2000,2010]\ngdp=[300.2,543.3,1075.9,2862.5,5979.6,10289.7,14958.3]\n\n# create a figure: the x-axis is the years, the y-axis is the GDP\nplt.plot(years,gdp,color='green',marker='o',linestyle='solid')\n\n# add a title\nplt.title("NominalGDP")\n# label the y-axis\nplt.ylabel("1 billion")\n# show the plot in a new window\nplt.show()\n\n# bar chart: quantities of discrete items\nmovies=["Annie Hall","Ben-Hur","Casablanca","Gandhi","West Side Story"]\nnum_oscars=[5,11,3,8,10]\n\n# define the left x-coordinates [xs]\nxs=[i+0.1 for i, _ in enumerate(movies)]\n\n# use the left x-coordinates and the heights [num_oscars]\nplt.bar(xs,num_oscars)\n\nplt.ylabel("The Amount of Oscar")\nplt.title("The movie I like")\n\n# label the x-axis with the movie names, centered under each bar\nplt.xticks([i+0.5 for i, _ in enumerate(movies)],movies)\nplt.show()\n\n# bar chart: how the values are distributed\nfrom collections import Counter\ndef make_chart_histogram(plt):\n    grades = [83,95,91,87,70,0,85,82,100,67,73,77,0]\n    decile = lambda grade: grade // 10 * 10\n    histogram = Counter(decile(grade) for grade in grades)\n\n    plt.bar([x - 4 for x in histogram.keys()], # shift each bar to the left by 4\n            histogram.values(),                # give each bar its correct height\n            8)                                 # give each bar a width of 8\n    plt.axis([-5, 105, 0, 5])                  # x-axis from -5 to 105,\n                                               # y-axis from 0 to 5\n    plt.xticks([10 * i for i in range(11)])    # x-axis labels at 0, 10, ..., 100\n    plt.xlabel("Decile")\n    plt.ylabel("# of Students")\n    plt.title("Distribution of Exam 1 Grades")\n    plt.show()\n\t\nmake_chart_histogram(plt)\n\n# statistical deception: a y-axis that does not start at 0\n# line chart: the trend of things over time\ndef make_chart_several_line_charts(plt):\n\n    variance     = [1,2,4,8,16,32,64,128,256]\n    bias_squared = [256,128,64,32,16,8,4,2,1]\n    total_error  = [x + y for x, y in zip(variance, bias_squared)]\n\n    xs = range(len(variance))\n\n    # we can make multiple calls to plt.plot \n    # to show multiple series on the same chart\n    plt.plot(xs, variance,     'g-',  label='variance')    # green solid line\n    plt.plot(xs, bias_squared, 'r-.', label='bias^2')      # red dot-dashed line\n    plt.plot(xs, total_error,  'b:',  label='total error') # blue dotted line\n\n    # because we've assigned labels to each series\n    # we can get a legend for free\n    # loc=9 means "top center"\n    plt.legend(loc=9)\n    plt.xlabel("model complexity")\n    plt.title("The Bias-Variance Tradeoff")\n    plt.show()\n\nmake_chart_several_line_charts(plt)\t\n\n# scatter plot: on a social network site,\n# number of friends vs. time spent on the site\ndef make_chart_scatter_plot(plt):\n\n    friends = [ 70, 65, 72, 63, 71, 64, 60, 64, 67]\n    minutes = [175, 170, 205, 120, 220, 130, 105, 145, 190]\n    labels  = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i']\n\n    plt.scatter(friends, minutes)\n    \n    # label each point\n    for label, friend_count, minute_count in zip(labels, friends, minutes):\n        plt.annotate(label,\n                     xy=(friend_count, minute_count), # put the label with its point\n                     xytext=(5, -5),                  # but slightly offset\n                     textcoords='offset points')\n\n    
plt.title(\"Daily Minutes vs. Number of Friends\")\n plt.xlabel(\"# of friends\")\n plt.ylabel(\"daily minutes spent on the site\")\n plt.show()\n\t\nmake_chart_scatter_plot(plt)\n# plt.axis(\"equal\")更精确\n# 统计学会撒谎\n\n# 数据可视化\n# seaborn\n# D3.js\n# Bokeh\n# ggplot2\n# 《现代统计图形》\n","sub_path":"datavis.py","file_name":"datavis.py","file_ext":"py","file_size_in_byte":3662,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"112779762","text":"#coding=utf-8\nimport yaml\nimport re\n\ndef load_yaml(fp):\n try:\n return yaml.load(open(fp, encoding='utf-8'))\n except (TypeError, FileNotFoundError, ParserError):\n return {}\n\nposi_word = load_yaml('positive_adj.yml')\nnega_word = load_yaml('negative_adj.yml')\nturning_word = ['怀疑', '是不是', '不知道', '大概', '有可能', '搞不懂', '不知',\n'算不算', '为何', '真不知道']\n\nclass TurningFinder(object):\n def __init__(self, file, bk=False):\n self.file = file\n self.bk = bk\n self.count = 0\n\n def _word_in_sentence(self ,words, sentence):\n for w in words:\n if w in sentence:\n return w\n return False\n \n def run(self):\n with open(self.file, 'r') as f:\n for line in f.readlines():\n ret = self._word_in_sentence(turning_word, line)\n if ret:\n print('>>>',line),\n print(ret,'\\n', '\\n'.join(line.strip().split(ret))),\n if self.bk:\n input()\n self.count += 1\n print(self.count)\n\nif __name__ == \"__main__\":\n\n t = TurningFinder('amazon_reviews.txt', bk=True)\n #t = TurningFinder('amazon_reviews.txt', bk=False)\n t.run()\n","sub_path":"syntax_test/argu.py","file_name":"argu.py","file_ext":"py","file_size_in_byte":1304,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"519607913","text":"\r\n\r\nimport pygame, sys\r\nfrom pygame.locals import *\r\n\r\nimport random\r\nimport time\r\n\r\nlast_shoot = time.time()\r\nSHOOT_TIMEOUT = 0.25\r\n\r\njust_hit = time.time()\r\nHIT_TIMEOUT = 1.5\r\n\r\n\r\npygame.init()\r\n\r\n\r\n##GLOBAL VARIABLES!!!\r\n##THINGS LIKE CONSTANTS WE WILL USE ALL CAPS VARIABLE NAMES FOR\r\nFPS = 30 # frames per second setting\r\nSCREENWIDTH = 400\r\nSCREENHEIGHT = 300\r\n\r\nFOODSIZE = 5 #dimensions of food (pixels)\r\nENEMYSIZE = 10 #dimensions of enemy (squares so x by x)\r\nMISSILEWIDTH=3 #missile width\r\nMISSILEHEIGHT=8 #missile height\r\n\r\n\r\nTYPESOFENEMIES = 5\r\n\r\nCHARWIDTH = 10 #your width\r\nCHARHEIGHT = 15 #your height\r\n\r\nMISSILERATE = 3 #movement rate of missiles\r\n\r\nfpsClock = pygame.time.Clock()\r\n\r\nFALL_RATE = 3 #fall rate of objects/food (how many pixels per clock tick)\r\n\r\nBASICFONT = pygame.font.Font('freesansbold.ttf', 18)\r\nTITLEFONT = pygame.font.Font('freesansbold.ttf', 18)\r\nINSFONT = pygame.font.Font('freesansbold.ttf', 12)\r\n\r\n# set up the window\r\nDISPLAYSURF = pygame.display.set_mode((400, 300), 0, 32)\r\npygame.display.set_caption('')\r\n\r\n\r\n\r\n###load game graphics\r\nsquid = pygame.image.load(\"squidpic.png\").convert()\r\n\r\n\r\n\r\n#make some colors with easy to understand names for use throughout game:\r\nWHITE = (255,255,255)\r\nGREY = (200, 200, 200)\r\nRED = (255,0,0)\r\nGREEN = (0,255,0)\r\nBLUE = (0,0,100)\r\nLIGHTBLUE = (0,0,255)\r\nPURPLE = (255,0,255)\r\nORANGE = (255,165,0)\r\nPINK = (255,192,203)\r\nYELLOW = (255,255,0)\r\nBLACK = (0,0,0)\r\n\r\n\r\n\r\n#max foods on screen at any point in time\r\nFOOD_CAP = 15\r\nENEMY_CAP = 150\r\n\r\nfoods = [] #list to contain food instances\r\nenemies = [] #list to contain enemy instances\r\nmissiles = [] #list to contain missile 
instances\r\n\r\n\r\n###important variables for player/character run time\r\n#starting character position:\r\nchar_posx = SCREENWIDTH/2\r\nchar_posy = SCREENHEIGHT-30\r\n#how many missiles you've got left:\r\n#score:\r\nmissile_stock = 10\r\nscore = 0\r\n#lives:\r\nlives =3\r\n\r\ninstructions = '''You are a dark grey rectangle.\r\nThe world is full of colored squares which will hurt you.\r\nYou must avoid these squares or destroy them with missiles.\r\nFire missiles with the spacebar.\r\nThe small red things are food. Eat the food.'''\r\n\r\n\r\n\r\n\r\nclass Player(pygame.sprite.Sprite):\r\n\tdef __init__(self):\r\n\t\tpygame.sprite.Sprite.__init__(self)\r\n\t\tself.image = squid\r\n\t\tself.rect = self.image.get_rect()\r\n\t\tself.rect.centerx = SCREENWIDTH/2\r\n\t\tself.rect.bottom = SCREENHEIGHT - 10\r\n\t\tself.speedx = 0\r\n\tdef update(self):\r\n\t\tself.speedx = 0\r\n\t\tkeystate = pygame.key.get_pressed()\r\n\t\tif keystate[pygame.K_LEFT]:\r\n\t\t\tself.speedx = -8\r\n\t\tif keystate[pygame.K_RIGHT]:\r\n\t\t\tself.speedx = 8\r\n\t\tself.rect.x += self.speedx\r\n\t\tif self.rect.right > SCREENWIDTH:\r\n\t\t\tself.rect.right = SCREENWIDTH\r\n\t\tif self.rect.left < 0:\r\n\t\t\tself.rect.left = 0\r\n\r\n\r\n\r\n\r\nall_sprites = pygame.sprite.Group()\r\nplayerchar = Player()\r\nall_sprites.add(playerchar)\r\n\r\ndef main():\r\n    global CLOCK, SURFACE\r\n    pygame.init()\r\n    CLOCK = pygame.time.Clock()\r\n    SURFACE = pygame.display.set_mode((SCREENWIDTH, SCREENHEIGHT))\r\n    pygame.display.set_caption('Edutainment')\r\n    startScreen()\r\n    while True:\r\n        instructionsScreen()\r\n        gameRun()\r\n        endScreen()\r\n\r\n\r\n\r\ndef gameRun():\r\n    global char_posx\r\n    global char_posy\r\n    global missile_stock\r\n    global foods\r\n    global enemies\r\n    global missiles\r\n    global lives\r\n    global score\r\n    global last_shoot\r\n    score = 0 #start score at zero\r\n    lives = 3 #start lives at 3\r\n    missile_stock =10 #start missile stock at 10\r\n    move_command = 0 #initial move command\r\n    char_posx = SCREENWIDTH/2 #starting character x pos\r\n    char_posy = SCREENHEIGHT-30 #starting character y pos\r\n    foods = []\r\n    enemies = []\r\n    missiles = []\r\n\r\n    while True: #main game loop...get out of it by calling "break"\r\n        move_command = 0\r\n        shoot_command = False\r\n        for event in pygame.event.get():\r\n            if event.type == QUIT:\r\n                terminate()\r\n            elif event.type == KEYDOWN:\r\n                if event.key == K_SPACE:\r\n                    shoot_command = True\r\n                if event.key == K_ESCAPE:\r\n                    terminate()\r\n        # pressed = pygame.key.get_pressed()\r\n        # if pressed[pygame.K_LEFT] or pressed[pygame.K_a]:\r\n        #     move_command-=1\r\n        # if pressed[pygame.K_RIGHT] or pressed[pygame.K_d]:\r\n        #     move_command+=1\r\n        if shoot_command:\r\n            if missile_stock>0:\r\n                missiles.append([char_posx + 0.5*CHARWIDTH-0.5*MISSILEWIDTH,char_posy])\r\n                missile_stock-=1\r\n        char_posx +=move_command*3\r\n        char_posx = min(max(char_posx,20),SCREENWIDTH-20)\r\n        updateFoods()\r\n        updateEnemiesAndMissiles()\r\n        if lives<=0:\r\n            break\r\n        DISPLAYSURF.fill(LIGHTBLUE)\r\n        drawCharacter([char_posx,char_posy])\r\n        for food in foods:\r\n            drawFood(food)\r\n        for enemy in enemies:\r\n            drawEnemy(enemy)\r\n        for missile in missiles:\r\n            drawMissile(missile)\r\n        drawScore()\r\n        pygame.display.update()\r\n        fpsClock.tick(FPS)\r\n\r\n    \r\n\r\n#start screen graphics\r\ndef startScreen():\r\n    DISPLAYSURF.fill(LIGHTBLUE)\r\n    TitleSurf = TITLEFONT.render('Video Game', True, WHITE)\r\n    SubTitleSurf = TITLEFONT.render('Fun Times' 
, True, WHITE)\r\n    InstructionSurf = TITLEFONT.render('PRESS ANY KEY TO START', True, WHITE)\r\n    TitleRect = TitleSurf.get_rect()\r\n    SubTitleRect = SubTitleSurf.get_rect()\r\n    InstructionRect = InstructionSurf.get_rect()\r\n    TitleRect.topleft = (SCREENWIDTH/2-80, 40)\r\n    SubTitleRect.topleft = (SCREENWIDTH/2-30 , 80)\r\n    InstructionRect.topleft = (SCREENWIDTH/2-80 , 120)\r\n    DISPLAYSURF.blit(TitleSurf, TitleRect)\r\n    DISPLAYSURF.blit(SubTitleSurf, SubTitleRect)\r\n    DISPLAYSURF.blit(InstructionSurf, InstructionRect)\r\n    pygame.display.update()\r\n    while True:\r\n        if checkForKeyPress():\r\n            pygame.event.get() # clear event queue\r\n            return\r\n\r\n#instruction screen graphics:\r\ndef instructionsScreen():\r\n    DISPLAYSURF.fill(LIGHTBLUE)\r\n    TitleSurf = TITLEFONT.render('Video Game', True, WHITE)\r\n    TitleRect = TitleSurf.get_rect()\r\n    TitleRect.topleft = (SCREENWIDTH/2-80, 40)\r\n    DISPLAYSURF.blit(TitleSurf, TitleRect)\r\n    spot = 80\r\n    b = instructions.split('\\n')\r\n    for q in b:\r\n        SubTitleSurf = INSFONT.render(q , True, WHITE)\r\n        SubTitleRect = SubTitleSurf.get_rect()\r\n        SubTitleRect.topleft = (SCREENWIDTH/2-80 , spot)\r\n        spot+=20\r\n        DISPLAYSURF.blit(SubTitleSurf, SubTitleRect)\r\n    InstructionSurf = TITLEFONT.render('PRESS ANY KEY TO START', True, WHITE)\r\n    InstructionRect = InstructionSurf.get_rect()\r\n    InstructionRect.topleft = (SCREENWIDTH/2-80 , spot)\r\n    DISPLAYSURF.blit(InstructionSurf, InstructionRect)\r\n    pygame.display.update()\r\n    while True:\r\n        if checkForKeyPress():\r\n            pygame.event.get() # clear event queue\r\n            return\r\n\r\n#end Screen Graphics\r\ndef endScreen():\r\n    DISPLAYSURF.fill(BLACK)\r\n    TitleSurf = TITLEFONT.render('GAME OVER :(', True, WHITE)\r\n    SubTitleSurf = TITLEFONT.render('SCORE: %s' %(score), True, WHITE)\r\n    TitleRect = TitleSurf.get_rect()\r\n    SubTitleRect = SubTitleSurf.get_rect()\r\n    TitleRect.topleft = (SCREENWIDTH/2-30, 40)\r\n    SubTitleRect.topleft = (SCREENWIDTH/2-30 , 80)\r\n    DISPLAYSURF.blit(TitleSurf, TitleRect)\r\n    DISPLAYSURF.blit(SubTitleSurf, SubTitleRect)\r\n    pygame.display.update()\r\n    time.sleep(3)\r\n    pygame.event.get() #empty queue\r\n    DISPLAYSURF.fill(BLACK)\r\n    TitleSurf = TITLEFONT.render('GAME OVER :(', True, WHITE)\r\n    SubTitleSurf = TITLEFONT.render('SCORE: %s' %(score), True, WHITE)\r\n    InstructionSurf = TITLEFONT.render('PRESS ANY KEY TO RESTART', True, WHITE)\r\n    TitleRect = TitleSurf.get_rect()\r\n    SubTitleRect = SubTitleSurf.get_rect()\r\n    InstructionRect = InstructionSurf.get_rect()\r\n    TitleRect.topleft = (SCREENWIDTH/2-30, 40)\r\n    SubTitleRect.topleft = (SCREENWIDTH/2-30 , 80)\r\n    InstructionRect.topleft = (SCREENWIDTH/2-80 , 120)\r\n    DISPLAYSURF.blit(TitleSurf, TitleRect)\r\n    DISPLAYSURF.blit(SubTitleSurf, SubTitleRect)\r\n    DISPLAYSURF.blit(InstructionSurf, InstructionRect)\r\n    pygame.display.update()\r\n    while True:\r\n        if checkForKeyPress():\r\n            pygame.event.get() # clear event queue\r\n            return\r\n\r\ndef terminate():\r\n    pygame.quit()\r\n    sys.exit()\r\n\r\ndef randXSpot():\r\n    return random.randint(0, SCREENWIDTH - 1)\r\n\r\ndef rand(prob):\r\n    return random.random() < prob\r\n\r\ndef collision(coord1, w1, h1, coord2, w2, h2):\r\n    cond1 = coord1[0] < coord2[0] + w2\r\n    cond2 = coord1[0] + w1 > coord2[0]\r\n    cond3 = coord1[1] < coord2[1] + h2\r\n    cond4 = coord1[1] + h1 > coord2[1]\r\n    if cond1 and cond2 and cond3 and cond4:\r\n        return True\r\n    else:\r\n        return False\r\n\r\ndef updateFoods():\r\n    global foods\r\n    global score\r\n    new_foods=[]\r\n    global missile_stock\r\n    for food in foods:\r\n        candidate = [food[0],food[1]+FALL_RATE]\r\n        if collision(candidate,FOODSIZE,FOODSIZE,[char_posx,char_posy],CHARWIDTH,CHARHEIGHT):\r\n            missile_stock+=1\r\n            score +=1\r\n        elif candidate[1] < SCREENHEIGHT:\r\n            new_foods.append(candidate)\r\n    foods = new_foods\r\n    if rand(0.05) and len(foods) < FOOD_CAP:\r\n        foods.append([randXSpot(), 0])\r\n\r\ndef AI(charx, enemyx, type):\r\n    if type == 0 or type == 1:\r\n        return enemyx\r\n    elif type == 2:\r\n        deltx = (charx - enemyx)\r\n        if deltx > 0:\r\n            
return enemyx + 1\r\n        elif deltx < 0:\r\n            return enemyx - 1\r\n        else:\r\n            return enemyx\r\n    elif type == 3:\r\n        deltx = (charx - enemyx)\r\n        if deltx - 10 > 0:\r\n            return enemyx + 1\r\n        elif deltx - 10 < 0:\r\n            return enemyx - 1\r\n        else:\r\n            return enemyx\r\n    else:\r\n        deltx = (charx - enemyx)\r\n        if deltx + 10 > 0:\r\n            return enemyx + 1\r\n        elif deltx + 10 < 0:\r\n            return enemyx - 1\r\n        else:\r\n            return enemyx\r\n\r\ndef enemy_release(score):\r\n    if score<120:\r\n        return (1/120)*score\r\n    else:\r\n        return 1\r\n    \r\n\r\ndef updateEnemiesAndMissiles():\r\n    global enemies\r\n    global missiles\r\n    global missile_stock\r\n    global lives\r\n    global score\r\n    global just_hit\r\n    new_enemies=[]\r\n    new_missiles=[]\r\n    for missile in missiles:\r\n        candidate = [missile[0],missile[1]-MISSILERATE]\r\n        if candidate[1] >0:\r\n            new_missiles.append(candidate)\r\n    for enemy in enemies:\r\n        newx = AI(char_posx,enemy[0],enemy[2])\r\n        candidate = [newx,enemy[1]+FALL_RATE,enemy[2]]\r\n        hit = False\r\n        for m in range(len(new_missiles)):\r\n            if collision(candidate[:2],ENEMYSIZE,ENEMYSIZE,new_missiles[m],MISSILEWIDTH,MISSILEHEIGHT):\r\n                new_missiles.pop(m)\r\n                hit=True\r\n                score +=1\r\n                print("HITHIT")\r\n                break #enemy hit and blown up!\r\n        if hit:\r\n            pass\r\n        elif collision(candidate[:2],ENEMYSIZE,ENEMYSIZE,[char_posx,char_posy],CHARWIDTH,CHARHEIGHT):\r\n            lives-=1\r\n            just_hit = time.time()\r\n        elif candidate[1] < SCREENHEIGHT:\r\n            new_enemies.append(candidate)\r\n    enemies = new_enemies\r\n    missiles = new_missiles\r\n    #release new enemies more often as the score grows\r\n    if rand(0.05 * enemy_release(score)) and len(enemies) < ENEMY_CAP:\r\n        enemies.append([randXSpot(), 0, random.randint(0, TYPESOFENEMIES - 1)])\r\n\r\ndef drawCharacter(coordinates):\r\n    charRect = pygame.Rect(coordinates[0], coordinates[1], CHARWIDTH, CHARHEIGHT)\r\n    if time.time() - just_hit > HIT_TIMEOUT:\r\n        pygame.draw.rect(DISPLAYSURF, GREY, charRect)\r\n    # pygame.draw.rect(DISPLAYSURF,BLUE,charRect)\r\n    #else:\r\n    #    pygame.draw.rect(DISPLAYSURF,RED,charRect)\r\n\r\ndef drawFood(coordinates):\r\n    appleRect = pygame.Rect(coordinates[0], coordinates[1], FOODSIZE, FOODSIZE)\r\n    pygame.draw.rect(DISPLAYSURF, RED, appleRect)\r\n    \r\ndef drawEnemy(coordinates):\r\n    enemyRect = pygame.Rect(coordinates[0],coordinates[1], ENEMYSIZE, ENEMYSIZE)\r\n    if coordinates[2]==0:\r\n        pygame.draw.rect(DISPLAYSURF,GREEN,enemyRect)\r\n    elif coordinates[2]==1:\r\n        pygame.draw.rect(DISPLAYSURF,YELLOW,enemyRect)\r\n    elif coordinates[2]==2:\r\n        pygame.draw.rect(DISPLAYSURF,PURPLE,enemyRect)\r\n    elif coordinates[2]==3:\r\n        pygame.draw.rect(DISPLAYSURF,PINK,enemyRect)\r\n    elif coordinates[2]==4:\r\n        pygame.draw.rect(DISPLAYSURF,ORANGE,enemyRect)\r\n\r\ndef drawMissile(coordinates):\r\n    missile = pygame.Rect(coordinates[0],coordinates[1], MISSILEWIDTH,MISSILEHEIGHT)\r\n    pygame.draw.rect(DISPLAYSURF, GREY,missile)\r\n\r\ndef checkForKeyPress():\r\n    if len(pygame.event.get(QUIT)) > 0:\r\n        terminate()\r\n    keyUpEvents = pygame.event.get(KEYUP)\r\n    if len(keyUpEvents) == 0:\r\n        return None\r\n    if keyUpEvents[0].key == K_ESCAPE:\r\n        terminate()\r\n    return keyUpEvents[0].key\r\n\r\ndef drawScore():\r\n    scoreSurf = BASICFONT.render('Score: %s' % (score), True, WHITE)\r\n    missilesSurf = BASICFONT.render('Missiles: %s' % (missile_stock), True, WHITE)\r\n    livesSurf = BASICFONT.render('Lives: %s' % (lives), True, WHITE)\r\n    scoreRect = scoreSurf.get_rect()\r\n    missilesRect = missilesSurf.get_rect()\r\n    livesRect = livesSurf.get_rect()\r\n    scoreRect.topleft = (SCREENWIDTH - 120, 10)\r\n    missilesRect.topleft = (SCREENWIDTH - 120, 25)\r\n    livesRect.topleft = (SCREENWIDTH - 120, 40)\r\n    DISPLAYSURF.blit(scoreSurf, scoreRect)\r\n    DISPLAYSURF.blit(livesSurf, livesRect)\r\n    DISPLAYSURF.blit(missilesSurf, missilesRect)\r\n\r\n#code below runs the main function when you just call python3 lab08.py in terminal\r\nif __name__ == '__main__':\r\n    
main()\r\n\r\n","sub_path":"finalcode.py","file_name":"finalcode.py","file_ext":"py","file_size_in_byte":13850,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"649549815","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n\"\"\"\np_16.py : transitively reload nested modules\n\"\"\"\nimport types\n# from imp import reload\nfrom importlib import reload\n\n\ndef status(mod):\n print('reloading ' + mod.__name__)\n\n\ndef transitive_reload(mod, visited):\n if mod not in visited:\n status(mod)\n reload(mod)\n visited[mod] = None\n for attr_obj in mod.__dict__.values():\n print(\"type(attr_obj)=>\", type(attr_obj), \"types.ModuleType=>\", types.ModuleType)\n if type(attr_obj) == types.ModuleType:\n transitive_reload(attr_obj, visited)\n\n\ndef reload_all(*args):\n visited = {}\n for arg in args:\n if type(arg) == types.ModuleType:\n transitive_reload(arg, visited)\n\n\nif __name__ == '__main__':\n import p_16\n\n reload_all(p_16)\n","sub_path":"grammar/p_16.py","file_name":"p_16.py","file_ext":"py","file_size_in_byte":821,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"334495917","text":"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nfrom transformers import BertModel, BertTokenizerFast, AutoTokenizer, AutoModel\nfrom torch.utils.data import TensorDataset, DataLoader\nfrom sklearn.metrics import f1_score\n\nfrom tqdm import tqdm\n\nfrom helpers import *\nfrom learn import *\n\nimport wandb\n\nwandb.init(project='SAILGoemotions', entity='justinolah')\nconfig = wandb.config\n\nseed_value=42\nnp.random.seed(seed_value)\ntorch.manual_seed(seed_value)\nrandom.seed(seed_value) \ntorch.cuda.manual_seed(seed_value)\ntorch.cuda.manual_seed_all(seed_value)\ntorch.backends.cudnn.deterministic = True\ntorch.backends.cudnn.benchmark = False\n\nclass BERT_Model(nn.Module):\n\tdef __init__(self, bert, numEmotions):\n\t\tsuper(BERT_Model, self).__init__()\n\t\tself.bert = bert \n\t\tself.fc = nn.Linear(768, numEmotions)\n\n\tdef forward(self, sent_id, mask):\n\t\t_, cls_hs = self.bert(sent_id, attention_mask=mask, return_dict=False)\n\t\tout = self.fc(cls_hs)\n\t\treturn out\n\nclass SBERT_Model(nn.Module):\n\tdef __init__(self, sbert, numEmotions):\n\t\tsuper(SBERT_Model, self).__init__()\n\t\tself.sbert = sbert\n\t\tself.fc = nn.Linear(384, numEmotions)\n\n\tdef forward(self, sent_id, mask):\n\t\tatt = self.sbert(sent_id, attention_mask=mask)\n\t\tembed = self.mean_pooling(att, mask)\n\t\tout = self.fc(embed)\n\t\treturn out\n\n\tdef mean_pooling(self, model_output, attention_mask):\n\t\ttoken_embeddings = model_output[0] #First element of model_output contains all token embeddings\n\t\tinput_mask_expanded = attention_mask.unsqueeze(-1).expand(token_embeddings.size()).float()\n\t\tsum_embeddings = torch.sum(token_embeddings * input_mask_expanded, 1)\n\t\tsum_mask = torch.clamp(input_mask_expanded.sum(1), min=1e-9)\n\t\treturn sum_embeddings / sum_mask\n\n\ndef makeBERTDataset(data, tokenizer, max_length, emotions, new_emotions=None, idx_map=None):\n\tif idx_map is None:\n\t\tdata.labels = data.labels.apply(lambda x: getYMatrix(x,len(emotions)))\n\telse:\n\t\tdata.labels = data.labels.apply(lambda x: getYMatrixWithMap(x, len(new_emotions), idx_map))\n\n\ttokens = tokenizer.batch_encode_plus(\n\t\tdata.text.tolist(),\n\t\tmax_length = max_length,\n\t\tpadding='max_length',\n\t\ttruncation=True\n\t)\n\n\tseq = torch.tensor(tokens['input_ids'])\n\tmask = 
torch.tensor(tokens['attention_mask'])\n\ty = torch.tensor(data.labels.tolist())\n\n\tdataset = TensorDataset(seq, mask, y)\n\treturn dataset\n\ndef trainNN(model, trainloader, devloader, optimizer, loss_fn, threshold, device):\n\tsigmoid = nn.Sigmoid()\n\n\tmodel.train()\n\n\ttrain_running_loss = 0.0\n\ttraintargets = []\n\ttrainpredictions = []\n\n\tloop = tqdm(enumerate(trainloader), total=len(trainloader), leave=False)\n\tfor i, batch in loop:\n\t\tseq, mask, labels = batch\n\t\ttraintargets.append(labels.detach())\n\n\t\toptimizer.zero_grad()\n\t\toutput = model(seq.to(device), mask.to(device))\n\n\t\tloss = loss_fn(output, labels.float().to(device))\n\t\tloss.backward()\n\t\toptimizer.step()\n\n\t\toutput = sigmoid(output)\n\t\ttrainpredictions.append((output.cpu() > threshold).int().detach())\n\n\t\ttrain_running_loss += loss.item()\n\n\t\tloop.set_postfix(loss = loss.item())\n\n\ttrainLoss = train_running_loss / len(trainloader)\n\n\ttrainpredictions = np.concatenate(trainpredictions)\n\ttraintargets = np.concatenate(traintargets)\n\tf1_train = f1_score(traintargets, trainpredictions, average='macro')\n\n\t#dev evaluation\n\tmodel.eval()\n\tdev_running_loss = 0.0\n\tdevtargets = []\n\tdevpredictions = []\n\tfor batch in devloader:\n\t\tseq, mask, labels = batch\n\t\tdevOutput = model(seq.to(device), mask.to(device))\n\n\t\tdev_running_loss += loss_fn(devOutput, labels.float().to(device)).item()\n\n\t\tdevOutput = sigmoid(devOutput)\n\t\tdevpredictions.append((devOutput.cpu() > threshold).int().detach())\n\t\tdevtargets.append(labels)\n\n\tdevLoss = dev_running_loss / len(devloader)\n\n\tdevpredictions = np.concatenate(devpredictions)\n\tdevtargets = np.concatenate(devtargets)\n\tf1_dev = f1_score(devtargets, devpredictions, average='macro')\n\n\treturn trainLoss, devLoss, f1_train, f1_dev\n\ndef main():\n\tepochs = 5\n\tbatch_size = 16\n\tmax_length = 128\n\tweight_decay = 0\n\tthreshold = 0.5\n\tlr = 5e-5\n\tfilename = \"bert\"\n\tframework = \"bert\"\n\tgrouping = None\n\n\tfreeze_bert = False\n\n\tconfig.epochs = epochs\n\tconfig.batch_size = batch_size\n\tconfig.max_length = max_length\n\tconfig.weight_decay = weight_decay\n\tconfig.lr = lr\n\tconfig.freeze_bert = freeze_bert\n\tconfig.framework = framework\n\tconfig.grouping = grouping\n\n\tif torch.cuda.is_available():\n\t\tprint(\"Using cuda...\")\n\t\ttorch.cuda.set_device(2)\n\t\tdevice = torch.device(\"cuda:2\")\n\telse:\n\t\tprint(\"Using cpu...\")\n\t\tdevice = torch.device(\"cpu\")\n\n\t#make Datasets\n\temotions = getEmotions()\n\temotions.remove(\"neutral\")\n\n\tekmanDict = getEkmanDict()\n\tsentDict = getSentimentDict()\n\tekEmotions = ekmanDict.keys()\n\tsentEmotions = sentDict.keys()\n\tek_idx_map = getEmotionIndexMap(emotions, ekmanDict)\n\tsent_idx_map = getEmotionIndexMap(emotions, sentDict)\n\n\ttrain = getTrainSet()\n\ttest = getTestSet()\n\tdev = getValSet()\n\n\ttokenizer = BertTokenizerFast.from_pretrained('bert-base-uncased')\n\t#tokenizer = AutoTokenizer.from_pretrained(\"sentence-transformers/paraphrase-MiniLM-L6-v2\")\n\t#tokenizer = AutoTokenizer.from_pretrained(\"sentence-transformers/all-mpnet-base-v2\")\n\n\tif grouping == \"ekman\":\n\t\tnew_emotions = ekEmotions\n\t\tidx_map = ek_idx_map\n\telif grouping == \"sentiment\":\n\t\tnew_emotions = sentEmotions\n\t\tidx_map = sent_idx_map\n\telse:\n\t\tnew_emotions = emotions\n\t\tidx_map = None\n\n\ttrain_set = makeBERTDataset(train, tokenizer, max_length, emotions, new_emotions=new_emotions, idx_map=idx_map)\n\ttest_set = makeBERTDataset(test, 
tokenizer, max_length, emotions, new_emotions=new_emotions, idx_map=idx_map)\n\tdev_set = makeBERTDataset(dev, tokenizer, max_length, emotions, new_emotions=new_emotions, idx_map=idx_map)\n\n\ttrainloader = DataLoader(train_set, batch_size=batch_size)\n\ttestloader = DataLoader(test_set, batch_size=batch_size)\n\tdevLoader = DataLoader(dev_set, batch_size=batch_size)\n\n\tif grouping is None:\n\t\t#pos_weight = torch.ceil(torch.div((5000 - torch.sum(torch.tensor(train.labels),0)), torch.sum(torch.tensor(train.labels),0))).to(device)\n\t\tpos_weight = torch.sqrt(torch.div((len(train.labels) - torch.sum(torch.tensor(train.labels),0)), torch.sum(torch.tensor(train.labels),0))).to(device)\n\t\t#pos_weight = 8 * torch.ones(len(emotions)).to(device)\n\telse:\n\t\tpos_weight = None\n\t\n\tif grouping == \"ekman\":\n\t\temotions = ekEmotions\n\telif grouping == \"sentiment\":\n\t\temotions = sentEmotions\n\n\t#initialize model\n\tbert = BertModel.from_pretrained('bert-base-uncased')\n\t#sbert = AutoModel.from_pretrained(\"sentence-transformers/paraphrase-MiniLM-L6-v2\")\n\t#sbert = AutoModel.from_pretrained(\"sentence-transformers/all-mpnet-base-v2\")\n\tif freeze_bert:\n\t\tfor param in bert.parameters():\n\t\t\tparam.requires_grad = False\n\tmodel = BERT_Model(bert, len(emotions))\n\t#model = SBERT_Model(sbert, len(emotions))\n\tmodel = model.to(device)\n\twandb.watch(model)\n\n\tloss_fn= nn.BCEWithLogitsLoss(pos_weight=pos_weight)\n\n\toptimizer = torch.optim.Adam(model.parameters(), lr=lr, weight_decay=weight_decay)\n\n\t#train model\n\tmodel.train()\n\ttrainLoss = []\n\tdevLoss = []\n\ttrainF1 = []\n\tdevF1 = []\n\n\tfor epoch in range(epochs):\n\t\tprint(\"Epoch:\", epoch)\n\t\tepoch_loss, dev_loss, f1_train, f1_dev = trainNN(model, trainloader, devLoader, optimizer, loss_fn, threshold, device)\n\t\ttrainLoss.append(epoch_loss)\n\t\tdevLoss.append(dev_loss)\n\t\ttrainF1.append(f1_train)\n\t\tdevF1.append(f1_dev)\n\t\tprint(\"Training Loss:\", epoch_loss)\n\t\tprint(\"Dev Loss:\", dev_loss)\n\t\tprint(\"Training Macro F1:\", f1_train)\n\t\tprint(\"Dev Macro F1:\", f1_dev, \"\\n\")\n\n\t\twandb.log({\"train_loss\": epoch_loss,\n\t\t\t\"dev_loss\": dev_loss,\n\t\t\t\"train_f1\": f1_train,\n\t\t\t\"dev_f1\": f1_dev\n\t\t})\n\n\t\tif epoch == 0 or np.argmax(devF1) == epoch:\n\t\t\tprint(\"saving checkpoint...\")\n\t\t\ttorch.save({\n\t\t\t\t'epoch': epoch,\n\t\t\t\t'model_state_dict': model.state_dict(),\n\t\t\t\t'devF1' : devF1[-1],\n\t\t\t\t}, f\"{filename}.pt\")\n\n\tprint(\"Training complete\\n\")\n\n\t#learning curve \n\tfig, (ax1, ax2) = plt.subplots(2, figsize=(11, 9))\n\tax1.plot(trainLoss, color='b', label='Training loss')\n\tax1.plot(devLoss, color='r', label='Dev loss')\n\tfig.suptitle(f\"Bert Arch. 
LR:{lr}, BS:{batch_size}\")\n\tax1.set(xlabel='Epochs', ylabel=\"Loss\")\n\tax1.legend()\n\tax2.plot(trainF1, color='b', label='Training Macro F1')\n\tax2.plot(devF1, color='r', label='Dev Macro F1')\n\tax2.set(xlabel='Epochs', ylabel=\"Macro F1\")\n\tax2.legend()\n\tfig.savefig(\"plots/learningcurve_\" + filename + \".png\")\n\n\t#Testing metrics\n\tbestCheckpoint = torch.load(f\"{filename}.pt\", map_location=device)\n\tmodel.load_state_dict(bestCheckpoint['model_state_dict'])\n\tbestEpochDevF1 = bestCheckpoint['epoch']\n\tbestDevF1 = bestCheckpoint['devF1']\n\tprint(\"Best Dev F1:\", bestDevF1, \"at epoch\", bestEpochDevF1, \"\\n\")\n\n\tmodel.eval()\n\tsigmoid = nn.Sigmoid()\n\ttargets = []\n\toutputs = []\n\tpredictions = []\n\tfor batch in testloader:\n\t\tseq, mask, labels = batch\n\t\toutput = model(seq.to(device), mask.to(device))\n\n\t\toutput = sigmoid(output)\n\t\toutput = output.cpu()\n\t\toutputs.append(output.detach())\n\t\tpredictions.append((output > threshold).int().detach())\n\t\ttargets.append(labels)\n\n\tpredictions = np.concatenate(predictions)\n\ttargets = np.concatenate(targets)\n\toutputs = np.concatenate(outputs)\n\n\taccuracy = accuracy_score(targets, predictions)\n\n\tprint(\"Subset Accuracy:\", accuracy)\n\tprint(classification_report(targets, predictions, target_names=emotions, zero_division=0, output_dict=False))\n\tprint(\"Best Dev F1:\", bestDevF1, \"at epoch\", bestEpochDevF1, \"\\n\")\n\treport = classification_report(targets, predictions, target_names=emotions, zero_division=0, output_dict=True)\n\n\ttable = wandb.Table(dataframe=pd.DataFrame.from_dict(report))\n\twandb.log({\"report\": table})\n\n\t#export resuls to csv\n\tmicro = list(report['micro avg'].values())\n\tmicro.pop() \n\tmacro = list(report['macro avg'].values())\n\tmacro.pop()\n\tscores = [accuracy, *micro, *macro]\n\tresults = pd.DataFrame(data=[scores], columns=['accuracy', 'micro_precision', 'micro_recall', 'micro_f1', 'macro_precision', 'macro_recall', 'macro_f1'])\n\tresults.to_csv(\"tables/\" + filename + \"_results.csv\")\n\n\t#confusion matrix\n\tmultilabel_confusion_matrix(np.array(targets), np.array(outputs), emotions, top_x=3, filename=filename)\n\n\nif __name__ == \"__main__\":\n\tmain()","sub_path":"train_bert.py","file_name":"train_bert.py","file_ext":"py","file_size_in_byte":10095,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"54999220","text":"import datetime\nimport urllib.request\nfrom collections import defaultdict\n\nimport requests\nfrom requests.utils import quote\n\n#sys.path.insert(0,'/mvp-module-library/Biclusters/bicluster_RNAseqDB_wrapper')\n\n\nFA_genes_url = \"https://raw.githubusercontent.com/NCATS-Tangerine/cq-notebooks/master/FA_gene_sets/FA_1_core_complex.txt\"\nFA_genes_all_url = \"https://raw.githubusercontent.com/NCATS-Tangerine/cq-notebooks/master/FA_gene_sets/FA_4_all_genes.txt\"\nFA_geneset = []\n\nwith urllib.request.urlopen(FA_genes_all_url) as url:\n FA_geneset.append(url.read().decode().split('\\n'))\n\nprint(datetime.datetime.now().time())\n\nprint(FA_geneset[0])\nprint(len(FA_geneset[0]))\n\ntissues = []\ncounts = dict()\nmultidict_tissue = defaultdict(lambda: defaultdict(set))\nfor i in range(1, len(FA_geneset[0])):\n split = FA_geneset[0][i].split('\\t')\n if len(split) <= 1:\n print(split)\n continue\n curid = split[0].lower()\n curlabel = split[1].lower()\n\n print(\"curid\", curid)\n if(len(curid) > 1):\n api_url = 
\"https://bicluster.renci.org/RNAseqDB_bicluster_gene_to_tissue_gene/\"+quote(curid)+\"/?include_similar=true\"\n\n print(api_url)\n responses = requests.get(api_url).json()\n #import pudb; pu.db\n\n for d in responses:\n key = d['col_enrich_all']\n for n in ['MONDO', 'UBERON', 'DOID', 'NCIT']:\n label = d[f'col_enrich_{n}_label']\n if label != '':\n multidict_tissue[key]['tissue_labels'].add(label)\n for n in ['MONDO', 'UBERON', 'DOID', 'NCIT']:\n label = d[f'col_enrich_{n}']\n if label != '':\n multidict_tissue[key]['tissue_ids'].add(label)\n multidict_tissue[key]['gene_ids'].add(curid)\n multidict_tissue[key]['gene_labels'].add(curlabel)\n\n\n\n\n #print(response)\n if(len(responses) > 1):\n for a in range(1, len(responses)):\n #import pudb; pu.db\n #col_enrich_all\n\n key = responses[a]['col_enrich_all']\n d = responses[0]\n\n if len(key) > 0:\n if len(responses[a]['col_enrich_MONDO_label']) > 0:\n multidict_tissue[key]['tissue_labels'].add(d['col_enrich_MONDO_label'])\n if len(responses[a]['col_enrich_UBERON_label']) > 0:\n multidict_tissue[key]['tissue_labels'].add(d['col_enrich_UBERON_label'])\n if len(responses[a]['col_enrich_DOID_label']) > 0:\n multidict_tissue[key]['tissue_labels'].add(d['col_enrich_DOID_label'])\n if len(responses[a]['col_enrich_NCIT_label']) > 0:\n multidict_tissue[key]['tissue_labels'].add(d['col_enrich_NCIT_label'])\n\n if len(responses[a]['col_enrich_MONDO']) > 0:\n multidict_tissue[key]['tissue_ids'].add(d['col_enrich_MONDO'])\n if len(responses[a]['col_enrich_UBERON']) > 0:\n multidict_tissue[key]['tissue_ids'].add(d['col_enrich_UBERON'])\n if len(responses[a]['col_enrich_DOID']) > 0:\n multidict_tissue[key]['tissue_ids'].add(d['col_enrich_DOID'])\n if len(responses[a]['col_enrich_NCIT']) > 0:\n multidict_tissue[key]['tissue_ids'].add(d['col_enrich_NCIT'])\n\n multidict_tissue[key]['gene_ids'].add(curid)\n multidict_tissue[key]['gene_labels'].add(curlabel)\n\n\n #print(response[0]['col_enrich_MONDO_label'])\n #print(response[0]['col_enrich_UBERON_label'])\n #print(response[0]['col_enrich_DOID_label'])\n #print(response[0]['col_enrich_NCIT_label'])\n\n\n\n #print(response.text[0])\n #for i in range(1, len(response.text)):\n # print(response.text[i])\n #with urllib.request.urlopen(api_url) as url:\n # data = json.loads(url.read().decode())\n # print(data)\n # tissues.append(data)\n\n\n#print(sort(counts))\n#sortcounts = sorted(counts.items(), key=lambda x: x[1])\n\n#print(json.dumps(sortcounts, indent = 4))\n#print(len(sortcounts))\n\nfor x in multidict_tissue:\n print (x, ':', multidict_tissue[x]['tissue_ids'], multidict_tissue[x]['tissue_labels'],multidict_tissue[x]['gene_ids'], multidict_tissue[x]['gene_labels'])\n\n\nprint(\"id and label order may not correspond\")\n#print(json.dumps(list(tissue_to_gene), indent = 4))\n\n#for x in sortcounts:\n #print (x)\n# for y in sortcounts[x]:\n# print (y,':',sortcounts[x][y])\n\n\n#for key, value in sorted(tissue_to_gene.iteritems(), key=lambda (k,v): (v,k)):\n# print \"%s: %s\" % (key, value)","sub_path":"scripts/MVP2_FA_biclusters.py","file_name":"MVP2_FA_biclusters.py","file_ext":"py","file_size_in_byte":4734,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"536612303","text":"from spi_rack import *\nfrom chip_mode import *\nimport math\n\nclass S5i_module(object):\n \"\"\"S5i module interface class\n\n This class does the low level interfacing with the S5i RF generator module.\n It requires an SPI Rack object and module number at initialization. 
A start\n    up frequency can be given, otherwise it defaults to 100 MHz.\n\n    The RF frequency can be changed via set_frequency, which calculates the\n    register values and updates the frequency of the ADF4351.\n\n    Attributes:\n        rf_frequency: the currently set RF output frequency\n    """\n\n    def __init__(self, spi_rack, module, frequency=100):\n        """Inits S5i module class\n\n        The S5i module needs an SPI_rack class for communication. If no frequency\n        is given at initialization, the output will be set to 100 MHz.\n\n        Args:\n            spi_rack: SPI_rack class object via which the communication runs\n            module: module number set on the hardware\n            frequency: RF frequency at startup, default 100 MHz\n        Example:\n            S5i_1 = S5i_module(SPI_Rack_1, 4)\n        """\n        self.spi_rack = spi_rack\n        self.module = module\n\n        self.rf_frequency = frequency\n        self.outputPower = None\n\n        # These are the 6 registers present in the ADF4351\n        self.registers = 6*[0]\n        # In REG3: set ABP=1 (3 ns, INT-N) and CHARGE CANCEL=1\n        self.registers[3] = (1<<22) | (1<<21) | 3\n        # In REG5: set LD PIN MODE to 1 -> digital lock detect\n        self.registers[5] = (1<<22) | 5\n\n        self.set_frequency(frequency)\n\n    def write_registers(self):\n        """Writes data via the SPI Rack class to the SPI Rack\n\n        Writes the current register settings to the ADF4351 in reversed order\n        of storage: REG5 to REG0. This is required according to the datasheet. The\n        output is only updated when REG0 is written to\n        """\n        for reg in reversed(self.registers):\n            b1 = (reg>>24)&0xFF\n            b2 = (reg>>16)&0xFF\n            b3 = (reg>>8)&0xFF\n            b4 = reg&0xFF\n            data = bytearray([b1, b2, b3, b4])\n            # Write to ADF at SPI address 0\n            self.spi_rack.write_data(self.module, 0, ADF4351_MODE, data)\n\n    def set_frequency(self, frequency):\n        """Calculates and sets the RF output to given frequency\n\n        Calculates the registers for the given RF frequency, optimized for the\n        smallest value for the multiplier to minimize the (phase) noise. 
Writes\n        the settings to the module after calculation\n\n        Args:\n            frequency: the wanted output frequency in MHz\n        """\n        ###\n        #TODO: add doubler/divider in algorithm to potentially lower noise\n        #      add checks to see if possible to set frequency\n        #      add user feedback in case of problems\n        ###\n\n        #Get the backplane reference frequency\n        fref = float(self.spi_rack.ref_frequency)\n        #Calculate VCO output divider:\n        div = 0\n        for n in range(0,7):\n            VCO = 2**n * frequency\n            if VCO >= 2200 and VCO <= 4400:\n                div = n\n                break\n        #Prescaler: 0 (4/5) if < 3.6 GHz, 1 (8/9) if >= 3.6 GHz\n        #Nmin changes with prescaler:\n        if frequency >= 3600.0:\n            prescaler = 1\n            Nmin = 75\n        else:\n            prescaler = 0\n            Nmin = 23\n\n        #Find INT/R relation with minimum size for INT to keep noise as low\n        #as possible\n        INT = 0\n        R = 0\n        for n in range(Nmin, 65536):\n            R_t = (n*fref)/(frequency)\n            if R_t.is_integer():\n                INT = n\n                R = int(R_t)\n                break\n\n        #Check that band select is smaller than 125 kHz, otherwise divide\n        #until it is\n        fpfd = fref/R\n        band_sel = 1\n        if fpfd > 0.125:\n            band_sel = int(math.ceil(fpfd/0.125))\n\n        # In REG4: Set calculated divider and band select, enable RF out at max power\n        self.registers[4] = (div<<20) | (band_sel<<12) | (1<<5) | (3<<3) | 4\n        # In REG2: Set calculated R value, enable double buffer, LDF=INT-N, LDP=6ns, PD_POL = Positive\n        self.registers[2] = (R<<14) | (1<<13) | (7<<9) | (1<<8) | (1<<7) | (1<<6) | 2\n        # In REG1: Set prescaler value\n        self.registers[1] = (prescaler <<27) | 1\n        # In REG0: Set calculated INT value\n        self.registers[0] = (INT<<15)\n\n        self.write_registers()\n","sub_path":"S5i/S5i_module.py","file_name":"S5i_module.py","file_ext":"py","file_size_in_byte":4478,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"133070012","text":"'''\nA number of convenience functions used by the different algorithms.\nAlso includes some constants.\n\nThere are 5 functions that call d and therefore require an explicit metric:\n- cost_fn\n- cost_fn_difference\n- cost_fn_difference_FP1\n- get_best_distances\n- estimate_sigma\n- medoid_swap\n'''\n\nimport os\nimport sys\nimport numpy as np\nimport mnist\nimport matplotlib.pyplot as plt\nimport argparse\nimport pandas as pd\nimport pickle\n\nfrom zss import simple_distance, Node\nfrom sklearn.manifold import TSNE\nfrom scipy.spatial.distance import cosine\nfrom sklearn.metrics import pairwise_distances\n\nDECIMAL_DIGITS = 5\nSIGMA_DIVISOR = 1\n\ndef get_args(arguments):\n    parser = argparse.ArgumentParser(description=__doc__,\n        formatter_class=argparse.RawDescriptionHelpFormatter)\n    parser.add_argument('-v', '--verbose', help = 'print debugging output', action = 'count', default = 0)\n    parser.add_argument('-k', '--num_medoids', help = 'Number of medoids', type = int, default = 3)\n    parser.add_argument('-N', '--sample_size', help = 'Sampling size of dataset', type = int, default = 700)\n    parser.add_argument('-s', '--seed', help = 'Random seed', type = int, default = 42)\n    parser.add_argument('-d', '--dataset', help = 'Dataset to use', type = str, default = 'MNIST')\n    parser.add_argument('-c', '--cache_computed', help = 'Cache computed', default = None)\n    parser.add_argument('-m', '--metric', help = 'Metric to use (L1 or L2)', type = str)\n    parser.add_argument('-f', '--force', help = 'Recompute Experiments', action = 'store_true')\n    parser.add_argument('-p', '--fast_pam1', help = 'Use FastPAM1 optimization', action = 'store_true')\n    parser.add_argument('-r', '--fast_pam2', help = 'Use 
FastPAM2 optimization', action = 'store_true')\n parser.add_argument('-w', '--warm_start_medoids', help = 'Initial medoids to start with', type = str, default = '')\n parser.add_argument('-B', '--build_ao_swap', help = 'Build or Swap, B = just build, S = just swap, BS = both', type = str, default = 'BS')\n parser.add_argument('-e', '--exp_config', help = 'Experiment configuration file to use', required = False)\n args = parser.parse_args(arguments)\n return args\n\ndef load_data(args):\n '''\n Load the different datasets, as a numpy matrix if possible. In the case of\n HOC4 and HOC18, load the datasets as a list of trees.\n '''\n if args.dataset == 'MNIST':\n N = 70000\n m = 28\n sigma = 0.7\n train_images = mnist.train_images()\n train_labels = mnist.train_labels()\n\n test_images = mnist.test_images()\n test_labels = mnist.test_labels()\n\n total_images = np.append(train_images, test_images, axis = 0)\n total_labels = np.append(train_labels, test_labels, axis = 0)\n\n assert((total_images == np.vstack((train_images, test_images))).all())\n assert((total_labels == np.hstack((train_labels, test_labels))).all()) # NOTE: hstack since 1-D\n assert(total_images.shape == (N, m, m))\n assert(total_labels.shape == (N,))\n\n # Normalizing images\n return total_images.reshape(N, m * m) / 255, total_labels, sigma\n elif args.dataset == \"SCRNA\":\n file = 'scrna_data/np_data.npy'\n data_ = np.load(file)\n sigma = 25\n return data_, None, sigma\n elif args.dataset == \"SCRNAPCA\":\n file = 'person1/fresh_68k_pbmc_donor_a_filtered_gene_bc_matrices/analysis_csv/pca/projection.csv'\n df = pd.read_csv(file, sep=',', index_col = 0)\n np_arr = df.to_numpy()\n sigma = 0.01\n return np_arr, None, sigma\n elif args.dataset == 'HOC4':\n dir_ = 'hoc_data/hoc4/trees/'\n tree_files = [dir_ + tree for tree in os.listdir(dir_) if tree != \".DS_Store\"]\n trees = []\n for tree_f in sorted(tree_files):\n with open(tree_f, 'rb') as fin:\n tree = pickle.load(fin)\n trees.append(tree)\n\n if args.verbose >= 1:\n print(\"NUM TREES:\", len(trees))\n\n return trees, None, 0.0\n elif args.dataset == 'HOC18':\n dir_ = 'hoc_data/hoc18/trees/'\n tree_files = [dir_ + tree for tree in os.listdir(dir_) if tree != \".DS_Store\"]\n trees = []\n for tree_f in tree_files:\n with open(tree_f, 'rb') as fin:\n tree = pickle.load(fin)\n trees.append(tree)\n\n if args.verbose >= 1:\n print(\"NUM TREES:\", len(trees))\n\n return trees, None, 0.0\n elif args.dataset == 'GAUSSIAN':\n dataset = create_gaussians(args.sample_size, ratio = 0.6, seed = args.seed, visualize = False)\n return dataset\n else:\n raise Exception(\"Didn't specify a valid dataset\")\n\n\n\ndef visualize_medoids(dataset, medoids, visualization = 'tsne'):\n '''\n Helper function to visualize the given medoids of a dataset using t-SNE\n '''\n\n if visualization == 'tsne':\n X_embedded = TSNE(n_components=2).fit_transform(dataset)\n plt.scatter(X_embedded[:, 0], X_embedded[:, 1], c='b')\n plt.scatter(X_embedded[medoids, 0], X_embedded[medoids, 1], c='r')\n plt.show()\n else:\n raise Exception('Bad Visualization Arg')\n\ndef create_gaussians(N, ratio = 0.6, seed = 42, visualize = True):\n '''\n Create some 2-D Gaussian toy data.\n '''\n\n np.random.seed(seed)\n cluster1_size = int(N * ratio)\n cluster2_size = N - cluster1_size\n\n cov1 = np.array([[1, 0], [0, 1]])\n cov2 = np.array([[1, 0], [0, 1]])\n\n mu1 = np.array([-10, -10])\n mu2 = np.array([10, 10])\n\n cluster1 = np.random.multivariate_normal(mu1, cov1, cluster1_size)\n cluster2 = np.random.multivariate_normal(mu2, 
cov2, cluster2_size)\n\n    if visualize:\n        plt.scatter(cluster1[:, 0], cluster1[:, 1], c='r')\n        plt.scatter(cluster2[:, 0], cluster2[:, 1], c='b')\n        plt.show()\n\n    return np.vstack((cluster1, cluster2))\n\n\n\nif __name__ == "__main__":\n    create_gaussians(1000, 0.5, 42)\n\n    ####### Use the code below to visualize some medoids with t-SNE\n    # args = get_args(sys.argv[1:])\n    # total_images, total_labels, sigma = load_data(args)\n    # np.random.seed(args.seed)\n    # imgs = total_images[np.random.choice(range(len(total_images)), size = args.sample_size, replace = False)]\n    # visualize_medoids(imgs, [891, 392])\n","sub_path":"python_generate_plots/data_utils.py","file_name":"data_utils.py","file_ext":"py","file_size_in_byte":6330,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"151633350","text":"import getopt\r\nimport os\r\nimport sys\r\nimport threading\r\nimport time\r\nfrom tkinter import Tk\r\n\r\nimport numpy as np\r\nimport serial\r\n\r\nfrom CANcom import VESC\r\nfrom TCPcom import TCPCOM, encode_data\r\nfrom dashboard import Dashboard\r\n\r\n# lists storing the received values (from Matlab and the steering-wheel Arduino)\r\nlastmsgTCP = [0, 0, 0, 0, 0, 0, 0]\r\nlastmsgUART = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]\r\n\r\nstop = False\r\n\r\n\r\n# export DISPLAY=:0.0\r\n# python3 home/pi/tmp/pycharm_project_130/PV3eRasp.py -i 46.105.28.70 -p 11000\r\n\r\ndef start(IP, PORT):\r\n    # CAN module configuration\r\n    os.system("sudo ip link set can0 up type can bitrate 500000")\r\n    os.system("sudo ifconfig can0 txqueuelen 65536")\r\n\r\n    c = TCPCOM(IP, PORT)\r\n    vesc = VESC(0, 1030)\r\n\r\n    # start the CAN-port listening thread\r\n    listen_can_thr = threading.Thread(target=vesc.listen_can_thread)\r\n    listen_can_thr.start()\r\n    # start the UART-port listening thread\r\n    listen_uart_thr = threading.Thread(target=listen_uart_thread)\r\n    listen_uart_thr.start()\r\n\r\n    # (for testing)\r\n    data = rand_data()\r\n\r\n    # start the display\r\n    window = Tk()\r\n    dash = Dashboard(window, data)\r\n\r\n    tic = time.time() * 1000\r\n\r\n\r\n    time_between_update = 1000  # time between each data upload to the server (ms)\r\n    while not dash.stop:\r\n        if c.connected:\r\n            received = c.read()\r\n            if received is not None:\r\n                on_receive(received)\r\n\r\n        # retrieve the data here\r\n        dash.data = get_data(vesc, c)\r\n\r\n        # send the data every second\r\n        if time.time() * 1000 - tic > time_between_update:\r\n            tic = time.time() * 1000\r\n            if c.connected:\r\n                c.send_data(encode_data(dash.data))\r\n            else:\r\n                c.connect()\r\n        if dash.master is None:\r\n            continue\r\n        dash.master.update_idletasks()\r\n        dash.master.update()\r\n\r\n    c.disconnect()\r\n    vesc.stop = True\r\n    global stop\r\n    stop = True\r\n    listen_can_thr.join()\r\n    print("thread can ended")\r\n    listen_uart_thr.join()\r\n    print("thread uart ended")\r\n\r\n\r\n# listen on the UART port and decode the values (you may need to change '/dev/ttyUSB0' depending on the port used)\r\ndef listen_uart_thread():\r\n    ser = serial.Serial('/dev/ttyUSB0', 9600, timeout=1)\r\n    ser.flush()\r\n    while not stop:\r\n        incom = bytearray()\r\n        if ser.in_waiting > 0:\r\n            start = ser.readline()\r\n            boo = ser.read()\r\n            boo2 = ser.read()\r\n            speed = ser.read()\r\n            speed2 = ser.read()\r\n            dist = ser.read()\r\n            dist2 = ser.read()\r\n            pot = ser.read()\r\n            pot2 = ser.read()\r\n            mode = ser.read()\r\n            mode2 = ser.read()\r\n            lastmsgUART[2] = (boo[0] >> 0) & 0x01\r\n            lastmsgUART[3] = (boo[0] >> 1) & 0x01\r\n            lastmsgUART[4] = (boo[0] >> 2) & 0x01\r\n            lastmsgUART[5] = (boo[0] >> 3) & 0x01\r\n            lastmsgUART[6] = (boo[0] >> 4) & 0x01\r\n            lastmsgUART[7] = (boo[0] >> 5) & 0x01\r\n            lastmsgUART[8] = (boo[0] >> 
6) & 0x01\n            lastmsgUART[9] = (boo[0] >> 7) & 0x01\n            lastmsgUART[10] = (boo2[0] >> 0) & 0x01\n            lastmsgUART[11] = (boo2[0] >> 1) & 0x01\n            lastmsgUART[12] = (boo2[0] >> 2) & 0x01\n            lastmsgUART[13] = (boo2[0] >> 3) & 0x01\n            lastmsgUART[14] = (boo2[0] >> 4) & 0x01\n            lastmsgUART[15] = (boo2[0] >> 5) & 0x01\n            lastmsgUART[0] = toInt16(speed[0] << 8 | speed2[0]) / 10.0\n            lastmsgUART[1] = toInt16(dist[0] << 8 | dist2[0])\n            lastmsgUART[16] = toInt16(pot[0] << 8 | pot2[0])\n            lastmsgUART[17] = toInt16(mode[0] << 8 | mode2[0])\n\n\n# generate random data (for testing)\ndef rand_data():\n    data = np.array([])\n    for i in range(12):\n        data = np.append(data, np.random.rand() * 1000)\n    for i in range(16):\n        data = np.append(data, np.random.randint(2))\n    data = np.append(data, np.random.randint(1, 8))\n    data = np.append(data, np.random.randint(0, 8))\n    data[10] = data[10] / 5\n\n    data = np.append(data, np.random.rand() * 1000)\n    data = np.append(data, np.random.rand() * 1000)\n    data = np.append(data, np.random.rand() * 1000)\n    data = np.append(data, np.random.rand() * 100)\n    data = np.append(data, np.random.rand() * 1000)\n    data = np.append(data, np.random.randint(20))\n    data = np.append(data, np.random.rand() * 100)\n    data = np.append(data, np.random.rand() * 100)\n    data = np.append(data, np.random.randint(20))\n    data = np.append(data, np.random.rand() * 10 - 5)\n    data = np.append(data, np.random.rand() * 10 - 5)\n    data = np.append(data, np.random.rand() * 10 - 5)\n    data = np.append(data, np.random.rand() * 100)\n    data = np.append(data, np.random.rand() * 100)\n    data = np.append(data, np.random.rand() * 100)\n    data = np.append(data, np.random.rand() * 100)\n    data = np.append(data, np.random.rand() * 100)\n    data = np.append(data, np.random.rand() * 100)\n    data = np.append(data, np.random.rand() * 100)\n\n    return data\n\n\n# decode the data received from MATLAB\n# note that MATLAB sends its data in little-endian format\n# big-endian is used everywhere else\ndef on_receive(msg):\n    arr = bytearray(msg)\n    if len(arr) >= 14:\n        lastmsgTCP[0] = arr[1] << 8 | arr[0]\n        lastmsgTCP[1] = 0.1 * (arr[3] << 8 | arr[2])\n        lastmsgTCP[2] = arr[5] << 8 | arr[4]\n        lastmsgTCP[3] = 0.1 * (toInt16(arr[7] << 8 | arr[6]))\n        lastmsgTCP[4] = 0.1 * (toInt16(arr[9] << 8 | arr[8]))\n        lastmsgTCP[5] = 0.1 * (toInt16(arr[11] << 8 | arr[10]))\n        lastmsgTCP[6] = arr[13] << 8 | arr[12]\n\n# sign-extend a 16-bit value into a Python int (32-bit semantics)\ndef toInt16(value):\n    vint = int(value)\n    if vint >> 15 == 1:\n        toadd = int(-1)\n        toadd = toadd - pow(2, 16) + 1\n        vint |= toadd\n    return vint\n\n\n# gather all the collected data into a single list\ndef get_data(vesc, tcp):\n    newdata = rand_data()\n\n    newdata[6] = vesc.rpm\n    newdata[8] = vesc.torque\n    newdata[9] = vesc.temp_fet\n    newdata[25] = 0\n    if tcp.connected:\n        newdata[25] = 1\n    newdata[10] = lastmsgUART[0]\n    newdata[11] = lastmsgUART[1]\n    newdata[12] = lastmsgUART[2]\n    newdata[13] = lastmsgUART[3]\n    newdata[14] = lastmsgUART[4]\n    newdata[15] = lastmsgUART[5]\n    newdata[16] = lastmsgUART[6]\n    newdata[17] = lastmsgUART[7]\n    newdata[18] = lastmsgUART[8]\n    newdata[19] = lastmsgUART[9]\n    newdata[20] = lastmsgUART[10]\n    newdata[21] = lastmsgUART[11]\n    newdata[22] = lastmsgUART[12]\n    newdata[23] = lastmsgUART[13]\n    newdata[24] = lastmsgUART[14]\n    newdata[27] = lastmsgUART[15]\n    newdata[28] = lastmsgUART[16]\n    newdata[29] = lastmsgUART[17]\n\n    newdata[35] = lastmsgTCP[0]\n    newdata[37] = lastmsgTCP[1]\n    newdata[38] = lastmsgTCP[2]\n    newdata[39] = lastmsgTCP[3]\n    newdata[40] = lastmsgTCP[4]\n    newdata[41] = lastmsgTCP[5]\n    newdata[42] = lastmsgTCP[6]\n\n    newdata[43] = vesc.temp_mot\n    newdata[44] = vesc.vin\n    newdata[45] = vesc.current\n    newdata[46] = vesc.wath\n\n    return newdata\n\n\nif __name__ == '__main__':\n    port = None\n    ip = None\n    try:\n        opts, args = getopt.getopt(sys.argv[1:], \"hi:p:\", [\"ip=\", \"port=\"])\n    except getopt.GetoptError:\n        print('PV3eRasp.py -i <ip> -p <port>')\n        sys.exit()\n    for opt, arg in opts:\n        if opt == '-h':\n            print('PV3eRasp.py -i <ip> -p <port>')\n            sys.exit()\n        elif opt in (\"-i\", \"--ip\"):\n            ip = arg\n        elif opt in (\"-p\", \"--port\"):\n            port = arg\n    if port is not None and ip is not None:\n        start(ip, int(port))\n    else:\n        print('PV3eRasp.py -i <ip> -p <port>')\n        sys.exit()\n","sub_path":"PV3eRasp.py","file_name":"PV3eRasp.py","file_ext":"py","file_size_in_byte":7772,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
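The toInt16 sign-extension above is easy to sanity-check in isolation; a minimal sketch with made-up frame bytes (the sample values are illustrative, not from the real bus):

# Standalone check of the 16-bit two's-complement decode used by listen_uart_thread.
def to_int16(value):
    value = int(value) & 0xFFFF
    return value - 0x10000 if value & 0x8000 else value

hi_byte, lo_byte = 0xFF, 0x38                 # hypothetical raw speed bytes
assert to_int16(hi_byte << 8 | lo_byte) == -200
assert to_int16(0x00C8) == 200
print(to_int16(hi_byte << 8 | lo_byte) / 10.0)  # -20.0 after the /10.0 scaling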
 +{"seq_id":"150876804","text":"# -*- coding: utf-8 -*-\r\n\"\"\"TOPOLOGICAL SORTING ALGORITHMS\"\"\"\r\nimport queue\r\n\r\n\r\nclass DirectedCyclicGraphException(ValueError):\r\n    pass\r\n\r\n\r\ndef sort_topological1(digraph):\r\n    \"\"\"Topological sorting by counting predecessors.\r\n    :param digraph: a directed graph\r\n    :returns: topological ordering of the vertices\"\"\"\r\n    vertex_queue = queue.PriorityQueue()\r\n    indegs = [digraph.get_indegree(v) for v in digraph.get_vertices()]\r\n    order = []\r\n\r\n    for v in digraph.get_vertices():\r\n        if indegs[v] == 0:\r\n            vertex_queue.put(v)\r\n\r\n    while not vertex_queue.empty():\r\n        v = vertex_queue.get()\r\n        order.append(v)\r\n        indegs[v] = None\r\n\r\n        for nb in digraph.get_neighbours(v):\r\n            indegs[nb] -= 1\r\n\r\n            if indegs[nb] == 0:\r\n                vertex_queue.put(nb)\r\n\r\n    if len(order) != digraph.vertices_number:\r\n        raise DirectedCyclicGraphException()\r\n\r\n    return iter(order)\r\n\r\n\r\ndef sort_topological2(digraph):\r\n    \"\"\"Topological sorting using DFS.\r\n    :param digraph: a directed graph\r\n    :returns: topological ordering of the vertices\"\"\"\r\n    indices = [None] * digraph.vertices_number\r\n    order = []\r\n\r\n    for v in reversed(sorted(digraph.get_vertices())):\r\n        if indices[v] is None:\r\n            _dfs(v, v, digraph, order, indices)\r\n\r\n    return (v for v in reversed(order))\r\n\r\n\r\ndef _dfs(vertex, index, digraph, order, indices):\r\n    \"\"\"DFS that determines the ordering of the vertices.\r\n    :param vertex: the current vertex\r\n    :param index: the iteration number\r\n    :param digraph: a directed graph\r\n    :param order: the topological order built so far\r\n    :param indices: iteration and processing state of the vertices\"\"\"\r\n    indices[vertex] = (index, True)\r\n\r\n    for neighbour in digraph.get_neighbours(vertex):\r\n        if indices[neighbour] is None:\r\n            _dfs(neighbour, index, digraph, order, indices)\r\n        elif indices[neighbour] == (index, True):\r\n            raise DirectedCyclicGraphException()\r\n\r\n    order.append(vertex)\r\n    indices[vertex] = (index, False)\r\n","sub_path":"AlgoLib_Python/algolib/graphs/topological_sorting.py","file_name":"topological_sorting.py","file_ext":"py","file_size_in_byte":2102,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"203894022","text":"import csv\r\nimport itertools\r\nimport re\r\nimport time\r\nimport os\r\nimport sys\r\nimport numpy as np\r\nfrom sklearn import (\r\n    linear_model, model_selection, naive_bayes, preprocessing, svm)\r\nfrom sklearn.ensemble import AdaBoostClassifier, RandomForestClassifier\r\nfrom 
sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer\r\nfrom sklearn.linear_model import LogisticRegression\r\nfrom sklearn.metrics import (accuracy_score, auc, average_precision_score,\r\n cohen_kappa_score, confusion_matrix, f1_score,\r\n precision_score, recall_score, roc_auc_score,\r\n roc_curve)\r\nfrom sklearn.model_selection import (KFold, ShuffleSplit, StratifiedKFold,\r\n cross_validate, learning_curve)\r\nfrom sklearn.naive_bayes import MultinomialNB\r\nfrom sklearn.neighbors import KNeighborsClassifier\r\nfrom sklearn.preprocessing import Imputer\r\nfrom sklearn.tree import DecisionTreeClassifier\r\nfrom sklearn.manifold import TSNE\r\nimport gensim\r\nimport matplotlib.pyplot as plt\r\nimport nltk\r\nimport pandas as pd\r\nfrom gensim.models import word2vec\r\nfrom nltk.corpus import stopwords\r\nfrom nltk.stem.snowball import SnowballStemmer\r\nfrom nltk.tokenize import sent_tokenize, word_tokenize\r\nfrom nltk.util import ngrams\r\nfrom textblob import TextBlob\r\nfrom wordcloud import WordCloud\r\nfile_name = os.path.basename(sys.argv[0])\r\nstop_words = set(stopwords.words('english'))\r\n# %matplotlib inline\r\nstopwordsk = stopwords.words('english')\r\n\r\n# Train a word2vec model with the given parameters.\r\n\r\ndef word2Vec(tweets):\r\n import logging\r\n logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s',\r\n level=logging.INFO)\r\n num_features = 500 # Word vector dimensionality\r\n min_word_count = 40 # Minimum word count\r\n num_workers = 4 # Number of threads to run in parallel\r\n context = 10 # Context window size\r\n downsampling = 1e-3 # Downsample setting for frequent words\r\n model = word2vec.Word2Vec(tweets, workers=num_workers, iter=3,\r\n size=num_features, min_count=min_word_count,\r\n window=context, sample=downsampling)\r\n model.init_sims(replace=True)\r\n # It can be helpful to create a meaningful model name and\r\n # save the model for later use. 
You can load it later using Word2Vec.load()\r\n    model_name = \"OpinionMiningModel500dim\"\r\n    model.save(model_name)\r\n\r\n    return model\r\n\r\n\r\ndef preprocessdata(tweets):\r\n    tweets = tweets.lower()\r\n    # ' '.join(re.sub(\"(@[A-Za-z0-9]+)|([^0-9A-Za-z \\t])|(\\w+:\\/\\/\\S+)\",\" \",tweets).split())\r\n    # Remove URLs.\r\n    tweets = re.sub(\r\n        '((www\\.[^\\s]+)|(https?://[^\\s]+)|(http?://[^/s]+))', '', tweets)\r\n    # Remove letters repeating more than twice.\r\n    tweets = re.compile(r'(.)\\1{2,}', re.IGNORECASE).sub(r'\\1', tweets)\r\n    # Remove usernames.\r\n    tweets = re.sub('@[^\\s]+', ' ', tweets)\r\n    # tweets = BeautifulSoup(tweets, features='lxml').get_text()\r\n    # Remove punctuation.\r\n    tweets = re.sub('[^\\w\\s]', \"\", tweets)\r\n    # remove '#' sign.\r\n    tweets = re.sub(r'#([^\\s]+)', r'\\1', tweets)\r\n    # Collapse multiple spaces into a single space.\r\n    tweets = re.sub('[\\s]+', ' ', tweets)\r\n    tweets = re.sub('<.*?>', \" \", tweets)\r\n    # remove '&' tags.\r\n    tweets = re.sub('&[\\s]+', ' ', tweets)\r\n    tweets = re.sub(r'[^a-zA-Z\\s]', '', tweets, flags=re.I | re.A)\r\n\r\n    tweets = tweets.strip()\r\n    return tweets\r\n\r\n\r\n'''\r\n# def preprocessstopwordsdata(tweets):\r\n    # Remove Punctuation.\r\n    tweets = re.sub('[^\\w\\s]', \"\", tweets)\r\n    tweets = re.sub(r'[^a-zA-Z\\s]', '', tweets, re.I | re.A)\r\n    tweets = tweets.lower()\r\n    tweets = tweets.strip()\r\n    return tweets\r\n'''\r\n# Snowball stemming the sentences, without stemming stop words.\r\n\r\n\r\ndef stem_sentences(sentence):\r\n    stemmer = SnowballStemmer('english')\r\n    tokens = sentence.split()\r\n    stemmed_tokens = [stemmer.stem(token) for token in tokens]\r\n    return ' '.join(stemmed_tokens)\r\n\r\n# TF-IDF the tweets to train the model.\r\n\r\n\r\n# Transforming the Query entered to give the sentiment.\r\ndef makeFeatureVec(tweets, model, num_features):\r\n    # Function to average all of the word vectors in a given\r\n    # paragraph\r\n    # Pre-initialize an empty numpy array (for speed)\r\n    featureVec = np.zeros((num_features,), dtype=\"float32\")\r\n    # Start the word count at 1 to avoid a division by zero for\r\n    # tweets made entirely of out-of-vocabulary tokens\r\n    nwords = 1.\r\n    # Index2word is a list that contains the names of the words in\r\n    # the model's vocabulary. Convert it to a set, for speed\r\n    index2word_set = set(model.wv.index2word)\r\n    # Loop over each word in the review and, if it is in the model's\r\n    # vocabulary, add its feature vector to the total\r\n    for word in tweets:\r\n        if word in index2word_set:\r\n            nwords = nwords + 1.\r\n            featureVec = np.add(featureVec, model[word])\r\n    # Divide the result by the number of words to get the average\r\n    featureVec = np.divide(featureVec, nwords)\r\n    return featureVec\r\n\r\n\r\n
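makeFeatureVec can be exercised without the full dataset by training a throwaway model first; a sketch assuming the same gensim 3.x API (size=/iter=) that word2Vec above uses -- the toy corpus is invented:

# Toy check of per-tweet average vectors (gensim 3.x assumed, as above).
from gensim.models import word2vec

toy_corpus = [["good", "movie"], ["bad", "movie"], ["good", "bad"]] * 50  # illustrative
toy_model = word2vec.Word2Vec(toy_corpus, size=10, min_count=1, iter=5)

vec = makeFeatureVec(["good", "movie", "unseen_token"], toy_model, num_features=10)
print(vec.shape)   # (10,) -- out-of-vocabulary tokens are simply skipped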
def tsne_plot(model):\r\n    \"Creates a TSNE model and plots it\"\r\n    labels = []\r\n    tokens = []\r\n\r\n    for word in model.wv.vocab:\r\n        tokens.append(model[word])\r\n        labels.append(word)\r\n\r\n    tsne_model = TSNE(perplexity=40, n_components=2,\r\n                      init='pca', n_iter=2500, random_state=23)\r\n    new_values = tsne_model.fit_transform(tokens)\r\n    x = []\r\n    y = []\r\n    for value in new_values:\r\n        x.append(value[0])\r\n        y.append(value[1])\r\n\r\n    plt.figure(figsize=(16, 16))\r\n    for i in range(len(x)):\r\n        plt.scatter(x[i], y[i])\r\n        plt.annotate(labels[i],\r\n                     xy=(x[i], y[i]),\r\n                     xytext=(5, 2),\r\n                     textcoords='offset points',\r\n                     ha='right',\r\n                     va='bottom')\r\n    plt.show()\r\n\r\n\r\ndef getAvgFeatureVecs(tweets, model, num_features):\r\n    # Given a set of reviews (each one a list of words), calculate\r\n    # the average feature vector for each one and return a 2D numpy array\r\n    # Initialize a counter\r\n\r\n    counter = 0.\r\n    # Preallocate a 2D numpy array, for speed\r\n    tweetsFeatureVecs = np.zeros((len(tweets), num_features), dtype=\"float32\")\r\n    # Loop through the reviews (use a distinct loop variable so that\r\n    # len(tweets) below still refers to the whole collection)\r\n    for tweet in tweets:\r\n        # Print a status message every 1000th review\r\n        if counter % 1000. == 0.:\r\n            print(\"Tweets %d of %d\" % (counter, len(tweets)))\r\n        # Call the function (defined above) that makes average feature vectors\r\n        tweetsFeatureVecs[int(counter)] = makeFeatureVec(tweet, model,\r\n                                                         num_features)\r\n        # Increment the counter\r\n        counter = counter + 1.\r\n    return tweetsFeatureVecs\r\n\r\n\r\ndef plotroccurves(modelname, y_pred_prob, fpr, tpr, roc_auc, predict):\r\n    plt.figure()\r\n    plt.plot(fpr, tpr, color='green',\r\n             label='ROC curve (area = %0.2f)' % roc_auc)\r\n    plt.grid(True)\r\n    plt.plot([0, 1], [0, 1], color='black', linestyle='--')\r\n    plt.xlabel('False Positive Rate')\r\n    plt.ylabel('True Positive Rate')\r\n    plt.title(f'ROC Curve for {modelname}')\r\n    plt.legend(loc=\"lower right\")\r\n    plt.savefig(f'{file_name} {modelname}.jpg')\r\n    plt.clf()\r\n    plt.cla()\r\n    plt.close()\r\n\r\n\r\ndef modelselectionword2vec(trainDataVecs, testDataVecs, y_train, y_test, modelnames):\r\n    # # accuracyscores = []\r\n    # TP = []\r\n    # FP = []\r\n    # TN = []\r\n    # FN = []\r\n    # f1scores = []\r\n    dictionarylist = {}\r\n    for modelname in modelnames:\r\n        if modelname == 'Logistic Regression':\r\n            clf = LogisticRegression(C=1.0, solver = 'lbfgs')\r\n        if modelname == 'Naive Bayes':\r\n            clf = naive_bayes.MultinomialNB()\r\n        if modelname == 'Random Forest':\r\n            clf = RandomForestClassifier(n_estimators=100)\r\n        if modelname == 'SVM':\r\n            clf = svm.LinearSVC()\r\n        clf.fit(trainDataVecs, y_train)\r\n        predict = clf.predict(testDataVecs)\r\n        accuracyscore = accuracy_score(y_test, predict)\r\n        precision = precision_score(y_test, predict)\r\n        recall = recall_score(y_test, predict)\r\n        TP = (confusion_matrix(y_test, predict)[1][1])\r\n        FP = (confusion_matrix(y_test, predict)[1][0])\r\n        TN = (confusion_matrix(y_test, predict)[0][0])\r\n        FN = (confusion_matrix(y_test, predict)[0][1])\r\n        f1score = f1_score(y_test, predict)\r\n        print(f'The accuracy score for {modelname} is', 
accuracyscore)\r\n print(f'The F1 Score for the {modelname} is :', f1_score(\r\n y_test, predict))\r\n print(f'The confusion matrix for the {modelname} is \\n', confusion_matrix(\r\n y_test, predict))\r\n print('TP =', TP)\r\n print('TN =', TN)\r\n print('FP =', FP)\r\n print('FN =', FN)\r\n print('Precision = ', precision)\r\n print('Recall = ', recall)\r\n dictionary = dict()\r\n dictionary = ({modelname: {'Accuracy score': accuracyscore, 'f1score': f1score, 'precision': precision,\r\n 'recall': recall, 'true positive': TP, 'true negative': TN, 'false positive': FP, 'false negative': FN}})\r\n dictionarylist.update(dictionary)\r\n return dictionarylist\r\n\r\n\r\nif __name__ == \"__main__\":\r\n start = time.time()\r\n num_features = 500\r\n dataset = pd.read_csv('DatasetF.csv', encoding='latin-1')\r\n print(\"The length of Dataset is: \", len(dataset))\r\n \"\"\" \r\n datasetwhole = pd.read_csv('onemilliontweets.csv', header=None, encoding ='latin-1')\r\n datasetwhole.columns = ['sentiment', 'id', 'date', 'query', 'user', 'tweets']\r\n datasetwhole = datasetwhole.drop(['id','date','query','user'],axis = 1)\r\n datasetwhole.tweets = datasetwhole.tweets.apply(preprocessdata)\r\n datasetwhole.tweets = datasetwhole['tweets'].apply(lambda x: ' '.join(\r\n [word for word in x.split() if word not in (stopwordsk)]))\r\n datasetwhole['tokenizedtweets'] = datasetwhole.tweets.apply(nltk.word_tokenize)\r\n \"\"\"\r\n # Making a copy of the dataset.\r\n dataset = dataset.copy()\r\n # Making the sentiment score 0 and 1.\r\n dataset.sentiment = dataset.sentiment.replace(4, 1)\r\n dataset.tweets = dataset.tweets.apply(preprocessdata)\r\n dataset.tweets = dataset['tweets'].apply(lambda x: ' '.join(\r\n [word for word in x.split() if word not in (stopwordsk)]))\r\n dataset['tokenizedtweets'] = dataset.tweets.apply(nltk.word_tokenize)\r\n # Function to train the model with the given parameters:\r\n\r\n # model = word2Vec(dataset.tokenizedtweets)\r\n # print(model)\r\n # Loading a saved model\r\n model = gensim.models.Word2Vec.load('OpinionMiningModel500dim')\r\n tsne_plot(model)\r\n # Shuffling the dataset\r\n dataset = dataset.sample(frac=1).reset_index(drop=True)\r\n X = dataset.tweets.values\r\n y = dataset.sentiment.values\r\n X_train, X_test, y_train, y_test = model_selection.train_test_split(\r\n X, y, test_size=0.2)\r\n trainDataVecs = getAvgFeatureVecs(X_train, model, num_features)\r\n testDataVecs = getAvgFeatureVecs(X_test, model, num_features)\r\n imp = Imputer(missing_values=np.nan, strategy='mean')\r\n trainDataVecs = imp.fit_transform(trainDataVecs)\r\n testDataVecs = imp.fit_transform(testDataVecs)\r\n trainDataVecs = trainDataVecs.reshape(len(X_train), -1)\r\n testDataVecs = testDataVecs.reshape(len(X_test), -1)\r\n models = ['Logistic Regression', 'Random Forest', 'SVM']\r\n dictionary = modelselectionword2vec(trainDataVecs, testDataVecs,\r\n y_train, y_test, models)\r\n # modelselectionword2vec(trainDataVecs, testDataVecs, y_train, y_test,\r\n # models)\r\n\r\n # Dictionary is saved to a csv file.\r\n newdf = pd.DataFrame(data=dictionary)\r\n print(newdf.head())\r\n newdf = newdf.T\r\n print(newdf.head())\r\n newdf.to_excel(f'{file_name}results.xlsx', encoding='utf-8',\r\n sheet_name='OpinionMining')\r\n end = time.time()\r\n print('The time taken : ', 
(end-start))\r\n","sub_path":"Word2vecwithdifferentdimensions.py","file_name":"Word2vecwithdifferentdimensions.py","file_ext":"py","file_size_in_byte":12013,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"574421024","text":"import random\nimport torch\nimport torchvision\n\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torchvision.transforms.transforms as T\n\n#scale_factor = min(self.target_image_size[0] / img.shape[-1], self.target_image_size[1] / img.shape[-2])\n#img = F.interpolate(img.unsqueeze(0), self.target_image_size).squeeze(0) if img.numel() > 0 else torch.empty((0, self.target_image_size[1], self.target_image_size[0]), dtype = torch.uint8)\n#mask = F.interpolate(mask.unsqueeze(0), self.target_image_size).squeeze(0) if mask.numel() > 0 else torch.empty((0, self.target_image_size[1], self.target_image_size[0]), dtype = torch.uint8)\n##img = F.interpolate(img.unsqueeze(0), scale_factor = scale_factor).squeeze(0)\n##mask = F.interpolate(img.unsqueeze(0), scale_factor = scale_factor).squeeze(0)\n#bbox = [bbox[0] * scale_factor, bbox[1] * scale_factor, bbox[2] * scale_factor, bbox[3] * scale_factor]\n\nclass MaskRCNNAugmentations(nn.Module):\n    # https://detectron2.readthedocs.io/en/latest/modules/config.html#yaml-config-references\n    # _C.INPUT.MIN_SIZE_TRAIN = (640, 672, 704, 736, 768, 800)\n    # short_edge_length = (800,), max_size = 1333\n    def __init__(self, p = 0.5, short_edge_length = 480, max_size = 640):\n        super().__init__()\n        # https://github.com/facebookresearch/detectron2/blob/main/configs/common/data/coco.py\n        self.transforms = [ResizeShortestEdge(short_edge_length = short_edge_length, max_size = max_size), RandomHorizontalFlip(p = p)]\n        # L(T.ResizeShortestEdge)(short_edge_length=800, max_size=1333),\n\n    def forward(self, image, target):\n        for t in self.transforms:\n            image, target = t(image, target)\n        return image, target\n\nclass Mask2CADAugmentations(nn.Module):\n    def __init__(self, shape_view_side_size = 128):\n        super().__init__()\n        self.shape_view_transforms = [RandomPhotometricDistort(), T.RandomCrop(shape_view_side_size)]\n\n    def forward(self, image, target):\n        for t in self.shape_view_transforms:\n            # RandomPhotometricDistort returns an (image, target) tuple, T.RandomCrop returns an image\n            out = t(target['shape_views'])\n            target['shape_views'] = out[0] if isinstance(out, tuple) else out\n        return image, target\n\nclass ResizeShortestEdge(nn.Module):\n    #Scale the shorter edge to the given size, with a limit of `max_size` on the longer edge.\n    #If `max_size` is reached, then downscale so that the longer edge does not exceed max_size.\n\n    def __init__(self, short_edge_length, max_size=int(1e9), interp='bilinear'):\n        super().__init__()\n        self.short_edge_length = short_edge_length\n        self.max_size = max_size\n        self.interp = interp\n\n    def forward(self, image, target):\n        h, w = image.shape[-2:]\n        size = self.short_edge_length\n\n        scale = size * 1.0 / min(h, w)\n        if h < w:\n            newh, neww = size, scale * w\n        else:\n            newh, neww = scale * h, size\n        if max(newh, neww) > self.max_size:\n            scale = self.max_size * 1.0 / max(newh, neww)\n            newh = newh * scale\n            neww = neww * scale\n        neww = int(neww + 0.5)\n        newh = int(newh + 0.5)\n\n        image = F.interpolate(image.unsqueeze(0), (newh, neww), mode = self.interp, align_corners = None if self.interp == \"nearest\" else False).squeeze(0)\n        target['image_width_height_resized'] = (neww, newh)\n        if 'boxes' in target:\n            target['boxes'][..., 0::2] *= neww / w\n            target['boxes'][..., 1::2] *= newh / h\n        if 'masks' in target:\n            # masks are (N, H, W); interpolate expects a channel dim\n            target['masks'] = F.interpolate(target['masks'].unsqueeze(1).to(torch.uint8), (newh, neww), mode='nearest').squeeze(1).to(torch.bool)\n\n        return image, target\n\n
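A quick way to see what ResizeShortestEdge does to an image/target pair; a minimal sketch using the class just defined, with a dummy tensor and one box (shapes chosen for illustration):

# The shorter edge (960) shrinks to 480, so everything scales by 0.5.
import torch

resize = ResizeShortestEdge(short_edge_length=480, max_size=640)
image = torch.zeros(3, 960, 1280)
target = {'boxes': torch.tensor([[100., 200., 300., 400.]])}
image, target = resize(image, target)
print(image.shape)       # torch.Size([3, 480, 640])
print(target['boxes'])   # tensor([[ 50., 100., 150., 200.]])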
class RandomHorizontalFlip(T.RandomHorizontalFlip):\n    def forward(self, image, target):\n        if torch.rand(1) < self.p:\n            image = image.flip(-1)\n            if target is not None:\n                width = image.shape[-1]\n                target[\"boxes\"][:, [0, 2]] = width - target[\"boxes\"][:, [2, 0]]\n                if \"masks\" in target:\n                    target[\"masks\"] = target[\"masks\"].flip(-1)\n        return image, target\n\nclass RandomPhotometricDistort(nn.Module):\n    def __init__(self, contrast = (0.5, 1.5), saturation = (0.5, 1.5),\n                 hue = (-0.05, 0.05), brightness = (0.875, 1.125), p = 0.5):\n        super().__init__()\n        self._brightness = T.ColorJitter(brightness=brightness)\n        self._contrast = T.ColorJitter(contrast=contrast)\n        self._hue = T.ColorJitter(hue=hue)\n        self._saturation = T.ColorJitter(saturation=saturation)\n        self.p = p\n\n    def forward(self, image, target = None):\n        if isinstance(image, torch.Tensor):\n            if image.ndimension() not in {2, 3}:\n                raise ValueError('image should be 2/3 dimensional. Got {} dimensions.'.format(image.ndimension()))\n            elif image.ndimension() == 2:\n                image = image.unsqueeze(0)\n\n        r = torch.rand(7)\n\n        if r[0] < self.p:\n            image = self._brightness(image)\n\n        contrast_before = r[1] < 0.5\n        if contrast_before:\n            if r[2] < self.p:\n                image = self._contrast(image)\n\n        if r[3] < self.p:\n            image = self._saturation(image)\n\n        if r[4] < self.p:\n            image = self._hue(image)\n\n        if not contrast_before:\n            if r[5] < self.p:\n                image = self._contrast(image)\n\n        if r[6] < self.p:\n            channels = image.shape[-3]\n            permutation = torch.randperm(channels)\n\n            image = image[..., permutation, :, :]\n\n        return image, target\n","sub_path":"transforms.py","file_name":"transforms.py","file_ext":"py","file_size_in_byte":5535,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"14721421","text":"import subprocess\nimport shutil\nfrom pathlib import Path\nfrom icecream import ic\nfrom src import set_parameters as sp\nfrom time import sleep\n\n\nclass RoutingEngine:\n    def __init__(self, transport_params, city) -> None:\n        self.transport_params = transport_params\n        self.city = city\n        self._container_id = None\n\n    def _run_docker_sync(self, cmd_str):\n        \"\"\"\n        Runs a docker command synchronously.\n        Raises CalledProcessError if the command exits with a non-zero code.\n        \"\"\"\n        ic(cmd_str)\n        cmd_ary = cmd_str.split()\n        subprocess.run(cmd_ary, check=True)\n\n    def _run_docker_background(self, cmd_str):\n        \"\"\"\n        Runs a docker command in the background.\n        Stores the resulting CONTAINER ID.\n        \"\"\"\n        # `-d` arg\n        ic(cmd_str)\n        if (\" -d \" not in cmd_str) and (\"--detach\" not in cmd_str):\n            raise ValueError(\n                f\"Docker command called as background process, but does not include the `--detach/-d` switch. Command=`{cmd_str}`\"\n            )\n\n
        cmd_ary = cmd_str.split()\n\n        # capture stdout so the container id printed by `docker run -d` can be stored\n        completed_process = subprocess.run(cmd_ary, check=True, capture_output=True, text=True)\n        # Allow at least five seconds for the OSRM server to start\n        sleep(5)\n        completed_process.check_returncode()\n        self._container_id = completed_process.stdout.strip()\n        ic(self._container_id)\n\n    def stop_routing_server(self):\n        if self._container_id:\n            cmd_stop = f\"docker stop {self._container_id}\"\n            self._run_docker_sync(cmd_stop)\n            self._container_id = None\n\n    def _copy_routing_data(self, osm_dir, osm_fname):\n        if not Path(osm_dir).exists():\n            Path(osm_dir).mkdir(parents=True)\n\n        shutil.copyfile(f\"{sp.download_dir}/{osm_fname}\", f\"{osm_dir}/{osm_fname}\")\n\n    def _get_osrm_filename(self, osm_fname=None):\n        if not osm_fname:\n            osm_fname = self.city[\"osm_fname\"]\n        base_fname = osm_fname.removesuffix(\".osm.pbf\")\n        return f\"{base_fname}.osrm\"\n\n    def preprocess_routing_data(self):\n\n        for verb, t_params in self.transport_params.items():\n            data_path = t_params[\"data_path\"]\n            profile_path = t_params[\"osrm_profile\"]\n            # Use `map` and `route` as prefixes here because `osm` and `osrm` look too similar\n            map_fname = self.city[\"osm_fname\"]\n            route_fname = self._get_osrm_filename(map_fname)\n            ic(verb, data_path, profile_path, map_fname, route_fname)\n\n            self._copy_routing_data(data_path, map_fname)\n\n            cmd_extract = f'docker run -t -v \"{data_path}:/data\" osrm/osrm-backend osrm-extract -p {profile_path} /data/{map_fname}'\n            cmd_partition = f'docker run -t -v \"{data_path}:/data\" osrm/osrm-backend osrm-partition /data/{route_fname}'\n            cmd_customize = f'docker run -t -v \"{data_path}:/data\" osrm/osrm-backend osrm-customize /data/{route_fname}'\n            self._run_docker_sync(cmd_extract)\n            self._run_docker_sync(cmd_partition)\n            self._run_docker_sync(cmd_customize)\n\n    def run_routing_server(self, verb):\n        data_path = self.transport_params[verb][\"data_path\"]\n        # Use `map` and `route` as prefixes here because `osm` and `osrm` look too similar\n        map_fname = self.city[\"osm_fname\"]\n        route_fname = self._get_osrm_filename(map_fname)\n\n        cmd = f'docker run --detach -t -i -p 5000:5000 -v \"{data_path}:/data\" osrm/osrm-backend osrm-routed --algorithm mld /data/{route_fname}'\n        self._run_docker_background(cmd)\n","sub_path":"src/docker_utils.py","file_name":"docker_utils.py","file_ext":"py","file_size_in_byte":3495,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
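The .osm.pbf -> .osrm renaming that ties the three preprocessing commands to the routing server can be checked without Docker; a sketch with an illustrative city dict (requires Python 3.9+ for str.removesuffix):

# No docker involved: only exercises the filename mapping.
engine = RoutingEngine(transport_params={}, city={"osm_fname": "berlin-latest.osm.pbf"})
print(engine._get_osrm_filename())   # berlin-latest.osrm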
 +{"seq_id":"369558759","text":"#-----------------------------------------------------------------------\n# blackscholes.py\n#-----------------------------------------------------------------------\n#website:https://introcs.cs.princeton.edu/python/21function/blackscholes.py.html\n\nimport sys\nimport math\nimport numpy as np\nimport scipy.stats as si\nimport sympy as sy\nimport sympy.statistics as systats  # old SymPy statistics module; used by the *_sym functions below\nfrom sympy import *\n#-----------------------------------------------------------------------\n\n# Return the value of the Gaussian probability function with mean 0.0\n# and standard deviation 1.0 at the given x value.\nclass blackscholes_model:\n#-----------------------------------------------------------------------\n\n# python blackscholes.py 23.75 15.00 0.01 0.35 0.5\n# 8.879159263714124 (actual = 9.10)\n\n# $ python blackscholes.py 30.14 15.0 0.01 0.332 0.25\n# 15.177462481558178 (actual = 14.50)\n\n# Information calculated based on closing data on Monday, June 9th 2003.\n#\n# Microsoft: share price: 23.75\n# strike price: 15.00\n# risk-free interest rate: 1%\n# volatility: 35% (historical estimate)\n# time until expiration: 0.5 years\n#\n# GE: share price: 30.14\n# strike price: 15.00\n# risk-free interest rate 1%\n# volatility: 33.2% (historical estimate)\n# time until expiration 0.25 years\n#\n# Reference: http://www.hoadley.net/options/develtoolsvolcalc.htm\n\n    def phi(self,x):\n        return math.exp(-x * x / 2.0) / math.sqrt(2.0 * math.pi)\n\n    #-----------------------------------------------------------------------\n\n    # Return the value of the Gaussian probability function with mean mu\n    # and standard deviation sigma at the given x value.\n\n    def pdf(self,x, mu=0.0, sigma=1.0):\n        return self.phi((x - mu) / sigma) / sigma\n\n    #-----------------------------------------------------------------------\n\n    # Return the value of the cumulative Gaussian distribution function\n    # with mean 0.0 and standard deviation 1.0 at the given z value.\n\n    def Phi(self,z):\n        if z < -8.0: return 0.0\n        if z > 8.0: return 1.0\n        total = 0.0\n        term = z\n        i = 3\n        while total != total + term:\n            total += term\n            term *= z * z / float(i)\n            i += 2\n        return 0.5 + total * self.phi(z)\n\n    #-----------------------------------------------------------------------\n\n    # Return standard Gaussian cdf with mean mu and stddev sigma.\n    # Use Taylor approximation.\n\n    def cdf(self,z, mu=0.0, sigma=1.0):\n        return self.Phi((z - mu) / sigma)\n\n    #-----------------------------------------------------------------------\n\n    # Black-Scholes formula.\n\n    def callPrice(self,s, x, r, sigma, t):\n        a = (math.log(s/x) + (r + sigma * sigma/2.0) * t) / \\\n            (sigma * math.sqrt(t))\n        b = a - sigma * math.sqrt(t)\n        return s * self.cdf(a) - x * math.exp(-r * t) * self.cdf(b)\n\n#-----------------------------------------------------------------------\n\nclass nonDividend_Paying_Black_Scholes_model: \n###################################################################################\n#website: https://aaronschlegel.me/black-scholes-formula-python.html\n    \"\"\"\n    The Black-Scholes model was first introduced by Fischer Black and Myron Scholes in 1973 in the paper \"The Pricing of Options and Corporate Liabilities\". Since being published, the model has become a widely used tool by investors and is still regarded as one of the best ways to determine fair prices of options.\n\n    The purpose of the model is to determine the prices of vanilla European call and put options (options that can only be exercised at the end of their maturity) based on price variation over time and assuming the asset has a lognormal distribution.\n\n    Assumptions\n    To determine the price of vanilla European options, several assumptions are made:\n\n    European options can only be exercised at expiration\n    No dividends are paid during the option's life\n    Market movements cannot be predicted\n    The risk-free rate and volatility are constant\n    Follows a lognormal distribution\n    Non-Dividend Paying Black-Scholes Formula\n    In Black-Scholes formulas, the following parameters are defined.\n\n    S, the spot price of the asset at time t\n    T, the maturity of the option. 
Time to maturity is defined as T−t\n    K, strike price of the option\n    r, the risk-free interest rate, assumed to be constant between t and T\n    σ, volatility of underlying asset, the standard deviation of the asset returns\n    \"\"\"\n########################################################################################\n    def euro_vanilla_call(self,S, K, T, r, sigma):\n        \n        #S: spot price\n        #K: strike price\n        #T: time to maturity\n        #r: interest rate\n        #sigma: volatility of underlying asset\n        \n        d1 = (np.log(S / K) + (r + 0.5 * sigma ** 2) * T) / (sigma * np.sqrt(T))\n        d2 = (np.log(S / K) + (r - 0.5 * sigma ** 2) * T) / (sigma * np.sqrt(T))\n        \n        call = (S * si.norm.cdf(d1, 0.0, 1.0) - K * np.exp(-r * T) * si.norm.cdf(d2, 0.0, 1.0))\n        \n        return call\n    \n    def euro_vanilla_put(self,S, K, T, r, sigma):\n        \n        #S: spot price\n        #K: strike price\n        #T: time to maturity\n        #r: interest rate\n        #sigma: volatility of underlying asset\n        \n        d1 = (np.log(S / K) + (r + 0.5 * sigma ** 2) * T) / (sigma * np.sqrt(T))\n        d2 = (np.log(S / K) + (r - 0.5 * sigma ** 2) * T) / (sigma * np.sqrt(T))\n        \n        put = (K * np.exp(-r * T) * si.norm.cdf(-d2, 0.0, 1.0) - S * si.norm.cdf(-d1, 0.0, 1.0))\n        \n        return put\n    \n    def euro_vanilla(self,S, K, T, r, sigma, option = 'call'):\n        \n        #S: spot price\n        #K: strike price\n        #T: time to maturity\n        #r: interest rate\n        #sigma: volatility of underlying asset\n        \n        d1 = (np.log(S / K) + (r + 0.5 * sigma ** 2) * T) / (sigma * np.sqrt(T))\n        d2 = (np.log(S / K) + (r - 0.5 * sigma ** 2) * T) / (sigma * np.sqrt(T))\n        \n        if option == 'call':\n            result = (S * si.norm.cdf(d1, 0.0, 1.0) - K * np.exp(-r * T) * si.norm.cdf(d2, 0.0, 1.0))\n        if option == 'put':\n            result = (K * np.exp(-r * T) * si.norm.cdf(-d2, 0.0, 1.0) - S * si.norm.cdf(-d1, 0.0, 1.0))\n        \n        return result\n    \n    def euro_call_sym(self,S, K, T, r, sigma):\n        \n        #S: spot price\n        #K: strike price\n        #T: time to maturity\n        #r: interest rate\n        #sigma: volatility of underlying asset\n        \n        N = systats.Normal(0.0, 1.0)\n        \n        d1 = (sy.ln(S / K) + (r + 0.5 * sigma ** 2) * T) / (sigma * sy.sqrt(T))\n        d2 = (sy.ln(S / K) + (r - 0.5 * sigma ** 2) * T) / (sigma * sy.sqrt(T))\n        \n        call = (S * N.cdf(d1) - K * sy.exp(-r * T) * N.cdf(d2))\n        \n        return call\n\n    def euro_put_sym(self,S, K, T, r, sigma):\n        \n        #S: spot price\n        #K: strike price\n        #T: time to maturity\n        #r: interest rate\n        #sigma: volatility of underlying asset\n        \n        N = systats.Normal(0.0, 1.0)\n        \n        d1 = (sy.ln(S / K) + (r + 0.5 * sigma ** 2) * T) / (sigma * sy.sqrt(T))\n        d2 = (sy.ln(S / K) + (r - 0.5 * sigma ** 2) * T) / (sigma * sy.sqrt(T))\n        \n        put = (K * sy.exp(-r * T) * N.cdf(-d2) - S * N.cdf(-d1))\n        \n        return put\n\n    def sym_euro_vanilla(self,S, K, T, r, sigma, option = 'call'):\n        \n        #S: spot price\n        #K: strike price\n        #T: time to maturity\n        #r: interest rate\n        #sigma: volatility of underlying asset\n        \n        N = systats.Normal(0.0, 1.0)\n        \n        d1 = (sy.ln(S / K) + (r + 0.5 * sigma ** 2) * T) / (sigma * sy.sqrt(T))\n        d2 = (sy.ln(S / K) + (r - 0.5 * sigma ** 2) * T) / (sigma * sy.sqrt(T))\n        \n        if option == 'call':\n            result = (S * N.cdf(d1) - K * sy.exp(-r * T) * N.cdf(d2))\n        if option == 'put':\n            result = (K * sy.exp(-r * T) * N.cdf(-d2) - S * N.cdf(-d1))\n        \n        return result\n    \n
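The closed-form prices above should satisfy put-call parity, C − P = S − K·exp(−rT), which makes a convenient numeric self-check; a sketch using the class just defined:

# Parity holds exactly for these formulas, so the tolerance is only for float error.
m = nonDividend_Paying_Black_Scholes_model()
S, K, T, r, sigma = 50, 100, 1, 0.05, 0.25
c = m.euro_vanilla_call(S, K, T, r, sigma)
p = m.euro_vanilla_put(S, K, T, r, sigma)
assert abs((c - p) - (S - K * np.exp(-r * T))) < 1e-9
print(c, p)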
class dividend_Paying_Black_Scholes_model:\n    \"\"\"\n    For assets that pay dividends, the Black-Scholes formula is rather similar to the non-dividend paying asset formula; however, a new parameter q, is added.\n\n    S, the spot price of the asset at time t\n    T, the maturity of the option. Time to maturity is defined as T−t\n    K, strike price of the option\n    r, the risk-free interest rate, assumed to be constant between t and T\n    σ, volatility of underlying asset, the standard deviation of the asset returns\n    q, the dividend rate of the asset. This is assumed to pay dividends at a continuous rate\n    \"\"\" \n    def black_scholes_call_div(self,S, K, T, r, q, sigma):\n        \n        #S: spot price\n        #K: strike price\n        #T: time to maturity\n        #r: interest rate\n        #q: rate of continuous dividend paying asset \n        #sigma: volatility of underlying asset\n        \n        d1 = (np.log(S / K) + (r - q + 0.5 * sigma ** 2) * T) / (sigma * np.sqrt(T))\n        d2 = (np.log(S / K) + (r - q - 0.5 * sigma ** 2) * T) / (sigma * np.sqrt(T))\n        \n        call = (S * np.exp(-q * T) * si.norm.cdf(d1, 0.0, 1.0) - K * np.exp(-r * T) * si.norm.cdf(d2, 0.0, 1.0))\n        \n        return call\n\n    def black_scholes_put_div(self,S, K, T, r, q, sigma):\n        \n        #S: spot price\n        #K: strike price\n        #T: time to maturity\n        #r: interest rate\n        #q: rate of continuous dividend paying asset \n        #sigma: volatility of underlying asset\n        \n        d1 = (np.log(S / K) + (r - q + 0.5 * sigma ** 2) * T) / (sigma * np.sqrt(T))\n        d2 = (np.log(S / K) + (r - q - 0.5 * sigma ** 2) * T) / (sigma * np.sqrt(T))\n        \n        put = (K * np.exp(-r * T) * si.norm.cdf(-d2, 0.0, 1.0) - S * np.exp(-q * T) * si.norm.cdf(-d1, 0.0, 1.0))\n        \n        return put\n\n    def euro_vanilla_dividend(self,S, K, T, r, q, sigma, option = 'call'):\n        #Implementation that can be used to determine the put or call option price depending on specification\n        #S: spot price\n        #K: strike price\n        #T: time to maturity\n        #r: interest rate\n        #q: rate of continuous dividend paying asset \n        #sigma: volatility of underlying asset\n        \n        d1 = (np.log(S / K) + (r - q + 0.5 * sigma ** 2) * T) / (sigma * np.sqrt(T))\n        d2 = (np.log(S / K) + (r - q - 0.5 * sigma ** 2) * T) / (sigma * np.sqrt(T))\n        \n        if option == 'call':\n            result = (S * np.exp(-q * T) * si.norm.cdf(d1, 0.0, 1.0) - K * np.exp(-r * T) * si.norm.cdf(d2, 0.0, 1.0))\n        if option == 'put':\n            result = (K * np.exp(-r * T) * si.norm.cdf(-d2, 0.0, 1.0) - S * np.exp(-q * T) * si.norm.cdf(-d1, 0.0, 1.0))\n        \n        return result\n\n    def black_scholes_call_div_sym(self,S, K, T, r, q, sigma):\n        #Sympy Implementation of Black-Scholes with Dividend-paying asset\n        #S: spot price\n        #K: strike price\n        #T: time to maturity\n        #r: interest rate\n        #q: rate of continuous dividend paying asset \n        #sigma: volatility of underlying asset\n        \n        N = systats.Normal(0.0, 1.0)\n        \n        d1 = (sy.ln(S / K) + (r - q + 0.5 * sigma ** 2) * T) / (sigma * sy.sqrt(T))\n        d2 = (sy.ln(S / K) + (r - q - 0.5 * sigma ** 2) * T) / (sigma * sy.sqrt(T))\n        \n        call = S * sy.exp(-q * T) * N.cdf(d1) - K * sy.exp(-r * T) * N.cdf(d2)\n        \n        return call\n\n    def black_scholes_call_put_sym(self,S, K, T, r, q, sigma):\n        #Sympy Implementation of Black-Scholes with Dividend-paying asset\n\n        #S: spot price\n        #K: strike price\n        #T: time to maturity\n        #r: interest rate\n        #q: rate of continuous dividend paying asset \n        #sigma: volatility of underlying asset\n        \n        N = systats.Normal(0.0, 1.0)\n        \n        d1 = (sy.ln(S / K) + (r - q + 0.5 * sigma ** 2) * T) / (sigma * sy.sqrt(T))\n        d2 = (sy.ln(S / K) + (r - q - 0.5 * sigma ** 2) * T) / (sigma * sy.sqrt(T))\n        \n        put = K * sy.exp(-r * T) * N.cdf(-d2) - S * sy.exp(-q * T) * N.cdf(-d1)\n        \n        return put\n\n    def sym_euro_vanilla_dividend(self,S, K, T, r, q, sigma, option = 'call'):\n        #Sympy implementation of pricing a European put or call option depending on specification\n        #S: spot price\n        #K: strike price\n        #T: time to 
maturity\n #r: interest rate\n #q: rate of continuous dividend paying asset \n #sigma: volatility of underlying asset\n \n N = systats.Normal(0.0, 1.0)\n \n d1 = (sy.ln(S / K) + (r - q + 0.5 * sigma ** 2) * T) / (sigma * sy.sqrt(T))\n d2 = (sy.ln(S / K) + (r - q - 0.5 * sigma ** 2) * T) / (sigma * sy.sqrt(T))\n \n if option == 'call':\n result = S * sy.exp(-q * T) * N.cdf(d1) - K * sy.exp(-r * T) * N.cdf(d2)\n if option == 'put':\n result = K * sy.exp(-r * T) * N.cdf(-d2) - S * sy.exp(-q * T) * N.cdf(-d1)\n \n return result\n\n\n\n\ndef main():\n # Accept s, x, r, sigma, and t from the command line and write\n # the Black-Scholes value.\n\n s = float(sys.argv[1])\n x = float(sys.argv[2])\n r = float(sys.argv[3])\n sigma = float(sys.argv[4])\n t = float(sys.argv[5])\n\n model=blackscholes_model()\n print(\"res:\",model.callPrice(s, x, r, sigma, t))\n model2=nonDividend_Paying_Black_Scholes_model()\n x=model2.sym_euro_vanilla(50, 100, 1, 0.05, 0.25, option = 'put')\n print(\"x:\",x)\n\nif __name__==\"__main__\":\n main()\n","sub_path":"tools/trade_algotithms/black_scholes_model.py","file_name":"black_scholes_model.py","file_ext":"py","file_size_in_byte":13783,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"394728268","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*- \n# (c) Copyright 2019 \n# author(s): Noel Tchidjo\n# All rights reserved\n\n\nfrom sklearn.pipeline import Pipeline\nfrom sklearn.externals import joblib\nimport pandas as pd\nfrom sklearn.base import BaseEstimator, TransformerMixin\nimport glob\nimport sys\nsys.path.append('thrift_generated/house_prediction')\n\nfrom thrift_generated.house_prediction.house_prediction import Processor\nfrom thrift_generated.house_prediction.house_prediction import Iface\nfrom thrift_generated.house_prediction.ttypes import PredictionResult\n\n\nfrom thrift.transport import TSocket\nfrom thrift.transport import TTransport\nfrom thrift.protocol import TBinaryProtocol\nfrom thrift.server import TServer\nimport logging\nfrom datetime import datetime\nimport configparser\nimport os\n\n\nclass DataFrameSelector(BaseEstimator, TransformerMixin):\n def __init__(self, attribute_names):\n self.attribute_names = attribute_names\n def fit(self, X, y=None):\n return self\n def transform(self, X):\n return X[self.attribute_names].values\n\n\nclass PredictorHandler(Iface):\n def __init__(self, config):\n logging.info(\"Loading pipeline ...\")\n self.full_pipeline = joblib.load(config['DEFAULT']['pipeline'])\n logging.info(\"Loading pipeline finished.\")\n logging.info(\"Loading model ...\")\n self.model_loaded = joblib.load(config['DEFAULT']['model'])\n logging.info(\"Loading model finished.\")\n\n self.df = pd.DataFrame(columns=[[\"date_mutation\",\"code_postal\",\"lot1_surface_carrez\",\"lot2_surface_carrez\",\"lot3_surface_carrez\",\"lot4_surface_carrez\",\"lot5_surface_carrez\",\"nombre_lots\",\"code_type_local\",\"surface_reelle_bati\",\"nombre_pieces_principales\",\"surface_terrain\",\"longitude\",\"latitude\"]])\n \n\n def getprice(self, house_desc):\n house_crit = []\n house_crit.append(pd.to_datetime(house_desc[\"date_mutation\"]).toordinal()) if \"date_mutation\" in house_desc else house_crit.append(0)\n house_crit.append(float(house_desc[\"code_postal\"])) if \"code_postal\" in house_desc else house_crit.append(0.0)\n house_crit.append(float(house_desc[\"lot1_surface_carrez\"])) if \"lot1_surface_carrez\" in house_desc else house_crit.append(0.0)\n 
house_crit.append(float(house_desc[\"lot2_surface_carrez\"])) if \"lot2_surface_carrez\" in house_desc else house_crit.append(0.0)\n house_crit.append(float(house_desc[\"lot3_surface_carrez\"])) if \"lot3_surface_carrez\" in house_desc else house_crit.append(0.0)\n house_crit.append(float(house_desc[\"lot4_surface_carrez\"])) if \"lot4_surface_carrez\" in house_desc else house_crit.append(0.0)\n house_crit.append(float(house_desc[\"lot5_surface_carrez\"])) if \"lot5_surface_carrez\" in house_desc else house_crit.append(0.0)\n house_crit.append(float(house_desc[\"nombre_lots\"])) if \"nombre_lots\" in house_desc else house_crit.append(0.0)\n house_crit.append(float(house_desc[\"code_type_local\"])) if \"code_type_local\" in house_desc else house_crit.append(0.0)\n house_crit.append(float(house_desc[\"surface_reelle_bati\"])) if \"surface_reelle_bati\" in house_desc else house_crit.append(0.0)\n house_crit.append(float(house_desc[\"nombre_pieces_principales\"])) if \"nombre_pieces_principales\" in house_desc else house_crit.append(0.0)\n house_crit.append(float(house_desc[\"surface_terrain\"])) if \"surface_terrain\" in house_desc else house_crit.append(0.0)\n house_crit.append(float(house_desc[\"longitude\"])) if \"longitude\" in house_desc else house_crit.append(0.0)\n house_crit.append(float(house_desc[\"latitude\"])) if \"latitude\" in house_desc else house_crit.append(0.0) \n\n logging.info(str(house_crit))\n \n self.df.loc[0] = house_crit\n\n house_prepared = self.full_pipeline.transform(self.df)\n price = self.model_loaded.predict(house_prepared)\n\n logging.info(\"value computed:%f\" %price[0])\n val = PredictionResult()\n val.predictedvalue = price[0]\n val.error = \"\"\n return val\n\n\nif __name__ == '__main__':\n now = datetime.now()\n log_filename = os.path.join(\"logs\", \"predict_house_price_\" + now.strftime(\"%Y-%m-%d-%H-%M-%S\") + \".log\")\n logging.basicConfig(filename=log_filename,level=logging.INFO, format='%(asctime)s %(message)s', datefmt='%Y-%m-%d %H:%M:%S')\n logging.info(\"Starting House predictor ...\")\n config = configparser.ConfigParser()\n config.read('config.ini')\n host = config['DEFAULT']['host']\n port = int(config['DEFAULT']['port'])\n handler = PredictorHandler(config)\n processor = Processor(handler)\n transport = TSocket.TServerSocket(host, port)\n tfactory = TTransport.TBufferedTransportFactory()\n pfactory = TBinaryProtocol.TBinaryProtocolFactory()\n\n server = TServer.TSimpleServer(processor, transport, tfactory, pfactory)\n\n logging.info('Waiting for request ...')\n server.serve()\n logging.info('done.')\n\n","sub_path":"sklearn/predict_house_price.py","file_name":"predict_house_price.py","file_ext":"py","file_size_in_byte":4841,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"34553376","text":"\"\"\"\r\nConsider the fraction, n/d, where n and d are positive integers. 
\"\"\"\nimport sys\nimport bisect\n\nread = sys.stdin.readline\n\n\ndef solution(nums):\n    lis = [nums[0]]\n    length = 1\n\n    for n in nums[1:]:\n        if n > lis[length - 1]:\n            lis.append(n)\n            length += 1\n        else:\n            idx = bisect.bisect_left(lis, n)\n            lis[idx] = n\n\n    sys.stdout.write('%d\\n'%length)\n\nif __name__ == '__main__':\n    N = int(read())\n    nums = list(map(int, read().split()))\n\n    solution(nums)","sub_path":"10001~20000/12015/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":440,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
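The reconstructed solution above keeps `lis` as a patience-sorting tails array, so its length equals the LIS length; a self-contained check of the same idea:

# Each element either extends the tails array or tightens an existing tail.
import bisect

def lis_length(nums):
    tails = []
    for n in nums:
        i = bisect.bisect_left(tails, n)
        if i == len(tails):
            tails.append(n)
        else:
            tails[i] = n
    return len(tails)

assert lis_length([10, 20, 10, 30, 20, 50]) == 4   # 10 20 30 50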
 +{"seq_id":"592291375","text":"# -*- coding: utf-8 -*-\nimport decimal\nimport hashlib\nimport json\nimport random\nimport time\n\nfrom flask import g\n\nfrom application import db\nfrom common.libs.Helper import getCurrentDate, seconds2str\nfrom common.libs.MemberService import MemberService\nfrom common.libs.mall.ProductService import ProductService\nfrom common.models.ciwei.BalanceOder import BalanceOrder\nfrom common.models.ciwei.BalanceOrderCallbackData import BalanceOrderCallbackData\nfrom common.models.ciwei.GoodsTopOrder import GoodsTopOrder\nfrom common.models.ciwei.GoodsTopOrderCallbackData import GoodsTopOrderCallbackData\nfrom common.models.ciwei.ThankOrder import ThankOrder\nfrom common.models.ciwei.ThankOrderCallbackData import ThankOrderCallbackData\nfrom common.models.ciwei.mall.Order import Order\nfrom common.models.ciwei.mall.OrderCallBackData import OrderCallbackData\nfrom common.models.ciwei.mall.OrderProduct import OrderProduct\nfrom common.models.ciwei.mall.Product import Product\nfrom common.models.ciwei.mall.ProductSaleChangeLog import ProductSaleChangeLog\n\n\nclass PayService:\n\n    def __init__(self):\n        pass\n\n    def createOrder(self, member_id, items=None, params=None):\n        \"\"\"\n        Create the Order and OrderProduct rows and update the Product stock.\n        Also logs the stock & sales changes.\n        :param member_id:\n        :param items:\n        :param params:\n        :return: order id, serial number, amount awaiting payment\n        \"\"\"\n        resp = {'code': 200, 'msg': 'Operation succeeded~', 'data': {}}\n\n        # total price = shipping cost + product price\n        pay_price = decimal.Decimal(0.00)\n        continue_cnt = 0\n        product_ids = []\n        for item in items:\n            if decimal.Decimal(item['price']) < 0:\n                continue_cnt += 1\n                continue\n\n            pay_price = pay_price + decimal.Decimal(item['price']) * int(item['number'])\n            product_ids.append(item['id'])\n\n        if continue_cnt >= len(items):\n            resp['code'] = -1\n            resp['msg'] = 'The list of items is empty~~'\n            return resp\n\n        yun_price = params['yun_price'] if params and 'yun_price' in params else 0\n        discount_price = params['discount_price'] if params and 'discount_price' in params else 0\n        discount_type = params['discount_type'] if params and 'discount_type' in params else ''\n        note = params['note'] if params and 'note' in params else ''\n        express_address_id = params['express_address_id'] if params and 'express_address_id' in params else 0\n        express_info = params['express_info'] if params and 'express_info' in params else {}\n        yun_price = decimal.Decimal(yun_price)\n        discount_price = decimal.Decimal(discount_price)\n        total_price = pay_price + yun_price - discount_price\n\n        # Transaction: insert the Order and OrderProduct rows, update the Product stock,\n        # and insert ProductSaleChangeLog / ProductStockChangeLog entries.\n        # Concurrent stock updates are serialized with a pessimistic lock.\n        try:\n            tmp_product_list = db.session.query(Product).filter(Product.id.in_(product_ids)) \\\n                .with_for_update().all()\n\n            tmp_food_stock_mapping = {}\n            for tmp_item in tmp_product_list:\n                tmp_food_stock_mapping[tmp_item.id] = tmp_item.stock_cnt\n\n            model_order = Order()\n            model_order.order_sn = self.geneOrderSn()\n            model_order.member_id = member_id\n            model_order.total_price = total_price\n            model_order.yun_price = yun_price\n            model_order.pay_price = pay_price\n            model_order.discount_price = discount_price\n            model_order.discount_type = discount_type\n            model_order.note = note\n            model_order.status = -8\n            model_order.express_status = -8\n            model_order.express_address_id = express_address_id\n            model_order.express_info = json.dumps(express_info)\n            model_order.updated_time = model_order.created_time = getCurrentDate()\n            db.session.add(model_order)\n\n            for item in items:\n                tmp_left_stock = tmp_food_stock_mapping[item['id']]\n\n                if decimal.Decimal(item['price']) < 0:\n                    continue\n\n                if int(item['number']) > int(tmp_left_stock):\n                    raise Exception(\"This item is extremely popular; stock left: %s, you ordered: %s~~\" % (tmp_left_stock, item['number']))\n\n                tmp_ret = Product.query.filter_by(id=item['id']).update({\n                    \"stock_cnt\": int(tmp_left_stock) - int(item['number'])\n                })\n                if not tmp_ret:\n                    raise Exception(\"Failed to place the order, please try again\")\n\n                tmp_order_item = OrderProduct()\n                tmp_order_item.order_id = model_order.id\n                tmp_order_item.member_id = member_id\n                tmp_order_item.product_num = item['number']\n                tmp_order_item.price = item['price']\n                tmp_order_item.product_id = item['id']\n                tmp_order_item.note = note\n                tmp_order_item.updated_time = tmp_order_item.created_time = getCurrentDate()\n                db.session.add(tmp_order_item)\n                # db.session.flush()\n\n                ProductService.setStockChangeLog(item['id'], -item['number'], \"online purchase\")\n            db.session.commit()\n            resp['data'] = {\n                'id': model_order.id,\n                'order_sn': model_order.order_sn,\n                'total_price': str(total_price)\n            }\n        except Exception as e:\n            db.session.rollback()\n            print(e)\n            resp['code'] = -1\n            resp['msg'] = \"Failed to place the order, please try again\"\n            resp['msg'] = str(e)\n            return resp\n        return resp\n\n
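createOrder serializes concurrent stock updates with SELECT ... FOR UPDATE (with_for_update). A standalone sketch of that pattern; `session` and `Product` here are stand-ins injected by the caller, not this project's globals:

# Row-level lock held from the SELECT until commit/rollback, so two concurrent
# buyers cannot both read the same stock_cnt and oversell.
def reserve_stock(session, Product, product_id, quantity):
    product = (session.query(Product)
               .filter(Product.id == product_id)
               .with_for_update()      # acquire the row lock
               .one())
    if product.stock_cnt < quantity:
        session.rollback()
        raise ValueError("insufficient stock")
    product.stock_cnt -= quantity
    session.commit()                   # releases the lock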
    def closeOrder(self, pay_order_id=0):\n        \"\"\"\n        Close-order bookkeeping: stock log, Order status, stock counts\n        :param pay_order_id:\n        :return:\n        \"\"\"\n        if pay_order_id < 1:\n            return False\n        order_info = Order.query.filter_by(id=pay_order_id, status=-8).first()\n        if not order_info:\n            return False\n\n        # Restore the Product stock, log the change, and update the Order status\n        order_items = OrderProduct.query.filter_by(order_id=pay_order_id).all()\n        if order_items:\n            for item in order_items:\n                tmp_product_info = Product.query.filter_by(id=item.product_id).first()\n                if tmp_product_info:\n                    tmp_product_info.stock_cnt = tmp_product_info.stock_cnt + item.product_num\n                    tmp_product_info.updated_time = getCurrentDate()\n                    db.session.add(tmp_product_info)\n                    db.session.commit()\n                ProductService.setStockChangeLog(item.product_id, item.product_num, \"order cancelled\")\n\n        order_info.status = 0\n        order_info.updated_time = getCurrentDate()\n        db.session.add(order_info)\n        db.session.commit()\n        return True\n\n    def orderSuccess(self, pay_order_id=0, params=None):\n        \"\"\"\n        After a successful payment: update the order status, record the sales log,\n        and deduct the member's balance depending on the discount type.\n        Feeds the message notification queue.\n        :param pay_order_id:\n        :param params:\n        :return: whether the database operations succeeded\n        \"\"\"\n        try:\n            # Update the Order payment and shipping status,\n            # and write the sales log for the order's products\n            order_info = Order.query.filter_by(id=pay_order_id).first()\n            if not order_info or order_info.status not in [-8, -7]:\n                return True\n\n            order_info.pay_sn = params['pay_sn'] if params and 'pay_sn' in params else ''\n            order_info.status = 1\n            order_info.express_status = -7\n            order_info.updated_time = getCurrentDate()\n            db.session.add(order_info)\n\n            pay_order_items = OrderProduct.query.filter_by(order_id=pay_order_id).all()\n            for order_item in pay_order_items:\n                tmp_product = Product.query.filter_by(id=order_item.product_id).first()\n                tmp_product.sale_cnt += order_item.product_num\n                db.session.add(tmp_product)\n                tmp_model_sale_log = ProductSaleChangeLog()\n                tmp_model_sale_log.product_id = order_item.product_id\n                tmp_model_sale_log.quantity = order_item.product_num\n                tmp_model_sale_log.price = order_item.price\n                tmp_model_sale_log.member_id = order_item.member_id\n                tmp_model_sale_log.created_time = getCurrentDate()\n                db.session.add(tmp_model_sale_log)\n\n            db.session.commit()\n        except Exception as e:\n            db.session.rollback()\n            print(e)\n            return False\n\n        return True\n\n    def goodsTopOrderSuccess(self, pay_order_id=0, params=None):\n        \"\"\"\n        After a successful payment, update the order status\n        :param pay_order_id:\n        :param params:\n        :return: whether the database operations succeeded\n        \"\"\"\n\n        # Update the GoodsTopOrder payment status\n        order_info = GoodsTopOrder.query.filter_by(id=pay_order_id).first()\n        if not order_info or order_info.status not in [0]:\n            return True\n        order_info.transaction_id = params['pay_sn'] if params and 'pay_sn' in params else ''\n        order_info.status = 1\n        order_info.updated_time = getCurrentDate()\n        order_info.paid_time = params['paid_time'] if params and 'paid_time' in params else getCurrentDate()\n        db.session.add(order_info)\n        db.session.commit()\n        return True\n\n    def thankOrderSuccess(self, pay_order_id=0, params=None):\n        \"\"\"\n        After a successful payment, update the order status\n        :param pay_order_id:\n        :param params:\n        :return: whether the database operations succeeded\n        \"\"\"\n\n        # Update the ThankOrder payment status\n        order_info = ThankOrder.query.filter_by(id=pay_order_id).first()\n        if not order_info or order_info.status not in [0]:\n            return True\n        order_info.transaction_id = params['pay_sn'] if params and 'pay_sn' in params else ''\n        order_info.status = 1\n        order_info.updated_time = getCurrentDate()\n        order_info.paid_time = params['paid_time'] if params and 'paid_time' in params else getCurrentDate()\n        db.session.add(order_info)\n        db.session.commit()\n        return True\n\n    def balanceOrderSuccess(self, pay_order_id=0, params=None):\n        \"\"\"\n        After a successful payment, update the order status\n        :param pay_order_id:\n        :param params:\n        :return: whether the database operations succeeded\n        \"\"\"\n        # Update the BalanceOrder payment status\n        order_info = BalanceOrder.query.filter_by(id=pay_order_id).first()\n        if not order_info or order_info.status not in [0]:\n            return True\n        order_info.transaction_id = params['pay_sn'] if params and 'pay_sn' in params else ''\n        order_info.status = 1\n        order_info.updated_time = getCurrentDate()\n        order_info.paid_time = params['paid_time'] if params and 'paid_time' in params else getCurrentDate()\n        db.session.add(order_info)\n        db.session.commit()\n        return True\n\n    def addPayCallbackData(self, pay_order_id=0, type='pay', data=''):\n        \"\"\"\n        Record a WeChat Pay callback\n        :param pay_order_id:\n        :param type:\n        :param data:\n        :return:\n        \"\"\"\n        # Insert\n        model_callback = OrderCallbackData()\n        model_callback.order_id = pay_order_id\n        if type == \"pay\":\n            model_callback.pay_data = data\n            model_callback.refund_data = ''\n        else:\n            model_callback.refund_data = data\n            model_callback.pay_data = ''\n\n        model_callback.created_time = model_callback.updated_time = getCurrentDate()\n        db.session.add(model_callback)\n        db.session.commit()\n        return True\n\n    def addGoodsTopPayCallbackData(self, pay_order_id=0, type='pay', data=''):\n        \"\"\"\n        Record a WeChat Pay callback\n        :param pay_order_id:\n        :param type:\n        :param data:\n        :return:\n        \"\"\"\n        # Insert\n        model_callback = GoodsTopOrderCallbackData()\n        model_callback.top_order_id = pay_order_id\n        if type == \"pay\":\n            model_callback.pay_data = data\n            model_callback.refund_data = ''\n        else:\n            model_callback.refund_data = data\n            model_callback.pay_data = ''\n\n        model_callback.created_time = model_callback.updated_time = getCurrentDate()\n        db.session.add(model_callback)\n        db.session.commit()\n        return True\n\n    def addThankPayCallbackData(self, pay_order_id=0, type='pay', data=''):\n        \"\"\"\n        Record a WeChat Pay callback\n        :param pay_order_id:\n        :param type:\n        :param data:\n        :return:\n        \"\"\"\n        # Insert\n        model_callback = ThankOrderCallbackData()\n        model_callback.thank_order_id = pay_order_id\n        if type == \"pay\":\n            model_callback.pay_data = data\n            model_callback.refund_data = ''\n        else:\n            model_callback.refund_data = data\n            model_callback.pay_data = ''\n\n        model_callback.created_time = model_callback.updated_time = getCurrentDate()\n        db.session.add(model_callback)\n        db.session.commit()\n        return True\n\n    def addBalancePayCallbackData(self, pay_order_id=0, type='pay', data=''):\n        \"\"\"\n        Record a WeChat Pay callback\n        :param pay_order_id:\n        :param type:\n        :param data:\n        :return:\n        \"\"\"\n        # Insert\n        model_callback = BalanceOrderCallbackData()\n        model_callback.balance_order_id = pay_order_id\n        if type == \"pay\":\n            model_callback.pay_data = data\n            model_callback.refund_data = ''\n        else:\n            model_callback.refund_data = data\n            model_callback.pay_data = ''\n\n        model_callback.created_time = model_callback.updated_time = getCurrentDate()\n        db.session.add(model_callback)\n        db.session.commit()\n        return True\n\n
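The four gene*OrderSn methods that follow all share one scheme: md5 over a millisecond timestamp plus a random suffix, retried until unseen. A minimal standalone version (the `exists` predicate stands in for the per-table database lookup):

# Collision check is delegated to the caller; by default nothing "exists".
import hashlib, random, time

def gene_sn(exists=lambda sn: False):
    while True:
        raw = "%s-%s" % (int(round(time.time() * 1000)), random.randint(0, 9999999))
        sn = hashlib.md5(raw.encode("utf-8")).hexdigest()
        if not exists(sn):
            return sn

print(gene_sn())   # a 32-character hex digest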
    def geneOrderSn(self):\n        \"\"\"\n        :return: a serial number guaranteed not to repeat\n        \"\"\"\n        m = hashlib.md5()\n        sn = None\n        while True:\n            # millisecond timestamp - random number below ten million\n            sn_str = \"%s-%s\" % (int(round(time.time() * 1000)), random.randint(0, 9999999))\n            m.update(sn_str.encode(\"utf-8\"))\n            sn = m.hexdigest()\n            if not Order.query.filter_by(order_sn=sn).first():\n                break\n        return sn\n\n    def geneGoodsTopOrderSn(self):\n        \"\"\"\n        :return: a serial number guaranteed not to repeat\n        \"\"\"\n        m = hashlib.md5()\n        sn = None\n        while True:\n            # millisecond timestamp - random number below ten million\n            sn_str = \"%s-%s\" % (int(round(time.time() * 1000)), random.randint(0, 9999999))\n            m.update(sn_str.encode(\"utf-8\"))\n            sn = m.hexdigest()\n            if not GoodsTopOrder.query.filter_by(order_sn=sn).first():\n                break\n        return sn\n\n    def geneThankOrderSn(self):\n        \"\"\"\n        :return: a serial number guaranteed not to repeat\n        \"\"\"\n        m = hashlib.md5()\n        sn = None\n        while True:\n            # millisecond timestamp - random number below ten million\n            sn_str = \"%s-%s\" % (int(round(time.time() * 1000)), random.randint(0, 9999999))\n            m.update(sn_str.encode(\"utf-8\"))\n            sn = m.hexdigest()\n            if not ThankOrder.query.filter_by(order_sn=sn).first():\n                break\n        return sn\n\n    def geneBalanceOrderSn(self):\n        \"\"\"\n        :return: a serial number guaranteed not to repeat\n        \"\"\"\n        m = hashlib.md5()\n        sn = None\n        while True:\n            # millisecond timestamp - random number below ten million\n            sn_str = \"%s-%s\" % (int(round(time.time() * 1000)), random.randint(0, 9999999))\n            m.update(sn_str.encode(\"utf-8\"))\n            sn = m.hexdigest()\n            if not BalanceOrder.query.filter_by(order_sn=sn).first():\n                break\n        return sn\n\n    def autoCloseOrder(self, member_id=0):\n        \"\"\"\n        Automatically close unpaid orders that have timed out\n        :param member_id:\n        :return:\n        \"\"\"\n        order_list = Order.query.filter(Order.member_id == member_id, Order.status == -8,\n                                        Order.updated_time <= seconds2str(time.time() - 1800)).all()\n        member_info = g.member_info\n        for order in order_list:\n            self.closeOrder(pay_order_id=order.id)\n            if order.discount_type == \"账户余额\":  # i.e. paid partly from the account balance\n                member_info.balance += order.discount_price\n            db.session.add(member_info)\n        db.session.commit()\n","sub_path":"backend/ciwei/common/libs/mall/PayService.py","file_name":"PayService.py","file_ext":"py","file_size_in_byte":16549,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"472437044","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[1]:\n\n\n# write a program to calculate the sum of the first digit and the last digit of a given number\nn = int(input(\"Enter a Number :\"))\n\np = 0\nf = 0\ns = 0\n\nl = n % 10\n\nwhile n > 0:\n    r = n % 10\n\n    p = p * 10 + r\n\n    n = int(n / 10)\n    \nf = p % 10\n\ns = f + l\n\nprint(\"Sum of first and last digit is :\", s)\n\n\n# In[ ]:\n\n\n\n\n","sub_path":"sum first and last.py","file_name":"sum first and 
 +{"seq_id":"472437044","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[1]:\n\n\n# Write a program to calculate the sum of the first digit and the last digit of a given number\nn = int(input(\"Enter a Number :\"))\n\np = 0\nf = 0\ns = 0\n\nl = n % 10\n\nwhile n > 0:\n r = n % 10\n\n p = p * 10 + r\n\n n = int(n / 10)\n \nf = p % 10\n\ns = f + l\n\nprint(\"Sum of first and last digit is :\", s)\n\n\n# In[ ]:\n\n\n\n\n","sub_path":"sum first and last.py","file_name":"sum first and last.py","file_ext":"py","file_size_in_byte":363,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"444218191","text":"import pyperclip, re\n\n\nphonenoRe=re.compile(r'''((\n (\\d{3}|\\(\\d{3}\\))?\n (\\s|-|\\.)?\n (\\d{3})\n (\\s|-|\\.)\n (\\d{4})\n (\\s*(ext|x|ext.)\\s*(\\d{2,5}))?)\n |(([+]{1}) # Indian phone number\n ([9])? # starts with 9\n ([0123456789]{11}) # rest 11 digits\n )|\\d{8})''',re.VERBOSE|re.IGNORECASE|re.DOTALL)\n\n\n\n\nemailRe=re.compile(r'''(\n \\w+ #username\n @\n \\w+ #domain name\n (\\.[a-zA-Z]{2,4})\n )''',re.VERBOSE)\n\n\n\n# copying from the clipboard\ntext=str(pyperclip.paste())\nmatches=[]\nmatches=phonenoRe.findall(text)\n#pyperclip.copy(matches)\n\nemail=[]\nemail= emailRe.findall(text)\n#pyperclip.copy(email)\n\nelen=len(email)\nfor i in range (0,elen):\n print(email[i][0])\n\nplen=len(matches)\nfor i in range (0,plen):\n print(matches[i][0])\n\n\n#print(matches)\n#print(email)\n","sub_path":"PhoneNoAndEmailChecker.py","file_name":"PhoneNoAndEmailChecker.py","file_ext":"py","file_size_in_byte":863,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"231983417","text":"'''\n for n in range(2, limit):\n while num % n == 0:\n num = num / n\n answer = n\n print('{}/{} = {}'.format(num * n, n, num))\n if num == 1:\n break\n n += 1\n'''\nsum_num = 0\nprime_num_list = [2]\n\n\ndef decide_prime_number(q_num, prime_numbers):\n for p in prime_numbers:\n if q_num != p and q_num % p == 0:\n return False\n return True\n\n\n# limit = 2000000\nlimit = 2000000\ncnt = 2\nwhile cnt < limit:\n if decide_prime_number(cnt, prime_num_list):\n # print('{}'.format(cnt), end=' , ')\n sum_num += cnt\n prime_num_list.append(cnt)\n cnt += 1\nprint()\nprint(\"Answer : \", sum_num)\n","sub_path":"euler/problem10/sanggon.py","file_name":"sanggon.py","file_ext":"py","file_size_in_byte":679,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
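The Euler solution above trial-divides every candidate against all primes found so far, which becomes very slow near the 2,000,000 limit. A sieve of Eratosthenes (a standard alternative sketched here, not part of the original record) computes the same sum in a fraction of the time:

def sum_primes_below(limit):
    # Sieve of Eratosthenes: cross off every multiple of each prime
    is_prime = [True] * limit
    is_prime[0] = is_prime[1] = False
    for p in range(2, int(limit ** 0.5) + 1):
        if is_prime[p]:
            for m in range(p * p, limit, p):
                is_prime[m] = False
    return sum(i for i, flag in enumerate(is_prime) if flag)

print(sum_primes_below(2000000))  # 142913828922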
 +{"seq_id":"529834736","text":"import discord, dotenv\nconfig = dotenv.dotenv_values(\".env\")\n\nimport time\nimport sys\nimport asyncio\nimport math\nimport random\nimport logging\nimport re\nlogging.basicConfig(stream=sys.stdout, level=logging.INFO)\n\nimport players\nfrom game import SingleHole\nfrom swordfighting import SwordfightingDecree\n\n\ndebug = False\nprefix = '!'\nMAX_GAMES = 10\n\nif len(sys.argv) > 1:\n if sys.argv[1] == \"debug\":\n debug = True\n logging.info(\"Debug mode on!\")\n prefix = 'd' + prefix\n\nusers_with_games_active = []\n\ndef limit_one_game_per_person(func):\n '''\n A decorator to ensure someone can only run one command at a time.\n Also, has some bonus error reporting if something goes wrong.\n The first argument of a function using this decorator must be 'message', a discord message to react to if something goes wrong\n '''\n async def wrapper(message, *args, **kwargs):\n global users_with_games_active\n\n if message.author in users_with_games_active:\n await message.channel.send(\"To avoid lag, please wait for your current game to finish before starting any more.\")\n return\n users_with_games_active.append(message.author)\n try:\n return await func(message, *args, **kwargs)\n except (Exception, KeyboardInterrupt) as e:\n logging.exception(e)\n await message.add_reaction('⚠️')\n raise e\n finally:\n if message.author in users_with_games_active:\n users_with_games_active.remove(message.author)\n\n return wrapper\n\nupdate_coming = False\n\ndef disable_if_update_coming(func):\n '''\n A decorator to disable starting new games if updates are coming\n '''\n async def wrapper(message, *args, **kwargs):\n global update_coming\n if update_coming:\n await message.channel.send(\":loop: HANG TIGHT FOR A MOMENT, GOT SOME RADICAL RENOVATIONS COMIN' RIGHT UP\\n:loop: PLEASE LEAVE A MESSAGE AFTER THE TONE\")\n return\n return await func(message, *args, **kwargs)\n return wrapper\n\n\nasync def newglolfgame(message, glolfer_names, header=None, max_turns=60, is_tournament=False):\n # start a round of glolf and return the winning player's name\n\n glolfgame = await message.channel.send(\"Beginning game...\")\n logging.info(f\"Starting game between {glolfer_names} in channel #{message.channel} in server '{message.channel.guild.name}'\")\n try:\n game = SingleHole(debug=debug,glolfer_names=glolfer_names,max_turns=max_turns,is_tournament=is_tournament)\n await asyncio.sleep(2)\n await glolfgame.edit(content=game.printgamestate(header=header))\n await asyncio.sleep(2)\n\n while not game.over:\n delay = game.update()\n await glolfgame.edit(content=game.printgamestate(header=header))\n await asyncio.sleep(delay)\n\n await glolfgame.edit(content=game.printgamestate(include_board=True,header=header))\n await asyncio.sleep(10)\n await glolfgame.edit(content=game.printgamestate(include_board=False,header=header))\n return game.compute_winner()\n except (Exception, KeyboardInterrupt) as e:\n await glolfgame.add_reaction('⚠️')\n raise e\n\n@disable_if_update_coming\n@limit_one_game_per_person\nasync def glolfcommand(message):\n # parse a glolf command\n\n arguments = message.content.split(\"\\n\") #first line has \"!glolf\" on it\n glolfer_names = []\n if len(arguments) > 1: # 0 players is fine\n glolfer_names = arguments[1:]\n if len(glolfer_names) == 1:\n await message.channel.send(\"It's too dangerous to glolf alone. Bring an opponent.\")\n return\n\n if len(users_with_games_active) > MAX_GAMES:\n await message.channel.send(\"There's too many games going on right now. To avoid lag, please wait a little bit till some games are done and try again later!\")\n return\n \n\n await newglolfgame(message, glolfer_names)\n\ndef biggest_power_of_two_less_than(n):\n return 2 ** math.floor(math.log2(n))\n\ndef biggest_power_of_k_less_than(n, k=2):\n return k ** math.floor(math.log2(n)/math.log2(k))\n
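biggest_power_of_k_less_than above relies on the change-of-base identity log_k(n) = log2(n) / log2(k); note that despite the "less_than" name it returns n itself when n is an exact power of k. The bye arithmetic in the tourney code below builds on it; a worked illustration (the numbers here are made up, not from the record):

import math

def biggest_power_of_k_less_than(n, k=2):
    return k ** math.floor(math.log2(n) / math.log2(k))

# 1v1 tourney with 11 entrants: the full bracket holds 8 players,
# so 11 - 8 = 3 qualifier matches occupy the first 6 names and the
# other 5 entrants receive byes; 3 winners + 5 byes = 8, a full bracket.
entrants = 11
full_bracket = biggest_power_of_k_less_than(entrants, 2)   # 8
qualifier_matches = entrants - full_bracket                # 3
byes = entrants - qualifier_matches * 2                    # 5
assert qualifier_matches + byes == full_bracket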
\n\nbattleRoyaleTypeRegex = re.compile(\"1(v1)+\")\n\n@disable_if_update_coming\n@limit_one_game_per_person\nasync def battle_royale_glolftourney(message, glolfers_per_game=2):\n # a tourney where each game has multiple people, but only one can win each game\n # if glolfers_per_game is 2, it's a 1v1, if glolfers_per_game is 3, each game is a 1v1v1, etc\n assert glolfers_per_game > 1\n arguments = message.content.split(\"\\n\") #first line has \"!tourney\" on it\n glolfer_names = []\n if len(arguments) > 1:\n glolfer_names = arguments[1:]\n num_players = len(glolfer_names)\n if num_players == 1:\n await message.channel.send(\"That's a short tournament... I guess they win by default!\")\n return None\n if num_players < glolfers_per_game:\n await message.channel.send(\"There aren't enough players for even a single match. Everyone wins!\")\n return None\n else: # 0 players\n await message.channel.send(\"To use, please give a list of competitors after the command, each on a separate line.\")\n return\n\n if len(users_with_games_active) > MAX_GAMES:\n await message.channel.send(\"There's too many games going on right now. To avoid lag, please wait a little bit till some games are done and try again later!\")\n return\n\n\n await message.channel.send(f\"{len(glolfer_names)}-person tournament starting...\")\n move_onto_next_round = []\n\n random.shuffle(glolfer_names)\n\n competitors_this_round = glolfer_names[:]\n\n # If # of entrants isn't a power of two, we don't have a full bracket, so give some contestants byes\n full_bracket_size = biggest_power_of_k_less_than(len(glolfer_names), glolfers_per_game)\n if len(glolfer_names) != full_bracket_size:\n if glolfers_per_game > 2:\n return await message.channel.send(f\"For this type of tourney I need a power-of-{glolfers_per_game} number of people. You have {len(glolfer_names)}\")\n num_matches_required = len(glolfer_names) - full_bracket_size\n\n competitors_this_round = glolfer_names[0:num_matches_required*glolfers_per_game]\n move_onto_next_round = glolfer_names[num_matches_required*glolfers_per_game:]\n\n print(len(competitors_this_round)/glolfers_per_game + len(move_onto_next_round))\n await message.channel.send(f\"{', '.join(move_onto_next_round)} randomly receive byes and move onto the next round. Let's see who joins them!\")\n\n # actual tourney time!\n round_num = 0\n while len(competitors_this_round) > 1:\n round_num += 1\n\n max_turns = 60\n if len(competitors_this_round) >= 2*glolfers_per_game**3:\n max_turns = 40\n if debug:\n max_turns = 3\n\n round_name = f\"round {round_num}\"\n if len(competitors_this_round) + len(move_onto_next_round) == glolfers_per_game:\n round_name = \"the finals\"\n if len(competitors_this_round) + len(move_onto_next_round) == glolfers_per_game**2 and round_num != 1:\n round_name = \"the almostfinals\"\n if len(competitors_this_round) + len(move_onto_next_round) == glolfers_per_game**3 and round_num != 1:\n round_name = \"the nearfinals\"\n if len(move_onto_next_round) > 0:\n # this is right after move_onto_next_round should be []\n #if it isn't [], we're in the first round of a tourney with a non-power-of-two number of entrants\n round_name = \"qualifiers\"\n\n for index in range(0,len(competitors_this_round)-1,glolfers_per_game):\n # go down the bracket\n # competitors\n glolfers = [competitors_this_round[index+i] for i in range(glolfers_per_game) if index+i < len(competitors_this_round)]\n await asyncio.sleep(2)\n\n match_number = int(index/glolfers_per_game)+1\n total_matches = int(len(competitors_this_round)/glolfers_per_game)\n\n match_name = f\"Match {match_number}/{total_matches}\"\n if match_number == total_matches and round_name != \"the finals\" and total_matches == 1:\n match_name = \"Final match\"\n\n winner = await newglolfgame(message, glolfer_names=glolfers, header=f\"{match_name} of {round_name}!\",max_turns=max_turns, is_tournament=True)\n if winner is not None:\n move_onto_next_round.append(winner.name)\n else:\n winningname = random.choice(glolfers)\n move_onto_next_round.append(winningname)\n await message.channel.send(f\"Tie game! {winningname} wins the tiebreaking duel to advance to the next round!\")\n await asyncio.sleep(5)\n\n if len(move_onto_next_round) > 1:\n\n round_descriptor = f\"Round {round_num} results:\"\n if len(move_onto_next_round) == glolfers_per_game and round_num != 1:\n round_descriptor = \"Almostfinals results:\"\n if len(move_onto_next_round) == glolfers_per_game**2 and round_num != 1:\n round_descriptor = \"Nearfinals results:\"\n\n await message.channel.send(f\"**{round_descriptor}** {len(move_onto_next_round)} contestants move on: **{', '.join(move_onto_next_round)}**. 
Next round starts in one minute...\")\n await asyncio.sleep(60)\n\n competitors_this_round = move_onto_next_round\n move_onto_next_round = []\n\n await message.channel.send(f\"**{competitors_this_round[0]} wins the tournament!**\")\n\n\n\nasync def get_glolfer_stats(message):\n try:\n rest = message.content.replace(prefix + \"glolfer\",\"\").strip()\n if len(rest) == 0:\n await message.channel.send(\"Please add a glolfer's name to check their stlats!\")\n else:\n newplayer = players.get_player_from_name(rest)\n newmessage = f'''**{newplayer.name}**\nSignature: {newplayer.emoji}\nStance: **{newplayer.stlats.stance}**\nFavorite Tea: **{newplayer.stlats.fav_tea}**\n**Driving:**\n{newplayer.driving_rating()}\n**Grip:**\n{newplayer.precision_rating()}\n**Aerodynamics:**\n{newplayer.aerodynamics_rating()}\n**Self-Awareness:**\n{newplayer.self_awareness_rating()}\n{newplayer.vk_stat_of_the_day()}\n{newplayer.modifications_string()}'''\n await message.channel.send(newmessage)\n\n except (Exception, KeyboardInterrupt) as e:\n await message.add_reaction('⚠️')\n raise e\n\n\nasync def add_temp_modification(message):\n # Add a modification to the player until the bot restarts. Admin-only\n try:\n print(message.content)\n rest = message.content.replace(prefix + \"addtempmodification\",\"\").strip()\n if len(rest) == 0:\n await message.channel.send(\"Please add a glolfer's name! It's !addtempmodification \\n\")\n else:\n if len(rest.split(\"\\n\")) < 2:\n return await message.channel.send(\"Please add a glolfer's name, then the modification on a new line.\")\n\n glolfername = rest.split(\"\\n\")[0].strip()\n modification = rest.split(\"\\n\")[1].strip()\n\n newplayer = players.get_player_from_name(glolfername)\n newplayer.modifications.append(modification)\n players.known_players[glolfername.title()] = newplayer\n\n return await message.channel.send(f\"Added modification {modification} to player {glolfername}. It'll go away when you restart the bot, so make sure to edit the code!\")\n\n except (Exception, KeyboardInterrupt) as e:\n await message.add_reaction('⚠️')\n raise e\n\ndef user_is_admin(message):\n if \"ADMIN_IDS\" in config:\n if str(message.author.id) in config[\"ADMIN_IDS\"].strip().split(\",\"):\n return True\n return False\n\n\n\n\nasync def parse_tourney_message(message):\n firstline = message.content.split(\"\\n\")[0]\n tourneytype = firstline.replace(prefix + \"tourney\",'').strip()\n\n if len(tourneytype) == 0:\n return await message.channel.send(\"What type of tournament? Try \"+prefix+\"tourney 1v1\")\n\n is_battleroyale = battleRoyaleTypeRegex.match(tourneytype)\n if is_battleroyale:\n battletype = is_battleroyale.group(0)\n\n num_participants_per_game = battletype.count(\"1\")\n if num_participants_per_game > 8:\n return await message.channel.send(\"umm thats probably too many people\")\n return await battle_royale_glolftourney(message, num_participants_per_game)\n\n\n return await message.channel.send(\"umm im not sure what that means. 
maybe you could try \"+prefix+\"tourney 1v1\")\n\n\n\n\n\nclient = discord.Client()\n@client.event\nasync def on_ready():\n logging.info(\"The bot is ready!\")\n\n\n@client.event\nasync def on_message(message):\n global users_with_games_active\n if message.author == client.user or message.webhook_id is not None:\n return\n if message.content.startswith(prefix + \"glolfer\"):\n await get_glolfer_stats(message)\n\n\n elif message.content.startswith(prefix + \"glolf\"):\n logging.info(\"glolf detected\")\n await glolfcommand(message)\n\n\n elif message.content.startswith(prefix + \"tourney\"):\n await parse_tourney_message(message)\n\n elif message.content.startswith(prefix + \"admincommands\"):\n return await message.channel.send(\"!discordid, !addtempmodification, !updatecoming , !clear_game_list, !forcequit, !countgames\")\n\n elif message.content.startswith(prefix + \"discordid\"):\n logging.info(message.author.id) # you should only be able to access this if you're an admin\n\n elif user_is_admin(message) and message.content.startswith(prefix + \"void\"):\n \n await message.channel.send(f\"There are {len(SwordfightingDecree.players_in_interdimensional_void)} people in the interdimensional void right now: {SwordfightingDecree.players_in_interdimensional_void}\")\n logging.info(message.author.id) # you should only be able to access this if you're an admin\n\n elif user_is_admin(message) and message.content.startswith(prefix + \"addtempmodification\"):\n await add_temp_modification(message)\n\n elif user_is_admin(message) and message.content.startswith(prefix + \"countgames\"):\n await message.channel.send(f\"There are {len(users_with_games_active)} users with games active right now.\")\n\n elif user_is_admin(message) and message.content.startswith(prefix + \"updatecoming\"):\n global update_coming\n if \"true\" not in message.content and \"false\" not in message.content:\n return await message.channel.send(f\"New games are disabled because an update's coming: {update_coming}. Change this by adding 'true' or 'false' to the command\")\n elif \"true\" in message.content:\n\n update_coming = True \n await message.channel.send(\"New games are now disabled. use !countgames to see how many are running.\")\n elif \"false\" in message.content:\n update_coming = False\n await message.channel.send(\"New games are enabled again.\")\n logging.info(f\"Changed update_coming to {update_coming}\")\n\n\n # \"clear_game_list\" command. just in case\n elif user_is_admin(message) and message.content.startswith(prefix + \"clear_game_list\"):\n msg = f\"Cleared users with active games list. It was previously {users_with_games_active}. 
The games are still running but those players can now start new games.\"\n await message.channel.send(msg)\n logging.info(msg)\n users_with_games_active = []\n\n# now run the bot\ntoken = config[\"TOKEN\"]\nif debug:\n token = config[\"DEV_TOKEN\"]\n\nclient.run(token)\n","sub_path":"glolf/bot.py","file_name":"bot.py","file_ext":"py","file_size_in_byte":15682,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"469593992","text":"import os\r\nimport nltk\r\nimport re\r\nimport numpy as np\r\nfrom collections import Counter\r\nimport pandas as pd\r\n\r\nif __name__=='__main__':\r\n\r\n def readInChunks(fileObj, chunkSize=4096):\r\n \"\"\"\r\n Lazy function to read a file piece by piece.\r\n Default chunk size: 4kB.\r\n \"\"\"\r\n while 1:\r\n data = fileObj.read(chunkSize)\r\n if not data:\r\n break\r\n yield data\r\n\r\n\r\n text_ = open(\"D:/nlp/corpora/sogout.txt\", 'r', encoding=\"utf-8\")\r\n\r\n #file_path='D:/nlp/corpora/sogout.txt'\r\n #file=open(os.path.join(file_path), \"r\", encoding='UTF8')\r\n #text=file.read()\r\n\r\n for chunk in readInChunks(text_):\r\n print(type(chunk))\r\n #text=re.sub('[..:。;;!!??——%\\]\\[\\t\\n\\\"\\')(】【)(+\\-\\*/<>《》]', '', chunk)\r\n #text = re.sub('[a-zA-Z]', '', chunk)\r\n final_text=[x for x in chunk.split(\" \") if len(x) > 3]\r\n #token=nltk.wordpunct_tokenize(final_text)\r\n #bigram=nltk.bigrams(token)\r\n #print (list(bigram)[0:])\r\n #print(text)\r\n #final_text=re.sub('[..:。;;!!??——%\\]\\[\\t\\n\\\"\\')(】【)(+\\-\\*/<>《》]', '', final_text)\r\n #final_text = re.sub('[a-zA-Z]', '', final_text)\r\n c=Counter(final_text)\r\n n=len(c)\r\n print(c.keys())\r\n finder=nltk.collocations.BigramCollocationFinder.from_words(final_text)\r\n bigram_measures=nltk.collocations.BigramAssocMeasures()\r\n finder.nbest(bigram_measures.pmi,10)\r\n res=sorted(finder.ngram_fd.items(),key=lambda t:(-t[1],t[0]))\r\n print(\"res computed\")\r\n d=np.zeros([n,2],int)\r\n matrix = pd.DataFrame(d, index=c.keys(), columns=[0,1])\r\n tp=[]\r\n\r\n for i in range(0,len(res)):\r\n matrix.loc[res[i][0][0],0]+=res[i][1]\r\n matrix.loc[res[i][0][1],1]+=res[i][1]\r\n print(\"matrix generated\")\r\n '''\r\n for i in range(0,len(res)):\r\n #t_test\r\n n_ii=res[i][1]\r\n n_ix=0\r\n n_xi=0\r\n n_xx=len(res)\r\n for j in range(0,len(res)):\r\n if res[j][0][0]==res[i][0][0]: n_ix+=1\r\n if res[j][0][1]==res[i][0][1]: n_xi+=1\r\n '''\r\n for i in range(0,len(res)):\r\n n_ii=res[i][1]\r\n n_ix=matrix.loc[res[i][0][0],0]\r\n n_xi=matrix.loc[res[i][0][1],1]\r\n n_xx=len(res)\r\n if i%100000==0:\r\n print(\"processed:\")\r\n print(i*10000)\r\n t=bigram_measures.student_t(n_ii,(n_ix,n_xi),n_xx)\r\n tp.append(t)\r\n\r\n # print(tp[0])\r\n ty = tp.copy()\r\n # ty=list(set(ty))\r\n ty = sorted(ty, reverse=True)\r\n final = []\r\n final.append(tp.index(ty[0]))\r\n j = 1\r\n while len(final) <= 10:\r\n k = 0\r\n j1 = j\r\n while tp.index(ty[j1]) == tp.index(ty[j1 - 1]):\r\n k = k + 1\r\n j1 = j1 + 1\r\n final.append(tp.index(ty[j]) + k)\r\n j = j + 1\r\n for f in range(10):\r\n print(res[final[f]][0],res[final[f]][1], tp[final[f]], \"\\n\")\r\n\r\n\r\n\r\n\r\n","sub_path":"t2-test.py","file_name":"t2-test.py","file_ext":"py","file_size_in_byte":2945,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
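readInChunks in the t2-test.py record above is a plain generator; because the file is opened in text mode with an encoding, chunkSize counts characters rather than bytes. A self-contained usage sketch (the file name here is invented):

def read_in_chunks(file_obj, chunk_size=4096):
    # yield successive chunks until read() returns an empty string
    while True:
        data = file_obj.read(chunk_size)
        if not data:
            break
        yield data

with open("corpus.txt", "r", encoding="utf-8") as f:
    total_chars = sum(len(chunk) for chunk in read_in_chunks(f))
print(total_chars)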
 +{"seq_id":"649724035","text":"'''We all have played snake, water, gun game in our childhood. If you haven't, google the rules of this game and write a\nPython program capable of playing this game with the user'''\n\n\nimport random\n\n\ndef game():\n num =10\n compScore = 0\n playerScore = 0\n i=0\n while i < num:\n randNo= random.randint(1,3)\n if randNo== 1:\n compChoice = \"s\"\n elif randNo==2:\n compChoice=\"w\"\n elif randNo==3:\n compChoice=\"g\"\n\n yourChoice = input(\"Player's turn: SNAKE(s) WATER(w) GUN(g): \")\n # snake drinks water, water douses gun, gun shoots snake\n if yourChoice ==compChoice:\n print(\"It's a Draw\")\n elif yourChoice == \"s\" and compChoice==\"w\":\n print(\"You win!!\")\n playerScore=playerScore+ 1\n elif yourChoice == \"w\" and compChoice==\"g\":\n print(\"You win!!\")\n playerScore=playerScore+ 1\n elif yourChoice == \"g\" and compChoice==\"s\":\n print(\"You win!!\")\n playerScore=playerScore+ 1\n elif yourChoice == \"s\" and compChoice==\"g\":\n print(\"Computer wins!!\")\n compScore =compScore+1\n elif yourChoice == \"w\" and compChoice==\"s\":\n print(\"Computer wins!!\")\n compScore =compScore+1\n elif yourChoice == \"g\" and compChoice==\"w\":\n print(\"Computer wins!!\")\n compScore =compScore+1\n print(f\"{num-i} times left!\")\n i=i+1\n \n if playerScore>compScore:\n print(f\"You win with a score of:{playerScore} out of 10 and computer score is {compScore}\")\n elif playerScore=1.17.0\", \"bokeh>=1.4.0\", \"plotly>=4.3.0\", \"scikit-learn>=0.21.3\"],\n keywords = ['Data generator'],\n packages=find_packages()\n)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":643,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"384010731","text":"\"\"\"\n Download topo and dtopo files needed for this example.\n\n Call functions with makeplots==True to create plots of topo, slip, and dtopo.\n\"\"\"\n\nfrom __future__ import print_function\nimport os,sys\nimport clawpack.clawutil.data\nfrom clawpack.geoclaw import topotools\nfrom numpy import *\n\ntry:\n CLAW = os.environ['CLAW']\nexcept KeyError:\n raise Exception(\"*** Must first set CLAW environment variable\")\n\n# Scratch directory for storing topo and dtopo files:\nscratch_dir = os.path.join(CLAW, 'geoclaw', 'scratch')\n\n\n# Initial data for adjoint is Gaussian hump around the gauge locations:\n# Pulled from fgmax_grid.txt\n\n\ndef get_topo(makeplots=False):\n \"\"\"\n Retrieve the topo file from the GeoClaw repository.\n \"\"\"\n from clawpack.geoclaw import topotools\n topo_fname = 'etopo10min120W60W60S0S.asc'\n url = 'http://depts.washington.edu/clawpack/geoclaw/topo/etopo/' + topo_fname\n clawpack.clawutil.data.get_remote_file(url, output_dir=scratch_dir,\n file_name=topo_fname, verbose=True)\n\n if makeplots:\n from matplotlib import pyplot as plt\n topo = topotools.Topography(os.path.join(scratch_dir,topo_fname),\n topo_type=2)\n topo.plot()\n fname = os.path.splitext(topo_fname)[0] + '.png'\n plt.savefig(fname)\n print(\"Created \",fname)\n\n\ndef makeqinit():\n \"\"\"\n Create qinit data file\n \"\"\"\n\n with open(\"../../PreRun/InputData/fgmax_grid.txt\") as f:\n temp = f.readlines()\n temp = temp[7:]\n data = zeros((len(temp),2))\n for i in range(len(temp)):\n lon, lat = temp[i].split()\n data[i][0] = lon\n data[i][1] = lat\n xcenter = data[:,0]\n ycenter = data[:,1]\n\n xlower = min(xcenter)-1\n xupper = max(xcenter)+1\n ylower = min(ycenter)-1\n yupper = max(ycenter)+1\n\n nxpoints = int((xupper-xlower)*60)+1\n nypoints = int((yupper-ylower)*60)+1\n\n print(\"nxpoints etc\")\n print(nxpoints)\n print(nypoints)\n\n def qinit(x,y,scale=150):\n from numpy 
import where\n from clawpack.geoclaw.util import haversine\n\n z = 0\n\n # Gaussian using distance in meters:\n for i in range(len(xcenter)):\n r = haversine(x,y,xcenter[i],ycenter[i])\n # z = exp(-(r/20e3)**2)\n z = z+exp(-(r/scale)**2) #This is highly debatable...does this decay too quickly to register on the rough grid we are using? If we have the decay slower, are we going to have the problem of multiple points stacking up and biasing the solver?\n return z\n\n\n\n outfile= \"hump.xyz\"\n topotools.topo1writer(outfile,qinit,xlower,xupper,ylower,yupper,nxpoints,nypoints)\n\nif __name__=='__main__':\n get_topo(False)\n makeqinit()\n","sub_path":"Model_Scripts/Scenarios/1852mag/InputData/adjoint/make_adjoint_topo.py","file_name":"make_adjoint_topo.py","file_ext":"py","file_size_in_byte":2718,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"402524662","text":"\"\"\"\nThis module imports all the necessary classes to implement the controller aspect\nof the MVC framework.\n\"\"\"\nimport random\n\nfrom dojo import Dojo\nfrom Room.room import Room\nfrom Room.office import Office\nfrom Person.person import Person\nfrom Person.fellow import Fellow\nfrom Room.living_space import LivingSpace\nfrom Person.staff import Staff\nfrom model import offices, living_spaces, fellows, staffs, full_offices, full_living_spaces\n\ndef create_office(name):\n \"\"\"\n Accepts argument name to create an Office object.\n \"\"\"\n office = Office(name)\n print(\"An office called\", name, \"has been successfully created!\")\n offices.append(office)\n\ndef create_living_space(name):\n \"\"\"\n Accepts argument name to create a LivingSpace object.\n \"\"\"\n living_space = LivingSpace(name)\n print(living_space.name)\n print(\"Living Space created successfully!\")\n living_spaces.append(living_space)\n return living_space\n\ndef create_person(fname, lname, job_status, accom=\"\"):\n \"\"\"\n Accepts four arguments to create a person object.\n \"\"\"\n if job_status.upper() == \"FELLOW\":\n person = Fellow(fname, lname, job_status)\n if accom == \"Y\":\n allocate_living_space(person)\n fellows.append(person)\n elif job_status.upper() == \"STAFF\":\n person = Staff(fname, lname, job_status)\n staffs.append(person)\n \n return person\n\ndef allocate_office_space(person):\n \"\"\"\n Accepts the argument of person object and allocates a random office space.\n \"\"\"\n while True:\n random_office = random.choice(offices)\n if random_office.spaces_left > 0:\n person.assign_office_space(random_office.name)\n random_office.allocate_space()\n break\n else:\n random_office_index = offices.index(random_office)\n office = offices.pop(random_office_index)\n full_offices.append(office)\n print(person.firstName, \"has been allocated the office\", person.office_name)\n\ndef allocate_living_space(person):\n \"\"\"\n Accepts the argument of person object and allocates a random living space.\n \"\"\"\n while True:\n random_living_space = random.choice(living_spaces)\n if random_living_space.spaces_left > 0:\n person.assign_living_space(random_living_space.name)\n random_living_space.allocate_space()\n break\n else:\n random_living_space_index = living_spaces.index(random_living_space)\n living_space = living_spaces.pop(random_living_space_index)\n full_living_spaces.append(living_space)\n print(person.firstName, \"has been allocated the living space\", person.living_space_name)\n\ndef display_persons_office(fname, lname):\n \"\"\"\n Accepts a first and last name and displays that person's office name.\n
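The qinit construction in the make_adjoint_topo.py record above builds its surface as a sum of narrow Gaussian bumps in metric distance. A stripped-down standalone version (using a plain NumPy haversine with an assumed Earth radius, rather than clawpack's util.haversine, so it runs on its own):

import numpy as np

def haversine_m(lon1, lat1, lon2, lat2, radius=6_367_500.0):
    # great-circle distance in meters; the radius value is an assumption here
    lon1, lat1, lon2, lat2 = map(np.radians, (lon1, lat1, lon2, lat2))
    a = (np.sin((lat2 - lat1) / 2.0) ** 2
         + np.cos(lat1) * np.cos(lat2) * np.sin((lon2 - lon1) / 2.0) ** 2)
    return 2.0 * radius * np.arcsin(np.sqrt(a))

def qinit(x, y, centers, scale=150.0):
    # one Gaussian bump per gauge location; scale is in meters
    z = 0.0
    for xc, yc in centers:
        z += np.exp(-(haversine_m(x, y, xc, yc) / scale) ** 2)
    return z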
\"\"\"\n persons = fellows + staffs\n for person in persons:\n if fname == person.firstName and lname == person.lastName:\n print(\"\\t\" + person.firstName, person.lastName, person.office_name)\n else:\n print(\"No matching name in the database.\")\n\ndef display_object_types(objects_list):\n \"\"\"\n Accepts a list of objects_list objects and displays each object_list name.\n \"\"\"\n for object_type in objects_list:\n print(\"\\t{:*^30s}\\n\".format(object_type.name))\n\ndef display_offices(offices):\n \"\"\"\n Accepts a list of offices objects and displays each office name.\n \"\"\"\n print(\"\\tOffice Names\\n\")\n display_object_types(offices)\n\ndef display_full_offices(full_offices):\n \"\"\"\n Accepts a list of full_offices objects and displays each office name.\n \"\"\"\n print(\"Full Offices\")\n display_object_types(full_offices)\n\ndef display_full_living_spaces(full_living_spaces):\n \"\"\"\n Accepts a list of full_living_spaces objects and displays each living_space_name.\n \"\"\"\n print(\"\\tFull Living Spaces\")\n display_object_types(full_living_spaces)\n\ndef display_room(name):\n \"\"\"\n Accepts a name argument and loops through objects to determine if attributes \n match name.\n \"\"\"\n print(\"\\t\" + name + \"\\n\")\n if fellows != [] and staffs != []:\n people = fellows + staffs\n for person in people:\n if person.office_name == name:\n print(\"\\t\",person.firstName, person.office_name)\n\n print()\n if fellows != []:\n for person in fellows:\n if person.living_space_name == name:\n print(\"\\t\", person.firstName, person.living_space_name)\n\ndef display_allocations(people, filename=\"\"):\n \"\"\"\n Accepts a list of people objects and prints their name and room attributes.\n \"\"\"\n if filename:\n with open(filename, 'w') as file_obj:\n for person in people:\n if person.status.upper() == \"FELLOW\":\n file_obj.write(person.firstName + \" \" + person.lastName + \" \" + person.living_space_name + \"\\n\")\n file_obj.write(person.firstName + \" \" + person.lastName + \" \" + person.office_name + \"\\n\")\n else:\n for person in people:\n if person.status.upper() == \"FELLOW\":\n print(person.firstName + \" \" + person.lastName + \" \" + person.living_space_name)\n print(person.firstName + \" \" + person.lastName + \" \" + person.office_name)\n\ndef display_prog_greeting():\n \"\"\"\n Prints the program greeting to the user.\n \"\"\"\n title = \" Office Space Allocation System \"\n print(\"\\n{:*^80}\\n\".format(title.title()))\n print(\"\\tThis program randomly allocates Offices and Living SPaces to\")\n print(\"\\tAndela Fellows and Staff. 
\")\n print(\"\\n{:*^80s}\\n\".format(\"*\"))\n print()\n","sub_path":"controller.py","file_name":"controller.py","file_ext":"py","file_size_in_byte":5637,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"67467418","text":"#!/usr/bin/python3\n\"\"\" Python Script \"\"\"\nimport requests\n\n\ndef number_of_subscribers(subreddit):\n \"\"\" Python Method \"\"\"\n link = \"https://api.reddit.com/r/{}/about\".format(subreddit)\n headers = {\n 'User-Agent': 'My User Agent 1.0',\n 'From': 'mechken.atef@gmail.com'\n }\n\n reddit_req = requests.get(link, headers=headers)\n reddit_info = reddit_req.json()\n try:\n subscribers = reddit_info[\"data\"][\"subscribers\"]\n return subscribers\n except Exception as e:\n return 0\n","sub_path":"0x16-api_advanced/0-subs.py","file_name":"0-subs.py","file_ext":"py","file_size_in_byte":521,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"561774282","text":"# -*- coding: utf-8 -*-\nimport scrapy\nfrom ..items import *\nfrom ..SqliteOperator import *\nimport time\nimport sys\nimport urllib\n\nconn = sqlite3.connect('tencentURL')\ntable = 'tencentURL'\nfields = ('URL','Time')\ndb_create_table(conn, table, fields, ('URL'))\nconn.text_factory=str\ngQuery = None\nkeyword = []\n\nwith open('./keyword.txt','r') as f:\n for line in f:\n keyword.append(line.split('\\n')[0])\n\n\nclass YoukuSpider(scrapy.Spider): ##re bo\n name = 'qq'\n allowed_domains = ['qq.com']\n\n def __init__(self):\n self.cnt = 0\n self.nameCnt = 1\n self.suffix = time.strftime('%Y-%m-%d-%H-%M-%S-', time.localtime(time.time()))\n self.start_urls = []\n \n\n for query in keyword:\n for i in range(1,21):\n self.start_urls.append('https://v.qq.com/x/search/?q=' + query + '&cur=' + str(i) + '&filter=sort=1')\n\n print(urllib.quote(query))\n\n\n def parse(self, response):\n item = TencentkeywordItem()\n for each in response.xpath('.//div[@class=\"result_item result_item_h _quickopen\"]'):\n link = each.xpath('./a/@href').extract()[0]\n authorLink = each.xpath('.//span[@class=\"content\"]/a/@href').extract()[0]\n # title = each.xpath('./strong[@class=\"figure_title figure_title_two_row\"]/a[@title]/text()').extract()[0]\n item['link']=link.encode('utf-8')\n item['authorLink'] = authorLink.encode('utf-8')\n\n if db_table_get_count(conn, table, ('URL=?', [str(item['link'])])) == 0:\n self.cnt += 1\n if self.cnt % 100 == 0:\n self.nameCnt += 1\n\n with open('Tencenturl ' + str(self.suffix) + str(self.nameCnt) + '.txt', 'a')as f:\n rows = [fields]\n curTime = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time()))\n rows.append((str(item['link']), curTime))\n db_table_add_rows(conn, table, rows, ['URL'])\n f.write(str(item['link']) + '\\n')\n\n if db_table_get_count(conn, table, ('URL=?', [str(item['authorLink'])])) == 0:\n self.cnt += 1\n if self.cnt % 100 == 0:\n self.nameCnt += 1\n\n with open('TencentAuthorUrl ' + str(self.suffix) + str(self.nameCnt) + '.txt', 'a')as f:\n rows = [fields]\n curTime = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time()))\n rows.append((str(item['authorLink']), curTime))\n db_table_add_rows(conn, table, rows, ['URL'])\n f.write(str(item['authorLink']) + '\\n')\n\n # yield item\n # yield scrapy.Request(url=self.urls[self.cnt],callback =self.parse)\n # self.cnt += 
1\n","sub_path":"TencentAuthorAndKeyWord/TencentKeyWord/spiders/crawlTencent.py","file_name":"crawlTencent.py","file_ext":"py","file_size_in_byte":2805,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"64439020","text":"from itemadapter import ItemAdapter\nimport orator\nfrom snownlp import SnowNLP\nfrom nest.models import Product\nfrom nest.models import Comment\n\n\nclass NestPipeline:\n def __init__(self, db_config):\n self.db_config = db_config\n\n @classmethod\n def from_crawler(cls, crawler):\n config = {\n 'mysql': {\n 'driver': 'mysql',\n 'host': crawler.settings.get('MYSQL_HOST'),\n 'database': crawler.settings.get('MYSQL_DB'),\n 'user': crawler.settings.get('MYSQL_USER'),\n 'password': crawler.settings.get('MYSQL_PASSWORD'),\n }\n }\n return cls(\n db_config=config\n )\n\n def open_spider(self, spider):\n db = orator.DatabaseManager(self.db_config)\n orator.Model.set_connection_resolver(db)\n\n def process_item(self, item, spider):\n product = Product.where('title', '=', item['title']).get().first()\n if product is None:\n product = Product()\n product.title = item['title']\n product.save()\n\n for comment in item['comments']:\n db_comment = Comment()\n db_comment.product_id = product.id\n try:\n s = SnowNLP(comment)\n db_comment.emotion = s.sentiments\n except Exception as err:\n print(err)\n db_comment.content = comment\n db_comment.save()\n\n return item\n","sub_path":"week10/nest/nest/pipelines.py","file_name":"pipelines.py","file_ext":"py","file_size_in_byte":1466,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"495933254","text":"import os\nimport re\n\nimport cv2\nimport imutils as imutils\nimport numpy as np\nfrom skimage import img_as_ubyte, filters\n\nfrom letter_recognition.PredictLP import PredictLP\n\nROOT_DIR = os.path.split(os.environ['VIRTUAL_ENV'])[0]\n\n\nclass MSER_Segmentation:\n '''\n license plate processing\n input: path to a image with a plate\n output: the licence plate is saved in plateNumber(can be taken with get_lp())\n '''\n def __init__(self, file_name=\"../saved/plate2.jpeg\", enhance=True, visu=False, predictor=None):\n self.palteNo = re.findall(\"\\d+(?=\\.\\w+$)\", file_name)[0]\n\n if predictor is None:\n # load the predictor only once if it was not loaded previously\n self.predictor = PredictLP()\n else:\n self.predictor = predictor\n self.plateNumber = \"\"\n self.visu = visu # true or false, flag that helps for debugging and presentation\n self.startSegmentation(file_name)\n\n def startSegmentation(self, file_name):\n self.gray_img = cv2.imread(file_name, 0) # read the image\n self.gray_img = imutils.resize(self.gray_img, height=100, width=230) # resize the image\n if self.visu:\n cv2.imshow(\"original\", self.gray_img)\n\n self.gray_img = self.pow_law_transformation(self.gray_img) # enhance\n\n # create a binary image that has all letters as a connected component\n bin_image = self.mser_processor(self.gray_img)\n\n # segement the characters from the binary image\n self.segment_chars(bin_image)\n\n if self.visu:\n cv2.imshow(\"mesr+otsu_regional\", bin_image)\n cv2.waitKey()\n\n print(\"detected LP:\" + self.plateNumber)\n\n # track detections result in a txt file\n with open(ROOT_DIR+\"\\\\plates.txt\", \"a\") as myfile:\n myfile.write(file_name + \" \" + self.plateNumber + \"\\n\")\n\n def get_lp(self):\n return self.plateNumber\n\n def pow_law_transformation(self, gray_img_p):\n im_power_law_transformation = gray_img_p / 255.0\n 
im_power_law_transformation = cv2.pow(im_power_law_transformation, 0.6)\n im_power_law_transformation = img_as_ubyte(im_power_law_transformation)\n if self.visu:\n cv2.imshow(\"power transformation\", im_power_law_transformation)\n return im_power_law_transformation\n\n def mser_processor(self, cv_image, debug=False):\n\n # create an empty np array of the shape of the image\n mask = np.zeros(self.gray_img.shape, dtype=np.uint8)\n\n # use cv2 MSER to extract regions\n mser = cv2.MSER_create()\n regions, _ = mser.detectRegions(cv_image)\n\n for p in regions:\n xmax, ymax = np.amax(p, axis=0)\n xmin, ymin = np.amin(p, axis=0)\n\n # filter the regions\n if abs(ymax - ymin) > 50 or abs(ymax - ymin) < 10 or abs(xmax - xmin) > 30 or abs(xmax - xmin) < 10:\n continue\n if debug:\n print(\"rectangle (xmin=%02d,ymin=%02d) -> (xmax=%02d,ymax=%02d) \" % (xmin, ymin, xmax, ymax))\n\n # compute the value of the OTSU threshold for a given region (not valid for the whole image)\n val = filters.threshold_otsu(cv_image[ymin:ymax, xmin:xmax])\n\n # construct a mask for the current rectangle\n slice_of_mask = np.zeros(self.gray_img.shape, dtype=np.uint8)\n\n # apply otsu threshold\n slice_of_mask[cv_image < val] = 255\n\n # copy to the global mask only the slice for which we applied the threshold\n mask[ymin:ymax, xmin:xmax] = slice_of_mask[ymin:ymax, xmin:xmax]\n\n # if we don't want to apply the threshold we could simply put the values from the initial image\n # in a specific region to the exact same region in the global mask\n # mask[ymin:ymax, xmin:xmax] = cv_image[ymin:ymax, xmin:xmax]\n\n # draw on vis the rectangle\n # cv2.rectangle(vis, (xmin, ymax), (xmax, ymin), (128, 128, 128), 1)\n return mask\n\n def segment_chars(self, image):\n ret, labels = cv2.connectedComponents(image)\n try:\n os.mkdir(ROOT_DIR + \"\\chars\\plate\" + self.palteNo)\n print(\"created dir \" + ROOT_DIR + \"chars\\plate\" + self.palteNo)\n except:\n pass\n symbols = []\n for crt in range(1, ret):\n oneSymbol = np.where(labels == crt, len(labels), 0)\n\n first_non_empty_col = MSER_Segmentation.first_nonzero(oneSymbol, axis=0)\n\n #print(\"data shape \" + str(oneSymbol.shape))\n #print(\"character \" + str(crt) + \" has \" + str(first_non_empty_col) + \" blank rows to the left.\")\n\n # delete empty rows and columns\n data = np.delete(oneSymbol, np.where(~oneSymbol.any(axis=0))[0], axis=1)\n data = data[~np.all(data == 0, axis=1)]\n\n symbols.append((data, first_non_empty_col))\n\n # sort the symbols based on the number of empty columns from left to right\n symbols = sorted(symbols, key=lambda x: x[1])\n\n for crt in range(len(symbols)):\n self.imshow_components(crt, symbols[crt][0], save=True)\n\n if self.visu:\n self.imshow_components(\"letters colored\",\n labels) # image with letter segmentation, one color for each symbol\n\n def imshow_components(self, crt, labels, save=False):\n # Map component labels to hue val\n label_hue = np.uint8(179 * labels / np.max(labels))\n blank_ch = 255 * np.ones_like(label_hue)\n labeled_img = cv2.merge([label_hue, blank_ch, blank_ch])\n\n if labeled_img.shape[0] < 15 and labeled_img.shape[1] < 15 or labeled_img.shape[0] < 20:\n # discard small images that are not symbols for sure, just some failures\n return\n\n if labeled_img.shape[0] < 50:\n # cvt to grayscale for displaying one small symbol\n labeled_img = cv2.cvtColor(labeled_img, cv2.COLOR_BGR2GRAY)\n else:\n # cvt HSV to BGR for the colored view of all symbols\n labeled_img = cv2.cvtColor(labeled_img, cv2.COLOR_HSV2BGR)\n\n # 
set bg label to black\n labeled_img[label_hue == 0] = 0\n\n if self.visu:\n cv2.imshow(str(crt), labeled_img)\n\n if save:\n cv2.imwrite(ROOT_DIR + \"\\\\chars\\\\plate\" + self.palteNo + \"\\\\c\" + str(crt) + \".jpeg\", labeled_img)\n\n try:\n crt = int(crt)\n symbol = self.predictor.run(\"\\\\chars\\\\plate\" + self.palteNo + \"\\\\c\" + str(crt) + \".jpeg\")\n self.plateNumber = self.plateNumber + symbol\n except ValueError:\n pass\n\n @staticmethod\n def first_nonzero(arr, axis, invalid_val=-1):\n mask = arr != 0\n non_zero_indexes = np.where(mask.any(axis=axis), mask.argmax(axis=axis), invalid_val)\n mask = non_zero_indexes != -1\n return np.where(mask.any(axis=axis), mask.argmax(axis=axis), 0)\n","sub_path":"imageProcessor/ocr/MSERWrapper.py","file_name":"MSERWrapper.py","file_ext":"py","file_size_in_byte":7015,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"439390299","text":"import sys\nimport os\nimport json\nimport boto3\nimport botocore\n\ndef add_env_var(d, name) :\n\tif name in os.environ :\n\t\tval = os.environ[name]\n\telse :\n\t\tval = \"-\"\n\td[name] = val\n\t\ndef get_lambda_context_vars(c) :\n\td = {}\n\tif type(c) != int :\n\t\td['remaining_time_ms'] = c.get_remaining_time_in_millis()\n\t\td['function_name'] = c.function_name\n\t\td['function_version'] = c.function_version\n\t\td['invoked_function_arn'] = c.invoked_function_arn\n\t\td['memory_limit_in_mb'] = c.memory_limit_in_mb\n\t\td['aws_request_id'] = c.aws_request_id\n\t\td['log_group_name'] = c.log_group_name\n\t\td['log_stream_name'] = c.log_stream_name\n\t\td['cognito_identity_id'] = c.identity.cognito_identity_id\n\t\td['cognito_identity_pool_id'] = c.identity.cognito_identity_pool_id\n\t\t# Stuff which is only available when invoked via Mobile ?\n\t\t# d['client_installation_id'] = c.client_context.client.installation_id \n\treturn d\n\ndef get_lambda_env_vars() :\n\td = {}\n\tadd_env_var(d, \"_HANDLER\")\n\tadd_env_var(d, \"AWS_REGION\")\n\tadd_env_var(d, \"AWS_EXECUTION_ENV\")\n\tadd_env_var(d, \"AWS_LAMBDA_FUNCTION_NAME\")\n\tadd_env_var(d, \"AWS_LAMBDA_FUNCTION_MEMORY_SIZE\")\n\tadd_env_var(d, \"AWS_LAMBDA_FUNCTION_VERSION\")\n\tadd_env_var(d, \"AWS_LAMBDA_LOG_GROUP_NAME\")\n\tadd_env_var(d, \"AWS_LAMBDA_LOG_STREAM_NAME\")\n\tadd_env_var(d, \"AWS_ACCESS_KEY_ID\")\n\tadd_env_var(d, \"AWS_SECRET_ACCESS_KEY\")\n\tadd_env_var(d, \"AWS_SESSION_TOKEN\")\n\tadd_env_var(d, \"LANG\")\n\tadd_env_var(d, \"TZ\")\n\tadd_env_var(d, \"LAMBDA_TASK_ROOT\")\n\tadd_env_var(d, \"LAMBDA_RUNTIME_DIR\")\n\tadd_env_var(d, \"PATH\")\n\tadd_env_var(d, \"LD_LIBRARY_PATH\")\n\tadd_env_var(d, \"PYTHONPATH\")\n\t# Variables defined with the Lambda itself\n\tadd_env_var(d, \"MYVAR1\")\n\tadd_env_var(d, \"MYVAR2\")\n\t\n\treturn d\n\ndef lambda_handler(event, context):\n\n\tpython_version = sys.version\n\tbotocore_version = botocore.__version__\n\tboto3_version = boto3.__version__\n\n\tsys_path = sys.path\n\tsys_platform = sys.platform\n\tenv_vars = get_lambda_env_vars()\n\n\t# Some info about the 'event' parameter\n\tevent_type = str(type(event))\n\tevent_length = 0\n\n\tevent_detail = \"\"\t\n\tif type(event) == int :\n\t\tevent_length = 1\n\t\tevent_detail = \"int \" + str(event)\n\telif type(event) == float :\n\t\tevent_length = 1\n\t\tevent_detail = \"float \" + str(event)\n\telif type(event) == str :\n\t\tevent_length = len(event)\t\n\t\tevent_detail = \"string \" + event\n\telif type(event) == list :\n\t\tevent_length = len(event)\n\t\tevent_detail = \"list\"\n\telif 
type(event) == dict :\n\t\tevent_length = len(event)\n\t\tevent_detail = \"dict: \"\n\t\tfor k,v in event.items() :\n\t\t\tif type(v) == int or type(v) == float or type(v) == str :\n\t\t\t\tevent_detail = event_detail + k + \"=\" + str(v) + \";\"\n\t\t\telse :\n\t\t\t\tevent_detail = event_detail + k + \"=\" + str(type(v)) + \";\"\n\telse :\n\t\tevent_length = -1\n\n\tcontext_vars = get_lambda_context_vars(context)\n\n\t# Info about the AWS session from SDK\n\tsession = boto3.Session()\n\tregion = session.region_name\n\tcred = session.get_credentials()\n\tif cred.token == None:\n\t\ttoken = \"-\"\n\telse :\n\t\ttoken = cred.token\n\n\t# NB get_user() with no parameters fails when called within a Lambda environment\n\t# user = boto3.client('iam').get_user()\n\n\t# Invoke some S3 calls to list buckets\n\ts3 = boto3.resource('s3')\n\tbuckets = ''\n\tfor bucket in s3.buckets.all() :\n\t\tbuckets = buckets + ',' + bucket.name\n\tbuckets = buckets[1:]\n\n\treturn {\n\t\t'python_version' : python_version,\n\t\t'sys_path' : sys_path,\n\t\t'sys_platform' : sys_platform,\n\t\t'region': region,\n\t\t'access_key': cred.access_key,\n\t\t'secret_key': cred.secret_key,\n\t\t'token': token,\n\t\t'method': cred.method,\n\t\t'known_env_vars': env_vars,\n\t\t'context_vars': context_vars,\n\t\t'botocore_version' : botocore_version,\n\t\t'boto3_version' : boto3_version,\n\t\t'event_type' : event_type,\n\t\t'event_length' : event_length,\n\t\t'event_detail' : event_detail,\n\t\t's3_buckets': buckets\n\t}\n\n\nif __name__ == \"__main__\" :\n\t#print(\"Running as main\")\n\t#ret = lambda_handler([0,1,2,3,5], 0)\n\tret = lambda_handler({\"a\":1,\"b\":2, \"c\" : []}, 0)\n\t#ret = lambda_handler(10.34, 0)\n\tprint(ret)\n\n","sub_path":"AWSTrials/Python/lambdatest1/test1.py","file_name":"test1.py","file_ext":"py","file_size_in_byte":3931,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"210721385","text":"from .celery import my_celery\nimport requests\n#import cfscrape\nfrom bs4 import BeautifulSoup as bs\ndef get_html(region: str, query: str) -> int:\n\n\turl = 'https://www.avito.ru/{0}?q={1}'.format(region, query)\n\n\t\"\"\"session = requests.Session()\n\tsession.headers = {\n\t\t\t'scheme': 'https',\n\t\t\t'accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9',\n\t\t\t'accept-encoding': 'gzip, deflate, br',\n\t\t\t'accept-language': 'ru-RU,ru;q=0.9,en-US;q=0.8,en;q=0.7',\n\t\t\t'cache-control': 'max-age=0',\n\t\t\t'sec-fetch-dest': 'document',\n\t\t\t'sec-fetch-mode': 'navigate',\n\t\t\t'sec-fetch-site': 'none',\n\t\t\t'sec-fetch-user': '?1',\n\t\t\t'upgrade-insecure-requests': '1',\n\t\t\t'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/87.0.4280.66 Safari/537.36'\n\t\t}\n\tresult = cfscrape.create_scraper(sess=session)\"\"\"\n\t#response = result.get(url)\n\tresponse = requests.get(url)\n\tif response.status_code == 404:\n\t\treturn 0\n\telse:\n\t\tsp = bs(response.text, 'html.parser')\n\t\tdiv_class = sp.find('div', class_='page-title-root-3uh27 js-page-title')\n\t\tcount = div_class.find('span', class_='page-title-count-1oJOc')\n\t\treturn int(count.string.replace(' ', ''))\n\n@my_celery.task\ndef counter():\n\tfor i in Query.objects.all():\n\t\tcount = get_html(i.region, i.search_query)\n\t\ttemp = Time(count=count, 
query=i)\n\t\ttemp.save()\n\t\ti.qr.add(temp)","sub_path":"celery/tasks.py","file_name":"tasks.py","file_ext":"py","file_size_in_byte":1431,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"124097449","text":"\nimport parse\nimport nltk\nimport indeedBot\nfrom parse import Parse_Manager\nfrom parse import Info\n\n# make use of nltk chunking \n\n\nparse_manager = Parse_Manager()\ninfo = Info()\n\n\nclass Patterns:\n\n # r\"\"\"\n#NP: {+}\n #{+}\n#\"\"\"\n\n #VB verb, base form take\n #VBD verb, past tense took\n #VBG verb, gerund/present participle taking\n #VBN verb, past participle taken\n #VBP verb, sing. present, non-3d take\n # VBZ verb, 3rd person sing. present takes\n\n def __init__(self):\n \n self.regEx = r\"\"\"NP: {}\"\"\"\n self.verb_noun = {\"VB\":r\"\"\"NP: {}\"\"\" ,\"VBD\":r\"\"\"NP: {}\"\"\",\n \"VBG\":r\"\"\"NP: {}\"\"\" ,\"VBN\":r\"\"\"NP: {}\"\"\" ,\n \"VBP\":r\"\"\"NP: {}\"\"\",\n \"VBZ\":r\"\"\"NP: {}\"\"\" }\n\n #programming,building,working\n self.verb_nouns = {\"VB\":r\"\"\"NP: {+}\"\"\" ,\"VBD\":r\"\"\"NP: {+}\"\"\",\n \"VBG\":r\"\"\"NP: {+}\"\"\" ,\"VBN\":r\"\"\"NP: {+}\"\"\" ,\n \"VBP\":r\"\"\"NP: {+}\"\"\",\n \"VBZ\":r\"\"\"NP: {+}\"\"\" }\n\n# use nltk functionality to extract phrases with a certain structure\n\ndef chunkPhrases(taggedWords,pattern):\n phrases = []\n chunk = nltk.RegexpParser(pattern)\n phrases.append(chunk.parse(taggedWords))\n return phrases\n","sub_path":"JobBot/extract_phrases.py","file_name":"extract_phrases.py","file_ext":"py","file_size_in_byte":1393,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"45257880","text":"# Andreza Santana\n# Bank account check digit (dígito verificador)\n\nconta = int(input())\n\ntamanho = str(conta)\nsoma = 0 \n\nfor i in range(len(tamanho)):\n soma += int(tamanho[i])\n\nverificador = soma % 11\n\nif verificador == 10:\n print(f\"{conta}-X\")\nelse:\n print(f\"{conta}-{verificador}\")\n\n","sub_path":"u4/dvconta/dvconta.py","file_name":"dvconta.py","file_ext":"py","file_size_in_byte":286,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
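The check-digit rule in dvconta.py above is: sum the account's digits, take the remainder mod 11, and print X when the remainder is 10. Worked through for two made-up accounts:

# conta 555  -> 5+5+5 = 15, 15 % 11 = 4   -> prints "555-4"
# conta 1234 -> 1+2+3+4 = 10, 10 % 11 = 10 -> prints "1234-X"
for conta in (555, 1234):
    resto = sum(int(d) for d in str(conta)) % 11
    print(f"{conta}-{'X' if resto == 10 else resto}")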
 +{"seq_id":"297663279","text":"flag = 0\r\np = {'A':0,'S':0,'R':0}\r\nfor _ in range(3):\r\n z = input()\r\n if z[1]=='>':\r\n p[z[0]]+=1\r\n else:\r\n p[z[2]]+=1\r\nt = list(p.values())\r\nif 0 not in t or 1 not in t or 2 not in t:\r\n print('Impossible')\r\n flag = 1\r\nans = [0,1,2]\r\nif (not flag):\r\n for i in p:\r\n ans[p[i]]=i\r\n print(ans[0]+ans[1]+ans[2])\r\n","sub_path":"October Dusk 2020/Dhruv's - F.R.I.E.N.D.S/dhruv_friends.py","file_name":"dhruv_friends.py","file_ext":"py","file_size_in_byte":349,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"305477965","text":"from copy import deepcopy\n\nfrom rest_framework import serializers\n\nfrom atm.models import Account, Atm\n\n\nclass AccountSerializer(serializers.ModelSerializer):\n class Meta:\n model = Account\n fields = ('id', 'name', 'balance')\n\n def to_internal_value(self, data):\n \"\"\"\n Method override to adapt API key into field name.\n\n E.g.: 'saldo' -> 'balance'\n \"\"\"\n original_keys = set(data)\n modified_data = deepcopy(data)\n\n for key in original_keys:\n mapping = {\n 'nome': 'name',\n 'saldo': 'balance',\n }\n original_name = mapping.get(key, None)\n if original_name:\n modified_data[mapping[key]] = modified_data[key]\n del modified_data[key]\n\n return super(AccountSerializer, self).to_internal_value(modified_data)\n\n def to_representation(self, instance):\n \"\"\"\n Method override to adapt field name into API key.\n\n E.g.: 'balance' -> 'saldo'\n \"\"\"\n ret = super(AccountSerializer, self).to_representation(instance)\n modified_ret = deepcopy(ret)\n original_keys = set(ret)\n\n for key in original_keys:\n mapping = {\n 'name': 'nome',\n 'balance': 'saldo',\n }\n api_name = mapping.get(key, None)\n if api_name:\n modified_ret[mapping[key]] = modified_ret[key]\n del modified_ret[key]\n\n return modified_ret\n\n\nclass AtmSerializer(serializers.ModelSerializer):\n bills_1 = serializers.IntegerField(min_value=0)\n bills_2 = serializers.IntegerField(min_value=0)\n bills_5 = serializers.IntegerField(min_value=0)\n bills_10 = serializers.IntegerField(min_value=0)\n bills_20 = serializers.IntegerField(min_value=0)\n bills_50 = serializers.IntegerField(min_value=0)\n bills_100 = serializers.IntegerField(min_value=0)\n\n class Meta:\n model = Atm\n fields = ('bills_1', 'bills_2', 'bills_5', 'bills_10', 'bills_20', 'bills_50', 'bills_100')\n\n def to_internal_value(self, data):\n \"\"\"\n Method override to adapt API key into field name.\n\n E.g.: '5' -> 'bills_5'\n \"\"\"\n original_keys = set(data)\n modified_data = deepcopy(data)\n for key in original_keys:\n if key.isdigit():\n modified_data[f\"bills_{key}\"] = modified_data[key]\n del modified_data[key]\n return super(AtmSerializer, self).to_internal_value(modified_data)\n\n def to_representation(self, instance):\n \"\"\"\n Method override to adapt field name into API key.\n\n E.g.: 'bills_5' -> '5'\n \"\"\"\n ret = super(AtmSerializer, self).to_representation(instance)\n modified_ret = deepcopy(ret)\n original_keys = set(ret)\n for key in original_keys:\n if 'bills_' in key:\n modified_ret[key.replace('bills_', '')] = modified_ret[key]\n del modified_ret[key]\n return modified_ret\n\n\nclass WithdrawalSerializer(serializers.Serializer):\n value = serializers.IntegerField(min_value=1)\n default = serializers.BooleanField(default=True)\n\n def to_internal_value(self, data):\n \"\"\"\n Method override to adapt API key into field name.\n\n E.g.: 'valor' -> 'value'\n \"\"\"\n original_keys = set(data)\n modified_data = deepcopy(data)\n if \"valor\" in original_keys:\n modified_data[\"value\"] = modified_data[\"valor\"]\n del modified_data[\"valor\"]\n return super(WithdrawalSerializer, self).to_internal_value(modified_data)\n\n def to_representation(self, instance):\n \"\"\"\n Method override to adapt field name into API key.\n\n E.g.: 'value' -> 'valor'\n \"\"\"\n ret = super(WithdrawalSerializer, self).to_representation(instance)\n modified_ret = deepcopy(ret)\n original_keys = set(ret)\n if \"value\" in original_keys:\n modified_ret[\"valor\"] = modified_ret[\"value\"]\n del modified_ret[\"value\"]\n return modified_ret\n","sub_path":"atm/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":4128,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
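All of the serializers above repeat one remapping recipe in both directions. A condensed generic sketch of the idea (the helper is invented here, not part of the record):

from copy import deepcopy

def rename_keys(data, mapping):
    # mapping: {source_key: target_key}; unmapped keys pass through untouched
    out = deepcopy(data)
    for src, dst in mapping.items():
        if src in out:
            out[dst] = out.pop(src)
    return out

print(rename_keys({'nome': 'Ana', 'saldo': 3}, {'nome': 'name', 'saldo': 'balance'}))
# {'name': 'Ana', 'balance': 3}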
 +{"seq_id":"338543028","text":"import sys\nimport os\nimport csv\nimport pyodbc \nimport scipy.stats\nimport pandas as pd\nimport pandas.io.sql as psql\nimport numpy as np\nfrom pandas import Series, DataFrame\n\n#1)CHKShot DB connection Function -> return as dataframe -Updated 7/5/17\ndef CKS_Read_Query(query):\n try:\n #Create MS SQL connection via ODBC driver\n cnxn = pyodbc.connect(\"DSN=SQL_R_CHKShot\")\n cur = cnxn.cursor()\n df = psql.read_sql(query, cnxn)\n cnxn.close()\n #print(df)\n return df\n except Exception:\n print(\"CHKShot Query Error...!\")\n\n#1)Eof \n\n#2)Wellview DB connection Function -> return as dataframe -Updated 7/5/17\ndef WV_Read_Query(query):\n try:\n cnxn = pyodbc.connect(\"DSN=SQL_R_WV\") \n cur = cnxn.cursor()\n df = psql.read_sql(query, cnxn)\n cnxn.close()\n #print(df)\n return df\n except Exception:\n print(\"Wellview Query Error...!\")\n \n#2)Eof\n\n#3)VuMax Prd connection Function -> return as dataframe -Updated 7/5/17\ndef VMX_PRD_Read_Query(query):\n try:\n cnxn = pyodbc.connect(driver='{SQL Server}', server='OKCSQLPRD1021', database='VuMaxDR',\n uid=\"Vmx_vumaxadmin\",pwd=\"vumaxadmin\",trusted_connection=\"no\")\n cur = cnxn.cursor()\n df = psql.read_sql(query, cnxn)\n cnxn.close()\n #print(df)\n return df\n except Exception:\n print(\"VuMax PRD Query Error...!\")\n\n#3)Eof\n\n# Clean status helpers\n#Check if GPM is above the min threshold (helps to eliminate error values)\ndef gpm_Clean_Status(x):\n if x >= well_Info['Threshold_GPM']:\n return 1\n else:\n return 0\n\n#Checks if the tq perc clean is above the threshold \ndef tq_perc_Clean_Status(x):\n if x < well_Info['Threhold_Perc']:\n return 1\n else:\n return 0\n\n#Add 1 to the clean_Count column if tq_pctc_ewma is <= threshold -Good\ndef tq_perc_Clean_Count(x):\n if x == 1:\n return 1\n else:\n return 0\n\n#Cumulative Sum the clean_Count if tq_pctc_ewma is < threshold -Good\ndef tq_Clean_Count_Status(x):\n if x == 1:\n return x + 1\n else:\n return 0\n#..\n\ndef val_check(x):\n\tif(x == 1):\n\t\treturn 1\n\telse:\n\t\treturn 0\n\ndef val_check1(x):\n\tif(x == 1):\n\t\treturn 1\n\telse:\n\t\treturn 0\n\ndef val_check2(x):\n\tif(x >= 120):\n\t\treturn 1\n\telse:\n\t\treturn 0\n#..\n\ncont_Flag = 'y'\n\nwhile cont_Flag == 'y':\n\tprint('Hello Man!')\n\tflag = input(\"Continue? 
y or n\")\n\tcont_Flag = flag\n\n\n\n#Well info\npn = '659335'\nwellbore_num = '1'\ntl_num = '1'\ndistrict = 'EAGLE FORD'\nwellname = 'JJ HENRY IX M 7H'\nrig = 'H&P 374'\nstart_time = '7/12/2017 3:54:46 AM'\nend_time = '7/12/2017 8:25:00 AM'\n\nfile_name = 'Files\\\\' + district + '_' + 'CUC_Events.csv'\nexported_well = '\\nExported ' + '_' + wellname + 'CUC Analysis Successfully'\n\n# #Well Info Section----------------------------------------------------------------------------------------------------------------------\nwell_Info = {\n 'PN': pn, \n 'Wellbore_NUM': wellbore_num,\n\t'TL_NUM': tl_num , \n\t'district': district,\n\t'well_name' : wellname,\n\t'rig_name' : rig,\n 'Start_Date': start_time,\n 'End_Date': end_time,\n\t'file_name': file_name,\n 'Threshold_GPM': 550,\n 'Threhold_Perc': 0.32,\n 'Threshold_CNT': 120\n}\n\n#Create simplified from and where statements\nfrom_VMX_TimeLog = \"From timelog\" + well_Info['PN'] + \"Wellbore\" + well_Info['Wellbore_NUM'] + \"TimeLog\" + well_Info['TL_NUM']\n# print(from_VMX_TimeLog)\nwhere_Cond = \" Where DATETIME between '\" + well_Info['Start_Date'] +\"' and '\" + well_Info['End_Date'] + \"'\"\n# print(where_Cond)\n\n# Query VMX Event Data ---------------------------------------------------------------------------------------------------------------------\nvmx_Tag_Data_Query = \"\"\" \nDeclare @district varchar(254) ='\"\"\"+ well_Info['district'] +\"';\" + \"\"\"\nDeclare @well_name varchar(254) ='\"\"\"+ well_Info['well_name']+\"';\" + \"\"\"\nDeclare @rig_name varchar(254) ='\"\"\"+ well_Info['rig_name']+\"';\" + \"\"\"\n\n--Query Section--------------------------------------------------------------------------------------------------------------------------------\n; With cte_CUC_Analysis_p1 as(\n\tSelect \n\t\t--Get needed columns\n\t\tDATA_INDEX as 'Id', \n\t\t'district' = @district,\n\t\t'well_name' = @well_name,\n\t\t'rig_name' = @rig_name,\n \t\tDATETIME as 'date_time', \n\t\tDepth as 'bit_depth',\n\t\tHDTH as 'hole_depth',\n\t\tBPOS as 'block_heigth',\n\t\tHKLD as 'hookload',\n\t\tSTOR as 'surf_tq',\n\t\tRPM as 'rpm',\n\t\tPump1 as 'mp1',\n\t\tPump2 as 'mp2',\n\t\tCirc as 'gpm',\n\t\tSPPA as 'pump_pressure',\n\t\tfloor(StrokeCount) as 'mp_strokes',\n\t\tMudFlowOutPercent as'flow_out',\n\t\tActiveTankVolume as 'pit_volume',\n\t\tROPAvg as 'rop',\n\t\tSWOB as 'wob',\n\n\t\t--Rig states\n\t\tCase \n\t\t\twhen RIG_STATE = 0 then 'rotary drill'\n\t\t\twhen RIG_STATE = 1 then 'slide drill'\n\t\t\twhen RIG_STATE = 2 then 'in slips'\n\t\t\twhen RIG_STATE = 3 then 'ream'\n\t\t\twhen RIG_STATE = 4 then 'pump in'\n\t\t\twhen RIG_STATE = 5 then 'rotate in'\n\t\t\twhen RIG_STATE = 6 then 'trip in'\n\t\t\twhen RIG_STATE = 7 then 'back ream'\n\t\t\twhen RIG_STATE = 8 then 'pump out'\n\t\t\twhen RIG_STATE = 9 then 'rotate out'\n\t\t\twhen RIG_STATE = 10 then 'trip out'\n\t\t\twhen RIG_STATE = 11 then 'Rotate and circ'\n\t\t\twhen RIG_STATE = 12 then 'circ'\n\t\t\twhen RIG_STATE = 13 then 'rotate'\n\t\t\twhen RIG_STATE = 14 then 'stationary'\n\t\t\twhen RIG_STATE = 16 then 'no data'\n\t\t\twhen RIG_STATE = 17 then 'packed off'\n\t\t\twhen RIG_STATE = 18 then 'Pressure testing'\n\t\t\twhen RIG_STATE = 19 then 'auto slide drilling'\n\t\t\twhen RIG_STATE = 20 then 'air drilling' END as'rig_state' \"\"\" + from_VMX_TimeLog + where_Cond + \")\"\"\"\" Select * from cte_CUC_Analysis_p1\"\"\"\n#EOQ\n\n# Query - Retrieve VMX telemetry data-------------------------------------------\ndf_vmx_TL_Event_Data = 
pd.DataFrame(VMX_PRD_Read_Query(vmx_Tag_Data_Query))\n\n#Set column values in new variables\ntq = pd.Series(df_vmx_TL_Event_Data['surf_tq']).astype(float)\nhkld = pd.Series(df_vmx_TL_Event_Data['hookload']).astype(float)\nspp = pd.Series(df_vmx_TL_Event_Data['pump_pressure']).astype(float)\n\n#TQ 1-hr exponentially weighted moving functions\ndf_vmx_TL_Event_Data['tq_ewm'] = tq.ewm(span=720,min_periods=0,adjust=True,ignore_na=False).mean() #exp moving mean/time\ndf_vmx_TL_Event_Data['tq_pctc_ewma'] = tq.pct_change().ewm(span=720,min_periods=0,adjust=True,ignore_na=False).mean() #exp moving pct_change/time\ndf_vmx_TL_Event_Data['tq_pctc_ewma'] = df_vmx_TL_Event_Data['tq_pctc_ewma'] * 100 \ndf_vmx_TL_Event_Data['tq_ewma_stdev'] = tq.ewm(span=720,min_periods=0,adjust=True,ignore_na=False).std() #exp moving stdev/time \n\n#1-hr exponentially weighted moving functions\ndf_vmx_TL_Event_Data['hkld_ewm'] = hkld.ewm(span=720,min_periods=0,adjust=True,ignore_na=False).mean() #exp moving mean/time\ndf_vmx_TL_Event_Data['spp_ewm'] = spp.ewm(span=720,min_periods=0,adjust=True,ignore_na=False).mean() #exp moving mean/time\n\n#New Calc'd columns\ndf_vmx_TL_Event_Data['clean_gpm_status'] = df_vmx_TL_Event_Data['gpm'].map(gpm_Clean_Status) #Loop thru column and set status if GPM is above threshold limit\ndf_vmx_TL_Event_Data['clean_tq_perc_status'] = df_vmx_TL_Event_Data['tq_pctc_ewma'].map(tq_perc_Clean_Status) #Loop thru column and set status if tq % is above threshold limit\ndf_vmx_TL_Event_Data['clean_Count'] = df_vmx_TL_Event_Data['clean_tq_perc_status'].map(tq_perc_Clean_Count) #Loop thru column and count num of clean statuses\ndf_vmx_TL_Event_Data['clean_Count_Status'] = df_vmx_TL_Event_Data['clean_Count'].cumsum()\n\ndf = df_vmx_TL_Event_Data.loc[:,['clean_gpm_status','clean_tq_perc_status','clean_Count_Status']]\n\ndf_vmx_TL_Event_Data['clean_Final_Status'] = df['clean_gpm_status'].map(val_check)\ndf_vmx_TL_Event_Data['clean_Final_Status'] += df['clean_tq_perc_status'].map(val_check1)\ndf_vmx_TL_Event_Data['clean_Final_Status'] += df['clean_Count_Status'].map(val_check2)\n\n#Save district files to csv\nwith open(file_name, 'a') as f:\n df_vmx_TL_Event_Data.to_csv(f, header=False, index=False)\nprint(exported_well)\n","sub_path":"CUC Export Files 8_7_17/CUC Export Script_basic.py","file_name":"CUC Export Script_basic.py","file_ext":"py","file_size_in_byte":8181,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"595137435","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport json\nimport os.path\nfrom random import randint\nimport random\nfrom pprint import pprint\nfrom flask import Flask\nfrom flask import request\nfrom flask import make_response\n\n# Flask app should start in global layout\napp = Flask(__name__)\n\n@app.route('/webhook', methods=['POST'])\ndef webhook():\n req = request.get_json(silent=True, force=True)\n\n print(\"Request:\")\n print(json.dumps(req, indent=4))\n\n res = processRequest(req)\n\n res = json.dumps(res, indent=4)\n \n print(\"Response:\")\n print(res)\n \n r = make_response(res)\n r.headers['Content-Type'] = 'application/json'\n \n return r\n\ndef processRequest(req):\n if req.get(\"result\").get(\"action\") == \"getAgentName\":\n data = getAgentName(req)\n ctx = createContext(req, data)\n res = makeWebhookResult(data, ctx)\n \n return res\n \ndef createContext(req, data):\n input_data = req.get(\"result\").get(\"contexts\")\n context_name = 
str(input_data[0][\"name\"])\n context_lifespan = str(input_data[0][\"lifespan\"])\n\n # contextOut must serialize to a JSON array, so return an explicit list of contexts\n return [{\"name\":context_name, \"lifespan\":context_lifespan, \"parameters\":{\"tech-name\":data}}]\n\ndef makeWebhookResult(data, ctx):\n result = {}\n \n result['speech'] = data\n result['displayText'] = data\n result['data'] = data\n result['contextOut'] = ctx\n result['source'] = \"Custom Web Hook\"\n \n return result\n\ndef getAgentName(req):\n d = {'rg123q':'Ron Howard', 'vs098t':'Vladimir Putin', 'dd567p':'Dilip Kumar', 'vc345w': 'Vasim S Akram', 'dp345e':'Divya Rana'}\n result = req.get(\"result\")\n parameters = result.get(\"parameters\")\n agent_id = parameters.get(\"tech-id\")\n if agent_id is None:\n return None\n\n # use .get() so an unknown agent id returns None instead of raising KeyError\n return d.get(agent_id)\n\nif __name__ == '__main__':\n port = int(os.getenv('PORT', 5000))\n\n print (\"Starting app on port %d\" % port)\n\n app.run(debug=False, port=port, host='0.0.0.0')\n\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1924,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"543569327","text":"from binarytree import BinaryTree\n\n# create a sample tree, bt\n\nbt = BinaryTree(1)\nbt.insertLeft(2)\nbt.insertRight(5)\n\nl1 = bt.left_node\nl1.insertLeft(3)\nl1.insertRight(4)\n\nr1 = bt.right_node\nr1.insertLeft(6)\nr1.insertRight(7)\n\n\nbt.bfs()\n","sub_path":"trees/bfs.py","file_name":"bfs.py","file_ext":"py","file_size_in_byte":237,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"93846557","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('telefonia', '0001_initial'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='Usuario',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('nome', models.CharField(max_length=200)),\n ('login', models.CharField(max_length=20)),\n ('senha', models.CharField(max_length=20)),\n ('email', models.EmailField(max_length=254, null=True, blank=True)),\n ('consulta', models.BooleanField(default=True)),\n ],\n ),\n migrations.AlterModelOptions(\n name='problemas',\n options={'verbose_name_plural': 'problemas'},\n ),\n migrations.AlterModelOptions(\n name='ramal',\n options={'verbose_name_plural': 'ramais'},\n ),\n migrations.AlterModelOptions(\n name='status',\n options={'verbose_name_plural': 'status'},\n ),\n ]\n","sub_path":"telefonia/migrations/0002_auto_20150903_1422.py","file_name":"0002_auto_20150903_1422.py","file_ext":"py","file_size_in_byte":1190,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"613476749","text":"#!/usr/bin/env python3\n\n'''Imagination Technologies context priority extension for EGL.\n\nWith this extension, contexts may be created with hints (not\nrequirements) as to the priority of its processing, and the actual\npriority assigned may be queried.\n\nhttp://www.khronos.org/registry/egl/extensions/IMG/EGL_IMG_context_priority.txt\n\n'''\n# Copyright (c) 2012-13 Tim Pederick.\n#\n# This file is part of Pegl.\n#\n# Pegl is free software: you can redistribute it and/or modify it\n# under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# Pegl is distributed in the hope that it will be useful, but 
WITHOUT\n# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY\n# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public\n# License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with Pegl. If not, see <http://www.gnu.org/licenses/>.\n\n# Standard library imports.\nfrom collections import namedtuple\n\n# Local imports.\nfrom ..attribs.context import ContextAttribs\nfrom ..context import Context\n\n# New context attribute.\nPriorityLevels = namedtuple('PriorityLevels_tuple',\n ('HIGH', 'MEDIUM', 'LOW')\n )(0x3101, 0x3102, 0x3103)\nContextAttribs.extend('PRIORITY_LEVEL', 0x3100, PriorityLevels,\n PriorityLevels.MEDIUM)\n\n# New Context property, for querying the new attribute in ContextAttribs.\ndef priority(self):\n '''Get the actual priority level that this context was assigned.\n\n Returns:\n A value from the PriorityLevels tuple.\n\n '''\n return self._attr(ContextAttribs.PRIORITY_LEVEL)\nContext.priority = property(priority)\n","sub_path":"pegl/ext/img_contextpriority.py","file_name":"img_contextpriority.py","file_ext":"py","file_size_in_byte":1793,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"181388803","text":"class Drone():\n def __init__(self, drone_id):\n # Non-ROS variables\n self.drone_id = drone_id\n self.twist = [0, 0, 0, 0, 1500, 1500, 1500, 1500]\n \n # ROS state variables\n self.armed = False\n self.flight_mode = 0\n self.voltage = 0\n self.current = 0\n self.battery_remaining = 0\n\n def state_callback(self, data):\n self.armed = data.armed\n self.flight_mode = data.mode\n\n def battery_callback(self, data):\n self.voltage = data.voltage\n self.current = data.current\n self.battery_remaining = data.remaining\n","sub_path":"scripts/drone.py","file_name":"drone.py","file_ext":"py","file_size_in_byte":612,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"387827316","text":"from typing import List, Tuple\n\n\ndef hanoi(disk: int, src: str, dest: str, support: str):\n if disk < 1:\n return\n\n hanoi(disk-1, src, support, dest)\n print(f'move {disk} from {src} to {dest}')\n hanoi(disk-1, support, dest, src)\n\n\ndef get_hanoi_movement(disk: int, src: str, dest: str, support: str) -> List[Tuple[int, str, str]]:\n result = []\n\n def _hanoi(disk: int, src: str, dest: str, support: str):\n if disk < 1:\n return\n\n _hanoi(disk-1, src, support, dest)\n result.append((disk, src, dest))\n _hanoi(disk-1, support, dest, src)\n\n _hanoi(disk, src, dest, support)\n return result\n\n\nif __name__ == '__main__':\n hanoi(3, 'A', 'C', 'B')\n for r in get_hanoi_movement(4, 'A', 'C', 'B'):\n print(r)\n\n\n","sub_path":"algo_sample/61_quiz_hanoi/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":778,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"320249649","text":"\"\"\"\nAuthor: Gangeshwar\nDescription: Training file for Technique Classification.\n\"\"\"\n\nimport pickle\n\nfrom keras.callbacks import ModelCheckpoint\nfrom keras.optimizers import Adam, RMSprop, SGD\nfrom sklearn.model_selection import train_test_split\n\nfrom models import *\n\ndev_template_labels_file = \"datasets/dev-task-TC-template.out\"\n\ndata_path = './processed_data/'\n\n\ndef read_predictions_from_file(filename):\n \"\"\"\n Reader for the gold file and the template output file.\n Return values are four arrays with article ids, labels\n (or ? 
in the case of a template file),\n the beginning of a fragment,\n and the end of a fragment.\n \"\"\"\n articles_id, span_starts, span_ends, gold_labels = ([], [], [], [])\n with open(filename, \"r\") as f:\n for row in f.readlines():\n article_id, gold_label, span_start, span_end = row.rstrip().split(\"\\t\")\n articles_id.append(article_id)\n gold_labels.append(gold_label)\n span_starts.append(span_start)\n span_ends.append(span_end)\n return articles_id, span_starts, span_ends, gold_labels\n\n\ndef load_data():\n train_x = pickle.load(open(data_path + 'train_x.p', 'rb'))\n train_seq_len = pickle.load(open(data_path + 'train_seq_len.p', 'rb'))\n train_y = pickle.load(open(data_path + 'train_y.p', 'rb'))\n dev_x = pickle.load(open(data_path + 'dev_x.p', 'rb'))\n dev_seq_len = pickle.load(open(data_path + 'dev_seq_len.p', 'rb'))\n GloVe_Embeddings = pickle.load(open(data_path + 'GloVe_Embeddings.p', 'rb'))\n # print(train_y)\n return train_x, train_seq_len, train_y, dev_x, dev_seq_len, GloVe_Embeddings\n\n\ndef create_split():\n pass\n\n\nindex2label = {\n 0: 'Appeal_to_Authority',\n 1: 'Appeal_to_fear-prejudice',\n 2: 'Bandwagon,Reductio_ad_hitlerum',\n 3: 'Black-and-White_Fallacy',\n 4: 'Causal_Oversimplification',\n 5: 'Doubt',\n 6: 'Exaggeration,Minimisation',\n 7: 'Flag-Waving',\n 8: 'Loaded_Language',\n 9: 'Name_Calling,Labeling',\n 10: 'Repetition',\n 11: 'Slogans',\n 12: 'Thought-terminating_Cliches',\n 13: 'Whataboutism,Straw_Men,Red_Herring'\n}\nif __name__ == '__main__':\n train_x, train_seq_len, train_y, dev_x, dev_seq_len, embeddings = load_data()\n\n # Split the data\n train_x, test_x, train_seq_len, test_seq_len, train_y, test_y = train_test_split(train_x,\n train_seq_len,\n train_y,\n test_size=0.10,\n shuffle=True, stratify=train_y)\n\n # s = int(train_x.shape[0] * 0.90)\n # print(s, train_x.shape[0])\n #\n # test_x = train_x[s:]\n # train_x = train_x[:s]\n #\n # test_seq_len = train_seq_len[s:]\n # train_seq_len = train_seq_len[:s]\n #\n # test_y = train_y[s:]\n # train_y = train_y[:s]\n\n model = model_LSTM(embeddings)\n\n lr = 0.0001\n bz = 256\n epochs = 300\n\n opt = SGD(0.01)\n opt = Adam(lr)\n # print(str(opt))\n # exit()\n model_name = 'text_LSTM_Adam_lr%s_bz%s' % (lr, bz)\n model_path = 'models/%s' % (model_name)\n checkpoint = ModelCheckpoint('%s.{epoch:02d}.hdf5' % (model_path), monitor='loss', verbose=1,\n save_best_only=False, mode='auto')\n\n model.compile(loss='categorical_crossentropy', optimizer=opt, metrics=['acc'])\n try:\n model.fit(train_x, train_y, validation_data=[test_x, test_y], epochs=epochs,\n batch_size=bz,\n shuffle=True, callbacks=[checkpoint])\n except KeyboardInterrupt:\n # allow stopping training early and continue to epoch selection below\n pass\n\n epoch = input(\"\\n\\nWhich epoch to load?\\nAns: \")\n epoch = int(epoch)\n load_model_path = '%s.%02d.hdf5' % (model_path, epoch)\n print('Loading model - ', load_model_path)\n model = load_model(load_model_path, custom_objects={'loss': 'categorical_crossentropy'})\n # print(model.summary())\n predictions = model.predict(dev_x)\n predictions = predictions.argmax(axis=1)\n\n # writing predictions to file\n task_TC_output_file = \"model-output-TC.txt\"\n dev_article_ids, dev_span_starts, dev_span_ends, dev_labels = read_predictions_from_file(dev_template_labels_file)\n\n with open(task_TC_output_file, \"w\") as fout:\n for article_id, prediction, span_start, span_end in zip(dev_article_ids, predictions, dev_span_starts,\n dev_span_ends):\n fout.write(\"%s\\t%s\\t%s\\t%s\\n\" % (article_id, index2label[prediction], span_start, span_end))\n print(\"Predictions written to file \" + 
task_TC_output_file)\n","sub_path":"train_tc.py","file_name":"train_tc.py","file_ext":"py","file_size_in_byte":4732,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"58100750","text":"# -*- coding: utf-8 -*-\n__author__ = \"dzt\"\n__date__ = \"2019/10/15\"\nfrom django.conf.urls import url\nfrom .views import CSDataInfoView, CSAreaStatisticalTextView, CSAreaStatisticalTextExportView, CSDataInfoPush, \\\n CSDataInfoUpload, CSDataInfoExportView, CSDataInfoExportImageView, \\\n CSDataInfoExportCompressImageView, CSInfoDelView\n\nurlpatterns = [\n url(r'^info/$', CSDataInfoView.as_view(), name='CS_info'),\n url(r'^csDataInfoUpload/$', CSDataInfoUpload.as_view()),\n url(r'^csAreaStatisticalText/$', CSAreaStatisticalTextView.as_view(), name='CS_area_statistical_text'),\n url(r'^csAreaStatisticalTextExport/$', CSAreaStatisticalTextExportView.as_view(),\n name='CS_area_statistical_text_export'),\n url(r'^push/$', CSDataInfoPush.as_view()),\n url(r'^csExport/$', CSDataInfoExportView.as_view(), name='CS_Export'),\n url(r'^csExportImage/$', CSDataInfoExportImageView.as_view(), name='CS_Export_image'),\n url(r'^csExportCompressImage/$', CSDataInfoExportCompressImageView.as_view(), name='CS_Export_compress_image'),\n url(r'^csDel/$', CSInfoDelView.as_view(), name='CS_del'),\n\n]\n","sub_path":"apps/cs_data_info/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1116,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"566377688","text":"import json\nimport subprocess\n\nfrom keywords.LiteServBase import LiteServBase\nfrom keywords.constants import BINARY_DIR\nfrom keywords.exceptions import LiteServError\nfrom keywords.utils import version_and_build\nfrom keywords.utils import log_info\nfrom keywords.utils import log_r\n\n\nclass LiteServiOS(LiteServBase):\n\n def __init__(self, version_build, host, port, storage_engine):\n\n # Initialize baseclass properties\n super(LiteServiOS, self).__init__(version_build, host, port, storage_engine)\n\n def download(self):\n \"\"\"\n 1. Check to see if package is downloaded already. If so, return\n 2. Download the LiteServ package from latest builds to 'deps/binaries'\n 3. 
Unzip the packages and make the binary executable\n \"\"\"\n raise NotImplementedError(\"iOS not a part of build yet\")\n\n def install(self):\n \"\"\"Installs / launches LiteServ on iOS device\n Warning: Only works with a single device at the moment\n \"\"\"\n\n if self.storage_engine != \"SQLite\":\n raise LiteServError(\"https://github.com/couchbaselabs/liteserv-ios/issues/1\")\n\n package_name = \"couchbase-lite-ios-liteserv-{}.app\".format(self.version_build)\n app_path = \"{}/{}\".format(BINARY_DIR, package_name)\n log_info(\"Installing: {}\".format(app_path))\n\n # install app / launch app to connected device\n output = subprocess.check_output([\n \"ios-deploy\", \"--justlaunch\", \"--bundle\", app_path\n ])\n log_info(output)\n\n bundle_id = \"com.couchbase.LiteServ-iOS\"\n output = subprocess.check_output([\"ios-deploy\", \"--list_bundle_id\"])\n log_info(output)\n\n if bundle_id not in output:\n raise LiteServError(\"Could not install LiteServ-iOS\")\n\n self.stop()\n\n def remove(self):\n \"\"\"\n Remove the iOS app from the connected device\n \"\"\"\n log_info(\"Removing LiteServ-iOS\")\n\n bundle_id = \"com.couchbase.LiteServ-iOS\"\n output = subprocess.check_output([\n \"ios-deploy\", \"--uninstall_only\", \"--bundle_id\", bundle_id\n ])\n log_info(output)\n\n # Check that removal is successful\n output = subprocess.check_output([\"ios-deploy\", \"--list_bundle_id\"])\n log_info(output)\n\n if bundle_id in output:\n raise LiteServError(\"LiteServ-iOS is still present after uninstall\")\n\n def start(self, logfile_name):\n \"\"\"\n 1. Starts a LiteServ with logging to provided logfile file object.\n The running LiteServ process will be stored in the self.process property.\n 2. The method will poll on the endpoint to make sure LiteServ is available.\n 3. The expected version will be compared with the version reported by http://<host>:<port>\n 4. Return the url of the running LiteServ\n \"\"\"\n\n if self.storage_engine != \"SQLite\":\n raise NotImplementedError(\"Need to make sure to support other storage types\")\n\n self._verify_not_running()\n\n if self.port == 59850:\n raise LiteServError(\"On iOS, port 59850 is reserved for the admin port\")\n\n liteserv_admin_url = \"http://{}:59850\".format(self.host)\n log_info(\"Starting LiteServ: {}\".format(liteserv_admin_url))\n\n data = {\n \"port\": int(self.port)\n }\n\n resp = self.session.put(\"{}/start\".format(liteserv_admin_url), data=json.dumps(data))\n log_r(resp)\n resp.raise_for_status()\n\n self._verify_launched()\n\n return \"http://{}:{}\".format(self.host, self.port)\n\n def _verify_launched(self):\n \"\"\" Poll on expected http://<host>:<port> until it is reachable\n Assert that the response contains the expected version information\n \"\"\"\n\n resp_obj = self._wait_until_reachable()\n log_info(resp_obj)\n\n if resp_obj[\"vendor\"][\"name\"] != \"Couchbase Lite (Objective-C)\":\n raise LiteServError(\"Unexpected LiteServ platform running!\")\n\n version, build = version_and_build(self.version_build)\n expected_version = \"{} (build {})\".format(version, build)\n running_version = resp_obj[\"vendor\"][\"version\"]\n\n if expected_version != running_version:\n raise LiteServError(\"Expected version: {} does not match running version: {}\".format(expected_version, running_version))\n\n def stop(self):\n \"\"\"\n 1. Flush and close the logfile capturing the LiteServ output\n 2. Kill the LiteServ process\n 3. 
Verify that no service is running on http://<host>:<port>\n \"\"\"\n\n log_info(\"Stopping LiteServ: http://{}:{}\".format(self.host, self.port))\n\n liteserv_admin_url = \"http://{}:59850\".format(self.host)\n log_info(\"Stopping LiteServ: {}\".format(liteserv_admin_url))\n resp = self.session.put(\"{}/stop\".format(liteserv_admin_url))\n log_r(resp)\n resp.raise_for_status()\n\n self._verify_not_running()\n","sub_path":"keywords/LiteServiOS.py","file_name":"LiteServiOS.py","file_ext":"py","file_size_in_byte":4957,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"531808707","text":"class HangMan:\r\n def __init__(self):\r\n self.errorCount = 0\r\n self.maxErrorCount = 5\r\n self.usersAnswer = \"\"\r\n self.currentAnswerIndex = 0\r\n\r\n def start(self):\r\n print(\"Enter the question!:\")\r\n self.question = self.validateQuestion(input())\r\n print(\"Enter the answer!:\")\r\n self.answer = self.validateAnswer(input())\r\n print(\"\\n\\n\\n\\n\\nThe game has started!\\n\")\r\n print(\"Question: \" + self.question)\r\n\r\n def validateQuestion(self, question):\r\n if len(question) > 1:\r\n return question\r\n raise ValueError(\"Enter the question!\")\r\n\r\n def validateAnswer(self, answer):\r\n if len(answer) > 1:\r\n return answer\r\n raise ValueError(\"Enter the answer!\")\r\n\r\n def getRemainingError(self):\r\n return self.maxErrorCount - self.errorCount\r\n\r\n def addAnswerChar(self, answerPart):\r\n self.usersAnswer += answerPart\r\n return self.usersAnswer == self.answer\r\n\r\n def validateAnswerChar(self, answerPart):\r\n return len(answerPart) == 1\r\n\r\n def end(self):\r\n print(\"Congratulations, you guessed the answer!!\\n \" + self.usersAnswer)\r\n\r\ntry:\r\n hangman = HangMan()\r\n hangman.start()\r\n\r\n while hangman.usersAnswer != hangman.answer:\r\n answerPart = input(\"Enter letter \" + str(hangman.currentAnswerIndex + 1) + \" of the answer: \\n\")\r\n if not hangman.validateAnswerChar(answerPart):\r\n print(\"Please enter a single character!\")\r\n continue\r\n\r\n if hangman.answer[hangman.currentAnswerIndex] != answerPart:\r\n hangman.errorCount += 1\r\n if hangman.errorCount == hangman.maxErrorCount:\r\n print(\"You lost!\")\r\n break\r\n else:\r\n print(str(hangman.getRemainingError()) + \" wrong guesses left.\")\r\n else:\r\n if hangman.addAnswerChar(answerPart):\r\n hangman.end()\r\n hangman.currentAnswerIndex += 1\r\n\r\nexcept ValueError as error:\r\n print('Error: ' + repr(error))\r\n# except Exception as other:\r\n# print(\"An unknown error occurred: \" + repr(other))\r\n","sub_path":"HomeWork/Day5.py","file_name":"Day5.py","file_ext":"py","file_size_in_byte":2138,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"612242706","text":"from typing import Optional\nfrom uuid import UUID\n\n\nclass EntityNotFound(RuntimeError):\n \"\"\"Base class for queried entities not being found\"\"\"\n entity_id: UUID\n\n def __init__(self, entity_id: UUID, message: Optional[str] = None):\n self.entity_id = entity_id\n if message is None:\n message = f\"Entity '{entity_id}' not found\"\n super().__init__(message)\n\n","sub_path":"src/myapp/application/adapter/spi/persistence/exceptions/entity_not_found.py","file_name":"entity_not_found.py","file_ext":"py","file_size_in_byte":393,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"442939528","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[1]:\n\n\nimport torch\nimport torch.nn as nn\nimport 
torchvision.models as models\nfrom torch.nn.utils.rnn import pack_padded_sequence\n\n\n# In[2]:\n\n\nclass EncoderCNN(nn.Module):\n def __init__(self,embed_size):\n super(EncoderCNN,self).__init__()\n resnet = models.resnet152(pretrained=True)\n modules = list(resnet.children())[:-1]\n self.resnet = nn.Sequential(*modules)\n self.linear = nn.Linear(resnet.fc.in_features , embed_size)\n self.batch_norm = nn.BatchNorm1d(embed_size, momentum=0.01)\n \n def forward(self,images):\n with torch.no_grad():\n features = self.resnet(images)\n features = features.reshape(features.size(0),-1)\n features = self.batch_norm(self.linear(features))\n return features\n\n\n# In[4]:\n\n\nclass DecoderRNN(nn.Module):\n def __init__(self,vocab_size,embed_size,hidden_size,num_layers,max_seq_length = 20):\n super(DecoderRNN,self).__init__()\n self.embed = nn.Embedding(vocab_size , embed_size)\n self.lstm = nn.LSTM(embed_size , hidden_size , num_layers ,batch_first = True )\n self.linear = nn.Linear(hidden_size , vocab_size)\n self.max_seq_length = max_seq_length\n \n def forward(self,features,captions,lengths):\n embeddings = self.embed(captions)\n embeddings = torch.cat((features.unsqueeze(1) , embeddings),1)\n packed = pack_padded_sequence(embeddings , lengths , batch_first = True)\n hiddens,_ = self.lstm(packed)\n outputs = self.linear(hiddens[0])\n return outputs\n \n def sample(self,features,states=None):\n sample_ids = []\n inputs = features.unsqueeze(1)\n for i in range(self.max_seq_length):\n hiddens ,states = self.lstm(inputs,states)\n outputs = self.linear(hiddens.squeeze(1))\n _, predicted = outputs.max(1)\n sample_ids.append(predicted)\n inputs = self.embed(predicted)\n inputs = inputs.unsqueeze(1)\n sample_ids = torch.stack(sample_ids,1)\n return sample_ids\n \n\n","sub_path":"model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":2116,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"255054034","text":"# usr/bin/python3\n# * encoding=utf_8*\n\nimport json\nimport numpy as np\nfrom preprocessing_fr import RangeRaw, SingleRaw\nimport time\nimport tensorflow as tf\nimport tensorflow_hub as hub\nfrom tensorflow.python.saved_model import tag_constants\nfrom sklearn.metrics import accuracy_score, f1_score, precision_score, recall_score, confusion_matrix\nfrom sklearn.model_selection import train_test_split\nimport bert # install with !pip install bert-tensorflow\nfrom bert import run_classifier\nfrom bert import optimization\nfrom bert import tokenization\n\n\nclass ClassifierModel():\n \"\"\"\n Class that stores all steps from the model to ensure train and test\n are processed the same way\n Shall be instantiated only once\n - self.file_path, index transaction => preprocessing.singleRaw\n - self.test_size : test percentage within dataset\n - self.seed : random seed for sampling\n\n \"\"\"\n TEXT_RAW = 0\n TEXT_CLEAN = 1\n\n # MAX_SEQ_LENGTH = 128\n # BATCH_SIZE = 10\n # LEARNING_RATE = 2e-5\n # NUM_TRAIN_EPOCHS = 3.0\n # # Warmup is a period of time where hte learning rate\n # # is small and gradually increases--usually helps training.\n # WARMUP_PROPORTION = 0.1\n # # Model configs\n # SAVE_CHECKPOINTS_STEPS = 500\n # SAVE_SUMMARY_STEPS = 100\n #\n #BERT_MODEL_HUB = \"https://tfhub.dev/google/bert_uncased_L-12_H-768_A-12/1\"\n BERT_MODEL_HUB = None\n #\n # OUTPUT_DIR = 'output-amazon-test' # @param {type:\"string\"}\n # # @markdown Whether or not to clear/delete the directory and create a new one\n # DO_DELETE = False # @param 
{type:\"boolean\"}\n\n def __init__(self):\n print(\"Model instantiated\")\n self.params = dict()\n self.text_preprocessing = None\n self.algo_name = \"\"\n #self.algo_accuracy = np.float64(0.0)\n #self.algo_recall = np.float64(0.0)\n #self.algo_precision = np.float64(0.0)\n #self.algo_f1_score = np.float64(0.0)\n self.is_set_up = False\n self.classifier_trained = False\n self.model_path = None\n self.test_size = None\n self.label_list = []\n self.tokenizer = None\n self.estimator = None\n\n def initialize(self,\n model_path,\n text_preprocessing=None,\n test_size=0.2, param_file='default_params.json'):\n \"\"\"\n Instance method:\n Its purpose is to:\n - set the output directory of the training\n - create tokenizer\n\n\n \"\"\"\n with open(param_file, 'r') as f:\n self.params = json.load(f)\n\n if self.params[\"DO_DELETE\"]:\n try:\n tf.gfile.DeleteRecursively(self.params[\"OUTPUT_DIR\"])\n except:\n # Doesn't matter if the directory didn't exist\n pass\n tf.gfile.MakeDirs(self.params[\"OUTPUT_DIR\"])\n print('***** Model output directory: {} *****'.format(self.params[\"OUTPUT_DIR\"]))\n\n if not text_preprocessing:\n self.text_preprocessing = ClassifierModel.TEXT_RAW\n else:\n self.text_preprocessing = ClassifierModel.TEXT_CLEAN\n self.is_set_up = True\n self.classifier_trained = False\n self.model_path = model_path\n self.test_size = test_size\n ClassifierModel.BERT_MODEL_HUB = self.params['BERT_MODEL_HUB']\n self.tokenizer = self.create_tokenizer_from_hub_module()\n print(\"init done\")\n\n def sentences_to_features(self, sentences, labels):\n \"\"\" one list with the sentences and one with the corresponding labels\n If prediction : put all labels to 0\n Transform the string into bert features\n \"\"\"\n input_examples = [run_classifier.InputExample(guid=\"\", text_a=s, text_b=None, label=l) for s, l in\n zip(sentences, labels)] # here, \"\" is just a dummy label\n input_features = run_classifier.convert_examples_to_features(input_examples, self.label_list,\n self.params[\"MAX_SEQ_LENGTH\"],\n self.tokenizer)\n return input_features\n\n def create_tokenizer_from_hub_module(self):\n \"\"\"Get the vocab file and casing info from the Hub module.\"\"\"\n with tf.Graph().as_default():\n bert_module = hub.Module(self.params[\"BERT_MODEL_HUB\"])\n tokenization_info = bert_module(signature=\"tokenization_info\", as_dict=True)\n with tf.Session() as sess:\n vocab_file, do_lower_case = sess.run([tokenization_info[\"vocab_file\"],\n tokenization_info[\"do_lower_case\"]])\n return bert.tokenization.FullTokenizer(vocab_file=vocab_file, do_lower_case=do_lower_case)\n\n \"\"\"The four next methods are necessary for the estimator\"\"\"\n @staticmethod\n def create_model(is_predicting, input_ids, input_mask, segment_ids, labels,\n num_labels):\n \"\"\"Creates a classification model.\"\"\"\n\n bert_module = hub.Module(\n ClassifierModel.BERT_MODEL_HUB,\n trainable=True)\n bert_inputs = dict(\n input_ids=input_ids,\n input_mask=input_mask,\n segment_ids=segment_ids)\n bert_outputs = bert_module(\n inputs=bert_inputs,\n signature=\"tokens\",\n as_dict=True)\n\n # Use \"pooled_output\" for classification tasks on an entire sentence.\n # Use \"sequence_outputs\" for token-level output.\n output_layer = bert_outputs[\"pooled_output\"]\n\n hidden_size = output_layer.shape[-1].value\n\n # Create our own layer to tune for politeness data.\n output_weights = tf.get_variable(\n \"output_weights\", [num_labels, hidden_size],\n initializer=tf.truncated_normal_initializer(stddev=0.02))\n\n output_bias = 
tf.get_variable(\n \"output_bias\", [num_labels], initializer=tf.zeros_initializer())\n\n with tf.variable_scope(\"loss\"):\n # Dropout helps prevent overfitting\n output_layer = tf.nn.dropout(output_layer, keep_prob=0.9)\n\n logits = tf.matmul(output_layer, output_weights, transpose_b=True)\n logits = tf.nn.bias_add(logits, output_bias)\n log_probs = tf.nn.log_softmax(logits, axis=-1)\n\n # Convert labels into one-hot encoding\n one_hot_labels = tf.one_hot(labels, depth=num_labels, dtype=tf.float32)\n\n predicted_labels = tf.squeeze(tf.argmax(log_probs, axis=-1, output_type=tf.int32))\n # If we're predicting, we want predicted labels and the probabilities.\n if is_predicting:\n return (predicted_labels, log_probs)\n\n # If we're train/eval, compute loss between predicted and actual label\n per_example_loss = -tf.reduce_sum(one_hot_labels * log_probs, axis=-1)\n loss = tf.reduce_mean(per_example_loss)\n return (loss, predicted_labels, log_probs)\n\n # model_fn_builder actually creates our model function\n # using the passed parameters for num_labels, learning_rate, etc.\n @staticmethod\n def model_fn_builder(num_labels, learning_rate, num_train_steps,\n num_warmup_steps):\n \"\"\"Returns `model_fn` closure for Estimator.\"\"\"\n\n def model_fn(features, labels, mode, params):\n \"\"\"The `model_fn` for Estimator.\"\"\"\n\n input_ids = features[\"input_ids\"]\n input_mask = features[\"input_mask\"]\n segment_ids = features[\"segment_ids\"]\n label_ids = features[\"label_ids\"]\n\n is_predicting = (mode == tf.estimator.ModeKeys.PREDICT)\n\n # TRAIN and EVAL\n if not is_predicting:\n\n (loss, predicted_labels, log_probs) = ClassifierModel.create_model(\n is_predicting, input_ids, input_mask, segment_ids, label_ids, num_labels)\n\n train_op = bert.optimization.create_optimizer(\n loss, learning_rate, num_train_steps, num_warmup_steps, use_tpu=False)\n\n # Calculate evaluation metrics.\n def metric_fn(label_ids, predicted_labels):\n accuracy = tf.metrics.accuracy(label_ids, predicted_labels)\n f1_score = tf.contrib.metrics.f1_score(\n label_ids,\n predicted_labels)\n auc = tf.metrics.auc(\n label_ids,\n predicted_labels)\n recall = tf.metrics.recall(\n label_ids,\n predicted_labels)\n precision = tf.metrics.precision(\n label_ids,\n predicted_labels)\n true_pos = tf.metrics.true_positives(\n label_ids,\n predicted_labels)\n true_neg = tf.metrics.true_negatives(\n label_ids,\n predicted_labels)\n false_pos = tf.metrics.false_positives(\n label_ids,\n predicted_labels)\n false_neg = tf.metrics.false_negatives(\n label_ids,\n predicted_labels)\n\n return {\n \"eval_accuracy\": accuracy,\n \"f1_score\": f1_score,\n \"auc\": auc,\n \"precision\": precision,\n \"recall\": recall,\n \"true_positives\": true_pos,\n \"true_negatives\": true_neg,\n \"false_positives\": false_pos,\n \"false_negatives\": false_neg\n }\n\n eval_metrics = metric_fn(label_ids, predicted_labels)\n\n if mode == tf.estimator.ModeKeys.TRAIN:\n return tf.estimator.EstimatorSpec(mode=mode,\n loss=loss,\n train_op=train_op)\n else:\n return tf.estimator.EstimatorSpec(mode=mode,\n loss=loss,\n eval_metric_ops=eval_metrics)\n else:\n (predicted_labels, log_probs) = ClassifierModel.create_model(\n is_predicting, input_ids, input_mask, segment_ids, label_ids, num_labels)\n\n predictions = {\n 'probabilities': log_probs,\n 'labels': predicted_labels\n }\n return tf.estimator.EstimatorSpec(mode, predictions=predictions)\n\n # Return the actual model function in the closure\n return model_fn\n\n def run_config_builder(self):\n return 
tf.estimator.RunConfig(model_dir=self.params[\"OUTPUT_DIR\"],\n save_summary_steps=self.params[\"SAVE_SUMMARY_STEPS\"],\n save_checkpoints_steps=self.params[\"SAVE_CHECKPOINTS_STEPS\"])\n\n def estimator_builder(self, model_fn, run_config):\n return tf.estimator.Estimator(model_fn=model_fn, config=run_config,\n params={\"batch_size\": self.params[\"BATCH_SIZE\"]})\n\n def train(self, X, y):\n \"\"\"\n Split X, y as train and test sets. Then convert them to features, classify and score\n Input:\n - X: pd.Series containing text\n - y: pd.Series containing labels\n\n Shows train and test score to infer whether model is too simple\n or overfitted\n \"\"\"\n tf.logging.set_verbosity(tf.logging.INFO) #comment if you don't want to display the information during training/evaluation\n\n X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=self.test_size, random_state=42, stratify=y)\n\n self.label_list = y.unique()\n\n train_features = self.sentences_to_features(X_train, y_train)\n test_features = self.sentences_to_features(X_test, y_test)\n print(\"transformation to features done\")\n\n num_train_steps = int(len(train_features) / self.params[\"BATCH_SIZE\"] * self.params[\"NUM_TRAIN_EPOCHS\"])\n num_warmup_steps = int(num_train_steps * self.params[\"WARMUP_PROPORTION\"])\n\n run_config = self.run_config_builder()\n model_fn = self.model_fn_builder(len(self.label_list), self.params[\"LEARNING_RATE\"], num_train_steps,\n num_warmup_steps)\n self.estimator = self.estimator_builder(model_fn, run_config)\n\n print(\"creation of all stuff needed done\")\n\n train_input_fn = bert.run_classifier.input_fn_builder(features=train_features,\n seq_length=self.params[\"MAX_SEQ_LENGTH\"],\n is_training=True, drop_remainder=False)\n\n print(f'Beginning Training!')\n current_time = time.time()\n self.estimator.train(input_fn=train_input_fn, max_steps=num_train_steps)\n print(\"Training took time :\", time.time() - current_time, \"s, or \", (time.time() - current_time)/60, \"min\")\n\n self.classifier_trained = True\n\n test_input_fn = run_classifier.input_fn_builder(features=test_features,\n seq_length=self.params[\"MAX_SEQ_LENGTH\"],\n is_training=False, drop_remainder=False)\n\n #apply model on test set and print all metrics\n print(\"Evaluating\")\n self.estimator.evaluate(input_fn=test_input_fn, steps=None)\n\n def predict_estimator(self, X, y=None, labels=[0, 1]):\n \"\"\"\n predicts output based on training done during former step\n Input:\n - X: list or pd.Series of sentences\n - y: list or pd.Series of labels (if not provided, only the list of predictions is returned)\n\n Output:\n - prediction\n \"\"\"\n # throw an exception if classifier is not trained\n if not self.classifier_trained:\n raise Exception(\"Train estimator first\")\n if len(X) == 1: # predict doesn't work if only one element\n X.append(\"\")\n labels = labels\n result = []\n input_features = self.sentences_to_features(X, [0 for i in range(len(X))])\n predict_input_fn = run_classifier.input_fn_builder(features=input_features,\n seq_length=self.params[\"MAX_SEQ_LENGTH\"],\n is_training=False, drop_remainder=False)\n predictions = self.estimator.predict(predict_input_fn)\n\n # output : tuple (sentence, probability of each class, predicted class)\n for sentence, prediction in zip(X, predictions):\n result.append((sentence, np.exp(prediction['probabilities']), labels[prediction['labels']]))\n\n if y is not None:\n pred = [result[i][2] for i in range(len(result))]\n print(\"Accuracy: %s\" % accuracy_score(y, pred))\n 
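# Editor's note (hedged, not part of the original record): the precision/recall/F1
# calls below use sklearn's default average='binary', which assumes a two-class
# label list such as the default labels=[0, 1] in this method's signature; for a
# multi-class label_list an explicit average would be needed, e.g.:
#   print("Precision: %s" % precision_score(y, pred, average='macro'))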
print(\"Precision: %s\" % precision_score(y, pred))\n print(\"Recall: %s\" % recall_score(y, pred))\n print(\"f1: %s\" % f1_score(y, pred))\n print(confusion_matrix(y, pred))\n\n return result\n\n \"\"\" necessary to export the model\"\"\"\n def serving_input_fn(self):\n label_ids = tf.placeholder(tf.int32, [None], name='label_ids')\n input_ids = tf.placeholder(tf.int32, [None, self.params[\"MAX_SEQ_LENGTH\"]], name='input_ids')\n input_mask = tf.placeholder(tf.int32, [None, self.params[\"MAX_SEQ_LENGTH\"]], name='input_mask')\n segment_ids = tf.placeholder(tf.int32, [None, self.params[\"MAX_SEQ_LENGTH\"]], name='segment_ids')\n input_fn = tf.estimator.export.build_raw_serving_input_receiver_fn({\n 'label_ids': label_ids,\n 'input_ids': input_ids,\n 'input_mask': input_mask,\n 'segment_ids': segment_ids,\n })()\n return input_fn\n\n def export(self, export_path=\"export\"):\n self.estimator._export_to_tpu = False\n self.estimator.export_savedmodel(export_path, self.serving_input_fn)\n\n\nclass RangePredict(RangeRaw):\n \"\"\"\n Class to train the model:\n - Superclass: RangeRaw\n - Component: ClassifierModel\n\n \"\"\"\n\n def __init__(self, dataframe,\n index_sentences, index_labels,\n sequence_to_remove=[],\n seed=42,\n model_path=\"model_dir_fr\",\n text_preprocessing=ClassifierModel.TEXT_CLEAN,\n test_size=0.2, param_file='default_params.json'):\n RangeRaw.__init__(self, dataframe, index_sentences, index_labels)\n\n self.input_col = None\n self.output_col = None\n self.sequence_to_remove = sequence_to_remove\n self.seed = seed\n self.model_path = model_path\n self.text_preprocessing = text_preprocessing\n self.test_size = test_size\n # self.tensor_input_ids = None\n # self.tensor_input_mask = None\n # self.tensor_label_ids = None\n # self.tensor_segment_ids = None\n # self.tensor_outputs = None\n # self.sess = tf.Session()\n self.classifier_model = ClassifierModel()\n self.classifier_model.initialize(self.model_path,\n text_preprocessing=self.text_preprocessing,\n test_size=self.test_size, param_file=param_file)\n\n def clean_range(self):\n \"\"\"\n - Calls RangeRaw.standardize_range\n \"\"\"\n self.standardize_range(sequence_to_remove=self.sequence_to_remove)\n\n def preprocess(self):\n \"\"\"\n Use either raw text or clean text\n depending on the model\n - Defines input column according to model\n - Defines output column : labels\n \"\"\"\n\n if self.classifier_model.text_preprocessing == ClassifierModel.TEXT_CLEAN:\n self.clean_range()\n self.input_col = self.df[\"clean_text\"]\n\n elif self.classifier_model.text_preprocessing == ClassifierModel.TEXT_RAW:\n self.input_col = self.df[self.col_name_sentences]\n\n else:\n raise Exception(\"Text preprocessing unknown\")\n\n self.output_col = self.df[self.col_name_labels]\n print(\"preprocess done\")\n\n \"\"\"Call the Classifiermodel methods\"\"\"\n def train(self):\n if self.input_col is None:\n raise Exception(\"Preprocessing not specified\")\n self.classifier_model.train(self.input_col, self.output_col)\n\n def predict_estimator_test(self, X, y=None):\n \"\"\" Make predictions for the given dataset\n Warning, the train methods already split the dataset into train and test and evaluate the model on the test set,\n this method should be used only to test the good generalization of the model, on a data set it has never seen\n \"\"\"\n if self.classifier_model.classifier_trained:\n results = self.classifier_model.predict_estimator(X, y, self.classifier_model.label_list)\n return results\n\n def export(self, export_path=\"export\"):\n 
self.classifier_model.export(export_path=export_path)\n\n\nclass SinglePredict(SingleRaw):\n \"\"\"\n Class to predict the category:\n Superclass: SingleRaw\n Component: ClassifierModel\n \"\"\"\n # class attributes\n classifier_model_loaded = False\n # definition of the tensorflow graph\n tensor_input_ids = None\n tensor_input_mask = None\n tensor_label_ids = None\n tensor_segment_ids = None\n tensor_outputs = None\n sess = None\n # needed for bert\n BERT_MODEL_HUB = None\n tokenizer = None\n # needed for first prediction\n initialized = False\n initializing_raw = \"Hi, I'm here to run the model a first time before the users comes in to make his predictions faster.\"\n\n def __init__(self, sentence, label_list=[0, 1], sequence_to_remove=[], max_seq_length=128, model_path=\"model_dir_fr\"):\n\n SingleRaw.__init__(self, sentence)\n self.model_path = model_path\n self.predicted_class = None\n # self.predicted_proba = None\n # self.predicted_log_proba = None\n self.input_features = None\n self.label_list = label_list\n self.max_seq_length = max_seq_length\n self.empty = False\n self.sequence_to_remove = sequence_to_remove\n # in order to load model only once\n if not SinglePredict.classifier_model_loaded:\n SinglePredict.sess = tf.Session()\n t0 = time.time()\n SinglePredict.BERT_MODEL_HUB = \"https://tfhub.dev/google/bert_multi_cased_L-12_H-768_A-12/1\"\n SinglePredict.tokenizer = SinglePredict.create_tokenizer_from_hub_module()\n t1 = time.time()\n SinglePredict.import_model(model_path)\n t2 = time.time()\n print(\"Model loaded : \\n time bert: {}, time import: {}\".format(t1-t0, t2-t1))\n\n @staticmethod\n def initialize(model_path=\"model_dir_fr\"):\n \"\"\" Load the model and run it on a first sentence\"\"\"\n t2bis = time.time()\n init = SinglePredict(SinglePredict.initializing_raw, model_path=model_path)\n t3bis = time.time()\n init.preprocess()\n init.predict()\n t3 = time.time()\n print(\"time init complete: {}, time init sentence: {}\".format(t3-t2bis, t3-t3bis))\n\n @staticmethod\n def create_tokenizer_from_hub_module():\n \"\"\"Get the vocab file and casing info from the Hub module.\"\"\"\n with tf.Graph().as_default():\n bert_module = hub.Module(SinglePredict.BERT_MODEL_HUB)\n tokenization_info = bert_module(signature=\"tokenization_info\", as_dict=True)\n with tf.Session() as sess:\n vocab_file, do_lower_case = sess.run([tokenization_info[\"vocab_file\"],\n tokenization_info[\"do_lower_case\"]])\n return bert.tokenization.FullTokenizer(vocab_file=vocab_file, do_lower_case=do_lower_case)\n\n @staticmethod\n def import_model(dir_path):\n \"\"\" Import tensorflow model and store the tensors\"\"\"\n export_dir = dir_path\n\n tf.saved_model.loader.load(SinglePredict.sess, [tag_constants.SERVING], export_dir)\n SinglePredict.tensor_input_ids = SinglePredict.sess.graph.get_tensor_by_name('input_ids_1:0')\n SinglePredict.tensor_input_mask = SinglePredict.sess.graph.get_tensor_by_name('input_mask_1:0')\n SinglePredict.tensor_label_ids = SinglePredict.sess.graph.get_tensor_by_name('label_ids_1:0')\n SinglePredict.tensor_segment_ids = SinglePredict.sess.graph.get_tensor_by_name('segment_ids_1:0')\n SinglePredict.tensor_outputs = SinglePredict.sess.graph.get_tensor_by_name('loss/Squeeze:0')\n SinglePredict.classifier_model_loaded = True\n\n def preprocess(self):\n \"\"\"Method from preprocessing_en.py\"\"\"\n self.standardize_raw(self.sequence_to_remove)\n if len(self.clean_text.split()) == 0:\n self.empty = True\n # print(\"preprocess : \", self.clean_text)\n\n def predict(self):\n if not 
self.empty:\n input_examples = [run_classifier.InputExample(guid=\"\", text_a=self.clean_text, text_b=None,\n label=self.label_list[0])] # here, \"\" is just a dummy label\n self.input_features = run_classifier.convert_examples_to_features(input_examples, self.label_list,\n self.max_seq_length, self.tokenizer)\n #assert len(self.input_features) == 1\n t0 = time.time()\n input_ids = np.array(self.input_features[0].input_ids).reshape(-1, self.max_seq_length)\n t1 = time.time()\n input_mask = np.array(self.input_features[0].input_mask).reshape(-1, self.max_seq_length)\n t2 = time.time()\n label_ids = np.array(self.input_features[0].label_id).reshape(-1, )\n t3 = time.time()\n segment_ids = np.array(self.input_features[0].segment_ids).reshape(-1, self.max_seq_length)\n t4 = time.time()\n result = SinglePredict.sess.run(SinglePredict.tensor_outputs, feed_dict={\n SinglePredict.tensor_input_ids: input_ids,\n SinglePredict.tensor_input_mask: input_mask,\n SinglePredict.tensor_label_ids: label_ids,\n SinglePredict.tensor_segment_ids: segment_ids,\n })\n t5 = time.time()\n self.predicted_class = self.label_list[result]\n t6 = time.time()\n return t0, t1, t2, t3, t4, t5, t6\n else:\n return 0, 0, 0, 0, 0, 0, 0\n\n def clean_and_predict(self):\n self.preprocess()\n self.predict()\n print(\"The predicted class is: {}\".format(self.predicted_class))\n\n @staticmethod\n def unload():\n SinglePredict.sess.close()","sub_path":"prediction_fr.py","file_name":"prediction_fr.py","file_ext":"py","file_size_in_byte":25187,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"475083466","text":"'''\nEdit this file to contain the variable assignments.\nTo get you started the first one is done for you.\nThe quotes are necessary for storing text in a\nvariable, but not for numbers.\n'''\n\nname = \"Lee Cardholder\" \n\nbalance = 120.50\n\npercent_apr = 20\n\nfrom fractions import Fraction\nmonthly_interest = Fraction((balance*percent_apr/100)/12)\n","sub_path":"Copies/PythFound1/variables.py","file_name":"variables.py","file_ext":"py","file_size_in_byte":340,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"241433032","text":"\"\"\"\nCreated By : Nikesh\nCreated On : \nReviewed By :\nReviewed On :\nVersion :\n\"\"\"\n\nimport xmltodict\n\n\nclass XmlConverter:\n def __init__(self):\n pass\n\n def convert_in_list(self, list_object: list, keys: list):\n updated_data = []\n for each_object in list_object:\n for each_key in keys:\n xml_data = each_object[each_key]\n if xml_data != \"\" and xml_data is not None:\n xml_to_dict = xmltodict.parse(xml_data, dict_constructor=dict)\n each_object[each_key] = xml_to_dict\n updated_data.append(each_object)\n return updated_data\n\n def convert_in_dict(self, dict_object: dict, keys: list):\n for each_key in keys:\n xml_data = dict_object[each_key]\n if xml_data != \"\" and xml_data is not None:\n xml_to_dict = xmltodict.parse(xml_data, dict_constructor=dict)\n dict_object[each_key] = xml_to_dict\n return dict_object\n","sub_path":"examsystemapp/utils/helpers/xml_helper.py","file_name":"xml_helper.py","file_ext":"py","file_size_in_byte":1001,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"4566933","text":"#!/usr/bin/env python3\n# coding: utf-8\nimport numpy as np\nfrom time import time\nimport sys\nimport Setup as p\nimport Likelihood\nimport argparse\n\n# Parse the input arguments\nparser = 
argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)\nparser.add_argument(\"--threads\", dest=\"threads\", help=\"Number of threads\",\n type=str, default=8)\nparser.add_argument(\"--perccat\", dest=\"perccat\", help=\"Sets how much of the catalog to exclude\",\n type=str, default=None)\nargs = parser.parse_args()\nncores= int(args.threads)\nperccat = float(args.perccat)\n\n\ncuts_def = p.load_pickle(\"../../Data/SMmatching/logMScuts_def.p\")\nlogSMlim = cuts_def[perccat]\n\n# Initiate my likelihood model\nmodel = Likelihood.Model(logSMlim, perccat, generator=True)\nprint(\"Initiated the model!\", perccat, logSMlim)\nsys.stdout.flush()\n\nalphas = np.linspace(p.min_alpha, p.max_alpha, p.Nalphas)\nscatters = np.linspace(p.min_scatter, p.max_scatter, p.Nscatters)\n\n# Say x-dimension corresponds to alpha, y-dimension corresponds to scatter\nXX, YY = np.meshgrid(alphas, scatters)\nndim1, ndim2 = XX.shape\n# Calculate the stochastic covariance matrix at these values\nNiter = 1\ncovmats = np.zeros(shape=(ndim1, ndim2, p.nbins, p.nbins))\nNtot = XX.size\nk = 1\nextime = list()\n\nfor i in range(ndim1):\n for j in range(ndim2):\n start = time()\n alpha, scatter = XX[i, j], YY[i, j]\n catalogs = model.abundance_match(alpha, scatter, Niter)\n cov_matrix = model.jackknife_sim(catalogs[0], ncores)[1]\n\n covmats[i, j, :, :] = cov_matrix\n\n t = time()-start\n extime.append(t)\n remtime = sum(extime)/len(extime)*(Ntot-k)/60**2\n print(\"Done with step {}/{} in time {:.1f}. Estimated remaining time is {:.2f} hours\".format(k, Ntot, t, remtime))\n sys.stdout.flush()\n k += 1\n\nres = {'alpha' : XX, 'scatter' : YY, 'covmat' : covmats}\np.dump_pickle(res, \"../../Data/SMmatching/Train_jackknife_covmats_{}_.p\".format(perccat))\n\nprint(\"Finished\")\nsys.stdout.flush()\n","sub_path":"src/SMmatching/Jackknife_covmat_generate.py","file_name":"Jackknife_covmat_generate.py","file_ext":"py","file_size_in_byte":2037,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"102443401","text":"#!/usr/bin/env python\n\nimport sys\nimport math\nimport rospy\nimport rospkg\nimport cv2\nimport numpy as np\nfrom std_msgs.msg import String\nfrom sensor_msgs.msg import Image\nfrom cv_bridge import CvBridge, CvBridgeError\nimport argparse\n\nCONF_THRESH, NMS_THRESH = 0.5, 0.5\n\nPATH_TO_NAMES = \"\"\n\nnet = None\n\noutput_layers = []\n\n\nclass detection:\n\n def __init__(self):\n self.detected_pub = rospy.Publisher(\n '/cf1/camera/detected_objects', Image, queue_size=2)\n self.bridge = CvBridge()\n self.image_sub = rospy.Subscriber(\n \"/cf1/camera/image_corrected\", Image, self.callback)\n\n def callback(self, data):\n\n if data.header.stamp < (rospy.Time.now() - rospy.Duration(0.1)):\n # only take a new pictures (otherwise it will take all the old stored images in the buffer)\n # print(\"\\nThrow away\\n\")\n return\n\n # Convert the image from rosmsg to opencv format\n try:\n cv_img = self.bridge.imgmsg_to_cv2(data, \"bgr8\")\n except CvBridgeError as e:\n print(e)\n\n height, width = cv_img.shape[:2]\n\n blob = cv2.dnn.blobFromImage(\n cv_img, 0.00392, (416, 416), swapRB=True, crop=False)\n\n net.setInput(blob)\n layer_outputs = net.forward(output_layers)\n\n class_ids, confidences, b_boxes = [], [], []\n for output in layer_outputs:\n for detection in output:\n scores = detection[5:]\n class_id = np.argmax(scores)\n confidence = scores[class_id]\n if confidence > CONF_THRESH:\n center_x, center_y, w, h = (\n detection[0:4] * np.array([width, 
height, width, height])).astype('int')\n\n x = int(center_x - w / 2)\n y = int(center_y - h / 2)\n\n b_boxes.append([x, y, int(w), int(h)])\n confidences.append(float(confidence))\n class_ids.append(int(class_id))\n\n # Perform non maximum suppression for the bounding boxes to filter overlapping and low confident bounding boxes (computed once, reused below)\n nms_result = cv2.dnn.NMSBoxes(b_boxes, confidences, CONF_THRESH, NMS_THRESH)\n if len(nms_result) > 0:\n indices = nms_result.flatten().tolist()\n\n # Draw the filtered bounding boxes with their class to the image\n with open(PATH_TO_NAMES, \"r\") as f:\n classes = [line.strip() for line in f.readlines()]\n colors = np.random.uniform(0, 255, size=(len(classes), 3))\n\n for index in indices:\n x, y, w, h = b_boxes[index]\n # color the box by its class id (the box index can exceed the color table)\n cv2.rectangle(cv_img, (x, y), (x + w, y + h), colors[class_ids[index]], 2)\n cv2.putText(cv_img, classes[class_ids[index]], (x + 1, y + 20),\n cv2.FONT_HERSHEY_COMPLEX_SMALL, 1, colors[class_ids[index]], 2)\n\n try:\n self.detected_pub.publish(\n self.bridge.cv2_to_imgmsg(cv_img, \"bgr8\"))\n except CvBridgeError as e:\n print(e)\n\n\n# Global inits\nrospy.init_node('object_detection_yolov3_coco')\n\n\ndef main():\n global net, output_layers, PATH_TO_NAMES\n\n rospy.loginfo(\"Object detection node started! (Yolov3, Coco dataset)\")\n\n # setup\n\n rospack = rospkg.RosPack()\n rospack.list()\n pkg_path = rospack.get_path(\"drone_marker_detection\")\n param_path = pkg_path + \"/params\"\n\n PATH_TO_CFG = param_path + \"/yolov3.cfg\"\n PATH_TO_WEIGHTS = param_path + \"/yolov3.weights\"\n PATH_TO_NAMES = param_path + \"/coco.names\"\n\n # Load the network\n net = cv2.dnn.readNetFromDarknet(PATH_TO_CFG, PATH_TO_WEIGHTS)\n net.setPreferableBackend(cv2.dnn.DNN_BACKEND_OPENCV)\n net.setPreferableTarget(cv2.dnn.DNN_TARGET_CPU)\n\n # Get the output layer from YOLO\n layers = net.getLayerNames()\n output_layers = [layers[i[0] - 1] for i in net.getUnconnectedOutLayers()]\n\n detection() # Start image detection in openCV\n\n print(\"Object detection is running.\")\n try:\n rospy.spin()\n except KeyboardInterrupt:\n print(\"Shutting down.\")\n\n cv2.destroyAllWindows()\n\n rospy.loginfo(\"Object detection node was terminated.\")\n\n\nif __name__ == '__main__':\n try:\n main()\n except rospy.ROSInterruptException:\n pass\n","sub_path":"yolov3Detection_node.py","file_name":"yolov3Detection_node.py","file_ext":"py","file_size_in_byte":4335,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"632753816","text":"import numpy as np\nfrom skimage.io import imread\nfrom skimage.transform import resize\nfrom sklearn.linear_model import SGDClassifier\nfrom sklearn import svm\nfrom sklearn.preprocessing import StandardScaler\nimport joblib\n\nfrom Phase2.transformers import RGB2GrayTransformer, HogTransformer\n\ndog = imread('Images/dog1.jpg')\ncat = imread('Images/cat1.jpg')\n\noutput_shape = (102, 136, 3)\n\ndog = resize(dog, output_shape=output_shape)\ncat = resize(cat, output_shape=output_shape)\n\ntrain_inputs = [dog, cat]\ntrain_labels = ['dog', 'cat']\n\nfor i in range(0, 9):\n index = str(i)\n path = 'Images/middlefinger' + index + '.jpg'\n img = imread(path)\n img = resize(img, output_shape=output_shape)\n train_inputs.append(img)\n train_labels.append('middlefinger')\n\nfor i in range(0, 9):\n index = str(i)\n path = 'Images/skull' + index + '.jpg'\n img = imread(path)\n img = resize(img, output_shape=output_shape)\n train_inputs.append(img)\n train_labels.append('skull')\n\n# 
print(train_labels)\n\nX_train = np.array(train_inputs)\ny_train = np.array(train_labels)\n\ngrayify = RGB2GrayTransformer()\nhogify = HogTransformer(\n pixels_per_cell=(14, 14),\n cells_per_block=(3, 3),\n orientations=9,\n block_norm='L2-Hys'\n)\nscalify = StandardScaler()\n\nX_train_gray = grayify.fit_transform(X_train)\nX_train_hog = hogify.fit_transform(X_train_gray)\nX_train_prepared = scalify.fit_transform(X_train_hog)\n\n# print(X_train_prepared.shape)\n\n# clf = SGDClassifier(random_state=42, max_iter=1000, tol=1e-3)\nclf = svm.SVC(kernel='linear')\nclf.fit(X_train_prepared, y_train)\n\njoblib.dump(clf, 'image_classification_model.joblib')\njoblib.dump(grayify, 'grayify.joblib')\njoblib.dump(hogify, 'hogify.joblib')\njoblib.dump(scalify, 'scalify.joblib')\n","sub_path":"train_image_classifier.py","file_name":"train_image_classifier.py","file_ext":"py","file_size_in_byte":1747,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"263512767","text":"from bottle import template, redirect\r\nimport json\r\n\r\n\r\nJSON_FOLDER = './data'\r\nAVAILABLE_SHOWS = [\"7\", \"66\", \"73\", \"82\", \"112\", \"143\", \"175\", \"216\", \"1371\", \"1871\",\"2993\", \"305\"]\r\n\r\ndef getVersion():\r\n return \"0.0.1\"\r\n\r\ndef getJsonFromFile(showName):\r\n try:\r\n return template(\"{folder}/{filename}.json\".format(folder=JSON_FOLDER, filename=showName))\r\n except:\r\n return \"{}\"\r\n\r\ndef search_shows(q):\r\n content = []\r\n results = []\r\n for show in AVAILABLE_SHOWS:\r\n content.append(json.loads(getJsonFromFile(show)))\r\n for cont in content:\r\n episodes = cont['_embedded']['episodes']\r\n for episode in episodes:\r\n if q in episode['name']:\r\n results.append({'showid':cont['id'],'episodeid':episode['id'],'text':cont['name'] + \": \" + episode['name']})\r\n elif q in str(episode['summary']):\r\n results.append({'showid':cont['id'],'episodeid':episode['id'],'text':cont['name'] + \": \" + episode['name']})\r\n return results\r\n\r\ndef show_shows(id):\r\n data = getJsonFromFile(id)\r\n if len(data) == 2:\r\n return \"error\"\r\n data = json.loads(data)\r\n sectionTemplate = \"./templates/show.tpl\"\r\n return template(\"./pages/index.html\", version=getVersion(), sectionTemplate=sectionTemplate,\r\n sectionData=data)\r\n\r\ndef show_episodes(showId, epId):\r\n data = getJsonFromFile(showId)\r\n data = json.loads(data)[\"_embedded\"][\"episodes\"]\r\n for episode in data:\r\n if episode[\"id\"] == epId:\r\n ep = episode\r\n sectionTemplate = \"./templates/episode.tpl\"\r\n return template(\"./pages/index.html\", version=getVersion(), sectionTemplate=sectionTemplate,\r\n sectionData=ep)\r\n return \"error\"\r\n","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1774,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"316802406","text":"# -*- coding: utf-8 -*-\n\"\"\"\nMerge two sorted linked lists and return the result as a new sorted list. The new list is made by splicing together the nodes of the two given lists.\n\nExample:\n\nInput: 1->2->4, 1->3->4\nOutput: 1->1->2->3->4->4\n\"\"\"\n\n\n# Definition for singly-linked list.\nclass ListNode:\n def __init__(self, x):\n self.val = x\n self.next = None\n\n\ndef stringToListNode(numbers):\n # Now convert that list into linked list\n dummyRoot = ListNode(0)\n ptr = dummyRoot\n for number in numbers:\n ptr.next = ListNode(number)\n ptr = ptr.next\n\n ptr = dummyRoot.next\n return ptr\n\n\nclass Solution:\n def mergeTwoLists(self, l1: ListNode, l2: ListNode) -> ListNode:\n # l1 = stringToListNode(l1)\n # l2 = 
stringToListNode(l2)\n\n head = ListNode(0)\n new_node = head\n while True:\n if not l1 and not l2:\n break\n elif not l2 and l1 or l2 and l1 and l1.val <= l2.val:\n new_node.next = ListNode(l1.val)\n new_node = new_node.next\n l1 = l1.next\n else:\n new_node.next = ListNode(l2.val)\n new_node = new_node.next\n l2 = l2.next\n\n return head.next\n","sub_path":"Easy/21.Merge Two Sorted Lists 合并两个有序链表.py","file_name":"21.Merge Two Sorted Lists 合并两个有序链表.py","file_ext":"py","file_size_in_byte":1253,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"485563221","text":"import casadi as ca\nimport numpy as np\nimport time\nfrom motion_plot.ackermann_motion_plot import UTurnMPC\nimport yaml\n\n\nclass CasADi_MPC_TDROBCA:\n def __init__(self):\n self.base = 2.0\n self.LF = 3.\n self.LB = 1.\n self.offset = (self.LF - self.LB) / 2\n\n self.nx = 8 - 2\n self.ng = 6 - 2\n self.obst_num = 0\n self.horizon = 0\n self.dt0 = 0.\n self.model = None\n self.x_opt = None\n self.op_lambda0 = None\n self.op_mu0 = None\n self.op_d0 = None\n self.op_control0 = None\n\n self.wg = [1e-6, 1e-5, 1e-4, 1e-3, 1e-2, 1e-1, 1, 1e1, 1e2, 1e3]\n self.v_max = 2.\n self.steer_max = ca.pi * 40 / 180\n self.omega_max = ca.pi\n self.a_max = 4.\n self.steer_rate_max = ca.pi * 40 / 180\n self.jerk_max = 5.\n self.dmin = 0.\n\n def set_parameters(self, param):\n self.base = param[\"base\"]\n self.LF = param[\"LF\"] # distance from rear to vehicle front end\n self.LB = param[\"LB\"] # distance from rear to vehicle back end\n\n def Array2SX(self, array):\n rows, cols = np.shape(array)\n sx = ca.SX.zeros(rows * cols, 1)\n array_ = array.T.flatten()\n for i in range(rows * cols):\n sx[i] = array_[i]\n\n return ca.reshape(sx, rows, cols)\n\n def Array2DM(self, array):\n rows, cols = np.shape(array)\n sx = ca.DM.zeros(rows * cols, 1)\n array_ = array.T.flatten()\n for i in range(rows * cols):\n sx[i] = array_[i]\n\n return ca.reshape(sx, rows, cols)\n\n def init_dynamic_constraints(self, x, dt, gx):\n for i in range(self.horizon - 1):\n x_ = x[0, i]\n y_ = x[1, i]\n yaw_ = x[2, i]\n v_ = x[3, i]\n steer_ = x[4, i]\n a_ = x[5, i]\n # steer_rate_ = x[6, i]\n # jerk_ = x[7, i]\n\n k1_dx = v_ * ca.cos(yaw_)\n k1_dy = v_ * ca.sin(yaw_)\n k1_dyaw = v_ / self.base * ca.tan(steer_)\n k1_dv = a_\n # k1_dsteer = steer_rate_\n # k1_da = jerk_\n\n k2_dx = (v_ + 0.5 * dt * k1_dv) * ca.cos(yaw_ + 0.5 * dt * k1_dyaw)\n k2_dy = (v_ + 0.5 * dt * k1_dv) * ca.sin(yaw_ + 0.5 * dt * k1_dyaw)\n k2_dyaw = (v_ + 0.5 * dt * k1_dv) / self.base * ca.tan(steer_)\n k2_dv = a_\n # k2_dsteer = steer_rate_\n # k2_da = jerk_\n\n k3_dx = (v_ + 0.5 * dt * k2_dv) * ca.cos(yaw_ + 0.5 * dt * k2_dyaw)\n k3_dy = (v_ + 0.5 * dt * k2_dv) * ca.sin(yaw_ + 0.5 * dt * k2_dyaw)\n k3_dyaw = (v_ + 0.5 * dt * k2_dv) / self.base * ca.tan(steer_)\n k3_dv = a_\n # k3_dsteer = steer_rate_\n # k3_da = jerk_\n\n k4_dx = (v_ + 0.5 * dt * k3_dv) * ca.cos(yaw_ + 0.5 * dt * k3_dyaw)\n k4_dy = (v_ + 0.5 * dt * k3_dv) * ca.sin(yaw_ + 0.5 * dt * k3_dyaw)\n k4_dyaw = (v_ + 0.5 * dt * k3_dv) / self.base * ca.tan(steer_)\n k4_dv = a_\n # k4_dsteer = steer_rate_\n # k4_da = jerk_\n\n dx = dt * (k1_dx + 2 * k2_dx + 2 * k3_dx + k4_dx) / 6\n dy = dt * (k1_dy + 2 * k2_dy + 2 * k3_dy + k4_dy) / 6\n dyaw = dt * (k1_dyaw + 2 * k2_dyaw + 2 * k3_dyaw + k4_dyaw) / 6\n dv = dt * (k1_dv + 2 * k2_dv + 2 * k3_dv + k4_dv) / 6\n # dsteer = dt * (k1_dsteer + 2 * k2_dsteer + 2 * k3_dsteer + k4_dsteer) / 6\n # da = dt * (k1_da + 2 * k2_da + 2 * k3_da + k4_da) / 6\n\n 
gx[0, i] = x_ + dx - x[0, i + 1]\n gx[1, i] = y_ + dy - x[1, i + 1]\n gx[2, i] = yaw_ + dyaw - x[2, i + 1]\n gx[3, i] = v_ + dv - x[3, i + 1]\n # gx[4, i] = steer_ + dsteer - x[4, i + 1]\n # gx[5, i] = a_ + da - x[5, i + 1]\n\n return gx\n\n def init_OBCA_constraints(self, x_, lambda_, mu_, d_, shape, obst, gx):\n gT = self.Array2SX(shape[:, 2][:, None].T)\n GT = self.Array2SX(shape[:, :2].T)\n\n # lambda_v = ca.reshape(lambda_, -1, 1)\n # lambda_ = ca.reshape(lambda_v, self.horizon, 4 * self.obst_num).T\n # mu_v = ca.reshape(mu_, -1, 1)\n # mu_ = ca.reshape(mu_v, self.horizon, 4).T\n\n # G = ca.SX.zeros(4, 2)\n # G[0, 0] = 1\n # G[1, 0] = -1\n # G[2, 1] = 1\n # G[3, 1] = -1\n #\n # g = ca.SX.zeros(4, 1)\n # g[0, 0] = 3\n # g[1, 0] = 1\n # g[2, 0] = 1\n # g[3, 0] = 1\n # GT = G.T\n # gT = g.T\n\n l_ob = len(obst)\n for i in range(self.horizon - 1):\n mu_i = mu_[:, i]\n yaw_i = x_[2, i]\n offset = ca.SX.zeros(2, 1)\n offset[0, 0] = self.offset * ca.cos(yaw_i)\n offset[1, 0] = self.offset * ca.sin(yaw_i)\n t_i = x_[:2, i] + offset\n\n rotT_i = ca.SX.zeros(2, 2)\n rotT_i[0, 0] = ca.cos(yaw_i)\n rotT_i[1, 1] = ca.cos(yaw_i)\n rotT_i[1, 0] = -ca.sin(yaw_i)\n rotT_i[0, 1] = ca.sin(yaw_i)\n\n for j in range(l_ob):\n lambdaj = lambda_[(4 * j):(4 * (j + 1)), i]\n\n Aj = self.Array2SX(obst[j][:, :2])\n bj = self.Array2SX(obst[j][:, 2][:, None])\n constraint1 = ca.mtimes(ca.mtimes(rotT_i, Aj.T), lambdaj) + ca.mtimes(GT, mu_i)\n constraint2 = ca.mtimes((ca.mtimes(Aj, t_i) - bj).T, lambdaj) - ca.mtimes(gT, mu_i) + d_[j, i]\n constraint3 = ca.sumsqr(ca.mtimes(Aj.T, lambdaj))\n constraint10 = constraint1[0, 0]\n constraint11 = constraint1[1, 0]\n\n # Aj = obst[j][:, :2]\n # bj = obst[j][:, 2]\n # constraint10 = (mu_i[0] - mu_i[2]) \\\n # + ca.cos(x_[2, i]) * sum(Aj[k, 0] * lambdaj[k] for k in range(4)) \\\n # + ca.sin(x_[2, i]) * sum(Aj[k, 1] * lambdaj[k] for k in range(4))\n #\n # constraint11 = (mu_i[1] - mu_i[3]) \\\n # - ca.sin(x_[2, i]) * sum(Aj[k, 1] * lambdaj[k] for k in range(4)) \\\n # + ca.cos(x_[2, i]) * sum(Aj[k, 1] * lambdaj[k] for k in range(4))\n #\n # constraint2 = -(-sum(g[k] * mu_i[k] for k in range(4))) \\\n # + (x_[0, i] + self.offset * ca.cos(x_[2, i])) \\\n # * sum(Aj[k, 0] * lambdaj[k] for k in range(4)) \\\n # + (x_[1, i] + self.offset * ca.sin(x_[2, i])) \\\n # * sum(Aj[k, 1] * lambdaj[k] for k in range(4)) \\\n # - sum(bj[k] * lambdaj[k] for k in range(4)) + d_[j, i]\n #\n # constraint3 = sum(Aj[k, 0] * lambdaj[k] for k in range(4)) ** 2 \\\n # + sum(Aj[k, 1] * lambdaj[k] for k in range(4)) ** 2\n\n gx[j, i] = ca.fabs(constraint10)\n gx[j + l_ob, i] = ca.fabs(constraint11)\n gx[j + 2 * l_ob, i] = constraint2\n gx[j + 3 * l_ob, i] = constraint3\n\n return gx\n\n def init_objects(self, x_, d_, ref_path):\n\n sum_mindist = 0\n sum_states = 0\n sum_states_rate = 0\n sum_controls = 0\n sum_controls_rate = 0\n\n for i in range(self.horizon):\n sum_states += ca.sumsqr(x_[:4, i]) # xk\n sum_controls += ca.sumsqr(x_[4:, i]) # uk\n sum_mindist += ca.sumsqr(d_[:, i])\n if i > 0:\n sum_states_rate += ca.sumsqr(x_[:4, i] - x_[:4, i - 1]) # xk - xk-1\n sum_controls_rate += ca.sumsqr(x_[4:, i] - x_[4:, i - 1]) # uk - uk-1\n\n obj = self.wg[1] * sum_states + self.wg[6] * sum_states_rate + self.wg[3] * sum_controls \\\n + 1e1 * self.wg[9] * sum_controls_rate + 1e8 * self.wg[9] * ca.sumsqr(x_[:3, -1] - ref_path[:3, -1]) \\\n + 1e16 * self.wg[9] * sum_mindist\n\n return obj\n\n def init_bounds_OBCA(self, start, goal):\n lbx = ca.DM.zeros(self.nx + (self.obst_num + 1) * 4 + self.obst_num, 
self.horizon)\n ubx = ca.DM.zeros(self.nx + (self.obst_num + 1) * 4 + self.obst_num, self.horizon)\n lbg = ca.DM.zeros(self.ng + 4 * self.obst_num, self.horizon - 1)\n ubg = ca.DM.zeros(self.ng + 4 * self.obst_num, self.horizon - 1)\n\n for i in range(self.horizon):\n lbx[0, i] = -10. # x\n ubx[0, i] = 10. # x\n lbx[1, i] = -1. # y\n ubx[1, i] = 10. # 1.1y\n lbx[2, i] = -ca.pi # th\n ubx[2, i] = ca.pi # th\n lbx[3, i] = -self.v_max # v\n ubx[3, i] = self.v_max # v\n lbx[4, i] = -self.steer_max # steer\n ubx[4, i] = self.steer_max # steer\n lbx[5, i] = -self.a_max # a\n ubx[5, i] = self.a_max # a\n\n lbx[6:-self.obst_num, i] = 1e-5 # lambda, mu\n ubx[6:-self.obst_num, i] = 1. # lambda, mu\n lbx[-self.obst_num:, i] = -1e-5 # dmin\n ubx[-self.obst_num:, i] = -1e-9 # -4e-5 # dmin\n\n # constraint1 rotT_i @ Aj.T @ lambdaj + GT @ mu_i == 0\n lbg[self.ng:self.ng + 2 * self.obst_num, :] = 1e-5\n ubg[self.ng:self.ng + 2 * self.obst_num, :] = 1e-5\n\n # constraint2 (Aj @ t_i - bj).T @ lambdaj - gT @ mu_i + dmin == 0\n lbg[self.ng + 2 * self.obst_num:self.ng + 3 * self.obst_num, :] = 0.\n ubg[self.ng + 2 * self.obst_num:self.ng + 3 * self.obst_num, :] = 0. # 1e-5\n\n # constraint3 norm_2(Aj.T @ lambdaj) <=1\n lbg[self.ng + 3 * self.obst_num:self.ng + 4 * self.obst_num, :] = 0.\n ubg[self.ng + 3 * self.obst_num:self.ng + 4 * self.obst_num, :] = 1.\n\n lbx[0, 0] = start[0]\n lbx[1, 0] = start[1]\n lbx[2, 0] = start[2]\n lbx[3:, 0] = 0.\n\n ubx[0, 0] = start[0]\n ubx[1, 0] = start[1]\n ubx[2, 0] = start[2]\n ubx[3:, 0] = 0.\n\n # lbx[0, -1] = goal[0]\n # lbx[1, -1] = goal[1]\n lbx[2, -1] = goal[2]\n lbx[3:, -1] = 0.\n\n # ubx[0, -1] = goal[0]\n # ubx[1, -1] = goal[1]\n ubx[2, -1] = goal[2]\n ubx[3:, -1] = 0.\n\n lbx_ = ca.reshape(lbx, -1, 1)\n ubx_ = ca.reshape(ubx, -1, 1)\n\n lbg_ = ca.reshape(lbg, -1, 1)\n ubg_ = ca.reshape(ubg, -1, 1)\n\n return lbx_, ubx_, lbg_, ubg_\n\n def states_initialization(self, reference_path):\n x0 = ca.DM(self.nx, self.horizon)\n diff_s = np.diff(reference_path[:2, :], axis=1)\n sum_s = np.sum(np.hypot(diff_s[0], diff_s[1]))\n self.dt0 = 1.4 * (sum_s / self.v_max + self.v_max / self.a_max) / self.horizon\n\n last_v = 0.\n # last_a = 0.\n # last_steer = 0.\n for i in range(self.horizon):\n x0[0, i] = reference_path[0, i]\n x0[1, i] = reference_path[1, i]\n x0[2, i] = reference_path[2, i]\n\n if i > 1:\n ds = np.linalg.norm(reference_path[:2, i] - reference_path[:2, i - 1])\n dyaw = reference_path[2, i] - reference_path[2, i - 1]\n steer = ca.atan2(dyaw * self.base / ds, 1)\n x0[3, i] = ds / self.dt0\n x0[4, i] = steer\n x0[5, i] = (ds / self.dt0 - last_v) / self.dt0\n # x0[6, i] = (steer - last_steer) / self.dt0\n # x0[7, i] = ((ds / self.dt0 - last_v) / self.dt0 - last_a) / self.dt0\n\n last_v = x0[3, i]\n # last_steer = x0[4, i]\n # last_a = x0[5, i]\n\n if self.op_control0 is not None:\n x0[3:, :] = self.op_control0\n\n return x0\n\n def dual_variables_initialization(self):\n v0 = ca.DM.zeros(4 * (self.obst_num + 1) + self.obst_num, self.horizon)\n if (self.op_lambda0 is not None) and (self.op_mu0 is not None) and (self.op_d0 is not None):\n v0[:4 * self.obst_num, :] = self.Array2DM(self.op_lambda0)\n v0[4 * self.obst_num: -self.obst_num, :] = self.Array2DM(self.op_mu0)\n v0[-self.obst_num:, :] = self.Array2DM(self.op_d0)\n\n return v0\n\n def init_model_OBCA(self, reference_path, shape_m, obst_m):\n self.horizon = reference_path.shape[1]\n self.obst_num = len(obst_m)\n x0_ = self.states_initialization(reference_path)\n v0 = self.dual_variables_initialization()\n\n # 
initialize variables\n        x = ca.SX.sym(\"x\", self.nx, self.horizon)  # (x, y, theta, v, steer, a)\n        lambda_ = ca.SX.sym(\"lambda\", 4 * self.obst_num, self.horizon)\n        mu_ = ca.SX.sym(\"mu\", 4, self.horizon)\n        d_ = ca.SX.sym(\"d\", self.obst_num, self.horizon)\n\n        # initialize constraints\n        g1 = ca.SX.sym(\"g1\", self.ng, self.horizon - 1)\n        g1 = self.init_dynamic_constraints(x, self.dt0, g1)\n        g2 = ca.SX.sym(\"g2\", self.obst_num * 4, self.horizon - 1)\n        g2 = self.init_OBCA_constraints(x, lambda_, mu_, d_, shape_m, obst_m, g2)\n        gx = ca.vertcat(g1, g2)\n\n        X, G = self.organize_variables(x, lambda_, mu_, d_, gx)\n        X0, XL, XU, GL, GU = self.organize_bounds(x0_, v0)\n\n        # initialize objectives\n        F = self.init_objects(x, d_, x0_)\n\n        nlp = {\"x\": X, \"f\": F, \"g\": G}\n        opts_setting = {\"expand\": False,\n                        \"ipopt.hessian_approximation\": \"exact\",\n                        'ipopt.max_iter': 100,\n                        'ipopt.print_level': 3,\n                        'print_time': 1,\n                        'ipopt.acceptable_tol': 1e-8,\n                        'ipopt.acceptable_obj_change_tol': 1e-6}\n\n        Sol = ca.nlpsol('S', 'ipopt', nlp, opts_setting)\n\n        result = Sol(x0=X0, lbx=XL, ubx=XU, lbg=GL, ubg=GU)\n        self.x_opt = result[\"x\"]\n\n    def organize_variables(self, vx, vl, vm, vd, cg):\n        vs = ca.vertcat(vx, vl)\n        vs = ca.vertcat(vs, vm)\n        vs = ca.vertcat(vs, vd)\n        X = ca.reshape(vs, -1, 1)\n        # X = ca.vertcat(dt, x_)\n        G = ca.reshape(cg, -1, 1)\n\n        return X, G\n\n    def organize_bounds(self, x0, y0):\n        lbx, ubx, lbg, ubg = self.init_bounds_OBCA(x0[:, 0], x0[:, -1])\n        x_all = ca.vertcat(x0, y0)\n        X0 = ca.reshape(x_all, -1, 1)\n        # X0 = ca.vertcat(self.dt0, x0_)\n\n        return X0, lbx, ubx, lbg, ubg\n\n    def get_result_OBCA(self):\n        op_dt = float(self.dt0)\n        cal_traj = ca.reshape(self.x_opt, self.nx + (self.obst_num + 1) * 4 + self.obst_num, self.horizon)\n        # cal_traj = ca.reshape(self.x_opt[1:], self.horizon, self.nx + (self.obst_num + 1) * 4).T\n        # slice by the stacked variable layout (x, lambda, mu, d) instead of the old hard-coded 8-state indices\n        op_controls = np.array(cal_traj[3:self.nx, :])\n        op_trajectories = np.array(cal_traj[:3, :])\n\n        vl = np.array(cal_traj[self.nx:self.nx + 4 * self.obst_num, :])\n        vm = np.array(cal_traj[self.nx + 4 * self.obst_num:self.nx + 4 * (self.obst_num + 1), :])\n        vd = np.array(cal_traj[self.nx + 4 * (self.obst_num + 1):, :])\n\n        return op_dt, op_trajectories, op_controls, vl, vm\n\n\nif __name__ == '__main__':\n    start_time = time.time()\n\n    large = True\n    if large:\n        address = \"../config_OBCA_large.yaml\"\n    else:\n        address = \"../config_OBCA.yaml\"\n    with open(address, 'r', encoding='utf-8') as f:\n        param = yaml.safe_load(f)\n\n    ut = UTurnMPC()\n    ut.reserve_footprint = True\n    ut.set_parameters(param)\n    # states: (x ,y ,theta ,v , steer, a, steer_rate, jerk)\n    cmpc = CasADi_MPC_TDROBCA()\n    cmpc.set_parameters(param)\n    ref_traj, ob, obst = ut.initialize_saved_data()\n    shape = ut.get_car_shape()\n\n    cmpc.init_model_OBCA(ref_traj, shape, obst)\n    op_dt, op_trajectories, op_controls, vl, vm = cmpc.get_result_OBCA()\n    print(\"TDROBCA total time:{:.3f}s\".format(time.time() - start_time))\n    ut.plot_results(op_dt, op_trajectories, op_controls, ref_traj, ob, four_states=True)\n","sub_path":"MPC_planning/casadi_MPC/backup/casadi_TDROBCA_v3.py","file_name":"casadi_TDROBCA_v3.py","file_ext":"py","file_size_in_byte":15659,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"602291270","text":"# BB lib - Python Helper\nimport datetime\nimport json\nimport os\nimport sys\nimport subprocess\nimport pprint\n\nclass Util:\n    def __init__(self, details = {}):\n        self.secrets = []\n        self.logfile = False\n        self.logfilename = \"run_log.txt\"\n        self.hfile = False\n        if \"secrets\" in details:\n            self.secrets = details[\"secrets\"]\n        if \"file\" in details:\n            self.logfile 
= True\n\n        self.something = \"more\"\n\n    def init_log(self):\n        self.logit(\"#------------- New Run ---------------#\")\n\n    def set_details(self, details):\n        if \"secrets\" in details:\n            self.secrets = details[\"secrets\"]\n        if \"file\" in details:\n            self.logfile = True\n\n    def add_secret(self, item):\n        self.secrets.append(str(item))\n\n    def logit(self, message, log_type = \"INFO\", display_only = True):\n        cur_date = datetime.datetime.now().strftime(\"%m/%d/%Y %H:%M:%S\")\n        stamp = f\"{cur_date}|{log_type}> \"\n        if type(message) == dict or type(message) == list:\n            message = str(message)\n        self.file_log(message)\n        if log_type == \"raw\":\n            message = \"Raw output, check file\"\n        for line in message.splitlines():\n            cleaned = self.sanitize(line)\n            print(f\"{stamp}{cleaned}\")\n\n    def message_box(self, msg, mtype = \"sep\"):\n        tot = 100\n        start = \"\"\n        res = \"\"\n        msg = msg[0:84] if len(msg) > 85 else msg\n        ilen = tot - len(msg)\n        if (mtype == \"sep\"):\n            start = f'#{\"-\" * int(ilen/2)} {msg}'\n            res = f'{start} {\"-\" * (tot - len(start) + 1)}#'\n        else:\n            res = f'#{\"-\" * tot}#\\n'\n            start = f'#{\" \" * int(ilen/2)} {msg} '\n            res += f'{start}{\" \" * (tot - len(start) + 1)}#\\n'\n            res += f'#{\"-\" * tot}#\\n'\n\n        self.logit(self.sanitize(res))\n        return res\n\n    def sanitize(self, txt):\n        cleaned = str(txt).strip()\n        for item in self.secrets:\n            cleaned = cleaned.replace(item, \"*******\")\n        return cleaned\n\n    def run_shell(self, cmd = [\"ls\", \"-l\"]):\n        self.logit(f'Running: {\" \".join(cmd)}')\n        result = subprocess.run(cmd, capture_output=True)\n        self.logit(\"The exit code was: %d\" % result.returncode)\n        self.logit(\"#--------------- STDOUT ---------------#\")\n        self.logit(result.stdout)\n        if result.stderr:\n            self.logit(\"#--------------- STDERR ---------------#\")\n            self.logit(result.stderr)\n        return result\n\n    def separator(self, ilength = 102):\n        dashy = \"-\" * (ilength - 2)\n        self.logit(f'#{dashy}#')\n\n    def print_timer(self, starttime):\n        elapsed = datetime.datetime.now() - starttime\n        self.logit(f'Elapsed time: {str(elapsed)}')\n\n    def process_args(self, arglist):\n        args = {}\n        for arg in arglist:\n            pair = arg.split(\"=\")\n            if len(pair) == 2:\n                args[pair[0].strip()] = pair[1].strip()\n            else:\n                args[arg] = \"\"\n        return args\n\n    def read_json(self, json_file, is_path = True):\n        result = {}\n        if is_path:\n            with open(json_file) as jsonfile:\n                result = json.load(jsonfile)\n        else:\n            result = json.loads(json_file)\n        return result\n\n    def save_json(self, json_file, data, is_path = True):\n        if is_path:\n            with open(json_file, 'w') as outfile:\n                json.dump(data, outfile)\n        else:\n            return json.dumps(data)\n\n    def file_log(self, content, action = \"none\"):\n        if not self.logfile:\n            return(\"goody\")\n        if action == \"new\":\n            ver = self.logfilename.split(\".\")[0].replace(\"run_log\",\"\")\n            inc = 1 if ver == \"\" else int(ver) + 1\n            self.logfilename = f\"run_log{inc}.txt\"\n        cur_date = datetime.datetime.now().strftime(\"%m/%d/%Y %H:%M:%S\")\n        stamp = f\"{cur_date}|I> \"\n        with open(self.logfilename, 'a') as lgr:\n            lgr.write(f'{stamp}{content}\\n')\n","sub_path":"clinical/bbutil.py","file_name":"bbutil.py","file_ext":"py","file_size_in_byte":4034,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"547748963","text":"from tkinter import *\nfrom tkinter import ttk\nfrom datetime import datetime\nfrom tkinter import messagebox\nimport coverpage\nimport deposit\nimport withdraw\nimport print_current\n\n\nclass operation():\n    \n    def __init__(self):\n        
self.root = Tk()\n self.options_frame = Frame(self.root, height = 400, width = 1500 ,bd = '7')\n\n \n def initialise(self):\n \n self.root.title('operation_page')\n self.head_frame()\n\n self.root.geometry('2000x2000')\n self.root.mainloop()\n\n def head_frame(self):\n head_frame = Frame(self.root, height = 300, width = 1500 ,bd = '7')\n head_frame.pack()\n head_label = Label(head_frame, text = 'WELCOME TO THE BANK SYSTEM', font = 'Times 50')\n head_label.pack()\n\n #Date\n current_time = datetime.now()\n time_label = Label(head_frame, text = current_time, font = 'Arial 30')\n time_label.config(justify = CENTER)\n time_label.pack()\n\n #Login heading\n after_login = Label(head_frame, text = 'OPERATION PAGE', font = 'Arial 30')\n after_login.config(justify = CENTER)\n after_login.pack(pady = 20)\n\n self.options_func()\n\n def options_func(self):\n self.options_frame.place(x = 0, y = 250)\n\n deposit = Label(self.options_frame, text = 'Deposit : ', font = 'Arial 25')\n deposit.grid(row = 0, column = 0, padx = 20, pady = 20)\n\n withdraw = Label(self.options_frame, text = 'Withdraw : ', font = 'Arial 25')\n withdraw.grid(row = 1, column = 0, padx = 20, pady = 20)\n\n print_balance = Label(self.options_frame, text = 'Print Current_balance : ', font = 'Arial 25')\n print_balance.grid(row = 2, column = 0, padx = 20, pady = 20)\n\n return_to_login = Label(self.options_frame, text = 'Return To Login Page : ', font = 'Arial 25')\n return_to_login.grid(row = 3, column = 0, padx = 20, pady = 20)\n\n exit_application = Label(self.options_frame, text = 'Exit Application : ', font = 'Arial 25')\n exit_application.grid(row = 4, column = 0, padx = 20, pady = 20) \n\n self.entry_func()\n\n def entry_func(self):\n deposit_button = Button(self.options_frame, text = 'DEPOSIT', font = 'Arial 25', command = self.deposit_func)\n deposit_button.grid(row = 0, column = 1, padx = 20)\n\n withdraw_button = Button(self.options_frame, text = 'WITHDRAW', font = 'Arial 25', command = self.withdraw_func)\n withdraw_button.grid(row = 1, column = 1, padx = 20)\n\n print_button = Button(self.options_frame, text = 'PRINT CURRENT BALANCE', font = 'Arial 25', command = self.print_func)\n print_button.grid(row = 2, column = 1, padx = 20)\n\n return_button = Button(self.options_frame, text = 'RETURN TO LOGIN', font = 'Arial 25', command = self.return_func)\n return_button.grid(row = 3, column = 1, padx = 20)\n\n exit_button = Button(self.options_frame, text = 'EXIT THE APPLICATION', font = 'Arial 25', command = self.exit_func)\n exit_button.grid(row = 4, column = 1, padx = 20)\n\n def deposit_func(self):\n self.root.destroy()\n deposit_object = deposit.deposit_class()\n deposit_object.initialise()\n pass\n\n def withdraw_func(self):\n self.root.destroy()\n withdraw_object = withdraw.withdraw_class()\n withdraw_object.initialise()\n\n def print_func(self):\n self.root.destroy()\n print_object = print_current.print_class()\n print_object.initialise()\n\n def return_func(self):\n self.root.destroy()\n cover_page_object = coverpage.cover()\n cover_page_object.new_window()\n \n def exit_func(self):\n exit_var = messagebox.askquestion('Exit', 'Do you want to exit the application, Transcation will be lost', icon = 'warning')\n if exit_var == 'yes':\n self.root.destroy()\n\n\n\n\n\n# a = operation()\n# a.initialise()\n\n\n\n\n\n\n\n\n","sub_path":"after_login_page.py","file_name":"after_login_page.py","file_ext":"py","file_size_in_byte":3926,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} 
+{"seq_id":"480109834","text":"from tools.simulation.virus import Virus\nfrom tools.simulation.population import PopulationBase\nfrom tools.data_structure import TimeSeriesResult\n\n\"\"\"\nA basic example of how to use the PopulationBase class\n\"\"\"\n\n\ndef run_simulation(virus_type: str,\n population_size: int,\n n_days: int):\n virus = Virus.from_string(virus_type)\n\n population = PopulationBase(population_size)\n\n population.infect(10) # infect 10 randomly selected individuals\n\n days = []\n\n infected_numbers = []\n unaffected_numbers = []\n immune_numbers = []\n dead_numbers = []\n\n for day_i in range(n_days):\n health_states = population.get_health_states(\n int(population_size * 0.5)\n ) # get health states of randomly selected half of the population\n\n n_infected = health_states.astype(int).sum() # find the number of infected\n\n population.infect(\n n_infected\n ) # infect new people (each of the selected people meets another\n # random person from the population and infect them with 100% probability)\n\n population.heal(\n healing_days_mean=virus.illness_days_mean,\n healing_days_std=virus.illness_days_std\n ) # simulate recovery from the illness\n\n population.kill(\n mortality=virus.get_mortality(),\n healing_days=virus.illness_days_mean\n ) # let some of the ill people pass away\n\n # === log statistics ===\n\n days.append(day_i)\n\n infected_numbers.append(population.get_n_infected())\n unaffected_numbers.append(population.get_n_unaffected())\n immune_numbers.append(population.get_n_immune())\n dead_numbers.append(population.get_n_dead())\n\n # ======================\n\n population.next_day() # see you all tomorrow\n\n return TimeSeriesResult(\n simulation_days=days,\n infected=infected_numbers,\n unaffected=unaffected_numbers,\n immune=immune_numbers,\n dead=dead_numbers\n )\n","sub_path":"examples/basic.py","file_name":"basic.py","file_ext":"py","file_size_in_byte":2030,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"303595991","text":"import boto3\nsession = boto3.session.Session(profile_name=\"saml\")\n#ec2=session.resource('ec2')\nclient = session.client('ec2')\n#instances = ec2.instances.filter(Filters=[{'Name': 'instance-state-name', 'Values': ['running']}])\ninstances = client.describe_instances(Filters=[{'Name': 'instance-state-name', 'Values': ['running']}])\nprint(instances)\ninstance_ids=[]\nprivate_ips=[]\nfor i in range(0,len(instances['Reservations'])):\n instance_ids.append(instances['Reservations'][i]['Instances'][0]['InstanceId'])\n print(\"InstanceIDs: \"+instance_ids)\n for j in range(0,len(instances['Reservations'][i]['Instances'][0]['NetworkInterfaces'])):\n private_ips.append(instances['Reservations'][i]['Instances'][0]['NetworkInterfaces'][j])\n\n\n'''for i in instances:\n print(i.id,i.private_ip_address,i.tags)'''\n'''for i in instances:\n for tag in i.tags:\n if(tag['Key']=='Name'):\n print (tag['Value'])'''\n","sub_path":"get_running_instances.py","file_name":"get_running_instances.py","file_ext":"py","file_size_in_byte":915,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"373052006","text":"\r\nimport pandas as pd\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\n\r\n# 评估检验中忽略某些输入渠道的抽样导致的食品安全风险\r\n# 检验中网购食品是一个漏掉的重要的输入渠道\r\n# 网购会一直增长,在未来占有食品市场更大的一部分比重\r\n# 评估被忽略的输入渠道——网购食品的安全风险\r\n\r\n# 网购新鲜蔬菜水果,非同城极易致使到货时不新鲜\r\n# 商品页面安全标志不明显,缺乏关于商品合格证明\r\n# 很少对网购食品进行抽检的样例\r\n# 基于某购物网站评论数据评估网购新鲜蔬菜水果的食品安全风险\r\n\r\n\r\ncommentData = 
pd.read_excel('comment_fr.xls')\r\ncommentCount = pd.DataFrame(commentData['generalCount'] + commentData['goodCount'] + commentData['poorCount'])\r\ncommentData['commentCount'] = commentCount\r\nnewCol = ['commentCount', 'goodCount', 'generalCount', 'poorCount', 'goodRate', 'generalRate', 'poorRate']\r\ncommentData = commentData.reindex(columns=newCol) # .drop(['url', 'productId'], axis=1)\r\ncommentData = commentData.sort_values('commentCount', ascending=False)\r\n\r\n# Drop items with zero reviews\r\ncommentData = commentData.drop(commentData[commentData['commentCount'] == 0].index)\r\n\r\n# Correlation between review count and negative-review rate is extremely weak\r\ncc = pd.Series(commentData['commentCount'])\r\npr = pd.Series(commentData['poorRate'])\r\ncorres = cc.corr(pr) # Pearson correlation coefficient\r\nprint('Correlation between review count and negative reviews: %f' % corres)\r\n\r\nprint('Number of products with non-zero reviews: %d' % commentData['commentCount'].count())\r\nprint('Total reviews: %d' % commentData['commentCount'].sum())\r\nprint('Total negative reviews: %d' % commentData['poorCount'].sum())\r\nprint('Total neutral reviews: %d' % commentData['generalCount'].sum())\r\n\r\n# Estimate the risk coefficient\r\ngeneralPoorRate = 0.5 # risk weight for neutral reviews is set to 0.5\r\ncommentCountSum = commentData['commentCount'].sum()\r\ngeneralCountSum = commentData['generalCount'].sum()\r\npoorCountSum = commentData['poorCount'].sum() + generalCountSum * generalPoorRate\r\nrisk = poorCountSum / commentCountSum\r\nprint('Rough risk: %f' % risk) # rough risk coefficient\r\n\r\n\r\n# Histogram\r\n# plt.hist(x, bins, normed)\r\n# bins: number of bars in the histogram\r\n# normed: 1/0 (normalized to probability / raw counts)\r\na = commentData['commentCount']\r\nplt.hist(a, 100, normed=1) # , histtype='stepfilled', facecolor='b', alpha=0.75\r\nplt.xlabel('Sales volume', fontproperties='SimHei', fontsize=12)\r\nplt.ylabel('Proportion', fontproperties='SimHei', fontsize=12)\r\nplt.title('Distribution of fresh fruit sales', fontproperties='SimHei', fontsize=12)\r\n#plt.savefig('ve', dpi=780)\r\n#plt.show()\r\n\r\n# Weighted risk coefficient\r\n# The larger the sales volume, the more likely an existing food safety problem is discovered\r\n# Infer that as sales volume tends to infinity, the negative-review rate approaches the risk rate\r\n# Negative reviews of high-volume items contribute to the risk rate with weight 1\r\n# Negative reviews of medium-volume items contribute with weight 3\r\n# Negative reviews of low-volume items contribute with weight 5\r\n# Neutral reviews of high-volume items contribute with weight 0.5\r\n# Neutral reviews of medium-volume items contribute with weight 1.5\r\n# Neutral reviews of low-volume items contribute with weight 2.5\r\n# Top 10% are high volume\r\n# Middle 20% are medium volume\r\n# Bottom 70% are low volume\r\nnumOfGoods = commentData['commentCount'].count()\r\ncommentCountSum = commentData['commentCount'].sum()\r\nhRankRate = 0.2\r\nmRankRate = 0.3\r\nlRankRate = 0.5\r\nhRankCount = int(hRankRate * numOfGoods)\r\nmRankCount = int((hRankRate + mRankRate) * numOfGoods)\r\nlRankCount = int((hRankRate + mRankRate + lRankRate) * numOfGoods)\r\ncumSum = commentData.cumsum()\r\ncumSum.index = range(numOfGoods)\r\nhRankPoorCount = cumSum.loc[hRankCount - 1]['poorCount'] + \\\r\n                 cumSum.loc[hRankCount - 1]['generalCount'] * 0.5\r\nmRankPoorCount = (cumSum.loc[mRankCount - 1]['poorCount'] - \\\r\n                  cumSum.loc[hRankCount - 1]['poorCount']) * 2.4 + \\\r\n                 (cumSum.loc[mRankCount - 1]['generalCount'] - \\\r\n                  cumSum.loc[hRankCount - 1]['generalCount']) * 1.2\r\nlRankPoorCount = (cumSum.loc[lRankCount - 1]['poorCount'] - \\\r\n                  cumSum.loc[mRankCount - 1]['poorCount']) * 4.4 + \\\r\n                 (cumSum.loc[lRankCount - 1]['generalCount'] - \\\r\n                  cumSum.loc[mRankCount - 1]['generalCount']) * 2.2\r\n\r\nprint(hRankPoorCount)\r\nprint(mRankPoorCount)\r\nprint(lRankPoorCount)\r\nrisk = (hRankPoorCount + mRankPoorCount + lRankPoorCount) / commentCountSum\r\nprint('Weighted risk: %f' % risk)\r\n\r\n# fr\r\n# Correlation between review count and negative reviews: -0.023159\r\n# Rough risk: 0.036157\r\n# 182730.5\r\n# 31965.0\r\n# 16640.0\r\n# Weighted risk: 0.042521\r\n\r\n# ve\r\n# Correlation between review count and negative reviews: -0.028022\r\n# Rough risk: 0.020985\r\n# 9791.0\r\n# 8583.0\r\n# 5797.5\r\n# Weighted risk: 0.036725\r\n\r\n# sample\r\n# Correlation between review count and negative reviews: -0.093818\r\n# Rough risk: 0.017678\r\n# 5220.0\r\n# 7494.0\r\n# 9917.5\r\n# 
Weighted risk: 0.041239\r\n\r\n","sub_path":"deal_comments.py","file_name":"deal_comments.py","file_ext":"py","file_size_in_byte":4748,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"425107348","text":"# Transcribed version\nimport turtle\ndef koch(size,n):\n    if n == 0:\n        turtle.fd(size)\n    else:\n        for angle in [0,60,-120,60]:\n            turtle.left(angle)\n            koch(size/3,n-1)\n\n# if __name__ == '__main__':\n#     turtle.setup(800,400)\n#     turtle.speed(5)\n#     turtle.up()\n#     turtle.goto(300,50)\n#     turtle.down()\n#     turtle.pensize(2)\n#     koch(600,3)\n#     turtle.hideturtle()\n#     turtle.done()\n\n# Advanced version\nif __name__ == '__main__':\n    turtle.setup(800,800)\n    turtle.speed(0)\n    turtle.up()\n    turtle.goto(-300,200)\n    turtle.down()\n    turtle.pensize(2)\n\n    level = 5\n    koch(600,level)\n    turtle.right(120)\n    koch(600,level)\n    turtle.right(120)\n    koch(600,level)\n\n    turtle.hideturtle()\n    turtle.done()","sub_path":"计算机二级/chapter5/实例8科赫曲线绘制.py","file_name":"实例8科赫曲线绘制.py","file_ext":"py","file_size_in_byte":749,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"239340467","text":"\nimport numpy as np\nimport copy as copy\nfrom pymongo import MongoClient\nimport pandas as pd\n\n\nDB_NAME = \"masterarbeit_balthi\"\nDB_HOST = \"ds141328.mlab.com\"\nDB_PORT = 41328\nDB_USER = \"user\"\nDB_PASS = \"userpw123\"\nprint('Connecting to DB...')\nconnection = MongoClient(DB_HOST, DB_PORT)\ndb = connection[DB_NAME]\ndb.authenticate(DB_USER, DB_PASS)\nprint('Connected')\n\npatients = []\nfor p in db.patients.find({}):\n    patients.append(p)\n\nprint(len(patients))\nfeatures = [] #objects\nids = []\n\nfor p in patients:\n    ctf = db.celltypefractions.find({'patientBarcode': p['submitter_id']}, {\"cell_type\":1, \"quanTIseq_lsei_TIL10\":1, \"cibersort_LM22\":1, \"_id\": 0})\n    patientCells = {}\n    patientCells['vital_status'] = p.get('vital_status', None)\n    patientCells['age_at_diagnosis'] = p.get('age_at_diagnosis', None)\n    patientCells['gender'] = p.get('gender', None)\n    patientCells['tumor_stage'] = p.get('tumor_stage', None)\n    patientCells['primary_diagnosis'] = p.get('primary_diagnosis', None)\n    patientCells['morphology'] = p.get('morphology', None)\n    patientCells['days_to_last_follow_up'] = p.get('days_to_last_follow_up', None)\n    patientCells['tissue_or_organ_of_origin'] = p.get('tissue_or_organ_of_origin', None)\n    patientCells['site_of_resection_or_biopsy'] = p.get('site_of_resection_or_biopsy', None)\n\n    for c in ctf:\n        cellType = c['cell_type']\n        cellType_q = cellType + ' - quanTIseq_lsei_TIL10'\n        cellType_c = cellType + ' - cibersort_LM22'\n        patientCells[cellType_q] = c['quanTIseq_lsei_TIL10']\n        patientCells[cellType_c] = c['cibersort_LM22']\n\n    drugs = p.get('clinical_drug_coad', None)\n    if drugs is not None:\n        patientCopy = copy.deepcopy(patientCells)\n        for drug in drugs:\n            patientCopy['pharmaceutical_therapy_drug_name'] = drug.get('pharmaceutical_therapy_drug_name', None)\n            patientCopy['treatment_best_response'] = drug.get('treatment_best_response', None)\n            features.append(patientCopy)\n            ids.append(p['submitter_id'])\n    else:\n        patientCells['pharmaceutical_therapy_drug_name'] = None\n        patientCells['treatment_best_response'] = None\n        features.append(patientCells)\n        ids.append(p['submitter_id'])\n\n\npatientsDF = pd.DataFrame(features, 
ids)\npatientsDF.to_csv('bigAll.csv')","sub_path":"experiment1/createCSVAllPatients.py","file_name":"createCSVAllPatients.py","file_ext":"py","file_size_in_byte":2330,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"305966562","text":"import numpy as np\nfrom keras.datasets import imdb\nfrom keras.models import Sequential\nfrom keras.layers import Dense, LSTM, Flatten\nfrom keras.layers.convolutional import Conv1D, MaxPooling1D\nfrom keras.layers.embeddings import Embedding\nfrom keras.preprocessing import sequence\nfrom keras.preprocessing.text import one_hot\nfrom keras.preprocessing.sequence import pad_sequences\nfrom keras.utils.np_utils import to_categorical\n\nfrom sklearn.datasets import load_files\nfrom sklearn.feature_extraction.text import CountVectorizer\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.metrics import fbeta_score\n\n\nnp.random.seed(7)\n\nimport matplotlib.pyplot as plt\ndef show_overfit_plot(history):\n\tplt.plot(history.history['acc'])\n\tplt.plot(history.history['val_acc'])\n\tplt.legend(['train','test'], loc='upper left')\n\tplt.show()\n\n\n\n\n\ndocuments = load_files('../TEXTDATA/', shuffle=True)\n\nvocabulary_size = 200000 # Should be 150,000\nencoded_docs = np.array([np.array(one_hot(str(d), vocabulary_size)) for d in documents.data])\nnp.save(\"X\", encoded_docs)\nnp.save(\"y\", np.array(documents.target))\n\n\nquit()  # stop after saving the encoded arrays; the code below never runs\n\n\n\n#Truncate and pad sequences\nmax_length = 2000\npadded_docs = sequence.pad_sequences(encoded_docs, maxlen=max_length)\n\nx_train, x_test, y_train, y_test = train_test_split(\n\tpadded_docs, documents.target, test_size=0.3\n)\n\ny_train = np.where((y_train == 2) | (y_train == 1), 1, 0)\ny_test = np.where((y_test == 2) | (y_test == 1), 1, 0)\n\n\nembedding_vector_length = 64\n\ndef create_model(top_words, embedding_vector_length, max_length):\n\tmodel = Sequential()\n\tmodel.add(Embedding(top_words, embedding_vector_length, input_length=max_length, name=\"embedding\"))\n\t#model.add(Flatten())\n\tmodel.add(Conv1D(filters=1, kernel_size=8, padding='same', activation='relu', name=\"convolution\"))\n\t#model.add(MaxPooling1D(pool_size=8))\n\tmodel.add(Flatten(name=\"flat\"))\n\t#model.add(LSTM(256, dropout=0.2, recurrent_dropout=0.2))\n\tmodel.add(Dense(1, activation='sigmoid'))\n\tmodel.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])\n\n\t# Will need to be categorical_crossentropy\n\treturn model\n\nmodel = create_model(vocabulary_size, embedding_vector_length, max_length)\nprint(model.summary())\nhistory = model.fit(x_train, y_train, epochs=3, batch_size=32,\n\t\t\t\t\t\t\t\tvalidation_data=(x_test, y_test))\n\n\nscores = model.evaluate(x_test, y_test, verbose=0)\nprint(\"Accuracy: %.2f%%\" % (scores[1]*100))\n\n\ny_predicted = model.predict(x_test)\ny_predicted = np.where(y_predicted > 0.5, 1, 0)\n\nscore = fbeta_score(y_test, y_predicted, average=None, beta=2)\nprint(f\"F2-scores: {score}\")\nshow_overfit_plot(history)\n\n\nfrom keras.models import Model\nflat = Model(model.input, model.get_layer(\"flat\").output)\nflat_words = flat.predict(x_test)\nimport matplotlib.pyplot as plt\nplt.plot(flat_words[0])\nplt.show()\n\n\n\n\n","sub_path":"document_vectors/sequence_classification.py","file_name":"sequence_classification.py","file_ext":"py","file_size_in_byte":2812,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"557599016","text":"import torch\nimport torch.nn as nn\n\nclass 
RNN(nn.Module):\n    def __init__(self, input_size, hidden_size, output_size):\n        super(RNN, self).__init__()\n\n        self.hidden_size = hidden_size\n        self.i2h = nn.Linear(input_size + hidden_size, hidden_size)\n        self.i2o = nn.Linear(input_size + hidden_size, output_size)\n\n    def forward(self, input, hidden):\n        combined = torch.cat((input, hidden), 1)\n        hidden = self.i2h(combined)\n        output = self.i2o(combined)\n        return output, hidden\n\n    def initHidden(self):\n        return torch.zeros(1, self.hidden_size)\n\nclass MyModel(nn.Module):\n    def __init__(self, device, input_size=27, hidden_size=256, output_size_rnn=128, num_classes=27):\n        super(MyModel, self).__init__()\n\n        self.device = device\n        self.rnn_src = RNN(input_size, hidden_size, output_size_rnn)\n        self.rnn_targ = RNN(input_size, hidden_size, output_size_rnn)\n        self.dense = nn.Linear(output_size_rnn*2, num_classes)\n        self.softmax = nn.LogSoftmax(dim=1)\n\n    def forward(self, input1, input2):\n        hidden1 = self.rnn_src.initHidden().to(self.device)\n        hidden2 = self.rnn_targ.initHidden().to(self.device)\n        for i in range(input1.size()[0]):\n            output1, hidden1 = self.rnn_src(input1[i], hidden1)\n        for i in range(input2.size()[0]):\n            output2, hidden2 = self.rnn_targ(input2[i], hidden2)\n        output = self.dense(torch.cat((output1, output2), -1))\n        output = self.softmax(output)\n\n        return output\n","sub_path":"rnn.py","file_name":"rnn.py","file_ext":"py","file_size_in_byte":1539,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"91383444","text":"#A function that returns the longest palindromic substring of a string of text.\nclass Solution:\n    def longestPalindrome(self, s: str) -> str:\n        palindrome = ''\n        pLen = 0\n        \n        for i in range(len(s)):\n            #odd palindromes\n            leftPtr, rightPtr = i, i #pointers to outer characters of i\n            while leftPtr >= 0 and rightPtr < len(s) and s[leftPtr] == s[rightPtr]: # palindrome check \n                if(rightPtr - leftPtr + 1) > pLen:\n                    palindrome = s[leftPtr : rightPtr + 1]\n                    pLen = rightPtr - leftPtr + 1\n                \n                leftPtr -=1\n                rightPtr +=1\n            \n            #even palindromes 'abba'\n            leftPtr, rightPtr = i, i+1\n            while leftPtr >= 0 and rightPtr < len(s) and s[leftPtr] == s[rightPtr]: #palindrome check\n                if(rightPtr - leftPtr + 1) > pLen:\n                    palindrome = s[leftPtr : rightPtr + 1]\n                    pLen = rightPtr - leftPtr + 1\n                \n                leftPtr -=1\n                rightPtr +=1\n            \n            \n            \n            \n            \n        return palindrome  \n        \n","sub_path":"longestPalindrome.py","file_name":"longestPalindrome.py","file_ext":"py","file_size_in_byte":1225,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"257836295","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n# -*- mode: python -*-\n\"\"\"\n:mod:`ortograf.ortograf` -- Suggest service\n\n========\nOrtograf\n========\n\nSuggest webservice\n\"\"\"\nimport datetime\nimport json\nimport logging\nimport requests\nimport socket\nimport tornado.ioloop\nimport tornado.web\n\nfrom pyutils import BaseHandler\nfrom pyutils import build_info\nfrom pyutils import JSONFormatter\nfrom pyutils import Solr\nfrom pyutils import Stat\nfrom pyutils import Statistics\nfrom pyutils import StaticHandler\nfrom pyutils import StatusHandler\n\nlogger = logging.getLogger(__name__)\nlogging.getLogger(\"requests\").setLevel(logging.WARNING)\n\nSTATS = Statistics(name='ortograf')\n\n\nclass OrtografError(Exception):\n    pass\n\n\nTYPE_MAP = {'all': 'ortograf-all',\n            'creator': 'ortograf-creator',\n            'subject': 'ortograf-subject',\n            'title': 'ortograf-title'}\n\nSTATIC = \"\"\"\n{\"responseHeader\":\n  
{\"status\": 0,\n \"timings\":\n {\"QTime\": 7,\n \"service-time\": 1}},\n \"response\":\n {\"docs\":\n [\n {\"term\": \"dostojevskij f. m.\",\n \"weight\": 19703,\n \"payload\": \"f. m. dostojevskij|creator\",\n \"all\": [\"f. m. dostojevskij\"],\n \"type\": \"creator\"},\n {\"term\": \"dostojevskijs sidste rejse\",\n \"weight\": 2297,\n \"payload\": \"dostojevskijs sidste rejse|title\",\n \"all\": [\"dostojevskijs sidste rejse\"],\n \"type\": \"title\"\n }\n ]\n }\n}\n\"\"\".strip()\n\n\ndef to_milli(delta):\n return delta.total_seconds() * 1000\n\n\ndef validate(params):\n \"\"\" Validates params \"\"\"\n mandatory_params = ['q', 'type']\n for mp in mandatory_params:\n if mp not in params:\n raise OrtografError(\"missing mandatory argument '%s'\" % mp)\n\n if params['type'] not in TYPE_MAP:\n raise OrtografError(\"unknown type '%s'. supported types: %s\" % (params['type'], str(list(TYPE_MAP.keys()))))\n\n for key, value in params.items():\n if key not in ['q', 'count']:\n logger.warning(\"Unknown key '%s' used.\", key)\n\n\nclass OrtografHandler(BaseHandler):\n\n def initialize(self, url):\n self.url = url.rstrip('/')\n\n def get(self):\n with Stat(STATS):\n params = {k: self.get_argument(k) for k in self.request.arguments}\n logger.debug(\"params %s\", params)\n validate(params)\n\n suggestions = self.get_suggestions(params)\n self.write(json.dumps(suggestions))\n\n def get_suggestions(self, params):\n start = datetime.datetime.now()\n url = '%s/%s/suggest' % (self.url, 'ortograf-' + params['type'])\n if 'count' not in params:\n params['count'] = 10\n else:\n params['count'] = int(params['count'])\n r = requests.get(url, params=params)\n if not r.ok:\n r.raise_for_status()\n result = r.json()\n r.close()\n header = {'status': result['responseHeader']['status'],\n 'timings': {'QTime': result['responseHeader']['QTime'],\n 'service-time': to_milli(datetime.datetime.now() - start)}}\n return {'responseHeader': header,\n 'response': {'docs': self.__blend(params['q'], params['count'], result)}}\n\n def __blend(self, query, count, result):\n suggestions = []\n\n if len(query) == 1:\n for suggestion in result['suggest']['fst'][query]['suggestions']:\n suggestion['all'] = [suggestion['term']]\n suggestion['type'] = \"unknown\"\n suggestions.append(suggestion)\n else:\n suggestion_terms = set()\n prio = ['infix', 'blended_infix', 'fuzzy']\n for suggester in prio:\n for suggestion in result['suggest'][suggester][query]['suggestions']:\n all_, type_ = suggestion['payload'].rsplit('|')\n if all_ not in suggestion_terms:\n suggestion['all'] = [all_]\n suggestion['type'] = type_\n suggestions.append(suggestion)\n suggestion_terms.add(all_)\n if len(suggestion_terms) >= count:\n return suggestions\n return suggestions\n\n\ndef make_app(url, root):\n\n core_urls = [Solr(url + core) for core in ['ortograf-all', 'ortograf-creator', 'ortograf-subject', 'ortograf-title']]\n info = build_info.get_info('ortograf')\n handlers = [(r\"/%s/suggest\" % root, OrtografHandler, dict(url=url)),\n (r\"/%s/status\" % root, StatusHandler, dict(ab_id=1, info=info, statistics=[STATS], solr=core_urls)),\n (r\"/%s/static\" % root, StaticHandler, dict(content=STATIC))]\n return tornado.web.Application(handlers)\n\n\ndef main(url, port):\n root = 'ortograf'\n url = url.rstrip('/') + '/'\n app = make_app(url, root)\n logger.info(\"service up at 'http://%s:%s/%s'\" % (socket.gethostname(), port, root))\n app.listen(port)\n tornado.ioloop.IOLoop.current().start()\n\n\ndef setup_logger(json_formatter=None, level=logging.DEBUG, 
logfile_name=None):\n    global logger\n    if not json_formatter:\n        json_formatter = JSONFormatter()\n\n    logger = logging.getLogger('')\n    ch = logging.StreamHandler()\n\n    if logfile_name:\n        ch.setFormatter(logging.Formatter('%(message)s'))\n        fh = logging.FileHandler(logfile_name)\n        fh.setFormatter(json_formatter)\n        fh.setLevel(logging.DEBUG)\n        logger.addHandler(fh)\n    else:\n        ch.setFormatter(json_formatter)\n\n    ch.setLevel(level)\n    logger.addHandler(ch)\n    logger.setLevel(logging.DEBUG)\n\n\ndef cli():\n    \"\"\" Commandline interface \"\"\"\n    import argparse\n\n    parser = argparse.ArgumentParser(description='suggest service')\n\n    parser.add_argument('solr_url', metavar='solr_url', help='solr_url')\n    parser.add_argument('-l', '--logfile', dest='logfile',\n                        help='Name of logfile. Default is ortograf.log', default='ortograf.log')\n    parser.add_argument('-p', '--port', dest='port', type=int, default=7081,\n                        help='port. default is 7081')\n    parser.add_argument('-v', '--verbose', dest='verbose', action='store_true',\n                        help='verbose output')\n\n    args = parser.parse_args()\n\n    structured_formatter = JSONFormatter(tags={'type': 'service', 'port': args.port})\n    level = logging.INFO\n    if args.verbose:\n        level = logging.DEBUG\n    setup_logger(structured_formatter, level, logfile_name=args.logfile)\n\n    main(args.solr_url, args.port)\n","sub_path":"src/ortograf/ortograf.py","file_name":"ortograf.py","file_ext":"py","file_size_in_byte":6436,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"130958255","text":"import tkinter as tk\r\nimport tkinter.messagebox as Msgbox\r\nimport mysql.connector as sql\r\n\r\nclass ChainManagerHome(tk.Frame):\r\n\r\n    def __init__(self, parent, controller):\r\n        tk.Frame.__init__(self, parent)\r\n        self.controller = controller\r\n        self.label = tk.Label(self, text=\"Chain Manager Home\")\r\n        self.label.grid(row=0, column=0)\r\n\r\n        self.view_tech = tk.Button(self, text=\"View Drone Technician\",\r\n                                   command=lambda: self.controller.show_frame('ChainManagerViewDroneTechnicians'))\r\n        self.view_tech.grid(row=1, column=0)\r\n        \r\n        self.view_drones = tk.Button(\r\n            self, text=\"View Drones\", command=lambda: self.controller.show_frame('ChainManagerViewDrones'))\r\n        self.view_drones.grid(row=1, column=1)\r\n        \r\n        self.create_chain_items = tk.Button(\r\n            self, text=\"Create Chain Items\", command=lambda: self.controller.show_frame('ChainManagerCreateChainItem'))\r\n        self.create_chain_items.grid(row=2, column=0)\r\n        \r\n        self.manage_stores = tk.Button(\r\n            self, text=\"Manage Stores\", command=lambda: self.controller.show_frame('ChainManagerManageStores'))\r\n        self.manage_stores.grid(row=2, column=1)\r\n        \r\n        \r\n","sub_path":"scr_part2/ChainManagerHome.py","file_name":"ChainManagerHome.py","file_ext":"py","file_size_in_byte":1267,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"367268464","text":"#!/usr/bin/python\n__author__ = 'ebbers'\n\nimport time\nimport subprocess\nimport os\nfrom datetime import datetime\nfrom PIL import Image\nimport picamera\nfrom fractions import Fraction\n\n\ndef main():\n    time.sleep(10)\n    # config\n    workingDir = '/home/pi/picam/'\n    recordingDir = workingDir + \"temp/\"\n    storageDir = workingDir + \"backup/\"\n    uploadScript = workingDir + 'dropbox_uploader.sh'\n    camName = \"cam1\" # \"cam2\"\n    filenamePrefix = \"capture\"\n    testImage = recordingDir + \"testImage.bmp\"\n    streamImageName = \"streamImage.jpeg\"\n    streamImage = recordingDir + streamImageName\n    
camRes = (640, 480)\n camfps = 5\n rotation = 0 #90\n recordingTime = 60 # seconds\n diskSpaceToReserve = 512 * 1024 * 1024 # 512 MB\n hostList = [\"janek\", \"gabi\", \"hannes\"]\n pingIntervallSleep = 60 # seconds\n uploadDelay = 5*60 # seconds\n pingSensitivity = 10\n threshold = 40\n relSensitivity = 0.02\n testAreaCount = 1\n relTestBorders = [[[0, 1], [0, 1]]]\n streamImageIntervall = 10\n\n # init\n camera = picamera.PiCamera()\n camera.resolution = camRes\n camera.framerate = Fraction(camfps, 1)\n camera.rotation = rotation\n state = \"sleep\"\n testRes = (128, 96)\n sensitivity = relSensitivity * testRes[0] * testRes[1]\n numberOfPingFails = 0\n lastPing = 0\n lastStreamImage = 0\n uploadList = []\n time.sleep(2)\n\n subprocess.call(\"mv %scapture* %s\" % (recordingDir, storageDir), shell=True)\n\n # outsourcing to bash script\n streamerScriptPath = workingDir + 'mjpg-streamer/'\n subprocess.Popen(\"LD_LIBRARY_PATH=%s %smjpg_streamer -i \\\"input_file.so -f %s -n %s\\\" -o \\\"output_http.so -w %swww\\\"\" % (streamerScriptPath, streamerScriptPath, recordingDir, streamImageName, streamerScriptPath), shell=True)\n\n filename = buildfilename(filenamePrefix, camName)\n actualFile = startrecording(camera, recordingDir, filename, 1)\n lastSplit = time.time()\n change = False\n processUpload = subprocess.Popen('%s > /dev/null 2>&1' % uploadScript, shell=True)\n camera.capture(testImage, format=\"bmp\", resize=testRes, use_video_port=True, splitter_port=0)\n bufferNeu = loadTestImage(testImage)\n\n while True:\n\n keepDiskSpaceFree(recordingDir, diskSpaceToReserve, storageDir)\n if time.time() - lastStreamImage > streamImageIntervall:\n camera.capture(streamImage, format=\"jpeg\", use_video_port=True, splitter_port=0)\n lastStreamImage = time.time()\n print(\"captured stream image\")\n\n if state == \"sleep\":\n\n if time.time() - lastSplit > recordingTime:\n filename = buildfilename(filenamePrefix, camName)\n (actualFile, storedFile) = split(camera, recordingDir, storageDir, actualFile, filename)\n lastSplit = time.time()\n\n if time.time() - lastPing > pingIntervallSleep:\n anyHost = sendPing(hostList)\n lastPing = time.time()\n if not anyHost:\n numberOfPingFails += 1\n else:\n numberOfPingFails = 0\n\n if numberOfPingFails >= pingSensitivity:\n state = \"active\"\n print(\"Status: active\")\n numberOfPingFails = 0\n camera.capture(testImage, format=\"bmp\", resize=testRes, use_video_port=True, splitter_port=0)\n bufferNeu = loadTestImage(testImage)\n\n elif state == \"active\":\n\n camera.capture(testImage, format=\"bmp\", resize=testRes, use_video_port=True, splitter_port=0)\n bufferAlt = bufferNeu\n bufferNeu = loadTestImage(testImage)\n change = change or compareImages(bufferNeu, bufferAlt, testAreaCount, relTestBorders, testRes, threshold, sensitivity)\n\n if time.time() - lastSplit > recordingTime:\n filename = buildfilename(filenamePrefix, camName)\n (actualFile, storedFile) = split(camera, recordingDir, storageDir, actualFile, filename)\n lastSplit = time.time()\n if change:\n uploadList.append([storedFile, time.time()])\n change = False\n\n anyHost = sendPing(hostList)\n lastPing = time.time()\n if anyHost:\n state = \"sleep\"\n print(\"Status: sleep\")\n uploadList = []\n\n if uploadList and time.time() - uploadList[0][1] > uploadDelay and processUpload.poll() is not None:\n processUpload = subprocess.Popen('%s upload %s /' % (uploadScript, uploadList[0][0]), shell=True)\n uploadList.pop(0)\n\n\ndef sendPing(hosts):\n for host in hosts:\n response = os.system(\"ping -c 1 -w 
1 \" + host + \" > /dev/null 2>&1\")\n if not response:\n return True\n return False\n\n\n# Keep free space above given level\ndef keepDiskSpaceFree(recordingDir, bytesToReserve, storagePath):\n if getFreeSpace(recordingDir) < bytesToReserve:\n for filename in sorted(os.listdir(storagePath)):\n os.remove(storagePath + filename)\n print(\"Deleted %s%s to avoid filling disk\" % (storagePath, filename))\n if getFreeSpace(recordingDir) > bytesToReserve:\n return\n return\n\n# Get available disk space\ndef getFreeSpace(path):\n st = os.statvfs(path)\n du = st.f_bavail * st.f_frsize\n return du\n\n\ndef buildfilename(prefix, suffix):\n actualtime = datetime.now()\n filename = prefix + \"-%04d%02d%02d-%02d%02d%02d-\" % (actualtime.year, actualtime.month, actualtime.day, actualtime.hour, actualtime.minute, actualtime.second) + suffix\n return filename\n\n\ndef startrecording(camera, recordingDir, filename, port):\n file = recordingDir + filename + \".h264\"\n camera.start_recording(file, format=\"h264\", splitter_port=port)\n return file\n\n\ndef split(camera, recordingDir, storageDir, file, filename):\n camera.stop_recording(splitter_port=1)\n newFile = startrecording(camera, recordingDir, filename, 1)\n storageName = storageDir + filename + \".mp4\"\n print(\"Aufnahme gesplittet\")\n subprocess.call(\"MP4Box -add %s %s\" % (file, storageName + \" > /dev/null 2>&1\"), shell=True)\n print(\"Datei in %s verschoben\" % storageDir)\n subprocess.call(\"rm %s\" % file, shell=True)\n return newFile, storageName\n\n\ndef loadTestImage(path):\n try:\n im = Image.open(path)\n buffer = im.load()\n except:\n buffer = 0\n return buffer\n\n\ndef compareImages(buffer1, buffer2, areaCount, borders, res, threshold, sensitivity):\n changedPixels = 0\n for z in range(0, areaCount): # = range(0,1) with default-values = z will only have the value of 0 = only one scan-area = whole picture\n for x in range(borders[z][0][0], int(borders[z][0][1] * res[0])-1): # = range(0,100) with default-values\n for y in range(borders[z][1][0], int(borders[z][1][1] * res[1])-1): # = range(0,75) with default-values; testBorders are NOT zero-based, buffer1[x,y] are zero-based (0,0 is top left of image, testWidth-1,testHeight-1 is botton right)\n # Just check green channel as it's the highest quality channel\n pixdiff = abs(buffer1[x, y][1] - buffer2[x, y][1])\n if pixdiff > threshold:\n changedPixels += 1\n if changedPixels > sensitivity:\n return True\n return False\n\nif __name__ == \"__main__\":\n main()","sub_path":"picam.py","file_name":"picam.py","file_ext":"py","file_size_in_byte":7435,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"127761428","text":"import dash\nimport dash_core_components as dcc\nimport dash_html_components as html\nimport dash_table as dt\nimport pandas as pd\nfrom dash.dependencies import Input, Output\nimport plotly.graph_objs as go\n\n# Nutrients in the food supply, by source of nutritional equivalent and commodity\n# https://www150.statcan.gc.ca/t1/tbl1/en/tv.action?pid=3210033301&pickMembers%5B0%5D=3.1&pickMembers%5B1%5D=4.35\n\n\nexternal_stylesheets = ['https://codepen.io/chriddyp/pen/bWLwgP.css']\napp = dash.Dash(__name__, external_stylesheets = external_stylesheets)\nserver = app.server\n\ndf= pd.read_csv('Nutrient.csv', low_memory = False)\n\n#SELECTING unique cities,categories, and stars for dropdown menu and slider\nMeasures_indicators = df['Measures'].unique()\nSex_indicators = df['Sex'].unique()\nAge_indicators =df['Age 
group'].unique()\n\n\n\n# Groupby commodity and date for time series graph\ngroupbynutrients = df.groupby(['Measures','REF_DATE']).sum().unstack()['VALUE']\n\n# Create empty trace to store data\ntraces = []\n\n# traces\nfor i in range(0,len(groupbynutrients)):\n    traces.append(go.Scatter(x = groupbynutrients.columns.tolist(), y = groupbynutrients.iloc[i], mode = 'lines',name = str(groupbynutrients.index.tolist()[i])))\n\n# tab2 barplot with drop down\ndropdown_choices = df.groupby(['Measures']).sum().unstack()['VALUE'].index.tolist()\ndropdown_choice = []\n\nfor i in dropdown_choices:\n    dropdown_choice.append({'label':i, 'value':i})\n\ncolors = ['Bluered','Reds', 'Blues', 'Picnic', 'Rainbow', 'Hot', 'Blackbody', 'Earth', 'Viridis']\n\nimport random\n\nrandom_color = random.choice(colors)\n\n\nmapbox_access_token = 'pk.eyJ1IjoiYmlwYXJ0aXRlaHlwZXJjdWJlIiwiYSI6ImNqczFjZzUydjF0MGc0OW1sYWIwYW5lY2UifQ.l6OVeSa3poXp6S4s8km8kA'\n\n\ntabs_styles = {\n    'vertical-align': 'middle',\n    'float': 'left'\n}\n\ntab_style = {\n    'borderBottom': '1px solid #d6d6d6',\n    'padding': '10px',\n    'fontWeight': 'bold'\n}\n\ntab_selected_style = {\n    'borderTop': '1px solid #d6d6d6',\n    'borderBottom': '1px solid #d6d6d6',\n    'backgroundColor': '#119DFF',\n    'color': 'white',\n    'padding': '6px'\n}\n\n# Cumulative Piechart of total supply of milk Products from 1976 to 2019\n# Piechart with year slider or some sort of cumulative slider by year since we can sum up all the records that are in 1976, 1977, etc.\n\n\napp.layout = html.Div(\n    html.Div([\n        html.Div(\n            [\n                html.H1(children='Nutrients in the food supply, by source of nutritional equivalent and commodity',className='nine columns'),\n                html.H6(children='This table contains 12354 series, with data for years 1976 - 2009 (not all combinations necessarily have data for all years), and is no longer being released. 
This table contains data described by the following dimensions (Not all combinations are available): Geography (1 item: Canada); Nutrients (29 items: Calcium;Carbohydrates;Cholesterol;Copper; ...); Source of nutritional equivalent (2 items: Nutrients available;Nutrients available adjusted for losses); Commodity (213 items: All commodities;Cereal products, total;Breakfast food;Corn flour and meal; ...)',\n className = 'nine columns'),\n html.Label(['Please click here to be directed to Statistics Canada', html.A(' Data link ', href='https://www150.statcan.gc.ca/t1/tbl1/en/tv.action?pid=3210033301&pickMembers%5B0%5D=3.1&pickMembers%5B1%5D=4.35')])\n ], className=\"row\"\n ),\n\n #Dropdown menu and slider\n html.Div(\n [dcc.Store(id='memory-output',storage_type='session'),\n #Category Dropdown\n html.Div(\n [\n html.P('Choose Province:',\n style = {'fontWeight':600}\n ),\n\n dcc.Dropdown(id='measure-column',\n options=[{'label': i, 'value': i} for i in Measures_indicators],\n value='Vitamin D (nmol/L)',\n style={'position':'relative', 'zIndex':'999'}\n ),\n ],\n className='six columns',\n style={'margin-top': '0'}\n ),\n\n # Category Dropdown\n html.Div(\n [\n html.P('Choose Commodity: ',\n style={'fontWeight': 600}\n ),\n\n dcc.Dropdown(id='age-column',\n options=[{'label': i, 'value': i} for i in Age_indicators],\n value='Ages 6 to 19',\n style={'position':'relative', 'zIndex':'999'}\n ),\n ],\n className='six columns',\n style={'margin-top': '0'}\n ),\n ], className=\"row\"\n ),\n\n\n html.Div ([\n #DataTable\n html.Div(\n [\n dt.DataTable(\n id='datatable',\n data=df.to_dict('rows'),\n columns=[\n {\"name\": i, \"id\": i, \"deletable\": True} for i in df[['REF_DATE','GEO','DGUID','Measures', 'Sex', 'Age group', 'Statistics', 'SCALAR_FACTOR', 'VALUE']].columns\n ],\n style_header={'backgroundColor':\n 'rgb(192, 192, 192)',\n 'color':'#333'},\n style_cell={'textAlign': 'left',\n 'minWidth': '0px','maxWidth': '180px',\n 'whiteSpace': 'normal',\n 'backgroundColor': 'rgb(249, 249, 249)',\n 'color':'#333'\n },\n css=[{\n 'selector': '.dash-cell div.dash-cell-value',\n 'rule': 'display: inline; white-space: inherit; overflow: inherit; text-overflow: inherit;'\n }],\n\n\n style_table={\n 'maxHeight': '500',\n 'overflowY': 'scroll',\n 'overflowX': 'scroll'\n },\n\n pagination_mode=\"fe\",\n pagination_settings={\n \"displayed_pages\": 1,\n \"current_page\": 0,\n \"page_size\": 15,\n },\n\n navigation=\"page\",\n n_fixed_rows=1,\n sorting=True,\n sorting_type=\"multi\",\n selected_rows=[]\n ),\n ], className=\"twelve columns\", style={'margin-top': '10'}\n )\n ], className=\"row\"\n ),\n html.Div(\n children=[\n dcc.Tabs(id='tabs', vertical=True, children=[\n dcc.Tab(label='Time Series', style=tab_style, selected_style=tab_selected_style,\n children=[html.Div([dcc.Graph(id='nutrient utilization',\n figure={'data': traces,\n 'layout': go.Layout(title='Nutrient Utilization',\n titlefont=dict(family='Arial', size=18,\n color='black'), width=1300,\n height=500)})])]),\n\n dcc.Tab(label='Barplot', style=tab_style, selected_style=tab_selected_style,\n children=[html.Div([dcc.Dropdown(id='my-dropdown', options=dropdown_choice,\n value='Ferritin (µg/L)',\n style={'height': '40px', 'width': '1300px', 'font-size': \"100%\",\n 'min-height': '3px', }),\n html.Div(id='graph-with-dropdown')])]),\n\n dcc.Tab(label='Pie chart', style=tab_style, selected_style=tab_selected_style,\n children = [html.Div([dcc.Slider(id='year-slider', min=2013, max=2017, step=1, value=2015),\n html.Div(id='graph-with-slider')])]),\n\n 
dcc.Tab(label='Geolocation', style=tab_style, selected_style=tab_selected_style,\n children=[html.Div([dcc.Graph(id='geolocation-plot', config={'scrollZoom': True},\n figure={'data': [],\n 'layout': go.Layout(autosize=False, width=1300, height=600,\n hovermode='closest',\n mapbox=dict(\n accesstoken=mapbox_access_token,\n bearing=0,\n center=dict(lat=53, lon=-100),\n pitch=35, zoom=3),\n title='No Geolocation Data')})])])],\n parent_style={'float': 'left'}),\n html.Div(id='tab-output', style={'float': 'left', 'width': '400'})])\n ]))\n\n@app.callback(dash.dependencies.Output('datatable', 'data'),\n [dash.dependencies.Input('measure-column', 'value'),\n dash.dependencies.Input('age-column', 'value')\n ])\n\ndef update_rows(selected_measure,selected_age):\n filtered = df.loc[( df['Measures'] == selected_measure) & (df['Age group'] == selected_age)]\n return filtered.to_dict('rows')\n\n\n@app.callback(dash.dependencies.Output('graph-with-dropdown', 'children'),[dash.dependencies.Input('my-dropdown', 'value')])\n\ndef update_figure(value):\n data_milk = df[df['Measures'] == value].groupby(['Measures', 'Age group']).sum().unstack()['VALUE']\n y_values = data_milk.iloc[0].tolist()\n x_values = data_milk.columns.tolist()\n\n # return the dcc.Graph so it will graph out\n return html.Div([dcc.Graph(id='Graph-with-dropdown',\n figure={'data': [go.Bar(x=x_values, y=y_values, name=str(value), marker={\n 'color': y_values,\n 'colorscale':random_color,\n 'showscale': True,\n 'reversescale': True,\n 'line': dict(color='rgb(8,48,107)', width=2)})],\n 'layout':go.Layout(title = (str(value) + ' by Age groups'),width = 1300, height = 500,\n titlefont=dict(family='Arial',size=18,color='black'),\n yaxis=dict(title='Tonnes',titlefont=dict(family='Arial',size=18,color='black')),\n xaxis=dict(title='Category',titlefont=dict(family='Arial',size=18,color='black')))})])\n\n\n@app.callback(dash.dependencies.Output('graph-with-slider', 'children'),[dash.dependencies.Input('year-slider', 'value')])\n\ndef update_figure1(value):\n data_pie = df.groupby(['REF_DATE', 'Measures']).sum()['VALUE'][value]\n pie_labels = data_pie.index.tolist()\n pie_values = data_pie.values.tolist()\n\n return html.Div([dcc.Graph(id='Graph-with-slider',\n figure=go.Figure(\n data=[go.Pie(labels=pie_labels,\n values=pie_values,\n hoverinfo='label+value',\n textinfo='percent',\n textfont=dict(size=10),\n marker=dict(colors=colors,\n line=dict(color='#000000', width=2)))],\n layout=go.Layout(\n title='Nutritional status of '+str(value)+' in the household population',\n height=600,\n width=1300\n )))])\n\n\nif __name__ == '__main__':\n app.run_server(debug=True, port=8000)\n","sub_path":"Capstone/Code/UtilizationDashboard/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":12754,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"85865484","text":"import os, sys\r\nimport numpy as np\r\nimport pandas as pd\r\nfrom math import ceil\r\n\r\nBASE_DIR = os.path.dirname(os.path.abspath(__file__))\r\nsys.path.append(BASE_DIR)\r\nPOINTS_PATH = os.path.join(BASE_DIR, 'data/points')\r\nSPLIT_PATH = os.path.join(BASE_DIR, 'data/split')\r\n\r\n\r\ndef train_test_split(x, y, test_size, train_file, test_file):\r\n\tlabels = {}\r\n\ttest = []\r\n\ttrain = []\r\n\r\n\tprint(x.shape)\r\n\tprint(y.shape)\r\n\r\n\tfor i, val in enumerate(x):\r\n\t\tif y[i][0] in labels:\r\n\t\t\tlabels[y[i][0]].append(i)\r\n\t\telse:\r\n\t\t\tlabels[y[i][0]] = [i]\r\n\r\n\tfor key, val in 
labels.items():\r\n\t\tlength = ceil(len(val) * test_size)\r\n\t\tnp.random.shuffle(val)\r\n\t\tfor i in range(length):\r\n\t\t\ttest.append(val[i])\r\n\t\tfor i in range(length, len(val)):\r\n\t\t\ttrain.append(val[i])\r\n\r\n\tpd.DataFrame(train).to_csv(train_file, header=None, index=None)\r\n\tpd.DataFrame(test).to_csv(test_file, header=None, index=None)\r\n\r\n\r\ndef main():\r\n\tif not os.path.exists(POINTS_PATH):\r\n\t\tprint('Please run augment.py to augment the point clouds first.')\r\n\t\treturn\r\n\tif not os.path.exists(SPLIT_PATH):\r\n\t\tos.mkdir(SPLIT_PATH)\r\n\r\n\tx = np.load(os.path.join(POINTS_PATH, 'points_crop.npy'))\r\n\ty = pd.read_csv(os.path.join(POINTS_PATH, 'labels_crop.csv'), header=None).values\r\n\ttrain_test_split(x, y, 0.2, os.path.join(SPLIT_PATH, 'train_points.csv'), os.path.join(SPLIT_PATH, 'test_points.csv'))\t\r\n\r\n\tx = np.load(os.path.join(POINTS_PATH, 'augment.npy'))\r\n\ty = pd.read_csv(os.path.join(POINTS_PATH, 'augment.csv'), header=None).values\r\n\ttrain_test_split(x, y, 0.2, os.path.join(SPLIT_PATH, 'train_augment.csv'), os.path.join(SPLIT_PATH, 'test_augment.csv'))\r\n\r\nif __name__ == '__main__':\r\n\tmain()\r\n","sub_path":"split.py","file_name":"split.py","file_ext":"py","file_size_in_byte":1641,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"177852337","text":"##\n## /src/datasets/common/dataset_writer.py\n##\n## Created by Paul Warkentin on 13/07/2018.\n## Updated by Paul Warkentin on 25/07/2018.\n##\n\nimport numpy as np\nimport os\nimport sys\nimport tensorflow as tf\n\n__exec_dir = sys.path[0]\nwhile os.path.basename(__exec_dir) != \"src\":\n\t__exec_dir = os.path.dirname(__exec_dir)\n\tsys.path.insert(0, __exec_dir)\n\nfrom utils.common.files import mkdir\n\n\nclass DatasetWriter(object):\n\t\"\"\"Handle simple writing to TFRecords files.\n\n\tAttributes:\n\t\tpath: TFrecords file path.\n\t\tcompression_type: Compression of the TFRecords file.\n\t\twriter: Writer for TFRecords files.\n\t\"\"\"\n\n\tdef __init__(self, path, compression_type = tf.python_io.TFRecordCompressionType.GZIP):\n\t\t\"\"\"Initialize the class.\n\n\t\tArguments:\n\t\t\tpath: TFrecords file path.\n\t\t\tcompression_type: Compression of the TFRecords file. 
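# --- Usage sketch for the DatasetWriter class being defined here, assuming
# the API exactly as written in this record and a TF 1.x runtime (where
# tf.python_io exists). The path is illustrative.
writer = DatasetWriter('/tmp/example')       # '.tfrecords' is appended
writer.open()
writer.write_single_example({
    'label': writer.int64_feature(1),
    'vector': writer.float_feature([0.1, 0.2, 0.3]),
    'name': writer.bytes_feature('sample-0'),
})
writer.close()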
Defaults to tf.python_io.TFRecordCompressionType.GZIP.\n\t\t\"\"\"\n\t\tself.path = path\n\t\tif not self.path.endswith(\".tfrecords\"):\n\t\t\tself.path = \"{}.tfrecords\".format(self.path)\n\t\tself.compression_type = compression_type\n\n\t\tself.writer = None\n\n\n\tdef open(self):\n\t\t\"\"\"Open the TFRecords file.\n\t\t\"\"\"\n\t\tparent_path = os.path.dirname(self.path)\n\t\tmkdir(parent_path)\n\n\t\t# open the TFRecords file\n\t\toptions = None\n\t\tif self.compression_type is not None:\n\t\t\toptions = tf.python_io.TFRecordOptions(self.compression_type)\n\t\tself.writer = tf.python_io.TFRecordWriter(self.path, options=options)\n\n\n\tdef close(self):\n\t\t\"\"\"Close the TFRecords file.\n\t\t\"\"\"\n\t\tself.writer.close()\n\t\tself.writer = None\n\n\n\tdef write_single_example(self, features):\n\t\t\"\"\"Write a single sample of features to the TFRecords file.\n\n\t\tArguments:\n\t\t\tfeatures: Dictionary containing features.\n\t\t\"\"\"\n\t\texample = tf.train.Example(features=tf.train.Features(feature=features))\n\t\tself.writer.write(example.SerializeToString())\n\n\n\tdef int64_feature(self, value):\n\t\t\"\"\"Convert an integer to a TFRecords compatible feature.\n\n\t\tArguments:\n\t\t\tvalue: Integer or list of integers.\n\n\t\tReturns:\n\t\t\tTFRecords compatible feature.\n\t\t\"\"\"\n\t\tif not isinstance(value, (list, tuple)):\n\t\t\tvalue = [value]\n\t\treturn tf.train.Feature(int64_list=tf.train.Int64List(value=list(value)))\n\n\n\tdef float_feature(self, value):\n\t\t\"\"\"Convert a float to a TFRecords compatible feature.\n\n\t\tArguments:\n\t\t\tvalue: Float or list of floats.\n\n\t\tReturns:\n\t\t\tTFRecords compatible feature.\n\t\t\"\"\"\n\t\tif not isinstance(value, (list, tuple)):\n\t\t\tvalue = [value]\n\t\treturn tf.train.Feature(float_list=tf.train.FloatList(value=list(value)))\n\n\n\tdef bytes_feature(self, value):\n\t\t\"\"\"Convert bytes to a TFRecords compatible feature.\n\n\t\tArguments:\n\t\t\tvalue: Bytes or list of bytes.\n\n\t\tReturns:\n\t\t\tTFRecords compatible feature.\n\t\t\"\"\"\n\t\tif not isinstance(value, (list, tuple)):\n\t\t\tvalue = [value]\n\t\tvalue_bytes = []\n\t\tfor v in value:\n\t\t\tif isinstance(v, bytes):\n\t\t\t\tvalue_bytes.append(v)\n\t\t\t\tcontinue\n\t\t\tvalue_bytes.append(tf.compat.as_bytes(v))\n\t\treturn tf.train.Feature(bytes_list=tf.train.BytesList(value=list(value_bytes)))\n\n\n\tdef dtype_feature(self, value):\n\t\t\"\"\"Convert an array to a TFRecords compatible feature.\n\n\t\tArguments:\n\t\t\tvalue: Float or integer array.\n\n\t\tReturns:\n\t\t\tTFRecords compatible feature.\n\t\t\"\"\"\n\t\tif value.dtype == np.float32 or value.dtype == np.float64:\n\t\t\treturn tf.train.Feature(float_list=tf.train.FloatList(value=value))\n\t\telif value.dtype == np.int64:\n\t\t\treturn tf.train.Feature(int64_list=tf.train.Int64List(value=value))\n\t\traise TypeError(\"The dtype '{}' is not supported.\".format(value.dtype))\n","sub_path":"src/datasets/common/dataset_writer.py","file_name":"dataset_writer.py","file_ext":"py","file_size_in_byte":3488,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"638705977","text":"from math import * # needed for the sqrt() function.\nspeed(30)\nsize(400,400)\ndef circle(x,y,r):\n oval(x-r,y-r,r*2,r*2)\n\n\n# Takes two positions and computes the repulsive force (inverse 6th power).\ndef Repulsion( pos1,pos2 ):\n dx = pos1[0] - pos2[0]\n dy = pos1[1] - pos2[1]\n r = sqrt(dx**2+dy**2)\n f = 6.0 / r**8\n fx = f * dx\n fy = f * dy\n return [fx,fy]\n \n# Takes two positions and computes the spring force.\ndef Spring( pos1,pos2 ):
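# --- A short check of the Repulsion function defined just above. Reading its
# "inverse 6th power" comment as describing the potential U(r) = 1/r**6, the
# radial force magnitude is -dU/dr = 6/r**7, and its x-component is
# (6/r**7) * (dx/r) = 6*dx/r**8, which matches f = 6.0 / r**8; fx = f * dx.
from math import sqrt

dx, dy = 3.0, 4.0
r = sqrt(dx**2 + dy**2)                      # r = 5.0
fx = (6.0 / r**8) * dx                       # as computed by Repulsion
assert abs(fx - (6.0 / r**7) * (dx / r)) < 1e-15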
\n k = 5.0\n dx = pos1[0] - pos2[0]\n dy = pos1[1] - pos2[1]\n r = sqrt(dx**2+dy**2)\n fx = -k * dx\n fy = -k * dy\n return [fx,fy]\n\n# Compute the interactions between all bodies and obtain their accelerations.\ndef Force(objs):\n # number of bodies\n n = len(objs)\n # for each body\n for i in range(0,n):\n # accumulate the force acting on body i into f.\n f = [0.0,0.0]\n # the force exerted on body i by the external field is computed here.\n # x,y = objs[i][0] puts the coordinates into x,y; use them to compute the external force.\n x,y = objs[i][0]\n dx = x - 20\n dy = y - 20\n r = sqrt(dx**2+dy**2)\n k = 0.4\n fx = -k * dx\n fy = -k * dy\n f = [fx,fy]\n # for each of the other bodies\n for j in range(0,n):\n # a body does not interact with itself\n if i != j:\n # pass the positions of bodies i and j\n # and compute the repulsion.\n rep = Repulsion( objs[i][0], objs[j][0] )\n # rep holds the force components\n f[0] += rep[0]\n f[1] += rep[1]\n # spring forces act only between specific pairs.\n if ((i==0 and j==1) or\n (i==1 and j==0) or\n (i==1 and j==2) or\n (i==2 and j==1)):\n spr = Spring( objs[i][0], objs[j][0] )\n f[0] += spr[0]\n f[1] += spr[1]\n mass = objs[i][3]\n # compute the acceleration.\n ax = f[0] / mass\n ay = f[1] / mass\n # attribute 2 of body i = acceleration\n objs[i][2] = [ax,ay]\n\n# Add the accelerations to the velocities.\ndef Accel(objs,Dt):\n # number of bodies\n n = len(objs)\n # for each body\n for i in range(0,n):\n # computing on the lists directly is hard to read,\n # so unpack into variables, compute, then store back.\n vx,vy = objs[i][1]\n ax,ay = objs[i][2]\n vx += ax * Dt\n vy += ay * Dt\n objs[i][1] = [vx,vy]\n\n# Add the velocities to the positions.\ndef Progress(objs,Dt):\n # number of bodies\n n = len(objs)\n # for each body\n for i in range(0,n):\n # computing on the lists directly is hard to read,\n # so unpack into variables, compute, then store back.\n x,y = objs[i][0]\n vx,vy = objs[i][1]\n x += vx * Dt\n y += vy * Dt\n objs[i][0] = [x,y]\n\n# Draw the bodies.\ndef Show(objs,zoom):\n n = len(objs)\n # each body is a circle.\n for i in range(0,n):\n x,y = objs[i][0]\n circle(x*zoom,y*zoom,zoom/2)\n # springs are drawn as lines.\n # connect bodies 0-1 and 1-2 with lines.\n stroke(0)\n line(objs[0][0][0]*zoom,objs[0][0][1]*zoom,\n objs[1][0][0]*zoom,objs[1][0][1]*zoom)\n line(objs[1][0][0]*zoom,objs[1][0][1]*zoom,\n objs[2][0][0]*zoom,objs[2][0][1]*zoom)\n \n \n# Initialize the animation.\ndef setup():\n # all dynamical variables are initialized in setup()\n # so they can be shared with the draw() function.\n global objs,Dt\n # in order: position, velocity, acceleration, mass.\n objs = [[[10.0, 10.0], [0.0,0.0], [0.0,0.0], 1.0],\n [[13.0, 10.0], [0.0,0.0], [0.0,0.0], 1.0],\n [[11.0, 15.0], [0.0,0.0], [0.0,0.0], 1.0],\n [[11.0, 20.0], [0.0,0.0], [0.0,0.0], 1.0],\n [[11.0, 25.0], [0.0,0.0], [0.0,0.0], 1.0],\n [[11.0, 30.0], [0.0,0.0], [0.0,0.0], 1.0],\n [[11.0, 35.0], [0.0,0.0], [0.0,0.0], 1.0],\n [[11.0, 40.0], [0.0,0.0], [0.0,0.0], 1.0],]\n Dt = 0.01\n# Draw one animation frame.\ndef draw():\n global objs,Dt\n objs[0][0] = [MOUSEX/10.0, MOUSEY/10.0]\n # compute the forces on all bodies and obtain the accelerations\n Force(objs)\n # advance all body velocities by Dt.\n Accel(objs,Dt)\n # advance all body positions by Dt.\n Progress(objs,Dt)\n # draw all the bodies.\n Show(objs,zoom=10.0)","sub_path":"PythonSamples/NodeBox/multibody.py","file_name":"multibody.py","file_ext":"py","file_size_in_byte":4622,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"153974067","text":"#!/usr/bin/python3\r\n# -*- coding: utf-8 -*-\r\n#\r\n# Purpose: Plotting the top 20 localities in Virginia...\r\n#\r\nfrom datetime import datetime\r\nfrom datetime import timedelta\r\nfrom timeit import default_timer as timer\r\n\r\nimport os\r\nfrom os import listdir, path\r\nfrom os.path import isfile, join\r\nimport numpy as np\r\nimport sys\r\n\r\nimport covid_structures as cs\r\nimport covid_tools as ct\r\nimport covid_models as cm\r\n\r\nif __name__ == '__main__':\r\n\tstart = timer()\r\n\tnow = datetime.now()\r\n\tdt_string = now.strftime('%d/%m/%Y %H:%M:%S')\r\n\tprint('Starting... 
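# --- A note on the integrator in the multibody record above: each frame runs
# Accel() before Progress(), so the position update uses the already-updated
# velocity. That is semi-implicit (symplectic) Euler, which stays stable on
# oscillating spring systems far longer than updating position first. The
# same step for a single 1-D mass on a spring:
def euler_step(x, v, k, m, dt):
    a = -k * x / m       # spring acceleration
    v = v + a * dt       # velocity first (Accel)...
    x = x + v * dt       # ...then position from the new velocity (Progress)
    return x, v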
(' + dt_string + ' Z)')\r\n\t\r\n\tworld = ct.fetchWorld()\r\n\t\r\n\tbasepath = path.abspath(path.join(path.dirname(__file__), '..', 'tmp',))\r\n\tif not os.path.exists(basepath): os.makedirs(basepath)\r\n\t\r\n\t# plot individual areas\r\n\tset = {}\r\n\tsort_set = {}\r\n\tv_thresh = 5 # threshhold for starting particular plots\r\n\t\r\n\tstate = world.getArea('US').getArea('Virginia')\r\n\tfor area in state.areas():\r\n\t\t# delete if exists - not necessary but can be useful\r\n\t\tfilename = path.abspath(path.join(basepath, area.a['name'].replace(' ','_').replace(',','') + '.png'))\r\n\t\tif os.path.isfile(filename):\r\n\t\t\tos.remove(filename)\r\n\t\t# filter out more affected areas\r\n\t\tif max(area.getData('CONFIRMED')) > 100:\r\n\t\t\tset[area.key()] = area\r\n\t\t\tsort_set[area.key()] = int(area.getData('CONFIRMED')[-1]) # last value\r\n\t\t\tprint('Plotting ' + area.name() + '...')\r\n\t\t\tct.simplePlot(area, area.a['name'], filename, v_thresh)\r\n\t\r\n\t# work with the top 20 subset\r\n\tprint('++++++++++++++++++++++++++++++++++++++++++++')\r\n\tbag = {}\r\n\ti = 0\r\n\tfor k, v in sorted(sort_set.items(), key = lambda kv:(kv[1], kv[0]), reverse = True):\r\n\t\tprint(k, v)\r\n\t\tbag[k] = set[k]\r\n\t\ti += 1\r\n\t\tif (i > 20): break\r\n\t\r\n\t# plot top 20 subset\r\n\tprint('++++++++++++++++++++++++++++++++++++++++++++')\r\n\tfilename = path.abspath(path.join(basepath, 'multiplot_va_c.png'))\r\n\tct.multiPlot(bag, 'Confirmed', filename, 'CONFIRMED', v_thresh)\r\n\tfilename = path.abspath(path.join(basepath, 'multiplot_va_d.png'))\r\n\tct.multiPlot(bag, 'Deaths', filename, 'DEATHS', v_thresh, overlay=['avg'])\r\n\t\r\n\tprint('\\nDone.')\r\n\tduration = timer()-start\r\n\tprint('Execution Time: {:0.2f}s'.format(duration))\r\n","sub_path":"code/test_plotting_va.py","file_name":"test_plotting_va.py","file_ext":"py","file_size_in_byte":2152,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"451179968","text":"\r\n# coding: utf-8\r\n\r\n# In[1]:\r\n\r\nimport os\r\nimport math\r\nimport random\r\n\r\nimport numpy as np\r\nimport tensorflow as tf\r\nimport cv2\r\nimport glob\r\nimport os\r\nimport argparse\r\nimport lxml.etree\r\n\r\nfrom datetime import datetime\r\n\r\nslim = tf.contrib.slim\r\n\r\n# In[2]:\r\n\r\n#get_ipython().magic('matplotlib inline')\r\nimport matplotlib.pyplot as plt\r\nimport matplotlib.image as mpimg\r\n#from skimage import io\r\nimport time\r\nimport subprocess\r\n\r\nimport pafy\r\n\r\nfrom common import *\r\n\r\ndef parseXML(xmlfile):\r\n \r\n # create element tree object\r\n tree = lxml.etree.parse(xmlfile)\r\n \r\n # get root element\r\n entry = tree.getroot()\r\n \r\n filename = entry.xpath('/annotation/filename/text()')[0] \r\n name = entry.xpath('/annotation/object/name/text()')[0] \r\n xmin = entry.xpath('/annotation/object/bndbox/xmin/text()')[0] \r\n #print(\"xmin\",xmin)\r\n ymin = entry.xpath('/annotation/object/bndbox/ymin/text()')[0] \r\n xmax = entry.xpath('/annotation/object/bndbox/xmax/text()')[0] \r\n ymax = entry.xpath('/annotation/object/bndbox/ymax/text()')[0] \r\n\r\n # create empty list for news items\r\n box = [filename,name,int(xmin),int(ymin),int(xmax),int(ymax)]\r\n \r\n return box\r\n\r\n# In[3]:\r\n\r\nimport sys\r\nsys.path.append('../')\r\n\r\nprecision = 10\r\ndef getCurrentClock():\r\n #return time.clock()\r\n return datetime.now()\r\n\r\n\r\ndef click_and_crop(event, x, y, flags, param):\r\n\t# grab references to the global variables\r\n\tglobal refPt, cropping, 
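# --- The ranking loop in the COVID record above sorts the whole sort_set
# dict and breaks out after ~20 entries. For just the top N, a heap does the
# same selection without the full sort (a sketch; ties may order differently):
import heapq

def top_n(sort_set, n=20):
    return heapq.nlargest(n, sort_set.items(), key=lambda kv: kv[1])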
tracks\r\n\r\n\t# if the left mouse button was clicked, record the starting\r\n\t# (x, y) coordinates and indicate that cropping is being\r\n\t# performed\r\n\tif event == cv2.EVENT_LBUTTONDOWN:\r\n\t\trefPt = [(x, y)]\r\n\r\n\t# check to see if the left mouse button was released\r\n\telif event == cv2.EVENT_LBUTTONUP:\r\n\t\t# record the ending (x, y) coordinates and indicate that\r\n\t\t# the cropping operation is finished\r\n\t\trefPt.append((x, y))\r\n\t\tif abs(refPt[0][0]-refPt[1][0]) > 10:\r\n\t\t cropping = True\r\n\t\t tracks = [] #reset tracking\r\n\t\telse:\r\n\t\t\tcropping = False\r\n\t\t#cropping = False\r\n\t\t# draw a rectangle around the region of interest\r\n\t\t#cv2.rectangle(img, refPt[0], refPt[1], (0, 255, 0), 2)\r\n\t\t#cv2.imshow(\"ssd\", img)\r\n\r\n# In[4]:\r\n\r\n\r\n# construct the argument parser and parse the arguments\r\nap = argparse.ArgumentParser()\r\nap.add_argument(\"-v\", \"--video\", help=\"path to the video file\")\r\nap.add_argument(\"-s\", \"--skipNr\", help=\"skip frames nr\")\r\nap.add_argument(\"-m\", \"--modelType\", help=\"model type\")\r\nap.add_argument(\"-t\", \"--trackCnt\", help=\"track max corners\")\r\nap.add_argument(\"-w\", \"--webcam\", help=\"webcam mode\")\r\nap.add_argument(\"-r\", \"--resolution\", help=\"resolution default (640,480)\")\r\nap.add_argument(\"-f\", \"--framerate\", help=\"frames per second, default 30\")\r\n#ap.add_argument(\"-i\", \"--images\", help=\"path to the images\")\r\n\r\nargs = vars(ap.parse_args())\r\n\r\n#url = args.get(\"images\", None)\r\n\r\nmodelType=args.get(\"modelType\", None)\r\n\r\nif modelType is None :\r\n modelType = \"ssd\"\r\n\r\nwebcam=args.get(\"webcam\", None)\r\n\r\nif webcam is None or int(webcam)==0:\r\n webcam = False\r\nelse:\r\n webcam = True\r\n\r\ntracking=args.get(\"trackCnt\", None)\r\n\r\nif tracking is None:\r\n tracking = 0\r\nelse:\r\n tracking = int(tracking) \r\n\r\nframerate=args.get(\"framerate\", None)\r\n\r\nif framerate is None:\r\n framerate = 30\r\nelse:\r\n framerate = int(framerate) \r\n\r\n\r\n#procWidth = 1920 #640 # processing width (x resolution) of frame\r\n#procHeight = 1080 # processing width (x resolution) of frame\r\nprocWidth = 1280 # processing width (x resolution) of frame\r\nprocHeight = int(procWidth*(1080/1920)) # processing width (x resolution) of frame\r\n\r\nresolution=args.get(\"resolution\", None)\r\n\r\nif resolution is None:\r\n (procWidth,procHeight) = (640,480)\r\nelse:\r\n (procWidth,procHeight) = resolution.split(\",\") \r\n\r\nprocWidth = int(procWidth)\r\nprocHeight = int(procHeight)\r\n\r\nshapeWidth=512\r\nshapeHeight=512\r\nshapeWidth=300\r\nshapeHeight=300\r\n\r\nif modelType==\"ssd\":\r\n if shapeWidth==300:\r\n from nets import ssd_vgg_300, ssd_common, np_methods\r\n else:\r\n from nets import ssd_vgg_512, ssd_common, np_methods\r\n from preprocessing import ssd_vgg_preprocessing\r\n import visualization\r\nelif modelType==\"tensorflow\":\r\n from utils import label_map_util\r\n from utils import visualization_utils as vis_util\r\n from collections import defaultdict\r\n from PIL import Image\r\n\r\nprint(\"procWidth\",procWidth,\"procHeight\", procHeight)\r\n\r\n#print(\"Test\")\r\n\r\n# In[5]:\r\n\r\n# In[6]:\r\n\r\n\r\ndef is_number(s):\r\n try:\r\n float(s)\r\n return True\r\n except ValueError:\r\n return False\r\n\r\nurl = args.get(\"video\", None)\r\n\r\nif url is None:\r\n url = \"https://www.youtube.com/watch?v=uuQlMCMT71I\"\r\n\r\nskipNr=args.get(\"skipNr\", None)\r\n\r\nif skipNr is not None :\r\n skipNr = 
int(skipNr)\r\nelse:\r\n skipNr=0\r\n\r\nprint(\"skipNr\", skipNr)\r\n\r\n#A smooth drive in The Crew on PS4 - OSSDC Simulator ACC Train 30fps\r\n#videoUrl = subprocess.Popen(\"youtube-dl.exe -f22 -g https://www.youtube.com/watch?v=uuQlMCMT71I\", shell=True, stdout=subprocess.PIPE).stdout.read()\r\n#videoUrl = videoUrl.decode(\"utf-8\").rstrip()\r\n\r\n\r\ndef getVideoURL(url):\r\n videoUrl = url\r\n video = pafy.new(url)\r\n streams = video.streams\r\n videoUrlList={}\r\n for s in streams:\r\n videoUrlList[s.resolution] = s.url\r\n #print(s.resolution, s.extension, s.get_filesize(), s.url)\r\n\r\n if videoUrlList.get(\"1280x720\",None) is not None:\r\n videoUrl = videoUrlList.get(\"1280x720\",None)\r\n print(\"1280x720\")\r\n\r\n if videoUrlList.get(\"1920x1080\",None) is not None:\r\n videoUrl = videoUrlList.get(\"1920x1080\",None)\r\n print(\"1920x1080\")\r\n return videoUrl\r\n\r\norigVideoUrl = url\r\n\r\nif \"youtube.\" in url: \r\n videoUrl = getVideoURL(url)\r\nelse:\r\n videoUrl = url\r\n\r\n# if the video argument is None, then we are reading from webcam\r\n#videoUrl = args.get(\"video\", None)\r\n\r\nprint(\"videoUrl=\",videoUrl)\r\n\r\ndef load_image_into_numpy_array(image):\r\n (im_width, im_height) = image.size\r\n return np.array(image.getdata()).reshape(\r\n (im_height, im_width, 3)).astype(np.uint8)\r\n\r\n\r\n\r\n# ## Post-processing pipeline\r\n# \r\n# The SSD outputs need to be post-processed to provide proper detections. Namely, we follow these common steps:\r\n# \r\n# * Select boxes above a classification threshold;\r\n# * Clip boxes to the image shape;\r\n# * Apply the Non-Maximum-Selection algorithm: fuse together boxes whose Jaccard score > threshold;\r\n# * If necessary, resize bounding boxes to original image shape.\r\n\r\n# In[7]:\r\n\r\n# Main image processing routine.\r\ndef process_image(img, select_threshold=0.5, nms_threshold=.45, net_shape=(shapeWidth, shapeHeight)):\r\n # Run SSD network.\r\n rimg, rpredictions, rlocalisations, rbbox_img = isess.run([image_4d, predictions, localisations, bbox_img],\r\n feed_dict={img_input: img})\r\n \r\n # Get classes and bboxes from the net outputs.\r\n rclasses, rscores, rbboxes = np_methods.ssd_bboxes_select(\r\n rpredictions, rlocalisations, ssd_anchors,\r\n select_threshold=select_threshold, img_shape=net_shape, num_classes=21, decode=True)\r\n \r\n rbboxes = np_methods.bboxes_clip(rbbox_img, rbboxes)\r\n rclasses, rscores, rbboxes = np_methods.bboxes_sort(rclasses, rscores, rbboxes, top_k=400)\r\n rclasses, rscores, rbboxes = np_methods.bboxes_nms(rclasses, rscores, rbboxes, nms_threshold=nms_threshold)\r\n # Resize bboxes to original image shape. Note: useless for Resize.WARP!\r\n rbboxes = np_methods.bboxes_resize(rbbox_img, rbboxes)\r\n return rclasses, rscores, rbboxes\r\n\r\n\r\n\r\nif modelType==\"ssd\":\r\n\r\n with tf.device('/gpu:0'):\r\n\r\n # TensorFlow session: grow memory when needed. TF, DO NOT USE ALL MY GPU MEMORY!!!\r\n #gpu_options = tf.GPUOptions(allow_growth=True)\r\n #config = tf.ConfigProto(log_device_placement=False, gpu_options=gpu_options)\r\n #isess = tf.InteractiveSession(config=config)\r\n isess = tf.InteractiveSession() #config=tf.ConfigProto(log_device_placement=True))\r\n\r\n # ## SSD 300 Model\r\n # \r\n # The SSD 300 network takes 300x300 image inputs. In order to feed any image, the latter is resize to this input shape (i.e.`Resize.WARP_RESIZE`). 
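# --- process_image above delegates non-maximum suppression to
# np_methods.bboxes_nms. The underlying idea, in a generic greedy form (a
# sketch, not that repository's implementation; boxes are rows of
# [ymin, xmin, ymax, xmax]):
import numpy as np

def greedy_nms(boxes, scores, iou_threshold=0.45):
    areas = (boxes[:, 2] - boxes[:, 0]) * (boxes[:, 3] - boxes[:, 1])
    order = np.argsort(-scores)              # best score first
    keep = []
    while order.size > 0:
        i = order[0]
        keep.append(i)
        tl = np.maximum(boxes[i, :2], boxes[order[1:], :2])   # intersection corners
        br = np.minimum(boxes[i, 2:], boxes[order[1:], 2:])
        inter = np.prod(np.clip(br - tl, 0, None), axis=1)
        iou = inter / (areas[i] + areas[order[1:]] - inter)
        order = order[1:][iou <= iou_threshold]               # drop overlapping boxes
    return keep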
Note that even though it may change the ratio width / height, the SSD model performs well on resized images (and it is the default behaviour in the original Caffe implementation).\r\n # \r\n # SSD anchors correspond to the default bounding boxes encoded in the network. The SSD net output provides offset on the coordinates and dimensions of these anchors.\r\n\r\n\r\n # Input placeholder.\r\n #net_shape = (300, 300)\r\n net_shape = (shapeWidth, shapeHeight)\r\n\r\n data_format = 'NHWC' #'NHWC' #'NCHW'\r\n img_input = tf.placeholder(tf.uint8, shape=(None, None, 3))\r\n # Evaluation pre-processing: resize to SSD net shape.\r\n image_pre, labels_pre, bboxes_pre, bbox_img = ssd_vgg_preprocessing.preprocess_for_eval(\r\n img_input, None, None, net_shape, data_format, resize=ssd_vgg_preprocessing.Resize.WARP_RESIZE)\r\n image_4d = tf.expand_dims(image_pre, 0)\r\n\r\n\r\n # Define the SSD model.\r\n reuse = True if 'ssd_net' in locals() else None\r\n if shapeWidth==300:\r\n ssd_net = ssd_vgg_300.SSDNet()\r\n else:\r\n ssd_net = ssd_vgg_512.SSDNet()\r\n\r\n with slim.arg_scope(ssd_net.arg_scope(data_format=data_format)):\r\n predictions, localisations, _, _ = ssd_net.net(image_4d, is_training=False, reuse=reuse)\r\n\r\n # Restore SSD model.\r\n if shapeWidth==300:\r\n ckpt_filename = 'checkpoints/ssd_300_vgg.ckpt'\r\n else:\r\n ckpt_filename = 'checkpoints/VGG_VOC0712_SSD_512x512_ft_iter_120000.ckpt'\r\n # ckpt_filename = '../checkpoints/VGG_VOC0712_SSD_300x300_ft_iter_120000.ckpt'\r\n isess.run(tf.global_variables_initializer())\r\n saver = tf.train.Saver()\r\n saver.restore(isess, ckpt_filename)\r\n\r\n # SSD default anchor boxes.\r\n ssd_anchors = ssd_net.anchors(net_shape)\r\n\r\n\r\n\r\n# In[10]:\r\n\r\n\r\nif modelType==\"tensorflow\":\r\n\r\n # What model to download.\r\n MODEL_NAME = 'ssd_mobilenet_v1_coco_11_06_2017'\r\n MODEL_FILE = MODEL_NAME + '.tar.gz'\r\n DOWNLOAD_BASE = 'http://download.tensorflow.org/models/object_detection/'\r\n\r\n # Path to frozen detection graph. 
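# --- "The SSD net output provides offset on the coordinates and dimensions
# of these anchors": a common center-size decoding of such offsets back into
# boxes. The variance constants below are the usual SSD defaults but are an
# assumption here; the exact scheme lives in ssd_common/np_methods.
import numpy as np

def decode_anchor(anchor, offsets, variances=(0.1, 0.1, 0.2, 0.2)):
    acx, acy, aw, ah = anchor                # anchor center and size
    dcx, dcy, dw, dh = offsets               # predicted offsets
    cx = acx + dcx * variances[0] * aw       # shift center, scaled by anchor size
    cy = acy + dcy * variances[1] * ah
    w = aw * np.exp(dw * variances[2])       # width/height predicted in log space
    h = ah * np.exp(dh * variances[3])
    return cx, cy, w, h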
This is the actual model that is used for the object detection.\r\n PATH_TO_CKPT = MODEL_NAME + '/frozen_inference_graph.pb'\r\n\r\n # List of the strings that is used to add correct label for each box.\r\n PATH_TO_LABELS = os.path.join('data', 'mscoco_label_map.pbtxt')\r\n\r\n NUM_CLASSES = 90\r\n\r\n detection_graph = tf.Graph()\r\n with detection_graph.as_default():\r\n od_graph_def = tf.GraphDef()\r\n with tf.gfile.GFile(PATH_TO_CKPT, 'rb') as fid:\r\n serialized_graph = fid.read()\r\n od_graph_def.ParseFromString(serialized_graph)\r\n tf.import_graph_def(od_graph_def, name='')\r\n \r\n label_map = label_map_util.load_labelmap(PATH_TO_LABELS)\r\n categories = label_map_util.convert_label_map_to_categories(label_map, max_num_classes=NUM_CLASSES, use_display_name=True)\r\n category_index = label_map_util.create_category_index(categories)\r\n\r\n #with tf.device('/gpu:0'):\r\n\r\n sess = tf.InteractiveSession(graph=detection_graph) #,config=tf.ConfigProto(log_device_placement=True)) #tf.InteractiveSession()\r\n\r\nstart_time = getCurrentClock()\r\n\r\nfrom webcamvideostream import *\r\n\r\nimport mss\r\nimport numpy\r\n\r\n'''\r\nfpsValue=0\r\n\r\nframeCnt=0\r\nprevFrameCnt=0\r\nprevTime=getCurrentClock()\r\n\r\nfor imgFile in sorted(glob.glob(url+\"/*.jpg\")):\r\n bi = parseXML(url+os.path.splitext(os.path.basename(imgFile))[0]+\".xml\")\r\n print(bi)\r\n key = cv2.waitKey(1)\r\n if key == 27:\r\n break\r\n img = cv2.imread(imgFile)\r\n cv2.rectangle(img, (bi[2],bi[3]), (bi[4],bi[5]), (0, 255, 0), 2)\r\n frameCnt=frameCnt+1\r\n nowMicro = getCurrentClock()\r\n delta = (nowMicro-prevTime).total_seconds()\r\n #print(\"%f \" % (delta))\r\n if delta>=1.0:\r\n fpsValue = ((frameCnt-prevFrameCnt)/delta)\r\n prevTime = getCurrentClock()\r\n prevFrameCnt=frameCnt\r\n \r\n draw_str(img, (20, 20), \"FPS = %03.2f, Frame = %05d, Object = %8s, File = %10s\" % (fpsValue,frameCnt,bi[1],bi[0]))\r\n\r\n cv2.imshow(\"tracking\", img)\r\n'''\r\n\r\n# 800x600 windowed mode\r\n#mon = {'top': 100, 'left': 2020, 'width': 1280, 'height': 720}\r\n#mon = {'top': 0, 'left': 1920, 'width': 1280, 'height': 720}\r\nmon = {'top': 0, 'left': 0, 'width': 1280, 'height': 720}\r\nsct = None\r\n\r\ndef getCap(videoUrl):\r\n global sct\r\n if \"screen\" in url:\r\n sct = mss.mss()\r\n cap = None\r\n if sct is None:\r\n if webcam:\r\n #cap = WebcamVideoStream(src=\"\"+str(videoUrl)+\"\").start()\r\n cap = WebcamVideoStream(videoUrl,(procWidth,procHeight),framerate)\r\n cap.start()\r\n else:\r\n cap = cv2.VideoCapture(videoUrl)\r\n cap.set(cv2.CAP_PROP_FRAME_WIDTH, procWidth)\r\n cap.set(cv2.CAP_PROP_FRAME_HEIGHT, procHeight)\r\n cap.set(cv2.CAP_PROP_FPS, framerate)\t\r\n return cap\r\n\r\n\r\ncap = getCap(videoUrl)\r\n\r\ncount=50\r\n#skip=2000\r\nskip=skipNr\r\n\r\nSKIP_EVERY=150 #pick a frame every 5 seconds\r\n\r\ncount=1000000\r\n#skip=0 #int(7622-5)\r\nSKIP_EVERY=0\r\n\r\nevery=SKIP_EVERY\r\ninitial_time = getCurrentClock()\r\nflag=True\r\n\r\nframeCnt=0\r\nprevFrameCnt=0\r\nprevTime=getCurrentClock()\r\n\r\nshowImage=False\r\nshowImage=True\r\n\r\nprocessImage=False\r\nprocessImage=True\r\n\r\nzoomImage=0\r\n#zoomImage=True\r\nrclasses = []\r\nrscores = []\r\nrbboxes = []\r\n\r\nrecord = False\r\n#record = True\r\n\r\nout = None\r\nif record:\r\n fourcc = cv2.VideoWriter_fourcc(*'MPEG')\r\n timestr = time.strftime(\"%Y%m%d-%H%M%S\")\r\n out = cv2.VideoWriter('output-'+timestr+'.mp4',fourcc, 30.0, (int(procWidth),int(procHeight)))\r\n\r\n#output_side_length = int(1920/zoomImage)\r\n\r\n#height_offset = int((height - 
output_side_length) / 2)\r\n#width_offset = int((width - output_side_length) / 2)\r\n\r\n\r\nflag = True\r\n\r\n\r\n# initialize the list of reference points and boolean indicating\r\n# whether cropping is being performed or not\r\nrefPt = [(0, 0),([procWidth],procHeight)]\r\ncropping = False\r\n\r\ncv2.namedWindow(\"ossdc.org source: \" + origVideoUrl)\r\ncv2.setMouseCallback(\"ossdc.org source: \" + origVideoUrl, click_and_crop)\r\n\r\nfpsValue=0\r\n\r\ntracks = []\r\n\r\nif tracking>0:\r\n lk_params = dict( winSize = (15, 15),#(15, 15),\r\n maxLevel = 3,#2,\r\n criteria = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 3, 0.01))\r\n\r\n feature_params = dict( maxCorners = tracking, #5000 #500,\r\n qualityLevel = 0.1, #0.3,\r\n minDistance = 3, #7,\r\n blockSize = 3 ) #7 )\r\n\r\n #procWidth = 1280 #640 # processing width (x resolution) of frame\r\n #procHeight = 720 # processing width (x resolution) of frame\r\n\r\n track_len = 25\r\n detect_interval = 15\r\n frame_idx = 0\r\n\r\nprevZoomImage = zoomImage\r\n\r\nimgFiles = sorted(glob.glob(url+\"/*.jpg\"))\r\nimgCnt=0\r\nflag=True\r\n\r\nif 1==1:\r\n while True:\r\n #frame = cap.read()\r\n #if True:\r\n if imgFiles[imgCnt] is not None or sct is not None or webcam or cap.grab():\r\n if sct is not None:\r\n frame = numpy.asarray(sct.grab(mon))\r\n else:\r\n if webcam:\r\n frame = cap.read()\r\n elif imgFiles[imgCnt] is not None:\r\n imgFile = imgFiles[imgCnt]\r\n print(\"imgFile\",imgCnt,\"=\",imgFile)\r\n frame = cv2.imread(imgFile)\r\n frame = cv2.resize(frame,(int(640*2),int(360*2)))\r\n imgCnt=imgCnt+1\r\n else:\r\n flag, frame = cap.retrieve() \r\n if not flag:\r\n continue\r\n else:\r\n frameCnt=frameCnt+1\r\n nowMicro = getCurrentClock()\r\n delta = (nowMicro-prevTime).total_seconds()\r\n #print(\"%f \" % (delta))\r\n if delta>=1.0:\r\n fpsValue = ((frameCnt-prevFrameCnt)/delta) \r\n print(\"FPS = %3.2f, Track points = %5d, Frame = %6d\" % (fpsValue,len(tracks), frameCnt))\r\n prevTime = nowMicro\r\n prevFrameCnt=frameCnt\r\n\r\n if skip>0:\r\n skip=skip-1\r\n continue\r\n \r\n if every>0:\r\n every=every-1\r\n continue\r\n every=SKIP_EVERY\r\n \r\n count=count-1\r\n if count==0:\r\n break\r\n\r\n img = frame\r\n if processImage: \r\n if zoomImage>0:\r\n #crop center of image, crop width is output_side_length\r\n output_side_length = int(procWidth/zoomImage)\r\n height, width, depth = frame.shape\r\n #print (height, width, depth)\r\n height_offset = int((height - output_side_length) / 2)\r\n width_offset = int((width - output_side_length) / 2)\r\n #print (height, width, depth, height_offset,width_offset,output_side_length)\r\n\r\n #crop based on zoomImage value\r\n img = frame[height_offset:height_offset + output_side_length,width_offset:width_offset + output_side_length]\r\n\r\n \r\n if zoomImage!=prevZoomImage:\r\n #reset tracking\r\n tracks = []\r\n prevZoomImage = zoomImage\r\n\r\n img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\r\n start_time = getCurrentClock()\r\n\r\n if cropping:\r\n fullImg = img.copy()\r\n img=img[refPt[0][1]:refPt[1][1], refPt[0][0]:refPt[1][0]]\r\n\r\n if modelType==\"ssd\":\r\n rclasses, rscores, rbboxes = process_image(img)\r\n elif modelType==\"tensorflow\":\r\n # the array based representation of the image will be used later in order to prepare the\r\n # result image with boxes and labels on it.\r\n cv2_im = cv2.cvtColor(img,cv2.COLOR_BGR2RGB)\r\n pil_im = Image.fromarray(cv2_im)\r\n image_np = load_image_into_numpy_array(pil_im)\r\n # Expand dimensions since the model expects images to have shape: [1, 
None, None, 3]\r\n image_np_expanded = np.expand_dims(image_np, axis=0)\r\n\r\n image_tensor = detection_graph.get_tensor_by_name('image_tensor:0')\r\n # Each box represents a part of the image where a particular object was detected.\r\n boxes = detection_graph.get_tensor_by_name('detection_boxes:0')\r\n # Each score represent how level of confidence for each of the objects.\r\n # Score is shown on the result image, together with the class label.\r\n scores = detection_graph.get_tensor_by_name('detection_scores:0')\r\n classes = detection_graph.get_tensor_by_name('detection_classes:0')\r\n num_detections = detection_graph.get_tensor_by_name('num_detections:0')\r\n # Actual detection.\r\n (boxes, scores, classes, num) = sess.run(\r\n [boxes, scores, classes, num_detections],\r\n feed_dict={image_tensor: image_np_expanded})\r\n \r\n if len(rclasses)>0:\r\n nowMicro = getCurrentClock()\r\n if modelType==\"ssd\":\r\n print(\"# %s - %s - %0.4f seconds ---\" % (frameCnt,rclasses.astype('|S3'), (nowMicro - start_time).total_seconds()))\r\n elif modelType==\"tensorflow\":\r\n print(\"# %s - %s - %0.4f seconds ---\" % (frameCnt, classes.astype('|S3'), (nowMicro - start_time).total_seconds()))\r\n start_time = nowMicro\r\n if showImage:\r\n if modelType==\"ssd\":\r\n visualization.bboxes_draw_on_img(img, rclasses, rscores, rbboxes, visualization.colors_plasma)\r\n elif modelType==\"tensorflow\":\r\n # Visualization of the results of a detection.\r\n vis_util.visualize_boxes_and_labels_on_image_array(\r\n img,\r\n np.squeeze(boxes),\r\n np.squeeze(classes).astype(np.int32),\r\n np.squeeze(scores),\r\n category_index,\r\n use_normalized_coordinates=True,\r\n line_thickness=8)\r\n\r\n #img = image_np\r\n if cropping:\r\n img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)\r\n cv2.imshow(\"crop\",img)\r\n\r\n if tracking>0:\r\n frame_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\r\n if len(tracks) > 0:\r\n img0, img1 = prev_gray, frame_gray\r\n p0 = np.float32([tr[-1] for tr in tracks]).reshape(-1, 1, 2)\r\n p1, st, err = cv2.calcOpticalFlowPyrLK(img0, img1, p0, None, **lk_params)\r\n p0r, st, err = cv2.calcOpticalFlowPyrLK(img1, img0, p1, None, **lk_params)\r\n d = abs(p0-p0r).reshape(-1, 2).max(-1)\r\n good = d < 1\r\n new_tracks = []\r\n for tr, (x, y), good_flag in zip(tracks, p1.reshape(-1, 2), good):\r\n if not good_flag:\r\n continue\r\n tr.append((x, y))\r\n if len(tr) > track_len:\r\n del tr[0]\r\n new_tracks.append(tr)\r\n cv2.circle(img, (x, y), 2, (0, 255, 0), -1)\r\n tracks = new_tracks\r\n if(showImage):\r\n cv2.polylines(img, [np.int32(tr) for tr in tracks], False, (0, 255, 0))\r\n #draw_str(img, (20, 20), 'track count: %5d FPS = %0.2f' % (len(tracks), fpsValue))\r\n frame_idx += 1\r\n prev_gray = frame_gray\r\n\r\n if frame_idx % detect_interval == 0:\r\n mask = np.zeros_like(frame_gray)\r\n mask[:] = 255\r\n for x, y in [np.int32(tr[-1]) for tr in tracks]:\r\n cv2.circle(mask, (x, y), 5, 0, -1)\r\n p = cv2.goodFeaturesToTrack(frame_gray, mask = mask, **feature_params)\r\n if p is not None:\r\n for x, y in np.float32(p).reshape(-1, 2):\r\n tracks.append([(x, y)])\r\n\r\n if cropping:\r\n img = fullImg\r\n\r\n #if record == 9:\r\n # cv2.imwrite('frame'.png',img)\r\n\r\n if showImage:\r\n #visualization.bboxes_draw_on_img(img, rclasses, rscores, rbboxes, visualization.colors_plasma)\r\n #visualization.plt_bboxes(img, rclasses, rscores, rbboxes)\r\n if processImage:\r\n img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)\r\n if cropping:\r\n try:\r\n #if len(tracks)<5:\r\n # print('\\a')\r\n draw_str(img, 
(20, 20), \"FPS = %3.2f, Track points = %5d, Frame = %6d\" % (fpsValue,len(tracks), frameCnt))\r\n cv2.rectangle(img, refPt[0], refPt[1], (0, 255, 0), 2)\r\n cv2.imshow(\"ossdc.org source: \" + origVideoUrl, img)\r\n except Exception:\r\n pass\r\n else:\r\n #draw_str(img, (20, 20), 'track count: %5d FPS = %0.2f' % (len(tracks), fpsValue))\r\n #if len(tracks)<5:\r\n # print('\\a')\r\n draw_str(img, (20, 20), \"FPS = %3.2f, Track points = %5d, Frame = %6d\" % (fpsValue,len(tracks), frameCnt))\r\n cv2.imshow(\"ossdc.org source: \" + origVideoUrl, img)\r\n if record:\r\n #if processImage:\r\n #img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)\r\n newimage = cv2.resize(img,(procWidth,procHeight))\r\n out.write(newimage)\r\n key = cv2.waitKey(1)\r\n if key == 27:\r\n break\r\n elif key == ord('c'):\r\n url = input(\"Enter new url: \")\r\n cv2.destroyWindow(\"ossdc.org source: \" + origVideoUrl)\r\n origVideoUrl = url\r\n cv2.namedWindow(\"ossdc.org source: \" + origVideoUrl)\r\n cv2.setMouseCallback(\"ossdc.org source: \" + origVideoUrl, click_and_crop)\r\n\r\n videoUrl = getVideoURL(url)\r\n cap = getCap(videoUrl)\r\n elif key == ord('u'):\r\n showImage= not(showImage)\r\n elif key == ord('p'):\r\n processImage= not(processImage)\r\n elif key == ord('z'):\r\n zoomImage=zoomImage+1\r\n if zoomImage==10:\r\n zoomImage=0\r\n elif key == ord('x'):\r\n zoomImage=zoomImage-1\r\n if zoomImage<0:\r\n zoomImage=0\r\n\r\nnowMicro = getCurrentClock()\r\nprint(\"# %s -- %0.4f seconds - FPS: %0.4f ---\" % (frameCnt, (nowMicro - initial_time).total_seconds(), frameCnt/(nowMicro - initial_time).total_seconds()))\r\n\r\n\r\n# In[ ]:\r\n\r\n\r\n\r\n\r\n# In[ ]:\r\n\r\n\r\n\r\n\r\n# In[ ]:\r\n\r\n\r\n\r\n\r\n# In[ ]:\r\n\r\n\r\n\r\n","sub_path":"object_detection/ossdc-vbacc-object-detection_dac.py","file_name":"ossdc-vbacc-object-detection_dac.py","file_ext":"py","file_size_in_byte":26348,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"135418382","text":"import asyncio as _asyncio\nimport os as _os\nfrom copy import deepcopy\nfrom typing import List\nfrom unittest import SkipTest, expectedFailure, skip, skipIf, skipUnless # noqa\n\nfrom asynctest import TestCase as _TestCase\nfrom asynctest import _fail_on\n\nfrom tortoise import Tortoise\nfrom tortoise.backends.base.config_generator import generate_config as _generate_config\nfrom tortoise.exceptions import DBConnectionError\nfrom tortoise.transactions import start_transaction\n\n__all__ = ('SimpleTestCase', 'IsolatedTestCase', 'TestCase', 'SkipTest', 'expectedFailure',\n 'skip', 'skipIf', 'skipUnless', 'initializer', 'finalizer')\n_TORTOISE_TEST_DB = _os.environ.get('TORTOISE_TEST_DB', 'sqlite:///tmp/test-{}.sqlite')\n\nexpectedFailure.__doc__ = \"\"\"\nMark test as expecting failiure.\n\nOn success it will be marked as unexpected success.\n\"\"\"\n\n_CONFIG = {} # type: dict\n_APPS = {} # type: dict\n_CONNECTIONS = {} # type: dict\n\n\ndef getDBConfig(app_label: str, modules: List[str]) -> dict:\n \"\"\"\n DB Config factory, for use in testing.\n \"\"\"\n return _generate_config(\n _TORTOISE_TEST_DB,\n app_modules={\n app_label: modules\n },\n testing=True,\n connection_label=app_label\n )\n\n\nasync def _init_db(config):\n try:\n await Tortoise.init(config)\n await Tortoise._drop_databases()\n except DBConnectionError: # pragma: nocoverage\n pass\n\n await Tortoise.init(config, _create_db=True)\n await Tortoise.generate_schemas()\n\n\ndef initializer():\n \"\"\"\n Sets up the DB for testing. 
Must be called as part of test environment setup.\n \"\"\"\n # pylint: disable=W0603\n global _CONFIG\n global _APPS\n global _CONNECTIONS\n _CONFIG = getDBConfig(\n app_label='models',\n modules=['tortoise.tests.testmodels'],\n )\n\n loop = _asyncio.get_event_loop()\n loop.run_until_complete(_init_db(_CONFIG))\n _APPS = deepcopy(Tortoise.apps)\n _CONNECTIONS = Tortoise._connections.copy()\n loop.run_until_complete(Tortoise._reset_connections())\n\n\ndef finalizer():\n \"\"\"\n Cleans up the DB after testing. Must be called as part of the test environment teardown.\n \"\"\"\n Tortoise.apps = deepcopy(_APPS)\n Tortoise._connections = _CONNECTIONS.copy()\n loop = _asyncio.get_event_loop()\n loop.run_until_complete(Tortoise._drop_databases())\n\n\nclass SimpleTestCase(_TestCase):\n \"\"\"\n An asyncio capable test class that provides some helper functions.\n\n Will run any ``test_*()`` function either as sync or async, depending\n on the signature of the function.\n If you specify ``async test_*()`` then it will run it in an event loop.\n\n Based on `asynctest `_\n \"\"\"\n\n async def _setUpDB(self):\n pass\n\n async def _tearDownDB(self) -> None:\n pass\n\n def _setUp(self) -> None:\n self._init_loop()\n\n # initialize post-test checks\n test = getattr(self, self._testMethodName)\n checker = getattr(test, _fail_on._FAIL_ON_ATTR, None)\n self._checker = checker or _fail_on._fail_on() # pylint: disable=W0201\n self._checker.before_test(self)\n\n self.loop.run_until_complete(self._setUpDB())\n if _asyncio.iscoroutinefunction(self.setUp):\n self.loop.run_until_complete(self.setUp())\n else:\n self.setUp()\n\n # don't take into account if the loop ran during setUp\n self.loop._asynctest_ran = False\n\n def _tearDown(self) -> None:\n self.loop.run_until_complete(self._tearDownDB())\n if _asyncio.iscoroutinefunction(self.tearDown):\n self.loop.run_until_complete(self.tearDown())\n else:\n self.tearDown()\n\n # post-test checks\n self._checker.check_test(self)\n\n\nclass IsolatedTestCase(SimpleTestCase):\n \"\"\"\n An asyncio capable test class that will ensure that an isolated test db\n is available for each test.\n\n It will create and destroy a new DB instance for every test.\n This is obviously slow, but guarantees a fresh DB.\n\n It will define a ``self.db`` which is the fully initialised (with DB schema)\n DB Client object.\n \"\"\"\n # pylint: disable=C0103,W0201\n\n async def _setUpDB(self):\n config = getDBConfig(\n app_label='models',\n modules=['tortoise.tests.testmodels'],\n )\n await Tortoise.init(config, _create_db=True)\n await Tortoise.generate_schemas()\n\n async def _tearDownDB(self) -> None:\n await Tortoise._drop_databases()\n\n\nclass TestCase(SimpleTestCase):\n \"\"\"\n An asyncio capable test class that will ensure that each test will be run at\n separate transaction that will rollback on finish.\n \"\"\"\n\n async def _setUpDB(self):\n Tortoise.apps = deepcopy(_APPS)\n Tortoise._connections = _CONNECTIONS.copy()\n await Tortoise.init(_CONFIG)\n\n self.transaction = await start_transaction() # pylint: disable=W0201\n\n async def _tearDownDB(self) -> None:\n await self.transaction.rollback()\n # Have to reset connections because tests are run in different loops\n await Tortoise._reset_connections()\n","sub_path":"tortoise/contrib/test/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":5161,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"452385000","text":"from django.urls import path\r\n\r\nfrom . 
import views\r\n\r\nurlpatterns = [\r\n path('', views.index, name='index'),\r\n path('event/', views.event, name='event'),\r\n path('hot/title/', views.hot_title, name='hot_title'),\r\n path('hot/event/', views.hot_event, name='hot_event'),\r\n path('setup/', views.setup, name='setup'),\r\n]\r\n","sub_path":"backend/server/search/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":333,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"536236086","text":"from django.conf.urls import include, url\nfrom django.contrib import admin\n\nurlpatterns = [\n # Examples:\n url(r'^$', 'newsletter.views.home', name='home'),\n url(r'^feedback/$', 'feedback.views.feedBackView', name='feedBackView'),\n url(r'^graph/$', 'drawgraph.views.draw', name='draw'),\n # url(r'^blog/', include('blog.urls')),\n\n url(r'^admin/', include(admin.site.urls)),\n]\n","sub_path":"WorkflowDjango/WorkflowDjango/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":392,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"269797592","text":"class Settings():\r\n\r\n def __init__(self):\r\n self.screen_width = 720\r\n self.screen_height = 720\r\n self.bg_color = (230, 230, 230)\r\n \r\n self.ship_speed_factor = 2\r\n self.Eship_speed_factor = 1\r\n\r\n self.Eship_Timer = 0.25\r\n self.Eship_Shoot_Chance = 50\r\n\r\n self.bullet_speed_factor = 2\r\n self.bullet_width = 3\r\n self.bullet_height = 15\r\n self.bullet_color = 60, 60, 60\r\n self.bullets_allowed = 3\r\n \r\n def difficulty(self):\r\n choice = str(input(\"What difficulty would you like?\\n1. Easy\\n2. Normal\\n3. Hard\\n>>> \").lower())\r\n if choice in ('easy', '1'):\r\n self.Eship_speed_factor = 0.5\r\n self.Eship_Timer = 0.5\r\n self.Eship_Shoot_Chance = 25\r\n elif choice in ('normal', '2'):\r\n self.Eship_speed_factor = 1\r\n self.Eship_Timer = 0.25\r\n self.Eship_Shoot_Chance = 50\r\n elif choice in ('hard', '3'):\r\n self.Eship_speed_factor = 1.5\r\n self.Eship_Timer = 0.25\r\n self.Eship_Shoot_Chance = 75\r\n","sub_path":"Pygame/settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":1108,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"289922300","text":"# -*- encoding: utf-8 -*-\n##############################################################################\n# \n# OpenERP, Open Source Management Solution\n# Copyright (C) 2004-2009 Tiny SPRL ().\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU Affero General Public License as\n# published by the Free Software Foundation, either version 3 of the\n# License, or (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU Affero General Public License for more details.\n#\n# You should have received a copy of the GNU Affero General Public License\n# along with this program. If not, see . 
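# --- The name= arguments in the urlpatterns above enable reverse URL
# resolution, so views and templates never hard-code paths. A sketch,
# assuming the search app's URLconf is included at the project root:
from django.urls import reverse

reverse('hot_title')    # -> '/hot/title/'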
\n#\n##############################################################################\n\nfrom osv import fields,osv\nimport netsvc\nimport openerp.addons.decimal_precision as dp\n\nclass sale_order(osv.osv):\n _name = 'sale.order'\n _inherit = 'sale.order'\n\n def _get_state_selection(self, cursor, user_id, context=None):\n return (\n ('created','Creado'),\n ('approved', 'Aprobado'),\n ('shipping', 'Enviado'),\n ('cancelled', 'Cancelado'),\n ('inreview', 'En Revision'),\n ('invoiced', 'Facturado'),\n ('paid', 'Pagado'),\n ('delivered', 'Entregado'),\n ('stopped', 'Detenido'))\n \n def _so_extra_cost(self, cr, uid, ids, field_names=None, arg=False, context=None):\n \"\"\" \n @return: Dictionary of values\n \"\"\"\n lines = self.pool.get('sm.delivery.costs.sale.line').search(cr, uid,[('order_id','in',ids)])\n \n res = {}\n \n for id in ids:\n res[id] = {}.fromkeys(field_names, 0)\n \n for f in field_names:\n c = context.copy()\n \n stock = self.get_product_available(cr, uid, ids, context=c)\n for id in ids:\n res[id][f] = stock.get(id, 0.0)\n \n cost = 0.0\n for line in lines:\n cost += line.amount\n res[line.order_id]['sm_cost'] = cost\n \n return res\n \n def _get_stock(self, cr, uid, ids, field_name, arg, context=None):\n \"\"\" Gets stock of products for locations\n @return: Dictionary of values\n \"\"\"\n if context is None:\n context = {}\n if 'location_id' not in context:\n locations = self.pool.get('stock.location').search(cr, uid, [('usage', '=', 'internal')], context=context)\n else:\n locations = context['location_id'] and [context['location_id']] or []\n\n if isinstance(ids, (int, long)):\n ids = [ids]\n\n res = {}.fromkeys(ids, 0.0)\n if locations:\n cr.execute('''select\n prodlot_id,\n sum(qty)\n from\n stock_report_prodlots\n where\n location_id IN %s and prodlot_id IN %s group by prodlot_id''',(tuple(locations),tuple(ids),))\n res.update(dict(cr.fetchall()))\n\n return res\n\n\n _columns = {\n 'sm_carrier_id': fields.many2one('sm.delivery.simple.carrier', 'Transportista'),\n 'carrier_ref': fields.char('Num. 
de Guia', size=32),\n 're_carrier_name': fields.char('Reembarque', size=128),\n 're_carrier_ref': fields.char('Guia Reembarque', size=32),\n 'carrier_notes': fields.char('Observaciones Transportista', size=128),\n 'carrier_delivery_date': fields.date('Fecha de Envio'),\n 'carrier_shipped_date': fields.date('Fecha de Entrega Mercancia'),\n 'check_guide': fields.boolean('Guia Revisada'),\n 're_check_guide': fields.boolean('Guia reembarque Revisada'),\n 'carrier_exclude_check': fields.boolean('Excluir', help='si marca esta casilla se puede usar en reportes y analisis para excluir estos pedidos del analisis.'),\n 'costs_lines': fields.one2many('sm.delivery.costs.sale.line', 'order_id', 'Costos'),\n 'sm_state': fields.selection(_get_state_selection, 'Estado', required=True, help='Indica el estado del Pedido.'),\n 'sm_cost': fields.function(_so_extra_cost, multi='sm_cost',\n type='float', digits_compute=dp.get_precision('Account'),\n string='Costos Extra',\n help=\"Suma total de costos extra.\"),\n }\n _defaults = {\n 'sm_carrier_id': False,\n 'sm_state': 'created', \n }\nsale_order()\n\n# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:\n","sub_path":"sm_delivery_simple/sale.py","file_name":"sale.py","file_ext":"py","file_size_in_byte":4743,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"396436791","text":"_author_ = 'Mkoijn'\n\n\"\"\"\nGet Integer from user and convert to a chosen base.\nThen get number(in base 2 to 16) and convert to Integer\n\"\"\"\n\nprint('Integer to a chosen base...')\nint_str = input('Give me an int: ')\nmy_int = int(int_str)\nint_base = input('Give me a base: ')\nmy_base = int(int_base)\n\nbase_digits = '0123456789abcdef'\nbase_str = ''\nwhile my_int > 0:\n    base_digit = str(base_digits[my_int % my_base])\n    base_str = base_digit + base_str\n    my_int //= my_base\n\nprint(int_str, 'in your chosen base is', base_str)\n\nprint('\\nA chosen number(in chosen base) to integer...')\nnew_number_str = input('Give me a number using 0 to 9 and letters a to f: ')\nnew_base_str = input('What base is this number in?: ')\nnew_base = int(new_base_str)\ntemp = new_number_str\nnew_int = 0\npower = 0\nint_equivalent = 0\nwhile len(temp) > 0:\n    for pos in range(len(base_digits)):\n        if base_digits[pos] == temp[-1]:\n            int_equivalent = int(pos)\n            break\n    new_int += int_equivalent * new_base ** power\n    temp = temp[:-1]\n    power += 1\n\nprint(new_number_str, 'to integer is', new_int)","sub_path":"baseConversion.py","file_name":"baseConversion.py","file_ext":"py","file_size_in_byte":1098,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"104592520","text":"import re\n\n\ndef read_file(file_path):\n    with open(file_path, 'r') as f:\n        return f.read()\n\n\ndef filter_lines(regexp, lines):\n    result = []\n    for line in lines:\n        if re.match(regexp, line):\n            result.append(line)\n        else:\n            print(f\"line {line} doesn't match\")\n    return result\n\n\n","sub_path":"parser.py","file_name":"parser.py","file_ext":"py","file_size_in_byte":362,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"90249861","text":"# -*- coding:UTF-8 -*-\n'''\nCreated on 2016-9-6\nUI operations for the Square page, mainly switching between the baoliao (tip-off) and shuoshuo (status) tabs\n@author: hongzenghui\n'''\nfrom Mobile import MobileUtil\nfrom COMMON import Log\nfrom CONFIG.Define import LogLevel\n\n'''\n @function: click the baoliao (tip-off) tab\n @para: \n @return: True if the click succeeds; otherwise False\n @ hongzenghui 2016-9-6\n'''\n
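# --- A worked trace of the int-to-base loop in baseConversion.py above,
# for my_int = 255 and my_base = 16:
#   255 % 16 = 15 -> 'f', then 255 // 16 = 15
#    15 % 16 = 15 -> 'f', then  15 // 16 = 0  (loop ends)
# digits are prepended, so base_str is built right-to-left as 'ff'.
assert '0123456789abcdef'[255 % 16] == 'f' and 255 // 16 == 15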
\ndef click_baoliao_tab():\n xpath = \"//android.widget.TextView[@resource-id='com.tianque.linkage:id/btn_news']\"\n if MobileUtil.click_element_by_xpath(xpath) is True:\n Log.LogOutput(LogLevel.DEBUG, message=\"Clicked the baoliao tab successfully\")\n return True\n else:\n Log.LogOutput(LogLevel.ERROR, message=\"Failed to click the baoliao tab\")\n return False\n \n \n'''\n @function: click the shuoshuo (status) tab\n @para: \n @return: True if the click succeeds; otherwise False\n @ hongzenghui 2016-9-6\n'''\n\ndef click_shuoshuo_tab():\n xpath = \"//android.widget.TextView[@resource-id='com.tianque.linkage:id/btn_talk']\"\n if MobileUtil.click_element_by_xpath(xpath) is True:\n Log.LogOutput(LogLevel.DEBUG, message=\"Clicked the shuoshuo tab successfully\")\n return True\n else:\n Log.LogOutput(LogLevel.ERROR, message=\"Failed to click the shuoshuo tab\")\n return False","sub_path":"testAPI/Web_Test/Mobile/UI/Clue/SquareUI.py","file_name":"SquareUI.py","file_ext":"py","file_size_in_byte":1227,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"547969464","text":"import pygame,sys,random\r\nfrom pygame import display, event, draw, time, font\r\n\r\npygame.init()\r\nscreen=display.set_mode((800,500))\r\ndisplay.set_caption(\"Snake Game\")\r\n\r\nclock = time.Clock()\r\n\r\nlavender=(241,203,255)\r\ngreen = (0,255,0)\r\nred = (255,0,0)\r\n\r\nx1 = 300\r\ny1 = 200\r\nx1_change = 0\r\ny1_change = 0\r\nsnake_list = []\r\nsnake_length = 1\r\n\r\nfoodx = int(random.randint(0, 790) / 10) * 10\r\nfoody = int(random.randint(0, 490) / 10) * 10\r\n\r\nblue = (0,0,255)\r\ntext_font = font.Font(\"freesansbold.ttf\", 32)\r\n\r\nwhile True:\r\n allevents = event.get()\r\n for myevent in allevents:\r\n if myevent.type == pygame.QUIT:\r\n sys.exit()\r\n if myevent.type == pygame.KEYDOWN:\r\n if myevent.key == pygame.K_LEFT:\r\n x1_change = -10\r\n y1_change = 0\r\n if myevent.key == pygame.K_RIGHT:\r\n x1_change = 10\r\n y1_change = 0\r\n if myevent.key == pygame.K_UP:\r\n y1_change = -10\r\n x1_change = 0\r\n if myevent.key == pygame.K_DOWN:\r\n y1_change = 10\r\n x1_change = 0\r\n \r\n if x1 >= 800 or x1 < 0 or y1 >= 500 or y1 < 0:\r\n screen.fill(lavender)\r\n text_render = text_font.render(\"GAME OVER\", True, blue)\r\n screen.blit(text_render, (300, 250))\r\n screen.blit(text_render, (10, 10))\r\n display.flip()\r\n time.delay(5000)\r\n break\r\n\r\n x1 = x1 + x1_change\r\n y1 = y1 + y1_change\r\n\r\n screen.fill(lavender)\r\n \r\n food_rect = (foodx, foody, 10, 10)\r\n draw.rect(screen, red, food_rect)\r\n \r\n snake_list.append([x1, y1])\r\n if len(snake_list) > snake_length:\r\n del snake_list[0]\r\n for x in snake_list:\r\n snake_rect = (x[0], x[1], 10, 10)\r\n draw.rect(screen, green, snake_rect)\r\n for x in snake_list[:-1]:\r\n if x == [x1, y1]:\r\n screen.fill(lavender)\r\n text_render = text_font.render(\"GAME OVER\", True, blue)\r\n screen.blit(text_render, (300, 250))\r\n screen.blit(text_render, (10, 10))\r\n display.flip()\r\n time.delay(5000)\r\n break\r\n score = snake_length - 1\r\n text_render = text_font.render(\"SCORE: \" +str(score), True, blue)\r\n screen.blit(text_render, (10, 10))\r\n\r\n display.flip()\r\n \r\n if x1 == foodx and y1 == foody:\r\n foodx = int(random.randint(0, 590) / 10) * 10\r\n foody = int(random.randint(0, 390) / 10) * 10\r\n snake_length = snake_length + 1\r\n \r\n clock.tick(3)\r\n\r\n\r\n\r\n","sub_path":"Bhukha saap game.py","file_name":"Bhukha saap game.py","file_ext":"py","file_size_in_byte":2301,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"212733879","text":"import assemblyai\nimport os\n\n\naai 
= assemblyai.Client()\n\n\ndef get_dir_files(dir):\n \"\"\"Return a list of all files found in a directory.\"\"\"\n files = []\n for (dirpath, dirnames, filenames) in os.walk(dir):\n for f in filenames:\n f = os.path.join(dirpath, f)\n files.append(f)\n return files\n\n\nfiles = get_dir_files('/home/chappy/Downloads/audio')\n\ntranscripts = []\nfor f in files:\n transcript = aai.transcribe(filename=f)\n transcripts.append(transcript)\n","sub_path":"examples/transcribe_folder.py","file_name":"transcribe_folder.py","file_ext":"py","file_size_in_byte":495,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"621188583","text":"import numpy as np\nimport matplotlib.pyplot as plt\nfrom nn import NeuralNetwork\n\nprint(\"Do it again and do it right!\")\n\nnp.random.seed(0)\n\ndata = []\nN = 1000\n\nfor i in range(N):\n x = i*np.pi*2/N\n data.append([[[x]], [np.sin(x)]])\n\n\ndef tanh(x):\n return np.tanh(x)\n\n\ndef Dtanh(y):\n return 1-y*y\n\n\ndef linear(x):\n return x\n\n\ndef Dlinear(y):\n return 1\n\n\nnn = NeuralNetwork(1, 1)\n\nnn.add(5, [tanh, Dtanh])\nnn.add(5, [tanh, Dtanh])\nnn.init([linear, Dlinear])\n\nn = 25000\ncost = []\n\nfor i in range(n):\n index = np.random.randint(0, N)\n x = data[index][0]\n y = data[index][1]\n c = nn.cost(x, y)\n cost.append(c)\n nn.train(x, y)\n\n if i % 100 == 0:\n print(\"Iter \" + str(i) + \" Cost \" + str(c))\n\nnnG = []\nsin = []\nfor i in range(N):\n x = i*np.pi*2/N\n nnG.append(nn.predict(x)[0])\n sin.append(np.sin(x))\n\nc = 0\nfor i in range(len(nnG)):\n c += nnG[i]/len(nnG)\n\nprint(c)\nprint(\"____\")\nprint(nn.predict([[0]]))\nprint(nn.predict([[np.pi/2]]))\n\nplt.plot(np.arange(0.0, n), cost)\nplt.ylabel(\"Cost\")\nplt.xlabel(\"Time\")\nplt.show()\nplt.plot(np.linspace(0.0, 2*np.pi, N), nnG, 'b')\nplt.plot(np.linspace(0.0, 2*np.pi, N), sin, 'r')\nplt.show()\n","sub_path":"Neural Network with Momentum/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1178,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"104397444","text":"# standard lib\nimport os\nimport sys\nimport string\nimport csv\nimport operator\nimport json\n\n# data / numeric\nimport pandas as pd\nimport numpy as np\n\n# algorithmic\nimport ahocorasick\n\n# graphics\nfrom yattag import Doc\nimport pdfkit\n\noptions = {\n 'page-size': 'Letter',\n 'margin-top': '.1in',\n 'margin-right': '.1in',\n 'margin-bottom': '.1in',\n 'margin-left': '.1in',\n 'encoding': \"UTF-8\",\n 'no-outline': None,\n 'dpi': '100'\n}\n\ndef rating_sequence(rating):\n points = int(rating*10)\n buckets = [0,0,0,0,0]\n for i, _ in enumerate(buckets):\n if points < 10:\n buckets[i] = points\n return buckets\n else:\n buckets[i] = 10\n points -= 10\n return buckets\n\n\"\"\"\nFolds the data into a dictionary from values in the KEY column to a summary of\nvalues from the val column\n\"\"\"\ndef pandas_reduce(dataframe, key, val):\n # df = dataframe.groupby(by=key)[key, val]\n # return df.mean()\n collapsed_data = {}\n for i in dataframe.keys():\n row = dataframe[i]\n names = row[key].split(',')\n for name in names:\n name = name.strip()\n if name == '':\n continue\n ordinal = int(row[val])\n if name not in collapsed_data:\n collapsed_data[name] = []\n collapsed_data[name].append(ordinal)\n for entry in collapsed_data:\n collapsed_data[entry] = (np.mean(collapsed_data[entry]), len(collapsed_data[entry]))\n return collapsed_data\n\ndef get_collapsed_data(data, label, sortBy):\n collapsed_data = 
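# --- In the neural-network record above, Dtanh takes the activation's
# *output* y = tanh(x) rather than x: d/dx tanh(x) = 1 - tanh(x)**2 = 1 - y*y,
# so backprop can reuse the stored forward value. Quick numerical check:
import numpy as np

x = 0.7
y = np.tanh(x)
analytic = 1 - y * y
numeric = (np.tanh(x + 1e-6) - np.tanh(x - 1e-6)) / 2e-6
assert abs(analytic - numeric) < 1e-8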
{}\n for row in data:\n names = row[label].split(',')\n for name in names:\n name = name.strip()\n if name == '':\n continue\n print(row)\n ordinal = int(row[sortBy])\n if name not in collapsed_data:\n collapsed_data[name] = []\n collapsed_data[name].append(ordinal)\n for entry in collapsed_data:\n collapsed_data[entry] = (np.mean(collapsed_data[entry]), len(collapsed_data[entry]))\n return collapsed_data\n\ndef display(item_info, rank=None):\n name, (rating, rating_count) = item_info\n if rank:\n return \"{}. {}\".format(rank, name)\n else:\n return \"{}\".format(name)\n\ndef buildHTML(restaurant_name, good_dish_data, bad_dish_data, good_descriptor_data, bad_descriptor_data, all_data, asset_path, style_path):\n doc,tag,text = Doc().tagtext()\n\n def nl():\n doc.asis('\\n')\n\n def br():\n doc.asis('
<br>
')\n\n def display_review(sentence, sentiment, words):\n table = str.maketrans(dict.fromkeys(string.punctuation))\n keywords = list(map(lambda s: s.translate(table), words.split()))\n segmentations = []\n ptr = 0\n # if len(keywords) != 0:\n # A = ahocorasick.Automaton()\n # for i, w in enumerate(keywords):\n # A.add_word(w, (i,w))\n # A.make_automaton()\n # for end_idx, (insert_order, original_value) in A.iter(sentence):\n # segmentations.append((end_idx, original_value))\n doc.asis('\\n')\n seq = rating_sequence(float(sentiment))\n with tag('p'):\n text('\\\"')\n for end, word in segmentations:\n start = end - len(word) + 1\n text(sentence[ptr:start])\n with tag('b'):\n text(word)\n ptr = end + 1\n text(sentence[ptr:])\n text('\\\"')\n sentence_star_div(seq, sentiment, ratings=0, size=20, show_number=True, c='sentence_stars')\n\n def sentence_star_div(seq, rating, ratings=0, size=20, show_number=True, c=None):\n doc.asis('\\n')\n with tag('div'):\n if c:\n doc.attr(klass = c)\n for s in seq:\n nl()\n doc.stag('img', src=os.path.join(asset_path, '{}.png'.format(s)), width=str(size), height=str(size))\n if show_number:\n doc.asis('\\n')\n with tag('h8'):\n doc.attr(klass=\"sentence_star_number\")\n text(\" {}/5 \".format(int(rating)))\n if ratings != 0:\n doc.asis('\\n')\n with tag('h3'):\n doc.attr(klass=\"ratings_number\")\n text(\"{} rating{}\".format(ratings, '' if ratings == 1 else 's'))\n\n def star_div(seq, rating, ratings=0, size=30, show_number=True, c=None):\n doc.asis('\\n')\n with tag('div'):\n if c:\n doc.attr(klass = c)\n for s in seq:\n nl()\n doc.stag('img', src=os.path.join(asset_path, '{}.png'.format(s)), width=str(size), height=str(size))\n if show_number:\n doc.asis('\\n')\n with tag('h3'):\n doc.attr(klass=\"star_number\")\n text(\" {:.1f} \".format(float(rating)))\n if ratings != 0:\n doc.asis('\\n')\n with tag('h3'):\n doc.attr(klass=\"ratings_number\")\n text(\"{} rating{}\".format(ratings, '' if ratings == 1 else 's'))\n\n doc.asis(\"\")\n\n # Open the HTML\n doc.asis('\\n')\n with tag('html'):\n\n # Add the stylesheet\n doc.asis('\\n')\n with tag('head'):\n doc.asis(\"\".format(style_path))\n\n # Add the header banner.\n doc.asis('\\n')\n with tag('header'):\n\n doc.asis('\\n')\n with tag('div'):\n doc.attr(klass = \"rectangle\")\n doc.asis('

')\n\n doc.asis('\\n')\n with tag('h5'):\n text(\" Restaurant Analytics powered by Foodie \")\n\n doc.asis('\\n')\n with tag('h1'):\n text(restaurant_name)\n\n # Begin the body\n doc.asis('\\n')\n with tag('body'):\n\n # Add the highest rated dishes\n doc.asis(\"
<br>

\")\n doc.asis('\\n')\n with tag('h2'):\n text(\"Highest Rated Dishes:\")\n i = 1\n for dish in good_dish_data:\n rating = dish[1][0]\n rating_count = dish[1][1]\n seq = rating_sequence(rating)\n doc.asis('\\n')\n with tag('h3'):\n text(display(dish, rank=i))\n star_div(seq, rating, rating_count, 30, c='stars')\n i += 1\n\n # Add the lowest rated dishes\n doc.asis('\\n')\n doc.asis(\"
\")\n doc.asis('\\n')\n with tag('h2'):\n text(\"Lowest Rated Dishes:\")\n for dish in bad_dish_data:\n rating = dish[1][0]\n rating_count = dish[1][1]\n seq = rating_sequence(rating)\n doc.asis('\\n')\n with tag('h3'):\n text(display(dish))\n star_div(seq, rating, rating_count, 30, c='stars')\n\n # Add the highest rated adjectives\n # nl()\n # br()\n # nl()\n # with tag('h2'):\n # text(\"Highest Rated Descriptors:\")\n # i = 1\n # for descriptor in good_descriptor_data:\n # rating = descriptor[1][0]\n # rating_count = descriptor[1][1]\n # seq = rating_sequence(rating)\n # nl()\n # with tag('h3'):\n # text(display(descriptor, rank=i))\n # star_div(seq, rating, rating_count, 30, c='stars')\n # i += 1\n\n # Add the lowest rated adjectives\n # nl()\n # br()\n # nl()\n # with tag('h2'):\n # text(\"Lowest Rated Descriptors:\")\n # for descriptor in bad_descriptor_data:\n # rating = descriptor[1][0]\n # rating_count = descriptor[1][1]\n # seq = rating_sequence(rating)\n # nl()\n # with tag('h3'):\n # text(display(descriptor))\n # star_div(seq, rating, rating_count, 30, c='stars')\n\n nl()\n br()\n nl()\n with tag('h2'):\n text(\"Comments by Dish:\")\n\n for dish in sorted(all_data.keys()):\n nl()\n with tag('h7'):\n br()\n nl()\n text(dish)\n\n for rating, review, words in sorted(all_data[dish])[::-1]:\n display_review(review, rating, words)\n\n return doc.getvalue()\n\ndef main(r_name, input_path, output_path, style_path, asset_path):\n if input_path.endswith('.csv'):\n print(\"Currently does not have support for csvs. Coming soon.\")\n if input_path.endswith('.xlsx'):\n df = pd.read_excel(input_path, sheet_name=2).replace(np.nan, '', regex=True).T\n all_data = {}\n\n for i in df.keys():\n row = df[i]\n d = row['Item']\n if d not in all_data:\n all_data[d] = []\n all_data[d].append((row['Rating'], row['Sentence'], row['Keywords']))\n\n n = 3\n\n dish_data = pandas_reduce(df, key='Item', val='Rating')\n dish_sort = sorted(dish_data.items(), key=operator.itemgetter(1))\n good_dish_data = dish_sort[::-1][:n]\n bad_dish_data = dish_sort[:n][::-1]\n\n keyword_data = pandas_reduce(df, key='Keywords', val='Rating')\n keyword_sort = sorted(keyword_data.items(), key=operator.itemgetter(1))\n good_keyword_data = keyword_sort[::-1][:n]\n bad_keyword_data = keyword_sort[:n][::-1]\n\n html_string = buildHTML(r_name, good_dish_data, bad_dish_data, good_keyword_data, bad_keyword_data, all_data, asset_path, style_path)\n html_path = output_path + '.html'\n with open(html_path, 'w+') as htmlOut:\n htmlOut.write(html_string)\n print(\"Generated HTML file at: {}\".format(html_path))\n pdfkit.from_string(html_string, output_path + '.pdf', options=options, css=style_path)\n\n buildJSON(r_name, good_dish_data, bad_dish_data, all_data, output_path)\n\ndef buildJSON(r_name, good_dish_data, bad_dish_data, all_data, output_path):\n output_path = output_path + \".json\"\n data = {}\n data['Restaurant Name'] = r_name\n \n data['Highest Rated Dishes'] = []\n for dish in good_dish_data:\n data['Highest Rated Dishes'].append([dish[0], dish[1][0], dish[1][1]])\n\n data['Lowest Rated Dishes'] = []\n for dish in bad_dish_data:\n data['Lowest Rated Dishes'].append([dish[0], dish[1][0], dish[1][1]])\n\n data['Comments by Dish'] = all_data\n\n with open(output_path, 'w') as f:\n json.dump(data, f)\n\ndef run_generate_analytics_pdf(row, foodie_id):\n r_name = row['Name']\n script_dir = os.path.abspath(os.path.join(__file__ ,\"../..\"))\n input_path = script_dir + \"/csvfiles/final/\" + foodie_id + \"-final.xlsx\"\n output_path = 
script_dir + \"/csvfiles/pdfs/\" + foodie_id\n style_path = script_dir + \"/scraping_scripts/visual/styles.css\" \n asset_path = script_dir + \"/scraping_scripts/visual/assets/\"\n main(r_name, input_path, output_path, style_path, asset_path)\n\nif __name__ == \"__main__\":\n # try:\n asset_path = os.path.join(os.getcwd(), sys.argv[5])\n style_path = os.path.join(os.getcwd(), sys.argv[4])\n input_path = os.path.join(os.getcwd(), sys.argv[2])\n output_path = os.path.join(os.getcwd(), sys.argv[3])\n main(sys.argv[1], input_path, output_path, style_path, asset_path)\n # except IndexError:\n # sys.stderr.write('Error. Usage: caller \\n')\n","sub_path":"generate_analytics_pdf.py","file_name":"generate_analytics_pdf.py","file_ext":"py","file_size_in_byte":11771,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"75639208","text":"# -*- coding: utf-8 -*-\n\"\"\"\nSpyder Editor\n\nThis is a temporary script file.\n\nAuthor: CCCM\n\n\"\"\"\n\n#%% Preambulo\n\ndef limpiar_kernel(): \n try:\n from IPython import get_ipython\n get_ipython().magic('clear')\n get_ipython().magic('reset -f')\n except:\n pass\n \n# limpiar_kernel()\n\nimport pandas as pd\nimport geopandas\nimport statsmodels.api as sm\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom sklearn.experimental import enable_iterative_imputer\nfrom sklearn.impute import IterativeImputer\nfrom itertools import cycle\n\n#funciones\n\ndef regresion(x_,y_):\n X = sm.add_constant(x_)\n resultados_fit = sm.OLS(y_,X,missing='drop').fit()\n N = resultados_fit.params[0]\n M = resultados_fit.params[1]\n R2 = resultados_fit.rsquared\n return [M,N,R2]\n \ndef mejoresCorrelaciones(df, col, Nestaciones):\n ordenados = df.sort_values(by=col, ascending = False)\n ordenados = ordenados[ordenados[col] >= 0.75]\n return ordenados.index[:Nestaciones]\n\n# Función rut\ndef digito_verificador(rut):\n reversed_digits = map(int, reversed(str(rut)))\n factors = cycle(range(2, 8))\n s = sum(d * f for d, f in zip(reversed_digits, factors))\n if (-s) % 11 > 9:\n return 'K'\n else:\n return (-s) % 11\n\ndef extenderQ(dfDGA, dfCR2bueno):\n \n dfDGA_aux = dfDGA.copy()\n dfDGA_aux = dfDGA_aux.loc[dfDGA_aux.index <= '01-01-2008']\n \n for columna in dfDGA.columns:\n missing_DGA = dfDGA_aux[columna][dfDGA_aux[columna].isna()]\n dfDGA.loc[missing_DGA.index,columna] = dfCR2bueno.loc[missing_DGA.index,columna]\n return dfDGA\n\n# ================================\n# Encontrar puntos cercanos \n# ================================\n \ndef min_dist(point, gpd2, n_multivariables):\n gpd2['Dist'] = gpd2.apply(lambda row: point.distance(row.geometry),axis=1)\n return gpd2.sort_values(by=['Dist']).loc[gpd2.sort_values(by=['Dist']).index[0:n_multivariables],'Codigo Est']\n \n#%% \n \ndef main(): \n\n#%% \n ruta_GitHub = r'D:\\GitHub'\n ruta_GitHub = r'C:\\Users\\ccalvo\\Documents\\GitHub'\n ruta_OD = r'E:\\CIREN\\OneDrive - ciren.cl'\n # ruta_OD = r'C:\\Users\\ccalvo\\OneDrive - ciren.cl'\n \n # rutas de caudales\n ruta_Q = ruta_OD+r'\\Of hidrica\\AOHIA_ZC\\Etapa 1 y 2\\datos\\Q_mon_Mataquito_flags.csv'\n ruta_obs = ruta_OD + r'\\Of hidrica\\AOHIA_ZC\\Etapa 1 y 2\\datos\\RIO MATAQUITO_mensual.xlsx'\n ruta_shp = ruta_OD+r'\\Of hidrica\\GIS\\estaciones_Mataquito.shp'\n \n # leer shp estaciones\n est_geo = geopandas.read_file(ruta_shp)\n\n # leer caudales\n Q_monthly = pd.read_csv(ruta_Q, index_col = 0, parse_dates = True, encoding = 'utf8')\n q_obs = pd.read_excel(ruta_obs, parse_dates = True, index_col = 0, sheet_name = 'data')\n 
q_obs_flags = pd.read_excel(ruta_obs, parse_dates = True, index_col = 0, sheet_name = 'info data')\n for col in ['Ano','Mes']:\n del q_obs[col]\n del q_obs_flags[col]\n q_obs[q_obs_flags < 20] = np.nan\n \n #----------meses\n meses = [4,5,6,7,8,9,10,11,12,1,2,3]\n \n year_i = 1979\n year_f = 2020\n \n #------------fechas\n inicio = pd.to_datetime(str(year_i)+'-03-31',format='%Y-%m-%d')\n fin = pd.to_datetime(str(year_f)+'-03-31',format='%Y-%m-%d')\n Q_monthly = Q_monthly.loc[(Q_monthly.index <= fin) & (Q_monthly.index >= inicio)] \n \n #--------Estaciones que no son canales\n \n # estaciones_nocanales = ['07121011-1', '07104001-1', '07116001-7', '07103001-6', '07112001-5', '07117008-K',\n # '07123001-5', '07121006-5', '07115001-1', '07102005-3', '07104002-K', '07102001-0']\n \n estaciones_nocanales = ['07121011-1', '07104001-1', '07116001-7', '07103001-6', '07112001-5', '07117008-K',\n '07123001-5', '07121006-5', '07115001-1', '07102005-3', '07104002-K', '07102001-0']\n \n Q_monthly = Q_monthly[[x for x in Q_monthly.columns if x in estaciones_nocanales]]\n\n #---------minimo de años con datos\n minYr = 20\n \n #%%Crear indice de fechas, convertir años a int y calcular frecuencia de datos\n\n data = Q_monthly.notnull().astype('int')\n data = data.groupby(Q_monthly.index.year) \n data_anual = data.aggregate(np.sum)\n data_anual = data_anual/(12*0.8) \n data_anual = data_anual.apply(lambda x: [y if y < 1 else 1 for y in x])\n data_anual = data_anual.transpose()\n \n data_anual = data_anual.sort_index()\n estaciones_minimas = pd.DataFrame(data_anual.sum(axis=1), columns = ['registro'])\n estaciones_minimas = estaciones_minimas[estaciones_minimas['registro']>= minYr]\n estaciones_minimas = estaciones_minimas.index.tolist()\n estaciones_naturalizadas = ['07102005-3', '07103001-6', '07104002-K', '07121006-5', '07123001-5']\n # estaciones_naturalizadas = ['07102005-3', '07103001-6', '07104002-K', '07123001-5']\n\n \n for est in estaciones_naturalizadas:\n if est not in estaciones_minimas:\n estaciones_minimas.append(est)\n \n Q_monthly_filtradas = Q_monthly.copy()[estaciones_minimas]\n \n # --- no tiene sentido extender con CR2 pre 1980 porque tengo más información DGA que CR2\n \n # --- recuperar las series naturalizadas\n for est in estaciones_naturalizadas:\n Q_monthly_filtradas[est] = Q_monthly[est]\n \n \n #Correlaciones según metodología de Yuri & McPhee (2015)\n \n q_obs = pd.read_excel(ruta_obs, index_col = 0, parse_dates = True, sheet_name = 'data')\n q_obs_flags = pd.read_excel(ruta_obs, index_col = 0, parse_dates = True, sheet_name = 'info data')\n q_obs = q_obs[q_obs_flags > 20]\n q_obs = q_obs[(q_obs.index <= '2020-03-01') & (q_obs.index >= '1979-04-01')]\n estaciones_obs = ['07121006-5','07123001-5'] \n \n #%% Multivariable \n \n n_multivariables = 4\n \n stdOutliers = 3.129 \n Q_monthly_MLR = Q_monthly_filtradas.copy()\n estaciones = Q_monthly_filtradas.columns\n \n # primero todas las estaciones\n \n for ind,col in enumerate([x for x in estaciones if x not in estaciones_obs]):\n \n print('Rellenando estación '+str(col))\n \n for mes in meses:\n \n Q_monthly_mes = Q_monthly_filtradas.loc[Q_monthly_filtradas.index.month == mes].copy()\n \n y = Q_monthly_mes[col]\n \n if y.count() < 1:\n \n continue\n \n correl = Q_monthly_mes.astype(float).corr()\n est_indep = mejoresCorrelaciones(correl, col, n_multivariables)\n est_cercanas = min_dist(est_geo[est_geo['Codigo Est'] == col].geometry,\n est_geo[est_geo['Codigo Est'].isin(estaciones)],n_multivariables)\n \n est_indep = 
list(set(est_indep) & set(est_cercanas))\n \n x = pd.DataFrame(Q_monthly_mes.loc[Q_monthly_mes.index.month == mes][est_indep].copy(),dtype = float) \n x = x[np.abs(x-x.mean())<=(stdOutliers*x.std())]\n x = x[x[x.count().idxmax()].notna()] \n\n est_na = x.count()[x.count() < 2].index.values.tolist()\n \n x = x.drop(est_na, axis = 1)\n \n max_value_ = x.mean()+stdOutliers*x.std()\n \n imp = IterativeImputer( imputation_order='random', random_state=0, max_iter=15, min_value = 0, max_value = max_value_, sample_posterior = True)\n Y = imp.fit_transform(x)\n Q_monthly_MLR_mes = pd.DataFrame(Y, columns = x.columns, index = x.index )\n Q_monthly_MLR_mes = Q_monthly_MLR_mes.dropna()\n\n Q_monthly_MLR.loc[Q_monthly_MLR_mes.index,col] = Q_monthly_MLR_mes[col]\n\n Q_monthly_MLR.loc[Q_monthly_mes.index,col] = Q_monthly_MLR.loc[Q_monthly_mes.index,col].fillna(Q_monthly_MLR.loc[Q_monthly_mes.index,col].median())\n\n # ahora las estaciones del VIC\n for est in estaciones_obs:\n for mes in meses:\n \n # --------filtrar q RN y q observado\n q_mes_RN = Q_monthly_MLR.loc[Q_monthly_MLR.index.month == mes,est].copy()\n q_mes_obs = q_obs.loc[q_obs.index.month == mes,est].copy()\n \n # inicializar las series a correlacionar\n x = pd.DataFrame([], index = pd.date_range('1979-04-01','2020-03-01',freq = 'MS'),dtype = float, columns = ['obs'])\n y = pd.DataFrame([], index = pd.date_range('1979-04-01','2020-03-01',freq = 'MS'),dtype = float)\n x = x.loc[x.index.month == mes]\n y = y.loc[y.index.month == mes]\n \n if est == '07121006-5':\n x['obs'] = q_obs.loc[q_obs.index.month == mes,'07123001-5'].copy()\n else:\n idx = q_mes_obs[q_mes_obs.notnull()].index\n x.loc[idx,'obs'] = q_mes_obs[q_mes_obs.notnull()].values\n\n y.loc[q_mes_RN[q_mes_RN.notnull()].index,'RN'] = q_mes_RN[q_mes_RN.notnull()].values\n \n X = sm.add_constant(x)\n\n model = sm.OLS(y[:],X[:], missing='drop')\n results = model.fit()\n \n y_hat = results.params[0]+x*results.params[1]\n y_hat = y_hat.astype(float)\n \n idx = y['RN'][y['RN'].isnull()].index.intersection(y_hat['obs'][y_hat['obs'].notnull()].index)\n y.loc[idx] = y_hat.loc[idx]\n \n Q_monthly_MLR_mes = pd.DataFrame(y, columns = y.columns, index = y.index )['RN']\n Q_monthly_MLR_mes = Q_monthly_MLR_mes.dropna()\n\n Q_monthly_MLR.loc[Q_monthly_MLR_mes.index,est] = Q_monthly_MLR_mes.values\n \n Q_monthly_MLR.loc[y.index[:2],est] = Q_monthly_MLR.loc[y.index[:2],est].fillna(Q_monthly_MLR.loc[y.index[:2],est].median())\n\n Q_monthly_MLR.loc[y.index,est] = Q_monthly_MLR.loc[y.index,est].fillna(Q_monthly_MLR.loc[y.index,est].median())\n\n \n#Graficar\n nticks = 1\n plt.close(\"all\")\n fig, ax = plt.subplots(6,2)\n ax = ax.reshape(-1)\n Q_monthly_filtradas = pd.DataFrame(Q_monthly_filtradas, dtype = float)\n Q_monthly_MLR = pd.DataFrame(Q_monthly_MLR, dtype = float)\n diff = Q_monthly_MLR - Q_monthly_filtradas\n diff.index.names = ['']\n logplot = False\n\n for ind,col in enumerate(estaciones):\n# fig.add_subplot(2,1,ind+1)\n Q_monthly_MLR_sim = Q_monthly_MLR[col].copy()\n Q_monthly_MLR_sim.index.names = ['']\n Q_monthly_MLR_sim.loc[Q_monthly_filtradas[col][Q_monthly_filtradas[col].isna()].index] = np.nan\n Q_monthly_MLR_sim.plot(ax = ax[ind], linewidth = 3, logy = logplot)\n Q_monthly_filtradas[col].plot(ax = ax[ind], linewidth = 1, logy = logplot)\n diff[col].plot(ax = ax[ind], linewidth = 4, color = 'k', logy = logplot)\n ticks = ax[ind].xaxis.get_ticklocs()[::nticks]\n fig.canvas.draw()\n ticklabels = [l.get_text() for l in ax[ind].xaxis.get_ticklabels()][::nticks]\n \n 
ax[ind].xaxis.set_ticks(ticks)\n ax[ind].xaxis.set_ticklabels(ticklabels)\n ax[ind].figure.show()\n plt.ylabel('Q $m^3/s$')\n plt.title('Estación '+col)\n ax[ind].set_ylim(bottom = 0)\n plt.legend(['Predictor','Original','Residual'],bbox_to_anchor=(1.05, 1), loc='upper left') \n Q_monthly_MLR.to_csv('..//Etapa 1 y 2//datos//Q_relleno_MLR_Mataquito_'+str(year_i+1)+'-'+str(year_f)+'_monthly_NAT_v2.csv')\n\n","sub_path":"Hydrology/CIREN/Correlaciones_regresiones_predictivo_Mataquito.py","file_name":"Correlaciones_regresiones_predictivo_Mataquito.py","file_ext":"py","file_size_in_byte":11469,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"210047637","text":"from marshmallow import Schema, fields, post_load\nfrom jsonschema import validate, exceptions\nfrom pprint import pprint\nimport json\nimport requests\nimport warlock\n\nclass Methods(object):\n\n def __get_schema(self, schema_url=None):\n try:\n if schema_url == None:\n return\n# raise KeyError('Schema url cannot be blank. It must be a '+\\\n# 'URL to the schema.'\\\n# )\n\n r = requests.get(schema_url)\n\n if not r.status_code == 200:\n raise Exception('Schema not found at {0}'.format(schema_url))\n\n if 'success' in r.json():\n if 'data' in r.json()['success']:\n return r.json()['success']['data']\n else:\n raise Exception('Schema not found. Expected {\"success\":{'+\n '\"data\":{...schema...}} but found {0}'\\\n .format(r.json()))\n except Exception:\n raise\n\n def __input_dataitems(self, \n schema_properties=None, \n schema_required=None,\n data_item=None,\n update_mode=False\n ):\n if schema_properties == None:\n raise KeyError('A dict of schema properties must be provided.')\n if data_item == None:\n raise KeyError(\"For some reasons (this shouldn't happen), there \"+\\\n \"is no data item.\")\n\n print('This request requires data...')\n try:\n for k in schema_properties.keys():\n while True:\n try:\n if not k in data_item:\n break\n if type(data_item[k]) == None:\n break\n\n prompt_string = ' '+k+\\\n ' ('+schema_properties[k]['description']+')'\n if update_mode:\n prompt_string += ' [' + str(data_item[k]) + ']'\n prompt_string += ': '\n\n temp = input(prompt_string)\n\n if k in schema_required \\\n and temp == ''\\\n and not update_mode:\n raise ValueError('{0} is a required property.'\\\n .format(k))\n elif temp == '':\n if update_mode:\n break\n if type(data_item[k]) == int:\n break\n temp = None\n\n if type(data_item[k]) == int:\n data_item[k] = int(temp)\n else:\n data_item[k] = temp\n break\n except ValueError as ve:\n if type(data_item[k]) == int:\n print('{0} must be an integer number'\\\n .format(k))\n else:\n print(ve)\n except TypeError as te:\n print(te)\n except KeyError:\n raise\n except warlock.InvalidOperation as io:\n print(io)\n except Exception as e:\n print(repr(e))\n print()\n return data_item\n except Exception:\n raise\n\n def __parameters(self, url, parameter_collection = []):\n __url_string = url\n\n if parameter_collection == []:\n return url\n\n print('Please enter the following parameters:')\n for p in parameter_collection:\n while True:\n try:\n input_value = input(' '+p['name']+' : ')\n if p['type'] == int:\n input_value = int(input_value)\n\n if not (input_value == None or input_value == '')\\\n or not p['required']:\n __parameter = __url_string[\\\n __url_string.index('<'):__url_string.index('>')+1]\n __url_string = __url_string.replace\\\n (__parameter, str(input_value))\n break\n else:\n raise ValueError()\n except ValueError as ve:\n 
print(' Invalid value provided. {0} needs to be a {1}'\\\n .format(p['name'], \\\n 'string' if p['type'] == str else 'int'\\\n )\n )\n pass\n except Exception:\n raise\n\n print()\n return __url_string\n\n def __fetch_data(self, url_string=None):\n if url_string == None:\n raise KeyError('A URL must be provided for __fetch_data')\n try:\n result = requests.get(url_string)\n if not result.status_code == 200:\n if 'error' in result.json():\n if 'message' in result.json()['error']:\n error_message = result.json()['error']['message']\n else:\n error_message = 'HTTP {0}'.format(result.status_code)\n raise Exception(error_message)\n elif not 'json' in result.headers['content-type']:\n raise Exception('Expected JSON data but did not receive it.')\n\n data = result.json()\n\n if 'success' in data:\n if 'data' in data['success']:\n data = data['success']['data']\n if type(data) == list \\\n and len(data) == 1:\n data = data[0]\n\n return data, result.headers\n except Exception:\n raise\n\n def post(self, link_collection=None, route_identifier=-1):\n try:\n route = int(route_identifier)\n put_route = link_collection.links[route]\n\n url_string = self.__parameters(put_route.href,put_route.parameters)\n\n schema = self.__get_schema(put_route.schema)\n schema_properties = schema['properties']\n schema_required = schema['required']\n object_factory = warlock.model_factory(schema)\n\n data_dict = {}\n for key in schema_properties.keys():\n data_dict[str(key)] = schema_properties[key]['default']\n\n temp = object_factory(**data_dict)\n\n temp = self.__input_dataitems(\n schema_properties,\n schema_required,\n temp,\n update_mode=False)\n\n insert = requests.post(url_string,\n data=json.dumps(temp),\n headers={'Content-Type':'application/json'}\n )\n\n return insert.json(), insert.headers\n\n except Exception as e:\n raise\n\n\n def put(self, link_collection=None, route_identifier=-1):\n try:\n route = int(route_identifier)\n put_route = link_collection.links[route]\n\n url_string = self.__parameters(put_route.href,put_route.parameters)\n\n original_data, headers = self.__fetch_data(url_string)\n\n schema = self.__get_schema(put_route.schema)\n schema_properties = schema['properties']\n schema_required = schema['required']\n object_factory = warlock.model_factory(schema)\n\n temp = object_factory(**original_data)\n\n temp = self.__input_dataitems(\n schema_properties,\n schema_required,\n temp,\n update_mode=True)\n\n update = requests.put(url_string,\n data=json.dumps(temp),\n headers={'Content-Type':'application/json'}\n )\n\n return update.json(), update.headers\n except Exception as e:\n raise\n\n def get(self,\n link_collection=None,\n route_identifier=-1,\n parameters_needed=True):\n try:\n route = int(route_identifier)\n put_route = link_collection.links[route]\n\n url_string = self.__parameters(put_route.href,put_route.parameters)\n\n original_data, headers = self.__fetch_data(url_string)\n get_data = requests.get(url_string,\n headers={'Content-Type':'application/json'}\n )\n\n\n return get_data.json(), headers\n except Exception as e:\n raise\n\n def delete(self,\n link_collection=None,\n route_identifier=-1):\n try:\n route = int(route_identifier)\n del_route = link_collection.links[route]\n\n url_string = self.__parameters(del_route.href, del_route.parameters)\n\n original_data, headers = self.__fetch_data(url_string)\n\n delete = requests.delete(url_string,\n headers={'Content-Type':'application/json'}\n )\n\n return delete.json(), delete.headers\n except Exception:\n raise\n\n def 
options(self,\n link_collection=None,\n route_identifier=-1):\n try:\n route = int(route_identifier)\n opt_route = link_collection.links[route]\n\n url_string = self.__parameters(opt_route.href, opt_route.parameters)\n\n original_data, headers = self.__fetch_data(url_string)\n\n options = requests.options(url_string,\n headers={'Content-Type':'application/json'}\n )\n\n return {'allowed':options.headers['allow']}, options.headers\n except Exception:\n raise\n\n def head(self,\n link_collection=None,\n route_identifier=-1):\n try:\n route = int(route_identifier)\n opt_route = link_collection.links[route]\n\n url_string = self.__parameters(opt_route.href, opt_route.parameters)\n\n original_data, headers = self.__fetch_data(url_string)\n\n options = requests.head(url_string,\n headers={'Content-Type':'application/json'}\n )\n\n return {}, options.headers\n except Exception:\n raise\n\n","sub_path":"stage1/bluetooth/test/Methods.py","file_name":"Methods.py","file_ext":"py","file_size_in_byte":10716,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"617687211","text":"import json\nfrom inspect import isclass\nfrom functools import partial, reduce\n\nfrom pulsar.utils.slugify import slugify\n\nimport lux\nfrom lux import Html\nfrom lux.utils.crypt import get_random_string\n\n\n__all__ = ['Fieldset', 'Submit', 'Layout', 'Row']\n\n\ndef angular_fields(form_class, fields, missings):\n '''Utility function for checking fields in layouts'''\n for field in fields:\n if field in missings:\n field = form_class.base_fields.get(field)\n if field:\n missings.remove(field.name)\n yield field\n elif isinstance(field, AngularFormElement):\n field.setup(form_class, missings)\n yield field\n else:\n raise ValueError(field)\n\n\ndef as_angular_dict(field, form):\n if isinstance(field, AngularFormElement):\n return field.as_dict(form)\n else:\n data = field.getattrs(form)\n data['name'] = field.name\n if form.is_bound:\n pass\n elif field.name in form.initial:\n data['value'] = form.initial[field.name]\n for name in tuple(data):\n if name.startswith('data_'):\n value = data.pop(name)\n data[name.replace('_', '-')] = value\n return {'field': data}\n\n\nclass AngularFormElement(object):\n type = None\n\n def as_dict(self, form=None):\n raise NotImplementedError\n\n def setup(self, form_class, missings):\n pass\n\n\nclass Submit(AngularFormElement):\n type = 'button'\n\n def __init__(self, label, name=None, **attrs):\n self.attrs = attrs\n self.attrs['label'] = label\n self.attrs['name'] = slugify(name or label, separator='')\n if not self.attrs.get('type'):\n self.attrs['type'] = self.type\n\n def as_dict(self, form=None):\n return {'field': self.attrs}\n\n\nclass Fieldset(AngularFormElement):\n type = 'fieldset'\n\n def __init__(self, *children, **attrs):\n self.children = children\n self.attrs = attrs\n self.all = attrs.pop('all', False)\n if not self.attrs.get('type'):\n self.attrs['type'] = self.type\n self.type = self.attrs['type']\n\n def as_dict(self, form=None):\n return {\n 'field': self.attrs.copy(),\n 'children': [as_angular_dict(c, form) for c in self.children]\n }\n\n def setup(self, form_class, missings):\n children = self.children\n self.children = []\n if self.all:\n children = missings[:]\n for field in angular_fields(form_class, children, missings):\n self.children.append(field)\n\n\nclass Row(Fieldset):\n type = 'div'\n\n\nclass Layout(Fieldset):\n type = 'form'\n form_class = None\n default_element = Fieldset\n\n def __init__(self, form, 
*children, **attrs):\n super().__init__(*children, **attrs)\n self.setup(form)\n\n def __call__(self, *args, **kwargs):\n form = self.form_class(*args, **kwargs)\n return AngularForm(self, form)\n\n def setup(self, instance_type):\n self.form_class = instance_type\n missings = list(self.form_class.base_fields)\n children = self.children\n self.children = []\n for field in angular_fields(self.form_class, children, missings):\n self.children.append(field)\n if missings:\n field = self.default_element(*missings)\n field.setup(self.form_class, missings)\n self.children.append(field)\n\n\nclass AngularForm(object):\n\n def __init__(self, layout, form):\n self.layout = layout\n self.form = form\n\n def as_dict(self, id=None, **attrs):\n data = self.layout.as_dict(self.form)\n form = data['field']\n if 'model' not in form:\n form['model'] = self.form.__class__.__name__\n if id is None and not form.get('id'):\n id = '%s_%s' % (self.form.__class__.__name__.lower(),\n get_random_string(5))\n if id:\n form['id'] = id\n form.update(attrs)\n return data\n\n def as_form(self, ng_controller=None, **attrs):\n data = self.as_dict(**attrs)\n form = data['field']\n directive = form.pop('directive', 'lux-form')\n id = form['id']\n script = form_script % (id, json.dumps(data))\n html = Html(directive, script).data('options', 'luxforms.%s' % id)\n ng_controller = ng_controller or form.pop('ng_controller', None)\n # add controller if required\n if ng_controller:\n html.data('ng-controller', ng_controller)\n return html\n\n\nform_script = ('')\n","sub_path":"lux/forms/serialise.py","file_name":"serialise.py","file_ext":"py","file_size_in_byte":4705,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"503409663","text":"#Developer: Curly60e\n#Python BlockClock its a clock of the Bitcoin blockchain.\n#Version: 0.0.4\n\nimport os\nimport time as t\nfrom art import *\n\n\ndef getblock(): # get access to bitcoin-cli with the command getblockchaininfo\n bitcoincli = \" getblockchaininfo\"\n os.system(path + bitcoincli)\n\ndef getblockcount(): # get access to bitcoin-cli with the command getblockcount\n bitcoincli = \" getblockcount\"\n os.system(path + bitcoincli)\n\ndef clear(): # clear the screen\n os.system('cls' if os.name=='nt' else 'clear')\n\ndef getgenesis():\n bitcoincli = \" getblock 000000000019d6689c085ae165831e934ff763ae46a2a6c172b3f1b60a8ce26f 0 | xxd -r -p | hexyl -n 256\"\n os.system(path + bitcoincli)\n\n#-------------------From this line the program starts----------------------\n\ndef connected(info): # here we complete the connection to the external node\n if info == \"Y\" or info == \"y\":\n clear()\n prt()\n print(\"\\nAdd your node information\\n\")\n menuUserConn()\n else:\n menu()\n \n \ndef nodeinfo(): \n connected(input(\"Are you sure you want to connect to a node? Y/n: \")) # call 'connected' function to make the connection with the node information\n\n \ndef artist(): # here we convert the result of the command 'getblockcount' on a random art design\n custom = input(\"Do you want random designs? 
Y/n: \")\n if custom == \"Y\" or custom == \"y\":\n while True:\n clear()\n design()\n tmp()\n else:\n while True:\n clear()\n getblockcount()\n tmp()\n \n\ndef design():\n bitcoinclient = path + \" getblockcount\"\n block = os.popen(str(bitcoinclient)).read() # 'getblockcount' convert to string\n b = block \n tprint(b, font=\"rnd-large\")\n \n\ndef userconn():\n option = input(\"Select option: \")\n if option == \"A\" or option == \"a\":\n user = input(\"USER@NODE: \")\n s = input(\"Do you want to continue Y/n: \")\n if s == \"Y\" or s == \"y\":\n clear()\n c = input(\"Do you want to see custom design? Y/n: \")\n if c == \"Y\" or c == \"y\": \n while True: # connection via ssh \n clear()\n sshb = \"ssh \" + user + \" '\" + \"{}\".format(path) + \"'\" + \" getblockcount\"\n sshc = os.popen(str(sshb)).read()\n sshd = sshc\n tprint(sshd, font=\"rnd-large\")\n tmp()\n else:\n menu()\n \n else:\n while True: # connection via ssh\n clear()\n sshb = \"ssh \" + user + \" '\" + \"{}\".format(path) + \"'\" + \" getblockchaininfo\"\n os.system(sshb)\n tmp()\n else:\n menu()\n \n elif option == \"B\" or option == \"b\":\n user = input(\"USER@NODE: \")\n while True:\n clear()\n prt()\n sshb = \"ssh \" + user + \" '\" + \"{}\".format(path) + \"'\" + \" getblock 000000000019d6689c085ae165831e934ff763ae46a2a6c172b3f1b60a8ce26f 0 | xxd -r -p | hexyl -n 256\"\n os.system(sshb)\n a = input(\"Do you want to return to the Node Connection Menu? Y/n: \")\n if a == \"Y\" or a == \"y\":\n menuUserConn()\n else:\n b = input(\"Do you want to exit? Y/n: \")\n if b == \"Y\" or b == \"y\":\n exit()\n else:\n menuUserConn()\n \n tmp()\n \n elif option == \"C\" or option == \"c\":\n menu()\n \n \ndef tmp():\n t.sleep(15)\n\n\ndef menu():\n clear()\n prt()\n print(\"\"\"\\t\\t\n Python BlockClock Menu\n Version 0.0.4\n \n A. Connect to an external node through SSH\n B. Show Blockchain information in your own node\n C. Run BlockClock in your own node\n D. Show the Genesis Block\n E. Exit\n \\n\\n\"\"\")\n menuA(input(\"Select option: \"))\n \ndef menuUserConn():\n clear()\n prt()\n print(\"\"\"\\t\\t\n Python BlockClock Menu\n Version 0.0.4\n \n A. Run BlockClock in this external node \n B. Show the Genesis Block\n C. Return Main Menu\n \\n\\n\"\"\")\n userconn()\n\n \ndef menuA(menuS):\n if menuS == \"A\" or menuS == \"a\":\n nodeinfo()\n elif menuS == \"B\" or menuS == \"b\":\n while True:\n clear()\n getblock()\n tmp()\n elif menuS == \"C\" or menuS == \"c\":\n artist()\n elif menuS == \"D\" or menuS == \"d\":\n clear()\n prt()\n getgenesis()\n a = input(\"Do you want to return to the Main Menu? Y/n: \")\n if a == \"Y\" or a == \"y\":\n menu()\n else:\n b = input(\"Do you want to exit? 
Y/n: \")\n if b == \"Y\" or b == \"y\":\n exit()\n else:\n menu()\n elif menuS == \"E\" or menuS == \"e\":\n exit()\n \n \n \ndef prt():\n tprint(\"BlockClock\", font=\"rnd-large\") # random title design\n \n \nwhile True: # Loop\n clear() # call clear function that clears the screen\n prt()\n print(\"Welcome to Python BlockClock\\n\\n\")\n path = input(\"Insert the Path to Bitcoin-Cli: \") # path to the bitcoin-cli\n clear()\n menu()\n\n","sub_path":"Python-BlockClock.py","file_name":"Python-BlockClock.py","file_ext":"py","file_size_in_byte":5256,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"533794373","text":"#!/usr/bin/python\n#+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+\n#|R|a|s|p|b|e|r|r|y|P|i|.|c|o|m|.|t|w|\n#+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+\n# Copyright (c) 2016, raspberrypi.com.tw\n# All rights reserved.\n# Use of this source code is governed by a BSD-style license that can be\n# found in the LICENSE file.\n#\n# receiver.py\n# A demo program for lora to receive message compatible with python2/3\n#\n# Author : sosorry\n# Date : 08/06/2016\n#\n# Example: $ sudo python receiver.py\n# Serial Port? /dev/ttyAMA0\n#\n\nimport serial\nimport time\nimport re\n\nBAUDRATE = 57600 # the baud rate we talk to the microchip RN2483\nTIMEOUT = 1\n\ntry:\n input = raw_input\nexcept NameError:\n pass\n\nserial_port = input(\"Serial Port? \")\n\n# open up the FTDI serial port to get data transmitted to lora\nser = serial.Serial(serial_port, BAUDRATE)\n\nif ser.isOpen() == False:\n ser.open()\n\n# The default settings for the UART interface are \n# 57600 bps, 8 bits, no parity, 1 Stop bit, no flow control. \nser.bytesize = 8\nser.parity = \"N\"\nser.stopbits = 1\nser.timeout = 5\n\nprint(\"Device Opening: \" + str(ser.isOpen()))\nprint('----------------------------------')\n\n# signed decimal number representing the transceiver output power, \n# from -3 to 15.\nprint('radio set pwr 14')\nser.write(b'radio set pwr 14\\r\\n')\nprint(str(ser.readline()))\n\n# decimal representing the operating radio bandwidth, in kHz. 
\n# Parameter values can be: 125, 250, 500.\nprint('radio set bw 250')\nser.write(b'radio set bw 250\\r\\n')\nprint(str(ser.readline()))\n\n# decimal representing the frequency, \n# from 433000000 to 434800000 or from 863000000 to 870000000, in Hz.\nprint('radio set freq 868100000')\nser.write(b'radio set freq 868100000\\r\\n')\nprint(str(ser.readline()))\n\n# pauses the LoRaWAN stack functionality to allow transceiver (radio) configuration \n# must be called before any radio transmission or reception\nprint('mac pause')\nser.write(b'mac pause\\r\\n')\nprint(str(ser.readline()))\n\n#print('radio set wdt 2000')\n#ser.write(b'radio set wdt 2000\\r\\n')\n#print(str(ser.readline()))\n\n# puts the radio into continuous Receive mode.\n# from 0 to 65535, set '0' in order to enable the Continuous Reception mode\nprint('radio rx 0')\nser.write(b'radio rx 0\\r\\n')\nprint(str(ser.readline()))\n\ntry:\n while True:\n response = ser.readline()\n\n # for python3\n try:\n utf8_response = str(response, encoding=\"UTF-8\").strip()\n\n if re.match('^radio_rx', utf8_response):\n print('radio rx 0')\n ser.write(b'radio rx 0\\r\\n')\n\n text = utf8_response.split(\" \", 1)[1]\n print(\"receive: \" + text + \"\\r\\n\")\n\n # for python2\n except:\n if re.match('^radio_rx', str(response).strip()):\n print('radio rx 0')\n ser.write(b'radio rx 0\\r\\n')\n\n text = response.split(\" \", 1)[1]\n print(\"receive: \" + text + \"\\r\\n\")\n\n time.sleep(1)\n\nfinally:\n ser.close()\n\n\n","sub_path":"RN2483/receiver.py","file_name":"receiver.py","file_ext":"py","file_size_in_byte":2986,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"637402108","text":"import matplotlib.pyplot as plt\r\nimport numpy as np\r\nfrom PIL import Image\r\n\r\nfrom mobileye_part3 import SFM\r\n\r\n\r\ndef seperate_by_color(candidates, colors):\r\n red_x = [point[0] for ind, point in enumerate(candidates) if\r\n colors[ind] is \"red\"]\r\n red_y = [point[1] for ind, point in enumerate(candidates) if\r\n colors[ind] is \"red\"]\r\n green_x = [point[0] for ind, point in enumerate(candidates) if\r\n colors[ind] is \"green\"]\r\n green_y = [point[1] for ind, point in enumerate(candidates) if\r\n colors[ind] is \"green\"]\r\n return green_x, green_y, red_x, red_y\r\n\r\n\r\ndef visualize(part1_candidates, part1_auxiliary, prev_container, current_container, focal, pp):\r\n fig, (part1, part2, part3) = plt.subplots(1, 3, figsize=(12, 6))\r\n fig.canvas.set_window_title('Mobileye Project 2020')\r\n img = Image.open(\"mobileye_logo.png\")\r\n img = img.resize((280, 200), Image.ANTIALIAS)\r\n fig.figimage(img, fig.bbox.xmax - 300, fig.bbox.ymin - 15)\r\n plt.suptitle(current_container.img_path[current_container.img_path.rfind('\\\\') + 1:-4], size=16)\r\n part1.set_title('candidates')\r\n part1.imshow(current_container.img)\r\n red_x, red_y, green_x, green_y = seperate_by_color(part1_candidates, part1_auxiliary)\r\n part1.plot(red_x, red_y, 'ro', color='r', markersize=4)\r\n part1.plot(green_x, green_y, 'ro', color='g', markersize=4)\r\n\r\n part2.set_title('traffic lights')\r\n part2.imshow(current_container.img)\r\n red_x, red_y, green_x, green_y = seperate_by_color(current_container.traffic_light, current_container.auxiliary)\r\n part2.plot(red_x, red_y, 'ro', color='r', markersize=4)\r\n part2.plot(green_x, green_y, 'ro', color='g', markersize=4)\r\n\r\n part3.set_title('distances')\r\n if prev_container is not None:\r\n norm_prev_pts, norm_curr_pts, R, norm_foe, tZ = 
SFM.prepare_3D_data(prev_container,current_container,\r\n focal, pp)\r\n norm_rot_pts = SFM.rotate(norm_prev_pts, R)\r\n rot_pts = SFM.unnormalize(norm_rot_pts, focal, pp)\r\n foe = np.squeeze(SFM.unnormalize(np.array([norm_foe]), focal, pp))\r\n part3.imshow(current_container.img)\r\n curr_p = current_container.traffic_light\r\n part3.plot(curr_p[:][0], curr_p[:][1], 'b+')\r\n\r\n for i in range(len(curr_p)):\r\n part3.plot([curr_p[i][0], foe[0]], [curr_p[i][1], foe[1]], 'b')\r\n if current_container.valid[i]:\r\n part3.text(curr_p[i][0], curr_p[i][1],\r\n r'{0:.1f}'.format(current_container.traffic_lights_3d_location[i, 2]), color='r')\r\n part3.plot(foe[0], foe[1], 'r+')\r\n part3.plot(rot_pts[:, 0], rot_pts[:, 1], 'g+')\r\n\r\n plt.show()\r\n","sub_path":"visualization.py","file_name":"visualization.py","file_ext":"py","file_size_in_byte":2781,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"324393976","text":"from collections import namedtuple, Counter\nimport os\nimport re\nfrom typing import List\n\nimport tweepy\nimport readability\n\n\ndef get_tweets():\n for tw in tweepy.Cursor(api.user_timeline, screen_name=TWITTER_ACCOUNT,\n exclude_replies=False, include_rts=True).items():\n yield Tweet(tw.id, tw.text, tw.created_at, tw.favorite_count, tw.retweet_count)\n\n\nTweet = namedtuple('Tweet', 'id text created likes rts')\n\nTWITTER_ACCOUNT = 'feoh'\n\nTWITTER_KEY = os.environ['TWITTER_KEY']\nTWITTER_SECRET = os.environ['TWITTER_SECRET']\nTWITTER_ACCESS_TOKEN = os.environ['TWITTER_ACCESS_TOKEN']\nTWITTER_ACCESS_SECRET = os.environ['TWITTER_ACCESS_SECRET']\n\nauth = tweepy.OAuthHandler(TWITTER_KEY, TWITTER_SECRET)\nprint(auth)\nauth.set_access_token(TWITTER_ACCESS_TOKEN, TWITTER_ACCESS_SECRET)\napi = tweepy.API(auth)\nprint(api)\n\ntweets: List[Tweet] = list(get_tweets())\n\nscore_total = 0\n\nfor tweet in tweets:\n results = readability.getmeasures(tweet.text, lang='en')\n score = results['readability grades']['GunningFogIndex']\n print(f\"Tweet Text: {tweet.text} Gunning Fog Score: {score}\")\n score_total = score_total + score\n\naverage_score = score_total / len(tweets)\nprint(f\"The average Gunning Fog Index Score for {len(tweets)} is {average_score}\")\n","sub_path":"mydays/tweepy/testtweepy.py","file_name":"testtweepy.py","file_ext":"py","file_size_in_byte":1277,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"408462976","text":"import time\n\npentagonals = [0]\n\ndef pentagonal(num):\n if num >= len(pentagonals):\n start = len(pentagonals)\n for i in range(start, num+1):\n # print(num, start, i)\n pentagonals.append(int((i * ((3 * i) - 1)) / 2))\n # print(\">\", pentagonals)\n\n # print(pentagonals, num)\n return pentagonals[num]\n\ndef add_pentagonals(stop_after):\n while pentagonals[-1] < stop_after:\n pentagonal(len(pentagonals))\n # print(pentagonals)\n\na = 2\nkeep_going = True\nsolutions = []\nwhile keep_going:\n if a % 1000 == 0:\n print(\"Checking a:\", a)\n print(solutions)\n print()\n time.sleep(0.5)\n p_a = pentagonal(a)\n for b in range(1, a):\n if b % 1000 == 0:\n print(\"Checking b:\", b)\n p_b = pentagonal(b)\n if (p_a - p_b) in pentagonals:\n # print(a, b, p_a, p_b, p_a-p_b, \"diff is pentagonal\")\n # time.sleep(2)\n add_pentagonals(p_a + p_b)\n if (p_a + p_b) in pentagonals:\n print(a, b, p_a, p_b, p_a+p_b, \"sum is pentagonal\")\n solutions.append((p_a, p_b, p_a-p_b))\n print(solutions)\n time.sleep(5)\n keep_going = (len(solutions) < 5) and (a < 
10)\n\n    a += 1","sub_path":"44.py","file_name":"44.py","file_ext":"py","file_size_in_byte":1277,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"617687211","text":"'''\nextract.py - Extract UMI from fastq\n====================================================\n\n:Author: Ian Sudbery, Tom Smith\n:Release: $Id$\n:Date: |today|\n:Tags: Python UMI\n\nPurpose\n-------\n\nExtract UMI barcode from a read and add it to the read name, leaving\nany sample barcode in place. Can deal with paired end reads and UMIs\nsplit across the paired ends. Can also optionally extract cell\nbarcodes and append these to the read name. See the section below\nfor an explanation of how to encode the barcode pattern(s) to\nspecify the position of the UMI +/- cell barcode.\n\n\nFiltering and correcting cell barcodes\n--------------------------------------\n\numi_tools extract can optionally filter cell barcodes\n(--filter-cell-barcode) against a user-supplied whitelist\n(--whitelist). If a whitelist is not available for your data, e.g.\nif you have performed droplet-based scRNA-Seq, you can use the\nwhitelist tool.\n\nCell barcodes which do not match the whitelist (user-generated or\nautomatically generated) can also be optionally corrected using the\n--error-correct-cell option.\n\nThe whitelist should be in the following format (tab-separated):\n\n    AAAAAA	AGAAAA\n    AAAATC\n    AAACAT\n    AAACTA	AAACTN,GAACTA\n    AAATAC\n    AAATCA	GAATCA\n    AAATGT	AAAGGT,CAATGT\n\nWhere column 1 is the whitelisted cell barcodes and column 2 is\nthe list (comma-separated) of other cell barcodes which should be\ncorrected to the barcode in column 1. If the --error-correct-cell\noption is not used, this column will be ignored. Any additional columns\nin the whitelist input, such as the counts columns from the output of\numi_tools whitelist, will be ignored.\n\n\nUsage:\n------\n\nFor single ended reads:\n        umi_tools extract --extract-method=string\n        --bc-pattern=[PATTERN] -L extract.log [OPTIONS]\n\nreads from stdin and outputs to stdout.\n\nFor paired end reads:\n        umi_tools extract --extract-method=string\n        --bc-pattern=[PATTERN] --bc-pattern2=[PATTERN]\n        --read2-in=[FASTQIN] --read2-out=[FASTQOUT] -L extract.log [OPTIONS]\n\nreads end one from stdin and end two from FASTQIN and outputs end one to stdout\nand end two to FASTQOUT.\n\n\nUsing regex and filtering against a whitelist of cell barcodes:\n        umi_tools extract --extract-method=regex --filter-cell-barcode\n        --bc-pattern=[REGEX] --whitelist=[WHITELIST_TSV]\n        -L extract.log [OPTIONS]\n\n'''\nfrom __future__ import absolute_import\nimport sys\nimport regex\nimport collections\nimport optparse\n\n# python 3 doesn't require izip\ntry:\n    # Python 2\n    from itertools import izip\nexcept ImportError:\n    # Python 3\n    izip = zip\n\nimport umi_tools.Utilities as U\nimport umi_tools.umi_methods as umi_methods\n\n# add the generic docstring text\n__doc__ = __doc__ + U.GENERIC_DOCSTRING_WE\n\n\ndef main(argv=None):\n    \"\"\"script main.\n\n    parses command line options in sys.argv, unless *argv* is given.\n    \"\"\"\n\n    if argv is None:\n        argv = sys.argv\n\n    # setup command line parser\n    parser = U.OptionParser(version=\"%prog version: $Id$\",\n                            usage=globals()[\"__doc__\"])\n\n    # (Experimental option) Retain the UMI in the sequence read\n    parser.add_option(\"--retain-umi\", dest=\"retain_umi\", action=\"store_true\",\n                      help=optparse.SUPPRESS_HELP)\n    parser.add_option(\"-p\", \"--bc-pattern\", dest=\"pattern\", type=\"string\",\n                      help=\"Barcode 
pattern\")\n parser.add_option(\"--bc-pattern2\", dest=\"pattern2\", type=\"string\",\n help=\"Barcode pattern for paired reads\")\n parser.add_option(\"--3prime\", dest=\"prime3\", action=\"store_true\",\n help=\"barcode is on 3' end of read.\")\n parser.add_option(\"--read2-in\", dest=\"read2_in\", type=\"string\",\n help=\"file name for read pairs\")\n parser.add_option(\"--read2-out\", dest=\"read2_out\", type=\"string\",\n help=\"file to output processed paired read to\")\n parser.add_option(\"--read2-stdout\", dest=\"read2_stdout\",\n action=\"store_true\",\n help=\"Paired reads, send read2 to stdout, discarding read1\")\n parser.add_option(\"--quality-filter-threshold\",\n dest=\"quality_filter_threshold\", type=\"int\",\n help=(\"Remove reads where any UMI base quality score \"\n \"falls below this threshold\"))\n parser.add_option(\"--quality-filter-mask\",\n dest=\"quality_filter_mask\", type=\"int\",\n help=(\"If a UMI base has a quality below this threshold, \"\n \"replace the base with 'N'\"))\n parser.add_option(\"--quality-encoding\",\n dest=\"quality_encoding\", type=\"choice\",\n choices=[\"phred33\", \"phred64\", \"solexa\"],\n help=(\"Quality score encoding. Choose from 'phred33'\"\n \"[33-77] 'phred64' [64-106] or 'solexa' [59-106]\"))\n parser.add_option(\"--extract-method\",\n dest=\"extract_method\", type=\"choice\",\n choices=[\"string\", \"regex\"],\n help=(\"How to extract the umi +/- cell barcodes, Choose \"\n \"from 'string' or 'regex'\"))\n parser.add_option(\"--filter-cell-barcode\",\n dest=\"filter_cell_barcode\",\n action=\"store_true\",\n help=\"Filter the cell barcodes\")\n parser.add_option(\"--error-correct-cell\",\n dest=\"error_correct_cell\",\n action=\"store_true\",\n help=(\"Correct errors in the cell barcode\"))\n parser.add_option(\"--whitelist\",\n dest=\"whitelist\", type=\"string\",\n help=(\"A whitelist of accepted cell barcodes\"))\n parser.add_option(\"--blacklist\",\n dest=\"blacklist\", type=\"string\",\n help=(\"A blacklist of accepted cell barcodes\"))\n parser.add_option(\"--reads-subset\",\n dest=\"reads_subset\", type=\"int\",\n help=(\"Only extract from the first N reads. If N is \"\n \"greater than the number of reads, all reads will \"\n \"be used\"))\n parser.add_option(\"--reconcile-pairs\",\n dest=\"reconcile\", action=\"store_true\",\n help=(\"Allow the presences of reads in read2 input that are\"\n \"not present in read1 input. This allows cell barcode\"\n \"filtering of read1s without considering read2s\"))\n parser.set_defaults(extract_method=\"string\",\n filter_cell_barcodes=False,\n whitelist=None,\n blacklist=None,\n error_correct_cell=False,\n pattern=None,\n pattern2=None,\n read2_in=None,\n read2_out=False,\n read2_stdout=False,\n quality_filter_threshold=None,\n quality_encoding=None,\n reconcile=False)\n\n # add common options (-h/--help, ...) 
and parse command line\n\n    (options, args) = U.Start(parser, argv=argv,\n                              add_group_dedup_options=False,\n                              add_sam_options=False)\n\n    if options.retain_umi and not options.extract_method == \"regex\":\n        raise ValueError(\"option --retain-umi only works with --extract-method=regex\")\n\n    if options.quality_filter_threshold or options.quality_filter_mask:\n        if not options.quality_encoding:\n            U.error(\"must provide a quality encoding (--quality-\"\n                    \"encoding) to filter UMIs by quality (--quality\"\n                    \"-filter-threshold) or mask low quality bases \"\n                    \"with (--quality-filter-mask)\")\n\n    if not options.pattern and not options.pattern2:\n        if not options.read2_in:\n            U.error(\"Must supply --bc-pattern for single-end\")\n        else:\n            U.error(\"Must supply --bc-pattern and/or --bc-pattern2 \"\n                    \"if paired-end \")\n\n    if options.pattern2:\n        if not options.read2_in:\n            U.error(\"must specify a paired fastq ``--read2-in``\")\n\n    if not options.pattern2:\n        options.pattern2 = options.pattern\n\n    extract_cell = False\n    extract_umi = False\n\n    # If the pattern is a regex we can compile the regex(es) prior to\n    # ExtractFilterAndUpdate instantiation\n    if options.extract_method == \"regex\":\n        if options.pattern:\n            try:\n                options.pattern = regex.compile(options.pattern)\n            except regex.error:\n                U.error(\"barcode_regex '%s' is not a \"\n                        \"valid regex\" % options.pattern)\n\n        if options.pattern2:\n            try:\n                options.pattern2 = regex.compile(options.pattern2)\n            except regex.error:\n                U.error(\"barcode_regex2 '%s' is not a \"\n                        \"valid regex\" % options.pattern2)\n\n    # check whether the regex contains a umi group(s) and cell group(s)\n    if options.extract_method == \"regex\":\n        if options.pattern:\n            for group in options.pattern.groupindex:\n                if group.startswith(\"cell_\"):\n                    extract_cell = True\n                elif group.startswith(\"umi_\"):\n                    extract_umi = True\n        if options.pattern2:\n            for group in options.pattern2.groupindex:\n                if group.startswith(\"cell_\"):\n                    extract_cell = True\n                elif group.startswith(\"umi_\"):\n                    extract_umi = True\n\n    # check whether the pattern string contains umi/cell bases\n    elif options.extract_method == \"string\":\n        if options.pattern:\n            if \"C\" in options.pattern:\n                extract_cell = True\n            if \"N\" in options.pattern:\n                extract_umi = True\n        if options.pattern2:\n            if \"C\" in options.pattern2:\n                extract_cell = True\n            if \"N\" in options.pattern2:\n                extract_umi = True\n\n    if not extract_umi:\n        if options.extract_method == \"string\":\n            U.error(\"barcode pattern(s) do not include any umi bases \"\n                    \"(marked with 'Ns') %s, %s\" % (\n                        options.pattern, options.pattern2))\n        elif options.extract_method == \"regex\":\n            U.error(\"barcode regex(es) do not include any umi groups \"\n                    \"(starting with 'umi_') %s, %s\" % (\n                        options.pattern, options.pattern2))\n\n    if options.filter_cell_barcode:\n\n        if not options.whitelist:\n            U.error(\"must provide a whitelist (--whitelist) if using \"\n                    \"--filter-cell-barcode option\")\n\n        if not extract_cell:\n            if options.extract_method == \"string\":\n                U.error(\"barcode pattern(s) do not include any cell bases \"\n                        \"(marked with 'Cs') %s, %s\" % (\n                            options.pattern, options.pattern2))\n            elif options.extract_method == \"regex\":\n                U.error(\"barcode regex(es) do not include any cell groups \"\n                        \"(starting with 'cell_') %s, %s\" % (\n                            options.pattern, options.pattern2))\n\n    read1s = umi_methods.fastqIterate(options.stdin)\n\n    # set up read extractor\n    ReadExtractor = umi_methods.ExtractFilterAndUpdate(\n        options.extract_method,\n        options.pattern,\n        options.pattern2,\n        
options.prime3,\n extract_cell,\n options.quality_encoding,\n options.quality_filter_threshold,\n options.quality_filter_mask,\n options.filter_cell_barcode,\n options.retain_umi)\n\n if options.filter_cell_barcode:\n cell_whitelist, false_to_true_map = umi_methods.getUserDefinedBarcodes(\n options.whitelist, options.error_correct_cell)\n\n ReadExtractor.cell_whitelist = cell_whitelist\n ReadExtractor.false_to_true_map = false_to_true_map\n\n if options.blacklist:\n blacklist = set()\n with U.openFile(options.blacklist, \"r\") as inf:\n for line in inf:\n blacklist.add(line.strip().split(\"\\t\")[0])\n ReadExtractor.cell_blacklist = blacklist\n\n # variables for progress monitor\n progCount = 0\n displayMax = 100000\n U.info(\"Starting barcode extraction\")\n\n if options.read2_in is None:\n for read in read1s:\n\n # incrementing count for monitoring progress\n progCount += 1\n\n # Update display in every 100kth iteration\n if progCount % displayMax == 0:\n U.info(\"Parsed {} reads\".format(progCount))\n\n new_read = ReadExtractor(read)\n\n if options.reads_subset:\n if (ReadExtractor.read_counts['Input Reads'] >\n options.reads_subset):\n break\n\n if not new_read:\n continue\n\n options.stdout.write(str(new_read) + \"\\n\")\n\n else:\n read2s = umi_methods.fastqIterate(U.openFile(options.read2_in))\n\n if options.read2_out:\n read2_out = U.openFile(options.read2_out, \"w\")\n\n if options.reconcile:\n strict = False\n else:\n strict = True\n\n for read1, read2 in umi_methods.joinedFastqIterate(\n read1s, read2s, strict):\n\n # incrementing count for monitoring progress\n progCount += 1\n\n # Update display in every 100kth iteration\n if progCount % displayMax == 0:\n U.info(\"Parsed {} reads\".format(progCount))\n sys.stdout.flush()\n\n reads = ReadExtractor(read1, read2)\n\n if options.reads_subset:\n if (ReadExtractor.read_counts['Input Reads'] >\n options.reads_subset):\n break\n\n if not reads:\n continue\n else:\n new_read1, new_read2 = reads\n\n if options.read2_stdout:\n options.stdout.write(str(new_read2) + \"\\n\")\n else:\n options.stdout.write(str(new_read1) + \"\\n\")\n\n if options.read2_out:\n read2_out.write(str(new_read2) + \"\\n\")\n\n if options.read2_out:\n read2_out.close()\n\n for k, v in ReadExtractor.getReadCounts().most_common():\n U.info(\"%s: %s\" % (k, v))\n\n U.Stop()\n\nif __name__ == \"__main__\":\n sys.exit(main(sys.argv))\n","sub_path":"umi_tools/extract.py","file_name":"extract.py","file_ext":"py","file_size_in_byte":14641,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"335761078","text":"import imageio\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom sklearn import svm\nfrom sklearn.cluster import KMeans\nimport cv2\n\n\ndef viewImage(name, image):\n cv2.namedWindow(name, cv2.WINDOW_NORMAL)\n cv2.resizeWindow(name, 1000, 1000)\n cv2.imshow(name, image)\n\n while(True):\n if cv2.waitKey(1) & 0xFF == ord('q'):\n break\n\n cv2.destroyAllWindows()\n\n\ndef GetMostPopulatedRegion(bins, counts):\n imax = np.argmax(counts)\n binWidth = np.gradient(bins)[0]\n lo = bins[imax] - binWidth/2.\n hi = bins[imax+1] + binWidth/2.\n return lo, hi\n\n\ndef open_image(filename):\n img = cv2.imread(filename)\n return img\n\n\n\n\n\n\nstripped_resistors = []\nfor i in range(1, 7):\n filename = \"cropped_resistor_{}.jpg\".format(i)\n bgr_resistor_img = open_image(filename)\n rgb_resistor_img = cv2.cvtColor(bgr_resistor_img, cv2.COLOR_BGR2RGB)\n grayscale = cv2.cvtColor(bgr_resistor_img, cv2.COLOR_BGR2GRAY)\n\n 
ret, threshold = cv2.threshold(\n grayscale, 225, 255, cv2.THRESH_BINARY\n )\n kernel = np.ones((4,4),np.uint8)\n dilation = cv2.dilate(threshold, kernel, iterations = 1)\n mask = dilation\n inpainted_img = cv2.inpaint(bgr_resistor_img, mask, 128, cv2.INPAINT_NS)\n rgb_inpainted_img = cv2.cvtColor(inpainted_img, cv2.COLOR_BGR2RGB)\n hsv_resistor_img = cv2.cvtColor(inpainted_img, cv2.COLOR_BGR2HSV)\n\n # Hue, saturation and value averaged across the resistor's cross-section\n hue_mean_line = hsv_resistor_img[:,:,0].mean(axis = 0)\n saturation_mean_line = hsv_resistor_img[:,:,1].mean(axis = 0)\n visibility_mean_line = hsv_resistor_img[:,:,2].mean(axis = 0)\n\n plt.clf()\n counts, bins, bars = plt.hist(\n hue_mean_line, bins=6\n )\n lo1, hi1 = GetMostPopulatedRegion(bins, counts)\n # plt.show()\n\n grad = np.abs(np.gradient(hue_mean_line))\n length = np.linspace(0, len(hue_mean_line)-1, len(hue_mean_line))\n\n # variant 1\n # ix = ((hue_mean_line > lo1) & (hue_mean_line < hi1)) | (grad > max(grad)*0.05)\n # variant 2: keep columns whose hue lies outside the dominant (body) hue band\n # and whose gradient is small, i.e. the stable stripe regions\n ix = ((hue_mean_line < lo1) | (hue_mean_line > hi1)) & (grad < max(grad)*0.13)\n\n # KMeans: cluster the surviving column positions into the five stripes\n stripCount = 5\n length_ix = length[ix].reshape(-1, 1)\n kmeans = KMeans(n_clusters=stripCount, random_state=0).fit(length_ix)\n resistor_stripes = []\n for ctr_float in kmeans.cluster_centers_:\n ctr_int = int(ctr_float)\n H_mean = round(hue_mean_line[ctr_int-1:ctr_int+2].sum()/3., 2)\n S_mean = round(saturation_mean_line[ctr_int-1:ctr_int+2].sum()/3., 2)\n V_mean = round(visibility_mean_line[ctr_int-1:ctr_int+2].sum()/3., 2)\n resistor_stripes.append((ctr_int, [H_mean, S_mean, V_mean]))\n\n resistor_stripes = sorted(resistor_stripes, key=lambda index: index[0])\n stripped_resistors.append(resistor_stripes)\n\n\nfor i in stripped_resistors:\n print(i)\n print()\n\n # NOTE: the figures below are drawn from the loop variables of the last\n # image processed above, so every iteration shows the same (final) image.\n plt.clf()\n\n # 1 - original image\n ax1 = plt.subplot(4, 1, 1)\n ax1.margins(x=0, y=0)\n plt.imshow(rgb_resistor_img)\n\n # 2 - glare highlights inpainted\n ax2 = plt.subplot(4, 1, 2)\n ax2.margins(x=0, y=0)\n plt.imshow(rgb_inpainted_img)\n\n # 3 - full plot of the cross-section-averaged hues\n ax3 = plt.subplot(4, 1, 3)\n ax3.margins(x=0, y=0)\n plt.scatter(length, hue_mean_line)\n\n # 4 - result of the hue filtering\n ax4 = plt.subplot(4, 1, 4)\n ax4.margins(x=0, y=0)\n plt.scatter(length[ix], hue_mean_line[ix])\n\n plt.show()\n","sub_path":"test/test_v2.py","file_name":"test_v2.py","file_ext":"py","file_size_in_byte":3556,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"5034721","text":"\"\"\"\n :author: Gaurav Nagoshe\n :since: 08/07/2019\n :overview:\n\n\"\"\"\n\nimport jwt\nimport json\nfrom django.contrib.auth import authenticate\nfrom django.contrib.sites.shortcuts import get_current_site\nfrom django.utils.encoding import force_bytes, force_text\nfrom django.utils.http import urlsafe_base64_encode, urlsafe_base64_decode\nfrom django.template.loader import render_to_string\nfrom django.contrib.auth.models import User\nfrom django.shortcuts import render\nfrom django.contrib.auth import login as auth_login\nfrom django.contrib.auth import logout as auth_logout\nfrom django.core.mail import send_mail\nfrom django.http import HttpResponse\nfrom rest_framework.views import APIView\nfrom rest_framework.permissions import AllowAny\nfrom rest_framework.response import Response\nfrom rest_framework.decorators import api_view, permission_classes\nfrom rest_framework import status\nfrom .serializers import UserSerializer\nfrom Fundoo import settings\nfrom .decorators import 
user_login_required\nfrom .services.Redis_Service import RedisCache\nfrom django.views.decorators.csrf import csrf_exempt\nfrom .services.s3_service import ImageUpload\nfrom .services.RabbitMqService import RabbitMqService\nfrom .models.profile_model import UserProfile\nfrom .util import Util\nfrom .serializers import UserProfilePic\n\nfileUpload = ImageUpload()\n# making object of redis service class\nredis = RedisCache()\nutil = Util()\nrabbit_mq = RabbitMqService()\n# simple function to render to home\n@user_login_required\ndef home(request):\n \"\"\"\n\n :param request: request to server\n :return: renders or redirects to home.html\n \"\"\"\n return render(request, 'home.html')\n\n\n# Function to Logout User\n@user_login_required\ndef logout(request):\n \"\"\"\n\n :param request: request to server\n :return: redirects to register.html\n \"\"\"\n\n redis.flush() # clearing redis cache when user logs out\n # auth_logout(request) # logout user\n\n return render(request, 'register.html')\n\n\n@permission_classes((AllowAny,))\nclass UserView(APIView):\n\n \"\"\"\n Class for UserView\n Class has different methods like post,put,get,delete\n Method post is for Registration of user\n\n \"\"\"\n\n def post(self, request, format=None):\n\n \"\"\"\n\n :param request: type of request to server here it is POST\n :param format: format of data to be passed to server. default is None\n :return: json response with status code for registration of user\n \"\"\"\n try:\n data = request.data\n if data is None:\n return Response({'error': 'Enter Data'}, status=status.HTTP_400_BAD_REQUEST)\n\n # define a serializer object of UserSerializer and get data\n serializer = UserSerializer(data=data)\n username = data['username']\n\n if User.objects.filter(username=username).exists():\n return Response({'Error': 'User already Exists'}, status=status.HTTP_400_BAD_REQUEST)\n\n if serializer.is_valid(): # check for valid data in serializer\n\n user = serializer.save() # make user object from serializer\n # payload to generate token\n payload = {\n \"user_id\": user.pk,\n \"username\": user.username\n }\n\n # generate jwt token\n token = jwt.encode(payload=payload, key=settings.JWT_SECRET_KEY).decode('utf-8')\n\n # get current site\n current_site = get_current_site(request)\n\n # subject for mail\n subject = \"Activate Your ChatApp Account\"\n\n # message to be sent in mail\n message = render_to_string('account_activate.html',\n {\n 'user': user,\n 'domain': current_site.domain,\n 'uid': urlsafe_base64_encode(force_bytes(user.pk)),\n 'token': token\n })\n\n # sender and receiver mails\n from_email = settings.EMAIL_HOST_USER\n to_send = [user.email]\n\n # send mail method to send mail\n send_mail(subject, message, from_email, to_send, fail_silently=False)\n\n return Response(serializer.data, status=status.HTTP_200_OK)\n\n except ValueError:\n return Response({'data': serializer.errors}, status=status.HTTP_400_BAD_REQUEST)\n\n\ndef activate(request, uidb64, token):\n \"\"\"\n\n :param request: type of request to the server\n :param uidb64: encoded uid of user from link of url\n :param token: token generated for user\n :return: this functions activates user if user is not active\n\n \"\"\"\n try:\n # decode uid from link and get user\n uid = force_text(urlsafe_base64_decode(uidb64))\n user = User.objects.get(pk=uid)\n\n if user: # check for user in database\n\n if not user.is_active: # checking user is active or not\n\n # defining payload for user\n payload = {\n \"user_id\": user.pk,\n \"username\": user.username\n }\n\n # 
checking if payload equals decoded token from link\n if payload == jwt.decode(token, settings.JWT_SECRET_KEY):\n\n # make user active\n user.is_active = True\n\n # save and log the user in (auth_login, not the API view defined below)\n user.save()\n auth_login(request, user)\n # return redirect('home')\n\n response_data = {\n \"activate\": \"success\"\n }\n return HttpResponse(json.dumps(response_data), content_type=\"application/json\")\n\n else:\n response_data = {\n \"activate\": \"false or already activated\"\n }\n return HttpResponse(json.dumps(response_data), content_type=\"application/json\")\n\n except User.DoesNotExist:\n response_data = {\n \"activate\": \"false or already activated\"\n }\n return HttpResponse(json.dumps(response_data), content_type=\"application/json\")\n\n\n@api_view([\"POST\"])\n@permission_classes((AllowAny,))\ndef login(request):\n \"\"\"\n\n :param request: request type. here, it is POST as in decorator @api_view\n :return: returns a json response with username, email and generated jwt token\n\n \"\"\"\n # get username and password\n username = request.data.get(\"username\")\n password = request.data.get(\"password\")\n try:\n # check for username and password\n if username is None or password is None:\n\n raise ValueError\n\n # authenticate user\n user = authenticate(username=username, password=password)\n\n # if user is not present\n if not user:\n\n return Response({'Error': 'No such User Exists'}, status=status.HTTP_404_NOT_FOUND)\n\n payload = {\n \"user_id\": user.pk,\n \"username\": user.username\n }\n # token generated by using jwt\n token = jwt.encode(payload, settings.JWT_SECRET_KEY, algorithm='HS256').decode('utf-8')\n print(user.is_authenticated)\n # setting token in Redis Cache\n redis.set('token', token)\n\n # login the user\n # auth_login(request, user)\n\n except ValueError:\n\n return Response({'error': 'provide username and password'},\n status=status.HTTP_400_BAD_REQUEST)\n\n context = {\n 'token': token,\n 'username': user.username,\n 'email': user.email,\n 'id': user.pk\n }\n\n return Response(context, status=status.HTTP_202_ACCEPTED)\n\n\n@csrf_exempt\ndef upload(request):\n if request.method == 'POST':\n\n # get file as input from user\n imagename = request.FILES.get('image')\n userdata = util.GetUser()\n print(userdata)\n uid = userdata['user_id']\n username1 = userdata['username']\n user = request.user\n x = fileUpload.file_upload(imagename, uid)\n profile_model = UserProfile.objects.get(user=uid)\n profile_model.profile_pic = x\n profile_model.save()\n\n if x:\n return HttpResponse('Success')\n\n return HttpResponse('Where is Image...')\n\n\n@api_view(['GET'])\n@csrf_exempt\n@permission_classes((AllowAny,))\ndef ProfilePic(request):\n userdata = util.GetUser()\n uid = userdata['user_id']\n profile_model = UserProfile.objects.get(user=uid)\n pic_ser = UserProfilePic(profile_model)\n return Response(pic_ser.data, status=status.HTTP_200_OK)\n\n\n@api_view(['GET'])\n@permission_classes((AllowAny,))\ndef userList(request):\n users = User.objects.all()\n\n if users:\n user_ser = UserSerializer(users, many=True)\n return Response({'users': user_ser.data}, status=status.HTTP_200_OK)\n\n else:\n return Response({'users': 'No user'}, status=status.HTTP_200_OK)\n\n","sub_path":"users/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":9075,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"593828544","text":"N = int(input())\na = [int(x) for x in input().split()]\n\na.sort()\n\n# after sorting, indices 0 .. N-1 are never used\nans = 0\n
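# [Added worked example, not part of the original solution] The greedy keeps\n# the N largest pairs and scores the smaller member of each pair, i.e. the\n# sorted indices N, N+2, ..., 3N-2. For N=2 and a=[1,2,3,4,5,6] that picks\n# a[2] and a[4]:\nassert sum(sorted([1, 2, 3, 4, 5, 6])[2:5:2]) == 3 + 5\n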
for i in range(N, 3*N-1, 2):\n ans += a[i]\n\nprint(ans)\n","sub_path":"agc012/A.py","file_name":"A.py","file_ext":"py","file_size_in_byte":150,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"328117443","text":"#!/usr/bin/env python\n# coding: utf-8\nimport json\nimport os\nimport os.path as osp\nfrom collections import Counter\n\nimport numpy as np\nimport pandas as pd\n\nfrom settings import NS_RAW_DATA_DIR, POLLEN_TYPES, NEW_RAW_DATA, NEW_DATA\nfrom utils.preprocessing import label_to_index, calculate_and_check_shapes\n\n\ndef transform_raw_data(raw_data_path, classes_to_take=None):\n files = sorted(os.listdir(raw_data_path))\n data = {\"scatter\": [], \"size\": [], \"life_1\": [], \"spectrum\": [], \"life_2\": []}\n encoded_labels = []\n\n class_to_num, string_labels = label_to_index(files)\n\n for index, file_name in enumerate(files):\n if index % 2 == 0:\n print(f'Current file is {file_name}')\n\n if file_name.split(\".\")[-1] != \"json\":\n continue\n\n if classes_to_take is not None:\n if not file_name.split(\".\")[0] in classes_to_take:\n continue\n\n raw_data = json.loads(open(osp.join(raw_data_path, file_name)).read())\n\n for i in range(len(raw_data[\"Data\"])):\n specmax = np.max(raw_data[\"Data\"][i][\"Spectrometer\"])\n file_data = raw_data[\"Data\"][i]\n calculate_and_check_shapes(file_data, file_name, specmax, data, encoded_labels, class_to_num)\n\n feature_names = [\"scatter\", \"size\", \"life_1\", \"spectrum\", \"life_2\"]\n return data, encoded_labels, string_labels, class_to_num, feature_names\n\n\ndef create_lifetime(data, path_to_save):\n lista = []\n for i in range(len(data[2])):\n\n if i == 0:\n l = [\"Cupressus\"]\n elif i == 1:\n l = [\"Fraxinus excelsior\"]\n else:\n l = [\"Ulmus\"]\n\n if np.where(data[2][i][0, :] == np.max(data[2][i][0, :]))[0].shape[0] > 1:\n l.append(\"Yes\")\n else:\n l.append(\"No\")\n\n for k in range(4):\n if k != 2:\n l.append(np.max(data[2][i][k, :]) / np.e)\n lista.append(l)\n\n features = [\"Pollen type\", \"Saturated\", \"Time of band 1\", \"Time of band 2\", \"Time of band 3\"]\n amb = pd.DataFrame(columns=features)\n\n for i in range(len(lista)):\n amb.loc[len(amb)] = lista[i]\n amb.to_csv(osp.join(path_to_save, \"Time of lifetime.csv\"), index=False)\n\n\ndef dict_int_to_string(my_dict):\n return dict((str(k), str(v)) for k, v in my_dict.items())\n\n\ndef find_most_common(label_dict, n=10):\n label_counter = Counter(label_dict)\n most_common = label_counter.most_common(n)\n return most_common  # a list of (label, count) pairs\n\n\n
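# [Added usage sketch, not in the original module; the sample labels below are\n# hypothetical] find_most_common wraps collections.Counter, so it returns the\n# n most frequent (label, count) pairs:\ndef _demo_find_most_common():\n sample = {'betula': 7, 'alnus': 3, 'corylus': 5}\n assert find_most_common(sample, n=2) == [('betula', 7), ('corylus', 5)]\n\n\n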
if __name__ == '__main__':\n data, encoded_labels, string_labels, class_to_num, feature_names = transform_raw_data(NEW_RAW_DATA)\n print()\n","sub_path":"utils/converting_raw_data.py","file_name":"converting_raw_data.py","file_ext":"py","file_size_in_byte":2693,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"583822312","text":"# This is the World Series Program.\n# This program will allow the user to enter a team name.\n# The entry accepts either uppercase or lowercase letters.\n# It will then search for the team and display how many times they won.\n\n# main\ndef enterTeam():\n # Read the file and place its contents into a list, one champion per line.\n infile = open('WorldSeriesWinners.txt', 'r')\n worldchamps = infile.read().splitlines()\n infile.close()\n## print(\"World Series Winners you may choose from\")\n## print('-----------------------------------------------------------------------')\n## print(worldchamps)\n## print('-----------------------------------------------------------------------')\n team = input('Now please pick a team: ')\n\n # Start the win counter.\n counter = 0\n\n # Compare case-insensitively, so the user may type the team name in any\n # mixture of uppercase and lowercase letters.\n for winner in worldchamps:\n if team.lower() == winner.lower():\n counter = counter + 1\n\n # Display the results for the user.\n if counter >= 1:\n print('-----------------------------------------------------------------------')\n print(\"The\", team, \"won the world series\", counter, \"times, from 1903-2009.\")\n print('-----------------------------------------------------------------------')\n else:\n print('-----------------------------------------------------------------------')\n print(\"The\", team, \"never won the world series.\")\n print('-----------------------------------------------------------------------')\n\n\nif __name__ == '__main__':\n enterTeam()\n","sub_path":"baseballProgram/PickATeam.py","file_name":"PickATeam.py","file_ext":"py","file_size_in_byte":2064,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"639064102","text":"import pygame\nimport pygame.camera\nimport pygame.image\nfrom pygame.locals import *\n\nFPS = 30\n\nclass Camera(object):\n\n def __init__(self):\n self.cam = None\n\n def __del__(self):\n if self.cam:\n self.cam.stop()\n\n def live_stream(self, display, pos=(0, 0)):\n # if you don't want to tie the framerate to the camera, you can check \n # if the camera has an image ready. note that while this works\n # on most cameras, some will never return true.\n if self.cam and self.cam.query_image():\n snapshot = self.cam.get_image()\n display.blit(snapshot, pos)\n\n def get_cameras(self):\n return pygame.camera.list_cameras()\n\n def set_select_cam(self, cam, size=(640, 480)):\n if self.cam:\n self.cam.stop()\n\n if cam:\n self.cam = pygame.camera.Camera(cam, size)\n\n try:\n self.cam.start()\n return True\n except Exception as e:\n self.cam = None\n print(e)\n\n return False\n\n def capture(self):\n if not self.cam:\n return\n\n return self.cam.get_image()\n\n def capture_raw(self):\n if not self.cam:\n return\n\n return self.cam.get_raw()\n\n def save_img(self, img, filename):\n ''' This will save your Surface as either a BMP, TGA, PNG, or JPEG image. 
'''\n pygame.image.save(img, filename)\n\nclass Text(object):\n\n def __init__(self, msg, font_name=None, font_size=14, color=(255, 255, 255), bgcolor=None):\n font = pygame.font.SysFont(font_name, font_size)\n\n # bgcolor cannot be None or we'll get\n # Invalid background RGBA argument\n if bgcolor is None:\n self._text = font.render(msg, True, color)\n else:\n self._text = font.render(msg, True, color, bgcolor)\n\n def draw(self, display, position=(0, 0)):\n rect = self._text.get_rect()\n rect.centerx = position[0]\n rect.centery = position[1]\n display.blit(self._text, rect)\n\nclass Button(object):\n\n def __init__(self, text, pos=None, size=None, bgcolor=None, forecolor=None, enabled=True):\n self.text = text\n self.pos = pos\n self.size = size\n self.bgcolor = bgcolor\n self.forecolor = forecolor\n self.enabled = enabled\n\n def raise_event(self, event):\n pass\n\n def set_enabled(self, enabled):\n self.enabled = enabled\n\n def draw(self, display):\n pass\n\nclass App(object):\n\n def __init__(self, size=None):\n pygame.init()\n pygame.display.set_caption(\"PyCam\")\n pygame.camera.init()\n pygame.font.init()\n\n self.size = size or (800, 600)\n self.display = pygame.display.set_mode(self.size, 0)\n self.clock = pygame.time.Clock()\n\n def __del__(self):\n pygame.font.quit()\n pygame.camera.quit()\n pygame.quit()\n\n def clear(self):\n self.display.fill((0, 0, 0))\n\n def main(self):\n running = True\n\n Cam = Camera()\n clist = Cam.get_cameras()\n\n if not clist:\n print(\"Sorry, no cameras detected.\")\n\n has_camera = clist and Cam.set_select_cam(clist[0])\n\n btn_capture = Button(\"Capture\", (0 ,0), (75, 35))\n\n diff = 0\n reverse = False\n\n self.display.fill((180, 180, 180))\n\n while running:\n events = pygame.event.get()\n\n for e in events:\n btn_capture.raise_event(e)\n if e.type == QUIT or (e.type == KEYDOWN and e.key == K_ESCAPE):\n running = False\n\n if has_camera:\n Cam.live_stream(self.display, (800 - 640, 0))\n else:\n s = pygame.Surface((640, 480))\n s.fill((255, 255, 255))\n pos = (s.get_rect().centerx + diff, s.get_rect().centery)\n Text(\"Camera not found.\", font_size=36, color=(0, 0, 0)).draw(s, pos)\n self.display.blit(s, (800 - 640, 0))\n\n if diff > 100:\n reverse = True\n elif diff < -100:\n reverse = False\n\n if reverse:\n diff -=1\n else:\n diff += 1\n\n #btn_capture.draw(self.display)\n\n pygame.display.flip()\n self.clock.tick(FPS)\n\nif __name__ == '__main__':\n A = App()\n A.main()","sub_path":"pycam.py","file_name":"pycam.py","file_ext":"py","file_size_in_byte":4448,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"238902465","text":"from dombase import *\n\nclass Copper(Treasure, CardAdd):\n\tname = 'Copper'\n\tdef __init__(self, game, **kwargs):\n\t\tsuper(Copper, self).__init__(game, **kwargs)\n\t\tself.value = 1\n\t\tself.price = 0\n\tdef onPileCreate(self, pile, game, **kwargs):\n\t\tfor i in range(60-len(game.players)*7):\n\t\t\tpile.append(type(self)(game))\n\nclass Silver(Treasure, CardAdd):\n\tname = 'Silver'\n\tdef __init__(self, game, **kwargs):\n\t\tsuper(Silver, self).__init__(game, **kwargs)\n\t\tself.value = 2\n\t\tself.price = 3\n\tdef onPileCreate(self, pile, game, **kwargs):\n\t\tfor i in range(40):\n\t\t\tpile.append(type(self)(game))\n\t\t\nclass Gold(Treasure, CardAdd):\n\tname = 'Gold'\n\tdef __init__(self, game, **kwargs):\n\t\tsuper(Gold, self).__init__(game, **kwargs)\n\t\tself.value = 3\n\t\tself.price = 6\n\tdef onPileCreate(self, pile, game, **kwargs):\n\t\tfor i 
in range(30):\n\t\t\tpile.append(type(self)(game))\n\t\t\nclass Estate(Victory, CardAdd):\n\tname = 'Estate'\n\tdef __init__(self, game, **kwargs):\n\t\tsuper(Estate, self).__init__(game, **kwargs)\n\t\tself.value = 1\n\t\tself.price = 2\n\tdef onPileCreate(self, pile, game, **kwargs):\n\t\tif len(game.players)>2:\n\t\t\tamnt = 12\n\t\telse:\n\t\t\tamnt = 8\n\t\tfor i in range(amnt+len(game.players)*3):\n\t\t\tpile.append(type(self)(game))\n\nclass Duchy(Victory, CardAdd):\n\tname = 'Duchy'\n\tdef __init__(self, game, **kwargs):\n\t\tsuper(Duchy, self).__init__(game, **kwargs)\n\t\tself.value = 3\n\t\tself.price = 5\n\nclass Province(Victory, CardAdd):\n\tname = 'Province'\n\tdef __init__(self, game, **kwargs):\n\t\tsuper(Province, self).__init__(game, **kwargs)\n\t\tself.value = 6\n\t\tself.price = 8\n\tdef onPileCreate(self, pile, game, **kwargs):\n\t\tsuper(Province, self).onPileCreate(pile, game)\n\t\tpile.terminator = True\n\t\t\nclass Curse(Cursed, CardAdd):\n\tname = 'Curse'\n\tdef __init__(self, game, **kwargs):\n\t\tsuper(Curse, self).__init__(game, **kwargs)\n\t\tself.value = -1\n\t\tself.price = 0\n\tdef onPileCreate(self, pile, game, **kwargs):\n\t\tfor i in range(10*np.max((len(game.players)-1, 1))):\n\t\t\tpile.append(type(self)(game))\n\t\t\nbaseSetBase = [Copper, Silver, Gold, Estate, Duchy, Province, Curse]\n\nclass Cellar(Action, CardAdd):\n\tname = 'Cellar'\n\tdef __init__(self, game, **kwargs):\n\t\tsuper(Cellar, self).__init__(game, **kwargs)\n\t\tself.price = 2\n\tdef onPlay(self, player, **kwargs):\n\t\tsuper(Cellar, self).onPlay(player, **kwargs)\n\t\tplayer.addAction()\n\t\tdiscardedAmnt = 0\n\t\twhile player.hand:\n\t\t\tchoice = player.user([o.name for o in player.hand]+['EndCellar'], 'Choose discard')\n\t\t\tif choice+1>len(player.hand): break\n\t\t\tplayer.discard(choice)\n\t\t\tdiscardedAmnt += 1\n\t\tplayer.draw(amnt=discardedAmnt)\n\t\t\nclass Chapel(Action, CardAdd):\n\tname = 'Chapel'\n\tdef __init__(self, game, **kwargs):\n\t\tsuper(Chapel, self).__init__(game, **kwargs)\n\t\tself.price = 2\n\tdef onPlay(self, player, **kwargs):\n\t\tsuper(Chapel, self).onPlay(player, **kwargs)\n\t\tif not player.hand: return\n\t\toptions = copy.copy(player.hand)\n\t\ttrashed = []\n\t\tfor i in range(4):\n\t\t\tchoice = player.user([o.name for o in options]+['EndChapel'], 'Choose trash')\n\t\t\tif choice+1>len(options): break\n\t\t\ttrashed.append(options.pop(choice))\n\t\t\tif not options: break\n\t\tfor card in trashed:\n\t\t\tfor i in range(len(player.hand)):\n\t\t\t\tif player.hand[i]==card:\n\t\t\t\t\tplayer.trash(i)\n\t\t\t\t\tbreak\n\t\t\t\nclass Moat(Action, Reaction, CardAdd):\n\tname = 'Moat'\n\tdef __init__(self, game, **kwargs):\n\t\tsuper(Moat, self).__init__(game, **kwargs)\n\t\tReaction.__init__(self, game, **kwargs)\n\t\tself.price = 2\n\t\tself.signal = 'attack'\n\tdef onPlay(self, player, **kwargs):\n\t\tsuper(Moat, self).onPlay(player, **kwargs)\n\t\tplayer.draw(amnt=2)\n\tdef trigger(self, signal, **kwargs):\n\t\tif kwargs['player']==self.owner and self in self.owner.hand and self.owner.user(('no', 'yes'), 'Use moat'):\n\t\t\tself.owner.reveal(self)\n\t\t\treturn True\n\tdef connect(self, **kwargs):\n\t\tkwargs['player'].game.dp.connect(self.trigger, signal=self.signal)\n\tdef disconnect(self, **kwargs):\n\t\tkwargs['player'].game.dp.disconnect(self.trigger, signal=self.signal)\n\tdef onGain(self, player, **kwargs):\n\t\tself.connect(player=player, **kwargs)\n\tdef onTrash(self, player, **kwargs):\n\t\tself.disconnect(player=player, **kwargs)\n\tdef onReturn(self, player, **kwargs):\n\t\tself.disconnect(player=player, **kwargs)\n\t\t\n
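# [Added sketch; not part of dombase. _DemoDispatcher and _moat_reaction_demo\n# are hypothetical stand-ins illustrating the connect/disconnect contract the\n# Reaction cards above assume: handlers subscribe per signal, and a truthy\n# handler result is how a reaction (e.g. Moat) cancels an attack.]\nclass _DemoDispatcher(object):\n\tdef __init__(self):\n\t\tself.handlers = {}\n\tdef connect(self, handler, signal):\n\t\tself.handlers.setdefault(signal, []).append(handler)\n\tdef disconnect(self, handler, signal):\n\t\tself.handlers.get(signal, []).remove(handler)\n\tdef send(self, signal, **kwargs):\n\t\treturn [handler(signal, **kwargs) for handler in self.handlers.get(signal, [])]\n\ndef _moat_reaction_demo():\n\tdp = _DemoDispatcher()\n\thandler = lambda signal, **kwargs: True  # always reveal the Moat\n\tdp.connect(handler, signal='attack')\n\tblocked = any(dp.send(signal='attack', player='defender'))\n\tdp.disconnect(handler, signal='attack')\n\treturn blocked  # True while the handler was connected\n\n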
class Chancellor(Action, CardAdd):\n\tname = 'Chancellor'\n\tdef __init__(self, game, **kwargs):\n\t\tsuper(Chancellor, self).__init__(game, **kwargs)\n\t\tself.price = 3\n\tdef onPlay(self, player, **kwargs):\n\t\tsuper(Chancellor, self).onPlay(player, **kwargs)\n\t\tplayer.addCoin(amnt=2)\n\t\tif player.user(('no', 'yes'), 'Discard library'):\n\t\t\twhile player.library:\n\t\t\t\tplayer.discardPile.append(player.library.pop())\n\t\t\nclass Village(Action, CardAdd):\n\tname = 'Village'\n\tdef __init__(self, game, **kwargs):\n\t\tsuper(Village, self).__init__(game, **kwargs)\n\t\tself.price = 3\n\tdef onPlay(self, player, **kwargs):\n\t\tsuper(Village, self).onPlay(player, **kwargs)\n\t\tplayer.draw()\n\t\tplayer.addAction(amnt=2)\n\t\t\nclass Woodcutter(Action, CardAdd):\n\tname = 'Woodcutter'\n\tdef __init__(self, game, **kwargs):\n\t\tsuper(Woodcutter, self).__init__(game, **kwargs)\n\t\tself.price = 3\n\tdef onPlay(self, player, **kwargs):\n\t\tsuper(Woodcutter, self).onPlay(player, **kwargs)\n\t\tplayer.addCoin(amnt=2)\n\t\tplayer.addBuy()\n\t\t\nclass Workshop(Action, CardAdd):\n\tname = 'Workshop'\n\tdef __init__(self, game, **kwargs):\n\t\tsuper(Workshop, self).__init__(game, **kwargs)\n\t\tself.price = 3\n\tdef onPlay(self, player, **kwargs):\n\t\tsuper(Workshop, self).onPlay(player, **kwargs)\n\t\toptions = []\n\t\tfor pile in player.game.piles:\n\t\t\tif player.game.piles[pile].viewTop() and player.game.piles[pile].viewTop().getPrice(player)<5 and player.game.piles[pile].viewTop().getPotionPrice(player)<1:\n\t\t\t\toptions.append(pile)\n\t\tif not options: return\n\t\tchoice = player.user(options, 'Choose gain')\n\t\tplayer.gainFromPile(player.game.piles[options[choice]])\n\t\t\nclass Bureaucrat(Action, Attack, CardAdd):\n\tname = 'Bureaucrat'\n\tdef __init__(self, game, **kwargs):\n\t\tsuper(Bureaucrat, self).__init__(game, **kwargs)\n\t\tAttack.__init__(self, game, **kwargs)\n\t\tself.price = 4\n\tdef onPlay(self, player, **kwargs):\n\t\tsuper(Bureaucrat, self).onPlay(player, **kwargs)\n\t\t# take a single Silver from its pile and gain it onto the top of the deck\n\t\tsilver = player.game.piles['Silver'].gain()\n\t\tplayer.gain(silver, to=player.library, fromz=player.game.piles['Silver'])\n\t\tfor aplayer in player.game.getPlayers(player):\n\t\t\tif aplayer==player: continue\n\t\t\taplayer.attack(self.attack, self)\n\tdef attack(self, player, **kwargs):\n\t\thasVictory = False\n\t\tfor card in player.hand:\n\t\t\tif 'VICTORY' in card.types:\n\t\t\t\thasVictory = True\n\t\t\t\tbreak\n\t\tif not hasVictory:\n\t\t\tfor card in player.hand:\n\t\t\t\tplayer.reveal(card)\n\t\t\treturn\n\t\twhile True:\n\t\t\tchoice = player.user([o.name for o in player.hand], 'Choose top')\n\t\t\tif not 'VICTORY' in player.hand[choice].types: continue\n\t\t\tplayer.reveal(player.hand[choice])\n\t\t\tplayer.library.append(player.hand.pop(choice))\n\t\t\tbreak\n\nclass Feast(Action, CardAdd):\n\tname = 'Feast'\n\tdef __init__(self, game, **kwargs):\n\t\tsuper(Feast, self).__init__(game, **kwargs)\n\t\tself.price = 4\n\tdef onPlay(self, player, **kwargs):\n\t\tsuper(Feast, self).onPlay(player, **kwargs)\n\t\tfor i in range(len(player.inPlay)):\n\t\t\tif player.inPlay[i]==self:\n\t\t\t\tplayer.trashCard(player.getFromPlay(i))\n\t\t\t\tbreak\n\t\toptions = []\n\t\tfor pile in player.game.piles:\n\t\t\tif player.game.piles[pile].viewTop() and player.game.piles[pile].viewTop().getPrice(player)<6 and player.game.piles[pile].viewTop().getPotionPrice(player)<1:\n\t\t\t\toptions.append(pile)\n\t\tif not options: return\n\t\tchoice = player.user(options, 
'Choose gain')\n\t\tplayer.gainFromPile(player.game.piles[options[choice]])\n\t\t\nclass Gardens(Victory, CardAdd):\n\tname = 'Gardens'\n\tdef __init__(self, game, **kwargs):\n\t\tsuper(Gardens, self).__init__(game, **kwargs)\n\t\tself.price = 4\n\tdef onGameEnd(self, player, **kwargs):\n\t\tplayer.addVictory(amnt=m.floor(len(player.library)/10))\n\t\nclass Militia(Action, Attack, CardAdd):\n\tname = 'Militia'\n\tdef __init__(self, game, **kwargs):\n\t\tsuper(Militia, self).__init__(game, **kwargs)\n\t\tAttack.__init__(self, game, **kwargs)\n\t\tself.price = 4\n\tdef onPlay(self, player, **kwargs):\n\t\tsuper(Militia, self).onPlay(player, **kwargs)\n\t\tplayer.addCoin(amnt=2)\n\t\tfor aplayer in player.game.getPlayers(player):\n\t\t\tif aplayer==player: continue\n\t\t\taplayer.attack(self.attack, self)\n\tdef attack(self, player, **kwargs):\n\t\twhile len(player.hand)>3:\n\t\t\tchoice = player.user([o.name for o in player.hand], 'Choose discard')\n\t\t\tplayer.discard(choice)\n\nclass Moneylender(Action, CardAdd):\n\tname = 'Moneylender'\n\tdef __init__(self, game, **kwargs):\n\t\tsuper(Moneylender, self).__init__(game, **kwargs)\n\t\tself.price = 4\n\tdef onPlay(self, player, **kwargs):\n\t\tsuper(Moneylender, self).onPlay(player, **kwargs)\n\t\twhile True:\n\t\t\tchoice = player.user([o.name for o in player.hand]+['EndMoneylender'], 'Choose trash')\n\t\t\tif choice+1>len(player.hand): break\n\t\t\tif player.hand[choice].name=='Copper':\n\t\t\t\tplayer.trash(choice)\n\t\t\t\tplayer.addCoin(amnt=3)\n\t\t\t\tbreak\n\t\t\t\t\nclass Remodel(Action, CardAdd):\n\tname = 'Remodel'\n\tdef __init__(self, game, **kwargs):\n\t\tsuper(Remodel, self).__init__(game, **kwargs)\n\t\tself.price = 4\n\tdef onPlay(self, player, **kwargs):\n\t\tsuper(Remodel, self).onPlay(player, **kwargs)\n\t\tif not player.hand: return\n\t\tchoice = player.user([o.name for o in player.hand], 'Choose trash')\n\t\tcoinVal, potionVal = player.hand[choice].getPrice(player), player.hand[choice].getPotionPrice(player)\n\t\tplayer.trash(choice)\n\t\toptions = []\n\t\tfor pile in player.game.piles:\n\t\t\tif player.game.piles[pile].viewTop() and player.game.piles[pile].viewTop().getPrice(player)<=coinVal+2 and player.game.piles[pile].viewTop().getPotionPrice(player)<=potionVal: options.append(pile)\n\t\tif not options: return\n\t\tchoice = player.user(options, 'Choose gain')\n\t\tplayer.gainFromPile(player.game.piles[options[choice]])\n\t\t\nclass Smithy(Action, CardAdd):\n\tname = 'Smithy'\n\tdef __init__(self, game, **kwargs):\n\t\tsuper(Smithy, self).__init__(game, **kwargs)\n\t\tself.price = 4\n\tdef onPlay(self, player, **kwargs):\n\t\tsuper(Smithy, self).onPlay(player, **kwargs)\n\t\tplayer.draw(amnt=3)\n\t\t\nclass Spy(Action, Attack, CardAdd):\n\tname = 'Spy'\n\tdef __init__(self, game, **kwargs):\n\t\tsuper(Spy, self).__init__(game, **kwargs)\n\t\tAttack.__init__(self, game, **kwargs)\n\t\tself.price = 4\n\tdef onPlay(self, player, **kwargs):\n\t\tsuper(Spy, self).onPlay(player, **kwargs)\n\t\tplayer.addAction()\n\t\tplayer.draw()\n\t\tfor aplayer in player.game.getPlayers(player):\n\t\t\taplayer.attack(self.attack, self, controller=player)\n\tdef attack(self, player, **kwargs):\n\t\tif not player.revealPosition(-1, fromZ=player.library): return\n\t\tif kwargs['controller'].user(('Top', 'Bottom'), ''):\n\t\t\tplayer.game.dp.send(signal='spyBottom')\n\t\t\tplayer.library.insert(0, player.library.pop())\n\t\telse:\n\t\t\tplayer.game.dp.send(signal='spyTop')\n\nclass Thief(Action, Attack, CardAdd):\n\tname = 'Thief'\n\tdef 
__init__(self, game, **kwargs):\n\t\tsuper(Thief, self).__init__(game, **kwargs)\n\t\tAttack.__init__(self, game, **kwargs)\n\t\tself.price = 4\n\tdef onPlay(self, player, **kwargs):\n\t\tsuper(Thief, self).onPlay(player, **kwargs)\n\t\tfor aplayer in player.game.getPlayers(player):\n\t\t\tif aplayer==player: continue\n\t\t\taplayer.attack(self.attack, self, controller=player)\n\tdef attack(self, player, **kwargs):\n\t\tcards = player.getCards(2)\n\t\toptions = []\n\t\tfor card in cards:\n\t\t\tplayer.reveal(card)\n\t\t\tif card and 'TREASURE' in card.types:\n\t\t\t\toptions.append(card)\n\t\tif not options: return\n\t\tchoice = kwargs['controller'].user([o.name for o in options], 'Choose trash')\n\t\tgain = kwargs['controller'].user(('No', 'Yes'), 'Gain')\n\t\tfor card in copy.copy(cards):\n\t\t\tif card==options[choice]:\n\t\t\t\tplayer.trashCard(card)\n\t\t\t\tif gain: kwargs['controller'].gain(trash.pop())\n\t\t\telse: player.discardCard(card)\n\t\t\t\nclass ThroneRoom(Action, CardAdd):\n\tname = 'Throne Room'\n\tdef __init__(self, game, **kwargs):\n\t\tsuper(ThroneRoom, self).__init__(game, **kwargs)\n\t\tself.price = 4\n\t\tself.links = []\n\tdef onPlay(self, player, **kwargs):\n\t\tsuper(ThroneRoom, self).onPlay(player, **kwargs)\n\t\tself.links = []\n\t\toptions = []\n\t\tfor card in player.hand:\n\t\t\tif 'ACTION' in card.types:\n\t\t\t\toptions.append(card)\n\t\tif not options: return\n\t\tprint('\\t', player.hand)\n\t\tchoice = player.user([o.name for o in options], 'Choose action')\n\t\tfor i in range(len(player.hand)):\n\t\t\tif player.hand[i]==options[choice]:\n\t\t\t\tplayer.inPlay.append(player.hand.pop(i))\n\t\t\t\tbreak\n\t\tprint('\\t', player.hand)\n\t\tself.links.append(options[choice])\n\t\tplayer.game.dp.connect(self.trigger, signal='destroy')\n\t\tfor i in range(2): player.playAction(options[choice])\n\tdef onDestroy(self, player, **kwargs):\n\t\tif self.links: return True\n\tdef onLeavePlay(self, player, **kwargs):\n\t\tplayer.game.dp.disconnect(self.trigger, signal='destroy')\n\tdef trigger(self, signal, **kwargs):\n\t\tfor i in range(len(self.links)-1, -1, -1):\n\t\t\tif self.links[i]==kwargs['card']: self.links.pop(i)\n\t\tif self.links: return\n\t\tfor i in range(len(self.owner.inPlay)):\n\t\t\tif self.owner.inPlay[i]==self:\n\t\t\t\tself.owner.destroyCard(self.owner.inPlay.pop(i))\n\t\t\t\tbreak\n\t\t\nclass CouncilRoom(Action, CardAdd):\n\tname = 'Council Room'\n\tdef __init__(self, game, **kwargs):\n\t\tsuper(CouncilRoom, self).__init__(game, **kwargs)\n\t\tself.price = 5\n\tdef onPlay(self, player, **kwargs):\n\t\tsuper(CouncilRoom, self).onPlay(player, **kwargs)\n\t\tplayer.draw(amnt=4)\n\t\tplayer.addBuy()\n\t\tfor aplayer in player.game.getPlayers(player):\n\t\t\taplayer.draw()\n\t\t\t\nclass Festival(Action, CardAdd):\n\tname = 'Festival'\n\tdef __init__(self, game, **kwargs):\n\t\tsuper(Festival, self).__init__(game, **kwargs)\n\t\tself.price = 5\n\tdef onPlay(self, player, **kwargs):\n\t\tsuper(Festival, self).onPlay(player, **kwargs)\n\t\tplayer.addCoin(amnt=2)\n\t\tplayer.addAction(amnt=2)\n\t\tplayer.addBuy()\n\t\t\nclass Laboratory(Action, CardAdd):\n\tname = 'Laboratory'\n\tdef __init__(self, game, **kwargs):\n\t\tsuper(Laboratory, self).__init__(game, **kwargs)\n\t\tself.price = 5\n\tdef onPlay(self, player, **kwargs):\n\t\tsuper(Laboratory, self).onPlay(player, **kwargs)\n\t\tplayer.draw(amnt=2)\n\t\tplayer.addAction()\n\t\t\n
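# [Added sketch, hypothetical names: the 'links' list used by Throne Room\n# above (and King's Court later) remembers the card being replayed; the\n# 'destroy' trigger prunes it, and only once every linked card is gone may the\n# throning card itself leave play. A minimal model of that bookkeeping:]\ndef _links_demo():\n\tlinks = ['Smithy']\n\tdef on_destroy(card):\n\t\tif card in links:\n\t\t\tlinks.remove(card)\n\t\treturn not links  # True -> the throning card may be destroyed too\n\treturn on_destroy('Smithy')  # True\n\n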
class Library(Action, CardAdd):\n\tname = 'Library'\n\tdef __init__(self, game, **kwargs):\n\t\tsuper(Library, self).__init__(game, **kwargs)\n\t\tself.price = 5\n\tdef onPlay(self, player, **kwargs):\n\t\tsuper(Library, self).onPlay(player, **kwargs)\n\t\taside = []\n\t\twhile len(player.hand)<7 and not player.draw()=='Empty':\n\t\t\tif 'ACTION' in player.hand[-1].types and not player.user((player.hand[-1].name, 'NotSetAside'), ''):\n\t\t\t\tactionCard = player.hand.pop()\n\t\t\t\tplayer.reveal(actionCard)\n\t\t\t\taside.append(actionCard)\n\t\twhile aside: player.discardCard(aside.pop())\n\t\t\t\nclass Market(Action, CardAdd):\n\tname = 'Market'\n\tdef __init__(self, game, **kwargs):\n\t\tsuper(Market, self).__init__(game, **kwargs)\n\t\tself.price = 5\n\tdef onPlay(self, player, **kwargs):\n\t\tsuper(Market, self).onPlay(player, **kwargs)\n\t\tplayer.draw()\n\t\tplayer.addAction()\n\t\tplayer.addCoin()\n\t\tplayer.addBuy()\n\t\t\nclass Mine(Action, CardAdd):\n\tname = 'Mine'\n\tdef __init__(self, game, **kwargs):\n\t\tsuper(Mine, self).__init__(game, **kwargs)\n\t\tself.price = 5\n\tdef onPlay(self, player, **kwargs):\n\t\tsuper(Mine, self).onPlay(player, **kwargs)\n\t\toptions = []\n\t\tfor card in player.hand:\n\t\t\tif 'TREASURE' in card.types:\n\t\t\t\toptions.append(card)\n\t\tif not options: return\n\t\tchoice = player.user([o.name for o in options], 'Choose treasure trash')\n\t\tfor i in range(len(player.hand)):\n\t\t\tif player.hand[i]==options[choice]:\n\t\t\t\tcoinVal, potionVal = player.hand[i].getPrice(player), player.hand[i].getPotionPrice(player)\n\t\t\t\tplayer.trash(i)\n\t\t\t\tbreak\n\t\toptions = []\n\t\tfor pile in player.game.piles:\n\t\t\tif player.game.piles[pile].viewTop() and 'TREASURE' in player.game.piles[pile].viewTop().types and player.game.piles[pile].viewTop().getPrice(player)<=coinVal+3 and player.game.piles[pile].viewTop().getPotionPrice(player)<=potionVal:\n\t\t\t\toptions.append(pile)\n\t\tif not options: return\n\t\tchoice = player.user(options, 'Choose gain')\n\t\tplayer.gainFromPile(player.game.piles[options[choice]])\n\t\nclass Witch(Action, Attack, CardAdd):\n\tname = 'Witch'\n\tdef __init__(self, game, **kwargs):\n\t\tsuper(Witch, self).__init__(game, **kwargs)\n\t\tAttack.__init__(self, game, **kwargs)\n\t\tself.price = 5\n\tdef onPlay(self, player, **kwargs):\n\t\tsuper(Witch, self).onPlay(player, **kwargs)\n\t\tplayer.draw(amnt=2)\n\t\tfor aplayer in player.game.getPlayers(player):\n\t\t\tif aplayer==player: continue\n\t\t\taplayer.attack(self.attack, self, controller=player)\n\tdef attack(self, player, **kwargs):\n\t\tplayer.gainFromPile(player.game.piles['Curse'])\n\t\t\nclass Adventurer(Action, CardAdd):\n\tname = 'Adventurer'\n\tdef __init__(self, game, **kwargs):\n\t\tsuper(Adventurer, self).__init__(game, **kwargs)\n\t\tself.price = 6\n\tdef onPlay(self, player, **kwargs):\n\t\tsuper(Adventurer, self).onPlay(player, **kwargs)\n\t\taside = []\n\t\tfoundTreasure = []\n\t\twhile len(foundTreasure)<2 and player.revealPosition(-1, fromZ=player.library):\n\t\t\tcard = player.library.pop()\n\t\t\tif 'TREASURE' in card.types: foundTreasure.append(card)\n\t\t\telse: aside.append(card)\n\t\tfor treasure in foundTreasure: player.hand.append(treasure)\n\t\tfor card in aside: player.discardCard(card)\n\t\nbaseSet = [Cellar, Chapel, Moat, Chancellor, Village, Woodcutter, Workshop, Bureaucrat, Feast, Gardens, Militia, Remodel, Smithy, Spy, Thief, ThroneRoom, CouncilRoom, Festival, Laboratory, Library, Market, Mine, Witch, Adventurer]\n\nclass Loan(Treasure, CardAdd):\n\tname = 'Loan'\n\tdef __init__(self, game, **kwargs):\n\t\tsuper(Loan, 
self).__init__(game, **kwargs)\n\t\tself.value = 1\n\t\tself.price = 3\n\tdef onPlay(self, player, **kwargs):\n\t\tsuper(Loan, self).onPlay(player, **kwargs)\n\t\taside = []\n\t\tcard = None\n\t\ttreasure = None\n\t\twhile True:\n\t\t\tcard = player.getCard()\n\t\t\tif card=='Empty': break\n\t\t\tplayer.reveal(card)\n\t\t\tif 'TREASURE' in card.types:\n\t\t\t\ttreasure = card\n\t\t\t\tbreak\n\t\t\taside.append(card)\n\t\tif treasure:\n\t\t\tif player.user(('Trash', 'Discard'), ''): player.discard(treasure)\n\t\t\telse: player.trashCard(treasure)\n\t\twhile aside:\n\t\t\tplayer.discardCard(aside.pop())\n\nclass TradeToken(Token, CardAdd):\n\tname = 'TradeToken'\n\tdef __init__(self, game, **kwargs):\n\t\tsuper(TradeToken, self).__init__(game, **kwargs)\n\t\tgame.dp.connect(self.onGain, signal='gain')\n\tdef onGain(self, signal, **kwargs):\n\t\tif 'fromz' in kwargs and kwargs['fromz']==self.owner:\n\t\t\tfor i in range(len(self.owner.tokens)):\n\t\t\t\tif self.owner.tokens[i]==self:\n\t\t\t\t\tself.game.globalMats['TradeRouteMat'].append(self.owner.tokens.pop(i))\n\t\t\t\t\tself.owner.removeToken(self, self.game)\n\t\t\t\t\tbreak\n\t\t\t\t\t\t\nclass TradeRoute(Action, CardAdd):\n\tname = 'Trade Route'\n\tdef __init__(self, game, **kwargs):\n\t\tsuper(TradeRoute, self).__init__(game, **kwargs)\n\t\tself.price = 3\n\tdef onPlay(self, player, **kwargs):\n\t\tsuper(TradeRoute, self).onPlay(player, **kwargs)\n\t\tplayer.addCoin(amnt=len(player.game.globalMats['TradeRouteMat']))\n\t\tif player.hand: player.trash(player.user([o.name for o in player.hand], 'Choose trash'))\n\tdef addTokens(self, **kwargs):\n\t\tfor pile in kwargs['game'].piles:\n\t\t\tif kwargs['game'].piles[pile].viewTop() and 'VICTORY' in kwargs['game'].piles[pile].viewTop().types: kwargs['game'].piles[pile].addToken(TradeToken(kwargs['game']), kwargs['game'])\n\tdef onPileCreate(self, pile, game, **kwargs):\n\t\tsuper(TradeRoute, self).onPileCreate(pile, game, **kwargs)\n\t\tgame.delayedActions.append([self.addTokens, {'game': game}])\n\t\tgame.addGlobalMat('TradeRouteMat')\n\t\t\t\nclass Watchtower(Action, Reaction, CardAdd):\n\tname = 'Watchtower'\n\tdef __init__(self, game, **kwargs):\n\t\tsuper(Watchtower, self).__init__(game, **kwargs)\n\t\tReaction.__init__(self, game, **kwargs)\n\t\tself.price = 3\n\t\tself.signal = 'gain'\n\tdef onPlay(self, player, **kwargs):\n\t\tsuper(Watchtower, self).onPlay(player, **kwargs)\n\t\twhile len(player.hand)<6 and not player.draw()=='Empty': pass\n\tdef trigger(self, signal, **kwargs):\n\t\tif kwargs['player']==self.owner and self in self.owner.hand and self.owner.user(('no', 'UseWatchTower'), ''):\n\t\t\tself.owner.reveal(self)\n\t\t\tif self.owner.user(('top', 'trash'), ''): kwargs['kwargs']['to'] = trash\n\t\t\telse: kwargs['kwargs']['to'] = self.owner.library\n\tdef onGain(self, player, **kwargs):\n\t\tself.connect()\n\tdef onTrash(self, player, **kwargs):\n\t\tself.disconnect()\n\tdef onReturn(self, player, **kwargs):\n\t\tself.disconnect()\n\t\t\nclass Bishop(Action, CardAdd):\n\tname = 'Bishop'\n\tdef __init__(self, game, **kwargs):\n\t\tsuper(Bishop, self).__init__(game, **kwargs)\n\t\tself.price = 4\n\tdef onPlay(self, player, **kwargs):\n\t\tsuper(Bishop, self).onPlay(player, **kwargs)\t\t\n\t\tplayer.addCoin()\n\t\tplayer.addVictory()\n\t\tif player.hand:\n\t\t\tchoice = player.user([o.name for o in player.hand], 'Choose trash')\n\t\t\tplayer.addVictory(amnt=m.floor(player.hand[choice].getPrice(player)/2))\n\t\t\tplayer.trash(choice)\n\t\tfor aplayer in 
player.game.getPlayers(player):\n\t\t\tif aplayer==player: continue\n\t\t\tchoice = aplayer.user([o.name for o in aplayer.hand]+['NoTrash'], 'Choose trash')\n\t\t\tif choice+1>len(aplayer.hand): continue\n\t\t\taplayer.trash(choice)\n\t\t\t\nclass Monument(Action, CardAdd):\n\tname = 'Monument'\n\tdef __init__(self, game, **kwargs):\n\t\tsuper(Monument, self).__init__(game, **kwargs)\n\t\tself.price = 4\n\tdef onPlay(self, player, **kwargs):\n\t\tsuper(Monument, self).onPlay(player, **kwargs)\n\t\tplayer.addVictory()\n\t\tplayer.addCoin(amnt=2)\n\t\t\nclass Quarry(Treasure, CardAdd):\n\tname = 'Quarry'\n\tdef __init__(self, game, **kwargs):\n\t\tsuper(Quarry, self).__init__(game, **kwargs)\n\t\tself.price = 4\n\t\tself.value = 1\n\tdef onPlay(self, player, **kwargs):\n\t\tsuper(Quarry, self).onPlay(player, **kwargs)\n\t\tfor card in player.game.allCards:\n\t\t\tif 'ACTION' in card.types: card.price-=2\n\tdef onLeavePlay(self, player, **kwargs):\n\t\tfor card in player.game.allCards:\n\t\t\tif 'ACTION' in card.types: card.price+=2\n\t\t\t\nclass Talisman(Treasure, CardAdd):\n\tname = 'Talisman'\n\tdef __init__(self, game, **kwargs):\n\t\tsuper(Talisman, self).__init__(game, **kwargs)\n\t\tself.price = 4\n\t\tself.value = 1\n\tdef onPlay(self, player, **kwargs):\n\t\tsuper(Talisman, self).onPlay(player, **kwargs)\n\t\tself.connect()\n\tdef onLeavePlay(self, player, **kwargs):\n\t\tself.disconnect()\n\tdef trigger(self, signal, **kwargs):\n\t\tif kwargs['player']==self.owner and 'pile' in kwargs and kwargs['pile'].viewTop() and not 'VICTORY' in kwargs['pile'].viewTop().types: self.owner.gainFromPile(kwargs['pile'])\n\tdef connect(self, **kwargs):\n\t\tself.owner.game.dp.connect(self.trigger, signal='buy')\n\tdef disconnect(self, **kwargs):\n\t\tself.owner.game.dp.disconnect(self.trigger, signal='buy')\n\t\nclass WorkersVillage(Action, CardAdd):\n\tname = \"Worker's Village\"\n\tdef __init__(self, game, **kwargs):\n\t\tsuper(WorkersVillage, self).__init__(game, **kwargs)\n\t\tself.price = 4\n\tdef onPlay(self, player, **kwargs):\n\t\tsuper(WorkersVillage, self).onPlay(player, **kwargs)\n\t\tplayer.addAction(amnt=2)\n\t\tplayer.draw()\n\t\tplayer.addBuy()\n\t\t\nclass City(Action, CardAdd):\n\tname = 'City'\n\tdef __init__(self, game, **kwargs):\n\t\tsuper(City, self).__init__(game, **kwargs)\n\t\tself.price = 5\n\tdef onPlay(self, player, **kwargs):\n\t\tsuper(City, self).onPlay(player, **kwargs)\n\t\tplayer.addAction(amnt=2)\n\t\tif len(player.game.emptyPiles)>0:\n\t\t\tplayer.draw(amnt=2)\n\t\t\tif len(player.game.emptyPiles)>1:\n\t\t\t\tplayer.addCoin()\n\t\t\t\tplayer.addBuy()\n\t\telse: player.draw()\n\t\t\n
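# [Added sketch, hypothetical names: Quarry above lowers Action costs while it\n# is in play and must undo the exact adjustment on leaving play, otherwise\n# prices drift. The pattern in miniature:]\ndef _price_modifier_demo():\n\tprices = {'Village': 3, 'Gold': 6}\n\tfor name in prices:\n\t\tif name == 'Village':  # pretend only the Actions are affected\n\t\t\tprices[name] -= 2  # Quarry enters play\n\tfor name in prices:\n\t\tif name == 'Village':\n\t\t\tprices[name] += 2  # Quarry leaves play\n\treturn prices == {'Village': 3, 'Gold': 6}  # True: the net effect is zero\n\n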
class Contraband(Treasure, CardAdd):\n\tname = 'Contraband'\n\tdef __init__(self, game, **kwargs):\n\t\tsuper(Contraband, self).__init__(game, **kwargs)\n\t\tself.price = 5\n\t\tself.value = 3\n\t\tself.banning = None\n\tdef onPlay(self, player, **kwargs):\n\t\tsuper(Contraband, self).onPlay(player, **kwargs)\n\t\tplayer.addBuy()\n\t\tself.banning = player.game.piles[list(player.game.piles)[player.game.getNextPlayer(player).user(list(player.game.piles), 'Choose ban')]]\n\t\tplayer.game.dp.send(signal='Choice', choice=self.banning, card=self)\n\t\tself.connect()\n\tdef onLeavePlay(self, player, **kwargs):\n\t\tself.disconnect()\n\tdef trigger(self, signal, **kwargs):\n\t\tif kwargs['player']==self.owner and kwargs['pile']==self.banning: return True\n\tdef connect(self, **kwargs):\n\t\tself.owner.game.dp.connect(self.trigger, signal='tryBuy')\n\tdef disconnect(self, **kwargs):\n\t\tself.owner.game.dp.disconnect(self.trigger, signal='tryBuy')\n\t\t\nclass CountingHouse(Action, CardAdd):\n\tname = 'Counting House'\n\tdef __init__(self, game, **kwargs):\n\t\tsuper(CountingHouse, self).__init__(game, **kwargs)\n\t\tself.price = 5\n\tdef onPlay(self, player, **kwargs):\n\t\tsuper(CountingHouse, self).onPlay(player, **kwargs)\n\t\tcoppers = 0\n\t\tfor card in player.discardPile:\n\t\t\tif card.name=='Copper': coppers+=1\n\t\t# move exactly the chosen number of Coppers from the discard to the hand\n\t\tamnt = player.user(list(range(coppers+1)), 'Choose amount')\n\t\tmoved = 0\n\t\tfor n in range(len(player.discardPile)-1, -1, -1):\n\t\t\tif moved==amnt: break\n\t\t\tif player.discardPile[n].name=='Copper':\n\t\t\t\tplayer.hand.append(player.discardPile.pop(n))\n\t\t\t\tmoved += 1\n\t\t\nclass Mint(Action, CardAdd):\n\tname = 'Mint'\n\tdef __init__(self, game, **kwargs):\n\t\tsuper(Mint, self).__init__(game, **kwargs)\n\t\tself.price = 5\n\t\tgame.dp.connect(self.trigger, signal='buy')\n\tdef onPlay(self, player, **kwargs):\n\t\tsuper(Mint, self).onPlay(player, **kwargs)\n\t\toptions = []\n\t\tfor card in player.hand:\n\t\t\tif 'TREASURE' in card.types and card.name in player.game.piles: options.append(card.name)\n\t\tif not options: return\n\t\tchoice = player.user(options+['NoMint'], 'Choose gain')\n\t\tif choice>=len(options): return\n\t\tplayer.gainFromPile(player.game.piles[options[choice]])\n\tdef trigger(self, signal, **kwargs):\n\t\tif 'pile' in kwargs and not self==kwargs['pile'].viewTop(): return\n\t\tfor i in range(len(kwargs['player'].inPlay)-1, -1, -1):\n\t\t\tif 'TREASURE' in kwargs['player'].inPlay[i].types: kwargs['player'].trashCard(kwargs['player'].getFromPlay(i))\n\t\t\nclass Mountebank(Action, Attack, CardAdd):\n\tname = 'Mountebank'\n\tdef __init__(self, game, **kwargs):\n\t\tsuper(Mountebank, self).__init__(game, **kwargs)\n\t\tAttack.__init__(self, game, **kwargs)\n\t\tself.price = 5\n\tdef onPlay(self, player, **kwargs):\n\t\tsuper(Mountebank, self).onPlay(player, **kwargs)\n\t\tplayer.addCoin(amnt=2)\n\t\tfor aplayer in player.game.getPlayers(player):\n\t\t\tif not player==aplayer: aplayer.attack(self.attack, self)\n\tdef attack(self, player, **kwargs):\n\t\tfor i in range(len(player.hand)):\n\t\t\tif player.hand[i].name=='Curse':\n\t\t\t\tif player.user(('keep', 'discardCurse'), ''):\n\t\t\t\t\tplayer.discard(i)\n\t\t\t\t\treturn\n\t\t\t\tbreak\n\t\tplayer.gainFromPile(player.game.piles['Curse'])\n\t\tplayer.gainFromPile(player.game.piles['Copper'])\n\nclass Rabble(Action, Attack, CardAdd):\n\tname = 'Rabble'\n\tdef __init__(self, game, **kwargs):\n\t\tsuper(Rabble, self).__init__(game, **kwargs)\n\t\tAttack.__init__(self, game, **kwargs)\n\t\tself.price = 5\n\tdef onPlay(self, player, **kwargs):\n\t\tsuper(Rabble, self).onPlay(player, **kwargs)\n\t\tplayer.draw(amnt=3)\n\t\tfor aplayer in player.game.getPlayers(player):\n\t\t\tif not player==aplayer: aplayer.attack(self.attack, self)\n\tdef attack(self, player, **kwargs):\n\t\tcards = player.getCards(3)\n\t\tfor i in range(len(cards)-1, -1, -1):\n\t\t\tif 'ACTION' in cards[i].types or 'TREASURE' in cards[i].types: player.discardCard(cards.pop(i))\n\t\twhile cards:\n\t\t\tplayer.library.append(cards.pop(player.user([o.name for o in cards], 'Choose top')))\n\n
class Vault(Action, CardAdd):\n\tname = 'Vault'\n\tdef __init__(self, game, **kwargs):\n\t\tsuper(Vault, self).__init__(game, **kwargs)\n\t\tself.price = 5\n\tdef onPlay(self, player, **kwargs):\n\t\tsuper(Vault, self).onPlay(player, **kwargs)\n\t\tplayer.draw(amnt=2)\n\t\twhile player.hand:\n\t\t\tchoice = player.user([o.name for o in player.hand]+['EndDiscard'], 'Choose discard')\n\t\t\tif choice+1>len(player.hand): break\n\t\t\tplayer.discard(choice)\n\t\t\tplayer.addCoin()\n\t\tfor aplayer in player.game.getPlayers(player):\n\t\t\tif len(aplayer.hand)>1 and aplayer.user(('DoNotDiscard', 'discard'), ''):\n\t\t\t\tfor i in range(2): aplayer.discard(aplayer.user([o.name for o in aplayer.hand], 'Choose discard '+str(1+i)))\n\t\t\t\taplayer.draw()\n\nclass Venture(Treasure, CardAdd):\n\tname = 'Venture'\n\tdef __init__(self, game, **kwargs):\n\t\tsuper(Venture, self).__init__(game, **kwargs)\n\t\tself.price = 5\n\t\tself.value = 1\n\tdef onPlay(self, player, **kwargs):\n\t\tsuper(Venture, self).onPlay(player, **kwargs)\n\t\taside = []\n\t\tcard = None\n\t\ttreasure = None\n\t\twhile True:\n\t\t\tcard = player.getCard()\n\t\t\tif card=='Empty': break\n\t\t\tplayer.reveal(card)\n\t\t\tif 'TREASURE' in card.types:\n\t\t\t\ttreasure = card\n\t\t\t\tbreak\n\t\t\taside.append(card)\n\t\twhile aside: player.discardCard(aside.pop())\n\t\tif treasure:\n\t\t\tplayer.inPlay.append(treasure)\n\t\t\tplayer.playTreasure(treasure)\n\nclass Goons(Action, Attack, CardAdd):\n\tname = 'Goons'\n\tdef __init__(self, game, **kwargs):\n\t\tsuper(Goons, self).__init__(game, **kwargs)\n\t\tAttack.__init__(self, game, **kwargs)\n\t\tself.price = 6\n\tdef onPlay(self, player, **kwargs):\n\t\tsuper(Goons, self).onPlay(player, **kwargs)\n\t\tself.connect()\n\t\tplayer.addBuy()\n\t\tplayer.addCoin(amnt=2)\n\t\tfor aplayer in player.game.getPlayers(player):\n\t\t\tif not player==aplayer: aplayer.attack(self.attack, self)\n\tdef attack(self, player, **kwargs):\n\t\twhile len(player.hand)>3:\n\t\t\tchoice = player.user([o.name for o in player.hand], 'Choose discard')\n\t\t\tplayer.discard(choice)\n\tdef onLeavePlay(self, player, **kwargs):\n\t\tself.disconnect()\n\tdef trigger(self, signal, **kwargs):\n\t\tif kwargs['player']==self.owner: self.owner.addVictory()\n\tdef connect(self, **kwargs):\n\t\tself.owner.game.dp.connect(self.trigger, signal='buy')\n\tdef disconnect(self, **kwargs):\n\t\tself.owner.game.dp.disconnect(self.trigger, signal='buy')\n\t\t\nclass GrandMarket(Action, CardAdd):\n\tname = 'Grand Market'\n\tdef __init__(self, game, **kwargs):\n\t\tsuper(GrandMarket, self).__init__(game, **kwargs)\n\t\tself.price = 6\n\t\tgame.dp.connect(self.trigger, signal='tryBuy')\n\tdef onPlay(self, player, **kwargs):\n\t\tsuper(GrandMarket, self).onPlay(player, **kwargs)\n\t\tplayer.addBuy()\n\t\tplayer.draw()\n\t\tplayer.addAction()\n\t\tplayer.addCoin(amnt=2)\n\tdef trigger(self, signal, **kwargs):\n\t\tif not self==kwargs['card']: return\n\t\tfor card in kwargs['player'].inPlay:\n\t\t\tif card.name=='Copper': return True\n\t\t\nclass Hoard(Treasure, CardAdd):\n\tname = 'Hoard'\n\tdef __init__(self, game, **kwargs):\n\t\tsuper(Hoard, self).__init__(game, **kwargs)\n\t\tself.price = 6\n\t\tself.value = 2\n\tdef onPlay(self, player, **kwargs):\n\t\tsuper(Hoard, self).onPlay(player, **kwargs)\n\t\tself.connect()\n\tdef onLeavePlay(self, player, **kwargs):\n\t\tself.disconnect()\n\tdef trigger(self, signal, **kwargs):\n\t\tif kwargs['player']==self.owner and 'pile' in kwargs and kwargs['pile'].viewTop() and 'VICTORY' in kwargs['pile'].viewTop().types: self.owner.gainFromPile(self.owner.game.piles['Gold'])\n\tdef connect(self, **kwargs):\n\t\tself.owner.game.dp.connect(self.trigger, signal='buy')\n\tdef disconnect(self, **kwargs):\n\t\tself.owner.game.dp.disconnect(self.trigger, signal='buy')\n\n
class Bank(Treasure, CardAdd):\n\tname = 'Bank'\n\tdef __init__(self, game, **kwargs):\n\t\tsuper(Bank, self).__init__(game, **kwargs)\n\t\tself.price = 7\n\t\tself.value = 0  # the worth is computed dynamically in onPlay\n\tdef onPlay(self, player, **kwargs):\n\t\tsuper(Bank, self).onPlay(player, **kwargs)\n\t\t# $1 per Treasure in play, counting the Bank itself\n\t\tvsum = 0\n\t\tfor card in player.inPlay:\n\t\t\tif 'TREASURE' in card.types: vsum+=1\n\t\tplayer.addCoin(amnt=vsum)\n\t\t\nclass Expand(Action, CardAdd):\n\tname = 'Expand'\n\tdef __init__(self, game, **kwargs):\n\t\tsuper(Expand, self).__init__(game, **kwargs)\n\t\tself.price = 7\n\tdef onPlay(self, player, **kwargs):\n\t\tsuper(Expand, self).onPlay(player, **kwargs)\n\t\tif not player.hand: return\n\t\tchoice = player.user([o.name for o in player.hand], 'Choose trash')\n\t\tcoinVal, potionVal = player.hand[choice].getPrice(player), player.hand[choice].getPotionPrice(player)\n\t\tplayer.trash(choice)\n\t\toptions = []\n\t\tfor pile in player.game.piles:\n\t\t\tif player.game.piles[pile].viewTop() and player.game.piles[pile].viewTop().getPrice(player)<=coinVal+3 and player.game.piles[pile].viewTop().getPotionPrice(player)<=potionVal:\n\t\t\t\toptions.append(pile)\n\t\tif not options: return\n\t\tchoice = player.user(options, 'Choose gain')\n\t\tplayer.gainFromPile(player.game.piles[options[choice]])\n\t\t\nclass Forge(Action, CardAdd):\n\tname = 'Forge'\n\tdef __init__(self, game, **kwargs):\n\t\tsuper(Forge, self).__init__(game, **kwargs)\n\t\tself.price = 7\n\tdef onPlay(self, player, **kwargs):\n\t\tsuper(Forge, self).onPlay(player, **kwargs)\n\t\ttrashedValue = 0\n\t\twhile player.hand:\n\t\t\tchoice = player.user([o.name for o in player.hand]+['EndForge'], 'Choose trash')\n\t\t\tif choice+1>len(player.hand): break\n\t\t\ttrashedValue += player.hand[choice].getValue(player)\n\t\t\tplayer.trash(choice)\n\t\toptions = []\n\t\tfor pile in player.game.piles:\n\t\t\tif player.game.piles[pile].viewTop() and player.game.piles[pile].viewTop().getPrice(player)==trashedValue:\n\t\t\t\toptions.append(pile)\n\t\tif not options: return\n\t\tchoice = player.user(options, 'Choose gain')\n\t\tplayer.gainFromPile(player.game.piles[options[choice]])\n\t\t\nclass KingsCourt(Action, CardAdd):\n\tname = \"King's Court\"\n\tdef __init__(self, game, **kwargs):\n\t\tsuper(KingsCourt, self).__init__(game, **kwargs)\n\t\tself.price = 7\n\t\tself.links = []\n\tdef onPlay(self, player, **kwargs):\n\t\tsuper(KingsCourt, self).onPlay(player, **kwargs)\n\t\tself.links = []\n\t\toptions = []\n\t\tfor card in player.hand:\n\t\t\tif 'ACTION' in card.types:\n\t\t\t\toptions.append(card)\n\t\tif not options: return\n\t\tprint('\\t', player.hand)\n\t\tchoice = player.user([o.name for o in options], 'Choose action')\n\t\tfor i in range(len(player.hand)):\n\t\t\tif player.hand[i]==options[choice]:\n\t\t\t\tplayer.inPlay.append(player.hand.pop(i))\n\t\t\t\tbreak\n\t\tprint('\\t', player.hand)\n\t\tself.links.append(options[choice])\n\t\tplayer.game.dp.connect(self.trigger, signal='destroy')\n\t\tfor i in range(3): player.playAction(options[choice])\n\tdef onDestroy(self, player, **kwargs):\n\t\tif self.links: return True\n\tdef onLeavePlay(self, player, **kwargs):\n\t\tplayer.game.dp.disconnect(self.trigger, signal='destroy')\n\tdef trigger(self, signal, **kwargs):\n\t\tfor i in range(len(self.links)-1, -1, -1):\n\t\t\tif self.links[i]==kwargs['card']: self.links.pop(i)\n\t\tif self.links: return\n\t\tfor i in range(len(self.owner.inPlay)):\n\t\t\tif self.owner.inPlay[i]==self:\n\t\t\t\tself.owner.destroyCard(self.owner.inPlay.pop(i))\n\t\t\t\tbreak\n\n
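# [Added worked example for Peddler's dynamic cost below; plain arithmetic, no\n# dombase names involved] During its owner's Buy phase the cost drops by $2\n# per Action card in play, never below zero:\nassert max(8 - 3*2, 0) == 2  # three Actions in play\nassert max(8 - 5*2, 0) == 0  # the cost is floored at zero\n\n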
class Peddler(Action, CardAdd):\n\tname = 'Peddler'\n\tdef __init__(self, game, **kwargs):\n\t\tsuper(Peddler, self).__init__(game, **kwargs)\n\t\tself.price = 8\n\tdef onPlay(self, player, **kwargs):\n\t\tsuper(Peddler, self).onPlay(player, **kwargs)\n\t\tplayer.addCoin()\n\t\tplayer.addAction()\n\t\tplayer.draw()\n\tdef getPrice(self, player, **kwargs):\n\t\tisBuy = False\n\t\tfor i in range(len(events)-1, -1, -1):\n\t\t\tif events[i][0]=='actionPhase' or events[i][0]=='treasurePhase': break\n\t\t\telif events[i][0]=='buyPhase':\n\t\t\t\tisBuy = events[i][1]['player']==player\n\t\t\t\tbreak\n\t\tif not isBuy: return np.max((self.price, 0))\n\t\tactions = 0\n\t\tfor card in player.inPlay:\n\t\t\tif 'ACTION' in card.types: actions+=1\n\t\treturn np.max((self.price-actions*2, 0))\n\t\t\nprosperity = [Loan, TradeRoute, Watchtower, Bishop, Monument, Quarry, Talisman, WorkersVillage, City, Contraband, CountingHouse, Mint, Mountebank, Rabble, Vault, Venture, Goons, GrandMarket, Hoard, Bank, Expand, Forge, KingsCourt, Peddler]\n\nclass Colony(Victory, CardAdd):\n\tname = 'Colony'\n\tdef __init__(self, game, **kwargs):\n\t\tsuper(Colony, self).__init__(game, **kwargs)\n\t\tself.value = 10\n\t\tself.price = 11\n\tdef onPileCreate(self, pile, game, **kwargs):\n\t\tpile.terminator = True\n\t\tif len(game.players)>2:\n\t\t\tamnt = 12\n\t\telse:\n\t\t\tamnt = 8\n\t\tfor i in range(amnt):\n\t\t\tpile.append(type(self)(game))\n\t\t\t\nclass Platinum(Treasure, CardAdd):\n\tname = 'Platinum'\n\tdef __init__(self, game, **kwargs):\n\t\tsuper(Platinum, self).__init__(game, **kwargs)\n\t\tself.value = 5\n\t\tself.price = 9\n\tdef onPileCreate(self, pile, game, **kwargs):\n\t\tfor i in range(12):\n\t\t\tpile.append(type(self)(game))\n\t\t\t\nplatColony = [Colony, Platinum]\n\nclass EmbargoToken(Token):\n\tname = 'Embargo Token'\n\tdef __init__(self, game, **kwargs):\n\t\tsuper(EmbargoToken, self).__init__(game, **kwargs)\n\t\tgame.dp.connect(self.trigger, signal='buy')\n\tdef trigger(self, signal, **kwargs):\n\t\tif kwargs['pile']==self.owner:\n\t\t\tprint('EMBARGO', kwargs['player'])\n\t\t\tkwargs['player'].gainFromPile(kwargs['player'].game.piles['Curse'])\n\t\t\t\t\t\t\nclass Embargo(Action, CardAdd):\n\tname = 'Embargo'\n\tdef __init__(self, game, **kwargs):\n\t\tsuper(Embargo, self).__init__(game, **kwargs)\n\t\tself.price = 2\n\tdef onPlay(self, player, **kwargs):\n\t\tsuper(Embargo, self).onPlay(player, **kwargs)\n\t\tplayer.addCoin(amnt=2)\n\t\tfor i in range(len(player.inPlay)):\n\t\t\tif player.inPlay[i]==self:\n\t\t\t\tplayer.trashCard(player.getFromPlay(i))\n\t\t\t\tbreak\n\t\tplayer.game.piles[list(player.game.piles)[player.user(list(player.game.piles), 'Choose embargo')]].addToken(EmbargoToken(player.game), player.game)\n\t\nclass Haven(Action, Duration, CardAdd):\n\tname = 'Haven'\n\tdef __init__(self, game, **kwargs):\n\t\tsuper(Haven, self).__init__(game, **kwargs)\n\t\tDuration.__init__(self, game, **kwargs)\n\t\tself.price = 2\n\t\tself.saved = []\n\tdef onPlay(self, player, **kwargs):\n\t\tsuper(Haven, self).onPlay(player, **kwargs)\n\t\tplayer.draw()\n\t\tplayer.addAction()\n\t\tself.age = 0\n\t\tplayer.game.dp.connect(self.nextage, signal='startTurn')\n\t\tif not player.hand: return\n\t\tself.saved.append(player.hand.pop(player.user([o.name for o in player.hand], 'Choose saved')))\n\tdef nextage(self, signal, **kwargs):\n\t\tif kwargs['player']==self.owner:\n\t\t\tself.age+=1\n\t\t\twhile self.saved: self.owner.hand.append(self.saved.pop())\n\tdef onGameEnd(self, player, **kwargs):\n\t\twhile self.saved: player.library.append(self.saved.pop())\n\t\t\t\nclass Lighthouse(Action, Duration, CardAdd):\n\tname = 'Lighthouse'\n\tdef __init__(self, game, **kwargs):\n\t\tsuper(Lighthouse, self).__init__(game, 
**kwargs)\n\t\tDuration.__init__(self, game, **kwargs)\n\t\tself.price = 2\n\tdef onPlay(self, player, **kwargs):\n\t\tsuper(Lighthouse, self).onPlay(player, **kwargs)\n\t\tplayer.addAction()\n\t\tplayer.addCoin()\n\t\tself.age = 0\n\t\tplayer.game.dp.connect(self.nextage)\n\tdef nextage(self, signal, **kwargs):\n\t\tif signal=='startTurn' and kwargs['player']==self.owner:\n\t\t\tself.age+=1\n\t\t\tself.owner.addCoin()\n\t\telif signal=='attack' and kwargs['player']==self.owner: return True\n\nclass NativeVillage(Action, CardAdd):\n\tname = 'Native Village'\n\tdef __init__(self, game, **kwargs):\n\t\tsuper(NativeVillage, self).__init__(game, **kwargs)\n\t\tself.price = 2\n\tdef onPlay(self, player, **kwargs):\n\t\tsuper(NativeVillage, self).onPlay(player, **kwargs)\n\t\tplayer.addAction(amnt=2)\n\t\tif player.user(('takeCards', 'addCard'), ''):\n\t\t\ttopCard = player.getCard()\n\t\t\tplayer.mats['NativeVillageMat'].append(topCard)\n\t\t\tplayer.reveal(topCard, hidden=player)\n\t\telse:\n\t\t\twhile player.mats['NativeVillageMat']: player.hand.append(player.mats['NativeVillageMat'].pop())\n\tdef onPileCreate(self, pile, game, **kwargs):\n\t\tsuper(NativeVillage, self).onPileCreate(pile, game, **kwargs)\n\t\tgame.addMat('NativeVillageMat')\n\nclass PearlDiver(Action, CardAdd):\n\tname = 'Pearl Diver'\n\tdef __init__(self, game, **kwargs):\n\t\tsuper(PearlDiver, self).__init__(game, **kwargs)\n\t\tself.price = 2\n\tdef onPlay(self, player, **kwargs):\n\t\tsuper(PearlDiver, self).onPlay(player, **kwargs)\n\t\tplayer.addAction()\n\t\tplayer.draw()\n\t\tif not player.library: return\n\t\tplayer.reveal(player.library[0], hidden=player)\n\t\tif not player.user(('top', 'bot'), ''): player.library.append(player.library.pop(0))\n\nclass Ambassador(Action, Attack, CardAdd):\n\tname = 'Ambassador'\n\tdef __init__(self, game, **kwargs):\n\t\tsuper(Ambassador, self).__init__(game, **kwargs)\n\t\tAttack.__init__(self, game, **kwargs)\n\t\tself.price = 3\n\tdef onPlay(self, player, **kwargs):\n\t\tsuper(Ambassador, self).onPlay(player, **kwargs)\n\t\tif not player.hand: return\n\t\tchoice = player.user([o.name for o in player.hand], 'Choose return')\n\t\trevealedCard = player.hand[choice]\n\t\tplayer.reveal(revealedCard)\n\t\tif not revealedCard.name in player.game.piles: return\n\t\tamnt = player.user(list(range(3)), 'Choose return amount')\n\t\treturned = 0\n\t\tfor i in range(len(player.hand)-1, -1, -1):\n\t\t\tif returned==amnt: break\n\t\t\tif player.hand[i].name==revealedCard.name:\n\t\t\t\tplayer.returnCard(player.hand.pop(i))\n\t\t\t\treturned+=1\n\t\tfor aplayer in player.game.getPlayers(player):\n\t\t\tif not aplayer==player: aplayer.attack(self.attack, self, pile=player.game.piles[revealedCard.name])\n\tdef attack(self, player, **kwargs):\n\t\tplayer.gainFromPile(kwargs['pile'])\n\t\t\nclass FishingVillage(Action, Duration, CardAdd):\n\tname = 'Fishing Village'\n\tdef __init__(self, game, **kwargs):\n\t\tsuper(FishingVillage, self).__init__(game, **kwargs)\n\t\tDuration.__init__(self, game, **kwargs)\n\t\tself.price = 3\n\t\tself.nexts = []\n\tdef onPlay(self, player, **kwargs):\n\t\tsuper(FishingVillage, self).onPlay(player, **kwargs)\n\t\tplayer.addAction(amnt=2)\n\t\tplayer.addCoin()\n\tdef next(self, **kwargs):\n\t\tself.owner.addAction()\n\t\tself.owner.addCoin()\n\nclass Lookout(Action, CardAdd):\n\tname = 'Lookout'\n\tdef __init__(self, game, **kwargs):\n\t\tsuper(Lookout, self).__init__(game, **kwargs)\n\t\tself.price = 3\n\tdef onPlay(self, player, **kwargs):\n\t\tsuper(Lookout, 
self).onPlay(player, **kwargs)\n\t\tplayer.addAction()\n\t\tcards = player.getCards(3)\n\t\tif not cards: return\n\t\tplayer.trashCard(cards.pop(player.user([o.name for o in cards], 'Choose trash')))\n\t\tif not cards: return\n\t\tplayer.discardCard(cards.pop(player.user([o.name for o in cards], 'Choose discard')))\n\t\tif not cards: return\n\t\twhile cards: player.library.append(cards.pop())\n\nclass Smugglers(Action, CardAdd):\n\tname = 'Smugglers'\n\tdef __init__(self, game, **kwargs):\n\t\tsuper(Smugglers, self).__init__(game, **kwargs)\n\t\tself.price = 3\n\tdef onPlay(self, player, **kwargs):\n\t\tsuper(Smugglers, self).onPlay(player, **kwargs)\n\t\tpreviousPlayer = player.game.getPreviousPlayer(player)\n\t\toptions = []\n\t\tfor i in range(len(events)-1, -1, -1):\n\t\t\tif events[i][0]=='startTurn' and events[i][1]['player']==previousPlayer:\n\t\t\t\tfor n in range(i, len(events)):\n\t\t\t\t\tif events[n][0]=='gain' and events[n][1]['player']==previousPlayer and events[n][1]['card'].getPrice(player)<7: options.append(events[n][1]['card'].name)\n\t\t\t\t\telif events[n][0]=='endTurn': break\n\t\t\t\tbreak\n\t\tif not options: return\n\t\tplayer.gainFromPile(player.game.piles[options[player.user(options, 'Choose smuggle')]])\n\t\t\t\t\t\nclass Warehouse(Action, CardAdd):\n\tname = 'Warehouse'\n\tdef __init__(self, game, **kwargs):\n\t\tsuper(Warehouse, self).__init__(game, **kwargs)\n\t\tself.price = 3\n\tdef onPlay(self, player, **kwargs):\n\t\tsuper(Warehouse, self).onPlay(player, **kwargs)\n\t\tplayer.addAction()\n\t\tplayer.draw(amnt=3)\n\t\tfor i in range(3):\n\t\t\tif not player.hand: return\n\t\t\tplayer.discard(player.user([o.name for o in player.hand], 'Discard card'+str(1+i)))\n\t\t\t\nclass Caravan(Action, Duration, CardAdd):\n\tname = 'Caravan'\n\tdef __init__(self, game, **kwargs):\n\t\tsuper(Caravan, self).__init__(game, **kwargs)\n\t\tDuration.__init__(self, game, **kwargs)\n\t\tself.price = 4\n\tdef onPlay(self, player, **kwargs):\n\t\tsuper(Caravan, self).onPlay(player, **kwargs)\n\t\tplayer.addAction()\n\t\tplayer.draw()\n\tdef next(self, **kwargs):\n\t\tself.owner.draw()\n\t\t\nclass Cutpurse(Action, CardAdd):\n\tname = 'Cutpurse'\n\tdef __init__(self, game, **kwargs):\n\t\tsuper(Cutpurse, self).__init__(game, **kwargs)\n\t\tself.price = 4\n\tdef onPlay(self, player, **kwargs):\n\t\tsuper(Cutpurse, self).onPlay(player, **kwargs)\n\t\tplayer.addCoin(amnt=2)\n\t\tfor aplayer in player.game.getPlayers(player):\n\t\t\tif not aplayer==player: aplayer.attack(self.attack, self)\n\tdef attack(self, player, **kwargs):\n\t\tfor i in range(len(player.hand)):\n\t\t\tif player.hand[i].name=='Copper':\n\t\t\t\tplayer.discard(i)\n\t\t\t\treturn\n\t\tfor card in player.hand:\n\t\t\tplayer.reveal(card)\n\t\t\nclass Island(Action, Victory, CardAdd):\n\tname = 'Island'\n\tdef __init__(self, game, **kwargs):\n\t\tsuper(Island, self).__init__(game, **kwargs)\n\t\tVictory.__init__(self, game, **kwargs)\n\t\tself.price = 4\n\t\tself.value = 2\n\tdef onPlay(self, player, **kwargs):\n\t\tsuper(Island, self).onPlay(player, **kwargs)\n\t\tfor i in range(len(player.inPlay)):\n\t\t\tif player.inPlay[i]==self:\n\t\t\t\tplayer.mats['IslandMat'].append(player.getFromPlay(i))\n\t\t\t\tbreak\n\t\tif not player.hand: return\n\t\tplayer.mats['IslandMat'].append(player.hand.pop(player.user([o.name for o in player.hand], 'Choose hide')))\n\tdef onPileCreate(self, pile, game, **kwargs):\n\t\tsuper(Island, self).onPileCreate(pile, game, **kwargs)\n\t\tgame.addMat('IslandMat')\n\t\t\nclass Navigator(Action, CardAdd):\n\tname = 
'Navigator'\n\tdef __init__(self, game, **kwargs):\n\t\tsuper(Navigator, self).__init__(game, **kwargs)\n\t\tself.price = 4\n\tdef onPlay(self, player, **kwargs):\n\t\tsuper(Navigator, self).onPlay(player, **kwargs)\n\t\tplayer.addCoin(amnt=2)\n\t\tcards = player.getCards(5)\n\t\tif not cards: return\n\t\tfor card in cards: player.reveal(card, hidden=player)\n\t\tif player.user(('top', 'discard'), ''):\n\t\t\twhile cards: player.discardCard(cards.pop())\n\t\telse:\n\t\t\twhile cards: player.library.append(cards.pop(player.user([o.name for o in cards], 'Choose top order')))\n\t\t\nclass PirateToken(Token): pass\n\t\t\nclass PirateShip(Action, Attack, CardAdd):\n\tname = 'Pirate Ship'\n\tdef __init__(self, game, **kwargs):\n\t\tsuper(PirateShip, self).__init__(game, **kwargs)\n\t\tAttack.__init__(self, game, **kwargs)\n\t\tself.price = 4\n\tdef onPlay(self, player, **kwargs):\n\t\tsuper(PirateShip, self).onPlay(player, **kwargs)\n\t\tif player.user(('attack', 'money'), ''): player.addCoin(amnt=len(player.mats['PirateMat']))\n\t\telse:\n\t\t\tfor aplayer in player.game.getPlayers(player):\n\t\t\t\tif not player==aplayer: aplayer.attack(self.attack, self)\n\tdef onPileCreate(self, pile, game, **kwargs):\n\t\tsuper(PirateShip, self).onPileCreate(pile, game, **kwargs)\n\t\tgame.addMat('PirateMat')\n\tdef attack(self, player, **kwargs):\n\t\tcards = player.getCards(2)\n\t\tif not cards: return\n\t\toptions = []\n\t\tfor card in cards:\n\t\t\tplayer.reveal(card)\n\t\t\tif 'TREASURE' in card.types: options.append(card)\n\t\tif options:\n\t\t\tchoice = self.owner.user([o.name for o in options], 'Choose trash')\n\t\t\tfor i in range(len(cards)):\n\t\t\t\tif cards[i]==options[choice]:\n\t\t\t\t\tplayer.trashCard(cards.pop(i))\n\t\t\t\t\tself.owner.mats['PirateMat'].append(PirateToken(self.owner.game))\n\t\t\t\t\tbreak\n\t\twhile cards: player.discardCard(cards.pop())\n\t\t\nclass Salvager(Action, CardAdd):\n\tname = 'Salvager'\n\tdef __init__(self, game, **kwargs):\n\t\tsuper(Salvager, self).__init__(game, **kwargs)\n\t\tself.price = 4\n\tdef onPlay(self, player, **kwargs):\n\t\tsuper(Salvager, self).onPlay(player, **kwargs)\n\t\tplayer.addBuy()\n\t\tif not player.hand: return\n\t\tchoice = player.user([o.name for o in player.hand], 'Choose trash')\n\t\tplayer.addCoin(amnt=player.hand[choice].getPrice(player))\n\t\tplayer.trash(choice)\n\t\t\nclass SeaHag(Action, Attack, CardAdd):\n\tname = 'Sea Hag'\n\tdef __init__(self, game, **kwargs):\n\t\tsuper(SeaHag, self).__init__(game, **kwargs)\n\t\tAttack.__init__(self, game, **kwargs)\n\t\tself.price = 4\n\tdef onPlay(self, player, **kwargs):\n\t\tsuper(SeaHag, self).onPlay(player, **kwargs)\n\t\tfor aplayer in player.game.getPlayers(player):\n\t\t\tif not player==aplayer: aplayer.attack(self.attack, self)\n\tdef attack(self, player, **kwargs):\n\t\tplayer.discardCard(player.getCard())\n\t\tplayer.gainFromPile(player.game.piles['Curse'], to=player.library)\n\t\t\nclass TreasureMap(Action, CardAdd):\n\tname = 'Treasure Map'\n\tdef __init__(self, game, **kwargs):\n\t\tsuper(TreasureMap, self).__init__(game, **kwargs)\n\t\tself.price = 4\n\tdef onPlay(self, player, **kwargs):\n\t\tsuper(TreasureMap, self).onPlay(player, **kwargs)\n\t\tfor i in range(len(player.inPlay)):\n\t\t\tif player.inPlay[i]==self:\n\t\t\t\tplayer.trashCard(player.getFromPlay(i))\n\t\t\t\tbreak\n\t\tfor i in range(len(player.hand)):\n\t\t\tif player.hand[i].name=='Treasure Map':\n\t\t\t\tplayer.trash(i)\n\t\t\t\tfor n in range(4): player.gainFromPile(player.game.piles['Gold'], 
to=player.library)\n\t\t\t\treturn\n\t\t\nclass Bazaar(Action, CardAdd):\n\tname = 'Bazaar'\n\tdef __init__(self, game, **kwargs):\n\t\tsuper(Bazaar, self).__init__(game, **kwargs)\n\t\tself.price = 5\n\tdef onPlay(self, player, **kwargs):\n\t\tsuper(Bazaar, self).onPlay(player, **kwargs)\n\t\tplayer.addAction(amnt=2)\n\t\tplayer.draw()\n\t\tplayer.addCoin()\n\t\t\nclass Explorer(Action, CardAdd):\n\tname = 'Explorer'\n\tdef __init__(self, game, **kwargs):\n\t\tsuper(Explorer, self).__init__(game, **kwargs)\n\t\tself.price = 5\n\tdef onPlay(self, player, **kwargs):\n\t\tsuper(Explorer, self).onPlay(player, **kwargs)\n\t\tif player.hand and player.user(('no', 'revealProvince'), ''):\n\t\t\tfor card in player.hand:\n\t\t\t\tif card.name=='Province':\n\t\t\t\t\tplayer.gainFromPile(player.game.piles['Gold'], to=player.hand)\n\t\t\t\t\treturn\n\t\tplayer.gainFromPile(player.game.piles['Silver'], to=player.hand)\n\t\t\nclass GhostShip(Action, Attack, CardAdd):\n\tname = 'Ghost Ship'\n\tdef __init__(self, game, **kwargs):\n\t\tsuper(GhostShip, self).__init__(game, **kwargs)\n\t\tAttack.__init__(self, game, **kwargs)\n\t\tself.price = 5\n\tdef onPlay(self, player, **kwargs):\n\t\tsuper(GhostShip, self).onPlay(player, **kwargs)\n\t\tplayer.draw(amnt=2)\n\t\tfor aplayer in player.game.getPlayers(player):\n\t\t\tif aplayer==player: continue\n\t\t\taplayer.attack(self.attack, self)\n\tdef attack(self, player, **kwargs):\n\t\twhile len(player.hand)>3:\n\t\t\tchoice = player.user([o.name for o in player.hand], 'Choose top')\n\t\t\tplayer.library.append(player.hand.pop(choice))\n\t\t\nclass MerchantShip(Action, Duration, CardAdd):\n\tname = 'Merchant Ship'\n\tdef __init__(self, game, **kwargs):\n\t\tsuper(MerchantShip, self).__init__(game, **kwargs)\n\t\tDuration.__init__(self, game, **kwargs)\n\t\tself.price = 5\n\tdef onPlay(self, player, **kwargs):\n\t\tsuper(MerchantShip, self).onPlay(player, **kwargs)\n\t\tplayer.addCoin(amnt=2)\n\tdef next(self, **kwargs):\n\t\tself.owner.addCoin(amnt=2)\n\t\t\nclass Tactician(Action, Duration, CardAdd):\n\tname = 'Tactician'\n\tdef __init__(self, game, **kwargs):\n\t\tsuper(Tactician, self).__init__(game, **kwargs)\n\t\tDuration.__init__(self, game, **kwargs)\n\t\tself.price = 5\n\tdef onPlay(self, player, **kwargs):\n\t\tsuper(Tactician, self).onPlay(player, **kwargs)\n\t\tplayer.game.dp.connect(self.trigger, signal='startTurn')\n\t\tself.age = 0\n\t\tif not player.hand: return\n\t\tplayer.discardHand()\n\t\tself.nexts.append(self.next)\n\tdef next(self, **kwargs):\n\t\tself.owner.draw(amnt=5)\n\t\tself.owner.addBuy()\n\t\tself.owner.addAction()\n\t\t\nclass Treasury(Action, CardAdd):\n\tname = 'Treasury'\n\tdef __init__(self, game, **kwargs):\n\t\tsuper(Treasury, self).__init__(game, **kwargs)\n\t\tself.price = 5\n\tdef onPlay(self, player, **kwargs):\n\t\tsuper(Treasury, self).onPlay(player, **kwargs)\n\t\tplayer.addAction()\n\t\tplayer.addCoin()\n\t\tplayer.draw()\n\tdef onDestroy(self, player, **kwargs):\n\t\tfor i in range(len(events)-1, -1, -1):\n\t\t\tif events[i][0]=='buy' and events[i][1]['card'] and 'VICTORY' in events[i][1]['card'].types: return\n\t\t\telif events[i][0]=='startTurn': break\n\t\tif not player.user(('bot', 'top'), ''): return\n\t\tprint(player.inPlay)\n\t\tfor i in range(len(player.inPlay)):\n\t\t\tprint(i, player.inPlay[i], self)\n\t\t\tif player.inPlay[i]==self:\n\t\t\t\tplayer.library.append(player.inPlay.pop(i))\n\t\t\t\treturn True\n\nclass Outpost(Action, Duration, CardAdd):\n\tname = 'Outpost'\n\tdef __init__(self, game, **kwargs):\n\t\tsuper(Outpost, 
self).__init__(game, **kwargs)\n\t\tDuration.__init__(self, game, **kwargs)\n\t\tself.price = 5\n\tdef onPlay(self, player, **kwargs):\n\t\tsuper(Outpost, self).onPlay(player, **kwargs)\n\t\tplayer.eotdraw = 3\n\t\tprint(player.eotdraw)\n\t\tplayer.game.delayedActions.append((self.outpostTurn, {'player': player}))\n\tdef outpostTurn(self, **kwargs):\n\t\tturns = 0\n\t\tfor i in range(len(kwargs['player'].game.events)-1, -1, -1):\n\t\t\tif kwargs['player'].game.events[i][0]=='startTurn':\n\t\t\t\tif kwargs['player'].game.events[i][1]['player']==kwargs['player']: turns+=1\n\t\t\t\telse: break\n\t\tif turns>1: return\n\t\tkwargs['player'].game.activePlayer = kwargs['player']\n\t\tkwargs['player'].game.turnFlag = 'outpost'\n\t\tkwargs['player'].resetValues()\n\t\tkwargs['player'].game.dp.send(signal='startTurn', player=kwargs['player'])\n\t\tkwargs['player'].actionPhase()\n\t\tkwargs['player'].treasurePhase()\n\t\tkwargs['player'].buyPhase()\n\t\tkwargs['player'].endTurn()\n\t\t\t\nseaside = [Embargo, Haven, Lighthouse, NativeVillage, Ambassador, FishingVillage, Lookout, Smugglers, Caravan, Cutpurse, Island, Navigator, PirateShip, Salvager, SeaHag, TreasureMap, Bazaar, Explorer, GhostShip, MerchantShip, Outpost, Tactician, Treasury]\n\nclass BoonToken(Token):\n\tname = 'Base Boon Token'\n\tdef __init__(self, game, **kwargs):\n\t\tsuper(BoonToken, self).__init__(game, **kwargs)\n\t\tself.playerOwner = None\n\t\tself.types.append('BOONTOKEN')\n\tdef trigger(self, signal, **kwargs):\n\t\tif kwargs['card'].name==self.owner.cardType.name and kwargs['player']==self.playerOwner: self.boon(self.playerOwner)\n\tdef boon(self, player, **kwargs):\n\t\tpass\n\tdef onAddPile(self, pile, **kwargs):\n\t\tpile.game.dp.connect(self.trigger, signal='playAction')\n\tdef onLeavePile(self, pile, **kwargs):\n\t\tpile.game.dp.disconnect(self.trigger, signal='playAction')\n\nclass PlusCard(BoonToken):\n\tname = 'PlusCard'\n\tdef boon(self, player, **kwargs):\n\t\tplayer.draw()\n\t\t\nclass CoinOfTheRealm(Treasure, Reserve, CardAdd):\n\tname = 'Coin of the Realm'\n\tdef __init__(self, game, **kwargs):\n\t\tsuper(CoinOfTheRealm, self).__init__(game, **kwargs)\n\t\tReserve.__init__(self, game, **kwargs)\n\t\tself.triggerSignal = 'playAction'\n\t\tself.value = 1\n\t\tself.price = 2\n\tdef onPlay(self, player, **kwargs):\n\t\tsuper(CoinOfTheRealm, self).onPlay(player, **kwargs)\n\t\tReserve.onPlay(self, player, **kwargs)\n\tdef call(self, signal, **kwargs):\n\t\tself.owner.addAction(amnt=2)\n\t\t\nclass Page(Action, Traveler, CardAdd):\n\tname = 'Page'\n\tdef __init__(self, game, **kwargs):\n\t\tsuper(Page, self).__init__(game, **kwargs)\n\t\tTraveler.__init__(self, game, **kwargs)\n\t\tself.morph = TreasureHunter \n\t\tself.price = 2\n\tdef onPlay(self, player, **kwargs):\n\t\tsuper(Page, self).onPlay(player, **kwargs)\n\t\tTraveler.onPlay(self, player, **kwargs)\n\t\tplayer.draw()\n\t\tplayer.addAction()\n\t\t\nclass TreasureHunter(Action, Traveler, CardAdd):\n\tname = 'Treasure Hunter'\n\tdef __init__(self, game, **kwargs):\n\t\tsuper(TreasureHunter, self).__init__(game, **kwargs)\n\t\tTraveler.__init__(self, game, **kwargs)\n\t\tself.morph = Warrior \n\t\tself.price = 3\n\tdef onPlay(self, player, **kwargs):\n\t\tsuper(TreasureHunter, self).onPlay(player, **kwargs)\n\t\tTraveler.onPlay(self, player, **kwargs)\n\t\tplayer.addCoin()\n\t\tplayer.addAction()\n\t\tpreviousPlayer = None\n\t\tfor i in range(len(player.game.events)-1, -1, -1):\n\t\t\tif player.game.events[i][0]=='startTurn' and player.game.events[i][1]['player']!=player:\n\t\t\t\tpreviousPlayer = 
player.game.events[i][1]['player']\n\t\t\t\tfor n in range(i, len(player.game.events)):\n\t\t\t\t\tif player.game.events[n][0]=='gain' and player.game.events[n][1]['player']==previousPlayer: player.gainFromPile(player.game.piles['Silver'])\n\t\t\t\t\telif player.game.events[n][0]=='endTurn': return\n\tdef onPileCreate(self, pile, game, **kwargs):\n\t\tgame.require(self.morph)\n\t\tfor i in range(5): pile.append(type(self)(game))\n\t\t\t\t\nclass Warrior(Action, Traveler, Attack, CardAdd):\n\tname = 'Warrior'\n\tdef __init__(self, game, **kwargs):\n\t\tsuper(Warrior, self).__init__(game, **kwargs)\n\t\tTraveler.__init__(self, game, **kwargs)\n\t\tAttack.__init__(self, game, **kwargs)\n\t\tself.morph = Hero \n\t\tself.price = 4\n\tdef onPlay(self, player, **kwargs):\n\t\tsuper(Warrior, self).onPlay(player, **kwargs)\n\t\tTraveler.onPlay(self, player, **kwargs)\n\t\tplayer.draw(amnt=2)\n\t\ttravelers = 0\n\t\tfor card in player.inPlay:\n\t\t\tif 'TRAVELER' in card.types: travelers += 1\n\t\tfor aplayer in player.game.getPlayers(player):\n\t\t\tif aplayer==player: continue\n\t\t\taplayer.attack(self.attack, self, amnt=travelers)\n\tdef attack(self, player, **kwargs):\n\t\tfor i in range(kwargs['amnt']):\n\t\t\tcard = player.getCard()\n\t\t\tif not card: break\n\t\t\tplayer.discardCard(card)\n\t\t\tif not (card.getPrice(player) in (3, 4) and card.getPotionPrice(player)==0): continue\n\t\t\tfor n in range(len(player.discardPile)-1, -1, -1):\n\t\t\t\tif player.discardPile[n]==card: player.trashCard(player.discardPile.pop(n))\n\tdef onPileCreate(self, pile, game, **kwargs):\n\t\tgame.require(self.morph)\n\t\tfor i in range(5): pile.append(type(self)(game))\n\t\t\t\t\nclass Hero(Action, Traveler, CardAdd):\n\tname = 'Hero'\n\tdef __init__(self, game, **kwargs):\n\t\tsuper(Hero, self).__init__(game, **kwargs)\n\t\tTraveler.__init__(self, game, **kwargs)\n\t\tself.morph = Champion \n\t\tself.price = 5\n\tdef onPlay(self, player, **kwargs):\n\t\tsuper(Hero, self).onPlay(player, **kwargs)\n\t\tTraveler.onPlay(self, player, **kwargs)\n\t\tplayer.addCoin(amnt=2)\n\t\toptions = []\n\t\tfor pile in player.game.piles:\n\t\t\tif player.game.piles[pile].viewTop() and 'TREASURE' in player.game.piles[pile].viewTop().types: options.append(pile)\n\t\tif not options: return\n\t\tchoice = player.user(options, 'Choose gain')\n\t\tplayer.gainFromPile(player.game.piles[options[choice]])\n\tdef onPileCreate(self, pile, game, **kwargs):\n\t\tgame.require(self.morph)\n\t\tfor i in range(5): pile.append(type(self)(game))\n\t\t\nclass Champion(Action, Duration, CardAdd):\n\tname = 'Champion'\n\tdef __init__(self, game, **kwargs):\n\t\tsuper(Champion, self).__init__(game, **kwargs)\n\t\tDuration.__init__(self, game, **kwargs)\n\t\tself.price = 6\n\tdef onPlay(self, player, **kwargs):\n\t\tsuper(Champion, self).onPlay(player, **kwargs)\n\t\tplayer.addAction()\n\t\tplayer.game.dp.connect(self.defence, signal='attack')\n\t\tplayer.game.dp.connect(self.addAction, signal='playAction')\n\tdef defence(self, signal, **kwargs):\n\t\tif kwargs['player']==self.owner: return True\n\tdef addAction(self, signal, **kwargs):\n\t\tif kwargs['player']==self.owner: self.owner.addAction()\n\tdef onLeavePlay(self, player, **kwargs):\n\t\tpass\n\tdef onPileCreate(self, pile, game, **kwargs):\n\t\tfor i in range(5): pile.append(type(self)(game))\n\t\t\nclass Peasant(Action, Traveler, CardAdd):\n\tname = 'Peasant'\n\tdef __init__(self, game, **kwargs):\n\t\tsuper(Peasant, self).__init__(game, **kwargs)\n\t\tTraveler.__init__(self, game, **kwargs)\n\t\tself.morph = Soldier \n\t\tself.price = 
2\n\tdef onPlay(self, player, **kwargs):\n\t\tsuper(Peasant, self).onPlay(player, **kwargs)\n\t\tTraveler.onPlay(self, player, **kwargs)\n\t\tplayer.addCoin()\n\t\tplayer.addBuy()\n\nclass Soldier(Action, Traveler, Attack, CardAdd):\n\tname = 'Soldier'\n\tdef __init__(self, game, **kwargs):\n\t\tsuper(Soldier, self).__init__(game, **kwargs)\n\t\tTraveler.__init__(self, game, **kwargs)\n\t\tAttack.__init__(self, game, **kwargs)\n\t\tself.morph = Fugitive \n\t\tself.price = 3\n\tdef onPlay(self, player, **kwargs):\n\t\tsuper(Soldier, self).onPlay(player, **kwargs)\n\t\tTraveler.onPlay(self, player, **kwargs)\n\t\tplayer.addCoin(amnt=2)\n\t\tattacks = 0\n\t\tfor card in player.inPlay:\n\t\t\tif 'ATTACK' in card.types and card!=self: attacks += 1\n\t\tplayer.addCoin(amnt=attacks)\n\t\tfor aplayer in player.game.getPlayers(player):\n\t\t\tif aplayer==player: continue\n\t\t\taplayer.attack(self.attack, self)\n\tdef attack(self, player, **kwargs):\n\t\tif len(player.hand)>3: player.discard(player.user([o.name for o in player.hand], 'Choose discard'))\n\tdef onPileCreate(self, pile, game, **kwargs):\n\t\tgame.require(self.morph)\n\t\tfor i in range(5): pile.append(type(self)(game))\n\t\t\nclass Fugitive(Action, Traveler, CardAdd):\n\tname = 'Fugitive'\n\tdef __init__(self, game, **kwargs):\n\t\tsuper(Fugitive, self).__init__(game, **kwargs)\n\t\tTraveler.__init__(self, game, **kwargs)\n\t\tself.morph = Disciple \n\t\tself.price = 4\n\tdef onPlay(self, player, **kwargs):\n\t\tsuper(Fugitive, self).onPlay(player, **kwargs)\n\t\tTraveler.onPlay(self, player, **kwargs)\n\t\tplayer.draw(amnt=2)\n\t\tplayer.addAction()\n\t\tif player.hand: player.discard(player.user([o.name for o in player.hand], 'Choose discard'))\n\tdef onPileCreate(self, pile, game, **kwargs):\n\t\tgame.require(self.morph)\n\t\tfor i in range(5): pile.append(type(self)(game))\n\t\t\nclass Disciple(Action, Traveler, CardAdd):\n\tname = 'Disciple'\n\tdef __init__(self, game, **kwargs):\n\t\tsuper(Disciple, self).__init__(game, **kwargs)\n\t\tTraveler.__init__(self, game, **kwargs)\n\t\tself.morph = Teacher \n\t\tself.price = 5\n\tdef onPlay(self, player, **kwargs):\n\t\tsuper(Disciple, self).onPlay(player, **kwargs)\n\t\tTraveler.onPlay(self, player, **kwargs)\n\t\tself.links = []\n\t\toptions = []\n\t\tfor card in player.hand:\n\t\t\tif 'ACTION' in card.types:\n\t\t\t\toptions.append(card)\n\t\tif not options: return\n\t\tchoice = player.user([o.name for o in options]+['Play no action'], 'Choose action')\n\t\tif choice+1>len(options): return\n\t\tfor i in range(len(player.hand)):\n\t\t\tif player.hand[i]==options[choice]:\n\t\t\t\tplayer.inPlay.append(player.hand.pop(i))\n\t\t\t\tbreak\n\t\tself.links.append(options[choice])\n\t\tplayer.game.dp.connect(self.trigger, signal='destroy')\n\t\tfor i in range(2): player.playAction(options[choice])\n\t\tif options[choice].name in list(player.game.piles): player.gainFromPile(player.game.piles[options[choice].name])\n\tdef onDestroy(self, player, **kwargs):\n\t\tif self.links: return True\n\tdef onLeavePlay(self, player, **kwargs):\n\t\tplayer.game.dp.disconnect(self.trigger, signal='destroy')\n\tdef trigger(self, signal, **kwargs):\n\t\tfor i in range(len(self.links)-1, -1, -1):\n\t\t\tif self.links[i]==kwargs['card']: self.links.pop(i)\n\t\tif self.links: return\n\t\tfor i in range(len(self.owner.inPlay)):\n\t\t\tif self.owner.inPlay[i]==self:\n\t\t\t\tself.owner.destroyCard(self.owner.inPlay.pop(i))\n\t\t\t\tbreak\n\tdef onPileCreate(self, pile, game, 
**kwargs):\n\t\tgame.require(self.morph)\n\t\tfor i in range(5): pile.append(type(self)(game))\n\nclass Teacher(Action, Reserve, CardAdd):\n\tname = 'Teacher'\n\tdef __init__(self, game, **kwargs):\n\t\tsuper(Teacher, self).__init__(game, **kwargs)\n\t\tReserve.__init__(self, game, **kwargs)\n\t\tself.triggerSignal = 'startTurn'\n\t\tself.price = 6\n\tdef onPlay(self, player, **kwargs):\n\t\tsuper(Teacher, self).onPlay(player, **kwargs)\n\tdef call(self, signal, **kwargs):\n\t\tpass\n\nclass Ratcatcher(Action, Reserve, CardAdd):\n\tname = 'Ratcatcher'\n\tdef __init__(self, game, **kwargs):\n\t\tsuper(Ratcatcher, self).__init__(game, **kwargs)\n\t\tself.triggerSignal = 'startTurn'\n\t\tself.price = 2\n\tdef onPlay(self, player, **kwargs):\n\t\tsuper(Ratcatcher, self).onPlay(player, **kwargs)\n\t\tReserve.onPlay(self, player, **kwargs)\n\t\tplayer.addAction()\n\t\tplayer.draw()\n\tdef call(self, signal, **kwargs):\n\t\tif not self.owner.hand: return\n\t\tself.owner.trash(self.owner.user([o.name for o in self.owner.hand], 'Choose trash'))\n\t\t\nclass Raze(Action, CardAdd):\n\tname = 'Raze'\n\tdef __init__(self, game, **kwargs):\n\t\tsuper(Raze, self).__init__(game, **kwargs)\n\t\tself.price = 2\n\tdef onPlay(self, player, **kwargs):\n\t\tsuper(Raze, self).onPlay(player, **kwargs)\n\t\tplayer.addAction()\n\t\tchoice = player.user([o.name for o in player.hand]+['Raze itself'], 'Choose action')\n\t\tif choice+1>len(player.hand):\n\t\t\ttrashedPrice = self.getPrice(player)\n\t\t\tfor i in range(len(player.inPlay)):\n\t\t\t\tif player.inPlay[i]==self:\n\t\t\t\t\tplayer.trashCard(player.inPlay.pop(i))\n\t\t\t\t\tbreak\n\t\telse:\n\t\t\ttrashedPrice = player.hand[choice].getPrice(player)\n\t\t\tplayer.trash(choice)\n\t\tif not trashedPrice: return\n\t\tcards = player.getCards(trashedPrice)\n\t\tif not cards: return\n\t\tplayer.hand.append(cards.pop(player.user([o.name for o in cards], 'Choose card')))\n\t\twhile cards: player.discardCard(cards.pop())\n\t\t\t\nclass Amulet(Action, Duration, CardAdd):\n\tname = 'Amulet'\n\tdef __init__(self, game, **kwargs):\n\t\tsuper(Amulet, self).__init__(game, **kwargs)\n\t\tDuration.__init__(self, game, **kwargs)\n\t\tself.price = 3\n\tdef onPlay(self, player, **kwargs):\n\t\tsuper(Amulet, self).onPlay(player, **kwargs)\n\t\tself.next()\n\tdef next(self, **kwargs):\n\t\tchoice = self.owner.user(('+1 coin', 'Trash card', 'Gain silver'), 'Choose one')\n\t\tif choice==0: self.owner.addCoin()\n\t\telif choice==1:\n\t\t\tif not self.owner.hand: return\n\t\t\tself.owner.trash(self.owner.user([o.name for o in self.owner.hand], 'Choose trash'))\n\t\telif choice==2: self.owner.gainFromPile(self.owner.game.piles['Silver'])\n\t\t\t\nclass CaravanGuard(Action, Duration, Reaction, CardAdd):\n\tname = 'Caravan Guard'\n\tdef __init__(self, game, **kwargs):\n\t\tsuper(CaravanGuard, self).__init__(game, **kwargs)\n\t\tDuration.__init__(self, game, **kwargs)\n\t\tReaction.__init__(self, game, **kwargs)\n\t\tself.price = 3\n\t\tself.signal = 'attack'\n\tdef onPlay(self, player, **kwargs):\n\t\tsuper(CaravanGuard, self).onPlay(player, **kwargs)\n\t\tplayer.draw()\n\t\tplayer.addAction()\n\tdef next(self, **kwargs):\n\t\tself.owner.addCoin()\n\tdef trigger(self, signal, **kwargs):\n\t\tif kwargs['player']==self.owner and self in self.owner.hand and self.owner.user(('no', 'yes'), 'Use Caravan Guard'):\n\t\t\tfor i in range(len(self.owner.hand)):\n\t\t\t\tif self==self.owner.hand[i]:\n\t\t\t\t\tself.owner.inPlay.append(self.owner.hand.pop(i))\n\t\t\t\t\tself.owner.playAction(self)\n\t\t\t\t\tbreak\n\tdef 
connect(self, **kwargs):\n\t\tkwargs['player'].game.dp.connect(self.trigger, signal=self.signal)\n\tdef disconnect(self, **kwargs):\n\t\tkwargs['player'].game.dp.disconnect(self.trigger, signal=self.signal)\n\tdef onGain(self, player, **kwargs):\n\t\tself.connect(player=player)\n\tdef onTrash(self, player, **kwargs):\n\t\tself.disconnect(player=player)\n\tdef onReturn(self, player, **kwargs):\n\t\tself.disconnect(player=player)\n\nclass Dungeon(Action, Duration, CardAdd):\n\tname = 'Dungeon'\n\tdef __init__(self, game, **kwargs):\n\t\tsuper(Dungeon, self).__init__(game, **kwargs)\n\t\tDuration.__init__(self, game, **kwargs)\n\t\tself.price = 3\n\tdef onPlay(self, player, **kwargs):\n\t\tsuper(Dungeon, self).onPlay(player, **kwargs)\n\t\tplayer.addAction()\n\t\tself.next()\n\tdef next(self, **kwargs):\n\t\tself.owner.draw(amnt=2)\n\t\tfor i in range(2):\n\t\t\tif not self.owner.hand: break\n\t\t\tself.owner.discard(self.owner.user([o.name for o in self.owner.hand], 'Choose discard '+str(i+1)))\n\nclass Gear(Action, Duration, CardAdd):\n\tname = 'Gear'\n\tdef __init__(self, game, **kwargs):\n\t\tsuper(Gear, self).__init__(game, **kwargs)\n\t\tDuration.__init__(self, game, **kwargs)\n\t\tself.price = 3\n\t\tself.saved = []\n\tdef onPlay(self, player, **kwargs):\n\t\tsuper(Gear, self).onPlay(player, **kwargs)\n\t\tplayer.draw(amnt=2)\n\t\tself.age = 0\n\t\tplayer.game.dp.connect(self.nextage, signal='startTurn')\n\t\tfor i in range(2):\n\t\t\tif not player.hand: return\n\t\t\tself.saved.append(player.hand.pop(player.user([o.name for o in player.hand], 'Choose saved '+str(i+1))))\n\tdef nextage(self, signal, **kwargs):\n\t\tif kwargs['player']==self.owner:\n\t\t\tself.age+=1\n\t\t\twhile self.saved: self.owner.hand.append(self.saved.pop())\n\tdef onGameEnd(self, player, **kwargs):\n\t\twhile self.saved: player.library.append(self.saved.pop())\n\t\t\t\nclass Guide(Action, Reserve, CardAdd):\n\tname = 'Guide'\n\tdef __init__(self, game, **kwargs):\n\t\tsuper(Guide, self).__init__(game, **kwargs)\n\t\tReserve.__init__(self, game, **kwargs)\n\t\tself.triggerSignal = 'startTurn'\n\t\tself.price = 3\n\tdef onPlay(self, player, **kwargs):\n\t\tsuper(Guide, self).onPlay(player, **kwargs)\n\t\tplayer.addAction()\n\t\tplayer.draw()\n\tdef call(self, signal, **kwargs):\n\t\twhile self.owner.hand: self.owner.discardCard(self.owner.hand.pop())\n\t\tself.owner.draw(amnt=5)\n\t\t\t\nclass Duplicate(Action, Reserve, CardAdd):\n\tname = 'Duplicate'\n\tdef __init__(self, game, **kwargs):\n\t\tsuper(Duplicate, self).__init__(game, **kwargs)\n\t\tself.triggerSignal = 'gain'\n\t\tself.price = 4\n\tdef onPlay(self, player, **kwargs):\n\t\tsuper(Duplicate, self).onPlay(player, **kwargs)\n\t\tReserve.onPlay(self, player, **kwargs)\n\tdef requirements(self, **kwargs):\n\t\treturn self.owner==kwargs['player'] and kwargs['card'].getPrice(self.owner)<7 and kwargs['card'].name in list(self.owner.game.piles)\n\tdef call(self, signal, **kwargs):\n\t\tself.owner.gainFromPile(self.owner.game.piles[kwargs['card'].name])\n\t\t\t\nclass Magpie(Action, CardAdd):\n\tname = 'Magpie'\n\tdef __init__(self, game, **kwargs):\n\t\tsuper(Magpie, self).__init__(game, **kwargs)\n\t\tself.price = 4\n\tdef onPlay(self, player, **kwargs):\n\t\tsuper(Magpie, self).onPlay(player, **kwargs)\n\t\tplayer.draw()\n\t\tplayer.addAction()\n\t\trevealedCard = player.revealPosition(-1, fromZ=player.library)\n\t\tif 'TREASURE' in revealedCard.types: player.draw()\n\t\tif 'ACTION' in revealedCard.types or 'VICTORY' in revealedCard.types: 
player.gainFromPile(player.game.piles['Magpie'])\n\t\t\t\nadventures = [CoinOfTheRealm, Page, Ratcatcher, Raze, Amulet, CaravanGuard, Dungeon, Gear, Guide, Duplicate, Magpie]","sub_path":"cards.py","file_name":"cards.py","file_ext":"py","file_size_in_byte":66521,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"41518457","text":"import argparse\nfrom datetime import datetime\nimport json\nimport logging\nimport os\nimport time\n\nimport cv2\nimport numpy as np\n\nfrom tf_pose.estimator import TfPoseEstimator\nfrom tf_pose.networks import get_graph_path, model_wh\nfrom tf_pose.json_tools import humans_to_keypoints_dict\n\nlogger = logging.getLogger('TfPoseEstimator-Video')\nlogger.setLevel(logging.DEBUG)\nch = logging.StreamHandler()\nch.setLevel(logging.DEBUG)\nformatter = logging.Formatter('[%(asctime)s] [%(name)s] [%(levelname)s] %(message)s')\nch.setFormatter(formatter)\nlogger.addHandler(ch)\n\nfps_time = 0\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(description='tf-pose-estimation Video')\n parser.add_argument('--video', type=str, default='')\n parser.add_argument('--resolution', type=str, default='432x368', help='network input resolution. default=432x368')\n parser.add_argument('--model', type=str, default='mobilenet_thin', help='cmu / mobilenet_thin / mobilenet_v2_large / mobilenet_v2_small')\n parser.add_argument('--show-process', type=bool, default=False,\n help='for debug purpose, if enabled, speed for inference is dropped.')\n parser.add_argument('--showBG', type=bool, default=True, help='False to show skeleton only.')\n parser.add_argument('--log', type=str, required=False, default=None)\n parser.add_argument('--output', type=str, required=False, default=None)\n args = parser.parse_args()\n\n file_name = os.path.basename(args.video)\n file_prefix = os.path.splitext(file_name)[0]\n\n if args.log:\n log_name = args.log\n else:\n log_name = f\"{file_prefix}-{args.model}-keypoints.json\"\n if args.output:\n output_name = args.output\n else:\n output_name = f\"{file_prefix}-{args.model}-output.mp4\"\n\n logger.debug('initialization %s : %s' % (args.model, get_graph_path(args.model)))\n w, h = model_wh(args.resolution)\n e = TfPoseEstimator(get_graph_path(args.model), target_size=(w, h))\n cap = cv2.VideoCapture(args.video)\n fps = cap.get(cv2.CAP_PROP_FPS)\n source_width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH)) # float\n source_height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT)) # float\n\n if cap.isOpened() is False:\n print(\"Error opening video stream or file\")\n\n fourcc = cv2.VideoWriter_fourcc(*\"MP4V\")\n output_video = cv2.VideoWriter(output_name, fourcc, fps, (source_width, source_height))\n\n frame_count = 0\n frames_dict = {}\n while True:\n ret_val, image = cap.read()\n if ret_val:\n humans = e.inference(image, resize_to_default=True, upsample_size=4)\n if not args.showBG:\n image = np.zeros(image.shape)\n image = TfPoseEstimator.draw_humans(image, humans, imgcopy=False)\n\n cv2.putText(image, \"FPS: %f\" % (1.0 / (time.time() - fps_time)), (10, 10), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)\n cv2.imshow('tf-pose-estimation result', image)\n fps_time = time.time()\n output_video.write(image)\n if cv2.waitKey(1) == 27:\n break\n\n frames_dict[str(frame_count)] = {\"humans\": humans_to_keypoints_dict(humans)}\n frame_count += 1\n else:\n break\n\n cv2.destroyAllWindows()\n output_video.release()\n\n scene_dict = {\n \"datetime\": datetime.utcnow().strftime(\"%Y%m%d-%H%M\"),\n \"file_name\": 
file_name,\n \"model_height\": h,\n \"model_width\": w,\n \"source_height\": source_height,\n \"source_width\": source_width,\n \"model\": args.model,\n \"fps\": fps,\n \"frames\": frames_dict,\n }\n\n with open(log_name, \"w\") as f:\n json.dump(scene_dict, f, indent=4)\n\nlogger.debug('finished+')\n","sub_path":"run_video.py","file_name":"run_video.py","file_ext":"py","file_size_in_byte":3732,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"647292069","text":"#Insert Cactus Code Here\n\nimport pygame\nfrom utilities import load_png\n\nclass Cactus(pygame.sprite.Sprite):\n\n\tdef __init__(self):\n\t\tpygame.sprite.Sprite.__init__(self)\n\t\tself.image, self.rect = load_png('../assets/Cactus.png')\n\t\tself.x = 1080\n\t\tself.y = 425\n\t\tself.v = 2\n\n\t\tscreen = pygame.display.get_surface()\n\t\tself.area = screen.get_rect()\n\t\tself.state = \"moving\"\n\n\t\tself.imageCounter = 0\n\t\tself.counter = 10\n\n\tdef draw(self, screen):\n\t\tif self.x < -50:\n\t\t\tself.x = 1080\n\t\tself.x -= self.v\n\t\tself.v += 1/500\n\n\t\tscreen.blit(self.image, (self.x, self.y))","sub_path":"src/Cactus.py","file_name":"Cactus.py","file_ext":"py","file_size_in_byte":556,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"638695341","text":"\n# coding: utf-8\n\n# In[3]:\n\n# Module to text a number using Sachin's Twilio Account\n# Usage:\n# import SendSMS\n\n# SendSMS.send_sms(message_to_be_sent, number_to_send_message_to)\n# Example - SendSMS.send_sms('Hello World', '+14087185110')\n\n\nfrom twilio.rest import Client\nimport sys\n\ndef send_sms(message, to=\"+14087185110\"):\n # Using sachinsdesai@gmail.com's Twilio credentials\n myTwilioNumber = '16509004867'\n account_sid = 'AC3e56004deed890bdad687550f0f18655'\n auth_token = '26051b37b95c37bb5102baa0d210d94d'\n\n client = Client(account_sid, auth_token)\n\n dest_number = to\n message_to_be_sent = (message[:1500] + '..') if len(message) > 1500 else message\n\n print(\"Sending to {} from {}, message - {}...., length - {}\".format(dest_number, myTwilioNumber, message_to_be_sent[:10], len(message_to_be_sent)))\n result = client.api.account.messages.create(to=dest_number, from_=myTwilioNumber, body=message_to_be_sent,\n #media_url='https://c1.staticflickr.com/3/2899/14341091933_1e92e62d12_b.jpg'\\\n )\n \n return result\n\nif __name__ == \"__main__\":\n if (len(sys.argv) < 3):\n print(\"Insufficient number of inputs\\nSyntax: SendSMS , \")\n else:\n send_sms(sys.argv[1], sys.argv[2])\n\n\n\n\n","sub_path":"Automate/SendSMS.py","file_name":"SendSMS.py","file_ext":"py","file_size_in_byte":1335,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"85928495","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport unittest\n\nfrom sqlalchemy import create_engine\n\nfrom sqlalchemy_profile import StatementProfiler\n\n\nclass Test_RawExecute(unittest.TestCase):\n\n def test(self):\n engine = create_engine('sqlite:///')\n connection = engine.connect()\n\n profiler = StatementProfiler(engine)\n profiler.start()\n\n connection.execute('SELECT 1')\n connection.execute('SELECT 2')\n\n profiler.stop()\n\n assert profiler.count == 2\n assert profiler.select == 2\n\n\nif __name__ == '__main__':\n 
unittest.main()\n","sub_path":"examples/rawexecute.py","file_name":"rawexecute.py","file_ext":"py","file_size_in_byte":595,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"197095084","text":"import allure\nimport pytest\nfrom allure_commons.types import AttachmentType\n\nfrom selenium.webdriver.support.event_firing_webdriver import EventFiringWebDriver\n\nfrom utils.DriverManager import get_driver\nfrom utils.EventDriverListener import EventDriverListener\n\n\n\ndef pytest_addoption(parser):\n\n parser.addoption(\"--node\", action=\"store\", default='', help=\"Type for running current node\")\n parser.addoption(\"--hub_ip\", action=\"store\", default=\"\", help=\"Type for connect to current hub\")\n parser.addoption(\"--build\", action=\"store\", default=\"\", help=\"Type for running current build\")\n\n\n@pytest.fixture(scope=\"function\")\ndef ev_driver(request):\n _remote_web_driver = get_driver(hub_ip=request.config.getoption(\"--hub_ip\"),\n node_name=request.config.getoption(\"--node\"),\n build_version=request.config.getoption(\n \"--build\")) # - Initialize webdriver (local or remote) ,config node name and build version\n _event_driver_ = EventFiringWebDriver(_remote_web_driver,EventDriverListener()) # - register event listener to webdrier\n _event_driver_.implicitly_wait(5)\n\n def teardown_method():\n allure.attach(name='Ending screen', body=_event_driver_.get_screenshot_as_png(),attachment_type=AttachmentType.PNG)\n _event_driver_.close()\n _event_driver_.quit()\n request.addfinalizer(teardown_method)\n return _event_driver_\n\n","sub_path":"conftest.py","file_name":"conftest.py","file_ext":"py","file_size_in_byte":1469,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"18570356","text":"from heapq import *\n\n\nclass Solution:\n def findClosestElements(self, arr: List[int], K: int, X: int) -> List[int]:\n \"\"\"\n Time complexity:\n The time complexity of the above algorithm is O(logN + K*logK). 
We\n need O(logN) for Binary Search and O(K*logK) to insert the numbers in\n the Min Heap, as well as to sort the output array.\n\n Space complexity:\n The space complexity will be O(K), as we need to put a maximum of\n 2K numbers in the heap.\n \"\"\"\n\n index = self.binary_search(arr, X)\n low, high = index - K, index + K\n\n low = max(low, 0) # 'low' should not be less than zero\n # 'high' should not be greater the size of the array\n high = min(high, len(arr) - 1)\n\n minHeap = []\n # add all candidate elements to the min heap, sorted by their absolute difference from 'X'\n for i in range(low, high+1):\n heappush(minHeap, (abs(arr[i] - X), arr[i]))\n\n # we need the top 'K' elements having smallest difference from 'X'\n result = []\n for _ in range(K):\n result.append(heappop(minHeap)[1])\n\n result.sort()\n return result\n\n def binary_search(self, arr, target):\n low, high = 0, len(arr) - 1\n while low <= high:\n mid = int(low + (high - low) / 2)\n if arr[mid] == target:\n return mid\n if arr[mid] < target:\n low = mid + 1\n else:\n high = mid - 1\n if low > 0:\n return low - 1\n return low\n","sub_path":"Problems/Leetcode/658_FindKClosestElements.py","file_name":"658_FindKClosestElements.py","file_ext":"py","file_size_in_byte":1594,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"419500671","text":"# /Users/louisvilpoux/anaconda/bin/jupyter_mac.command ; exit;\n# cd Documents/Manchester/Dissertation/trackingbydetection/\n\n# repere used : x_ord go to the right (->) ; y_ord go to the bottom (|)\n\nimport numpy as np\nimport pandas as pd\nimport cv2\nimport imutils\nimport matplotlib.pyplot as plt\nfrom scipy.stats import multivariate_normal\nimport scipy.spatial as ssp\nimport scipy.stats as ss\nimport itertools\nimport datetime\nimport math\nimport random\nimport argparse\n\nsave_particles = []\nsave_detections = []\ndict_detection = dict()\ndict_particle = dict()\ncolors = {\"red\" : (255, 0, 0), \"green\" : (0, 255, 0), \"white\" : (255, 255, 255), \n \"blue\" : (0, 0, 255), \"yellow\" : (255, 255, 0) , \"turquoise\" : (0, 255, 255), \"purple\" : (255, 0, 255)}\n\nnumber_particles = 30\n\n#video = \"/Users/louisvilpoux/Documents/Manchester/Dissertation/Data/mot1.mp4\"\n#video = \"/Users/louisvilpoux/Documents/Manchester/Dissertation/Data/pets.mp4\"\n#video = \"/Users/louisvilpoux/Documents/Manchester/Dissertation/Data/highway.mp4\"\n\n# Minimum size of the contours that will be considered. 
It permits to not deal with very little detections (noise)\n#min_area = 300\n#min_area = 700\nmin_area = 700\n#min_area = 700\n\nnb = 0\n\n# First frame in the video\nfirstFrame = None\n\nframe_number = 0\nframe_number_delete = 2\n\n# Parameters in the formulas\nalpha = 2\nbeta = 20\ngamma = 2\netha = 1\n\nthreshold_compare_hist_dist = 0.81\n\n# TO DEFINE\nthreshold_velocity_target = 20\n\n# TO DEFINE\nthreshold_matching_score = 20\n\n# TO DEFINE\nnb_detect_save = 2\n\n#cap = cv2.VideoCapture(video)\nap = argparse.ArgumentParser()\nap.add_argument(\"-v\", \"--video\", help=\"path to the input video\")\nargs = vars(ap.parse_args())\ncap = cv2.VideoCapture(args[\"video\"])\nfgbg = cv2.BackgroundSubtractorMOG2()\n#fgbg = cv2.BackgroundSubtractorMOG()\n\n# Normal distribution\nstandard_dev = 0.1\nnormal_distrib = ss.norm(0,standard_dev)\n\n# Start of the timestamp\nts = datetime.datetime.now()\n\n\nwhile(1):\n save_association = dict()\n frame_number = frame_number + 1\n ts2 = datetime.datetime.now()\n ret, frame = cap.read()\n # Resize because of the performance\n width = 500\n frame = imutils.resize(frame, width=width)\n\n #learning rate set to 0\n #fgmask = fgbg.apply(frame)\n fgmask = fgbg.apply(frame, learningRate=1.0/10)\n\n # Dilation\n dilation = cv2.dilate(fgmask,None,iterations = 2);\n \n (cnts, _) = cv2.findContours(dilation.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)\n\n # Limit of the zone for the instantiation of the new trackers\n x_limit = width/8\n y_limit = width/8\n x_w_limit = 9*width/10\n y_h_limit = 7*width/10\n #cv2.rectangle(frame, (x_limit, y_limit), (x_w_limit, y_h_limit), (0, 255, 255), 2)\n\n index_add = 0\n to_add_to_dict_particle = dict()\n\n for c in cnts:\n # If the contour is too small, ignore it\n if cv2.contourArea(c) < min_area:\n continue\n \n # Compute the bounding box for the contour, draw it on the frame\n (x, y, w, h) = cv2.boundingRect(c)\n color_detect = (random.randint(0,255),random.randint(0,255),random.randint(0,255))\n\n # Compute the center of the contour for each detection. 
cX and cY are the coords of the detection center\n M = cv2.moments(c)\n cX = int(M[\"m10\"] / M[\"m00\"])\n cY = int(M[\"m01\"] / M[\"m00\"])\n cv2.circle(frame, (cX, cY), 2, (255, 255, 255), -1)\n\n # Compute the descriptor : histogram of the color of the detection\n # First convert the detection into a RGB image : OpenCV stores images in BGR format rather than RGB ; \n # matplotlib is going to be used to display the results and matplotlib assumes the image is in RGB format\n # Second the histogram is computed and then it is normalized\n frame_rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)\n hist = cv2.calcHist([frame_rgb[x:x+w, y:y+h]], [0, 1, 2], None, [8, 8, 8],[0, 256, 0, 256, 0, 256])\n hist = cv2.normalize(hist).flatten()\n\n # If it is the first frame, add all the detections in the dictionary with their histogram\n if firstFrame is None:\n detect_group = len(dict_detection)\n velocity_target = 0\n timestamp = datetime.datetime.now()\n dict_detection[len(dict_detection)] = []\n dict_detection[len(dict_detection)-1].append([hist,cX,cY,x,y,x+w,y+h,detect_group,velocity_target,timestamp,w*h,color_detect])\n cv2.putText(frame, str(len(dict_detection)), (cX - 20, cY - 20), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255), 2)\n cv2.rectangle(frame, (x, y), (x + w, y + h), color_detect, 2)\n\n # Comparison of the descriptor of the past detections.\n # Try first if it is not the first frame and if we already have detect some objects.\n # Different methods to compare histograms :\n # cv2.cv.CV_COMP_CORREL ; cv2.cv.CV_COMP_CHISQR ; cv2.cv.CV_COMP_INTERSECT ; cv2.cv.CV_COMP_BHATTACHARYYA\n # Given a threshold for the comparison, we decide if it is a new detection or not\n # If it is not a new detection, update the values of the dictionary using the distance to the center.\n # If the value of the comparison is greater than a threshold, the histogram is considered as a candidate \n # histogram. The distance between the centers will help us to understand from which detection does the \n # candidate histogram belong. 
The candidate detections are those which have a similar histogram.\n # The best candidate is the detection that has its center closest to the testing detection center.\n # In the case that there is no candidate after this test, the histogram will be add as a new detection.\n # Different distances can be used :\n # sqeuclidean ; cosine ; correlation ; hamming ; jaccard\n # A text describes the number of the detection\n candidate_key = []\n candidate_dist = []\n if firstFrame is not None:\n used = False\n if len(dict_detection) != 0:\n for key_detec, val_detec in dict_detection.iteritems():\n if len(val_detec) != 0:\n comp_hist_dist = cv2.compareHist(hist,val_detec[len(val_detec)-1][0],cv2.cv.CV_COMP_CORREL)\n if comp_hist_dist > threshold_compare_hist_dist:\n dist_centers = ssp.distance.cdist([(val_detec[len(val_detec)-1][1],val_detec[len(val_detec)-1][2])],[(cX,cY)],'euclidean')[0][0]\n used = True\n candidate_key.append(key_detec)\n candidate_dist.append(dist_centers)\n # We must update a detector\n # Build the matrix of the detections respecting data model\n # Detection : histogram of colors, x_center, y_center, x_min_contour, y_min_contour, x_max_contour, \n # y_max_contour, group of detection, velocity_target, timestamp, size_target, index\n if used == True:\n index = candidate_dist.index(min(candidate_dist))\n detect_group = candidate_key[index]\n timestamp = datetime.datetime.now() - ts\n timestamp = timestamp.total_seconds()\n velocity_target = min(candidate_dist) / timestamp\n color_group = dict_detection[candidate_key[index]][len(dict_detection[candidate_key[index]]) - 1][11]\n dict_detection[candidate_key[index]].append([hist,cX,cY,x,y,x+w,y+h,detect_group,velocity_target,timestamp,w*h,color_group])\n cv2.putText(frame, str(candidate_key[index]), (cX - 20, cY - 20), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255), 2)\n cv2.rectangle(frame, (x, y), (x + w, y + h), color_group, 2)\n # We must add a new detector\n else:\n detect_group = len(dict_detection)\n velocity_target = 0\n timestamp = datetime.datetime.now()\n dict_detection[len(dict_detection)] = []\n #if (cX < x_limit or cX > x_w_limit) and (cY < y_limit or cY > y_h_limit):\n dict_detection[len(dict_detection)-1].append([hist,cX,cY,x,y,x+w,y+h,detect_group,velocity_target,timestamp,w*h,color_detect])\n cv2.putText(frame, str(len(dict_detection)), (cX - 20, cY - 20), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255), 2)\n cv2.rectangle(frame, (x, y), (x + w, y + h), color_detect, 2)\n\n\n # draw the particles\n # find the optimal size limit for the particles spread\n mean = [cX, cY]\n val_cov = min(w,h)\n cov = [[val_cov, 0], [0, val_cov]]\n part_x, part_y = np.random.multivariate_normal(mean, cov, number_particles).T\n\n # Build the matrix of the particles respecting data model\n # particle : [x_ord, y_ord], weight, x_detection_center, y_detection_center, size_target, frame_count_since_born,\n # initial motion direction, initial velocity, future key in the dict\n weight = 1\n success_track_frame = 1\n frame_born = frame_number\n particles = []\n for i,j in zip(part_x,part_y):\n # Initialisation of the motion direction : orthogonal to the closest image borders\n dist_right = width - i\n dist_up = j\n dist_left = i\n dist_bottom = width - j\n distance_border = [dist_right,dist_up,dist_left,dist_bottom]\n distance_min_border = min(distance_border)\n if distance_min_border == dist_right:\n init_motion_dir = [-1,0]\n if distance_min_border == dist_left:\n init_motion_dir = [1,0]\n if distance_min_border == dist_up:\n init_motion_dir 
= [0,1]\n if distance_min_border == dist_bottom:\n init_motion_dir = [0,-1]\n # if (i < x_limit or i > x_w_limit or j < y_limit or j > y_h_limit) and frame_number > 2:\n # particles.append([[i,j],weight,None,None,None,frame_born,init_motion_dir,[0,0],len(dict_particle),success_track_frame])\n\t # Plot the particles\n #cv2.circle(frame,(int(i),int(j)),1,(0, 0, 255), 0)\n # if frame_number <= 2:\n\n #particles.append([[i,j],weight,None,None,None,frame_born,init_motion_dir,[velocity_target,velocity_target],len(dict_particle),success_track_frame,(255,255,255)])\n particles.append([[i,j],weight,None,None,None,frame_born,init_motion_dir,[0,0],len(dict_particle),success_track_frame,(255,255,255)])\n\t \n # Plot the particles\n #cv2.circle(frame,(int(i),int(j)),1,(0, 0, 255), 0) \t\n # The particles are added to save_particles by tracker\n #dict_particle[len(dict_particle)] = particles\n if particles != []:\n to_add_to_dict_particle[len(dict_particle)+index_add] = particles\n\n index_add = index_add + 1\n\n\n ### Delete not associated Particles ###\n\n for part in list(itertools.chain.from_iterable(dict_particle.values())):\n #if frame_number - part[5] > frame_number_delete and part[2] == None:\n if frame_number - part[5] > frame_number_delete:\n idx = part[8]\n dict_particle[idx].remove(part)\n if len(dict_particle[idx]) == 0:\n dict_particle.pop(idx,None)\n\n ### End of Delete not associated Particles ###\n\n\n\n ### Delete too old Detections ###\n # It conserves the last nb_detect_save detections of each group\n for key_detec, val_detec in dict_detection.iteritems():\n \tif len(val_detec) > nb_detect_save:\n \t\tdict_detection[key_detec] = val_detec[nb_detect_save:]\n\n ### End of Delete too old Detections ###\n\n\n\n ### Data Association ###\n\n # Calculate the distance between each detection,tracker pair.\n # Because two for loops is too computationaly expensive, another way to try all the possible values of both lists\n # has been found. 
It used the Python library itertools and make the product of the data of both lists.\n # From all the associations available, pick the assocation that has the best score (greater than the threshold)\n if dict_detection != dict():\n # The loop iterate over the trackers and not the particles\n prev_detect = None\n for tracker,detect in list(itertools.product(dict_particle.values(),list(itertools.chain.from_iterable(dict_detection.values())))):\n d = [detect[1],detect[2]]\n size_detection = detect[10]\n group = detect[7]\n if prev_detect != None and prev_detect[7] == group:\n size_tracker = prev_detect[10]\n pos_tracker = [prev_detect[1],prev_detect[2]]\n velocity = detect[8]\n agreement_target_detection = normal_distrib.cdf((size_tracker - size_detection) / float(size_tracker))\n #print abs(velocity)\n if abs(velocity) < threshold_velocity_target and abs(ssp.distance.euclidean(d,pos_tracker)) != 0:\n gating = agreement_target_detection * normal_distrib.cdf(abs(ssp.distance.euclidean(d,pos_tracker)))\n else:\n distance_detection_motiontracker = (abs(tracker[0][6][0]*detect[1] + tracker[0][6][1]*detect[2]))/math.sqrt((tracker[0][6][0]**2)+(tracker[0][6][1]**2))\n gating = agreement_target_detection * normal_distrib.cdf(distance_detection_motiontracker)\n sum_part_tracker = 0\n for part in tracker:\n sum_part_tracker = sum_part_tracker + normal_distrib.cdf(ssp.distance.euclidean(d,part[0]))\n matching_score = gating * (1 + alpha * sum_part_tracker)\n if matching_score > threshold_matching_score:\n index_tracker = tracker[0][8]\n if save_association.has_key(index_tracker):\n if matching_score > save_association[index_tracker][0][2]:\n save_association[index_tracker] = []\n save_association[index_tracker].append([tracker,detect,matching_score])\n else:\n continue\n else:\n save_association[index_tracker] = []\n save_association[index_tracker].append([tracker,detect,matching_score])\n prev_detect = detect\n\n ### End of Data Association ###\n\n\n ### Bootstrap Filter : Observation Model ###\n # Choose this parameter\n for partic in list(itertools.chain.from_iterable(dict_particle.values())):\n key = partic[8]\n if save_association.has_key(key):\n coord_part = partic[0]\n coord_detec = [save_association[key][0][1][1],save_association[key][0][1][2]]\n # Detection term\n detection_term = beta * 1 * normal_distrib.cdf(abs(ssp.distance.euclidean(coord_part,coord_detec)))\n # Update the data obtained from the association\n partic[2] = save_association[key][0][1][1]\n partic[3] = save_association[key][0][1][2]\n partic[4] = save_association[key][0][1][10]\n partic[9] = partic[9] + 1\n partic[5] = frame_number\n partic[10] = save_association[key][0][1][11]\n # Classifier term\n detect = save_association[key][0][1]\n detect_group = detect[7]\n index_detect = len(dict_detection[detect_group])\n if index_detect != 0:\n \tprev_hist_detect = dict_detection[detect_group][index_detect-1][0]\n \thist_detect = detect[0]\n \t#always 1.0 -> problem\n #reduce the number of bin and normalize red & green\n \tclassifier_term = cv2.compareHist(hist_detect,prev_hist_detect,cv2.cv.CV_COMP_BHATTACHARYYA)\n else:\n \tclassifier_term = 0\n new_weight = detection_term + classifier_term\n #partic[1] = (1/float(number_particles)) * new_weight\n partic[1] = new_weight\n\n ### End of the Bootstrap Filter : Observation Model ###\n\n\n\n ### Resampling ###\n\n copy_dict_particle = dict()\n for key,track in dict_particle.iteritems():\n sum_part = 0\n norm_weight = []\n weight = []\n # Sum of the weights\n for part in track:\n 
sum_part = sum_part + part[1]\n        # Normalize the weights (enumerate avoids list.index(), which returns the first\n        # match and is wrong once the track contains duplicate particles)\n        for i, part in enumerate(track):\n            norm_weight.append(part[1]/float(sum_part))\n            weight.append(i)\n        random_weight = np.random.choice(weight,number_particles,p=norm_weight)\n        # Build the new sample of particles\n        copy_dict_particle[key] = []\n        for idx in random_weight:\n            copy_dict_particle[key].append(track[idx])\n    dict_particle = copy_dict_particle\n\n    ### End of Resampling ###\n\n\n\n    ### Propagation ###\n\n    copy_dict_particle = dict()\n    for part in list(itertools.chain.from_iterable(dict_particle.values())):\n        old_position = part[0]\n        old_velocity = part[7]\n        key = part[8]\n        if part[4] is not None:\n            # noise_position = np.random.normal(0,part[4])\n            noise_position = 0\n        else:\n            noise_position = 0\n        noise_velocity = np.random.normal(0,1/float(part[9]))\n\n        # noise_velocity = 0\n\n        timestamp = datetime.datetime.now() - ts2\n        timestamp = timestamp.total_seconds()\n        new_position = [old_position[0] + old_velocity[0] * timestamp + noise_position , old_position[1] + old_velocity[1] * timestamp + noise_position]\n        new_velocity = [old_velocity[0] + noise_velocity , old_velocity[1] + noise_velocity]\n        new_motion_direction = [new_position[0] - old_position[0] , new_position[1] - old_position[1]]\n        new_motion_direction = part[6]\n        new_part = [new_position,part[1],part[2],part[3],part[4],part[5],new_motion_direction,new_velocity,key,part[9],part[10]]\n\n        # if old_position == new_position:\n        #     print(old_position[0], new_position[0])\n        # else:\n        #     print(\"no\")\n\n        # keep only particles that stay inside the frame ('and', not 'or', otherwise the test is always true)\n        if new_position[0] > 0 and new_position[0] < width and new_position[1] > 0 and new_position[1] < width:\n            if key in copy_dict_particle:\n                copy_dict_particle[key].append(new_part)\n            else:\n                copy_dict_particle[key] = []\n                copy_dict_particle[key].append(new_part)\n    dict_particle = copy_dict_particle\n\n    ### End of the Propagation ###\n\n\n\n    ## Instantiation of the new trackers ##\n    # Only for the new detections. Could be bad in the case of new, not-yet-associated detections.\n\n    for key_track, track in to_add_to_dict_particle.items():\n        # if key_track in dict_particle:\n        #     print(len(dict_particle), key_track)\n        dict_particle[key_track] = track\n\n    ## End of the Instantiation of the new Trackers ##\n\n\n\n    ## Print the Particles ##\n\n    # for part in list(itertools.chain.from_iterable(to_add_to_dict_particle.values())):\n    #     cv2.circle(frame,(int(part[0][0]),int(part[0][1])),3,(0, 0, 255), 0)\n\n    for part in list(itertools.chain.from_iterable(dict_particle.values())):\n        # print(len(part), part[9])\n        cv2.circle(frame,(int(part[0][0]),int(part[0][1])),3,part[10], 0)\n\n    ## End of the Print of the Particles ##\n\n\n\n    # Print the data of a special frame\n    # nb = nb + 1\n    # if nb == 10:\n    #     print(\"save_association\", save_association)\n\n\n    # no longer the first frame\n    firstFrame = 1\n\n    cv2.imshow('fgmask',fgmask)\n    cv2.imshow('frame',frame)\n\n\n    k = cv2.waitKey(30) & 0xff\n    if k == 27:\n        break\n\n\n\ncap.release()\ncv2.destroyAllWindows()\n","sub_path":"Tests/opti_detection.py","file_name":"opti_detection.py","file_ext":"py","file_size_in_byte":19722,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"630298918","text":"__author__ = \"Jie Lei\"\nfrom tqdm import tqdm\nimport matplotlib.pyplot as plt\n\n\ndef parse_log(logfile):\n    with open(logfile, \"r\") as f:\n        lines = [l.strip(\"\\n\") for l in f.readlines()]\n    loss_lines = [l for l in lines if \"loss_classifier\" in l]\n    info_dict = dict(\n        iter=[],\n        loss=[],\n        loss_box_reg=[],\n        loss_rpn_box_reg=[],\n        loss_classifier=[],\n        loss_objectness=[],\n        lr=[]\n    )\n    for l in tqdm(loss_lines):\n        words = l.split()\n        for k in info_dict.keys():\n            convert_dtype = float if k != \"iter\" else int\n            idx = words.index(k+\":\")\n            info_dict[k].append(convert_dtype(words[idx+1]))\n    return info_dict\n\n\ndef plot_curve(x, y, xlabel=\"\", ylabel=\"\", title=\"\"):\n    \"\"\"\n    Assuming already imported:\n    import matplotlib.pyplot as plt\n    %matplotlib inline\n\n    \"\"\"\n    fig = plt.figure()\n    fig.suptitle(title, fontsize=14, fontweight='bold')\n    ax = fig.add_subplot(111)\n    ax.set_xlabel(xlabel)\n    ax.set_ylabel(ylabel)\n    ax.plot(x, y)\n    plt.show()\n\n","sub_path":"demo/visualize_loss.py","file_name":"visualize_loss.py","file_ext":"py","file_size_in_byte":1067,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"581823416","text":"# import cython\n# import pyximport\n# pyximport.install(build_in_temp=True)\n# import c_utils\nimport networkx\nimport numpy as np\n\nw_home = 1\nw_ex = 2\nw_away = 6\n\ndef calculate_all_paths(bot_position, target_positions, graph, length_only=True):\n    if length_only:\n        return [networkx.shortest_path_length(graph, bot_position, loc, weight='weight') for loc in target_positions]\n    else:\n        return [networkx.shortest_path(graph, bot_position, loc, weight='weight') for loc in target_positions]\n\n\ndef closest_position(bot_position, target_positions, graph):\n    # iterate through targets and calculate path lengths\n    all_distances = []\n    for target in target_positions:\n        all_distances.append(networkx.shortest_path_length(graph, bot_position, target, weight=\"weight\"))\n\n    # returning the index of the closest target\n    return np.argmin(all_distances)\n\n\ndef next_step(bot_position, target_position, graph):\n# import pdb; pdb.set_trace()\n    \"\"\"Given a graph representation of the maze, return the next position\n    in the (shortest-)path to 
target_position.\n\n The shortest path is computed on the graph using the a-star algorithm\"\"\"\n try:\n return networkx.shortest_path(graph, bot_position, target_position, weight=\"weight\")[1]\n except KeyError:\n return bot_position\n\n\ndef update_with_enemies(enemy_positions, graph):\n ## utility to get the new position\n def find_new_position(original, move):\n return (original, (original[0] + move[0], original[1] + move[1]))\n ## utility to update the weight based on multiplier\n def update_weight(graph, previous_graph, position, weight_multiplier):\n try:\n if previous_graph.edges[position[0], position[1]]['weight'] > w_ex:\n graph.edges[position[0], position[1]]['weight'] = previous_graph.edges[position[0], position[1]]['weight'] * weight_multiplier\n except KeyError:\n ## the target node does not exist\n pass\n return graph\n # Let's make a copy\n updated_graph = graph.copy()\n # possible moves\n moves = [(1, 0), (-1, 0), (0, 1), (0,-1)]\n for enemy in enemy_positions:\n for m1 in moves:\n # here we look at round +1 moves\n pos1 = find_new_position(enemy, m1)\n for m2 in moves:\n # here we look at round +2 moves\n pos2 = find_new_position(pos1[1], m1)\n for m3 in moves:\n # here we look at round +3 moves\n pos3 = find_new_position(pos2[1], m1)\n ## cascade update to not overwrite previous changes (round+1 has priority over round=2 and so on)\n updated_graph = update_weight(updated_graph, graph, pos3, 1.2)\n updated_graph = update_weight(updated_graph, graph, pos2, 2)\n updated_graph = update_weight(updated_graph, graph, pos1, 4)\n updated_graph = update_weight(updated_graph, graph, enemy, 8)\n return updated_graph\n\n\n# def inspect_updated(enemy_positions, graph):\n# def find_new_position(original, move):\n# return (original[0] + move[0], original[1] + move[1])\n# def update_node(graph, position, weight_multiplier):\n# try:\n# print(position, graph.nodes[position]['weight'])\n# except KeyError:\n# pass\n# return\n# updated_graph = graph.copy()\n# moves = [(1, 0), (-1, 0), (0, 1), (0,-1)]\n# for m1 in moves:\n# pos1 = find_new_position(enemy_positions, m1)\n# update_node(updated_graph, pos1, 10)\n# for m2 in moves:\n# pos2 = find_new_position(pos1, m1)\n# update_node(updated_graph, pos2, 5)\n# for m3 in moves:\n# pos3 = find_new_position(pos2, m1)\n# update_node(updated_graph, pos3, 2)\n# return\n\ndef find_gaps(width, heigth, walls):\n for x in range(width):\n for y in range(heigth):\n if (x, y) not in walls:\n yield (x, y)\n\ndef walls_to_nxgraph(walls, homezone=None):\n \"\"\"Return a networkx Graph object given a pelita maze (walls)\"\"\"\n graph = networkx.Graph()\n width = max([coord[0] for coord in walls]) + 1\n heigth = max([coord[1] for coord in walls]) + 1\n middle_x = int(width / 2)\n middle_y = int(heigth / 2)\n # central_heigth = (y for y in range(2, heigth - 2))\n if (middle_x - 1) == max([coord[0] for coord in homezone]):\n # you are starting on the right\n exclusion = [middle_x-1, middle_x-2]\n else:\n exclusion = [middle_x, middle_x+1]\n # you are starting on the left\n for coords in find_gaps(width, heigth, walls):\n # if coords not in homezone:\n # graph.add_node(coords, weight=100)\n # elif coords[0] in exclusion:\n # graph.add_node(coords, weight=10)\n # else:\n # graph.add_node(coords, weight=1)\n for delta_x, delta_y in ((1,0), (-1,0), (0,1), (0,-1)):\n neighbor = (coords[0] + delta_x, coords[1] + delta_y)\n if coords not in homezone:\n coords_weight = w_away\n elif coords[0] in exclusion:\n coords_weight = w_ex\n else:\n coords_weight = w_home\n\n # we 
don't need to check for getting neighbors out of the maze\n # because our mazes are all surrounded by walls, i.e. our\n # deltas will not put us out of the maze\n if neighbor not in walls:\n if neighbor not in homezone:\n neighbor_weight = w_away\n elif neighbor[0] in exclusion:\n neighbor_weight = w_ex\n else:\n neighbor_weight = w_home\n weight = (coords_weight + neighbor_weight) / 2\n # this is a genuine neighbor, add an edge in the graph\n graph.add_edge(coords, neighbor, weight=weight)\n return graph\n","sub_path":"Alessio/group0_utils_alessio.py","file_name":"group0_utils_alessio.py","file_ext":"py","file_size_in_byte":5933,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"97354142","text":"import sys\nif sys.version_info.major == 2:\n raise('Must be run in Python 3') \n \nimport json\nimport logging, logging.config\n\nimport requests\nfrom bs4 import BeautifulSoup\nfrom base import BRefMatch, BRefSeason\n\nLEAGUES_TO_PATH = {\n 'nba': 'http://www.basketball-reference.com/leagues/NBA_{0}_games.html',\n 'acb': 'http://www.basketball-reference.com/euro/spain-liga-acb/{0}-schedule.html',\n 'lnb': 'http://www.basketball-reference.com/euro/france-lnb-pro-a/{0}-schedule.html',\n 'seriea': 'http://www.basketball-reference.com/euro/italy-basket-serie-a/{0}-schedule.html',\n 'greek': 'http://www.basketball-reference.com/euro/greek-basket-league/{0}-schedule.html',\n }\n\nfrom utils import TimeoutException, convert_to_min\n\nwith open('logging.json', 'r') as f:\n logging.config.dictConfig(json.load(f))\nlogger = logging.getLogger('stringer-bell')\n\n\nclass NbaBRefMatch(BRefMatch):\n \n uri_base = 'http://www.basketball-reference.com/boxscores/{0}.html'\n\n def _read_table(self, table, last_col):\n \"\"\"\n reads given table and updates relevant stats in match dict\n \"\"\"\n away, home = table[0], table[1]\n for team, table in zip(['away', 'home'], [away, home]):\n metrics = table.find('thead').find_all('tr')[1].find_all('th')[1:]\n metrics = [metric.text for metric in metrics]\n\n # generate team totals stats\n team_totals = table.find('tfoot').find_all('td')[1:]\n if not last_col:\n team_totals.pop(-1)\n m = metrics[:-1]\n else:\n m = metrics\n t = [float(total.text) for total in team_totals]\n self.match_[team]['totals'].update(dict(zip(m, t)))\n\n # generate players stats\n rows = table.find('tbody').find_all('tr')\n rows.pop(5)\n for player in rows:\n player_stats = [inf.text for inf in player.find_all('td')]\n player_name = player_stats.pop(0)\n\n for metric, stat in zip(metrics, player_stats):\n if stat == '':\n stat = None\n if metric == 'MP':\n if stat is None or stat == 'Did Not Play' or stat == 'Player Suspended':\n stat = 0.0\n else:\n stat = convert_to_min(stat)\n stat = float(stat) if stat else None\n self.match_[team]['players'][player_name][metric] = stat\n\n def _gen_scoring(self):\n \"\"\"\n generate and add scoring information to match dict\n \"\"\"\n def gen_scoring(table):\n rows = table.find_all('tr')\n quarters = [row.text for row in rows[1].find_all('th')[1:]]\n away, home = rows[2:4]\n scores = {}\n for team, scoring in zip(['away', 'home'], [away, home]):\n scoring = [score.text for score in scoring.find_all('td')[1:]]\n quarters_score = dict(zip(quarters, scoring))\n scores[team] = quarters_score\n return scores\n\n scoring_table = self.soup_.find('table', {'class': 'nav_table stats_table'})\n quarters_score = gen_scoring(scoring_table)\n for team, scores in quarters_score.items():\n self.match_[team]['scores'] 
= scores\n\n def _gen_extra_info(self):\n \"\"\"\n generate and add attendance, duration and officials info to match dict \n \"\"\"\n rows = self.soup_.find('table', {'class': 'margin_top small_text'}).find_all('tr')\n for row in rows:\n data = [r.text for r in row.find_all('td')]\n var = data[0]\n val = data[1]\n if var == 'Inactive:':\n continue\n elif var == 'Officials:':\n self.match_['officials'] = val.split(', ')\n elif var == 'Attendance:':\n self.match_['attendance'] = int(val.replace(',', ''))\n elif var == 'Time of Game:':\n hours, minutes = val.split(':')\n self.match_['duration'] = int(hours) * 60 + int(minutes)\n\n\nclass NbaBRefSeason(BRefSeason):\n\n def _crawl_match(self, code, match_type):\n match = NbaBRefMatch(self.country, self.league, self.season, code, match_type)\n if not match.is_crawled():\n for j in range(5):\n try:\n match.crawl()\n logger.info('Crawled - {0}'.format(code))\n break\n except TimeoutException:\n logger.info(\"Timeout. Couldn't crawl match {0}. Retrying {1}/5\".format(code, j+1))\n continue\n except:\n logger.exception(\"Couldn't crawl match{0}\".format(code))\n break\n\n def _gen_matches_codes(self):\n \"\"\"\n generates b-reference codes for given league, season and date to crawl\n \"\"\"\n self.reg_s_codes_, self.post_s_codes_ = [], []\n url = LEAGUES_TO_PATH['nba'].format(self.season.split('-')[1])\n rv = requests.get(url)\n soup = BeautifulSoup(rv.text, \"lxml\")\n seasons = soup.find_all('table', {'class': 'sortable stats_table'})\n if len(seasons) == 2:\n reg_season, post_season = seasons \n else: \n reg_season, post_season = seasons[0], None\n for codes, table in zip([self.reg_s_codes_, self.post_s_codes_],\n [reg_season, post_season]):\n if table:\n rows = table.tbody.find_all('tr')\n for row in rows:\n match = row.find('a', href=True, text='Box Score')\n if match:\n match_code = match['href'].split('/')[2].split('.')[0]\n codes.append(match_code)\n\n\n","sub_path":"code/basketball-reference-master/nba.py","file_name":"nba.py","file_ext":"py","file_size_in_byte":5971,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"97618620","text":"import tornado.ioloop\nimport tornado.web\nimport tornado.websocket\nimport webbrowser\n\nfrom tornado.options import define, options, parse_command_line\n\nfrom pysoundcard import Stream, continue_flag\nimport numpy as np\n\n\nsample_rate = 44100\nblock_length = 2048\ntau = 0.125\n\ndefine(\"port\", default=8888, help='run on the given port', type=int)\n\n\nclass SlmWebSocket(tornado.websocket.WebSocketHandler):\n def open(self):\n print(\"WebSocket opened\")\n self.set_exponential_smoothing_tau(tau=tau)\n self.init_stream(sample_rate=sample_rate,\n block_length=block_length)\n self.start_stream()\n\n def on_message(self, message):\n\n if '#start#' in message:\n self.start_stream()\n\n if '#stop#' in message:\n self.stop_stream()\n\n if '#set_tau#' in message:\n msg = message.split()\n print(msg)\n if len(msg)>1:\n tau = float(msg[1])\n self.set_exponential_smoothing_tau(tau)\n\n def on_close(self):\n print(\"Websocket is closed\")\n\n def set_exponential_smoothing_tau(self, tau):\n if tau > 0:\n alpha = np.exp(-1.0/(tau*(sample_rate/block_length)))\n self._b0 = 1 - alpha\n self._a1 = -alpha\n print(\"Tau accepted.\")\n else:\n print(\"Tau '{}' not accepted.\".format(tau))\n\n\n def init_stream(self, sample_rate, block_length):\n self._psc_sample_rate = sample_rate\n self._psc_block_length = block_length\n self._level = -120\n\n def callback(in_data, 
frame_count, time_info, status):\n new_value = self._b0*np.mean(in_data**2) - self._a1*10**(self._level*0.1)\n self._level = 10*np.log10(new_value)\n self.write_message('{:.1f}'.format(self._level))\n return in_data, continue_flag\n\n self._psc_stream = Stream(\n callback=callback,\n sample_rate=self._psc_sample_rate,\n block_length=self._psc_block_length,\n output_device=False\n )\n\n def start_stream(self):\n if not self._psc_stream.is_active():\n print('Start stream')\n self._psc_stream.start()\n\n def stop_stream(self):\n if self._psc_stream.is_active():\n print('Stop stream')\n self._psc_stream.stop()\n\napp = tornado.web.Application([(r'/', SlmWebSocket),])\n\nif __name__=='__main__':\n parse_command_line()\n app.listen(options.port)\n webbrowser.open_new_tab('slm.html')\n tornado.ioloop.IOLoop.instance().start()\n","sub_path":"slm.py","file_name":"slm.py","file_ext":"py","file_size_in_byte":2563,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"20241790","text":"import numpy as np\n\ndef nussinov_folding(v):\n n = len(v)\n s = np.zeros((n, n), dtype=np.int)\n\n def score(a, b):\n if a == 'A':\n return b == 'U'\n elif a == 'U':\n return b == 'A'\n elif a == 'C':\n return b == 'G'\n else: # a == 'G'\n return b == 'C'\n \n for i in range(n-1):\n s[i,i] = 0\n s[i+1,i] = 0\n\n s[n-1,n-1] = 0\n for k in range(1,n):\n for i in range(n-k):\n j = i + k\n\n if range(i+1, j-1):\n s_bi = [s[i,l] + s[l+1,j] for l in range(i+1, j-1)]\n else:\n s_bi = [0]\n\n s[i,j] = np.amax([s[i+1,j],\\\n s[i,j-1],\\\n s[i+1,j-1] + score(v[i],v[j]), \\\n np.amax(s_bi)])\n \n print(v)\n print(s)\n\n\n# Parse the input\nf = open('in.txt', 'r')\nv = f.readline().strip()\nf.close()\n\nnussinov_folding(v)","sub_path":"problems/nussinov/nussinov.py","file_name":"nussinov.py","file_ext":"py","file_size_in_byte":829,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"220099265","text":"import cv2\nimport os\nimport numpy\nimport json \nimport time\nimport argparse\n\n\n\nparser = argparse.ArgumentParser(description='keyframe extraction')\nparser.add_argument('data', metavar='DIR',\n help='path to dataset')\nparser.add_argument('outdir', metavar='DIR',\n help='path to output dir')\nparser.add_argument('--mode', type=str, default='train',choices=['train', 'val', 'test'], help='type of dataset')\n\nargs = parser.parse_args()\n\nlabel_path = os.path.join(args.data, args.mode, 'label')\n\nout_root = os.path.join(args.outdir, args.mode)\n\nvideolist = open(os.path.join(args.data, args.mode, 'videolist.txt'),'r')\nfaillist = open('keyframe_faillist.txt','w')\n\nstart = time.time()\ncount = 0\n\nlabel = []\nfor line in videolist:\n label.append('sample_' + line.rstrip().split('.')[0] + '.json')\ntotal = len(label)\n\nfor i in range(0, total):\n annotation = json.load(open(os.path.join(label_path, label[i])))\n video_id = annotation['videoid']\n shots = annotation['shots']\n for shot in shots:\n keyframe = shot['keyframe']\n try:\n output_dir = os.path.join(out_root, video_id)\n if not os.path.exists(output_dir):\n os.makedirs(output_dir)\n output_file_name = os.path.join(output_dir, '%05d.jpg' % (keyframe))\n if not os.path.isfile(output_file_name):\n video_path = os.path.join(args.data, args.mode, 'video',video_id)\n video = cv2.VideoCapture(video_path)\n video.set(cv2.CAP_PROP_POS_FRAMES,keyframe) #设置要获取的帧号\n ret, frame = video.read()\n cv2.imwrite(output_file_name,frame)\n \n except:\n print('%d %s process error' % (i, 
video_id))\n faillist.write(video_id+'\\n')\n faillist.flush()\n\n if i % 100 == 0:\n print('%d / %d finished' % (i, total))\n end = time.time()\n print(end - start)\n start = end\n\nend = time.time()\nprint(end - start)","sub_path":"extract_keyframes.py","file_name":"extract_keyframes.py","file_ext":"py","file_size_in_byte":2008,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"503728594","text":"\nlist1 = [] # list 선언(안해도 상관없음)\n\nlist1 = [ i for i in range(1,11) ] # for문을 이용, 1~10 까지 list1에 삽입\nprint(list1)\n\nlist2 = [i for i in range(0,11) if i%2 == 0 ] # for, if 이용 0 ~ 10 중 짝수만 삽입\nprint(list2)\n\nlist3 = [(x,y) for x in range(1,10) for y in range(1,10)] # 이중for문 + 튜플을 이용한 삽입\nprint(list3)\n\n# comprehension 을 이용한 list -> dict 변환 (for 문 사용)\n\nstudents = ['one','two','three']\n\ndict1 = {'{}'.format(num+1): name for num, name in enumerate(students) }\nprint(dict1)\n\n# zip(list1, list2) - list1, list2의 값을 key값에 맞춰 순차적으로 반환\n\nproduct = ['풀','가위','크레파스']\nprice = [800,2500,5000]\ndict2 = { product : price for product, price in zip(product, price)}\n\nprint(dict2)","sub_path":"listComprehension.py","file_name":"listComprehension.py","file_ext":"py","file_size_in_byte":792,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"620744526","text":"import numpy\nfrom math import sqrt\n\nclass HoughCircle(object):\n BLOB_RADIUS = 0\n\n def __init__(self, canny_img_in, canny_image_in, radius_bounds_in):\n self.radius_bounds = radius_bounds_in\n self.canny_img = canny_img_in\n self.canny_image = canny_image_in\n self.init_accumulator_matrix()\n self.fill_accumulator_matrix()\n\n def init_accumulator_matrix(self):\n self.accumulator_matrix = numpy.zeros((self.radius_bounds[1] - self.radius_bounds[0], self.canny_img.size[0], self.canny_img.size[1]))\n\n def fill_accumulator_matrix(self):\n for radius_add in range(0, self.radius_bounds[1] - self.radius_bounds[0]):\n for x in range(0, self.canny_img.size[0]):\n for y in range(0, self.canny_img.size[1]):\n if self.canny_image[x,y] != 0:\n self.vote_pixel_with_radius((x,y), self.radius_bounds[0]+radius_add)\n if HoughCircle.BLOB_RADIUS != 0:\n self.blob_accumulator_matrix()\n\n def blob_accumulator_matrix(self):\n copy_matrix = numpy.zeros(self.accumulator_matrix.shape)\n for radius_index in range(0, self.accumulator_matrix.shape[0]):\n for x in range(HoughCircle.BLOB_RADIUS, self.accumulator_matrix.shape[1] - HoughCircle.BLOB_RADIUS):\n for y in range(HoughCircle.BLOB_RADIUS, self.accumulator_matrix.shape[2] - HoughCircle.BLOB_RADIUS):\n copy_matrix[radius_index][x][y] = self.get_blob_sum(radius_index, (x,y))\n self.accumulator_matrix = copy_matrix\n\n def get_blob_sum(self, radius_index, pixel):\n sum = 0\n for x in range(pixel[0]-HoughCircle.BLOB_RADIUS, pixel[0]+HoughCircle.BLOB_RADIUS+1):\n for y in range(pixel[1]-HoughCircle.BLOB_RADIUS, pixel[1]+HoughCircle.BLOB_RADIUS+1):\n if self.pixel_in_bounds((x,y)):\n sum += self.accumulator_matrix[radius_index][x][y]\n return sum\n\n def vote_pixel_with_radius(self, pixel, radius):\n radius_index = radius - self.radius_bounds[0]\n for dx in range(-radius, radius):\n dy = int(round( sqrt(radius**2 - dx**2) ))\n point1 = (pixel[0]+dx, pixel[1]+dy)\n point2 = (pixel[0]+dx, pixel[1]-dy)\n if self.pixel_in_bounds(point1):\n self.accumulator_matrix[radius_index][point1[0]][point1[1]] += 1\n if self.pixel_in_bounds(point2):\n self.accumulator_matrix[radius_index][point2[0]][point2[1]] += 1\n\n def 
pixel_in_bounds(self, pixel):\n return pixel[0] > 0 and pixel[0] < self.canny_img.size[0] and pixel[1] > 0 and pixel[1] < self.canny_img.size[1]\n\n def get_highest_vote_num_of_layer(self, radius_index):\n highest_num = 0\n for x in range(0, self.accumulator_matrix.shape[1]):\n for y in range(0, self.accumulator_matrix.shape[2]):\n if self.accumulator_matrix[radius_index][x][y] > highest_num:\n highest_num = self.accumulator_matrix[radius_index][x][y]\n return highest_num\n\n def get_highest_vote_and_radius(self):\n highest_vote = 0\n highest_vote_radius_index = 0\n for radius_index in range(0, self.accumulator_matrix.shape[0]):\n highest_vote_in_layer = self.get_highest_vote_num_of_layer(radius_index)\n if highest_vote_in_layer > highest_vote:\n highest_vote = highest_vote_in_layer\n highest_vote_radius_index = radius_index\n return (highest_vote, highest_vote_radius_index + self.radius_bounds[0])\n","sub_path":"ImgProcessingCLI/ImgProcessingCLI/Geometry/HoughCircle.py","file_name":"HoughCircle.py","file_ext":"py","file_size_in_byte":3564,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"407295478","text":"import asyncio\nfrom typing import Dict\nfrom typing import Type\n\nOPTIONAL = object()\n\n\nclass Field:\n def __init__(\n self,\n default=OPTIONAL,\n dump_to=None,\n load_from=None,\n outer_name=None,\n ):\n if outer_name is not None:\n assert dump_to is None and load_from is None\n self.default = default\n self.dump_to = dump_to or outer_name\n self.load_from = load_from or outer_name\n\n async def dump(self, data, attr, context):\n value = data.get(attr, self.default)\n if value == OPTIONAL:\n return value\n return await self.post_dump(value, context)\n\n async def post_dump(self, value, context):\n return value\n\n async def load(self, data, attr, context):\n value = data.get(attr, self.default)\n if value == OPTIONAL:\n return value\n return await self.post_load(value, context)\n\n async def post_load(self, value, context):\n return value\n\n\nclass Nested(Field):\n def __init__(\n self,\n schema: Type['Schema'],\n default=OPTIONAL,\n dump_to=None,\n load_from=None,\n outer_name=None,\n ):\n self.schema = schema\n super().__init__(default, dump_to, load_from, outer_name)\n\n async def post_dump(self, value, context):\n return await self.schema.dump(value, context=context)\n\n async def post_load(self, value, context):\n return await self.schema.load(value, context=context)\n\n\nclass List(Field):\n def __init__(\n self,\n schema: Type['Schema'] = None,\n default=OPTIONAL,\n dump_to=None,\n load_from=None,\n outer_name=None,\n ):\n self.schema = schema\n super().__init__(default, dump_to, load_from, outer_name)\n\n async def post_dump(self, value, context):\n return [\n (\n await self.schema.dump(item, context=context)\n if self.schema\n else item\n )\n for item in value\n ]\n\n async def post_load(self, value, context):\n return [\n (\n await self.schema.load(item, context=context)\n if self.schema\n else item\n )\n for item in value\n ]\n\n\nclass SchemaMeta(type):\n def __new__(mcs, name, bases, attrs):\n fields = {}\n for base in bases:\n if issubclass(base, Schema):\n fields.update(base.__fields__)\n fields.update(\n {\n name: field\n for name, field in attrs.items()\n if isinstance(field, Field)\n },\n )\n attrs['__fields__'] = fields\n return super().__new__(mcs, name, bases, attrs)\n\n\nclass Schema(metaclass=SchemaMeta):\n __fields__: Dict[str, Field]\n\n @classmethod\n async def dump(cls, data, *, context=None):\n if 
context is None:\n            context = {}\n        values = await asyncio.gather(\n            *[\n                field.dump(data, attr, context)\n                for attr, field in cls.__fields__.items()\n            ],\n        )\n        attrs = [\n            field.dump_to or attr for attr, field in cls.__fields__.items()\n        ]\n        return {\n            attr: value\n            for attr, value in zip(attrs, values)\n            if value != OPTIONAL\n        }\n\n    @classmethod\n    def dump_many(cls, items, *, context=None):\n        if context is None:\n            context = {}\n        return asyncio.gather(\n            *[cls.dump(item, context=context) for item in items],\n        )\n\n    @classmethod\n    async def load(cls, data, *, context=None):\n        if context is None:\n            context = {}\n        values = await asyncio.gather(\n            *[\n                field.load(\n                    data,\n                    field.load_from if field.load_from else attr,\n                    context,\n                )\n                for attr, field in cls.__fields__.items()\n            ],\n        )\n        return {\n            attr: value\n            for attr, value in zip(cls.__fields__, values)\n            if value != OPTIONAL\n        }\n\n    @classmethod\n    def load_many(cls, items, *, context=None):\n        if context is None:\n            context = {}\n        return asyncio.gather(\n            *[cls.load(item, context=context) for item in items],\n        )\n","sub_path":"iko.py","file_name":"iko.py","file_ext":"py","file_size_in_byte":4469,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"448709207","text":"\nimport json\nimport boto3\ndynamodb = boto3.resource('dynamodb')\ntable = dynamodb.Table('StudentInfomationDB')\ndef lambda_handler(event, context):\n    \n    errors=[]\n    result=[]\n    resp={\n        \"success\":False ,\n        \"StatusCode\":400 ,\n        \"message\":\"\"\n    }\n    \n    response = table.scan()\n    items = response['Items']\n    result.append(items)\n    if result[0]:\n        resp[\"success\"]=True\n        resp[\"StatusCode\"]=200\n        resp[\"message\"]=' Successfully get complete data'\n        resp.setdefault(\"data\",[]).append(result)\n    else:\n        resp[\"success\"]=False\n        resp[\"StatusCode\"]=404\n        resp[\"message\"]=' Data not Found'\n        errors.append( 'errormessage : Database is empty ') \n    \n    resp.setdefault(\"errors\", []).append(errors) \n    return resp \n    \n\n    \n    ","sub_path":"ListFunctions.py","file_name":"ListFunctions.py","file_ext":"py","file_size_in_byte":991,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"316460122","text":"def pandigitalproducts():\n    answer = set()\n    for a in range(1, 2000):\n        a_str = str(a)\n        if len(a_str) != len(set(a_str)) or '0' in a_str:\n            continue\n        for b in range(1, 2000):\n            b_str = str(b)\n            ab_str = str(a*b)\n            if len(b_str) != len(set(b_str)) or '0' in b_str or '0' in ab_str:\n                continue\n            var = a_str + b_str + str(a * b)\n            if len(set(var)) == len(var) == 9:\n                answer.add(a*b)\n    return sum(answer)\nprint(pandigitalproducts())","sub_path":"SeeTheSaenz/standardlibrary/euler31-40/euler32.py","file_name":"euler32.py","file_ext":"py","file_size_in_byte":544,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"397839146","text":"import re\nimport tkinter\nimport tkinter.messagebox\n\nroot = tkinter.Tk()\n# Set the window size and position\nroot.geometry(\"300x270+0+0\")  # 300x270 is the initial size of the main window; +0+0 is its initial screen position\n# Do not allow resizing the window\nroot.resizable(False, False)\n# Set the window title\nroot.title(\"Simple Python calculator\")\n\n# Place the display textbox and make it read-only\ncontentVar = tkinter.StringVar(root, \"\")\ncontentEntry = tkinter.Entry(root, textvariable=contentVar, state=\"readonly\")\n# contentEntry['state']='readonly'\ncontentEntry.place(x=10, y=10, width=280, height=20)\n\n\n# Common handler for all buttons\ndef buttonClick(btn):\n    content = contentVar.get()\n    # If the current content starts with a decimal point, prepend a 0\n    if content.startswith(\".\"):\n        content = \"0\" + content\n\n    # Handle each button type accordingly\n    if btn in \"0123456789\":\n        content += btn\n    elif btn == \".\":\n        # Split the expression on the operators;\n        # the last operand may contain at most one decimal point\n        lastPart = re.split(r\"\\+|-|\\*|/\", content)[-1]\n        if \".\" in lastPart:\n            tkinter.messagebox.showerror(\"Error\", \"Too many decimal points\")\n            return\n        else:\n            content += btn\n    elif btn == \"C\":\n        # Clear the whole expression\n        content = \"\"\n    elif btn == \"=\":\n        try:\n            # Evaluate the entered expression\n            content = str(eval(content))\n        except:\n            tkinter.messagebox.showerror(\"Error\", \"Invalid expression\")\n            return\n    elif btn in operators:\n        if content.endswith(operators):\n            tkinter.messagebox.showerror(\"Error\", \"Consecutive operators are not allowed\")\n            return\n        content += btn\n    elif btn == \"Sqrt\":\n        n = content.split(\".\")\n        if all(map(lambda x: x.isdigit(), n)):\n            content = eval(content) ** 0.5\n        else:\n            tkinter.messagebox.showerror(\"Error\", \"Invalid expression\")\n            return\n\n    contentVar.set(content)\n\n\n# Place the Clear and \"=\" buttons\nbtnClear = tkinter.Button(root, text=\"Clear\", command=lambda: buttonClick(\"C\"))\nbtnClear.place(x=40, y=40, width=80, height=20)\nbtnCompute = tkinter.Button(root, text=\"=\", command=lambda: buttonClick(\"=\"))\nbtnCompute.place(x=170, y=40, width=80, height=20)\n# Place the ten digit buttons, the decimal point and the square-root button\ndigits = list(\"0123456789.\") + [\"Sqrt\"]\nindex = 0\nfor row in range(4):\n    for col in range(3):\n        d = digits[index]\n        index += 1\n        btnDigit = tkinter.Button(root, text=d, command=lambda x=d: buttonClick(x))\n        btnDigit.place(x=20 + col * 70, y=80 + row * 50, width=50, height=20)\n# Place the operator buttons\noperators = (\"+\", \"-\", \"*\", \"/\", \"**\", \"//\")\nfor index, operator in enumerate(operators):\n    btnOperator = tkinter.Button(\n        root, text=operator, command=lambda x=operator: buttonClick(x)\n    )\n    btnOperator.place(x=230, y=80 + index * 30, width=50, height=20)\nroot.mainloop()\n","sub_path":"tkinter简易计算器/run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":2990,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"203984068","text":"import sqlite3\nfrom datetime import datetime, timedelta\nimport os\nimport random\nfrom sqlite3 import Error\n\nDATABASE_PATH = '/home/nngfantasy/mysite/fantasy.db'\n\ndef dict_factory(cursor, row):\n    d = {}\n    for idx, col in enumerate(cursor.description):\n        d[col[0]] = row[idx]\n    return d\n\ndef create_connection(db_file):\n    conn = None\n    try:\n        conn = sqlite3.connect(db_file)\n    except Error as e:\n        print(e)\n    return conn\n\ndef check_login(data):\n    conn = create_connection(DATABASE_PATH)\n    conn.row_factory = dict_factory\n    c = conn.cursor()\n    query = \"\"\"\n    SELECT *FROM user WHERE username = ? 
AND password = ?\n \"\"\"\n res = c.execute(query, (data['username'], data['password'])).fetchone()\n conn.close()\n if res is not None:\n if res['activated'] == 0:\n return {\"status_code\": 404, \"status_message\": \"Unsuccessful\", \"error\": \"Username not activated\"}\n return {\"status_code\": 200, \"status_message\": \"Successful\", \"user_id\": res['user_ID'], \"username\": res['username']}\n return {\"status_code\": 404, \"status_message\": \"Unsuccessful\", \"error\": \"Username or Password Incorrect\"}\n\ndef signup_user(data):\n random_icon_ID = random.randint(1, 62)\n conn = create_connection(DATABASE_PATH)\n c = conn.cursor()\n query = \"\"\"\n INSERT INTO user(username, password, icon_ID) VALUES(?, ?, ?)\n \"\"\"\n query2 = \"\"\"\n SELECT *FROM user WHERE username = ?\n \"\"\"\n res = c.execute(query2, (data['username'], )).fetchall()\n if len(res) > 0:\n return {\"status_code\": 404, \"status_message\": \"unsuccessful\", \"error\": \"Username already exists\"}\n try:\n c.execute(query, (data['username'], data['password'], random_icon_ID))\n conn.commit()\n return {\"status_code\": 200, \"status_message\": \"Successful\"}\n except Exception as e:\n return {\"status_code\": 404, \"status_message\": \"unsuccessful\", \"error\": str(e)}\n\ndef get_home_data(user_id):\n conn = create_connection(DATABASE_PATH)\n conn.row_factory = dict_factory\n c = conn.cursor()\n time_now = datetime.now() + timedelta(hours=5, minutes =30)\n string_now = (time_now - timedelta(hours=4)).strftime(\"%B %d, %Y\") + \"%\"\n string_now_with_time = (time_now - timedelta(hours=4)).strftime(\"%B %d, %Y %I:%M%p\")\n query = \"\"\"\n SELECT i.schedule_ID AS schedule_ID, t1.team_name AS team1_name, t1.team_code AS team1_code, t2.team_name AS team2_name, t2.team_code AS team2_code, i.deadline FROM ipl_schedule i, team t1, team t2 WHERE scheduled_date LIKE ? AND t1.team_ID = i.team1_ID AND t2.team_ID = i.team2_ID\n \"\"\"\n query2 = \"\"\"\n SELECT t.team_code, m.member_name FROM bet b, team t, team_member m, bet_on_person bp WHERE b.schedule_ID = ? AND b.user_ID = ? 
AND t.team_ID = b.for_team_ID AND b.bet_ID = bp.bet_ID AND bp.member_ID = m.member_ID\n \"\"\"\n res = c.execute(query, (string_now,)).fetchall()\n for i in range(len(res)):\n deadline_time = datetime.strptime(res[i]['deadline'], \"%B %d, %Y %I:%M%p\")\n if deadline_time < time_now:\n res[i]['finished'] = True\n else:\n res[i]['finished'] = False\n res2 = c.execute(query2, (res[i]['schedule_ID'], user_id)).fetchall()\n if len(res2)>0:\n res[i] = {\"data\": res2}\n res[i]['placed'] = True\n else:\n res[i]['placed'] = False\n print(res)\n conn.close()\n return {\"status_code\": 200, \"status_message\": \"Successful\", \"data\": res}\n\ndef verify_bet_ajax(user_id, schedule_id):\n conn = create_connection(DATABASE_PATH)\n conn.row_factory = dict_factory\n c = conn.cursor()\n time_now = datetime.now() + timedelta(hours=5, minutes =30)\n string_now_with_time = time_now.strftime(\"%B %d, %Y %I:%M%p\")\n query = \"\"\"SELECT i.schedule_ID AS schedule_id, t1.team_name AS team1_name, t1.team_code AS team1_code, t2.team_name AS team2_name, t2.team_code AS team2_code, i.deadline FROM ipl_schedule i, team t1, team t2 WHERE t1.team_ID = i.team1_ID AND t2.team_ID = i.team2_ID AND i.schedule_ID = ?\n \"\"\"\n res = c.execute(query, (schedule_id, )).fetchone()\n if res is None or datetime.strptime(res['deadline'], \"%B %d, %Y %I:%M%p\") < time_now:\n return {\"status_code\": 500, \"status_message\": \"unsuccessful\", \"error\": \"deadline passed or invalid schedule ID\"}\n return {\"status_code\": 200, \"status_message\": \"successful\", \"data\": res}\n\ndef get_players(user_id, team_code):\n return_data = []\n team_code = team_code.upper()\n for each_name in os.listdir(f\"static/images/teams/{team_code}\"):\n return_data.append([f\"/static/images/teams/{team_code}/{each_name}\", each_name.split(\".\")[0]])\n return {\"status_code\": 200, \"status_message\": \"successful\", \"data\": return_data}\n\ndef place_bet(user_id, data):\n print(user_id)\n conn = create_connection(DATABASE_PATH)\n conn.row_factory = dict_factory\n c = conn.cursor()\n query = \"\"\"\n SELECT *FROM bet WHERE user_ID = ? AND schedule_ID = ?\n \"\"\"\n res = c.execute(query, (user_id, data['schedule_id'])).fetchone()\n if res:\n return {\"status_code\": 500, \"status_message\": \"unsuccessful\", \"error\": \"Already placed\"}\n time_now = datetime.now() + timedelta(hours=5, minutes =30)\n string_now_with_time = time_now.strftime(\"%B %d, %Y %I:%M%p\")\n query = \"\"\"\n SELECT *FROM ipl_schedule WHERE schedule_ID = ? 
AND deadline > ?\n \"\"\"\n res = c.execute(query, (data['schedule_id'], string_now_with_time)).fetchone()\n if res is None:\n return {\"status_code\": 500, \"status_message\": \"unsuccessful\", \"error\": \"Deadline is crossed\"}\n query = \"\"\"\n SELECT team_ID FROM team WHERE team_code = ?\n \"\"\"\n team_id = c.execute(query, (data['team'], )).fetchone()['team_ID']\n query = \"\"\"\n INSERT INTO bet(user_ID, schedule_ID, for_team_ID, placed_at) VALUES(?, ?, ?, ?)\n \"\"\"\n c.execute(query, (user_id, data['schedule_id'], team_id, string_now_with_time))\n conn.commit()\n bet_id = c.lastrowid\n query = \"\"\"\n SELECT member_ID from team_member WHERE member_name = ?\n \"\"\"\n query2 = \"\"\"\n INSERT INTO bet_on_person(bet_ID, member_ID) VALUES(?, ?)\n \"\"\"\n for names in data['players']:\n member_id = c.execute(query, (names, )).fetchone()['member_ID']\n c.execute(query2, (bet_id, member_id))\n conn.commit()\n return {\"status_code\": 200, \"status_message\": \"successful\"}\n\ndef leaderboard(data):\n conn = create_connection(DATABASE_PATH)\n conn.row_factory = dict_factory\n c = conn.cursor()\n query = \"\"\"\n SELECT *FROM user ORDER BY points DESC\n \"\"\"\n res = c.execute(query).fetchall()\n conn.close()\n return {\"status_code\": 200, \"status_message\": \"successful\", \"data\": res}\n\ndef verify_can_bet(user_id, data):\n conn = create_connection(DATABASE_PATH)\n conn.row_factory = dict_factory\n c = conn.cursor()\n query = \"\"\"\n SELECT *FROM bet WHERE user_ID = ? AND schedule_ID = ?\n \"\"\"\n res = c.execute(query, (user_id, data['schedule_id'])).fetchone()\n if res:\n return {\"status_code\": 400, \"status_message\": \"unsuccessful\"}\n conn.close()\n return {\"status_code\": 200, \"status_message\": \"successful\"}","sub_path":"webapp/database_operations.py","file_name":"database_operations.py","file_ext":"py","file_size_in_byte":7191,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"9414488","text":"__author__ = 'Phantooom'\r\n'''zouruixp@sina.com'''\r\n'''http://ihacker.cc'''\r\nfrom app import db\r\nfrom app.models import Job, Company, Location, Product, CompanyPic, Tag, History, Manger\r\nfrom manage import app\r\nclass DbUtils(object):\r\n def __init__(self, JobDict, CompanyDict):\r\n self.JobDict = JobDict\r\n self.CompanyDict = CompanyDict\r\n\r\n def Job2Db(self):\r\n\r\n job = Job(company=self.company, location=self.location, CompanyPage=self.JobDict[\"CompanyPage\"],\r\n Salary=self.JobDict[\"Salary\"], City=self.JobDict[\"City\"], Experience=self.JobDict[\"Experience\"],\r\n Education=self.JobDict[\"Education\"], Type=self.JobDict[\"Type\"], TimeSatmp=self.JobDict[\"TimeSatmp\"],\r\n Welfare=self.JobDict[\"Welfare\"], JobDescription=self.JobDict[\"JobDescription\"],\r\n CompanyLogo=self.JobDict[\"CompanyLogo\"])\r\n with app.app_context():\r\n db.session.add(job)\r\n\r\n def Company2Db(self):\r\n self.company = Company(CompanyFullName=self.CompanyDict['CompanyFullName'],\r\n CompanyName=self.CompanyDict['CompanyName'], CompanyPage=self.CompanyDict['CompanyPage'],\r\n Identification=self.CompanyDict['Identification'],\r\n Population=self.CompanyDict['Population'], Field=self.CompanyDict['Field'],\r\n WebSite=self.CompanyDict['WebSite'], Stage=self.CompanyDict['Stage'],\r\n Investment=self.CompanyDict['Investment'], Location=self.CompanyDict['Location'],\r\n City=self.CompanyDict['City'], CompanyWord=self.CompanyDict['CompanyWord'],\r\n CompanyIntroduce=self.CompanyDict['CompanyIntroduce'])\r\n with 
app.app_context():\r\n            db.session.add(self.company)\r\n\r\n    def Location2Db(self):\r\n        self.location = None\r\n        if self.JobDict['positionLat']:\r\n            self.location = Location(PositionLat=float(self.JobDict['positionLat']), PositionLng=float(self.JobDict['positionLng']))\r\n            with app.app_context():\r\n                db.session.add(self.location)\r\n\r\n    def Product2Db(self):\r\n        for product in self.CompanyDict[\"ProductList\"]:\r\n            with app.app_context():\r\n                db.session.add(\r\n                    Product(company=self.company, ProductPic=product[\"ProductPic\"], ProductWeb=product[\"ProductWeb\"],\r\n                            ProductDescription=product[\"ProductDescription\"]))\r\n\r\n    def Manger2Db(self):\r\n        if \"MangerList\" in self.CompanyDict:\r\n            for manger in self.CompanyDict[\"MangerList\"]:\r\n                with app.app_context():\r\n                    db.session.add(Manger(company=self.company, Name=manger[\"name\"], Title=manger[\"title\"],\r\n                                          Description=manger[\"description\"]))\r\n\r\n    def Tag2Db(self):\r\n        if self.CompanyDict[\"Tag\"]:\r\n            for tag in self.CompanyDict[\"Tag\"]:\r\n                with app.app_context():\r\n                    db.session.add(Tag(company=self.company, Name=tag))\r\n\r\n    def History2Db(self):\r\n        for history in self.CompanyDict[\"HistoryList\"]:\r\n            with app.app_context():\r\n                db.session.add(History(company=self.company, Title=history[\"title\"], Link=history[\"link\"]))\r\n\r\n    def DbCommit(self):\r\n        with app.app_context():\r\n            db.session.commit()\r\n","sub_path":"spider/dbUtils.py","file_name":"dbUtils.py","file_ext":"py","file_size_in_byte":3444,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"380035795","text":"# Longest Increasing Subsequence\n\n# Problem\n# Given a sequence A, write a program that finds the longest increasing subsequence.\n\n# For example, for the sequence A = {10, 20, 10, 30, 20, 50} the longest increasing subsequence is {10, 20, 30, 50}, of length 4.\n\n# Input\n# The first line contains the size N of the sequence A (1 ≤ N ≤ 1,000).\n\n# The second line contains the elements Ai that make up the sequence (1 ≤ Ai ≤ 1,000).
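\n\n# Illustrative aside (an assumption, not part of the original problem statement): besides the\n# O(N^2) DP used below, the same length can be computed in O(N log N) with bisect, applied to\n# the seq list read below; tails[k] keeps the smallest possible tail of an increasing run of length k+1:\n#   import bisect\n#   tails = []\n#   for a in seq:\n#       i = bisect.bisect_left(tails, a)\n#       if i == len(tails):\n#           tails.append(a)\n#       else:\n#           tails[i] = a\n#   print(len(tails))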
\n\n# Output\n# Print the length of the longest increasing subsequence of A on the first line.\n\n# Example input 1\n# 6\n# 10 20 10 30 20 50\n# Example output 1\n# 4\n\nN = int(input())  # size of the sequence\nseq = list(map(int, input().split()))\n\ndp = [0] * N  # one slot per element, initialised to 0\n\nfor i in range(N):\n    tmp = 0  # initial length\n    for j in range(i): \n        if seq[i] > seq[j]:  # the later element is larger than the earlier one\n            tmp = max(tmp, dp[j])  # keep the best length stored so far in dp[j]\n    dp[i] = tmp + 1 \n    \nprint(max(dp))","sub_path":"백준/11053.py","file_name":"11053.py","file_ext":"py","file_size_in_byte":1338,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"384148001","text":"from typing import Set, List, Dict\nimport re\nimport csv\nimport time\n\ndef get_csv(path: str) -> List[Dict[str, str]]:\n    with open(path) as csvfile:\n        reader = csv.DictReader(csvfile)\n        queries = [query for query in reader]\n    return queries\n\ndouble_quote_string_literals = re.compile(r\"\\\".+?\\\"\", flags=re.MULTILINE)\nsingle_quote_string_literals = re.compile(r\"\\'.+?\\'\", flags=re.MULTILINE)\ndigits = re.compile(r'\\d+', flags=re.MULTILINE) \n\ndef strip_query(q: str) -> str:\n    s = q\n    s = double_quote_string_literals.sub(r'?', s)\n    s = single_quote_string_literals.sub(r\"?\", s)\n    s = digits.sub(r\"0\", s)\n    return s\n    \ndef main() -> None:\n    errors = []\n\n    queries = get_csv('brooke/distinctuserqueries.csv')\n\n    unique_queries = set()\n\n    tally = 0\n\n    for row in queries:\n        tally += 1\n\n        # if tally == 5:\n        #     break\n\n        # \"host\",\"user\",\"db\",\"query\",\"date\",\"time\"\n        row[\"query_stripped\"] = strip_query(row[\"query\"])\n        unique_queries.add(row[\"query_stripped\"])\n\n\n    with open(\"joaquin/distinctuserqueriesstripped.csv\", \"w\") as multi_file:\n        fieldnames = [\"host\",\"user\",\"db\",\"query\",\"query_stripped\",\"date\",\"time\"]\n        writer = csv.DictWriter(multi_file, fieldnames=fieldnames, quoting=csv.QUOTE_ALL)\n        writer.writeheader()\n\n        for row in queries:\n            try:\n                writer.writerow(row)\n            except Exception as e:\n                print(e)\n                print(\"oops\")\n                print(row)\n                errors.append({ \"error\": e, \"row\": row })\n\n    for error in errors:\n        print(error[\"error\"])\n        print(error[\"row\"])\n\n    print(f\"{len(unique_queries)} unique out of {len(queries)}\")\n\nif __name__ == \"__main__\":\n    main()\n","sub_path":"distinct_user_queries_with_stripping.py","file_name":"distinct_user_queries_with_stripping.py","file_ext":"py","file_size_in_byte":1755,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"10376256","text":"# importing necessary libraries\n\nimport requests\nfrom bs4 import BeautifulSoup\nimport csv\nimport re\nimport pandas as pd\nimport seaborn as sns\n\nsulpak_laptop = pd.read_csv('data2/Final_laptop_sulpak.csv')\nalser_laptop = pd.read_csv('data/laptopss_alser_kz.csv', sep=';', encoding=\"cp1251\")\nalfa_laptop = pd.read_csv('data2/alfakz_notebooks.csv')\n\nHOST = 'https://www.sulpak.kz'\nURL = 'https://www.sulpak.kz/f/noutbuki'\nHEADERS = {\n    'accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3',\n    'user-agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Ubuntu Chromium/78.0.3904.97 Chrome/78.0.3904.97 Safari/537.36'\n}\n\n\ndef get_html(url):  # function for getting an HTML page\n    r = requests.get(url)\n    return r\n\ndef write_csv(data):  # function for saving data to a CSV file\n    with open('Sulpak.csv', 'a', encoding='utf8') as f:\n        writer = csv.writer(f)\n        
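# one spec row per laptop; the tuple below fixes the CSV column order\n        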
writer.writerow((data['title'],\n data['price'],\n data['brand'],\n data['list'],\n data['availability'],\n data['code'],\n data['size'],\n data['os'],\n data['model_cpu'],\n data['freq'],\n data['ram_size'],\n data['hard_typee'],\n data['storage'],\n data['videocard'],\n data['videocard_mem'],\n data['aa']\n ))\n\ndef get_content(html): #function for getting content of the webpage\n soup = BeautifulSoup(html, 'html.parser')\n items = soup.find_all('div', class_='tile-container')\n\n\n for laptop in items:\n url_info = HOST+laptop.find('a', class_='title').get('href')\n html_info = get_html(str(url_info))\n laptop_info = BeautifulSoup(html_info.text, 'html.parser')\n item = laptop_info.find('table', class_='short-description-table').find_all('a')\n try:\n title = laptop.get('data-name')\n except:\n title = None\n try:\n price = laptop.get('data-price')\n except:\n price = None\n try:\n brand = laptop.get('data-brand')\n except:\n brand = None\n try:\n availability = laptop.find('span', class_='availability').get_text(strip=True)\n except:\n availability = None\n try:\n code = laptop.get('data-code') #laptop.find('span', class_='code').get_text(strip=True)\n except:\n code = None\n try:\n list = laptop.get('data-list') #laptop.find('span', class_='code').get_text(strip=True)\n except:\n list = None\n try:\n size = item[0].get_text(strip=True)\n except:\n size = None\n try:\n os = item[1].get_text(strip=True)\n except:\n os = None\n try:\n model_cpu = item[2].get_text(strip=True)\n except:\n model_cpu = None\n try:\n freq = item[3].get_text(strip=True)\n except:\n freq = None\n try:\n ram_size = item[4].get_text(strip=True)\n except:\n ram_size = None\n try:\n hard_typee = item[5].get_text(strip=True)\n except:\n hard_typee = None\n try:\n storage = item[6].get_text(strip=True)\n except:\n storage = None\n try:\n videocard = item[7].get_text(strip=True)\n except:\n videocard = None\n try:\n videocard_mem = item[8].get_text(strip=True)\n except:\n videocard_mem = None\n try:\n video_card = item[9].get_text(strip=True)\n except:\n video_card = None\n\n laptops = { # store data in a dictionary \n 'title': title,\n 'price': price,\n 'brand': brand,\n 'list': list,\n 'availability': availability,\n 'code': code,\n 'size': size,\n 'os': os,\n 'model_cpu': model_cpu,\n 'freq': freq,\n 'ram_size': ram_size,\n 'hard_typee': hard_typee,\n 'storage': storage,\n 'videocard': videocard,\n 'videocard_mem': videocard_mem,\n 'video_card': video_card\n }\n write_csv(laptops) # call function to write daata into CSV file\n\nif __name__=='__main__':\n page_part = '?page='\n\n total_pages = 10\n\n for i in range(1, total_pages): # loop all pages of the site\n print(i)\n url_gen = URL + page_part + str(i)\n print(url_gen)\n html = get_html(url_gen)\n get_content(html.text)\n","sub_path":"parsing_sulpak/parsing.py","file_name":"parsing.py","file_ext":"py","file_size_in_byte":4902,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"243607536","text":"#coding: utf-8\nimport os\nimport sys\nimport re\nimport jieba\nimport math\n\nclass Test_vectorize:\n\tdef __init__(self,input_path):\n\t\tself.input_path = input_path\n\t\tself.allClass = []\n\t\tself.FileWord_tuple = []\n\t\tfor (dirname,dirs,files) in os.walk(self.input_path):\n\t\t\tfor eachdir in dirs:\n\t\t\t\tself.allClass.append(eachdir)\n\n\tdef read_feadture(self,feature_txt):\n\t\tfeature_file = open(feature_txt,'r')\n\t\tfeature_content = 
feature_file.read().split('\\n')\n\t\tfeature_file.close()\n\t\tfeature = list()\n\t\tfor each_feature in feature_content:\n\t\t\teach_feature = each_feature.split(\" \")\n\t\t\tif (len(each_feature)==1):\n\t\t\t\tfeature.append(each_feature[0])\n\t\tself.feature = []\n\t\tfor k in range(0,len(feature)-1):\n\t\t\tself.feature.append(feature[k])\n\t\t\n\tdef buildItemSets(self):\n\t\twith open('F:/classify/stop.txt','r') as s:\n\t\t\tstopword_list = s.readlines()\n\t\tfor i in xrange(0,len(stopword_list)):\n\t\t\tword = stopword_list[i].strip()\n\t\t\tstopword_list[i] = word\n\t\tfor eachclass in self.allClass:\n\t\t\tcurrent_path = os.path.join(self.input_path,eachclass)\n\t\t\tfor (dirname,dirs,filenames) in os.walk(current_path):\n\t\t\t\tfor filename in filenames:\n\t\t\t\t\tclassname = re.sub(r'F:/classify/classify_content/data/TestFile\\\\','',dirname)\n\t\t\t\t\tself.FileWord_tuple.append(classname)\n\t\t\t\t\tself.FileWord_tuple.append(filename)\n\t\t\t\t\tvector_tf_list = []\n\t\t\t\t\twith open(os.path.join(current_path,filename),'r') as f:\n\t\t\t\t\t\tcontent = f.read()\n\t\t\t\t\t\tcontent = self.process_line(content)\n\t\t\t\t\t\twords = jieba.cut(content.strip(),cut_all = False)\n\t\t\t\t\t\tcut_words = []\n\t\t\t\t\t\tfor word in words:\n\t\t\t\t\t\t\tword = word.strip()\n\t\t\t\t\t\t\tif word.encode('utf-8') not in stopword_list and len(word)>1:\n\t\t\t\t\t\t\t\tcut_words.append(word)\n\t\t\t\t\tfor feature in self.feature:\n\t\t\t\t\t\ttf = 0\n\t\t\t\t\t\t#print feature\n\t\t\t\t\t\tfor left_word in cut_words:\n\t\t\t\t\t\t\t#print left_word.encode('utf-8')\n\t\t\t\t\t\t\tif left_word.encode('utf-8') == feature:\n\t\t\t\t\t\t\t\t#print Ture\n\t\t\t\t\t\t\t\ttf +=1\n\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\t#print False\n\t\t\t\t\t\t\t\ttf = tf\n\t\t\t\t\t\tvalue = feature + \":\" + str(tf)\n\t\t\t\t\t\tvector_tf_list.append(value)\n\t\t\t\t\tself.FileWord_tuple.append(vector_tf_list)\n\n\tdef process_line(self,content):\n\t\ttry:\n\t\t\tcontent = re.sub(r'[\\ ]','',content)\n\t\t\tcontent = re.sub(r'[0-9]','',content)\n\t\t\treturn content\n\t\texcept UnicodeDecodeError:\n\t\t\treturn content\n\n\tdef write_to_file(self,filename):\n\t \tfile = open(filename,'w+')\n\t \tlen_of_fileword = len(self.FileWord_tuple)\n\t \tfor i in xrange(0,len_of_fileword,3):\n\t \t\tfile.write(self.FileWord_tuple[i])\n\t \t\tfile.write(\" \")\n\t \t\tlast_vector = self.FileWord_tuple[i+2]\n\t \t\tfor content in last_vector:\n\t \t\t\tfile.write(content)\n\t \t\t\tfile.write(\" \")\n\t \t\tfile.write('\\n')\n\n#if __name__ == '__main__':\ntext_vector = Test_vectorize('F:/classify/classify_content/data/TestFile')\ntext_vector.read_feadture('F:/classify/classify_content/result/feature.txt')\ntext_vector.buildItemSets()\ntext_vector.write_to_file('F:/classify/classify_content/result/test.txt')\n\n\n","sub_path":"sklearn_weibo_news_classify/Test_vectorize_content.py","file_name":"Test_vectorize_content.py","file_ext":"py","file_size_in_byte":2901,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"445093360","text":"import argparse\nimport logging\nfrom os import mkdir\nfrom os.path import join\nimport sys\n\nimport numpy as np\nfrom seedpoint_tracking.training import train_recentering_net\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(description='Regression net training')\n\n parser.add_argument('--root_folder', type=str,\n default='../../../../external_hd/ILSVRC2015/Data/VID',\n help='Path to data folder')\n\n 
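# argparse exposes these flags as attributes named after them (args.save_dir,\n    # args.crop_side, ...), which is how they are read back further down\n    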
parser.add_argument('--train_set_path', type=str,\n                        default='../../../data/tracking/toy_car/'\n                                'dataset.ilsvrc_train.eGauss-5.pruned.n02958343.csv',\n                        help='Full path of training dataset CSV (unshuffled)')\n\n    parser.add_argument('--save_dir', type=str, default='out',\n                        help='Where to save logs and general outputs')\n\n    parser.add_argument('--rescale_short_side', type=int, default=256,\n                        help='Resize all input video frames so that this is '\n                             'their short side (preserve aspect ratio)')\n\n    parser.add_argument('--crop_side', type=int, default=127,\n                        help='Side of the crop (after resize) extracted around '\n                             'seed point (must be odd number)')\n\n    parser.add_argument('--batch_size', type=int, default=64)\n\n    parser.add_argument('--epochs', type=int, default=1, metavar='N',\n                        help='number of total epochs to run')\n\n    parser.add_argument('--print_freq', '-p', type=int, default=10,\n                        metavar='N', help='loss print frequency')\n\n    parser.add_argument('--num_workers', type=int, default=4,\n                        help='number of threads used by the data loader')\n\n    parser.add_argument('--gpu_id', type=str, default='0')\n\n    args = parser.parse_args()\n\n    mkdir(args.save_dir)\n\n    assert np.mod(args.crop_side, 2) == 1\n\n    # Set up the general logger\n    logger = logging.getLogger('seedpoint_tracking')\n    logger.setLevel(logging.INFO)\n    file_handler = logging.FileHandler(join(args.save_dir, 'output.log'))\n    logger.addHandler(file_handler)\n    stdout_handler = logging.StreamHandler(sys.stdout)\n    logger.addHandler(stdout_handler)\n    logger.info(args)\n\n    train_recentering_net(**vars(args))\n","sub_path":"seedpoint_tracking/scripts/train_recentering_net.py","file_name":"train_recentering_net.py","file_ext":"py","file_size_in_byte":2250,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"543099942","text":"# play_vibrato_ver2.py\r\n# Reads a specified wave file (mono) and plays it with a vibrato effect.\r\n# (Sinusoidal time-varying delay)\r\n# This implementation uses a circular buffer with two buffer indices.\r\n# Uses linear interpolation\r\n\r\nimport pyaudio\r\nimport wave\r\nimport struct\r\nimport math\r\nfrom myfunctions import clip16\r\n\r\n# # TRY BOTH WAVE FILES\r\n# wavfile = 'sin01_mono.wav'\r\n# wavfile = 'decay_cosine_mono.wav'\r\n\r\n# Open wave file\r\n\r\n# Read wave file pr\r\n\r\n# Vibrato parameters\r\nf0 = 2\r\nW = 0.2\r\n# W = 0  # for no effect\r\n\r\n# f0 = 10\r\n# W = 0.2\r\n\r\n# OR\r\n# f0 = 20\r\n# ratio = 1.06\r\n# W = (ratio - 1.0) / (2 * math.pi * f0 )\r\n# print(W)\r\n\r\n# Create a buffer (delay line) for past values\r\nbuffer_MAX = 64  # Buffer length\r\nbuffer = [0.0 for i in range(buffer_MAX)]  # Initialize to zero\r\n\r\noutput_value = [0.0 for i in range(buffer_MAX)]  # Initialize to zero\r\n# Buffer (delay line) indices\r\nkr = 0  # read index\r\nkw = buffer_MAX // 2  # write index (initialize to middle of buffer)\r\n\r\n# print('The delay of {0:.3f} seconds is {1:d} samples.'.format(delay_sec, delay_samples))\r\nprint('The buffer is {0:d} samples long.'.format(buffer_MAX))\r\n\r\nWIDTH = 2\r\nCHANNELS = 1 \r\nRATE = 16000\r\nDURATION = 5\r\n# Open an output audio stream\r\np = pyaudio.PyAudio()\r\n\r\nstream = p.open(format = pyaudio.get_format_from_width(WIDTH),\r\n                channels = CHANNELS,\r\n                rate = RATE,\r\n                input = True,\r\n                output = True )\r\n\r\n\r\n# output_all = ''  # output signal in all (string)\r\noutput_all = bytes([])  # output signal in all (string)\r\n\r\nprint('* Playing...')\r\noutput_buffer = []\r\nNblocks = 
math.ceil(DURATION*RATE/buffer_MAX)\r\n\r\n\r\ninput_string = stream.read(buffer_MAX, exception_on_overflow = False)\r\n# Loop through wave file \r\nfor n in range(Nblocks):\r\n\r\n # Get sample from wave file\r\n\r\n input_value = struct.unpack('h'*buffer_MAX, input_string)\r\n # print('Length of input_value list: ',len(input_value))\r\n\r\n\r\n # input_string = wf.readframes(buffer_MAX)\r\n # print(len(input_value))\r\n # Convert string to number\r\n # print('Length of output_value list: ',len(output_value))\r\n\r\n for z in range(len(input_value)):\r\n \r\n # Get previous and next buffer values (since kr is fractional)\r\n kr_prev = int(math.floor(kr)) \r\n kr_next = kr_prev + 1\r\n frac = kr - kr_prev # 0 <= frac < 1\r\n if kr_next >= buffer_MAX:\r\n kr_next = kr_next - buffer_MAX\r\n\r\n # Compute output value using interpolation\r\n output_value[z] = (1-frac) * buffer[kr_prev] + frac * buffer[kr_next]\r\n output_value[z] = int(clip16(output_value[z]))\r\n # Update buffer (pure delay)\r\n buffer[int(kw)] = input_value[z]\r\n\r\n # Increment read index\r\n kr = kr + 1 + W * math.sin( 2 * math.pi * f0 * z / RATE )\r\n # Note: kr is fractional (not integer!)\r\n\r\n # Ensure that 0 <= kr < buffer_MAX\r\n if kr >= buffer_MAX:\r\n # End of buffer. Circle back to front.\r\n kr = 0\r\n\r\n # Increment write index \r\n kw = kw + 1\r\n if kw == buffer_MAX:\r\n # End of buffer. Circle back to front.\r\n kw = 0\r\n # output_value.append()\r\n # Clip and convert output value to binary string\r\n\r\n output_string = struct.pack('h'*buffer_MAX, *output_value)\r\n # Write output to audio stream\r\n stream.write(output_string)\r\n\r\n output_all = output_all + output_string # append new to total\r\n input_string = stream.read(buffer_MAX, exception_on_overflow = False)\r\n\r\nprint('* Finished')\r\n\r\nstream.stop_stream()\r\nstream.close()\r\np.terminate()\r\n","sub_path":"dsp_lab/5_2_3.py","file_name":"5_2_3.py","file_ext":"py","file_size_in_byte":3734,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"399927818","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Aug 1 16:56:47 2017\n\n@author: nicholas\n\"\"\"\n\nimport numpy as np\nfrom matplotlib import pyplot as plt\ndef shape_func(xi):\n '''\n Defines the shape functions used in FEA\n \n Parameters\n ----------\n xi: float\n local coordinate of element\n \n Returns\n -------\n N: array of float\n shape function\n '''\n N = np.zeros(2)\n N[0] = 0.5*(1-xi)\n N[1]= 0.5*(1+xi)\n return N\n \ndef shape_func_dev(xi):\n '''\n Defines the derivative of the shape function w.r.t the local coordinate.\n \n Parameters\n ----------\n xi: float\n local coordinate \n \n Returns\n -------\n dNdxi: array of float\n derivative of shape function w.r.t xi\n '''\n dNdxi = np.zeros(2)\n dNdxi[0] = -0.5\n dNdxi[1] = 0.5\n return dNdxi\n \ndef finite_element_1d(N_elements,q,L,E,nu,kappa,h,b):\n '''\n Main FEA program. Computes deflection and rotation of timoshenko beam using\n finite elements. Computes the stiffness matrix and force vector and populates\n appropriately. 
Computes deflection and rotation based on cantilever beam \n    boundary conditions.\n    \n    Parameters\n    ----------\n    N_elements: int\n        number of finite elements\n    q: array of floats\n        load acting on each element\n    L: float\n        length of beam\n    E: float\n        modulus of elasticity (Young's Modulus)\n    nu: float\n        Poisson's ratio\n    kappa: float\n        curvature constant\n    h: float\n        beam thickness\n    b: float\n        beam breadth\n    Returns\n    -------\n    U: array of floats\n        deflections and rotations of beams at nodes\n    free_dof: array of floats\n        array of the nodes which are free to deflect and rotate.\n    '''\n    G = E/(2*(1+nu))\n    \n    I = [(t**3)*b[i]/12 for i,t in enumerate(h)]\n    \n    \n    EI = [E*i for i in I]\n    J = (b*h**3/3)*(1 - (192*h/(np.pi**5*b))*np.tanh(np.pi*b/(2*h)))\n    \n    \n    \n    # Even grid\n    nodes, dx = np.linspace(0, L, N_elements+1, retstep=True)\n    \n    # Location matrix\n    LM = np.zeros((2, N_elements), dtype=np.int)\n    for e in range(N_elements):\n        LM[0, e] = e\n        LM[1, e] = e+1\n    \n    # Global stiffness matrix and force vector\n    K = np.zeros(((N_elements+1), (N_elements+1)))\n    \n    F = np.zeros(((N_elements+1),))\n    dxdxi = dx/2\n    integration_points = [-np.sqrt(1/3),np.sqrt(1/3)]\n    # Loop over elements\n    for e in range(N_elements):\n        for xi in integration_points:\n            N = shape_func(xi)\n            dNdxi = shape_func_dev(xi)\n            dNdx = dNdxi * (1/dxdxi)\n            #B = np.zeros((2,4))\n            B = dNdx\n            A,C = np.meshgrid(LM[:,e], LM[:,e])\n            \n            K[A,C] += np.dot(np.transpose(B),B) * G* J[e] * dxdxi\n            \n            \n            F[LM[:,e]] += N * q[e] * dxdxi\n            \n    \"\"\"\n    B = np.zeros((2,4))\n    B[1,:2] = dNdx\n    B[1,2:] = -N\n    K[A,C] += np.dot(np.transpose(B),B) * kappa * h[e] * b[e] * G * dxdxi\n    \"\"\"\n    #Boundary Conditions\n    fixedNodeW = [0]\n    #fixedNodeTheta = [0+N_elements+1]\n    \n    # Boundary conditions- fixed at 0.25c\n    #fixedNodeW = [int(N_elements/4)]\n    #fixedNodeTheta = [int(N_elements/4) + N_elements+1]\n    \n    free_dof = np.delete(np.arange(0,1*(N_elements+1)),[fixedNodeW])\n    \n    print(K[0,0])\n    print(K[1,1])\n    print(K[1,0])\n    print(G*J/L)\n    active_nodes_A,active_nodes_B = np.meshgrid(free_dof,free_dof)\n    \n    U = np.linalg.solve(K[active_nodes_A,active_nodes_B],F[free_dof])\n    \n    \n    \n    return U,free_dof\nE = 5e6\nnu = 0.3\nN_elements = 1\nL = 10\nkappa = 5/6\nh = 2 * np.ones(N_elements)\nb = h\n# q is pressure per element area not force\nq = np.zeros(N_elements)\nq[:] = 100\nU,free_dof = finite_element_1d(N_elements,q,L,E,nu,kappa,h,b)\nplt.plot(free_dof[0::2],U[0::2])","sub_path":"Python/saint_venant_torsion.py","file_name":"saint_venant_torsion.py","file_ext":"py","file_size_in_byte":3905,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"495344706","text":"#! 
/usr/bin/env python\n\nimport locale\nimport os\nimport sys\nimport traceback\n\nfrom mwlib.log import Log\n\nimport mwlib\nfrom mwlib.pdf import html2pdf\n\nreload(sys)\nsys.setdefaultencoding(\"UTF8\")\nlog = Log(\"mwlib.pdf.writer\")\n\n\ndef patch_logging(output_filename):\n fn = os.path.join(os.path.dirname(output_filename), \"render.log\")\n mwlib.utils.start_logging(fn)\n\n\ndef writer(env, output, status_callback, debug=True, lang=None, x=False):\n if not lang:\n _locale = locale.getlocale(locale.LC_NUMERIC)\n if _locale:\n lang = _locale[0]\n crop_marks = False\n if not x:\n patch_logging(output)\n renderer = html2pdf.PrincePdfWriter(env, output, debug=debug, crop_marks=crop_marks, lang=lang)\n\n try:\n renderer.render_zip()\n except Exception:\n traceback.print_exc()\n else:\n log.info(\"rendering {} finished\".format(output))\n\n\nwriter.description = \"HTML writer\"\nwriter.content_type = \"application/pdf\"\nwriter.file_extension = \"pdf\"\n\nwriter.options = {\n \"lang\": {\"help\": \"language of book and its containing text\", \"param\": \"LANG\"},\n \"debug\": {\"help\": \"turn debug mode on\",},\n \"x\": {\"help\": \"turn off forced file logging\"},\n}\n","sub_path":"mwlib/pdf/writer_interface.py","file_name":"writer_interface.py","file_ext":"py","file_size_in_byte":1196,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"108694771","text":"from turtle import *\ndef scwer(l):\n circle(l)\n left(45)\ndef draw(l, col, num):\n speed(200)\n color(col)\n width(2)\n for i in range(int(360/num)):\n color(col)\n scwer(l)\n color(\"red\")\n scwer(l)\n left(num)\ndraw(100, \"blue\", 4)\ndraw(120, \"white\", 5)\n","sub_path":"archive/draw.py","file_name":"draw.py","file_ext":"py","file_size_in_byte":297,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"610689233","text":"'''\nWrite a program which can compute the factorial of a given number.\nThe results should be printed in a comma-separated sequence on a \nsingle line.\nSuppose the following input is supplied to the program:\n8\nThen, the output should be:\n40320\n\n'''\n\n\ndef factorial(number):\n if number == 0:\n return 1\n\n else:\n return (number * factorial(number - 1))\n\n\nnumber = int(input('Enter a number: '))\n\nresult = factorial(number)\nprint(f'Factorial of {number} is {result}')\n","sub_path":"q2.py","file_name":"q2.py","file_ext":"py","file_size_in_byte":482,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"147943867","text":"# =============================================================================\n# Tasks to be callable async &/or on a Schedule\n# @ToDo: Rewrite a lot of these to use s3db_task or settings_task instead of\n# having a lot of separate tasks defined here\n# =============================================================================\n\nhas_module = settings.has_module\n\n# -----------------------------------------------------------------------------\ndef dummy():\n \"\"\"\n Dummy Task\n - can be used to populate a table with a task_id\n \"\"\"\n return\n\n# -----------------------------------------------------------------------------\ndef s3db_task(function, user_id=None, **kwargs):\n \"\"\"\n Generic Task\n - can be used to call any s3db.function(**kwargs)\n - saves having to create separate Tasks for many cases\n \"\"\"\n if user_id:\n # Authenticate\n auth.s3_impersonate(user_id)\n # Run the Task & return the result\n result = 
s3db[function](**kwargs)\n    db.commit()\n    return result\n\n# -----------------------------------------------------------------------------\ndef settings_task(taskname, user_id=None, **kwargs):\n    \"\"\"\n        Generic Task\n        - can be used to call any settings.tasks.taskname(**kwargs)\n        - saves having to create separate Tasks for many cases\n    \"\"\"\n    if user_id:\n        # Authenticate\n        auth.s3_impersonate(user_id)\n    task = settings.get_task(taskname)\n    if task:\n        # Run the Task & return the result\n        result = task(**kwargs)\n        db.commit()\n        return result\n\n# -----------------------------------------------------------------------------\ndef maintenance(period = \"daily\"):\n    \"\"\"\n        Run all maintenance tasks which should be done daily\n        - instantiates and calls the Daily() class defined in the template's\n          maintenance.py file - if it exists\n        - falls back to the default template's maintenance.py\n    \"\"\"\n\n    maintenance = None\n    result = \"NotImplementedError\"\n\n    templates = settings.get_template()\n    if templates != \"default\":\n        # Try to import maintenance routine from template\n        if not isinstance(templates, (tuple, list)):\n            templates = (templates,)\n        for template in templates[::-1]:\n            package = \"templates.%s\" % template\n            name = \"maintenance\"\n            try:\n                maintenance = getattr(__import__(package, fromlist=[name]), name)\n            except (ImportError, AttributeError):\n                pass\n            else:\n                break\n\n    if maintenance is None:\n        try:\n            # Fallback to default maintenance routine\n            from templates.default import maintenance\n        except ImportError:\n            pass\n\n    if maintenance is not None:\n        if period == \"daily\":\n            result = maintenance.Daily()()\n        db.commit()\n\n    return result\n\n# -----------------------------------------------------------------------------\n# GIS: always-enabled\n# -----------------------------------------------------------------------------\ndef gis_download_kml(record_id, filename, session_id_name, session_id,\n                     user_id=None):\n    \"\"\"\n        Download a KML file\n        - will normally be done Asynchronously if there is a worker alive\n\n        @param record_id: id of the record in db.gis_layer_kml\n        @param filename: name to save the file as\n        @param session_id_name: name of the session\n        @param session_id: id of the session\n        @param user_id: calling request's auth.user.id or None\n    \"\"\"\n    if user_id:\n        # Authenticate\n        auth.s3_impersonate(user_id)\n    # Run the Task & return the result\n    result = gis.download_kml(record_id, filename, session_id_name, session_id)\n    db.commit()\n    return result\n\n# -----------------------------------------------------------------------------\ndef gis_update_location_tree(feature, user_id=None):\n    \"\"\"\n        Update the Location Tree for a feature\n        - will normally be done Asynchronously if there is a worker alive\n\n        @param feature: the feature (in JSON format)\n        @param user_id: calling request's auth.user.id or None\n    \"\"\"\n    if user_id:\n        # Authenticate\n        auth.s3_impersonate(user_id)\n    # Run the Task & return the result\n    feature = json.loads(feature)\n    path = gis.update_location_tree(feature)\n    db.commit()\n    return path\n\n# -----------------------------------------------------------------------------\n# Org: always-enabled\n# -----------------------------------------------------------------------------\ndef org_site_check(site_id, user_id=None):\n    \"\"\" Check the Status for Sites \"\"\"\n\n    if user_id:\n        # Authenticate\n        auth.s3_impersonate(user_id)\n\n    # Check for Template-specific processing\n    customise = settings.get_org_site_check()\n    if customise:\n        customise(site_id)\n    
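# A hypothetical scheduling sketch (illustration only, not from this file): workers\n    # would typically pick this task up via something like\n    # current.s3task.schedule_task(\"org_site_check\", args=[site_id])\n    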
db.commit()\n\n# -----------------------------------------------------------------------------\ntasks = {\"dummy\": dummy,\n \"s3db_task\": s3db_task,\n \"settings_task\": settings_task,\n \"maintenance\": maintenance,\n \"gis_download_kml\": gis_download_kml,\n \"gis_update_location_tree\": gis_update_location_tree,\n \"org_site_check\": org_site_check,\n }\n\n# -----------------------------------------------------------------------------\n# Optional Modules\n# -----------------------------------------------------------------------------\nif has_module(\"cap\"):\n\n # -------------------------------------------------------------------------\n def cap_ftp_sync(user_id=None):\n \"\"\" Get all the FTP repositories and synchronize them \"\"\"\n\n if user_id:\n # Authenticate\n auth.s3_impersonate(user_id)\n\n rows = db(s3db.sync_repository.apitype == \"ftp\").select()\n\n if rows:\n sync = current.sync\n for row in rows:\n sync.synchronize(row)\n\n tasks[\"cap_ftp_sync\"] = cap_ftp_sync\n\n# -----------------------------------------------------------------------------\nif has_module(\"msg\"):\n\n # -------------------------------------------------------------------------\n def msg_process_outbox(contact_method, user_id=None):\n \"\"\"\n Process Outbox\n - will normally be done Asynchronously if there is a worker alive\n\n @param contact_method: one from S3Msg.MSG_CONTACT_OPTS\n @param user_id: calling request's auth.user.id or None\n \"\"\"\n if user_id:\n # Authenticate\n auth.s3_impersonate(user_id)\n\n # Run the Task & return the result\n result = msg.process_outbox(contact_method)\n db.commit()\n return result\n\n tasks[\"msg_process_outbox\"] = msg_process_outbox\n\n # -------------------------------------------------------------------------\n def msg_twitter_search(search_id, user_id=None):\n \"\"\"\n Perform a Search of Twitter\n - will normally be done Asynchronously if there is a worker alive\n\n @param search_id: one of s3db.msg_twitter_search.id\n @param user_id: calling request's auth.user.id or None\n\n \"\"\"\n if user_id:\n # Authenticate\n auth.s3_impersonate(user_id)\n\n # Run the Task & return the result\n result = msg.twitter_search(search_id)\n db.commit()\n return result\n\n tasks[\"msg_twitter_search\"] = msg_twitter_search\n\n # -------------------------------------------------------------------------\n def msg_process_keygraph(search_id, user_id=None):\n \"\"\"\n Process Twitter Search Results with KeyGraph\n - will normally be done Asynchronously if there is a worker alive\n\n @param search_id: one of s3db.msg_twitter_search.id\n @param user_id: calling request's auth.user.id or None\n \"\"\"\n if user_id:\n # Authenticate\n auth.s3_impersonate(user_id)\n\n # Run the Task & return the result\n result = msg.process_keygraph(search_id)\n db.commit()\n return result\n\n tasks[\"msg_process_keygraph\"] = msg_process_keygraph\n\n # -------------------------------------------------------------------------\n def msg_poll(tablename, channel_id, user_id=None):\n \"\"\"\n Poll an inbound channel\n \"\"\"\n if user_id:\n auth.s3_impersonate(user_id)\n\n # Run the Task & return the result\n result = msg.poll(tablename, channel_id)\n db.commit()\n return result\n\n tasks[\"msg_poll\"] = msg_poll\n\n # -----------------------------------------------------------------------------\n def msg_parse(channel_id, function_name, user_id=None):\n \"\"\"\n Parse Messages coming in from a Source Channel\n \"\"\"\n if user_id:\n auth.s3_impersonate(user_id)\n\n # Run the Task & return the 
result\n        result = msg.parse(channel_id, function_name)\n        db.commit()\n        return result\n\n    tasks[\"msg_parse\"] = msg_parse\n\n    # -------------------------------------------------------------------------\n    def msg_gcm(title, uri, message, registration_ids, user_id=None):\n        \"\"\" Push the data relating to google cloud messaging server \"\"\"\n\n        if user_id:\n            # Authenticate\n            auth.s3_impersonate(user_id)\n\n        msg.gcm_push(title, uri, message, eval(registration_ids))\n\n    tasks[\"msg_gcm\"] = msg_gcm\n\n# -----------------------------------------------------------------------------\nif has_module(\"req\"):\n\n    def req_add_from_template(req_id, user_id=None):\n        \"\"\"\n            Add a Request from template\n        \"\"\"\n        if user_id:\n            # Authenticate\n            auth.s3_impersonate(user_id)\n\n        # Run the Task & return the result\n        result = s3db.req_add_from_template(req_id)\n        db.commit()\n        return result\n\n    tasks[\"req_add_from_template\"] = req_add_from_template\n\n# -----------------------------------------------------------------------------\nif has_module(\"stats\"):\n\n    def stats_demographic_update_aggregates(records = None,\n                                            user_id = None,\n                                            ):\n        \"\"\"\n            Update the stats_demographic_aggregate table for the given\n            stats_demographic_data record(s)\n\n            @param records: JSON of Rows of stats_demographic_data records to\n                            update aggregates for\n            @param user_id: calling request's auth.user.id or None\n        \"\"\"\n        if user_id:\n            # Authenticate\n            auth.s3_impersonate(user_id)\n\n        # Run the Task & return the result\n        result = s3db.stats_demographic_update_aggregates(records)\n        db.commit()\n        return result\n\n    tasks[\"stats_demographic_update_aggregates\"] = stats_demographic_update_aggregates\n\n    # -------------------------------------------------------------------------\n    def stats_demographic_update_location_aggregate(location_level,\n                                                    root_location_id,\n                                                    parameter_id,\n                                                    start_date,\n                                                    end_date,\n                                                    user_id = None,\n                                                    ):\n        \"\"\"\n            Update the stats_demographic_aggregate table for the given location and parameter\n            - called from within stats_demographic_update_aggregates\n\n            @param location_level: gis level at which the data needs to be accumulated\n            @param root_location_id: id of the location\n            @param parameter_id: parameter for which the stats are being updated\n            @param start_date: start date of the period in question\n            @param end_date: end date of the period in question\n            @param user_id: calling request's auth.user.id or None\n        \"\"\"\n        if user_id:\n            # Authenticate\n            auth.s3_impersonate(user_id)\n\n        # Run the Task & return the result\n        result = s3db.stats_demographic_update_location_aggregate(location_level,\n                                                                  root_location_id,\n                                                                  parameter_id,\n                                                                  start_date,\n                                                                  end_date,\n                                                                  )\n        db.commit()\n        return result\n\n    tasks[\"stats_demographic_update_location_aggregate\"] = stats_demographic_update_location_aggregate\n\n    # -------------------------------------------------------------------------\n    # Disease: Depends on Stats\n    # -------------------------------------------------------------------------\n    if has_module(\"disease\"):\n\n        def disease_stats_update_aggregates(records = None,\n                                            all = False,\n                                            user_id = None,\n                                            ):\n            \"\"\"\n                Update the disease_stats_aggregate table for the given\n                disease_stats_data record(s)\n\n                @param records: JSON of Rows of disease_stats_data records to\n                                update aggregates for\n                @param user_id: calling request's auth.user.id or None\n            \"\"\"\n            if user_id:\n                # Authenticate\n                auth.s3_impersonate(user_id)\n\n            # Run the Task & return the result\n            result = s3db.disease_stats_update_aggregates(records, all)\n            
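# NB: \"all\" here is the task parameter (it shadows the builtin) and simply\n            # forwards the recalculate-everything flag to the s3db implementation\n            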
db.commit()\n return result\n\n tasks[\"disease_stats_update_aggregates\"] = disease_stats_update_aggregates\n\n # ---------------------------------------------------------------------\n def disease_stats_update_location_aggregates(location_id,\n children,\n parameter_id,\n dates,\n user_id = None,\n ):\n \"\"\"\n Update the disease_stats_aggregate table for the given location and parameter\n - called from within disease_stats_update_aggregates\n\n @param location_id: location to aggregate at\n @param children: locations to aggregate from\n @param parameter_id: parameter to aggregate\n @param dates: dates to aggregate for\n @param user_id: calling request's auth.user.id or None\n \"\"\"\n if user_id:\n # Authenticate\n auth.s3_impersonate(user_id)\n\n # Run the Task & return the result\n result = s3db.disease_stats_update_location_aggregates(location_id,\n children,\n parameter_id,\n dates,\n )\n db.commit()\n return result\n\n tasks[\"disease_stats_update_location_aggregates\"] = disease_stats_update_location_aggregates\n\n# -----------------------------------------------------------------------------\nif has_module(\"sync\"):\n\n def sync_synchronize(repository_id, user_id=None, manual=False):\n \"\"\"\n Run all tasks for a repository, to be called from scheduler\n \"\"\"\n if user_id:\n # Authenticate\n auth.s3_impersonate(user_id)\n\n rtable = s3db.sync_repository\n query = (rtable.deleted != True) & \\\n (rtable.id == repository_id)\n repository = db(query).select(limitby=(0, 1)).first()\n if repository:\n sync = s3base.S3Sync()\n status = sync.get_status()\n if status.running:\n message = \"Synchronization already active - skipping run\"\n sync.log.write(repository_id=repository.id,\n resource_name=None,\n transmission=None,\n mode=None,\n action=\"check\",\n remote=False,\n result=sync.log.ERROR,\n message=message)\n db.commit()\n return sync.log.ERROR\n sync.set_status(running=True, manual=manual)\n try:\n sync.synchronize(repository)\n finally:\n sync.set_status(running=False, manual=False)\n db.commit()\n return s3base.S3SyncLog.SUCCESS\n\n tasks[\"sync_synchronize\"] = sync_synchronize\n\n# -----------------------------------------------------------------------------\n# Instantiate Scheduler instance with the list of tasks\ns3.tasks = tasks\ns3task = s3base.S3Task()\ncurrent.s3task = s3task\n\n# -----------------------------------------------------------------------------\n# Reusable field for scheduler task links\nscheduler_task_id = FieldTemplate(\"scheduler_task_id\",\n \"reference %s\" % s3base.S3Task.TASK_TABLENAME,\n ondelete = \"CASCADE\",\n )\ns3.scheduler_task_id = scheduler_task_id\n\n# END =========================================================================\n","sub_path":"models/tasks.py","file_name":"tasks.py","file_ext":"py","file_size_in_byte":17854,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"375948308","text":"import tkinter as tk\nimport tkinter.ttk as ttk\nimport random\nimport names_list\nimport copypaste as cp\n\n\nclass Main(tk.Tk):\n\n def __init__(self, *args, **kwargs):\n tk.Tk.__init__(self, *args, **kwargs)\n tk.Tk.title(self, \"Name Generator\")\n w = 300\n h = 130\n tk.Tk.geometry(self, f\"{w}x{h}\")\n tk.Tk.resizable(self, False, False)\n\n self.init_widgets()\n\n def init_widgets(self):\n self.main_frame = ttk.Frame(self)\n self.main_frame.pack(padx=X, pady=Y, expand=1, fill=tk.BOTH)\n\n self.lab_output = ttk.Label(self.main_frame, font=20)\n self.lab_output.grid(row=0, column=0, 
sticky=tk.W, columnspan=3)\n\n        self.var_ch = tk.StringVar()\n        self.var_ch.set(\"male\")\n        cb_sex_choice_male = ttk.Radiobutton(self.main_frame, text=\"Male\", variable=self.var_ch, value=\"male\")\n        cb_sex_choice_male.grid(row=1, column=0, sticky=tk.W)\n\n        cb_sex_choice_female = ttk.Radiobutton(self.main_frame, text=\"Female\", variable=self.var_ch, value=\"female\")\n        cb_sex_choice_female.grid(row=2, column=0, sticky=tk.W)\n\n        btn_generate = ttk.Button(self.main_frame, text=\"Generate\", command=self.generate_name)\n        btn_generate.grid(row=3, column=0, sticky=tk.W)\n\n        self.lab_status_bar = ttk.Label(self, text=\"Press Generate\", relief=tk.SUNKEN)\n        self.lab_status_bar.pack(side=tk.LEFT, expand=1, fill=tk.X)\n\n        btn_copy = ttk.Button(self.main_frame, text=\"Copy\", command=self.copy_output)\n        btn_copy.grid(row=3, column=1, sticky=tk.W)\n\n    def generate_name(self):\n        if self.var_ch.get() == \"male\":\n            self.lab_output.configure(text=f\"{random.choice(names_list.name_male_list)} \"\n                                           f\"{random.choice(names_list.surname_male_list)}\")\n        else:\n            self.lab_output.configure(text=f\"{random.choice(names_list.name_female_list)} \"\n                                           f\"{random.choice(names_list.surname_female_list)}\")\n\n        self.lab_status_bar.configure(text=\"Press Copy\")\n\n    def copy_output(self):\n        cp.copy(self.lab_output.cget('text'))\n        self.lab_status_bar.configure(text=\"Name copied to clipboard\")\n\n\nif __name__ == '__main__':\n    X = 20\n    Y = 10\n    app = Main()\n    app.mainloop()\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2292,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"250107072","text":"'''test'''\r\nimport math\r\n\r\n\"\"\"Sum 1+...+100 with a while loop\"\"\"\r\ndef add():\r\n    a = 100\r\n    sum = 0\r\n    while (a):\r\n        sum = sum + a\r\n        a = a - 1\r\n    return sum\r\n\r\n\"\"\"Determine whether a year is a leap year\"\"\"\r\ndef leapyear(year):\r\n    if year%4 == 0 and (year%100 != 0 or year%400 == 0):\r\n        print(year,\"is leap year.\")\r\n    else:\r\n        print(year,\"is not leap year.\")\r\n\r\n\"\"\"Print a pyramid\"\"\"\r\ndef showGraph():\r\n    a = 9\r\n    b = 1\r\n    c = 4\r\n    while(a):\r\n        if(b <= 5):\r\n            print(\"*\"*b)\r\n            b=b+1\r\n        if b>5 and c > 0:\r\n            print(\"*\" * c)\r\n            c = c - 1\r\n        a = a-1\r\n\r\n'''Compare two numbers and print them in ascending order'''\r\ndef sort():\r\n    a = input(\"please input number1:\")\r\n    b = input(\"please input number2:\")\r\n    if(a>=b):\r\n        print(b,a)\r\n    else:\r\n        print(a,b)\r\n\r\n'''Given the digit set {1,2,3,4}, count the three-digit numbers satisfying:\r\nCondition 1: the resulting three-digit numbers are mutually distinct\r\nCondition 2: no digit repeats within a number'''\r\ndef a():\r\n    count = 0\r\n    list = [1,2,3,4]\r\n    for i in list:\r\n        for j in list:\r\n            for k in list:\r\n                if i!=j and j!=k and k!=i:\r\n                    print(i*100+j*10+k)\r\n                    count = count+1\r\n    print(\"Number of qualifying values:\",count)\r\n\r\n\r\ndef findnum():\r\n    x=1\r\n    while(x):\r\n        y = int(math.sqrt(x+100))\r\n        z = int(math.sqrt(x+268))\r\n        if y*y-100==x and z*z-268==x:\r\n            print(x)\r\n            break\r\n        x=x+1\r\n        # print(x)\r\n\r\ndef findnum_17_max():\r\n    x = 1\r\n    y = 0\r\n    while(x):\r\n        if x%17==0 and x>y:\r\n            y = x\r\n        x = x+1\r\n        if(x>200):\r\n            print(y)\r\n            break\r\n\r\ndef factorization():\r\n    a = int(input(\"please input number:\"))\r\n    # print(id(a))\r\n    b = a\r\n    j=\"\"\r\n    for i in range(2,a):\r\n        # print(id(a))\r\n        while(i!=a):\r\n            if(a%i==0):\r\n                a = a//i\r\n                # print(id(a),\"aaaa\")\r\n                j = j+str(i)+\"*\"\r\n            else:\r\n                # j = j + str(i) + \"*\"\r\n                break\r\n        if (a % i == 0):\r\n            a = a // i\r\n            # print(id(a), \"aaaa\")\r\n            j = j + str(i) + \"*\"\r\n    print(b, \"=\", j[:-1])\r\n\r\ndef ball(high,num):\r\n    for i in range(1,num+1):\r\n        S = (3-0.5**(i-2))*high\r\n        
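# Closed form from the geometric series: S = high + 2*high*(1/2 + ... + (1/2)**(i-1))\r\n        #                                          = (3 - (1/2)**(i-2))*high\r\n        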
print(\"第{}次弹起时小球所经过的路程为{}m\".format(num,S))\r\n H = 0.5**num*high\r\n print(\"小球第{}次反弹的高度为{}m\".format(num, H))\r\n\r\n\r\ndef b(n):\r\n for j in range(2, n):\r\n if (n % j == 0):\r\n return 0\r\n else:\r\n return 1\r\ndef c():\r\n for n in range(100,201):\r\n if(b(n)):\r\n print(n)\r\ndef d():\r\n dict = {'a':1,'b':2,'c':3}\r\n for i in dict:\r\n print(\"key is {}, value is{}\".format(i, dict[i]))\r\n\r\nclass test():\r\n def __init__(self,n):\r\n self.a = n\r\n def __add__(self):\r\n return self.a+1\r\n\r\nif __name__ == \"__main__\":\r\n # print(add()) #计算1+...+100\r\n\r\n # leapyear(int(input(\"请输入年份:\"))) #判别闰年\r\n\r\n # showGraph() #金字塔\r\n\r\n # sort() #比较两个数大小并升序打印\r\n\r\n # a()\r\n\r\n # findnum() #x+100=y*y and x+268 =z*z\r\n\r\n # findnum_17_max()\r\n\r\n # factorization()\r\n\r\n # ball(100, 11)\r\n\r\n # c()\r\n\r\n d()\r\n\r\n # with open(\"D:\\\\aaa.txt\", \"w\") as f:\r\n # f.write(\"123\")\r\n\r\n\r\n a=test(1)\r\n print(a.add())","sub_path":"1.Python+线性代数/First/a.py","file_name":"a.py","file_ext":"py","file_size_in_byte":3441,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"507473792","text":"#!/usr/bin/python3\n\n# date : 22 MAY 2018\n# author : kneerunjun@gmail.com\n# this module helps us measure the temprature using the LM35 sensor\nimport RPi.GPIO as GPIO\nimport Adafruit_ADS1x15\nimport time, threading\nfrom exithandling import GracefulExit, Interruption\n\nclass LightSensor():\n '''This is a just a sugar coating around the adafruit's ADS1115\n it is more suited to get the temperature\n '''\n def __init__(self,*args, **kwargs):\n self.adc = Adafruit_ADS1x15.ADS1115()\n self.gainfactor=(8, 0.512) # we are hoping the temperature would never cross 51.2oC\n self.channel =2\n self._darkvolts =0.0\n self._lightvolts=self.gainfactor[1]\n # since we are attaching the output of LM to channel 3 of the ADS\n def measure(self):\n volts = round((self.gainfactor[1]*self.adc.read_adc(self.channel, gain=self.gainfactor[0]))/32768,4)\n light = volts/self._lightvolts\n return round(light,2)*100\ndef light_loop(ke):\n '''This is a just a sugar coating around the adafruit's ADS1115\n it is more suited to get the temperature\n '''\n lgt = LightSensor()\n while not ke.wait(2):\n print(lgt.measure())\nif __name__ == \"__main__\":\n try:\n ge= GracefulExit()\n ke= threading.Event()\n task = threading.Thread(target=light_loop, args=(ke,))\n task.start()\n task.join()\n except Interruption as ie:\n ke.set()\n time.sleep(1)\n print(\"now closing the light measurement\")\n","sub_path":"device/ldr.py","file_name":"ldr.py","file_ext":"py","file_size_in_byte":1513,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"181993557","text":"\"\"\"\nThis file contains an implementation of the top hat version of the Jensen model\npresented in N. O. Jensen \"A note on wind generator interaction\" 1983\n\nCreated by: Jared J. Thomas\nDate: 2015\n\n\"\"\"\n\nimport numpy as np\n\n\ndef jensen_topHat(Uinf, rotorDiameter, axialInd, turbineX, turbineY, k, WindDirDeg, Cp, airDensity):\n \"\"\"\n :param Uinf: float; inflow wind velocity\n :param rotorDiameter: numpy array; diameter of each turbine\n :param axialInd: numpy array; axial induction of each turbine\n :param turbineX: numpy array; X position of each turbine\n :param turbineY: numpy array; Y position of each turbine\n :param k: float; wake decay constant\n :param WindDirDeg: float; wind direction FROM in deg. 
CW from north (as in meteorological data)\n    :return: numpy array; effective wind speed at each turbine\n    \"\"\"\n\n    # adjust reference frame to wind direction\n    turbineXw, turbineYw = referenceFrameConversion(turbineX, turbineY, WindDirDeg)\n\n    # overlap calculations as per Jun et. al 2012\n    OLRatio = wakeOverlapJensen(turbineXw, turbineYw, rotorDiameter, k)\n\n    # Single wake effective windspeed calculations as per N.O. Jensen 1983\n    UTilde = velocityDeficitJensen(Uinf, axialInd, turbineXw, rotorDiameter, k, OLRatio)\n\n    # Wake Combination as per Jun et. al 2012 (I believe this is essentially what is done in WAsP)\n    wt_velocity = wakeCombinationSumSquares(Uinf, OLRatio, UTilde)\n\n    # simple power calculations\n    wt_power = powerCalc(wt_velocity, Cp, rotorDiameter, airDensity)\n\n    return wt_power, wt_velocity\n\n\ndef referenceFrameConversion(turbineX, turbineY, WindDirDeg):\n    \"\"\" adjust turbine positions to wind direction reference frame \"\"\"\n\n    WindDirDeg = 270. - WindDirDeg\n    if WindDirDeg < 0.:\n        WindDirDeg += 360.\n    WindDirRad = np.pi*WindDirDeg/180.0 # inflow wind direction in radians\n    turbineXw = turbineX*np.cos(-WindDirRad)-turbineY*np.sin(-WindDirRad)\n    turbineYw = turbineX*np.sin(-WindDirRad)+turbineY*np.cos(-WindDirRad)\n\n    return turbineXw, turbineYw\n\n\ndef wakeOverlapJensen(turbineXw, turbineYw, rotorDiameter, k):\n    \"\"\" overlap calculations as per Jun et. al 2012 and WAsP \"\"\"\n\n    nTurbines = np.size(turbineXw)\n    Rr = rotorDiameter/2.0\n    OLRatio = np.zeros([nTurbines, nTurbines]) # Overlap ratio\n\n    for turbI in range(0, nTurbines):\n        for turb in range(0, nTurbines):\n            dx = turbineXw[turbI] - turbineXw[turb] # downwind turbine separation\n            dy = abs(turbineYw[turbI] - turbineYw[turb]) # crosswind turbine separation\n            if turb != turbI and dx > 0:\n                Rw = Rr[turb] + k*dx\n                OLArea = 0.0\n                if dy <= Rw - Rr[turbI]:\n                    OLArea = np.pi*Rr[turbI]**2\n\n                if Rw - Rr[turbI] < dy < Rw + Rr[turbI]:\n\n                    # print Rr[turb], dy, Rw\n\n                    # circle-circle overlap uses the waked rotor radius Rr[turbI]\n                    a = (Rr[turbI]**2+dy**2-Rw**2)/(2.0*dy*Rr[turbI])\n                    b = (Rw**2+dy**2-Rr[turbI]**2)/(2.0*dy*Rw)\n\n                    alpha1 = 2.0*np.arccos(a)\n                    alpha2 = 2.0*np.arccos(b)\n                    # print alpha1, alpha2\n                    OLArea = 0.5*(Rr[turbI]**2)*(alpha1 - np.sin(alpha1)) + 0.5*(Rw**2)*(alpha2 - np.sin(alpha2))\n\n                Ar = np.pi*Rr[turbI]**2 # rotor area of waked rotor\n                OLRatio[turb, turbI] = OLArea/Ar # ratio of area of wake-i and turb-j to rotor area of turb-j\n\n    return OLRatio\n\n\ndef velocityDeficitJensen(Uinf, axialInd, turbineXw, rotorDiameter, k, OLRatio):\n    \"\"\" Single wake effective windspeed calculations as per N.O. Jensen 1983 \"\"\"\n\n    nTurbines = np.size(turbineXw) # number of turbines in the farm\n    Rr = rotorDiameter/2.0 # radii of the rotors\n    UTilde = np.zeros([nTurbines, nTurbines]) # UTilde[i, j] of turb-i at turb-j\n\n    for turbI in range(0, nTurbines):\n        for turb in range(0, nTurbines):\n            if turb != turbI and OLRatio[turb, turbI] != 0:\n                dx = turbineXw[turbI] - turbineXw[turb]\n                UTilde[turb, turbI] = Uinf*(1.0-2.0*axialInd[turb]*(Rr[turb]/(Rr[turb]+k*dx))**2)\n\n    return UTilde\n\n\ndef wakeCombinationSumSquares(Uinf, OLRatio, UTilde):\n    \"\"\" Wake Combination as per Jun et. 
al 2012 (I believe this is essentially what is done in WAsP) \"\"\"\n\n nTurbines = np.size(OLRatio[0])\n wt_velocity = np.zeros(nTurbines) # wt_velocity[i, j] of turb-i at turb-j\n\n for turbI in range(0, nTurbines):\n sumterm = 0.0\n for turb in range(0, nTurbines):\n if turb != turbI and OLRatio[turb, turbI] != 0:\n sumterm += (OLRatio[turb, turbI]*(1-UTilde[turb, turbI]/Uinf))**2\n wt_velocity[turbI] = Uinf*(1.0-np.sqrt(sumterm))\n\n return wt_velocity\n\n\ndef powerCalc(wt_velocity, Cp, rotorDiameter, airDensity):\n\n Ar = 0.25*np.pi*rotorDiameter**2 # Rotor area of each turbine\n wt_power = 0.5*airDensity*Ar*Cp*wt_velocity**3 # power of each turbine\n\n return wt_power","sub_path":"wake-model-JensenTopHat/jensen_topHat.py","file_name":"jensen_topHat.py","file_ext":"py","file_size_in_byte":5072,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"528334279","text":"__author__ = 'Abenk'\n\nimport tweepy\nimport re\n\nauth = tweepy.OAuthHandler('QQRLbKUnsy01BzT7vOOQ3gHg8', 'ipkWaYalXjpRnqiCsobj8nMVss5wrnDvVspW1QInflyyjf7Pe7')\nauth.set_access_token('193535811-7NTh8qhHArKnOC0I9uE4XB66KBey1u2MsJp0c5vZ', 'EERRbSYvdUpWt77usLDgwAG9YI1YNankbRTahPRymQz6j')\n\napi = tweepy.API(auth, wait_on_rate_limit=True, wait_on_rate_limit_notify=True)\n\ntweets = []\ntweetsTgl = []\ntweetsGeo = []\nusername = 'HillaryClinton'\n\nuser = api.get_user(username)\n\n# print user.id_str\n# print user.description\n# print user.created_at\n# print user.name\n\n# c = tweepy.Cursor(api.search, q = 'banjir')\n\nfor status in tweepy.Cursor(api.user_timeline, id=username).items(1000):\n # print status.id_str\n\n # status.text.encode('ASCII','ignore')\n tweet = status.text.encode(\"ASCII\",'ignore')\n tweetTgl= str(status.created_at)\n print [tweet, tweetTgl]\n # tweetGeo = str(status.coordinates['coordinates']) if status.coordinates is not None else 'unknown'\n # twitGeo = str(status.geo)\n # if re.search(r'(haha*)+',tweet):\n tweets.append(tweet)\n tweetsTgl.append(tweetTgl)\n # tweetsGeo.append(tweetGeo)\n # if re.search(r'^@[^ ]+', tweet):\n # tweets.append(tweet)\n # tweetsTgl.append(tweetTgl)\n # print 'ini tweet reply'\n # elif re.search(r'^(RT |\")@[^ ]+:', tweet):\n # tweets.append(tweet)\n # tweetsTgl.append(tweetTgl)\n # print 'ini retweet'\n # hahahahhhahhhahhaha\n # (hah*)+\n\n # Tweet reply @jokowi: ...\n # RT @jokowi: ... 
/ \"@jokowi: ...\"\n\n\n\nimport pandas\n\ndata_to_save = pandas.DataFrame({'TWEET': tweets, 'Tanggal': tweetsTgl})\nexcel_writer = pandas.ExcelWriter('contoh1.xlsx', engine='xlsxwriter',options={'encoding': 'utf-8'})\ndata_to_save.to_excel(excel_writer, sheet_name='Sheet1')\nexcel_writer.close()\n\n","sub_path":"testwiter.py","file_name":"testwiter.py","file_ext":"py","file_size_in_byte":1811,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"106251247","text":"import torch\nimport torch.nn as nn\nimport numpy as np\nimport time\nfrom rl_plots import RLPlots\nfrom noise import OUActionNoise\nfrom replay_mem import ReplayBuffer\nfrom ddpg_models import ActorNet, CriticNet\nfrom PIL import Image\n\n# Agent class that contains actor and critic models that can train on and play unity games\nclass DDPGAgent(object):\n def __init__(self, alpha, beta, tau, gamma, state_space, l1_size,\n l2_size, l3_size, l4_size, action_space, env, brain_name,\n multibrain, version, mem_capacity=1e6, batch_size=128, \n multiagent=False, n_agents=None, eval=False):\n\n # Initialize memory\n self.batch_size = batch_size\n self.memory = ReplayBuffer(mem_capacity)\n \n # Initialize noise\n # In case of a multiagent environment, create a separate noise object for each agent\n self.noise = [OUActionNoise(np.zeros(action_space)) for i in range(n_agents)] if multiagent else \\\n OUActionNoise(np.zeros(action_space))\n\n # Setup device used for torch computations\n self.device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')\n\n # Create actor critic and target networks\n self.actor = ActorNet(alpha, state_space, l1_size, l2_size, l3_size, \n l4_size, action_space, name='actor_' + version + '_ddpg_model').to(self.device)\n self.target_actor = ActorNet(alpha, state_space, l1_size, l2_size, l3_size, \n l4_size, action_space).to(self.device)\n\n self.critic = CriticNet(beta, state_space, l1_size, l2_size, l3_size, \n l4_size, action_space, name='critic_' + version + '_ddpg_model').to(self.device)\n self.target_critic = CriticNet(beta, state_space, l1_size, l2_size, l3_size, \n l4_size, action_space).to(self.device)\n \n # Initialize target nets to be identical to actor and critic networks\n self.init_networks()\n\n # Target networks set to eval, since they are not \n # trained but simply updated with the target_network_update function\n self.target_actor.eval()\n self.target_critic.eval()\n\n # Set global parameters\n self.gamma = gamma\n self.env = env\n self.tau = tau\n self.eval = eval\n self.state_space = state_space\n self.action_space = action_space\n self.multiagent = multiagent\n self.multibrain = multibrain\n self.brain_name = brain_name\n self.n_agents = n_agents if self.multiagent else None\n\n # Initialize plotter for showing live training graphs and saving them\n self.plotter = RLPlots('ddpg_training')\n\n # Makes target network params identical to actor and critic network params \n def init_networks(self):\n self.target_actor.load_state_dict(self.actor.state_dict())\n self.target_critic.load_state_dict(self.critic.state_dict())\n\n # Saves models\n def save_models(self):\n self.actor.save_model()\n self.critic.save_model()\n\n # Exports an onnx model of the actor network\n def save_onnx_model(self):\n # Create a dummy input to pass through the model and export the current actor model\n dummy_input = torch.autograd.Variable(torch.randn(1, 1, 1, self.state_space)).to(self.device)\n torch.onnx.export(self.actor, dummy_input, 
'./models/ddpg_model.onnx', verbose=True, \n input_names=['vector_observation'], output_names=['action'])\n print('ONNX model saved to ./models/ddpg_model.onnx')\n\n # Passes the state through the actor network to get actions\n def choose_action(self, state):\n # Assure actor is set to eval mode as we do not want to train when taking actions\n self.actor.eval()\n\n # Convert state to tensor and send to device\n state = torch.tensor(state).float().to(self.device)\n\n # Pass state through actor network to get actions\n actions = self.actor(state)\n\n # In case of multiagent environment, stack a noise vector\n # to have the same shape as the actions matrix (n_agents x num_actions)\n if self.multiagent:\n noise = np.vstack(tuple(ounoise() for ounoise in self.noise))\n noise = torch.tensor(noise).float().to(self.device)\n else:\n noise = torch.tensor(self.noise()).float().to(self.device)\n\n # Add noise to actions to get final mu action values\n actions = actions + noise\n\n # Send to cpu, detach from computation graph and convert to list\n # for the gym environment\n return actions.cpu().detach().numpy()\n\n # Stores transitions (single or multiple depending on multiagent environment)\n def store_transitions(self, state, action, reward, state_, done):\n \n if self.multiagent:\n for i in range(self.n_agents):\n self.memory.add_transition(state[i], action[i], reward[i], state_[i], int(done[i]))\n else:\n self.memory.add_transition(state, action, reward, state_, int(done))\n\n # This function samples a batch of transitions from memory, and \n # puts them onto the device defined by self.device\n def sample_transitions(self):\n batch = self.memory.sample_batch(self.batch_size)\n\n # 'reward' and 'done' are unsequeezed to get the right dimensions, (batch_size, 1) instead of (batch_size)\n state = torch.tensor(batch.state).float().to(self.device)\n action = torch.tensor(batch.action).float().to(self.device)\n reward = torch.tensor(batch.reward).unsqueeze(dim=1).float().to(self.device)\n state_ = torch.tensor(batch.state_).float().to(self.device)\n done = torch.tensor(batch.done).unsqueeze(dim=1).float().to(self.device)\n\n return state, action, reward, state_, done\n\n def learn(self):\n # Only start learning when there's enough transitions stored for a full batch\n if self.memory.pointer < self.batch_size:\n return\n\n # Sample random batch of transitions\n state, action, reward, state_, done = self.sample_transitions()\n\n # We first handle the critic update, then the actor update\n # To evaluate the critic net, we set actor to eval, critic to train mode\n self.actor.eval()\n self.critic.train()\n\n # Order of operation is: \n # 1) zero_grad > 2) forward pass > 3) loss > 4) backward pass > 5) optimizer step\n \n # 1) Zero grad on Critic\n self.critic.optim.zero_grad()\n\n # 2) Forward pass \n # Calculate critic predicted and target values\n critic_pred = self.critic(state, action)\n critic_target = reward + self.gamma * self.target_critic(state_, self.target_actor(state_)) * (1 - done)\n \n # 3) Calculate loss\n # Calulcate critic loss, then perform backprop\n critic_loss = self.critic.loss(critic_pred, critic_target)\n \n # 4) Backward pass\n critic_loss.backward()\n \n # 5) Optimizer step\n self.critic.optim.step()\n\n # Now switch train and eval mode again on actor and critic to\n # do the actor update\n self.actor.train()\n self.critic.eval()\n\n # 1) Zero grad on actor\n self.actor.optim.zero_grad()\n \n # 2) Forward pass\n # Calculate actor loss and perform backprop\n mu = self.actor(state)\n 
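# Deterministic policy gradient: maximise E[Q(s, mu(s))] by minimising its\n        # negative (standard DDPG actor update, Lillicrap et al. 2015)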
\n # 3) Calculate loss\n actor_loss = torch.mean(-self.critic(state, mu))\n \n # 4) Backward pass\n actor_loss.backward()\n \n # 5) Optimizer step\n self.actor.optim.step()\n\n # Set actor back to eval mode\n self.actor.eval()\n\n # Update the target networks\n self.update_target_networks()\n\n # Steps through the environment with the given action and returns the next state, reward, done flag \n # whether or not the max_steps has been reached. Also returns visual observation for saving gifs of episodes\n def env_step(self, actions):\n vis_obs = None\n\n if self.multibrain:\n # In case of multibrain, we provide the actions as a dictionary, with the brain\n # name being the key, and actions as the value\n actions = {\n self.brain_name[0]: actions[1:],\n self.brain_name[1]: actions[0] \n }\n env_info = self.env.step(actions)\n env_info_b0 = env_info[self.brain_name[1]]\n env_info_b1 = env_info[self.brain_name[0]]\n\n # Stack and concatenate the next state, rewards, done and max reached flags for the different brains\n state_ = np.vstack((env_info_b0.vector_observations, env_info_b1.vector_observations))\n rewards = env_info_b0.rewards + env_info_b1.rewards\n done = env_info_b0.local_done + env_info_b1.local_done\n max_reached = env_info_b0.max_reached + env_info_b1.max_reached\n\n # Collect visual observation from 'WalkerVis' brain\n vis_obs = env_info_b0.visual_observations[0][0]\n else:\n # In case of single brain, simply collect next state, rewards, done and max reached flags\n env_info = self.env.step(actions)[self.brain_name]\n state_ = env_info.vector_observations\n rewards = env_info.rewards\n done = env_info.local_done\n max_reached = env_info.max_reached\n\n return state_, rewards, done, max_reached, vis_obs\n\n # Trains either a multi or single agent environment\n # eval is whether or not to simply play the game or to actually train the agent\n def train(self, num_eps):\n if self.eval:\n self.actor.eval()\n self.critic.eval()\n\n if self.multiagent:\n self.train_multi(num_eps)\n else:\n self.train_single(num_eps)\n\n # Training loop for environment with single agent\n def train_single(self, num_eps):\n \n # Keep track of scores\n scores = []\n avg_ph_scores = []\n\n # Save interval\n save_interval = 100\n\n # Plot interval\n plot_interval = 200\n\n # The minimum score to save a gif\n gif_score_threshold = 100\n\n # Play number of episodes\n for ep_num in range(num_eps):\n\n done = False\n state = self.env.reset()[self.brain_name].vector_observations\n \n ep_score = 0\n\n # Initialize frame list for saving gifs\n frames = []\n\n # Keep playing until done\n while not done:\n # Pick action using actor network\n actions = self.choose_action(state)\n # Take action and observe next state and reward\n state_, rewards, done, _, vis_obs = self.env_step(actions)\n # Store transition into memory\n self.store_transitions(state, actions, rewards, state_, done)\n # Sample batch of transitions and train networks (if eval mode is off)\n if not self.eval:\n self.learn()\n else:\n frames.append(vis_obs)\n\n ep_score += rewards\n\n # Set next state to now be the current state\n state = state_\n\n print(f'Episode: {ep_num}\\n\\tScore: {ep_score}\\n\\tAvg past 100 score: {np.mean(scores[-100:])}')\n\n scores.append(ep_score)\n avg_ph_scores.append(np.mean(scores[-100:]))\n\n # Reset noise each episode\n self.reset_noise()\n\n # Save models every save_interval steps\n if ep_num % save_interval == 0:\n self.save_models()\n\n # Plots average rewards every plot_interval steps\n if ep_num % plot_interval == 
0:\n self.plot_rewards(avg_ph_scores)\n\n # Record a gif if it's larger than gif score threshold\n # or if it's larger than previously achieved score\n if self.eval and ep_score > gif_score_threshold:\n self.save_gif(frames)\n gif_score_threshold = ep_score\n \n # Save the final plot\n self.plot_rewards(avg_ph_scores, save=True)\n\n # Close environment\n self.env.close()\n\n # Training loop for environment with multiple agents\n def train_multi(self, num_eps):\n \n # Keep track of scores\n scores = []\n avg_ph_scores = []\n\n # Save interval\n save_interval = 100\n\n # Plot interval\n plot_interval = 250\n\n # The minimum score to save a gif\n gif_score_threshold = 100\n\n # Play number of episodes\n for ep_num in range(num_eps):\n done = [False for i in range(self.n_agents)]\n # In case of multibrain environments, stack the initial state vectors of the brains\n if self.multibrain:\n env_init = self.env.reset()\n state_one = env_init[self.brain_name[0]].vector_observations\n state = env_init[self.brain_name[1]].vector_observations\n state = np.vstack((state, state_one))\n else:\n state = self.env.reset()[self.brain_name].vector_observations\n\n ep_score = 0\n\n # Initialize frame list for saving gifs\n frames = []\n\n # Keep playing until one of the agents is done\n while True not in done:\n # Pick action using actor network\n actions = self.choose_action(state)\n # Take action and observe next state and reward\n state_, rewards, done, _, vis_obs = self.env_step(actions)\n # Store transition into memory\n self.store_transitions(state, actions, rewards, state_, done)\n # Sample batch of transitions and train networks (if eval mode is off)\n if not self.eval:\n self.learn()\n else:\n frames.append(vis_obs)\n \n ep_score += np.mean(rewards)\n\n # Set next state to now be the current state\n state = state_\n\n print(f'Episode: {ep_num}\\n\\tScore: {ep_score}\\n\\tAvg past 100 score: {np.mean(scores[-100:])}')\n\n scores.append(ep_score)\n avg_ph_scores.append(np.mean(scores[-100:]))\n\n # Reset noise each episode\n self.reset_noise()\n\n # Save models every save_interval steps\n if ep_num % save_interval == 0 and self.eval is not True:\n self.save_models()\n\n # Plots average rewards every plot_interval steps\n if ep_num % plot_interval == 0:\n self.plot_rewards(avg_ph_scores)\n\n # Record a gif if it's larger than gif score threshold\n # or if it's larger than previously achieved score\n if self.eval and ep_score > gif_score_threshold:\n self.save_gif(frames)\n gif_score_threshold = ep_score\n \n # Save the final plot\n self.plot_rewards(avg_ph_scores, save=True)\n\n # Close environment\n self.env.close()\n\n # This function updates the target networks according to the DDPG algorithm\n # theta^q' = tau * theta^q + (1-tau) * theta^q'\n # theta^mu' = tau * theta^mu + (1-tau) * theta^mu'\n # Where q and q' are the critic and target critic networks\n # and mu and mu' are the actor and target actor networks respectively\n def update_target_networks(self):\n # Load all four state dictionaries for the networks\n actor_params = self.actor.state_dict()\n target_actor_params = self.target_actor.state_dict()\n\n critic_params = self.critic.state_dict()\n target_critic_params = self.target_critic.state_dict()\n\n # Load the a new state dict using a dictionary comprehension\n self.target_actor.load_state_dict({ key:\n (self.tau * params) + (1 - self.tau) * target_actor_params[key].clone()\n for key, params in actor_params.items()\n })\n\n self.target_critic.load_state_dict({ key:\n (self.tau * params) + 
(1 - self.tau) * target_critic_params[key].clone()\n for key, params in critic_params.items()\n })\n\n # Resets the noise\n def reset_noise(self):\n if self.multiagent:\n [noise.reset() for noise in self.noise]\n else:\n self.noise.reset()\n\n def plot_rewards(self, scores, save=False):\n self.plotter.plot_rewards(scores, save)\n\n # Save gif of an episode given a list of frames\n def save_gif(self, frames):\n print('Saving gif')\n frames = [Image.fromarray((frame*255).astype(dtype=np.uint8)) for frame in frames]\n frames[0].save('episode.gif', format='GIF', append_images=frames[1:], save_all=True, duration=100, loop=0)","sub_path":"Walker/ddpg_agent_unity.py","file_name":"ddpg_agent_unity.py","file_ext":"py","file_size_in_byte":16939,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"445501937","text":"from __future__ import unicode_literals\n\nimport bz2\nimport simplejson as json\nimport logging\nimport csv\nimport time\nimport uuid\n\nlogger = logging.getLogger(__name__)\ndef wikidata_to_csv(wikidata_file, doc_id='Wikidata', limit=None, to_print=True, lang=\"en\", parse_descr=True):\n # Read the JSON wiki data and parse out the entities. Takes about 7-10h to parse 55M lines.\n # get latest-all.json.bz2 from https://dumps.wikimedia.org/wikidatawiki/entities/\n\n site_filter = '{}wiki'.format(lang)\n\n WD_META_ITEMS = [\n \"Q163875\",\n \"Q191780\",\n \"Q224414\",\n \"Q4167836\",\n \"Q4167410\",\n \"Q4663903\",\n \"Q11266439\",\n \"Q13406463\",\n \"Q15407973\",\n \"Q18616576\",\n \"Q19887878\",\n \"Q22808320\",\n \"Q23894233\",\n \"Q33120876\",\n \"Q42104522\",\n \"Q47460393\",\n \"Q64875536\",\n \"Q66480449\",\n ]\n\n # filter: currently defined as OR: one hit suffices to be removed from further processing\n exclude_list = WD_META_ITEMS\n \n # punctuation\n exclude_list.extend([\"Q1383557\", \"Q10617810\"])\n\n # letters etc\n exclude_list.extend([\"Q188725\", \"Q19776628\", \"Q3841820\", \"Q17907810\", \"Q9788\", \"Q9398093\"])\n\n neg_prop_filter = {\n 'P31': exclude_list, # instance of\n 'P279': exclude_list # subclass\n }\n\n title_to_id = dict()\n id_to_descr = dict()\n id_to_alias = dict()\n\n # parse appropriate fields - depending on what we need in the KB\n parse_properties = True\n parse_qualifiers = False\n parse_sitelinks = True\n parse_labels = True\n parse_aliases = True\n parse_claims = True\n \n # create the header of the csv file\n header=[]\n header.append('id')\n header.append('node')\n header.append('property')\n header.append('value')\n header.append('document_id')\n with open('edge.csv', 'w', newline='') as myfile:\n wr = csv.writer(myfile, quoting=csv.QUOTE_NONE,delimiter=\"\\t\",escapechar=\"\\n\",quotechar='')\n wr.writerow(header)\n \n rows=[]\n tempid=uuid.uuid1()\n with bz2.open(wikidata_file, mode='rb') as file:\n for cnt, line in enumerate(file):\n if limit and cnt >= limit:\n break\n if cnt % 500000 == 0 and cnt > 0:\n logger.info(\"processed {} lines of WikiData JSON dump\".format(cnt))\n print(cnt)\n clean_line = line.strip()\n if clean_line.endswith(b\",\"):\n clean_line = clean_line[:-1]\n if len(clean_line) > 1:\n obj = json.loads(clean_line)\n entry_type = obj[\"type\"]\n if entry_type == \"item\" or entry_type == \"property\":\n keep = True\n \n claims = obj[\"claims\"]\n\n if parse_claims:\n for prop, value_set in neg_prop_filter.items():\n claim_property = claims.get(prop, None)\n if claim_property:\n for cp in claim_property:\n cp_id = (\n cp[\"mainsnak\"]\n .get(\"datavalue\", {})\n 
.get(\"value\", {})\n .get(\"id\")\n )\n cp_rank = cp[\"rank\"]\n if cp_rank != \"deprecated\" and cp_id in value_set:\n keep = False\n \n if keep:\n qnode=obj[\"id\"]\n if parse_properties:\n for prop, claim_property in claims.items():\n #cp_vals=[]\n seq_no=1\n for cp in claim_property:\n if cp['rank']!='deprecated' and cp['mainsnak']['snaktype']=='value':\n val=cp['mainsnak']['datavalue'].get('value')\n typ=cp['mainsnak']['datatype']\n sid=qnode+'-'+prop+'-'+str(seq_no)\n if typ.startswith('wikibase'):\n rows.append([sid,qnode,prop,val.get('id',''),doc_id])\n elif typ=='quantity':\n if val.get('upperbound',None) or val.get('lowerbound',None):\n rows.append([sid,qnode,prop,val['amount']+'['+val.get('upperbound','')+','+val.get('lowerbound','')+']',doc_id])\n else:\n rows.append([sid,qnode,prop,val['amount'],doc_id])\n elif typ=='globe-coordinate':\n rows.append([sid,qnode,prop,'@'+str(val['latitude'])+'/'+str(val['longitude']),doc_id])\n elif typ=='time':\n rows.append([sid,qnode,prop,\"^\"+val['time'][1:]+'/'+str(val['precision']),doc_id])\n elif typ=='monolingualtext':\n rows.append([sid,qnode,prop,'\\''+val['text']+'\\''+'@'+val['language'],doc_id]) \n else:\n rows.append([sid,qnode,prop,'\\\"'+val+'\\\"',doc_id])\n \n # add an edge from the item to the statement with prefix 'ps:' for the property \n #prop_name='ps:'+prop\n #statement_id=qnode+'-'+prop_name+'-'+str(seq_no)\n #rows.append([statement_id,qnode,prop_name,sid,doc_id])\n seq_no+=1\n # get qualifiers for the statements that we are importing \n if parse_qualifiers:\n if cp.get('qualifiers',None):\n quals=cp['qualifiers']\n for qual_prop, qual_claim_property in quals.items():\n qual_seq_no=1\n for qcp in qual_claim_property:\n if qcp['snaktype']=='value':\n val=qcp['datavalue'].get('value')\n typ=qcp['datatype']\n tempid=sid+'-'+qual_prop+'-'+str(qual_seq_no)\n qual_seq_no+=1\n if typ.startswith('wikibase'):\n rows.append([tempid,sid,qual_prop,val.get('id',''),doc_id])\n elif typ=='quantity':\n if val.get('upperbound',None) or val.get('lowerbound',None):\n rows.append([tempid,sid,qual_prop,val['amount']+'['+val.get('upperbound','')+','+val.get('lowerbound','')+']',doc_id])\n else:\n rows.append([tempid,sid,qual_prop,val['amount'],doc_id])\n elif typ=='globe-coordinate':\n rows.append([tempid,sid,qual_prop,'@'+str(val['latitude'])+'/'+str(val['longitude']),doc_id])\n elif typ=='time':\n rows.append([tempid,sid,qual_prop,\"^\"+val['time'][1:]+'/'+str(val['precision']),doc_id])\n elif typ=='monolingualtext':\n rows.append([tempid,sid,qual_prop,'\\\"'+val['text']+'\\\"'+'@'+val['language'],doc_id]) \n else:\n rows.append([tempid,sid,qual_prop,'\\\"'+val+'\\\"',doc_id])\n if cnt % 50000 == 0 and cnt > 0:\n with open('edge.csv', 'a', newline='') as myfile:\n for row in rows:\n wr = csv.writer(myfile, quoting=csv.QUOTE_NONE,delimiter=\"\\t\",escapechar=\"\\n\",quotechar='')\n wr.writerow(row)\n rows=[]\n with open('edge.csv', 'a', newline='') as myfile:\n for row in rows:\n wr = csv.writer(myfile, quoting=csv.QUOTE_NONE,delimiter=\"\\t\",escapechar=\"\\n\",quotechar='')\n wr.writerow(row)\nstart=time.time()\nwikidata_to_csv('wikidata-20200203-all.json.bz2','wikidata-20200203')\nend=time.time()\nprint((end-start)/3600)","sub_path":"import/wikidata_edges.py","file_name":"wikidata_edges.py","file_ext":"py","file_size_in_byte":9190,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"567862342","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Mon Apr 2 11:36:07 
2018\r\n\r\n@author: Xi Yu\r\n\"\"\"\r\n\r\nimport numpy as np\r\nfrom keras.datasets import imdb\r\nfrom keras.models import Sequential\r\nfrom keras.layers import Dense\r\nfrom keras.layers import LSTM\r\nfrom keras.layers.embeddings import Embedding\r\n#from keras.preprocessing import sequence\r\n# fix random seed for reproducibility\r\nnp.random.seed(7)\r\n\r\n\r\n# input image dimensions\r\nimg_rows, img_cols = 14, 4\r\n# the data, split between train and test sets\r\ndef loadtraining():\r\n tmp = np.loadtxt(\"train.xls\", dtype=np.str, delimiter=\",\")\r\n data = tmp[1:,0].astype(np.float)\r\n label = tmp[1:,2].astype(np.float)\r\n sequence = tmp[1:,1].astype(np.str)\r\n return data,sequence,label\r\n\r\ndef loadtest():\r\n tmp = np.loadtxt(\"test.xls\", dtype=np.str, delimiter=\",\")\r\n data = tmp[1:,0].astype(np.float)\r\n sequence = tmp[1:,1].astype(np.str)\r\n return data,sequence\r\n\r\ndata,sequence,label = loadtraining()\r\ninputdata = np.zeros([2000,14,4])\r\nfor i in range(2000):\r\n DNA_sequence = sequence[i]\r\n vector = np.zeros([14,4])\r\n for j in range(14):\r\n component = DNA_sequence[j]\r\n if (component=='A'):\r\n vector[j,:] = np.array([1,0,0,0])\r\n if (component=='G'):\r\n vector[j,:] = np.array([0,1,0,0])\r\n if (component=='C'):\r\n vector[j,:] = np.array([0,0,1,0]) \r\n if (component=='T'):\r\n vector[j,:] = np.array([0,0,0,1])\r\n inputdata[i,:,:]=vector\r\ntraining = inputdata.reshape((2000, -1)) \r\ntrain = training[0:1700]\r\nlabel_train = label[0:1700]\r\nallvector = np.column_stack([train,label_train])\r\nnp.random.shuffle(allvector) \r\nX_train = allvector[:,0:56]\r\ny_train = allvector[:,56]\r\n#%%\r\nX_test = training[1700:2000]\r\ny_test = label[1700:2000]\r\ny_label = np.zeros([2000,2])\r\ny_train_ture = np.zeros([1700,2])\r\nfor j, class_idx in enumerate(y_train):\r\n y_train_ture[j,int(class_idx)] = 1\r\n\r\ny_test_ture = np.zeros([300,2])\r\nfor j, class_idx in enumerate(y_test):\r\n y_test_ture[j,int(class_idx)] = 1\r\n#for n, class_idx in enumerate(label):\r\n # y_label[n,int(class_idx)] = 1 \r\n#%%\r\ntest_data,test_sequence = loadtest()\r\ntest_inputdata = np.zeros([400,14,4])\r\nfor i in range(400):\r\n DNA_test_sequence = test_sequence[i]\r\n test_vector = np.zeros([14,4])\r\n for j in range(14):\r\n test_component = DNA_test_sequence[j]\r\n if (test_component=='A'):\r\n test_vector[j,:] = np.array([1,0,0,0])\r\n if (test_component=='G'):\r\n test_vector[j,:] = np.array([0,1,0,0])\r\n if (test_component=='C'):\r\n test_vector[j,:] = np.array([0,0,1,0]) \r\n if (test_component=='T'):\r\n test_vector[j,:] = np.array([0,0,0,1])\r\n test_inputdata[i,:,:]=test_vector\r\ntest = test_inputdata.reshape((400, -1))\r\n#%%\r\n#expected input data shape: (batch_size, timesteps, data_dim)\r\n# create the model\r\nmodel = Sequential()\r\nmodel.add(Embedding(4, 100))\r\nmodel.add(LSTM(100))\r\nmodel.add(Dense(1, activation='sigmoid'))\r\nmodel.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])\r\nprint(model.summary())\r\nmodel.fit(X_train, y_train, validation_data=(X_test, y_test), epochs=10, batch_size=64)\r\n\r\n# Final evaluation of the model\r\nscores = model.evaluate(X_test, y_test, verbose=1)\r\nprint(\"Accuracy: %.2f%%\" % (scores[1]*100))\r\n#%%\r\np = model.predict(test)\r\ng = np.zeros(400)\r\nfor i in range(400):\r\n g[i] = np.argmax(p[i,:])","sub_path":"programming assignment 3/DNA sequence 
classification/LSTM.py","file_name":"LSTM.py","file_ext":"py","file_size_in_byte":3441,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"215863273","text":"# Uses python3\nimport sys\nimport random\n\ndef partition3(a, l, r):\n x = a[l]\n j = l\n k = l\n for i in range(l+1,r+1):\n if a[i][0] == x[0]:\n k += 1\n a[i], a[k] = a[k], a[i]\n elif a[i][0] <= x[0]:\n k += 1\n a[i], a[k] = a[k], a[i]\n a[j], a[k] = a[k], a[j]\n j += 1\n return j, k\n\n\ndef randomized_quick_sort(a, l, r):\n if l >= r:\n return\n k = random.randint(l, r)\n a[l], a[k] = a[k], a[l]\n m, n = partition3(a, l, r)\n if m != n:\n a[m:n+1] = sorted(a[m:n+1])\n randomized_quick_sort(a, l, m - 1);\n randomized_quick_sort(a, n + 1, r);\n\n\ndef fast_count_segments(starts, ends, points):\n count_dict = {}\n line = []\n for s in starts:\n line.append((s, 'l'))\n for e in ends:\n line.append((e, 'r'))\n for p in points:\n line.append((p, 'p'))\n randomized_quick_sort(line, 0, len(line) -1)\n count = 0\n for i in range(len(line)):\n if line[i][1] == 'l':\n count += 1\n elif line[i][1] == 'r':\n count -= 1\n else:\n count_dict[line[i][0]] = count\n cnt =[]\n for i in range(len(points)):\n cnt.append(count_dict[points[i]])\n return cnt\n\n\ndef naive_count_segments(starts, ends, points):\n cnt = [0] * len(points)\n for i in range(len(points)):\n for j in range(len(starts)):\n if starts[j] <= points[i] <= ends[j]:\n cnt[i] += 1\n return cnt\n\n\n# while True:\n# n_segments = 50\n# n_points = 100\n# starts = []\n# ends = []\n# points = []\n# for i in range(n_segments):\n# a = random.randint(0,10000)\n# b = random.randint(0,10000)\n# starts.append(a)\n# ends.append(a+b)\n#\n# for i in range(n_points):\n# points.append(random.randint(0,20000))\n#\n# res1 = naive_count_segments(starts,ends,points)\n# res2 = fast_count_segments(starts,ends,points)\n#\n# if res1 == res1:\n# print('OK')\n# else:\n# print(starts)\n# print(ends)\n# print(points)\n# print(res1)\n# print(res2)\n# break\n\n\n\n\n\n\n# data = list(map(int, input().split()))\n# n = data[0]\n# m = data[1]\n# starts = data[2:2 * n + 2:2]\n# ends = data[3:2 * n + 2:2]\n# points = data[2 * n + 2:]\n# cnt = fast_count_segments(starts, ends, points)\n# for x in cnt:\n# print(x, end=' ')\n\n\nif __name__ == '__main__':\n input = sys.stdin.read()\n data = list(map(int, input.split()))\n n = data[0]\n m = data[1]\n starts = data[2:2 * n + 2:2]\n ends = data[3:2 * n + 2:2]\n points = data[2 * n + 2:]\n #use fast_count_segments\n cnt = fast_count_segments(starts, ends, points)\n for x in cnt:\n print(x, end=' ')\n","sub_path":"week4_divide_and_conquer/5_organizing_a_lottery/points_and_segments.py","file_name":"points_and_segments.py","file_ext":"py","file_size_in_byte":2751,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"146098240","text":"from flask import jsonify, request\n\nfrom common import DB\n\ndef _user_body_weight_update(user_id):\n data = request.get_json()\n with DB() as db:\n db.proc_update('user_body_weight_upd', [user_id, data['body_weight']])\n return jsonify({'success': True})\n\ndef _user_select(user_id):\n with DB() as db:\n row = db.proc_select('user_sel', [user_id])[0]\n res = {\n 'id': row['id'],\n 'first_name': row['first_name'],\n 'last_name': row['last_name'],\n 'username': row['username'],\n 'body_height': row['body_height'],\n 'body_weight': row['body_weight'],\n 'birth_date': row['birth_date']\n }\n return jsonify(res)\n\ndef 
_user_memo_select(user_id):\n    with DB() as db:\n        row = db.proc_select('user_memo_sel', [user_id])[0]\n        res = {\n            'memo': row['memo']\n        }\n    return jsonify(res)\n\ndef _user_memo_update(user_id):\n    data = request.get_json()\n    with DB() as db:\n        db.proc_update('user_memo_upd', [user_id, data['memo']])\n    return jsonify({'success': True})","sub_path":"api/user/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1002,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"630006371","text":" # use a PyQT QDial knob to select a temperature\n # show Celsius and corresponding Fahrenheit values\n # as the knob is rotated\n # written for PyQt5 and Python 3\n # ene\nimport sys\n # has minimal namespace conflicts\nfrom PyQt5.QtCore import *\nfrom PyQt5.QtGui import *\nfrom PyQt5.QtWidgets import *\n\nclass MyFrame(QWidget):\n    def __init__(self, parent=None):\n        QWidget.__init__(self, parent)\n        # setGeometry(x_pos, y_pos, width, height)\n        self.setGeometry(150, 150, 328, 440)\n        self.setWindowTitle('Convert Temperature')\n        self.dial = QDial(self)\n        self.dial.setGeometry(QRect(10, 20, 311, 351))\n        self.dial.setMinimum(-50)\n        self.dial.setMaximum(310)\n        self.dial.setPageStep(10)\n        # initial dial pointer position\n        dial_pos = 50\n        self.dial.setSliderPosition(dial_pos)\n        self.dial.setWrapping(True)\n        self.dial.setNotchTarget(3.6)\n        self.dial.setNotchesVisible(True)\n        self.label_c = QLabel(self)\n        self.label_c.setGeometry(QRect(30, 390, 121, 31))\n        font = QFont()\n        font.setPointSize(11)\n        self.label_c.setFont(font)\n        self.label_c.setAlignment(Qt.AlignCenter)\n        self.label_f = QLabel(self)\n        self.label_f.setGeometry(QRect(170, 390, 131, 31))\n        font = QFont()\n        font.setPointSize(11)\n        self.label_f.setFont(font)\n        self.label_f.setAlignment(Qt.AlignCenter)\n        # start up\n        self.show_temps()\n        # update show_temps() whenever the dial moves (PyQt5 new-style signal)\n        self.dial.sliderMoved.connect(self.show_temps)\n\n    def show_temps(self):\n        \"\"\"\n        get the C value from the dial label\n        calculate F value and show\n        \"\"\"\n        c = float(self.dial.value())\n        s_c = \"%0.1f Celsius\" % c\n        self.label_c.setText(s_c)\n        f = c*9/5.0 + 32\n        s_f = \"%0.1f Fahrenheit\" % f\n        self.label_f.setText(s_f)\n\n# create the application and main window at module level\napp = QApplication(sys.argv)\nframe = MyFrame()\nframe.show()\nsys.exit(app.exec_())\n","sub_path":"qdial.py","file_name":"qdial.py","file_ext":"py","file_size_in_byte":2029,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"173387043","text":"# Copyright 2011-2015 Luc Saffre\n# License: BSD (see file COPYING for details)\n\n\"\"\"Defines the choicelists for :mod:`lino.modlib.users`.\n\n\"\"\"\n\nfrom __future__ import unicode_literals\nfrom builtins import str\n\nimport logging\nlogger = logging.getLogger(__name__)\n\nfrom django.utils.translation import ugettext_lazy as _\nfrom django.conf import settings\nfrom django.db import models\n\nfrom lino.core.choicelists import ChoiceList, Choice\nfrom lino.core.roles import UserRole, SiteUser, SiteAdmin\n\nfrom lino.api import dd\n\n\nclass UserType(Choice):\n    \"\"\"Base class for all user profiles.\n\n    \"\"\"\n\n    hidden_languages = None\n    \"\"\"A subset of :attr:`languages` which\n    should be hidden in this user profile. Default value is\n    :attr:`hidden_languages`. 
This is\n    used on multilingual sites with more than 4 or 5 languages.\n\n    \"\"\"\n\n    readonly = False\n    \"\"\"Whether users with this profile get only write-protected access.\"\"\"\n\n    # authenticated = True\n    # \"\"\"Whether users with this profile should be considered authenticated.\"\"\"\n\n    role = None\n    \"\"\"The role of users having this profile. This is an instance of\n    :class:`` or some subclass thereof.\n\n    \"\"\"\n\n    def __init__(self, value, text, role_class,\n                 name=None, # authenticated=True,\n                 readonly=False,\n                 **kw):\n        # if value is None:\n        #     value = self.__module__.split('.')[-2] + '.' \\\n        #         + self.__class__.__name__\n        super(UserType, self).__init__(value, text, name)\n        self.role = role_class()\n        self.readonly = readonly\n        # self.authenticated = authenticated\n        self.kw = kw\n\n    def attach(self, cls):\n        super(UserType, self).attach(cls)\n        self.kw.setdefault('hidden_languages', cls.hidden_languages)\n\n        for k, vf in list(cls.virtual_fields.items()):\n            if vf.has_default():\n                self.kw.setdefault(k, vf.get_default())\n            elif vf.return_type.blank:\n                self.kw.setdefault(k, None)\n\n        for k, v in list(self.kw.items()):\n            setattr(self, k, v)\n\n        if self.hidden_languages is not None:\n            self.hidden_languages = set(\n                settings.SITE.resolve_languages(self.hidden_languages))\n\n        del self.kw\n\n    def __repr__(self):\n        #~ s = self.__class__.__name__\n        s = str(self.choicelist)\n        if self.name:\n            s += \".\" + self.name\n        s += \":\" + self.value\n        return s\n\n    def has_required_roles(self, required_roles):\n        \"\"\"Return `True` if this user profile's :attr:`role` satisfies the\n        specified requirements. See\n        :meth:`lino.core.roles.UserRole.has_required_roles`.\n\n        \"\"\"\n        return self.role.has_required_roles(required_roles)\n        # try:\n        #     return self.role.has_required_roles(required_roles)\n        # except TypeError:\n        #     raise Exception(\"Invalid roles specified: {0}\".format(\n        #         required_roles))\n\n\n##\n\n\nclass UserTypes(ChoiceList):\n    \"\"\"The list of user profiles available on this site.\n    \n    You can see the user profiles available in your application via\n    :menuselection:`Explorer --> System --> User Profiles`.\n\n    \"\"\"\n    required_roles = dd.login_required(SiteAdmin)\n    item_class = UserType\n    verbose_name = _(\"User type\")\n    verbose_name_plural = _(\"User types\")\n    show_values = True\n    max_length = 20\n    # column_names = \"value name text readonly\"\n\n    preferred_foreignkey_width = 20\n\n    # readonly = models.BooleanField(_(\"Read-only\"), default=False)\n    \n    hidden_languages = settings.SITE.hidden_languages\n    \"\"\"Default value for the\n    :attr:`hidden_languages` of newly\n    attached choice item.\n\n    \"\"\"\n\n\nadd = UserTypes.add_item\nadd('000', _(\"Anonymous\"), UserRole, name='anonymous', readonly=True)\nadd('100', _(\"User\"), SiteUser, name='user')\nadd('900', _(\"Administrator\"), SiteAdmin, name='admin')\n\n\n","sub_path":"lino/modlib/users/choicelists.py","file_name":"choicelists.py","file_ext":"py","file_size_in_byte":4047,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"516453768","text":"from flask_sqlalchemy import SQLAlchemy\r\n\r\ndb = SQLAlchemy()\r\n\r\nclass Menu(db.Model):\r\n    __tablename__ = \"menu\"\r\n    id = db.Column(\"id\", db.Integer, primary_key = True)\r\n    unitOid = db.Column(\"unitOid\", db.Integer, nullable = False)\r\n    oid = db.Column(\"oid\", db.Integer, nullable = False)\r\n    eateryName = db.Column(\"eateryName\", db.String, nullable = False)\r\n    menuName = db.Column(\"menuName\", db.String, nullable = False)\r\n\r\n    def __init__(self, 
**kwargs):\r\n        self.unitOid = kwargs['unitOid']\r\n        self.oid = kwargs['oid']\r\n        self.eateryName = kwargs['eateryName']\r\n        self.menuName = kwargs['menuName']\r\n\r\n    def serialize(self):\r\n        return {\r\n            \"id\": self.id,\r\n            \"unitOid\": self.unitOid,\r\n            \"oid\": self.oid,\r\n            \"eateryName\": self.eateryName,\r\n            \"menuName\": self.menuName\r\n        }\r\n\r\nclass Food(db.Model):\r\n    __tablename__ = \"food\"\r\n    id = db.Column(\"id\", db.Integer, primary_key = True)\r\n    oid = db.Column(\"oid\", db.Integer, nullable = False)\r\n    detailOid = db.Column(\"detailOid\", db.Integer, nullable = False)\r\n    menuName = db.Column(\"menuName\", db.String, nullable = False)\r\n    foodName = db.Column(\"foodName\", db.String, nullable = False)\r\n\r\n    def __init__(self, **kwargs):\r\n        self.oid = kwargs['oid']\r\n        self.detailOid = kwargs['detailOid']\r\n        self.menuName = kwargs['menuName']\r\n        self.foodName = kwargs['foodName']\r\n\r\n    def serialize(self):\r\n        return {\r\n            \"id\": self.id,\r\n            \"oid\": self.oid,\r\n            \"detailOid\": self.detailOid,\r\n            \"menuName\": self.menuName,\r\n            \"foodName\": self.foodName\r\n        }\r\n\r\nclass AllergensIngredients(db.Model):\r\n    __tablename__ = \"allergens_ingredients\"\r\n    id = db.Column(\"id\", db.Integer, primary_key = True)\r\n    detailOid = db.Column(\"detailOid\", db.Integer, nullable = False)\r\n    foodName = db.Column(\"foodName\", db.String, nullable = False)\r\n    ingredients = db.Column(\"ingredients\", db.String, nullable = False)\r\n    allergens = db.Column(\"allergens\", db.String, nullable = False)\r\n\r\n    alcohol = db.Column(\"alcohol\", db.Boolean, nullable = False)\r\n    eggs = db.Column(\"eggs\", db.Boolean, nullable = False)\r\n    fish = db.Column(\"fish\", db.Boolean, nullable = False)\r\n    gluten = db.Column(\"gluten\", db.Boolean, nullable = False)\r\n    milk = db.Column(\"milk\", db.Boolean, nullable = False)\r\n    peanuts = db.Column(\"peanuts\", db.Boolean, nullable = False)\r\n    pork_U = db.Column(\"pork(U)\", db.Boolean, nullable = False)\r\n    sesame = db.Column(\"sesame\", db.Boolean, nullable = False)\r\n    soy = db.Column(\"soy\", db.Boolean, nullable = False)\r\n    treeNuts = db.Column(\"treeNuts\", db.Boolean, nullable = False)\r\n    wheat = db.Column(\"wheat\", db.Boolean, nullable = False)\r\n\r\n    def __init__(self, **kwargs):\r\n        self.detailOid = kwargs['detailOid']\r\n        self.foodName = kwargs['foodName']\r\n        self.ingredients = kwargs['ingredients']\r\n        self.allergens = kwargs['allergens']\r\n        self.alcohol = kwargs.get('alcohol', False)\r\n        self.eggs = kwargs.get('eggs', False)\r\n        self.fish = kwargs.get('fish', False)\r\n        self.gluten = kwargs.get('gluten', False)\r\n        self.milk = kwargs.get('milk', False)\r\n        self.peanuts = kwargs.get('peanuts', False)\r\n        self.pork_U = kwargs.get('pork_U', False)\r\n        self.sesame = kwargs.get('sesame', False)\r\n        self.soy = kwargs.get('soy', False)\r\n        self.treeNuts = kwargs.get('treeNuts', False)\r\n        self.wheat = kwargs.get('wheat', False)\r\n\r\n    def serialize(self):\r\n        return {\r\n            \"id\": self.id,\r\n            \"detailOid\": self.detailOid,\r\n            \"foodName\": self.foodName,\r\n            \"ingredients\": self.ingredients,\r\n            \"allergens\": self.allergens,\r\n            \"alcohol\": self.alcohol,\r\n            \"eggs\": self.eggs,\r\n            \"fish\": self.fish,\r\n            \"gluten\": self.gluten,\r\n            \"milk\": self.milk,\r\n            \"peanuts\": self.peanuts,\r\n            \"pork_U\" : self.pork_U,\r\n            \"sesame\" : self.sesame,\r\n            \"soy\" : self.soy,\r\n            \"treeNuts\" : self.treeNuts,\r\n            \"wheat\" : self.wheat\r\n        }\r\n","sub_path":"HackChallenge/db.py","file_name":"db.py","file_ext":"py","file_size_in_byte":4115,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"163603104","text":"from tkinter import *\r\nfrom sudoku import 
Sudoku, SudokuState\r\n\r\n\r\nclass SudokuInterface():\r\n    def __init__(self, frame):\r\n        self.cells = self.get_empty_cells(frame)\r\n\r\n    def get_empty_cells(self, frame):\r\n        cells = [[(rw, cl) for cl in range(9)] for rw in range(9)]\r\n        register = frame.register(SudokuInterface.is_valid_cell_input)\r\n        for rw in range(9):\r\n            for cl in range(9):\r\n                # Create a cell for that Sudoku entry\r\n                cell = Entry(frame, width=2, justify=CENTER)\r\n\r\n                # Configure the cell to conform to input validation\r\n                cell.config(validate=\"key\", validatecommand=(register, '%P'))\r\n\r\n                cells[rw][cl] = cell\r\n\r\n                # Display the cell onto the GUI\r\n                cell.grid(row=rw, column=cl)\r\n        return cells\r\n\r\n    @staticmethod\r\n    def is_valid_cell_input(input):\r\n        if input == \"\":\r\n            return True\r\n        if input.isdigit():\r\n            return 1 <= int(input) <= 9\r\n        return False\r\n\r\n    def make_sudoku(self):\r\n        puzzle = [[(rw, cl) for cl in range(9)] for rw in range(9)]\r\n        for rw in range(9):\r\n            for cl in range(9):\r\n                input = self.cells[rw][cl].get()\r\n                if input == \"\":\r\n                    puzzle[rw][cl] = 0\r\n                if input.isdigit():\r\n                    puzzle[rw][cl] = int(input)\r\n        return Sudoku(puzzle)\r\n\r\n    def highlight(self, positions):\r\n        for position in positions:\r\n            self.cells[position[0]][position[1]].config(fg=\"red\")\r\n\r\n    def unhighlight_all(self):\r\n        for row in range(9):\r\n            for col in range(9):\r\n                self.cells[row][col].config(fg=\"black\")\r\n\r\n\r\nroot = Tk()\r\nroot.title(\"Sudoku Solver\")\r\n\r\ninstruction = Label(root, text=\"Input your Sudoku puzzle below!\")\r\ninstruction.grid(row=0)\r\n\r\nsudokuFrame = Frame(root)\r\nsudokuFrame.grid(row=1)\r\n\r\nentries = SudokuInterface(sudokuFrame)\r\n\r\n\r\ndef get_solution():\r\n    entries.unhighlight_all()\r\n    sudoku = entries.make_sudoku()\r\n    answer = sudoku.solve()\r\n    if sudoku.get_state() is SudokuState.UNSOLVABLE:\r\n        conflicting_positions = sudoku.get_invalid_assignment()[\"conflicting-positions\"]\r\n        entries.highlight(conflicting_positions)\r\n    answerLabel.configure(text=answer)\r\n\r\n\r\nsolveButton = Button(root, text=\"Solve\", command=get_solution)\r\nsolveButton.grid()\r\n\r\nanswerLabel = Label(root, text=\"\")\r\nanswerLabel.grid()\r\n\r\nroot.mainloop()\r\n","sub_path":"gui.py","file_name":"gui.py","file_ext":"py","file_size_in_byte":2498,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"502019066","text":"########\n# this program will read in user input by using input()\n# function in python.\n# we can store the user input in a variable\n# and manipulate it\n\n################\n\n#print(\"The number entered is\", x)\n#print(type (x))\n\n#y = float(input(\"Enter something dawwg \"))\n#print(\"the number entered is\", y)\n\n#print(x, '+', y, '=', x + y)\n\n#x1 = float(input(\"Enter in a number to subtract \"))\n#y1 = float(input(\"Enter in a the second number \"))\n\n#print(x1, '-', y1, '=', x1-y1)\n\n\n# multiplication functionality of our calc\n\n#x2 = float(input(\"Enter a number \"))\n#y2 = float(input(\"Pick another number\"))\n#print(x2, '*', y2, '=', x2 * y2)\n\nx3 = float(input(\"Enter a number \"))\ny3 = float(input(\"Pick another number\"))\nprint(x3, '/', y3, '=', x3 / y3)\n\n# for homework, implement the power functionality\n#implement mod functionality %\n# import math module and implement any 
functionality","sub_path":"venv/interactive.py","file_name":"interactive.py","file_ext":"py","file_size_in_byte":878,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"530738968","text":"VERSION = '0.0.0'\nAPPNAME = 'appname'\n\ndef options(opt):\n opt.load('compiler_cxx')\n opt.load('unittest_gtest')\n\ndef configure(conf):\n conf.env.CXXFLAGS += ['-O2', '-Wall', '-g', '-pipe']\n conf.load('compiler_cxx')\n conf.load('unittest_gtest')\n\n conf.check_cfg(package='pficommon', args='--cflags --libs')\n # conf.check_cxx(lib='libname', header_name='header.h')\n\ndef build(bld):\n bld.program(\n source='src/sample.cpp',\n target='sample',\n use='PFICOMMON'\n )\n\ndef cpplint(ctx):\n cpplint_args = '--filter=-runtime/references,-legal/copyright,-build/include_order --extensions=cpp,hpp'\n\n src_dir = ctx.path.find_node('src')\n files = []\n for f in src_dir.ant_glob('**/*.cpp **/*.hpp'):\n files.append(f.path_from(ctx.path))\n\n args = 'cpplint.py %s %s' % (cpplint_args,' '.join(files))\n result = ctx.exec_command(args)\n if result != 0:\n ctx.fatal('cpplint failed')\n\ndef gcovr(ctx):\n excludes = [\n '.*\\.unittest-gtest.*',\n '.*_test\\.cpp',\n ]\n\n args = 'gcovr --branches -r . '\n for e in excludes:\n args += ' -e \"%s\"' % e\n\n ctx.exec_command(args)\n","sub_path":"wscript","file_name":"wscript","file_ext":"","file_size_in_byte":1158,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"612289356","text":"'''\n目标网址:http://k.360kan.com\n软件目的: 视频采集\n时间:2018-2-23\n作者:liuyu\nV:1.0\n'''\nimport urllib.request\nimport json\nimport re\nurl = \"http://pc.k.360kan.com/pc/list?n=10&p=3&f=json&ajax=1&uid=2a3e0fc71c8bbf9bf72302647c7a63e4&channel_id=2&dl=\"\nfor eve in json.loads(urllib.request.urlopen(url).read().decode(\"utf-8\"))[\"data\"][\"res\"]:\n title = eve[\"t\"]\n username = eve[\"f\"]\n id_data = re.findall(\"detail/(.*?)\\?\",eve[\"u\"])[0]\n content_url = \"http://pc.k.360kan.com/pc/play?id=\" + id_data\n video_url = json.loads(urllib.request.urlopen(content_url).read().decode(\"utf-8\"))[\"data\"][\"url\"]\n with open(title + \".mp4\", \"wb\") as f:\n f.write(urllib.request.urlopen(video_url).read())\n","sub_path":"Spider/BiliBili_Article/2.【代码】快视频采集器 - Python网络爬虫实战/cv251313.py","file_name":"cv251313.py","file_ext":"py","file_size_in_byte":713,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"208870224","text":"#\r\n# @lc app=leetcode.cn id=393 lang=python3\r\n#\r\n# [393] UTF-8 编码验证\r\n#\r\n# https://leetcode-cn.com/problems/utf-8-validation/description/\r\n#\r\n# algorithms\r\n# Medium (37.39%)\r\n# Likes: 33\r\n# Dislikes: 0\r\n# Total Accepted: 3.6K\r\n# Total Submissions: 9.6K\r\n# Testcase Example: '[197,130,1]'\r\n#\r\n# UTF-8 中的一个字符可能的长度为 1 到 4 字节,遵循以下的规则:\r\n#\r\n#\r\n# 对于 1 字节的字符,字节的第一位设为0,后面7位为这个符号的unicode码。\r\n# 对于 n 字节的字符 (n > 1),第一个字节的前 n 位都设为1,第 n+1\r\n# 位设为0,后面字节的前两位一律设为10。剩下的没有提及的二进制位,全部为这个符号的unicode码。\r\n#\r\n#\r\n# 这是 UTF-8 编码的工作方式:\r\n#\r\n#\r\n# ⁠ Char. 
number range | UTF-8 octet sequence\r\n# ⁠ (hexadecimal) | (binary)\r\n# ⁠ --------------------+---------------------------------------------\r\n# ⁠ 0000 0000-0000 007F | 0xxxxxxx\r\n# ⁠ 0000 0080-0000 07FF | 110xxxxx 10xxxxxx\r\n# ⁠ 0000 0800-0000 FFFF | 1110xxxx 10xxxxxx 10xxxxxx\r\n# ⁠ 0001 0000-0010 FFFF | 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx\r\n#\r\n#\r\n# 给定一个表示数据的整数数组,返回它是否为有效的 utf-8 编码。\r\n#\r\n# 注意:\r\n# 输入是整数数组。只有每个整数的最低 8 个有效位用来存储数据。这意味着每个整数只表示 1 字节的数据。\r\n#\r\n# 示例 1:\r\n#\r\n#\r\n# data = [197, 130, 1], 表示 8 位的序列: 11000101 10000010 00000001.\r\n#\r\n# 返回 true 。\r\n# 这是有效的 utf-8 编码,为一个2字节字符,跟着一个1字节字符。\r\n#\r\n#\r\n# 示例 2:\r\n#\r\n#\r\n# data = [235, 140, 4], 表示 8 位的序列: 11101011 10001100 00000100.\r\n#\r\n# 返回 false 。\r\n# 前 3 位都是 1 ,第 4 位为 0 表示它是一个3字节字符。\r\n# 下一个字节是开头为 10 的延续字节,这是正确的。\r\n# 但第二个延续字节不以 10 开头,所以是不符合规则的。\r\n#\r\n#\r\n#\r\n\r\n# @lc code=start\r\ntry:\r\n MOD = 10**9 + 7\r\n from collections import defaultdict\r\n from typing import *\r\n import os, sys, random, math, threading\r\n curFileParentPath = os.path.dirname(\r\n os.path.dirname(os.path.realpath(__file__)))\r\n sys.path.append(curFileParentPath)\r\n from Utils.Tree import *\r\n from Utils.Executor import Execute\r\nexcept Exception as err:\r\n print('Import failed: ' + str(err))\r\n\r\n\r\nclass Solution:\r\n def validUtf8(self, data: List[int]) -> bool:\r\n # 根据规则来判断\r\n # 注意单字节的开头一定是0\r\n # 注意多字节的长度是2~4, 所以cnt是1也是错误的\r\n # 注意遍历完的时候要确认末尾的cnt是0\r\n mask = (1 << 8) - 1\r\n\r\n def getCode(d):\r\n s = d & mask\r\n s = bin(s)[2:]\r\n if len(s) < 8:\r\n s = '0' * (8 - len(s)) + s\r\n return s\r\n\r\n cnt = 0\r\n for d in data:\r\n s = getCode(d)\r\n if cnt == 0:\r\n if s[0] == '0':\r\n continue\r\n else:\r\n i = 0\r\n while i < 8 and s[i] == '1':\r\n i += 1\r\n if i < 2 or i > 4:\r\n return False\r\n cnt = i - 1\r\n else:\r\n if s[:2] != '10':\r\n return False\r\n cnt -= 1\r\n return cnt == 0\r\n\r\n\r\nif __name__ == '__main__':\r\n try:\r\n print(Solution().validUtf8([197, 130, 1])) # or Execute()\r\n print(Solution().validUtf8([235, 140, 4])) # or Execute()\r\n print(Solution().validUtf8([237])) # or Execute()\r\n print(Solution().validUtf8([145])) # or Execute()\r\n except Exception as err:\r\n print(err)\r\n\r\n# @lc code=end\r\n","sub_path":"Medium/393.utf-8-编码验证.py","file_name":"393.utf-8-编码验证.py","file_ext":"py","file_size_in_byte":3813,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"449538282","text":"from rest_framework import serializers, validators\r\nfrom .models import *\r\n\r\nclass FavorSerializers(serializers.ModelSerializer):\r\n # 隐藏user字段,并且赋值为当前登录用户\r\n user = serializers.HiddenField(\r\n default=serializers.CurrentUserDefault()\r\n )\r\n class Meta:\r\n model = Favor\r\n fields = \"__all__\"\r\n # fields = ('user','goods')\r\n extra_kwargs = { # 对模型已有参数重新设置和编辑\r\n 'created_time':{'required':False,'read_only':True}# 可以不填 read_only 不显示该字段\r\n }\r\n # 收藏夹 用户和商品的联合唯一限制(验证器)\r\n validators = [\r\n validators.UniqueTogetherValidator(\r\n queryset= Favor.objects.all(),\r\n fields = (\"user\",\"goods\"),\r\n message= \"该商品已收藏,请勿重复收藏!\"\r\n )\r\n ]\r\nclass ShopCartSerializers(serializers.ModelSerializer):\r\n user = serializers.HiddenField(\r\n default=serializers.CurrentUserDefault()\r\n )\r\n class Meta:\r\n model = ShoppingCart\r\n fields = \"__all__\"\r\n extra_kwargs = {\r\n 'created_time':{'required':False,'read_only':True}\r\n }\r\n validators.UniqueTogetherValidator(\r\n queryset=ShoppingCart.objects.all(),\r\n fields = 
(\"user\",),\r\n            message=None\r\n        )\r\n","sub_path":"apps/operations/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":1408,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"458578359","text":"# Python program to find the factorial of a number provided by the user.\n\n# function to print factorial of given number\n\ndef factorial(number):\n    factorial = 1\n    if number < 0:\n        print(\"Sorry, factorial does not exist for negative numbers\")\n    elif number == 0:\n        print(\"The factorial of 0 is 1\")\n    else:\n        for i in range(1,number + 1):\n            factorial = factorial*i\n        print(\"The factorial of\",number,\"is\",factorial) \n\n# change the value for a different result\nnum = int(input(\"Enter a number: \"))\n\nfactorial(num)\n","sub_path":"prac2.py","file_name":"prac2.py","file_ext":"py","file_size_in_byte":537,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"433630175","text":"import string\n\nBLACK_LIST = string.punctuation.replace('%', '') + '\\n'\n\n\ndef normalize(text,\n              black_list=BLACK_LIST,\n              vocab=None, lowercase=True, tokenize=False):\n    if black_list:\n        text = text.translate(str.maketrans(black_list, ' ' * len(black_list)))\n    if lowercase:\n        text = text.lower()\n    if vocab:\n        text = ' '.join([word for word in text.split() if word in vocab])\n    if tokenize:\n        return text.split()\n    else:\n        return ' '.join(text.split())\n","sub_path":"embedding/preprocessing.py","file_name":"preprocessing.py","file_ext":"py","file_size_in_byte":518,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"345247038","text":"#!/usr/bin/env python\r\n\r\n\"\"\"Program to check if a sudoku solution is valid; assumes it is handed a square grid.\"\"\"\r\n\r\n\r\n# Solution A\r\n# def check_sudoku(grid):\r\n#\r\n#     num = len(grid)  # gives the size of the grid\r\n#     to_check = grid[:]  # initialize containing rows, afterwards, add the columns\r\n#\r\n#     # create columns\r\n#     for i in range(num):  # x axis of grid\r\n#         sublist = []\r\n#         for j in range(num):  # y axis of grid\r\n#             sublist.append(grid[j][i])  # append col items to sublist\r\n#         to_check.append(sublist)  # append created column contents to to_check\r\n#\r\n#     # check everything\r\n#     for array in to_check:\r\n#         if sorted(array) != list(range(1, num+1)):\r\n#             return False\r\n#     return True  # if nothing returns False... we have a winner!\r\n\r\n\r\n# One line version of solution A\r\ndef check_sudoku(grid):\r\n    return all(sorted(x) == list(range(1, len(grid)+1)) for x in grid + list(zip(*grid)))\r\n\r\n\r\n# Solution B\r\n# def check_sudoku(grid):\r\n#     \"\"\"\r\n#     This solution takes advantage of the knowledge that there can only be one\r\n#     occurrence of each number in each row and column.\r\n#     It also assumes that if for example it's a 4x4 grid the numbers should be 1 - 4.\r\n#     \"\"\"\r\n#     n = len(grid)  # Extract the size of the grid\r\n#     digit = 1  # Start with 1\r\n#     while digit <= n:  # Go through each digit\r\n#         i = 0\r\n#         while i < n:  # Go through each row and column\r\n#             row_count = 0\r\n#             col_count = 0\r\n#             j = 0\r\n#             while j < n:  # For each entry in the ith row/column\r\n#                 if grid[i][j] == digit:  # Check row count\r\n#                     row_count += 1\r\n#                 if grid[j][i] == digit:  # Check column count\r\n#                     col_count += 1\r\n#                 j += 1  # Increment index on the current row i.e. 
the column\r\n# if row_count != 1 or col_count != 1:\r\n# return False\r\n# i += 1 # Increment index to the next row\r\n# digit += 1 # Check the next digit (1 through 9)\r\n# return True # Nothing was wrong\r\n\r\n# ############################### TEST CASES ###################################\r\n\r\ncorrect = [[1, 2, 3],\r\n [2, 3, 1],\r\n [3, 1, 2]]\r\nincorrect = [[1, 2, 3, 4],\r\n [2, 3, 1, 3],\r\n [3, 1, 2, 3],\r\n [4, 4, 4, 4]]\r\nincorrect2 = [[1, 2, 3, 4],\r\n [2, 3, 1, 4],\r\n [4, 1, 2, 3],\r\n [3, 4, 1, 2]]\r\nincorrect3 = [[1, 2, 3, 4, 5],\r\n [2, 3, 1, 5, 6],\r\n [4, 5, 2, 1, 3],\r\n [3, 4, 5, 2, 1],\r\n [5, 6, 4, 3, 2]]\r\nincorrect4 = [['a', 'b', 'c'],\r\n ['b', 'c', 'a'],\r\n ['c', 'a', 'b']]\r\nincorrect5 = [[1, 1.5],\r\n [1.5, 1]]\r\n\r\n# ================================ TESTING ============== WORKS: 13 Jan-00:12 ==\r\n\r\n# print(check_sudoku(correct))\r\n# >>> True\r\n# print(check_sudoku(incorrect))\r\n# >>> False\r\n# print(check_sudoku(incorrect2))\r\n# >>> False\r\n# print(check_sudoku(incorrect3))\r\n# >>> False\r\n# print(check_sudoku(incorrect4))\r\n# >>> False\r\n# print(check_sudoku(incorrect5))\r\n# >>> False\r\n\r\nif __name__ == \"__main__\":\r\n pass\r\n","sub_path":"sudoku_check.py","file_name":"sudoku_check.py","file_ext":"py","file_size_in_byte":3237,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"617064535","text":"import random\nimport numpy as np\n\nfrom keras.optimizers import Adam\nfrom keras import backend as K\n\nimport gym\n\nfrom qmemory import qmem\nfrom qagents import agentQNet\nfrom experiment import lab\n\nseed = 16\n\n\nenv = gym.make('LunarLander-v2')\n\nnp.random.seed(seed)\nrandom.seed(seed)\nenv.seed(seed)\n\ndef hubert_loss(y_true, y_pred):\n err = y_pred - y_true\n return K.mean( K.sqrt(1+K.square(err))-1, axis=-1 )\n\ndef epsdecay(epsilon):\n if epsilon>0.13:\n return epsilon*0.98\n if epsilon<0.09:\n return epsilon*0.95\n return epsilon*0.999\n\n\nnet_param = {'hidden_layers': [ (50, 'relu'), (40, 'relu')],\n 'loss': hubert_loss,'optimizer': Adam(lr=0.0005)}\n\nlander = agentQNet(name='DQN_3', dim_state=env.observation_space.shape[0], n_action=env.action_space.n, epsdecay=0.98, minepsilon=0.0, epdecayfunc=epsdecay,\n memsize=300000, batch_size=32, size2train=1000, gamma=0.99, update_freq=600, priority_sampling=True, net_params=net_param)\n\nlab_lander = lab(name='train', env=env, agent=lander,num_episodes=600)\nlab_lander.run()\n\nenv2 = gym.make('LunarLander-v2')\nenv2.seed(seed)\nlab_lander = lab(name='test', env=env2, agent=lander,num_episodes=100)\nlab_lander.run(testmode=True)","sub_path":"lunar_lander/test4.py","file_name":"test4.py","file_ext":"py","file_size_in_byte":1224,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"313726752","text":"import flask\nfrom flask import jsonify, request\n\napp = flask.Flask(__name__)\napp.config[\"DEBUG\"] = True\n\nimport numpy as np\nfrom datasets import NMvW\nfrom engines import rand_engine\n\nengine_list = sorted([rand_engine])\n\ndataset_list = sorted([NMvW])\n\nwith open(\"home.html\") as handle:\n home_html = handle.read() \n\n\n@app.route(\"/\", methods=[\"GET\"])\n@app.route(\"/api/v1/\", methods=[\"GET\"])\ndef home():\n # prepare data here\n return home_html\n\n@app.route(\"/datasets\", methods=[\"GET\"])\ndef list_datasets():\n return jsonify([d.to_dict() for d in dataset_list])\n \n\n@app.route(\"/datasets//autocomplete\", methods=[\"GET\"])\ndef 
autocomplete(datasetID):\n param_name = request.args[\"attribute\"]\n kw = request.args[\"keyword\"]\n \n d = dataset_list[int(datasetID)]\n try:\n param = d.params[param_name]\n except KeyError:\n raise KeyError(f\"There is no DatasetParam {param_name}\")\n \n list_of_options = param.autocomplete(d.data, param_name, kw)\n return jsonify(list_of_options)\n\n\n@app.route(\"/engines\", methods=[\"GET\"])\ndef get_engines():\n return jsonify([e.to_dict() for e in engine_list])\n \n@app.route(\"/engines//html\", methods=[\"GET\"])\ndef get_engine_detail(engineID):\n cur_engine = engine_list[int(engineID)]\n return cur_engine.summary()\n\n\n@app.route(\"/objects//search\", methods=[\"GET\"])\ndef search_objects(datasetID):\n # object\n kws = request.args[\"object_keywords\"]\n start = request.args[\"object_start_date\"]\n end = request.args[\"object_end_date\"]\n obj_params = [a for a in request.args if a.startswith(\"object_param_\")]\n \n # engine\n engine_id = request.args[\"engine_id\"]\n engine_min = request.args[\"engine_min_score\"]\n engine_max = request.args[\"engine_max_score\"]\n engine_params = [a for a in request.args if a.startswith(\"engine_param_\")]\n \n #vocabulary\n vocab = request.args[\"vocabulary_terms\"]\n \n # produces list of Result\n \n print(type(request.args))\n return jsonify(request.args)\n \n \n@app.route(\"/objects//details/\", methods=[\"GET\"])\ndef get_object_details(datasetID, objectID):\n d = dataset_list[int(datasetID)]\n rand_id = np.random.choice(d.objectIDs)\n return jsonify(d.get_object(rand_id)) # ATTENTION: random ID\n\n\n# @app.errorhandler(404)\n# def page_not_found(e):\n# return render_template('404.html'), 404\n\n\nif __name__ == \"__main__\":\n app.run()\n","sub_path":"v0_0/app_backup.py","file_name":"app_backup.py","file_ext":"py","file_size_in_byte":2442,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"480673727","text":"from django import template\n\nregister = template.Library()\n\n\n@register.filter(name='get_type')\ndef the_type_getting_function(obj):\n return type(obj)\n\n\n@register.filter(name='hasattr')\ndef the_has_attr_function(obj, attribute):\n if hasattr(obj, attribute):\n return True\n else:\n return False","sub_path":"sportsmerchandise/templatetags/sportsmerch.py","file_name":"sportsmerch.py","file_ext":"py","file_size_in_byte":312,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"141383961","text":"import os\nimport sys\nfrom argparse import ArgumentParser\nfrom Resource.reply import SetMenuMessage_Object , send_flex\nfrom flask import Flask, request, abort ,send_from_directory\nfrom linebot import (\n LineBotApi, WebhookHandler\n)\nfrom linebot.exceptions import (\n InvalidSignatureError\n)\nfrom linebot.models import *\n\napp = Flask(__name__)\nrichmenu = {\n 'menu1':'richmenu-28e64a0f986d964993257c63ab2c46a9',\n 'menu2':'richmenu-3d8a86629113fd74959bb3f88e0ac7f4'\n}\n\n# get channel_secret and channel_access_token from your environment variable\nchannel_secret = '85d6c7b172bc420bcf6fb1e60601b9ae'\nchannel_access_token = \"zVxemPBnRLwSfHpWiziWpsAHCYkqGTYe9FOlirMAUI03NRtmDIyXyMrd4WcBTGCGEN5i/3SnVi/TsZs9hmxE08UDme4HaYtk45S+0mBJO6VxRQeaWYyYSGyBngtPBL7b7AnjFD6p9UZWNXlP3c6BqwdB04t89/1O/w1cDnyilFU=\"\n\n\nline_bot_api = LineBotApi(channel_access_token)\nhandler = WebhookHandler(channel_secret)\n\n\n@app.route(\"/webhook\", methods=['POST'])\ndef callback():\n # get X-Line-Signature header value\n signature = 
request.headers['X-Line-Signature']\n\n # get request body as text\n body = request.get_data(as_text=True)\n app.logger.info(\"Request body: \" + body)\n\n # handle webhook body\n try:\n handler.handle(body, signature)\n except InvalidSignatureError:\n abort(400)\n\n return 'OK'\n\n\n@handler.add(MessageEvent, message=TextMessage)\ndef message_text(event):\n print(event)\n Text_fromUser = event.message.text\n reply_Token = event.reply_token\n\n if 'เช็คราคา' in Text_fromUser:\n from Resource.bxAPI import GetBxPrice\n msg = GetBxPrice(Number_to_get=10)\n from Resource.FlexMessage import setCarousel\n flex = setCarousel(msg) ### setup carousel\n flex = SetMenuMessage_Object(flex) ### setup flex\n send_flex(reply_Token , flex , channel_access_token)\n \n elif 'เช็คข่าวสาร' in Text_fromUser:\n from Resource.GetNews import get_cnn_news\n data = get_cnn_news()\n from Resource.FlexMessage import news_setbubble\n msg = news_setbubble(data['title'],data['description'],data['url'],data['image_url'])\n flex = SetMenuMessage_Object(msg)\n send_flex(reply_Token , flex , channel_access_token)\n \n else :\n message = '' ### message ที่เราจะส่งกลับไปให้ยูสเสอ\n text = []\n user_data = None\n\n from dialogflow_uncle import detect_intent_texts\n project_id = os.getenv('DIALOGFLOW_PROJECT_ID')\n session_id = event.source.user_id ## get user id\n message = detect_intent_texts(project_id,session_id,Text_fromUser,'th')\n \n\n for i in message['fulfillment_messages']: ### เพิ่มจากในคลิบ\n txt = TextSendMessage(text=i)### เพิ่มจากในคลิบ\n text.append(txt)### เพิ่มจากในคลิบ\n\n ## adding imagemap message เรียนอะไร\n if message['action'] == 'register':\n from MessageTemplate.Imgmap import selectCourse\n imagemap = Base.get_or_new_from_json_dict(selectCourse(),ImagemapSendMessage)\n text.append(imagemap)\n ### prepare imagemap message to send\n\n #### เรียนที่ไหน\n elif message['action'] == 'selectcourses':\n from MessageTemplate.Imgmap import selectWhere\n imagemap = Base.get_or_new_from_json_dict(selectWhere(),ImagemapSendMessage)\n text.append(imagemap)\n ### prepare Imagemap message to send\n\n ### ให้ยูสเซอลงเวลา\n elif message['action'] == 'selecttype':\n from MessageTemplate.Imgmap import selectTime\n msg = Base.get_or_new_from_json_dict(selectTime(),TemplateSendMessage)\n text.append(msg)\n \n #### confirm message\n elif message['action'] == 'selectmonth':\n from MessageTemplate.Imgmap import confirmRegis\n msg = Base.get_or_new_from_json_dict(confirmRegis(),TemplateSendMessage)\n text.append(msg)\n \n ### sumarize message\n elif message['action'] == 'confirm':\n from google.protobuf.json_format import MessageToDict\n data = message['parameters']\n data = MessageToDict(data)\n\n from MessageTemplate.Imgmap import GetStudentCard\n\n\n ####เนื้องจากการส่งข้อความแบบ flex2019 SDK python ยังไม่รองรับทำให้เราต้องกลับมาส่งแบบ manual\n msg = []\n for i in text:\n Dict = i.as_json_dict()\n msg.append(Dict)\n msg.append(GetStudentCard(data['courses']))\n flex = SetMenuMessage_Object(msg)\n send_flex(reply_Token,file_data = flex,bot_access_key = channel_access_token)\n #### กรณีเข้าโหมดถามตอบ\n elif message['action'] == 'q&a':\n line_bot_api.link_rich_menu_to_user(session_id,richmenu['menu2'])\n #### กรณีกำลังถามตอบ\n elif message['action'] == 'qa.qa-fallback':\n from MessageTemplate.Imgmap import AnwserMsg\n\n from Resource.wolf import search_wiki\n\n result = search_wiki(Text_fromUser)\n\n ### set msg + answer\n Flex_Ans = AnwserMsg(Text_fromUser,result)\n\n ### set quick reply\n qbtn = 
QuickReplyButton(image_url='https://cdn0.iconfinder.com/data/icons/online-education-butterscotch-vol-2/512/Questions_And_Answers-512.png'\n ,action=MessageAction('หยุดถาม','หยุดถาม'))\n q = QuickReply(items=[qbtn])\n\n ### set text as json\n new_text1 = TextSendMessage(text='ลุงขอหาแปรป...').as_json_dict()\n new_text2 = TextSendMessage(text='อยากถามต่อไหม ถ้าไม่... กดปุ่ม หยุดถามได้เลยจร้า',quick_reply=q).as_json_dict()\n\n ### send msg with quick reply\n flex = SetMenuMessage_Object(Message_data=[new_text1,Flex_Ans,new_text2])\n\n r = send_flex(Reply_token,file_data = flex,bot_access_key = channel_access_token)\n #### กรณีหยุดถาม\n elif message['action'] == 'qa.qa-custom':\n line_bot_api.link_rich_menu_to_user(session_id,richmenu['menu1'])\n text = TextSendMessage(text='แล้วกลับมาถามใหม่น่าาาา...')\n line_bot_api.reply_message(reply_Token,text)\n print(message['action'])\n line_bot_api.reply_message(reply_Token,text)\n \n \n return 'OK'\n\n@handler.add(FollowEvent)\ndef LinkRichmenu(event):\n userid = event.source.user_id ### get user id มาก่อน\n disname = line_bot_api.get_profile(user_id=userid).display_name ## display name\n\n qbtn1 = QuickReplyButton(image_url='https://i1.wp.com/sbo2com.net/wp-content/uploads/2018/10/register-icon.png?fit=250%2C242&ssl=1',\n action=MessageAction(label='สมัครเรียนไพทอน',text='สนใจสมัครเรียนไพทอน'))\n qbtn2 = QuickReplyButton(action=MessageAction(label='ทดสอบความรู้บอท',text='ไหนขอทดสอบความรู้หน่อยซฺิ'))\n q_reply = QuickReply(items=[qbtn1,qbtn2])\n\n text = TextSendMessage(text='ยินดีต้อนรับคุณ {} เข้าสู่บริการ ลุงวิศวกรสอนคำนวณ'.format(disname),quick_reply=q_reply)\n reply_Token = event.reply_token\n line_bot_api.reply_message(reply_Token,text)\n\n line_bot_api.link_rich_menu_to_user(userid,richmenu['menu1'])\n\n@handler.add(PostbackEvent) ### สำหรับรับ date time picker\ndef GetPostback(event):\n Reply_token = event.reply_token\n Text_fromUser = event.postback.params['datetime']\n print(Text_fromUser)\n\n month = int(Text_fromUser[5:7])\n print(month)\n\n import datetime\n\n monthinteger = month\n\n month = str(datetime.date(1900, monthinteger, 1).strftime('%B'))\n if month == 'October':\n month = 'ตุลาคม'\n elif month == 'December':\n month = 'ธันวาคม'\n\n print (month)\n\n message = '' ### message ที่เราจะส่งกลับไปให้ยูสเสอ\n from dialogflow_uncle import detect_intent_texts\n project_id = os.getenv('DIALOGFLOW_PROJECT_ID')\n session_id = event.source.user_id ## get user id\n message = detect_intent_texts(project_id,session_id,month,'en')\n \n text = []\n user_data = None\n\n for i in message['fulfillment_messages']:### เพิ่มจากในคลิบ\n txt = TextSendMessage(text=i)### เพิ่มจากในคลิบ\n text.append(txt)### เพิ่มจากในคลิบ\n \n if message['action'] == 'selectmonth':\n from MessageTemplate.Imgmap import summary_msg\n data = message['parameters']\n from google.protobuf.json_format import MessageToDict\n data = MessageToDict(data)\n when = data['month']\n where = data['Type']\n course = data['courses']\n msg = Base.get_or_new_from_json_dict(summary_msg(when,where,course),FlexSendMessage)\n text.append(msg)### เพิ่มจากในคลิบ\n \n\n\n res = line_bot_api.reply_message(Reply_token,messages=text)\n\n@app.route('/MessageTemplate//1040')\ndef serveimage(image):\n return send_from_directory('MessageTemplate',image)\n\n### serve simple image\n@app.route('/MessageTemplate/')\ndef getpicbyname_static(PICNAME):\n print('PIC : ' + PICNAME)\n path = 'MessageTemplate'\n return send_from_directory(path,PICNAME)\n\n\nif __name__ == \"__main__\":\n 
os.environ['DIALOGFLOW_PROJECT_ID'] = 'loongwissawa-uixaxu'\n os.environ['GOOGLE_APPLICATION_CREDENTIALS'] = 'Credentials.json'\n app.run(port=8000)","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":10207,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"3521645","text":"\"# -- coding: utf-8 --\"\nimport maya.OpenMayaUI as omui\nfrom PySide import QtGui\nfrom PySide import QtCore\nfrom shiboken import wrapInstance\n\n\n# noinspection PyArgumentList\ndef maya_main_window():\n main_window_ptr = omui.MQtUtil.mainWindow()\n return wrapInstance(long(main_window_ptr), QtGui.QWidget)\n\n\nclass StartShotGUI(QtGui.QDialog):\n\n def __init__(self, parent=maya_main_window()):\n super(StartShotGUI, self).__init__(parent)\n self.setWindowTitle('START SETUP ASSET')\n self.returnedInfo = []\n self.setWindowFlags(QtCore.Qt.Tool)\n self.setAttribute(QtCore.Qt.WA_DeleteOnClose)\n self.create_layout()\n self.create_connections()\n self.exec_()\n\n def create_layout(self):\n # combo box\n self.tipoitemCBox = QtGui.QComboBox()\n self.tipoitemCBox.insertItem(0, \"char\")\n self.tipoitemCBox.insertItem(1, \"prop\")\n self.tipoitemCBox.insertItem(1, \"set\")\n # nombre\n self.nameLine = QtGui.QLineEdit()\n self.nameLine.setPlaceholderText(\"Asset Name\")\n # btn\n self.clearFirstRowBtn = QtGui.QPushButton(\"CLEAR!!!\")\n self.goBtn = QtGui.QPushButton(\"GO!!!\")\n # layout\n self.main_layout = QtGui.QHBoxLayout()\n self.grid_layout = QtGui.QGridLayout()\n self.main_layout.insertLayout(0, self.grid_layout)\n # widgets\n self.grid_layout.addWidget(self.tipoitemCBox, 0, 0)\n self.grid_layout.addWidget(self.clearFirstRowBtn, 0, 7)\n self.grid_layout.addWidget(self.nameLine, 0, 1, 1, 6)\n self.grid_layout.addWidget(self.goBtn, 1, 0, 1, 8)\n # set\n self.setLayout(self.main_layout)\n self.setGeometry(400, 300, 250, 150)\n self.resize(330, 100)\n \n def create_connections(self):\n self.clearFirstRowBtn.clicked.connect(self.clear_first_row)\n self.goBtn.clicked.connect(self.doIt)\n \n def clear_first_row(self):\n self.nameLine.setText(\"\")\n self.tipoitemCBox.setCurrentIndex(0)\n self.setFocus()\n \n def doIt(self):\n self.returnedInfo = [self.tipoitemCBox.currentText().encode('ascii'),\n str(self.nameLine.text().encode('ascii'))]\n StartShotGUI.close(self)\n\n def closeEvent(self, event):\n event.accept()\n\n\nif __name__ == '__main__':\n try:\n gui.close()\n except:\n pass\n gui = StartShotGUI()\n gui.exec_()\n","sub_path":"maya/stp/TUTU_maya_stp_StartShotGUI.py","file_name":"TUTU_maya_stp_StartShotGUI.py","file_ext":"py","file_size_in_byte":2432,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"573959480","text":"#import\r\nimport pandas\r\nfrom pandas import ExcelWriter\r\nfrom pandas.core.frame import DataFrame\r\n#create a dictionary\r\ndata = {\r\n 'Firstname': [\"Sathish\", \"Satvik\", \"Shahish\"],\r\n 'Lastname':[\"Kumar\", \"Darsh\", \"Sharma\"],\r\n 'Email': [\"saku@gmail.com\", \"sadar@gmail.com\", \"shsh@gmail.com\"],\r\n 'Phonenumber':[\"8768996509\", \"8765469877\", \"9754205464\"]\r\n}\r\n#create dataframe\r\ndataframe=pandas.DataFrame(data)\r\nprint(dataframe)\r\n#write to excel\r\nwriter = ExcelWriter('sample.xlsx')\r\ndataframe.to_excel(writer, 'Sheet1', 
index=False)\r\n#save\r\nwriter.save()","sub_path":"Python/Activities/activity19.py","file_name":"activity19.py","file_ext":"py","file_size_in_byte":559,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"502818257","text":"#!/usr/bin/env python\n\nimport os\nimport youtube_dl\nydl = youtube_dl.YoutubeDL({'outtmpl': '%(id)s%(ext)s'})\n\n#from time import *\nfrom config import *\nimport YoutubeFullUrl\nfrom daemon_state import *\nimport logging\nlogger = logging.getLogger(\" | RaspberryCast | \")\n\ndef launchvideo(url, sub, slow):\n\t#Waking up screen\n\tos.system(\"echo -ne '\\033[9;0]' >/dev/tty1\")\n\n\tif state() == \"1\" : \n\t\tlogger.debug('CASTING: OMXPlayer already running, killing previous instance.')\n\t\tos.system(\"killall omxplayer.bin\")\n\telif state() == \"2\" : \n\t\tlogger.debug('CASTING: Youtube-dl already running, killing previous instance.')\n\t\tos.system(\"killall youtube-dl\")\n\n\tlogger.info('CASTING: Trying to retrieve source video URL...')\t\n\n\tout = return_full_url(url, sub, slow)\n\n\t#logger.debug(\"CASTING: Full video URL fetched.\")\n\tlogger.debug(\"CASTING: Full video URL is : \" + out)\t\n\n\tlogger.info(\"CASTING: Starting OMXPlayer now.\")\n\t\n\tif sub == True :\n\t\tlogger.debug('CASTING: Starting OMX with subtitles.')\n\t\tomx = \"omxplayer -b -r -o both '\"+out+\"' --subtitles subtitle.srt < /tmp/cmd\"\n\t\t\n\telse :\n\t\tlogger.debug('CASTING: Starting OMX without subtitles.')\n\t\tomx = \"omxplayer -b -r -o both '\"+out+\"' < /tmp/cmd\"\n\t\t\t\n\ttry :\n\t\tos.system(omx+\" &\")\n\texcept :\n\t\tlogger.error('CASTING: Unable to start OMX. Giving up.')\n\n\tos.system(\"echo . > /tmp/cmd &\")\n\ndef return_full_url(url, sub, slow):\n\tif (url[-4:] in (\".avi\", \".mkv\", \".mp4\", \".mp3\")) or (sub == True):\t\n\t\tlogger.info('CASTING: Direct video URL, no need to use youtube-dl.')\n\t\treturn url\n\n\t\"\"\"\n\tif (\"youtu\" in url) and (slow == False):\n\t\tlogger.debug('CASTING: Youtube link detected, extracting url in maximal quality.')\n\t\treturn YoutubeFullUrl.get_flux_url(url) #A lot faster than youtube-dl \"\"\"\n\n\twith ydl: # Downloading youtube-dl info\n\t    result = ydl.extract_info(\n\t        url,\n\t        download=False # We just want to extract the info\n\t    )\n\n\tif 'entries' in result: # Can be a playlist or a list of videos\n\t    video = result['entries'][0]\n\telse:\n\t    video = result # Just a video\n\n\tif \"youtu\" in url:\n\t\tif slow == True:\n\t\t\tfor i in video['formats']:\n\t\t\t\tif i['format_id'] == \"18\":\n\t\t\t\t\tlogger.debug(\"CASTING: Youtube link detected, extracting url in 360p\")\n\t\t\t\t\treturn i['url']\n\t\telse:\n\t\t\tlogger.debug('CASTING: Youtube link detected, extracting url in maximal quality.')\n\t\t\treturn video['url']\n\telif \"vimeo\" in url:\n\t\tif slow == True:\n\t\t\tfor i in video['formats']:\n\t\t\t\tif i['format_id'] == \"http-360p\":\n\t\t\t\t\tlogger.debug(\"CASTING: Vimeo link detected, extracting url in 360p\")\n\t\t\t\t\treturn i['url']\n\t\telse:\n\t\t\tlogger.debug('CASTING: Vimeo link detected, extracting url in maximal quality.')\n\t\t\treturn video['url']\n\telse :\n\t\treturn video['url']\n","sub_path":"process.py","file_name":"process.py","file_ext":"py","file_size_in_byte":2691,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"235509777","text":"from bs4 import BeautifulSoup\nimport requests\nimport json\nimport pandas as pd\nimport base64\nimport io\n\nimport 
os\nimport pickle\nimport uuid\nimport re\n\nimport streamlit as st\n\n##### Web Scraping\n# Information to scrape\n# CAS Number, Compound name, Synonym, Theoretical MW, Formula, SMILES, Structure, InChIKey\n\ncompound_cid = '2244' # user input, CID number\n\nparent_url = 'https://pubchem.ncbi.nlm.nih.gov/rest/pug_view/data/compound/'\nurl = ''.join([parent_url, compound_cid, '/JSON'])\n\nresponse = requests.get(url) # 200 means OK\njson_file = response.json()\n\n\n##### Get Information\n# Define function to get information\n\ndef create_library(input_list): \n \n # To store information\n data = {\n 'CAS #': [], \n 'Compound Name': [],\n 'Synonym': [],\n 'Theory MW': [],\n 'Formula': [],\n 'Class': [],\n 'Comment': [],\n 'Column': [],\n 'RT': [],\n 'Chromatogram Comment': [],\n 'Adduct Ion': [],\n 'Adduct m/z': [],\n 'Precursor Ion': [],\n 'SP ID of Precursor Ion': [],\n 'MS Stage': [],\n 'Spectrum Comment': [],\n 'Ionization': [],\n 'Mass Range': [],\n 'Collision Energy': [],\n 'Collision Gas Vol.': [],\n 'Polarity': [],\n 'Instrument': [],\n 'Data Folder': [],\n 'Data Filename': [],\n 'Acquired By': [],\n 'Acquired': [],\n 'Sample Info': [],\n 'SMILES': [],\n 'Structure': [],\n 'InChIKey': []\n }\n \n library = pd.DataFrame(data)\n \n cas_list = []\n name_list = []\n iupac_list = []\n inchi_key_list = []\n smiles_list = []\n molecular_formula_list = []\n theoretical_mw_list = []\n \n for compound_cid in input_list:\n \n parent_url = 'https://pubchem.ncbi.nlm.nih.gov/rest/pug_view/data/compound/'\n url = ''.join([parent_url, compound_cid, '/JSON'])\n\n # Get response\n response = requests.get(url)\n \n # Get json\n json_file = response.json()\n\n # Get information for library\n try:\n cas = json_file['Record']['Section'][2]['Section'][3]['Section'][0]['Information'][0]['Value']['StringWithMarkup'][0]['String']\n except:\n cas = None\n \n try:\n name = json_file['Record']['RecordTitle']\n except:\n name = None\n \n try:\n iupac = json_file['Record']['Section'][2]['Section'][1]['Section'][0]['Information'][0]['Value']['StringWithMarkup'][0]['String']\n except:\n iupac = None\n \n try:\n inchi_key = json_file['Record']['Section'][2]['Section'][1]['Section'][2]['Information'][0]['Value']['StringWithMarkup'][0]['String']\n except:\n inchi_key = None\n \n try:\n smiles = json_file['Record']['Section'][2]['Section'][1]['Section'][3]['Information'][0]['Value']['StringWithMarkup'][0]['String']\n except:\n smiles = None\n \n try:\n molecular_formula = json_file['Record']['Section'][2]['Section'][2]['Information'][0]['Value']['StringWithMarkup'][0]['String']\n except:\n molecular_formula = None\n \n try:\n theoretical_mw = json_file['Record']['Section'][3]['Section'][0]['Section'][0]['Information'][0]['Value']['StringWithMarkup'][0]['String']\n except:\n molecular_formula = None\n \n cas_list.append(cas)\n name_list.append(name)\n iupac_list.append(iupac)\n theoretical_mw_list.append(theoretical_mw)\n molecular_formula_list.append(molecular_formula)\n smiles_list.append(smiles)\n inchi_key_list.append(inchi_key)\n \n library['CAS #'] = cas_list\n library['Compound Name'] = name_list\n library['Synonym'] = iupac_list\n library['Theory MW'] = theoretical_mw_list\n library['Formula'] = molecular_formula_list\n library['SMILES'] = smiles_list\n library['InChIKey'] = inchi_key_list\n \n return library\n\n##### Streamlit Interface\n\nst.header('Upload')\ninput_list = st.file_uploader('Upload text file with CID Numbers.',\n type=['txt'])\n\nif not input_list:\n st.text('You have not uploaded a file. 
Please upload a file. :)')\n \nif input_list is not None:\n\n # Output upload details\n st.success(\"File successfully uploaded!\")\n st.write('---')\n file_details = {'filename': input_list.name,\n 'filetype': input_list.type,\n 'filesize': input_list.size}\n st.write(file_details)\n st.write('---')\n \n input_list = pd.read_csv(input_list, header=None, names=['CID Number']).astype(str)['CID Number'].to_list()\n \n library = create_library(input_list)\n \n st.write(library)\n \n ##### Download button \n \n def download_button(object_to_download, download_filename, button_text, pickle_it=False):\n \"\"\"\n Generates a link to download the given object_to_download.\n Params:\n ------\n object_to_download: The object to be downloaded.\n download_filename (str): filename and extension of file. e.g. mydata.csv,\n some_txt_output.txt download_link_text (str): Text to display for download\n link.\n button_text (str): Text to display on download button (e.g. 'click here to download file')\n pickle_it (bool): If True, pickle file.\n Returns:\n -------\n (str): the anchor tag to download object_to_download\n Examples:\n --------\n download_link(your_df, 'YOUR_DF.csv', 'Click to download data!')\n download_link(your_str, 'YOUR_STRING.txt', 'Click to download text!')\n \"\"\"\n if pickle_it:\n try:\n object_to_download = pickle.dumps(object_to_download)\n except pickle.PicklingError as e:\n st.write(e)\n return None\n\n else:\n if isinstance(object_to_download, bytes):\n pass\n\n elif isinstance(object_to_download, pd.DataFrame):\n # object_to_download = object_to_download.to_csv(index=False)\n towrite = io.BytesIO()\n object_to_download = object_to_download.to_excel(towrite, encoding='utf-8', index=False, header=True)\n towrite.seek(0)\n\n # Try JSON encode for everything else\n else:\n object_to_download = json.dumps(object_to_download)\n\n try:\n # some strings <-> bytes conversions necessary here\n b64 = base64.b64encode(object_to_download.encode()).decode()\n\n except AttributeError as e:\n b64 = base64.b64encode(towrite.read()).decode()\n\n button_uuid = str(uuid.uuid4()).replace('-', '')\n button_id = re.sub('\\d+', '', button_uuid)\n\n custom_css = f\"\"\" \n \"\"\"\n\n dl_link = custom_css + f'{button_text}

'\n\n return dl_link\n\n filename = 'library.xlsx'\n download_button_str = download_button(library, filename, f'Download Library as Excel File', pickle_it=False)\n st.markdown(download_button_str, unsafe_allow_html=True)","sub_path":"app_2.py","file_name":"app_2.py","file_ext":"py","file_size_in_byte":8309,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"62472047","text":"class TextCalc:\n @classmethod\n def class_method(cls, price):\n assert cls.__name__ == TextCalc.__name__\n return int(price * 0.08)\n\n @staticmethod\n def static_method(price):\n return int(price * 0.08)\n\n\nprint(TextCalc.class_method(1000))\nprint(TextCalc.static_method(1000))\n","sub_path":"PyQ/34_3_2.py","file_name":"34_3_2.py","file_ext":"py","file_size_in_byte":304,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"361785981","text":"import requests\nimport json\n\n\n\nurl = 'http://172.16.152.150:8080/ins'\nusername = 'admin'\npassword = 'Cisco123'\n\nheaders = {\n 'Content-Type': 'application/json'\n}\n\npayload = {\n \"ins_api\": {\n \"version\": \"1.0\",\n \"type\": \"cli_show\",\n \"chunk\": \"0\",\n \"sid\": \"sid\",\n \"input\": \"show ip int brief\",\n \"output_format\": \"json\"\n }\n}\n\n\n\nresponse = requests.post(\n url,\n data=json.dumps(payload), #convert to string\n headers=headers,\n auth=(username,password),\n verify = False).json()\n\n\n#print(response)\n#print(json.dumps(response, indent = 2, sort_keys = True))\n#rdict = response\n\n#print(rdict['ins_api']['outputs']['output']['body']['TABLE_intf']['ROW_intf']['intf-name'])\n\n\n########### login ###########\n\nauth_url = \"https://172.16.152.150/api/aaaLogin.json\"\nauth_body = {\n \"aaaUser\":{\n \"attributes\":{\n \"name\":\"admin\",\n \"pwd\":\"Cisco123\"\n }\n }\n}\nauth_response = requests.post(\n auth_url,\n data=json.dumps(auth_body), #convert to string\n timeout=5,\n verify = False).json()\n\ntoken = auth_response['imdata'][0]['aaaLogin']['attributes']['token']\n\ncookies={}\ncookies['APIC-cookie']=token\nprint(cookies)\n","sub_path":"nxapi-9k/test_auth.py","file_name":"test_auth.py","file_ext":"py","file_size_in_byte":1188,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"46294293","text":"import re\n\n\n# LaTeX-Templates\ndef make_header(title, subtitle, author, language, theme=0):\n # Select Theme: 0 = Beamer 1 = Theme Dresden 2 = TU Dresden CD\n used_theme = '\\\\documentclass[]{beamer}'\n if theme == 1:\n used_theme = '''\\documentclass[]{beamer}\n\\\\usetheme{Dresden}'''\n elif theme == 2:\n used_theme = '\\documentclass[]{tudbeamer}'\n\n header = '{theme}'.format(theme=used_theme) + '''\n\\\\usepackage[utf8]{inputenc}\n\\\\usepackage[T1]{fontenc}\n\\\\usepackage{graphicx}\n\\\\usepackage{listings}\n\\\\usepackage{verbatim}\n\\\\usepackage{menukeys}\n\\\\usepackage{ngerman}\n\\\\usepackage{listings}\n\n\\\\definecolor{codegray}{gray}{0.97}\n\\\\definecolor{stringgreen}{rgb}{0.0, 0.7, 0.6}\n\n\\\\lstset{language=''' + '{language}'.format(language=language) + ''',\n backgroundcolor=\\\\color{codegray},\n frame=single,\n basicstyle=\\\\ttfamily\\\\footnotesize,\n keywordstyle=\\\\color{orange},\n commentstyle=\\\\color{gray},\n numberstyle=\\\\tiny\\color{gray},\n stringstyle=\\\\color{stringgreen},\n tabsize=4,\n showstringspaces=false,\n breaklines=true,\n keepspaces=true,\n numbers=left,\n}\n\n\n\\\\title{''' + '{title}'.format(title=title) + '''}\n\\\\subtitle{''' + 
'{subtitle}'.format(subtitle=subtitle) + '''}\n\\\\author{''' + '{author}'.format(author=author) + '''}\n\n\\\\begin{document}\n\n\\\\maketitle\n\n% -------------------------- Table of Contens ---------------------------------\n\\\\begin{frame}\n\\\\tableofcontents\n'''\n return header\n\n\ndef make_footer():\n footer = '''\\\\end{frame}\n\\\\end{document}\n'''\n return footer\n\n\ndef make_section(sectiontype, name):\n name = name\n section = '''\\\\end{frame}\n\n% -------------------------- ''' + '{name}'.format(name=name) + ''' ---------------------------------\n\\\\''' + '{sectiontype}'.format(sectiontype=sectiontype) + '{' + '{name}'.format(\n name=name) + '''}\n\\\\begin{frame}[fragile]{''' + '{name}'.format(name=name) + '''}\n'''\n return section\n\n\ndef make_quote(text):\n quote = '''\\\\begin{quote}\n''' + '{text}'.format(text=text) + '''\n\\\\end{quote}\n'''\n return quote\n\n\ndef make_bold(text):\n bold = '''{\\\\textbf{''' + '{text}'.format(text=text) + '''}}'''\n return bold\n\n\ndef make_italic(text):\n italic = '''{\\\\textit{''' + '{text}'.format(text=text) + '''}}'''\n return italic\n\n\ndef make_code(text):\n code = '''\\\\begin{lstlisting}\n''' +'{text}'.format(text=text) + '''\n\\\\end{lstlisting}\n'''\n return code\n\n\ndef make_code_single(text, language):\n code = '\\\\lstinline{' + '{code}'.format(code=text) + '} '\n return code\n\n\n# Patterns\n\n# Headings\nre_h6 = re.compile('######.*')\nre_h5 = re.compile('#####.*')\nre_h4 = re.compile('####.*')\nre_h3 = re.compile('###.*')\nre_h2 = re.compile('##.*')\nre_h1 = re.compile('#.*')\n\n# Paragraph\nre_two_white = re.compile('.* ')\nre_new_line = re.compile('.*\\\\n')\n\n# Quotes\nre_quote = re.compile('>.*')\n\n# Styling\nre_bold = re.compile('.*((__.*__)|(\\\\*\\\\*.*\\\\*\\\\*)).*')\nre_bold_under = re.compile('.*__.*__.*')\nre_italic = re.compile('.*(( _.*_ )|(\\\\*.*\\\\*)).*')\nre_italic_under = re.compile('.*_.*_.*')\n\n# List\nre_list_unsorted = re.compile('[ ]*((\\\\*)|(-)) .*')\nre_list_sorted = re.compile('[ ]*[\\d]+ .*')\n\n# Code\nre_code_single = re.compile('.*\\\\`.*\\\\`.*')\nre_code_multiple = re.compile('\\\\`\\\\`\\\\`.*')\nre_python_comment = re.compile('# .*')\n\n# Links\nre_link = re.compile('.*\\\\[.*\\\\]\\\\(.*\\\\).*')\n\n# Strikethrough\nre_strikethrough = re.compile('.*~~.*~~.*')\n\n# Table\nre_table = re.compile('\\\\|?[.*\\\\|]+.*')\nre_table_head = re.compile('\\\\|?[\\\\:\\\\-\\\\|]+')\n","sub_path":"template.py","file_name":"template.py","file_ext":"py","file_size_in_byte":3495,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"498458702","text":"\"\"\"Data loading\"\"\"\n\nfrom import_images import *\n\n\"\"\"Data preparation for patch based classification\nBatch cropping functions\nCombines padding where necessary along with square cropping of variable \nsize around labeled pixels. This process creates a new image training set \nwhich provides a patch-based classification method for each labeled pixel. 
\nEvery patch will be classified according to the label of its central pixel.\n\"\"\"\nfrom batch_crop import *\n\n\"\"\"CUDA\"\"\"\n\nimport torch\n\ncuda = torch.cuda.is_available()\nif cuda:\n torch.cuda.empty_cache()\n device = torch.device(\"cuda\") \n print(\"\\nGPU is available\")\nelse:\n device = torch.device(\"cpu\")\n print(\"\\nGPU not available, CPU used\")\n\n\"\"\"Dataset\"\"\"\n\nfrom dataset import *\n\n\"\"\"Training tools\"\"\"\nfrom training import trainNN\n\n\"\"\"Settings and hyperparameters\"\"\"\nfrom config import *\n\n\"\"\"Models\"\"\"\nfrom models import *\nmodel = models[selected_model].to(device)\nif pre_model:\n pre_model.to(device)\n\nfrom torchsummary import summary\nif not pretrained_encoder:\n print(summary(model, (176, patch_size, patch_size)))\n\n\"\"\"Train / Test split\nIn this section, patches of selected dimensions are created and split into train / test / validation sets. First, exclusive subsets of each image are reserved as banks of train / test / validation patches, so that patches from different sets cannot overlap; overlapping patches would leak information between the sets and give misleadingly good results on the test images. The images are split horizontally, since such a split best preserves the presence of all classes in each set.\n\"\"\"\n\nif not train_all:\n \n train = [dioni[:, dioni.shape[1]//4:, :], loukia[:, loukia.shape[1]//4:, :]]\n \n train_gt = [dioni_gt[:, dioni_gt.shape[1]//4:, :], \n loukia_gt[:, loukia_gt.shape[1]//4:, :]]\n \nelse: # train over all of dioni and loukia\n \n train = [dioni, loukia]\n \n train_gt = [dioni_gt, loukia_gt]\n \n \nval = [dioni[:, dioni.shape[1]//8:dioni.shape[1]//4, :],\n loukia[:, loukia.shape[1]//8:loukia.shape[1]//4, :]]\n\ntest = [dioni[:, :dioni.shape[1]//8, :],\n loukia[:, :loukia.shape[1]//8, :]]\n\n\nval_gt = [dioni_gt[:, dioni_gt.shape[1]//8:dioni_gt.shape[1]//4, :],\n loukia_gt[:, loukia_gt.shape[1]//8:loukia_gt.shape[1]//4, :]]\n\ntest_gt = [dioni_gt[:, :dioni_gt.shape[1]//8, :],\n loukia_gt[:, :loukia_gt.shape[1]//8, :]]\n\nX_train = []\ny_train = []\nX_val = []\ny_val = []\nX_test = []\ny_test = []\ncoords = []\n\nif supervision:\n criterion = torch.nn.CrossEntropyLoss().to(device)\n \n # patch training set\n for (img, gt) in zip(train, train_gt):\n patch, label = patches_supervised(img=img, gt=gt, size=patch_size)\n X_train.extend(patch)\n y_train.extend(label)\n \n # patch validation set\n for (img, gt) in zip(val, val_gt):\n patch, label = patches_supervised(img=img, gt=gt, size=patch_size)\n X_val.extend(patch)\n y_val.extend(label)\n\n # patch test set\n for (img, gt) in zip(test, test_gt):\n patch, label = patches_supervised(img=img, gt=gt, size=patch_size)\n X_test.extend(patch)\n y_test.extend(label)\n \n \nelse: # unsupervised = self supervised pretraining\n \n criterion = torch.nn.MSELoss().to(device)\n\n for img in train:\n # keep the coords so each patch can be mapped back to its source pixel at inference time\n patch, coord_set = patches_unsupervised(img=img, size=patch_size)\n X_train.extend(patch)\n coords.extend(coord_set)\n\n\"\"\"Dataloaders\"\"\"\n\n## Dataset\ntrain = HyRank(X=X_train, y=y_train, transform=available_transformations, \n supervision=supervision)\nval = HyRank(X=X_val, y=y_val, transform=False, supervision=supervision)\ntest = HyRank(X=X_test, y=y_test, transform=False, supervision=supervision)\n\n# Dataloaders\nfrom torch.utils.data import DataLoader\nn = workers\ntrainloader = DataLoader(train, batch_size=batch, num_workers=n, 
shuffle=True)\nvalloader = DataLoader(val, batch_size=batch, num_workers=n, shuffle=False)\ntestloader = DataLoader(test, batch_size=batch, num_workers=n, shuffle=False)\n \n\"\"\"Training\"\"\"\n\ntorch.cuda.empty_cache()\n\n# optimizer\noptimizer = torch.optim.Adam(model.parameters(), lr=lr)\n\n# Training\nylimit = 1.5 if supervision else 0.003\ntrainNN(model=model, patience=patience, model_folder_name=selected_model, \n batch=batch, patch_size=patch_size, max_epochs=max_epochs, \n trainloader=trainloader, valloader=valloader, testloader=testloader, \n device=device, optimizer=optimizer, criterion=criterion, lr=lr, \n transform=available_transformations, save_to=save_to, y_limit=ylimit,\n classification=supervision, supervision=supervision,\n pre_model=pre_model, dropout=dropout)\n","sub_path":"DioniNN.py","file_name":"DioniNN.py","file_ext":"py","file_size_in_byte":4784,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"552842054","text":"import matplotlib.pyplot as plt\nfrom datetime import datetime\n\n\n\nclass exp:\n \n def top_12(module_level_df):\n top_12_module = module_level_df['module_name'].value_counts().keys()[0:12]\n print(\"Experiment 01 :) \")\n print(\"==================================\")\n print(\"TOP 12 COMMITTED MODULES :) \")\n print(\"==========================================================================================================\")\n for i in range(len(top_12_module)): \n # reset the counters for every module so the printed totals are per module\n number_of_commit = 0\n number_of_line_added = 0\n number_of_line_deleted = 0\n for j in range(len(module_level_df['module_name'])): \n if top_12_module[i] == module_level_df['module_name'][j]: \n number_of_commit = number_of_commit + 1\n number_of_line_added = number_of_line_added + module_level_df['number_of_line_added'][j]\n number_of_line_deleted = number_of_line_deleted + module_level_df['number_of_line_deleted'][j] \n print(\"TOP \" + str(i+1) + \":: Module Name: \" + top_12_module[i] + \" || Number of Commits: \" + \n str(number_of_commit) + \" || Number of Lines Added: \" + str(number_of_line_added) + \n \" || Number of Lines Deleted: \" + str(number_of_line_deleted))\n print(\"======================================================================================================\")\n \n \n def commit_per_month(module_level_df): \n month_range = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]\n list_month = []\n list_commit = []\n for i in range(len(month_range)):\n count = 0\n for j in range(len(module_level_df['commit_Date'])):\n month_find = int(module_level_df['commit_Date'][j].replace('/', ' ').split()[1])\n if month_find == month_range[i]:\n count = count + 1\n if count != 0:\n list_month.append(month_range[i])\n list_commit.append(count)\n print(\"Experiment 02 :) \")\n print(\"==================================\")\n print(\"Number of Commits in each month for the last six month period :) \")\n print(\"=========================================\")\n plt.rcParams[\"figure.figsize\"] = (10,3)\n plt.plot(list_month, list_commit)\n plt.xlabel('Month')\n plt.ylabel('Number of Commits in Month')\n plt.title('Number of Commits per Month.') \n plt.show()\n \n\n def module_per_commit(module_level_df): \n # This iterates over the entire set of commits; for a cleaner plot, replace the slice bound\n # \"len(module_level_df['commit_ID'].value_counts())\" with a small number such as 130\n top_commit = module_level_df['commit_ID'].value_counts().keys()[0:len(module_level_df['commit_ID'].value_counts())]\n date_list = []\n number_of_module = []\n for i 
in range(len(top_commit)):\n count = 0\n flag = 0\n for j in range(len(module_level_df['commit_ID'])):\n if top_commit[i] == module_level_df['commit_ID'][j]:\n count = count + 1\n if flag == 0:\n date_list.append(str(module_level_df['commit_Date'][j]))\n flag = 1\n number_of_module.append(count)\n # rearrange the commit dates together with the number of modules committed\n arrange = sorted(zip(date_list, number_of_module), key = lambda x: datetime.strptime(x[0],'%d/%m/%Y'))\n for i in range(len(arrange)):\n date_list[i] = arrange[i][0]\n number_of_module[i] = arrange[i][1]\n print(\"Experiment 04 :)\")\n print(\"==================================\")\n print(\"Number of Modules Committed in a Single Commit for the last six month period :) \")\n print(\"=========================================\")\n plt.rcParams[\"figure.figsize\"] = (15,3)\n plt.xlabel('Month Period')\n plt.ylabel('Number of Modules in Each Commit')\n plt.title('Number of Modules Committed in each Commit.')\n plt.plot(date_list, number_of_module)\n plt.xticks(rotation=90)\n \n \n def code_churn_over_commit(module_level_df):\n # It considers only the top 130 commits for better visualization; to check the entire set of commits,\n # replace 130 with \"len(module_level_df['commit_ID'].value_counts())\"\n all_unique_commits = module_level_df['commit_ID'].value_counts().keys()[0:130]\n list_of_churn = []\n date_list = []\n for i in range(len(all_unique_commits)):\n count = 0\n flag = 0\n for j in range(len(module_level_df['commit_ID'])):\n if all_unique_commits[i] == module_level_df['commit_ID'][j]:\n count = count + module_level_df['number_of_line_added'][j] + module_level_df[\n 'number_of_line_deleted'][j]\n if flag == 0:\n date_list.append(str(module_level_df['commit_Date'][j]))\n flag = 1\n list_of_churn.append(count) \n # rearrange the commit dates together with the churn of each commit\n arrange = sorted(zip(date_list, list_of_churn), key = lambda x: datetime.strptime(x[0],'%d/%m/%Y'))\n for i in range(len(arrange)):\n date_list[i] = arrange[i][0]\n list_of_churn[i] = arrange[i][1]\n print(\"Experiment 03 :)\")\n print(\"==================================\")\n print(\"Code churn in each Commit for the time period :) \")\n print(\"=========================================\")\n plt.rcParams[\"figure.figsize\"] = (20,3)\n plt.xlabel('Month Period')\n plt.ylabel('Churn in Each Commit')\n plt.title('Code Churn in each Commit over the Time Period.') \n plt.plot(date_list, list_of_churn)\n plt.xticks(rotation=90) \n \n \n \n \n \n \n \n \n ","sub_path":"NovaChurn/experimentation.py","file_name":"experimentation.py","file_ext":"py","file_size_in_byte":6091,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"414842372","text":"#!/usr/bin/env python\n\nfrom oslo_config import cfg\nimport oslo_messaging\nimport logging\n\n__author__ = 'Yuvv'\n\n\nclass DREndpoint(object):\n def __init__(self, tpt):\n self.client = oslo_messaging.RPCClient(tpt,\n oslo_messaging.Target(topic='DR'))\n\n def info(self, ctxt, publisher_id, event_type, payload, metadata):\n logging.info('Got a message of type %s from %s',\n event_type,\n publisher_id)\n if publisher_id == 'glance':\n self.glance(ctxt, event_type, payload)\n elif publisher_id == 'nova':\n self.nova(ctxt, event_type, payload)\n elif publisher_id == 'cinder':\n self.cinder(ctxt, event_type, payload)\n elif publisher_id == 'swift':\n self.swift(ctxt, event_type, payload)\n else:\n logging.warning('Unknown info publisher: %s, event: %s. 
Nothing to be done!',\n publisher_id, event_type)\n return\n logging.info('Call remote procedure done.')\n\n def glance(self, ctxt, event_type, payload):\n if event_type == 'insert':\n self.client.cast(ctxt, 'add_image',\n img_id=payload['image_id'],\n img_path=payload['value'])\n elif event_type == 'delete':\n self.client.cast(ctxt, 'del_image',\n img_id=payload['image_id'])\n else:\n logging.warning('Unknown event_type: %s for glance. Nothing to be done!',\n event_type)\n\n def swift(self, ctxt, event_type, payload):\n if event_type == 'c-insert':\n self.client.cast(ctxt, 'add_container',\n container_name=payload['name'],\n account_name=payload['account'])\n elif event_type == 'c-delete':\n self.client.cast(ctxt, 'del_container',\n container_name=payload['name'],\n account_name=payload['account'])\n elif event_type == 'o-insert':\n self.client.cast(ctxt, 'add_object',\n object_name=payload['name'],\n container_name=payload['container'],\n account_name=payload['account'])\n elif event_type == 'o-delete':\n self.client.cast(ctxt, 'del_object',\n object_name=payload['name'],\n container_name=payload['container'],\n account_name=payload['account'])\n else:\n logging.warning('Unknown event type: %s for swift. Nothing to be done.',\n event_type)\n\n def nova(self, ctxt, event_type, payload):\n if event_type == 'insert':\n self.client.cast(ctxt, 'add_instance',\n vm_id=payload['id'])\n elif event_type == 'delete':\n self.client.cast(ctxt, 'del_instance',\n vm_id=payload['id'])\n else:\n logging.warning('Unknown event_type: %s for nova. Nothing to be done!',\n event_type)\n\n def cinder(self, ctxt, event_type, payload):\n if event_type == 'insert':\n self.client.cast(ctxt, 'add_volume',\n vol_id=payload['id'])\n elif event_type == 'delete':\n self.client.cast(ctxt, 'del_volume',\n vol_id=payload['id'])\n else:\n logging.warning('Unknown event_type: %s for cinder. Nothing to be done!',\n event_type)\n\n\ndef main():\n transport = oslo_messaging.get_transport(cfg.CONF)\n targets = [\n oslo_messaging.Target(topic='DR'),\n ]\n endpoints = [\n DREndpoint(transport),\n ]\n\n server = oslo_messaging.get_notification_listener(transport,\n targets,\n endpoints)\n try:\n logging.info('Listener started!')\n server.start()\n server.wait()\n except KeyboardInterrupt:\n server.stop()\n logging.info('Listener stopped!')\n except Exception as err:\n logging.error(str(err))\n\n\nif __name__ == '__main__':\n logging.basicConfig(filename='/var/log/mylog/listener.log', filemode='a',\n level=logging.INFO,\n format='%(asctime)s %(levelname)s: %(message)s',\n datefmt='%m/%d/%Y %I:%M:%S %p')\n main()\n","sub_path":"py-django/DataBackup/mylistener.py","file_name":"mylistener.py","file_ext":"py","file_size_in_byte":4614,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"347642428","text":"import sqlite3\n\nDB_FILENAME = 'available_log.db'\n\n\"\"\"Returns a list of probabilities of finding a parking place around the given time.\nThe probabilities follow this order: ['mosachim', 'shapira', 'north', 'jerusalem']\"\"\"\n\ndef calculat_probs(user_time, delta=10/60):\n # delta is the half-width (in hours) of the time window around user_time\n with sqlite3.connect(DB_FILENAME) as con:\n cur = con.cursor()\n stmt = \"\"\" SELECT AVG(found)\n FROM log_tbl \n WHERE (? - ? < time) AND (time < ? 
+ ?)\n GROUP BY area_id\n ORDER BY area_id\n \"\"\"\n cur.execute(stmt, (user_time, delta, user_time, delta))\n res = [p[0] for p in cur.fetchall()]\n cur.close()\n return res","sub_path":"backend/probability/prob_calc.py","file_name":"prob_calc.py","file_ext":"py","file_size_in_byte":767,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"510955548","text":"import matplotlib.pyplot as plt\nfrom mpl_toolkits.mplot3d import axes3d, Axes3D \nimport numpy as np\nimport cx_Oracle\n\ndef getCprice(s_name):\n conn = cx_Oracle.connect('python/python@localhost:1521/xe')\n cs = conn.cursor()\n # use a bind variable instead of string concatenation to avoid SQL injection\n rs = cs.execute(\"select cprice FROM stock where s_name = :name order by in_time\", name=s_name)\n \n ret = []\n for record in rs:\n ret.append(record[0])\n \n cs.close()\n conn.close()\n return ret\n\n\n\nfig = plt.figure()\nax = fig.gca(projection='3d') \n\nax.set_zlabel('Z')\nax.set_ylabel('Y')\nax.set_xlabel('X')\n\ns_names = [\"삼성전자\",\"LG\",\"SK\",\"SK\",\"SK\"]\n\ncnt = len(getCprice(\"삼성전자\"))\ny = range(cnt)\nx = np.zeros(cnt)\n\nfor i,s_name in enumerate(s_names):\n z = getCprice(s_name)\n ax.plot(x+i, y, z) \n\n# z = getCprice(s_names[0])\n# ax.plot(x+0, y, z) \n# \n# z = getCprice(s_names[1])\n# ax.plot(x+1, y, z) \n# \n# z = getCprice(s_names[2])\n# ax.plot(x+2, y, z) \n\n\n\nplt.show()\n\n\n\n\n\n","sub_path":"HELLOPYTHON/day10/mygraph02.py","file_name":"mygraph02.py","file_ext":"py","file_size_in_byte":938,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"435104425","text":"# -*- coding: utf-8 -*-\n\nfrom __future__ import print_function\n\nimport shutil\nimport tensorflow as tf\ntf.compat.v1.disable_eager_execution()\n\nfrom keras.applications.vgg16 import VGG16\nfrom keras.applications.vgg19 import VGG19\nfrom keras.applications.resnet50 import ResNet50\nfrom keras.layers import Input\nfrom PIL import Image\nfrom utils_tmp import *\nimport os\nimport time\nimport pickle\n\ndef get_model(model_name):\n\n if model_name == 'vgg16':\n model1 = VGG16(input_tensor=input_tensor)\n elif model_name == 'vgg19':\n model1 = VGG19(input_tensor=input_tensor)\n elif model_name == 'resnet50':\n model1 = ResNet50(input_tensor=input_tensor)\n else:\n print('please specify model name')\n os._exit(0)\n\n print(model1.name)\n return model1\n\nif __name__ == '__main__':\n IS_MINMAX = None\n # input image dimensions\n img_rows, img_cols = 224, 224\n input_shape = (img_rows, img_cols, 3)\n\n # define input tensor as a placeholder\n input_tensor = Input(shape=input_shape)\n\n model_name = 'vgg16'\n model1 = get_model(model_name)\n # model_layer_dict1 = init_coverage_tables(model1)\n model_layer_times1 = init_coverage_times(model1) # times of each neuron covered\n model_layer_times2 = init_coverage_times(model1) # update when new image and adversarial images found\n model_layer_value1 = init_coverage_value(model1)\n model_layer_thresold = init_coverage_value(model1)\n\n # start gen inputs\n\n if IS_MINMAX is None:\n # img_dir = './seeds'\n # img_names = os.listdir(img_dir)\n # img_num = len(img_names)\n # model_layer_minmax = init_coverage_minmax(model1) # min,max\n # for img_name in img_names:\n # img_path = os.path.join(img_dir, img_name)\n # image = preprocess_image(img_path)\n # outputval= update_coverage_minmax(image, model1, model_layer_minmax)\n # print(\"get_minmax_done\")\n # IS_MINMAX = outputval\n with open('./minmax.pkl','rb') as f:\n IS_MINMAX = pickle.load(f)\n # print(IS_MINMAX)\n img_dir = './seeds_20'\n img_paths = 
os.listdir(img_dir)\n img_num = len(img_paths)\n\n # e.g.[0,1,2] None for neurons not covered, 0 for covered often, 1 for covered rarely, 2 for high weights\n neuron_select_strategy = ['0']\n threshold = 0.2\n neuron_to_cover_num = 50\n subdir = '0524'\n iteration_times = 50\n neuron_to_cover_weight = 0.5\n predict_weight = 0.5\n learning_step = 0.02\n save_dir = './generated_inputs/' + subdir + '/'\n\n if os.path.exists(save_dir):\n for i in os.listdir(save_dir):\n path_file = os.path.join(save_dir, i)\n if os.path.isfile(path_file):\n os.remove(path_file)\n\n if not os.path.exists(save_dir):\n os.makedirs(save_dir)\n\n # start = time.clock()\n total_time = 0\n total_norm = 0\n adversial_num = 0\n\n total_perturb_adversial = 0\n\n for i in range(img_num):\n\n start_time = time.process_time()\n\n img_list = []\n\n img_name = img_paths[i].split('.')[0]\n print(img_name)\n img_path = os.path.join(img_dir,img_paths[i])\n\n\n print(img_path)\n\n tmp_img = preprocess_image(img_path)\n\n orig_img = tmp_img.copy()\n\n img_list.append(tmp_img)\n\n get_thresold(IS_MINMAX, model_layer_thresold, 1)\n\n # update_coverage(tmp_img, model1, model_layer_times2, threshold)\n update_coverage2(tmp_img, model1, model_layer_times2, model_layer_thresold)\n\n while len(img_list) > 0:\n\n gen_img = img_list[0]\n\n img_list.remove(gen_img)\n\n # first check if input already induces differences\n pred1 = model1.predict(gen_img)\n label1 = np.argmax(pred1[0])\n\n label_top5 = np.argsort(pred1[0])[-5:]\n\n update_coverage_value(gen_img, model1, model_layer_value1)\n # update_coverage(tmp_img, model1, model_layer_times1, threshold)\n update_coverage2(gen_img, model1, model_layer_times1, model_layer_thresold)\n\n orig_label = label1\n orig_pred = pred1\n\n if model1.name == 'resnet50':\n loss_1 = K.mean(model1.get_layer('fc1000').output[..., orig_label])\n loss_2 = K.mean(model1.get_layer('fc1000').output[..., label_top5[-2]])\n loss_3 = K.mean(model1.get_layer('fc1000').output[..., label_top5[-3]])\n loss_4 = K.mean(model1.get_layer('fc1000').output[..., label_top5[-4]])\n loss_5 = K.mean(model1.get_layer('fc1000').output[..., label_top5[-5]])\n\n else:\n loss_1 = K.mean(model1.get_layer('predictions').output[..., orig_label])\n loss_2 = K.mean(model1.get_layer('predictions').output[..., label_top5[-2]])\n loss_3 = K.mean(model1.get_layer('predictions').output[..., label_top5[-3]])\n loss_4 = K.mean(model1.get_layer('predictions').output[..., label_top5[-4]])\n loss_5 = K.mean(model1.get_layer('predictions').output[..., label_top5[-5]])\n\n layer_output = (predict_weight * (loss_2 + loss_3 + loss_4 + loss_5) - loss_1)\n\n # neuron coverage loss\n loss_neuron = neuron_selection(model1, model_layer_times1, model_layer_value1, neuron_select_strategy,\n neuron_to_cover_num,threshold)\n\n # extreme value means the activation value for a neuron can be as high as possible ...\n EXTREME_VALUE = False\n if EXTREME_VALUE:\n neuron_to_cover_weight = 2\n\n layer_output += neuron_to_cover_weight * K.sum(loss_neuron)\n\n # for adversarial image generation\n final_loss = K.mean(layer_output)\n\n # we compute the gradient of the input picture wrt this loss\n grads = normalize(K.gradients(final_loss, input_tensor)[0])\n\n grads_tensor_list = [loss_1, loss_2, loss_3, loss_4, loss_5]\n grads_tensor_list.extend(loss_neuron)\n grads_tensor_list.append(grads)\n # this function returns the loss and grads given the input picture\n\n iterate = K.function([input_tensor], grads_tensor_list)\n\n # we run gradient ascent for some steps\n for 
iters in range(iteration_times):\n\n loss_neuron_list = iterate([gen_img])\n\n perturb = loss_neuron_list[-1] * learning_step\n\n gen_img += perturb\n\n # previous accumulated neuron coverage\n previous_coverage = neuron_covered(model_layer_times1)[2]\n\n pred1 = model1.predict(gen_img)\n label1 = np.argmax(pred1[0])\n\n # update_coverage(gen_img, model1, model_layer_times1, threshold) # for seed selection\n update_coverage2(gen_img, model1, model_layer_times1, model_layer_thresold)\n\n current_coverage = neuron_covered(model_layer_times1)[2]\n\n diff_img = gen_img - orig_img\n\n L2_norm = np.linalg.norm(diff_img)\n\n orig_L2_norm = np.linalg.norm(orig_img)\n\n perturb_adversial = L2_norm / orig_L2_norm\n\n if current_coverage - previous_coverage > 0.01 / (i + 1) and perturb_adversial < 0.02:\n img_list.append(gen_img)\n # print('coverage diff = ', current_coverage - previous_coverage, 'perturb_adversial = ', perturb_adversial)\n\n if label1 != orig_label:\n # update_coverage(gen_img, model1, model_layer_times2, threshold)\n update_coverage2(gen_img, model1, model_layer_times2, model_layer_thresold)\n\n total_norm += L2_norm\n\n total_perturb_adversial += perturb_adversial\n\n # print('L2 norm : ' + str(L2_norm))\n # print('ratio perturb = ', perturb_adversial)\n\n gen_img_tmp = gen_img.copy()\n\n gen_img_deprocessed = deprocess_image(gen_img_tmp)\n\n image = Image.fromarray(gen_img_deprocessed)\n\n save_img1 = save_dir + decode_label(pred1) + '-' + decode_label(orig_pred) + '-' + img_name + '-' + str(get_signature()) + '.png'\n print(save_img1)\n image.save(save_img1, 'PNG')\n\n adversial_num += 1\n\n end_time = time.process_time()\n\n print('covered neurons percentage %d neurons %.3f'\n % (len(model_layer_times2), neuron_covered(model_layer_times2)[2]))\n\n duration = end_time - start_time\n\n print('used time : ' + str(duration))\n\n total_time += duration\n\n print('covered neurons percentage %d neurons %.3f'\n % (len(model_layer_times2), neuron_covered(model_layer_times2)[2]))\n\n print('total_time = ' + str(total_time))\n print('adversial num = ' + str(adversial_num))\n if adversial_num > 0:\n # avoid division by zero when no adversarial example was found\n print('average_norm = ' + str(total_norm / adversial_num))\n print('average perb adversial = ' + str(total_perturb_adversial / adversial_num))\n\n\n\n","sub_path":"DLFuzz/ImageNet/gen_diff_v2.py","file_name":"gen_diff_v2.py","file_ext":"py","file_size_in_byte":9081,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}